repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/mip_generation/mod.rs | crates/bevy_core_pipeline/src/mip_generation/mod.rs | //! Downsampling of textures to produce mipmap levels.
//!
//! This module implements variations on the [AMD FidelityFX single-pass
//! downsampling] shader. It's used for generating mipmaps for textures
//! ([`MipGenerationJobs`]) and for creating hierarchical Z-buffers (the
//! [`experimental::depth`] module).
//!
//! See the documentation for [`MipGenerationJobs`] and [`experimental::depth`]
//! for more information.
//!
//! [AMD FidelityFX single-pass downsampling]: https://gpuopen.com/fidelityfx-spd/
use crate::core_3d::{
graph::{Core3d, Node3d},
prepare_core_3d_depth_textures,
};
use crate::mip_generation::experimental::depth::{
self, DownsampleDepthNode, DownsampleDepthPipeline, DownsampleDepthPipelines,
};
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetId, Assets, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
prelude::resource_exists,
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::Image;
use bevy_log::error;
use bevy_math::{vec2, Vec2};
use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet};
use bevy_render::{
diagnostic::RecordDiagnostics as _,
render_asset::RenderAssets,
render_resource::{
binding_types::uniform_buffer, BindGroupLayoutDescriptor, FilterMode, ShaderType,
TextureFormatFeatureFlags, UniformBuffer,
},
renderer::{RenderAdapter, RenderQueue},
settings::WgpuFeatures,
texture::GpuImage,
RenderStartup,
};
use bevy_render::{
render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt},
render_resource::{
binding_types::{sampler, texture_2d, texture_storage_2d},
BindGroup, BindGroupEntries, BindGroupLayoutEntries, CachedComputePipelineId,
ComputePassDescriptor, ComputePipelineDescriptor, Extent3d, PipelineCache, Sampler,
SamplerBindingType, SamplerDescriptor, ShaderStages, SpecializedComputePipelines,
StorageTextureAccess, TextureAspect, TextureDescriptor, TextureDimension, TextureFormat,
TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension,
},
renderer::{RenderContext, RenderDevice},
Render, RenderApp, RenderSystems,
};
use bevy_shader::{Shader, ShaderDefVal};
use bevy_utils::default;
/// Experimental downsampling features, currently the hierarchical Z-buffer
/// generation in [`experimental::depth`].
pub mod experimental;
/// A resource that stores the shaders that perform downsampling.
///
/// [`MipGenerationPlugin::build`] inserts a copy of this resource into both
/// the main world and the render world.
#[derive(Clone, Resource)]
pub struct DownsampleShaders {
    /// The experimental shader that downsamples depth
    /// (`downsample_depth.wgsl`).
    pub depth: Handle<Shader>,
    /// The shaders that perform downsampling of color textures.
    ///
    /// This table maps a [`TextureFormat`] to the shader that performs
    /// downsampling for textures in that format. Each entry is a copy of
    /// `downsample.wgsl` that was eagerly specialized for its texture format
    /// at plugin build time.
    pub general: HashMap<TextureFormat, Handle<Shader>>,
}
// The number of storage textures required to combine the bind groups in the
// downsampling shader: one binding per output mip level (mips 1-12; see
// `create_downsampling_bind_group_layouts`).
//
// NOTE(review): presumably checked against the device's per-stage storage
// texture limit by `can_combine_downsampling_bind_groups`, which is defined
// elsewhere in this file — confirm.
const REQUIRED_STORAGE_TEXTURES: u32 = 12;
/// All texture formats that we can perform downsampling for.
///
/// This is a list of pairs, each of which consists of the [`TextureFormat`] and
/// the WGSL name for that texture format. The WGSL name is pasted into the
/// `downsample.wgsl` template when the shaders are specialized.
///
/// Depth and multisampled formats are deliberately absent; depth downsampling
/// has its own dedicated path in [`experimental::depth`] (see also the
/// `expect` in `create_downsampling_bind_group_layouts`).
///
/// The comprehensive list of WGSL names for texture formats can be found in
/// [the relevant section of the WGSL specification].
///
/// [the relevant section of the WGSL specification]:
/// https://www.w3.org/TR/WGSL/#texel-formats
static TEXTURE_FORMATS: [(TextureFormat, &str); 40] = [
    // Four-channel 8-bit formats.
    (TextureFormat::Rgba8Unorm, "rgba8unorm"),
    (TextureFormat::Rgba8Snorm, "rgba8snorm"),
    (TextureFormat::Rgba8Uint, "rgba8uint"),
    (TextureFormat::Rgba8Sint, "rgba8sint"),
    // Four-channel 16-bit formats.
    (TextureFormat::Rgba16Unorm, "rgba16unorm"),
    (TextureFormat::Rgba16Snorm, "rgba16snorm"),
    (TextureFormat::Rgba16Uint, "rgba16uint"),
    (TextureFormat::Rgba16Sint, "rgba16sint"),
    (TextureFormat::Rgba16Float, "rgba16float"),
    // Two-channel 8-bit formats.
    (TextureFormat::Rg8Unorm, "rg8unorm"),
    (TextureFormat::Rg8Snorm, "rg8snorm"),
    (TextureFormat::Rg8Uint, "rg8uint"),
    (TextureFormat::Rg8Sint, "rg8sint"),
    // Two-channel 16-bit formats.
    (TextureFormat::Rg16Unorm, "rg16unorm"),
    (TextureFormat::Rg16Snorm, "rg16snorm"),
    (TextureFormat::Rg16Uint, "rg16uint"),
    (TextureFormat::Rg16Sint, "rg16sint"),
    (TextureFormat::Rg16Float, "rg16float"),
    // 32-bit formats.
    (TextureFormat::R32Uint, "r32uint"),
    (TextureFormat::R32Sint, "r32sint"),
    (TextureFormat::R32Float, "r32float"),
    (TextureFormat::Rg32Uint, "rg32uint"),
    (TextureFormat::Rg32Sint, "rg32sint"),
    (TextureFormat::Rg32Float, "rg32float"),
    (TextureFormat::Rgba32Uint, "rgba32uint"),
    (TextureFormat::Rgba32Sint, "rgba32sint"),
    (TextureFormat::Rgba32Float, "rgba32float"),
    (TextureFormat::Bgra8Unorm, "bgra8unorm"),
    // Single-channel 8-bit formats.
    (TextureFormat::R8Unorm, "r8unorm"),
    (TextureFormat::R8Snorm, "r8snorm"),
    (TextureFormat::R8Uint, "r8uint"),
    (TextureFormat::R8Sint, "r8sint"),
    // Single-channel 16-bit formats.
    (TextureFormat::R16Unorm, "r16unorm"),
    (TextureFormat::R16Snorm, "r16snorm"),
    (TextureFormat::R16Uint, "r16uint"),
    (TextureFormat::R16Sint, "r16sint"),
    (TextureFormat::R16Float, "r16float"),
    // Packed formats.
    (TextureFormat::Rgb10a2Unorm, "rgb10a2unorm"),
    (TextureFormat::Rgb10a2Uint, "rgb10a2uint"),
    (TextureFormat::Rg11b10Ufloat, "rg11b10ufloat"),
];
/// A render-world resource that stores a list of [`Image`]s that will have
/// mipmaps generated for them.
///
/// You can add images to this list via the [`MipGenerationJobs::add`] method,
/// in the render world. Note that this, by itself, isn't enough to generate
/// the mipmaps; you must also add a [`MipGenerationNode`] to the render graph.
///
/// This resource exists only in the render world, not the main world.
/// Therefore, you typically want to place images in this resource in a system
/// that runs in the [`bevy_render::ExtractSchedule`] of the
/// [`bevy_render::RenderApp`].
///
/// See `dynamic_mip_generation` for an example of usage.
// Maps each phase ID to the list of images scheduled for that phase.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct MipGenerationJobs(pub HashMap<MipGenerationPhaseId, MipGenerationPhase>);
impl MipGenerationJobs {
    /// Schedules the generation of mipmaps for an image.
    ///
    /// The mipmaps are produced when the [`MipGenerationNode`] for the given
    /// [`MipGenerationPhaseId`] executes. Bevy doesn't add such a node to the
    /// render graph by default; you're responsible for adding it yourself.
    pub fn add(&mut self, phase: MipGenerationPhaseId, image: impl Into<AssetId<Image>>) {
        let image_id = image.into();
        let phase_jobs = self.0.entry(phase).or_default();
        phase_jobs.0.push(image_id);
    }
}
/// The list of [`Image`]s that will have mipmaps generated for them during a
/// specific phase.
///
/// The [`MipGenerationJobs`] resource stores one of these lists per mipmap
/// generation phase.
///
/// To add images to this list, use [`MipGenerationJobs::add`] in a render app
/// system. The `Deref`/`DerefMut` derives let the phase be treated directly
/// as its inner `Vec` of asset IDs.
#[derive(Default, Deref, DerefMut)]
pub struct MipGenerationPhase(pub Vec<AssetId<Image>>);
/// Identifies a *phase* during which mipmaps will be generated for an image.
///
/// Sometimes, mipmaps must be generated at a specific time during the rendering
/// process. This typically occurs when a camera renders to the image and then
/// the image is sampled later in the frame as a second camera renders the
/// scene. In this case, the mipmaps must be generated after the first camera
/// renders to the image rendered to but before the second camera's rendering
/// samples the image. To express these kinds of dependencies, you group images
/// into *phases* and schedule [`MipGenerationNode`]s in the render graph
/// targeting each phase at the appropriate time.
///
/// Each phase has an ID, which is an arbitrary 32-bit integer. You may specify
/// any value you wish as a phase ID, so long as the [`MipGenerationNode`] that
/// generates mipmaps for the images in that phase uses the same ID.
// `Eq + Hash` so the ID can key the `HashMap` inside `MipGenerationJobs`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct MipGenerationPhaseId(pub u32);
/// Stores all render pipelines and bind groups associated with the mipmap
/// generation shader.
///
/// The `prepare_mip_generator_pipelines` system populates this resource lazily
/// as new textures are scheduled, and expires cached bind groups for images
/// that are no longer scheduled each frame.
#[derive(Resource, Default)]
struct MipGenerationPipelines {
    /// The pipeline for each texture format.
    ///
    /// Note that pipelines can be shared among all images that use a single
    /// texture format.
    pipelines: HashMap<TextureFormat, MipGenerationTextureFormatPipelines>,
    /// The bind group for each image.
    ///
    /// These are cached from frame to frame if the same image needs mips
    /// generated for it on immediately-consecutive frames.
    bind_groups: HashMap<AssetId<Image>, MipGenerationJobBindGroups>,
}
/// The compute pipelines and bind group layouts for the single-pass
/// downsampling shader for a single texture format.
///
/// Note that, despite the name, the single-pass downsampling shader has two
/// passes, not one. This is because WGSL doesn't presently support
/// globally-coherent buffers; the only way to have a synchronization point is
/// to issue a second dispatch.
struct MipGenerationTextureFormatPipelines {
    /// The bind group layout for the first pass of the downsampling shader.
    ///
    /// This is a *descriptor*; the actual layout is resolved through
    /// `PipelineCache::get_bind_group_layout` when bind groups are created.
    downsampling_bind_group_layout_pass_1: BindGroupLayoutDescriptor,
    /// The bind group layout for the second pass of the downsampling shader.
    ///
    /// On platforms where the bind groups can be combined, this is a clone of
    /// the pass-1 layout.
    downsampling_bind_group_layout_pass_2: BindGroupLayoutDescriptor,
    /// The compute pipeline for the first pass of the downsampling shader.
    downsampling_pipeline_pass_1: CachedComputePipelineId,
    /// The compute pipeline for the second pass of the downsampling shader.
    downsampling_pipeline_pass_2: CachedComputePipelineId,
}
/// Bind groups for the downsampling shader associated with a single texture.
///
/// On platforms where the two downsampling bind groups can be combined, both
/// fields are clones of a single combined bind group (see
/// `create_downsampling_bind_groups`).
struct MipGenerationJobBindGroups {
    /// The bind group for the first downsampling compute pass.
    downsampling_bind_group_pass_1: BindGroup,
    /// The bind group for the second downsampling compute pass.
    downsampling_bind_group_pass_2: BindGroup,
}
/// Constants for the single-pass downsampling shader generated on the CPU and
/// read on the GPU.
///
/// These constants are stored within a uniform buffer. There's one such uniform
/// buffer per image.
#[derive(Clone, Copy, ShaderType)]
#[repr(C)]
pub struct DownsamplingConstants {
    /// The number of mip levels that this image possesses.
    pub mips: u32,
    /// The reciprocal of the size of the first mipmap level for this texture.
    ///
    /// NOTE(review): presumably used by the shader to convert texel
    /// coordinates to normalized UVs — confirm against `downsample.wgsl`.
    pub inverse_input_size: Vec2,
    /// Explicit padding so that the Rust layout matches the corresponding
    /// WGSL struct.
    pub _padding: u32,
}
/// A plugin that allows Bevy to repeatedly downsample textures to create
/// mipmaps.
///
/// Generation of mipmaps happens on the GPU. To actually run the work,
/// schedule images via [`MipGenerationJobs`] and add a [`MipGenerationNode`]
/// to the render graph.
pub struct MipGenerationPlugin;
impl Plugin for MipGenerationPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "experimental/downsample_depth.wgsl");
        embedded_asset!(app, "downsample.wgsl");
        let depth_shader = load_embedded_asset!(app, "experimental/downsample_depth.wgsl");
        // We don't have string-valued shader definitions in `naga_oil`, so we
        // use a text-pasting hack. The `downsample.wgsl` shader is eagerly
        // specialized for each texture format by replacing `##TEXTURE_FORMAT##`
        // with each possible format.
        //
        // When we have WESL, we should probably revisit this.
        let mut shader_assets = app.world_mut().resource_mut::<Assets<Shader>>();
        let shader_template_source = include_str!("downsample.wgsl");
        let general_shaders: HashMap<_, _> = TEXTURE_FORMATS
            .iter()
            .map(|(texture_format, identifier)| {
                let shader_source =
                    shader_template_source.replace("##TEXTURE_FORMAT##", identifier);
                (
                    *texture_format,
                    shader_assets.add(Shader::from_wgsl(shader_source, "downsample.wgsl")),
                )
            })
            .collect();
        let downsample_shaders = DownsampleShaders {
            depth: depth_shader,
            general: general_shaders,
        };
        // The shaders are needed in both the main world and the render world.
        app.insert_resource(downsample_shaders.clone());
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<SpecializedComputePipelines<DownsampleDepthPipeline>>()
            .init_resource::<MipGenerationJobs>()
            .init_resource::<MipGenerationPipelines>()
            .insert_resource(downsample_shaders)
            .add_render_graph_node::<DownsampleDepthNode>(Core3d, Node3d::EarlyDownsampleDepth)
            .add_render_graph_node::<DownsampleDepthNode>(Core3d, Node3d::LateDownsampleDepth)
            // The early downsample-depth pass runs after the early prepasses
            // and before the late prepasses.
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                ),
            )
            // The late downsample-depth pass runs during post-processing, at
            // the end of the main phase.
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::StartMainPassPostProcessing,
                    Node3d::LateDownsampleDepth,
                    Node3d::EndMainPassPostProcessing,
                ),
            )
            .add_systems(RenderStartup, depth::init_depth_pyramid_dummy_texture)
            .add_systems(
                Render,
                depth::create_downsample_depth_pipelines.in_set(RenderSystems::Prepare),
            )
            // The view depth pyramids must be created before their bind
            // groups, and both depend on the core-3D depth textures.
            .add_systems(
                Render,
                (
                    depth::prepare_view_depth_pyramids,
                    depth::prepare_downsample_depth_view_bind_groups,
                )
                    .chain()
                    .in_set(RenderSystems::PrepareResources)
                    .run_if(resource_exists::<DownsampleDepthPipelines>)
                    .after(prepare_core_3d_depth_textures),
            )
            .add_systems(
                Render,
                prepare_mip_generator_pipelines.in_set(RenderSystems::PrepareResources),
            )
            // Jobs are consumed once per frame; clear them during cleanup.
            .add_systems(
                Render,
                reset_mip_generation_jobs.in_set(RenderSystems::Cleanup),
            );
    }
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // This needs to be done here so that we have access to the
        // `RenderDevice`.
        render_app.init_resource::<MipGenerationResources>();
    }
}
/// Global GPU resources that the mip generation pipelines use.
///
/// At the moment, the only such resource is a texture sampler.
///
/// This resource is initialized in [`Plugin::finish`] rather than
/// [`Plugin::build`] because its construction requires the `RenderDevice`.
#[derive(Resource)]
struct MipGenerationResources {
    /// The texture sampler that the single-pass downsampling pipelines use to
    /// sample the source texture.
    sampler: Sampler,
}
impl FromWorld for MipGenerationResources {
    /// Creates the shared downsampling sampler.
    ///
    /// `create_sampler` only needs shared access to the render device, so we
    /// fetch it with `resource` rather than `resource_mut`, avoiding a
    /// needless exclusive borrow.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();
        MipGenerationResources {
            sampler: render_device.create_sampler(&SamplerDescriptor {
                label: Some("mip generation sampler"),
                // Linear min/mag filtering; mip filtering is `Nearest`.
                // NOTE(review): presumably the shader relies on bilinear taps
                // when reducing the source mip — confirm against
                // `downsample.wgsl`.
                mag_filter: FilterMode::Linear,
                min_filter: FilterMode::Linear,
                mipmap_filter: FilterMode::Nearest,
                ..default()
            }),
        }
    }
}
/// A [`Node`] for use in the render graph that generates mipmaps for a single
/// [`MipGenerationPhaseId`].
///
/// In order to execute a job in [`MipGenerationJobs`], a [`MipGenerationNode`]
/// for the phase that the job belongs to must be added to the
/// [`bevy_render::render_graph::RenderGraph`]. The phased nature of mipmap
/// generation allows precise control over the time when mipmaps are generated
/// for each image. Your application should use
/// [`bevy_render::render_graph::RenderGraph::add_node_edge`] to order each
/// [`MipGenerationNode`] relative to other systems so that the mipmaps will be
/// generated after any passes that *write* to the images in question but before
/// any shaders that *read* from those images execute.
///
/// See `dynamic_mip_generation` for an example of use.
// The wrapped value is the phase ID that this node is responsible for.
#[derive(Deref, DerefMut)]
pub struct MipGenerationNode(pub MipGenerationPhaseId);
impl Node for MipGenerationNode {
    /// Encodes the two downsampling compute passes for every job scheduled in
    /// this node's phase.
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // `self.0` is this node's phase ID; only jobs scheduled for that
        // phase are processed here.
        let mip_generation_jobs = world.resource::<MipGenerationJobs>();
        let Some(mip_generation_phase) = mip_generation_jobs.get(&self.0) else {
            return Ok(());
        };
        if mip_generation_phase.is_empty() {
            // Quickly bail out if there's nothing to do.
            return Ok(());
        }
        let pipeline_cache = world.resource::<PipelineCache>();
        let mip_generation_bind_groups = world.resource::<MipGenerationPipelines>();
        let gpu_images = world.resource::<RenderAssets<GpuImage>>();
        let diagnostics = render_context.diagnostic_recorder();
        for mip_generation_job in mip_generation_phase.iter() {
            // Silently skip jobs whose GPU resources aren't ready yet. Bind
            // groups and pipelines are created by
            // `prepare_mip_generator_pipelines`, and the compute pipelines may
            // still be compiling in the pipeline cache.
            let Some(gpu_image) = gpu_images.get(*mip_generation_job) else {
                continue;
            };
            let Some(mip_generation_job_bind_groups) = mip_generation_bind_groups
                .bind_groups
                .get(mip_generation_job)
            else {
                continue;
            };
            let Some(mip_generation_pipelines) = mip_generation_bind_groups
                .pipelines
                .get(&gpu_image.texture_format)
            else {
                continue;
            };
            // Fetch the mip generation pipelines.
            let (Some(mip_generation_pipeline_pass_1), Some(mip_generation_pipeline_pass_2)) = (
                pipeline_cache
                    .get_compute_pipeline(mip_generation_pipelines.downsampling_pipeline_pass_1),
                pipeline_cache
                    .get_compute_pipeline(mip_generation_pipelines.downsampling_pipeline_pass_2),
            ) else {
                continue;
            };
            // Perform the first downsampling pass.
            {
                let mut compute_pass_1 =
                    render_context
                        .command_encoder()
                        .begin_compute_pass(&ComputePassDescriptor {
                            label: Some("mip generation pass 1"),
                            timestamp_writes: None,
                        });
                let pass_span = diagnostics.pass_span(&mut compute_pass_1, "mip generation pass 1");
                compute_pass_1.set_pipeline(mip_generation_pipeline_pass_1);
                compute_pass_1.set_bind_group(
                    0,
                    &mip_generation_job_bind_groups.downsampling_bind_group_pass_1,
                    &[],
                );
                // One workgroup per 64×64 texel tile of the base mip.
                // NOTE(review): the tile size presumably matches the
                // workgroup footprint in `downsample.wgsl` — confirm if that
                // shader changes.
                compute_pass_1.dispatch_workgroups(
                    gpu_image.size.width.div_ceil(64),
                    gpu_image.size.height.div_ceil(64),
                    1,
                );
                pass_span.end(&mut compute_pass_1);
            }
            // Perform the second downsampling pass. The split into two
            // dispatches provides the synchronization point that WGSL's lack
            // of globally-coherent buffers makes necessary (see
            // `MipGenerationTextureFormatPipelines`).
            {
                let mut compute_pass_2 =
                    render_context
                        .command_encoder()
                        .begin_compute_pass(&ComputePassDescriptor {
                            label: Some("mip generation pass 2"),
                            timestamp_writes: None,
                        });
                let pass_span = diagnostics.pass_span(&mut compute_pass_2, "mip generation pass 2");
                compute_pass_2.set_pipeline(mip_generation_pipeline_pass_2);
                compute_pass_2.set_bind_group(
                    0,
                    &mip_generation_job_bind_groups.downsampling_bind_group_pass_2,
                    &[],
                );
                // One workgroup per 256×256 texel tile of the base mip.
                compute_pass_2.dispatch_workgroups(
                    gpu_image.size.width.div_ceil(256),
                    gpu_image.size.height.div_ceil(256),
                    1,
                );
                pass_span.end(&mut compute_pass_2);
            }
        }
        Ok(())
    }
}
/// Creates all bind group layouts, bind groups, and pipelines for all mipmap
/// generation jobs that have been enqueued this frame.
///
/// Bind group layouts, bind groups, and pipelines are all cached for images
/// that are being processed every frame. Pipelines are cached per texture
/// format; bind groups are cached per image and expired as soon as the image
/// stops appearing in [`MipGenerationJobs`].
fn prepare_mip_generator_pipelines(
    mip_generation_bind_groups: ResMut<MipGenerationPipelines>,
    mip_generation_resources: Res<MipGenerationResources>,
    mip_generation_jobs: Res<MipGenerationJobs>,
    pipeline_cache: Res<PipelineCache>,
    gpu_images: Res<RenderAssets<GpuImage>>,
    downsample_shaders: Res<DownsampleShaders>,
    render_adapter: Res<RenderAdapter>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    let mip_generation_pipelines = mip_generation_bind_groups.into_inner();
    // Check to see whether we can combine downsampling bind groups on this
    // hardware and driver.
    let combine_downsampling_bind_groups =
        can_combine_downsampling_bind_groups(&render_adapter, &render_device);
    // Make a record of all jobs that we saw so that we can expire cached bind
    // groups at the end of this process.
    let mut all_source_images = HashSet::new();
    for mip_generation_phase in mip_generation_jobs.values() {
        for mip_generation_job in mip_generation_phase.iter() {
            // The image may not have been uploaded to the GPU yet; if so,
            // skip it for now.
            let Some(gpu_image) = gpu_images.get(*mip_generation_job) else {
                continue;
            };
            // Note this job.
            all_source_images.insert(mip_generation_job);
            // Create pipelines for this texture format if necessary. We have at
            // most one pipeline per texture format, regardless of the number of
            // jobs that use that texture format that there are.
            let Some(pipelines) = get_or_create_mip_generation_pipelines(
                &render_device,
                &pipeline_cache,
                &downsample_shaders,
                &mut mip_generation_pipelines.pipelines,
                gpu_image.texture_format,
                mip_generation_job,
                combine_downsampling_bind_groups,
            ) else {
                continue;
            };
            // Create bind groups for the job if necessary. An occupied entry
            // means the bind groups cached on a previous frame are reused.
            let Entry::Vacant(vacant_entry) = mip_generation_pipelines
                .bind_groups
                .entry(*mip_generation_job)
            else {
                continue;
            };
            // Upload the per-image shader constants (mip count and inverse
            // input size; see `DownsamplingConstants`).
            let downsampling_constants_buffer =
                create_downsampling_constants_buffer(&render_device, &render_queue, gpu_image);
            let (downsampling_bind_group_pass_1, downsampling_bind_group_pass_2) =
                create_downsampling_bind_groups(
                    &render_device,
                    &pipeline_cache,
                    &mip_generation_resources,
                    &downsampling_constants_buffer,
                    pipelines,
                    gpu_image,
                    combine_downsampling_bind_groups,
                );
            vacant_entry.insert(MipGenerationJobBindGroups {
                downsampling_bind_group_pass_1,
                downsampling_bind_group_pass_2,
            });
        }
    }
    // Expire all bind groups for jobs that we didn't see this frame.
    //
    // Note that this logic ensures that we don't recreate bind groups for
    // images that are updated every frame.
    mip_generation_pipelines
        .bind_groups
        .retain(|asset_id, _| all_source_images.contains(asset_id));
}
/// Returns the [`MipGenerationTextureFormatPipelines`] for a single texture
/// format, creating it if necessary.
///
/// The [`MipGenerationTextureFormatPipelines`] that this function returns
/// contains both the bind group layouts and pipelines for all invocations of
/// the single-pass downsampling shader. Note that all images that share a
/// texture format can share the same [`MipGenerationTextureFormatPipelines`]
/// instance.
///
/// Returns [`None`] (after logging an error) if no downsample shader exists
/// for `texture_format`. The `mip_generation_job` parameter is used only to
/// make that error message more informative.
fn get_or_create_mip_generation_pipelines<'a>(
    render_device: &RenderDevice,
    pipeline_cache: &PipelineCache,
    downsample_shaders: &DownsampleShaders,
    mip_generation_pipelines: &'a mut HashMap<TextureFormat, MipGenerationTextureFormatPipelines>,
    texture_format: TextureFormat,
    mip_generation_job: &AssetId<Image>,
    combine_downsampling_bind_groups: bool,
) -> Option<&'a MipGenerationTextureFormatPipelines> {
    match mip_generation_pipelines.entry(texture_format) {
        Entry::Vacant(vacant_entry) => {
            // `DownsampleShaders::general` only has entries for the formats
            // listed in `TEXTURE_FORMATS`, so this lookup fails for
            // unsupported formats.
            let Some(downsample_shader) = downsample_shaders.general.get(&texture_format) else {
                error!(
                    "Attempted to generate mips for texture {:?} with format {:?}, but no \
                    downsample shader was available for that texture format",
                    mip_generation_job, texture_format
                );
                return None;
            };
            // Build the layouts first; the pipelines reference them.
            let (downsampling_bind_group_layout_pass_1, downsampling_bind_group_layout_pass_2) =
                create_downsampling_bind_group_layouts(
                    texture_format,
                    combine_downsampling_bind_groups,
                );
            let (downsampling_pipeline_pass_1, downsampling_pipeline_pass_2) =
                create_downsampling_pipelines(
                    render_device,
                    pipeline_cache,
                    &downsampling_bind_group_layout_pass_1,
                    &downsampling_bind_group_layout_pass_2,
                    downsample_shader,
                    texture_format,
                    combine_downsampling_bind_groups,
                );
            Some(vacant_entry.insert(MipGenerationTextureFormatPipelines {
                downsampling_bind_group_layout_pass_1,
                downsampling_bind_group_layout_pass_2,
                downsampling_pipeline_pass_1,
                downsampling_pipeline_pass_2,
            }))
        }
        Entry::Occupied(occupied_entry) => Some(occupied_entry.into_mut()),
    }
}
/// Creates the [`BindGroupLayoutDescriptor`]s for the single-pass downsampling
/// shader for a single texture format.
///
/// Returns the layouts for the first and second compute passes, in that
/// order. When `combine_downsampling_bind_groups` is true, both returned
/// layouts are clones of a single combined layout.
fn create_downsampling_bind_group_layouts(
    texture_format: TextureFormat,
    combine_downsampling_bind_groups: bool,
) -> (BindGroupLayoutDescriptor, BindGroupLayoutDescriptor) {
    // `sample_type` only fails for depth and multisampled formats, which
    // never appear in `TEXTURE_FORMATS`, so this `expect` can't fire for any
    // format we generate shaders for.
    let texture_sample_type = texture_format.sample_type(None, None).expect(
        "Depth and multisample texture formats shouldn't have mip generation shaders to begin with",
    );
    // Write-only storage binding used for every output mip except mip 6.
    let mips_storage = texture_storage_2d(texture_format, StorageTextureAccess::WriteOnly);
    if combine_downsampling_bind_groups {
        // One layout serves both passes: the sampler, the constants, the
        // input texture, and a storage binding for each of the 12 output
        // mips.
        let bind_group_layout_descriptor = BindGroupLayoutDescriptor::new(
            "combined mip generation bind group layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<DownsamplingConstants>(false),
                    texture_2d(texture_sample_type),
                    mips_storage, // 1
                    mips_storage, // 2
                    mips_storage, // 3
                    mips_storage, // 4
                    mips_storage, // 5
                    // Mip 6 is read-write because the second half of the
                    // shader reads it back as its input (compare the split
                    // layouts below, where mip 6 is pass 2's input texture).
                    texture_storage_2d(texture_format, StorageTextureAccess::ReadWrite), // 6
                    mips_storage, // 7
                    mips_storage, // 8
                    mips_storage, // 9
                    mips_storage, // 10
                    mips_storage, // 11
                    mips_storage, // 12
                ),
            ),
        );
        return (
            bind_group_layout_descriptor.clone(),
            bind_group_layout_descriptor,
        );
    }
    // If we got here, we use a split layout. The first pass reads mip 0 and
    // writes mip levels [1, 6]; the second pass reads mip 6 and writes mip
    // levels [7, 12].
    let bind_group_layout_descriptor_pass_1 = BindGroupLayoutDescriptor::new(
        "mip generation bind group layout, pass 1",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                sampler(SamplerBindingType::Filtering),
                uniform_buffer::<DownsamplingConstants>(false),
                // Input mip 0
                texture_2d(texture_sample_type),
                mips_storage, // 1
                mips_storage, // 2
                mips_storage, // 3
                mips_storage, // 4
                mips_storage, // 5
                mips_storage, // 6
            ),
        ),
    );
    let bind_group_layout_descriptor_pass_2 = BindGroupLayoutDescriptor::new(
        "mip generation bind group layout, pass 2",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                sampler(SamplerBindingType::Filtering),
                uniform_buffer::<DownsamplingConstants>(false),
                // Input mip 6
                texture_2d(texture_sample_type),
                mips_storage, // 7
                mips_storage, // 8
                mips_storage, // 9
                mips_storage, // 10
                mips_storage, // 11
                mips_storage, // 12
            ),
        ),
    );
    (
        bind_group_layout_descriptor_pass_1,
        bind_group_layout_descriptor_pass_2,
    )
}
/// Creates the bind groups for the single-pass downsampling shader associated
/// with a single texture.
///
/// Depending on whether bind groups can be combined on this platform, this
/// returns either two copies of a single bind group or two separate bind
/// groups.
fn create_downsampling_bind_groups(
render_device: &RenderDevice,
pipeline_cache: &PipelineCache,
mip_generation_resources: &MipGenerationResources,
downsampling_constants_buffer: &UniformBuffer<DownsamplingConstants>,
pipelines: &MipGenerationTextureFormatPipelines,
gpu_image: &GpuImage,
combine_downsampling_bind_groups: bool,
) -> (BindGroup, BindGroup) {
let input_texture_view_pass_1 = gpu_image.texture.create_view(&TextureViewDescriptor {
label: Some("mip generation input texture view, pass 1"),
format: Some(gpu_image.texture.format()),
dimension: Some(TextureViewDimension::D2),
base_mip_level: 0,
mip_level_count: Some(1),
..default()
});
// If we can combine downsampling bind groups on this platform, we only need
// one bind group.
if combine_downsampling_bind_groups {
let bind_group = render_device.create_bind_group(
Some("combined mip generation bind group"),
&pipeline_cache.get_bind_group_layout(&pipelines.downsampling_bind_group_layout_pass_1),
&BindGroupEntries::sequential((
&mip_generation_resources.sampler,
downsampling_constants_buffer,
&input_texture_view_pass_1,
&get_mip_storage_view(render_device, gpu_image, 1),
&get_mip_storage_view(render_device, gpu_image, 2),
&get_mip_storage_view(render_device, gpu_image, 3),
&get_mip_storage_view(render_device, gpu_image, 4),
&get_mip_storage_view(render_device, gpu_image, 5),
&get_mip_storage_view(render_device, gpu_image, 6),
&get_mip_storage_view(render_device, gpu_image, 7),
&get_mip_storage_view(render_device, gpu_image, 8),
&get_mip_storage_view(render_device, gpu_image, 9),
&get_mip_storage_view(render_device, gpu_image, 10),
&get_mip_storage_view(render_device, gpu_image, 11),
&get_mip_storage_view(render_device, gpu_image, 12),
)),
);
return (bind_group.clone(), bind_group);
}
// Otherwise, create two separate bind groups.
let input_texture_view_pass_2 = gpu_image.texture.create_view(&TextureViewDescriptor {
label: Some("mip generation input texture view, pass 2"),
format: Some(gpu_image.texture.format()),
dimension: Some(TextureViewDimension::D2),
base_mip_level: gpu_image.mip_level_count.min(6),
mip_level_count: Some(1),
..default()
});
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/mip_generation/experimental/depth.rs | crates/bevy_core_pipeline/src/mip_generation/experimental/depth.rs | //! Generation of hierarchical Z buffers for occlusion culling.
//!
//! This is marked experimental because the shader is designed only for
//! power-of-two texture sizes and is slightly incorrect for non-power-of-two
//! depth buffer sizes.
use core::array;
use crate::mip_generation::DownsampleShaders;
use bevy_asset::Handle;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
prelude::Without,
query::{Or, QueryState, With},
resource::Resource,
system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_math::{uvec2, UVec2, Vec4Swizzles as _};
use bevy_render::{
batching::gpu_preprocessing::GpuPreprocessingSupport,
render_resource::BindGroupLayoutDescriptor,
};
use bevy_render::{
experimental::occlusion_culling::{
OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities,
},
render_graph::{Node, NodeRunError, RenderGraphContext},
render_resource::{
binding_types::{sampler, texture_2d, texture_2d_multisampled, texture_storage_2d},
BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutEntries,
CachedComputePipelineId, ComputePassDescriptor, ComputePipeline, ComputePipelineDescriptor,
Extent3d, IntoBinding, PipelineCache, PushConstantRange, Sampler, SamplerBindingType,
SamplerDescriptor, ShaderStages, SpecializedComputePipeline, SpecializedComputePipelines,
StorageTextureAccess, TextureAspect, TextureDescriptor, TextureDimension, TextureFormat,
TextureSampleType, TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension,
},
renderer::{RenderContext, RenderDevice},
texture::TextureCache,
view::{ExtractedView, NoIndirectDrawing, ViewDepthTexture},
};
use bevy_shader::Shader;
use bevy_utils::default;
use bitflags::bitflags;
use tracing::debug;
/// The maximum number of mip levels that we can produce.
///
/// 2^12 is 4096, so that's the maximum size of the depth buffer that we
/// support.
///
/// NOTE(review): presumably this must agree with the number of output mip
/// bindings in the downsample shader's bind group layout — confirm if either
/// changes.
pub const DEPTH_PYRAMID_MIP_COUNT: usize = 12;
/// The nodes that produce a hierarchical Z-buffer, also known as a depth
/// pyramid.
///
/// This runs the single-pass downsampling (SPD) shader with the *min* filter in
/// order to generate a series of mipmaps for the Z buffer. The resulting
/// hierarchical Z-buffer can be used for occlusion culling.
///
/// There are two instances of this node. The *early* downsample depth pass is
/// the first hierarchical Z-buffer stage, which runs after the early prepass
/// and before the late prepass. It prepares the Z-buffer for the bounding box
/// tests that the late mesh preprocessing stage will perform. The *late*
/// downsample depth pass runs at the end of the main phase. It prepares the
/// Z-buffer for the occlusion culling that the early mesh preprocessing phase
/// of the *next* frame will perform.
///
/// This node won't do anything if occlusion culling isn't on.
pub struct DownsampleDepthNode {
    /// The query that we use to find views that need occlusion culling for
    /// their Z-buffer.
    ///
    /// The [`OcclusionCullingSubviewEntities`] component is optional: a view
    /// may or may not have occlusion-culled shadow subviews attached.
    main_view_query: QueryState<(
        Read<ViewDepthPyramid>,
        Read<ViewDownsampleDepthBindGroup>,
        Read<ViewDepthTexture>,
        Option<Read<OcclusionCullingSubviewEntities>>,
    )>,
    /// The query that we use to find shadow maps that need occlusion culling.
    shadow_view_query: QueryState<(
        Read<ViewDepthPyramid>,
        Read<ViewDownsampleDepthBindGroup>,
        Read<OcclusionCullingSubview>,
    )>,
}
impl FromWorld for DownsampleDepthNode {
fn from_world(world: &mut World) -> Self {
Self {
main_view_query: QueryState::new(world),
shadow_view_query: QueryState::new(world),
}
}
}
impl Node for DownsampleDepthNode {
    fn update(&mut self, world: &mut World) {
        // Refresh the cached query states so that `run`, which only has an
        // immutable `World`, can use `get_manual` safely on any newly-created
        // archetypes.
        self.main_view_query.update_archetypes(world);
        self.shadow_view_query.update_archetypes(world);
    }
    fn run<'w>(
        &self,
        render_graph_context: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // If the view doesn't match the query (e.g. it has no depth pyramid
        // or downsample bind group), there's nothing to do.
        let Ok((
            view_depth_pyramid,
            view_downsample_depth_bind_group,
            view_depth_texture,
            maybe_view_light_entities,
        )) = self
            .main_view_query
            .get_manual(world, render_graph_context.view_entity())
        else {
            return Ok(());
        };
        // Downsample depth for the main Z-buffer.
        downsample_depth(
            render_graph_context,
            render_context,
            world,
            view_depth_pyramid,
            view_downsample_depth_bind_group,
            uvec2(
                view_depth_texture.texture.width(),
                view_depth_texture.texture.height(),
            ),
            view_depth_texture.texture.sample_count(),
        )?;
        // Downsample depth for shadow maps that have occlusion culling enabled.
        if let Some(view_light_entities) = maybe_view_light_entities {
            for &view_light_entity in &view_light_entities.0 {
                let Ok((view_depth_pyramid, view_downsample_depth_bind_group, occlusion_culling)) =
                    self.shadow_view_query.get_manual(world, view_light_entity)
                else {
                    continue;
                };
                // The shadow map is treated as square (`splat` of a single
                // size) and non-multisampled (sample count 1).
                downsample_depth(
                    render_graph_context,
                    render_context,
                    world,
                    view_depth_pyramid,
                    view_downsample_depth_bind_group,
                    UVec2::splat(occlusion_culling.depth_texture_size),
                    1,
                )?;
            }
        }
        Ok(())
    }
}
/// Produces a depth pyramid from the current depth buffer for a single view.
/// The resulting depth pyramid can be used for occlusion testing.
///
/// `view_size` is the size in texels of the source depth texture, and
/// `sample_count` is its MSAA sample count, which selects between the
/// single-sample and multisample pipeline variants.
fn downsample_depth<'w>(
    render_graph_context: &mut RenderGraphContext,
    render_context: &mut RenderContext<'w>,
    world: &'w World,
    view_depth_pyramid: &ViewDepthPyramid,
    view_downsample_depth_bind_group: &ViewDownsampleDepthBindGroup,
    view_size: UVec2,
    sample_count: u32,
) -> Result<(), NodeRunError> {
    let downsample_depth_pipelines = world.resource::<DownsampleDepthPipelines>();
    let pipeline_cache = world.resource::<PipelineCache>();
    // Despite the name "single-pass downsampling", we actually need two
    // passes because of the lack of `coherent` buffers in WGPU/WGSL.
    // Between each pass, there's an implicit synchronization barrier.
    // Fetch the appropriate pipeline ID, depending on whether the depth
    // buffer is multisampled or not.
    let (Some(first_downsample_depth_pipeline_id), Some(second_downsample_depth_pipeline_id)) =
        (if sample_count > 1 {
            (
                downsample_depth_pipelines.first_multisample.pipeline_id,
                downsample_depth_pipelines.second_multisample.pipeline_id,
            )
        } else {
            (
                downsample_depth_pipelines.first.pipeline_id,
                downsample_depth_pipelines.second.pipeline_id,
            )
        })
    else {
        return Ok(());
    };
    // Fetch the pipelines for the two passes. If they haven't finished
    // compiling yet, silently skip downsampling this frame.
    let (Some(first_downsample_depth_pipeline), Some(second_downsample_depth_pipeline)) = (
        pipeline_cache.get_compute_pipeline(first_downsample_depth_pipeline_id),
        pipeline_cache.get_compute_pipeline(second_downsample_depth_pipeline_id),
    ) else {
        return Ok(());
    };
    // Run the depth downsampling.
    view_depth_pyramid.downsample_depth(
        &format!("{:?}", render_graph_context.label()),
        render_context,
        view_size,
        view_downsample_depth_bind_group,
        first_downsample_depth_pipeline,
        second_downsample_depth_pipeline,
    );
    Ok(())
}
/// A single depth downsample pipeline.
#[derive(Resource)]
pub struct DownsampleDepthPipeline {
    /// The bind group layout for this pipeline.
    bind_group_layout: BindGroupLayoutDescriptor,
    /// The cached ID of the specialized compute pipeline, or `None` if this
    /// pipeline hasn't been specialized yet.
    pipeline_id: Option<CachedComputePipelineId>,
    /// The shader asset handle.
    shader: Handle<Shader>,
}
impl DownsampleDepthPipeline {
/// Creates a new [`DownsampleDepthPipeline`] from a bind group layout and the downsample
/// shader.
///
/// This doesn't actually specialize the pipeline; that must be done
/// afterward.
fn new(
bind_group_layout: BindGroupLayoutDescriptor,
shader: Handle<Shader>,
) -> DownsampleDepthPipeline {
DownsampleDepthPipeline {
bind_group_layout,
pipeline_id: None,
shader,
}
}
}
/// Stores all depth buffer downsampling pipelines.
#[derive(Resource)]
pub struct DownsampleDepthPipelines {
    /// The first pass of the pipeline, when the depth buffer is *not*
    /// multisampled.
    first: DownsampleDepthPipeline,
    /// The second pass of the pipeline, when the depth buffer is *not*
    /// multisampled.
    second: DownsampleDepthPipeline,
    /// The first pass of the pipeline, when the depth buffer is multisampled.
    first_multisample: DownsampleDepthPipeline,
    /// The second pass of the pipeline, when the depth buffer is multisampled.
    second_multisample: DownsampleDepthPipeline,
    /// The sampler that the depth downsampling shader uses to sample the depth
    /// buffer.
    ///
    /// This is created with default sampler settings, matching the
    /// non-filtering sampler binding in the bind group layout.
    sampler: Sampler,
}
/// Creates the [`DownsampleDepthPipelines`] if downsampling is supported on the
/// current platform.
///
/// This system runs its body only once (guarded by the `has_run` local), and
/// bails out early — without inserting the resource — when GPU culling isn't
/// supported.
pub(crate) fn create_downsample_depth_pipelines(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    mut specialized_compute_pipelines: ResMut<SpecializedComputePipelines<DownsampleDepthPipeline>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
    downsample_depth_shader: Res<DownsampleShaders>,
    mut has_run: Local<bool>,
) {
    // Only run once.
    // We can't use a `resource_exists` or similar run condition here because
    // this function might fail to create downsample depth pipelines if the
    // current platform doesn't support compute shaders.
    if *has_run {
        return;
    }
    *has_run = true;
    if !gpu_preprocessing_support.is_culling_supported() {
        debug!("Downsample depth is not supported on this platform.");
        return;
    }
    // Create the bind group layouts. The bind group layouts are identical
    // between the first and second passes, so the only thing we need to
    // treat specially is the type of the first mip level (non-multisampled
    // or multisampled).
    let standard_bind_group_layout = create_downsample_depth_bind_group_layout(false);
    let multisampled_bind_group_layout = create_downsample_depth_bind_group_layout(true);
    // Create the depth pyramid sampler. This is shared among all shaders.
    let sampler = render_device.create_sampler(&SamplerDescriptor {
        label: Some("depth pyramid sampler"),
        ..SamplerDescriptor::default()
    });
    // Initialize the pipelines. All four variants use the same shader asset;
    // the differences are expressed through the specialization keys below.
    let mut downsample_depth_pipelines = DownsampleDepthPipelines {
        first: DownsampleDepthPipeline::new(
            standard_bind_group_layout.clone(),
            downsample_depth_shader.depth.clone(),
        ),
        second: DownsampleDepthPipeline::new(
            standard_bind_group_layout.clone(),
            downsample_depth_shader.depth.clone(),
        ),
        first_multisample: DownsampleDepthPipeline::new(
            multisampled_bind_group_layout.clone(),
            downsample_depth_shader.depth.clone(),
        ),
        second_multisample: DownsampleDepthPipeline::new(
            multisampled_bind_group_layout.clone(),
            downsample_depth_shader.depth.clone(),
        ),
        sampler,
    };
    // Specialize each pipeline with the appropriate
    // `DownsampleDepthPipelineKey`.
    downsample_depth_pipelines.first.pipeline_id = Some(specialized_compute_pipelines.specialize(
        &pipeline_cache,
        &downsample_depth_pipelines.first,
        DownsampleDepthPipelineKey::empty(),
    ));
    downsample_depth_pipelines.second.pipeline_id = Some(specialized_compute_pipelines.specialize(
        &pipeline_cache,
        &downsample_depth_pipelines.second,
        DownsampleDepthPipelineKey::SECOND_PHASE,
    ));
    downsample_depth_pipelines.first_multisample.pipeline_id =
        Some(specialized_compute_pipelines.specialize(
            &pipeline_cache,
            &downsample_depth_pipelines.first_multisample,
            DownsampleDepthPipelineKey::MULTISAMPLE,
        ));
    downsample_depth_pipelines.second_multisample.pipeline_id =
        Some(specialized_compute_pipelines.specialize(
            &pipeline_cache,
            &downsample_depth_pipelines.second_multisample,
            DownsampleDepthPipelineKey::SECOND_PHASE | DownsampleDepthPipelineKey::MULTISAMPLE,
        ));
    commands.insert_resource(downsample_depth_pipelines);
}
/// Creates a single bind group layout for the downsample depth pass.
///
/// The layout is: the source depth texture, twelve storage bindings (one per
/// depth pyramid mip level), and the depth sampler, in that order.
fn create_downsample_depth_bind_group_layout(is_multisampled: bool) -> BindGroupLayoutDescriptor {
    BindGroupLayoutDescriptor::new(
        if is_multisampled {
            "downsample multisample depth bind group layout"
        } else {
            "downsample depth bind group layout"
        },
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                // We only care about the multisample status of the depth buffer
                // for the first mip level. After the first mip level is
                // sampled, we drop to a single sample.
                if is_multisampled {
                    texture_2d_multisampled(TextureSampleType::Depth)
                } else {
                    texture_2d(TextureSampleType::Depth)
                },
                // All the mip levels follow:
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                // NOTE(review): this mip binding is read-write, presumably
                // because the second downsampling phase reads it back as its
                // input — confirm against `downsample_depth.wgsl`.
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::ReadWrite),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
                sampler(SamplerBindingType::NonFiltering),
            ),
        ),
    )
}
bitflags! {
    /// Uniquely identifies a configuration of the downsample depth shader.
    ///
    /// This is the specialization key for [`DownsampleDepthPipeline`].
    ///
    /// Note that meshlets maintain their downsample depth shaders on their own
    /// and don't use this infrastructure; thus there's no flag for meshlets in
    /// here, even though the shader has defines for it.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct DownsampleDepthPipelineKey: u8 {
        /// True if the depth buffer is multisampled.
        const MULTISAMPLE = 1;
        /// True if this shader is the second phase of the downsample depth
        /// process; false if this shader is the first phase.
        const SECOND_PHASE = 2;
    }
}
impl SpecializedComputePipeline for DownsampleDepthPipeline {
    type Key = DownsampleDepthPipelineKey;

    /// Builds the compute pipeline descriptor for the given key, selecting the
    /// shader defines, label, and entry point that match its flags.
    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let multisample = key.contains(DownsampleDepthPipelineKey::MULTISAMPLE);
        let second_phase = key.contains(DownsampleDepthPipelineKey::SECOND_PHASE);

        // The only shader define is the multisample toggle.
        let mut shader_defs = Vec::new();
        if multisample {
            shader_defs.push("MULTISAMPLE".into());
        }

        // Build a descriptive debug label from the key's flags.
        let multisample_part = if multisample { " multisample" } else { "" };
        let phase_part = if second_phase {
            " second phase"
        } else {
            " first phase"
        };
        let label = format!("downsample depth{multisample_part}{phase_part} pipeline");

        // Each phase has its own entry point in the shader.
        let entry_point = if second_phase {
            "downsample_depth_second"
        } else {
            "downsample_depth_first"
        };

        ComputePipelineDescriptor {
            label: Some(label.into()),
            layout: vec![self.bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: self.shader.clone(),
            shader_defs,
            entry_point: Some(entry_point.into()),
            ..default()
        }
    }
}
/// Stores a placeholder texture that can be bound to a depth pyramid binding if
/// no depth pyramid is needed.
///
/// The inner view is created by [`create_depth_pyramid_dummy_texture`].
#[derive(Resource, Deref, DerefMut)]
pub struct DepthPyramidDummyTexture(TextureView);
/// Creates and inserts the [`DepthPyramidDummyTexture`] resource.
pub fn init_depth_pyramid_dummy_texture(mut commands: Commands, render_device: Res<RenderDevice>) {
    commands.insert_resource(DepthPyramidDummyTexture(
        create_depth_pyramid_dummy_texture(
            &render_device,
            "depth pyramid dummy texture",
            "depth pyramid dummy texture view",
        ),
    ));
}
/// Creates a placeholder texture that can be bound to a depth pyramid binding
/// if no depth pyramid is needed.
///
/// The texture uses `Extent3d::default()` — the smallest possible extent — and
/// a single mip level, since only the binding itself matters.
pub fn create_depth_pyramid_dummy_texture(
    render_device: &RenderDevice,
    texture_label: &'static str,
    texture_view_label: &'static str,
) -> TextureView {
    render_device
        .create_texture(&TextureDescriptor {
            label: Some(texture_label),
            size: Extent3d::default(),
            mip_level_count: 1,
            sample_count: 1,
            dimension: TextureDimension::D2,
            format: TextureFormat::R32Float,
            usage: TextureUsages::STORAGE_BINDING,
            view_formats: &[],
        })
        .create_view(&TextureViewDescriptor {
            label: Some(texture_view_label),
            format: Some(TextureFormat::R32Float),
            dimension: Some(TextureViewDimension::D2),
            usage: None,
            aspect: TextureAspect::All,
            base_mip_level: 0,
            mip_level_count: Some(1),
            base_array_layer: 0,
            array_layer_count: Some(1),
        })
}
/// Stores a hierarchical Z-buffer for a view, which is a series of mipmaps
/// useful for efficient occlusion culling.
///
/// This will only be present on a view when occlusion culling is enabled.
#[derive(Component)]
pub struct ViewDepthPyramid {
    /// A texture view containing the entire depth texture.
    pub all_mips: TextureView,
    /// A series of texture views containing one mip level each.
    ///
    /// Slots at or past [`Self::mip_count`] hold a dummy texture view so that
    /// all bindings can always be populated.
    pub mips: [TextureView; DEPTH_PYRAMID_MIP_COUNT],
    /// The total number of mipmap levels.
    ///
    /// This is the base-2 logarithm of the greatest dimension of the depth
    /// buffer, rounded up. It's computed via `Extent3d::max_mips` over the
    /// half-resolution pyramid extent in [`ViewDepthPyramid::new`].
    pub mip_count: u32,
}
impl ViewDepthPyramid {
    /// Allocates a new depth pyramid for a depth buffer with the given size.
    ///
    /// `size` is the size of the *source* depth buffer in texels; the pyramid's
    /// top level is half that size (rounded up).
    pub fn new(
        render_device: &RenderDevice,
        texture_cache: &mut TextureCache,
        depth_pyramid_dummy_texture: &TextureView,
        size: UVec2,
        texture_label: &'static str,
        texture_view_label: &'static str,
    ) -> ViewDepthPyramid {
        // Calculate the size of the depth pyramid.
        let depth_pyramid_size = Extent3d {
            width: size.x.div_ceil(2),
            height: size.y.div_ceil(2),
            depth_or_array_layers: 1,
        };
        // Calculate the number of mip levels we need.
        let depth_pyramid_mip_count = depth_pyramid_size.max_mips(TextureDimension::D2);
        // Create the depth pyramid.
        let depth_pyramid = texture_cache.get(
            render_device,
            TextureDescriptor {
                label: Some(texture_label),
                size: depth_pyramid_size,
                mip_level_count: depth_pyramid_mip_count,
                sample_count: 1,
                dimension: TextureDimension::D2,
                format: TextureFormat::R32Float,
                usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
                view_formats: &[],
            },
        );
        // Create individual views for each level of the depth pyramid. Slots
        // past the actual mip count get the dummy texture view so that the
        // bind group in `create_bind_group` can always bind every slot.
        let depth_pyramid_mips = array::from_fn(|i| {
            if (i as u32) < depth_pyramid_mip_count {
                depth_pyramid.texture.create_view(&TextureViewDescriptor {
                    label: Some(texture_view_label),
                    format: Some(TextureFormat::R32Float),
                    dimension: Some(TextureViewDimension::D2),
                    usage: None,
                    aspect: TextureAspect::All,
                    base_mip_level: i as u32,
                    mip_level_count: Some(1),
                    base_array_layer: 0,
                    array_layer_count: Some(1),
                })
            } else {
                (*depth_pyramid_dummy_texture).clone()
            }
        });
        // Create the view for the depth pyramid as a whole.
        let depth_pyramid_all_mips = depth_pyramid.default_view.clone();
        Self {
            all_mips: depth_pyramid_all_mips,
            mips: depth_pyramid_mips,
            mip_count: depth_pyramid_mip_count,
        }
    }
    /// Creates a bind group that allows the depth buffer to be attached to the
    /// `downsample_depth.wgsl` shader.
    ///
    /// `source_image` is the depth texture to downsample; the bind group also
    /// binds all twelve pyramid mip slots and the depth sampler.
    pub fn create_bind_group<'a, R>(
        &'a self,
        render_device: &RenderDevice,
        label: &'static str,
        bind_group_layout: &BindGroupLayout,
        source_image: R,
        sampler: &'a Sampler,
    ) -> BindGroup
    where
        R: IntoBinding<'a>,
    {
        render_device.create_bind_group(
            label,
            bind_group_layout,
            &BindGroupEntries::sequential((
                source_image,
                &self.mips[0],
                &self.mips[1],
                &self.mips[2],
                &self.mips[3],
                &self.mips[4],
                &self.mips[5],
                &self.mips[6],
                &self.mips[7],
                &self.mips[8],
                &self.mips[9],
                &self.mips[10],
                &self.mips[11],
                sampler,
            )),
        )
    }
    /// Invokes the shaders to generate the hierarchical Z-buffer.
    ///
    /// This is intended to be invoked as part of a render node.
    pub fn downsample_depth(
        &self,
        label: &str,
        render_context: &mut RenderContext,
        view_size: UVec2,
        downsample_depth_bind_group: &BindGroup,
        downsample_depth_first_pipeline: &ComputePipeline,
        downsample_depth_second_pipeline: &ComputePipeline,
    ) {
        let command_encoder = render_context.command_encoder();
        let mut downsample_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
            label: Some(label),
            timestamp_writes: None,
        });
        downsample_pass.set_pipeline(downsample_depth_first_pipeline);
        // Pass the mip count as a push constant, for simplicity.
        downsample_pass.set_push_constants(0, &self.mip_count.to_le_bytes());
        downsample_pass.set_bind_group(0, downsample_depth_bind_group, &[]);
        // One workgroup per 64×64 texel tile of the view. NOTE(review):
        // assumes the shader's tile size is 64 — confirm against
        // `downsample_depth.wgsl`.
        downsample_pass.dispatch_workgroups(view_size.x.div_ceil(64), view_size.y.div_ceil(64), 1);
        // If the pyramid has 7 or more mip levels, run the second phase to
        // produce the remaining levels.
        if self.mip_count >= 7 {
            downsample_pass.set_pipeline(downsample_depth_second_pipeline);
            downsample_pass.dispatch_workgroups(1, 1, 1);
        }
    }
}
/// Creates depth pyramids for views that have occlusion culling enabled.
pub fn prepare_view_depth_pyramids(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    mut texture_cache: ResMut<TextureCache>,
    depth_pyramid_dummy_texture: Res<DepthPyramidDummyTexture>,
    views: Query<(Entity, &ExtractedView), (With<OcclusionCulling>, Without<NoIndirectDrawing>)>,
) {
    for (view_entity, view) in &views {
        // Allocate a pyramid sized to this view's viewport and attach it to
        // the view entity.
        let pyramid = ViewDepthPyramid::new(
            &render_device,
            &mut texture_cache,
            &depth_pyramid_dummy_texture,
            view.viewport.zw(),
            "view depth pyramid texture",
            "view depth pyramid texture view",
        );
        commands.entity(view_entity).insert(pyramid);
    }
}
/// The bind group that we use to attach the depth buffer and depth pyramid for
/// a view to the `downsample_depth.wgsl` shader.
///
/// This will only be present for a view if occlusion culling is enabled.
///
/// Created by [`prepare_downsample_depth_view_bind_groups`].
#[derive(Component, Deref, DerefMut)]
pub struct ViewDownsampleDepthBindGroup(BindGroup);
/// Creates the [`ViewDownsampleDepthBindGroup`]s for all views with occlusion
/// culling enabled.
///
/// This covers both main views (which have a [`ViewDepthTexture`]) and shadow
/// map subviews (which have an [`OcclusionCullingSubview`]).
pub(crate) fn prepare_downsample_depth_view_bind_groups(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    downsample_depth_pipelines: Res<DownsampleDepthPipelines>,
    pipeline_cache: Res<PipelineCache>,
    view_depth_textures: Query<
        (
            Entity,
            &ViewDepthPyramid,
            Option<&ViewDepthTexture>,
            Option<&OcclusionCullingSubview>,
        ),
        Or<(With<ViewDepthTexture>, With<OcclusionCullingSubview>)>,
    >,
) {
    for (view_entity, view_depth_pyramid, view_depth_texture, shadow_occlusion_culling) in
        &view_depth_textures
    {
        // Only main view depth textures carry a sample count here; shadow
        // subviews are treated as single-sampled.
        let is_multisampled = view_depth_texture
            .is_some_and(|view_depth_texture| view_depth_texture.texture.sample_count() > 1);
        // Pick the source depth texture: the main view's if present, otherwise
        // the shadow map's.
        let source_texture_view = match (view_depth_texture, shadow_occlusion_culling) {
            (Some(view_depth_texture), _) => view_depth_texture.view(),
            (None, Some(shadow_occlusion_culling)) => {
                &shadow_occlusion_culling.depth_texture_view
            }
            // The `Or` filter on `view_depth_textures` guarantees that at
            // least one of the two components is present.
            (None, None) => unreachable!(
                "query filter guarantees either `ViewDepthTexture` or \
                 `OcclusionCullingSubview` is present"
            ),
        };
        commands
            .entity(view_entity)
            .insert(ViewDownsampleDepthBindGroup(
                view_depth_pyramid.create_bind_group(
                    &render_device,
                    if is_multisampled {
                        "downsample multisample depth bind group"
                    } else {
                        "downsample depth bind group"
                    },
                    &pipeline_cache.get_bind_group_layout(if is_multisampled {
                        &downsample_depth_pipelines
                            .first_multisample
                            .bind_group_layout
                    } else {
                        &downsample_depth_pipelines.first.bind_group_layout
                    }),
                    source_texture_view,
                    &downsample_depth_pipelines.sampler,
                ),
            ));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/mip_generation/experimental/mod.rs | crates/bevy_core_pipeline/src/mip_generation/experimental/mod.rs | //! Experimental functionality related to mipmap generation.
//!
//! Currently, this consists only of hierarchical Z buffer generation.
pub mod depth;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/experimental/mod.rs | crates/bevy_core_pipeline/src/experimental/mod.rs | //! Experimental rendering features.
//!
//! Experimental features are features with known problems, missing features,
//! compatibility issues, low performance, and/or future breaking changes, but
//! are included nonetheless for testing purposes.
pub mod depth;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/blit/mod.rs | crates/bevy_core_pipeline/src/blit/mod.rs | use crate::FullscreenShader;
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Handle};
use bevy_ecs::prelude::*;
use bevy_render::{
render_resource::{
binding_types::{sampler, texture_2d},
*,
},
renderer::RenderDevice,
RenderApp, RenderStartup,
};
use bevy_shader::Shader;
use bevy_utils::default;
/// Adds support for specialized "blit pipelines", which can be used to write one texture to another.
///
/// See [`BlitPipeline`] for the resource this plugin sets up.
pub struct BlitPlugin;
impl Plugin for BlitPlugin {
    fn build(&self, app: &mut App) {
        // Register the blit shader as an embedded asset so that
        // `init_blit_pipeline` can load it.
        embedded_asset!(app, "blit.wgsl");
        // If there's no render app, there's nothing to set up.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .allow_ambiguous_resource::<SpecializedRenderPipelines<BlitPipeline>>()
            .init_resource::<SpecializedRenderPipelines<BlitPipeline>>()
            .add_systems(RenderStartup, init_blit_pipeline);
    }
}
/// The shared state for blit render pipelines.
#[derive(Resource)]
pub struct BlitPipeline {
    /// The bind group layout: a source texture plus a non-filtering sampler.
    pub layout: BindGroupLayoutDescriptor,
    /// The sampler used to read the source texture.
    pub sampler: Sampler,
    /// The shared fullscreen vertex shader, used as the blit's vertex stage.
    pub fullscreen_shader: FullscreenShader,
    /// Handle to the `blit.wgsl` fragment shader.
    pub fragment_shader: Handle<Shader>,
}
/// Creates and inserts the [`BlitPipeline`] resource.
pub fn init_blit_pipeline(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    fullscreen_shader: Res<FullscreenShader>,
    asset_server: Res<AssetServer>,
) {
    // The source texture is declared non-filterable, which matches the
    // non-filtering sampler binding that follows it.
    let layout = BindGroupLayoutDescriptor::new(
        "blit_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                texture_2d(TextureSampleType::Float { filterable: false }),
                sampler(SamplerBindingType::NonFiltering),
            ),
        ),
    );
    let sampler = render_device.create_sampler(&SamplerDescriptor::default());
    commands.insert_resource(BlitPipeline {
        layout,
        sampler,
        fullscreen_shader: fullscreen_shader.clone(),
        fragment_shader: load_embedded_asset!(asset_server.as_ref(), "blit.wgsl"),
    });
}
impl BlitPipeline {
    /// Builds a bind group that binds `src_texture` and the blit sampler
    /// according to this pipeline's layout.
    pub fn create_bind_group(
        &self,
        render_device: &RenderDevice,
        src_texture: &TextureView,
        pipeline_cache: &PipelineCache,
    ) -> BindGroup {
        let layout = pipeline_cache.get_bind_group_layout(&self.layout);
        let entries = BindGroupEntries::sequential((src_texture, &self.sampler));
        render_device.create_bind_group(None, &layout, &entries)
    }
}
/// The key used to specialize a [`BlitPipeline`] for a particular target.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct BlitPipelineKey {
    /// The texture format of the target being blitted to.
    pub texture_format: TextureFormat,
    /// How blitted colors blend with the target, if at all.
    pub blend_state: Option<BlendState>,
    /// The sample count of the target.
    pub samples: u32,
}
impl SpecializedRenderPipeline for BlitPipeline {
    type Key = BlitPipelineKey;
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        RenderPipelineDescriptor {
            label: Some("blit pipeline".into()),
            layout: vec![self.layout.clone()],
            // A fullscreen vertex stage paired with the blit fragment shader.
            vertex: self.fullscreen_shader.to_vertex_state(),
            fragment: Some(FragmentState {
                shader: self.fragment_shader.clone(),
                targets: vec![Some(ColorTargetState {
                    format: key.texture_format,
                    blend: key.blend_state,
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            // The pipeline's sample count must match the target's.
            multisample: MultisampleState {
                count: key.samples,
                ..default()
            },
            ..default()
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs | crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs | use crate::core_2d::Transparent2d;
use bevy_ecs::prelude::*;
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::{TrackedRenderPass, ViewSortedRenderPhases},
render_resource::{CommandEncoderDescriptor, RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, ViewDepthTexture, ViewTarget},
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
/// The render-graph node that renders the [`Transparent2d`] phase for a view.
#[derive(Default)]
pub struct MainTransparentPass2dNode {}
impl ViewNode for MainTransparentPass2dNode {
    /// The view components this node reads.
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ExtractedView,
        &'static ViewTarget,
        &'static ViewDepthTexture,
    );
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        (camera, view, target, depth): bevy_ecs::query::QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // If the sorted 2D phases resource doesn't exist, there's nothing to
        // draw.
        let Some(transparent_phases) =
            world.get_resource::<ViewSortedRenderPhases<Transparent2d>>()
        else {
            return Ok(());
        };
        let view_entity = graph.view_entity();
        let Some(transparent_phase) = transparent_phases.get(&view.retained_view_entity) else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let color_attachments = [Some(target.get_color_attachment())];
        // NOTE: For the transparent pass we load the depth buffer. There should be no
        // need to write to it, but store is set to `true` as a workaround for issue #3776,
        // https://github.com/bevyengine/bevy/issues/3776
        // so that wgpu does not clear the depth buffer.
        // As the opaque and alpha mask passes run first, opaque meshes can occlude
        // transparent ones.
        let depth_stencil_attachment = Some(depth.get_attachment(StoreOp::Store));
        render_context.add_command_buffer_generation_task(move |render_device| {
            // Command encoder setup
            let mut command_encoder =
                render_device.create_command_encoder(&CommandEncoderDescriptor {
                    label: Some("main_transparent_pass_2d_command_encoder"),
                });
            // This needs to run at least once to clear the background color, even if there are no items to render
            {
                #[cfg(feature = "trace")]
                let _main_pass_2d = info_span!("main_transparent_pass_2d").entered();
                let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor {
                    label: Some("main_transparent_pass_2d"),
                    color_attachments: &color_attachments,
                    depth_stencil_attachment,
                    timestamp_writes: None,
                    occlusion_query_set: None,
                });
                let mut render_pass = TrackedRenderPass::new(&render_device, render_pass);
                let pass_span = diagnostics.pass_span(&mut render_pass, "main_transparent_pass_2d");
                if let Some(viewport) = camera.viewport.as_ref() {
                    render_pass.set_camera_viewport(viewport);
                }
                if !transparent_phase.items.is_empty() {
                    #[cfg(feature = "trace")]
                    let _transparent_main_pass_2d_span =
                        info_span!("transparent_main_pass_2d").entered();
                    // Rendering errors are logged rather than failing the node.
                    if let Err(err) = transparent_phase.render(&mut render_pass, world, view_entity)
                    {
                        error!(
                            "Error encountered while rendering the transparent 2D phase {err:?}"
                        );
                    }
                }
                pass_span.end(&mut render_pass);
            }
            // WebGL2 quirk: if ending with a render pass with a custom viewport, the viewport isn't
            // reset for the next render pass so add an empty render pass without a custom viewport
            #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
            if camera.viewport.is_some() {
                #[cfg(feature = "trace")]
                let _reset_viewport_pass_2d = info_span!("reset_viewport_pass_2d").entered();
                let pass_descriptor = RenderPassDescriptor {
                    label: Some("reset_viewport_pass_2d"),
                    color_attachments: &[Some(target.get_color_attachment())],
                    depth_stencil_attachment: None,
                    timestamp_writes: None,
                    occlusion_query_set: None,
                };
                command_encoder.begin_render_pass(&pass_descriptor);
            }
            command_encoder.finish()
        });
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_2d/mod.rs | crates/bevy_core_pipeline/src/core_2d/mod.rs | mod main_opaque_pass_2d_node;
mod main_transparent_pass_2d_node;
/// Labels for the core 2D render sub-graph and its nodes.
pub mod graph {
    use bevy_render::render_graph::{RenderLabel, RenderSubGraph};
    /// The render sub-graph that 2D cameras render with.
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderSubGraph)]
    pub struct Core2d;
    /// Names of the sub-graph's input slots.
    pub mod input {
        /// The slot that supplies the view entity to the graph.
        pub const VIEW_ENTITY: &str = "view_entity";
    }
    /// Labels for the nodes of the [`Core2d`] graph.
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
    pub enum Node2d {
        MsaaWriteback,
        StartMainPass,
        MainOpaquePass,
        MainTransparentPass,
        EndMainPass,
        Wireframe,
        StartMainPassPostProcessing,
        Bloom,
        PostProcessing,
        Tonemapping,
        Fxaa,
        Smaa,
        Upscaling,
        ContrastAdaptiveSharpening,
        EndMainPassPostProcessing,
    }
}
use core::ops::Range;
use bevy_asset::UntypedAssetId;
use bevy_camera::{Camera, Camera2d};
use bevy_image::ToExtents;
use bevy_platform::collections::{HashMap, HashSet};
use bevy_render::{
batching::gpu_preprocessing::GpuPreprocessingMode,
camera::CameraRenderGraph,
render_phase::PhaseItemBatchSetKey,
view::{ExtractedView, RetainedViewEntity},
};
pub use main_opaque_pass_2d_node::*;
pub use main_transparent_pass_2d_node::*;
use crate::{
tonemapping::{DebandDither, Tonemapping, TonemappingNode},
upscaling::UpscalingNode,
};
use bevy_app::{App, Plugin};
use bevy_ecs::prelude::*;
use bevy_math::FloatOrd;
use bevy_render::{
camera::ExtractedCamera,
extract_component::ExtractComponentPlugin,
render_graph::{EmptyNode, RenderGraphExt, ViewNodeRunner},
render_phase::{
sort_phase_system, BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId,
DrawFunctions, PhaseItem, PhaseItemExtraIndex, SortedPhaseItem, ViewBinnedRenderPhases,
ViewSortedRenderPhases,
},
render_resource::{
BindGroupId, CachedRenderPipelineId, TextureDescriptor, TextureDimension, TextureFormat,
TextureUsages,
},
renderer::RenderDevice,
sync_world::MainEntity,
texture::TextureCache,
view::{Msaa, ViewDepthTexture},
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use self::graph::{Core2d, Node2d};
/// The texture format used for the core 2D pipeline's depth textures.
pub const CORE_2D_DEPTH_FORMAT: TextureFormat = TextureFormat::Depth32Float;
/// Sets up the core 2D render graph, phases, and supporting systems.
pub struct Core2dPlugin;
impl Plugin for Core2dPlugin {
    fn build(&self, app: &mut App) {
        // Register the components that every `Camera2d` requires.
        app.register_required_components::<Camera2d, DebandDither>()
            .register_required_components_with::<Camera2d, CameraRenderGraph>(|| {
                CameraRenderGraph::new(Core2d)
            })
            .register_required_components_with::<Camera2d, Tonemapping>(|| Tonemapping::None)
            .add_plugins(ExtractComponentPlugin::<Camera2d>::default());
        // If there's no render app, there's nothing further to set up.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // Register the draw functions, phase containers, and the systems that
        // extract, sort, and prepare the 2D phases.
        render_app
            .init_resource::<DrawFunctions<Opaque2d>>()
            .init_resource::<DrawFunctions<AlphaMask2d>>()
            .init_resource::<DrawFunctions<Transparent2d>>()
            .init_resource::<ViewSortedRenderPhases<Transparent2d>>()
            .init_resource::<ViewBinnedRenderPhases<Opaque2d>>()
            .init_resource::<ViewBinnedRenderPhases<AlphaMask2d>>()
            .add_systems(ExtractSchedule, extract_core_2d_camera_phases)
            .add_systems(
                Render,
                (
                    sort_phase_system::<Transparent2d>.in_set(RenderSystems::PhaseSort),
                    prepare_core_2d_depth_textures.in_set(RenderSystems::PrepareResources),
                ),
            );
        // Build the Core2d sub-graph and wire its nodes in order.
        render_app
            .add_render_sub_graph(Core2d)
            .add_render_graph_node::<EmptyNode>(Core2d, Node2d::StartMainPass)
            .add_render_graph_node::<ViewNodeRunner<MainOpaquePass2dNode>>(
                Core2d,
                Node2d::MainOpaquePass,
            )
            .add_render_graph_node::<ViewNodeRunner<MainTransparentPass2dNode>>(
                Core2d,
                Node2d::MainTransparentPass,
            )
            .add_render_graph_node::<EmptyNode>(Core2d, Node2d::EndMainPass)
            .add_render_graph_node::<EmptyNode>(Core2d, Node2d::StartMainPassPostProcessing)
            .add_render_graph_node::<ViewNodeRunner<TonemappingNode>>(Core2d, Node2d::Tonemapping)
            .add_render_graph_node::<EmptyNode>(Core2d, Node2d::EndMainPassPostProcessing)
            .add_render_graph_node::<ViewNodeRunner<UpscalingNode>>(Core2d, Node2d::Upscaling)
            .add_render_graph_edges(
                Core2d,
                (
                    Node2d::StartMainPass,
                    Node2d::MainOpaquePass,
                    Node2d::MainTransparentPass,
                    Node2d::EndMainPass,
                    Node2d::StartMainPassPostProcessing,
                    Node2d::Tonemapping,
                    Node2d::EndMainPassPostProcessing,
                    Node2d::Upscaling,
                ),
            );
    }
}
/// Opaque 2D [`BinnedPhaseItem`]s.
pub struct Opaque2d {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: BatchSetKey2d,
    /// The key, which determines which can be batched.
    pub bin_key: Opaque2dBinKey,
    /// An entity from which data will be fetched, including the mesh if
    /// applicable.
    ///
    /// This is a (render-world entity, main-world entity) pair.
    pub representative_entity: (Entity, MainEntity),
    /// The ranges of instances.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
}
/// Data that must be identical in order to batch phase items together.
///
/// Items whose keys compare equal end up in the same bin.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Opaque2dBinKey {
    /// The identifier of the render pipeline.
    pub pipeline: CachedRenderPipelineId,
    /// The function used to draw.
    pub draw_function: DrawFunctionId,
    /// The asset that this phase item is associated with.
    ///
    /// Normally, this is the ID of the mesh, but for non-mesh items it might be
    /// the ID of another type of asset.
    pub asset_id: UntypedAssetId,
    /// The ID of a bind group specific to the material.
    pub material_bind_group_id: Option<BindGroupId>,
}
impl PhaseItem for Opaque2d {
    // All of these accessors are trivial, so consistently hint that they
    // should be inlined (previously only some of them carried the hint).
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.bin_key.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for Opaque2d {
// Since 2D meshes presently can't be multidrawn, the batch set key is
// irrelevant.
type BatchSetKey = BatchSetKey2d;
type BinKey = Opaque2dBinKey;
fn new(
batch_set_key: Self::BatchSetKey,
bin_key: Self::BinKey,
representative_entity: (Entity, MainEntity),
batch_range: Range<u32>,
extra_index: PhaseItemExtraIndex,
) -> Self {
Opaque2d {
batch_set_key,
bin_key,
representative_entity,
batch_range,
extra_index,
}
}
}
/// 2D meshes aren't currently multi-drawn together, so this batch set key only
/// stores whether the mesh is indexed.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct BatchSetKey2d {
/// True if the mesh is indexed.
pub indexed: bool,
}
impl PhaseItemBatchSetKey for BatchSetKey2d {
fn indexed(&self) -> bool {
self.indexed
}
}
impl CachedRenderPipelinePhaseItem for Opaque2d {
#[inline]
fn cached_pipeline(&self) -> CachedRenderPipelineId {
self.bin_key.pipeline
}
}
/// Alpha mask 2D [`BinnedPhaseItem`]s.
pub struct AlphaMask2d {
/// Determines which objects can be placed into a *batch set*.
///
/// Objects in a single batch set can potentially be multi-drawn together,
/// if it's enabled and the current platform supports it.
pub batch_set_key: BatchSetKey2d,
/// The key, which determines which can be batched.
pub bin_key: AlphaMask2dBinKey,
/// An entity from which data will be fetched, including the mesh if
/// applicable.
pub representative_entity: (Entity, MainEntity),
/// The ranges of instances.
pub batch_range: Range<u32>,
/// An extra index, which is either a dynamic offset or an index in the
/// indirect parameters list.
pub extra_index: PhaseItemExtraIndex,
}
/// Data that must be identical in order to batch phase items together.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct AlphaMask2dBinKey {
/// The identifier of the render pipeline.
pub pipeline: CachedRenderPipelineId,
/// The function used to draw.
pub draw_function: DrawFunctionId,
/// The asset that this phase item is associated with.
///
/// Normally, this is the ID of the mesh, but for non-mesh items it might be
/// the ID of another type of asset.
pub asset_id: UntypedAssetId,
/// The ID of a bind group specific to the material.
pub material_bind_group_id: Option<BindGroupId>,
}
impl PhaseItem for AlphaMask2d {
#[inline]
fn entity(&self) -> Entity {
self.representative_entity.0
}
#[inline]
fn main_entity(&self) -> MainEntity {
self.representative_entity.1
}
#[inline]
fn draw_function(&self) -> DrawFunctionId {
self.bin_key.draw_function
}
#[inline]
fn batch_range(&self) -> &Range<u32> {
&self.batch_range
}
#[inline]
fn batch_range_mut(&mut self) -> &mut Range<u32> {
&mut self.batch_range
}
fn extra_index(&self) -> PhaseItemExtraIndex {
self.extra_index.clone()
}
fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
(&mut self.batch_range, &mut self.extra_index)
}
}
impl BinnedPhaseItem for AlphaMask2d {
// Since 2D meshes presently can't be multidrawn, the batch set key is
// irrelevant.
type BatchSetKey = BatchSetKey2d;
type BinKey = AlphaMask2dBinKey;
fn new(
batch_set_key: Self::BatchSetKey,
bin_key: Self::BinKey,
representative_entity: (Entity, MainEntity),
batch_range: Range<u32>,
extra_index: PhaseItemExtraIndex,
) -> Self {
AlphaMask2d {
batch_set_key,
bin_key,
representative_entity,
batch_range,
extra_index,
}
}
}
impl CachedRenderPipelinePhaseItem for AlphaMask2d {
#[inline]
fn cached_pipeline(&self) -> CachedRenderPipelineId {
self.bin_key.pipeline
}
}
/// Transparent 2D [`SortedPhaseItem`]s.
pub struct Transparent2d {
pub sort_key: FloatOrd,
pub entity: (Entity, MainEntity),
pub pipeline: CachedRenderPipelineId,
pub draw_function: DrawFunctionId,
pub batch_range: Range<u32>,
pub extracted_index: usize,
pub extra_index: PhaseItemExtraIndex,
/// Whether the mesh in question is indexed (uses an index buffer in
/// addition to its vertex buffer).
pub indexed: bool,
}
impl PhaseItem for Transparent2d {
#[inline]
fn entity(&self) -> Entity {
self.entity.0
}
#[inline]
fn main_entity(&self) -> MainEntity {
self.entity.1
}
#[inline]
fn draw_function(&self) -> DrawFunctionId {
self.draw_function
}
#[inline]
fn batch_range(&self) -> &Range<u32> {
&self.batch_range
}
#[inline]
fn batch_range_mut(&mut self) -> &mut Range<u32> {
&mut self.batch_range
}
#[inline]
fn extra_index(&self) -> PhaseItemExtraIndex {
self.extra_index.clone()
}
#[inline]
fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
(&mut self.batch_range, &mut self.extra_index)
}
}
impl SortedPhaseItem for Transparent2d {
type SortKey = FloatOrd;
#[inline]
fn sort_key(&self) -> Self::SortKey {
self.sort_key
}
#[inline]
fn sort(items: &mut [Self]) {
// radsort is a stable radix sort that performed better than `slice::sort_by_key` or `slice::sort_unstable_by_key`.
radsort::sort_by_key(items, |item| item.sort_key().0);
}
fn indexed(&self) -> bool {
self.indexed
}
}
impl CachedRenderPipelinePhaseItem for Transparent2d {
#[inline]
fn cached_pipeline(&self) -> CachedRenderPipelineId {
self.pipeline
}
}
pub fn extract_core_2d_camera_phases(
mut transparent_2d_phases: ResMut<ViewSortedRenderPhases<Transparent2d>>,
mut opaque_2d_phases: ResMut<ViewBinnedRenderPhases<Opaque2d>>,
mut alpha_mask_2d_phases: ResMut<ViewBinnedRenderPhases<AlphaMask2d>>,
cameras_2d: Extract<Query<(Entity, &Camera), With<Camera2d>>>,
mut live_entities: Local<HashSet<RetainedViewEntity>>,
) {
live_entities.clear();
for (main_entity, camera) in &cameras_2d {
if !camera.is_active {
continue;
}
// This is the main 2D camera, so we use the first subview index (0).
let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0);
transparent_2d_phases.insert_or_clear(retained_view_entity);
opaque_2d_phases.prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None);
alpha_mask_2d_phases
.prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None);
live_entities.insert(retained_view_entity);
}
// Clear out all dead views.
transparent_2d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity));
opaque_2d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity));
alpha_mask_2d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity));
}
pub fn prepare_core_2d_depth_textures(
mut commands: Commands,
mut texture_cache: ResMut<TextureCache>,
render_device: Res<RenderDevice>,
transparent_2d_phases: Res<ViewSortedRenderPhases<Transparent2d>>,
opaque_2d_phases: Res<ViewBinnedRenderPhases<Opaque2d>>,
views_2d: Query<(Entity, &ExtractedCamera, &ExtractedView, &Msaa), (With<Camera2d>,)>,
) {
let mut textures = <HashMap<_, _>>::default();
for (view, camera, extracted_view, msaa) in &views_2d {
if !opaque_2d_phases.contains_key(&extracted_view.retained_view_entity)
|| !transparent_2d_phases.contains_key(&extracted_view.retained_view_entity)
{
continue;
};
let Some(physical_target_size) = camera.physical_target_size else {
continue;
};
let cached_texture = textures
.entry(camera.target.clone())
.or_insert_with(|| {
let descriptor = TextureDescriptor {
label: Some("view_depth_texture"),
// The size of the depth texture
size: physical_target_size.to_extents(),
mip_level_count: 1,
sample_count: msaa.samples(),
dimension: TextureDimension::D2,
format: CORE_2D_DEPTH_FORMAT,
usage: TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
};
texture_cache.get(&render_device, descriptor)
})
.clone();
commands
.entity(view)
.insert(ViewDepthTexture::new(cached_texture, Some(0.0)));
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs | crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs | use crate::core_2d::Opaque2d;
use bevy_ecs::{prelude::World, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::{TrackedRenderPass, ViewBinnedRenderPhases},
render_resource::{CommandEncoderDescriptor, RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, ViewDepthTexture, ViewTarget},
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
use super::AlphaMask2d;
/// A [`bevy_render::render_graph::Node`] that runs the
/// [`Opaque2d`] [`ViewBinnedRenderPhases`] and [`AlphaMask2d`] [`ViewBinnedRenderPhases`]
#[derive(Default)]
pub struct MainOpaquePass2dNode;
impl ViewNode for MainOpaquePass2dNode {
type ViewQuery = (
&'static ExtractedCamera,
&'static ExtractedView,
&'static ViewTarget,
&'static ViewDepthTexture,
);
fn run<'w>(
&self,
graph: &mut RenderGraphContext,
render_context: &mut RenderContext<'w>,
(camera, view, target, depth): QueryItem<'w, '_, Self::ViewQuery>,
world: &'w World,
) -> Result<(), NodeRunError> {
let (Some(opaque_phases), Some(alpha_mask_phases)) = (
world.get_resource::<ViewBinnedRenderPhases<Opaque2d>>(),
world.get_resource::<ViewBinnedRenderPhases<AlphaMask2d>>(),
) else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
let color_attachments = [Some(target.get_color_attachment())];
let depth_stencil_attachment = Some(depth.get_attachment(StoreOp::Store));
let view_entity = graph.view_entity();
let (Some(opaque_phase), Some(alpha_mask_phase)) = (
opaque_phases.get(&view.retained_view_entity),
alpha_mask_phases.get(&view.retained_view_entity),
) else {
return Ok(());
};
render_context.add_command_buffer_generation_task(move |render_device| {
#[cfg(feature = "trace")]
let _main_opaque_pass_2d_span = info_span!("main_opaque_pass_2d").entered();
// Command encoder setup
let mut command_encoder =
render_device.create_command_encoder(&CommandEncoderDescriptor {
label: Some("main_opaque_pass_2d_command_encoder"),
});
// Render pass setup
let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor {
label: Some("main_opaque_pass_2d"),
color_attachments: &color_attachments,
depth_stencil_attachment,
timestamp_writes: None,
occlusion_query_set: None,
});
let mut render_pass = TrackedRenderPass::new(&render_device, render_pass);
let pass_span = diagnostics.pass_span(&mut render_pass, "main_opaque_pass_2d");
if let Some(viewport) = camera.viewport.as_ref() {
render_pass.set_camera_viewport(viewport);
}
// Opaque draws
if !opaque_phase.is_empty() {
#[cfg(feature = "trace")]
let _opaque_main_pass_2d_span = info_span!("opaque_main_pass_2d").entered();
if let Err(err) = opaque_phase.render(&mut render_pass, world, view_entity) {
error!("Error encountered while rendering the 2d opaque phase {err:?}");
}
}
// Alpha mask draws
if !alpha_mask_phase.is_empty() {
#[cfg(feature = "trace")]
let _alpha_mask_main_pass_2d_span = info_span!("alpha_mask_main_pass_2d").entered();
if let Err(err) = alpha_mask_phase.render(&mut render_pass, world, view_entity) {
error!("Error encountered while rendering the 2d alpha mask phase {err:?}");
}
}
pass_span.end(&mut render_pass);
drop(render_pass);
command_encoder.finish()
});
Ok(())
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs | crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs | use bevy_asset::{load_embedded_asset, Handle};
use bevy_ecs::{resource::Resource, world::FromWorld};
use bevy_render::render_resource::VertexState;
use bevy_shader::Shader;
/// A shader that renders to the whole screen. Useful for post-processing.
#[derive(Resource, Clone)]
pub struct FullscreenShader(Handle<Shader>);
impl FromWorld for FullscreenShader {
fn from_world(world: &mut bevy_ecs::world::World) -> Self {
Self(load_embedded_asset!(world, "fullscreen.wgsl"))
}
}
impl FullscreenShader {
/// Gets the raw shader handle.
pub fn shader(&self) -> Handle<Shader> {
self.0.clone()
}
/// Creates a [`VertexState`] that uses the [`FullscreenShader`] to output a
/// ```wgsl
/// struct FullscreenVertexOutput {
/// @builtin(position)
/// position: vec4<f32>;
/// @location(0)
/// uv: vec2<f32>;
/// };
/// ```
/// from the vertex shader.
/// The draw call should render one triangle: `render_pass.draw(0..3, 0..1);`
pub fn to_vertex_state(&self) -> VertexState {
VertexState {
shader: self.0.clone(),
shader_defs: Vec::new(),
entry_point: Some("fullscreen_vertex_shader".into()),
buffers: Vec::new(),
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/raw_handle.rs | crates/bevy_window/src/raw_handle.rs | #![expect(
unsafe_code,
reason = "This module acts as a wrapper around the `raw_window_handle` crate, which exposes many unsafe interfaces; thus, we have to use unsafe code here."
)]
use alloc::sync::Arc;
use bevy_ecs::prelude::Component;
use bevy_platform::sync::Mutex;
use core::{any::Any, marker::PhantomData, ops::Deref};
use raw_window_handle::{
DisplayHandle, HandleError, HasDisplayHandle, HasWindowHandle, RawDisplayHandle,
RawWindowHandle, WindowHandle,
};
/// A wrapper over a window.
///
/// This allows us to extend the lifetime of the window, so it doesn't get eagerly dropped while a
/// pipelined renderer still has frames in flight that need to draw to it.
///
/// This is achieved by storing a shared reference to the window in the [`RawHandleWrapper`],
/// which gets picked up by the renderer during extraction.
#[derive(Debug)]
pub struct WindowWrapper<W> {
reference: Arc<dyn Any + Send + Sync>,
ty: PhantomData<W>,
}
impl<W: Send + Sync + 'static> WindowWrapper<W> {
/// Creates a `WindowWrapper` from a window.
pub fn new(window: W) -> WindowWrapper<W> {
WindowWrapper {
reference: Arc::new(window),
ty: PhantomData,
}
}
}
impl<W: 'static> Deref for WindowWrapper<W> {
type Target = W;
fn deref(&self) -> &Self::Target {
self.reference.downcast_ref::<W>().unwrap()
}
}
/// A wrapper over [`RawWindowHandle`] and [`RawDisplayHandle`] that allows us to safely pass it across threads.
///
/// Depending on the platform, the underlying pointer-containing handle cannot be used on all threads,
/// and so we cannot simply make it (or any type that has a safe operation to get a [`RawWindowHandle`] or [`RawDisplayHandle`])
/// thread-safe.
#[derive(Debug, Clone, Component)]
pub struct RawHandleWrapper {
/// A shared reference to the window.
/// This allows us to extend the lifetime of the window,
/// so it doesn’t get eagerly dropped while a pipelined
/// renderer still has frames in flight that need to draw to it.
_window: Arc<dyn Any + Send + Sync>,
/// Raw handle to a window.
window_handle: RawWindowHandle,
/// Raw handle to the display server.
display_handle: RawDisplayHandle,
}
impl RawHandleWrapper {
/// Creates a `RawHandleWrapper` from a `WindowWrapper`.
pub fn new<W: HasWindowHandle + HasDisplayHandle + 'static>(
window: &WindowWrapper<W>,
) -> Result<RawHandleWrapper, HandleError> {
Ok(RawHandleWrapper {
_window: window.reference.clone(),
window_handle: window.window_handle()?.as_raw(),
display_handle: window.display_handle()?.as_raw(),
})
}
/// Returns a [`HasWindowHandle`] + [`HasDisplayHandle`] impl, which exposes [`WindowHandle`] and [`DisplayHandle`].
///
/// # Safety
///
/// Some platforms have constraints on where/how this handle can be used. For example, some platforms don't support doing window
/// operations off of the main thread. The caller must ensure the [`RawHandleWrapper`] is only used in valid contexts.
pub unsafe fn get_handle(&self) -> ThreadLockedRawWindowHandleWrapper {
ThreadLockedRawWindowHandleWrapper(self.clone())
}
/// Gets the stored window handle.
pub fn get_window_handle(&self) -> RawWindowHandle {
self.window_handle
}
/// Sets the window handle.
///
/// # Safety
///
/// The passed in [`RawWindowHandle`] must be a valid window handle.
// NOTE: The use of an explicit setter instead of a getter for a mutable reference is to limit the amount of time unsoundness can happen.
// If we handed out a mutable reference the user would have to maintain safety invariants throughout its lifetime. For consistency
// we also prefer to handout copies of the handles instead of immutable references.
pub unsafe fn set_window_handle(&mut self, window_handle: RawWindowHandle) -> &mut Self {
self.window_handle = window_handle;
self
}
/// Gets the stored display handle
pub fn get_display_handle(&self) -> RawDisplayHandle {
self.display_handle
}
/// Sets the display handle.
///
/// # Safety
///
/// The passed in [`RawDisplayHandle`] must be a valid display handle.
pub fn set_display_handle(&mut self, display_handle: RawDisplayHandle) -> &mut Self {
self.display_handle = display_handle;
self
}
}
// SAFETY: [`RawHandleWrapper`] is just a normal "raw pointer", which doesn't impl Send/Sync. However the pointer is only
// exposed via an unsafe method that forces the user to make a call for a given platform. (ex: some platforms don't
// support doing window operations off of the main thread).
// A recommendation for this pattern (and more context) is available here:
// https://github.com/rust-windowing/raw-window-handle/issues/59
unsafe impl Send for RawHandleWrapper {}
// SAFETY: This is safe for the same reasons as the Send impl above.
unsafe impl Sync for RawHandleWrapper {}
/// A [`RawHandleWrapper`] that cannot be sent across threads.
///
/// This safely exposes [`RawWindowHandle`] and [`RawDisplayHandle`], but care must be taken to ensure that the construction itself is correct.
///
/// This can only be constructed via the [`RawHandleWrapper::get_handle()`] method;
/// be sure to read the safety docs there about platform-specific limitations.
/// In many cases, this should only be constructed on the main thread.
pub struct ThreadLockedRawWindowHandleWrapper(RawHandleWrapper);
impl HasWindowHandle for ThreadLockedRawWindowHandleWrapper {
fn window_handle(&self) -> Result<WindowHandle<'_>, HandleError> {
// SAFETY: the caller has validated that this is a valid context to get [`RawHandleWrapper`]
// as otherwise an instance of this type could not have been constructed
// NOTE: we cannot simply impl HasRawWindowHandle for RawHandleWrapper,
// as the `raw_window_handle` method is safe. We cannot guarantee that all calls
// of this method are correct (as it may be off the main thread on an incompatible platform),
// and so exposing a safe method to get a [`RawWindowHandle`] directly would be UB.
Ok(unsafe { WindowHandle::borrow_raw(self.0.window_handle) })
}
}
impl HasDisplayHandle for ThreadLockedRawWindowHandleWrapper {
fn display_handle(&self) -> Result<DisplayHandle<'_>, HandleError> {
// SAFETY: the caller has validated that this is a valid context to get [`RawDisplayHandle`]
// as otherwise an instance of this type could not have been constructed
// NOTE: we cannot simply impl HasRawDisplayHandle for RawHandleWrapper,
// as the `raw_display_handle` method is safe. We cannot guarantee that all calls
// of this method are correct (as it may be off the main thread on an incompatible platform),
// and so exposing a safe method to get a [`RawDisplayHandle`] directly would be UB.
Ok(unsafe { DisplayHandle::borrow_raw(self.0.display_handle) })
}
}
/// Holder of the [`RawHandleWrapper`] with wrappers, to allow use in asynchronous context
#[derive(Debug, Clone, Component)]
pub struct RawHandleWrapperHolder(pub Arc<Mutex<Option<RawHandleWrapper>>>);
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/event.rs | crates/bevy_window/src/event.rs | use alloc::string::String;
use bevy_ecs::{entity::Entity, message::Message};
use bevy_input::{
gestures::*,
keyboard::{KeyboardFocusLost, KeyboardInput},
mouse::{MouseButtonInput, MouseMotion, MouseWheel},
touch::TouchInput,
};
use bevy_math::{IVec2, Vec2};
#[cfg(feature = "std")]
use std::path::PathBuf;
#[cfg(not(feature = "std"))]
use alloc::string::String as PathBuf;
#[cfg(feature = "bevy_reflect")]
use bevy_reflect::Reflect;
#[cfg(feature = "serialize")]
use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
use crate::WindowTheme;
/// A window event that is sent whenever a window's logical size has changed.
#[derive(Message, Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowResized {
/// Window that has changed.
pub window: Entity,
/// The new logical width of the window.
pub width: f32,
/// The new logical height of the window.
pub height: f32,
}
/// An event that indicates all of the application's windows should be redrawn,
/// even if their control flow is set to `Wait` and there have been no window events.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct RequestRedraw;
/// An event that is sent whenever a new window is created.
///
/// To create a new window, spawn an entity with a [`Window`](`crate::Window`) on it.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowCreated {
/// Window that has been created.
pub window: Entity,
}
/// An event that is sent whenever the operating systems requests that a window
/// be closed. This will be sent when the close button of the window is pressed.
///
/// If the default [`WindowPlugin`] is used, these events are handled
/// by closing the corresponding [`Window`].
/// To disable this behavior, set `close_when_requested` on the [`WindowPlugin`]
/// to `false`.
///
/// [`WindowPlugin`]: crate::WindowPlugin
/// [`Window`]: crate::Window
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowCloseRequested {
/// Window to close.
pub window: Entity,
}
/// An event that is sent whenever a window is closed. This will be sent when
/// the window entity loses its [`Window`](crate::window::Window) component or is despawned.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowClosed {
/// Window that has been closed.
///
/// Note that this entity probably no longer exists
/// by the time this event is received.
pub window: Entity,
}
/// An event that is sent whenever a window is closing. This will be sent when
/// after a [`WindowCloseRequested`] event is received and the window is in the process of closing.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowClosing {
/// Window that has been requested to close and is the process of closing.
pub window: Entity,
}
/// An event that is sent whenever a window is destroyed by the underlying window system.
///
/// Note that if your application only has a single window, this event may be your last chance to
/// persist state before the application terminates.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowDestroyed {
/// Window that has been destroyed.
///
/// Note that this entity probably no longer exists
/// by the time this event is received.
pub window: Entity,
}
/// An event reporting that the mouse cursor has moved inside a window.
///
/// The event is sent only if the cursor is over one of the application's windows.
/// It is the translated version of [`WindowEvent::CursorMoved`] from the `winit` crate with the addition of `delta`.
///
/// Not to be confused with the `MouseMotion` event from `bevy_input`.
///
/// Because the range of data is limited by the window area and it may have been transformed by the OS to implement certain effects like acceleration,
/// you should not use it for non-cursor-like behavior such as 3D camera control. Please see `MouseMotion` instead.
///
/// [`WindowEvent::CursorMoved`]: https://docs.rs/winit/latest/winit/event/enum.WindowEvent.html#variant.CursorMoved
#[derive(Message, Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct CursorMoved {
/// Window that the cursor moved inside.
pub window: Entity,
/// The cursor position in logical pixels.
pub position: Vec2,
/// The change in the position of the cursor since the last event was sent.
/// This value is `None` if the cursor was outside the window area during the last frame.
// Because the range of this data is limited by the display area and it may have been
// transformed by the OS to implement effects such as cursor acceleration, it should
// not be used to implement non-cursor-like interactions such as 3D camera control.
pub delta: Option<Vec2>,
}
/// An event that is sent whenever the user's cursor enters a window.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct CursorEntered {
/// Window that the cursor entered.
pub window: Entity,
}
/// An event that is sent whenever the user's cursor leaves a window.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct CursorLeft {
/// Window that the cursor left.
pub window: Entity,
}
/// An Input Method Editor event.
///
/// This event is the translated version of the `WindowEvent::Ime` from the `winit` crate.
///
/// It is only sent if IME was enabled on the window with [`Window::ime_enabled`](crate::window::Window::ime_enabled).
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum Ime {
/// Notifies when a new composing text should be set at the cursor position.
Preedit {
/// Window that received the event.
window: Entity,
/// Current value.
value: String,
/// Cursor begin and end position.
///
/// `None` indicated the cursor should be hidden
cursor: Option<(usize, usize)>,
},
/// Notifies when text should be inserted into the editor widget.
Commit {
/// Window that received the event.
window: Entity,
/// Input string
value: String,
},
/// Notifies when the IME was enabled.
///
/// After this event, you will receive events `Ime::Preedit` and `Ime::Commit`.
Enabled {
/// Window that received the event.
window: Entity,
},
/// Notifies when the IME was disabled.
Disabled {
/// Window that received the event.
window: Entity,
},
}
/// An event that indicates a window has received or lost focus.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowFocused {
/// Window that changed focus.
pub window: Entity,
/// Whether it was focused (true) or lost focused (false).
pub focused: bool,
}
/// The window has been occluded (completely hidden from view).
///
/// This is different to window visibility as it depends on
/// whether the window is closed, minimized, set invisible,
/// or fully occluded by another window.
///
/// It is the translated version of [`WindowEvent::Occluded`] from the `winit` crate.
///
/// [`WindowEvent::Occluded`]: https://docs.rs/winit/latest/winit/event/enum.WindowEvent.html#variant.Occluded
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowOccluded {
/// Window that changed occluded state.
pub window: Entity,
/// Whether it was occluded (true) or not occluded (false).
pub occluded: bool,
}
/// An event that indicates a window's scale factor has changed.
#[derive(Message, Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowScaleFactorChanged {
/// Window that had its scale factor changed.
pub window: Entity,
/// The new scale factor.
pub scale_factor: f64,
}
/// An event that indicates a window's OS-reported scale factor has changed.
#[derive(Message, Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowBackendScaleFactorChanged {
/// Window that had its scale factor changed by the backend.
pub window: Entity,
/// The new scale factor.
pub scale_factor: f64,
}
/// Events related to files being dragged and dropped on a window.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum FileDragAndDrop {
/// File is being dropped into a window.
DroppedFile {
/// Window the file was dropped into.
window: Entity,
/// Path to the file that was dropped in.
path_buf: PathBuf,
},
/// File is currently being hovered over a window.
HoveredFile {
/// Window a file is possibly going to be dropped into.
window: Entity,
/// Path to the file that might be dropped in.
path_buf: PathBuf,
},
/// File hovering was canceled.
HoveredFileCanceled {
/// Window that had a canceled file drop.
window: Entity,
},
}
/// An event that is sent when a window is repositioned in physical pixels.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowMoved {
/// Window that moved.
pub window: Entity,
/// Where the window moved to in physical pixels.
pub position: IVec2,
}
/// An event sent when the system theme changes for a window.
///
/// This event is only sent when the window is relying on the system theme to control its appearance.
/// i.e. It is only sent when [`Window::window_theme`](crate::window::Window::window_theme) is `None` and the system theme changes.
#[derive(Message, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowThemeChanged {
/// Window for which the system theme has changed.
pub window: Entity,
/// The new system theme.
pub theme: WindowTheme,
}
/// Application lifetime events
#[derive(Message, Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum AppLifecycle {
/// The application is not started yet.
Idle,
/// The application is running.
Running,
/// The application is going to be suspended.
/// Applications have one frame to react to this event before being paused in the background.
WillSuspend,
/// The application was suspended.
Suspended,
/// The application is going to be resumed.
/// Applications have one extra frame to react to this event before being fully resumed.
WillResume,
}
impl AppLifecycle {
/// Return `true` if the app can be updated.
#[inline]
pub fn is_active(&self) -> bool {
match self {
Self::Idle | Self::Suspended => false,
Self::Running | Self::WillSuspend | Self::WillResume => true,
}
}
}
/// Wraps all `bevy_window` and `bevy_input` events in a common enum.
///
/// Read these events with `MessageReader<WindowEvent>` if you need to
/// access window events in the order they were received from the
/// operating system. Otherwise, the event types are individually
/// readable with `MessageReader<E>` (e.g. `MessageReader<KeyboardInput>`).
#[derive(Message, Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum WindowEvent {
/// An application lifecycle event.
AppLifecycle(AppLifecycle),
/// The user's cursor has entered a window.
CursorEntered(CursorEntered),
///The user's cursor has left a window.
CursorLeft(CursorLeft),
/// The user's cursor has moved inside a window.
CursorMoved(CursorMoved),
/// A file drag and drop event.
FileDragAndDrop(FileDragAndDrop),
/// An Input Method Editor event.
Ime(Ime),
/// A redraw of all of the application's windows has been requested.
RequestRedraw(RequestRedraw),
/// The window's OS-reported scale factor has changed.
WindowBackendScaleFactorChanged(WindowBackendScaleFactorChanged),
/// The OS has requested that a window be closed.
WindowCloseRequested(WindowCloseRequested),
/// A new window has been created.
WindowCreated(WindowCreated),
/// A window has been destroyed by the underlying windowing system.
WindowDestroyed(WindowDestroyed),
/// A window has received or lost focus.
WindowFocused(WindowFocused),
/// A window has been moved.
WindowMoved(WindowMoved),
/// A window has started or stopped being occluded.
WindowOccluded(WindowOccluded),
/// A window's logical size has changed.
WindowResized(WindowResized),
/// A window's scale factor has changed.
WindowScaleFactorChanged(WindowScaleFactorChanged),
/// Sent for windows that are using the system theme when the system theme changes.
WindowThemeChanged(WindowThemeChanged),
/// The state of a mouse button has changed.
MouseButtonInput(MouseButtonInput),
/// The physical position of a pointing device has changed.
MouseMotion(MouseMotion),
/// The mouse wheel has moved.
MouseWheel(MouseWheel),
/// A two finger pinch gesture.
PinchGesture(PinchGesture),
/// A two finger rotation gesture.
RotationGesture(RotationGesture),
/// A double tap gesture.
DoubleTapGesture(DoubleTapGesture),
/// A pan gesture.
PanGesture(PanGesture),
/// A touch input state change.
TouchInput(TouchInput),
/// A keyboard input.
KeyboardInput(KeyboardInput),
/// Sent when focus has been lost for all Bevy windows.
///
/// Used to clear pressed key state.
KeyboardFocusLost(KeyboardFocusLost),
}
impl From<AppLifecycle> for WindowEvent {
fn from(e: AppLifecycle) -> Self {
Self::AppLifecycle(e)
}
}
impl From<CursorEntered> for WindowEvent {
fn from(e: CursorEntered) -> Self {
Self::CursorEntered(e)
}
}
impl From<CursorLeft> for WindowEvent {
fn from(e: CursorLeft) -> Self {
Self::CursorLeft(e)
}
}
impl From<CursorMoved> for WindowEvent {
fn from(e: CursorMoved) -> Self {
Self::CursorMoved(e)
}
}
impl From<FileDragAndDrop> for WindowEvent {
fn from(e: FileDragAndDrop) -> Self {
Self::FileDragAndDrop(e)
}
}
impl From<Ime> for WindowEvent {
fn from(e: Ime) -> Self {
Self::Ime(e)
}
}
impl From<RequestRedraw> for WindowEvent {
fn from(e: RequestRedraw) -> Self {
Self::RequestRedraw(e)
}
}
impl From<WindowBackendScaleFactorChanged> for WindowEvent {
fn from(e: WindowBackendScaleFactorChanged) -> Self {
Self::WindowBackendScaleFactorChanged(e)
}
}
impl From<WindowCloseRequested> for WindowEvent {
fn from(e: WindowCloseRequested) -> Self {
Self::WindowCloseRequested(e)
}
}
impl From<WindowCreated> for WindowEvent {
fn from(e: WindowCreated) -> Self {
Self::WindowCreated(e)
}
}
impl From<WindowDestroyed> for WindowEvent {
fn from(e: WindowDestroyed) -> Self {
Self::WindowDestroyed(e)
}
}
impl From<WindowFocused> for WindowEvent {
fn from(e: WindowFocused) -> Self {
Self::WindowFocused(e)
}
}
impl From<WindowMoved> for WindowEvent {
fn from(e: WindowMoved) -> Self {
Self::WindowMoved(e)
}
}
impl From<WindowOccluded> for WindowEvent {
fn from(e: WindowOccluded) -> Self {
Self::WindowOccluded(e)
}
}
impl From<WindowResized> for WindowEvent {
fn from(e: WindowResized) -> Self {
Self::WindowResized(e)
}
}
impl From<WindowScaleFactorChanged> for WindowEvent {
fn from(e: WindowScaleFactorChanged) -> Self {
Self::WindowScaleFactorChanged(e)
}
}
impl From<WindowThemeChanged> for WindowEvent {
fn from(e: WindowThemeChanged) -> Self {
Self::WindowThemeChanged(e)
}
}
impl From<MouseButtonInput> for WindowEvent {
fn from(e: MouseButtonInput) -> Self {
Self::MouseButtonInput(e)
}
}
impl From<MouseMotion> for WindowEvent {
fn from(e: MouseMotion) -> Self {
Self::MouseMotion(e)
}
}
impl From<MouseWheel> for WindowEvent {
fn from(e: MouseWheel) -> Self {
Self::MouseWheel(e)
}
}
impl From<PinchGesture> for WindowEvent {
fn from(e: PinchGesture) -> Self {
Self::PinchGesture(e)
}
}
impl From<RotationGesture> for WindowEvent {
fn from(e: RotationGesture) -> Self {
Self::RotationGesture(e)
}
}
impl From<DoubleTapGesture> for WindowEvent {
fn from(e: DoubleTapGesture) -> Self {
Self::DoubleTapGesture(e)
}
}
impl From<PanGesture> for WindowEvent {
fn from(e: PanGesture) -> Self {
Self::PanGesture(e)
}
}
impl From<TouchInput> for WindowEvent {
fn from(e: TouchInput) -> Self {
Self::TouchInput(e)
}
}
impl From<KeyboardInput> for WindowEvent {
fn from(e: KeyboardInput) -> Self {
Self::KeyboardInput(e)
}
}
impl From<KeyboardFocusLost> for WindowEvent {
fn from(e: KeyboardFocusLost) -> Self {
Self::KeyboardFocusLost(e)
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/lib.rs | crates/bevy_window/src/lib.rs | #![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
#![no_std]
//! `bevy_window` provides a platform-agnostic interface for windowing in Bevy.
//!
//! This crate contains types for window management and events,
//! used by windowing implementors such as `bevy_winit`.
//! The [`WindowPlugin`] sets up some global window-related parameters and
//! is part of the [`DefaultPlugins`](https://docs.rs/bevy/latest/bevy/struct.DefaultPlugins.html).
#[cfg(feature = "std")]
extern crate std;
extern crate alloc;
mod cursor;
mod event;
mod monitor;
mod raw_handle;
mod system;
mod window;
pub use crate::raw_handle::*;
pub use cursor::*;
pub use event::*;
pub use monitor::*;
pub use system::*;
pub use window::*;
/// The windowing prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::{
CursorEntered, CursorLeft, CursorMoved, FileDragAndDrop, Ime, MonitorSelection,
VideoModeSelection, Window, WindowMoved, WindowPlugin, WindowPosition,
WindowResizeConstraints,
};
}
use alloc::sync::Arc;
use bevy_app::prelude::*;
use bevy_platform::sync::Mutex;
impl Default for WindowPlugin {
fn default() -> Self {
WindowPlugin {
primary_window: Some(Window::default()),
primary_cursor_options: Some(CursorOptions::default()),
exit_condition: ExitCondition::OnAllClosed,
close_when_requested: true,
}
}
}
/// A [`Plugin`] that defines an interface for windowing support in Bevy.
pub struct WindowPlugin {
/// Settings for the primary window.
///
/// `Some(custom_window)` will spawn an entity with `custom_window` and [`PrimaryWindow`] as components.
/// `None` will not spawn a primary window.
///
/// Defaults to `Some(Window::default())`.
///
/// Note that if there are no windows the App will exit (by default) due to
/// [`exit_on_all_closed`].
pub primary_window: Option<Window>,
/// Settings for the cursor on the primary window.
///
/// Defaults to `Some(CursorOptions::default())`.
///
/// Has no effect if [`WindowPlugin::primary_window`] is `None`.
pub primary_cursor_options: Option<CursorOptions>,
/// Whether to exit the app when there are no open windows.
///
/// If disabling this, ensure that you send the [`bevy_app::AppExit`]
/// event when the app should exit. If this does not occur, you will
/// create 'headless' processes (processes without windows), which may
/// surprise your users. It is recommended to leave this setting to
/// either [`ExitCondition::OnAllClosed`] or [`ExitCondition::OnPrimaryClosed`].
///
/// [`ExitCondition::OnAllClosed`] will add [`exit_on_all_closed`] to [`Update`].
/// [`ExitCondition::OnPrimaryClosed`] will add [`exit_on_primary_closed`] to [`Update`].
pub exit_condition: ExitCondition,
/// Whether to close windows when they are requested to be closed (i.e.
/// when the close button is pressed).
///
/// If true, this plugin will add [`close_when_requested`] to [`Update`].
/// If this system (or a replacement) is not running, the close button will have no effect.
/// This may surprise your users. It is recommended to leave this setting as `true`.
pub close_when_requested: bool,
}
impl Plugin for WindowPlugin {
fn build(&self, app: &mut App) {
// User convenience events
app.add_message::<WindowEvent>()
.add_message::<WindowResized>()
.add_message::<WindowCreated>()
.add_message::<WindowClosing>()
.add_message::<WindowClosed>()
.add_message::<WindowCloseRequested>()
.add_message::<WindowDestroyed>()
.add_message::<RequestRedraw>()
.add_message::<CursorMoved>()
.add_message::<CursorEntered>()
.add_message::<CursorLeft>()
.add_message::<Ime>()
.add_message::<WindowFocused>()
.add_message::<WindowOccluded>()
.add_message::<WindowScaleFactorChanged>()
.add_message::<WindowBackendScaleFactorChanged>()
.add_message::<FileDragAndDrop>()
.add_message::<WindowMoved>()
.add_message::<WindowThemeChanged>()
.add_message::<AppLifecycle>();
if let Some(primary_window) = &self.primary_window {
let mut entity_commands = app.world_mut().spawn(primary_window.clone());
entity_commands.insert((
PrimaryWindow,
RawHandleWrapperHolder(Arc::new(Mutex::new(None))),
));
if let Some(primary_cursor_options) = &self.primary_cursor_options {
entity_commands.insert(primary_cursor_options.clone());
}
}
match self.exit_condition {
ExitCondition::OnPrimaryClosed => {
app.add_systems(PostUpdate, exit_on_primary_closed);
}
ExitCondition::OnAllClosed => {
app.add_systems(PostUpdate, exit_on_all_closed);
}
ExitCondition::DontExit => {}
}
if self.close_when_requested {
// Need to run before `exit_on_*` systems
app.add_systems(Update, close_when_requested);
}
}
}
/// Defines the specific conditions the application should exit on
#[derive(Clone)]
pub enum ExitCondition {
/// Close application when the primary window is closed
///
/// The plugin will add [`exit_on_primary_closed`] to [`PostUpdate`].
OnPrimaryClosed,
/// Close application when all windows are closed
///
/// The plugin will add [`exit_on_all_closed`] to [`PostUpdate`].
OnAllClosed,
/// Keep application running headless even after closing all windows
///
/// If selecting this, ensure that you send the [`bevy_app::AppExit`]
/// event when the app should exit. If this does not occur, you will
/// create 'headless' processes (processes without windows), which may
/// surprise your users.
DontExit,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/system.rs | crates/bevy_window/src/system.rs | use crate::{ClosingWindow, PrimaryWindow, Window, WindowCloseRequested};
use bevy_app::AppExit;
use bevy_ecs::prelude::*;
/// Exit the application when there are no open windows.
///
/// This system is added by the [`WindowPlugin`] in the default configuration.
/// To disable this behavior, set `close_when_requested` (on the [`WindowPlugin`]) to `false`.
/// Ensure that you read the caveats documented on that field if doing so.
///
/// [`WindowPlugin`]: crate::WindowPlugin
pub fn exit_on_all_closed(mut app_exit_writer: MessageWriter<AppExit>, windows: Query<&Window>) {
if windows.is_empty() {
log::info!("No windows are open, exiting");
app_exit_writer.write(AppExit::Success);
}
}
/// Exit the application when the primary window has been closed
///
/// This system is added by the [`WindowPlugin`]
///
/// [`WindowPlugin`]: crate::WindowPlugin
pub fn exit_on_primary_closed(
mut app_exit_writer: MessageWriter<AppExit>,
windows: Query<(), (With<Window>, With<PrimaryWindow>)>,
) {
if windows.is_empty() {
log::info!("Primary window was closed, exiting");
app_exit_writer.write(AppExit::Success);
}
}
/// Close windows in response to [`WindowCloseRequested`] (e.g. when the close button is pressed).
///
/// This system is added by the [`WindowPlugin`] in the default configuration.
/// To disable this behavior, set `close_when_requested` (on the [`WindowPlugin`]) to `false`.
/// Ensure that you read the caveats documented on that field if doing so.
///
/// [`WindowPlugin`]: crate::WindowPlugin
pub fn close_when_requested(
mut commands: Commands,
mut closed: MessageReader<WindowCloseRequested>,
closing: Query<Entity, With<ClosingWindow>>,
) {
// This was inserted by us on the last frame so now we can despawn the window
for window in closing.iter() {
commands.entity(window).despawn();
}
// Mark the window as closing so we can despawn it on the next frame
for event in closed.read() {
// When spamming the window close button on windows (other platforms too probably)
// we may receive a `WindowCloseRequested` for a window we've just despawned in the above
// loop.
commands.entity(event.window).try_insert(ClosingWindow);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/window.rs | crates/bevy_window/src/window.rs | #[cfg(feature = "std")]
use alloc::format;
use alloc::{borrow::ToOwned, string::String};
use core::num::NonZero;
use bevy_ecs::{
entity::{ContainsEntity, Entity},
prelude::Component,
};
use bevy_math::{CompassOctant, DVec2, IVec2, UVec2, Vec2};
use bevy_platform::sync::LazyLock;
use log::warn;
#[cfg(feature = "bevy_reflect")]
use {
bevy_ecs::prelude::ReflectComponent,
bevy_reflect::{std_traits::ReflectDefault, Reflect},
};
#[cfg(all(feature = "serialize", feature = "bevy_reflect"))]
use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
use crate::VideoMode;
/// Default string used for the window title.
///
/// It will try to use the name of the current exe if possible, otherwise it defaults to "App"
static DEFAULT_WINDOW_TITLE: LazyLock<String> = LazyLock::new(|| {
#[cfg(feature = "std")]
{
std::env::current_exe()
.ok()
.and_then(|current_exe| Some(format!("{}", current_exe.file_stem()?.to_string_lossy())))
.unwrap_or_else(|| "App".to_owned())
}
#[cfg(not(feature = "std"))]
{
"App".to_owned()
}
});
/// Marker [`Component`] for the window considered the primary window.
///
/// Currently this is assumed to only exist on 1 entity at a time.
///
/// [`WindowPlugin`](crate::WindowPlugin) will spawn a [`Window`] entity
/// with this component if [`primary_window`](crate::WindowPlugin::primary_window)
/// is `Some`.
#[derive(Default, Debug, Component, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Debug, Default, PartialEq, Clone)
)]
pub struct PrimaryWindow;
/// Reference to a [`Window`], whether it be a direct link to a specific entity or
/// a more vague defaulting choice.
#[repr(C)]
#[derive(Default, Copy, Clone, Debug)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, Default, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum WindowRef {
/// This will be linked to the primary window that is created by default
/// in the [`WindowPlugin`](crate::WindowPlugin::primary_window).
#[default]
Primary,
/// A more direct link to a window entity.
///
/// Use this if you want to reference a secondary/tertiary/... window.
///
/// To create a new window you can spawn an entity with a [`Window`],
/// then you can use that entity here for usage in cameras.
Entity(Entity),
}
impl WindowRef {
/// Normalize the window reference so that it can be compared to other window references.
pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedWindowRef> {
let entity = match self {
Self::Primary => primary_window,
Self::Entity(entity) => Some(*entity),
};
entity.map(NormalizedWindowRef)
}
}
/// A flattened representation of a window reference for equality/hashing purposes.
///
/// For most purposes you probably want to use the unnormalized version [`WindowRef`].
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Hash, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct NormalizedWindowRef(Entity);
impl ContainsEntity for NormalizedWindowRef {
fn entity(&self) -> Entity {
self.0
}
}
/// The defining [`Component`] for window entities,
/// storing information about how it should appear and behave.
///
/// Each window corresponds to an entity, and is uniquely identified by the value of their [`Entity`].
/// When the [`Window`] component is added to an entity, a new window will be opened.
/// When it is removed or the entity is despawned, the window will close.
///
/// The primary window entity (and the corresponding window) is spawned by default
/// by [`WindowPlugin`](crate::WindowPlugin) and is marked with the [`PrimaryWindow`] component.
///
/// This component is synchronized with `winit` through `bevy_winit`:
/// it will reflect the current state of the window and can be modified to change this state.
///
/// # Example
///
/// Because this component is synchronized with `winit`, it can be used to perform
/// OS-integrated windowing operations. For example, here's a simple system
/// to change the window mode:
///
/// ```
/// # use bevy_ecs::query::With;
/// # use bevy_ecs::system::Query;
/// # use bevy_window::{WindowMode, PrimaryWindow, Window, MonitorSelection, VideoModeSelection};
/// fn change_window_mode(mut windows: Query<&mut Window, With<PrimaryWindow>>) {
/// // Query returns one window typically.
/// for mut window in windows.iter_mut() {
/// window.mode =
/// WindowMode::Fullscreen(MonitorSelection::Current, VideoModeSelection::Current);
/// }
/// }
/// ```
#[derive(Component, Debug, Clone)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Default, Debug, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
#[require(CursorOptions)]
pub struct Window {
/// What presentation mode to give the window.
pub present_mode: PresentMode,
/// Which fullscreen or windowing mode should be used.
pub mode: WindowMode,
/// Where the window should be placed.
pub position: WindowPosition,
/// What resolution the window should have.
pub resolution: WindowResolution,
/// Stores the title of the window.
pub title: String,
/// Stores the application ID (on **`Wayland`**), `WM_CLASS` (on **`X11`**) or window class name (on **`Windows`**) of the window.
///
/// For details about application ID conventions, see the [Desktop Entry Spec](https://specifications.freedesktop.org/desktop-entry/latest/file-naming.html#desktop-file-id).
/// For details about `WM_CLASS`, see the [X11 Manual Pages](https://www.x.org/releases/current/doc/man/man3/XAllocClassHint.3.xhtml).
/// For details about **`Windows`**'s window class names, see [About Window Classes](https://learn.microsoft.com/en-us/windows/win32/winmsg/about-window-classes).
///
/// ## Platform-specific
///
/// - **`Windows`**: Can only be set while building the window, setting the window's window class name.
/// - **`Wayland`**: Can only be set while building the window, setting the window's application ID.
/// - **`X11`**: Can only be set while building the window, setting the window's `WM_CLASS`.
/// - **`macOS`**, **`iOS`**, **`Android`**, and **`Web`**: not applicable.
///
/// Notes: Changing this field during runtime will have no effect for now.
pub name: Option<String>,
/// How the alpha channel of textures should be handled while compositing.
pub composite_alpha_mode: CompositeAlphaMode,
/// The limits of the window's logical size
/// (found in its [`resolution`](WindowResolution)) when resizing.
pub resize_constraints: WindowResizeConstraints,
/// Should the window be resizable?
///
/// Note: This does not stop the program from fullscreening/setting
/// the size programmatically.
pub resizable: bool,
/// Specifies which window control buttons should be enabled.
///
/// ## Platform-specific
///
/// **`iOS`**, **`Android`**, and the **`Web`** do not have window control buttons.
///
/// On some **`Linux`** environments these values have no effect.
pub enabled_buttons: EnabledButtons,
/// Should the window have decorations enabled?
///
/// (Decorations are the minimize, maximize, and close buttons on desktop apps)
///
/// ## Platform-specific
///
/// **`iOS`**, **`Android`**, and the **`Web`** do not have decorations.
pub decorations: bool,
/// Should the window be transparent?
///
/// Defines whether the background of the window should be transparent.
///
/// ## Platform-specific
/// - iOS / Android / Web: Unsupported.
/// - macOS: Not working as expected.
///
/// macOS transparent works with winit out of the box, so this issue might be related to: <https://github.com/gfx-rs/wgpu/issues/687>.
/// You should also set the window `composite_alpha_mode` to `CompositeAlphaMode::PostMultiplied`.
pub transparent: bool,
/// Get/set whether the window is focused.
///
/// It cannot be set unfocused after creation.
///
/// ## Platform-specific
///
/// - iOS / Android / X11 / Wayland: Spawning unfocused is
/// [not supported](https://docs.rs/winit/latest/winit/window/struct.WindowAttributes.html#method.with_active).
/// - iOS / Android / Web / Wayland: Setting focused after creation is
/// [not supported](https://docs.rs/winit/latest/winit/window/struct.Window.html#method.focus_window).
pub focused: bool,
/// Where should the window appear relative to other overlapping window.
///
/// ## Platform-specific
///
/// - iOS / Android / Web / Wayland: Unsupported.
pub window_level: WindowLevel,
/// The "html canvas" element selector.
///
/// If set, this selector will be used to find a matching html canvas element,
/// rather than creating a new one.
/// Uses the [CSS selector format](https://developer.mozilla.org/en-US/docs/Web/API/Document/querySelector).
///
/// This value has no effect on non-web platforms.
pub canvas: Option<String>,
/// Whether or not to fit the canvas element's size to its parent element's size.
///
/// **Warning**: this will not behave as expected for parents that set their size according to the size of their
/// children. This creates a "feedback loop" that will result in the canvas growing on each resize. When using this
/// feature, ensure the parent's size is not affected by its children.
///
/// This value has no effect on non-web platforms.
pub fit_canvas_to_parent: bool,
/// Whether or not to stop events from propagating out of the canvas element
///
/// When `true`, this will prevent common browser hotkeys like F5, F12, Ctrl+R, tab, etc.
/// from performing their default behavior while the bevy app has focus.
///
/// This value has no effect on non-web platforms.
pub prevent_default_event_handling: bool,
/// Stores internal state that isn't directly accessible.
pub internal: InternalWindowState,
/// Should the window use Input Method Editor?
///
/// If enabled, the window will receive [`Ime`](crate::Ime) events instead of
/// `KeyboardInput` from `bevy_input`.
///
/// IME should be enabled during text input, but not when you expect to get the exact key pressed.
///
/// ## Platform-specific
///
/// - iOS / Android / Web: Unsupported.
pub ime_enabled: bool,
/// Sets location of IME candidate box in client area coordinates relative to the top left.
///
/// ## Platform-specific
///
/// - iOS / Android / Web: Unsupported.
pub ime_position: Vec2,
/// Sets a specific theme for the window.
///
/// If `None` is provided, the window will use the system theme.
///
/// ## Platform-specific
///
/// - iOS / Android / Web: Unsupported.
pub window_theme: Option<WindowTheme>,
/// Sets the window's visibility.
///
/// If `false`, this will hide the window completely, it won't appear on the screen or in the task bar.
/// If `true`, this will show the window.
/// Note that this doesn't change its focused or minimized state.
///
/// ## Platform-specific
///
/// - **Android / Wayland / Web:** Unsupported.
pub visible: bool,
/// Sets whether the window should be shown in the taskbar.
///
/// If `true`, the window will not appear in the taskbar.
/// If `false`, the window will appear in the taskbar.
///
/// Note that this will only take effect on window creation.
///
/// ## Platform-specific
///
/// - Only supported on Windows.
pub skip_taskbar: bool,
/// Sets whether the window should draw over its child windows.
///
/// If `true`, the window excludes drawing over areas obscured by child windows.
/// If `false`, the window can draw over child windows.
///
/// ## Platform-specific
///
/// - Only supported on Windows.
pub clip_children: bool,
/// Optional hint given to the rendering API regarding the maximum number of queued frames admissible on the GPU.
///
/// Given values are usually within the 1-3 range. If not provided, this will default to 2.
///
/// See [`wgpu::SurfaceConfiguration::desired_maximum_frame_latency`].
///
/// [`wgpu::SurfaceConfiguration::desired_maximum_frame_latency`]:
/// https://docs.rs/wgpu/latest/wgpu/type.SurfaceConfiguration.html#structfield.desired_maximum_frame_latency
pub desired_maximum_frame_latency: Option<NonZero<u32>>,
/// Sets whether this window recognizes [`PinchGesture`](https://docs.rs/bevy/latest/bevy/input/gestures/struct.PinchGesture.html)
///
/// ## Platform-specific
///
/// - Only used on iOS.
/// - On macOS, they are recognized by default and can't be disabled.
pub recognize_pinch_gesture: bool,
/// Sets whether this window recognizes [`RotationGesture`](https://docs.rs/bevy/latest/bevy/input/gestures/struct.RotationGesture.html)
///
/// ## Platform-specific
///
/// - Only used on iOS.
/// - On macOS, they are recognized by default and can't be disabled.
pub recognize_rotation_gesture: bool,
/// Sets whether this window recognizes [`DoubleTapGesture`](https://docs.rs/bevy/latest/bevy/input/gestures/struct.DoubleTapGesture.html)
///
/// ## Platform-specific
///
/// - Only used on iOS.
/// - On macOS, they are recognized by default and can't be disabled.
pub recognize_doubletap_gesture: bool,
/// Sets whether this window recognizes [`PanGesture`](https://docs.rs/bevy/latest/bevy/input/gestures/struct.PanGesture.html),
/// with a number of fingers between the first value and the last.
///
/// ## Platform-specific
///
/// - Only used on iOS.
pub recognize_pan_gesture: Option<(u8, u8)>,
/// Enables click-and-drag behavior for the entire window, not just the titlebar.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_movable_by_window_background`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_movable_by_window_background`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_movable_by_window_background
pub movable_by_window_background: bool,
/// Makes the window content appear behind the titlebar.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_fullsize_content_view`].
///
/// For apps which want to render the window buttons on top of the apps
/// itself, this should be enabled along with [`titlebar_transparent`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_fullsize_content_view`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_fullsize_content_view
/// [`titlebar_transparent`]: Self::titlebar_transparent
pub fullsize_content_view: bool,
/// Toggles drawing the drop shadow behind the window.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_has_shadow`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_has_shadow`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_has_shadow
pub has_shadow: bool,
/// Toggles drawing the titlebar.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_titlebar_hidden`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_titlebar_hidden`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_titlebar_hidden
pub titlebar_shown: bool,
/// Makes the titlebar transparent, allowing the app content to appear behind it.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_titlebar_transparent`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_titlebar_transparent`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_titlebar_transparent
pub titlebar_transparent: bool,
/// Toggles showing the window title.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_title_hidden`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_title_hidden`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_title_hidden
pub titlebar_show_title: bool,
/// Toggles showing the traffic light window buttons.
///
/// Corresponds to [`WindowAttributesExtMacOS::with_titlebar_buttons_hidden`].
///
/// # Platform-specific
///
/// - Only used on macOS.
///
/// [`WindowAttributesExtMacOS::with_titlebar_buttons_hidden`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/macos/trait.WindowAttributesExtMacOS.html#tymethod.with_titlebar_buttons_hidden
pub titlebar_show_buttons: bool,
/// Sets whether the Window prefers the home indicator hidden.
///
/// Corresponds to [`WindowAttributesExtIOS::with_prefers_home_indicator_hidden`].
///
/// # Platform-specific
///
/// - Only used on iOS.
///
/// [`WindowAttributesExtIOS::with_prefers_home_indicator_hidden`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/ios/trait.WindowAttributesExtIOS.html#tymethod.with_prefers_home_indicator_hidden
pub prefers_home_indicator_hidden: bool,
/// Sets whether the Window prefers the status bar hidden.
///
/// Corresponds to [`WindowAttributesExtIOS::with_prefers_status_bar_hidden`].
///
/// # Platform-specific
///
/// - Only used on iOS.
///
/// [`WindowAttributesExtIOS::with_prefers_status_bar_hidden`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/ios/trait.WindowAttributesExtIOS.html#tymethod.with_prefers_status_bar_hidden
pub prefers_status_bar_hidden: bool,
/// Sets screen edges for which you want your gestures to take precedence
/// over the system gestures.
///
/// Corresponds to [`WindowAttributesExtIOS::with_preferred_screen_edges_deferring_system_gestures`].
///
/// # Platform-specific
///
/// - Only used on iOS.
///
/// [`WindowAttributesExtIOS::with_preferred_screen_edges_deferring_system_gestures`]: https://docs.rs/winit/latest/x86_64-apple-darwin/winit/platform/ios/trait.WindowAttributesExtIOS.html#tymethod.with_preferred_screen_edges_deferring_system_gestures
pub preferred_screen_edges_deferring_system_gestures: ScreenEdge,
}
impl Default for Window {
fn default() -> Self {
Self {
title: DEFAULT_WINDOW_TITLE.to_owned(),
name: None,
present_mode: Default::default(),
mode: Default::default(),
position: Default::default(),
resolution: Default::default(),
internal: Default::default(),
composite_alpha_mode: Default::default(),
resize_constraints: Default::default(),
ime_enabled: Default::default(),
ime_position: Default::default(),
resizable: true,
enabled_buttons: Default::default(),
decorations: true,
transparent: false,
focused: true,
window_level: Default::default(),
fit_canvas_to_parent: false,
prevent_default_event_handling: true,
canvas: None,
window_theme: None,
visible: true,
skip_taskbar: false,
clip_children: true,
desired_maximum_frame_latency: None,
recognize_pinch_gesture: false,
recognize_rotation_gesture: false,
recognize_doubletap_gesture: false,
recognize_pan_gesture: None,
movable_by_window_background: false,
fullsize_content_view: false,
has_shadow: true,
titlebar_shown: true,
titlebar_transparent: false,
titlebar_show_title: true,
titlebar_show_buttons: true,
prefers_home_indicator_hidden: false,
prefers_status_bar_hidden: false,
preferred_screen_edges_deferring_system_gestures: Default::default(),
}
}
}
impl Window {
/// Setting to true will attempt to maximize the window.
///
/// Setting to false will attempt to un-maximize the window.
pub fn set_maximized(&mut self, maximized: bool) {
self.internal.maximize_request = Some(maximized);
}
/// Setting to true will attempt to minimize the window.
///
/// Setting to false will attempt to un-minimize the window.
pub fn set_minimized(&mut self, minimized: bool) {
self.internal.minimize_request = Some(minimized);
}
/// Calling this will attempt to start a drag-move of the window.
///
/// There is no guarantee that this will work unless the left mouse button was
/// pressed immediately before this function was called.
pub fn start_drag_move(&mut self) {
self.internal.drag_move_request = true;
}
/// Calling this will attempt to start a drag-resize of the window.
///
/// There is no guarantee that this will work unless the left mouse button was
/// pressed immediately before this function was called.
pub fn start_drag_resize(&mut self, direction: CompassOctant) {
self.internal.drag_resize_request = Some(direction);
}
/// The window's client area width in logical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn width(&self) -> f32 {
self.resolution.width()
}
/// The window's client area height in logical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn height(&self) -> f32 {
self.resolution.height()
}
/// The window's client size in logical pixels
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn size(&self) -> Vec2 {
self.resolution.size()
}
/// The window's client area width in physical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn physical_width(&self) -> u32 {
self.resolution.physical_width()
}
/// The window's client area height in physical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn physical_height(&self) -> u32 {
self.resolution.physical_height()
}
/// The window's client size in physical pixels
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn physical_size(&self) -> UVec2 {
self.resolution.physical_size()
}
/// The window's scale factor.
///
/// Ratio of physical size to logical size, see [`WindowResolution`].
#[inline]
pub fn scale_factor(&self) -> f32 {
self.resolution.scale_factor()
}
/// The cursor position in this window in logical pixels.
///
/// Returns `None` if the cursor is outside the window area.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn cursor_position(&self) -> Option<Vec2> {
self.physical_cursor_position()
.map(|position| (position.as_dvec2() / self.scale_factor() as f64).as_vec2())
}
/// The cursor position in this window in physical pixels.
///
/// Returns `None` if the cursor is outside the window area.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
#[inline]
pub fn physical_cursor_position(&self) -> Option<Vec2> {
match self.internal.physical_cursor_position {
Some(position) => {
if position.x >= 0.
&& position.y >= 0.
&& position.x < self.physical_width() as f64
&& position.y < self.physical_height() as f64
{
Some(position.as_vec2())
} else {
None
}
}
None => None,
}
}
/// Set the cursor position in this window in logical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
pub fn set_cursor_position(&mut self, position: Option<Vec2>) {
self.internal.physical_cursor_position =
position.map(|p| p.as_dvec2() * self.scale_factor() as f64);
}
/// Set the cursor position in this window in physical pixels.
///
/// See [`WindowResolution`] for an explanation about logical/physical sizes.
pub fn set_physical_cursor_position(&mut self, position: Option<DVec2>) {
self.internal.physical_cursor_position = position;
}
}
/// The size limits on a [`Window`].
///
/// These values are measured in logical pixels (see [`WindowResolution`]), so the user's
/// scale factor does affect the size limits on the window.
///
/// Please note that if the window is resizable, then when the window is
/// maximized it may have a size outside of these limits. The functionality
/// required to disable maximizing is not yet exposed by winit.
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Default, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct WindowResizeConstraints {
/// The minimum width the window can have.
pub min_width: f32,
/// The minimum height the window can have.
pub min_height: f32,
/// The maximum width the window can have.
pub max_width: f32,
/// The maximum height the window can have.
pub max_height: f32,
}
impl Default for WindowResizeConstraints {
fn default() -> Self {
Self {
min_width: 180.,
min_height: 120.,
max_width: f32::INFINITY,
max_height: f32::INFINITY,
}
}
}
impl WindowResizeConstraints {
/// Checks if the constraints are valid.
///
/// Will output warnings if it isn't.
#[must_use]
pub fn check_constraints(&self) -> Self {
let &WindowResizeConstraints {
mut min_width,
mut min_height,
mut max_width,
mut max_height,
} = self;
min_width = min_width.max(1.);
min_height = min_height.max(1.);
if max_width < min_width {
warn!(
"The given maximum width {max_width} is smaller than the minimum width {min_width}"
);
max_width = min_width;
}
if max_height < min_height {
warn!(
"The given maximum height {max_height} is smaller than the minimum height {min_height}",
);
max_height = min_height;
}
WindowResizeConstraints {
min_width,
min_height,
max_width,
max_height,
}
}
}
/// Cursor data for a [`Window`].
#[derive(Component, Debug, Clone)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Debug, Default, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct CursorOptions {
/// Whether the cursor is visible or not.
///
/// ## Platform-specific
///
/// - **`Windows`**, **`X11`**, and **`Wayland`**: The cursor is hidden only when inside the window.
/// To stop the cursor from leaving the window, change [`CursorOptions::grab_mode`] to [`CursorGrabMode::Locked`] or [`CursorGrabMode::Confined`]
/// - **`macOS`**: The cursor is hidden only when the window is focused.
/// - **`iOS`** and **`Android`** do not have cursors
pub visible: bool,
/// Whether or not the cursor is locked by or confined within the window.
///
/// ## Platform-specific
///
/// - **`macOS`** doesn't support [`CursorGrabMode::Confined`]
/// - **`X11`** doesn't support [`CursorGrabMode::Locked`]
/// - **`iOS/Android`** don't have cursors.
///
/// Since `macOS` and `X11` don't have full [`CursorGrabMode`] support, we first try to set the grab mode that was asked for. If it doesn't work then use the alternate grab mode.
pub grab_mode: CursorGrabMode,
/// Set whether or not mouse events within *this* window are captured or fall through to the Window below.
///
/// ## Platform-specific
///
/// - iOS / Android / Web / X11: Unsupported.
pub hit_test: bool,
}
impl Default for CursorOptions {
fn default() -> Self {
CursorOptions {
visible: true,
grab_mode: CursorGrabMode::None,
hit_test: true,
}
}
}
/// Defines where a [`Window`] should be placed on the screen.
#[derive(Default, Debug, Clone, Copy, PartialEq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum WindowPosition {
/// Position will be set by the window manager.
/// Bevy will delegate this decision to the window manager and no guarantees can be made about where the window will be placed.
///
/// Used at creation but will be changed to [`At`](WindowPosition::At).
#[default]
Automatic,
/// Window will be centered on the selected monitor.
///
/// Note that this does not account for window decorations.
///
/// Used at creation or for update but will be changed to [`At`](WindowPosition::At)
Centered(MonitorSelection),
/// The window's top-left corner should be placed at the specified position (in physical pixels).
///
/// (0,0) represents top-left corner of screen space.
At(IVec2),
}
impl WindowPosition {
/// Creates a new [`WindowPosition`] at a position.
pub fn new(position: IVec2) -> Self {
Self::At(position)
}
/// Set the position to a specific point.
pub fn set(&mut self, position: IVec2) {
*self = WindowPosition::At(position);
}
/// Set the window to a specific monitor.
pub fn center(&mut self, monitor: MonitorSelection) {
*self = WindowPosition::Centered(monitor);
}
}
/// Controls the size of a [`Window`]
///
/// ## Physical, logical and requested sizes
///
/// There are three sizes associated with a window:
/// - the physical size,
/// which represents the actual height and width in physical pixels
/// the window occupies on the monitor,
/// - the logical size,
/// which represents the size that should be used to scale elements
/// inside the window, measured in logical pixels,
/// - the requested size,
/// measured in logical pixels, which is the value submitted
/// to the API when creating the window, or requesting that it be resized.
///
/// ## Scale factor
///
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/monitor.rs | crates/bevy_window/src/monitor.rs | use alloc::{string::String, vec::Vec};
use bevy_ecs::component::Component;
use bevy_math::{IVec2, UVec2};
#[cfg(feature = "bevy_reflect")]
use {bevy_ecs::prelude::ReflectComponent, bevy_reflect::Reflect};
#[cfg(all(feature = "serialize", feature = "bevy_reflect"))]
use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
/// Represents an available monitor as reported by the user's operating system, which can be used
/// to query information about the display, such as its size, position, and video modes.
///
/// Each monitor corresponds to an entity and can be used to position a monitor using
/// [`MonitorSelection::Entity`](`crate::window::MonitorSelection::Entity`).
///
/// # Warning
///
/// This component is synchronized with `winit` through `bevy_winit`, but is effectively
/// read-only as `winit` does not support changing monitor properties.
#[derive(Component, Debug, Clone)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Debug, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct Monitor {
/// The name of the monitor
pub name: Option<String>,
/// The height of the monitor in physical pixels
pub physical_height: u32,
/// The width of the monitor in physical pixels
pub physical_width: u32,
/// The position of the monitor in physical pixels
pub physical_position: IVec2,
/// The refresh rate of the monitor in millihertz
pub refresh_rate_millihertz: Option<u32>,
/// The scale factor of the monitor
pub scale_factor: f64,
/// The video modes that the monitor supports
pub video_modes: Vec<VideoMode>,
}
/// A marker component for the primary monitor
#[derive(Component, Debug, Clone)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Debug, Clone)
)]
pub struct PrimaryMonitor;
impl Monitor {
/// Returns the physical size of the monitor in pixels
pub fn physical_size(&self) -> UVec2 {
UVec2::new(self.physical_width, self.physical_height)
}
}
/// Represents a video mode that a monitor supports
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub struct VideoMode {
/// The resolution of the video mode
pub physical_size: UVec2,
/// The bit depth of the video mode
pub bit_depth: u16,
/// The refresh rate in millihertz
pub refresh_rate_millihertz: u32,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/cursor/system_cursor.rs | crates/bevy_window/src/cursor/system_cursor.rs | // This file contains a portion of the CSS Basic User Interface Module Level 3
// specification. In particular, the names for the cursor from the #cursor
// section and documentation for some of the variants were taken.
//
// The original document is https://www.w3.org/TR/css-ui-3/#cursor.
// Copyright © 2018 W3C® (MIT, ERCIM, Keio, Beihang)
//
// These documents were used under the terms of the following license. This W3C
// license as well as the W3C short notice apply to the `CursorIcon` enum's
// variants and documentation attached to them.
// --------- BEGINNING OF W3C LICENSE
// --------------------------------------------------------------
//
// License
//
// By obtaining and/or copying this work, you (the licensee) agree that you have
// read, understood, and will comply with the following terms and conditions.
//
// Permission to copy, modify, and distribute this work, with or without
// modification, for any purpose and without fee or royalty is hereby granted,
// provided that you include the following on ALL copies of the work or portions
// thereof, including modifications:
//
// - The full text of this NOTICE in a location viewable to users of the
// redistributed or derivative work.
// - Any pre-existing intellectual property disclaimers, notices, or terms and
// conditions. If none exist, the W3C Software and Document Short Notice
// should be included.
// - Notice of any changes or modifications, through a copyright statement on
// the new code or document such as "This software or document includes
// material copied from or derived from [title and URI of the W3C document].
// Copyright © [YEAR] W3C® (MIT, ERCIM, Keio, Beihang)."
//
// Disclaimers
//
// THIS WORK IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS
// OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO, WARRANTIES
// OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
// THE SOFTWARE OR DOCUMENT WILL NOT INFRINGE ANY THIRD PARTY PATENTS,
// COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
//
// COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR
// CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENT.
//
// The name and trademarks of copyright holders may NOT be used in advertising
// or publicity pertaining to the work without specific, written prior
// permission. Title to copyright in this work will at all times remain with
// copyright holders.
//
// --------- END OF W3C LICENSE
// --------------------------------------------------------------------
// --------- BEGINNING OF W3C SHORT NOTICE
// ---------------------------------------------------------
//
// winit: https://github.com/rust-windowing/cursor-icon
//
// Copyright © 2023 World Wide Web Consortium, (Massachusetts Institute of
// Technology, European Research Consortium for Informatics and Mathematics,
// Keio University, Beihang). All Rights Reserved. This work is distributed
// under the W3C® Software License [1] in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.
//
// [1] http://www.w3.org/Consortium/Legal/copyright-software
//
// --------- END OF W3C SHORT NOTICE
// --------------------------------------------------------------
#[cfg(feature = "bevy_reflect")]
use bevy_reflect::{prelude::ReflectDefault, Reflect};
#[cfg(feature = "serialize")]
use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
/// The icon to display for a window.
///
/// Examples of all of these cursors can be found [here](https://www.w3schools.com/cssref/playit.php?filename=playcss_cursor&preval=crosshair).
/// This `enum` is simply a copy of a similar `enum` found in [`winit`](https://docs.rs/winit/latest/winit/window/enum.CursorIcon.html).
/// `winit`, in turn, is based upon the [CSS3 UI spec](https://www.w3.org/TR/css-ui-3/#cursor).
///
/// See the [`window_settings`] example for usage.
///
/// [`window_settings`]: https://github.com/bevyengine/bevy/blob/latest/examples/window/window_settings.rs
#[derive(Default, Debug, Hash, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, PartialEq, Hash, Default, Clone)
)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
all(feature = "serialize", feature = "bevy_reflect"),
reflect(Serialize, Deserialize)
)]
pub enum SystemCursorIcon {
/// The platform-dependent default cursor. Often rendered as arrow.
#[default]
Default,
/// A context menu is available for the object under the cursor. Often
/// rendered as an arrow with a small menu-like graphic next to it.
ContextMenu,
/// Help is available for the object under the cursor. Often rendered as a
/// question mark or a balloon.
Help,
/// The cursor is a pointer that indicates a link. Often rendered as the
/// backside of a hand with the index finger extended.
Pointer,
/// A progress indicator. The program is performing some processing, but is
/// different from [`SystemCursorIcon::Wait`] in that the user may still interact
/// with the program.
Progress,
/// Indicates that the program is busy and the user should wait. Often
/// rendered as a watch or hourglass.
Wait,
/// Indicates that a cell or set of cells may be selected. Often rendered as
/// a thick plus-sign with a dot in the middle.
Cell,
/// A simple crosshair (e.g., short line segments resembling a "+" sign).
/// Often used to indicate a two dimensional bitmap selection mode.
Crosshair,
/// Indicates text that may be selected. Often rendered as an I-beam.
Text,
/// Indicates vertical-text that may be selected. Often rendered as a
/// horizontal I-beam.
VerticalText,
/// Indicates an alias of/shortcut to something is to be created. Often
/// rendered as an arrow with a small curved arrow next to it.
Alias,
/// Indicates something is to be copied. Often rendered as an arrow with a
/// small plus sign next to it.
Copy,
/// Indicates something is to be moved.
Move,
/// Indicates that the dragged item cannot be dropped at the current cursor
/// location. Often rendered as a hand or pointer with a small circle with a
/// line through it.
NoDrop,
/// Indicates that the requested action will not be carried out. Often
/// rendered as a circle with a line through it.
NotAllowed,
/// Indicates that something can be grabbed (dragged to be moved). Often
/// rendered as the backside of an open hand.
Grab,
/// Indicates that something is being grabbed (dragged to be moved). Often
/// rendered as the backside of a hand with fingers closed mostly out of
/// view.
Grabbing,
/// The east border to be moved.
EResize,
/// The north border to be moved.
NResize,
/// The north-east corner to be moved.
NeResize,
/// The north-west corner to be moved.
NwResize,
/// The south border to be moved.
SResize,
/// The south-east corner to be moved.
SeResize,
/// The south-west corner to be moved.
SwResize,
/// The west border to be moved.
WResize,
/// The east and west borders to be moved.
EwResize,
/// The south and north borders to be moved.
NsResize,
/// The north-east and south-west corners to be moved.
NeswResize,
/// The north-west and south-east corners to be moved.
NwseResize,
/// Indicates that the item/column can be resized horizontally. Often
/// rendered as arrows pointing left and right with a vertical bar
/// separating them.
ColResize,
/// Indicates that the item/row can be resized vertically. Often rendered as
/// arrows pointing up and down with a horizontal bar separating them.
RowResize,
/// Indicates that the something can be scrolled in any direction. Often
/// rendered as arrows pointing up, down, left, and right with a dot in the
/// middle.
AllScroll,
/// Indicates that something can be zoomed in. Often rendered as a
/// magnifying glass with a "+" in the center of the glass.
ZoomIn,
/// Indicates that something can be zoomed in. Often rendered as a
/// magnifying glass with a "-" in the center of the glass.
ZoomOut,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/cursor/custom_cursor.rs | crates/bevy_window/src/cursor/custom_cursor.rs | use crate::cursor::CursorIcon;
use alloc::string::String;
use bevy_asset::Handle;
use bevy_image::{Image, TextureAtlas};
use bevy_math::URect;
#[cfg(feature = "bevy_reflect")]
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
/// A custom cursor created from an image.
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, Default, Hash, PartialEq, Clone)
)]
pub struct CustomCursorImage {
/// Handle to the image to use as the cursor. The image must be in 8 bit int
/// or 32 bit float rgba. PNG images work well for this.
pub handle: Handle<Image>,
/// An optional texture atlas used to render the image.
pub texture_atlas: Option<TextureAtlas>,
/// Whether the image should be flipped along its x-axis.
///
/// If true, the cursor's `hotspot` automatically flips along with the
/// image.
pub flip_x: bool,
/// Whether the image should be flipped along its y-axis.
///
/// If true, the cursor's `hotspot` automatically flips along with the
/// image.
pub flip_y: bool,
/// An optional rectangle representing the region of the image to render,
/// instead of rendering the full image. This is an easy one-off alternative
/// to using a [`TextureAtlas`].
///
/// When used with a [`TextureAtlas`], the rect is offset by the atlas's
/// minimal (top-left) corner position.
pub rect: Option<URect>,
/// X and Y coordinates of the hotspot in pixels. The hotspot must be within
/// the image bounds.
///
/// If you are flipping the image using `flip_x` or `flip_y`, you don't need
/// to adjust this field to account for the flip because it is adjusted
/// automatically.
pub hotspot: (u16, u16),
}
/// A custom cursor created from a URL. Note that this currently only works on the web.
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Debug, Default, Hash, PartialEq, Clone)
)]
pub struct CustomCursorUrl {
/// Web URL to an image to use as the cursor. PNGs are preferred. Cursor
/// creation can fail if the image is invalid or not reachable.
pub url: String,
/// X and Y coordinates of the hotspot in pixels. The hotspot must be within
/// the image bounds.
pub hotspot: (u16, u16),
}
/// Custom cursor image data.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Clone, PartialEq, Hash)
)]
pub enum CustomCursor {
/// Use an image as the cursor.
Image(CustomCursorImage),
/// Use a URL to an image as the cursor. Note that this currently only works on the web.
Url(CustomCursorUrl),
}
impl From<CustomCursor> for CursorIcon {
fn from(cursor: CustomCursor) -> Self {
CursorIcon::Custom(cursor)
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_window/src/cursor/mod.rs | crates/bevy_window/src/cursor/mod.rs | //! Components to customize the window cursor.
#[cfg(feature = "custom_cursor")]
mod custom_cursor;
mod system_cursor;
#[cfg(feature = "custom_cursor")]
pub use custom_cursor::*;
pub use system_cursor::*;
use bevy_ecs::component::Component;
#[cfg(feature = "bevy_reflect")]
use bevy_ecs::reflect::ReflectComponent;
#[cfg(feature = "bevy_reflect")]
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
#[cfg(feature = "custom_cursor")]
pub use crate::cursor::{CustomCursor, CustomCursorImage};
/// Insert into a window entity to set the cursor for that window.
#[derive(Component, Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Component, Debug, Default, PartialEq, Clone)
)]
pub enum CursorIcon {
#[cfg(feature = "custom_cursor")]
/// Custom cursor image.
Custom(CustomCursor),
/// System provided cursor icon.
System(SystemCursorIcon),
}
impl Default for CursorIcon {
fn default() -> Self {
CursorIcon::System(Default::default())
}
}
impl From<SystemCursorIcon> for CursorIcon {
fn from(icon: SystemCursorIcon) -> Self {
CursorIcon::System(icon)
}
}
impl CursorIcon {
/// Returns the system cursor icon if this is a system cursor.
pub fn as_system(&self) -> Option<&SystemCursorIcon> {
#[cfg(feature = "custom_cursor")]
{
if let CursorIcon::System(icon) = self {
Some(icon)
} else {
None
}
}
#[cfg(not(feature = "custom_cursor"))]
{
let CursorIcon::System(icon) = self;
Some(icon)
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_a11y/src/lib.rs | crates/bevy_a11y/src/lib.rs | #![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
#![no_std]
//! Reusable accessibility primitives
//!
//! This crate provides accessibility integration for the engine. It exposes the
//! [`AccessibilityPlugin`]. This plugin integrates `AccessKit`, a Rust crate
//! providing OS-agnostic accessibility primitives, with Bevy's ECS.
//!
//! ## Some notes on utility
//!
//! While this crate defines useful types for accessibility, it does not
//! actually power accessibility features in Bevy.
//!
//! Instead, it helps other interfaces coordinate their approach to
//! accessibility. Binary authors should add the [`AccessibilityPlugin`], while
//! library maintainers may use the [`AccessibilityRequested`] and
//! [`ManageAccessibilityUpdates`] resources.
//!
//! The [`AccessibilityNode`] component is useful in both cases. It helps
//! describe an entity in terms of its accessibility factors through an
//! `AccessKit` "node".
//!
//! Typical UI concepts, like buttons, checkboxes, and textboxes, are easily
//! described by this component, though, technically, it can represent any kind
//! of Bevy [`Entity`].
//!
//! ## This crate no longer re-exports `AccessKit`
//!
//! As of Bevy version 0.15, [the `accesskit` crate][accesskit_crate] is no
//! longer re-exported from this crate.[^accesskit_node_confusion] If you need
//! to use `AccessKit` yourself, you'll have to add it as a separate dependency
//! in your project's `Cargo.toml`.
//!
//! Make sure to use the same version of the `accesskit` crate as Bevy.
//! Otherwise, you may experience errors similar to: "Perhaps two different
//! versions of crate `accesskit` are being used?"
//!
//! [accesskit_crate]: https://crates.io/crates/accesskit
//! [`Entity`]: bevy_ecs::entity::Entity
//!
//! <!--
//! note: multi-line footnotes need to be indented like this!
//!
//! please do not remove the indentation, or the second paragraph will display
//! at the end of the module docs, **before** the footnotes...
//! -->
//!
//! [^accesskit_node_confusion]: Some users were confused about `AccessKit`'s
//! `Node` type, sometimes thinking it was Bevy UI's primary way to define
//! nodes!
//!
//! For this reason, its re-export was removed by default. Users who need
//! its types can instead manually depend on the `accesskit` crate.
#[cfg(feature = "std")]
extern crate std;
extern crate alloc;
use alloc::sync::Arc;
use core::sync::atomic::{AtomicBool, Ordering};
use accesskit::Node;
use bevy_app::Plugin;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{component::Component, message::Message, resource::Resource, schedule::SystemSet};
#[cfg(feature = "bevy_reflect")]
use {
bevy_ecs::reflect::ReflectResource, bevy_reflect::std_traits::ReflectDefault,
bevy_reflect::Reflect,
};
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
#[cfg(all(feature = "bevy_reflect", feature = "serialize"))]
use bevy_reflect::{ReflectDeserialize, ReflectSerialize};
/// Wrapper struct for [`accesskit::ActionRequest`].
///
/// This newtype is required to use `ActionRequest` as a Bevy `Event`.
#[derive(Message, Deref, DerefMut)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ActionRequest(pub accesskit::ActionRequest);
/// Tracks whether an assistive technology has requested accessibility
/// information.
///
/// This type is a [`Resource`] initialized by the
/// [`AccessibilityPlugin`]. It may be useful if a third-party plugin needs to
/// conditionally integrate with `AccessKit`.
///
/// In other words, this resource represents whether accessibility providers
/// are "turned on" or "turned off" across an entire Bevy `App`.
///
/// By default, it is set to `false`, indicating that nothing has requested
/// accessibility information yet.
///
/// [`Resource`]: bevy_ecs::resource::Resource
#[derive(Resource, Default, Clone, Debug, Deref, DerefMut)]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Default, Clone, Resource)
)]
pub struct AccessibilityRequested(Arc<AtomicBool>);
impl AccessibilityRequested {
/// Checks if any assistive technology has requested accessibility
/// information.
///
/// If so, this method returns `true`, indicating that accessibility tree
/// updates should be sent.
pub fn get(&self) -> bool {
self.load(Ordering::SeqCst)
}
/// Sets the app's preference for sending accessibility updates.
///
/// If the `value` argument is `true`, this method requests that the app,
/// including both Bevy and third-party interfaces, provides updates to
/// accessibility information.
///
/// Setting with `false` requests that the entire app stops providing these
/// updates.
pub fn set(&self, value: bool) {
self.store(value, Ordering::SeqCst);
}
}
/// Determines whether Bevy's ECS updates the accessibility tree.
///
/// This [`Resource`] tells Bevy internals whether it should be handling
/// `AccessKit` updates (`true`), or if something else is doing that (`false`).
///
/// It defaults to `true`. So, by default, Bevy is configured to maintain the
/// `AccessKit` tree.
///
/// Set to `false` in cases where an external GUI library is sending
/// accessibility updates instead. When this option is set inconsistently with
/// that requirement, the external library and ECS will generate conflicting
/// updates.
///
/// [`Resource`]: bevy_ecs::resource::Resource
#[derive(Resource, Clone, Debug, Deref, DerefMut)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "bevy_reflect",
derive(Reflect),
reflect(Resource, Clone, Default)
)]
#[cfg_attr(
all(feature = "bevy_reflect", feature = "serialize"),
reflect(Serialize, Deserialize)
)]
pub struct ManageAccessibilityUpdates(bool);
impl Default for ManageAccessibilityUpdates {
fn default() -> Self {
Self(true)
}
}
impl ManageAccessibilityUpdates {
/// Returns `true` if Bevy's ECS should update the accessibility tree.
pub fn get(&self) -> bool {
self.0
}
/// Sets whether Bevy's ECS should update the accessibility tree.
pub fn set(&mut self, value: bool) {
self.0 = value;
}
}
/// Represents an entity to `AccessKit` through an [`accesskit::Node`].
///
/// Platform-specific accessibility APIs utilize `AccessKit` nodes in their
/// accessibility frameworks. So, this component acts as a translation between
/// "Bevy entity" and "platform-agnostic accessibility element".
///
/// ## Organization in the `AccessKit` Accessibility Tree
///
/// `AccessKit` allows users to form a "tree of nodes" providing accessibility
/// information. That tree is **not** Bevy's ECS!
///
/// To explain, let's say this component is added to an entity, `E`.
///
/// ### Parent and Child
///
/// If `E` has a parent, `P`, and `P` also has this `AccessibilityNode`
/// component, then `E`'s `AccessKit` node will be a child of `P`'s `AccessKit`
/// node.
///
/// Resulting `AccessKit` tree:
/// - P
/// - E
///
/// In other words, parent-child relationships are maintained, but only if both
/// have this component.
///
/// ### On the Window
///
/// If `E` doesn't have a parent, or if the immediate parent doesn't have an
/// `AccessibilityNode`, its `AccessKit` node will be an immediate child of the
/// primary window.
///
/// Resulting `AccessKit` tree:
/// - Primary window
/// - E
///
/// When there's no `AccessKit`-compatible parent, the child lacks hierarchical
/// information in `AccessKit`. As such, it is placed directly under the
/// primary window on the `AccessKit` tree.
///
/// This behavior may or may not be intended, so please utilize
/// `AccessibilityNode`s with care.
#[derive(Component, Clone, Deref, DerefMut)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct AccessibilityNode(
/// A representation of this component's entity to `AccessKit`.
///
/// Note that, with its parent struct acting as just a newtype, users are
/// intended to directly update this field.
pub Node,
);
impl From<Node> for AccessibilityNode {
/// Converts an [`accesskit::Node`] into the Bevy Engine
/// [`AccessibilityNode`] newtype.
///
/// Doing so allows it to be inserted onto Bevy entities, representing Bevy
/// entities in the `AccessKit` tree.
fn from(node: Node) -> Self {
Self(node)
}
}
/// A system set relating to accessibility.
///
/// Helps run accessibility updates all at once.
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "bevy_reflect", derive(Reflect))]
#[cfg_attr(
all(feature = "bevy_reflect", feature = "serialize"),
reflect(Serialize, Deserialize, Clone)
)]
pub enum AccessibilitySystems {
/// Update the accessibility tree.
Update,
}
/// Plugin managing integration with accessibility APIs.
///
/// Note that it doesn't handle GUI aspects of this integration, instead
/// providing helpful resources for other interfaces to utilize.
///
/// ## Behavior
///
/// This plugin's main role is to initialize the [`AccessibilityRequested`] and
/// [`ManageAccessibilityUpdates`] resources to their default values, meaning:
///
/// - no assistive technologies have requested accessibility information yet,
/// and
/// - Bevy's ECS will manage updates to the accessibility tree.
#[derive(Default)]
pub struct AccessibilityPlugin;
impl Plugin for AccessibilityPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.init_resource::<AccessibilityRequested>()
.init_resource::<ManageAccessibilityUpdates>()
.allow_ambiguous_component::<AccessibilityNode>();
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/macros/src/lib.rs | crates/bevy_gizmos/macros/src/lib.rs | #![cfg_attr(docsrs, feature(doc_cfg))]
//! Derive implementations for `bevy_gizmos`.
use bevy_macro_utils::BevyManifest;
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, parse_quote, DeriveInput, Path};
/// Implements the [`GizmoConfigGroup`] trait for a gizmo config group type.
#[proc_macro_derive(GizmoConfigGroup)]
pub fn derive_gizmo_config_group(input: TokenStream) -> TokenStream {
let mut ast = parse_macro_input!(input as DeriveInput);
BevyManifest::shared(|manifest| {
let bevy_gizmos_path: Path = manifest.get_path("bevy_gizmos");
let bevy_reflect_path: Path = manifest.get_path("bevy_reflect");
ast.generics.make_where_clause().predicates.push(
parse_quote! { Self: #bevy_reflect_path::Reflect + #bevy_reflect_path::TypePath + Default},
);
let struct_name = &ast.ident;
let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl();
TokenStream::from(quote! {
impl #impl_generics #bevy_gizmos_path::config::GizmoConfigGroup for #struct_name #type_generics #where_clause {
}
})
})
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/config.rs | crates/bevy_gizmos/src/config.rs | //! A module for the [`GizmoConfig<T>`] [`Resource`].
use bevy_camera::visibility::RenderLayers;
pub use bevy_gizmos_macros::GizmoConfigGroup;
use {crate::GizmoAsset, bevy_asset::Handle, bevy_ecs::component::Component};
use bevy_ecs::{reflect::ReflectResource, resource::Resource};
use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath};
use bevy_utils::TypeIdMap;
use core::{
any::TypeId,
hash::Hash,
ops::{Deref, DerefMut},
panic,
};
/// An enum configuring how line joints will be drawn.
#[derive(Debug, Default, Copy, Clone, Reflect, PartialEq, Eq, Hash)]
#[reflect(Default, PartialEq, Hash, Clone)]
pub enum GizmoLineJoint {
/// Does not draw any line joints.
#[default]
None,
/// Extends both lines at the joining point until they meet in a sharp point.
Miter,
/// Draws a round corner with the specified resolution between the two lines.
///
/// The resolution determines the amount of triangles drawn per joint,
/// e.g. `GizmoLineJoint::Round(4)` will draw 4 triangles at each line joint.
Round(u32),
/// Draws a bevel, a straight line in this case, to connect the ends of both lines.
Bevel,
}
/// An enum used to configure the style of gizmo lines, similar to CSS line-style
#[derive(Copy, Clone, Debug, Default, PartialEq, Reflect)]
#[reflect(Default, PartialEq, Hash, Clone)]
#[non_exhaustive]
pub enum GizmoLineStyle {
/// A solid line without any decorators
#[default]
Solid,
/// A dotted line
Dotted,
/// A dashed line with configurable gap and line sizes
Dashed {
/// The length of the gap in `line_width`s
gap_scale: f32,
/// The length of the visible line in `line_width`s
line_scale: f32,
},
}
impl Eq for GizmoLineStyle {}
impl Hash for GizmoLineStyle {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Solid => {
0u64.hash(state);
}
Self::Dotted => 1u64.hash(state),
Self::Dashed {
gap_scale,
line_scale,
} => {
2u64.hash(state);
gap_scale.to_bits().hash(state);
line_scale.to_bits().hash(state);
}
}
}
}
/// A trait used to create gizmo configs groups.
///
/// Here you can store additional configuration for you gizmo group not covered by [`GizmoConfig`]
///
/// Make sure to derive [`Default`] + [`Reflect`] and register in the app using `app.init_gizmo_group::<T>()`
pub trait GizmoConfigGroup: Reflect + TypePath + Default {}
/// The default gizmo config group.
#[derive(Default, Reflect, GizmoConfigGroup)]
#[reflect(Default)]
pub struct DefaultGizmoConfigGroup;
/// Used when the gizmo config group needs to be type-erased.
/// Also used for retained gizmos, which can't have a gizmo config group.
#[derive(Default, Reflect, GizmoConfigGroup, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct ErasedGizmoConfigGroup;
/// A [`Resource`] storing [`GizmoConfig`] and [`GizmoConfigGroup`] structs
///
/// Use `app.init_gizmo_group::<T>()` to register a custom config group.
#[derive(Reflect, Resource, Default)]
#[reflect(Resource, Default)]
pub struct GizmoConfigStore {
// INVARIANT: must map TypeId::of::<T>() to correct type T
#[reflect(ignore)]
store: TypeIdMap<(GizmoConfig, Box<dyn Reflect>)>,
}
impl GizmoConfigStore {
/// Returns [`GizmoConfig`] and [`GizmoConfigGroup`] associated with [`TypeId`] of a [`GizmoConfigGroup`]
pub fn get_config_dyn(&self, config_type_id: &TypeId) -> Option<(&GizmoConfig, &dyn Reflect)> {
let (config, ext) = self.store.get(config_type_id)?;
Some((config, ext.deref()))
}
/// Returns [`GizmoConfig`] and [`GizmoConfigGroup`] associated with [`GizmoConfigGroup`] `T`
pub fn config<T: GizmoConfigGroup>(&self) -> (&GizmoConfig, &T) {
let Some((config, ext)) = self.get_config_dyn(&TypeId::of::<T>()) else {
panic!("Requested config {} does not exist in `GizmoConfigStore`! Did you forget to add it using `app.init_gizmo_group<T>()`?", T::type_path());
};
// hash map invariant guarantees that &dyn Reflect is of correct type T
let ext = ext.as_any().downcast_ref().unwrap();
(config, ext)
}
/// Returns mutable [`GizmoConfig`] and [`GizmoConfigGroup`] associated with [`TypeId`] of a [`GizmoConfigGroup`]
pub fn get_config_mut_dyn(
&mut self,
config_type_id: &TypeId,
) -> Option<(&mut GizmoConfig, &mut dyn Reflect)> {
let (config, ext) = self.store.get_mut(config_type_id)?;
Some((config, ext.deref_mut()))
}
/// Returns mutable [`GizmoConfig`] and [`GizmoConfigGroup`] associated with [`GizmoConfigGroup`] `T`
pub fn config_mut<T: GizmoConfigGroup>(&mut self) -> (&mut GizmoConfig, &mut T) {
let Some((config, ext)) = self.get_config_mut_dyn(&TypeId::of::<T>()) else {
panic!("Requested config {} does not exist in `GizmoConfigStore`! Did you forget to add it using `app.init_gizmo_group<T>()`?", T::type_path());
};
// hash map invariant guarantees that &dyn Reflect is of correct type T
let ext = ext.as_any_mut().downcast_mut().unwrap();
(config, ext)
}
/// Returns an iterator over all [`GizmoConfig`]s.
pub fn iter(&self) -> impl Iterator<Item = (&TypeId, &GizmoConfig, &dyn Reflect)> + '_ {
self.store
.iter()
.map(|(id, (config, ext))| (id, config, ext.deref()))
}
/// Returns an iterator over all [`GizmoConfig`]s, by mutable reference.
pub fn iter_mut(
&mut self,
) -> impl Iterator<Item = (&TypeId, &mut GizmoConfig, &mut dyn Reflect)> + '_ {
self.store
.iter_mut()
.map(|(id, (config, ext))| (id, config, ext.deref_mut()))
}
/// Inserts [`GizmoConfig`] and [`GizmoConfigGroup`] replacing old values
pub fn insert<T: GizmoConfigGroup>(&mut self, config: GizmoConfig, ext_config: T) {
// INVARIANT: hash map must correctly map TypeId::of::<T>() to &dyn Reflect of type T
self.store
.insert(TypeId::of::<T>(), (config, Box::new(ext_config)));
}
pub(crate) fn register<T: GizmoConfigGroup>(&mut self) {
self.insert(GizmoConfig::default(), T::default());
}
}
/// A struct that stores configuration for gizmos.
#[derive(Clone, Reflect, Debug)]
#[reflect(Clone, Default)]
pub struct GizmoConfig {
/// Set to `false` to stop drawing gizmos.
///
/// Defaults to `true`.
pub enabled: bool,
/// Line settings.
pub line: GizmoLineConfig,
/// How closer to the camera than real geometry the gizmos should be.
///
/// In 2D this setting has no effect and is effectively always -1.
///
/// Value between -1 and 1 (inclusive).
/// * 0 means that there is no change to the line position when rendering
/// * 1 means it is furthest away from camera as possible
/// * -1 means that it will always render in front of other things.
///
/// This is typically useful if you are drawing wireframes on top of polygons
/// and your wireframe is z-fighting (flickering on/off) with your main model.
/// You would set this value to a negative number close to 0.
pub depth_bias: f32,
/// Describes which rendering layers gizmos will be rendered to.
///
/// Gizmos will only be rendered to cameras with intersecting layers.
pub render_layers: RenderLayers,
}
impl Default for GizmoConfig {
fn default() -> Self {
Self {
enabled: true,
line: Default::default(),
depth_bias: 0.,
render_layers: Default::default(),
}
}
}
/// A struct that stores configuration for gizmos.
#[derive(Clone, Reflect, Debug)]
#[reflect(Clone, Default)]
pub struct GizmoLineConfig {
/// Line width specified in pixels.
///
/// If `perspective` is `true` then this is the size in pixels at the camera's near plane.
///
/// Defaults to `2.0`.
pub width: f32,
/// Apply perspective to gizmo lines.
///
/// This setting only affects 3D, non-orthographic cameras.
///
/// Defaults to `false`.
pub perspective: bool,
/// Determine the style of gizmo lines.
pub style: GizmoLineStyle,
/// Describe how lines should join.
pub joints: GizmoLineJoint,
}
impl Default for GizmoLineConfig {
fn default() -> Self {
Self {
width: 2.,
perspective: false,
style: GizmoLineStyle::Solid,
joints: GizmoLineJoint::None,
}
}
}
/// Configuration for gizmo meshes.
#[derive(Component)]
pub struct GizmoMeshConfig {
/// Apply perspective to gizmo lines.
///
/// This setting only affects 3D, non-orthographic cameras.
///
/// Defaults to `false`.
pub line_perspective: bool,
/// Determine the style of gizmo lines.
pub line_style: GizmoLineStyle,
/// Describe how lines should join.
pub line_joints: GizmoLineJoint,
/// Describes which rendering layers gizmos will be rendered to.
///
/// Gizmos will only be rendered to cameras with intersecting layers.
pub render_layers: RenderLayers,
/// Handle of the gizmo asset.
pub handle: Handle<GizmoAsset>,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/light.rs | crates/bevy_gizmos/src/light.rs | //! A module adding debug visualization of [`PointLight`]s, [`SpotLight`]s and [`DirectionalLight`]s.
use core::f32::consts::PI;
use crate::primitives::dim3::GizmoPrimitive3d;
use bevy_app::{Plugin, PostUpdate};
use bevy_color::{
palettes::basic::{BLUE, GREEN, RED},
Color, Oklcha,
};
use bevy_ecs::{
component::Component,
entity::Entity,
query::Without,
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
system::{Query, Res},
};
use bevy_light::{DirectionalLight, PointLight, SpotLight};
use bevy_math::{
ops,
primitives::{Cone, Sphere},
Isometry3d, Quat, Vec3,
};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::{components::GlobalTransform, TransformSystems};
use crate::{
config::{GizmoConfigGroup, GizmoConfigStore},
gizmos::Gizmos,
AppGizmoBuilder,
};
/// Draws a standard sphere for the radius and an axis sphere for the range.
fn point_light_gizmo(
transform: &GlobalTransform,
point_light: &PointLight,
color: Color,
gizmos: &mut Gizmos<LightGizmoConfigGroup>,
) {
let position = transform.translation();
gizmos
.primitive_3d(&Sphere::new(point_light.radius), position, color)
.resolution(16);
gizmos
.sphere(position, point_light.range, color)
.resolution(32);
}
/// Draws a sphere for the radius, two cones for the inner and outer angles, plus two 3d arcs crossing the
/// farthest point of effect of the spot light along its direction.
fn spot_light_gizmo(
transform: &GlobalTransform,
spot_light: &SpotLight,
color: Color,
gizmos: &mut Gizmos<LightGizmoConfigGroup>,
) {
let (_, rotation, translation) = transform.to_scale_rotation_translation();
gizmos
.primitive_3d(&Sphere::new(spot_light.radius), translation, color)
.resolution(16);
// Offset the tip of the cone to the light position.
for angle in [spot_light.inner_angle, spot_light.outer_angle] {
let height = spot_light.range * ops::cos(angle);
let position = translation + rotation * Vec3::NEG_Z * height / 2.0;
gizmos
.primitive_3d(
&Cone {
radius: spot_light.range * ops::sin(angle),
height,
},
Isometry3d::new(position, rotation * Quat::from_rotation_x(PI / 2.0)),
color,
)
.height_resolution(4)
.base_resolution(32);
}
for arc_rotation in [
Quat::from_rotation_y(PI / 2.0 - spot_light.outer_angle),
Quat::from_euler(
bevy_math::EulerRot::XZY,
0.0,
PI / 2.0,
PI / 2.0 - spot_light.outer_angle,
),
] {
gizmos
.arc_3d(
2.0 * spot_light.outer_angle,
spot_light.range,
Isometry3d::new(translation, rotation * arc_rotation),
color,
)
.resolution(16);
}
}
/// Draws an arrow alongside the directional light direction.
fn directional_light_gizmo(
transform: &GlobalTransform,
color: Color,
gizmos: &mut Gizmos<LightGizmoConfigGroup>,
) {
let (_, rotation, translation) = transform.to_scale_rotation_translation();
gizmos
.arrow(translation, translation + rotation * Vec3::NEG_Z, color)
.with_tip_length(0.3);
}
/// A [`Plugin`] that provides visualization of [`PointLight`]s, [`SpotLight`]s
/// and [`DirectionalLight`]s for debugging.
pub struct LightGizmoPlugin;
impl Plugin for LightGizmoPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.init_gizmo_group::<LightGizmoConfigGroup>().add_systems(
PostUpdate,
(
draw_lights,
draw_all_lights.run_if(|config: Res<GizmoConfigStore>| {
config.config::<LightGizmoConfigGroup>().1.draw_all
}),
)
.after(TransformSystems::Propagate),
);
}
}
/// Configures how a color is attributed to a light gizmo.
#[derive(Debug, Clone, Copy, Default, Reflect)]
#[reflect(Clone, Default)]
pub enum LightGizmoColor {
/// User-specified color.
Manual(Color),
/// Random color derived from the light's [`Entity`].
Varied,
/// Take the color of the represented light.
#[default]
MatchLightColor,
/// Take the color provided by [`LightGizmoConfigGroup`] depending on the light kind.
ByLightType,
}
/// The [`GizmoConfigGroup`] used to configure the visualization of lights.
#[derive(Clone, Reflect, GizmoConfigGroup)]
#[reflect(Clone, Default)]
pub struct LightGizmoConfigGroup {
/// Draw a gizmo for all lights if true.
///
/// Defaults to `false`.
pub draw_all: bool,
/// Default color strategy for all light gizmos.
///
/// Defaults to [`LightGizmoColor::MatchLightColor`].
pub color: LightGizmoColor,
/// [`Color`] to use for drawing a [`PointLight`] gizmo when [`LightGizmoColor::ByLightType`] is used.
///
/// Defaults to [`RED`].
pub point_light_color: Color,
/// [`Color`] to use for drawing a [`SpotLight`] gizmo when [`LightGizmoColor::ByLightType`] is used.
///
/// Defaults to [`GREEN`].
pub spot_light_color: Color,
/// [`Color`] to use for drawing a [`DirectionalLight`] gizmo when [`LightGizmoColor::ByLightType`] is used.
///
/// Defaults to [`BLUE`].
pub directional_light_color: Color,
}
impl Default for LightGizmoConfigGroup {
fn default() -> Self {
Self {
draw_all: false,
color: LightGizmoColor::MatchLightColor,
point_light_color: RED.into(),
spot_light_color: GREEN.into(),
directional_light_color: BLUE.into(),
}
}
}
/// Add this [`Component`] to an entity to draw any of its lights components
/// ([`PointLight`], [`SpotLight`] and [`DirectionalLight`]).
#[derive(Component, Reflect, Default, Debug)]
#[reflect(Component, Default, Debug)]
pub struct ShowLightGizmo {
/// Default color strategy for this light gizmo. if [`None`], use the one provided by [`LightGizmoConfigGroup`].
///
/// Defaults to [`None`].
pub color: Option<LightGizmoColor>,
}
fn draw_lights(
point_query: Query<(Entity, &PointLight, &GlobalTransform, &ShowLightGizmo)>,
spot_query: Query<(Entity, &SpotLight, &GlobalTransform, &ShowLightGizmo)>,
directional_query: Query<(Entity, &DirectionalLight, &GlobalTransform, &ShowLightGizmo)>,
mut gizmos: Gizmos<LightGizmoConfigGroup>,
) {
let color = |entity: Entity, gizmo_color: Option<LightGizmoColor>, light_color, type_color| {
match gizmo_color.unwrap_or(gizmos.config_ext.color) {
LightGizmoColor::Manual(color) => color,
LightGizmoColor::Varied => Oklcha::sequential_dispersed(entity.index_u32()).into(),
LightGizmoColor::MatchLightColor => light_color,
LightGizmoColor::ByLightType => type_color,
}
};
for (entity, light, transform, light_gizmo) in &point_query {
let color = color(
entity,
light_gizmo.color,
light.color,
gizmos.config_ext.point_light_color,
);
point_light_gizmo(transform, light, color, &mut gizmos);
}
for (entity, light, transform, light_gizmo) in &spot_query {
let color = color(
entity,
light_gizmo.color,
light.color,
gizmos.config_ext.spot_light_color,
);
spot_light_gizmo(transform, light, color, &mut gizmos);
}
for (entity, light, transform, light_gizmo) in &directional_query {
let color = color(
entity,
light_gizmo.color,
light.color,
gizmos.config_ext.directional_light_color,
);
directional_light_gizmo(transform, color, &mut gizmos);
}
}
fn draw_all_lights(
point_query: Query<(Entity, &PointLight, &GlobalTransform), Without<ShowLightGizmo>>,
spot_query: Query<(Entity, &SpotLight, &GlobalTransform), Without<ShowLightGizmo>>,
directional_query: Query<
(Entity, &DirectionalLight, &GlobalTransform),
Without<ShowLightGizmo>,
>,
mut gizmos: Gizmos<LightGizmoConfigGroup>,
) {
match gizmos.config_ext.color {
LightGizmoColor::Manual(color) => {
for (_, light, transform) in &point_query {
point_light_gizmo(transform, light, color, &mut gizmos);
}
for (_, light, transform) in &spot_query {
spot_light_gizmo(transform, light, color, &mut gizmos);
}
for (_, _, transform) in &directional_query {
directional_light_gizmo(transform, color, &mut gizmos);
}
}
LightGizmoColor::Varied => {
let color = |entity: Entity| Oklcha::sequential_dispersed(entity.index_u32()).into();
for (entity, light, transform) in &point_query {
point_light_gizmo(transform, light, color(entity), &mut gizmos);
}
for (entity, light, transform) in &spot_query {
spot_light_gizmo(transform, light, color(entity), &mut gizmos);
}
for (entity, _, transform) in &directional_query {
directional_light_gizmo(transform, color(entity), &mut gizmos);
}
}
LightGizmoColor::MatchLightColor => {
for (_, light, transform) in &point_query {
point_light_gizmo(transform, light, light.color, &mut gizmos);
}
for (_, light, transform) in &spot_query {
spot_light_gizmo(transform, light, light.color, &mut gizmos);
}
for (_, light, transform) in &directional_query {
directional_light_gizmo(transform, light.color, &mut gizmos);
}
}
LightGizmoColor::ByLightType => {
for (_, light, transform) in &point_query {
point_light_gizmo(
transform,
light,
gizmos.config_ext.point_light_color,
&mut gizmos,
);
}
for (_, light, transform) in &spot_query {
spot_light_gizmo(
transform,
light,
gizmos.config_ext.spot_light_color,
&mut gizmos,
);
}
for (_, _, transform) in &directional_query {
directional_light_gizmo(
transform,
gizmos.config_ext.directional_light_color,
&mut gizmos,
);
}
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/retained.rs | crates/bevy_gizmos/src/retained.rs | //! This module is for 'retained' alternatives to the 'immediate mode' [`Gizmos`](crate::gizmos::Gizmos) system parameter.
use core::ops::{Deref, DerefMut};
use bevy_asset::Handle;
use bevy_ecs::{component::Component, reflect::ReflectComponent};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::components::Transform;
use crate::{
config::{ErasedGizmoConfigGroup, GizmoLineConfig},
gizmos::GizmoBuffer,
GizmoAsset,
};
impl Deref for GizmoAsset {
type Target = GizmoBuffer<ErasedGizmoConfigGroup, ()>;
fn deref(&self) -> &Self::Target {
&self.buffer
}
}
impl DerefMut for GizmoAsset {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.buffer
}
}
/// A component that draws the gizmos of a [`GizmoAsset`].
///
/// When drawing a greater number of static lines a [`Gizmo`] component can
/// have far better performance than the [`Gizmos`] system parameter,
/// but the system parameter will perform better for smaller lines that update often.
///
/// ## Example
/// ```
/// # use bevy_ecs::prelude::*;
/// # use bevy_gizmos::prelude::*;
/// # use bevy_asset::prelude::*;
/// # use bevy_color::palettes::css::*;
/// # use bevy_utils::default;
/// # use bevy_math::prelude::*;
/// fn system(
/// mut commands: Commands,
/// mut gizmo_assets: ResMut<Assets<GizmoAsset>>,
/// ) {
/// let mut gizmo = GizmoAsset::default();
///
/// gizmo.sphere(Vec3::ZERO, 1., RED);
///
/// commands.spawn(Gizmo {
/// handle: gizmo_assets.add(gizmo),
/// line_config: GizmoLineConfig {
/// width: 4.,
/// ..default()
/// },
/// ..default()
/// });
/// }
/// ```
///
/// [`Gizmos`]: crate::gizmos::Gizmos
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Clone, Default)]
#[require(Transform)]
pub struct Gizmo {
/// The handle to the gizmo to draw.
pub handle: Handle<GizmoAsset>,
/// The line specific configuration for this gizmo.
pub line_config: GizmoLineConfig,
/// How closer to the camera than real geometry the gizmo should be.
///
/// In 2D this setting has no effect and is effectively always -1.
///
/// Value between -1 and 1 (inclusive).
/// * 0 means that there is no change to the gizmo position when rendering
/// * 1 means it is furthest away from camera as possible
/// * -1 means that it will always render in front of other things.
///
/// This is typically useful if you are drawing wireframes on top of polygons
/// and your wireframe is z-fighting (flickering on/off) with your main model.
/// You would set this value to a negative number close to 0.
pub depth_bias: f32,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/global.rs | crates/bevy_gizmos/src/global.rs | use std::sync::Mutex;
use bevy_app::{App, Last, Plugin};
use bevy_ecs::schedule::IntoScheduleConfigs;
use crate::{
config::DefaultGizmoConfigGroup,
gizmos::{GizmoBuffer, Gizmos},
GizmoMeshSystems,
};
static GLOBAL_GIZMO: Mutex<GizmoBuffer<DefaultGizmoConfigGroup, ()>> =
Mutex::new(GizmoBuffer::new());
/// Lets you use bevy gizmos outside of systems.
pub struct GlobalGizmosPlugin;
impl Plugin for GlobalGizmosPlugin {
fn build(&self, app: &mut App) {
app.add_systems(Last, flush_global_gizmos.before(GizmoMeshSystems));
}
}
fn flush_global_gizmos(mut gizmos: Gizmos) {
let mut buffer = GizmoBuffer::new();
{
core::mem::swap(&mut buffer, &mut GLOBAL_GIZMO.lock().unwrap());
}
gizmos.strip_positions.extend(buffer.strip_positions);
gizmos.strip_colors.extend(buffer.strip_colors);
gizmos.list_positions.extend(buffer.list_positions);
gizmos.list_colors.extend(buffer.list_colors);
}
/// A global gizmo context for use outside of bevy systems.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::WHITE;
/// fn draw() {
/// gizmo().sphere(Isometry3d::IDENTITY, 0.5, WHITE);
/// }
/// ```
pub fn gizmo() -> impl core::ops::DerefMut<Target = GizmoBuffer<DefaultGizmoConfigGroup, ()>> {
GLOBAL_GIZMO.lock().unwrap()
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/lib.rs | crates/bevy_gizmos/src/lib.rs | #![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
//! This crate adds an immediate mode drawing api to Bevy for visual debugging.
//!
//! # Example
//! ```
//! # use bevy_gizmos::prelude::*;
//! # use bevy_math::prelude::*;
//! # use bevy_color::palettes::basic::GREEN;
//! fn system(mut gizmos: Gizmos) {
//! gizmos.line(Vec3::ZERO, Vec3::X, GREEN);
//! }
//! # bevy_ecs::system::assert_is_system(system);
//! ```
//!
//! See the documentation on [Gizmos](crate::gizmos::Gizmos) for more examples.
// Required to make proc macros work in bevy itself.
extern crate self as bevy_gizmos;
pub mod aabb;
pub mod arcs;
pub mod arrows;
pub mod circles;
pub mod config;
pub mod cross;
pub mod curves;
pub mod gizmos;
mod global;
pub mod grid;
pub mod primitives;
pub mod retained;
pub mod rounded_box;
#[cfg(feature = "bevy_light")]
pub mod light;
/// The gizmos prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::aabb::{AabbGizmoConfigGroup, ShowAabbGizmo};
#[doc(hidden)]
pub use crate::{
config::{
DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore,
GizmoLineConfig, GizmoLineJoint, GizmoLineStyle,
},
gizmos::Gizmos,
global::gizmo,
primitives::{dim2::GizmoPrimitive2d, dim3::GizmoPrimitive3d},
retained::Gizmo,
AppGizmoBuilder, GizmoAsset,
};
#[doc(hidden)]
#[cfg(feature = "bevy_light")]
pub use crate::light::{LightGizmoColor, LightGizmoConfigGroup, ShowLightGizmo};
}
use bevy_app::{App, FixedFirst, FixedLast, Last, Plugin, RunFixedMainLoop};
use bevy_asset::{Asset, AssetApp, Assets, Handle};
use bevy_ecs::{
resource::Resource,
schedule::{IntoScheduleConfigs, SystemSet},
system::{Res, ResMut},
};
use bevy_reflect::TypePath;
use crate::{config::ErasedGizmoConfigGroup, gizmos::GizmoBuffer};
use bevy_time::Fixed;
use bevy_utils::TypeIdMap;
use config::{DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore};
use core::{any::TypeId, marker::PhantomData, mem};
use gizmos::{GizmoStorage, Swap};
#[cfg(feature = "bevy_light")]
use light::LightGizmoPlugin;
/// A [`Plugin`] that provides an immediate mode drawing api for visual debugging.
#[derive(Default)]
pub struct GizmoPlugin;
impl Plugin for GizmoPlugin {
fn build(&self, app: &mut App) {
app.init_asset::<GizmoAsset>()
.init_resource::<GizmoHandles>()
// We insert the Resource GizmoConfigStore into the world implicitly here if it does not exist.
.init_gizmo_group::<DefaultGizmoConfigGroup>();
app.add_plugins((aabb::AabbGizmoPlugin, global::GlobalGizmosPlugin));
#[cfg(feature = "bevy_light")]
app.add_plugins(LightGizmoPlugin);
}
}
/// A extension trait adding `App::init_gizmo_group` and `App::insert_gizmo_config`.
pub trait AppGizmoBuilder {
/// Registers [`GizmoConfigGroup`] in the app enabling the use of [Gizmos<Config>](crate::gizmos::Gizmos).
///
/// Configurations can be set using the [`GizmoConfigStore`] [`Resource`].
fn init_gizmo_group<Config: GizmoConfigGroup>(&mut self) -> &mut Self;
/// Insert a [`GizmoConfig`] into a specific [`GizmoConfigGroup`].
///
/// This method should be preferred over [`AppGizmoBuilder::init_gizmo_group`] if and only if you need to configure fields upon initialization.
fn insert_gizmo_config<Config: GizmoConfigGroup>(
&mut self,
group: Config,
config: GizmoConfig,
) -> &mut Self;
}
impl AppGizmoBuilder for App {
fn init_gizmo_group<Config: GizmoConfigGroup>(&mut self) -> &mut Self {
if self.world().contains_resource::<GizmoStorage<Config, ()>>() {
return self;
}
self.world_mut()
.get_resource_or_init::<GizmoConfigStore>()
.register::<Config>();
let mut handles = self.world_mut().get_resource_or_init::<GizmoHandles>();
handles.handles.insert(TypeId::of::<Config>(), None);
// These handles are safe to mutate in any order
self.allow_ambiguous_resource::<GizmoHandles>();
self.init_resource::<GizmoStorage<Config, ()>>()
.init_resource::<GizmoStorage<Config, Fixed>>()
.init_resource::<GizmoStorage<Config, Swap<Fixed>>>()
.add_systems(
RunFixedMainLoop,
start_gizmo_context::<Config, Fixed>
.in_set(bevy_app::RunFixedMainLoopSystems::BeforeFixedMainLoop),
)
.add_systems(FixedFirst, clear_gizmo_context::<Config, Fixed>)
.add_systems(FixedLast, collect_requested_gizmos::<Config, Fixed>)
.add_systems(
RunFixedMainLoop,
end_gizmo_context::<Config, Fixed>
.in_set(bevy_app::RunFixedMainLoopSystems::AfterFixedMainLoop),
)
.add_systems(
Last,
(
propagate_gizmos::<Config, Fixed>.before(GizmoMeshSystems),
update_gizmo_meshes::<Config>.in_set(GizmoMeshSystems),
),
);
self
}
fn insert_gizmo_config<Config: GizmoConfigGroup>(
&mut self,
group: Config,
config: GizmoConfig,
) -> &mut Self {
self.init_gizmo_group::<Config>();
self.world_mut()
.get_resource_or_init::<GizmoConfigStore>()
.insert(config, group);
self
}
}
/// Holds handles to the line gizmos for each gizmo configuration group
// As `TypeIdMap` iteration order depends on the order of insertions and deletions, this uses
// `Option<Handle>` to be able to reserve the slot when creating the gizmo configuration group.
// That way iteration order is stable across executions and depends on the order of configuration
// group creation.
#[derive(Resource, Default)]
pub struct GizmoHandles {
handles: TypeIdMap<Option<Handle<GizmoAsset>>>,
}
impl GizmoHandles {
/// The handles to the gizmo assets of each gizmo configuration group.
pub fn handles(&self) -> &TypeIdMap<Option<Handle<GizmoAsset>>> {
&self.handles
}
}
/// Start a new gizmo clearing context.
///
/// Internally this pushes the parent default context into a swap buffer.
/// Gizmo contexts should be handled like a stack, so if you push a new context,
/// you must pop the context before the parent context ends.
pub fn start_gizmo_context<Config, Clear>(
mut swap: ResMut<GizmoStorage<Config, Swap<Clear>>>,
mut default: ResMut<GizmoStorage<Config, ()>>,
) where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
default.swap(&mut *swap);
}
/// End this gizmo clearing context.
///
/// Pop the default gizmos context out of the [`Swap<Clear>`] gizmo storage.
///
/// This must be called before [`GizmoMeshSystems`] in the [`Last`] schedule.
pub fn end_gizmo_context<Config, Clear>(
mut swap: ResMut<GizmoStorage<Config, Swap<Clear>>>,
mut default: ResMut<GizmoStorage<Config, ()>>,
) where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
default.clear();
default.swap(&mut *swap);
}
/// Collect the requested gizmos into a specific clear context.
pub fn collect_requested_gizmos<Config, Clear>(
mut update: ResMut<GizmoStorage<Config, ()>>,
mut context: ResMut<GizmoStorage<Config, Clear>>,
) where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
context.append_storage(&update);
update.clear();
}
/// Clear out the contextual gizmos.
pub fn clear_gizmo_context<Config, Clear>(mut context: ResMut<GizmoStorage<Config, Clear>>)
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
context.clear();
}
/// Propagate the contextual gizmo into the `Update` storage for rendering.
///
/// This should be before [`GizmoMeshSystems`].
pub fn propagate_gizmos<Config, Clear>(
mut update_storage: ResMut<GizmoStorage<Config, ()>>,
contextual_storage: Res<GizmoStorage<Config, Clear>>,
) where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
update_storage.append_storage(&*contextual_storage);
}
/// System set for updating the rendering meshes for drawing gizmos.
#[derive(SystemSet, Clone, Debug, PartialEq, Eq, Hash)]
pub struct GizmoMeshSystems;
/// Prepare gizmos for rendering.
///
/// This also clears the default `GizmoStorage`.
fn update_gizmo_meshes<Config: GizmoConfigGroup>(
mut gizmo_assets: ResMut<Assets<GizmoAsset>>,
mut handles: ResMut<GizmoHandles>,
mut storage: ResMut<GizmoStorage<Config, ()>>,
) {
if storage.list_positions.is_empty() && storage.strip_positions.is_empty() {
handles.handles.insert(TypeId::of::<Config>(), None);
} else if let Some(handle) = handles.handles.get_mut(&TypeId::of::<Config>()) {
if let Some(handle) = handle {
let gizmo = gizmo_assets.get_mut(handle.id()).unwrap();
gizmo.buffer.list_positions = mem::take(&mut storage.list_positions);
gizmo.buffer.list_colors = mem::take(&mut storage.list_colors);
gizmo.buffer.strip_positions = mem::take(&mut storage.strip_positions);
gizmo.buffer.strip_colors = mem::take(&mut storage.strip_colors);
} else {
let gizmo = GizmoAsset {
config_ty: TypeId::of::<Config>(),
buffer: GizmoBuffer {
enabled: true,
list_positions: mem::take(&mut storage.list_positions),
list_colors: mem::take(&mut storage.list_colors),
strip_positions: mem::take(&mut storage.strip_positions),
strip_colors: mem::take(&mut storage.strip_colors),
marker: PhantomData,
},
};
*handle = Some(gizmo_assets.add(gizmo));
}
}
}
/// A collection of gizmos.
///
/// Has the same gizmo drawing API as [`Gizmos`](crate::gizmos::Gizmos).
#[derive(Asset, Debug, Clone, TypePath)]
pub struct GizmoAsset {
/// vertex buffers.
buffer: GizmoBuffer<ErasedGizmoConfigGroup, ()>,
config_ty: TypeId,
}
impl GizmoAsset {
/// A reference to the gizmo's vertex buffer.
pub fn buffer(&self) -> &GizmoBuffer<ErasedGizmoConfigGroup, ()> {
&self.buffer
}
}
impl GizmoAsset {
/// Create a new [`GizmoAsset`].
pub fn new() -> Self {
GizmoAsset {
buffer: GizmoBuffer::default(),
config_ty: TypeId::of::<ErasedGizmoConfigGroup>(),
}
}
/// The type of the gizmo's configuration group.
pub fn config_typeid(&self) -> TypeId {
self.config_ty
}
}
impl Default for GizmoAsset {
fn default() -> Self {
GizmoAsset::new()
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/aabb.rs | crates/bevy_gizmos/src/aabb.rs | //! A module adding debug visualization of [`Aabb`]s.
use bevy_app::{Plugin, PostUpdate};
use bevy_camera::{primitives::Aabb, visibility::ViewVisibility};
use bevy_color::{Color, Oklcha};
use bevy_ecs::{
component::Component,
entity::Entity,
query::Without,
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
system::{Query, Res},
};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::{components::GlobalTransform, TransformSystems};
use crate::{
config::{GizmoConfigGroup, GizmoConfigStore},
gizmos::Gizmos,
AppGizmoBuilder,
};
/// A [`Plugin`] that provides visualization of [`Aabb`]s for debugging.
pub struct AabbGizmoPlugin;
impl Plugin for AabbGizmoPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.init_gizmo_group::<AabbGizmoConfigGroup>().add_systems(
PostUpdate,
(
draw_aabbs,
draw_all_aabbs.run_if(|config: Res<GizmoConfigStore>| {
config.config::<AabbGizmoConfigGroup>().1.draw_all
}),
)
.after(bevy_camera::visibility::VisibilitySystems::MarkNewlyHiddenEntitiesInvisible)
.after(TransformSystems::Propagate),
);
}
}
/// The [`GizmoConfigGroup`] used for debug visualizations of [`Aabb`] components on entities
#[derive(Clone, Default, Reflect, GizmoConfigGroup)]
#[reflect(Clone, Default)]
pub struct AabbGizmoConfigGroup {
/// Draws all bounding boxes in the scene when set to `true`.
///
/// To draw a specific entity's bounding box, you can add the [`ShowAabbGizmo`] component.
///
/// Defaults to `false`.
pub draw_all: bool,
/// The default color for bounding box gizmos.
///
/// A random color is chosen per box if `None`.
///
/// Defaults to `None`.
pub default_color: Option<Color>,
}
/// Add this [`Component`] to an entity to draw its [`Aabb`] component.
#[derive(Component, Reflect, Default, Debug)]
#[reflect(Component, Default, Debug)]
pub struct ShowAabbGizmo {
/// The color of the box.
///
/// The default color from the [`AabbGizmoConfigGroup`] config is used if `None`,
pub color: Option<Color>,
}
fn draw_aabbs(
query: Query<(
Entity,
&Aabb,
&GlobalTransform,
Option<&ViewVisibility>,
&ShowAabbGizmo,
)>,
mut gizmos: Gizmos<AabbGizmoConfigGroup>,
) {
for (entity, &aabb, &transform, view_visibility, gizmo) in &query {
if !is_visible(view_visibility) {
continue;
}
let color = gizmo
.color
.or(gizmos.config_ext.default_color)
.unwrap_or_else(|| color_from_entity(entity));
gizmos.aabb_3d(aabb, transform, color);
}
}
fn draw_all_aabbs(
query: Query<
(Entity, &Aabb, &GlobalTransform, Option<&ViewVisibility>),
Without<ShowAabbGizmo>,
>,
mut gizmos: Gizmos<AabbGizmoConfigGroup>,
) {
for (entity, &aabb, &transform, view_visibility) in &query {
if !is_visible(view_visibility) {
continue;
}
let color = gizmos
.config_ext
.default_color
.unwrap_or_else(|| color_from_entity(entity));
gizmos.aabb_3d(aabb, transform, color);
}
}
fn is_visible(view_visibility: Option<&ViewVisibility>) -> bool {
view_visibility.is_some_and(|v| v.get())
}
fn color_from_entity(entity: Entity) -> Color {
Oklcha::sequential_dispersed(entity.index_u32()).into()
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/arrows.rs | crates/bevy_gizmos/src/arrows.rs | //! Additional [`GizmoBuffer`] Functions -- Arrows
//!
//! Includes the implementation of [`GizmoBuffer::arrow`] and [`GizmoBuffer::arrow_2d`],
//! and assorted support items.
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::{
palettes::basic::{BLUE, GREEN, RED},
Color,
};
use bevy_math::{Quat, Vec2, Vec3, Vec3Swizzles};
use bevy_transform::TransformPoint;
/// A builder returned by [`GizmoBuffer::arrow`] and [`GizmoBuffer::arrow_2d`]
pub struct ArrowBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
start: Vec3,
end: Vec3,
color: Color,
double_ended: bool,
tip_length: f32,
}
impl<Config, Clear> ArrowBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Change the length of the tips to be `length`.
/// The default tip length is [length of the arrow]/10.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.arrow(Vec3::ZERO, Vec3::ONE, GREEN)
/// .with_tip_length(3.);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
#[doc(alias = "arrow_head_length")]
pub fn with_tip_length(mut self, length: f32) -> Self {
self.tip_length = length;
self
}
/// Adds another tip to the arrow, appended in the start point.
/// the default is only one tip at the end point.
pub fn with_double_end(mut self) -> Self {
self.double_ended = true;
self
}
}
impl<Config, Clear> Drop for ArrowBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Draws the arrow, by drawing lines with the stored [`GizmoBuffer`]
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
// first, draw the body of the arrow
self.gizmos.line(self.start, self.end, self.color);
// now the hard part is to draw the head in a sensible way
// put us in a coordinate system where the arrow is pointing towards +x and ends at the origin
let pointing_end = (self.end - self.start).normalize();
let rotation_end = Quat::from_rotation_arc(Vec3::X, pointing_end);
let tips = [
Vec3::new(-1., 1., 0.),
Vec3::new(-1., 0., 1.),
Vec3::new(-1., -1., 0.),
Vec3::new(-1., 0., -1.),
];
// - extend the vectors so their length is `tip_length`
// - rotate the world so +x is facing in the same direction as the arrow
// - translate over to the tip of the arrow
let tips_end = tips.map(|v| rotation_end * (v.normalize() * self.tip_length) + self.end);
for v in tips_end {
// then actually draw the tips
self.gizmos.line(self.end, v, self.color);
}
if self.double_ended {
let pointing_start = (self.start - self.end).normalize();
let rotation_start = Quat::from_rotation_arc(Vec3::X, pointing_start);
let tips_start =
tips.map(|v| rotation_start * (v.normalize() * self.tip_length) + self.start);
for v in tips_start {
// draw the start points tips
self.gizmos.line(self.start, v, self.color);
}
}
}
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Draw an arrow in 3D, from `start` to `end`. Has four tips for convenient viewing from any direction.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.arrow(Vec3::ZERO, Vec3::ONE, GREEN);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn arrow(
&mut self,
start: Vec3,
end: Vec3,
color: impl Into<Color>,
) -> ArrowBuilder<'_, Config, Clear> {
let length = (end - start).length();
ArrowBuilder {
gizmos: self,
start,
end,
color: color.into(),
double_ended: false,
tip_length: length / 10.,
}
}
/// Draw an arrow in 2D (on the xy plane), from `start` to `end`.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.arrow_2d(Vec2::ZERO, Vec2::X, GREEN);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn arrow_2d(
&mut self,
start: Vec2,
end: Vec2,
color: impl Into<Color>,
) -> ArrowBuilder<'_, Config, Clear> {
self.arrow(start.extend(0.), end.extend(0.), color)
}
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Draw a set of axes local to the given transform (`transform`), with length scaled by a factor
/// of `base_length`.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_ecs::prelude::*;
/// # use bevy_transform::components::Transform;
/// # #[derive(Component)]
/// # struct MyComponent;
/// fn draw_axes(
/// mut gizmos: Gizmos,
/// query: Query<&Transform, With<MyComponent>>,
/// ) {
/// for &transform in &query {
/// gizmos.axes(transform, 1.);
/// }
/// }
/// # bevy_ecs::system::assert_is_system(draw_axes);
/// ```
pub fn axes(&mut self, transform: impl TransformPoint, base_length: f32) {
let start = transform.transform_point(Vec3::ZERO);
let end_x = transform.transform_point(base_length * Vec3::X);
let end_y = transform.transform_point(base_length * Vec3::Y);
let end_z = transform.transform_point(base_length * Vec3::Z);
self.arrow(start, end_x, RED);
self.arrow(start, end_y, GREEN);
self.arrow(start, end_z, BLUE);
}
/// Draw a set of axes local to the given transform (`transform`), with length scaled by a factor
/// of `base_length`.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_ecs::prelude::*;
/// # use bevy_transform::components::Transform;
/// # #[derive(Component)]
/// # struct AxesComponent;
/// fn draw_axes_2d(
/// mut gizmos: Gizmos,
/// query: Query<&Transform, With<AxesComponent>>,
/// ) {
/// for &transform in &query {
/// gizmos.axes_2d(transform, 1.);
/// }
/// }
/// # bevy_ecs::system::assert_is_system(draw_axes_2d);
/// ```
pub fn axes_2d(&mut self, transform: impl TransformPoint, base_length: f32) {
let start = transform.transform_point(Vec3::ZERO);
let end_x = transform.transform_point(base_length * Vec3::X);
let end_y = transform.transform_point(base_length * Vec3::Y);
self.arrow_2d(start.xy(), end_x.xy(), RED);
self.arrow_2d(start.xy(), end_y.xy(), GREEN);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/rounded_box.rs | crates/bevy_gizmos/src/rounded_box.rs | //! Additional [`GizmoBuffer`] Functions -- Rounded cuboids and rectangles
//!
//! Includes the implementation of [`GizmoBuffer::rounded_rect`], [`GizmoBuffer::rounded_rect_2d`] and [`GizmoBuffer::rounded_cuboid`].
//! and assorted support items.
use core::f32::consts::FRAC_PI_2;
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::Color;
use bevy_math::{Isometry2d, Isometry3d, Quat, Vec2, Vec3};
use bevy_transform::components::Transform;
/// A builder returned by [`GizmoBuffer::rounded_rect`] and [`GizmoBuffer::rounded_rect_2d`]
pub struct RoundedRectBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
size: Vec2,
gizmos: &'a mut GizmoBuffer<Config, Clear>,
config: RoundedBoxConfig,
}
/// A builder returned by [`GizmoBuffer::rounded_cuboid`]
pub struct RoundedCuboidBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
size: Vec3,
gizmos: &'a mut GizmoBuffer<Config, Clear>,
config: RoundedBoxConfig,
}
struct RoundedBoxConfig {
isometry: Isometry3d,
color: Color,
corner_radius: f32,
arc_resolution: u32,
}
impl<Config, Clear> RoundedRectBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Change the radius of the corners to be `corner_radius`.
/// The default corner radius is [min axis of size] / 10.0
pub fn corner_radius(mut self, corner_radius: f32) -> Self {
self.config.corner_radius = corner_radius;
self
}
/// Change the resolution of the arcs at the corners of the rectangle.
/// The default value is 8
pub fn arc_resolution(mut self, arc_resolution: u32) -> Self {
self.config.arc_resolution = arc_resolution;
self
}
}
impl<Config, Clear> RoundedCuboidBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Change the radius of the edges to be `edge_radius`.
/// The default edge radius is [min axis of size] / 10.0
pub fn edge_radius(mut self, edge_radius: f32) -> Self {
self.config.corner_radius = edge_radius;
self
}
/// Change the resolution of the arcs at the edges of the cuboid.
/// The default value is 8
pub fn arc_resolution(mut self, arc_resolution: u32) -> Self {
self.config.arc_resolution = arc_resolution;
self
}
}
impl<Config, Clear> Drop for RoundedRectBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
let config = &self.config;
// Calculate inner and outer half size and ensure that the edge_radius is <= any half_length
let mut outer_half_size = self.size.abs() / 2.0;
let inner_half_size =
(outer_half_size - Vec2::splat(config.corner_radius.abs())).max(Vec2::ZERO);
let corner_radius = (outer_half_size - inner_half_size).min_element();
let mut inner_half_size = outer_half_size - Vec2::splat(corner_radius);
if config.corner_radius < 0. {
core::mem::swap(&mut outer_half_size, &mut inner_half_size);
}
// Handle cases where the rectangle collapses into simpler shapes
if outer_half_size.x * outer_half_size.y == 0. {
self.gizmos.line(
config.isometry * -outer_half_size.extend(0.),
config.isometry * outer_half_size.extend(0.),
config.color,
);
return;
}
if corner_radius == 0. {
self.gizmos.rect(config.isometry, self.size, config.color);
return;
}
let vertices = [
// top right
Vec3::new(inner_half_size.x, outer_half_size.y, 0.),
Vec3::new(inner_half_size.x, inner_half_size.y, 0.),
Vec3::new(outer_half_size.x, inner_half_size.y, 0.),
// bottom right
Vec3::new(outer_half_size.x, -inner_half_size.y, 0.),
Vec3::new(inner_half_size.x, -inner_half_size.y, 0.),
Vec3::new(inner_half_size.x, -outer_half_size.y, 0.),
// bottom left
Vec3::new(-inner_half_size.x, -outer_half_size.y, 0.),
Vec3::new(-inner_half_size.x, -inner_half_size.y, 0.),
Vec3::new(-outer_half_size.x, -inner_half_size.y, 0.),
// top left
Vec3::new(-outer_half_size.x, inner_half_size.y, 0.),
Vec3::new(-inner_half_size.x, inner_half_size.y, 0.),
Vec3::new(-inner_half_size.x, outer_half_size.y, 0.),
]
.map(|vec3| config.isometry * vec3);
for chunk in vertices.chunks_exact(3) {
self.gizmos
.short_arc_3d_between(chunk[1], chunk[0], chunk[2], config.color)
.resolution(config.arc_resolution);
}
let edges = if config.corner_radius > 0. {
[
(vertices[2], vertices[3]),
(vertices[5], vertices[6]),
(vertices[8], vertices[9]),
(vertices[11], vertices[0]),
]
} else {
[
(vertices[0], vertices[5]),
(vertices[3], vertices[8]),
(vertices[6], vertices[11]),
(vertices[9], vertices[2]),
]
};
for (start, end) in edges {
self.gizmos.line(start, end, config.color);
}
}
}
impl<Config, Clear> Drop for RoundedCuboidBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
let config = &self.config;
// Calculate inner and outer half size and ensure that the edge_radius is <= any half_length
let outer_half_size = self.size.abs() / 2.0;
let inner_half_size =
(outer_half_size - Vec3::splat(config.corner_radius.abs())).max(Vec3::ZERO);
let mut edge_radius = (outer_half_size - inner_half_size).min_element();
let inner_half_size = outer_half_size - Vec3::splat(edge_radius);
edge_radius *= config.corner_radius.signum();
// Handle cases where the rounded cuboid collapses into simpler shapes
if edge_radius == 0.0 {
let transform = Transform::from_translation(config.isometry.translation.into())
.with_rotation(config.isometry.rotation)
.with_scale(self.size);
self.gizmos.cube(transform, config.color);
return;
}
let rects = [
(
Vec3::X,
Vec2::new(self.size.z, self.size.y),
Quat::from_rotation_y(FRAC_PI_2),
),
(
Vec3::Y,
Vec2::new(self.size.x, self.size.z),
Quat::from_rotation_x(FRAC_PI_2),
),
(Vec3::Z, Vec2::new(self.size.x, self.size.y), Quat::IDENTITY),
];
for (position, size, rotation) in rects {
let local_position = position * inner_half_size;
self.gizmos
.rounded_rect(
config.isometry * Isometry3d::new(local_position, rotation),
size,
config.color,
)
.arc_resolution(config.arc_resolution)
.corner_radius(edge_radius);
self.gizmos
.rounded_rect(
config.isometry * Isometry3d::new(-local_position, rotation),
size,
config.color,
)
.arc_resolution(config.arc_resolution)
.corner_radius(edge_radius);
}
}
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Draw a wireframe rectangle with rounded corners in 3D.
///
/// # Arguments
///
/// - `isometry` defines the translation and rotation of the rectangle.
/// - the translation specifies the center of the rectangle
/// - defines orientation of the rectangle, by default we assume the rectangle is contained in
/// a plane parallel to the XY plane.
/// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box.
/// - `color`: color of the rectangle
///
/// # Builder methods
///
/// - The corner radius can be adjusted with the `.corner_radius(...)` method.
/// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the
/// `.arc_resolution(...)` method.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::css::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.rounded_rect(
/// Isometry3d::IDENTITY,
/// Vec2::ONE,
/// GREEN
/// )
/// .corner_radius(0.25)
/// .arc_resolution(10);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn rounded_rect(
&mut self,
isometry: impl Into<Isometry3d>,
size: Vec2,
color: impl Into<Color>,
) -> RoundedRectBuilder<'_, Config, Clear> {
let corner_radius = size.min_element() * DEFAULT_CORNER_RADIUS;
RoundedRectBuilder {
gizmos: self,
config: RoundedBoxConfig {
isometry: isometry.into(),
color: color.into(),
corner_radius,
arc_resolution: DEFAULT_ARC_RESOLUTION,
},
size,
}
}
/// Draw a wireframe rectangle with rounded corners in 2D.
///
/// # Arguments
///
/// - `isometry` defines the translation and rotation of the rectangle.
/// - the translation specifies the center of the rectangle
/// - defines orientation of the rectangle, by default we assume the rectangle aligned with all axes.
/// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box.
/// - `color`: color of the rectangle
///
/// # Builder methods
///
/// - The corner radius can be adjusted with the `.corner_radius(...)` method.
/// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the
/// `.arc_resolution(...)` method.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::css::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.rounded_rect_2d(
/// Isometry2d::IDENTITY,
/// Vec2::ONE,
/// GREEN
/// )
/// .corner_radius(0.25)
/// .arc_resolution(10);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn rounded_rect_2d(
&mut self,
isometry: impl Into<Isometry2d>,
size: Vec2,
color: impl Into<Color>,
) -> RoundedRectBuilder<'_, Config, Clear> {
let isometry = isometry.into();
let corner_radius = size.min_element() * DEFAULT_CORNER_RADIUS;
RoundedRectBuilder {
gizmos: self,
config: RoundedBoxConfig {
isometry: Isometry3d::new(
isometry.translation.extend(0.0),
Quat::from_rotation_z(isometry.rotation.as_radians()),
),
color: color.into(),
corner_radius,
arc_resolution: DEFAULT_ARC_RESOLUTION,
},
size,
}
}
/// Draw a wireframe cuboid with rounded corners in 3D.
///
/// # Arguments
///
/// - `isometry` defines the translation and rotation of the cuboid.
/// - the translation specifies the center of the cuboid
/// - defines orientation of the cuboid, by default we assume the cuboid aligned with all axes.
/// - `size`: defines the size of the cuboid. This refers to the 'outer size', similar to a bounding box.
/// - `color`: color of the cuboid
///
/// # Builder methods
///
/// - The edge radius can be adjusted with the `.edge_radius(...)` method.
/// - The resolution of the arcs at each edge (i.e. the level of detail) can be adjusted with the
/// `.arc_resolution(...)` method.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::css::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.rounded_cuboid(
/// Isometry3d::IDENTITY,
/// Vec3::ONE,
/// GREEN
/// )
/// .edge_radius(0.25)
/// .arc_resolution(10);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn rounded_cuboid(
&mut self,
isometry: impl Into<Isometry3d>,
size: Vec3,
color: impl Into<Color>,
) -> RoundedCuboidBuilder<'_, Config, Clear> {
let corner_radius = size.min_element() * DEFAULT_CORNER_RADIUS;
RoundedCuboidBuilder {
gizmos: self,
config: RoundedBoxConfig {
isometry: isometry.into(),
color: color.into(),
corner_radius,
arc_resolution: DEFAULT_ARC_RESOLUTION,
},
size,
}
}
}
const DEFAULT_ARC_RESOLUTION: u32 = 8;
const DEFAULT_CORNER_RADIUS: f32 = 0.1;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/gizmos.rs | crates/bevy_gizmos/src/gizmos.rs | //! A module for the [`Gizmos`] [`SystemParam`].
use core::{
iter,
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
};
use bevy_color::{Color, LinearRgba};
use bevy_ecs::{
change_detection::Tick,
query::FilteredAccessSet,
resource::Resource,
system::{
Deferred, ReadOnlySystemParam, Res, SystemBuffer, SystemMeta, SystemParam,
SystemParamValidationError,
},
world::{unsafe_world_cell::UnsafeWorldCell, World},
};
use bevy_math::{bounding::Aabb3d, Isometry2d, Isometry3d, Vec2, Vec3};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::TransformPoint;
use bevy_utils::default;
use crate::{
config::{DefaultGizmoConfigGroup, GizmoConfigGroup, GizmoConfigStore},
prelude::GizmoConfig,
};
/// Storage of gizmo primitives.
#[derive(Resource)]
pub struct GizmoStorage<Config, Clear> {
pub(crate) list_positions: Vec<Vec3>,
pub(crate) list_colors: Vec<LinearRgba>,
pub(crate) strip_positions: Vec<Vec3>,
pub(crate) strip_colors: Vec<LinearRgba>,
marker: PhantomData<(Config, Clear)>,
}
impl<Config, Clear> Default for GizmoStorage<Config, Clear> {
fn default() -> Self {
Self {
list_positions: default(),
list_colors: default(),
strip_positions: default(),
strip_colors: default(),
marker: PhantomData,
}
}
}
impl<Config, Clear> GizmoStorage<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Combine the other gizmo storage with this one.
    ///
    /// Copies all vertex data from `other`; `other` is left untouched.
    pub fn append_storage<OtherConfig, OtherClear>(
        &mut self,
        other: &GizmoStorage<OtherConfig, OtherClear>,
    ) {
        self.list_positions.extend_from_slice(&other.list_positions);
        self.list_colors.extend_from_slice(&other.list_colors);
        self.strip_positions.extend_from_slice(&other.strip_positions);
        self.strip_colors.extend_from_slice(&other.strip_colors);
    }
    /// Exchange the contents of this storage with `other` without copying.
    pub(crate) fn swap<OtherConfig, OtherClear>(
        &mut self,
        other: &mut GizmoStorage<OtherConfig, OtherClear>,
    ) {
        mem::swap(&mut self.list_positions, &mut other.list_positions);
        mem::swap(&mut self.list_colors, &mut other.list_colors);
        mem::swap(&mut self.strip_positions, &mut other.strip_positions);
        mem::swap(&mut self.strip_colors, &mut other.strip_colors);
    }
    /// Clear this gizmo storage of any requested gizmos.
    pub fn clear(&mut self) {
        self.list_positions.clear();
        self.list_colors.clear();
        self.strip_positions.clear();
        self.strip_colors.clear();
    }
}
/// Swap buffer for a specific clearing context.
///
/// This is to stash/store the default/requested gizmos so another context can
/// be substituted for that duration.
///
/// Zero-sized: the `Clear` type parameter only tags the clearing context.
pub struct Swap<Clear>(PhantomData<Clear>);
/// A [`SystemParam`] for drawing gizmos.
///
/// They are drawn in immediate mode, which means they will be rendered only for
/// the frames, or ticks when in [`FixedMain`](bevy_app::FixedMain), in which
/// they are spawned.
///
/// A system in [`Main`](bevy_app::Main) will be cleared each rendering
/// frame, while a system in [`FixedMain`](bevy_app::FixedMain) will be
/// cleared each time the [`RunFixedMainLoop`](bevy_app::RunFixedMainLoop)
/// schedule is run.
///
/// Gizmos should be spawned before the [`Last`](bevy_app::Last) schedule
/// to ensure they are drawn.
///
/// To set up your own clearing context (useful for custom scheduling similar
/// to [`FixedMain`](bevy_app::FixedMain)):
///
/// ```
/// use bevy_gizmos::{prelude::*, *, gizmos::GizmoStorage};
/// # use bevy_app::prelude::*;
/// # use bevy_ecs::{schedule::ScheduleLabel, prelude::*};
/// # #[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)]
/// # struct StartOfMyContext;
/// # #[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)]
/// # struct EndOfMyContext;
/// # #[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)]
/// # struct StartOfRun;
/// # #[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)]
/// # struct EndOfRun;
/// # struct MyContext;
/// struct ClearContextSetup;
/// impl Plugin for ClearContextSetup {
///     fn build(&self, app: &mut App) {
///         app.init_resource::<GizmoStorage<DefaultGizmoConfigGroup, MyContext>>()
///            // Make sure this context starts/ends cleanly if inside another context. E.g. it
///            // should start after the parent context starts and end after the parent context ends.
///            .add_systems(StartOfMyContext, start_gizmo_context::<DefaultGizmoConfigGroup, MyContext>)
///            // If not running multiple times, put this with [`start_gizmo_context`].
///            .add_systems(StartOfRun, clear_gizmo_context::<DefaultGizmoConfigGroup, MyContext>)
///            // If not running multiple times, put this with [`end_gizmo_context`].
///            .add_systems(EndOfRun, collect_requested_gizmos::<DefaultGizmoConfigGroup, MyContext>)
///            .add_systems(EndOfMyContext, end_gizmo_context::<DefaultGizmoConfigGroup, MyContext>)
///            .add_systems(
///                Last,
///                propagate_gizmos::<DefaultGizmoConfigGroup, MyContext>.before(GizmoMeshSystems),
///            );
///     }
/// }
/// ```
pub struct Gizmos<'w, 's, Config = DefaultGizmoConfigGroup, Clear = ()>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // System-local vertex buffer; its contents are flushed into the shared
    // `GizmoStorage` resource when the system's deferred params are applied.
    buffer: Deferred<'s, GizmoBuffer<Config, Clear>>,
    /// The currently used [`GizmoConfig`]
    pub config: &'w GizmoConfig,
    /// The currently used [`GizmoConfigGroup`]
    pub config_ext: &'w Config,
}
// Dereferencing `Gizmos` exposes all drawing methods defined on `GizmoBuffer`.
impl<'w, 's, Config, Clear> Deref for Gizmos<'w, 's, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type Target = GizmoBuffer<Config, Clear>;
    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}
// Mutable dereference so gizmo drawing calls (which need `&mut`) work directly
// on the `Gizmos` system param.
impl<'w, 's, Config, Clear> DerefMut for Gizmos<'w, 's, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}
// The underlying tuple of params that the manual `SystemParam` impl for
// `Gizmos` delegates to: the system-local buffer plus read access to the
// config store.
type GizmosState<Config, Clear> = (
    Deferred<'static, GizmoBuffer<Config, Clear>>,
    Res<'static, GizmoConfigStore>,
);
#[doc(hidden)]
pub struct GizmosFetchState<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // State of the delegated `GizmosState` tuple param.
    state: <GizmosState<Config, Clear> as SystemParam>::State,
}
#[expect(
    unsafe_code,
    reason = "We cannot implement SystemParam without using unsafe code."
)]
// SAFETY: All methods are delegated to existing `SystemParam` implementations
unsafe impl<Config, Clear> SystemParam for Gizmos<'_, '_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type State = GizmosFetchState<Config, Clear>;
    type Item<'w, 's> = Gizmos<'w, 's, Config, Clear>;
    // Delegate state creation to the underlying tuple param.
    fn init_state(world: &mut World) -> Self::State {
        GizmosFetchState {
            state: GizmosState::<Config, Clear>::init_state(world),
        }
    }
    // Delegate access registration to the underlying tuple param.
    fn init_access(
        state: &Self::State,
        system_meta: &mut SystemMeta,
        component_access_set: &mut FilteredAccessSet,
        world: &mut World,
    ) {
        GizmosState::<Config, Clear>::init_access(
            &state.state,
            system_meta,
            component_access_set,
            world,
        );
    }
    // Applying flushes the `Deferred` buffer (see `SystemBuffer for GizmoBuffer`).
    fn apply(state: &mut Self::State, system_meta: &SystemMeta, world: &mut World) {
        GizmosState::<Config, Clear>::apply(&mut state.state, system_meta, world);
    }
    #[inline]
    unsafe fn validate_param(
        state: &mut Self::State,
        system_meta: &SystemMeta,
        world: UnsafeWorldCell,
    ) -> Result<(), SystemParamValidationError> {
        // SAFETY: Delegated to existing `SystemParam` implementations.
        unsafe {
            GizmosState::<Config, Clear>::validate_param(&mut state.state, system_meta, world)
        }
    }
    #[inline]
    unsafe fn get_param<'w, 's>(
        state: &'s mut Self::State,
        system_meta: &SystemMeta,
        world: UnsafeWorldCell<'w>,
        change_tick: Tick,
    ) -> Self::Item<'w, 's> {
        // SAFETY: Delegated to existing `SystemParam` implementations.
        let (mut f0, f1) = unsafe {
            GizmosState::<Config, Clear>::get_param(
                &mut state.state,
                system_meta,
                world,
                change_tick,
            )
        };
        // Accessing the GizmoConfigStore in every API call reduces performance significantly.
        // Implementing SystemParam manually allows us to cache whether the config is currently enabled.
        // Having this available allows for cheap early returns when gizmos are disabled.
        let (config, config_ext) = f1.into_inner().config::<Config>();
        f0.enabled = config.enabled;
        Gizmos {
            buffer: f0,
            config,
            config_ext,
        }
    }
}
#[expect(
    unsafe_code,
    reason = "We cannot implement ReadOnlySystemParam without using unsafe code."
)]
// Safety: Each field is `ReadOnlySystemParam`, and Gizmos SystemParam does not mutate world
// (the bounds below require both delegated params to be read-only themselves).
unsafe impl<'w, 's, Config, Clear> ReadOnlySystemParam for Gizmos<'w, 's, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
    Deferred<'s, GizmoBuffer<Config, Clear>>: ReadOnlySystemParam,
    Res<'w, GizmoConfigStore>: ReadOnlySystemParam,
{
}
/// Buffer for gizmo vertex data.
#[derive(Debug, Clone, Reflect)]
#[reflect(Default)]
pub struct GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Cached copy of `GizmoConfig::enabled`, set in `SystemParam::get_param`,
    // so every drawing method can early-out cheaply when gizmos are disabled.
    pub(crate) enabled: bool,
    /// The positions of line segment endpoints.
    pub list_positions: Vec<Vec3>,
    /// The colors of line segment endpoints.
    pub list_colors: Vec<LinearRgba>,
    /// The positions of line strip vertices.
    pub strip_positions: Vec<Vec3>,
    /// The colors of line strip vertices.
    pub strip_colors: Vec<LinearRgba>,
    #[reflect(ignore, clone)]
    pub(crate) marker: PhantomData<(Config, Clear)>,
}
impl<Config, Clear> Default for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Equivalent to [`GizmoBuffer::new`]: enabled, with no recorded vertices.
    fn default() -> Self {
        Self::new()
    }
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Constructs an empty `GizmoBuffer`.
    ///
    /// The buffer starts enabled and holds no vertex data; usable in `const`
    /// contexts since `Vec::new` allocates nothing.
    pub const fn new() -> Self {
        Self {
            enabled: true,
            list_positions: Vec::new(),
            list_colors: Vec::new(),
            strip_positions: Vec::new(),
            strip_colors: Vec::new(),
            marker: PhantomData,
        }
    }
}
/// Read-only view into [`GizmoBuffer`] data.
///
/// Obtained via [`GizmoBuffer::buffer`].
pub struct GizmoBufferView<'a> {
    /// Vertex positions for line-list topology.
    pub list_positions: &'a Vec<Vec3>,
    /// Vertex colors for line-list topology.
    pub list_colors: &'a Vec<LinearRgba>,
    /// Vertex positions for line-strip topology.
    pub strip_positions: &'a Vec<Vec3>,
    /// Vertex colors for line-strip topology.
    pub strip_colors: &'a Vec<LinearRgba>,
}
impl<Config, Clear> SystemBuffer for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn apply(&mut self, _system_meta: &SystemMeta, world: &mut World) {
let mut storage = world.resource_mut::<GizmoStorage<Config, Clear>>();
storage.list_positions.append(&mut self.list_positions);
storage.list_colors.append(&mut self.list_colors);
storage.strip_positions.append(&mut self.strip_positions);
storage.strip_colors.append(&mut self.strip_colors);
}
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Clear all data.
    ///
    /// Retains the allocated capacity of each vertex buffer.
    pub fn clear(&mut self) {
        self.list_positions.clear();
        self.list_colors.clear();
        self.strip_positions.clear();
        self.strip_colors.clear();
    }
    /// Read-only view into the buffers data.
    pub fn buffer(&self) -> GizmoBufferView<'_> {
        GizmoBufferView {
            list_positions: &self.list_positions,
            list_colors: &self.list_colors,
            strip_positions: &self.strip_positions,
            strip_colors: &self.strip_colors,
        }
    }
    /// Draw a line in 3D from `start` to `end`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.line(Vec3::ZERO, Vec3::X, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn line(&mut self, start: Vec3, end: Vec3, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        self.extend_list_positions([start, end]);
        // Two vertices were pushed above, so the color is repeated twice.
        self.add_list_color(color, 2);
    }
    /// Draw a line in 3D with a color gradient from `start` to `end`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.line_gradient(Vec3::ZERO, Vec3::X, GREEN, RED);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn line_gradient<C: Into<Color>>(
        &mut self,
        start: Vec3,
        end: Vec3,
        start_color: C,
        end_color: C,
    ) {
        if !self.enabled {
            return;
        }
        self.extend_list_positions([start, end]);
        self.extend_list_colors([start_color, end_color]);
    }
    /// Draw a line in 3D from `start` to `start + vector`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.ray(Vec3::Y, Vec3::X, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn ray(&mut self, start: Vec3, vector: Vec3, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        self.line(start, start + vector, color);
    }
    /// Draw a line in 3D with a color gradient from `start` to `start + vector`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.ray_gradient(Vec3::Y, Vec3::X, GREEN, RED);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn ray_gradient<C: Into<Color>>(
        &mut self,
        start: Vec3,
        vector: Vec3,
        start_color: C,
        end_color: C,
    ) {
        if !self.enabled {
            return;
        }
        self.line_gradient(start, start + vector, start_color, end_color);
    }
    /// Draw a line in 3D made of straight segments between the points.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.linestrip([Vec3::ZERO, Vec3::X, Vec3::Y], GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn linestrip(
        &mut self,
        positions: impl IntoIterator<Item = Vec3>,
        color: impl Into<Color>,
    ) {
        if !self.enabled {
            return;
        }
        // `extend_strip_positions` appends the points plus a trailing NaN
        // separator, so `len` already counts the separator vertex.
        self.extend_strip_positions(positions);
        let len = self.strip_positions.len();
        let linear_color = LinearRgba::from(color.into());
        // Color every real vertex, then push a NaN color for the separator.
        self.strip_colors.resize(len - 1, linear_color);
        self.strip_colors.push(LinearRgba::NAN);
    }
    /// Draw a line in 3D made of straight segments between the points, with the first and last connected.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.lineloop([Vec3::ZERO, Vec3::X, Vec3::Y], GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn lineloop(&mut self, positions: impl IntoIterator<Item = Vec3>, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        // Loop back to the start; second is needed to ensure that
        // the joint on the first corner is drawn.
        let mut positions = positions.into_iter();
        let first = positions.next();
        let second = positions.next();
        self.linestrip(
            first
                .into_iter()
                .chain(second)
                .chain(positions)
                .chain(first)
                .chain(second),
            color,
        );
    }
    /// Draw a line in 3D made of straight segments between the points, with a color gradient.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{BLUE, GREEN, RED};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.linestrip_gradient([
    ///         (Vec3::ZERO, GREEN),
    ///         (Vec3::X, RED),
    ///         (Vec3::Y, BLUE)
    ///     ]);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn linestrip_gradient<C: Into<Color>>(
        &mut self,
        points: impl IntoIterator<Item = (Vec3, C)>,
    ) {
        if !self.enabled {
            return;
        }
        let points = points.into_iter();
        let GizmoBuffer {
            strip_positions,
            strip_colors,
            ..
        } = self;
        // Pre-reserve from the iterator's lower size bound to avoid regrowth.
        let (min, _) = points.size_hint();
        strip_positions.reserve(min);
        strip_colors.reserve(min);
        for (position, color) in points {
            strip_positions.push(position);
            strip_colors.push(LinearRgba::from(color.into()));
        }
        // NaN vertex/color pair terminates the strip.
        strip_positions.push(Vec3::NAN);
        strip_colors.push(LinearRgba::NAN);
    }
    /// Draw a wireframe rectangle in 3D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry3d::IDENTITY` then
    ///
    /// - the center is at `Vec3::ZERO`
    /// - the sizes are aligned with the `Vec3::X` and `Vec3::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.rect(Isometry3d::IDENTITY, Vec2::ONE, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn rect(&mut self, isometry: impl Into<Isometry3d>, size: Vec2, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        let isometry = isometry.into();
        // Corners are built in the XY plane (z = 0), then transformed.
        let [tl, tr, br, bl] = rect_inner(size).map(|vec2| isometry * vec2.extend(0.));
        self.lineloop([tl, tr, br, bl], color);
    }
    /// Draw a wireframe cube in 3D.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_transform::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.cube(Transform::IDENTITY, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn cube(&mut self, transform: impl TransformPoint, color: impl Into<Color>) {
        // Convert the color once up front since it is used for both the strip
        // and the list vertices below.
        let polymorphic_color: Color = color.into();
        if !self.enabled {
            return;
        }
        let rect = rect_inner(Vec2::ONE);
        // Front
        let [tlf, trf, brf, blf] = rect.map(|vec2| transform.transform_point(vec2.extend(0.5)));
        // Back
        let [tlb, trb, brb, blb] = rect.map(|vec2| transform.transform_point(vec2.extend(-0.5)));
        let strip_positions = [
            tlf, trf, brf, blf, tlf, // Front
            tlb, trb, brb, blb, tlb, // Back
        ];
        self.linestrip(strip_positions, polymorphic_color);
        // Connect the two faces with individual segments (3 edges as pairs).
        let list_positions = [
            trf, trb, brf, brb, blf, blb, // Front to back
        ];
        self.extend_list_positions(list_positions);
        self.add_list_color(polymorphic_color, 6);
    }
    /// Draw a wireframe aabb in 3D.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_transform::prelude::*;
    /// # use bevy_math::{bounding::Aabb3d, Vec3};
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.aabb_3d(Aabb3d::new(Vec3::ZERO, Vec3::ONE), Transform::IDENTITY, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn aabb_3d(
        &mut self,
        aabb: impl Into<Aabb3d>,
        transform: impl TransformPoint,
        color: impl Into<Color>,
    ) {
        // Color converted once; used for both the strip and the list below.
        let polymorphic_color: Color = color.into();
        if !self.enabled {
            return;
        }
        let aabb = aabb.into();
        // The eight corners: front face (max z) then back face (min z).
        let [tlf, trf, brf, blf, tlb, trb, brb, blb] = [
            Vec3::new(aabb.min.x, aabb.max.y, aabb.max.z),
            Vec3::new(aabb.max.x, aabb.max.y, aabb.max.z),
            Vec3::new(aabb.max.x, aabb.min.y, aabb.max.z),
            Vec3::new(aabb.min.x, aabb.min.y, aabb.max.z),
            Vec3::new(aabb.min.x, aabb.max.y, aabb.min.z),
            Vec3::new(aabb.max.x, aabb.max.y, aabb.min.z),
            Vec3::new(aabb.max.x, aabb.min.y, aabb.min.z),
            Vec3::new(aabb.min.x, aabb.min.y, aabb.min.z),
        ]
        .map(|v| transform.transform_point(v));
        let strip_positions = [
            tlf, trf, brf, blf, tlf, // Front
            tlb, trb, brb, blb, tlb, // Back
        ];
        self.linestrip(strip_positions, polymorphic_color);
        let list_positions = [
            trf, trb, brf, brb, blf, blb, // Front to back
        ];
        self.extend_list_positions(list_positions);
        self.add_list_color(polymorphic_color, 6);
    }
    /// Draw a line in 2D from `start` to `end`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.line_2d(Vec2::ZERO, Vec2::X, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn line_2d(&mut self, start: Vec2, end: Vec2, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        // 2D gizmos are drawn as 3D lines at z = 0.
        self.line(start.extend(0.), end.extend(0.), color);
    }
    /// Draw a line in 2D with a color gradient from `start` to `end`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.line_gradient_2d(Vec2::ZERO, Vec2::X, GREEN, RED);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn line_gradient_2d<C: Into<Color>>(
        &mut self,
        start: Vec2,
        end: Vec2,
        start_color: C,
        end_color: C,
    ) {
        if !self.enabled {
            return;
        }
        self.line_gradient(start.extend(0.), end.extend(0.), start_color, end_color);
    }
    /// Draw a line in 2D made of straight segments between the points.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.linestrip_2d([Vec2::ZERO, Vec2::X, Vec2::Y], GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn linestrip_2d(
        &mut self,
        positions: impl IntoIterator<Item = Vec2>,
        color: impl Into<Color>,
    ) {
        if !self.enabled {
            return;
        }
        self.linestrip(positions.into_iter().map(|vec2| vec2.extend(0.)), color);
    }
    /// Draw a line in 2D made of straight segments between the points, with the first and last connected.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.lineloop_2d([Vec2::ZERO, Vec2::X, Vec2::Y], GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn lineloop_2d(
        &mut self,
        positions: impl IntoIterator<Item = Vec2>,
        color: impl Into<Color>,
    ) {
        if !self.enabled {
            return;
        }
        self.lineloop(positions.into_iter().map(|vec2| vec2.extend(0.)), color);
    }
    /// Draw a line in 2D made of straight segments between the points, with a color gradient.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN, BLUE};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.linestrip_gradient_2d([
    ///         (Vec2::ZERO, GREEN),
    ///         (Vec2::X, RED),
    ///         (Vec2::Y, BLUE)
    ///     ]);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn linestrip_gradient_2d<C: Into<Color>>(
        &mut self,
        positions: impl IntoIterator<Item = (Vec2, C)>,
    ) {
        if !self.enabled {
            return;
        }
        // Lift 2D points into the z = 0 plane and delegate to the 3D variant.
        self.linestrip_gradient(
            positions
                .into_iter()
                .map(|(vec2, color)| (vec2.extend(0.), color)),
        );
    }
    /// Draw a line in 2D from `start` to `start + vector`.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.ray_2d(Vec2::Y, Vec2::X, GREEN);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn ray_2d(&mut self, start: Vec2, vector: Vec2, color: impl Into<Color>) {
        if !self.enabled {
            return;
        }
        self.line_2d(start, start + vector, color);
    }
/// Draw a line in 2D with a color gradient from `start` to `start + vector`.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::{RED, GREEN};
/// fn system(mut gizmos: Gizmos) {
/// gizmos.line_gradient(Vec3::Y, Vec3::X, GREEN, RED);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
#[inline]
pub fn ray_gradient_2d<C: Into<Color>>(
&mut self,
start: Vec2,
vector: Vec2,
start_color: C,
end_color: C,
) {
if !self.enabled {
return;
}
self.line_gradient_2d(start, start + vector, start_color, end_color);
}
/// Draw a wireframe rectangle in 2D with the given `isometry` applied.
///
/// If `isometry == Isometry2d::IDENTITY` then
///
/// - the center is at `Vec2::ZERO`
/// - the sizes are aligned with the `Vec2::X` and `Vec2::Y` axes.
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::GREEN;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.rect_2d(Isometry2d::IDENTITY, Vec2::ONE, GREEN);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
#[inline]
pub fn rect_2d(
&mut self,
isometry: impl Into<Isometry2d>,
size: Vec2,
color: impl Into<Color>,
) {
if !self.enabled {
return;
}
let isometry = isometry.into();
let [tl, tr, br, bl] = rect_inner(size).map(|vec2| isometry * vec2);
self.lineloop_2d([tl, tr, br, bl], color);
}
#[inline]
fn extend_list_positions(&mut self, positions: impl IntoIterator<Item = Vec3>) {
self.list_positions.extend(positions);
}
#[inline]
fn extend_list_colors(&mut self, colors: impl IntoIterator<Item = impl Into<Color>>) {
self.list_colors.extend(
colors
.into_iter()
.map(|color| LinearRgba::from(color.into())),
);
}
#[inline]
fn add_list_color(&mut self, color: impl Into<Color>, count: usize) {
let polymorphic_color: Color = color.into();
let linear_color = LinearRgba::from(polymorphic_color);
self.list_colors.extend(iter::repeat_n(linear_color, count));
}
#[inline]
fn extend_strip_positions(&mut self, positions: impl IntoIterator<Item = Vec3>) {
self.strip_positions.extend(positions);
self.strip_positions.push(Vec3::NAN);
}
}
// Returns the four corners of an axis-aligned, origin-centered rectangle of
// the given `size`, ordered top-left, top-right, bottom-right, bottom-left.
fn rect_inner(size: Vec2) -> [Vec2; 4] {
    let half = size / 2.;
    [
        Vec2::new(-half.x, half.y),
        Vec2::new(half.x, half.y),
        Vec2::new(half.x, -half.y),
        Vec2::new(-half.x, -half.y),
    ]
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/arcs.rs | crates/bevy_gizmos/src/arcs.rs | //! Additional [`GizmoBuffer`] Functions -- Arcs
//!
//! Includes the implementation of [`GizmoBuffer::arc_2d`],
//! and assorted support items.
use crate::{circles::DEFAULT_CIRCLE_RESOLUTION, gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::Color;
use bevy_math::{Isometry2d, Isometry3d, Quat, Rot2, Vec2, Vec3};
use core::f32::consts::{FRAC_PI_2, TAU};
// === 2D ===
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draw an arc, which is a part of the circumference of a circle, in 2D.
    ///
    /// # Arguments
    /// - `isometry` defines the translation and rotation of the arc.
    ///   - the translation specifies the center of the arc
    ///   - the rotation is counter-clockwise starting from `Vec2::Y`
    /// - `arc_angle` sets the length of this arc, in radians.
    /// - `radius` controls the distance from `position` to this arc, and thus its curvature.
    /// - `color` sets the color to draw the arc.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use std::f32::consts::FRAC_PI_4;
    /// # use bevy_color::palettes::basic::{GREEN, RED};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.arc_2d(Isometry2d::IDENTITY, FRAC_PI_4, 1., GREEN);
    ///
    ///     // Arcs have 32 line-segments by default.
    ///     // You may want to increase this for larger arcs.
    ///     gizmos
    ///         .arc_2d(Isometry2d::IDENTITY, FRAC_PI_4, 5., RED)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn arc_2d(
        &mut self,
        isometry: impl Into<Isometry2d>,
        arc_angle: f32,
        radius: f32,
        color: impl Into<Color>,
    ) -> Arc2dBuilder<'_, Config, Clear> {
        // Drawing is deferred to the builder's `Drop` impl so that
        // `.resolution(...)` can still be applied to the returned builder.
        Arc2dBuilder {
            gizmos: self,
            isometry: isometry.into(),
            arc_angle,
            radius,
            color: color.into(),
            resolution: None,
        }
    }
}
/// A builder returned by [`GizmoBuffer::arc_2d`].
///
/// The arc is drawn when the builder is dropped.
pub struct Arc2dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Buffer the arc is written into on drop.
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Placement of the arc: translation is the center, rotation the start direction.
    isometry: Isometry2d,
    // Swept angle in radians.
    arc_angle: f32,
    radius: f32,
    color: Color,
    // Segment count override; when `None`, derived from `arc_angle` on draw.
    resolution: Option<u32>,
}
impl<Config, Clear> Arc2dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines used to approximate the geometry of this arc.
    ///
    /// When not set, a resolution is derived from the arc angle at draw time.
    pub fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = Some(resolution);
        self
    }
}
// The arc is actually emitted here, once the builder can no longer be
// configured further.
impl<Config, Clear> Drop for Arc2dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // Fall back to an angle-dependent resolution when none was requested.
        let resolution = self
            .resolution
            .unwrap_or_else(|| resolution_from_angle(self.arc_angle));
        let positions =
            arc_2d_inner(self.arc_angle, self.radius, resolution).map(|vec2| self.isometry * vec2);
        self.gizmos.linestrip_2d(positions, self.color);
    }
}
// Yields `resolution + 1` points on a circle of `radius`, sweeping
// `arc_angle` radians counter-clockwise. The `FRAC_PI_2` offset makes the
// sweep start from `Vec2::Y` rather than `Vec2::X`.
fn arc_2d_inner(arc_angle: f32, radius: f32, resolution: u32) -> impl Iterator<Item = Vec2> {
    (0..=resolution).map(move |i| {
        let angle = arc_angle * i as f32 / resolution as f32 + FRAC_PI_2;
        Vec2::from_angle(angle) * radius
    })
}
// === 3D ===
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draw an arc, which is a part of the circumference of a circle, in 3D. For default values
    /// this is drawing a standard arc. A standard arc is defined as
    ///
    /// - an arc with a center at `Vec3::ZERO`
    /// - starting at `Vec3::X`
    /// - embedded in the XZ plane
    /// - rotates counterclockwise
    ///
    /// # Arguments
    /// - `angle`: sets how much of a circle circumference is passed, e.g. PI is half a circle. This
    ///   value should be in the range (-2 * PI..=2 * PI)
    /// - `radius`: distance between the arc and its center point
    /// - `isometry` defines the translation and rotation of the arc.
    ///   - the translation specifies the center of the arc
    ///   - the rotation is counter-clockwise starting from `Vec3::Y`
    /// - `color`: color of the arc
    ///
    /// # Builder methods
    /// The resolution of the arc (i.e. the level of detail) can be adjusted with the
    /// `.resolution(...)` method.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use std::f32::consts::PI;
    /// # use bevy_color::palettes::css::ORANGE;
    /// fn system(mut gizmos: Gizmos) {
    ///     // rotation rotates normal to point in the direction of `Vec3::NEG_ONE`
    ///     let rotation = Quat::from_rotation_arc(Vec3::Y, Vec3::NEG_ONE.normalize());
    ///
    ///     gizmos
    ///        .arc_3d(
    ///          270.0_f32.to_radians(),
    ///          0.25,
    ///          Isometry3d::new(Vec3::ONE, rotation),
    ///          ORANGE
    ///          )
    ///          .resolution(100);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn arc_3d(
        &mut self,
        angle: f32,
        radius: f32,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Arc3dBuilder<'_, Config, Clear> {
        Arc3dBuilder {
            gizmos: self,
            // Standard arcs start at `Vec3::X` in the arc's local space.
            start_vertex: Vec3::X,
            isometry: isometry.into(),
            angle,
            radius,
            color: color.into(),
            resolution: None,
        }
    }
    /// Draws the shortest arc between two points (`from` and `to`) relative to a specified `center` point.
    ///
    /// # Arguments
    ///
    /// - `center`: The center point around which the arc is drawn.
    /// - `from`: The starting point of the arc.
    /// - `to`: The ending point of the arc.
    /// - `color`: color of the arc
    ///
    /// # Builder methods
    /// The resolution of the arc (i.e. the level of detail) can be adjusted with the
    /// `.resolution(...)` method.
    ///
    /// # Examples
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::css::ORANGE;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.short_arc_3d_between(
    ///        Vec3::ONE,
    ///        Vec3::ONE + Vec3::NEG_ONE,
    ///        Vec3::ZERO,
    ///        ORANGE
    ///        )
    ///        .resolution(100);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    ///
    /// # Notes
    /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of
    ///   the points is coincident with `center`, nothing is rendered.
    /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the
    ///   `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then
    ///   the results will behave as if this were the case
    #[inline]
    pub fn short_arc_3d_between(
        &mut self,
        center: Vec3,
        from: Vec3,
        to: Vec3,
        color: impl Into<Color>,
    ) -> Arc3dBuilder<'_, Config, Clear> {
        // Identity angle mapping keeps the (shorter) rotation-arc angle as-is.
        self.arc_from_to(center, from, to, color, |x| x)
    }
    /// Draws the longest arc between two points (`from` and `to`) relative to a specified `center` point.
    ///
    /// # Arguments
    /// - `center`: The center point around which the arc is drawn.
    /// - `from`: The starting point of the arc.
    /// - `to`: The ending point of the arc.
    /// - `color`: color of the arc
    ///
    /// # Builder methods
    /// The resolution of the arc (i.e. the level of detail) can be adjusted with the
    /// `.resolution(...)` method.
    ///
    /// # Examples
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::css::ORANGE;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.long_arc_3d_between(
    ///        Vec3::ONE,
    ///        Vec3::ONE + Vec3::NEG_ONE,
    ///        Vec3::ZERO,
    ///        ORANGE
    ///        )
    ///        .resolution(100);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    ///
    /// # Notes
    /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of
    ///   the points is coincident with `center`, nothing is rendered.
    /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the
    ///   `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then
    ///   the results will behave as if this were the case.
    #[inline]
    pub fn long_arc_3d_between(
        &mut self,
        center: Vec3,
        from: Vec3,
        to: Vec3,
        color: impl Into<Color>,
    ) -> Arc3dBuilder<'_, Config, Clear> {
        // Replace the short angle with its complement (full turn minus the
        // short angle), keeping the sign so the sweep direction is preserved.
        self.arc_from_to(center, from, to, color, |angle| {
            if angle > 0.0 {
                TAU - angle
            } else if angle < 0.0 {
                -TAU - angle
            } else {
                0.0
            }
        })
    }
    #[inline]
    // Shared construction for short/long 3D arcs: computes the arc plane,
    // angle, and radius from the three points, then lets `angle_fn` adjust
    // the swept angle (identity for the short arc, complement for the long one).
    fn arc_from_to(
        &mut self,
        center: Vec3,
        from: Vec3,
        to: Vec3,
        color: impl Into<Color>,
        angle_fn: impl Fn(f32) -> f32,
    ) -> Arc3dBuilder<'_, Config, Clear> {
        // `from` and `to` can be the same here since in either case nothing gets rendered and the
        // orientation ambiguity of `up` doesn't matter
        let from_axis = (from - center).normalize_or_zero();
        let to_axis = (to - center).normalize_or_zero();
        // Axis/angle of the rotation carrying `from_axis` onto `to_axis`;
        // the axis is the normal of the plane containing the arc.
        let (up, angle) = Quat::from_rotation_arc(from_axis, to_axis).to_axis_angle();
        let angle = angle_fn(angle);
        let radius = center.distance(from);
        // Rotation that maps the builder's local `Vec3::Y` onto the arc normal.
        let rotation = Quat::from_rotation_arc(Vec3::Y, up);
        // Express the start direction in the arc's local space.
        let start_vertex = rotation.inverse() * from_axis;
        Arc3dBuilder {
            gizmos: self,
            start_vertex,
            isometry: Isometry3d::new(center, rotation),
            angle,
            radius,
            color: color.into(),
            resolution: None,
        }
    }
/// Draws the shortest arc between two points (`from` and `to`) relative to a specified `center` point.
///
/// # Arguments
///
/// - `center`: The center point around which the arc is drawn.
/// - `from`: The starting point of the arc.
/// - `to`: The ending point of the arc.
/// - `color`: color of the arc
///
/// # Builder methods
/// The resolution of the arc (i.e. the level of detail) can be adjusted with the
/// `.resolution(...)` method.
///
/// # Examples
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::css::ORANGE;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.short_arc_2d_between(
/// Vec2::ZERO,
/// Vec2::X,
/// Vec2::Y,
/// ORANGE
/// )
/// .resolution(100);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
///
/// # Notes
/// - This method assumes that the points `from` and `to` are distinct from `center`. If one of
/// the points is coincident with `center`, nothing is rendered.
/// - The arc is drawn as a portion of a circle with a radius equal to the distance from the
/// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then
    /// the results will behave as if this were the case.
#[inline]
pub fn short_arc_2d_between(
&mut self,
center: Vec2,
from: Vec2,
to: Vec2,
color: impl Into<Color>,
) -> Arc2dBuilder<'_, Config, Clear> {
self.arc_2d_from_to(center, from, to, color, core::convert::identity)
}
/// Draws the longest arc between two points (`from` and `to`) relative to a specified `center` point.
///
/// # Arguments
/// - `center`: The center point around which the arc is drawn.
/// - `from`: The starting point of the arc.
/// - `to`: The ending point of the arc.
/// - `color`: color of the arc
///
/// # Builder methods
/// The resolution of the arc (i.e. the level of detail) can be adjusted with the
/// `.resolution(...)` method.
///
/// # Examples
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::css::ORANGE;
/// fn system(mut gizmos: Gizmos) {
/// gizmos.long_arc_2d_between(
/// Vec2::ZERO,
/// Vec2::X,
/// Vec2::Y,
/// ORANGE
/// )
/// .resolution(100);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
///
/// # Notes
/// - This method assumes that the points `from` and `to` are distinct from `center`. If one of
/// the points is coincident with `center`, nothing is rendered.
/// - The arc is drawn as a portion of a circle with a radius equal to the distance from the
/// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then
/// the results will behave as if this were the case.
#[inline]
pub fn long_arc_2d_between(
&mut self,
center: Vec2,
from: Vec2,
to: Vec2,
color: impl Into<Color>,
) -> Arc2dBuilder<'_, Config, Clear> {
self.arc_2d_from_to(center, from, to, color, |angle| angle - TAU)
}
#[inline]
fn arc_2d_from_to(
&mut self,
center: Vec2,
from: Vec2,
to: Vec2,
color: impl Into<Color>,
angle_fn: impl Fn(f32) -> f32,
) -> Arc2dBuilder<'_, Config, Clear> {
// `from` and `to` can be the same here since in either case nothing gets rendered and the
// orientation ambiguity of `up` doesn't matter
let from_axis = (from - center).normalize_or_zero();
let to_axis = (to - center).normalize_or_zero();
let rotation = Vec2::Y.angle_to(from_axis);
let arc_angle_raw = from_axis.angle_to(to_axis);
let arc_angle = angle_fn(arc_angle_raw);
let radius = center.distance(from);
Arc2dBuilder {
gizmos: self,
isometry: Isometry2d::new(center, Rot2::radians(rotation)),
arc_angle,
radius,
color: color.into(),
resolution: None,
}
}
}
/// A builder returned by the 3D arc-drawing methods, such as
/// [`GizmoBuffer::long_arc_3d_between`].
pub struct Arc3dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // this is the vertex the arc starts on in the XZ plane. For the normal arc_3d method this is
    // always starting at Vec3::X. For the short/long arc methods we actually need a way to start
    // at the from position and this is where this internal field comes into play. Some implicit
    // assumptions:
    //
    // 1. This is always in the XZ plane
    // 2. This is always normalized
    //
    // DO NOT expose this field to users as it is easy to mess this up
    start_vertex: Vec3,
    // Placement of the arc: translation is the arc's center, rotation orients its plane.
    isometry: Isometry3d,
    // Signed arc angle in radians; clamped to `[-TAU, TAU]` at draw time (`arc_3d_inner`).
    angle: f32,
    // Distance from the arc's center to its vertices.
    radius: f32,
    color: Color,
    // Number of line segments; `None` falls back to `resolution_from_angle` on drop.
    resolution: Option<u32>,
}
impl<Config, Clear> Arc3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines for this arc.
    pub fn resolution(self, resolution: u32) -> Self {
        Self {
            resolution: Some(resolution),
            ..self
        }
    }
}
impl<Config, Clear> Drop for Arc3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Emits the arc as a line strip into the buffer when the builder goes out of scope.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // Fall back to an angle-proportional default when no explicit resolution was set.
        let resolution = match self.resolution {
            Some(resolution) => resolution,
            None => resolution_from_angle(self.angle),
        };
        let points = arc_3d_inner(
            self.start_vertex,
            self.isometry,
            self.angle,
            self.radius,
            resolution,
        );
        self.gizmos.linestrip(points, self.color);
    }
}
/// Generates the `resolution + 1` vertices of a 3D arc.
///
/// The arc starts at `start_vertex` (a unit vector in the local XZ plane), sweeps
/// `angle` radians around the local Y axis, is scaled by `radius`, and is finally
/// placed in the world via `isometry`.
fn arc_3d_inner(
    start_vertex: Vec3,
    isometry: Isometry3d,
    angle: f32,
    radius: f32,
    resolution: u32,
) -> impl Iterator<Item = Vec3> {
    // Sweeps beyond a full turn would only overlap themselves while diluting the
    // level of detail, so the angle is clamped to one revolution in either direction.
    let angle = angle.clamp(-TAU, TAU);
    (0..=resolution).map(move |step| {
        let percentage = step as f32 / resolution as f32;
        let rotated = Quat::from_axis_angle(Vec3::Y, angle * percentage) * start_vertex;
        isometry * (rotated * radius)
    })
}
/// Helper returning the default resolution for an arc: the share of
/// [`DEFAULT_CIRCLE_RESOLUTION`] proportional to the fraction of a full turn that
/// `angle` (in radians) covers, rounded up.
fn resolution_from_angle(angle: f32) -> u32 {
    ((angle.abs() / TAU) * DEFAULT_CIRCLE_RESOLUTION as f32).ceil() as u32
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/grid.rs | crates/bevy_gizmos/src/grid.rs | //! Additional [`GizmoBuffer`] Functions -- Grids
//!
//! Includes the implementation of [`GizmoBuffer::grid`] and [`GizmoBuffer::grid_2d`],
//! and assorted support items.
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::Color;
use bevy_math::{ops, Isometry2d, Isometry3d, Quat, UVec2, UVec3, Vec2, Vec3};
/// A builder returned by [`GizmoBuffer::grid_3d`]
pub struct GridBuilder3d<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Placement of the grid: translation is its center, rotation its orientation.
    isometry: Isometry3d,
    // Distance between neighboring cells along each axis.
    spacing: Vec3,
    // Number of cells along each axis.
    cell_count: UVec3,
    // Skew angles in radians; the grid is sheared by `tan(skew)` per axis.
    skew: Vec3,
    // Whether to draw the outermost lines parallel to the x/y/z axes.
    outer_edges: [bool; 3],
    color: Color,
}
/// A builder returned by [`GizmoBuffer::grid`] and [`GizmoBuffer::grid_2d`]
pub struct GridBuilder2d<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Placement of the grid. This is a 3D isometry even for the 2D case:
    // `grid_2d` lifts its `Isometry2d` into 3D (translation in the XY plane,
    // rotation about Z).
    isometry: Isometry3d,
    // Distance between neighboring cells along the x and y axes.
    spacing: Vec2,
    // Number of cells along the x and y axes.
    cell_count: UVec2,
    // Skew angles in radians; the grid is sheared by `tan(skew)` per axis.
    skew: Vec2,
    // Whether to draw the outermost lines parallel to the x/y axes.
    outer_edges: [bool; 2],
    color: Color,
}
impl<Config, Clear> GridBuilder3d<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Shears the grid by `tan(skew)` along the x direction; `skew` is given in radians.
    pub fn skew_x(mut self, skew: f32) -> Self {
        self.skew.x = skew;
        self
    }
    /// Shears the grid by `tan(skew)` along the y direction; `skew` is given in radians.
    pub fn skew_y(mut self, skew: f32) -> Self {
        self.skew.y = skew;
        self
    }
    /// Shears the grid by `tan(skew)` along the z direction; `skew` is given in radians.
    pub fn skew_z(mut self, skew: f32) -> Self {
        self.skew.z = skew;
        self
    }
    /// Shears the grid by `tan(skew)` along the x, y and z directions; `skew` is given in radians.
    pub fn skew(mut self, skew: Vec3) -> Self {
        self.skew = skew;
        self
    }
    /// Requests drawing of the grid's outer edges parallel to the x axis
    /// (outer edges are hidden by default).
    pub fn outer_edges_x(mut self) -> Self {
        self.outer_edges[0] = true;
        self
    }
    /// Requests drawing of the grid's outer edges parallel to the y axis
    /// (outer edges are hidden by default).
    pub fn outer_edges_y(mut self) -> Self {
        self.outer_edges[1] = true;
        self
    }
    /// Requests drawing of the grid's outer edges parallel to the z axis
    /// (outer edges are hidden by default).
    pub fn outer_edges_z(mut self) -> Self {
        self.outer_edges[2] = true;
        self
    }
    /// Requests drawing of all of the grid's outer edges
    /// (outer edges are hidden by default).
    pub fn outer_edges(mut self) -> Self {
        self.outer_edges = [true; 3];
        self
    }
}
impl<Config, Clear> GridBuilder2d<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Shears the grid by `tan(skew)` along the x direction; `skew` is given in radians.
    pub fn skew_x(mut self, skew: f32) -> Self {
        self.skew.x = skew;
        self
    }
    /// Shears the grid by `tan(skew)` along the y direction; `skew` is given in radians.
    pub fn skew_y(mut self, skew: f32) -> Self {
        self.skew.y = skew;
        self
    }
    /// Shears the grid by `tan(skew)` along the x and y directions; `skew` is given in radians.
    pub fn skew(mut self, skew: Vec2) -> Self {
        self.skew = skew;
        self
    }
    /// Requests drawing of the grid's outer edges parallel to the x axis
    /// (outer edges are hidden by default).
    pub fn outer_edges_x(mut self) -> Self {
        self.outer_edges[0] = true;
        self
    }
    /// Requests drawing of the grid's outer edges parallel to the y axis
    /// (outer edges are hidden by default).
    pub fn outer_edges_y(mut self) -> Self {
        self.outer_edges[1] = true;
        self
    }
    /// Requests drawing of all of the grid's outer edges
    /// (outer edges are hidden by default).
    pub fn outer_edges(mut self) -> Self {
        self.outer_edges = [true; 2];
        self
    }
}
impl<Config, Clear> Drop for GridBuilder3d<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draws the configured grid by emitting lines into the stored [`GizmoBuffer`]
    /// when the builder goes out of scope.
    fn drop(&mut self) {
        draw_grid(
            self.gizmos,
            self.isometry,
            self.spacing,
            self.cell_count,
            self.skew,
            self.outer_edges,
            self.color,
        );
    }
}
impl<Config, Clear> Drop for GridBuilder2d<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draws the 2D grid by delegating to the shared 3D implementation with all
    /// z components zeroed out.
    fn drop(&mut self) {
        draw_grid(
            self.gizmos,
            self.isometry,
            self.spacing.extend(0.),
            self.cell_count.extend(0),
            self.skew.extend(0.),
            // NOTE(review): the hard-coded `true` enables the z outer-edge flag for a
            // grid with zero z extent — presumably so the single plane of lines at
            // z = 0 is emitted; confirm against `draw_grid`'s line-count logic.
            [self.outer_edges[0], self.outer_edges[1], true],
            self.color,
        );
    }
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draw a 2D grid in 3D.
    ///
    /// The grid's default orientation aligns with the XY-plane.
    ///
    /// # Arguments
    ///
    /// - `isometry` defines the translation and rotation of the grid.
    ///   - the translation specifies the center of the grid
    ///   - the rotation defines the orientation of the grid; by default we assume the grid is
    ///     contained in a plane parallel to the XY plane
    /// - `cell_count`: defines the amount of cells in the x and y axes
    /// - `spacing`: defines the distance between cells along the x and y axes
    /// - `color`: color of the grid
    ///
    /// # Builder methods
    ///
    /// - The skew of the grid can be adjusted using the `.skew(...)`, `.skew_x(...)` or `.skew_y(...)` methods. They behave very similar to their CSS equivalents.
    /// - All outer edges can be toggled on or off using `.outer_edges(...)`. Alternatively you can use `.outer_edges_x(...)` or `.outer_edges_y(...)` to toggle the outer edges along an axis.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.grid(
    ///         Isometry3d::IDENTITY,
    ///         UVec2::new(10, 10),
    ///         Vec2::splat(2.),
    ///         GREEN
    ///         )
    ///         .skew_x(0.25)
    ///         .outer_edges();
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    pub fn grid(
        &mut self,
        isometry: impl Into<Isometry3d>,
        cell_count: UVec2,
        spacing: Vec2,
        color: impl Into<Color>,
    ) -> GridBuilder2d<'_, Config, Clear> {
        GridBuilder2d {
            gizmos: self,
            isometry: isometry.into(),
            spacing,
            cell_count,
            skew: Vec2::ZERO,
            outer_edges: [false, false],
            color: color.into(),
        }
    }
    /// Draw a 3D grid of voxel-like cells.
    ///
    /// # Arguments
    ///
    /// - `isometry` defines the translation and rotation of the grid.
    ///   - the translation specifies the center of the grid
    ///   - the rotation defines the orientation of the grid; by default we assume the grid is
    ///     aligned with all axes
    /// - `cell_count`: defines the amount of cells in the x, y and z axes
    /// - `spacing`: defines the distance between cells along the x, y and z axes
    /// - `color`: color of the grid
    ///
    /// # Builder methods
    ///
    /// - The skew of the grid can be adjusted using the `.skew(...)`, `.skew_x(...)`, `.skew_y(...)` or `.skew_z(...)` methods. They behave very similar to their CSS equivalents.
    /// - All outer edges can be toggled on or off using `.outer_edges(...)`. Alternatively you can use `.outer_edges_x(...)`, `.outer_edges_y(...)` or `.outer_edges_z(...)` to toggle the outer edges along an axis.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.grid_3d(
    ///         Isometry3d::IDENTITY,
    ///         UVec3::new(10, 2, 10),
    ///         Vec3::splat(2.),
    ///         GREEN
    ///         )
    ///         .skew_x(0.25)
    ///         .outer_edges();
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    pub fn grid_3d(
        &mut self,
        isometry: impl Into<Isometry3d>,
        cell_count: UVec3,
        spacing: Vec3,
        color: impl Into<Color>,
    ) -> GridBuilder3d<'_, Config, Clear> {
        GridBuilder3d {
            gizmos: self,
            isometry: isometry.into(),
            spacing,
            cell_count,
            skew: Vec3::ZERO,
            outer_edges: [false, false, false],
            color: color.into(),
        }
    }
    /// Draw a grid in 2D.
    ///
    /// # Arguments
    ///
    /// - `isometry` defines the translation and rotation of the grid.
    ///   - the translation specifies the center of the grid
    ///   - the rotation defines the orientation of the grid; by default we assume the grid is
    ///     aligned with all axes
    /// - `cell_count`: defines the amount of cells in the x and y axes
    /// - `spacing`: defines the distance between cells along the x and y axes
    /// - `color`: color of the grid
    ///
    /// # Builder methods
    ///
    /// - The skew of the grid can be adjusted using the `.skew(...)`, `.skew_x(...)` or `.skew_y(...)` methods. They behave very similar to their CSS equivalents.
    /// - All outer edges can be toggled on or off using `.outer_edges(...)`. Alternatively you can use `.outer_edges_x(...)` or `.outer_edges_y(...)` to toggle the outer edges along an axis.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::GREEN;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.grid_2d(
    ///         Isometry2d::IDENTITY,
    ///         UVec2::new(10, 10),
    ///         Vec2::splat(1.),
    ///         GREEN
    ///         )
    ///         .skew_x(0.25)
    ///         .outer_edges();
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    pub fn grid_2d(
        &mut self,
        isometry: impl Into<Isometry2d>,
        cell_count: UVec2,
        spacing: Vec2,
        color: impl Into<Color>,
    ) -> GridBuilder2d<'_, Config, Clear> {
        let isometry = isometry.into();
        GridBuilder2d {
            gizmos: self,
            // Lift the 2D isometry into 3D: translate in the XY plane, rotate about Z.
            isometry: Isometry3d::new(
                isometry.translation.extend(0.0),
                Quat::from_rotation_z(isometry.rotation.as_radians()),
            ),
            spacing,
            cell_count,
            skew: Vec2::ZERO,
            outer_edges: [false, false],
            color: color.into(),
        }
    }
}
/// Shared implementation backing [`GridBuilder2d`] and [`GridBuilder3d`]:
/// emits all of the grid's line segments into `gizmos`.
///
/// Axes with a `cell_count` of zero contribute no offset, collapsing the grid
/// along that axis.
fn draw_grid<Config, Clear>(
    gizmos: &mut GizmoBuffer<Config, Clear>,
    isometry: Isometry3d,
    spacing: Vec3,
    cell_count: UVec3,
    skew: Vec3,
    outer_edges: [bool; 3],
    color: Color,
) where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    if !gizmos.enabled {
        return;
    }
    // Returns `val` when `cond` holds, the zero vector otherwise.
    #[inline]
    fn or_zero(cond: bool, val: Vec3) -> Vec3 {
        if cond {
            val
        } else {
            Vec3::ZERO
        }
    }
    // Offset between two adjacent grid cells along the x/y/z-axis and accounting for skew.
    let skew_tan = Vec3::from(skew.to_array().map(ops::tan));
    let dx = or_zero(
        cell_count.x != 0,
        spacing.x * Vec3::new(1., skew_tan.y, skew_tan.z),
    );
    let dy = or_zero(
        cell_count.y != 0,
        spacing.y * Vec3::new(skew_tan.x, 1., skew_tan.z),
    );
    let dz = or_zero(
        cell_count.z != 0,
        spacing.z * Vec3::new(skew_tan.x, skew_tan.y, 1.),
    );
    // Bottom-left-front corner of the grid
    let cell_count_half = cell_count.as_vec3() * 0.5;
    let grid_start = -cell_count_half.x * dx - cell_count_half.y * dy - cell_count_half.z * dz;
    // Number of grid lines across an axis: `cells + 1` when the outer edges are
    // included, otherwise only the interior lines (but at least one).
    #[inline]
    fn cell_count_to_line_count(include_outer: bool, cell_count: u32) -> u32 {
        if include_outer {
            cell_count.saturating_add(1)
        } else {
            cell_count.saturating_sub(1).max(1)
        }
    }
    // For each direction, how many lines to lay out across the two perpendicular axes.
    let x_line_count = UVec2::new(
        cell_count_to_line_count(outer_edges[0], cell_count.y),
        cell_count_to_line_count(outer_edges[0], cell_count.z),
    );
    let y_line_count = UVec2::new(
        cell_count_to_line_count(outer_edges[1], cell_count.z),
        cell_count_to_line_count(outer_edges[1], cell_count.x),
    );
    let z_line_count = UVec2::new(
        cell_count_to_line_count(outer_edges[2], cell_count.x),
        cell_count_to_line_count(outer_edges[2], cell_count.y),
    );
    // Without outer edges, the first line is one cell in from the grid's corner.
    let x_start = grid_start + or_zero(!outer_edges[0], dy + dz);
    let y_start = grid_start + or_zero(!outer_edges[1], dx + dz);
    let z_start = grid_start + or_zero(!outer_edges[2], dx + dy);
    // Yields the segments of all lines running along `delta_a`, laid out on the
    // lattice spanned by `delta_b` and `delta_c`.
    fn iter_lines(
        delta_a: Vec3,
        delta_b: Vec3,
        delta_c: Vec3,
        line_count: UVec2,
        cell_count: u32,
        start: Vec3,
    ) -> impl Iterator<Item = [Vec3; 2]> {
        let dline = delta_a * cell_count as f32;
        (0..line_count.x).map(|v| v as f32).flat_map(move |b| {
            (0..line_count.y).map(|v| v as f32).map(move |c| {
                let line_start = start + b * delta_b + c * delta_c;
                let line_end = line_start + dline;
                [line_start, line_end]
            })
        })
    }
    // Lines along the x direction
    let x_lines = iter_lines(dx, dy, dz, x_line_count, cell_count.x, x_start);
    // Lines along the y direction
    let y_lines = iter_lines(dy, dz, dx, y_line_count, cell_count.y, y_start);
    // Lines along the z direction
    let z_lines = iter_lines(dz, dx, dy, z_line_count, cell_count.z, z_start);
    x_lines
        .chain(y_lines)
        .chain(z_lines)
        .map(|vec3s| vec3s.map(|vec3| isometry * vec3))
        .for_each(|[start, end]| {
            gizmos.line(start, end, color);
        });
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/circles.rs | crates/bevy_gizmos/src/circles.rs | //! Additional [`GizmoBuffer`] Functions -- Circles
//!
//! Includes the implementation of [`GizmoBuffer::circle`] and [`GizmoBuffer::circle_2d`],
//! and assorted support items.
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::Color;
use bevy_math::{ops, Isometry2d, Isometry3d, Quat, Vec2, Vec3};
use core::f32::consts::TAU;
pub(crate) const DEFAULT_CIRCLE_RESOLUTION: u32 = 32;
/// Yields `resolution + 1` evenly spaced points on an axis-aligned ellipse with
/// the given half extents; the final sample is at angle `TAU`, closing the loop.
fn ellipse_inner(half_size: Vec2, resolution: u32) -> impl Iterator<Item = Vec2> {
    (0..resolution + 1).map(move |i| {
        let theta = i as f32 * TAU / resolution as f32;
        let (sin, cos) = ops::sin_cos(theta);
        Vec2::new(sin, cos) * half_size
    })
}
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draw an ellipse in 3D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry3d::IDENTITY` then
    ///
    /// - the center is at `Vec3::ZERO`
    /// - the `half_size` extents are aligned with the `Vec3::X` and `Vec3::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.ellipse(Isometry3d::IDENTITY, Vec2::new(1., 2.), GREEN);
    ///
    ///     // Ellipses have 32 line-segments by default.
    ///     // You may want to increase this for larger ellipses.
    ///     gizmos
    ///         .ellipse(Isometry3d::IDENTITY, Vec2::new(5., 1.), RED)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn ellipse(
        &mut self,
        isometry: impl Into<Isometry3d>,
        half_size: Vec2,
        color: impl Into<Color>,
    ) -> EllipseBuilder<'_, Config, Clear> {
        EllipseBuilder {
            gizmos: self,
            isometry: isometry.into(),
            half_size,
            color: color.into(),
            resolution: DEFAULT_CIRCLE_RESOLUTION,
        }
    }
    /// Draw an ellipse in 2D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry2d::IDENTITY` then
    ///
    /// - the center is at `Vec2::ZERO`
    /// - the `half_size` extents are aligned with the `Vec2::X` and `Vec2::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.ellipse_2d(Isometry2d::from_rotation(Rot2::degrees(180.0)), Vec2::new(2., 1.), GREEN);
    ///
    ///     // Ellipses have 32 line-segments by default.
    ///     // You may want to increase this for larger ellipses.
    ///     gizmos
    ///         .ellipse_2d(Isometry2d::from_rotation(Rot2::degrees(180.0)), Vec2::new(5., 1.), RED)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn ellipse_2d(
        &mut self,
        isometry: impl Into<Isometry2d>,
        half_size: Vec2,
        color: impl Into<Color>,
    ) -> Ellipse2dBuilder<'_, Config, Clear> {
        Ellipse2dBuilder {
            gizmos: self,
            isometry: isometry.into(),
            half_size,
            color: color.into(),
            resolution: DEFAULT_CIRCLE_RESOLUTION,
        }
    }
    /// Draw a circle in 3D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry3d::IDENTITY` then
    ///
    /// - the center is at `Vec3::ZERO`
    /// - the radius is aligned with the `Vec3::X` and `Vec3::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.circle(Isometry3d::IDENTITY, 1., GREEN);
    ///
    ///     // Circles have 32 line-segments by default.
    ///     // You may want to increase this for larger circles.
    ///     gizmos
    ///         .circle(Isometry3d::IDENTITY, 5., RED)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn circle(
        &mut self,
        isometry: impl Into<Isometry3d>,
        radius: f32,
        color: impl Into<Color>,
    ) -> EllipseBuilder<'_, Config, Clear> {
        // A circle is an ellipse with equal half extents.
        EllipseBuilder {
            gizmos: self,
            isometry: isometry.into(),
            half_size: Vec2::splat(radius),
            color: color.into(),
            resolution: DEFAULT_CIRCLE_RESOLUTION,
        }
    }
    /// Draw a circle in 2D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry2d::IDENTITY` then
    ///
    /// - the center is at `Vec2::ZERO`
    /// - the radius is aligned with the `Vec2::X` and `Vec2::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::{RED, GREEN};
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.circle_2d(Isometry2d::IDENTITY, 1., GREEN);
    ///
    ///     // Circles have 32 line-segments by default.
    ///     // You may want to increase this for larger circles.
    ///     gizmos
    ///         .circle_2d(Isometry2d::IDENTITY, 5., RED)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn circle_2d(
        &mut self,
        isometry: impl Into<Isometry2d>,
        radius: f32,
        color: impl Into<Color>,
    ) -> Ellipse2dBuilder<'_, Config, Clear> {
        // A circle is an ellipse with equal half extents.
        Ellipse2dBuilder {
            gizmos: self,
            isometry: isometry.into(),
            half_size: Vec2::splat(radius),
            color: color.into(),
            resolution: DEFAULT_CIRCLE_RESOLUTION,
        }
    }
    /// Draw a wireframe sphere in 3D made out of 3 circles around the axes with the given
    /// `isometry` applied.
    ///
    /// If `isometry == Isometry3d::IDENTITY` then
    ///
    /// - the center is at `Vec3::ZERO`
    /// - the 3 circles are in the XY, YZ and XZ planes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::Color;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.sphere(Isometry3d::IDENTITY, 1., Color::BLACK);
    ///
    ///     // Each circle has 32 line-segments by default.
    ///     // You may want to increase this for larger spheres.
    ///     gizmos
    ///         .sphere(Isometry3d::IDENTITY, 5., Color::BLACK)
    ///         .resolution(64);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    #[inline]
    pub fn sphere(
        &mut self,
        isometry: impl Into<Isometry3d>,
        radius: f32,
        color: impl Into<Color>,
    ) -> SphereBuilder<'_, Config, Clear> {
        SphereBuilder {
            gizmos: self,
            radius,
            isometry: isometry.into(),
            color: color.into(),
            resolution: DEFAULT_CIRCLE_RESOLUTION,
        }
    }
}
/// A builder returned by [`GizmoBuffer::ellipse`] and [`GizmoBuffer::circle`].
pub struct EllipseBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Placement of the ellipse; it is drawn in the local XY plane of this isometry.
    isometry: Isometry3d,
    // Half extents along the local X and Y axes.
    half_size: Vec2,
    color: Color,
    // Number of line segments used to approximate the ellipse.
    resolution: u32,
}
impl<Config, Clear> EllipseBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set how many line segments are used to approximate this ellipse's geometry.
    pub fn resolution(self, resolution: u32) -> Self {
        Self { resolution, ..self }
    }
}
impl<Config, Clear> Drop for EllipseBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Emits the ellipse as a line strip when the builder goes out of scope.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // Lift the 2D ellipse samples into the local XY plane and place them in the world.
        let isometry = self.isometry;
        let positions = ellipse_inner(self.half_size, self.resolution)
            .map(move |point| isometry * point.extend(0.));
        self.gizmos.linestrip(positions, self.color);
    }
}
/// A builder returned by [`GizmoBuffer::ellipse_2d`] and [`GizmoBuffer::circle_2d`].
pub struct Ellipse2dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Placement (center + rotation) of the ellipse in 2D.
    isometry: Isometry2d,
    // Half extents along the local X and Y axes.
    half_size: Vec2,
    color: Color,
    // Number of line segments used to approximate the ellipse.
    resolution: u32,
}
impl<Config, Clear> Ellipse2dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set how many line segments are used to approximate this ellipse's geometry.
    pub fn resolution(self, resolution: u32) -> Self {
        Self { resolution, ..self }
    }
}
impl<Config, Clear> Drop for Ellipse2dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Emits the ellipse as a 2D line strip when the builder goes out of scope.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        };
        let positions =
            ellipse_inner(self.half_size, self.resolution).map(|vec2| self.isometry * vec2);
        self.gizmos.linestrip_2d(positions, self.color);
    }
}
/// A builder returned by [`GizmoBuffer::sphere`].
pub struct SphereBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Radius of the sphere
    radius: f32,
    // Placement (center + orientation) of the sphere.
    isometry: Isometry3d,
    // Color of the sphere
    color: Color,
    // Number of line-segments used to approximate each of the sphere's three great circles
    resolution: u32,
}
impl<Config, Clear> SphereBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set how many line segments are used to approximate the sphere geometry.
    pub fn resolution(self, resolution: u32) -> Self {
        Self { resolution, ..self }
    }
}
impl<Config, Clear> Drop for SphereBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Emits the sphere's three great circles when the builder goes out of scope.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // One great circle per local axis: rotate the canonical XY-plane circle so
        // its normal (local Z) lines up with that axis.
        for axis in Vec3::AXES {
            let axis_rotation = Isometry3d::from_rotation(Quat::from_rotation_arc(Vec3::Z, axis));
            self.gizmos
                .circle(self.isometry * axis_rotation, self.radius, self.color)
                .resolution(self.resolution);
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/cross.rs | crates/bevy_gizmos/src/cross.rs | //! Additional [`GizmoBuffer`] Functions -- Crosses
//!
//! Includes the implementation of [`GizmoBuffer::cross`] and [`GizmoBuffer::cross_2d`],
//! and assorted support items.
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
use bevy_color::Color;
use bevy_math::{Isometry2d, Isometry3d, Vec2, Vec3};
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Draw a cross in 3D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry3d::IDENTITY` then
    ///
    /// - the center is at `Vec3::ZERO`
    /// - the `half_size`s are aligned with the `Vec3::X`, `Vec3::Y` and `Vec3::Z` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::WHITE;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.cross(Isometry3d::IDENTITY, 0.5, WHITE);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    pub fn cross(
        &mut self,
        isometry: impl Into<Isometry3d>,
        half_size: f32,
        color: impl Into<Color>,
    ) {
        let isometry = isometry.into();
        let color: Color = color.into();
        // One line per axis, from `-half_size` to `+half_size` through the center.
        [Vec3::X, Vec3::Y, Vec3::Z]
            .map(|axis| axis * half_size)
            .into_iter()
            .for_each(|axis| {
                self.line(isometry * axis, isometry * (-axis), color);
            });
    }
    /// Draw a cross in 2D with the given `isometry` applied.
    ///
    /// If `isometry == Isometry2d::IDENTITY` then
    ///
    /// - the center is at `Vec2::ZERO`
    /// - the `half_size`s are aligned with the `Vec2::X` and `Vec2::Y` axes.
    ///
    /// # Example
    /// ```
    /// # use bevy_gizmos::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # use bevy_color::palettes::basic::WHITE;
    /// fn system(mut gizmos: Gizmos) {
    ///     gizmos.cross_2d(Isometry2d::IDENTITY, 0.5, WHITE);
    /// }
    /// # bevy_ecs::system::assert_is_system(system);
    /// ```
    pub fn cross_2d(
        &mut self,
        isometry: impl Into<Isometry2d>,
        half_size: f32,
        color: impl Into<Color>,
    ) {
        let isometry = isometry.into();
        let color: Color = color.into();
        // One line per axis, from `-half_size` to `+half_size` through the center.
        [Vec2::X, Vec2::Y]
            .map(|axis| axis * half_size)
            .into_iter()
            .for_each(|axis| {
                self.line_2d(isometry * axis, isometry * (-axis), color);
            });
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/curves.rs | crates/bevy_gizmos/src/curves.rs | //! Additional [`GizmoBuffer`] Functions -- Curves
//!
//! Includes the implementation of [`GizmoBuffer::curve_2d`],
//! [`GizmoBuffer::curve_3d`] and assorted support items.
use bevy_color::Color;
use bevy_math::{
curve::{Curve, CurveExt},
Vec2, Vec3,
};
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
impl<Config, Clear> GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Draw a curve, at the given time points, sampling in 2D.
///
/// Samples of time points outside of the curve's domain will be filtered out and won't
/// contribute to the rendering. If you wish to render the curve outside of its domain you need
/// to create a new curve with an extended domain.
///
/// # Arguments
/// - `curve_2d` some type that implements the [`Curve`] trait and samples `Vec2`s
/// - `times` some iterable type yielding `f32` which will be used for sampling the curve
/// - `color` the color of the curve
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::{RED};
/// fn system(mut gizmos: Gizmos) {
/// let domain = Interval::UNIT;
/// let curve = FunctionCurve::new(domain, |t| Vec2::from(t.sin_cos()));
/// gizmos.curve_2d(curve, (0..=100).map(|n| n as f32 / 100.0), RED);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn curve_2d(
&mut self,
curve_2d: impl Curve<Vec2>,
times: impl IntoIterator<Item = f32>,
color: impl Into<Color>,
) {
self.linestrip_2d(curve_2d.sample_iter(times).flatten(), color);
}
/// Draw a curve, at the given time points, sampling in 3D.
///
/// Samples of time points outside of the curve's domain will be filtered out and won't
/// contribute to the rendering. If you wish to render the curve outside of its domain you need
/// to create a new curve with an extended domain.
///
/// # Arguments
/// - `curve_3d` some type that implements the [`Curve`] trait and samples `Vec3`s
/// - `times` some iterable type yielding `f32` which will be used for sampling the curve
/// - `color` the color of the curve
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::palettes::basic::{RED};
/// fn system(mut gizmos: Gizmos) {
/// let domain = Interval::UNIT;
/// let curve = FunctionCurve::new(domain, |t| {
/// let (x,y) = t.sin_cos();
/// Vec3::new(x, y, t)
/// });
/// gizmos.curve_3d(curve, (0..=100).map(|n| n as f32 / 100.0), RED);
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn curve_3d(
&mut self,
curve_3d: impl Curve<Vec3>,
times: impl IntoIterator<Item = f32>,
color: impl Into<Color>,
) {
self.linestrip(curve_3d.sample_iter(times).flatten(), color);
}
/// Draw a curve, at the given time points, sampling in 2D, with a color gradient.
///
/// Samples of time points outside of the curve's domain will be filtered out and won't
/// contribute to the rendering. If you wish to render the curve outside of its domain you need
/// to create a new curve with an extended domain.
///
/// # Arguments
/// - `curve_2d` some type that implements the [`Curve`] trait and samples `Vec2`s
/// - `times_with_colors` some iterable type yielding `f32` which will be used for sampling
/// the curve together with the color at this position
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::{Mix, palettes::basic::{GREEN, RED}};
/// fn system(mut gizmos: Gizmos) {
/// let domain = Interval::UNIT;
/// let curve = FunctionCurve::new(domain, |t| Vec2::from(t.sin_cos()));
/// gizmos.curve_gradient_2d(
/// curve,
/// (0..=100).map(|n| n as f32 / 100.0)
/// .map(|t| (t, GREEN.mix(&RED, t)))
/// );
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn curve_gradient_2d<C>(
&mut self,
curve_2d: impl Curve<Vec2>,
times_with_colors: impl IntoIterator<Item = (f32, C)>,
) where
C: Into<Color>,
{
self.linestrip_gradient_2d(
times_with_colors
.into_iter()
.filter_map(|(time, color)| curve_2d.sample(time).map(|sample| (sample, color))),
);
}
/// Draw a curve, at the given time points, sampling in 3D, with a color gradient.
///
/// Samples of time points outside of the curve's domain will be filtered out and won't
/// contribute to the rendering. If you wish to render the curve outside of its domain you need
/// to create a new curve with an extended domain.
///
/// # Arguments
/// - `curve_3d` some type that implements the [`Curve`] trait and samples `Vec3`s
/// - `times_with_colors` some iterable type yielding `f32` which will be used for sampling
/// the curve together with the color at this position
///
/// # Example
/// ```
/// # use bevy_gizmos::prelude::*;
/// # use bevy_math::prelude::*;
/// # use bevy_color::{Mix, palettes::basic::{GREEN, RED}};
/// fn system(mut gizmos: Gizmos) {
/// let domain = Interval::UNIT;
/// let curve = FunctionCurve::new(domain, |t| {
/// let (x,y) = t.sin_cos();
/// Vec3::new(x, y, t)
/// });
/// gizmos.curve_gradient_3d(
/// curve,
/// (0..=100).map(|n| n as f32 / 100.0)
/// .map(|t| (t, GREEN.mix(&RED, t)))
/// );
/// }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
pub fn curve_gradient_3d<C>(
&mut self,
curve_3d: impl Curve<Vec3>,
times_with_colors: impl IntoIterator<Item = (f32, C)>,
) where
C: Into<Color>,
{
self.linestrip_gradient(
times_with_colors
.into_iter()
.filter_map(|(time, color)| curve_3d.sample(time).map(|sample| (sample, color))),
);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/primitives/dim2.rs | crates/bevy_gizmos/src/primitives/dim2.rs | //! A module for rendering each of the 2D [`bevy_math::primitives`] with [`GizmoBuffer`].
use core::f32::consts::{FRAC_PI_2, PI};
use super::helpers::*;
use bevy_color::Color;
use bevy_math::{
primitives::{
Annulus, Arc2d, Capsule2d, Circle, CircularSector, CircularSegment, Ellipse, Line2d,
Plane2d, Polygon, Polyline2d, Primitive2d, Rectangle, RegularPolygon, Rhombus, Segment2d,
Triangle2d,
},
Dir2, Isometry2d, Rot2, Vec2,
};
use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
// some magic number since using directions as offsets will result in lines of length 1 pixel
const MIN_LINE_LEN: f32 = 50.0;
const HALF_MIN_LINE_LEN: f32 = 25.0;
// length used to simulate infinite lines
const INFINITE_LEN: f32 = 100_000.0;
/// A trait for rendering 2D geometric primitives (`P`) with [`GizmoBuffer`].
pub trait GizmoPrimitive2d<P: Primitive2d> {
/// The output of `primitive_2d`. This is a builder to set non-default values.
type Output<'a>
where
Self: 'a;
/// Renders a 2D primitive with its associated details.
fn primitive_2d(
&mut self,
primitive: &P,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_>;
}
// direction 2d
impl<Config, Clear> GizmoPrimitive2d<Dir2> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Dir2,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let start = Vec2::ZERO;
let end = *primitive * MIN_LINE_LEN;
self.arrow_2d(isometry * start, isometry * end, color);
}
}
// arc 2d
impl<Config, Clear> GizmoPrimitive2d<Arc2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Arc2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let start_iso = isometry * Isometry2d::from_rotation(Rot2::radians(-primitive.half_angle));
self.arc_2d(
start_iso,
primitive.half_angle * 2.0,
primitive.radius,
color,
);
}
}
// circle 2d
impl<Config, Clear> GizmoPrimitive2d<Circle> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= crate::circles::Ellipse2dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Circle,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
self.circle_2d(isometry, primitive.radius, color)
}
}
// circular sector 2d
impl<Config, Clear> GizmoPrimitive2d<CircularSector> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &CircularSector,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let color = color.into();
let start_iso =
isometry * Isometry2d::from_rotation(Rot2::radians(-primitive.arc.half_angle));
let end_iso = isometry * Isometry2d::from_rotation(Rot2::radians(primitive.arc.half_angle));
// we need to draw the arc part of the sector, and the two lines connecting the arc and the center
self.arc_2d(
start_iso,
primitive.arc.half_angle * 2.0,
primitive.arc.radius,
color,
);
let end_position = primitive.arc.radius * Vec2::Y;
self.line_2d(isometry * Vec2::ZERO, start_iso * end_position, color);
self.line_2d(isometry * Vec2::ZERO, end_iso * end_position, color);
}
}
// circular segment 2d
impl<Config, Clear> GizmoPrimitive2d<CircularSegment> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &CircularSegment,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let color = color.into();
let start_iso =
isometry * Isometry2d::from_rotation(Rot2::radians(-primitive.arc.half_angle));
let end_iso = isometry * Isometry2d::from_rotation(Rot2::radians(primitive.arc.half_angle));
// we need to draw the arc part of the segment, and the line connecting the two ends
self.arc_2d(
start_iso,
primitive.arc.half_angle * 2.0,
primitive.arc.radius,
color,
);
let position = primitive.arc.radius * Vec2::Y;
self.line_2d(start_iso * position, end_iso * position, color);
}
}
// ellipse 2d
impl<Config, Clear> GizmoPrimitive2d<Ellipse> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= crate::circles::Ellipse2dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_2d<'a>(
&mut self,
primitive: &Ellipse,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
self.ellipse_2d(isometry, primitive.half_size, color)
}
}
// annulus 2d
/// Builder for configuring the drawing options of [`Annulus`].
pub struct Annulus2dBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
isometry: Isometry2d,
inner_radius: f32,
outer_radius: f32,
color: Color,
inner_resolution: u32,
outer_resolution: u32,
}
impl<Config, Clear> Annulus2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Set the number of line-segments for each circle of the annulus.
pub fn resolution(mut self, resolution: u32) -> Self {
self.outer_resolution = resolution;
self.inner_resolution = resolution;
self
}
/// Set the number of line-segments for the outer circle of the annulus.
pub fn outer_resolution(mut self, resolution: u32) -> Self {
self.outer_resolution = resolution;
self
}
/// Set the number of line-segments for the inner circle of the annulus.
pub fn inner_resolution(mut self, resolution: u32) -> Self {
self.inner_resolution = resolution;
self
}
}
impl<Config, Clear> GizmoPrimitive2d<Annulus> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= Annulus2dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Annulus,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
Annulus2dBuilder {
gizmos: self,
isometry: isometry.into(),
inner_radius: primitive.inner_circle.radius,
outer_radius: primitive.outer_circle.radius,
color: color.into(),
inner_resolution: crate::circles::DEFAULT_CIRCLE_RESOLUTION,
outer_resolution: crate::circles::DEFAULT_CIRCLE_RESOLUTION,
}
}
}
impl<Config, Clear> Drop for Annulus2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
let Annulus2dBuilder {
gizmos,
isometry,
inner_radius,
outer_radius,
inner_resolution,
outer_resolution,
color,
..
} = self;
gizmos
.circle_2d(*isometry, *outer_radius, *color)
.resolution(*outer_resolution);
gizmos
.circle_2d(*isometry, *inner_radius, *color)
.resolution(*inner_resolution);
}
}
// rhombus 2d
impl<Config, Clear> GizmoPrimitive2d<Rhombus> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Rhombus,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
};
let isometry = isometry.into();
let [a, b, c, d] =
[(1.0, 0.0), (0.0, 1.0), (-1.0, 0.0), (0.0, -1.0)].map(|(sign_x, sign_y)| {
Vec2::new(
primitive.half_diagonals.x * sign_x,
primitive.half_diagonals.y * sign_y,
)
});
let positions = [a, b, c, d].map(|vec2| isometry * vec2);
self.lineloop_2d(positions, color);
}
}
// capsule 2d
impl<Config, Clear> GizmoPrimitive2d<Capsule2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Capsule2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
let isometry = isometry.into();
let polymorphic_color: Color = color.into();
if !self.enabled {
return;
}
// transform points from the reference unit square to capsule "rectangle"
let [top_left, top_right, bottom_left, bottom_right, top_center, bottom_center] = [
[-1.0, 1.0],
[1.0, 1.0],
[-1.0, -1.0],
[1.0, -1.0],
// just reuse the pipeline for these points as well
[0.0, 1.0],
[0.0, -1.0],
]
.map(|[sign_x, sign_y]| Vec2::X * sign_x + Vec2::Y * sign_y)
.map(|reference_point| {
let scaling = Vec2::X * primitive.radius + Vec2::Y * primitive.half_length;
reference_point * scaling
})
.map(|vec2| isometry * vec2);
// draw left and right side of capsule "rectangle"
self.line_2d(bottom_left, top_left, polymorphic_color);
self.line_2d(bottom_right, top_right, polymorphic_color);
let start_angle_top = isometry.rotation.as_radians() - FRAC_PI_2;
let start_angle_bottom = isometry.rotation.as_radians() + FRAC_PI_2;
// draw arcs
self.arc_2d(
Isometry2d::new(top_center, Rot2::radians(start_angle_top)),
PI,
primitive.radius,
polymorphic_color,
);
self.arc_2d(
Isometry2d::new(bottom_center, Rot2::radians(start_angle_bottom)),
PI,
primitive.radius,
polymorphic_color,
);
}
}
// line 2d
//
/// Builder for configuring the drawing options of [`Line2d`].
pub struct Line2dBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
direction: Dir2, // Direction of the line
isometry: Isometry2d,
color: Color, // color of the line
draw_arrow: bool, // decides whether to indicate the direction of the line with an arrow
}
impl<Config, Clear> Line2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Set the drawing mode of the line (arrow vs. plain line)
pub fn draw_arrow(mut self, is_enabled: bool) -> Self {
self.draw_arrow = is_enabled;
self
}
}
impl<Config, Clear> GizmoPrimitive2d<Line2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= Line2dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Line2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
Line2dBuilder {
gizmos: self,
direction: primitive.direction,
isometry: isometry.into(),
color: color.into(),
draw_arrow: false,
}
}
}
impl<Config, Clear> Drop for Line2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
let [start, end] = [1.0, -1.0]
.map(|sign| sign * INFINITE_LEN)
// offset the line from the origin infinitely into the given direction
.map(|length| self.direction * length)
// transform the line with the given isometry
.map(|offset| self.isometry * offset);
self.gizmos.line_2d(start, end, self.color);
// optionally draw an arrow head at the center of the line
if self.draw_arrow {
self.gizmos.arrow_2d(
self.isometry * (-self.direction * MIN_LINE_LEN),
self.isometry * Vec2::ZERO,
self.color,
);
}
}
}
// plane 2d
impl<Config, Clear> GizmoPrimitive2d<Plane2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Plane2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
let isometry = isometry.into();
let polymorphic_color: Color = color.into();
if !self.enabled {
return;
}
// draw normal of the plane (orthogonal to the plane itself)
let normal = primitive.normal;
let normal_segment = Segment2d::from_direction_and_length(normal, HALF_MIN_LINE_LEN * 2.);
self.primitive_2d(
&normal_segment,
// offset the normal so it starts on the plane line
Isometry2d::new(isometry * (HALF_MIN_LINE_LEN * normal), isometry.rotation),
polymorphic_color,
)
.draw_arrow(true);
// draw the plane line
let direction = Dir2::new_unchecked(-normal.perp());
self.primitive_2d(&Line2d { direction }, isometry, polymorphic_color)
.draw_arrow(false);
// draw an arrow such that the normal is always left side of the plane with respect to the
// planes direction. This is to follow the "counter-clockwise" convention
self.arrow_2d(
isometry * Vec2::ZERO,
isometry * (MIN_LINE_LEN * direction),
polymorphic_color,
);
}
}
// segment 2d
/// Builder for configuring the drawing options of [`Segment2d`].
pub struct Segment2dBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
point1: Vec2, // First point of the segment
point2: Vec2, // Second point of the segment
isometry: Isometry2d, // isometric transformation of the line segment
color: Color, // color of the line segment
draw_arrow: bool, // decides whether to draw just a line or an arrow
}
impl<Config, Clear> Segment2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Set the drawing mode of the line (arrow vs. plain line)
pub fn draw_arrow(mut self, is_enabled: bool) -> Self {
self.draw_arrow = is_enabled;
self
}
}
impl<Config, Clear> GizmoPrimitive2d<Segment2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= Segment2dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Segment2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
Segment2dBuilder {
gizmos: self,
point1: primitive.point1(),
point2: primitive.point2(),
isometry: isometry.into(),
color: color.into(),
draw_arrow: Default::default(),
}
}
}
impl<Config, Clear> Drop for Segment2dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
let segment = Segment2d::new(self.point1, self.point2).transformed(self.isometry);
if self.draw_arrow {
self.gizmos
.arrow_2d(segment.point1(), segment.point2(), self.color);
} else {
self.gizmos
.line_2d(segment.point1(), segment.point2(), self.color);
}
}
}
// polyline 2d
impl<Config, Clear> GizmoPrimitive2d<Polyline2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Polyline2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
self.linestrip_2d(
primitive
.vertices
.iter()
.copied()
.map(|vec2| isometry * vec2),
color,
);
}
}
// triangle 2d
impl<Config, Clear> GizmoPrimitive2d<Triangle2d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Triangle2d,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let [a, b, c] = primitive.vertices;
let positions = [a, b, c].map(|vec2| isometry * vec2);
self.lineloop_2d(positions, color);
}
}
// rectangle 2d
impl<Config, Clear> GizmoPrimitive2d<Rectangle> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Rectangle,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let [a, b, c, d] =
[(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0)].map(|(sign_x, sign_y)| {
Vec2::new(
primitive.half_size.x * sign_x,
primitive.half_size.y * sign_y,
)
});
let positions = [a, b, c, d].map(|vec2| isometry * vec2);
self.lineloop_2d(positions, color);
}
}
// polygon 2d
impl<Config, Clear> GizmoPrimitive2d<Polygon> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &Polygon,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let vertices = if primitive.vertices.first() == primitive.vertices.last() {
// Strip closing point if there is one
&primitive.vertices[..primitive.vertices.len() - 1]
} else {
&primitive.vertices[..]
};
self.lineloop_2d(vertices.iter().map(|&vec2| isometry * vec2), color);
}
}
// regular polygon 2d
impl<Config, Clear> GizmoPrimitive2d<RegularPolygon> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_2d(
&mut self,
primitive: &RegularPolygon,
isometry: impl Into<Isometry2d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let points = (0..primitive.sides)
.map(|n| single_circle_coordinate(primitive.circumcircle.radius, primitive.sides, n))
.map(|vec2| isometry * vec2);
self.lineloop_2d(points, color);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/primitives/helpers.rs | crates/bevy_gizmos/src/primitives/helpers.rs | use core::f32::consts::TAU;
use bevy_math::{ops, Vec2};
/// Calculates the `nth` coordinate of a circle.
///
/// Given a circle's radius and its resolution, this function computes the position
/// of the `nth` point along the circumference of the circle. The rotation starts at `(0.0, radius)`
/// and proceeds counter-clockwise.
pub(crate) fn single_circle_coordinate(radius: f32, resolution: u32, nth_point: u32) -> Vec2 {
let angle = nth_point as f32 * TAU / resolution as f32;
let (x, y) = ops::sin_cos(angle);
Vec2::new(x, y) * radius
}
/// Generates an iterator over the coordinates of a circle.
///
/// The coordinates form an open circle, meaning the first and last points aren't the same.
///
/// This function creates an iterator that yields the positions of points approximating a
/// circle with the given radius, divided into linear segments. The iterator produces `resolution`
/// number of points.
pub(crate) fn circle_coordinates(radius: f32, resolution: u32) -> impl Iterator<Item = Vec2> {
(0..)
.map(move |p| single_circle_coordinate(radius, resolution, p))
.take(resolution as usize)
}
/// Generates an iterator over the coordinates of a circle.
///
/// The coordinates form a closed circle, meaning the first and last points are the same.
///
/// This function creates an iterator that yields the positions of points approximating a
/// circle with the given radius, divided into linear segments. The iterator produces `resolution`
/// number of points.
pub(crate) fn circle_coordinates_closed(
radius: f32,
resolution: u32,
) -> impl Iterator<Item = Vec2> {
circle_coordinates(radius, resolution).chain(core::iter::once(single_circle_coordinate(
radius, resolution, resolution,
)))
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/primitives/mod.rs | crates/bevy_gizmos/src/primitives/mod.rs | //! A module for rendering each of the 2D and 3D [`bevy_math::primitives`] with [`Gizmos`](`crate::prelude::Gizmos`).
pub mod dim2;
pub mod dim3;
pub(crate) mod helpers;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos/src/primitives/dim3.rs | crates/bevy_gizmos/src/primitives/dim3.rs | //! A module for rendering each of the 3D [`bevy_math::primitives`] with [`GizmoBuffer`].
use super::helpers::*;
use bevy_color::Color;
use bevy_math::{
primitives::{
Capsule3d, Cone, ConicalFrustum, Cuboid, Cylinder, Line3d, Plane3d, Polyline3d,
Primitive3d, Segment3d, Sphere, Tetrahedron, Torus, Triangle3d,
},
Dir3, Isometry3d, Quat, UVec2, Vec2, Vec3,
};
use crate::{circles::SphereBuilder, gizmos::GizmoBuffer, prelude::GizmoConfigGroup};
const DEFAULT_RESOLUTION: u32 = 5;
// length used to simulate infinite lines
const INFINITE_LEN: f32 = 10_000.0;
/// A trait for rendering 3D geometric primitives (`P`) with [`GizmoBuffer`].
pub trait GizmoPrimitive3d<P: Primitive3d> {
/// The output of `primitive_3d`. This is a builder to set non-default values.
type Output<'a>
where
Self: 'a;
/// Renders a 3D primitive with its associated details.
fn primitive_3d(
&mut self,
primitive: &P,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_>;
}
// direction 3d
impl<Config, Clear> GizmoPrimitive3d<Dir3> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Dir3,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
let isometry = isometry.into();
let start = Vec3::ZERO;
let end = primitive.as_vec3();
self.arrow(isometry * start, isometry * end, color);
}
}
// sphere
impl<Config, Clear> GizmoPrimitive3d<Sphere> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= SphereBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Sphere,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
self.sphere(isometry, primitive.radius, color)
}
}
// plane 3d
/// Builder for configuring the drawing options of [`Plane3d`].
pub struct Plane3dBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
// Direction of the normal orthogonal to the plane
normal: Dir3,
isometry: Isometry3d,
// Color of the plane
color: Color,
// Defines the amount of cells in the x and y axes
cell_count: UVec2,
// Defines the distance between cells along the x and y axes
spacing: Vec2,
}
impl<Config, Clear> Plane3dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Set the number of cells in the x and y axes direction.
pub fn cell_count(mut self, cell_count: UVec2) -> Self {
self.cell_count = cell_count;
self
}
/// Set the distance between cells along the x and y axes.
pub fn spacing(mut self, spacing: Vec2) -> Self {
self.spacing = spacing;
self
}
}
impl<Config, Clear> GizmoPrimitive3d<Plane3d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= Plane3dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Plane3d,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
Plane3dBuilder {
gizmos: self,
normal: primitive.normal,
isometry: isometry.into(),
color: color.into(),
cell_count: UVec2::splat(3),
spacing: Vec2::splat(1.0),
}
}
}
impl<Config, Clear> Drop for Plane3dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
self.gizmos
.primitive_3d(&self.normal, self.isometry, self.color);
// the default orientation of the grid is Z-up
let rot = Quat::from_rotation_arc(Vec3::Z, self.normal.as_vec3());
self.gizmos.grid(
Isometry3d::new(self.isometry.translation, self.isometry.rotation * rot),
self.cell_count,
self.spacing,
self.color,
);
}
}
// line 3d
impl<Config, Clear> GizmoPrimitive3d<Line3d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Line3d,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let color = color.into();
let direction = primitive.direction.as_vec3();
self.arrow(isometry * Vec3::ZERO, isometry * direction, color);
let [start, end] = [1.0, -1.0]
.map(|sign| sign * INFINITE_LEN)
.map(|length| primitive.direction * length)
.map(|offset| isometry * offset);
self.line(start, end, color);
}
}
// segment 3d
impl<Config, Clear> GizmoPrimitive3d<Segment3d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Segment3d,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let transformed = primitive.transformed(isometry);
self.line(transformed.point1(), transformed.point2(), color);
}
}
// polyline 3d
impl<Config, Clear> GizmoPrimitive3d<Polyline3d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Polyline3d,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
self.linestrip(
primitive.vertices.iter().map(|vec3| isometry * *vec3),
color,
);
}
}
// triangle 3d
impl<Config, Clear> GizmoPrimitive3d<Triangle3d> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Triangle3d,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
let [a, b, c] = primitive.vertices;
self.lineloop([a, b, c].map(|vec3| isometry * vec3), color);
}
}
// cuboid
impl<Config, Clear> GizmoPrimitive3d<Cuboid> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= ()
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Cuboid,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
if !self.enabled {
return;
}
let isometry = isometry.into();
// transform the points from the reference unit cube to the cuboid coords
let vertices @ [a, b, c, d, e, f, g, h] = [
[1.0, 1.0, 1.0],
[-1.0, 1.0, 1.0],
[-1.0, -1.0, 1.0],
[1.0, -1.0, 1.0],
[1.0, 1.0, -1.0],
[-1.0, 1.0, -1.0],
[-1.0, -1.0, -1.0],
[1.0, -1.0, -1.0],
]
.map(Vec3::from)
.map(|vec3| vec3 * primitive.half_size)
.map(|vec3| isometry * vec3);
// lines for the upper rectangle of the cuboid
let upper = [a, b, c, d]
.into_iter()
.zip([a, b, c, d].into_iter().cycle().skip(1));
// lines for the lower rectangle of the cuboid
let lower = [e, f, g, h]
.into_iter()
.zip([e, f, g, h].into_iter().cycle().skip(1));
// lines connecting upper and lower rectangles of the cuboid
let connections = vertices.into_iter().zip(vertices.into_iter().skip(4));
let color = color.into();
upper
.chain(lower)
.chain(connections)
.for_each(|(start, end)| {
self.line(start, end, color);
});
}
}
// cylinder 3d
/// Builder for configuring the drawing options of [`Cylinder`].
pub struct Cylinder3dBuilder<'a, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
gizmos: &'a mut GizmoBuffer<Config, Clear>,
// Radius of the cylinder
radius: f32,
// Half height of the cylinder
half_height: f32,
isometry: Isometry3d,
// Color of the cylinder
color: Color,
// Number of lines used to approximate the cylinder geometry
resolution: u32,
}
impl<Config, Clear> Cylinder3dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
/// Set the number of lines used to approximate the top and bottom of the cylinder geometry.
pub fn resolution(mut self, resolution: u32) -> Self {
self.resolution = resolution;
self
}
}
impl<Config, Clear> GizmoPrimitive3d<Cylinder> for GizmoBuffer<Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
type Output<'a>
= Cylinder3dBuilder<'a, Config, Clear>
where
Self: 'a;
fn primitive_3d(
&mut self,
primitive: &Cylinder,
isometry: impl Into<Isometry3d>,
color: impl Into<Color>,
) -> Self::Output<'_> {
Cylinder3dBuilder {
gizmos: self,
radius: primitive.radius,
half_height: primitive.half_height,
isometry: isometry.into(),
color: color.into(),
resolution: DEFAULT_RESOLUTION,
}
}
}
impl<Config, Clear> Drop for Cylinder3dBuilder<'_, Config, Clear>
where
Config: GizmoConfigGroup,
Clear: 'static + Send + Sync,
{
fn drop(&mut self) {
if !self.gizmos.enabled {
return;
}
self.gizmos
.primitive_3d(
&ConicalFrustum {
radius_top: self.radius,
radius_bottom: self.radius,
height: self.half_height * 2.0,
},
self.isometry,
self.color,
)
.resolution(self.resolution);
}
}
// capsule 3d
/// Builder for configuring the drawing options of [`Capsule3d`].
pub struct Capsule3dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Radius of the capsule
    radius: f32,
    // Half length of the capsule's cylindrical middle section
    half_length: f32,
    // Position and orientation of the capsule
    isometry: Isometry3d,
    // Color of the capsule
    color: Color,
    // Number of lines used to approximate the capsule geometry
    resolution: u32,
}
impl<Config, Clear> Capsule3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines used to approximate the capsule geometry.
    pub fn resolution(self, resolution: u32) -> Self {
        // Functional update: replace only the resolution, keep everything else.
        Self { resolution, ..self }
    }
}
impl<Config, Clear> GizmoPrimitive3d<Capsule3d> for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type Output<'a>
        = Capsule3dBuilder<'a, Config, Clear>
    where
        Self: 'a;
    // Returns a builder; the actual line drawing happens when the builder is dropped.
    fn primitive_3d(
        &mut self,
        primitive: &Capsule3d,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Self::Output<'_> {
        Capsule3dBuilder {
            gizmos: self,
            radius: primitive.radius,
            half_length: primitive.half_length,
            isometry: isometry.into(),
            color: color.into(),
            resolution: DEFAULT_RESOLUTION,
        }
    }
}
impl<Config, Clear> Drop for Capsule3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Draws the capsule when the builder goes out of scope: hemisphere caps as arcs,
    // two rim circles, and straight lines connecting the two rims.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // NOTE(review): `upper_*` bindings are built from sign -1.0 (the -Y side) and
        // `lower_*` from +1.0. The result is symmetric so rendering is unaffected, but
        // the names read as swapped — confirm intent.
        let [upper_apex, lower_apex] = [-1.0, 1.0]
            .map(|sign| Vec3::Y * sign * (self.half_length + self.radius))
            .map(|vec3| self.isometry * vec3);
        let [upper_center, lower_center] = [-1.0, 1.0]
            .map(|sign| Vec3::Y * sign * self.half_length)
            .map(|vec3| self.isometry * vec3);
        let [upper_points, lower_points] = [-1.0, 1.0]
            .map(|sign| Vec3::Y * sign * self.half_length)
            .map(|vec3| {
                circle_coordinates_closed(self.radius, self.resolution)
                    .map(|vec2| Vec3::new(vec2.x, 0.0, vec2.y) + vec3)
                    .map(|vec3| self.isometry * vec3)
                    .collect::<Vec<_>>()
            });
        // Hemisphere caps: one arc from each rim point up/down to the apex.
        // `circle_coordinates_closed` repeats the first point at the end, so skip one
        // to avoid drawing a duplicate arc.
        upper_points.iter().skip(1).copied().for_each(|start| {
            self.gizmos
                .short_arc_3d_between(upper_center, start, upper_apex, self.color);
        });
        lower_points.iter().skip(1).copied().for_each(|start| {
            self.gizmos
                .short_arc_3d_between(lower_center, start, lower_apex, self.color);
        });
        // Rim circles lie in the local XZ plane, hence the extra 90° rotation about X.
        let circle_rotation = self
            .isometry
            .rotation
            .mul_quat(Quat::from_rotation_x(core::f32::consts::FRAC_PI_2));
        self.gizmos.circle(
            Isometry3d::new(upper_center, circle_rotation),
            self.radius,
            self.color,
        );
        self.gizmos.circle(
            Isometry3d::new(lower_center, circle_rotation),
            self.radius,
            self.color,
        );
        // Straight lines connecting matching points of the two rims.
        let connection_lines = upper_points.into_iter().zip(lower_points).skip(1);
        connection_lines.for_each(|(start, end)| {
            self.gizmos.line(start, end, self.color);
        });
    }
}
// cone 3d
/// Builder for configuring the drawing options of [`Cone`].
pub struct Cone3dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Radius of the cone
    radius: f32,
    // Height of the cone
    height: f32,
    // Position and orientation of the cone
    isometry: Isometry3d,
    // Color of the cone
    color: Color,
    // Number of lines used to approximate the cone base geometry
    base_resolution: u32,
    // Number of lines used to approximate the cone height geometry
    height_resolution: u32,
}
impl<Config, Clear> Cone3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines used to approximate the cone geometry for its base and height.
    pub fn resolution(mut self, resolution: u32) -> Self {
        self.base_resolution = resolution;
        self.height_resolution = resolution;
        self
    }
    /// Set the number of lines used to approximate the base of the cone geometry.
    ///
    /// `resolution` should be a multiple of the value passed to [`Self::height_resolution`]
    /// for the height to connect properly with the base.
    pub fn base_resolution(mut self, resolution: u32) -> Self {
        self.base_resolution = resolution;
        self
    }
    /// Set the number of lines used to approximate the height of the cone geometry.
    ///
    /// `resolution` should be a divisor of the value passed to [`Self::base_resolution`]
    /// for the height to connect properly with the base.
    pub fn height_resolution(mut self, resolution: u32) -> Self {
        self.height_resolution = resolution;
        self
    }
}
impl<Config, Clear> GizmoPrimitive3d<Cone> for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type Output<'a>
        = Cone3dBuilder<'a, Config, Clear>
    where
        Self: 'a;
    // Returns a builder; the actual line drawing happens when the builder is dropped.
    fn primitive_3d(
        &mut self,
        primitive: &Cone,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Self::Output<'_> {
        Cone3dBuilder {
            gizmos: self,
            radius: primitive.radius,
            height: primitive.height,
            isometry: isometry.into(),
            color: color.into(),
            base_resolution: DEFAULT_RESOLUTION,
            height_resolution: DEFAULT_RESOLUTION,
        }
    }
}
impl<Config, Clear> Drop for Cone3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Draws the cone when the builder goes out of scope: lines connecting the base rim
    // to the apex (using `height_resolution`) plus the base circle (using
    // `base_resolution`).
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        let half_height = self.height * 0.5;
        // The apex sits at +Y * half_height in local space, the base at -Y * half_height.
        let apex = self.isometry * (Vec3::Y * half_height);
        let circle_center = half_height * Vec3::NEG_Y;
        // connections to apex
        // `circle_coordinates_closed` repeats the first point at the end; skip the
        // duplicate so the apex connection is not drawn twice.
        circle_coordinates_closed(self.radius, self.height_resolution)
            .map(|vec2| Vec3::new(vec2.x, 0.0, vec2.y) + circle_center)
            .map(|vec3| self.isometry * vec3)
            .skip(1)
            .for_each(|start| {
                self.gizmos.line(start, apex, self.color);
            });
        // base circle
        // Fix: previously the base circle reused the `height_resolution` point set,
        // which made the documented `base_resolution` setter a no-op. Generate the base
        // rim with `base_resolution` instead (identical output when both resolutions
        // are equal, which is the default).
        let base_points = circle_coordinates_closed(self.radius, self.base_resolution)
            .map(|vec2| Vec3::new(vec2.x, 0.0, vec2.y) + circle_center)
            .map(|vec3| self.isometry * vec3)
            .collect::<Vec<_>>();
        base_points
            .windows(2)
            .map(|win| (win[0], win[1]))
            .for_each(|(start, end)| {
                self.gizmos.line(start, end, self.color);
            });
    }
}
// conical frustum 3d
/// Builder for configuring the drawing options of [`ConicalFrustum`].
pub struct ConicalFrustum3dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Radius of the top circle
    radius_top: f32,
    // Radius of the bottom circle
    radius_bottom: f32,
    // Height of the conical frustum
    height: f32,
    // Position and orientation of the conical frustum
    isometry: Isometry3d,
    // Color of the conical frustum
    color: Color,
    // Number of lines used to approximate the curved surfaces
    resolution: u32,
}
impl<Config, Clear> ConicalFrustum3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines used to approximate the curved surfaces.
    pub fn resolution(self, resolution: u32) -> Self {
        // Functional update: replace only the resolution, keep everything else.
        Self { resolution, ..self }
    }
}
impl<Config, Clear> GizmoPrimitive3d<ConicalFrustum> for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type Output<'a>
        = ConicalFrustum3dBuilder<'a, Config, Clear>
    where
        Self: 'a;
    // Returns a builder; the actual line drawing happens when the builder is dropped.
    fn primitive_3d(
        &mut self,
        primitive: &ConicalFrustum,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Self::Output<'_> {
        ConicalFrustum3dBuilder {
            gizmos: self,
            radius_top: primitive.radius_top,
            radius_bottom: primitive.radius_bottom,
            height: primitive.height,
            isometry: isometry.into(),
            color: color.into(),
            resolution: DEFAULT_RESOLUTION,
        }
    }
}
impl<Config, Clear> Drop for ConicalFrustum3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Draws the frustum when the builder goes out of scope: two circles of different
    // radii plus straight lines connecting matching points of the two rims.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        let half_height = self.height * 0.5;
        // Bottom circle at -Y * half_height, top circle at +Y * half_height.
        let [upper_points, lower_points] = [(-1.0, self.radius_bottom), (1.0, self.radius_top)]
            .map(|(sign, radius)| {
                let translation = Vec3::Y * sign * half_height;
                circle_coordinates_closed(radius, self.resolution)
                    .map(|vec2| Vec3::new(vec2.x, 0.0, vec2.y) + translation)
                    .map(|vec3| self.isometry * vec3)
                    .collect::<Vec<_>>()
            });
        // Each rim is drawn as consecutive segments between neighboring points.
        let upper_lines = upper_points.windows(2).map(|win| (win[0], win[1]));
        let lower_lines = lower_points.windows(2).map(|win| (win[0], win[1]));
        upper_lines.chain(lower_lines).for_each(|(start, end)| {
            self.gizmos.line(start, end, self.color);
        });
        // The closed circles repeat the first point at the end; skip one so the
        // connecting lines are not duplicated.
        let connection_lines = upper_points.into_iter().zip(lower_points).skip(1);
        connection_lines.for_each(|(start, end)| {
            self.gizmos.line(start, end, self.color);
        });
    }
}
// torus 3d
/// Builder for configuring the drawing options of [`Torus`].
pub struct Torus3dBuilder<'a, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    gizmos: &'a mut GizmoBuffer<Config, Clear>,
    // Radius of the minor circle (tube)
    minor_radius: f32,
    // Radius of the major circle (ring)
    major_radius: f32,
    // Position and orientation of the torus
    isometry: Isometry3d,
    // Color of the torus
    color: Color,
    // Number of lines in the minor (tube) direction
    minor_resolution: u32,
    // Number of lines in the major (ring) direction
    major_resolution: u32,
}
impl<Config, Clear> Torus3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    /// Set the number of lines in the minor (tube) direction.
    pub fn minor_resolution(self, minor_resolution: u32) -> Self {
        // Functional update: replace only this field, keep everything else.
        Self {
            minor_resolution,
            ..self
        }
    }
    /// Set the number of lines in the major (ring) direction.
    pub fn major_resolution(self, major_resolution: u32) -> Self {
        Self {
            major_resolution,
            ..self
        }
    }
}
impl<Config, Clear> GizmoPrimitive3d<Torus> for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    type Output<'a>
        = Torus3dBuilder<'a, Config, Clear>
    where
        Self: 'a;
    // Returns a builder; the actual line drawing happens when the builder is dropped.
    fn primitive_3d(
        &mut self,
        primitive: &Torus,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Self::Output<'_> {
        Torus3dBuilder {
            gizmos: self,
            minor_radius: primitive.minor_radius,
            major_radius: primitive.major_radius,
            isometry: isometry.into(),
            color: color.into(),
            minor_resolution: DEFAULT_RESOLUTION,
            major_resolution: DEFAULT_RESOLUTION,
        }
    }
}
impl<Config, Clear> Drop for Torus3dBuilder<'_, Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // Draws the torus when the builder goes out of scope: four rings around the major
    // circle (inner, outer, top, bottom) plus minor-circle arcs connecting them.
    fn drop(&mut self) {
        if !self.gizmos.enabled {
            return;
        }
        // draw 4 circles with major_radius
        let [inner, outer, top, bottom] = [
            (self.major_radius - self.minor_radius, 0.0),
            (self.major_radius + self.minor_radius, 0.0),
            (self.major_radius, self.minor_radius),
            (self.major_radius, -self.minor_radius),
        ]
        .map(|(radius, height)| {
            let translation = height * Vec3::Y;
            circle_coordinates_closed(radius, self.major_resolution)
                .map(|vec2| Vec3::new(vec2.x, 0.0, vec2.y) + translation)
                .map(|vec3| self.isometry * vec3)
                .collect::<Vec<_>>()
        });
        // Each ring is drawn as consecutive segments between neighboring points.
        [&inner, &outer, &top, &bottom]
            .iter()
            .flat_map(|points| points.windows(2).map(|win| (win[0], win[1])))
            .for_each(|(start, end)| {
                self.gizmos.line(start, end, self.color);
            });
        // For every angular step, approximate the tube cross-section with four arcs
        // (inner→top→outer→bottom→inner) centered on the averaged point.
        inner
            .into_iter()
            .zip(top)
            .zip(outer)
            .zip(bottom)
            .flat_map(|(((inner, top), outer), bottom)| {
                let center = (inner + top + outer + bottom) * 0.25;
                [(inner, top), (top, outer), (outer, bottom), (bottom, inner)]
                    .map(|(start, end)| (start, end, center))
            })
            .for_each(|(from, to, center)| {
                self.gizmos
                    .short_arc_3d_between(center, from, to, self.color)
                    .resolution(self.minor_resolution);
            });
    }
}
// tetrahedron
impl<Config, Clear> GizmoPrimitive3d<Tetrahedron> for GizmoBuffer<Config, Clear>
where
    Config: GizmoConfigGroup,
    Clear: 'static + Send + Sync,
{
    // No builder needed: a tetrahedron has no configurable resolution, so it is drawn
    // immediately and the output type is `()`.
    type Output<'a>
        = ()
    where
        Self: 'a;
    fn primitive_3d(
        &mut self,
        primitive: &Tetrahedron,
        isometry: impl Into<Isometry3d>,
        color: impl Into<Color>,
    ) -> Self::Output<'_> {
        if !self.enabled {
            return;
        }
        let isometry = isometry.into();
        let [a, b, c, d] = primitive.vertices.map(|vec3| isometry * vec3);
        // All six edges of the tetrahedron.
        let lines = [(a, b), (a, c), (a, d), (b, c), (b, d), (c, d)];
        let color = color.into();
        lines.into_iter().for_each(|(start, end)| {
            self.line(start, end, color);
        });
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_shader/src/lib.rs | crates/bevy_shader/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
extern crate alloc;
mod shader;
mod shader_cache;
pub use shader::*;
pub use shader_cache::*;
/// The shader prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
    #[doc(hidden)]
    pub use crate::Shader;
}
// Re-exports used internally by this crate's macros; not part of the public API.
#[doc(hidden)]
pub mod _macro {
    pub use bevy_asset;
}
/// Inline shader as an `embedded_asset` and load it permanently.
///
/// This works around a limitation of the shader loader not properly loading
/// dependencies of shaders.
#[macro_export]
macro_rules! load_shader_library {
    ($asset_server_provider: expr, $path: literal $(, $settings: expr)?) => {
        $crate::_macro::bevy_asset::embedded_asset!($asset_server_provider, $path);
        let handle: $crate::_macro::bevy_asset::prelude::Handle<$crate::prelude::Shader> =
            $crate::_macro::bevy_asset::load_embedded_asset!(
                $asset_server_provider,
                $path
                $(,$settings)?
            );
        // Intentionally leak the strong handle so the shader asset is never unloaded.
        core::mem::forget(handle);
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_shader/src/shader_cache.rs | crates/bevy_shader/src/shader_cache.rs | use crate::shader::*;
use alloc::sync::Arc;
use bevy_asset::AssetId;
use bevy_platform::collections::{hash_map::EntryRef, HashMap, HashSet};
use core::hash::Hash;
use naga::valid::Capabilities;
use thiserror::Error;
use tracing::debug;
use wgpu_types::{DownlevelFlags, Features};
/// Source of a shader module.
///
/// The source will be parsed and validated.
///
/// Any necessary shader translation (e.g. from WGSL to SPIR-V or vice versa)
/// will be done internally by wgpu.
///
/// This type is unique to the Rust API of `wgpu`. In the WebGPU specification,
/// only WGSL source code strings are accepted.
///
/// This is roughly equivalent to `wgpu::ShaderSource`
#[cfg_attr(
    not(feature = "decoupled_naga"),
    expect(
        clippy::large_enum_variant,
        reason = "naga modules are the most common use, and are large"
    )
)]
#[derive(Clone, Debug)]
pub enum ShaderCacheSource<'a> {
    /// SPIR-V module represented as a borrowed slice of bytes.
    SpirV(&'a [u8]),
    /// WGSL module as an owned string.
    Wgsl(String),
    /// Naga module.
    #[cfg(not(feature = "decoupled_naga"))]
    Naga(naga::Module),
}
/// Index of a cached pipeline in the pipeline cache.
pub type CachedPipelineId = usize;
// Per-shader bookkeeping: which pipelines use it, its compiled variants, and its
// import relationships (in both directions).
struct ShaderData<ShaderModule> {
    // Pipelines that need re-queuing when this shader changes.
    pipelines: HashSet<CachedPipelineId>,
    // Compiled modules keyed by the shader-def set they were built with.
    processed_shaders: HashMap<Box<[ShaderDefVal]>, Arc<ShaderModule>>,
    // Imports of this shader that have been resolved to a concrete asset.
    resolved_imports: HashMap<ShaderImport, AssetId<Shader>>,
    // Shaders that import this one (used to cascade invalidation).
    dependents: HashSet<AssetId<Shader>>,
}
impl<T> Default for ShaderData<T> {
fn default() -> Self {
Self {
pipelines: Default::default(),
processed_shaders: Default::default(),
resolved_imports: Default::default(),
dependents: Default::default(),
}
}
}
/// Caches processed shader modules and tracks shader import dependencies, so that
/// shaders are recomposed and recompiled only when they (or their imports) change.
pub struct ShaderCache<ShaderModule, RenderDevice> {
    // Per-shader bookkeeping (compiled variants, dependents, pipelines).
    data: HashMap<AssetId<Shader>, ShaderData<ShaderModule>>,
    // Backend-specific function that turns processed source into a shader module.
    load_module: fn(
        &RenderDevice,
        ShaderCacheSource,
        &ValidateShader,
    ) -> Result<ShaderModule, PipelineCacheError>,
    #[cfg(feature = "shader_format_wesl")]
    module_path_to_asset_id: HashMap<wesl::syntax::ModulePath, AssetId<Shader>>,
    // All currently-known shader assets.
    shaders: HashMap<AssetId<Shader>, Shader>,
    // Maps an import path to the shader asset providing it.
    import_path_shaders: HashMap<ShaderImport, AssetId<Shader>>,
    // Shaders whose imports are not yet available, keyed by the missing import.
    waiting_on_import: HashMap<ShaderImport, Vec<AssetId<Shader>>>,
    pub composer: naga_oil::compose::Composer,
}
/// A single shader definition: a key plus a boolean or integer value used during
/// shader preprocessing.
#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq, Debug, Hash)]
pub enum ShaderDefVal {
    Bool(String, bool),
    Int(String, i32),
    UInt(String, u32),
}
impl From<&str> for ShaderDefVal {
fn from(key: &str) -> Self {
ShaderDefVal::Bool(key.to_string(), true)
}
}
impl From<String> for ShaderDefVal {
fn from(key: String) -> Self {
ShaderDefVal::Bool(key, true)
}
}
impl ShaderDefVal {
pub fn value_as_string(&self) -> String {
match self {
ShaderDefVal::Bool(_, def) => def.to_string(),
ShaderDefVal::Int(_, def) => def.to_string(),
ShaderDefVal::UInt(_, def) => def.to_string(),
}
}
}
impl<ShaderModule, RenderDevice> ShaderCache<ShaderModule, RenderDevice> {
    /// Creates an empty cache whose composer is configured with the capabilities
    /// implied by the device's `features` and `downlevel` flags.
    ///
    /// `load_module` is the backend-specific function used to turn processed shader
    /// source into a compiled module.
    pub fn new(
        features: Features,
        downlevel: DownlevelFlags,
        load_module: fn(
            &RenderDevice,
            ShaderCacheSource,
            &ValidateShader,
        ) -> Result<ShaderModule, PipelineCacheError>,
    ) -> Self {
        let capabilities = get_capabilities(features, downlevel);
        // Validate composed modules in debug builds only; release builds skip
        // validation for faster shader processing.
        #[cfg(debug_assertions)]
        let composer = naga_oil::compose::Composer::default();
        #[cfg(not(debug_assertions))]
        let composer = naga_oil::compose::Composer::non_validating();
        let composer = composer.with_capabilities(capabilities);
        Self {
            composer,
            load_module,
            data: Default::default(),
            #[cfg(feature = "shader_format_wesl")]
            module_path_to_asset_id: Default::default(),
            shaders: Default::default(),
            import_path_shaders: Default::default(),
            waiting_on_import: Default::default(),
        }
    }
    /// Recursively registers `import` (and its own imports) with the composer.
    ///
    /// Returns `ShaderImportNotYetAvailable` if any transitive import has not been
    /// loaded into the cache yet.
    #[expect(
        clippy::result_large_err,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )]
    fn add_import_to_composer(
        composer: &mut naga_oil::compose::Composer,
        import_path_shaders: &HashMap<ShaderImport, AssetId<Shader>>,
        shaders: &HashMap<AssetId<Shader>, Shader>,
        import: &ShaderImport,
    ) -> Result<(), PipelineCacheError> {
        // Early out if we've already imported this module
        if composer.contains_module(&import.module_name()) {
            return Ok(());
        }
        // Check if the import is available (this handles the recursive import case)
        let shader = import_path_shaders
            .get(import)
            .and_then(|handle| shaders.get(handle))
            .ok_or(PipelineCacheError::ShaderImportNotYetAvailable)?;
        // Recurse down to ensure all import dependencies are met
        for import in &shader.imports {
            Self::add_import_to_composer(composer, import_path_shaders, shaders, import)?;
        }
        composer.add_composable_module(shader.into())?;
        // if we fail to add a module the composer will tell us what is missing
        Ok(())
    }
    /// Returns the compiled module for shader `id` under `shader_defs`, compiling and
    /// caching it on first use, and records `pipeline` as a user of the shader.
    ///
    /// # Errors
    /// - `ShaderNotLoaded` if the shader asset is unknown to the cache.
    /// - `ShaderImportNotYetAvailable` if any of the shader's imports is missing.
    /// - Composition or module-creation errors from processing the source.
    #[expect(
        clippy::result_large_err,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )]
    pub fn get(
        &mut self,
        render_device: &RenderDevice,
        pipeline: CachedPipelineId,
        id: AssetId<Shader>,
        shader_defs: &[ShaderDefVal],
    ) -> Result<Arc<ShaderModule>, PipelineCacheError> {
        let shader = self
            .shaders
            .get(&id)
            .ok_or(PipelineCacheError::ShaderNotLoaded(id))?;
        let data = self.data.entry(id).or_default();
        // Bail out early unless every asset-path import has been resolved; custom
        // (named) imports are validated later by the composer itself.
        let n_asset_imports = shader
            .imports()
            .filter(|import| matches!(import, ShaderImport::AssetPath(_)))
            .count();
        let n_resolved_asset_imports = data
            .resolved_imports
            .keys()
            .filter(|import| matches!(import, ShaderImport::AssetPath(_)))
            .count();
        if n_asset_imports != n_resolved_asset_imports {
            return Err(PipelineCacheError::ShaderImportNotYetAvailable);
        }
        data.pipelines.insert(pipeline);
        // PERF: this shader_defs clone isn't great. use raw_entry_mut when it stabilizes
        let module = match data.processed_shaders.entry_ref(shader_defs) {
            EntryRef::Occupied(entry) => entry.into_mut(),
            EntryRef::Vacant(entry) => {
                debug!(
                    "processing shader {}, with shader defs {:?}",
                    id, shader_defs
                );
                let shader_source = match &shader.source {
                    Source::SpirV(data) => ShaderCacheSource::SpirV(data.as_ref()),
                    #[cfg(feature = "shader_format_wesl")]
                    Source::Wesl(_) => {
                        if let ShaderImport::AssetPath(path) = shader.import_path() {
                            let shader_resolver =
                                ShaderResolver::new(&self.module_path_to_asset_id, &self.shaders);
                            let module_path = wesl::syntax::ModulePath::from_path(path);
                            let mut compiler_options = wesl::CompileOptions {
                                imports: true,
                                condcomp: true,
                                lower: true,
                                ..Default::default()
                            };
                            // Only boolean defs map onto wesl feature flags.
                            for shader_def in shader_defs {
                                match shader_def {
                                    ShaderDefVal::Bool(key, value) => {
                                        compiler_options.features.flags.insert(key.clone(), (*value).into());
                                    }
                                    _ => debug!(
                                        "ShaderDefVal::Int and ShaderDefVal::UInt are not supported in wesl",
                                    ),
                                }
                            }
                            let compiled = wesl::compile(
                                &module_path,
                                &shader_resolver,
                                &wesl::EscapeMangler,
                                &compiler_options,
                            )
                            .unwrap();
                            ShaderCacheSource::Wgsl(compiled.to_string())
                        } else {
                            panic!("Wesl shaders must be imported from a file");
                        }
                    }
                    _ => {
                        // WGSL/GLSL path: register imports, then compose with both the
                        // caller's defs and the shader's own baked-in defs.
                        for import in shader.imports() {
                            Self::add_import_to_composer(
                                &mut self.composer,
                                &self.import_path_shaders,
                                &self.shaders,
                                import,
                            )?;
                        }
                        let shader_defs = shader_defs
                            .iter()
                            .chain(shader.shader_defs.iter())
                            .map(|def| match def.clone() {
                                ShaderDefVal::Bool(k, v) => {
                                    (k, naga_oil::compose::ShaderDefValue::Bool(v))
                                }
                                ShaderDefVal::Int(k, v) => {
                                    (k, naga_oil::compose::ShaderDefValue::Int(v))
                                }
                                ShaderDefVal::UInt(k, v) => {
                                    (k, naga_oil::compose::ShaderDefValue::UInt(v))
                                }
                            })
                            .collect::<std::collections::HashMap<_, _>>();
                        let naga = self.composer.make_naga_module(
                            naga_oil::compose::NagaModuleDescriptor {
                                shader_defs,
                                ..shader.into()
                            },
                        )?;
                        #[cfg(not(feature = "decoupled_naga"))]
                        {
                            ShaderCacheSource::Naga(naga)
                        }
                        #[cfg(feature = "decoupled_naga")]
                        {
                            // Round-trip through WGSL text so the backend does not need
                            // to share naga types with this crate.
                            let mut validator = naga::valid::Validator::new(
                                naga::valid::ValidationFlags::all(),
                                self.composer.capabilities,
                            );
                            let module_info = validator.validate(&naga).unwrap();
                            let wgsl = naga::back::wgsl::write_string(
                                &naga,
                                &module_info,
                                naga::back::wgsl::WriterFlags::empty(),
                            )
                            .unwrap();
                            ShaderCacheSource::Wgsl(wgsl)
                        }
                    }
                };
                let shader_module =
                    (self.load_module)(render_device, shader_source, &shader.validate_shader)?;
                entry.insert(Arc::new(shader_module))
            }
        };
        Ok(module.clone())
    }
    /// Invalidates `id` and every shader that (transitively) depends on it, returning
    /// all pipelines that must be re-queued as a result.
    fn clear(&mut self, id: AssetId<Shader>) -> Vec<CachedPipelineId> {
        let mut shaders_to_clear = vec![id];
        let mut pipelines_to_queue = Vec::new();
        while let Some(handle) = shaders_to_clear.pop() {
            if let Some(data) = self.data.get_mut(&handle) {
                data.processed_shaders.clear();
                pipelines_to_queue.extend(data.pipelines.iter().copied());
                shaders_to_clear.extend(data.dependents.iter().copied());
                if let Some(Shader { import_path, .. }) = self.shaders.get(&handle) {
                    self.composer
                        .remove_composable_module(&import_path.module_name());
                }
            }
        }
        pipelines_to_queue
    }
    /// Inserts or replaces the shader asset `id`, wiring up import/dependent links in
    /// both directions, and returns the pipelines invalidated by the change.
    pub fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) -> Vec<CachedPipelineId> {
        let pipelines_to_queue = self.clear(id);
        let path = shader.import_path();
        self.import_path_shaders.insert(path.clone(), id);
        if let Some(waiting_shaders) = self.waiting_on_import.get_mut(path) {
            for waiting_shader in waiting_shaders.drain(..) {
                // resolve waiting shader import
                let data = self.data.entry(waiting_shader).or_default();
                data.resolved_imports.insert(path.clone(), id);
                // add waiting shader as dependent of this shader
                let data = self.data.entry(id).or_default();
                data.dependents.insert(waiting_shader);
            }
        }
        for import in shader.imports() {
            if let Some(import_id) = self.import_path_shaders.get(import).copied() {
                // resolve import because it is currently available
                let data = self.data.entry(id).or_default();
                data.resolved_imports.insert(import.clone(), import_id);
                // add this shader as a dependent of the import
                let data = self.data.entry(import_id).or_default();
                data.dependents.insert(id);
            } else {
                let waiting = self.waiting_on_import.entry(import.clone()).or_default();
                waiting.push(id);
            }
        }
        #[cfg(feature = "shader_format_wesl")]
        if let Source::Wesl(_) = shader.source
            && let ShaderImport::AssetPath(path) = shader.import_path()
        {
            self.module_path_to_asset_id
                .insert(wesl::syntax::ModulePath::from_path(path), id);
        }
        self.shaders.insert(id, shader);
        pipelines_to_queue
    }
    /// Removes the shader asset `id` and returns the pipelines invalidated by the
    /// removal.
    pub fn remove(&mut self, id: AssetId<Shader>) -> Vec<CachedPipelineId> {
        let pipelines_to_queue = self.clear(id);
        if let Some(shader) = self.shaders.remove(&id) {
            self.import_path_shaders.remove(shader.import_path());
        }
        pipelines_to_queue
    }
}
/// Resolves wesl module paths to shader source text held by the [`ShaderCache`].
#[cfg(feature = "shader_format_wesl")]
pub struct ShaderResolver<'a> {
    // Maps wesl module paths to the asset providing that module.
    module_path_to_asset_id: &'a HashMap<wesl::syntax::ModulePath, AssetId<Shader>>,
    shaders: &'a HashMap<AssetId<Shader>, Shader>,
}
#[cfg(feature = "shader_format_wesl")]
impl<'a> ShaderResolver<'a> {
    /// Creates a resolver borrowing the cache's lookup tables.
    pub fn new(
        module_path_to_asset_id: &'a HashMap<wesl::syntax::ModulePath, AssetId<Shader>>,
        shaders: &'a HashMap<AssetId<Shader>, Shader>,
    ) -> Self {
        Self {
            module_path_to_asset_id,
            shaders,
        }
    }
}
#[cfg(feature = "shader_format_wesl")]
impl<'a> wesl::Resolver for ShaderResolver<'a> {
    // Look up a wesl module path and return the borrowed source of its shader asset.
    fn resolve_source(
        &self,
        module_path: &wesl::syntax::ModulePath,
    ) -> Result<alloc::borrow::Cow<'_, str>, wesl::ResolveError> {
        let asset_id = self
            .module_path_to_asset_id
            .get(module_path)
            .ok_or_else(|| {
                wesl::ResolveError::ModuleNotFound(
                    module_path.clone(),
                    "Invalid asset id".to_string(),
                )
            })?;
        // NOTE(review): assumes `module_path_to_asset_id` and `shaders` are kept in
        // sync; a stale entry would panic here — confirm that invariant holds.
        let shader = self.shaders.get(asset_id).unwrap();
        Ok(alloc::borrow::Cow::Borrowed(shader.source.as_str()))
    }
}
/// Type of error returned by a `PipelineCache` when the creation of a GPU pipeline object failed.
#[cfg_attr(
    not(target_arch = "wasm32"),
    expect(
        clippy::large_enum_variant,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )
)]
#[derive(Error, Debug)]
pub enum PipelineCacheError {
    /// The referenced shader asset is not (yet) present in the cache.
    #[error(
        "Pipeline could not be compiled because the following shader could not be loaded: {0:?}"
    )]
    ShaderNotLoaded(AssetId<Shader>),
    /// Shader composition (naga_oil) failed.
    #[error(transparent)]
    ProcessShaderError(#[from] naga_oil::compose::ComposerError),
    /// A (transitive) import of the shader has not been loaded yet; retry later.
    #[error("Shader import not yet available.")]
    ShaderImportNotYetAvailable,
    /// The backend failed to create a shader module from the processed source.
    #[error("Could not create shader module: {0}")]
    CreateShaderModule(String),
}
// TODO: This needs to be kept up to date with the capabilities in the `create_validator` function in wgpu-core
// https://github.com/gfx-rs/wgpu/blob/trunk/wgpu-core/src/device/mod.rs#L449
// We can't use the `wgpu-core` function to detect the device's capabilities because `wgpu-core` isn't included in WebGPU builds.
/// Get the device's capabilities for use in `naga_oil`.
///
/// Translates wgpu `Features`/`DownlevelFlags` into the naga `Capabilities` bitset,
/// one flag at a time; each `set` call mirrors a wgpu-core mapping.
fn get_capabilities(features: Features, downlevel: DownlevelFlags) -> Capabilities {
    let mut capabilities = Capabilities::empty();
    capabilities.set(
        Capabilities::PUSH_CONSTANT,
        features.contains(Features::PUSH_CONSTANTS),
    );
    capabilities.set(
        Capabilities::FLOAT64,
        features.contains(Features::SHADER_F64),
    );
    capabilities.set(
        Capabilities::PRIMITIVE_INDEX,
        features.contains(Features::SHADER_PRIMITIVE_INDEX),
    );
    capabilities.set(
        Capabilities::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
        features.contains(Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
    );
    capabilities.set(
        Capabilities::STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
        features.contains(Features::STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING),
    );
    capabilities.set(
        Capabilities::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
        features.contains(Features::UNIFORM_BUFFER_BINDING_ARRAYS),
    );
    // TODO: This needs a proper wgpu feature
    capabilities.set(
        Capabilities::SAMPLER_NON_UNIFORM_INDEXING,
        features.contains(Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
    );
    capabilities.set(
        Capabilities::STORAGE_TEXTURE_16BIT_NORM_FORMATS,
        features.contains(Features::TEXTURE_FORMAT_16BIT_NORM),
    );
    capabilities.set(
        Capabilities::MULTIVIEW,
        features.contains(Features::MULTIVIEW),
    );
    capabilities.set(
        Capabilities::EARLY_DEPTH_TEST,
        features.contains(Features::SHADER_EARLY_DEPTH_TEST),
    );
    capabilities.set(
        Capabilities::SHADER_INT64,
        features.contains(Features::SHADER_INT64),
    );
    capabilities.set(
        Capabilities::SHADER_INT64_ATOMIC_MIN_MAX,
        features.intersects(
            Features::SHADER_INT64_ATOMIC_MIN_MAX | Features::SHADER_INT64_ATOMIC_ALL_OPS,
        ),
    );
    capabilities.set(
        Capabilities::SHADER_INT64_ATOMIC_ALL_OPS,
        features.contains(Features::SHADER_INT64_ATOMIC_ALL_OPS),
    );
    capabilities.set(
        Capabilities::MULTISAMPLED_SHADING,
        downlevel.contains(DownlevelFlags::MULTISAMPLED_SHADING),
    );
    capabilities.set(
        Capabilities::RAY_QUERY,
        features.contains(Features::EXPERIMENTAL_RAY_QUERY),
    );
    capabilities.set(
        Capabilities::DUAL_SOURCE_BLENDING,
        features.contains(Features::DUAL_SOURCE_BLENDING),
    );
    capabilities.set(
        Capabilities::CLIP_DISTANCE,
        features.contains(Features::CLIP_DISTANCES),
    );
    capabilities.set(
        Capabilities::CUBE_ARRAY_TEXTURES,
        downlevel.contains(DownlevelFlags::CUBE_ARRAY_TEXTURES),
    );
    capabilities.set(
        Capabilities::SUBGROUP,
        features.intersects(Features::SUBGROUP | Features::SUBGROUP_VERTEX),
    );
    capabilities.set(
        Capabilities::SUBGROUP_BARRIER,
        features.intersects(Features::SUBGROUP_BARRIER),
    );
    capabilities.set(
        Capabilities::SUBGROUP_VERTEX_STAGE,
        features.contains(Features::SUBGROUP_VERTEX),
    );
    capabilities.set(
        Capabilities::SHADER_FLOAT32_ATOMIC,
        features.contains(Features::SHADER_FLOAT32_ATOMIC),
    );
    capabilities.set(
        Capabilities::TEXTURE_ATOMIC,
        features.contains(Features::TEXTURE_ATOMIC),
    );
    capabilities.set(
        Capabilities::TEXTURE_INT64_ATOMIC,
        features.contains(Features::TEXTURE_INT64_ATOMIC),
    );
    capabilities.set(
        Capabilities::SHADER_FLOAT16,
        features.contains(Features::SHADER_F16),
    );
    capabilities.set(
        Capabilities::RAY_HIT_VERTEX_POSITION,
        features.intersects(Features::EXPERIMENTAL_RAY_HIT_VERTEX_RETURN),
    );
    capabilities
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_shader/src/shader.rs | crates/bevy_shader/src/shader.rs | use super::ShaderDefVal;
use alloc::borrow::Cow;
use bevy_asset::{io::Reader, Asset, AssetLoader, AssetPath, Handle, LoadContext};
use bevy_reflect::TypePath;
use core::{marker::Copy, num::NonZero};
use thiserror::Error;
/// Process-unique identifier for a shader, allocated from a shared atomic counter.
#[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)]
pub struct ShaderId(NonZero<u32>);
impl ShaderId {
    #[expect(
        clippy::new_without_default,
        reason = "Implementing the `Default` trait on atomic IDs would imply that two `<AtomicIdType>::default()` equal each other. By only implementing `new()`, we indicate that each atomic ID created will be unique."
    )]
    pub fn new() -> Self {
        use core::sync::atomic::{AtomicU32, Ordering};
        // Starts at 1 so every allocated value fits `NonZero<u32>` until the
        // counter wraps around to 0 after u32::MAX allocations.
        static COUNTER: AtomicU32 = AtomicU32::new(1);
        let raw = COUNTER.fetch_add(1, Ordering::Relaxed);
        match NonZero::<u32>::new(raw) {
            Some(id) => Self(id),
            // Only reachable once the counter has wrapped back to zero.
            None => panic!("The system ran out of unique `{}`s.", stringify!(ShaderId)),
        }
    }
}
impl From<ShaderId> for NonZero<u32> {
    fn from(value: ShaderId) -> Self {
        let ShaderId(inner) = value;
        inner
    }
}
impl From<NonZero<u32>> for ShaderId {
    fn from(value: NonZero<u32>) -> Self {
        ShaderId(value)
    }
}
/// Errors that can occur while parsing or validating shader source with naga.
#[derive(Error, Debug)]
pub enum ShaderReflectError {
    /// WGSL front-end parse failure.
    #[error(transparent)]
    WgslParse(#[from] naga::front::wgsl::ParseError),
    /// GLSL front-end parse failure (can report multiple errors at once).
    #[cfg(feature = "shader_format_glsl")]
    #[error("GLSL Parse Error: {0:?}")]
    GlslParse(Vec<naga::front::glsl::Error>),
    /// SPIR-V front-end parse failure.
    #[cfg(feature = "shader_format_spirv")]
    #[error(transparent)]
    SpirVParse(#[from] naga::front::spv::Error),
    /// The parsed module failed naga validation.
    #[error(transparent)]
    Validation(#[from] naga::WithSpan<naga::valid::ValidationError>),
}
/// Describes whether or not to perform runtime checks on shaders.
/// Runtime checks can be enabled for safety at the cost of speed.
/// By default no runtime checks will be performed.
///
/// # Panics
/// Because no runtime checks are performed for spirv,
/// enabling `ValidateShader` for spirv will cause a panic
#[derive(Clone, Debug, Default)]
pub enum ValidateShader {
    #[default]
    /// No runtime checks for soundness (e.g. bound checking) are performed.
    ///
    /// This is suitable for trusted shaders, written by your program or dependencies you trust.
    Disabled,
    /// Enables runtime checks for soundness (e.g. bound checking).
    ///
    /// While this can have a meaningful impact on performance,
    /// this setting should *always* be enabled when loading untrusted shaders.
    /// This might occur if you are creating a shader playground, running user-generated shaders
    /// (as in `VRChat`), or writing a web browser in Bevy.
    Enabled,
}
/// An "unprocessed" shader. It can contain preprocessor directives.
#[derive(Asset, TypePath, Debug, Clone)]
pub struct Shader {
    // Asset path this shader was loaded from (or a synthetic path for inline shaders).
    pub path: String,
    pub source: Source,
    // The import path other shaders use to reference this module.
    pub import_path: ShaderImport,
    // Imports declared in the source string.
    pub imports: Vec<ShaderImport>,
    // extra imports not specified in the source string
    pub additional_imports: Vec<naga_oil::compose::ImportDefinition>,
    // any shader defs that will be included when this module is used
    pub shader_defs: Vec<ShaderDefVal>,
    // we must store strong handles to our dependencies to stop them
    // from being immediately dropped if we are the only user.
    pub file_dependencies: Vec<Handle<Shader>>,
    /// Enable or disable runtime shader validation, trading safety against speed.
    ///
    /// Please read the [`ValidateShader`] docs for a discussion of the tradeoffs involved.
    pub validate_shader: ValidateShader,
}
impl Shader {
    // Extracts the module's own import path (falling back to its asset path) and the
    // list of modules it imports, using naga_oil's preprocessor.
    fn preprocess(source: &str, path: &str) -> (ShaderImport, Vec<ShaderImport>) {
        let (import_path, imports, _) = naga_oil::compose::get_preprocessor_data(source);
        let import_path = import_path
            .map(ShaderImport::Custom)
            .unwrap_or_else(|| ShaderImport::AssetPath(path.to_owned()));
        let imports = imports
            .into_iter()
            .map(|import| {
                // Quoted imports (`#import "some/path.wgsl"`) reference assets by
                // path; unquoted ones reference named modules.
                if import.import.starts_with('\"') {
                    let import = import
                        .import
                        .chars()
                        .skip(1)
                        .take_while(|c| *c != '\"')
                        .collect();
                    ShaderImport::AssetPath(import)
                } else {
                    ShaderImport::Custom(import.import)
                }
            })
            .collect();
        (import_path, imports)
    }
pub fn from_wgsl(source: impl Into<Cow<'static, str>>, path: impl Into<String>) -> Shader {
let source = source.into();
let path = path.into();
let (import_path, imports) = Shader::preprocess(&source, &path);
Shader {
path,
imports,
import_path,
source: Source::Wgsl(source),
additional_imports: Default::default(),
shader_defs: Default::default(),
file_dependencies: Default::default(),
validate_shader: ValidateShader::Disabled,
}
}
pub fn from_wgsl_with_defs(
source: impl Into<Cow<'static, str>>,
path: impl Into<String>,
shader_defs: Vec<ShaderDefVal>,
) -> Shader {
Self {
shader_defs,
..Self::from_wgsl(source, path)
}
}
pub fn from_glsl(
source: impl Into<Cow<'static, str>>,
stage: naga::ShaderStage,
path: impl Into<String>,
) -> Shader {
let source = source.into();
let path = path.into();
let (import_path, imports) = Shader::preprocess(&source, &path);
Shader {
path,
imports,
import_path,
source: Source::Glsl(source, stage),
additional_imports: Default::default(),
shader_defs: Default::default(),
file_dependencies: Default::default(),
validate_shader: ValidateShader::Disabled,
}
}
pub fn from_spirv(source: impl Into<Cow<'static, [u8]>>, path: impl Into<String>) -> Shader {
let path = path.into();
Shader {
path: path.clone(),
imports: Vec::new(),
import_path: ShaderImport::AssetPath(path),
source: Source::SpirV(source.into()),
additional_imports: Default::default(),
shader_defs: Default::default(),
file_dependencies: Default::default(),
validate_shader: ValidateShader::Disabled,
}
}
#[cfg(feature = "shader_format_wesl")]
pub fn from_wesl(source: impl Into<Cow<'static, str>>, path: impl Into<String>) -> Shader {
let source = source.into();
let path = path.into();
let (import_path, imports) = Shader::preprocess(&source, &path);
match import_path {
ShaderImport::AssetPath(asset_path) => {
// Create the shader import path - always starting with "/"
let shader_path = std::path::Path::new("/").join(&asset_path);
// Convert to a string with forward slashes and without extension
let import_path_str = shader_path
.with_extension("")
.to_string_lossy()
.replace('\\', "/");
let import_path = ShaderImport::AssetPath(import_path_str.to_string());
Shader {
path,
imports,
import_path,
source: Source::Wesl(source),
additional_imports: Default::default(),
shader_defs: Default::default(),
file_dependencies: Default::default(),
validate_shader: ValidateShader::Disabled,
}
}
ShaderImport::Custom(_) => {
panic!("Wesl shaders must be imported from an asset path");
}
}
}
pub fn set_import_path<P: Into<String>>(&mut self, import_path: P) {
self.import_path = ShaderImport::Custom(import_path.into());
}
#[must_use]
pub fn with_import_path<P: Into<String>>(mut self, import_path: P) -> Self {
self.set_import_path(import_path);
self
}
#[inline]
pub fn import_path(&self) -> &ShaderImport {
&self.import_path
}
pub fn imports(&self) -> impl ExactSizeIterator<Item = &ShaderImport> {
self.imports.iter()
}
}
impl<'a> From<&'a Shader> for naga_oil::compose::ComposableModuleDescriptor<'a> {
fn from(shader: &'a Shader) -> Self {
let shader_defs = shader
.shader_defs
.iter()
.map(|def| match def {
ShaderDefVal::Bool(name, b) => {
(name.clone(), naga_oil::compose::ShaderDefValue::Bool(*b))
}
ShaderDefVal::Int(name, i) => {
(name.clone(), naga_oil::compose::ShaderDefValue::Int(*i))
}
ShaderDefVal::UInt(name, i) => {
(name.clone(), naga_oil::compose::ShaderDefValue::UInt(*i))
}
})
.collect();
let as_name = match &shader.import_path {
ShaderImport::AssetPath(asset_path) => Some(format!("\"{asset_path}\"")),
ShaderImport::Custom(_) => None,
};
naga_oil::compose::ComposableModuleDescriptor {
source: shader.source.as_str(),
file_path: &shader.path,
language: (&shader.source).into(),
additional_imports: &shader.additional_imports,
shader_defs,
as_name,
}
}
}
impl<'a> From<&'a Shader> for naga_oil::compose::NagaModuleDescriptor<'a> {
fn from(shader: &'a Shader) -> Self {
naga_oil::compose::NagaModuleDescriptor {
source: shader.source.as_str(),
file_path: &shader.path,
shader_type: (&shader.source).into(),
..Default::default()
}
}
}
#[derive(Debug, Clone)]
pub enum Source {
Wgsl(Cow<'static, str>),
Wesl(Cow<'static, str>),
Glsl(Cow<'static, str>, naga::ShaderStage),
SpirV(Cow<'static, [u8]>),
// TODO: consider the following
// PrecompiledSpirVMacros(HashMap<HashSet<String>, Vec<u32>>)
// NagaModule(Module) ... Module impls Serialize/Deserialize
}
impl Source {
pub fn as_str(&self) -> &str {
match self {
Source::Wgsl(s) | Source::Wesl(s) | Source::Glsl(s, _) => s,
Source::SpirV(_) => panic!("spirv not yet implemented"),
}
}
}
impl From<&Source> for naga_oil::compose::ShaderLanguage {
fn from(value: &Source) -> Self {
match value {
Source::Wgsl(_) => naga_oil::compose::ShaderLanguage::Wgsl,
#[cfg(any(feature = "shader_format_glsl", target_arch = "wasm32"))]
Source::Glsl(_, _) => naga_oil::compose::ShaderLanguage::Glsl,
#[cfg(all(not(feature = "shader_format_glsl"), not(target_arch = "wasm32")))]
Source::Glsl(_, _) => panic!(
"GLSL is not supported in this configuration; use the feature `shader_format_glsl`"
),
Source::SpirV(_) => panic!("spirv not yet implemented"),
Source::Wesl(_) => panic!("wesl not yet implemented"),
}
}
}
impl From<&Source> for naga_oil::compose::ShaderType {
fn from(value: &Source) -> Self {
match value {
Source::Wgsl(_) => naga_oil::compose::ShaderType::Wgsl,
#[cfg(any(feature = "shader_format_glsl", target_arch = "wasm32"))]
Source::Glsl(_, shader_stage) => match shader_stage {
naga::ShaderStage::Vertex => naga_oil::compose::ShaderType::GlslVertex,
naga::ShaderStage::Fragment => naga_oil::compose::ShaderType::GlslFragment,
naga::ShaderStage::Compute => panic!("glsl compute not yet implemented"),
naga::ShaderStage::Task => panic!("task shaders not yet implemented"),
naga::ShaderStage::Mesh => panic!("mesh shaders not yet implemented"),
},
#[cfg(all(not(feature = "shader_format_glsl"), not(target_arch = "wasm32")))]
Source::Glsl(_, _) => panic!(
"GLSL is not supported in this configuration; use the feature `shader_format_glsl`"
),
Source::SpirV(_) => panic!("spirv not yet implemented"),
Source::Wesl(_) => panic!("wesl not yet implemented"),
}
}
}
#[derive(Default, TypePath)]
pub struct ShaderLoader;
#[non_exhaustive]
#[derive(Debug, Error)]
pub enum ShaderLoaderError {
#[error("Could not load shader: {0}")]
Io(#[from] std::io::Error),
#[error("Could not parse shader: {0}")]
Parse(#[from] alloc::string::FromUtf8Error),
}
/// Settings for loading shaders.
#[derive(serde::Serialize, serde::Deserialize, Debug, Default)]
pub struct ShaderSettings {
/// The `#define` specified for this shader.
pub shader_defs: Vec<ShaderDefVal>,
}
impl AssetLoader for ShaderLoader {
type Asset = Shader;
type Settings = ShaderSettings;
type Error = ShaderLoaderError;
async fn load(
&self,
reader: &mut dyn Reader,
settings: &Self::Settings,
load_context: &mut LoadContext<'_>,
) -> Result<Shader, Self::Error> {
let ext = load_context
.path()
.path()
.extension()
.unwrap()
.to_str()
.unwrap();
let path = load_context.path().to_string();
// On windows, the path will inconsistently use \ or /.
// TODO: remove this once AssetPath forces cross-platform "slash" consistency. See #10511
let path = path.replace(std::path::MAIN_SEPARATOR, "/");
let mut bytes = Vec::new();
reader.read_to_end(&mut bytes).await?;
if ext != "wgsl" && !settings.shader_defs.is_empty() {
tracing::warn!(
"Tried to load a non-wgsl shader with shader defs, this isn't supported: \
The shader defs will be ignored."
);
}
let mut shader = match ext {
"spv" => Shader::from_spirv(bytes, load_context.path().path().to_string_lossy()),
"wgsl" => Shader::from_wgsl_with_defs(
String::from_utf8(bytes)?,
path,
settings.shader_defs.clone(),
),
"vert" => Shader::from_glsl(String::from_utf8(bytes)?, naga::ShaderStage::Vertex, path),
"frag" => {
Shader::from_glsl(String::from_utf8(bytes)?, naga::ShaderStage::Fragment, path)
}
"comp" => {
Shader::from_glsl(String::from_utf8(bytes)?, naga::ShaderStage::Compute, path)
}
#[cfg(feature = "shader_format_wesl")]
"wesl" => Shader::from_wesl(String::from_utf8(bytes)?, path),
_ => panic!("unhandled extension: {ext}"),
};
// collect and store file dependencies
for import in &shader.imports {
if let ShaderImport::AssetPath(asset_path) = import {
shader.file_dependencies.push(load_context.load(asset_path));
}
}
Ok(shader)
}
fn extensions(&self) -> &[&str] {
&["spv", "wgsl", "vert", "frag", "comp", "wesl"]
}
}
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub enum ShaderImport {
AssetPath(String),
Custom(String),
}
impl ShaderImport {
pub fn module_name(&self) -> Cow<'_, String> {
match self {
ShaderImport::AssetPath(s) => Cow::Owned(format!("\"{s}\"")),
ShaderImport::Custom(s) => Cow::Borrowed(s),
}
}
}
/// A reference to a shader asset.
#[derive(Default)]
pub enum ShaderRef {
/// Use the "default" shader for the current context.
#[default]
Default,
/// A handle to a shader stored in the [`Assets<Shader>`](bevy_asset::Assets) resource
Handle(Handle<Shader>),
/// An asset path leading to a shader
Path(AssetPath<'static>),
}
impl From<Handle<Shader>> for ShaderRef {
fn from(handle: Handle<Shader>) -> Self {
Self::Handle(handle)
}
}
impl From<AssetPath<'static>> for ShaderRef {
fn from(path: AssetPath<'static>) -> Self {
Self::Path(path)
}
}
impl From<&'static str> for ShaderRef {
fn from(path: &'static str) -> Self {
Self::Path(AssetPath::from(path))
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/lib.rs | crates/bevy_pbr/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
extern crate alloc;
#[cfg(feature = "meshlet")]
mod meshlet;
pub mod wireframe;
/// Experimental features that are not yet finished. Please report any issues you encounter!
///
/// Expect bugs, missing features, compatibility issues, low performance, and/or future breaking changes.
#[cfg(feature = "meshlet")]
pub mod experimental {
/// Render high-poly 3d meshes using an efficient GPU-driven method.
/// See [`MeshletPlugin`](meshlet::MeshletPlugin) and [`MeshletMesh`](meshlet::MeshletMesh) for details.
pub mod meshlet {
pub use crate::meshlet::*;
}
}
mod atmosphere;
mod cluster;
mod components;
pub mod decal;
pub mod deferred;
pub mod diagnostic;
mod extended_material;
mod fog;
mod light_probe;
mod lightmap;
mod material;
mod material_bind_groups;
mod medium;
mod mesh_material;
mod parallax;
mod pbr_material;
mod prepass;
mod render;
mod ssao;
mod ssr;
mod volumetric_fog;
use bevy_color::{Color, LinearRgba};
pub use atmosphere::*;
use bevy_light::{
AmbientLight, DirectionalLight, PointLight, ShadowFilteringMethod, SimulationLightSystems,
SpotLight,
};
use bevy_shader::{load_shader_library, ShaderRef};
pub use cluster::*;
pub use components::*;
pub use decal::clustered::ClusteredDecalPlugin;
pub use extended_material::*;
pub use fog::*;
pub use light_probe::*;
pub use lightmap::*;
pub use material::*;
pub use material_bind_groups::*;
pub use medium::*;
pub use mesh_material::*;
pub use parallax::*;
pub use pbr_material::*;
pub use prepass::*;
pub use render::*;
pub use ssao::*;
pub use ssr::*;
pub use volumetric_fog::VolumetricFogPlugin;
/// The PBR prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::{
fog::{DistanceFog, FogFalloff},
material::{Material, MaterialPlugin},
mesh_material::MeshMaterial3d,
parallax::ParallaxMappingMethod,
pbr_material::StandardMaterial,
ssao::ScreenSpaceAmbientOcclusionPlugin,
};
}
pub mod graph {
use bevy_render::render_graph::RenderLabel;
/// Render graph nodes specific to 3D PBR rendering.
#[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
pub enum NodePbr {
/// Label for the shadow pass node that draws meshes that were visible
/// from the light last frame.
EarlyShadowPass,
/// Label for the shadow pass node that draws meshes that became visible
/// from the light this frame.
LateShadowPass,
/// Label for the screen space ambient occlusion render node.
ScreenSpaceAmbientOcclusion,
DeferredLightingPass,
/// Label for the volumetric lighting pass.
VolumetricFog,
/// Label for the shader that transforms and culls meshes that were
/// visible last frame.
EarlyGpuPreprocess,
/// Label for the shader that transforms and culls meshes that became
/// visible this frame.
LateGpuPreprocess,
/// Label for the screen space reflections pass.
ScreenSpaceReflections,
/// Label for the node that builds indirect draw parameters for meshes
/// that were visible last frame.
EarlyPrepassBuildIndirectParameters,
/// Label for the node that builds indirect draw parameters for meshes
/// that became visible this frame.
LatePrepassBuildIndirectParameters,
/// Label for the node that builds indirect draw parameters for the main
/// rendering pass, containing all meshes that are visible this frame.
MainBuildIndirectParameters,
ClearIndirectParametersMetadata,
}
}
use crate::{deferred::DeferredPbrLightingPlugin, graph::NodePbr};
use bevy_app::prelude::*;
use bevy_asset::{AssetApp, AssetPath, Assets, Handle, RenderAssetUsages};
use bevy_core_pipeline::core_3d::graph::{Core3d, Node3d};
use bevy_ecs::prelude::*;
#[cfg(feature = "bluenoise_texture")]
use bevy_image::{CompressedImageFormats, ImageType};
use bevy_image::{Image, ImageSampler};
use bevy_render::{
alpha::AlphaMode,
camera::sort_cameras,
extract_resource::ExtractResourcePlugin,
render_graph::RenderGraph,
render_resource::{
Extent3d, TextureDataOrder, TextureDescriptor, TextureDimension, TextureFormat,
TextureUsages,
},
sync_component::SyncComponentPlugin,
ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderStartup, RenderSystems,
};
use std::path::PathBuf;
fn shader_ref(path: PathBuf) -> ShaderRef {
ShaderRef::Path(AssetPath::from_path_buf(path).with_source("embedded"))
}
pub const TONEMAPPING_LUT_TEXTURE_BINDING_INDEX: u32 = 18;
pub const TONEMAPPING_LUT_SAMPLER_BINDING_INDEX: u32 = 19;
/// Sets up the entire PBR infrastructure of bevy.
pub struct PbrPlugin {
/// Controls if the prepass is enabled for the [`StandardMaterial`].
/// For more information about what a prepass is, see the [`bevy_core_pipeline::prepass`] docs.
pub prepass_enabled: bool,
/// Controls if [`DeferredPbrLightingPlugin`] is added.
pub add_default_deferred_lighting_plugin: bool,
/// Controls if GPU [`MeshUniform`] building is enabled.
///
/// This requires compute shader support and so will be forcibly disabled if
/// the platform doesn't support those.
pub use_gpu_instance_buffer_builder: bool,
/// Debugging flags that can optionally be set when constructing the renderer.
pub debug_flags: RenderDebugFlags,
}
impl Default for PbrPlugin {
fn default() -> Self {
Self {
prepass_enabled: true,
add_default_deferred_lighting_plugin: true,
use_gpu_instance_buffer_builder: true,
debug_flags: RenderDebugFlags::default(),
}
}
}
/// A resource that stores the spatio-temporal blue noise texture.
#[derive(Resource)]
pub struct Bluenoise {
/// Texture handle for spatio-temporal blue noise
pub texture: Handle<Image>,
}
impl Plugin for PbrPlugin {
fn build(&self, app: &mut App) {
load_shader_library!(app, "render/pbr_types.wgsl");
load_shader_library!(app, "render/pbr_bindings.wgsl");
load_shader_library!(app, "render/utils.wgsl");
load_shader_library!(app, "render/clustered_forward.wgsl");
load_shader_library!(app, "render/pbr_lighting.wgsl");
load_shader_library!(app, "render/pbr_transmission.wgsl");
load_shader_library!(app, "render/shadows.wgsl");
load_shader_library!(app, "deferred/pbr_deferred_types.wgsl");
load_shader_library!(app, "deferred/pbr_deferred_functions.wgsl");
load_shader_library!(app, "render/shadow_sampling.wgsl");
load_shader_library!(app, "render/pbr_functions.wgsl");
load_shader_library!(app, "render/rgb9e5.wgsl");
load_shader_library!(app, "render/pbr_ambient.wgsl");
load_shader_library!(app, "render/pbr_fragment.wgsl");
load_shader_library!(app, "render/pbr.wgsl");
load_shader_library!(app, "render/pbr_prepass_functions.wgsl");
load_shader_library!(app, "render/pbr_prepass.wgsl");
load_shader_library!(app, "render/parallax_mapping.wgsl");
load_shader_library!(app, "render/view_transformations.wgsl");
// Setup dummy shaders for when MeshletPlugin is not used to prevent shader import errors.
load_shader_library!(app, "meshlet/dummy_visibility_buffer_resolve.wgsl");
app.register_asset_reflect::<StandardMaterial>()
.init_resource::<DefaultOpaqueRendererMethod>()
.add_plugins((
MeshRenderPlugin {
use_gpu_instance_buffer_builder: self.use_gpu_instance_buffer_builder,
debug_flags: self.debug_flags,
},
MaterialsPlugin {
debug_flags: self.debug_flags,
},
MaterialPlugin::<StandardMaterial> {
debug_flags: self.debug_flags,
..Default::default()
},
ScreenSpaceAmbientOcclusionPlugin,
FogPlugin,
ExtractResourcePlugin::<DefaultOpaqueRendererMethod>::default(),
SyncComponentPlugin::<ShadowFilteringMethod>::default(),
LightmapPlugin,
LightProbePlugin,
GpuMeshPreprocessPlugin {
use_gpu_instance_buffer_builder: self.use_gpu_instance_buffer_builder,
},
VolumetricFogPlugin,
ScreenSpaceReflectionsPlugin,
ClusteredDecalPlugin,
))
.add_plugins((
decal::ForwardDecalPlugin,
SyncComponentPlugin::<DirectionalLight>::default(),
SyncComponentPlugin::<PointLight>::default(),
SyncComponentPlugin::<SpotLight>::default(),
SyncComponentPlugin::<AmbientLight>::default(),
))
.add_plugins((ScatteringMediumPlugin, AtmospherePlugin))
.configure_sets(
PostUpdate,
(
SimulationLightSystems::AddClusters,
SimulationLightSystems::AssignLightsToClusters,
)
.chain(),
);
if self.add_default_deferred_lighting_plugin {
app.add_plugins(DeferredPbrLightingPlugin);
}
// Initialize the default material handle.
app.world_mut()
.resource_mut::<Assets<StandardMaterial>>()
.insert(
&Handle::<StandardMaterial>::default(),
StandardMaterial {
base_color: Color::srgb(1.0, 0.0, 0.5),
..Default::default()
},
)
.unwrap();
let has_bluenoise = app
.get_sub_app(RenderApp)
.is_some_and(|render_app| render_app.world().is_resource_added::<Bluenoise>());
if !has_bluenoise {
let mut images = app.world_mut().resource_mut::<Assets<Image>>();
#[cfg(feature = "bluenoise_texture")]
let handle = {
let image = Image::from_buffer(
include_bytes!("bluenoise/stbn.ktx2"),
ImageType::Extension("ktx2"),
CompressedImageFormats::NONE,
false,
ImageSampler::Default,
RenderAssetUsages::RENDER_WORLD,
)
.expect("Failed to decode embedded blue-noise texture");
images.add(image)
};
#[cfg(not(feature = "bluenoise_texture"))]
let handle = { images.add(stbn_placeholder()) };
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.world_mut()
.insert_resource(Bluenoise { texture: handle });
}
}
let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
// Extract the required data from the main world
render_app
.add_systems(
RenderStartup,
(
init_shadow_samplers,
init_global_clusterable_object_meta,
init_fallback_bindless_resources,
),
)
.add_systems(
ExtractSchedule,
(
extract_clusters,
extract_lights,
extract_ambient_light_resource,
extract_ambient_light,
extract_shadow_filtering_method,
late_sweep_material_instances,
),
)
.add_systems(
Render,
(
prepare_lights
.in_set(RenderSystems::ManageViews)
.after(sort_cameras),
prepare_clusters.in_set(RenderSystems::PrepareResources),
),
)
.init_resource::<LightMeta>()
.init_resource::<RenderMaterialBindings>();
render_app.world_mut().add_observer(add_light_view_entities);
render_app
.world_mut()
.add_observer(remove_light_view_entities);
render_app.world_mut().add_observer(extracted_light_removed);
let early_shadow_pass_node = EarlyShadowPassNode::from_world(render_app.world_mut());
let late_shadow_pass_node = LateShadowPassNode::from_world(render_app.world_mut());
let mut graph = render_app.world_mut().resource_mut::<RenderGraph>();
let draw_3d_graph = graph.get_sub_graph_mut(Core3d).unwrap();
draw_3d_graph.add_node(NodePbr::EarlyShadowPass, early_shadow_pass_node);
draw_3d_graph.add_node(NodePbr::LateShadowPass, late_shadow_pass_node);
draw_3d_graph.add_node_edges((
NodePbr::EarlyShadowPass,
NodePbr::LateShadowPass,
Node3d::StartMainPass,
));
}
fn finish(&self, app: &mut App) {
let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
let global_cluster_settings = make_global_cluster_settings(render_app.world());
app.insert_resource(global_cluster_settings);
}
}
pub fn stbn_placeholder() -> Image {
let format = TextureFormat::Rgba8Unorm;
let data = vec![255, 0, 255, 255];
Image {
data: Some(data),
data_order: TextureDataOrder::default(),
texture_descriptor: TextureDescriptor {
size: Extent3d::default(),
format,
dimension: TextureDimension::D2,
label: None,
mip_level_count: 1,
sample_count: 1,
usage: TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
sampler: ImageSampler::Default,
texture_view_descriptor: None,
asset_usage: RenderAssetUsages::RENDER_WORLD,
copy_on_resize: false,
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/pbr_material.rs | crates/bevy_pbr/src/pbr_material.rs | use bevy_asset::Asset;
use bevy_color::{Alpha, ColorToComponents};
use bevy_math::{Affine2, Affine3, Mat2, Mat3, Vec2, Vec3, Vec4};
use bevy_mesh::MeshVertexBufferLayoutRef;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{render_asset::RenderAssets, render_resource::*, texture::GpuImage};
use bitflags::bitflags;
use crate::{deferred::DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID, *};
/// An enum to define which UV attribute to use for a texture.
///
/// It is used for every texture in the [`StandardMaterial`].
/// It only supports two UV attributes, [`bevy_mesh::Mesh::ATTRIBUTE_UV_0`] and
/// [`bevy_mesh::Mesh::ATTRIBUTE_UV_1`].
/// The default is [`UvChannel::Uv0`].
#[derive(Reflect, Default, Debug, Clone, PartialEq, Eq)]
#[reflect(Default, Debug, Clone, PartialEq)]
pub enum UvChannel {
#[default]
Uv0,
Uv1,
}
/// A material with "standard" properties used in PBR lighting.
/// Standard property values with pictures here:
/// <https://google.github.io/filament/notes/material_properties.html>.
///
/// May be created directly from a [`Color`] or an [`Image`].
#[derive(Asset, AsBindGroup, Reflect, Debug, Clone)]
#[bind_group_data(StandardMaterialKey)]
#[data(0, StandardMaterialUniform, binding_array(10))]
#[bindless(index_table(range(0..31)))]
#[reflect(Default, Debug, Clone)]
pub struct StandardMaterial {
/// The color of the surface of the material before lighting.
///
/// Doubles as diffuse albedo for non-metallic, specular for metallic and a mix for everything
/// in between. If used together with a `base_color_texture`, this is factored into the final
/// base color as `base_color * base_color_texture_value`.
///
/// Defaults to [`Color::WHITE`].
pub base_color: Color,
/// The UV channel to use for the [`StandardMaterial::base_color_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
pub base_color_channel: UvChannel,
/// The texture component of the material's color before lighting.
/// The actual pre-lighting color is `base_color * this_texture`.
///
/// See [`base_color`] for details.
///
/// You should set `base_color` to [`Color::WHITE`] (the default)
/// if you want the texture to show as-is.
///
/// Setting `base_color` to something else than white will tint
/// the texture. For example, setting `base_color` to pure red will
/// tint the texture red.
///
/// [`base_color`]: StandardMaterial::base_color
#[texture(1)]
#[sampler(2)]
#[dependency]
pub base_color_texture: Option<Handle<Image>>,
// Use a color for user friendliness even though we technically don't use the alpha channel
// Might be used in the future for exposure correction in HDR
/// Color the material "emits" to the camera.
///
/// This is typically used for monitor screens or LED lights.
/// Anything that can be visible even in darkness.
///
/// The emissive color is added to what would otherwise be the material's visible color.
/// This means that for a light emissive value, in darkness,
/// you will mostly see the emissive component.
///
/// The default emissive color is [`LinearRgba::BLACK`], which doesn't add anything to the material color.
///
/// Emissive strength is controlled by the value of the color channels,
/// while the hue is controlled by their relative values.
///
/// As a result, channel values for `emissive`
/// colors can exceed `1.0`. For instance, a `base_color` of
/// `LinearRgba::rgb(1.0, 0.0, 0.0)` represents the brightest
/// red for objects that reflect light, but an emissive color
/// like `LinearRgba::rgb(1000.0, 0.0, 0.0)` can be used to create
/// intensely bright red emissive effects.
///
/// This results in a final luminance value when multiplied
/// by the value of the greyscale emissive texture (which ranges from 0 for black to 1 for white).
/// Luminance is a measure of the amount of light emitted per unit area,
/// and can be thought of as the "brightness" of the effect.
/// In Bevy, we treat these luminance values as the physical units of cd/m², aka nits.
///
/// Increasing the emissive strength of the color will impact visual effects
/// like bloom, but it's important to note that **an emissive material won't
/// typically light up surrounding areas like a light source**,
/// it just adds a value to the color seen on screen.
pub emissive: LinearRgba,
/// The weight in which the camera exposure influences the emissive color.
/// A value of `0.0` means the emissive color is not affected by the camera exposure.
/// In opposition, a value of `1.0` means the emissive color is multiplied by the camera exposure.
///
/// Defaults to `0.0`
pub emissive_exposure_weight: f32,
/// The UV channel to use for the [`StandardMaterial::emissive_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
pub emissive_channel: UvChannel,
/// The emissive map, multiplies pixels with [`emissive`]
/// to get the final "emitting" color of a surface.
///
/// This color is multiplied by [`emissive`] to get the final emitted color.
/// Meaning that you should set [`emissive`] to [`Color::WHITE`]
/// if you want to use the full range of color of the emissive texture.
///
/// [`emissive`]: StandardMaterial::emissive
#[texture(3)]
#[sampler(4)]
#[dependency]
pub emissive_texture: Option<Handle<Image>>,
/// Linear perceptual roughness, clamped to `[0.089, 1.0]` in the shader.
///
/// Defaults to `0.5`.
///
/// Low values result in a "glossy" material with specular highlights,
/// while values close to `1` result in rough materials.
///
/// If used together with a roughness/metallic texture, this is factored into the final base
/// color as `roughness * roughness_texture_value`.
///
/// 0.089 is the minimum floating point value that won't be rounded down to 0 in the
/// calculations used.
// Technically for 32-bit floats, 0.045 could be used.
// See <https://google.github.io/filament/Filament.html#materialsystem/parameterization/>
pub perceptual_roughness: f32,
/// How "metallic" the material appears, within `[0.0, 1.0]`.
///
/// This should be set to 0.0 for dielectric materials or 1.0 for metallic materials.
/// For a hybrid surface such as corroded metal, you may need to use in-between values.
///
/// Defaults to `0.00`, for dielectric.
///
/// If used together with a roughness/metallic texture, this is factored into the final base
/// color as `metallic * metallic_texture_value`.
pub metallic: f32,
/// The UV channel to use for the [`StandardMaterial::metallic_roughness_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
pub metallic_roughness_channel: UvChannel,
/// Metallic and roughness maps, stored as a single texture.
///
/// The blue channel contains metallic values,
/// and the green channel contains the roughness values.
/// Other channels are unused.
///
/// Those values are multiplied by the scalar ones of the material,
/// see [`metallic`] and [`perceptual_roughness`] for details.
///
/// Note that with the default values of [`metallic`] and [`perceptual_roughness`],
/// setting this texture has no effect. If you want to exclusively use the
/// `metallic_roughness_texture` values for your material, make sure to set [`metallic`]
/// and [`perceptual_roughness`] to `1.0`.
///
/// [`metallic`]: StandardMaterial::metallic
/// [`perceptual_roughness`]: StandardMaterial::perceptual_roughness
#[texture(5)]
#[sampler(6)]
#[dependency]
pub metallic_roughness_texture: Option<Handle<Image>>,
/// Specular intensity for non-metals on a linear scale of `[0.0, 1.0]`.
///
/// Use the value as a way to control the intensity of the
/// specular highlight of the material, i.e. how reflective is the material,
/// rather than the physical property "reflectance."
///
/// Set to `0.0`, no specular highlight is visible, the highlight is strongest
/// when `reflectance` is set to `1.0`.
///
/// Defaults to `0.5` which is mapped to 4% reflectance in the shader.
#[doc(alias = "specular_intensity")]
pub reflectance: f32,
/// A color with which to modulate the [`StandardMaterial::reflectance`] for
/// non-metals.
///
/// The specular highlights and reflection are tinted with this color. Note
/// that it has no effect for non-metals.
///
/// This feature is currently unsupported in the deferred rendering path, in
/// order to reduce the size of the geometry buffers.
///
/// Defaults to [`Color::WHITE`].
#[doc(alias = "specular_color")]
pub specular_tint: Color,
/// The amount of light transmitted _diffusely_ through the material (i.e. “translucency”).
///
/// Implemented as a second, flipped [Lambertian diffuse](https://en.wikipedia.org/wiki/Lambertian_reflectance) lobe,
/// which provides an inexpensive but plausible approximation of translucency for thin dielectric objects (e.g. paper,
/// leaves, some fabrics) or thicker volumetric materials with short scattering distances (e.g. porcelain, wax).
///
/// For specular transmission usecases with refraction (e.g. glass) use the [`StandardMaterial::specular_transmission`] and
/// [`StandardMaterial::ior`] properties instead.
///
/// - When set to `0.0` (the default) no diffuse light is transmitted;
/// - When set to `1.0` all diffuse light is transmitted through the material;
/// - Values higher than `0.5` will cause more diffuse light to be transmitted than reflected, resulting in a “darker”
/// appearance on the side facing the light than the opposite side. (e.g. plant leaves)
///
/// ## Notes
///
/// - The material's [`StandardMaterial::base_color`] also modulates the transmitted light;
/// - To receive transmitted shadows on the diffuse transmission lobe (i.e. the “backside”) of the material,
/// use the [`TransmittedShadowReceiver`](bevy_light::TransmittedShadowReceiver) component.
#[doc(alias = "translucency")]
pub diffuse_transmission: f32,
/// The UV channel to use for the [`StandardMaterial::diffuse_transmission_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_transmission_textures")]
pub diffuse_transmission_channel: UvChannel,
/// A map that modulates diffuse transmission via its alpha channel. Multiplied by [`StandardMaterial::diffuse_transmission`]
/// to obtain the final result.
///
/// **Important:** The [`StandardMaterial::diffuse_transmission`] property must be set to a value higher than 0.0,
/// or this texture won't have any effect.
#[cfg_attr(feature = "pbr_transmission_textures", texture(19))]
#[cfg_attr(feature = "pbr_transmission_textures", sampler(20))]
#[cfg(feature = "pbr_transmission_textures")]
#[dependency]
pub diffuse_transmission_texture: Option<Handle<Image>>,
/// The amount of light transmitted _specularly_ through the material (i.e. via refraction).
///
/// - When set to `0.0` (the default) no light is transmitted.
/// - When set to `1.0` all light is transmitted through the material.
///
/// The material's [`StandardMaterial::base_color`] also modulates the transmitted light.
///
/// **Note:** Typically used in conjunction with [`StandardMaterial::thickness`], [`StandardMaterial::ior`] and [`StandardMaterial::perceptual_roughness`].
///
/// ## Performance
///
/// Specular transmission is implemented as a relatively expensive screen-space effect that allows occluded objects to be seen through the material,
/// with distortion and blur effects.
///
/// - [`Camera3d::screen_space_specular_transmission_steps`](bevy_camera::Camera3d::screen_space_specular_transmission_steps) can be used to enable transmissive objects
/// to be seen through other transmissive objects, at the cost of additional draw calls and texture copies; (Use with caution!)
/// - If a simplified approximation of specular transmission using only environment map lighting is sufficient, consider setting
/// [`Camera3d::screen_space_specular_transmission_steps`](bevy_camera::Camera3d::screen_space_specular_transmission_steps) to `0`.
/// - If purely diffuse light transmission is needed, (i.e. “translucency”) consider using [`StandardMaterial::diffuse_transmission`] instead,
/// for a much less expensive effect.
/// - Specular transmission is rendered before alpha blending, so any material with [`AlphaMode::Blend`], [`AlphaMode::Premultiplied`], [`AlphaMode::Add`] or [`AlphaMode::Multiply`]
/// won't be visible through specular transmissive materials.
#[doc(alias = "refraction")]
pub specular_transmission: f32,
/// The UV channel to use for the [`StandardMaterial::specular_transmission_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_transmission_textures")]
pub specular_transmission_channel: UvChannel,
/// A map that modulates specular transmission via its red channel. Multiplied by [`StandardMaterial::specular_transmission`]
/// to obtain the final result.
///
/// **Important:** The [`StandardMaterial::specular_transmission`] property must be set to a value higher than 0.0,
/// or this texture won't have any effect.
#[cfg_attr(feature = "pbr_transmission_textures", texture(15))]
#[cfg_attr(feature = "pbr_transmission_textures", sampler(16))]
#[cfg(feature = "pbr_transmission_textures")]
#[dependency]
pub specular_transmission_texture: Option<Handle<Image>>,
/// Thickness of the volume beneath the material surface.
///
/// When set to `0.0` (the default) the material appears as an infinitely-thin film,
/// transmitting light without distorting it.
///
/// When set to any other value, the material distorts light like a thick lens.
///
/// **Note:** Typically used in conjunction with [`StandardMaterial::specular_transmission`] and [`StandardMaterial::ior`], or with
/// [`StandardMaterial::diffuse_transmission`].
#[doc(alias = "volume")]
#[doc(alias = "thin_walled")]
pub thickness: f32,
/// The UV channel to use for the [`StandardMaterial::thickness_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_transmission_textures")]
pub thickness_channel: UvChannel,
/// A map that modulates thickness via its green channel. Multiplied by [`StandardMaterial::thickness`]
/// to obtain the final result.
///
/// **Important:** The [`StandardMaterial::thickness`] property must be set to a value higher than 0.0,
/// or this texture won't have any effect.
#[cfg_attr(feature = "pbr_transmission_textures", texture(17))]
#[cfg_attr(feature = "pbr_transmission_textures", sampler(18))]
#[cfg(feature = "pbr_transmission_textures")]
#[dependency]
pub thickness_texture: Option<Handle<Image>>,
/// The [index of refraction](https://en.wikipedia.org/wiki/Refractive_index) of the material.
///
/// Defaults to 1.5.
///
/// | Material | Index of Refraction |
/// |:----------------|:---------------------|
/// | Vacuum | 1 |
/// | Air | 1.00 |
/// | Ice | 1.31 |
/// | Water | 1.33 |
/// | Eyes | 1.38 |
/// | Quartz | 1.46 |
/// | Olive Oil | 1.47 |
/// | Honey | 1.49 |
/// | Acrylic | 1.49 |
/// | Window Glass | 1.52 |
/// | Polycarbonate | 1.58 |
/// | Flint Glass | 1.69 |
/// | Ruby | 1.71 |
/// | Glycerine | 1.74 |
/// | Sapphire | 1.77 |
/// | Cubic Zirconia | 2.15 |
/// | Diamond | 2.42 |
/// | Moissanite | 2.65 |
///
/// **Note:** Typically used in conjunction with [`StandardMaterial::specular_transmission`] and [`StandardMaterial::thickness`].
#[doc(alias = "index_of_refraction")]
#[doc(alias = "refraction_index")]
#[doc(alias = "refractive_index")]
pub ior: f32,
/// How far, on average, light travels through the volume beneath the material's
/// surface before being absorbed.
///
/// Defaults to [`f32::INFINITY`], i.e. light is never absorbed.
///
/// **Note:** To have any effect, must be used in conjunction with:
/// - [`StandardMaterial::attenuation_color`];
/// - [`StandardMaterial::thickness`];
/// - [`StandardMaterial::diffuse_transmission`] or [`StandardMaterial::specular_transmission`].
#[doc(alias = "absorption_distance")]
#[doc(alias = "extinction_distance")]
pub attenuation_distance: f32,
/// The resulting (non-absorbed) color after white light travels through the attenuation distance.
///
/// Defaults to [`Color::WHITE`], i.e. no change.
///
/// **Note:** To have any effect, must be used in conjunction with:
/// - [`StandardMaterial::attenuation_distance`];
/// - [`StandardMaterial::thickness`];
/// - [`StandardMaterial::diffuse_transmission`] or [`StandardMaterial::specular_transmission`].
#[doc(alias = "absorption_color")]
#[doc(alias = "extinction_color")]
pub attenuation_color: Color,
/// The UV channel to use for the [`StandardMaterial::normal_map_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
pub normal_map_channel: UvChannel,
/// Used to fake the lighting of bumps and dents on a material.
///
/// A typical usage would be faking cobblestones on a flat plane mesh in 3D.
///
/// # Notes
///
/// Normal mapping with `StandardMaterial` and the core bevy PBR shaders requires:
/// - A normal map texture
/// - Vertex UVs
/// - Vertex tangents
/// - Vertex normals
///
/// Tangents do not have to be stored in your model,
/// they can be generated using the [`Mesh::generate_tangents`] or
/// [`Mesh::with_generated_tangents`] methods.
/// If your material has a normal map, but still renders as a flat surface,
/// make sure your meshes have their tangents set.
///
/// [`Mesh::generate_tangents`]: bevy_mesh::Mesh::generate_tangents
/// [`Mesh::with_generated_tangents`]: bevy_mesh::Mesh::with_generated_tangents
///
/// # Usage
///
/// ```
/// # use bevy_asset::{AssetServer, Handle};
/// # use bevy_ecs::change_detection::Res;
/// # use bevy_image::{Image, ImageLoaderSettings};
/// #
/// fn load_normal_map(asset_server: Res<AssetServer>) {
/// let normal_handle: Handle<Image> = asset_server.load_with_settings(
/// "textures/parallax_example/cube_normal.png",
/// // The normal map texture is in linear color space. Lighting won't look correct
/// // if `is_srgb` is `true`, which is the default.
/// |settings: &mut ImageLoaderSettings| settings.is_srgb = false,
/// );
/// }
/// ```
#[texture(9)]
#[sampler(10)]
#[dependency]
pub normal_map_texture: Option<Handle<Image>>,
/// Normal map textures authored for DirectX have their y-component flipped. Set this to flip
/// it to right-handed conventions.
pub flip_normal_map_y: bool,
/// The UV channel to use for the [`StandardMaterial::occlusion_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
pub occlusion_channel: UvChannel,
/// Specifies the level of exposure to ambient light.
///
/// This is usually generated and stored automatically ("baked") by 3D-modeling software.
///
/// Typically, steep concave parts of a model (such as the armpit of a shirt) are darker,
/// because they have little exposure to light.
/// An occlusion map specifies those parts of the model that light doesn't reach well.
///
/// The material will be less lit in places where this texture is dark.
/// This is similar to ambient occlusion, but built into the model.
#[texture(7)]
#[sampler(8)]
#[dependency]
pub occlusion_texture: Option<Handle<Image>>,
/// The UV channel to use for the [`StandardMaterial::specular_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_specular_textures")]
pub specular_channel: UvChannel,
/// A map that specifies reflectance for non-metallic materials.
///
/// Alpha values from [0.0, 1.0] in this texture are linearly mapped to
/// reflectance values of [0.0, 0.5] and multiplied by the constant
/// [`StandardMaterial::reflectance`] value. This follows the
/// `KHR_materials_specular` specification. The map will have no effect if
/// the material is fully metallic.
///
/// When using this map, you may wish to set the
/// [`StandardMaterial::reflectance`] value to 2.0 so that this map can
/// express the full [0.0, 1.0] range of values.
///
/// Note that, because the reflectance is stored in the alpha channel, and
/// the [`StandardMaterial::specular_tint_texture`] has no alpha value, it
/// may be desirable to pack the values together and supply the same
/// texture to both fields.
#[cfg_attr(feature = "pbr_specular_textures", texture(27))]
#[cfg_attr(feature = "pbr_specular_textures", sampler(28))]
#[cfg(feature = "pbr_specular_textures")]
#[dependency]
pub specular_texture: Option<Handle<Image>>,
/// The UV channel to use for the
/// [`StandardMaterial::specular_tint_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_specular_textures")]
pub specular_tint_channel: UvChannel,
/// A map that specifies color adjustment to be applied to the specular
/// reflection for non-metallic materials.
///
/// The RGB values of this texture modulate the
/// [`StandardMaterial::specular_tint`] value. See the documentation for
/// that field for more information.
///
/// Like the fixed specular tint value, this texture map isn't supported in
/// the deferred renderer.
#[cfg_attr(feature = "pbr_specular_textures", texture(29))]
#[cfg_attr(feature = "pbr_specular_textures", sampler(30))]
#[cfg(feature = "pbr_specular_textures")]
#[dependency]
pub specular_tint_texture: Option<Handle<Image>>,
/// An extra thin translucent layer on top of the main PBR layer. This is
/// typically used for painted surfaces.
///
/// This value specifies the strength of the layer, which affects how
/// visible the clearcoat layer will be.
///
/// Defaults to zero, specifying no clearcoat layer.
pub clearcoat: f32,
/// The UV channel to use for the [`StandardMaterial::clearcoat_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_multi_layer_material_textures")]
pub clearcoat_channel: UvChannel,
/// An image texture that specifies the strength of the clearcoat layer in
/// the red channel. Values sampled from this texture are multiplied by the
/// main [`StandardMaterial::clearcoat`] factor.
///
/// As this is a non-color map, it must not be loaded as sRGB.
#[cfg_attr(feature = "pbr_multi_layer_material_textures", texture(21))]
#[cfg_attr(feature = "pbr_multi_layer_material_textures", sampler(22))]
#[cfg(feature = "pbr_multi_layer_material_textures")]
#[dependency]
pub clearcoat_texture: Option<Handle<Image>>,
/// The roughness of the clearcoat material. This is specified in exactly
/// the same way as the [`StandardMaterial::perceptual_roughness`].
///
    /// If the [`StandardMaterial::clearcoat`] value is zero, this has no
/// effect.
///
/// Defaults to 0.5.
pub clearcoat_perceptual_roughness: f32,
/// The UV channel to use for the [`StandardMaterial::clearcoat_roughness_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_multi_layer_material_textures")]
pub clearcoat_roughness_channel: UvChannel,
/// An image texture that specifies the roughness of the clearcoat level in
/// the green channel. Values from this texture are multiplied by the main
/// [`StandardMaterial::clearcoat_perceptual_roughness`] factor.
///
/// As this is a non-color map, it must not be loaded as sRGB.
#[cfg_attr(feature = "pbr_multi_layer_material_textures", texture(23))]
#[cfg_attr(feature = "pbr_multi_layer_material_textures", sampler(24))]
#[cfg(feature = "pbr_multi_layer_material_textures")]
#[dependency]
pub clearcoat_roughness_texture: Option<Handle<Image>>,
/// The UV channel to use for the [`StandardMaterial::clearcoat_normal_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_multi_layer_material_textures")]
pub clearcoat_normal_channel: UvChannel,
/// An image texture that specifies a normal map that is to be applied to
/// the clearcoat layer. This can be used to simulate, for example,
/// scratches on an outer layer of varnish. Normal maps are in the same
/// format as [`StandardMaterial::normal_map_texture`].
///
/// Note that, if a clearcoat normal map isn't specified, the main normal
/// map, if any, won't be applied to the clearcoat. If you want a normal map
/// that applies to both the main material and to the clearcoat, specify it
/// in both [`StandardMaterial::normal_map_texture`] and this field.
///
/// As this is a non-color map, it must not be loaded as sRGB.
#[cfg_attr(feature = "pbr_multi_layer_material_textures", texture(25))]
#[cfg_attr(feature = "pbr_multi_layer_material_textures", sampler(26))]
#[cfg(feature = "pbr_multi_layer_material_textures")]
#[dependency]
pub clearcoat_normal_texture: Option<Handle<Image>>,
/// Increases the roughness along a specific direction, so that the specular
/// highlight will be stretched instead of being a circular lobe.
///
/// This value ranges from 0 (perfectly circular) to 1 (maximally
/// stretched). The default direction (corresponding to a
/// [`StandardMaterial::anisotropy_rotation`] of 0) aligns with the
/// *tangent* of the mesh; thus mesh tangents must be specified in order for
/// this parameter to have any meaning. The direction can be changed using
/// the [`StandardMaterial::anisotropy_rotation`] parameter.
///
/// This is typically used for modeling surfaces such as brushed metal and
/// hair, in which one direction of the surface but not the other is smooth.
///
/// See the [`KHR_materials_anisotropy` specification] for more details.
///
/// [`KHR_materials_anisotropy` specification]:
/// https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md
pub anisotropy_strength: f32,
/// The direction of increased roughness, in radians relative to the mesh
/// tangent.
///
/// This parameter causes the roughness to vary according to the
/// [`StandardMaterial::anisotropy_strength`]. The rotation is applied in
/// tangent-bitangent space; thus, mesh tangents must be present for this
/// parameter to have any meaning.
///
/// This parameter has no effect if
/// [`StandardMaterial::anisotropy_strength`] is zero. Its value can
/// optionally be adjusted across the mesh with the
/// [`StandardMaterial::anisotropy_texture`].
///
/// See the [`KHR_materials_anisotropy` specification] for more details.
///
/// [`KHR_materials_anisotropy` specification]:
/// https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md
pub anisotropy_rotation: f32,
/// The UV channel to use for the [`StandardMaterial::anisotropy_texture`].
///
/// Defaults to [`UvChannel::Uv0`].
#[cfg(feature = "pbr_anisotropy_texture")]
pub anisotropy_channel: UvChannel,
/// An image texture that allows the
/// [`StandardMaterial::anisotropy_strength`] and
/// [`StandardMaterial::anisotropy_rotation`] to vary across the mesh.
///
/// The [`KHR_materials_anisotropy` specification] defines the format that
/// this texture must take. To summarize: the direction vector is encoded in
/// the red and green channels, while the strength is encoded in the blue
    /// channel. For the direction vector, the red and green channels map the
/// color range [0, 1] to the vector range [-1, 1]. The direction vector
/// encoded in this texture modifies the default rotation direction in
/// tangent-bitangent space, before the
/// [`StandardMaterial::anisotropy_rotation`] parameter is applied. The
/// value in the blue channel is multiplied by the
/// [`StandardMaterial::anisotropy_strength`] value to produce the final
/// anisotropy strength.
///
/// As the texel values don't represent colors, this texture must be in
/// linear color space, not sRGB.
///
/// [`KHR_materials_anisotropy` specification]:
/// https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md
#[cfg_attr(feature = "pbr_anisotropy_texture", texture(13))]
#[cfg_attr(feature = "pbr_anisotropy_texture", sampler(14))]
#[cfg(feature = "pbr_anisotropy_texture")]
#[dependency]
pub anisotropy_texture: Option<Handle<Image>>,
/// Support two-sided lighting by automatically flipping the normals for "back" faces
/// within the PBR lighting shader.
///
/// Defaults to `false`.
/// This does not automatically configure backface culling,
/// which can be done via `cull_mode`.
pub double_sided: bool,
/// Whether to cull the "front", "back" or neither side of a mesh.
/// If set to `None`, the two sides of the mesh are visible.
///
/// Defaults to `Some(Face::Back)`.
/// In bevy, the order of declaration of a triangle's vertices
/// in [`Mesh`] defines the triangle's front face.
///
/// When a triangle is in a viewport,
/// if its vertices appear counter-clockwise from the viewport's perspective,
/// then the viewport is seeing the triangle's front face.
/// Conversely, if the vertices appear clockwise, you are seeing the back face.
///
    /// In short, in bevy, front faces wind counter-clockwise.
///
/// Your 3D editing software should manage all of that.
///
/// [`Mesh`]: bevy_mesh::Mesh
// TODO: include this in reflection somehow (maybe via remote types like serde https://serde.rs/remote-derive.html)
#[reflect(ignore, clone)]
pub cull_mode: Option<Face>,
/// Whether to apply only the base color to this material.
///
/// Normals, occlusion textures, roughness, metallic, reflectance, emissive,
/// shadows, alpha mode and ambient light are ignored if this is set to `true`.
pub unlit: bool,
/// Whether to enable fog for this material.
pub fog_enabled: bool,
/// How to apply the alpha channel of the `base_color_texture`.
///
/// See [`AlphaMode`] for details. Defaults to [`AlphaMode::Opaque`].
pub alpha_mode: AlphaMode,
/// Adjust rendered depth.
///
/// A material with a positive depth bias will render closer to the
/// camera while negative values cause the material to render behind
/// other objects. This is independent of the viewport.
///
/// `depth_bias` affects render ordering and depth write operations
/// using the `wgpu::DepthBiasState::Constant` field.
///
/// [z-fighting]: https://en.wikipedia.org/wiki/Z-fighting
pub depth_bias: f32,
/// The depth map used for [parallax mapping].
///
/// It is a grayscale image where white represents bottom and black the top.
/// If this field is set, bevy will apply [parallax mapping].
/// Parallax mapping, unlike simple normal maps, will move the texture
/// coordinate according to the current perspective,
/// giving actual depth to the texture.
///
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/cluster.rs | crates/bevy_pbr/src/cluster.rs | use core::num::NonZero;
use bevy_camera::Camera;
use bevy_ecs::{entity::EntityHashMap, prelude::*};
use bevy_light::cluster::{ClusterableObjectCounts, Clusters, GlobalClusterSettings};
use bevy_math::{uvec4, UVec3, UVec4, Vec4};
use bevy_render::{
render_resource::{
BindingResource, BufferBindingType, ShaderSize, ShaderType, StorageBuffer, UniformBuffer,
},
renderer::{RenderAdapter, RenderDevice, RenderQueue},
sync_world::RenderEntity,
Extract,
};
use tracing::warn;
use crate::MeshPipeline;
// NOTE: this must be kept in sync with the same constants in
// `mesh_view_types.wgsl`.
pub const MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS: usize = 204;
// Make sure that the clusterable object buffer doesn't overflow the maximum
// size of a UBO on WebGL 2.
const _: () =
    assert!(size_of::<GpuClusterableObject>() * MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS <= 16384);
// NOTE: Clustered-forward rendering requires 3 storage buffer bindings so check that
// at least that many are supported using this constant and SupportedBindingType::from_device()
pub const CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT: u32 = 3;
// this must match CLUSTER_COUNT_SIZE in pbr.wgsl
// and must be large enough to contain MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS
const CLUSTER_COUNT_SIZE: u32 = 9;
// A packed cluster word is laid out as [offset | point count | spot count],
// with each count taking CLUSTER_COUNT_SIZE bits, so the offset gets the
// remaining 32 - 2 * CLUSTER_COUNT_SIZE (= 14) bits.
const CLUSTER_OFFSET_MASK: u32 = (1 << (32 - (CLUSTER_COUNT_SIZE * 2))) - 1;
// Mask selecting one CLUSTER_COUNT_SIZE-bit count field.
const CLUSTER_COUNT_MASK: u32 = (1 << CLUSTER_COUNT_SIZE) - 1;
/// Builds the [`GlobalClusterSettings`] describing what the current render
/// device/adapter can do for clustered forward rendering.
///
/// Reads the [`RenderDevice`] and [`RenderAdapter`] resources from the render
/// world, probes whether enough read-only storage-buffer bindings exist for
/// the clustered-forward path, and checks whether clustered decals are usable.
pub(crate) fn make_global_cluster_settings(world: &World) -> GlobalClusterSettings {
    let render_device = world.resource::<RenderDevice>();
    let render_adapter = world.resource::<RenderAdapter>();

    // The unbounded clusterable-object path needs storage buffers; otherwise
    // we fall back to the fixed-size uniform-buffer path.
    let binding_type = render_device
        .get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT);
    let supports_storage_buffers = matches!(binding_type, BufferBindingType::Storage { .. });

    GlobalClusterSettings {
        supports_storage_buffers,
        clustered_decals_are_usable: crate::decal::clustered::clustered_decals_are_usable(
            render_device,
            render_adapter,
        ),
        max_uniform_buffer_clusterable_objects: MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS,
        view_cluster_bindings_max_indices: ViewClusterBindings::MAX_INDICES,
    }
}
/// The GPU-side representation of one clusterable object (point light, spot
/// light, etc.) as uploaded to the shader.
///
/// Field layout must stay in sync with the matching struct in
/// `mesh_view_types.wgsl` (see the module-level NOTE on the constants above).
#[derive(Copy, Clone, ShaderType, Default, Debug)]
pub struct GpuClusterableObject {
    // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3]
    // For spot lights: 2 components of the direction (x,z), spot_scale and spot_offset
    pub(crate) light_custom_data: Vec4,
    // Name suggests RGB color in `xyz` and inverse-square range in `w` —
    // TODO(review): confirm against the shader-side struct.
    pub(crate) color_inverse_square_range: Vec4,
    // Presumably world-space position (`xyz`) plus radius (`w`) — verify in
    // `mesh_view_types.wgsl`.
    pub(crate) position_radius: Vec4,
    // Bitflags interpreted by the shader.
    pub(crate) flags: u32,
    pub(crate) shadow_depth_bias: f32,
    pub(crate) shadow_normal_bias: f32,
    pub(crate) spot_light_tan_angle: f32,
    pub(crate) soft_shadow_size: f32,
    pub(crate) shadow_map_near_z: f32,
    pub(crate) decal_index: u32,
    // Padding to keep the struct's size/alignment consistent with the WGSL
    // counterpart (name suggests; confirm in the shader).
    pub(crate) pad: f32,
}
/// Render-world resource holding the GPU buffer of all clusterable objects
/// plus a map from (render-world) entity to its index in that buffer.
#[derive(Resource)]
pub struct GlobalClusterableObjectMeta {
    pub gpu_clusterable_objects: GpuClusterableObjects,
    pub entity_to_index: EntityHashMap<usize>,
}
/// The clusterable-object list, stored either in a fixed-size uniform buffer
/// (WebGL 2 fallback, capped at [`MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS`])
/// or in an unbounded storage buffer.
pub enum GpuClusterableObjects {
    Uniform(UniformBuffer<GpuClusterableObjectsUniform>),
    Storage(StorageBuffer<GpuClusterableObjectsStorage>),
}
/// Fixed-capacity uniform-buffer layout for the clusterable-object list.
#[derive(ShaderType)]
pub struct GpuClusterableObjectsUniform {
    data: Box<[GpuClusterableObject; MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS]>,
}
/// Runtime-sized storage-buffer layout for the clusterable-object list.
#[derive(ShaderType, Default)]
pub struct GpuClusterableObjectsStorage {
    #[shader(size(runtime))]
    data: Vec<GpuClusterableObject>,
}
/// Per-view cluster configuration extracted from the main world.
#[derive(Component)]
pub struct ExtractedClusterConfig {
    /// Special near value for cluster calculations
    pub(crate) near: f32,
    pub(crate) far: f32,
    /// Number of clusters in `X` / `Y` / `Z` in the view frustum
    pub(crate) dimensions: UVec3,
}
/// One record in the flattened per-view cluster stream: either a header
/// carrying the per-cluster counts, or one entity belonging to the cluster
/// opened by the most recent header.
enum ExtractedClusterableObjectElement {
    ClusterHeader(ClusterableObjectCounts),
    ClusterableObjectEntity(Entity),
}
/// Per-view flattened stream of cluster headers and member entities,
/// produced by [`extract_clusters`] and consumed by [`prepare_clusters`].
#[derive(Component)]
pub struct ExtractedClusterableObjects {
    data: Vec<ExtractedClusterableObjectElement>,
}
/// Fixed-capacity uniform-buffer layout for packed cluster offset/count words.
#[derive(ShaderType)]
struct GpuClusterOffsetsAndCountsUniform {
    data: Box<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>,
}
/// Runtime-sized storage-buffer layout for the cluster index list.
#[derive(ShaderType, Default)]
struct GpuClusterableObjectIndexListsStorage {
    #[shader(size(runtime))]
    data: Vec<u32>,
}
/// Runtime-sized storage-buffer layout for per-cluster offsets and counts.
#[derive(ShaderType, Default)]
struct GpuClusterOffsetsAndCountsStorage {
    /// The starting offset, followed by the number of point lights, spot
    /// lights, reflection probes, and irradiance volumes in each cluster, in
    /// that order. The remaining fields are filled with zeroes.
    #[shader(size(runtime))]
    data: Vec<[UVec4; 2]>,
}
/// The pair of per-view cluster buffers, in either the uniform-buffer
/// (packed, fixed-size) or storage-buffer (unpacked, growable) flavor.
enum ViewClusterBuffers {
    Uniform {
        // NOTE: UVec4 is because all arrays in Std140 layout have 16-byte alignment
        clusterable_object_index_lists: UniformBuffer<GpuClusterableObjectIndexListsUniform>,
        // NOTE: UVec4 is because all arrays in Std140 layout have 16-byte alignment
        cluster_offsets_and_counts: UniformBuffer<GpuClusterOffsetsAndCountsUniform>,
    },
    Storage {
        clusterable_object_index_lists: StorageBuffer<GpuClusterableObjectIndexListsStorage>,
        cluster_offsets_and_counts: StorageBuffer<GpuClusterOffsetsAndCountsStorage>,
    },
}
/// Per-view component owning the cluster buffers plus write cursors tracking
/// how many indices/offsets have been pushed so far.
#[derive(Component)]
pub struct ViewClusterBindings {
    n_indices: usize,
    n_offsets: usize,
    buffers: ViewClusterBuffers,
}
/// Startup system that inserts the [`GlobalClusterableObjectMeta`] resource,
/// choosing uniform- or storage-buffer backing based on what the render
/// device supports for the clustered-forward binding count.
pub fn init_global_clusterable_object_meta(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
) {
    // Probe once at startup; the chosen binding type is fixed thereafter.
    let binding_type = render_device
        .get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT);
    let meta = GlobalClusterableObjectMeta::new(binding_type);
    commands.insert_resource(meta);
}
impl GlobalClusterableObjectMeta {
    /// Creates an empty meta resource whose GPU buffer flavor (uniform vs.
    /// storage) matches `buffer_binding_type`.
    pub fn new(buffer_binding_type: BufferBindingType) -> Self {
        Self {
            gpu_clusterable_objects: GpuClusterableObjects::new(buffer_binding_type),
            entity_to_index: EntityHashMap::default(),
        }
    }
}
impl GpuClusterableObjects {
    /// Picks the buffer flavor that matches the supported binding type.
    fn new(buffer_binding_type: BufferBindingType) -> Self {
        if matches!(buffer_binding_type, BufferBindingType::Uniform) {
            Self::uniform()
        } else {
            Self::storage()
        }
    }
    /// Empty uniform-buffer-backed list (WebGL 2 fallback).
    fn uniform() -> Self {
        Self::Uniform(UniformBuffer::default())
    }
    /// Empty storage-buffer-backed list.
    fn storage() -> Self {
        Self::Storage(StorageBuffer::default())
    }
    /// Replaces the CPU-side contents with `clusterable_objects`.
    ///
    /// In the uniform flavor, at most
    /// [`MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS`] entries are copied and any
    /// extras are dropped; the storage flavor takes everything.
    pub(crate) fn set(&mut self, mut clusterable_objects: Vec<GpuClusterableObject>) {
        match self {
            Self::Uniform(buffer) => {
                // `zip` stops at whichever runs out first, so this copies
                // min(capacity, len) entries — the same truncation as before.
                let slots = buffer.get_mut().data.iter_mut();
                for (slot, object) in slots.zip(clusterable_objects.iter()) {
                    *slot = *object;
                }
            }
            Self::Storage(buffer) => {
                let data = &mut buffer.get_mut().data;
                data.clear();
                data.append(&mut clusterable_objects);
            }
        }
    }
    /// Uploads the CPU-side contents to the GPU.
    pub(crate) fn write_buffer(
        &mut self,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        match self {
            Self::Uniform(buffer) => buffer.write_buffer(render_device, render_queue),
            Self::Storage(buffer) => buffer.write_buffer(render_device, render_queue),
        }
    }
    /// The binding resource for the underlying GPU buffer, if created.
    pub fn binding(&self) -> Option<BindingResource<'_>> {
        match self {
            Self::Uniform(buffer) => buffer.binding(),
            Self::Storage(buffer) => buffer.binding(),
        }
    }
    /// Minimum binding size for the layout matching `buffer_binding_type`.
    pub fn min_size(buffer_binding_type: BufferBindingType) -> NonZero<u64> {
        if matches!(buffer_binding_type, BufferBindingType::Uniform) {
            GpuClusterableObjectsUniform::min_size()
        } else {
            GpuClusterableObjectsStorage::min_size()
        }
    }
}
impl Default for GpuClusterableObjectsUniform {
    /// A full-capacity array of zeroed objects, boxed to keep the (~13 KB)
    /// buffer off the stack.
    fn default() -> Self {
        Self {
            data: Box::new(
                [GpuClusterableObject::default(); MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS],
            ),
        }
    }
}
/// Extracts clusters from the main world into the render world.
///
/// For each active camera with [`Clusters`], flattens the per-cluster entity
/// lists into a single stream of `ClusterHeader` / `ClusterableObjectEntity`
/// records and inserts it (plus the cluster config) on the camera's
/// render-world entity. Inactive cameras have both components removed.
pub fn extract_clusters(
    mut commands: Commands,
    views: Extract<Query<(RenderEntity, &Clusters, &Camera)>>,
    mapper: Extract<Query<RenderEntity>>,
) {
    for (entity, clusters, camera) in &views {
        let mut entity_commands = commands
            .get_entity(entity)
            .expect("Clusters entity wasn't synced.");
        if !camera.is_active {
            // Stale cluster data must not linger on inactive views.
            entity_commands.remove::<(ExtractedClusterableObjects, ExtractedClusterConfig)>();
            continue;
        }
        // Pre-size the flattened stream: one header per cluster plus one
        // record per member entity.
        let entity_count: usize = clusters
            .clusterable_objects
            .iter()
            .map(|l| l.entities.len())
            .sum();
        let mut data = Vec::with_capacity(clusters.clusterable_objects.len() + entity_count);
        for cluster_objects in &clusters.clusterable_objects {
            data.push(ExtractedClusterableObjectElement::ClusterHeader(
                cluster_objects.counts,
            ));
            for clusterable_entity in &cluster_objects.entities {
                // Map main-world entities to their render-world counterparts;
                // entities that weren't synced are silently skipped.
                if let Ok(entity) = mapper.get(*clusterable_entity) {
                    data.push(ExtractedClusterableObjectElement::ClusterableObjectEntity(
                        entity,
                    ));
                }
            }
        }
        entity_commands.insert((
            ExtractedClusterableObjects { data },
            ExtractedClusterConfig {
                near: clusters.near,
                far: clusters.far,
                dimensions: clusters.dimensions,
            },
        ));
    }
}
/// Builds and uploads the per-view cluster GPU buffers from the extracted
/// cluster stream.
///
/// Replays each view's `ClusterHeader`/`ClusterableObjectEntity` records into
/// fresh [`ViewClusterBindings`]: headers record the current index-list offset
/// plus counts, entity records append that object's global buffer index. On
/// the uniform-buffer path the index list is capped at
/// [`ViewClusterBindings::MAX_INDICES`]; overflow logs a warning and truncates.
pub fn prepare_clusters(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mesh_pipeline: Res<MeshPipeline>,
    global_clusterable_object_meta: Res<GlobalClusterableObjectMeta>,
    views: Query<(Entity, &ExtractedClusterableObjects)>,
) {
    let render_device = render_device.into_inner();
    let supports_storage_buffers = matches!(
        mesh_pipeline.clustered_forward_buffer_binding_type,
        BufferBindingType::Storage { .. }
    );
    for (entity, extracted_clusters) in &views {
        let mut view_clusters_bindings =
            ViewClusterBindings::new(mesh_pipeline.clustered_forward_buffer_binding_type);
        view_clusters_bindings.clear();
        for record in &extracted_clusters.data {
            match record {
                ExtractedClusterableObjectElement::ClusterHeader(counts) => {
                    // The header's offset is wherever the index list currently
                    // ends; subsequent entity records extend from there.
                    let offset = view_clusters_bindings.n_indices();
                    view_clusters_bindings.push_offset_and_counts(offset, counts);
                }
                ExtractedClusterableObjectElement::ClusterableObjectEntity(entity) => {
                    if let Some(clusterable_object_index) =
                        global_clusterable_object_meta.entity_to_index.get(entity)
                    {
                        // The fixed-size uniform path cannot grow; stop
                        // filling the index list once it is full.
                        if view_clusters_bindings.n_indices() >= ViewClusterBindings::MAX_INDICES
                            && !supports_storage_buffers
                        {
                            warn!(
                                "Clusterable object index lists are full! The clusterable \
                                 objects in the view are present in too many clusters."
                            );
                            break;
                        }
                        view_clusters_bindings.push_index(*clusterable_object_index);
                    }
                }
            }
        }
        view_clusters_bindings.write_buffers(render_device, &render_queue);
        commands.entity(entity).insert(view_clusters_bindings);
    }
}
impl ViewClusterBindings {
    // 16 KB is the minimum guaranteed UBO size; 4 bytes per packed u32 word.
    pub const MAX_OFFSETS: usize = 16384 / 4;
    // Four u32 components per UVec4 array element.
    const MAX_UNIFORM_ITEMS: usize = Self::MAX_OFFSETS / 4;
    // Hard cap on the index list when backed by a uniform buffer.
    pub const MAX_INDICES: usize = 16384;
    /// Creates empty bindings with the buffer flavor matching
    /// `buffer_binding_type`; both write cursors start at zero.
    pub fn new(buffer_binding_type: BufferBindingType) -> Self {
        Self {
            n_indices: 0,
            n_offsets: 0,
            buffers: ViewClusterBuffers::new(buffer_binding_type),
        }
    }
pub fn clear(&mut self) {
match &mut self.buffers {
ViewClusterBuffers::Uniform {
clusterable_object_index_lists,
cluster_offsets_and_counts,
} => {
*clusterable_object_index_lists.get_mut().data =
[UVec4::ZERO; Self::MAX_UNIFORM_ITEMS];
*cluster_offsets_and_counts.get_mut().data = [UVec4::ZERO; Self::MAX_UNIFORM_ITEMS];
}
ViewClusterBuffers::Storage {
clusterable_object_index_lists,
cluster_offsets_and_counts,
..
} => {
clusterable_object_index_lists.get_mut().data.clear();
cluster_offsets_and_counts.get_mut().data.clear();
}
}
}
    /// Appends one cluster's index-list offset and object counts.
    ///
    /// Uniform flavor: the offset plus the point/spot counts are bit-packed
    /// into a single u32 (via `pack_offset_and_counts`, defined elsewhere in
    /// this file) and stored four-to-a-UVec4; other object kinds are not
    /// representable on this path. Storage flavor: all counts are stored
    /// unpacked across two UVec4s.
    fn push_offset_and_counts(&mut self, offset: usize, counts: &ClusterableObjectCounts) {
        match &mut self.buffers {
            ViewClusterBuffers::Uniform {
                cluster_offsets_and_counts,
                ..
            } => {
                let array_index = self.n_offsets >> 2; // >> 2 is equivalent to / 4
                if array_index >= Self::MAX_UNIFORM_ITEMS {
                    warn!("cluster offset and count out of bounds!");
                    return;
                }
                // Which of the UVec4's four lanes this offset occupies.
                let component = self.n_offsets & ((1 << 2) - 1);
                let packed =
                    pack_offset_and_counts(offset, counts.point_lights, counts.spot_lights);
                cluster_offsets_and_counts.get_mut().data[array_index][component] = packed;
            }
            ViewClusterBuffers::Storage {
                cluster_offsets_and_counts,
                ..
            } => {
                cluster_offsets_and_counts.get_mut().data.push([
                    uvec4(
                        offset as u32,
                        counts.point_lights,
                        counts.spot_lights,
                        counts.reflection_probes,
                    ),
                    uvec4(counts.irradiance_volumes, counts.decals, 0, 0),
                ]);
            }
        }
        self.n_offsets += 1;
    }
    /// The number of indices pushed so far; used as the offset for the next
    /// cluster header.
    pub fn n_indices(&self) -> usize {
        self.n_indices
    }
    /// Appends one clusterable-object index to the index list.
    ///
    /// Uniform flavor: indices are packed four 8-bit values per u32 (sixteen
    /// per UVec4), so only indices < 256 survive the truncating shift —
    /// consistent with the 204-object uniform-path cap. Storage flavor: the
    /// index is pushed as a plain u32.
    pub fn push_index(&mut self, index: usize) {
        match &mut self.buffers {
            ViewClusterBuffers::Uniform {
                clusterable_object_index_lists,
                ..
            } => {
                let array_index = self.n_indices >> 4; // >> 4 is equivalent to / 16
                // Lane within the UVec4, then byte within that lane's u32.
                let component = (self.n_indices >> 2) & ((1 << 2) - 1);
                let sub_index = self.n_indices & ((1 << 2) - 1);
                let index = index as u32;
                clusterable_object_index_lists.get_mut().data[array_index][component] |=
                    index << (8 * sub_index);
            }
            ViewClusterBuffers::Storage {
                clusterable_object_index_lists,
                ..
            } => {
                clusterable_object_index_lists
                    .get_mut()
                    .data
                    .push(index as u32);
            }
        }
        self.n_indices += 1;
    }
    /// Uploads the CPU-side cluster data to the GPU, creating or updating the
    /// underlying `wgpu` buffers as needed.
    pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
        match &mut self.buffers {
            ViewClusterBuffers::Uniform {
                clusterable_object_index_lists,
                cluster_offsets_and_counts,
            } => {
                clusterable_object_index_lists.write_buffer(render_device, render_queue);
                cluster_offsets_and_counts.write_buffer(render_device, render_queue);
            }
            ViewClusterBuffers::Storage {
                clusterable_object_index_lists,
                cluster_offsets_and_counts,
            } => {
                clusterable_object_index_lists.write_buffer(render_device, render_queue);
                cluster_offsets_and_counts.write_buffer(render_device, render_queue);
            }
        }
    }
    /// Returns the binding resource for the clusterable object index list
    /// buffer, or `None` if the buffer hasn't been written yet.
    pub fn clusterable_object_index_lists_binding(&self) -> Option<BindingResource<'_>> {
        match &self.buffers {
            ViewClusterBuffers::Uniform {
                clusterable_object_index_lists,
                ..
            } => clusterable_object_index_lists.binding(),
            ViewClusterBuffers::Storage {
                clusterable_object_index_lists,
                ..
            } => clusterable_object_index_lists.binding(),
        }
    }
    /// Returns the binding resource for the cluster offsets-and-counts
    /// buffer, or `None` if the buffer hasn't been written yet.
    pub fn offsets_and_counts_binding(&self) -> Option<BindingResource<'_>> {
        match &self.buffers {
            ViewClusterBuffers::Uniform {
                cluster_offsets_and_counts,
                ..
            } => cluster_offsets_and_counts.binding(),
            ViewClusterBuffers::Storage {
                cluster_offsets_and_counts,
                ..
            } => cluster_offsets_and_counts.binding(),
        }
    }
pub fn min_size_clusterable_object_index_lists(
buffer_binding_type: BufferBindingType,
) -> NonZero<u64> {
match buffer_binding_type {
BufferBindingType::Storage { .. } => GpuClusterableObjectIndexListsStorage::min_size(),
BufferBindingType::Uniform => GpuClusterableObjectIndexListsUniform::min_size(),
}
}
pub fn min_size_cluster_offsets_and_counts(
buffer_binding_type: BufferBindingType,
) -> NonZero<u64> {
match buffer_binding_type {
BufferBindingType::Storage { .. } => GpuClusterOffsetsAndCountsStorage::min_size(),
BufferBindingType::Uniform => GpuClusterOffsetsAndCountsUniform::min_size(),
}
}
}
impl ViewClusterBuffers {
    /// Creates uniform- or storage-backed cluster buffers depending on the
    /// binding type the platform supports.
    fn new(buffer_binding_type: BufferBindingType) -> Self {
        match buffer_binding_type {
            BufferBindingType::Storage { .. } => Self::storage(),
            BufferBindingType::Uniform => Self::uniform(),
        }
    }
    // Uniform-buffer fallback, used when storage buffers are unavailable
    // (typically WebGL 2 — see `pack_offset_and_counts`).
    fn uniform() -> Self {
        ViewClusterBuffers::Uniform {
            clusterable_object_index_lists: UniformBuffer::default(),
            cluster_offsets_and_counts: UniformBuffer::default(),
        }
    }
    // Storage-buffer variant, used wherever storage buffers are supported.
    fn storage() -> Self {
        ViewClusterBuffers::Storage {
            clusterable_object_index_lists: StorageBuffer::default(),
            cluster_offsets_and_counts: StorageBuffer::default(),
        }
    }
}
// Compresses the offset and counts of point and spot lights so that they fit
// in a UBO.
//
// This function is only used if storage buffers are unavailable on this
// platform: typically, on WebGL 2.
//
// NOTE: With uniform buffer max binding size as 16384 bytes
// that means we can fit 204 clusterable objects in one uniform
// buffer, which means the count can be at most 204 so it
// needs 9 bits.
// The array of indices can also use u8 and that means the
// offset in to the array of indices needs to be able to address
// 16384 values. log2(16384) = 14 bits.
// We use 32 bits to store the offset and counts so
// we pack the offset into the upper 14 bits of a u32,
// the point light count into bits 9-17, and the spot light count into bits 0-8.
// [ 31 .. 18 | 17 .. 9 | 8 .. 0 ]
// [  offset  | point light count | spot light count ]
//
// NOTE: This assumes CPU and GPU endianness are the same which is true
// for all common and tested x86/ARM CPUs and AMD/NVIDIA/Intel/Apple/etc GPUs
//
// NOTE: On platforms that use this function, we don't cluster light probes, so
// the number of light probes is irrelevant.
fn pack_offset_and_counts(offset: usize, point_count: u32, spot_count: u32) -> u32 {
    let offset_field = (offset as u32 & CLUSTER_OFFSET_MASK) << (CLUSTER_COUNT_SIZE * 2);
    let point_field = (point_count & CLUSTER_COUNT_MASK) << CLUSTER_COUNT_SIZE;
    let spot_field = spot_count & CLUSTER_COUNT_MASK;
    offset_field | point_field | spot_field
}
#[derive(ShaderType)]
struct GpuClusterableObjectIndexListsUniform {
    // Bit-packed list of u8 clusterable object indices, 16 per `UVec4`
    // (see `ViewClusterBindings::push_index`).
    data: Box<[UVec4; ViewClusterBindings::MAX_UNIFORM_ITEMS]>,
}
// NOTE: Assert at compile time that GpuClusterableObjectIndexListsUniform
// fits within the maximum uniform buffer binding size
const _: () = assert!(GpuClusterableObjectIndexListsUniform::SHADER_SIZE.get() <= 16384);
impl Default for GpuClusterableObjectIndexListsUniform {
    fn default() -> Self {
        Self {
            // Zero-initialized packed index list.
            data: Box::new([UVec4::ZERO; ViewClusterBindings::MAX_UNIFORM_ITEMS]),
        }
    }
}
impl Default for GpuClusterOffsetsAndCountsUniform {
    fn default() -> Self {
        Self {
            // Zero-initialized packed offsets-and-counts array.
            data: Box::new([UVec4::ZERO; ViewClusterBindings::MAX_UNIFORM_ITEMS]),
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/extended_material.rs | crates/bevy_pbr/src/extended_material.rs | use alloc::borrow::Cow;
use bevy_asset::Asset;
use bevy_ecs::system::SystemParamItem;
use bevy_mesh::MeshVertexBufferLayoutRef;
use bevy_platform::{collections::HashSet, hash::FixedHasher};
use bevy_reflect::{impl_type_path, Reflect};
use bevy_render::{
alpha::AlphaMode,
render_resource::{
AsBindGroup, AsBindGroupError, BindGroupLayout, BindGroupLayoutEntry, BindlessDescriptor,
BindlessResourceType, BindlessSlabResourceLimit, RenderPipelineDescriptor,
SpecializedMeshPipelineError, UnpreparedBindGroup,
},
renderer::RenderDevice,
};
use bevy_shader::ShaderRef;
use crate::{Material, MaterialPipeline, MaterialPipelineKey, MeshPipeline, MeshPipelineKey};
/// The pipeline context passed to [`MaterialExtension::specialize`].
pub struct MaterialExtensionPipeline {
    /// The mesh pipeline the material pipeline was built from.
    pub mesh_pipeline: MeshPipeline,
}
/// The specialization key passed to [`MaterialExtension::specialize`].
pub struct MaterialExtensionKey<E: MaterialExtension> {
    /// Flags describing the mesh being rendered.
    pub mesh_key: MeshPipelineKey,
    /// The extension's [`AsBindGroup`] bind group data.
    pub bind_group_data: E::Data,
}
/// A subset of the `Material` trait for defining extensions to a base `Material`, such as the builtin `StandardMaterial`.
///
/// A user type implementing the trait should be used as the `E` generic param in an `ExtendedMaterial` struct.
pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized {
    /// Returns this material's vertex shader. If [`ShaderRef::Default`] is returned, the base material mesh vertex shader
    /// will be used.
    fn vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the base material mesh fragment shader
    /// will be used.
    fn fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`AlphaMode`]. If `None` is returned, the base material alpha mode will be used.
    fn alpha_mode() -> Option<AlphaMode> {
        None
    }
    /// Controls if the prepass is enabled for the Material.
    /// For more information about what a prepass is, see the [`bevy_core_pipeline::prepass`] docs.
    #[inline]
    fn enable_prepass() -> bool {
        true
    }
    /// Controls if shadows are enabled for the Material.
    #[inline]
    fn enable_shadows() -> bool {
        true
    }
    /// Returns this material's prepass vertex shader. If [`ShaderRef::Default`] is returned, the base material prepass vertex shader
    /// will be used.
    fn prepass_vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's prepass fragment shader. If [`ShaderRef::Default`] is returned, the base material prepass fragment shader
    /// will be used.
    fn prepass_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's deferred vertex shader. If [`ShaderRef::Default`] is returned, the base material deferred vertex shader
    /// will be used.
    fn deferred_vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's deferred fragment shader. If [`ShaderRef::Default`] is returned, the base material deferred fragment shader
    /// will be used.
    fn deferred_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh fragment shader will be used.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] prepass fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh prepass fragment shader will be used.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_prepass_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] deferred fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh deferred fragment shader will be used.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_deferred_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Customizes the default [`RenderPipelineDescriptor`] for a specific entity using the entity's
    /// [`MaterialPipelineKey`] and [`MeshVertexBufferLayoutRef`] as input.
    /// Specialization for the base material is applied before this function is called.
    #[expect(
        unused_variables,
        reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion."
    )]
    #[inline]
    fn specialize(
        pipeline: &MaterialExtensionPipeline,
        descriptor: &mut RenderPipelineDescriptor,
        layout: &MeshVertexBufferLayoutRef,
        key: MaterialExtensionKey<Self>,
    ) -> Result<(), SpecializedMeshPipelineError> {
        Ok(())
    }
}
/// A material that extends a base [`Material`] with additional shaders and data.
///
/// The data from both materials will be combined and made available to the shader
/// so that shader functions built for the base material (and referencing the base material
/// bindings) will work as expected, and custom alterations based on custom data can also be used.
///
/// If the extension `E` returns a non-default result from `vertex_shader()` it will be used in place of the base
/// material's vertex shader.
///
/// If the extension `E` returns a non-default result from `fragment_shader()` it will be used in place of the base
/// fragment shader.
///
/// When used with `StandardMaterial` as the base, all the standard material fields are
/// present, so the `pbr_fragment` shader functions can be called from the extension shader (see
/// the `extended_material` example).
#[derive(Asset, Clone, Debug, Reflect)]
#[reflect(type_path = false)]
#[reflect(Clone)]
pub struct ExtendedMaterial<B: Material, E: MaterialExtension> {
    /// The base material being extended.
    pub base: B,
    /// The extension that overrides or augments the base material.
    pub extension: E,
}
/// Defaults to the default base material combined with the default extension.
impl<B, E> Default for ExtendedMaterial<B, E>
where
    B: Material + Default,
    E: MaterialExtension + Default,
{
    fn default() -> Self {
        Self {
            base: Default::default(),
            extension: Default::default(),
        }
    }
}
/// The combined bind group data for an [`ExtendedMaterial`]: the base
/// material's bind group data plus the extension's.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[repr(C, packed)]
pub struct MaterialExtensionBindGroupData<B, E> {
    /// The base material's bind group data.
    pub base: B,
    /// The extension's bind group data.
    pub extension: E,
}
// We don't use the `TypePath` derive here due to a bug where `#[reflect(type_path = false)]`
// causes the `TypePath` derive to not generate an implementation.
impl_type_path!((in bevy_pbr::extended_material) ExtendedMaterial<B: Material, E: MaterialExtension>);
impl<B: Material, E: MaterialExtension> AsBindGroup for ExtendedMaterial<B, E> {
    type Data = MaterialExtensionBindGroupData<B::Data, E::Data>;
    type Param = (<B as AsBindGroup>::Param, <E as AsBindGroup>::Param);
    fn bindless_slot_count() -> Option<BindlessSlabResourceLimit> {
        // We only enable bindless if both the base material and its extension
        // are bindless. If we do enable bindless, we choose the smaller of the
        // two slab size limits.
        //
        // The `?`s return `None` (bindless disabled) if either half is
        // non-bindless.
        match (B::bindless_slot_count()?, E::bindless_slot_count()?) {
            (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Auto) => {
                Some(BindlessSlabResourceLimit::Auto)
            }
            (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Custom(limit))
            | (BindlessSlabResourceLimit::Custom(limit), BindlessSlabResourceLimit::Auto) => {
                Some(BindlessSlabResourceLimit::Custom(limit))
            }
            (
                BindlessSlabResourceLimit::Custom(base_limit),
                BindlessSlabResourceLimit::Custom(extended_limit),
            ) => Some(BindlessSlabResourceLimit::Custom(
                base_limit.min(extended_limit),
            )),
        }
    }
    // Bindless is supported only if both the base and the extension support
    // it on this device.
    fn bindless_supported(render_device: &RenderDevice) -> bool {
        B::bindless_supported(render_device) && E::bindless_supported(render_device)
    }
    // The combined material is labeled with the extension's label.
    fn label() -> &'static str {
        E::label()
    }
    fn bind_group_data(&self) -> Self::Data {
        MaterialExtensionBindGroupData {
            base: self.base.bind_group_data(),
            extension: self.extension.bind_group_data(),
        }
    }
    fn unprepared_bind_group(
        &self,
        layout: &BindGroupLayout,
        render_device: &RenderDevice,
        (base_param, extended_param): &mut SystemParamItem<'_, '_, Self::Param>,
        mut force_non_bindless: bool,
    ) -> Result<UnpreparedBindGroup, AsBindGroupError> {
        // If either half doesn't support bindless, the whole material is
        // non-bindless.
        force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none();
        // add together the bindings of the base material and the extension
        let UnpreparedBindGroup { mut bindings } = B::unprepared_bind_group(
            &self.base,
            layout,
            render_device,
            base_param,
            force_non_bindless,
        )?;
        let UnpreparedBindGroup {
            bindings: extension_bindings,
        } = E::unprepared_bind_group(
            &self.extension,
            layout,
            render_device,
            extended_param,
            force_non_bindless,
        )?;
        // `.0` is the inner list of the `BindingResources` newtype.
        bindings.extend(extension_bindings.0);
        Ok(UnpreparedBindGroup { bindings })
    }
    fn bind_group_layout_entries(
        render_device: &RenderDevice,
        mut force_non_bindless: bool,
    ) -> Vec<BindGroupLayoutEntry>
    where
        Self: Sized,
    {
        force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none();
        // Add together the bindings of the standard material and the user
        // material, skipping duplicate bindings. Duplicate bindings will occur
        // when bindless mode is on, because of the common bindless resource
        // arrays, and we need to eliminate the duplicates or `wgpu` will
        // complain.
        let base_entries = B::bind_group_layout_entries(render_device, force_non_bindless);
        let extension_entries = E::bind_group_layout_entries(render_device, force_non_bindless);
        let mut seen_bindings = HashSet::<u32>::with_hasher(FixedHasher);
        base_entries
            .into_iter()
            .chain(extension_entries)
            .filter(|entry| seen_bindings.insert(entry.binding))
            .collect()
    }
    fn bindless_descriptor() -> Option<BindlessDescriptor> {
        // We're going to combine the two bindless descriptors.
        let base_bindless_descriptor = B::bindless_descriptor()?;
        let extended_bindless_descriptor = E::bindless_descriptor()?;
        // Combining the buffers and index tables is straightforward.
        let mut buffers = base_bindless_descriptor.buffers.to_vec();
        let mut index_tables = base_bindless_descriptor.index_tables.to_vec();
        buffers.extend(extended_bindless_descriptor.buffers.iter().cloned());
        index_tables.extend(extended_bindless_descriptor.index_tables.iter().cloned());
        // Combining the resources is a little trickier because the resource
        // array is indexed by bindless index, so we have to merge the two
        // arrays, not just concatenate them.
        let max_bindless_index = base_bindless_descriptor
            .resources
            .len()
            .max(extended_bindless_descriptor.resources.len());
        let mut resources = Vec::with_capacity(max_bindless_index);
        for bindless_index in 0..max_bindless_index {
            // In the event of a conflicting bindless index, we choose the
            // base's binding.
            match base_bindless_descriptor.resources.get(bindless_index) {
                None | Some(&BindlessResourceType::None) => resources.push(
                    extended_bindless_descriptor
                        .resources
                        .get(bindless_index)
                        .copied()
                        .unwrap_or(BindlessResourceType::None),
                ),
                Some(&resource_type) => resources.push(resource_type),
            }
        }
        Some(BindlessDescriptor {
            resources: Cow::Owned(resources),
            buffers: Cow::Owned(buffers),
            index_tables: Cow::Owned(index_tables),
        })
    }
}
// Forwarding impl: each shader hook defers to the extension when it provides
// a non-default value, and falls back to the base material otherwise.
impl<B: Material, E: MaterialExtension> Material for ExtendedMaterial<B, E> {
    fn vertex_shader() -> ShaderRef {
        match E::vertex_shader() {
            ShaderRef::Default => B::vertex_shader(),
            specified => specified,
        }
    }
    fn fragment_shader() -> ShaderRef {
        match E::fragment_shader() {
            ShaderRef::Default => B::fragment_shader(),
            specified => specified,
        }
    }
    fn alpha_mode(&self) -> AlphaMode {
        match E::alpha_mode() {
            Some(specified) => specified,
            None => B::alpha_mode(&self.base),
        }
    }
    // These properties have no extension hook; they always come from the
    // base material.
    fn opaque_render_method(&self) -> crate::OpaqueRendererMethod {
        B::opaque_render_method(&self.base)
    }
    fn depth_bias(&self) -> f32 {
        B::depth_bias(&self.base)
    }
    fn reads_view_transmission_texture(&self) -> bool {
        B::reads_view_transmission_texture(&self.base)
    }
    // Prepass/shadow toggles come from the extension.
    fn enable_prepass() -> bool {
        E::enable_prepass()
    }
    fn enable_shadows() -> bool {
        E::enable_shadows()
    }
    fn prepass_vertex_shader() -> ShaderRef {
        match E::prepass_vertex_shader() {
            ShaderRef::Default => B::prepass_vertex_shader(),
            specified => specified,
        }
    }
    fn prepass_fragment_shader() -> ShaderRef {
        match E::prepass_fragment_shader() {
            ShaderRef::Default => B::prepass_fragment_shader(),
            specified => specified,
        }
    }
    fn deferred_vertex_shader() -> ShaderRef {
        match E::deferred_vertex_shader() {
            ShaderRef::Default => B::deferred_vertex_shader(),
            specified => specified,
        }
    }
    fn deferred_fragment_shader() -> ShaderRef {
        match E::deferred_fragment_shader() {
            ShaderRef::Default => B::deferred_fragment_shader(),
            specified => specified,
        }
    }
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_fragment_shader() -> ShaderRef {
        match E::meshlet_mesh_fragment_shader() {
            ShaderRef::Default => B::meshlet_mesh_fragment_shader(),
            specified => specified,
        }
    }
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_prepass_fragment_shader() -> ShaderRef {
        match E::meshlet_mesh_prepass_fragment_shader() {
            ShaderRef::Default => B::meshlet_mesh_prepass_fragment_shader(),
            specified => specified,
        }
    }
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_deferred_fragment_shader() -> ShaderRef {
        match E::meshlet_mesh_deferred_fragment_shader() {
            ShaderRef::Default => B::meshlet_mesh_deferred_fragment_shader(),
            specified => specified,
        }
    }
    fn specialize(
        pipeline: &MaterialPipeline,
        descriptor: &mut RenderPipelineDescriptor,
        layout: &MeshVertexBufferLayoutRef,
        key: MaterialPipelineKey<Self>,
    ) -> Result<(), SpecializedMeshPipelineError> {
        // Call the base material's specialize function
        let base_key = MaterialPipelineKey::<B> {
            mesh_key: key.mesh_key,
            bind_group_data: key.bind_group_data.base,
        };
        B::specialize(pipeline, descriptor, layout, base_key)?;
        // Call the extended material's specialize function afterwards
        E::specialize(
            &MaterialExtensionPipeline {
                mesh_pipeline: pipeline.mesh_pipeline.clone(),
            },
            descriptor,
            layout,
            MaterialExtensionKey {
                mesh_key: key.mesh_key,
                bind_group_data: key.bind_group_data.extension,
            },
        )
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/parallax.rs | crates/bevy_pbr/src/parallax.rs | use bevy_reflect::{std_traits::ReflectDefault, Reflect};
/// The [parallax mapping] method to use to compute depth based on the
/// material's [`depth_map`].
///
/// Parallax Mapping uses a depth map texture to give the illusion of depth
/// variation on a mesh surface that is geometrically flat.
///
/// See the `parallax_mapping.wgsl` shader code for implementation details
/// and explanation of the methods used.
///
/// [`depth_map`]: crate::StandardMaterial::depth_map
/// [parallax mapping]: https://en.wikipedia.org/wiki/Parallax_mapping
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Reflect)]
#[reflect(Default, Clone, PartialEq)]
pub enum ParallaxMappingMethod {
    /// A simple linear interpolation, using a single texture sample.
    ///
    /// This method is named "Parallax Occlusion Mapping".
    ///
    /// Unlike [`ParallaxMappingMethod::Relief`], this requires only a single
    /// lookup, but may skip small details and result in writhing material
    /// artifacts.
    #[default]
    Occlusion,
    /// Discovers the best depth value based on binary search.
    ///
    /// Each iteration incurs a texture sample.
    /// The result has fewer visual artifacts than [`ParallaxMappingMethod::Occlusion`].
    ///
    /// This method is named "Relief Mapping".
    Relief {
        /// How many additional steps to use at most to find the depth value.
        max_steps: u32,
    },
}
impl ParallaxMappingMethod {
    /// [`ParallaxMappingMethod::Relief`] with 5 steps, a reasonable default.
    pub const DEFAULT_RELIEF_MAPPING: Self = ParallaxMappingMethod::Relief { max_steps: 5 };
    /// Returns the maximum number of additional depth-refinement steps:
    /// zero for [`Self::Occlusion`], the configured `max_steps` for
    /// [`Self::Relief`].
    pub(crate) fn max_steps(&self) -> u32 {
        if let ParallaxMappingMethod::Relief { max_steps } = self {
            *max_steps
        } else {
            0
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/material_bind_groups.rs | crates/bevy_pbr/src/material_bind_groups.rs | //! Material bind group management for bindless resources.
//!
//! In bindless mode, Bevy's renderer groups materials into bind groups. This
//! allocator manages each bind group, assigning slots to materials as
//! appropriate.
use crate::Material;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
resource::Resource,
system::{Commands, Res},
};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_render::render_resource::{BindlessSlabResourceLimit, PipelineCache};
use bevy_render::{
render_resource::{
BindGroup, BindGroupEntry, BindGroupLayoutDescriptor, BindingNumber, BindingResource,
BindingResources, BindlessDescriptor, BindlessIndex, BindlessIndexTableDescriptor,
BindlessResourceType, Buffer, BufferBinding, BufferDescriptor, BufferId,
BufferInitDescriptor, BufferUsages, CompareFunction, FilterMode, OwnedBindingResource,
PreparedBindGroup, RawBufferVec, Sampler, SamplerDescriptor, SamplerId, TextureView,
TextureViewDimension, TextureViewId, UnpreparedBindGroup, WgpuSampler, WgpuTextureView,
},
renderer::{RenderDevice, RenderQueue},
settings::WgpuFeatures,
texture::FallbackImage,
};
use bevy_utils::{default, TypeIdMap};
use bytemuck::Pod;
use core::hash::Hash;
use core::{cmp::Ordering, iter, mem, ops::Range};
use tracing::{error, trace};
/// The set of [`MaterialBindGroupAllocator`]s, keyed by type ID.
///
/// NOTE(review): the key looks like the material type's `TypeId` — confirm
/// against the code that populates this map.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct MaterialBindGroupAllocators(TypeIdMap<MaterialBindGroupAllocator>);
/// A resource that places materials into bind groups and tracks their
/// resources.
///
/// Internally, Bevy has separate allocators for bindless and non-bindless
/// materials. This resource provides a common interface to the specific
/// allocator in use.
pub enum MaterialBindGroupAllocator {
    /// The allocator used when the material is bindless.
    Bindless(Box<MaterialBindGroupBindlessAllocator>),
    /// The allocator used when the material is non-bindless.
    NonBindless(Box<MaterialBindGroupNonBindlessAllocator>),
}
/// The allocator that places bindless materials into bind groups and tracks
/// their resources.
pub struct MaterialBindGroupBindlessAllocator {
    /// The label of the bind group allocator to use for allocated buffers.
    label: &'static str,
    /// The slabs, each of which contains a bind group.
    ///
    /// NOTE(review): a slab's position in this vector appears to correspond
    /// to its [`MaterialBindGroupIndex`] — confirm.
    slabs: Vec<MaterialBindlessSlab>,
    /// The layout of the bind groups that we produce.
    bind_group_layout: BindGroupLayoutDescriptor,
    /// Information about the bindless resources in the material.
    ///
    /// We use this information to create and maintain bind groups.
    bindless_descriptor: BindlessDescriptor,
    /// Dummy buffers that we use to fill empty slots in buffer binding arrays.
    ///
    /// There's one fallback buffer for each buffer in the bind group, each
    /// appropriately sized. Each buffer contains one uninitialized element of
    /// the applicable type.
    fallback_buffers: HashMap<BindlessIndex, Buffer>,
    /// The maximum number of resources that can be stored in a slab.
    ///
    /// This corresponds to `SLAB_CAPACITY` in the `#[bindless(SLAB_CAPACITY)]`
    /// attribute, when deriving `AsBindGroup`.
    slab_capacity: u32,
}
/// A single bind group and the bookkeeping necessary to allocate into it.
pub struct MaterialBindlessSlab {
    /// The current bind group, if it's up to date.
    ///
    /// If this is `None`, then the bind group is dirty and needs to be
    /// regenerated.
    bind_group: Option<BindGroup>,
    /// The GPU-accessible buffers that hold the mapping from binding index to
    /// bindless slot.
    ///
    /// This is conventionally assigned to bind group binding 0, but it can be
    /// changed using the `#[bindless(index_table(binding(B)))]` attribute on
    /// `AsBindGroup`.
    ///
    /// Because the slab binary searches this table, the entries within must be
    /// sorted by bindless index.
    bindless_index_tables: Vec<MaterialBindlessIndexTable>,
    /// The binding arrays containing samplers.
    samplers: HashMap<BindlessResourceType, MaterialBindlessBindingArray<Sampler>>,
    /// The binding arrays containing textures.
    textures: HashMap<BindlessResourceType, MaterialBindlessBindingArray<TextureView>>,
    /// The binding arrays containing buffers.
    buffers: HashMap<BindlessIndex, MaterialBindlessBindingArray<Buffer>>,
    /// The buffers that contain plain old data (i.e. the structure-level
    /// `#[data]` attribute of `AsBindGroup`).
    data_buffers: HashMap<BindlessIndex, MaterialDataBuffer>,
    /// A list of free slot IDs.
    free_slots: Vec<MaterialBindGroupSlot>,
    /// The total number of materials currently allocated in this slab.
    live_allocation_count: u32,
    /// The total number of resources currently allocated in the binding arrays.
    allocated_resource_count: u32,
}
/// A GPU-accessible buffer that holds the mapping from binding index to
/// bindless slot.
///
/// This is conventionally assigned to bind group binding 0, but it can be
/// changed by altering the [`Self::binding_number`], which corresponds to the
/// `#[bindless(index_table(binding(B)))]` attribute in `AsBindGroup`.
struct MaterialBindlessIndexTable {
    /// The buffer containing the mappings.
    buffer: RetainedRawBufferVec<u32>,
    /// The range of bindless indices that this bindless index table covers.
    ///
    /// If this range is M..N, then the field at index $i$ maps to bindless
    /// index $i$ + M. The size of this table is N - M.
    ///
    /// This corresponds to the `#[bindless(index_table(range(M..N)))]`
    /// attribute in `AsBindGroup`.
    index_range: Range<BindlessIndex>,
    /// The binding number that this index table is assigned to in the shader.
    binding_number: BindingNumber,
}
/// A single binding array for storing bindless resources and the bookkeeping
/// necessary to allocate into it.
///
/// `R` is the resource type: a sampler, texture view, or buffer.
struct MaterialBindlessBindingArray<R>
where
    R: GetBindingResourceId,
{
    /// The number of the binding that we attach this binding array to.
    binding_number: BindingNumber,
    /// A mapping from bindless slot index to the resource stored in that slot,
    /// if any.
    bindings: Vec<Option<MaterialBindlessBinding<R>>>,
    /// The type of resource stored in this binding array.
    resource_type: BindlessResourceType,
    /// Maps a resource ID to the slot in which it's stored.
    ///
    /// This is essentially the inverse mapping of [`Self::bindings`].
    resource_to_slot: HashMap<BindingResourceId, u32>,
    /// A list of free slots in [`Self::bindings`] that contain no binding.
    free_slots: Vec<u32>,
    /// The number of allocated objects in this binding array.
    len: u32,
}
/// A single resource (sampler, texture, or buffer) in a binding array.
///
/// Resources hold a reference count, which specifies the number of materials
/// currently allocated within the slab that refer to this resource. When the
/// reference count drops to zero, the resource is freed.
struct MaterialBindlessBinding<R>
where
    R: GetBindingResourceId,
{
    /// The sampler, texture, or buffer.
    resource: R,
    /// The number of materials currently allocated within the containing slab
    /// that use this resource.
    ref_count: u32,
}
/// The allocator that stores bind groups for non-bindless materials.
pub struct MaterialBindGroupNonBindlessAllocator {
    /// The label of the bind group allocator to use for allocated buffers.
    label: &'static str,
    /// A mapping from [`MaterialBindGroupIndex`] to the bind group allocated in
    /// each slot.
    bind_groups: Vec<Option<MaterialNonBindlessAllocatedBindGroup>>,
    /// The bind groups that are dirty and need to be prepared.
    ///
    /// To prepare the bind groups, call
    /// [`MaterialBindGroupAllocator::prepare_bind_groups`].
    to_prepare: HashSet<MaterialBindGroupIndex>,
    /// A list of free bind group indices.
    ///
    /// NOTE(review): these presumably index `None` entries in
    /// [`Self::bind_groups`] that can be reused — confirm.
    free_indices: Vec<MaterialBindGroupIndex>,
}
/// A single bind group that a [`MaterialBindGroupNonBindlessAllocator`] is
/// currently managing.
enum MaterialNonBindlessAllocatedBindGroup {
    /// An unprepared bind group.
    ///
    /// The allocator prepares all outstanding unprepared bind groups when
    /// [`MaterialBindGroupNonBindlessAllocator::prepare_bind_groups`] is
    /// called.
    Unprepared {
        /// The unprepared bind group, including extra data.
        bind_group: UnpreparedBindGroup,
        /// The layout of that bind group.
        layout: BindGroupLayoutDescriptor,
    },
    /// A bind group that's already been prepared.
    Prepared {
        /// The prepared bind group.
        bind_group: PreparedBindGroup,
        #[expect(dead_code, reason = "These buffers are only referenced by bind groups")]
        uniform_buffers: Vec<Buffer>,
    },
}
/// Dummy instances of various resources that we fill unused slots in binding
/// arrays with.
#[derive(Resource)]
pub struct FallbackBindlessResources {
    /// A dummy filtering sampler.
    filtering_sampler: Sampler,
    /// A dummy non-filtering sampler.
    non_filtering_sampler: Sampler,
    /// A dummy comparison sampler.
    comparison_sampler: Sampler,
}
/// The `wgpu` ID of a single bindless or non-bindless resource.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum BindingResourceId {
    /// A buffer.
    Buffer(BufferId),
    /// A texture view, with the given dimension.
    ///
    /// The dimension is carried alongside the ID because a `wgpu`
    /// [`TextureView`] doesn't store enough information to determine its own
    /// dimension (see [`GetBindingResourceId`]).
    TextureView(TextureViewDimension, TextureViewId),
    /// A sampler.
    Sampler(SamplerId),
    /// A buffer containing plain old data.
    ///
    /// This corresponds to the `#[data]` structure-level attribute on
    /// `AsBindGroup`.
    DataBuffer,
}
/// A temporary list of references to `wgpu` bindless resources.
///
/// We need this because the `wgpu` bindless API takes a slice of references.
/// Thus we need to create intermediate vectors of bindless resources in order
/// to satisfy `wgpu`'s lifetime requirements.
enum BindingResourceArray<'a> {
    /// A list of bindings.
    Buffers(Vec<BufferBinding<'a>>),
    /// A list of texture views.
    TextureViews(Vec<&'a WgpuTextureView>),
    /// A list of samplers.
    Samplers(Vec<&'a WgpuSampler>),
}
/// The location of a material (either bindless or non-bindless) within the
/// slabs.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Clone, Default)]
pub struct MaterialBindingId {
    /// The index of the bind group (slab) where the GPU data is located.
    pub group: MaterialBindGroupIndex,
    /// The slot within that bind group.
    ///
    /// Non-bindless materials will always have a slot of 0.
    pub slot: MaterialBindGroupSlot,
}
/// The index of each material bind group.
///
/// In bindless mode, each bind group contains multiple materials. In
/// non-bindless mode, each bind group contains only one material.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Reflect, Deref, DerefMut)]
#[reflect(Default, Clone, PartialEq, Hash)]
pub struct MaterialBindGroupIndex(pub u32);
impl From<u32> for MaterialBindGroupIndex {
fn from(value: u32) -> Self {
MaterialBindGroupIndex(value)
}
}
/// The index of the slot containing material data within each material bind
/// group.
///
/// In bindless mode, this slot is needed to locate the material data in each
/// bind group, since multiple materials are packed into a single slab. In
/// non-bindless mode, this slot is always 0.
#[derive(Clone, Copy, Debug, Default, PartialEq, Reflect, Deref, DerefMut)]
#[reflect(Default, Clone, PartialEq)]
pub struct MaterialBindGroupSlot(pub u32);
/// The CPU/GPU synchronization state of a buffer that we maintain.
///
/// Currently, the only buffer that we maintain is the
/// [`MaterialBindlessIndexTable`].
enum BufferDirtyState {
/// The buffer is currently synchronized between the CPU and GPU.
Clean,
/// The buffer hasn't been created yet.
NeedsReserve,
/// The buffer exists on both CPU and GPU, but the GPU data is out of date.
NeedsUpload,
}
/// Information that describes a potential allocation of an
/// [`UnpreparedBindGroup`] into a slab.
struct BindlessAllocationCandidate {
    /// A map that, for every resource in the [`UnpreparedBindGroup`] that
    /// already existed in this slab, maps the bindless index of that resource
    /// to its slot in the appropriate binding array.
    pre_existing_resources: HashMap<BindlessIndex, u32>,
    /// Stores the number of free slots that are needed to satisfy this
    /// allocation.
    needed_free_slots: u32,
}
/// A trait that allows fetching the [`BindingResourceId`] from a
/// [`BindlessResourceType`].
///
/// This is used when freeing bindless resources, in order to locate the IDs
/// assigned to each resource so that they can be removed from the appropriate
/// maps.
///
/// Implemented below for [`Buffer`], [`Sampler`], and [`TextureView`].
trait GetBindingResourceId {
    /// Returns the [`BindingResourceId`] for this resource.
    ///
    /// `resource_type` specifies this resource's type. This is used for
    /// textures, as a `wgpu` [`TextureView`] doesn't store enough information
    /// itself to determine its dimension.
    fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId;
}
/// The public interface to a slab, which represents a single bind group.
///
/// Obtained via [`MaterialBindGroupAllocator::get`].
pub struct MaterialSlab<'a>(MaterialSlabImpl<'a>);
/// The actual implementation of a material slab.
///
/// This has bindless and non-bindless variants, mirroring the two
/// [`MaterialBindGroupAllocator`] strategies.
enum MaterialSlabImpl<'a> {
    /// The implementation of the slab interface we use when the slab
    /// is bindless.
    Bindless(&'a MaterialBindlessSlab),
    /// The implementation of the slab interface we use when the slab
    /// is non-bindless.
    NonBindless(MaterialNonBindlessSlab<'a>),
}
/// A single bind group that the [`MaterialBindGroupNonBindlessAllocator`]
/// manages.
enum MaterialNonBindlessSlab<'a> {
    /// A slab that has a bind group.
    Prepared(&'a PreparedBindGroup),
    /// A slab that doesn't yet have a bind group.
    ///
    /// NOTE(review): the bind group appears to be created later during
    /// `prepare_bind_groups` — confirm against the non-bindless allocator.
    Unprepared,
}
/// Manages an array of untyped plain old data on GPU and allocates individual
/// slots within that array.
///
/// This supports the `#[data]` attribute of `AsBindGroup`.
struct MaterialDataBuffer {
    /// The number of the binding that we attach this storage buffer to.
    binding_number: BindingNumber,
    /// The actual data.
    ///
    /// Note that this is untyped (`u8`); the actual aligned size of each
    /// element is given by [`Self::aligned_element_size`].
    buffer: RetainedRawBufferVec<u8>,
    /// The size of each element in the buffer, including padding and alignment
    /// if any.
    aligned_element_size: u32,
    /// A list of free slots within the buffer, available for reuse by later
    /// allocations.
    free_slots: Vec<u32>,
    /// The actual number of slots that have been allocated.
    len: u32,
}
/// A buffer containing plain old data, already packed into the appropriate GPU
/// format, and that can be updated incrementally.
///
/// This structure exists in order to encapsulate the lazy update
/// ([`BufferDirtyState`]) logic in a single place.
#[derive(Deref, DerefMut)]
struct RetainedRawBufferVec<T>
where
    T: Pod,
{
    /// The contents of the buffer.
    #[deref]
    buffer: RawBufferVec<T>,
    /// Tracks whether the CPU-side contents still need to be reserved on, or
    /// uploaded to, the GPU (see [`BufferDirtyState`]).
    dirty: BufferDirtyState,
}
/// The size of the buffer that we assign to unused buffer slots, in bytes.
///
/// This is essentially arbitrary, as it doesn't seem to matter to `wgpu` what
/// the size is.
///
/// Used when a bindless buffer descriptor doesn't specify an explicit size
/// for its fallback buffer.
const DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE: u64 = 16;
impl From<u32> for MaterialBindGroupSlot {
fn from(value: u32) -> Self {
MaterialBindGroupSlot(value)
}
}
impl From<MaterialBindGroupSlot> for u32 {
fn from(value: MaterialBindGroupSlot) -> Self {
value.0
}
}
impl<'a> From<&'a OwnedBindingResource> for BindingResourceId {
    /// Extracts the ID identifying the underlying GPU resource.
    fn from(value: &'a OwnedBindingResource) -> Self {
        match value {
            OwnedBindingResource::Buffer(buffer) => BindingResourceId::Buffer(buffer.id()),
            OwnedBindingResource::Data(_) => BindingResourceId::DataBuffer,
            OwnedBindingResource::TextureView(dimension, texture_view) => {
                BindingResourceId::TextureView(*dimension, texture_view.id())
            }
            OwnedBindingResource::Sampler(_, sampler) => BindingResourceId::Sampler(sampler.id()),
        }
    }
}
impl GetBindingResourceId for Buffer {
    /// Buffers are identified by their `wgpu` buffer ID alone; the resource
    /// type is irrelevant here.
    fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
        BindingResourceId::Buffer(self.id())
    }
}
impl GetBindingResourceId for Sampler {
    /// Samplers are identified by their `wgpu` sampler ID alone; the resource
    /// type is irrelevant here.
    fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
        BindingResourceId::Sampler(self.id())
    }
}
impl GetBindingResourceId for TextureView {
    /// Builds a [`BindingResourceId`] for this texture view.
    ///
    /// The view's dimension is recovered from `resource_type`, since a `wgpu`
    /// texture view doesn't record its own dimension.
    fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId {
        let dimension = match resource_type {
            BindlessResourceType::Texture1d => TextureViewDimension::D1,
            BindlessResourceType::Texture2d => TextureViewDimension::D2,
            BindlessResourceType::Texture2dArray => TextureViewDimension::D2Array,
            BindlessResourceType::Texture3d => TextureViewDimension::D3,
            BindlessResourceType::TextureCube => TextureViewDimension::Cube,
            BindlessResourceType::TextureCubeArray => TextureViewDimension::CubeArray,
            _ => panic!("Resource type is not a texture"),
        };
        BindingResourceId::TextureView(dimension, self.id())
    }
}
impl MaterialBindGroupAllocator {
    /// Creates a new [`MaterialBindGroupAllocator`] managing the data for a
    /// single material.
    ///
    /// The presence of a `bindless_descriptor` selects the bindless
    /// allocation strategy; when it's `None`, each material gets its own bind
    /// group via the non-bindless allocator.
    pub fn new(
        render_device: &RenderDevice,
        label: &'static str,
        bindless_descriptor: Option<BindlessDescriptor>,
        bind_group_layout: BindGroupLayoutDescriptor,
        slab_capacity: Option<BindlessSlabResourceLimit>,
    ) -> MaterialBindGroupAllocator {
        if let Some(bindless_descriptor) = bindless_descriptor {
            MaterialBindGroupAllocator::Bindless(Box::new(MaterialBindGroupBindlessAllocator::new(
                render_device,
                label,
                bindless_descriptor,
                bind_group_layout,
                slab_capacity,
            )))
        } else {
            MaterialBindGroupAllocator::NonBindless(Box::new(
                MaterialBindGroupNonBindlessAllocator::new(label),
            ))
        }
    }
    /// Returns the slab with the given index, if one exists.
    pub fn get(&self, group: MaterialBindGroupIndex) -> Option<MaterialSlab<'_>> {
        match *self {
            MaterialBindGroupAllocator::Bindless(ref bindless_allocator) => bindless_allocator
                .get(group)
                .map(|bindless_slab| MaterialSlab(MaterialSlabImpl::Bindless(bindless_slab))),
            MaterialBindGroupAllocator::NonBindless(ref non_bindless_allocator) => {
                non_bindless_allocator.get(group).map(|non_bindless_slab| {
                    MaterialSlab(MaterialSlabImpl::NonBindless(non_bindless_slab))
                })
            }
        }
    }
    /// Allocates an [`UnpreparedBindGroup`] and returns the resulting binding ID.
    ///
    /// This method should generally be preferred over
    /// [`Self::allocate_prepared`], because this method supports both bindless
    /// and non-bindless bind groups. Only use [`Self::allocate_prepared`] if
    /// you need to prepare the bind group yourself.
    pub fn allocate_unprepared(
        &mut self,
        unprepared_bind_group: UnpreparedBindGroup,
        bind_group_layout: &BindGroupLayoutDescriptor,
    ) -> MaterialBindingId {
        match *self {
            MaterialBindGroupAllocator::Bindless(
                ref mut material_bind_group_bindless_allocator,
            ) => material_bind_group_bindless_allocator.allocate_unprepared(unprepared_bind_group),
            MaterialBindGroupAllocator::NonBindless(
                ref mut material_bind_group_non_bindless_allocator,
            ) => material_bind_group_non_bindless_allocator
                .allocate_unprepared(unprepared_bind_group, (*bind_group_layout).clone()),
        }
    }
    /// Places a pre-prepared bind group into a slab.
    ///
    /// For bindless materials, the allocator internally manages the bind
    /// groups, so calling this method will panic if this is a bindless
    /// allocator. Only non-bindless allocators support this method.
    ///
    /// It's generally preferred to use [`Self::allocate_unprepared`], because
    /// that method supports both bindless and non-bindless allocators. Only use
    /// this method if you need to prepare the bind group yourself.
    pub fn allocate_prepared(
        &mut self,
        prepared_bind_group: PreparedBindGroup,
    ) -> MaterialBindingId {
        match *self {
            MaterialBindGroupAllocator::Bindless(_) => {
                panic!(
                    "Bindless resources are incompatible with implementing `as_bind_group` \
                    directly; implement `unprepared_bind_group` instead or disable bindless"
                )
            }
            MaterialBindGroupAllocator::NonBindless(ref mut non_bindless_allocator) => {
                non_bindless_allocator.allocate_prepared(prepared_bind_group)
            }
        }
    }
    /// Deallocates the material with the given binding ID.
    ///
    /// Any resources that are no longer referenced are removed from the slab.
    pub fn free(&mut self, material_binding_id: MaterialBindingId) {
        match *self {
            MaterialBindGroupAllocator::Bindless(
                ref mut material_bind_group_bindless_allocator,
            ) => material_bind_group_bindless_allocator.free(material_binding_id),
            MaterialBindGroupAllocator::NonBindless(
                ref mut material_bind_group_non_bindless_allocator,
            ) => material_bind_group_non_bindless_allocator.free(material_binding_id),
        }
    }
    /// Recreates any bind groups corresponding to slabs that have been modified
    /// since last calling [`MaterialBindGroupAllocator::prepare_bind_groups`].
    pub fn prepare_bind_groups(
        &mut self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        fallback_bindless_resources: &FallbackBindlessResources,
        fallback_image: &FallbackImage,
    ) {
        match *self {
            MaterialBindGroupAllocator::Bindless(
                ref mut material_bind_group_bindless_allocator,
            ) => material_bind_group_bindless_allocator.prepare_bind_groups(
                render_device,
                pipeline_cache,
                fallback_bindless_resources,
                fallback_image,
            ),
            MaterialBindGroupAllocator::NonBindless(
                ref mut material_bind_group_non_bindless_allocator,
            ) => material_bind_group_non_bindless_allocator
                .prepare_bind_groups(render_device, pipeline_cache),
        }
    }
    /// Uploads the contents of all buffers that this
    /// [`MaterialBindGroupAllocator`] manages to the GPU.
    ///
    /// Non-bindless allocators don't currently manage any buffers, so this
    /// method only has an effect for bindless allocators.
    pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
        match *self {
            MaterialBindGroupAllocator::Bindless(
                ref mut material_bind_group_bindless_allocator,
            ) => material_bind_group_bindless_allocator.write_buffers(render_device, render_queue),
            MaterialBindGroupAllocator::NonBindless(_) => {
                // Not applicable.
            }
        }
    }
    /// Get number of allocated slabs for bindless material, returns 0 if it is
    /// [`Self::NonBindless`].
    pub fn slab_count(&self) -> usize {
        match self {
            Self::Bindless(bless) => bless.slabs.len(),
            Self::NonBindless(_) => 0,
        }
    }
    /// Get total size of slabs allocated for bindless material, returns 0 if it is
    /// [`Self::NonBindless`].
    ///
    /// Only the plain-old-data buffers (`data_buffers`) of each slab are
    /// counted; textures and samplers are not included.
    pub fn slabs_size(&self) -> usize {
        match self {
            Self::Bindless(bless) => bless
                .slabs
                .iter()
                .flat_map(|slab| {
                    slab.data_buffers
                        .iter()
                        .map(|(_, buffer)| buffer.buffer.len())
                })
                .sum(),
            Self::NonBindless(_) => 0,
        }
    }
    /// Get number of bindless material allocations in slabs, returns 0 if it is
    /// [`Self::NonBindless`].
    pub fn allocations(&self) -> u64 {
        match self {
            Self::Bindless(bless) => bless
                .slabs
                .iter()
                .map(|slab| u64::from(slab.allocated_resource_count))
                .sum(),
            Self::NonBindless(_) => 0,
        }
    }
}
impl MaterialBindlessIndexTable {
    /// Creates a new [`MaterialBindlessIndexTable`] for a single slab.
    fn new(
        bindless_index_table_descriptor: &BindlessIndexTableDescriptor,
    ) -> MaterialBindlessIndexTable {
        // Preallocate space for one bindings table, so that there will always be a buffer.
        let mut buffer = RetainedRawBufferVec::new(BufferUsages::STORAGE);
        for _ in *bindless_index_table_descriptor.indices.start
            ..*bindless_index_table_descriptor.indices.end
        {
            buffer.push(0);
        }
        MaterialBindlessIndexTable {
            buffer,
            index_range: bindless_index_table_descriptor.indices.clone(),
            binding_number: bindless_index_table_descriptor.binding_number,
        }
    }
    /// Returns the bindings in the binding index table.
    ///
    /// If the current [`MaterialBindlessIndexTable::index_range`] is M..N, then
    /// element *i* of the returned binding index table contains the slot of the
    /// bindless resource with bindless index *i* + M.
    fn get(&self, slot: MaterialBindGroupSlot) -> &[u32] {
        // Each material occupies one contiguous row of `struct_size` entries.
        let struct_size = *self.index_range.end as usize - *self.index_range.start as usize;
        let start = struct_size * slot.0 as usize;
        &self.buffer.values()[start..(start + struct_size)]
    }
    /// Returns a single binding from the binding index table.
    ///
    /// Returns `None` if `bindless_index` falls outside this table's
    /// [`Self::index_range`].
    fn get_binding(
        &self,
        slot: MaterialBindGroupSlot,
        bindless_index: BindlessIndex,
    ) -> Option<u32> {
        if bindless_index < self.index_range.start || bindless_index >= self.index_range.end {
            return None;
        }
        self.get(slot)
            .get((*bindless_index - *self.index_range.start) as usize)
            .copied()
    }
    /// The number of entries in one material's row of the table.
    fn table_length(&self) -> u32 {
        self.index_range.end.0 - self.index_range.start.0
    }
    /// Updates the binding index table for a single material.
    ///
    /// The `allocated_resource_slots` map contains a mapping from the
    /// [`BindlessIndex`] of each resource that the material references to the
    /// slot that that resource occupies in the appropriate binding array. This
    /// method serializes that map into a binding index table that the shader
    /// can read.
    fn set(
        &mut self,
        slot: MaterialBindGroupSlot,
        allocated_resource_slots: &HashMap<BindlessIndex, u32>,
    ) {
        let table_len = self.table_length() as usize;
        let range = (slot.0 as usize * table_len)..((slot.0 as usize + 1) * table_len);
        // Grow the table with zeroes until this material's row exists.
        while self.buffer.len() < range.end {
            self.buffer.push(0);
        }
        // Write each allocated resource's slot into this material's row.
        for (&bindless_index, &resource_slot) in allocated_resource_slots {
            if self.index_range.contains(&bindless_index) {
                self.buffer.set(
                    *bindless_index + range.start as u32 - *self.index_range.start,
                    resource_slot,
                );
            }
        }
        // Mark the buffer as needing to be recreated, in case we grew it.
        self.buffer.dirty = BufferDirtyState::NeedsReserve;
    }
    /// Returns the [`BindGroupEntry`] for the index table itself.
    fn bind_group_entry(&self) -> BindGroupEntry<'_> {
        BindGroupEntry {
            binding: *self.binding_number,
            resource: self
                .buffer
                .buffer()
                .expect("Bindings buffer must exist")
                .as_entire_binding(),
        }
    }
}
impl<T> RetainedRawBufferVec<T>
where
    T: Pod,
{
    /// Creates a new empty [`RetainedRawBufferVec`] supporting the given
    /// [`BufferUsages`].
    fn new(buffer_usages: BufferUsages) -> RetainedRawBufferVec<T> {
        RetainedRawBufferVec {
            buffer: RawBufferVec::new(buffer_usages),
            // Start dirty so the first `write` uploads the initial contents.
            dirty: BufferDirtyState::NeedsUpload,
        }
    }
    /// Recreates the GPU backing buffer if needed.
    ///
    /// After reserving, the state becomes `NeedsUpload` so that a subsequent
    /// `write` copies the CPU contents into the freshly created buffer.
    fn prepare(&mut self, render_device: &RenderDevice) {
        match self.dirty {
            BufferDirtyState::Clean | BufferDirtyState::NeedsUpload => {}
            BufferDirtyState::NeedsReserve => {
                let capacity = self.buffer.len();
                self.buffer.reserve(capacity, render_device);
                self.dirty = BufferDirtyState::NeedsUpload;
            }
        }
    }
    /// Writes the current contents of the buffer to the GPU if necessary.
    fn write(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
        match self.dirty {
            BufferDirtyState::Clean => {}
            BufferDirtyState::NeedsReserve | BufferDirtyState::NeedsUpload => {
                self.buffer.write_buffer(render_device, render_queue);
                self.dirty = BufferDirtyState::Clean;
            }
        }
    }
}
impl MaterialBindGroupBindlessAllocator {
/// Creates a new [`MaterialBindGroupBindlessAllocator`] managing the data
/// for a single bindless material.
fn new(
render_device: &RenderDevice,
label: &'static str,
bindless_descriptor: BindlessDescriptor,
bind_group_layout: BindGroupLayoutDescriptor,
slab_capacity: Option<BindlessSlabResourceLimit>,
) -> MaterialBindGroupBindlessAllocator {
let fallback_buffers = bindless_descriptor
.buffers
.iter()
.map(|bindless_buffer_descriptor| {
(
bindless_buffer_descriptor.bindless_index,
render_device.create_buffer(&BufferDescriptor {
label: Some("bindless fallback buffer"),
size: match bindless_buffer_descriptor.size {
Some(size) => size as u64,
None => DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE,
},
usage: BufferUsages::STORAGE,
mapped_at_creation: false,
}),
)
})
.collect();
MaterialBindGroupBindlessAllocator {
label,
slabs: vec![],
bind_group_layout,
bindless_descriptor,
fallback_buffers,
slab_capacity: slab_capacity
.expect("Non-bindless materials should use the non-bindless allocator")
.resolve(),
}
}
/// Allocates the resources for a single material into a slab and returns
/// the resulting ID.
///
/// The returned [`MaterialBindingId`] can later be used to fetch the slab
/// that was used.
///
/// This function can't fail. If all slabs are full, then a new slab is
/// created, and the material is allocated into it.
fn allocate_unprepared(
&mut self,
mut unprepared_bind_group: UnpreparedBindGroup,
) -> MaterialBindingId {
for (slab_index, slab) in self.slabs.iter_mut().enumerate() {
trace!("Trying to allocate in slab {}", slab_index);
match slab.try_allocate(unprepared_bind_group, self.slab_capacity) {
Ok(slot) => {
return MaterialBindingId {
group: MaterialBindGroupIndex(slab_index as u32),
slot,
};
}
Err(bind_group) => unprepared_bind_group = bind_group,
}
}
let group = MaterialBindGroupIndex(self.slabs.len() as u32);
self.slabs
.push(MaterialBindlessSlab::new(&self.bindless_descriptor));
// Allocate into the newly-pushed slab.
let Ok(slot) = self
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/mesh_material.rs | crates/bevy_pbr/src/mesh_material.rs | use crate::Material;
use bevy_asset::{AsAssetId, AssetId, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{component::Component, reflect::ReflectComponent};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use derive_more::derive::From;
/// A [material](Material) used for rendering a [`Mesh3d`].
///
/// See [`Material`] for general information about 3D materials and how to implement your own materials.
///
/// The wrapped [`Handle`] is accessible directly through `Deref`/`DerefMut`.
///
/// [`Mesh3d`]: bevy_mesh::Mesh3d
///
/// # Example
///
/// ```
/// # use bevy_pbr::{Material, MeshMaterial3d, StandardMaterial};
/// # use bevy_ecs::prelude::*;
/// # use bevy_mesh::{Mesh, Mesh3d};
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::Assets;
/// # use bevy_math::primitives::Capsule3d;
/// #
/// // Spawn an entity with a mesh using `StandardMaterial`.
/// fn setup(
///     mut commands: Commands,
///     mut meshes: ResMut<Assets<Mesh>>,
///     mut materials: ResMut<Assets<StandardMaterial>>,
/// ) {
///     commands.spawn((
///         Mesh3d(meshes.add(Capsule3d::default())),
///         MeshMaterial3d(materials.add(StandardMaterial {
///             base_color: RED.into(),
///             ..Default::default()
///         })),
///     ));
/// }
/// ```
#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)]
#[reflect(Component, Default, Clone, PartialEq)]
pub struct MeshMaterial3d<M: Material>(pub Handle<M>);
impl<M: Material> Default for MeshMaterial3d<M> {
fn default() -> Self {
Self(Handle::default())
}
}
impl<M: Material> PartialEq for MeshMaterial3d<M> {
    /// Two material components are equal when their inner handles are equal.
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}
// Marker impl: equality is fully delegated to the inner `Handle<M>` (see the
// `PartialEq` impl above).
impl<M: Material> Eq for MeshMaterial3d<M> {}
impl<M: Material> From<MeshMaterial3d<M>> for AssetId<M> {
    /// Extracts the asset ID of the wrapped handle.
    fn from(material: MeshMaterial3d<M>) -> Self {
        material.0.id()
    }
}
impl<M: Material> From<&MeshMaterial3d<M>> for AssetId<M> {
    /// Extracts the asset ID of the wrapped handle without consuming the
    /// component.
    fn from(material: &MeshMaterial3d<M>) -> Self {
        material.0.id()
    }
}
impl<M: Material> AsAssetId for MeshMaterial3d<M> {
    type Asset = M;
    /// Reports the ID of the material asset this component points at.
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.0.id()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/material.rs | crates/bevy_pbr/src/material.rs | use crate::material_bind_groups::{
FallbackBindlessResources, MaterialBindGroupAllocator, MaterialBindingId,
};
use crate::*;
use alloc::sync::Arc;
use bevy_asset::prelude::AssetChanged;
use bevy_asset::{Asset, AssetEventSystems, AssetId, AssetServer, UntypedAssetId};
use bevy_camera::visibility::ViewVisibility;
use bevy_camera::ScreenSpaceTransmissionQuality;
use bevy_core_pipeline::deferred::{AlphaMask3dDeferred, Opaque3dDeferred};
use bevy_core_pipeline::prepass::{AlphaMask3dPrepass, Opaque3dPrepass};
use bevy_core_pipeline::{
core_3d::{
AlphaMask3d, Opaque3d, Opaque3dBatchSetKey, Opaque3dBinKey, Transmissive3d, Transparent3d,
},
prepass::{OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey},
tonemapping::Tonemapping,
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::change_detection::Tick;
use bevy_ecs::system::SystemChangeTick;
use bevy_ecs::{
prelude::*,
system::{
lifetimeless::{SRes, SResMut},
SystemParamItem,
},
};
use bevy_mesh::{
mark_3d_meshes_as_changed_if_their_assets_changed, Mesh3d, MeshVertexBufferLayoutRef,
};
use bevy_platform::collections::hash_map::Entry;
use bevy_platform::collections::{HashMap, HashSet};
use bevy_platform::hash::FixedHasher;
use bevy_reflect::std_traits::ReflectDefault;
use bevy_reflect::Reflect;
use bevy_render::camera::extract_cameras;
use bevy_render::erased_render_asset::{
ErasedRenderAsset, ErasedRenderAssetPlugin, ErasedRenderAssets, PrepareAssetError,
};
use bevy_render::render_asset::{prepare_assets, RenderAssets};
use bevy_render::renderer::RenderQueue;
use bevy_render::RenderStartup;
use bevy_render::{
batching::gpu_preprocessing::GpuPreprocessingSupport,
extract_resource::ExtractResource,
mesh::RenderMesh,
prelude::*,
render_phase::*,
render_resource::*,
renderer::RenderDevice,
sync_world::MainEntity,
view::{ExtractedView, Msaa, RenderVisibilityRanges, RetainedViewEntity},
Extract,
};
use bevy_render::{mesh::allocator::MeshAllocator, sync_world::MainEntityHashMap};
use bevy_render::{texture::FallbackImage, view::RenderVisibleEntities};
use bevy_shader::{Shader, ShaderDefVal};
use bevy_utils::Parallel;
use core::any::{Any, TypeId};
use core::hash::{BuildHasher, Hasher};
use core::{hash::Hash, marker::PhantomData};
use smallvec::SmallVec;
use tracing::error;
/// The bind group index at which material data is bound in mesh render
/// pipelines (the `@group(#{MATERIAL_BIND_GROUP})` seen in material shaders).
pub const MATERIAL_BIND_GROUP_INDEX: usize = 3;
/// Materials are used alongside [`MaterialPlugin`], [`Mesh3d`], and [`MeshMaterial3d`]
/// to spawn entities that are rendered with a specific [`Material`] type. They serve as an easy to use high level
/// way to render [`Mesh3d`] entities with custom shader logic.
///
/// Materials must implement [`AsBindGroup`] to define how data will be transferred to the GPU and bound in shaders.
/// [`AsBindGroup`] can be derived, which makes generating bindings straightforward. See the [`AsBindGroup`] docs for details.
///
/// # Example
///
/// Here is a simple [`Material`] implementation. The [`AsBindGroup`] derive has many features. To see what else is available,
/// check out the [`AsBindGroup`] documentation.
///
/// ```
/// # use bevy_pbr::{Material, MeshMaterial3d};
/// # use bevy_ecs::prelude::*;
/// # use bevy_image::Image;
/// # use bevy_reflect::TypePath;
/// # use bevy_mesh::{Mesh, Mesh3d};
/// # use bevy_render::render_resource::AsBindGroup;
/// # use bevy_shader::ShaderRef;
/// # use bevy_color::LinearRgba;
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::{Handle, AssetServer, Assets, Asset};
/// # use bevy_math::primitives::Capsule3d;
/// #
/// #[derive(AsBindGroup, Debug, Clone, Asset, TypePath)]
/// pub struct CustomMaterial {
///     // Uniform bindings must implement `ShaderType`, which will be used to convert the value to
///     // its shader-compatible equivalent. Most core math types already implement `ShaderType`.
///     #[uniform(0)]
///     color: LinearRgba,
///     // Images can be bound as textures in shaders. If the Image's sampler is also needed, just
///     // add the sampler attribute with a different binding index.
///     #[texture(1)]
///     #[sampler(2)]
///     color_texture: Handle<Image>,
/// }
///
/// // All functions on `Material` have default impls. You only need to implement the
/// // functions that are relevant for your material.
/// impl Material for CustomMaterial {
///     fn fragment_shader() -> ShaderRef {
///         "shaders/custom_material.wgsl".into()
///     }
/// }
///
/// // Spawn an entity with a mesh using `CustomMaterial`.
/// fn setup(
///     mut commands: Commands,
///     mut meshes: ResMut<Assets<Mesh>>,
///     mut materials: ResMut<Assets<CustomMaterial>>,
///     asset_server: Res<AssetServer>
/// ) {
///     commands.spawn((
///         Mesh3d(meshes.add(Capsule3d::default())),
///         MeshMaterial3d(materials.add(CustomMaterial {
///             color: RED.into(),
///             color_texture: asset_server.load("some_image.png"),
///         })),
///     ));
/// }
/// ```
///
/// In WGSL shaders, the material's binding would look like this:
///
/// ```wgsl
/// @group(#{MATERIAL_BIND_GROUP}) @binding(0) var<uniform> color: vec4<f32>;
/// @group(#{MATERIAL_BIND_GROUP}) @binding(1) var color_texture: texture_2d<f32>;
/// @group(#{MATERIAL_BIND_GROUP}) @binding(2) var color_sampler: sampler;
/// ```
pub trait Material: Asset + AsBindGroup + Clone + Sized {
    /// Returns this material's vertex shader. If [`ShaderRef::Default`] is returned, the default mesh vertex shader
    /// will be used.
    fn vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the default mesh fragment shader
    /// will be used.
    fn fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`AlphaMode`]. Defaults to [`AlphaMode::Opaque`].
    #[inline]
    fn alpha_mode(&self) -> AlphaMode {
        AlphaMode::Opaque
    }
    /// Returns whether this material should be rendered by the deferred or forward renderer,
    /// for `AlphaMode::Opaque` or `AlphaMode::Mask` materials.
    /// If `OpaqueRendererMethod::Auto`, it will default to what is selected in the `DefaultOpaqueRendererMethod` resource.
    #[inline]
    fn opaque_render_method(&self) -> OpaqueRendererMethod {
        OpaqueRendererMethod::Forward
    }
    #[inline]
    /// Add a bias to the view depth of the mesh which can be used to force a specific render order
    /// for meshes with similar depth, to avoid z-fighting.
    /// The bias is in depth-texture units so large values may be needed to overcome small depth differences.
    fn depth_bias(&self) -> f32 {
        0.0
    }
    #[inline]
    /// Returns whether the material would like to read from [`ViewTransmissionTexture`](bevy_core_pipeline::core_3d::ViewTransmissionTexture).
    ///
    /// This allows taking color output from the [`Opaque3d`] pass as an input, (for screen-space transmission) but requires
    /// rendering to take place in a separate [`Transmissive3d`] pass.
    fn reads_view_transmission_texture(&self) -> bool {
        false
    }
    /// Controls if the prepass is enabled for the Material.
    /// For more information about what a prepass is, see the [`bevy_core_pipeline::prepass`] docs.
    #[inline]
    fn enable_prepass() -> bool {
        true
    }
    /// Controls if shadows are enabled for the Material.
    #[inline]
    fn enable_shadows() -> bool {
        true
    }
    /// Returns this material's prepass vertex shader. If [`ShaderRef::Default`] is returned, the default prepass vertex shader
    /// will be used.
    ///
    /// This is used for the various [prepasses](bevy_core_pipeline::prepass) as well as for generating the depth maps
    /// required for shadow mapping.
    fn prepass_vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's prepass fragment shader. If [`ShaderRef::Default`] is returned, the default prepass fragment shader
    /// will be used.
    ///
    /// This is used for the various [prepasses](bevy_core_pipeline::prepass) as well as for generating the depth maps
    /// required for shadow mapping.
    fn prepass_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's deferred vertex shader. If [`ShaderRef::Default`] is returned, the default deferred vertex shader
    /// will be used.
    fn deferred_vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's deferred fragment shader. If [`ShaderRef::Default`] is returned, the default deferred fragment shader
    /// will be used.
    fn deferred_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh fragment shader will be used.
    ///
    /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s.
    ///
    /// See [`crate::meshlet::MeshletMesh`] for limitations.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] prepass fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh prepass fragment shader will be used.
    ///
    /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s.
    ///
    /// See [`crate::meshlet::MeshletMesh`] for limitations.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_prepass_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's [`crate::meshlet::MeshletMesh`] deferred fragment shader. If [`ShaderRef::Default`] is returned,
    /// the default meshlet mesh deferred fragment shader will be used.
    ///
    /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s.
    ///
    /// See [`crate::meshlet::MeshletMesh`] for limitations.
    #[cfg(feature = "meshlet")]
    fn meshlet_mesh_deferred_fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Customizes the default [`RenderPipelineDescriptor`] for a specific entity using the entity's
    /// [`MaterialPipelineKey`] and [`MeshVertexBufferLayoutRef`] as input.
    #[expect(
        unused_variables,
        reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion."
    )]
    #[inline]
    fn specialize(
        pipeline: &MaterialPipeline,
        descriptor: &mut RenderPipelineDescriptor,
        layout: &MeshVertexBufferLayoutRef,
        key: MaterialPipelineKey<Self>,
    ) -> Result<(), SpecializedMeshPipelineError> {
        Ok(())
    }
}
/// A plugin that registers the material resources, systems, and render
/// commands shared by all [`Material`] types.
#[derive(Default)]
pub struct MaterialsPlugin {
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
impl Plugin for MaterialsPlugin {
    /// Registers the prepass plugins plus the material-agnostic resources,
    /// render commands, and systems in the render sub-app.
    fn build(&self, app: &mut App) {
        app.add_plugins((PrepassPipelinePlugin, PrepassPlugin::new(self.debug_flags)));
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<EntitySpecializationTicks>()
                .init_resource::<SpecializedMaterialPipelineCache>()
                .init_resource::<SpecializedMeshPipelines<MaterialPipelineSpecializer>>()
                .init_resource::<LightKeyCache>()
                .init_resource::<LightSpecializationTicks>()
                .init_resource::<SpecializedShadowMaterialPipelineCache>()
                .init_resource::<DrawFunctions<Shadow>>()
                .init_resource::<RenderMaterialInstances>()
                .init_resource::<MaterialBindGroupAllocators>()
                .add_render_command::<Shadow, DrawPrepass>()
                .add_render_command::<Transmissive3d, DrawMaterial>()
                .add_render_command::<Transparent3d, DrawMaterial>()
                .add_render_command::<Opaque3d, DrawMaterial>()
                .add_render_command::<AlphaMask3d, DrawMaterial>()
                .add_systems(RenderStartup, init_material_pipeline)
                .add_systems(
                    Render,
                    (
                        specialize_material_meshes
                            .in_set(RenderSystems::PrepareMeshes)
                            .after(prepare_assets::<RenderMesh>)
                            .after(collect_meshes_for_gpu_building)
                            .after(set_mesh_motion_vector_flags),
                        queue_material_meshes.in_set(RenderSystems::QueueMeshes),
                    ),
                )
                // Bind groups must be assembled before their backing buffers
                // are written, hence the `chain()`.
                .add_systems(
                    Render,
                    (
                        prepare_material_bind_groups,
                        write_material_bind_group_buffers,
                    )
                        .chain()
                        .in_set(RenderSystems::PrepareBindGroups),
                )
                .add_systems(
                    Render,
                    (
                        check_views_lights_need_specialization.in_set(RenderSystems::PrepareAssets),
                        // specialize_shadows also needs to run after prepare_assets::<PreparedMaterial>,
                        // which is fine since ManageViews is after PrepareAssets
                        specialize_shadows
                            .in_set(RenderSystems::ManageViews)
                            .after(prepare_lights),
                        queue_shadows.in_set(RenderSystems::QueueMeshes),
                    ),
                );
        }
    }
}
/// Adds the necessary ECS resources and render logic to enable rendering entities using the given [`Material`]
/// asset type.
pub struct MaterialPlugin<M: Material> {
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
    /// Marker tying this plugin instance to the material type `M`.
    pub _marker: PhantomData<M>,
}
impl<M: Material> Default for MaterialPlugin<M> {
fn default() -> Self {
Self {
debug_flags: RenderDebugFlags::default(),
_marker: Default::default(),
}
}
}
impl<M: Material> Plugin for MaterialPlugin<M>
where
    M::Data: PartialEq + Eq + Hash + Clone,
{
    /// Registers the per-material-type asset, specialization tracking, and
    /// extraction systems for `M`.
    fn build(&self, app: &mut App) {
        app.init_asset::<M>()
            .register_type::<MeshMaterial3d<M>>()
            .init_resource::<EntitiesNeedingSpecialization<M>>()
            .add_plugins((ErasedRenderAssetPlugin::<MeshMaterial3d<M>>::default(),))
            .add_systems(
                PostUpdate,
                (
                    mark_meshes_as_changed_if_their_materials_changed::<M>.ambiguous_with_all(),
                    check_entities_needing_specialization::<M>.after(AssetEventSystems),
                )
                    .after(mark_3d_meshes_as_changed_if_their_assets_changed),
            );
        // Shadow specialization tracking is only needed when this material
        // type casts shadows.
        if M::enable_shadows() {
            app.add_systems(
                PostUpdate,
                check_light_entities_needing_specialization::<M>
                    .after(check_entities_needing_specialization::<M>),
            );
        }
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .add_systems(RenderStartup, add_material_bind_group_allocator::<M>)
                .add_systems(
                    ExtractSchedule,
                    (
                        extract_mesh_materials::<M>.in_set(MaterialExtractionSystems),
                        early_sweep_material_instances::<M>
                            .after(MaterialExtractionSystems)
                            .before(late_sweep_material_instances),
                        // See the comments in
                        // `sweep_entities_needing_specialization` for an
                        // explanation of why the systems are ordered this way.
                        extract_entities_needs_specialization::<M>
                            .in_set(MaterialExtractEntitiesNeedingSpecializationSystems),
                        sweep_entities_needing_specialization::<M>
                            .after(MaterialExtractEntitiesNeedingSpecializationSystems)
                            .after(MaterialExtractionSystems)
                            .after(extract_cameras)
                            .before(late_sweep_material_instances),
                    ),
                );
        }
    }
}
/// Registers a [`MaterialBindGroupAllocator`] for the material type `M`.
///
/// The allocator is keyed by the material's [`TypeId`] so that render commands
/// can look it up later. A bindless descriptor is only supplied when the
/// current render device supports bindless resources for `M`.
fn add_material_bind_group_allocator<M: Material>(
    render_device: Res<RenderDevice>,
    mut bind_group_allocators: ResMut<MaterialBindGroupAllocators>,
) {
    // Only hand the allocator a bindless descriptor when the device can
    // actually use it.
    let bindless_descriptor = if material_uses_bindless_resources::<M>(&render_device) {
        M::bindless_descriptor()
    } else {
        None
    };
    let allocator = MaterialBindGroupAllocator::new(
        &render_device,
        M::label(),
        bindless_descriptor,
        M::bind_group_layout_descriptor(&render_device),
        M::bindless_slot_count(),
    );
    bind_group_allocators.insert(TypeId::of::<M>(), allocator);
}
/// A dummy [`AssetId`] that we use as a placeholder whenever a mesh doesn't
/// have a material.
///
/// This is [`AssetId::invalid`], so it never resolves to a real material
/// asset.
///
/// See the comments in [`RenderMaterialInstances::mesh_material`] for more
/// information.
pub(crate) static DUMMY_MESH_MATERIAL: AssetId<StandardMaterial> =
    AssetId::<StandardMaterial>::invalid();
/// A key uniquely identifying a specialized [`MaterialPipeline`].
pub struct MaterialPipelineKey<M: Material> {
    /// The key for the underlying specialized mesh pipeline.
    pub mesh_key: MeshPipelineKey,
    /// Material-supplied data that further distinguishes pipeline variants.
    pub bind_group_data: M::Data,
}
/// A type-erased version of [`MaterialPipelineKey`], usable as a key for any
/// material type.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ErasedMaterialPipelineKey {
    /// The key for the underlying specialized mesh pipeline.
    pub mesh_key: MeshPipelineKey,
    /// The type-erased material-specific portion of the key.
    pub material_key: ErasedMaterialKey,
    /// The [`TypeId`] of the concrete material type.
    pub type_id: TypeId,
}
/// Render pipeline data for a given [`Material`].
#[derive(Resource, Clone)]
pub struct MaterialPipeline {
    /// The base mesh pipeline that material pipelines are specialized from.
    pub mesh_pipeline: MeshPipeline,
}
/// Specializes the base [`MaterialPipeline`] for a particular material, using
/// that material's [`MaterialProperties`].
pub struct MaterialPipelineSpecializer {
    pub(crate) pipeline: MaterialPipeline,
    pub(crate) properties: Arc<MaterialProperties>,
}
impl SpecializedMeshPipeline for MaterialPipelineSpecializer {
    type Key = ErasedMaterialPipelineKey;
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        // Start from the base mesh pipeline, specialized with the mesh half of
        // the key.
        let mut descriptor = self
            .pipeline
            .mesh_pipeline
            .specialize(key.mesh_key, layout)?;
        // Tell the shaders which bind group slot the material occupies.
        descriptor.vertex.shader_defs.push(ShaderDefVal::UInt(
            "MATERIAL_BIND_GROUP".into(),
            MATERIAL_BIND_GROUP_INDEX as u32,
        ));
        if let Some(ref mut fragment) = descriptor.fragment {
            fragment.shader_defs.push(ShaderDefVal::UInt(
                "MATERIAL_BIND_GROUP".into(),
                MATERIAL_BIND_GROUP_INDEX as u32,
            ));
        };
        // Swap in the material's custom shaders, if it supplies any.
        if let Some(vertex_shader) = self.properties.get_shader(MaterialVertexShader) {
            descriptor.vertex.shader = vertex_shader.clone();
        }
        if let Some(fragment_shader) = self.properties.get_shader(MaterialFragmentShader) {
            // NOTE(review): assumes a fragment state always exists when the
            // material supplies a fragment shader; panics otherwise — confirm.
            descriptor.fragment.as_mut().unwrap().shader = fragment_shader.clone();
        }
        // Insert the material bind group layout at group index 3.
        descriptor
            .layout
            .insert(3, self.properties.material_layout.as_ref().unwrap().clone());
        // Give the material a chance to further customize the descriptor.
        if let Some(specialize) = self.properties.specialize {
            specialize(&self.pipeline, &mut descriptor, layout, key)?;
        }
        // If bindless mode is on, add a `BINDLESS` define.
        if self.properties.bindless {
            descriptor.vertex.shader_defs.push("BINDLESS".into());
            if let Some(ref mut fragment) = descriptor.fragment {
                fragment.shader_defs.push("BINDLESS".into());
            }
        }
        Ok(descriptor)
    }
}
/// Initializes the [`MaterialPipeline`] resource from the base [`MeshPipeline`].
pub fn init_material_pipeline(mut commands: Commands, mesh_pipeline: Res<MeshPipeline>) {
    let mesh_pipeline = mesh_pipeline.clone();
    commands.insert_resource(MaterialPipeline { mesh_pipeline });
}
/// The render command sequence used to draw a mesh with a [`Material`]: set
/// the specialized pipeline, bind the view, view binding array, and mesh bind
/// groups (indices 0-2), bind the material at
/// [`MATERIAL_BIND_GROUP_INDEX`], then issue the draw.
pub type DrawMaterial = (
    SetItemPipeline,
    SetMeshViewBindGroup<0>,
    SetMeshViewBindingArrayBindGroup<1>,
    SetMeshBindGroup<2>,
    SetMaterialBindGroup<MATERIAL_BIND_GROUP_INDEX>,
    DrawMesh,
);
/// Sets the bind group for a given [`Material`] at the configured `I` index.
pub struct SetMaterialBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMaterialBindGroup<I> {
    type Param = (
        SRes<ErasedRenderAssets<PreparedMaterial>>,
        SRes<RenderMaterialInstances>,
        SRes<MaterialBindGroupAllocators>,
    );
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        (materials, material_instances, material_bind_group_allocator): SystemParamItem<
            'w,
            '_,
            Self::Param,
        >,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let materials = materials.into_inner();
        let material_instances = material_instances.into_inner();
        let material_bind_group_allocators = material_bind_group_allocator.into_inner();
        // Any of the lookups below can fail (no material instance recorded for
        // the entity, no allocator for the material type, no prepared
        // material, or no bind group yet); in every such case the item is
        // skipped rather than treated as an error.
        let Some(material_instance) = material_instances.instances.get(&item.main_entity()) else {
            return RenderCommandResult::Skip;
        };
        // Allocators are keyed by the material's concrete type.
        let Some(material_bind_group_allocator) =
            material_bind_group_allocators.get(&material_instance.asset_id.type_id())
        else {
            return RenderCommandResult::Skip;
        };
        let Some(material) = materials.get(material_instance.asset_id) else {
            return RenderCommandResult::Skip;
        };
        let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group)
        else {
            return RenderCommandResult::Skip;
        };
        let Some(bind_group) = material_bind_group.bind_group() else {
            return RenderCommandResult::Skip;
        };
        pass.set_bind_group(I, bind_group, &[]);
        RenderCommandResult::Success
    }
}
/// Stores all extracted instances of all [`Material`]s in the render world.
///
/// Instances of every concrete material type share this one map; each entry
/// stores an untyped asset ID (see [`RenderMaterialInstance`]).
#[derive(Resource, Default)]
pub struct RenderMaterialInstances {
    /// Maps from each entity in the main world to the
    /// [`RenderMaterialInstance`] associated with it.
    pub instances: MainEntityHashMap<RenderMaterialInstance>,
    /// A monotonically-increasing counter, which we use to sweep
    /// [`RenderMaterialInstances::instances`] when the entities and/or required
    /// components are removed.
    pub current_change_tick: Tick,
}
impl RenderMaterialInstances {
    /// Returns the material asset ID recorded for `entity`, falling back to
    /// [`DUMMY_MESH_MATERIAL`] when the entity has no material.
    ///
    /// Meshes almost always have materials, but in very specific circumstances
    /// involving custom pipelines they won't. (See the
    /// `specialized_mesh_pipelines` example.)
    pub(crate) fn mesh_material(&self, entity: MainEntity) -> UntypedAssetId {
        self.instances
            .get(&entity)
            .map(|instance| instance.asset_id)
            .unwrap_or_else(|| DUMMY_MESH_MATERIAL.into())
    }
}
/// The material associated with a single mesh instance in the main world.
///
/// Note that this uses an [`UntypedAssetId`] and isn't generic over the
/// material type, for simplicity.
pub struct RenderMaterialInstance {
    /// The material asset.
    pub asset_id: UntypedAssetId,
    /// The [`RenderMaterialInstances::current_change_tick`] at which this
    /// material instance was last modified.
    ///
    /// The sweep systems compare this against the current tick to distinguish
    /// stale entries from ones rewritten this frame.
    pub last_change_tick: Tick,
}
/// A [`SystemSet`] that contains all `extract_mesh_materials` systems.
///
/// One instance of the system runs per registered material type, all inside
/// this set during the `ExtractSchedule`.
#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)]
pub struct MaterialExtractionSystems;
/// A [`SystemSet`] that contains all `extract_entities_needs_specialization`
/// systems.
///
/// As with [`MaterialExtractionSystems`], one instance runs per material type.
#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)]
pub struct MaterialExtractEntitiesNeedingSpecializationSystems;
/// Maps an [`AlphaMode`] (together with the current MSAA setting) to the
/// corresponding blend-related bits of the [`MeshPipelineKey`].
pub const fn alpha_mode_pipeline_key(alpha_mode: AlphaMode, msaa: &Msaa) -> MeshPipelineKey {
    match alpha_mode {
        AlphaMode::Blend => MeshPipelineKey::BLEND_ALPHA,
        // Premultiplied and Add share the same pipeline key; they're made
        // distinct in the PBR shader, via `premultiply_alpha()`.
        AlphaMode::Premultiplied | AlphaMode::Add => MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA,
        AlphaMode::Multiply => MeshPipelineKey::BLEND_MULTIPLY,
        AlphaMode::Mask(_) => MeshPipelineKey::MAY_DISCARD,
        // Without MSAA, alpha-to-coverage degenerates to plain alpha testing.
        AlphaMode::AlphaToCoverage => {
            if matches!(*msaa, Msaa::Off) {
                MeshPipelineKey::MAY_DISCARD
            } else {
                MeshPipelineKey::BLEND_ALPHA_TO_COVERAGE
            }
        }
        _ => MeshPipelineKey::NONE,
    }
}
/// Maps a [`Tonemapping`] method to the corresponding tonemap-method bits of
/// the [`MeshPipelineKey`].
pub const fn tonemapping_pipeline_key(tonemapping: Tonemapping) -> MeshPipelineKey {
    // Arms are kept in alphabetical order; the mapping is exhaustive so a new
    // `Tonemapping` variant is a compile error here.
    match tonemapping {
        Tonemapping::AcesFitted => MeshPipelineKey::TONEMAP_METHOD_ACES_FITTED,
        Tonemapping::AgX => MeshPipelineKey::TONEMAP_METHOD_AGX,
        Tonemapping::BlenderFilmic => MeshPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC,
        Tonemapping::None => MeshPipelineKey::TONEMAP_METHOD_NONE,
        Tonemapping::Reinhard => MeshPipelineKey::TONEMAP_METHOD_REINHARD,
        Tonemapping::ReinhardLuminance => MeshPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE,
        Tonemapping::SomewhatBoringDisplayTransform => {
            MeshPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM
        }
        Tonemapping::TonyMcMapface => MeshPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE,
    }
}
pub const fn screen_space_specular_transmission_pipeline_key(
screen_space_transmissive_blur_quality: ScreenSpaceTransmissionQuality,
) -> MeshPipelineKey {
match screen_space_transmissive_blur_quality {
ScreenSpaceTransmissionQuality::Low => {
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_LOW
}
ScreenSpaceTransmissionQuality::Medium => {
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_MEDIUM
}
ScreenSpaceTransmissionQuality::High => {
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_HIGH
}
ScreenSpaceTransmissionQuality::Ultra => {
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_ULTRA
}
}
}
/// A system that ensures that
/// [`crate::render::mesh::extract_meshes_for_gpu_building`] re-extracts meshes
/// whose materials changed.
///
/// [`crate::render::mesh::collect_meshes_for_gpu_building`] only considers
/// meshes that were newly extracted, and it writes information from
/// [`RenderMaterialInstances`] into the
/// [`crate::render::mesh::MeshInputUniform`]. So when a mesh's material
/// changes, we must force its re-extraction, or the material binding
/// information in the [`crate::render::mesh::MeshInputUniform`] could go
/// stale. The easiest way to force re-extraction is to mark the [`Mesh3d`] as
/// changed, which is all this system does.
fn mark_meshes_as_changed_if_their_materials_changed<M>(
    mut changed_meshes_query: Query<
        &mut Mesh3d,
        Or<(Changed<MeshMaterial3d<M>>, AssetChanged<MeshMaterial3d<M>>)>,
    >,
) where
    M: Material,
{
    changed_meshes_query
        .iter_mut()
        .for_each(|mut mesh| mesh.set_changed());
}
/// Fills the [`RenderMaterialInstances`] resource from the meshes in the
/// scene.
///
/// Visible meshes are (re)inserted with the current change tick; meshes whose
/// `ViewVisibility` reports hidden are removed from the map.
fn extract_mesh_materials<M: Material>(
    mut material_instances: ResMut<RenderMaterialInstances>,
    changed_meshes_query: Extract<
        Query<
            (Entity, &ViewVisibility, &MeshMaterial3d<M>),
            Or<(Changed<ViewVisibility>, Changed<MeshMaterial3d<M>>)>,
        >,
    >,
) {
    let tick = material_instances.current_change_tick;
    for (entity, view_visibility, material) in &changed_meshes_query {
        let main_entity = MainEntity::from(entity);
        if !view_visibility.get() {
            // Hidden meshes are dropped from the map entirely.
            material_instances.instances.remove(&main_entity);
            continue;
        }
        let instance = RenderMaterialInstance {
            asset_id: material.id().untyped(),
            last_change_tick: tick,
        };
        material_instances.instances.insert(main_entity, instance);
    }
}
/// Removes mesh materials from [`RenderMaterialInstances`] when their
/// [`MeshMaterial3d`] components are removed.
///
/// A material of type A can be removed and replaced with a material of type B
/// on the same entity in the same frame (a fairly common operation). In that
/// case an entry appears in `RemovedComponents<MeshMaterial3d<A>>`, but the
/// `RenderMaterialInstances` entry now describes material B and must be kept.
/// We detect this via change ticks: an entry written this frame is never
/// swept.
///
/// This is the first of two sweep phases. Because it runs once per material
/// type, a second phase (`late_sweep_material_instances`) exists to guarantee
/// that [`RenderMaterialInstances::current_change_tick`] is bumped exactly
/// once.
fn early_sweep_material_instances<M>(
    mut material_instances: ResMut<RenderMaterialInstances>,
    mut removed_materials_query: Extract<RemovedComponents<MeshMaterial3d<M>>>,
) where
    M: Material,
{
    let current_tick = material_instances.current_change_tick;
    for removed in removed_materials_query.read() {
        let Entry::Occupied(entry) = material_instances.instances.entry(removed.into()) else {
            continue;
        };
        // An entry touched this frame was replaced by a material of another
        // type; leave it alone.
        if entry.get().last_change_tick != current_tick {
            entry.remove();
        }
    }
}
/// Removes mesh materials from [`RenderMaterialInstances`] when their
/// [`Mesh3d`] components are removed.
///
/// This runs after all invocations of `early_sweep_material_instances` and is
/// responsible for bumping [`RenderMaterialInstances::current_change_tick`] in
/// preparation for a new frame.
pub fn late_sweep_material_instances(
    mut material_instances: ResMut<RenderMaterialInstances>,
    mut removed_meshes_query: Extract<RemovedComponents<Mesh3d>>,
) {
    let current_tick = material_instances.current_change_tick;
    for removed in removed_meshes_query.read() {
        let Entry::Occupied(entry) = material_instances.instances.entry(removed.into()) else {
            continue;
        };
        // Only sweep entries not written this frame: the component may have
        // been removed and re-added within the same frame.
        if entry.get().last_change_tick != current_tick {
            entry.remove();
        }
    }
    // Advance the tick so next frame's writes are distinguishable from this
    // frame's.
    let next_tick = current_tick.get() + 1;
    material_instances.current_change_tick.set(next_tick);
}
/// Stamps every entity in [`EntitiesNeedingSpecialization`] with this run's
/// specialization ticks.
pub fn extract_entities_needs_specialization<M>(
    entities_needing_specialization: Extract<Res<EntitiesNeedingSpecialization<M>>>,
    mut entity_specialization_ticks: ResMut<EntitySpecializationTicks>,
    render_material_instances: Res<RenderMaterialInstances>,
    ticks: SystemChangeTick,
) where
    M: Material,
{
    // Both ticks are constant for the duration of this run.
    let system_tick = ticks.this_run();
    let material_instances_tick = render_material_instances.current_change_tick;
    for entity in entities_needing_specialization.iter() {
        // Record that this entity needs re-specialization as of this run.
        let tick_pair = EntitySpecializationTickPair {
            system_tick,
            material_instances_tick,
        };
        entity_specialization_ticks.insert((*entity).into(), tick_pair);
    }
}
/// A system that runs after all instances of
/// [`extract_entities_needs_specialization`] in order to delete specialization
/// ticks for entities that are no longer renderable.
///
/// We delete entities from the [`EntitySpecializationTicks`] table *after*
/// updating it with newly-discovered renderable entities in order to handle the
/// case in which a single entity changes material types. If we naïvely removed
/// entities from that table when their [`MeshMaterial3d<M>`] components were
/// removed, and an entity changed material types, we might end up adding a new
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/components.rs | crates/bevy_pbr/src/components.rs | use bevy_derive::{Deref, DerefMut};
use bevy_ecs::component::Component;
use bevy_ecs::entity::{Entity, EntityHashMap};
use bevy_ecs::reflect::ReflectComponent;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::sync_world::MainEntity;
/// A list of visible mesh entities, stored as pairs of render-world
/// [`Entity`] and corresponding main-world [`MainEntity`].
#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)]
#[reflect(Component, Debug, Default, Clone)]
pub struct RenderVisibleMeshEntities {
    /// The `(render entity, main entity)` pair for each visible mesh.
    #[reflect(ignore, clone)]
    pub entities: Vec<(Entity, MainEntity)>,
}
/// Visible mesh entities for each of the six faces of a cubemap.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Debug, Default, Clone)]
pub struct RenderCubemapVisibleEntities {
    /// One [`RenderVisibleMeshEntities`] list per cubemap face, indexed 0-5.
    #[reflect(ignore, clone)]
    pub(crate) data: [RenderVisibleMeshEntities; 6],
}
impl RenderCubemapVisibleEntities {
    /// Returns the visible entities for cubemap face `i`.
    ///
    /// # Panics
    ///
    /// Panics if `i >= 6`.
    pub fn get(&self, i: usize) -> &RenderVisibleMeshEntities {
        &self.data[i]
    }
    /// Returns a mutable reference to the visible entities for cubemap face `i`.
    ///
    /// # Panics
    ///
    /// Panics if `i >= 6`.
    pub fn get_mut(&mut self, i: usize) -> &mut RenderVisibleMeshEntities {
        &mut self.data[i]
    }
    /// Iterates over the visible-entity lists of all six faces.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &RenderVisibleMeshEntities> {
        self.data.iter()
    }
    /// Mutably iterates over the visible-entity lists of all six faces.
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut RenderVisibleMeshEntities> {
        self.data.iter_mut()
    }
}
/// Maps each view entity to the mesh entities visible in each of that view's
/// cascade frusta.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Default, Clone)]
pub struct RenderCascadesVisibleEntities {
    /// Map of view entity to the visible entities for each cascade frustum.
    #[reflect(ignore, clone)]
    pub entities: EntityHashMap<Vec<RenderVisibleMeshEntities>>,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/fog.rs | crates/bevy_pbr/src/fog.rs | use bevy_camera::Camera;
use bevy_color::{Color, ColorToComponents, LinearRgba};
use bevy_ecs::prelude::*;
use bevy_math::{ops, Vec3};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::extract_component::ExtractComponent;
/// Configures the “classic” computer graphics [distance fog](https://en.wikipedia.org/wiki/Distance_fog) effect,
/// in which objects appear progressively more covered in atmospheric haze the further away they are from the camera.
/// Affects meshes rendered via the PBR [`StandardMaterial`](crate::StandardMaterial).
///
/// ## Falloff
///
/// The rate at which fog intensity increases with distance is controlled by the falloff mode.
/// Currently, the following fog falloff modes are supported:
///
/// - [`FogFalloff::Linear`]
/// - [`FogFalloff::Exponential`]
/// - [`FogFalloff::ExponentialSquared`]
/// - [`FogFalloff::Atmospheric`]
///
/// ## Example
///
/// ```
/// # use bevy_ecs::prelude::*;
/// # use bevy_render::prelude::*;
/// # use bevy_camera::prelude::*;
/// # use bevy_pbr::prelude::*;
/// # use bevy_color::Color;
/// # fn system(mut commands: Commands) {
/// commands.spawn((
/// // Setup your camera as usual
/// Camera3d::default(),
/// // Add fog to the same entity
/// DistanceFog {
/// color: Color::WHITE,
/// falloff: FogFalloff::Exponential { density: 1e-3 },
/// ..Default::default()
/// },
/// ));
/// # }
/// # bevy_ecs::system::assert_is_system(system);
/// ```
///
/// ## Material Override
///
/// Once enabled for a specific camera, the fog effect can also be disabled for individual
/// [`StandardMaterial`](crate::StandardMaterial) instances via the `fog_enabled` flag.
#[derive(Debug, Clone, Component, Reflect, ExtractComponent)]
#[extract_component_filter(With<Camera>)]
#[reflect(Component, Default, Debug, Clone)]
pub struct DistanceFog {
    /// The color of the fog effect.
    ///
    /// **Tip:** The alpha channel of the color can be used to “modulate” the fog effect without
    /// changing the fog falloff mode or parameters.
    pub color: Color,
    /// Color used to modulate the influence of directional light colors on the
    /// fog, where the view direction aligns with each directional light direction,
    /// producing a “glow” or light dispersion effect. (e.g. around the sun)
    ///
    /// Use [`Color::NONE`] to disable the effect.
    pub directional_light_color: Color,
    /// The exponent applied to the directional light alignment calculation.
    /// A higher value means a more concentrated “glow”.
    pub directional_light_exponent: f32,
    /// Determines which falloff mode to use, and its parameters.
    ///
    /// See [`FogFalloff`] for the available modes and the convenience
    /// constructors for each.
    pub falloff: FogFalloff,
}
/// Allows switching between different fog falloff modes, and configuring their parameters.
///
/// ## Convenience Methods
///
/// When using non-linear fog modes it can be hard to determine the right parameter values
/// for a given scene.
///
/// For easier artistic control, instead of creating the enum variants directly, you can use the
/// visibility-based convenience methods:
///
/// - For `FogFalloff::Exponential`:
/// - [`FogFalloff::from_visibility()`]
/// - [`FogFalloff::from_visibility_contrast()`]
///
/// - For `FogFalloff::ExponentialSquared`:
/// - [`FogFalloff::from_visibility_squared()`]
/// - [`FogFalloff::from_visibility_contrast_squared()`]
///
/// - For `FogFalloff::Atmospheric`:
/// - [`FogFalloff::from_visibility_color()`]
/// - [`FogFalloff::from_visibility_colors()`]
/// - [`FogFalloff::from_visibility_contrast_color()`]
/// - [`FogFalloff::from_visibility_contrast_colors()`]
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone)]
pub enum FogFalloff {
    /// A linear fog falloff that grows in intensity between `start` and `end` distances.
    ///
    /// This falloff mode is simpler to control than other modes, however it can produce results that look “artificial”, depending on the scene.
    ///
    /// ## Formula
    ///
    /// The fog intensity for a given point in the scene is determined by the following formula:
    ///
    /// ```text
    /// let fog_intensity = 1.0 - ((end - distance) / (end - start)).clamp(0.0, 1.0);
    /// ```
    ///
    /// <svg width="370" height="212" viewBox="0 0 370 212" fill="none">
    /// <title>Plot showing how linear fog falloff behaves for start and end values of 0.8 and 2.2, respectively.</title>
    /// <path d="M331 151H42V49" stroke="currentColor" stroke-width="2"/>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="136" y="173.864">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="30" y="53.8636">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="42" y="173.864">0</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="232" y="173.864">2</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="332" y="173.864">3</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="161" y="190.864">distance</tspan></text>
    /// <text font-family="sans-serif" transform="translate(10 132) rotate(-90)" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="0" y="11.8636">fog intensity</tspan></text>
    /// <path d="M43 150H117.227L263 48H331" stroke="#FF00E5"/>
    /// <path d="M118 151V49" stroke="#FF00E5" stroke-dasharray="1 4"/>
    /// <path d="M263 151V49" stroke="#FF00E5" stroke-dasharray="1 4"/>
    /// <text font-family="sans-serif" fill="#FF00E5" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="121" y="58.6364">start</tspan></text>
    /// <text font-family="sans-serif" fill="#FF00E5" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="267" y="58.6364">end</tspan></text>
    /// </svg>
    Linear {
        /// Distance from the camera where fog is completely transparent, in world units.
        start: f32,
        /// Distance from the camera where fog is completely opaque, in world units.
        end: f32,
    },
    /// An exponential fog falloff with a given `density`.
    ///
    /// Initially gains intensity quickly with distance, then more slowly. Typically produces more natural results than [`FogFalloff::Linear`],
    /// but is a bit harder to control.
    ///
    /// To move the fog “further away”, use lower density values. To move it “closer” use higher density values.
    ///
    /// ## Tips
    ///
    /// - Use the [`FogFalloff::from_visibility()`] convenience method to create an exponential falloff with the proper
    ///   density for a desired visibility distance in world units;
    /// - It's not _unusual_ to have very large or very small values for the density, depending on the scene
    ///   scale. Typically, for scenes with objects in the scale of thousands of units, you might want density values
    ///   in the ballpark of `0.001`. Conversely, for really small scale scenes you might want really high values of
    ///   density;
    /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control.
    ///
    /// ## Formula
    ///
    /// The fog intensity for a given point in the scene is determined by the following formula:
    ///
    /// ```text
    /// let fog_intensity = 1.0 - 1.0 / (distance * density).exp();
    /// ```
    ///
    /// <svg width="370" height="212" viewBox="0 0 370 212" fill="none">
    /// <title>Plot showing how exponential fog falloff behaves for different density values</title>
    /// <mask id="mask0_3_31" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="42" y="42" width="286" height="108">
    /// <rect x="42" y="42" width="286" height="108" fill="#D9D9D9"/>
    /// </mask>
    /// <g mask="url(#mask0_3_31)">
    /// <path d="M42 150C42 150 98.3894 53 254.825 53L662 53" stroke="#FF003D" stroke-width="1"/>
    /// <path d="M42 150C42 150 139.499 53 409.981 53L1114 53" stroke="#001AFF" stroke-width="1"/>
    /// <path d="M42 150C42 150 206.348 53 662.281 53L1849 53" stroke="#14FF00" stroke-width="1"/>
    /// </g>
    /// <path d="M331 151H42V49" stroke="currentColor" stroke-width="2"/>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="136" y="173.864">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="30" y="53.8636">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="42" y="173.864">0</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="232" y="173.864">2</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="332" y="173.864">3</tspan></text>
    /// <text font-family="sans-serif" fill="#FF003D" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="77" y="64.6364">density = 2</tspan></text>
    /// <text font-family="sans-serif" fill="#001AFF" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="236" y="76.6364">density = 1</tspan></text>
    /// <text font-family="sans-serif" fill="#14FF00" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="205" y="115.636">density = 0.5</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="161" y="190.864">distance</tspan></text>
    /// <text font-family="sans-serif" transform="translate(10 132) rotate(-90)" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="0" y="11.8636">fog intensity</tspan></text>
    /// </svg>
    Exponential {
        /// Multiplier applied to the world distance (within the exponential fog falloff calculation).
        density: f32,
    },
    /// A squared exponential fog falloff with a given `density`.
    ///
    /// Similar to [`FogFalloff::Exponential`], but grows more slowly in intensity for closer distances
    /// before “catching up”.
    ///
    /// To move the fog “further away”, use lower density values. To move it “closer” use higher density values.
    ///
    /// ## Tips
    ///
    /// - Use the [`FogFalloff::from_visibility_squared()`] convenience method to create an exponential squared falloff
    ///   with the proper density for a desired visibility distance in world units;
    /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control.
    ///
    /// ## Formula
    ///
    /// The fog intensity for a given point in the scene is determined by the following formula:
    ///
    /// ```text
    /// let fog_intensity = 1.0 - 1.0 / (distance * density).squared().exp();
    /// ```
    ///
    /// <svg width="370" height="212" viewBox="0 0 370 212" fill="none">
    /// <title>Plot showing how exponential squared fog falloff behaves for different density values</title>
    /// <mask id="mask0_1_3" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="42" y="42" width="286" height="108">
    /// <rect x="42" y="42" width="286" height="108" fill="#D9D9D9"/>
    /// </mask>
    /// <g mask="url(#mask0_1_3)">
    /// <path d="M42 150C75.4552 150 74.9241 53.1724 166.262 53.1724L404 53.1724" stroke="#FF003D" stroke-width="1"/>
    /// <path d="M42 150C107.986 150 106.939 53.1724 287.091 53.1724L756 53.1724" stroke="#001AFF" stroke-width="1"/>
    /// <path d="M42 150C166.394 150 164.42 53.1724 504.035 53.1724L1388 53.1724" stroke="#14FF00" stroke-width="1"/>
    /// </g>
    /// <path d="M331 151H42V49" stroke="currentColor" stroke-width="2"/>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="136" y="173.864">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="30" y="53.8636">1</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="42" y="173.864">0</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="232" y="173.864">2</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="332" y="173.864">3</tspan></text>
    /// <text font-family="sans-serif" fill="#FF003D" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="61" y="54.6364">density = 2</tspan></text>
    /// <text font-family="sans-serif" fill="#001AFF" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="168" y="84.6364">density = 1</tspan></text>
    /// <text font-family="sans-serif" fill="#14FF00" style="white-space: pre" font-size="10" letter-spacing="0em"><tspan x="174" y="121.636">density = 0.5</tspan></text>
    /// <text font-family="sans-serif" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="161" y="190.864">distance</tspan></text>
    /// <text font-family="sans-serif" transform="translate(10 132) rotate(-90)" fill="currentColor" style="white-space: pre" font-size="12" letter-spacing="0em"><tspan x="0" y="11.8636">fog intensity</tspan></text>
    /// </svg>
    ExponentialSquared {
        /// Multiplier applied to the world distance (within the exponential squared fog falloff calculation).
        density: f32,
    },
    /// A more general form of the [`FogFalloff::Exponential`] mode. The falloff formula is separated into
    /// two terms, `extinction` and `inscattering`, for a somewhat simplified atmospheric scattering model.
    /// Additionally, individual color channels can have their own density values, resulting in a total of
    /// six different configuration parameters.
    ///
    /// ## Tips
    ///
    /// - Use the [`FogFalloff::from_visibility_colors()`] or [`FogFalloff::from_visibility_color()`] convenience methods
    ///   to create an atmospheric falloff with the proper densities for a desired visibility distance in world units and
    ///   extinction and inscattering colors;
    /// - Combine the atmospheric fog parameters with the [`DistanceFog`] `color`'s alpha channel for easier artistic control.
    ///
    /// ## Formula
    ///
    /// Unlike other modes, atmospheric falloff doesn't use a simple intensity-based blend of fog color with
    /// object color. Instead, it calculates per-channel extinction and inscattering factors, which are
    /// then used to calculate the final color.
    ///
    /// ```text
    /// let extinction_factor = 1.0 - 1.0 / (distance * extinction).exp();
    /// let inscattering_factor = 1.0 - 1.0 / (distance * inscattering).exp();
    /// let result = input_color * (1.0 - extinction_factor) + fog_color * inscattering_factor;
    /// ```
    ///
    /// ## Equivalence to [`FogFalloff::Exponential`]
    ///
    /// For a density value of `D`, the following two falloff modes will produce identical visual results:
    ///
    /// ```
    /// # use bevy_pbr::prelude::*;
    /// # use bevy_math::prelude::*;
    /// # const D: f32 = 0.5;
    /// #
    /// let exponential = FogFalloff::Exponential {
    ///     density: D,
    /// };
    ///
    /// let atmospheric = FogFalloff::Atmospheric {
    ///     extinction: Vec3::new(D, D, D),
    ///     inscattering: Vec3::new(D, D, D),
    /// };
    /// ```
    ///
    /// **Note:** While the results are identical, [`FogFalloff::Atmospheric`] is computationally more expensive.
    Atmospheric {
        /// Controls how much light is removed due to atmospheric “extinction”, i.e. loss of light due to
        /// photons being absorbed by atmospheric particles.
        ///
        /// Each component can be thought of as an independent per `R`/`G`/`B` channel `density` factor from
        /// [`FogFalloff::Exponential`]: Multiplier applied to the world distance (within the fog
        /// falloff calculation) for that specific channel.
        ///
        /// **Note:**
        /// This value is not a `Color`, since it affects the channels exponentially in a non-intuitive way.
        /// For artistic control, use the [`FogFalloff::from_visibility_colors()`] convenience method.
        extinction: Vec3,
        /// Controls how much light is added due to light scattering from the sun through the atmosphere.
        ///
        /// Each component can be thought of as an independent per `R`/`G`/`B` channel `density` factor from
        /// [`FogFalloff::Exponential`]: A multiplier applied to the world distance (within the fog
        /// falloff calculation) for that specific channel.
        ///
        /// **Note:**
        /// This value is not a `Color`, since it affects the channels exponentially in a non-intuitive way.
        /// For artistic control, use the [`FogFalloff::from_visibility_colors()`] convenience method.
        inscattering: Vec3,
    },
}
impl FogFalloff {
/// Creates a [`FogFalloff::Exponential`] value from the given visibility distance in world units,
/// using the revised Koschmieder contrast threshold, [`FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD`].
pub fn from_visibility(visibility: f32) -> FogFalloff {
FogFalloff::from_visibility_contrast(
visibility,
FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD,
)
}
/// Creates a [`FogFalloff::Exponential`] value from the given visibility distance in world units,
/// and a given contrast threshold in the range of `0.0` to `1.0`.
pub fn from_visibility_contrast(visibility: f32, contrast_threshold: f32) -> FogFalloff {
FogFalloff::Exponential {
density: FogFalloff::koschmieder(visibility, contrast_threshold),
}
}
/// Creates a [`FogFalloff::ExponentialSquared`] value from the given visibility distance in world units,
/// using the revised Koschmieder contrast threshold, [`FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD`].
pub fn from_visibility_squared(visibility: f32) -> FogFalloff {
FogFalloff::from_visibility_contrast_squared(
visibility,
FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD,
)
}
/// Creates a [`FogFalloff::ExponentialSquared`] value from the given visibility distance in world units,
/// and a given contrast threshold in the range of `0.0` to `1.0`.
pub fn from_visibility_contrast_squared(
visibility: f32,
contrast_threshold: f32,
) -> FogFalloff {
FogFalloff::ExponentialSquared {
density: (FogFalloff::koschmieder(visibility, contrast_threshold) / visibility).sqrt(),
}
}
/// Creates a [`FogFalloff::Atmospheric`] value from the given visibility distance in world units,
/// and a shared color for both extinction and inscattering, using the revised Koschmieder contrast threshold,
/// [`FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD`].
pub fn from_visibility_color(
visibility: f32,
extinction_inscattering_color: Color,
) -> FogFalloff {
FogFalloff::from_visibility_contrast_colors(
visibility,
FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD,
extinction_inscattering_color,
extinction_inscattering_color,
)
}
/// Creates a [`FogFalloff::Atmospheric`] value from the given visibility distance in world units,
/// extinction and inscattering colors, using the revised Koschmieder contrast threshold,
/// [`FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD`].
///
/// ## Tips
/// - Alpha values of the provided colors can modulate the `extinction` and `inscattering` effects;
/// - Using an `extinction_color` of [`Color::WHITE`] or [`Color::NONE`] disables the extinction effect;
/// - Using an `inscattering_color` of [`Color::BLACK`] or [`Color::NONE`] disables the inscattering effect.
pub fn from_visibility_colors(
visibility: f32,
extinction_color: Color,
inscattering_color: Color,
) -> FogFalloff {
FogFalloff::from_visibility_contrast_colors(
visibility,
FogFalloff::REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD,
extinction_color,
inscattering_color,
)
}
/// Creates a [`FogFalloff::Atmospheric`] value from the given visibility distance in world units,
/// a contrast threshold in the range of `0.0` to `1.0`, and a shared color for both extinction and inscattering.
pub fn from_visibility_contrast_color(
visibility: f32,
contrast_threshold: f32,
extinction_inscattering_color: Color,
) -> FogFalloff {
FogFalloff::from_visibility_contrast_colors(
visibility,
contrast_threshold,
extinction_inscattering_color,
extinction_inscattering_color,
)
}
/// Creates a [`FogFalloff::Atmospheric`] value from the given visibility distance in world units,
/// a contrast threshold in the range of `0.0` to `1.0`, extinction and inscattering colors.
///
/// ## Tips
/// - Alpha values of the provided colors can modulate the `extinction` and `inscattering` effects;
/// - Using an `extinction_color` of [`Color::WHITE`] or [`Color::NONE`] disables the extinction effect;
/// - Using an `inscattering_color` of [`Color::BLACK`] or [`Color::NONE`] disables the inscattering effect.
pub fn from_visibility_contrast_colors(
visibility: f32,
contrast_threshold: f32,
extinction_color: Color,
inscattering_color: Color,
) -> FogFalloff {
use core::f32::consts::E;
let [r_e, g_e, b_e, a_e] = LinearRgba::from(extinction_color).to_f32_array();
let [r_i, g_i, b_i, a_i] = LinearRgba::from(inscattering_color).to_f32_array();
FogFalloff::Atmospheric {
extinction: Vec3::new(
// Values are subtracted from 1.0 here to preserve the intuitive/artistic meaning of
// colors, since they're later subtracted. (e.g. by giving a blue extinction color, you
// get blue and _not_ yellow results)
ops::powf(1.0 - r_e, E),
ops::powf(1.0 - g_e, E),
ops::powf(1.0 - b_e, E),
) * FogFalloff::koschmieder(visibility, contrast_threshold)
* ops::powf(a_e, E),
inscattering: Vec3::new(ops::powf(r_i, E), ops::powf(g_i, E), ops::powf(b_i, E))
* FogFalloff::koschmieder(visibility, contrast_threshold)
* ops::powf(a_i, E),
}
}
/// A 2% contrast threshold was originally proposed by Koschmieder, being the
/// minimum visual contrast at which a human observer could detect an object.
/// We use a revised 5% contrast threshold, deemed more realistic for typical human observers.
pub const REVISED_KOSCHMIEDER_CONTRAST_THRESHOLD: f32 = 0.05;
/// Calculates the extinction coefficient β, from V and Cₜ, where:
///
/// - Cₜ is the contrast threshold, in the range of `0.0` to `1.0`
/// - V is the visibility distance in which a perfectly black object is still identifiable
/// against the horizon sky within the contrast threshold
///
/// We start with Koschmieder's equation:
///
/// ```text
/// -ln(Cₜ)
/// V = ─────────
/// β
/// ```
///
/// Multiplying both sides by β/V, that gives us:
///
/// ```text
/// -ln(Cₜ)
/// β = ─────────
/// V
/// ```
///
/// See:
/// - <https://en.wikipedia.org/wiki/Visibility>
/// - <https://www.biral.com/wp-content/uploads/2015/02/Introduction_to_visibility-v2-2.pdf>
pub fn koschmieder(v: f32, c_t: f32) -> f32 {
-ops::ln(c_t) / v
}
}
impl Default for DistanceFog {
    /// White linear fog fading in between 0 and 100 world units, with the
    /// directional-light highlight disabled (fully transparent color).
    fn default() -> Self {
        Self {
            falloff: FogFalloff::Linear {
                start: 0.0,
                end: 100.0,
            },
            color: Color::WHITE,
            directional_light_color: Color::NONE,
            directional_light_exponent: 8.0,
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/wireframe.rs | crates/bevy_pbr/src/wireframe.rs | use crate::{
DrawMesh, MeshPipeline, MeshPipelineKey, RenderLightmaps, RenderMeshInstanceFlags,
RenderMeshInstances, SetMeshBindGroup, SetMeshViewBindGroup, SetMeshViewBindingArrayBindGroup,
ViewKeyCache, ViewSpecializationTicks,
};
use bevy_app::{App, Plugin, PostUpdate, Startup, Update};
use bevy_asset::{
embedded_asset, load_embedded_asset, prelude::AssetChanged, AsAssetId, Asset, AssetApp,
AssetEventSystems, AssetId, AssetServer, Assets, Handle, UntypedAssetId,
};
use bevy_camera::{visibility::ViewVisibility, Camera, Camera3d};
use bevy_color::{Color, ColorToComponents};
use bevy_core_pipeline::core_3d::graph::{Core3d, Node3d};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
change_detection::Tick,
prelude::*,
query::QueryItem,
system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem},
};
use bevy_mesh::{Mesh3d, MeshVertexBufferLayoutRef};
use bevy_platform::{
collections::{HashMap, HashSet},
hash::FixedHasher,
};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport},
camera::{extract_cameras, ExtractedCamera},
diagnostic::RecordDiagnostics,
extract_resource::ExtractResource,
mesh::{
allocator::{MeshAllocator, SlabId},
RenderMesh,
},
prelude::*,
render_asset::{
prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets,
},
render_graph::{NodeRunError, RenderGraphContext, RenderGraphExt, ViewNode, ViewNodeRunner},
render_phase::{
AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType,
CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, PhaseItem,
PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult,
SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases,
},
render_resource::*,
renderer::{RenderContext, RenderDevice},
sync_world::{MainEntity, MainEntityHashMap},
view::{
ExtractedView, NoIndirectDrawing, RenderVisibilityRanges, RenderVisibleEntities,
RetainedViewEntity, ViewDepthTexture, ViewTarget,
},
Extract, Render, RenderApp, RenderDebugFlags, RenderStartup, RenderSystems,
};
use bevy_shader::Shader;
use core::{hash::Hash, ops::Range};
use tracing::{error, warn};
/// A [`Plugin`] that draws wireframes.
///
/// Wireframes currently do not work when using webgl or webgpu.
/// Supported rendering backends:
/// - DX12
/// - Vulkan
/// - Metal
///
/// This is a native only feature.
///
/// The plugin requires the `POLYGON_MODE_LINE` and `PUSH_CONSTANTS` GPU
/// features; if the adapter lacks them, render-world setup is skipped with a
/// warning (see the `Plugin::finish` impl).
#[derive(Debug, Default)]
pub struct WireframePlugin {
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
impl WireframePlugin {
/// Creates a new [`WireframePlugin`] with the given debug flags.
pub fn new(debug_flags: RenderDebugFlags) -> Self {
Self { debug_flags }
}
}
impl Plugin for WireframePlugin {
    // Main-world setup: assets, resources, and the systems that attach/detach
    // wireframe materials and track entities needing pipeline specialization.
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "render/wireframe.wgsl");
        app.add_plugins((
            BinnedRenderPhasePlugin::<Wireframe3d, MeshPipeline>::new(self.debug_flags),
            RenderAssetPlugin::<RenderWireframeMaterial>::default(),
        ))
        .init_asset::<WireframeMaterial>()
        .init_resource::<SpecializedMeshPipelines<Wireframe3dPipeline>>()
        .init_resource::<WireframeConfig>()
        .init_resource::<WireframeEntitiesNeedingSpecialization>()
        .add_systems(Startup, setup_global_wireframe_material)
        .add_systems(
            Update,
            (
                global_color_changed.run_if(resource_changed::<WireframeConfig>),
                wireframe_color_changed,
                // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global
                // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed.
                (apply_wireframe_material, apply_global_wireframe_material).chain(),
            ),
        )
        .add_systems(
            PostUpdate,
            check_wireframe_entities_needing_specialization
                // After asset events so `AssetChanged` filters see this frame's changes.
                .after(AssetEventSystems)
                .run_if(resource_exists::<WireframeConfig>),
        );
    }
    // Render-world setup. Runs in `finish` (not `build`) because it needs the
    // `RenderDevice` to check feature support first.
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // Line rasterization + push constants are mandatory for this plugin;
        // bail out (with a warning) on GPUs/backends that lack them.
        let required_features = WgpuFeatures::POLYGON_MODE_LINE | WgpuFeatures::PUSH_CONSTANTS;
        let render_device = render_app.world().resource::<RenderDevice>();
        if !render_device.features().contains(required_features) {
            warn!(
                "WireframePlugin not loaded. GPU lacks support for required features: {:?}.",
                required_features
            );
            return;
        }
        render_app
            .init_resource::<WireframeEntitySpecializationTicks>()
            .init_resource::<SpecializedWireframePipelineCache>()
            .init_resource::<DrawFunctions<Wireframe3d>>()
            .add_render_command::<Wireframe3d, DrawWireframe3d>()
            .init_resource::<RenderWireframeInstances>()
            .init_resource::<SpecializedMeshPipelines<Wireframe3dPipeline>>()
            // The wireframe node draws between the end of the main pass and
            // post-processing.
            .add_render_graph_node::<ViewNodeRunner<Wireframe3dNode>>(Core3d, Node3d::Wireframe)
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::EndMainPass,
                    Node3d::Wireframe,
                    Node3d::PostProcessing,
                ),
            )
            .add_systems(RenderStartup, init_wireframe_3d_pipeline)
            .add_systems(
                ExtractSchedule,
                (
                    extract_wireframe_3d_camera,
                    extract_wireframe_entities_needing_specialization.after(extract_cameras),
                    extract_wireframe_materials,
                ),
            )
            .add_systems(
                Render,
                (
                    specialize_wireframes
                        .in_set(RenderSystems::PrepareMeshes)
                        .after(prepare_assets::<RenderWireframeMaterial>)
                        .after(prepare_assets::<RenderMesh>),
                    queue_wireframes
                        .in_set(RenderSystems::QueueMeshes)
                        .after(prepare_assets::<RenderWireframeMaterial>),
                ),
            );
    }
}
/// Enables wireframe rendering for any entity it is attached to.
/// It will ignore the [`WireframeConfig`] global setting.
///
/// This requires the [`WireframePlugin`] to be enabled.
#[derive(Component, Debug, Clone, Default, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq)]
pub struct Wireframe;
/// The binned phase item used to render wireframes in the dedicated
/// wireframe 3D render phase (see the [`BinnedPhaseItem`] impl below).
pub struct Wireframe3d {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: Wireframe3dBatchSetKey,
    /// The key, which determines which can be batched.
    pub bin_key: Wireframe3dBinKey,
    /// An entity from which data will be fetched, including the mesh if
    /// applicable.
    pub representative_entity: (Entity, MainEntity),
    /// The ranges of instances.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
}
impl PhaseItem for Wireframe3d {
fn entity(&self) -> Entity {
self.representative_entity.0
}
fn main_entity(&self) -> MainEntity {
self.representative_entity.1
}
fn draw_function(&self) -> DrawFunctionId {
self.batch_set_key.draw_function
}
fn batch_range(&self) -> &Range<u32> {
&self.batch_range
}
fn batch_range_mut(&mut self) -> &mut Range<u32> {
&mut self.batch_range
}
fn extra_index(&self) -> PhaseItemExtraIndex {
self.extra_index.clone()
}
fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
(&mut self.batch_range, &mut self.extra_index)
}
}
impl CachedRenderPipelinePhaseItem for Wireframe3d {
fn cached_pipeline(&self) -> CachedRenderPipelineId {
self.batch_set_key.pipeline
}
}
impl BinnedPhaseItem for Wireframe3d {
type BinKey = Wireframe3dBinKey;
type BatchSetKey = Wireframe3dBatchSetKey;
fn new(
batch_set_key: Self::BatchSetKey,
bin_key: Self::BinKey,
representative_entity: (Entity, MainEntity),
batch_range: Range<u32>,
extra_index: PhaseItemExtraIndex,
) -> Self {
Self {
batch_set_key,
bin_key,
representative_entity,
batch_range,
extra_index,
}
}
}
/// Data that must be identical for phase items to share a *batch set* (a
/// group of batches that can potentially be multi-drawn together).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Wireframe3dBatchSetKey {
    /// The identifier of the render pipeline.
    pub pipeline: CachedRenderPipelineId,
    /// The wireframe material asset ID.
    pub asset_id: UntypedAssetId,
    /// The function used to draw.
    pub draw_function: DrawFunctionId,
    /// The ID of the slab of GPU memory that contains vertex data.
    ///
    /// For non-mesh items, you can fill this with 0 if your items can be
    /// multi-drawn, or with a unique value if they can't.
    pub vertex_slab: SlabId,
    /// The ID of the slab of GPU memory that contains index data, if present.
    ///
    /// For non-mesh items, you can safely fill this with `None`.
    pub index_slab: Option<SlabId>,
}
impl PhaseItemBatchSetKey for Wireframe3dBatchSetKey {
    /// Whether items in this batch set use indexed draws (true exactly when an
    /// index slab is present).
    fn indexed(&self) -> bool {
        matches!(self.index_slab, Some(_))
    }
}
/// Data that must be identical in order to *batch* phase items together.
///
/// Note that a *batch set* (if multi-draw is in use) contains multiple batches.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Wireframe3dBinKey {
    /// The wireframe mesh asset ID.
    pub asset_id: UntypedAssetId,
}
/// Render command that uploads the item's wireframe color to the fragment
/// stage via push constants before the draw is issued.
pub struct SetWireframe3dPushConstants;
impl<P: PhaseItem> RenderCommand<P> for SetWireframe3dPushConstants {
    type Param = (
        SRes<RenderWireframeInstances>,
        SRes<RenderAssets<RenderWireframeMaterial>>,
    );
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // Look up the material asset id for this entity; the mapping is
        // maintained by `extract_wireframe_materials`.
        let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else {
            return RenderCommandResult::Failure("No wireframe material found for entity");
        };
        // Bail out if the render asset hasn't been prepared (yet).
        let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else {
            return RenderCommandResult::Failure("No wireframe material found for entity");
        };
        // 16 bytes of `[f32; 4]` color — matches the `0..16` push-constant
        // range declared in `Wireframe3dPipeline::specialize`.
        pass.set_push_constants(
            ShaderStages::FRAGMENT,
            0,
            bytemuck::bytes_of(&wireframe_material.color),
        );
        RenderCommandResult::Success
    }
}
/// The sequence of render commands that draws a single wireframe phase item:
/// bind the pipeline, the view/mesh bind groups, push the wireframe color,
/// then draw the mesh.
pub type DrawWireframe3d = (
    SetItemPipeline,
    SetMeshViewBindGroup<0>,
    SetMeshViewBindingArrayBindGroup<1>,
    SetMeshBindGroup<2>,
    SetWireframe3dPushConstants,
    DrawMesh,
);
/// The specialized pipeline used for wireframe drawing: the regular mesh
/// pipeline plus the wireframe fragment shader (see the
/// [`SpecializedMeshPipeline`] impl below).
#[derive(Resource, Clone)]
pub struct Wireframe3dPipeline {
    // Base pipeline the wireframe variant is specialized from.
    mesh_pipeline: MeshPipeline,
    // Handle to the embedded `render/wireframe.wgsl` shader.
    shader: Handle<Shader>,
}
/// Creates the [`Wireframe3dPipeline`] resource at render startup, cloning the
/// base mesh pipeline and loading the embedded wireframe shader.
pub fn init_wireframe_3d_pipeline(
    mut commands: Commands,
    mesh_pipeline: Res<MeshPipeline>,
    asset_server: Res<AssetServer>,
) {
    let shader = load_embedded_asset!(asset_server.as_ref(), "render/wireframe.wgsl");
    let pipeline = Wireframe3dPipeline {
        shader,
        mesh_pipeline: mesh_pipeline.clone(),
    };
    commands.insert_resource(pipeline);
}
impl SpecializedMeshPipeline for Wireframe3dPipeline {
    type Key = MeshPipelineKey;
    /// Builds the wireframe pipeline by specializing the regular mesh pipeline
    /// and then overriding the parts that differ: label, a push-constant range
    /// for the color, the fragment shader, line polygon mode, and a depth bias.
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut descriptor = self.mesh_pipeline.specialize(key, layout)?;
        descriptor.label = Some("wireframe_3d_pipeline".into());
        // 16 bytes = one `[f32; 4]` color, written per item by
        // `SetWireframe3dPushConstants`.
        descriptor.push_constant_ranges.push(PushConstantRange {
            stages: ShaderStages::FRAGMENT,
            range: 0..16,
        });
        // The base mesh pipeline is expected to always produce a fragment
        // state, hence the unwrap; swap its shader for the wireframe one.
        let fragment = descriptor.fragment.as_mut().unwrap();
        fragment.shader = self.shader.clone();
        descriptor.primitive.polygon_mode = PolygonMode::Line;
        // NOTE(review): the slope-scaled depth bias presumably keeps lines
        // from z-fighting with the solid mesh drawn in the main pass — confirm.
        descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0;
        Ok(descriptor)
    }
}
/// Render-graph node that draws the [`Wireframe3d`] phase for a view; it runs
/// between `EndMainPass` and `PostProcessing` (see the graph edges added in
/// `WireframePlugin::finish`).
#[derive(Default)]
struct Wireframe3dNode;
impl ViewNode for Wireframe3dNode {
type ViewQuery = (
&'static ExtractedCamera,
&'static ExtractedView,
&'static ViewTarget,
&'static ViewDepthTexture,
);
fn run<'w>(
&self,
graph: &mut RenderGraphContext,
render_context: &mut RenderContext<'w>,
(camera, view, target, depth): QueryItem<'w, '_, Self::ViewQuery>,
world: &'w World,
) -> Result<(), NodeRunError> {
let Some(wireframe_phase) = world.get_resource::<ViewBinnedRenderPhases<Wireframe3d>>()
else {
return Ok(());
};
let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
label: Some("wireframe_3d"),
color_attachments: &[Some(target.get_color_attachment())],
depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)),
timestamp_writes: None,
occlusion_query_set: None,
});
let pass_span = diagnostics.pass_span(&mut render_pass, "wireframe_3d");
if let Some(viewport) = camera.viewport.as_ref() {
render_pass.set_camera_viewport(viewport);
}
if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) {
error!("Error encountered while rendering the stencil phase {err:?}");
return Err(NodeRunError::DrawError(err));
}
pass_span.end(&mut render_pass);
Ok(())
}
}
/// Sets the color of the [`Wireframe`] of the entity it is attached to.
///
/// If this component is present but there's no [`Wireframe`] component,
/// it will still affect the color of the wireframe when [`WireframeConfig::global`] is set to true.
///
/// This overrides the [`WireframeConfig::default_color`].
#[derive(Component, Debug, Clone, Default, Reflect)]
#[reflect(Component, Default, Debug)]
pub struct WireframeColor {
    /// The color to draw this entity's wireframe with.
    pub color: Color,
}
/// Extracted (render-world) form of a wireframe color, flattened to raw `f32`
/// RGBA components.
///
/// NOTE(review): not referenced anywhere in this file's visible code — confirm
/// it is still used before relying on it.
#[derive(Component, Debug, Clone, Default)]
pub struct ExtractedWireframeColor {
    // RGBA components; presumably linear color space — confirm at the
    // extraction site.
    pub color: [f32; 4],
}
/// Disables wireframe rendering for any entity it is attached to.
/// It will ignore the [`WireframeConfig`] global setting.
///
/// This requires the [`WireframePlugin`] to be enabled.
#[derive(Component, Debug, Clone, Default, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq)]
pub struct NoWireframe;
/// Global configuration resource for wireframe rendering.
#[derive(Resource, Debug, Clone, Default, ExtractResource, Reflect)]
#[reflect(Resource, Debug, Default)]
pub struct WireframeConfig {
    /// Whether to show wireframes for all meshes.
    /// Can be overridden for individual meshes by adding a [`Wireframe`] or [`NoWireframe`] component.
    pub global: bool,
    /// If [`Self::global`] is set, any [`Entity`] that does not have a [`Wireframe`] component attached to it will have
    /// wireframes using this color. Otherwise, this will be the fallback color for any entity that has a [`Wireframe`],
    /// but no [`WireframeColor`].
    pub default_color: Color,
}
/// Asset describing how a wireframe is drawn; currently just a color.
#[derive(Asset, Reflect, Clone, Debug, Default)]
#[reflect(Clone, Default)]
pub struct WireframeMaterial {
    /// The color the wireframe lines are drawn with.
    pub color: Color,
}
/// Render-world form of [`WireframeMaterial`]: the color converted to linear
/// RGBA components (see the `RenderAsset` impl) so it can be uploaded directly
/// as push-constant bytes by [`SetWireframe3dPushConstants`].
pub struct RenderWireframeMaterial {
    /// Linear RGBA color components.
    pub color: [f32; 4],
}
/// Component holding the [`WireframeMaterial`] used to draw an entity's
/// wireframe.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)]
#[reflect(Component, Default, Clone, PartialEq)]
pub struct Mesh3dWireframe(pub Handle<WireframeMaterial>);
impl AsAssetId for Mesh3dWireframe {
    type Asset = WireframeMaterial;
    /// The id of the referenced wireframe material; this powers the
    /// `AssetChanged<Mesh3dWireframe>` change-detection filter used below.
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.0.id()
    }
}
impl RenderAsset for RenderWireframeMaterial {
    type SourceAsset = WireframeMaterial;
    type Param = ();
    /// Converts the main-world material into its render-world form by
    /// flattening the color to linear RGBA components. Never fails.
    fn prepare_asset(
        source_asset: Self::SourceAsset,
        _asset_id: AssetId<Self::SourceAsset>,
        _param: &mut SystemParamItem<Self::Param>,
        _previous_asset: Option<&Self>,
    ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
        let color = source_asset.color.to_linear().to_f32_array();
        Ok(RenderWireframeMaterial { color })
    }
}
/// Render-world map from main-world entity to the wireframe material it uses,
/// maintained by [`extract_wireframe_materials`].
#[derive(Resource, Deref, DerefMut, Default)]
pub struct RenderWireframeInstances(MainEntityHashMap<AssetId<WireframeMaterial>>);
/// Main-world list of entities whose wireframe pipelines need respecialization,
/// rebuilt each frame by [`check_wireframe_entities_needing_specialization`].
#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)]
pub struct WireframeEntitiesNeedingSpecialization {
    #[deref]
    // Entities collected this frame; cleared and refilled every run.
    pub entities: Vec<Entity>,
}
/// Render-world record of the last tick at which each entity was marked as
/// needing specialization (written during extraction, read by
/// `specialize_wireframes`).
#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)]
pub struct WireframeEntitySpecializationTicks {
    pub entities: MainEntityHashMap<Tick>,
}
/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct SpecializedWireframePipelineCache {
    // view entity -> view pipeline cache
    #[deref]
    map: HashMap<RetainedViewEntity, SpecializedWireframeViewPipelineCache>,
}
/// Stores the cached render pipeline ID for each entity in a single view, as
/// well as the last time it was changed.
#[derive(Deref, DerefMut, Default)]
pub struct SpecializedWireframeViewPipelineCache {
    // material entity -> (tick, pipeline_id)
    #[deref]
    map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>,
}
/// Holds the single shared material used by meshes that fall back to the
/// global wireframe color.
#[derive(Resource)]
struct GlobalWireframeMaterial {
    // This handle will be reused when the global config is enabled
    handle: Handle<WireframeMaterial>,
}
/// Extracts the entity -> wireframe-material mapping into the render world.
///
/// Visits only entities whose visibility or material changed this frame;
/// entities that became invisible, or whose visibility/material component was
/// removed, are dropped from [`RenderWireframeInstances`].
pub fn extract_wireframe_materials(
    mut material_instances: ResMut<RenderWireframeInstances>,
    changed_meshes_query: Extract<
        Query<
            (Entity, &ViewVisibility, &Mesh3dWireframe),
            Or<(Changed<ViewVisibility>, Changed<Mesh3dWireframe>)>,
        >,
    >,
    mut removed_visibilities_query: Extract<RemovedComponents<ViewVisibility>>,
    mut removed_materials_query: Extract<RemovedComponents<Mesh3dWireframe>>,
) {
    for (entity, view_visibility, material) in &changed_meshes_query {
        if view_visibility.get() {
            material_instances.insert(entity.into(), material.id());
        } else {
            // Invisible entities must not keep a render-world material entry.
            material_instances.remove(&MainEntity::from(entity));
        }
    }
    for entity in removed_visibilities_query
        .read()
        .chain(removed_materials_query.read())
    {
        // Only queue a mesh for removal if we didn't pick it up above.
        // It's possible that a necessary component was removed and re-added in
        // the same frame.
        if !changed_meshes_query.contains(entity) {
            material_instances.remove(&MainEntity::from(entity));
        }
    }
}
/// Startup system that registers the single material handle shared by every
/// mesh falling back to the global wireframe color.
fn setup_global_wireframe_material(
    mut commands: Commands,
    mut materials: ResMut<Assets<WireframeMaterial>>,
    config: Res<WireframeConfig>,
) {
    let handle = materials.add(WireframeMaterial {
        color: config.default_color,
    });
    commands.insert_resource(GlobalWireframeMaterial { handle });
}
/// Keeps the shared global wireframe material in sync with
/// [`WireframeConfig::default_color`], affecting every entity that uses the
/// global material (no [`WireframeColor`] / no [`Wireframe`]).
fn global_color_changed(
    config: Res<WireframeConfig>,
    mut materials: ResMut<Assets<WireframeMaterial>>,
    global_material: Res<GlobalWireframeMaterial>,
) {
    let Some(material) = materials.get_mut(&global_material.handle) else {
        return;
    };
    material.color = config.default_color;
}
/// Rebuilds the per-entity wireframe material whenever its [`WireframeColor`]
/// changes.
fn wireframe_color_changed(
    mut materials: ResMut<Assets<WireframeMaterial>>,
    mut colors_changed: Query<
        (&mut Mesh3dWireframe, &WireframeColor),
        (With<Wireframe>, Changed<WireframeColor>),
    >,
) {
    for (mut handle, wireframe_color) in colors_changed.iter_mut() {
        let material = WireframeMaterial {
            color: wireframe_color.color,
        };
        handle.0 = materials.add(material);
    }
}
/// Attaches the wireframe material to any mesh carrying [`Wireframe`], and
/// detaches it from meshes carrying [`NoWireframe`] or whose [`Wireframe`]
/// component was just removed.
fn apply_wireframe_material(
    mut commands: Commands,
    mut materials: ResMut<Assets<WireframeMaterial>>,
    wireframes: Query<
        (Entity, Option<&WireframeColor>),
        (With<Wireframe>, Without<Mesh3dWireframe>),
    >,
    no_wireframes: Query<Entity, (With<NoWireframe>, With<Mesh3dWireframe>)>,
    mut removed_wireframes: RemovedComponents<Wireframe>,
    global_material: Res<GlobalWireframeMaterial>,
) {
    // Strip the material from entities that opted out or lost their marker.
    for entity in removed_wireframes.read().chain(no_wireframes.iter()) {
        if let Ok(mut entity_commands) = commands.get_entity(entity) {
            entity_commands.remove::<Mesh3dWireframe>();
        }
    }
    // Hand a material to every newly-marked mesh.
    let to_insert: Vec<_> = wireframes
        .iter()
        .map(|(entity, maybe_color)| {
            let material = get_wireframe_material(maybe_color, &mut materials, &global_material);
            (entity, Mesh3dWireframe(material))
        })
        .collect();
    commands.try_insert_batch(to_insert);
}
/// Query filter matching meshes whose wireframe state is controlled by the
/// global [`WireframeConfig`] rather than per-entity marker components.
type WireframeFilter = (With<Mesh3d>, Without<Wireframe>, Without<NoWireframe>);
/// Adds or removes the shared wireframe material on meshes that have neither a
/// [`Wireframe`] nor a [`NoWireframe`] component, depending on
/// [`WireframeConfig::global`].
fn apply_global_wireframe_material(
    mut commands: Commands,
    config: Res<WireframeConfig>,
    meshes_without_material: Query<
        (Entity, Option<&WireframeColor>),
        (WireframeFilter, Without<Mesh3dWireframe>),
    >,
    meshes_with_global_material: Query<Entity, (WireframeFilter, With<Mesh3dWireframe>)>,
    global_material: Res<GlobalWireframeMaterial>,
    mut materials: ResMut<Assets<WireframeMaterial>>,
) {
    if !config.global {
        // Global mode is off: strip the material previously added here.
        // Entities matching `WireframeFilter` never carry a user-specified
        // material, so only the globally-applied one is removed.
        for entity in &meshes_with_global_material {
            commands.entity(entity).remove::<Mesh3dWireframe>();
        }
        return;
    }
    // Global mode is on: give the material to every mesh that doesn't have one.
    // Only the material handle is added — not the `Wireframe` marker — which
    // makes it easy to tell globally-wireframed meshes apart from
    // user-specified ones.
    let to_insert: Vec<_> = meshes_without_material
        .iter()
        .map(|(entity, maybe_color)| {
            let material = get_wireframe_material(maybe_color, &mut materials, &global_material);
            (entity, Mesh3dWireframe(material))
        })
        .collect();
    commands.try_insert_batch(to_insert);
}
/// Resolves the wireframe material for an entity: a freshly-added material
/// when a [`WireframeColor`] is present, otherwise the shared global material
/// (which already carries the default color).
fn get_wireframe_material(
    maybe_color: Option<&WireframeColor>,
    wireframe_materials: &mut Assets<WireframeMaterial>,
    global_material: &GlobalWireframeMaterial,
) -> Handle<WireframeMaterial> {
    match maybe_color {
        Some(wireframe_color) => wireframe_materials.add(WireframeMaterial {
            color: wireframe_color.color,
        }),
        None => global_material.handle.clone(),
    }
}
/// Prepares a [`Wireframe3d`] render phase for every active 3D camera and
/// drops phases whose cameras are gone or inactive.
fn extract_wireframe_3d_camera(
    mut wireframe_3d_phases: ResMut<ViewBinnedRenderPhases<Wireframe3d>>,
    cameras: Extract<Query<(Entity, &Camera, Has<NoIndirectDrawing>), With<Camera3d>>>,
    mut live_entities: Local<HashSet<RetainedViewEntity>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    live_entities.clear();
    for (main_entity, camera, no_indirect_drawing) in &cameras {
        if !camera.is_active {
            continue;
        }
        // Culling (indirect drawing) is requested unless the camera opts out,
        // and the result is always capped by platform support.
        let requested_mode = if no_indirect_drawing {
            GpuPreprocessingMode::PreprocessingOnly
        } else {
            GpuPreprocessingMode::Culling
        };
        let gpu_preprocessing_mode = gpu_preprocessing_support.min(requested_mode);
        let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0);
        wireframe_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
        live_entities.insert(retained_view_entity);
    }
    // Clear out all dead views.
    wireframe_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity));
}
/// Mirrors the main-world "needs specialization" list into the render world.
///
/// Every listed entity gets its specialization tick bumped to this run's tick,
/// and entities whose `Mesh3d` was removed are evicted from every view's
/// specialized-pipeline cache.
pub fn extract_wireframe_entities_needing_specialization(
    entities_needing_specialization: Extract<Res<WireframeEntitiesNeedingSpecialization>>,
    mut entity_specialization_ticks: ResMut<WireframeEntitySpecializationTicks>,
    views: Query<&ExtractedView>,
    mut specialized_wireframe_pipeline_cache: ResMut<SpecializedWireframePipelineCache>,
    mut removed_meshes_query: Extract<RemovedComponents<Mesh3d>>,
    ticks: SystemChangeTick,
) {
    for entity in entities_needing_specialization.iter() {
        // Update the entity's specialization tick with this run's tick
        entity_specialization_ticks.insert((*entity).into(), ticks.this_run());
    }
    for entity in removed_meshes_query.read() {
        for view in &views {
            if let Some(specialized_wireframe_pipeline_cache) =
                specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity)
            {
                specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity));
            }
        }
    }
}
/// Rebuilds, each frame, the main-world list of entities whose wireframe
/// pipeline must be re-specialized because their mesh or wireframe material
/// (component or underlying asset) changed.
pub fn check_wireframe_entities_needing_specialization(
    needs_specialization: Query<
        Entity,
        Or<(
            Changed<Mesh3d>,
            AssetChanged<Mesh3d>,
            Changed<Mesh3dWireframe>,
            AssetChanged<Mesh3dWireframe>,
        )>,
    >,
    mut entities_needing_specialization: ResMut<WireframeEntitiesNeedingSpecialization>,
) {
    // Rebuild from scratch: clear keeps the Vec's capacity, extend refills it.
    entities_needing_specialization.clear();
    entities_needing_specialization.extend(needs_specialization.iter());
}
/// Specializes the wireframe mesh pipeline for every visible wireframe-bearing
/// mesh whose entity or view changed since its last specialization, caching
/// the resulting pipeline IDs per retained view.
pub fn specialize_wireframes(
    render_meshes: Res<RenderAssets<RenderMesh>>,
    render_mesh_instances: Res<RenderMeshInstances>,
    render_wireframe_instances: Res<RenderWireframeInstances>,
    render_visibility_ranges: Res<RenderVisibilityRanges>,
    wireframe_phases: Res<ViewBinnedRenderPhases<Wireframe3d>>,
    views: Query<(&ExtractedView, &RenderVisibleEntities)>,
    view_key_cache: Res<ViewKeyCache>,
    entity_specialization_ticks: Res<WireframeEntitySpecializationTicks>,
    view_specialization_ticks: Res<ViewSpecializationTicks>,
    mut specialized_material_pipeline_cache: ResMut<SpecializedWireframePipelineCache>,
    mut pipelines: ResMut<SpecializedMeshPipelines<Wireframe3dPipeline>>,
    pipeline: Res<Wireframe3dPipeline>,
    pipeline_cache: Res<PipelineCache>,
    render_lightmaps: Res<RenderLightmaps>,
    ticks: SystemChangeTick,
) {
    // Record the retained IDs of all views so that we can expire old
    // pipeline IDs.
    let mut all_views: HashSet<RetainedViewEntity, FixedHasher> = HashSet::default();
    for (view, visible_entities) in &views {
        all_views.insert(view.retained_view_entity);
        // Only views with a wireframe render phase need specialized pipelines.
        if !wireframe_phases.contains_key(&view.retained_view_entity) {
            continue;
        }
        let Some(view_key) = view_key_cache.get(&view.retained_view_entity) else {
            continue;
        };
        let view_tick = view_specialization_ticks
            .get(&view.retained_view_entity)
            .unwrap();
        let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache
            .entry(view.retained_view_entity)
            .or_default();
        for (_, visible_entity) in visible_entities.iter::<Mesh3d>() {
            // Skip meshes that don't have a wireframe attached.
            if !render_wireframe_instances.contains_key(visible_entity) {
                continue;
            };
            let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity)
            else {
                continue;
            };
            // Respecialize only if the view or the entity changed more
            // recently than this entity's last specialization for this view.
            let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap();
            let last_specialized_tick = view_specialized_material_pipeline_cache
                .get(visible_entity)
                .map(|(tick, _)| *tick);
            let needs_specialization = last_specialized_tick.is_none_or(|tick| {
                view_tick.is_newer_than(tick, ticks.this_run())
                    || entity_tick.is_newer_than(tick, ticks.this_run())
            });
            if !needs_specialization {
                continue;
            }
            let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else {
                continue;
            };
            // Derive the mesh pipeline key from the view key plus per-mesh
            // state (topology, dithering, previous-frame skin/morph data).
            let mut mesh_key = *view_key;
            mesh_key |= MeshPipelineKey::from_primitive_topology(mesh.primitive_topology());
            if render_visibility_ranges.entity_has_crossfading_visibility_ranges(*visible_entity) {
                mesh_key |= MeshPipelineKey::VISIBILITY_RANGE_DITHER;
            }
            if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
                // If the previous frame had skins or morph targets, note that.
                if mesh_instance
                    .flags
                    .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN)
                {
                    mesh_key |= MeshPipelineKey::HAS_PREVIOUS_SKIN;
                }
                if mesh_instance
                    .flags
                    .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_MORPH)
                {
                    mesh_key |= MeshPipelineKey::HAS_PREVIOUS_MORPH;
                }
            }
            // Even though we don't use the lightmap in the wireframe, the
            // `SetMeshBindGroup` render command will bind the data for it. So
            // we need to include the appropriate flag in the mesh pipeline key
            // to ensure that the necessary bind group layout entries are
            // present.
            if render_lightmaps
                .render_lightmaps
                .contains_key(visible_entity)
            {
                mesh_key |= MeshPipelineKey::LIGHTMAPPED;
            }
            let pipeline_id =
                pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout);
            let pipeline_id = match pipeline_id {
                Ok(id) => id,
                Err(err) => {
                    // Specialization failure is logged, not fatal: the entity
                    // simply keeps no cached pipeline for this view.
                    error!("{}", err);
                    continue;
                }
            };
            view_specialized_material_pipeline_cache
                .insert(*visible_entity, (ticks.this_run(), pipeline_id));
        }
    }
    // Delete specialized pipelines belonging to views that have expired.
    specialized_material_pipeline_cache
        .retain(|retained_view_entity, _| all_views.contains(retained_view_entity));
}
fn queue_wireframes(
custom_draw_functions: Res<DrawFunctions<Wireframe3d>>,
render_mesh_instances: Res<RenderMeshInstances>,
gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
mesh_allocator: Res<MeshAllocator>,
specialized_wireframe_pipeline_cache: Res<SpecializedWireframePipelineCache>,
render_wireframe_instances: Res<RenderWireframeInstances>,
mut wireframe_3d_phases: ResMut<ViewBinnedRenderPhases<Wireframe3d>>,
mut views: Query<(&ExtractedView, &RenderVisibleEntities)>,
) {
for (view, visible_entities) in &mut views {
let Some(wireframe_phase) = wireframe_3d_phases.get_mut(&view.retained_view_entity) else {
continue;
};
let draw_wireframe = custom_draw_functions.read().id::<DrawWireframe3d>();
let Some(view_specialized_material_pipeline_cache) =
specialized_wireframe_pipeline_cache.get(&view.retained_view_entity)
else {
continue;
};
for (render_entity, visible_entity) in visible_entities.iter::<Mesh3d>() {
let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else {
continue;
};
let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache
.get(visible_entity)
.map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id))
else {
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/medium.rs | crates/bevy_pbr/src/medium.rs | use alloc::{borrow::Cow, sync::Arc};
use core::f32::{self, consts::PI};
use bevy_app::{App, Plugin};
use bevy_asset::{Asset, AssetApp, AssetId};
use bevy_ecs::{
resource::Resource,
system::{Commands, Res, SystemParamItem},
};
use bevy_math::{ops, Curve, FloatPow, Vec3, Vec4};
use bevy_reflect::TypePath;
use bevy_render::{
render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin},
render_resource::{
Extent3d, FilterMode, Sampler, SamplerDescriptor, Texture, TextureDataOrder,
TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, TextureView,
TextureViewDescriptor,
},
renderer::{RenderDevice, RenderQueue},
RenderApp, RenderStartup,
};
use smallvec::SmallVec;
#[doc(hidden)]
pub struct ScatteringMediumPlugin;
impl Plugin for ScatteringMediumPlugin {
    fn build(&self, app: &mut App) {
        // Register the CPU-side asset plus the plugin that converts it into
        // its GPU representation, `GpuScatteringMedium`.
        app.init_asset::<ScatteringMedium>()
            .add_plugins(RenderAssetPlugin::<GpuScatteringMedium>::default());
        // The shared LUT sampler lives in the render world.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.add_systems(RenderStartup, init_scattering_medium_sampler);
        }
    }
}
/// An asset that defines how a material scatters light.
///
/// In order to calculate how light passes through a medium,
/// you need three pieces of information:
/// - how much light the medium *absorbs* per unit length
/// - how much light the medium *scatters* per unit length
/// - what *directions* the medium is likely to scatter light in.
///
/// The first two are fairly simple, and are sometimes referred to together
/// (accurately enough) as the medium's [optical density].
///
/// The last, defined by a [phase function], is the most important in creating
/// the look of a medium. Our brains are very good at noticing (if unconsciously)
/// that a dust storm scatters light differently than a rain cloud, for example.
/// See the docs on [`PhaseFunction`] for more info.
///
/// In reality, media are often composed of multiple elements that scatter light
/// independently. For example, Earth's atmosphere is composed of the gas itself, but also
/// suspended dust and particulate. These each scatter light differently, and are
/// distributed in different amounts at different altitudes. In a [`ScatteringMedium`],
/// these are each represented by a [`ScatteringTerm`].
///
/// ## Technical Details
///
/// A [`ScatteringMedium`] is represented on the GPU by a set of two LUTs, which
/// are re-created every time the asset is modified. See the docs on
/// [`GpuScatteringMedium`] for more info.
///
/// [optical density]: https://en.wikipedia.org/wiki/Optical_Density
/// [phase function]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions
#[derive(TypePath, Asset, Clone)]
pub struct ScatteringMedium {
    /// An optional label for the medium, used when creating the LUTs on the GPU.
    pub label: Option<Cow<'static, str>>,
    /// The resolution at which to sample the falloff distribution of each
    /// scattering term. Custom or more detailed distributions may benefit
    /// from a higher value, at the cost of more memory use.
    pub falloff_resolution: u32,
    /// The resolution at which to sample the phase function of each scattering
    /// term. Custom or more detailed phase functions may benefit from a higher
    /// value, at the cost of more memory use.
    pub phase_resolution: u32,
    /// The list of [`ScatteringTerm`]s that compose this [`ScatteringMedium`]
    pub terms: SmallVec<[ScatteringTerm; 1]>,
}
impl ScatteringMedium {
    /// Returns a scattering medium with a default label and the
    /// specified scattering terms.
    pub fn new(
        falloff_resolution: u32,
        phase_resolution: u32,
        terms: impl IntoIterator<Item = ScatteringTerm>,
    ) -> Self {
        Self {
            label: None,
            falloff_resolution,
            phase_resolution,
            terms: terms.into_iter().collect(),
        }
    }
    /// Consumes and returns this scattering medium with a new label.
    pub fn with_label(self, label: impl Into<Cow<'static, str>>) -> Self {
        Self {
            label: Some(label.into()),
            ..self
        }
    }
    /// Consumes and returns this scattering medium with each scattering term's
    /// absorption and scattering densities multiplied by `multiplier`.
    pub fn with_density_multiplier(mut self, multiplier: f32) -> Self {
        self.terms.iter_mut().for_each(|term| {
            term.absorption *= multiplier;
            term.scattering *= multiplier;
        });
        self
    }
    /// Returns a scattering medium representing an earthlike atmosphere.
    ///
    /// Uses physically-based scale heights from Earth's atmosphere, assuming
    /// a 60 km atmosphere height:
    /// - Rayleigh (molecular) scattering: 8 km scale height
    /// - Mie (aerosol) scattering: 1.2 km scale height
    pub fn earthlike(falloff_resolution: u32, phase_resolution: u32) -> Self {
        Self::new(
            falloff_resolution,
            phase_resolution,
            [
                // Rayleigh scattering Term
                ScatteringTerm {
                    absorption: Vec3::ZERO,
                    scattering: Vec3::new(5.802e-6, 13.558e-6, 33.100e-6),
                    falloff: Falloff::Exponential { scale: 8.0 / 60.0 },
                    phase: PhaseFunction::Rayleigh,
                },
                // Mie scattering Term
                ScatteringTerm {
                    absorption: Vec3::splat(3.996e-6),
                    scattering: Vec3::splat(0.444e-6),
                    falloff: Falloff::Exponential { scale: 1.2 / 60.0 },
                    phase: PhaseFunction::Mie { asymmetry: 0.8 },
                },
                // Ozone scattering Term (absorbs but does not scatter,
                // concentrated in a band around 3/4 of the atmosphere height)
                ScatteringTerm {
                    absorption: Vec3::new(0.650e-6, 1.881e-6, 0.085e-6),
                    scattering: Vec3::ZERO,
                    falloff: Falloff::Tent {
                        center: 0.75,
                        width: 0.3,
                    },
                    phase: PhaseFunction::Isotropic,
                },
            ],
        )
        .with_label("earthlike_atmosphere")
    }
}
/// An individual element of a [`ScatteringMedium`].
///
/// A [`ScatteringMedium`] can be built out of a number of simpler [`ScatteringTerm`]s,
/// which correspond to an individual element of the medium. For example, Earth's
/// atmosphere would be (roughly) composed of two [`ScatteringTerm`]s: the atmospheric
/// gases themselves, which extend to the edge of space, and suspended dust particles,
/// which are denser but lie closer to the ground.
///
/// The `Default` term neither absorbs nor scatters (zero densities), with
/// [`Falloff::Linear`] falloff and the default [`PhaseFunction`].
#[derive(Default, Clone)]
pub struct ScatteringTerm {
    /// This term's optical absorption density, or how much light of each wavelength
    /// it absorbs per meter.
    ///
    /// units: m^-1
    pub absorption: Vec3,
    /// This term's optical scattering density, or how much light of each wavelength
    /// it scatters per meter.
    ///
    /// units: m^-1
    pub scattering: Vec3,
    /// This term's falloff distribution. See the docs on [`Falloff`] for more info.
    pub falloff: Falloff,
    /// This term's [phase function], which determines the character of how it
    /// scatters light. See the docs on [`PhaseFunction`] for more info.
    ///
    /// [phase function]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions
    pub phase: PhaseFunction,
}
/// Describes how the media in a [`ScatteringTerm`] is distributed.
///
/// This is closely related to the optical density values [`ScatteringTerm::absorption`] and
/// [`ScatteringTerm::scattering`]. Most media aren't the same density everywhere;
/// near the edge of space Earth's atmosphere is much less dense, and it absorbs
/// and scatters less light.
///
/// [`Falloff`] determines how the density of a medium changes as a function of
/// an abstract "falloff parameter" `p`. `p = 1` denotes where the medium is the
/// densest, i.e. at the surface of the Earth, `p = 0` denotes where the medium
/// fades away completely, i.e. at the edge of space, and values between scale
/// linearly with distance, so `p = 0.5` would be halfway between the surface
/// and the edge of space.
///
/// When processing a [`ScatteringMedium`], the `absorption` and `scattering` values
/// for each [`ScatteringTerm`] are multiplied by the value of the falloff function, `f(p)`.
#[derive(Default, Clone)]
pub enum Falloff {
    /// A simple linear falloff function, which essentially
    /// passes the falloff parameter through unchanged.
    ///
    /// f(1) = 1
    /// f(0) = 0
    /// f(p) = p
    #[default]
    Linear,
    /// An exponential falloff function parametrized by a proportional scale.
    /// When paired with an absolute "falloff distance" like the distance from
    /// Earth's surface to the edge of space, this is analogous to the "height
    /// scale" value common in atmospheric scattering literature, though it will
    /// diverge from this for large or negative `scale` values.
    ///
    /// f(1) = 1
    /// f(0) = 0
    /// f(p) = (e^((1-p)/s) - e^(1/s))/(e - e^(1/s))
    Exponential {
        /// The "scale" of the exponential falloff. Values closer to zero will
        /// produce steeper falloff, and values farther from zero will produce
        /// gentler falloff, approaching linear falloff as scale goes to `+-∞`.
        ///
        /// Negative values change the *concavity* of the falloff function:
        /// rather than an initial narrow region of steep falloff followed by a
        /// wide region of gentle falloff, there will be an initial wide region
        /// of gentle falloff followed by a narrow region of steep falloff.
        ///
        /// domain: (-∞, ∞)
        ///
        /// NOTE, this function is not defined when `scale == 0`.
        /// In that case, it will fall back to linear falloff.
        scale: f32,
    },
    /// A tent-shaped falloff function, which produces a triangular
    /// peak at the center and linearly falls off to either side.
    ///
    /// f(`center`) = 1
    /// f(`center` +- `width` / 2) = 0
    Tent {
        /// The center of the tent function peak
        ///
        /// domain: [0, 1]
        center: f32,
        /// The total width of the tent function peak
        ///
        /// domain: [0, 1]
        width: f32,
    },
    /// A falloff function defined by a custom curve.
    ///
    /// Samples that fall outside the curve's domain evaluate to `0.0`.
    ///
    /// domain: [0, 1],
    /// range: [0, 1],
    Curve(Arc<dyn Curve<f32> + Send + Sync>),
}
impl Falloff {
    /// Returns a falloff function corresponding to a custom curve.
    pub fn from_curve(curve: impl Curve<f32> + Send + Sync + 'static) -> Self {
        Self::Curve(Arc::new(curve))
    }

    /// Evaluates this falloff function at the falloff parameter `p`.
    fn sample(&self, p: f32) -> f32 {
        match self {
            Self::Linear => p,
            // The exponential form has a removable discontinuity at
            // `scale == 0`; arbitrarily fill it with linear falloff.
            Self::Exponential { scale } if *scale == 0.0 => p,
            Self::Exponential { scale } => {
                let neg_inv_scale = -1.0 / scale;
                let exp_at_p = ops::exp((1.0 - p) * neg_inv_scale);
                let exp_at_zero = ops::exp(neg_inv_scale);
                (exp_at_p - exp_at_zero) / (1.0 - exp_at_zero)
            }
            Self::Tent { center, width } => {
                // Triangular peak of height 1 at `center`, reaching 0 at
                // `center ± width / 2`, clamped to 0 outside that interval.
                let offset = (p - center).abs();
                (1.0 - offset / (0.5 * width)).max(0.0)
            }
            // Out-of-domain samples contribute no density.
            Self::Curve(curve) => curve.sample(p).unwrap_or(0.0),
        }
    }
}
/// Describes how a [`ScatteringTerm`] scatters light in different directions.
///
/// A [phase function] is a function `f: [-1, 1] -> [0, ∞)`, symmetric about `x=0`
/// whose input is the cosine of the angle between an incoming light direction and
/// and outgoing light direction, and whose output is the proportion of the incoming
/// light that is actually scattered in that direction.
///
/// The phase function has an important effect on the "look" of a medium in a scene.
/// Media consisting of particles of a different size or shape scatter light differently,
/// and our brains are very good at telling the difference. A dust cloud, which might
/// correspond roughly to `PhaseFunction::Mie { asymmetry: 0.8 }`, looks quite different
/// from the rest of the sky (atmospheric gases), which correspond to `PhaseFunction::Rayleigh`
///
/// [phase function]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions
#[derive(Clone)]
pub enum PhaseFunction {
    /// A phase function that scatters light evenly in all directions.
    Isotropic,
    /// A phase function representing [Rayleigh scattering].
    ///
    /// Rayleigh scattering occurs naturally for particles much smaller than
    /// the wavelengths of visible light, such as gas molecules in the atmosphere.
    /// It's generally wavelength-dependent, where shorter wavelengths are scattered
    /// more strongly, so [scattering](ScatteringTerm::scattering) should have
    /// higher values for blue than green and green than red. Particles that
    /// participate in Rayleigh scattering don't absorb any light, either.
    ///
    /// [Rayleigh scattering]: https://en.wikipedia.org/wiki/Rayleigh_scattering
    Rayleigh,
    /// The [Henyey-Greenstein phase function], which approximates [Mie scattering].
    ///
    /// Mie scattering occurs naturally for spherical particles of dust
    /// and aerosols roughly the same size as the wavelengths of visible light,
    /// so it's useful for representing dust or sea spray. It's generally
    /// wavelength-independent, so [absorption](ScatteringTerm::absorption)
    /// and [scattering](ScatteringTerm::scattering) should be set to a greyscale value.
    ///
    /// [Mie scattering]: https://en.wikipedia.org/wiki/Mie_scattering
    /// [Henyey-Greenstein phase function]: https://www.oceanopticsbook.info/view/scattering/level-2/the-henyey-greenstein-phase-function
    Mie {
        /// Whether the Mie scattering function is biased towards scattering
        /// light forwards (asymmetry > 0) or backwards (asymmetry < 0).
        ///
        /// domain: [-1, 1]
        asymmetry: f32,
    },
    /// A phase function defined by a custom curve, where the input
    /// is the cosine of the angle between the incoming light ray
    /// and the scattered light ray, and the output is the fraction
    /// of the incoming light scattered in that direction.
    ///
    /// Samples that fall outside the curve's domain evaluate to `0.0`.
    ///
    /// Note: it's important for photorealism that the phase function
    /// be *energy conserving*, meaning that in total no more light can
    /// be scattered than actually entered the medium. For this to be
    /// the case, the integral of the phase function over its domain must
    /// be equal to 1/2π.
    ///
    ///  1
    ///  ∫ p(x) dx = 1/2π
    /// -1
    ///
    /// domain: [-1, 1]
    /// range: [0, 1]
    Curve(Arc<dyn Curve<f32> + Send + Sync>),
}
impl PhaseFunction {
    /// A phase function defined by a custom curve.
    pub fn from_curve(curve: impl Curve<f32> + Send + Sync + 'static) -> Self {
        Self::Curve(Arc::new(curve))
    }
    /// Evaluates this phase function, where `neg_l_dot_v` is the cosine of
    /// the angle between the incoming light direction and the scattered
    /// direction.
    fn sample(&self, neg_l_dot_v: f32) -> f32 {
        // 1 / (4π): normalization so the isotropic function integrates to 1
        // over the sphere.
        const FRAC_4_PI: f32 = 0.25 / PI;
        // 3 / (16π): normalization constant of the Rayleigh phase function.
        const FRAC_3_16_PI: f32 = 0.1875 / PI;
        match self {
            PhaseFunction::Isotropic => FRAC_4_PI,
            PhaseFunction::Rayleigh => FRAC_3_16_PI * (1.0 + neg_l_dot_v * neg_l_dot_v),
            // Henyey-Greenstein: (1/4π) · (1 - g²) / (1 + g² - 2g·cosθ)^(3/2)
            PhaseFunction::Mie { asymmetry } => {
                let denom = 1.0 + asymmetry.squared() - 2.0 * asymmetry * neg_l_dot_v;
                FRAC_4_PI * (1.0 - asymmetry.squared()) / (denom * denom.sqrt())
            }
            // Out-of-domain samples scatter no light.
            PhaseFunction::Curve(curve) => curve.sample(neg_l_dot_v).unwrap_or(0.0),
        }
    }
}
impl Default for PhaseFunction {
    fn default() -> Self {
        // Strongly forward-scattering Mie lobe; matches the aerosol term used
        // by `ScatteringMedium::earthlike`.
        Self::Mie { asymmetry: 0.8 }
    }
}
/// The GPU representation of a [`ScatteringMedium`].
///
/// Both LUTs are stored as `Rgba32Float` textures (see `prepare_asset`).
pub struct GpuScatteringMedium {
    /// The terms of the scattering medium.
    pub terms: SmallVec<[ScatteringTerm; 1]>,
    /// The resolution at which to sample the falloff distribution of each
    /// scattering term.
    pub falloff_resolution: u32,
    /// The resolution at which to sample the phase function of each
    /// scattering term.
    pub phase_resolution: u32,
    /// The `density_lut`, a 2D `falloff_resolution x 2` LUT which contains the
    /// medium's optical density with respect to the atmosphere's "falloff parameter",
    /// a linear value which is 1.0 at the planet's surface and 0.0 at the edge of
    /// space. The first and second rows correspond to absorption density and
    /// scattering density respectively.
    pub density_lut: Texture,
    /// The default [`TextureView`] of the `density_lut`
    pub density_lut_view: TextureView,
    /// The `scattering_lut`, a 2D `falloff_resolution x phase_resolution` LUT which
    /// contains the medium's scattering density multiplied by the phase function, with
    /// the U axis corresponding to the falloff parameter and the V axis corresponding
    /// to `neg_LdotV * 0.5 + 0.5`, where `neg_LdotV` is the dot product of the light
    /// direction and the incoming view vector.
    pub scattering_lut: Texture,
    /// The default [`TextureView`] of the `scattering_lut`
    pub scattering_lut_view: TextureView,
}
impl RenderAsset for GpuScatteringMedium {
type SourceAsset = ScatteringMedium;
type Param = (Res<'static, RenderDevice>, Res<'static, RenderQueue>);
fn prepare_asset(
source_asset: Self::SourceAsset,
_asset_id: AssetId<Self::SourceAsset>,
(render_device, render_queue): &mut SystemParamItem<Self::Param>,
_previous_asset: Option<&Self>,
) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
let mut density: Vec<Vec4> =
Vec::with_capacity(2 * source_asset.falloff_resolution as usize);
density.extend((0..source_asset.falloff_resolution).map(|i| {
let falloff = (i as f32 + 0.5) / source_asset.falloff_resolution as f32;
source_asset
.terms
.iter()
.map(|term| term.absorption.extend(0.0) * term.falloff.sample(falloff))
.sum::<Vec4>()
}));
density.extend((0..source_asset.falloff_resolution).map(|i| {
let falloff = (i as f32 + 0.5) / source_asset.falloff_resolution as f32;
source_asset
.terms
.iter()
.map(|term| term.scattering.extend(0.0) * term.falloff.sample(falloff))
.sum::<Vec4>()
}));
let mut scattering: Vec<Vec4> = Vec::with_capacity(
source_asset.falloff_resolution as usize * source_asset.phase_resolution as usize,
);
scattering.extend(
(0..source_asset.falloff_resolution * source_asset.phase_resolution).map(|raw_i| {
let i = raw_i % source_asset.phase_resolution;
let j = raw_i / source_asset.phase_resolution;
let falloff = (i as f32 + 0.5) / source_asset.falloff_resolution as f32;
let phase = (j as f32 + 0.5) / source_asset.phase_resolution as f32;
let neg_l_dot_v = phase * 2.0 - 1.0;
source_asset
.terms
.iter()
.map(|term| {
term.scattering.extend(0.0)
* term.falloff.sample(falloff)
* term.phase.sample(neg_l_dot_v)
})
.sum::<Vec4>()
}),
);
let density_lut = render_device.create_texture_with_data(
render_queue,
&TextureDescriptor {
label: source_asset
.label
.as_deref()
.map(|label| format!("{}_density_lut", label))
.as_deref()
.or(Some("scattering_medium_density_lut")),
size: Extent3d {
width: source_asset.falloff_resolution,
height: 2,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::Rgba32Float,
usage: TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
TextureDataOrder::LayerMajor,
bytemuck::cast_slice(density.as_slice()),
);
let density_lut_view = density_lut.create_view(&TextureViewDescriptor {
label: source_asset
.label
.as_deref()
.map(|label| format!("{}_density_lut_view", label))
.as_deref()
.or(Some("scattering_medium_density_lut_view")),
..Default::default()
});
let scattering_lut = render_device.create_texture_with_data(
render_queue,
&TextureDescriptor {
label: source_asset
.label
.as_deref()
.map(|label| format!("{}_scattering_lut", label))
.as_deref()
.or(Some("scattering_medium_scattering_lut")),
size: Extent3d {
width: source_asset.falloff_resolution,
height: source_asset.phase_resolution,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::Rgba32Float,
usage: TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
TextureDataOrder::LayerMajor,
bytemuck::cast_slice(scattering.as_slice()),
);
let scattering_lut_view = scattering_lut.create_view(&TextureViewDescriptor {
label: source_asset
.label
.as_deref()
.map(|label| format!("{}_scattering_lut", label))
.as_deref()
.or(Some("scattering_medium_scattering_lut_view")),
..Default::default()
});
Ok(Self {
terms: source_asset.terms,
falloff_resolution: source_asset.falloff_resolution,
phase_resolution: source_asset.phase_resolution,
density_lut,
density_lut_view,
scattering_lut,
scattering_lut_view,
})
}
}
/// The default sampler for all scattering media LUTs.
///
/// Just a bilinear clamp-to-edge sampler, nothing fancy.
#[derive(Resource)]
pub struct ScatteringMediumSampler(Sampler);
impl ScatteringMediumSampler {
    /// Returns the underlying GPU sampler.
    pub fn sampler(&self) -> &Sampler {
        &self.0
    }
}
/// Creates the shared [`ScatteringMediumSampler`] resource at render startup.
fn init_scattering_medium_sampler(mut commands: Commands, render_device: Res<RenderDevice>) {
    let sampler = render_device.create_sampler(&SamplerDescriptor {
        label: Some("scattering_medium_sampler"),
        // Bilinear filtering; everything else (including the clamp-to-edge
        // address modes) is left at the descriptor defaults.
        mag_filter: FilterMode::Linear,
        min_filter: FilterMode::Linear,
        ..Default::default()
    });
    commands.insert_resource(ScatteringMediumSampler(sampler));
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/diagnostic.rs | crates/bevy_pbr/src/diagnostic.rs | use core::{
any::{type_name, Any, TypeId},
marker::PhantomData,
};
use bevy_app::{Plugin, PreUpdate};
use bevy_diagnostic::{Diagnostic, DiagnosticPath, Diagnostics, RegisterDiagnostic};
use bevy_ecs::{resource::Resource, system::Res};
use bevy_platform::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use bevy_render::{Extract, ExtractSchedule, RenderApp};
use crate::{Material, MaterialBindGroupAllocators};
/// Adds diagnostics (slab count, total slab size in bytes, and allocation
/// count) for the bind group allocator of material type `M`.
pub struct MaterialAllocatorDiagnosticPlugin<M: Material> {
    // Suffix appended to the allocation-count diagnostic (e.g. " materials").
    suffix: &'static str,
    _phantom: PhantomData<M>,
}
impl<M: Material> MaterialAllocatorDiagnosticPlugin<M> {
    /// Creates the plugin with a custom suffix for the allocation-count
    /// diagnostic.
    pub fn new(suffix: &'static str) -> Self {
        Self {
            suffix,
            _phantom: PhantomData,
        }
    }
}
impl<M: Material> Default for MaterialAllocatorDiagnosticPlugin<M> {
    fn default() -> Self {
        Self {
            suffix: " materials",
            _phantom: PhantomData,
        }
    }
}
impl<M: Material> MaterialAllocatorDiagnosticPlugin<M> {
    /// Get the [`DiagnosticPath`] for slab count
    pub fn slabs_diagnostic_path() -> DiagnosticPath {
        // Paths include the material's type name so each material type gets
        // its own diagnostic series.
        DiagnosticPath::from_components(["material_allocator_slabs", type_name::<M>()])
    }
    /// Get the [`DiagnosticPath`] for total slabs size
    pub fn slabs_size_diagnostic_path() -> DiagnosticPath {
        DiagnosticPath::from_components(["material_allocator_slabs_size", type_name::<M>()])
    }
    /// Get the [`DiagnosticPath`] for material allocations
    pub fn allocations_diagnostic_path() -> DiagnosticPath {
        DiagnosticPath::from_components(["material_allocator_allocations", type_name::<M>()])
    }
}
impl<M: Material> Plugin for MaterialAllocatorDiagnosticPlugin<M> {
    fn build(&self, app: &mut bevy_app::App) {
        // Register the three diagnostics in the main app, along with the
        // system that publishes the latest measurements into them.
        app.register_diagnostic(
            Diagnostic::new(Self::slabs_diagnostic_path()).with_suffix(" slabs"),
        )
        .register_diagnostic(
            Diagnostic::new(Self::slabs_size_diagnostic_path()).with_suffix(" bytes"),
        )
        .register_diagnostic(
            Diagnostic::new(Self::allocations_diagnostic_path()).with_suffix(self.suffix),
        )
        .init_resource::<MaterialAllocatorMeasurements<M>>()
        .add_systems(PreUpdate, add_material_allocator_measurement::<M>);
        // The allocators live in the render world, so the measurement itself
        // is taken during extraction.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.add_systems(ExtractSchedule, measure_allocator::<M>);
        }
    }
}
/// Latest allocator measurements for material type `M`.
///
/// Written during extraction through an immutable `Extract<Res<..>>` reference
/// (see `measure_allocator`), hence the atomic fields.
#[derive(Debug, Resource)]
struct MaterialAllocatorMeasurements<M: Material> {
    // Number of slabs in the allocator.
    slabs: AtomicUsize,
    // Total size of all slabs, in bytes.
    slabs_size: AtomicUsize,
    // Number of live material allocations.
    allocations: AtomicU64,
    _phantom: PhantomData<M>,
}
impl<M: Material> Default for MaterialAllocatorMeasurements<M> {
    fn default() -> Self {
        Self {
            slabs: AtomicUsize::default(),
            slabs_size: AtomicUsize::default(),
            allocations: AtomicU64::default(),
            _phantom: PhantomData,
        }
    }
}
/// Publishes the latest allocator measurements for `M` into the diagnostics
/// registered by [`MaterialAllocatorDiagnosticPlugin`]. Runs in `PreUpdate`
/// in the main world.
fn add_material_allocator_measurement<M: Material>(
    mut diagnostics: Diagnostics,
    measurements: Res<MaterialAllocatorMeasurements<M>>,
) {
    // NOTE(review): `add_measurement` takes a closure, presumably evaluated
    // lazily only when the diagnostic is enabled — confirm against
    // `bevy_diagnostic`.
    diagnostics.add_measurement(
        &MaterialAllocatorDiagnosticPlugin::<M>::slabs_diagnostic_path(),
        || measurements.slabs.load(Ordering::Relaxed) as f64,
    );
    diagnostics.add_measurement(
        &MaterialAllocatorDiagnosticPlugin::<M>::slabs_size_diagnostic_path(),
        || measurements.slabs_size.load(Ordering::Relaxed) as f64,
    );
    diagnostics.add_measurement(
        &MaterialAllocatorDiagnosticPlugin::<M>::allocations_diagnostic_path(),
        || measurements.allocations.load(Ordering::Relaxed) as f64,
    );
}
/// Snapshots the render-world bind group allocator stats for `M` into the
/// main-world [`MaterialAllocatorMeasurements`] resource during extraction.
fn measure_allocator<M: Material + Any>(
    measurements: Extract<Res<MaterialAllocatorMeasurements<M>>>,
    allocators: Res<MaterialBindGroupAllocators>,
) {
    // No allocator for this material type yet: leave the previous numbers.
    let Some(allocator) = allocators.get(&TypeId::of::<M>()) else {
        return;
    };
    // Relaxed ordering suffices: these are independent monitoring counters.
    measurements
        .slabs
        .store(allocator.slab_count(), Ordering::Relaxed);
    measurements
        .slabs_size
        .store(allocator.slabs_size(), Ordering::Relaxed);
    measurements
        .allocations
        .store(allocator.allocations(), Ordering::Relaxed);
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/prepass/prepass_bindings.rs | crates/bevy_pbr/src/prepass/prepass_bindings.rs | use bevy_core_pipeline::prepass::ViewPrepassTextures;
use bevy_render::render_resource::{
binding_types::{
texture_2d, texture_2d_multisampled, texture_depth_2d, texture_depth_2d_multisampled,
},
BindGroupLayoutEntryBuilder, TextureAspect, TextureSampleType, TextureView,
TextureViewDescriptor,
};
use bevy_utils::default;
use crate::MeshPipelineViewLayoutKey;
/// Builds the optional bind group layout entries for the prepass attachments
/// (depth, normals, motion vectors, deferred), in that fixed order. An entry
/// is `Some` only when the corresponding prepass is enabled in `layout_key`.
pub fn get_bind_group_layout_entries(
    layout_key: MeshPipelineViewLayoutKey,
) -> [Option<BindGroupLayoutEntryBuilder>; 4] {
    let multisampled = layout_key.contains(MeshPipelineViewLayoutKey::MULTISAMPLED);

    // Unfilterable float texture entry, picking the multisampled binding type
    // when the view is multisampled.
    let unfilterable_float = || {
        if multisampled {
            texture_2d_multisampled(TextureSampleType::Float { filterable: false })
        } else {
            texture_2d(TextureSampleType::Float { filterable: false })
        }
    };

    let mut entries: [Option<BindGroupLayoutEntryBuilder>; 4] = [None; 4];

    // Depth texture
    if layout_key.contains(MeshPipelineViewLayoutKey::DEPTH_PREPASS) {
        entries[0] = Some(if multisampled {
            texture_depth_2d_multisampled()
        } else {
            texture_depth_2d()
        });
    }
    // Normal texture
    if layout_key.contains(MeshPipelineViewLayoutKey::NORMAL_PREPASS) {
        entries[1] = Some(unfilterable_float());
    }
    // Motion Vectors texture
    if layout_key.contains(MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS) {
        entries[2] = Some(unfilterable_float());
    }
    // Deferred texture
    if layout_key.contains(MeshPipelineViewLayoutKey::DEFERRED_PREPASS) {
        entries[3] = Some(texture_2d(TextureSampleType::Uint));
    }

    entries
}
/// Collects the texture views for the prepass attachments in the same order
/// as [`get_bind_group_layout_entries`]: depth, normals, motion vectors,
/// deferred. Missing textures yield `None`.
pub fn get_bindings(prepass_textures: Option<&ViewPrepassTextures>) -> [Option<TextureView>; 4] {
    // The depth attachment needs a fresh DepthOnly-aspect view so it can be
    // sampled as a depth texture.
    let depth_view = prepass_textures.and_then(|textures| {
        textures.depth.as_ref().map(|depth| {
            depth.texture.texture.create_view(&TextureViewDescriptor {
                label: Some("prepass_depth"),
                aspect: TextureAspect::DepthOnly,
                ..default()
            })
        })
    });
    [
        depth_view,
        prepass_textures.and_then(|textures| textures.normal_view().cloned()),
        prepass_textures.and_then(|textures| textures.motion_vectors_view().cloned()),
        prepass_textures.and_then(|textures| textures.deferred_view().cloned()),
    ]
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/prepass/mod.rs | crates/bevy_pbr/src/prepass/mod.rs | mod prepass_bindings;
use crate::{
alpha_mode_pipeline_key, binding_arrays_are_usable, buffer_layout,
collect_meshes_for_gpu_building, init_material_pipeline, set_mesh_motion_vector_flags,
setup_morph_and_skinning_defs, skin, DeferredAlphaMaskDrawFunction, DeferredFragmentShader,
DeferredOpaqueDrawFunction, DeferredVertexShader, DrawMesh, EntitySpecializationTicks,
ErasedMaterialPipelineKey, MaterialPipeline, MaterialProperties, MeshLayouts, MeshPipeline,
MeshPipelineKey, OpaqueRendererMethod, PreparedMaterial, PrepassAlphaMaskDrawFunction,
PrepassFragmentShader, PrepassOpaqueDrawFunction, PrepassVertexShader, RenderLightmaps,
RenderMaterialInstances, RenderMeshInstanceFlags, RenderMeshInstances, RenderPhaseType,
SetMaterialBindGroup, SetMeshBindGroup, ShadowView,
};
use bevy_app::{App, Plugin, PreUpdate};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Handle};
use bevy_camera::{Camera, Camera3d};
use bevy_core_pipeline::{core_3d::CORE_3D_DEPTH_FORMAT, deferred::*, prepass::*};
use bevy_ecs::{
prelude::*,
system::{
lifetimeless::{Read, SRes},
SystemParamItem,
},
};
use bevy_math::{Affine3A, Mat4, Vec4};
use bevy_mesh::{Mesh, Mesh3d, MeshVertexBufferLayoutRef};
use bevy_render::{
alpha::AlphaMode,
batching::gpu_preprocessing::GpuPreprocessingSupport,
globals::{GlobalsBuffer, GlobalsUniform},
mesh::{allocator::MeshAllocator, RenderMesh},
render_asset::{prepare_assets, RenderAssets},
render_phase::*,
render_resource::{binding_types::uniform_buffer, *},
renderer::{RenderAdapter, RenderDevice, RenderQueue},
sync_world::RenderEntity,
view::{
ExtractedView, Msaa, RenderVisibilityRanges, RetainedViewEntity, ViewUniform,
ViewUniformOffset, ViewUniforms, VISIBILITY_RANGES_STORAGE_BUFFER_COUNT,
},
Extract, ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderStartup, RenderSystems,
};
use bevy_shader::{load_shader_library, Shader, ShaderDefVal};
use bevy_transform::prelude::GlobalTransform;
pub use prepass_bindings::*;
use tracing::{error, warn};
#[cfg(feature = "meshlet")]
use crate::meshlet::{
prepare_material_meshlet_meshes_prepass, queue_material_meshlet_meshes, InstanceManager,
MeshletMesh3d,
};
use alloc::sync::Arc;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{change_detection::Tick, system::SystemChangeTick};
use bevy_platform::collections::HashMap;
use bevy_render::{
erased_render_asset::ErasedRenderAssets,
sync_world::MainEntityHashMap,
view::RenderVisibleEntities,
RenderSystems::{PrepareAssets, PrepareResources},
};
use bevy_utils::default;
/// Sets up everything required to use the prepass pipeline.
///
/// This does not add the actual prepasses, see [`PrepassPlugin`] for that.
pub struct PrepassPipelinePlugin;
impl Plugin for PrepassPipelinePlugin {
    fn build(&self, app: &mut App) {
        // Register the prepass shader and its shared WGSL libraries on the
        // main app so they are available to every material's prepass pipeline.
        embedded_asset!(app, "prepass.wgsl");
        load_shader_library!(app, "prepass_bindings.wgsl");
        load_shader_library!(app, "prepass_utils.wgsl");
        load_shader_library!(app, "prepass_io.wgsl");
        // Everything below lives in the render world; bail out gracefully in
        // headless configurations where there is no render sub-app.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .add_systems(
                RenderStartup,
                (
                    // The prepass pipeline clones the material pipeline, so it
                    // must be initialized after it; the view bind group in turn
                    // needs the pipeline, hence the `.chain()`.
                    init_prepass_pipeline.after(init_material_pipeline),
                    init_prepass_view_bind_group,
                )
                    .chain(),
            )
            .add_systems(
                Render,
                prepare_prepass_view_bind_group.in_set(RenderSystems::PrepareBindGroups),
            )
            .init_resource::<SpecializedMeshPipelines<PrepassPipelineSpecializer>>();
    }
}
/// Sets up the prepasses for a material.
///
/// This depends on the [`PrepassPipelinePlugin`].
pub struct PrepassPlugin {
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
impl PrepassPlugin {
    /// Builds a [`PrepassPlugin`] configured with the supplied renderer debug flags.
    pub fn new(debug_flags: RenderDebugFlags) -> Self {
        Self { debug_flags }
    }
}
impl Plugin for PrepassPlugin {
    fn build(&self, app: &mut App) {
        // This plugin is added once per material type, but the shared systems
        // and phase plugins must only be registered once for the whole app;
        // the marker resource tells us whether a previous instance did so.
        let no_prepass_plugin_loaded = app
            .world()
            .get_resource::<AnyPrepassPluginLoaded>()
            .is_none();
        if no_prepass_plugin_loaded {
            app.insert_resource(AnyPrepassPluginLoaded)
                // At the start of each frame, last frame's GlobalTransforms become this frame's PreviousGlobalTransforms
                // and last frame's view projection matrices become this frame's PreviousViewProjections
                .add_systems(
                    PreUpdate,
                    (
                        update_mesh_previous_global_transforms,
                        update_previous_view_data,
                    ),
                )
                .add_plugins((
                    BinnedRenderPhasePlugin::<Opaque3dPrepass, MeshPipeline>::new(self.debug_flags),
                    BinnedRenderPhasePlugin::<AlphaMask3dPrepass, MeshPipeline>::new(
                        self.debug_flags,
                    ),
                ));
        }
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        if no_prepass_plugin_loaded {
            render_app
                .add_systems(ExtractSchedule, extract_camera_previous_view_data)
                .add_systems(
                    Render,
                    prepare_previous_view_uniforms.in_set(PrepareResources),
                );
        }
        // Render-world registration: draw commands for all four prepass phases
        // plus the specialization and queueing systems, carefully ordered
        // after the systems that produce the data they consume.
        render_app
            .init_resource::<ViewPrepassSpecializationTicks>()
            .init_resource::<ViewKeyPrepassCache>()
            .init_resource::<SpecializedPrepassMaterialPipelineCache>()
            .add_render_command::<Opaque3dPrepass, DrawPrepass>()
            .add_render_command::<AlphaMask3dPrepass, DrawPrepass>()
            .add_render_command::<Opaque3dDeferred, DrawPrepass>()
            .add_render_command::<AlphaMask3dDeferred, DrawPrepass>()
            .add_systems(
                Render,
                (
                    check_prepass_views_need_specialization.in_set(PrepareAssets),
                    specialize_prepass_material_meshes
                        .in_set(RenderSystems::PrepareMeshes)
                        .after(prepare_assets::<RenderMesh>)
                        .after(collect_meshes_for_gpu_building)
                        .after(set_mesh_motion_vector_flags),
                    queue_prepass_material_meshes.in_set(RenderSystems::QueueMeshes),
                ),
            );
        // Meshlet prepass materials are prepared just before regular meshlet
        // queueing, and only when the meshlet instance manager exists.
        #[cfg(feature = "meshlet")]
        render_app.add_systems(
            Render,
            prepare_material_meshlet_meshes_prepass
                .in_set(RenderSystems::QueueMeshes)
                .before(queue_material_meshlet_meshes)
                .run_if(resource_exists::<InstanceManager>),
        );
    }
}
/// Marker resource recording that some [`PrepassPlugin`] instance has already
/// registered the shared (per-app, not per-material) prepass systems, so
/// subsequent instances skip re-adding them.
#[derive(Resource)]
struct AnyPrepassPluginLoaded;
/// Snapshots each 3D camera's (and shadow view's) current view matrices into a
/// [`PreviousViewData`] component, so that next frame's motion-vector passes
/// can compare against them.
pub fn update_previous_view_data(
    mut commands: Commands,
    query: Query<(Entity, &Camera, &GlobalTransform), Or<(With<Camera3d>, With<ShadowView>)>>,
) {
    for (entity, camera, camera_transform) in &query {
        let clip_from_view = camera.clip_from_view();
        let world_from_view = camera_transform.affine();
        let view_from_world = Mat4::from(world_from_view.inverse());
        let view_from_clip = clip_from_view.inverse();
        let previous = PreviousViewData {
            view_from_world,
            clip_from_world: clip_from_view * view_from_world,
            clip_from_view,
            world_from_clip: Mat4::from(world_from_view) * view_from_clip,
            view_from_clip,
        };
        // `try_insert`: the entity may have been despawned this frame.
        commands.entity(entity).try_insert(previous);
    }
}
/// A mesh's [`GlobalTransform`] as of the previous frame, used when computing
/// motion vectors.
#[derive(Component, PartialEq, Default)]
pub struct PreviousGlobalTransform(pub Affine3A);
// Which entities receive a `PreviousGlobalTransform`: plain meshes always,
// plus meshlet meshes when the `meshlet` feature is enabled.
#[cfg(not(feature = "meshlet"))]
type PreviousMeshFilter = With<Mesh3d>;
#[cfg(feature = "meshlet")]
type PreviousMeshFilter = Or<(With<Mesh3d>, With<MeshletMesh3d>)>;
/// Rolls each mesh's current [`GlobalTransform`] over into its
/// [`PreviousGlobalTransform`], inserting the component on meshes that don't
/// have one yet. Skipped entirely while no relevant view is active.
pub fn update_mesh_previous_global_transforms(
    mut commands: Commands,
    views: Query<&Camera, Or<(With<Camera3d>, With<ShadowView>)>>,
    new_meshes: Query<
        (Entity, &GlobalTransform),
        (PreviousMeshFilter, Without<PreviousGlobalTransform>),
    >,
    mut meshes: Query<(&GlobalTransform, &mut PreviousGlobalTransform), PreviousMeshFilter>,
) {
    // Guard clause: nothing to do unless at least one camera is active.
    if !views.iter().any(|camera| camera.is_active) {
        return;
    }
    // First-time meshes: seed the component with the current transform.
    for (entity, transform) in &new_meshes {
        commands
            .entity(entity)
            .try_insert(PreviousGlobalTransform(transform.affine()));
    }
    // Existing meshes: copy the current transform in, in parallel.
    // `set_if_neq` avoids triggering change detection for static meshes.
    meshes.par_iter_mut().for_each(|(transform, mut previous)| {
        previous.set_if_neq(PreviousGlobalTransform(transform.affine()));
    });
}
/// Everything shared by all specialized prepass pipelines: bind group layouts,
/// the default shader, and capability flags queried from the render device.
#[derive(Resource, Clone)]
pub struct PrepassPipeline {
    /// View bind group layout used when the motion-vector prepass is enabled
    /// (includes the previous-view uniforms at binding 2).
    pub view_layout_motion_vectors: BindGroupLayoutDescriptor,
    /// View bind group layout used when motion vectors are disabled.
    pub view_layout_no_motion_vectors: BindGroupLayoutDescriptor,
    /// Per-mesh bind group layouts, cloned from the [`MeshPipeline`].
    pub mesh_layouts: MeshLayouts,
    /// Placeholder layout for bind group slot 1, which the prepass doesn't use.
    pub empty_layout: BindGroupLayoutDescriptor,
    /// Fallback shader used when a material supplies no prepass/deferred shader.
    pub default_prepass_shader: Handle<Shader>,
    /// Whether skins will use uniform buffers on account of storage buffers
    /// being unavailable on this platform.
    pub skins_use_uniform_buffers: bool,
    /// Whether the device supports `DEPTH_CLIP_CONTROL` natively; if not,
    /// unclipped depth for directional shadows is emulated in the fragment shader.
    pub depth_clip_control_supported: bool,
    /// Whether binding arrays (a.k.a. bindless textures) are usable on the
    /// current render device.
    pub binding_arrays_are_usable: bool,
    /// Clone of the material pipeline, passed to material `specialize` hooks.
    pub material_pipeline: MaterialPipeline,
}
/// Builds the [`PrepassPipeline`] resource at render startup: creates the two
/// view bind group layouts (with/without motion vectors), loads the default
/// prepass shader, and records device capability flags.
pub fn init_prepass_pipeline(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_adapter: Res<RenderAdapter>,
    mesh_pipeline: Res<MeshPipeline>,
    material_pipeline: Res<MaterialPipeline>,
    asset_server: Res<AssetServer>,
) {
    // Visibility ranges fall back to a uniform buffer when read-only storage
    // buffers aren't available (e.g. WebGL2).
    let visibility_ranges_buffer_binding_type =
        render_device.get_supported_read_only_binding_type(VISIBILITY_RANGES_STORAGE_BUFFER_COUNT);
    // NOTE: the binding indices below (0, 1, 2, 14) are hardcoded in the
    // prepass WGSL shaders and must stay in sync with them.
    let view_layout_motion_vectors = BindGroupLayoutDescriptor::new(
        "prepass_view_layout_motion_vectors",
        &BindGroupLayoutEntries::with_indices(
            ShaderStages::VERTEX_FRAGMENT,
            (
                // View
                (0, uniform_buffer::<ViewUniform>(true)),
                // Globals
                (1, uniform_buffer::<GlobalsUniform>(false)),
                // PreviousViewUniforms
                (2, uniform_buffer::<PreviousViewData>(true)),
                // VisibilityRanges
                (
                    14,
                    buffer_layout(
                        visibility_ranges_buffer_binding_type,
                        false,
                        Some(Vec4::min_size()),
                    )
                    .visibility(ShaderStages::VERTEX),
                ),
            ),
        ),
    );
    // Same layout minus the previous-view uniforms at binding 2.
    let view_layout_no_motion_vectors = BindGroupLayoutDescriptor::new(
        "prepass_view_layout_no_motion_vectors",
        &BindGroupLayoutEntries::with_indices(
            ShaderStages::VERTEX_FRAGMENT,
            (
                // View
                (0, uniform_buffer::<ViewUniform>(true)),
                // Globals
                (1, uniform_buffer::<GlobalsUniform>(false)),
                // VisibilityRanges
                (
                    14,
                    buffer_layout(
                        visibility_ranges_buffer_binding_type,
                        false,
                        Some(Vec4::min_size()),
                    )
                    .visibility(ShaderStages::VERTEX),
                ),
            ),
        ),
    );
    let depth_clip_control_supported = render_device
        .features()
        .contains(WgpuFeatures::DEPTH_CLIP_CONTROL);
    commands.insert_resource(PrepassPipeline {
        view_layout_motion_vectors,
        view_layout_no_motion_vectors,
        mesh_layouts: mesh_pipeline.mesh_layouts.clone(),
        default_prepass_shader: load_embedded_asset!(asset_server.as_ref(), "prepass.wgsl"),
        skins_use_uniform_buffers: skin::skins_use_uniform_buffers(&render_device.limits()),
        depth_clip_control_supported,
        binding_arrays_are_usable: binding_arrays_are_usable(&render_device, &render_adapter),
        empty_layout: BindGroupLayoutDescriptor::new("prepass_empty_layout", &[]),
        material_pipeline: material_pipeline.clone(),
    });
}
/// Pairs the shared [`PrepassPipeline`] with one material's properties so the
/// combination can be specialized per mesh layout and pipeline key.
pub struct PrepassPipelineSpecializer {
    /// The shared prepass pipeline state.
    pub pipeline: PrepassPipeline,
    /// The properties of the material being specialized for.
    pub properties: Arc<MaterialProperties>,
}
impl SpecializedMeshPipeline for PrepassPipelineSpecializer {
    type Key = ErasedMaterialPipelineKey;
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut shader_defs = Vec::new();
        if self.properties.bindless {
            shader_defs.push("BINDLESS".into());
        }
        // Build the base descriptor from the shared prepass specialization.
        let mut descriptor =
            self.pipeline
                .specialize(key.mesh_key, shader_defs, layout, &self.properties)?;
        // This is a bit risky because it's possible to change something that would
        // break the prepass but be fine in the main pass.
        // Since this api is pretty low-level it doesn't matter that much, but it is a potential issue.
        if let Some(specialize) = self.properties.specialize {
            specialize(
                &self.pipeline.material_pipeline,
                &mut descriptor,
                layout,
                key,
            )?;
        }
        Ok(descriptor)
    }
}
impl PrepassPipeline {
    /// Builds a complete [`RenderPipelineDescriptor`] for one combination of
    /// mesh pipeline key, vertex layout, and material properties.
    ///
    /// This assembles the shader defs, vertex attributes, bind group layouts
    /// (view, empty, morph/skin, material), fragment targets, and fixed
    /// depth-stencil state that every prepass variant shares. The attribute
    /// shader locations (0..7) and bind group indices are hardcoded in the
    /// prepass WGSL and must not be reordered here.
    fn specialize(
        &self,
        mesh_key: MeshPipelineKey,
        shader_defs: Vec<ShaderDefVal>,
        layout: &MeshVertexBufferLayoutRef,
        material_properties: &MaterialProperties,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut shader_defs = shader_defs;
        // Bind group 0 is the view layout (variant depends on motion vectors);
        // group 1 is intentionally empty (see `empty_layout`).
        let mut bind_group_layouts = vec![
            if mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
                self.view_layout_motion_vectors.clone()
            } else {
                self.view_layout_no_motion_vectors.clone()
            },
            self.empty_layout.clone(),
        ];
        let mut vertex_attributes = Vec::new();
        // Let the shader code know that it's running in a prepass pipeline.
        // (PBR code will use this to detect that it's running in deferred mode,
        // since that's the only time it gets called from a prepass pipeline.)
        shader_defs.push("PREPASS_PIPELINE".into());
        shader_defs.push(ShaderDefVal::UInt(
            "MATERIAL_BIND_GROUP".into(),
            crate::MATERIAL_BIND_GROUP_INDEX as u32,
        ));
        // NOTE: Eventually, it would be nice to only add this when the shaders are overloaded by the Material.
        // The main limitation right now is that bind group order is hardcoded in shaders.
        bind_group_layouts.push(
            material_properties
                .material_layout
                .as_ref()
                .unwrap()
                .clone(),
        );
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        shader_defs.push("WEBGL2".into());
        shader_defs.push("VERTEX_OUTPUT_INSTANCE_INDEX".into());
        if mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) {
            shader_defs.push("DEPTH_PREPASS".into());
        }
        if mesh_key.contains(MeshPipelineKey::MAY_DISCARD) {
            shader_defs.push("MAY_DISCARD".into());
        }
        // Only the blend modes that affect the prepass (alpha-based discard)
        // get shader defs here.
        let blend_key = mesh_key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS);
        if blend_key == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA {
            shader_defs.push("BLEND_PREMULTIPLIED_ALPHA".into());
        }
        if blend_key == MeshPipelineKey::BLEND_ALPHA {
            shader_defs.push("BLEND_ALPHA".into());
        }
        if layout.0.contains(Mesh::ATTRIBUTE_POSITION) {
            shader_defs.push("VERTEX_POSITIONS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0));
        }
        // For directional light shadow map views, use unclipped depth via either the native GPU feature,
        // or emulated by setting depth in the fragment shader for GPUs that don't support it natively.
        let emulate_unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO)
            && !self.depth_clip_control_supported;
        if emulate_unclipped_depth {
            shader_defs.push("UNCLIPPED_DEPTH_ORTHO_EMULATION".into());
            // PERF: This line forces the "prepass fragment shader" to always run in
            // common scenarios like "directional light calculation". Doing so resolves
            // a pretty nasty depth clamping bug, but it also feels a bit excessive.
            // We should try to find a way to resolve this without forcing the fragment
            // shader to run.
            // https://github.com/bevyengine/bevy/pull/8877
            shader_defs.push("PREPASS_FRAGMENT".into());
        }
        let unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO)
            && self.depth_clip_control_supported;
        if layout.0.contains(Mesh::ATTRIBUTE_UV_0) {
            shader_defs.push("VERTEX_UVS".into());
            shader_defs.push("VERTEX_UVS_A".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(1));
        }
        if layout.0.contains(Mesh::ATTRIBUTE_UV_1) {
            shader_defs.push("VERTEX_UVS".into());
            shader_defs.push("VERTEX_UVS_B".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_UV_1.at_shader_location(2));
        }
        if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) {
            shader_defs.push("NORMAL_PREPASS".into());
        }
        // Normals (and tangents) are needed both for the normal prepass and
        // for writing the deferred G-buffer.
        if mesh_key.intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::DEFERRED_PREPASS)
        {
            shader_defs.push("NORMAL_PREPASS_OR_DEFERRED_PREPASS".into());
            if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) {
                shader_defs.push("VERTEX_NORMALS".into());
                vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(3));
            } else if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) {
                warn!(
                    "The default normal prepass expects the mesh to have vertex normal attributes."
                );
            }
            if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) {
                shader_defs.push("VERTEX_TANGENTS".into());
                vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(4));
            }
        }
        if mesh_key
            .intersects(MeshPipelineKey::MOTION_VECTOR_PREPASS | MeshPipelineKey::DEFERRED_PREPASS)
        {
            shader_defs.push("MOTION_VECTOR_PREPASS_OR_DEFERRED_PREPASS".into());
        }
        if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
            shader_defs.push("DEFERRED_PREPASS".into());
        }
        if mesh_key.contains(MeshPipelineKey::LIGHTMAPPED) {
            shader_defs.push("LIGHTMAP".into());
        }
        if mesh_key.contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) {
            shader_defs.push("LIGHTMAP_BICUBIC_SAMPLING".into());
        }
        if layout.0.contains(Mesh::ATTRIBUTE_COLOR) {
            shader_defs.push("VERTEX_COLORS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(7));
        }
        if mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
            shader_defs.push("MOTION_VECTOR_PREPASS".into());
        }
        if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) {
            shader_defs.push("HAS_PREVIOUS_SKIN".into());
        }
        if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) {
            shader_defs.push("HAS_PREVIOUS_MORPH".into());
        }
        if self.binding_arrays_are_usable {
            shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into());
        }
        if mesh_key.contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) {
            shader_defs.push("VISIBILITY_RANGE_DITHER".into());
        }
        if mesh_key.intersects(
            MeshPipelineKey::NORMAL_PREPASS
                | MeshPipelineKey::MOTION_VECTOR_PREPASS
                | MeshPipelineKey::DEFERRED_PREPASS,
        ) {
            shader_defs.push("PREPASS_FRAGMENT".into());
        }
        // Morph/skin attributes start at shader location 5; the resulting bind
        // group becomes group 2 (between the empty group and the material).
        let bind_group = setup_morph_and_skinning_defs(
            &self.mesh_layouts,
            layout,
            5,
            &mesh_key,
            &mut shader_defs,
            &mut vertex_attributes,
            self.skins_use_uniform_buffers,
        );
        bind_group_layouts.insert(2, bind_group);
        let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?;
        // Setup prepass fragment targets - normals in slot 0 (or None if not needed), motion vectors in slot 1
        let mut targets = prepass_target_descriptors(
            mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS),
            mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS),
            mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS),
        );
        if targets.iter().all(Option::is_none) {
            // if no targets are required then clear the list, so that no fragment shader is required
            // (though one may still be used for discarding depth buffer writes)
            targets.clear();
        }
        // The fragment shader is only used when the normal prepass or motion vectors prepass
        // is enabled, the material uses alpha cutoff values and doesn't rely on the standard
        // prepass shader, or we are emulating unclipped depth in the fragment shader.
        let fragment_required = !targets.is_empty()
            || emulate_unclipped_depth
            || (mesh_key.contains(MeshPipelineKey::MAY_DISCARD)
                && material_properties
                    .get_shader(PrepassFragmentShader)
                    .is_some());
        let fragment = fragment_required.then(|| {
            // Use the fragment shader from the material
            let frag_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
                match material_properties.get_shader(DeferredFragmentShader) {
                    Some(frag_shader_handle) => frag_shader_handle,
                    None => self.default_prepass_shader.clone(),
                }
            } else {
                match material_properties.get_shader(PrepassFragmentShader) {
                    Some(frag_shader_handle) => frag_shader_handle,
                    None => self.default_prepass_shader.clone(),
                }
            };
            FragmentState {
                shader: frag_shader_handle,
                shader_defs: shader_defs.clone(),
                targets,
                ..default()
            }
        });
        // Use the vertex shader from the material if present
        let vert_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
            if let Some(handle) = material_properties.get_shader(DeferredVertexShader) {
                handle
            } else {
                self.default_prepass_shader.clone()
            }
        } else if let Some(handle) = material_properties.get_shader(PrepassVertexShader) {
            handle
        } else {
            self.default_prepass_shader.clone()
        };
        let descriptor = RenderPipelineDescriptor {
            vertex: VertexState {
                shader: vert_shader_handle,
                shader_defs,
                buffers: vec![vertex_buffer_layout],
                ..default()
            },
            fragment,
            layout: bind_group_layouts,
            primitive: PrimitiveState {
                topology: mesh_key.primitive_topology(),
                unclipped_depth,
                ..default()
            },
            // Reverse-Z depth: write depth with `GreaterEqual`; stencil unused.
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                depth_write_enabled: true,
                depth_compare: CompareFunction::GreaterEqual,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: mesh_key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some("prepass_pipeline".into()),
            ..default()
        };
        Ok(descriptor)
    }
}
// Extracts each active 3D camera's `PreviousViewData` from the main world into
// the render world; inactive cameras have it removed. (This extracts view
// data, not render phases.)
pub fn extract_camera_previous_view_data(
    mut commands: Commands,
    cameras_3d: Extract<Query<(RenderEntity, &Camera, Option<&PreviousViewData>), With<Camera3d>>>,
) {
    for (entity, camera, maybe_previous_view_data) in cameras_3d.iter() {
        let mut entity = commands
            .get_entity(entity)
            .expect("Camera entity wasn't synced.");
        if camera.is_active {
            if let Some(previous_view_data) = maybe_previous_view_data {
                entity.insert(previous_view_data.clone());
            }
        } else {
            // Stale previous-view data on an inactive camera must not linger.
            entity.remove::<PreviousViewData>();
        }
    }
}
/// Uploads one [`PreviousViewData`] per view into the previous-view uniform
/// buffer and tags each view entity with its dynamic offset. Views without
/// extracted previous-frame data fall back to their current-frame matrices.
pub fn prepare_previous_view_uniforms(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut previous_view_uniforms: ResMut<PreviousViewUniforms>,
    views: Query<
        (Entity, &ExtractedView, Option<&PreviousViewData>),
        Or<(With<Camera3d>, With<ShadowView>)>,
    >,
) {
    let views_iter = views.iter();
    let view_count = views_iter.len();
    // Reserve exactly enough uniform space for every view up front.
    let Some(mut writer) =
        previous_view_uniforms
            .uniforms
            .get_writer(view_count, &render_device, &render_queue)
    else {
        return;
    };
    for (entity, view, stored) in views_iter {
        // Prefer the snapshot taken last frame; otherwise synthesize one from
        // the view's current matrices (first frame, or no snapshot extracted).
        let prev_view_data = stored.cloned().unwrap_or_else(|| {
            let world_from_view = view.world_from_view.affine();
            let view_from_world = Mat4::from(world_from_view.inverse());
            let view_from_clip = view.clip_from_view.inverse();
            PreviousViewData {
                view_from_world,
                clip_from_world: view.clip_from_view * view_from_world,
                clip_from_view: view.clip_from_view,
                world_from_clip: Mat4::from(world_from_view) * view_from_clip,
                view_from_clip,
            }
        });
        commands.entity(entity).insert(PreviousViewUniformOffset {
            offset: writer.write(&prev_view_data),
        });
    }
}
/// The per-view bind groups used by prepass draws.
///
/// The two view variants are rebuilt every frame by
/// [`prepare_prepass_view_bind_group`] once the uniform buffers exist; until
/// then they are `None`.
#[derive(Resource)]
pub struct PrepassViewBindGroup {
    /// View bind group including previous-view uniforms (motion vectors on).
    pub motion_vectors: Option<BindGroup>,
    /// View bind group without previous-view uniforms.
    pub no_motion_vectors: Option<BindGroup>,
    /// Static empty bind group filling the unused slot 1.
    pub empty_bind_group: BindGroup,
}
/// Creates the [`PrepassViewBindGroup`] resource at render startup.
///
/// Only the (contentless) empty bind group can be built this early; the real
/// view bind groups need per-frame uniform buffers, so they start as `None`.
pub fn init_prepass_view_bind_group(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    pipeline: Res<PrepassPipeline>,
) {
    let empty_layout = pipeline_cache.get_bind_group_layout(&pipeline.empty_layout);
    commands.insert_resource(PrepassViewBindGroup {
        motion_vectors: None,
        no_motion_vectors: None,
        empty_bind_group: render_device.create_bind_group(
            "prepass_view_empty_bind_group",
            &empty_layout,
            &[],
        ),
    });
}
/// Rebuilds the two prepass view bind groups each frame from the current view,
/// globals, visibility-range, and (when available) previous-view buffers.
///
/// Binding indices (0, 1, 2, 14) must match the layouts built in
/// [`init_prepass_pipeline`].
pub fn prepare_prepass_view_bind_group(
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    prepass_pipeline: Res<PrepassPipeline>,
    view_uniforms: Res<ViewUniforms>,
    globals_buffer: Res<GlobalsBuffer>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    visibility_ranges: Res<RenderVisibilityRanges>,
    mut prepass_view_bind_group: ResMut<PrepassViewBindGroup>,
) {
    // All three base buffers must exist before any bind group can be built.
    if let (Some(view_binding), Some(globals_binding), Some(visibility_ranges_buffer)) = (
        view_uniforms.uniforms.binding(),
        globals_buffer.buffer.binding(),
        visibility_ranges.buffer().buffer(),
    ) {
        prepass_view_bind_group.no_motion_vectors = Some(render_device.create_bind_group(
            "prepass_view_no_motion_vectors_bind_group",
            &pipeline_cache.get_bind_group_layout(&prepass_pipeline.view_layout_no_motion_vectors),
            &BindGroupEntries::with_indices((
                (0, view_binding.clone()),
                (1, globals_binding.clone()),
                (14, visibility_ranges_buffer.as_entire_binding()),
            )),
        ));
        // The motion-vectors variant additionally needs the previous-view
        // uniforms, which may not exist yet on the first frame.
        if let Some(previous_view_uniforms_binding) = previous_view_uniforms.uniforms.binding() {
            prepass_view_bind_group.motion_vectors = Some(render_device.create_bind_group(
                "prepass_view_motion_vectors_bind_group",
                &pipeline_cache.get_bind_group_layout(&prepass_pipeline.view_layout_motion_vectors),
                &BindGroupEntries::with_indices((
                    (0, view_binding),
                    (1, globals_binding),
                    (2, previous_view_uniforms_binding),
                    (14, visibility_ranges_buffer.as_entire_binding()),
                )),
            ));
        }
    }
}
/// Stores the [`SpecializedPrepassMaterialViewPipelineCache`] for each view.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct SpecializedPrepassMaterialPipelineCache {
    // view_entity -> view pipeline cache
    #[deref]
    map: HashMap<RetainedViewEntity, SpecializedPrepassMaterialViewPipelineCache>,
}
/// Stores the cached render pipeline ID for each entity in a single view, as
/// well as the last time it was changed.
#[derive(Deref, DerefMut, Default)]
pub struct SpecializedPrepassMaterialViewPipelineCache {
    // material entity -> (tick, pipeline_id)
    #[deref]
    map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>,
}
/// The last [`MeshPipelineKey`] computed for each view, used to detect when a
/// view's prepass configuration changes.
#[derive(Resource, Deref, DerefMut, Default, Clone)]
pub struct ViewKeyPrepassCache(HashMap<RetainedViewEntity, MeshPipelineKey>);
/// The tick at which each view's prepass key last changed; entities whose
/// cached pipelines predate this tick must be respecialized.
#[derive(Resource, Deref, DerefMut, Default, Clone)]
pub struct ViewPrepassSpecializationTicks(HashMap<RetainedViewEntity, Tick>);
/// Detects when a view's prepass configuration (MSAA sample count or which
/// prepasses are enabled) has changed, and records the change tick so that
/// cached material pipelines for that view get respecialized.
pub fn check_prepass_views_need_specialization(
    mut view_key_cache: ResMut<ViewKeyPrepassCache>,
    mut view_specialization_ticks: ResMut<ViewPrepassSpecializationTicks>,
    mut views: Query<(
        &ExtractedView,
        &Msaa,
        Option<&DepthPrepass>,
        Option<&NormalPrepass>,
        Option<&MotionVectorPrepass>,
    )>,
    ticks: SystemChangeTick,
) {
    for (view, msaa, depth_prepass, normal_prepass, motion_vector_prepass) in views.iter_mut() {
        // Build the pipeline key implied by this view's current configuration.
        let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples());
        if depth_prepass.is_some() {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }
        if normal_prepass.is_some() {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }
        if motion_vector_prepass.is_some() {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }
        // Update the cache and bump the specialization tick only when the key
        // is new for this view or has actually changed. (Previously the insert
        // logic was duplicated across the changed-key and cache-miss branches,
        // with a redundant `get_mut` lookup before the insert.)
        if view_key_cache.get(&view.retained_view_entity) != Some(&view_key) {
            view_key_cache.insert(view.retained_view_entity, view_key);
            view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run());
        }
    }
}
pub fn specialize_prepass_material_meshes(
render_meshes: Res<RenderAssets<RenderMesh>>,
render_materials: Res<ErasedRenderAssets<PreparedMaterial>>,
render_mesh_instances: Res<RenderMeshInstances>,
render_material_instances: Res<RenderMaterialInstances>,
render_lightmaps: Res<RenderLightmaps>,
render_visibility_ranges: Res<RenderVisibilityRanges>,
view_key_cache: Res<ViewKeyPrepassCache>,
views: Query<(
&ExtractedView,
&RenderVisibleEntities,
&Msaa,
Option<&MotionVectorPrepass>,
Option<&DeferredPrepass>,
)>,
(
opaque_prepass_render_phases,
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/deferred/mod.rs | crates/bevy_pbr/src/deferred/mod.rs | use crate::{
graph::NodePbr, MeshPipeline, MeshViewBindGroup, RenderViewLightProbes,
ScreenSpaceAmbientOcclusion, ScreenSpaceReflectionsUniform, ViewEnvironmentMapUniformOffset,
ViewLightProbesUniformOffset, ViewScreenSpaceReflectionsUniformOffset,
TONEMAPPING_LUT_SAMPLER_BINDING_INDEX, TONEMAPPING_LUT_TEXTURE_BINDING_INDEX,
};
use crate::{
DistanceFog, ExtractedAtmosphere, MeshPipelineKey, ViewFogUniformOffset,
ViewLightsUniformOffset,
};
use bevy_app::prelude::*;
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Handle};
use bevy_core_pipeline::{
core_3d::graph::{Core3d, Node3d},
deferred::{
copy_lighting_id::DeferredLightingIdDepthTexture, DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT,
},
prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass},
tonemapping::{DebandDither, Tonemapping},
};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_image::BevyDefault as _;
use bevy_light::{EnvironmentMapLight, IrradianceVolume, ShadowFilteringMethod};
use bevy_render::RenderStartup;
use bevy_render::{
diagnostic::RecordDiagnostics,
extract_component::{
ComponentUniforms, ExtractComponent, ExtractComponentPlugin, UniformComponentPlugin,
},
render_graph::{NodeRunError, RenderGraphContext, RenderGraphExt, ViewNode, ViewNodeRunner},
render_resource::{binding_types::uniform_buffer, *},
renderer::RenderContext,
view::{ExtractedView, ViewTarget, ViewUniformOffset},
Render, RenderApp, RenderSystems,
};
use bevy_shader::{Shader, ShaderDefVal};
use bevy_utils::default;
/// Adds the full-screen deferred lighting pass that shades the G-buffer
/// produced by the deferred prepass.
pub struct DeferredPbrLightingPlugin;
/// The `depth_id` assigned to deferred views that don't specify their own.
pub const DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID: u8 = 1;
/// Component with a `depth_id` for specifying which corresponding materials should be rendered by this specific PBR deferred lighting pass.
///
/// Will be automatically added to entities with the [`DeferredPrepass`] component that don't already have a [`PbrDeferredLightingDepthId`].
#[derive(Component, Clone, Copy, ExtractComponent, ShaderType)]
pub struct PbrDeferredLightingDepthId {
    // Stored widened to u32 for shader/uniform compatibility; the public
    // API exposes it as u8.
    depth_id: u32,
    // WebGL2 requires uniform structs to be padded to 16 bytes.
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _webgl2_padding_0: f32,
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _webgl2_padding_1: f32,
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _webgl2_padding_2: f32,
}
impl PbrDeferredLightingDepthId {
    /// Creates a new depth ID from a `u8` value (widened to `u32` for the
    /// shader-side uniform layout).
    pub fn new(value: u8) -> PbrDeferredLightingDepthId {
        PbrDeferredLightingDepthId {
            depth_id: value as u32,
            #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
            _webgl2_padding_0: 0.0,
            #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
            _webgl2_padding_1: 0.0,
            #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
            _webgl2_padding_2: 0.0,
        }
    }
    /// Sets the depth ID.
    pub fn set(&mut self, value: u8) {
        self.depth_id = value as u32;
    }
    /// Returns the depth ID, narrowed back to `u8`.
    pub fn get(&self) -> u8 {
        self.depth_id as u8
    }
}
impl Default for PbrDeferredLightingDepthId {
fn default() -> Self {
PbrDeferredLightingDepthId {
depth_id: DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID as u32,
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
_webgl2_padding_0: 0.0,
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
_webgl2_padding_1: 0.0,
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
_webgl2_padding_2: 0.0,
}
}
}
impl Plugin for DeferredPbrLightingPlugin {
    fn build(&self, app: &mut App) {
        // Extract and upload the per-view depth ID, and make sure every
        // deferred view has one.
        app.add_plugins((
            ExtractComponentPlugin::<PbrDeferredLightingDepthId>::default(),
            UniformComponentPlugin::<PbrDeferredLightingDepthId>::default(),
        ))
        .add_systems(PostUpdate, insert_deferred_lighting_pass_id_component);
        embedded_asset!(app, "deferred_lighting.wgsl");
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<SpecializedRenderPipelines<DeferredLightingLayout>>()
            .add_systems(RenderStartup, init_deferred_lighting_layout)
            .add_systems(
                Render,
                (prepare_deferred_lighting_pipelines.in_set(RenderSystems::Prepare),),
            )
            // The lighting pass runs between the start of the main pass and
            // the opaque pass, so opaque forward rendering composites on top.
            .add_render_graph_node::<ViewNodeRunner<DeferredOpaquePass3dPbrLightingNode>>(
                Core3d,
                NodePbr::DeferredLightingPass,
            )
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::StartMainPass,
                    NodePbr::DeferredLightingPass,
                    Node3d::MainOpaquePass,
                ),
            );
    }
}
/// The render graph node that draws the full-screen deferred PBR lighting pass
/// for a single view.
#[derive(Default)]
pub struct DeferredOpaquePass3dPbrLightingNode;
impl ViewNode for DeferredOpaquePass3dPbrLightingNode {
    type ViewQuery = (
        &'static ViewUniformOffset,
        &'static ViewLightsUniformOffset,
        &'static ViewFogUniformOffset,
        &'static ViewLightProbesUniformOffset,
        &'static ViewScreenSpaceReflectionsUniformOffset,
        &'static ViewEnvironmentMapUniformOffset,
        &'static MeshViewBindGroup,
        &'static ViewTarget,
        &'static DeferredLightingIdDepthTexture,
        &'static DeferredLightingPipeline,
    );
    // Records the deferred lighting pass: binds the mesh-view bind groups and
    // the lighting-ID uniform, then draws one full-screen triangle depth-tested
    // against the lighting-ID depth texture (so only pixels written by the
    // deferred prepass with a matching pass ID are shaded).
    fn run(
        &self,
        _graph_context: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            view_uniform_offset,
            view_lights_offset,
            view_fog_offset,
            view_light_probes_offset,
            view_ssr_offset,
            view_environment_map_offset,
            mesh_view_bind_group,
            target,
            deferred_lighting_id_depth_texture,
            deferred_lighting_pipeline,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let pipeline_cache = world.resource::<PipelineCache>();
        let deferred_lighting_layout = world.resource::<DeferredLightingLayout>();
        // The pipeline may still be compiling; silently skip the pass until
        // it's ready.
        let Some(pipeline) =
            pipeline_cache.get_render_pipeline(deferred_lighting_pipeline.pipeline_id)
        else {
            return Ok(());
        };
        let deferred_lighting_pass_id =
            world.resource::<ComponentUniforms<PbrDeferredLightingDepthId>>();
        let Some(deferred_lighting_pass_id_binding) =
            deferred_lighting_pass_id.uniforms().binding()
        else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let bind_group_2 = render_context.render_device().create_bind_group(
            "deferred_lighting_layout_group_2",
            &pipeline_cache.get_bind_group_layout(&deferred_lighting_layout.bind_group_layout_2),
            &BindGroupEntries::single(deferred_lighting_pass_id_binding),
        );
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("deferred_lighting"),
            color_attachments: &[Some(target.get_color_attachment())],
            depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
                view: &deferred_lighting_id_depth_texture.texture.default_view,
                // Load the prepass-written IDs; the texture isn't needed
                // afterwards, so discard the store.
                depth_ops: Some(Operations {
                    load: LoadOp::Load,
                    store: StoreOp::Discard,
                }),
                stencil_ops: None,
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "deferred_lighting");
        render_pass.set_render_pipeline(pipeline);
        render_pass.set_bind_group(
            0,
            &mesh_view_bind_group.main,
            &[
                view_uniform_offset.offset,
                view_lights_offset.offset,
                view_fog_offset.offset,
                **view_light_probes_offset,
                **view_ssr_offset,
                **view_environment_map_offset,
            ],
        );
        render_pass.set_bind_group(1, &mesh_view_bind_group.binding_array, &[]);
        render_pass.set_bind_group(2, &bind_group_2, &[]);
        // Single full-screen triangle.
        render_pass.draw(0..3, 0..1);
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// Resources needed to specialize and run the deferred lighting pipeline.
#[derive(Resource)]
pub struct DeferredLightingLayout {
    // The mesh pipeline supplies the view bind group layouts (groups 0 and 1).
    mesh_pipeline: MeshPipeline,
    // Layout for bind group 2, holding the `PbrDeferredLightingDepthId` uniform.
    bind_group_layout_2: BindGroupLayoutDescriptor,
    // Handle to the embedded `deferred_lighting.wgsl` shader.
    deferred_lighting_shader: Handle<Shader>,
}
/// The specialized deferred lighting pipeline ID for a single view.
#[derive(Component)]
pub struct DeferredLightingPipeline {
    pub pipeline_id: CachedRenderPipelineId,
}
impl SpecializedRenderPipeline for DeferredLightingLayout {
    type Key = MeshPipelineKey;

    /// Builds the deferred lighting pipeline descriptor for one combination of
    /// view features, translating each relevant bit of the
    /// [`MeshPipelineKey`] into a shader definition.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let mut shader_defs = Vec::new();

        // Let the shader code know that it's running in a deferred pipeline.
        shader_defs.push("DEFERRED_LIGHTING_PIPELINE".into());

        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        shader_defs.push("WEBGL2".into());

        if key.contains(MeshPipelineKey::TONEMAP_IN_SHADER) {
            shader_defs.push("TONEMAP_IN_SHADER".into());
            // Tell the shader which bindings hold the tonemapping LUT.
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
                TONEMAPPING_LUT_TEXTURE_BINDING_INDEX,
            ));
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
                TONEMAPPING_LUT_SAMPLER_BINDING_INDEX,
            ));

            // The tonemapping method lives in a reserved bit range of the key;
            // at most one of these defines is emitted.
            let method = key.intersection(MeshPipelineKey::TONEMAP_METHOD_RESERVED_BITS);
            if method == MeshPipelineKey::TONEMAP_METHOD_NONE {
                shader_defs.push("TONEMAP_METHOD_NONE".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD {
                shader_defs.push("TONEMAP_METHOD_REINHARD".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE {
                shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_ACES_FITTED {
                shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_AGX {
                shader_defs.push("TONEMAP_METHOD_AGX".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM {
                shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC {
                shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
            } else if method == MeshPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE {
                shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
            }

            // Debanding is tied to tonemapping in the shader, cannot run without it.
            if key.contains(MeshPipelineKey::DEBAND_DITHER) {
                shader_defs.push("DEBAND_DITHER".into());
            }
        }

        // Forward each feature flag in the key to the shader as a define.
        if key.contains(MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION) {
            shader_defs.push("SCREEN_SPACE_AMBIENT_OCCLUSION".into());
        }
        if key.contains(MeshPipelineKey::ENVIRONMENT_MAP) {
            shader_defs.push("ENVIRONMENT_MAP".into());
        }
        if key.contains(MeshPipelineKey::IRRADIANCE_VOLUME) {
            shader_defs.push("IRRADIANCE_VOLUME".into());
        }
        if key.contains(MeshPipelineKey::NORMAL_PREPASS) {
            shader_defs.push("NORMAL_PREPASS".into());
        }
        if key.contains(MeshPipelineKey::DEPTH_PREPASS) {
            shader_defs.push("DEPTH_PREPASS".into());
        }
        if key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
            shader_defs.push("MOTION_VECTOR_PREPASS".into());
        }
        if key.contains(MeshPipelineKey::SCREEN_SPACE_REFLECTIONS) {
            shader_defs.push("SCREEN_SPACE_REFLECTIONS".into());
        }
        if key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) {
            shader_defs.push("HAS_PREVIOUS_SKIN".into());
        }
        if key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) {
            shader_defs.push("HAS_PREVIOUS_MORPH".into());
        }
        if key.contains(MeshPipelineKey::DISTANCE_FOG) {
            shader_defs.push("DISTANCE_FOG".into());
        }
        if key.contains(MeshPipelineKey::ATMOSPHERE) {
            shader_defs.push("ATMOSPHERE".into());
        }

        // Always true, since we're in the deferred lighting pipeline
        shader_defs.push("DEFERRED_PREPASS".into());

        // Exactly one shadow filter method is encoded in the reserved bits.
        let shadow_filter_method =
            key.intersection(MeshPipelineKey::SHADOW_FILTER_METHOD_RESERVED_BITS);
        if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2 {
            shader_defs.push("SHADOW_FILTER_METHOD_HARDWARE_2X2".into());
        } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN {
            shader_defs.push("SHADOW_FILTER_METHOD_GAUSSIAN".into());
        } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL {
            shader_defs.push("SHADOW_FILTER_METHOD_TEMPORAL".into());
        }

        if self.mesh_pipeline.binding_arrays_are_usable {
            shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into());
            shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into());
        }

        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());

        let layout = self.mesh_pipeline.get_view_layout(key.into());
        RenderPipelineDescriptor {
            label: Some("deferred_lighting_pipeline".into()),
            // Group 0: main view bindings; group 1: binding arrays;
            // group 2: the deferred lighting pass ID uniform.
            layout: vec![
                layout.main_layout.clone(),
                layout.binding_array_layout.clone(),
                self.bind_group_layout_2.clone(),
            ],
            vertex: VertexState {
                shader: self.deferred_lighting_shader.clone(),
                shader_defs: shader_defs.clone(),
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.deferred_lighting_shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    // Match the view target's texture format.
                    format: if key.contains(MeshPipelineKey::HDR) {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    blend: None,
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            // Depth writes are off and the compare function is `Equal` against
            // the deferred lighting pass ID depth texture — presumably so only
            // pixels whose stored pass ID matches get shaded (see
            // `DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT`); confirm against the
            // prepass that writes that texture.
            depth_stencil: Some(DepthStencilState {
                format: DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT,
                depth_write_enabled: false,
                depth_compare: CompareFunction::Equal,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            ..default()
        }
    }
}
/// Initializes the [`DeferredLightingLayout`] resource at render startup.
///
/// Creates the layout for bind group 2 (the deferred lighting pass ID
/// uniform) and loads the embedded `deferred_lighting.wgsl` shader.
pub fn init_deferred_lighting_layout(
    mut commands: Commands,
    mesh_pipeline: Res<MeshPipeline>,
    asset_server: Res<AssetServer>,
) {
    // Bind group 2 holds only the `PbrDeferredLightingDepthId` uniform,
    // visible to both the vertex and fragment stages.
    let bind_group_layout_2 = BindGroupLayoutDescriptor::new(
        "deferred_lighting_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::VERTEX_FRAGMENT,
            uniform_buffer::<PbrDeferredLightingDepthId>(false),
        ),
    );

    let deferred_lighting_shader =
        load_embedded_asset!(asset_server.as_ref(), "deferred_lighting.wgsl");

    commands.insert_resource(DeferredLightingLayout {
        mesh_pipeline: mesh_pipeline.clone(),
        bind_group_layout_2,
        deferred_lighting_shader,
    });
}
/// Adds a default [`PbrDeferredLightingDepthId`] to every deferred-prepass
/// view that doesn't already have one.
pub fn insert_deferred_lighting_pass_id_component(
    mut commands: Commands,
    views: Query<Entity, (With<DeferredPrepass>, Without<PbrDeferredLightingDepthId>)>,
) {
    for entity in &views {
        commands
            .entity(entity)
            .insert(PbrDeferredLightingDepthId::default());
    }
}
/// Specializes and caches a deferred lighting pipeline for each view that has
/// a deferred prepass, encoding the view's rendering features into a
/// [`MeshPipelineKey`].
///
/// Views without a deferred prepass, or with [`SkipDeferredLighting`], have
/// any previously-inserted [`DeferredLightingPipeline`] removed instead.
pub fn prepare_deferred_lighting_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<DeferredLightingLayout>>,
    deferred_lighting_layout: Res<DeferredLightingLayout>,
    views: Query<(
        Entity,
        &ExtractedView,
        Option<&Tonemapping>,
        Option<&DebandDither>,
        Option<&ShadowFilteringMethod>,
        (
            Has<ScreenSpaceAmbientOcclusion>,
            Has<ScreenSpaceReflectionsUniform>,
            Has<DistanceFog>,
        ),
        (
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
        ),
        Has<RenderViewLightProbes<EnvironmentMapLight>>,
        Has<RenderViewLightProbes<IrradianceVolume>>,
        Has<SkipDeferredLighting>,
        Has<ExtractedAtmosphere>,
    )>,
) {
    for (
        entity,
        view,
        tonemapping,
        dither,
        shadow_filter_method,
        (ssao, ssr, distance_fog),
        (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass),
        has_environment_maps,
        has_irradiance_volumes,
        skip_deferred_lighting,
        has_atmosphere,
    ) in &views
    {
        // If there is no deferred prepass or we want to skip the deferred lighting pass,
        // remove the old pipeline if there was one. This handles the case in which a
        // view using deferred stops using it.
        if !deferred_prepass || skip_deferred_lighting {
            commands.entity(entity).remove::<DeferredLightingPipeline>();
            continue;
        }

        // Start the key from the HDR flag, then OR in each feature bit.
        let mut view_key = MeshPipelineKey::from_hdr(view.hdr);
        if normal_prepass {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }
        if depth_prepass {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }
        if motion_vector_prepass {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }
        if has_atmosphere {
            view_key |= MeshPipelineKey::ATMOSPHERE;
        }
        if view.invert_culling {
            view_key |= MeshPipelineKey::INVERT_CULLING;
        }

        // Always true, since we're in the deferred lighting pipeline
        view_key |= MeshPipelineKey::DEFERRED_PREPASS;

        // In-shader tonemapping (and the dither that depends on it) only
        // applies to non-HDR views.
        if !view.hdr {
            if let Some(tonemapping) = tonemapping {
                view_key |= MeshPipelineKey::TONEMAP_IN_SHADER;
                view_key |= match tonemapping {
                    Tonemapping::None => MeshPipelineKey::TONEMAP_METHOD_NONE,
                    Tonemapping::Reinhard => MeshPipelineKey::TONEMAP_METHOD_REINHARD,
                    Tonemapping::ReinhardLuminance => {
                        MeshPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE
                    }
                    Tonemapping::AcesFitted => MeshPipelineKey::TONEMAP_METHOD_ACES_FITTED,
                    Tonemapping::AgX => MeshPipelineKey::TONEMAP_METHOD_AGX,
                    Tonemapping::SomewhatBoringDisplayTransform => {
                        MeshPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM
                    }
                    Tonemapping::TonyMcMapface => MeshPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE,
                    Tonemapping::BlenderFilmic => MeshPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC,
                };
            }
            if let Some(DebandDither::Enabled) = dither {
                view_key |= MeshPipelineKey::DEBAND_DITHER;
            }
        }

        if ssao {
            view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION;
        }
        if ssr {
            view_key |= MeshPipelineKey::SCREEN_SPACE_REFLECTIONS;
        }
        if distance_fog {
            view_key |= MeshPipelineKey::DISTANCE_FOG;
        }

        // We don't need to check to see whether the environment map is loaded
        // because [`gather_light_probes`] already checked that for us before
        // adding the [`RenderViewEnvironmentMaps`] component.
        if has_environment_maps {
            view_key |= MeshPipelineKey::ENVIRONMENT_MAP;
        }
        if has_irradiance_volumes {
            view_key |= MeshPipelineKey::IRRADIANCE_VOLUME;
        }

        // Fall back to the default shadow filter when the view doesn't
        // specify one.
        match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) {
            ShadowFilteringMethod::Hardware2x2 => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2;
            }
            ShadowFilteringMethod::Gaussian => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN;
            }
            ShadowFilteringMethod::Temporal => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL;
            }
        }

        // Specialization is cached, so this is cheap when the key is unchanged.
        let pipeline_id =
            pipelines.specialize(&pipeline_cache, &deferred_lighting_layout, view_key);
        commands
            .entity(entity)
            .insert(DeferredLightingPipeline { pipeline_id });
    }
}
/// Component to skip running the deferred lighting pass in [`DeferredOpaquePass3dPbrLightingNode`] for a specific view.
///
/// This works like [`crate::PbrPlugin::add_default_deferred_lighting_plugin`], but is per-view instead of global.
///
/// Useful for cases where you want to generate a gbuffer, but skip the built-in deferred lighting pass
/// to run your own custom lighting pass instead.
///
/// Insert this component in the render world only.
///
/// Views with this component also have any previously-prepared
/// [`DeferredLightingPipeline`] removed by
/// [`prepare_deferred_lighting_pipelines`].
#[derive(Component, Clone, Copy, Default)]
pub struct SkipDeferredLighting;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/lightmap/mod.rs | crates/bevy_pbr/src/lightmap/mod.rs | //! Lightmaps, baked lighting textures that can be applied at runtime to provide
//! diffuse global illumination.
//!
//! Bevy doesn't currently have any way to actually bake lightmaps, but they can
//! be baked in an external tool like [Blender](http://blender.org), for example
//! with an addon like [The Lightmapper]. The tools in the [`bevy-baked-gi`]
//! project support other lightmap baking methods.
//!
//! When a [`Lightmap`] component is added to an entity with a [`Mesh3d`] and a
//! [`MeshMaterial3d<StandardMaterial>`], Bevy applies the lightmap when rendering. The brightness
//! of the lightmap may be controlled with the `lightmap_exposure` field on
//! [`StandardMaterial`].
//!
//! During the rendering extraction phase, we extract all lightmaps into the
//! [`RenderLightmaps`] table, which lives in the render world. Mesh bindgroup
//! and mesh uniform creation consults this table to determine which lightmap to
//! supply to the shader. Essentially, the lightmap is a special type of texture
//! that is part of the mesh instance rather than part of the material (because
//! multiple meshes can share the same material, whereas sharing lightmaps is
//! nonsensical).
//!
//! Note that multiple meshes can't be drawn in a single drawcall if they use
//! different lightmap textures, unless bindless textures are in use. If you
//! want to instance a lightmapped mesh, and your platform doesn't support
//! bindless textures, combine the lightmap textures into a single atlas, and
//! set the `uv_rect` field on [`Lightmap`] appropriately.
//!
//! [The Lightmapper]: https://github.com/Naxela/The_Lightmapper
//! [`Mesh3d`]: bevy_mesh::Mesh3d
//! [`MeshMaterial3d<StandardMaterial>`]: crate::StandardMaterial
//! [`StandardMaterial`]: crate::StandardMaterial
//! [`bevy-baked-gi`]: https://github.com/pcwalton/bevy-baked-gi
use bevy_app::{App, Plugin};
use bevy_asset::{AssetId, Handle};
use bevy_camera::visibility::ViewVisibility;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
lifecycle::RemovedComponents,
query::{Changed, Or},
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::Image;
use bevy_math::{uvec2, vec4, Rect, UVec2};
use bevy_platform::collections::HashSet;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
render_asset::RenderAssets,
render_resource::{Sampler, TextureView, WgpuSampler, WgpuTextureView},
renderer::RenderAdapter,
sync_world::MainEntity,
texture::{FallbackImage, GpuImage},
Extract, ExtractSchedule, RenderApp, RenderStartup,
};
use bevy_render::{renderer::RenderDevice, sync_world::MainEntityHashMap};
use bevy_shader::load_shader_library;
use bevy_utils::default;
use fixedbitset::FixedBitSet;
use nonmax::{NonMaxU16, NonMaxU32};
use tracing::error;
use crate::{binding_arrays_are_usable, MeshExtractionSystems};
/// The number of lightmaps that we store in a single slab, if bindless textures
/// are in use.
///
/// If bindless textures aren't in use, then only a single lightmap can be bound
/// at a time.
///
/// This must not exceed 32, because each [`LightmapSlab`] tracks slot
/// occupancy in a single `u32` bitmask.
pub const LIGHTMAPS_PER_SLAB: usize = 4;
/// A plugin that provides an implementation of lightmaps.
///
/// Registers the `lightmap.wgsl` shader library and the render-world systems
/// that maintain the [`RenderLightmaps`] resource.
pub struct LightmapPlugin;
/// A component that applies baked indirect diffuse global illumination from a
/// lightmap.
///
/// When assigned to an entity that contains a [`Mesh3d`](bevy_mesh::Mesh3d) and a
/// [`MeshMaterial3d<StandardMaterial>`](crate::StandardMaterial), if the mesh
/// has a second UV layer ([`ATTRIBUTE_UV_1`](bevy_mesh::Mesh::ATTRIBUTE_UV_1)),
/// then the lightmap will render using those UVs.
///
/// The `Default` impl uses a UV rect covering the whole texture, with bicubic
/// sampling disabled.
#[derive(Component, Clone, Reflect)]
#[reflect(Component, Default, Clone)]
pub struct Lightmap {
    /// The lightmap texture.
    pub image: Handle<Image>,
    /// The rectangle within the lightmap texture that the UVs are relative to.
    ///
    /// The top left coordinate is the `min` part of the rect, and the bottom
    /// right coordinate is the `max` part of the rect. The rect ranges from (0,
    /// 0) to (1, 1).
    ///
    /// This field allows lightmaps for a variety of meshes to be packed into a
    /// single atlas.
    pub uv_rect: Rect,
    /// Whether bicubic sampling should be used for sampling this lightmap.
    ///
    /// Bicubic sampling is higher quality, but slower, and may lead to light leaks.
    ///
    /// If true, the lightmap texture's sampler must be set to [`bevy_image::ImageSampler::linear`].
    pub bicubic_sampling: bool,
}
/// Lightmap data stored in the render world.
///
/// There is one of these per visible lightmapped mesh instance.
#[derive(Debug)]
pub(crate) struct RenderLightmap {
    /// The rectangle within the lightmap texture that the UVs are relative to.
    ///
    /// The top left coordinate is the `min` part of the rect, and the bottom
    /// right coordinate is the `max` part of the rect. The rect ranges from (0,
    /// 0) to (1, 1).
    pub(crate) uv_rect: Rect,
    /// The index of the slab (i.e. binding array) in which the lightmap is
    /// located.
    pub(crate) slab_index: LightmapSlabIndex,
    /// The index of the slot (i.e. element within the binding array) in which
    /// the lightmap is located.
    ///
    /// If bindless lightmaps aren't in use, this will be 0.
    pub(crate) slot_index: LightmapSlotIndex,
    /// Whether or not bicubic sampling should be used for this lightmap.
    pub(crate) bicubic_sampling: bool,
}
/// Stores data for all lightmaps in the render world.
///
/// This is cleared and repopulated each frame during the `extract_lightmaps`
/// system.
#[derive(Resource)]
pub struct RenderLightmaps {
    /// The mapping from every lightmapped entity to its lightmap info.
    ///
    /// Entities without lightmaps, or for which the mesh or lightmap isn't
    /// loaded, won't have entries in this table.
    pub(crate) render_lightmaps: MainEntityHashMap<RenderLightmap>,
    /// The slabs (binding arrays) containing the lightmaps.
    pub(crate) slabs: Vec<LightmapSlab>,
    /// The set of slab indices that still have at least one free slot.
    ///
    /// Consulted by `allocate` so existing slabs are reused before new ones
    /// are created.
    free_slabs: FixedBitSet,
    /// The `(slab, slot)` pairs that have been allocated but whose GPU images
    /// haven't finished loading yet.
    ///
    /// `extract_lightmaps` retries these every frame until the image loads.
    pending_lightmaps: HashSet<(LightmapSlabIndex, LightmapSlotIndex)>,
    /// Whether bindless textures are supported on this platform.
    pub(crate) bindless_supported: bool,
}
/// A binding array that contains lightmaps.
///
/// This will have a single binding if bindless lightmaps aren't in use.
pub struct LightmapSlab {
    /// The GPU images in this slab.
    ///
    /// Unallocated or not-yet-loaded slots hold the fallback image.
    lightmaps: Vec<AllocatedLightmap>,
    /// A bitmask of free slots: bit *i* is set if slot *i* is unoccupied.
    free_slots_bitmask: u32,
}
/// A single slot in a [`LightmapSlab`].
struct AllocatedLightmap {
    /// The GPU image bound in this slot (the fallback image until the actual
    /// lightmap texture has loaded).
    gpu_image: GpuImage,
    // This will only be present if the lightmap is allocated but not loaded.
    asset_id: Option<AssetId<Image>>,
}
/// The index of the slab (binding array) in which a lightmap is located.
///
/// Stored as a [`NonMaxU32`] so that `Option<LightmapSlabIndex>` costs no
/// extra space.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Deref, DerefMut)]
#[repr(transparent)]
pub struct LightmapSlabIndex(pub(crate) NonMaxU32);

/// The index of the slot (element within the binding array) in the slab in
/// which a lightmap is located.
///
/// Stored as a [`NonMaxU16`] so that `Option<LightmapSlotIndex>` costs no
/// extra space.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Deref, DerefMut)]
#[repr(transparent)]
pub struct LightmapSlotIndex(pub(crate) NonMaxU16);
impl Plugin for LightmapPlugin {
    /// Registers the lightmap shader library, and — when the render sub-app
    /// exists — the startup and extraction systems for lightmaps.
    fn build(&self, app: &mut App) {
        load_shader_library!(app, "lightmap.wgsl");

        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .add_systems(RenderStartup, init_render_lightmaps)
                .add_systems(
                    ExtractSchedule,
                    extract_lightmaps.after(MeshExtractionSystems),
                );
        }
    }
}
/// Extracts all lightmaps from the scene and populates the [`RenderLightmaps`]
/// resource.
fn extract_lightmaps(
    render_lightmaps: ResMut<RenderLightmaps>,
    changed_lightmaps_query: Extract<
        Query<
            (Entity, &ViewVisibility, &Lightmap),
            Or<(Changed<ViewVisibility>, Changed<Lightmap>)>,
        >,
    >,
    mut removed_lightmaps_query: Extract<RemovedComponents<Lightmap>>,
    images: Res<RenderAssets<GpuImage>>,
    fallback_images: Res<FallbackImage>,
) {
    let render_lightmaps = render_lightmaps.into_inner();

    // Loop over each entity.
    for (entity, view_visibility, lightmap) in changed_lightmaps_query.iter() {
        // Entities already tracked keep their existing allocation.
        // NOTE(review): a *changed* `Lightmap` on an already-tracked entity is
        // skipped here, so a new image/UV rect appears not to be re-extracted
        // until the component is removed and re-added — confirm intended.
        if render_lightmaps
            .render_lightmaps
            .contains_key(&MainEntity::from(entity))
        {
            continue;
        }

        // Only process visible entities.
        if !view_visibility.get() {
            continue;
        }

        // Reserve a (slab, slot) pair now; the real GPU image is installed
        // later, once it has loaded (see the `pending_lightmaps` pass below).
        let (slab_index, slot_index) =
            render_lightmaps.allocate(&fallback_images, lightmap.image.id());
        render_lightmaps.render_lightmaps.insert(
            entity.into(),
            RenderLightmap::new(
                lightmap.uv_rect,
                slab_index,
                slot_index,
                lightmap.bicubic_sampling,
            ),
        );
        render_lightmaps
            .pending_lightmaps
            .insert((slab_index, slot_index));
    }

    // Free the slots of entities whose `Lightmap` component was removed.
    for entity in removed_lightmaps_query.read() {
        // A removal followed by a re-add shows up in both queries; keep it.
        if changed_lightmaps_query.contains(entity) {
            continue;
        }
        let Some(RenderLightmap {
            slab_index,
            slot_index,
            ..
        }) = render_lightmaps
            .render_lightmaps
            .remove(&MainEntity::from(entity))
        else {
            continue;
        };
        render_lightmaps.remove(&fallback_images, slab_index, slot_index);
        render_lightmaps
            .pending_lightmaps
            .remove(&(slab_index, slot_index));
    }

    // Install any pending lightmaps whose GPU images have finished loading;
    // the rest stay pending for a later frame.
    render_lightmaps
        .pending_lightmaps
        .retain(|&(slab_index, slot_index)| {
            let Some(asset_id) = render_lightmaps.slabs[usize::from(slab_index)].lightmaps
                [usize::from(slot_index)]
            .asset_id
            else {
                error!(
                    "Allocated lightmap should have been removed from `pending_lightmaps` by now"
                );
                return false;
            };
            // Not loaded yet; keep it pending and try again next frame.
            let Some(gpu_image) = images.get(asset_id) else {
                return true;
            };
            render_lightmaps.slabs[usize::from(slab_index)].insert(slot_index, gpu_image.clone());
            false
        });
}
impl RenderLightmap {
    /// Creates a new lightmap from a texture, a UV rect, and a slab and slot
    /// index pair.
    ///
    /// See the field documentation on [`RenderLightmap`] for the meaning of
    /// each argument.
    fn new(
        uv_rect: Rect,
        slab_index: LightmapSlabIndex,
        slot_index: LightmapSlotIndex,
        bicubic_sampling: bool,
    ) -> Self {
        Self {
            uv_rect,
            slab_index,
            slot_index,
            bicubic_sampling,
        }
    }
}
/// Packs the lightmap UV rect into 64 bits (4 16-bit unsigned integers).
///
/// `None` packs to `UVec2::ZERO`.
pub(crate) fn pack_lightmap_uv_rect(maybe_rect: Option<Rect>) -> UVec2 {
    let Some(rect) = maybe_rect else {
        return UVec2::ZERO;
    };

    // Quantize each coordinate to 16 bits, then pack coordinate pairs into
    // the two 32-bit lanes: (min.x | min.y << 16, max.x | max.y << 16).
    let quantized = (vec4(rect.min.x, rect.min.y, rect.max.x, rect.max.y) * 65535.0)
        .round()
        .as_uvec4();
    uvec2(
        quantized.x | (quantized.y << 16),
        quantized.z | (quantized.w << 16),
    )
}
impl Default for Lightmap {
fn default() -> Self {
Self {
image: Default::default(),
uv_rect: Rect::new(0.0, 0.0, 1.0, 1.0),
bicubic_sampling: false,
}
}
}
/// Creates the empty [`RenderLightmaps`] resource at render startup, probing
/// the device and adapter for bindless (binding array) support.
pub fn init_render_lightmaps(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_adapter: Res<RenderAdapter>,
) {
    commands.insert_resource(RenderLightmaps {
        render_lightmaps: default(),
        slabs: Vec::new(),
        free_slabs: FixedBitSet::new(),
        pending_lightmaps: default(),
        bindless_supported: binding_arrays_are_usable(&render_device, &render_adapter),
    });
}
impl RenderLightmaps {
    /// Appends a fresh slab (pre-filled with fallback images) to the slab
    /// list and returns the new slab's index.
    fn create_slab(&mut self, fallback_images: &FallbackImage) -> LightmapSlabIndex {
        let new_slab_index = LightmapSlabIndex::from(self.slabs.len());
        // A brand-new slab always has free slots.
        self.free_slabs.grow_and_insert(new_slab_index.into());
        let new_slab = LightmapSlab::new(fallback_images, self.bindless_supported);
        self.slabs.push(new_slab);
        new_slab_index
    }

    /// Reserves a slot for `image_id`, returning the (slab, slot) pair where
    /// it was placed.
    fn allocate(
        &mut self,
        fallback_images: &FallbackImage,
        image_id: AssetId<Image>,
    ) -> (LightmapSlabIndex, LightmapSlotIndex) {
        // Prefer the lowest-indexed slab that still has room; otherwise open
        // a new one.
        let slab_index = match self.free_slabs.minimum() {
            Some(index) => index.into(),
            None => self.create_slab(fallback_images),
        };
        let slab = &mut self.slabs[usize::from(slab_index)];
        let slot_index = slab.allocate(image_id);
        // Once every slot is taken, stop offering this slab for reuse.
        if slab.is_full() {
            self.free_slabs.remove(slab_index.into());
        }
        (slab_index, slot_index)
    }

    /// Frees the slot at `(slab_index, slot_index)`, restoring the fallback
    /// image in its place.
    fn remove(
        &mut self,
        fallback_images: &FallbackImage,
        slab_index: LightmapSlabIndex,
        slot_index: LightmapSlotIndex,
    ) {
        let slab = &mut self.slabs[usize::from(slab_index)];
        slab.remove(fallback_images, slot_index);
        // The slab now has at least one open slot, so mark it reusable.
        if !slab.is_full() {
            self.free_slabs.grow_and_insert(slab_index.into());
        }
    }
}
impl LightmapSlab {
    /// Creates a slab filled with fallback images.
    ///
    /// In bindless mode the slab has [`LIGHTMAPS_PER_SLAB`] slots; otherwise
    /// only a single lightmap can be bound at once, so it has one slot.
    fn new(fallback_images: &FallbackImage, bindless_supported: bool) -> LightmapSlab {
        let count = if bindless_supported {
            LIGHTMAPS_PER_SLAB
        } else {
            1
        };
        LightmapSlab {
            lightmaps: (0..count)
                .map(|_| AllocatedLightmap {
                    gpu_image: fallback_images.d2.clone(),
                    asset_id: None,
                })
                .collect(),
            // Bit i set means slot i is free; initially all `count` slots are.
            free_slots_bitmask: (1 << count) - 1,
        }
    }

    /// Returns true if every slot in this slab is occupied.
    fn is_full(&self) -> bool {
        self.free_slots_bitmask == 0
    }

    /// Claims the lowest free slot for `image_id` and returns its index.
    ///
    /// The slot keeps the fallback image until [`Self::insert`] supplies the
    /// loaded one. Panics if the slab is full; callers check via
    /// [`Self::is_full`] first.
    fn allocate(&mut self, image_id: AssetId<Image>) -> LightmapSlotIndex {
        assert!(
            !self.is_full(),
            "Attempting to allocate on a full lightmap slab"
        );
        // The lowest set bit is the lowest free slot.
        let index = LightmapSlotIndex::from(self.free_slots_bitmask.trailing_zeros());
        self.free_slots_bitmask &= !(1 << u32::from(index));
        self.lightmaps[usize::from(index)].asset_id = Some(image_id);
        index
    }

    /// Installs a loaded GPU image into a previously-allocated slot.
    ///
    /// Clearing `asset_id` marks the slot as no longer pending a load.
    fn insert(&mut self, index: LightmapSlotIndex, gpu_image: GpuImage) {
        self.lightmaps[usize::from(index)] = AllocatedLightmap {
            gpu_image,
            asset_id: None,
        }
    }

    /// Releases a slot: restores the fallback image and marks the slot free
    /// in the bitmask.
    fn remove(&mut self, fallback_images: &FallbackImage, index: LightmapSlotIndex) {
        self.lightmaps[usize::from(index)] = AllocatedLightmap {
            gpu_image: fallback_images.d2.clone(),
            asset_id: None,
        };
        self.free_slots_bitmask |= 1 << u32::from(index);
    }

    /// Returns the texture views and samplers for the lightmaps in this slab,
    /// ready to be placed into a bind group.
    ///
    /// This is used when constructing bind groups in bindless mode. Before
    /// returning, this function pads out the arrays with fallback images in
    /// order to fulfill requirements of platforms that require full binding
    /// arrays (e.g. DX12).
    pub(crate) fn build_binding_arrays(&self) -> (Vec<&WgpuTextureView>, Vec<&WgpuSampler>) {
        (
            self.lightmaps
                .iter()
                .map(|allocated_lightmap| &*allocated_lightmap.gpu_image.texture_view)
                .collect(),
            self.lightmaps
                .iter()
                .map(|allocated_lightmap| &*allocated_lightmap.gpu_image.sampler)
                .collect(),
        )
    }

    /// Returns the texture view and sampler corresponding to the first
    /// lightmap, which must exist.
    ///
    /// This is used when constructing bind groups in non-bindless mode.
    pub(crate) fn bindings_for_first_lightmap(&self) -> (&TextureView, &Sampler) {
        (
            &self.lightmaps[0].gpu_image.texture_view,
            &self.lightmaps[0].gpu_image.sampler,
        )
    }
}
impl From<u32> for LightmapSlabIndex {
    /// Panics if `value` is `u32::MAX`, the niche reserved by [`NonMaxU32`].
    fn from(value: u32) -> Self {
        Self(NonMaxU32::new(value).unwrap())
    }
}

impl From<usize> for LightmapSlabIndex {
    fn from(value: usize) -> Self {
        // Fail loudly instead of silently truncating with `as` on the
        // (practically impossible) path where a slab index exceeds `u32`.
        Self::from(u32::try_from(value).expect("lightmap slab index overflowed u32"))
    }
}

impl From<u32> for LightmapSlotIndex {
    /// Panics if `value` doesn't fit in a `u16`, or is `u16::MAX` (the niche
    /// reserved by [`NonMaxU16`]).
    fn from(value: u32) -> Self {
        // Previously this used `value as u16`, which silently truncated
        // out-of-range values into a *wrong but valid* slot index. Slot
        // indices are bounded by `LIGHTMAPS_PER_SLAB` in practice, but fail
        // loudly if that ever changes.
        Self(
            NonMaxU16::new(u16::try_from(value).expect("lightmap slot index overflowed u16"))
                .unwrap(),
        )
    }
}

impl From<usize> for LightmapSlotIndex {
    fn from(value: usize) -> Self {
        Self::from(u32::try_from(value).expect("lightmap slot index overflowed u32"))
    }
}

impl From<LightmapSlabIndex> for usize {
    fn from(value: LightmapSlabIndex) -> Self {
        // `u32` always fits in `usize` on the platforms Bevy supports.
        value.0.get() as usize
    }
}

impl From<LightmapSlotIndex> for usize {
    fn from(value: LightmapSlotIndex) -> Self {
        // `u16` → `usize` is lossless.
        usize::from(value.0.get())
    }
}

impl From<LightmapSlotIndex> for u16 {
    fn from(value: LightmapSlotIndex) -> Self {
        value.0.get()
    }
}

impl From<LightmapSlotIndex> for u32 {
    fn from(value: LightmapSlotIndex) -> Self {
        // `u16` → `u32` is lossless.
        u32::from(value.0.get())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/decal/clustered.rs | crates/bevy_pbr/src/decal/clustered.rs | //! Clustered decals, bounding regions that project textures onto surfaces.
//!
//! A *clustered decal* is a bounding box that projects a texture onto any
//! surface within its bounds along the positive Z axis. In Bevy, clustered
//! decals use the *clustered forward* rendering technique.
//!
//! Clustered decals are the highest-quality types of decals that Bevy supports,
//! but they require bindless textures. This means that they presently can't be
//! used on WebGL 2 or WebGPU. Bevy's clustered decals can be used
//! with forward or deferred rendering and don't require a prepass.
//!
//! Each clustered decal may contain up to 4 textures. By default, the 4
//! textures correspond to the base color, a normal map, a metallic-roughness
//! map, and an emissive map respectively. However, with a custom shader, you
//! can use these 4 textures for whatever you wish. Additionally, you can use
//! the built-in *tag* field to store additional application-specific data; by
//! reading the tag in the shader, you can modify the appearance of a clustered
//! decal arbitrarily. See the documentation in `clustered.wgsl` for more
//! information and the `clustered_decals` example for an example of use.
use core::{num::NonZero, ops::Deref};
use bevy_app::{App, Plugin};
use bevy_asset::{AssetId, Handle};
use bevy_camera::visibility::ViewVisibility;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
entity::{Entity, EntityHashMap},
query::With,
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{Commands, Local, Query, Res, ResMut},
};
use bevy_image::Image;
use bevy_light::{ClusteredDecal, DirectionalLightTexture, PointLightTexture, SpotLightTexture};
use bevy_math::Mat4;
use bevy_platform::collections::HashMap;
use bevy_render::{
render_asset::RenderAssets,
render_resource::{
binding_types, BindGroupLayoutEntryBuilder, Buffer, BufferUsages, RawBufferVec, Sampler,
SamplerBindingType, ShaderType, TextureSampleType, TextureView,
},
renderer::{RenderAdapter, RenderDevice, RenderQueue},
settings::WgpuFeatures,
sync_component::SyncComponentPlugin,
sync_world::RenderEntity,
texture::{FallbackImage, GpuImage},
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use bevy_shader::load_shader_library;
use bevy_transform::components::GlobalTransform;
use bytemuck::{Pod, Zeroable};
use crate::{binding_arrays_are_usable, prepare_lights, GlobalClusterableObjectMeta};
/// The number of textures that can be associated with each clustered decal.
///
/// By default these are the base color, normal map, metallic-roughness map,
/// and emissive map, in that order (see
/// [`RenderClusteredDecal::image_indices`]).
const IMAGES_PER_DECAL: usize = 4;
/// A plugin that adds support for clustered decals.
///
/// In environments where bindless textures aren't available, clustered decals
/// can still be added to a scene, but they won't project any decals.
///
/// See the module-level documentation for details and restrictions.
pub struct ClusteredDecalPlugin;
/// Stores information about all the clustered decals in the scene.
///
/// Cleared and rebuilt from scratch every frame by [`extract_decals`], so
/// decal and texture indices are only stable within a single frame.
#[derive(Resource, Default)]
pub struct RenderClusteredDecals {
    /// Maps an index in the shader binding array to the associated decal image.
    ///
    /// [`Self::texture_to_binding_index`] holds the inverse mapping.
    binding_index_to_textures: Vec<AssetId<Image>>,
    /// Maps a decal image to the shader binding array.
    ///
    /// [`Self::binding_index_to_textures`] holds the inverse mapping.
    texture_to_binding_index: HashMap<AssetId<Image>, i32>,
    /// The information concerning each decal that we provide to the shader.
    decals: Vec<RenderClusteredDecal>,
    /// Maps the [`bevy_render::sync_world::RenderEntity`] of each decal to the
    /// index of that decal in the [`Self::decals`] list.
    entity_to_decal_index: EntityHashMap<usize>,
}
impl RenderClusteredDecals {
    /// Clears out this [`RenderClusteredDecals`] in preparation for a new
    /// frame.
    fn clear(&mut self) {
        self.decals.clear();
        self.entity_to_decal_index.clear();
        self.binding_index_to_textures.clear();
        self.texture_to_binding_index.clear();
    }

    /// Records a decal with up to [`IMAGES_PER_DECAL`] textures, its
    /// world-to-local transform, and an application-defined tag.
    pub fn insert_decal(
        &mut self,
        entity: Entity,
        images: [Option<AssetId<Image>>; IMAGES_PER_DECAL],
        local_from_world: Mat4,
        tag: u32,
    ) {
        // Resolve each texture to its binding-array index; empty slots become
        // -1, which the shader treats as "no texture".
        let image_indices = images.map(|maybe_image_id| {
            maybe_image_id
                .as_ref()
                .map_or(-1, |image_id| self.get_or_insert_image(image_id))
        });

        self.entity_to_decal_index.insert(entity, self.decals.len());
        self.decals.push(RenderClusteredDecal {
            local_from_world,
            image_indices,
            tag,
            pad_a: 0,
            pad_b: 0,
            pad_c: 0,
        });
    }

    /// Returns the index of the decal extracted for `entity` this frame, if
    /// there is one.
    pub fn get(&self, entity: Entity) -> Option<usize> {
        self.entity_to_decal_index.get(&entity).cloned()
    }
}
/// The per-view bind group entries pertaining to decals.
///
/// One of these exists per view.
pub(crate) struct RenderViewClusteredDecalBindGroupEntries<'a> {
    /// The list of decals, corresponding to `mesh_view_bindings::decals` in the
    /// shader.
    pub(crate) decals: &'a Buffer,
    /// The list of textures, corresponding to
    /// `mesh_view_bindings::decal_textures` in the shader.
    pub(crate) texture_views: Vec<&'a <TextureView as Deref>::Target>,
    /// The sampler that the shader uses to sample decals, corresponding to
    /// `mesh_view_bindings::decal_sampler` in the shader.
    pub(crate) sampler: &'a Sampler,
}
/// A render-world resource that holds the buffer of [`ClusteredDecal`]s ready
/// to upload to the GPU.
///
/// Filled and uploaded by the `upload_decals` system each frame.
#[derive(Resource, Deref, DerefMut)]
pub struct DecalsBuffer(RawBufferVec<RenderClusteredDecal>);
impl Default for DecalsBuffer {
fn default() -> Self {
DecalsBuffer(RawBufferVec::new(BufferUsages::STORAGE))
}
}
impl Plugin for ClusteredDecalPlugin {
    /// Registers the decal shader library, component syncing, and — when the
    /// render sub-app exists — the extraction, preparation, and upload
    /// systems for clustered decals.
    fn build(&self, app: &mut App) {
        load_shader_library!(app, "clustered.wgsl");
        app.add_plugins(SyncComponentPlugin::<ClusteredDecal>::default());

        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<DecalsBuffer>()
                .init_resource::<RenderClusteredDecals>()
                .add_systems(ExtractSchedule, (extract_decals, extract_clustered_decal))
                .add_systems(
                    Render,
                    prepare_decals
                        .in_set(RenderSystems::ManageViews)
                        .after(prepare_lights),
                )
                .add_systems(
                    Render,
                    upload_decals.in_set(RenderSystems::PrepareResources),
                );
        }
    }
}
// This is needed because of the orphan rule not allowing implementing
// foreign trait ExtractComponent on foreign type ClusteredDecal
fn extract_clustered_decal(
    mut commands: Commands,
    mut previous_len: Local<usize>,
    query: Extract<Query<(RenderEntity, &ClusteredDecal)>>,
) {
    // Size the batch from last frame's count to avoid regrowth.
    let mut batch = Vec::with_capacity(*previous_len);
    batch.extend(
        query
            .iter()
            .map(|(entity, decal)| (entity, decal.clone())),
    );
    *previous_len = batch.len();
    commands.try_insert_batch(batch);
}
/// The GPU data structure that stores information about each decal.
///
/// NOTE(review): the three `pad_*` fields presumably pad the struct for GPU
/// buffer layout requirements — confirm against `clustered.wgsl`.
#[derive(Clone, Copy, Default, ShaderType, Pod, Zeroable)]
#[repr(C)]
pub struct RenderClusteredDecal {
    /// The inverse of the model matrix.
    ///
    /// The shader uses this in order to back-transform world positions into
    /// model space.
    local_from_world: Mat4,
    /// The index of each decal texture in the binding array.
    ///
    /// These are in the order of the base color texture, the normal map
    /// texture, the metallic-roughness map texture, and finally the emissive
    /// texture.
    ///
    /// If the decal doesn't have a texture assigned to a slot, the index at
    /// that slot will be -1.
    image_indices: [i32; 4],
    /// A custom tag available for application-defined purposes.
    tag: u32,
    /// Padding.
    pad_a: u32,
    /// Padding.
    pad_b: u32,
    /// Padding.
    pad_c: u32,
}
/// Extracts decals from the main world into the render world.
///
/// This gathers [`ClusteredDecal`]s as well as spot, point, and directional
/// light textures, all of which are rendered through the same clustered-decal
/// machinery.
pub fn extract_decals(
    decals: Extract<
        Query<(
            RenderEntity,
            &ClusteredDecal,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    spot_light_textures: Extract<
        Query<(
            RenderEntity,
            &SpotLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    point_light_textures: Extract<
        Query<(
            RenderEntity,
            &PointLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    directional_light_textures: Extract<
        Query<(
            RenderEntity,
            &DirectionalLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    mut render_decals: ResMut<RenderClusteredDecals>,
) {
    // Clear out the `RenderDecals` in preparation for a new frame.
    render_decals.clear();
    extract_clustered_decals(&decals, &mut render_decals);
    extract_spot_light_textures(&spot_light_textures, &mut render_decals);
    extract_point_light_textures(&point_light_textures, &mut render_decals);
    extract_directional_light_textures(&directional_light_textures, &mut render_decals);
}
/// Extracts all [`ClusteredDecal`]s from the scene and transfers them to the
/// render world.
///
/// Light textures are handled by the sibling `extract_*_light_textures`
/// functions, not here.
fn extract_clustered_decals(
    decals: &Extract<
        Query<(
            RenderEntity,
            &ClusteredDecal,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    render_decals: &mut RenderClusteredDecals,
) {
    // Loop over each decal.
    for (decal_entity, clustered_decal, global_transform, view_visibility) in decals {
        // If the decal is invisible, skip it.
        if !view_visibility.get() {
            continue;
        }
        // Insert the decal, grabbing the ID of every associated texture as we
        // do.
        render_decals.insert_decal(
            decal_entity,
            [
                clustered_decal.base_color_texture.as_ref().map(Handle::id),
                clustered_decal.normal_map_texture.as_ref().map(Handle::id),
                clustered_decal
                    .metallic_roughness_texture
                    .as_ref()
                    .map(Handle::id),
                clustered_decal.emissive_texture.as_ref().map(Handle::id),
            ],
            global_transform.affine().inverse().into(),
            clustered_decal.tag,
        );
    }
}
/// Extracts all textures from spot lights from the main world to the render
/// world as clustered decals.
fn extract_spot_light_textures(
    spot_light_textures: &Extract<
        Query<(
            RenderEntity,
            &SpotLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    render_decals: &mut RenderClusteredDecals,
) {
    // Only visible light textures are extracted.
    let visible = spot_light_textures
        .iter()
        .filter(|(_, _, _, visibility)| visibility.get());
    for (decal_entity, texture, global_transform, _) in visible {
        // Spot light textures only occupy the base-color slot; the tag is
        // unused, so it stays zero.
        render_decals.insert_decal(
            decal_entity,
            [Some(texture.image.id()), None, None, None],
            global_transform.affine().inverse().into(),
            0,
        );
    }
}
/// Extracts all textures from point lights from the main world to the render
/// world as clustered decals.
fn extract_point_light_textures(
    point_light_textures: &Extract<
        Query<(
            RenderEntity,
            &PointLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    render_decals: &mut RenderClusteredDecals,
) {
    // Only visible light textures are extracted.
    let visible = point_light_textures
        .iter()
        .filter(|(_, _, _, visibility)| visibility.get());
    for (decal_entity, texture, global_transform, _) in visible {
        // Point light textures only occupy the base-color slot; the tag
        // carries the cubemap layout so the shader knows how to sample it.
        render_decals.insert_decal(
            decal_entity,
            [Some(texture.image.id()), None, None, None],
            global_transform.affine().inverse().into(),
            texture.cubemap_layout as u32,
        );
    }
}
/// Extracts all textures from directional lights from the main world to the
/// render world as clustered decals.
fn extract_directional_light_textures(
    directional_light_textures: &Extract<
        Query<(
            RenderEntity,
            &DirectionalLightTexture,
            &GlobalTransform,
            &ViewVisibility,
        )>,
    >,
    render_decals: &mut RenderClusteredDecals,
) {
    for (decal_entity, texture, global_transform, view_visibility) in directional_light_textures {
        // If the texture is invisible, skip it.
        if !view_visibility.get() {
            continue;
        }
        render_decals.insert_decal(
            decal_entity,
            [Some(texture.image.id()), None, None, None],
            global_transform.affine().inverse().into(),
            // The tag encodes whether the texture tiles: 1 if tiled, 0 if not.
            // `u32::from` is the idiomatic bool-to-int conversion
            // (clippy::bool_to_int_with_if).
            u32::from(texture.tiled),
        );
    }
}
/// Adds all decals in the scene to the [`GlobalClusterableObjectMeta`] table.
fn prepare_decals(
    decals: Query<Entity, With<ClusteredDecal>>,
    mut global_clusterable_object_meta: ResMut<GlobalClusterableObjectMeta>,
    render_decals: Res<RenderClusteredDecals>,
) {
    for decal_entity in &decals {
        // Decals that weren't assigned an index this frame are skipped.
        let Some(&index) = render_decals.entity_to_decal_index.get(&decal_entity) else {
            continue;
        };
        global_clusterable_object_meta
            .entity_to_index
            .insert(decal_entity, index);
    }
}
/// Returns the layout for the clustered-decal-related bind group entries for a
/// single view.
///
/// Returns `None` when clustered decals are unusable on the current platform
/// (see [`clustered_decals_are_usable`]).
pub(crate) fn get_bind_group_layout_entries(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> Option<[BindGroupLayoutEntryBuilder; 3]> {
    // If binding arrays aren't supported on the current platform, we have no
    // bind group layout entries.
    if !clustered_decals_are_usable(render_device, render_adapter) {
        return None;
    }
    Some([
        // `decals`
        binding_types::storage_buffer_read_only::<RenderClusteredDecal>(false),
        // `decal_textures`
        // `max_view_decals` only returns 1024 or 8, so the `NonZero`
        // conversion here can never fail.
        binding_types::texture_2d(TextureSampleType::Float { filterable: true })
            .count(NonZero::<u32>::new(max_view_decals(render_device)).unwrap()),
        // `decal_sampler`
        binding_types::sampler(SamplerBindingType::Filtering),
    ])
}
impl<'a> RenderViewClusteredDecalBindGroupEntries<'a> {
    /// Creates and returns the bind group entries for clustered decals for a
    /// single view.
    ///
    /// Returns `None` when clustered decals are unsupported on the current
    /// platform, or when the decals buffer hasn't been allocated yet.
    pub(crate) fn get(
        render_decals: &RenderClusteredDecals,
        decals_buffer: &'a DecalsBuffer,
        images: &'a RenderAssets<GpuImage>,
        fallback_image: &'a FallbackImage,
        render_device: &RenderDevice,
        render_adapter: &RenderAdapter,
    ) -> Option<RenderViewClusteredDecalBindGroupEntries<'a>> {
        // Skip the entries if decals are unsupported on the current platform.
        if !clustered_decals_are_usable(render_device, render_adapter) {
            return None;
        }
        // We use the first sampler among all the images. This assumes that all
        // images use the same sampler, which is a documented restriction. If
        // there's no sampler, we just use the one from the fallback image.
        //
        // `find_map` replaces the former `filter_map(..).next()` chain
        // (clippy::filter_map_next).
        let sampler = match render_decals
            .binding_index_to_textures
            .iter()
            .find_map(|image_id| images.get(*image_id))
        {
            Some(gpu_image) => &gpu_image.sampler,
            None => &fallback_image.d2.sampler,
        };
        // Gather up the decal textures, substituting the fallback image for
        // any image that isn't loaded yet.
        let mut texture_views = vec![];
        for image_id in &render_decals.binding_index_to_textures {
            match images.get(*image_id) {
                None => texture_views.push(&*fallback_image.d2.texture_view),
                Some(gpu_image) => texture_views.push(&*gpu_image.texture_view),
            }
        }
        // If required on this platform, pad out the binding array to its
        // maximum length.
        if !render_device
            .features()
            .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY)
        {
            let max_view_decals = max_view_decals(render_device);
            while texture_views.len() < max_view_decals as usize {
                texture_views.push(&*fallback_image.d2.texture_view);
            }
        } else if texture_views.is_empty() {
            // NOTE(review): an empty binding array is presumably invalid even
            // when partially bound — confirm against wgpu's requirements.
            texture_views.push(&*fallback_image.d2.texture_view);
        }
        Some(RenderViewClusteredDecalBindGroupEntries {
            // `buffer()` is `None` until `upload_decals` has written it.
            decals: decals_buffer.buffer()?,
            texture_views,
            sampler,
        })
    }
}
impl RenderClusteredDecals {
    /// Returns the index of the given image in the decal texture binding array,
    /// adding it to the list if necessary.
    ///
    /// Keeps the two maps in sync: `texture_to_binding_index` maps an image to
    /// its slot, while `binding_index_to_textures` is the slot-ordered list of
    /// images.
    fn get_or_insert_image(&mut self, image_id: &AssetId<Image>) -> i32 {
        *self
            .texture_to_binding_index
            .entry(*image_id)
            .or_insert_with(|| {
                // New images are appended, so the next free slot is the
                // current length of the binding array.
                let index = self.binding_index_to_textures.len() as i32;
                self.binding_index_to_textures.push(*image_id);
                index
            })
    }
}
/// Uploads the list of decals from [`RenderClusteredDecals::decals`] to the
/// GPU.
fn upload_decals(
    render_decals: Res<RenderClusteredDecals>,
    mut decals_buffer: ResMut<DecalsBuffer>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // Rebuild the CPU-side copy from scratch each frame.
    decals_buffer.clear();
    for decal in render_decals.decals.iter().copied() {
        decals_buffer.push(decal);
    }
    // Ensure the buffer contains at least one (dummy) decal; a zero-length
    // buffer couldn't be bound.
    if decals_buffer.is_empty() {
        decals_buffer.push(RenderClusteredDecal::default());
    }
    decals_buffer.write_buffer(&render_device, &render_queue);
}
/// Returns true if clustered decals are usable on the current platform or false
/// otherwise.
///
/// Clustered decals are currently disabled on macOS and iOS due to insufficient
/// texture bindings and limited bindless support in `wgpu`.
///
/// They can also be compiled out entirely by disabling the
/// `pbr_clustered_decals` cargo feature.
pub fn clustered_decals_are_usable(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> bool {
    // Disable binding arrays on Metal. There aren't enough texture bindings available.
    // See issue #17553.
    // Re-enable this when `wgpu` has first-class bindless.
    binding_arrays_are_usable(render_device, render_adapter)
        && cfg!(feature = "pbr_clustered_decals")
}
/// Returns the maximum number of decals that can be in the scene, taking
/// platform limitations into account.
fn max_view_decals(render_device: &RenderDevice) -> u32 {
    // If the current `wgpu` platform doesn't support partially-bound binding
    // arrays, limit the number of decals to a low number. If we didn't do
    // this, then on such platforms we'd pay the maximum overhead even if
    // there are no decals in the scene.
    match render_device
        .features()
        .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY)
    {
        // This number was determined arbitrarily as a reasonable value that
        // would encompass most use cases (e.g. bullet holes in walls) while
        // offering a failsafe to prevent shaders becoming too slow if there
        // are extremely large numbers of decals.
        true => 1024,
        false => 8,
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/decal/mod.rs | crates/bevy_pbr/src/decal/mod.rs | //! Decal rendering.
//!
//! Decals are a material that render on top of the surface that they're placed above.
//! They can be used to render signs, paint, snow, impact craters, and other effects on top of surfaces.
// TODO: Once other decal types are added, write a paragraph comparing the different types in the module docs.
pub mod clustered;
mod forward;
pub use forward::*;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/decal/forward.rs | crates/bevy_pbr/src/decal/forward.rs | use crate::{
ExtendedMaterial, Material, MaterialExtension, MaterialExtensionKey, MaterialExtensionPipeline,
MaterialPlugin, StandardMaterial,
};
use bevy_app::{App, Plugin};
use bevy_asset::{Asset, Assets, Handle};
use bevy_ecs::{
component::Component, lifecycle::HookContext, resource::Resource, world::DeferredWorld,
};
use bevy_math::{prelude::Rectangle, Quat, Vec2, Vec3};
use bevy_mesh::{Mesh, Mesh3d, MeshBuilder, MeshVertexBufferLayoutRef, Meshable};
use bevy_reflect::{Reflect, TypePath};
use bevy_render::{
alpha::AlphaMode,
render_asset::RenderAssets,
render_resource::{
AsBindGroup, AsBindGroupShaderType, CompareFunction, RenderPipelineDescriptor, ShaderType,
SpecializedMeshPipelineError,
},
texture::GpuImage,
RenderDebugFlags,
};
use bevy_shader::load_shader_library;
/// Plugin to render [`ForwardDecal`]s.
pub struct ForwardDecalPlugin;
impl Plugin for ForwardDecalPlugin {
    fn build(&self, app: &mut App) {
        load_shader_library!(app, "forward_decal.wgsl");
        // Build the single shared unit-quad mesh that every forward decal
        // renders with: rotated from facing +Z to facing +Y, with generated
        // tangents.
        let mesh = app.world_mut().resource_mut::<Assets<Mesh>>().add(
            Rectangle::from_size(Vec2::ONE)
                .mesh()
                .build()
                .rotated_by(Quat::from_rotation_arc(Vec3::Z, Vec3::Y))
                .with_generated_tangents()
                .unwrap(),
        );
        app.insert_resource(ForwardDecalMesh(mesh));
        // Register the standard-material variant of the decal material out of
        // the box; users of other base materials register their own plugin.
        app.add_plugins(MaterialPlugin::<ForwardDecalMaterial<StandardMaterial>> {
            debug_flags: RenderDebugFlags::default(),
            ..Default::default()
        });
    }
}
/// A decal that renders via a 1x1 transparent quad mesh, smoothly alpha-blending with the underlying
/// geometry towards the edges.
///
/// Because forward decals are meshes, you can use arbitrary materials to control their appearance.
///
/// # Usage Notes
///
/// * Spawn this component on an entity with a [`crate::MeshMaterial3d`] component holding a [`ForwardDecalMaterial`].
/// * Any camera rendering a forward decal must have the [`bevy_core_pipeline::prepass::DepthPrepass`] component.
/// * Looking at forward decals at a steep angle can cause distortion. This can be mitigated by padding your decal's
///   texture with extra transparent pixels on the edges.
/// * On Wasm, requires using WebGPU and disabling `Msaa` on your camera.
#[derive(Component, Reflect)]
#[require(Mesh3d)]
// The hook swaps in the shared decal quad mesh when the entity's `Mesh3d` is
// still the default handle; see `forward_decal_set_mesh`.
#[component(on_add=forward_decal_set_mesh)]
pub struct ForwardDecal;
/// Type alias for an extended material with a [`ForwardDecalMaterialExt`] extension.
///
/// Make sure to register the [`MaterialPlugin`] for this material in your app setup.
///
/// [`StandardMaterial`] comes with out of the box support for forward decals.
#[expect(type_alias_bounds, reason = "Type alias generics not yet stable")]
pub type ForwardDecalMaterial<B: Material> = ExtendedMaterial<B, ForwardDecalMaterialExt>;
/// Material extension for a [`ForwardDecal`].
///
/// In addition to wrapping your material type with this extension, your shader must use
/// the `bevy_pbr::decal::forward::get_forward_decal_info` function.
///
/// The `FORWARD_DECAL` shader define will be made available to your shader so that you can gate
/// the forward decal code behind an ifdef.
///
/// The settings are uploaded to the GPU as a [`ForwardDecalMaterialExtUniform`]
/// (the fade factor is stored as its reciprocal).
#[derive(Asset, AsBindGroup, TypePath, Clone, Debug)]
#[uniform(200, ForwardDecalMaterialExtUniform)]
pub struct ForwardDecalMaterialExt {
    /// Controls the distance threshold for decal blending with surfaces.
    ///
    /// This parameter determines how far away a surface can be before the decal no longer blends
    /// with it and instead renders with full opacity.
    ///
    /// Lower values cause the decal to only blend with close surfaces, while higher values allow
    /// blending with more distant surfaces.
    ///
    /// Units are in meters.
    pub depth_fade_factor: f32,
}
/// The GPU-side uniform data for [`ForwardDecalMaterialExt`].
#[derive(Clone, Default, ShaderType)]
pub struct ForwardDecalMaterialExtUniform {
    /// Reciprocal of [`ForwardDecalMaterialExt::depth_fade_factor`], clamped
    /// on the CPU to avoid division by zero.
    pub inv_depth_fade_factor: f32,
}
impl AsBindGroupShaderType<ForwardDecalMaterialExtUniform> for ForwardDecalMaterialExt {
    /// Converts the material settings into their GPU representation, storing
    /// the fade factor as a reciprocal so the shader can multiply instead of
    /// divide.
    fn as_bind_group_shader_type(
        &self,
        _images: &RenderAssets<GpuImage>,
    ) -> ForwardDecalMaterialExtUniform {
        // Clamp to a small positive value to avoid division by zero.
        let fade_factor = self.depth_fade_factor.max(0.001);
        ForwardDecalMaterialExtUniform {
            inv_depth_fade_factor: fade_factor.recip(),
        }
    }
}
impl MaterialExtension for ForwardDecalMaterialExt {
    fn alpha_mode() -> Option<AlphaMode> {
        // Forward decals rely on alpha blending to fade toward their edges.
        Some(AlphaMode::Blend)
    }
    fn enable_shadows() -> bool {
        // Decals are flat quads overlaid on other geometry; they cast no
        // shadows.
        false
    }
    fn specialize(
        _pipeline: &MaterialExtensionPipeline,
        descriptor: &mut RenderPipelineDescriptor,
        _layout: &MeshVertexBufferLayoutRef,
        _key: MaterialExtensionKey<Self>,
    ) -> Result<(), SpecializedMeshPipelineError> {
        // Force the depth test to always pass so the decal quad draws over
        // the geometry it intersects.
        descriptor.depth_stencil.as_mut().unwrap().depth_compare = CompareFunction::Always;
        // Gate the decal shader code behind the `FORWARD_DECAL` define in
        // both stages.
        descriptor.vertex.shader_defs.push("FORWARD_DECAL".into());
        if let Some(fragment) = &mut descriptor.fragment {
            fragment.shader_defs.push("FORWARD_DECAL".into());
        }
        // Prefix the pipeline label so decal pipelines are identifiable in
        // captures and logs.
        if let Some(label) = &mut descriptor.label {
            *label = format!("forward_decal_{label}").into();
        }
        Ok(())
    }
}
impl Default for ForwardDecalMaterialExt {
    fn default() -> Self {
        Self {
            // 8 meters; see `depth_fade_factor` for the units and semantics.
            depth_fade_factor: 8.0,
        }
    }
}
/// The shared unit-quad mesh used by every [`ForwardDecal`], created in
/// [`ForwardDecalPlugin::build`].
#[derive(Resource)]
struct ForwardDecalMesh(Handle<Mesh>);
// Note: We need to use a hook here instead of required components since we cannot access resources
// with required components, and we can't otherwise get a handle to the asset from a required
// component constructor, since the constructor must be a function pointer, and we intentionally do
// not want to use `uuid_handle!`.
/// `on_add` hook for [`ForwardDecal`] that assigns the shared decal quad mesh
/// to the entity, unless the user already supplied a custom mesh.
fn forward_decal_set_mesh(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) {
    let decal_mesh = world.resource::<ForwardDecalMesh>().0.clone();
    let mut entity = world.entity_mut(entity);
    // `Mesh3d` is a required component of `ForwardDecal`, so it is guaranteed
    // to be present when this hook runs; state that invariant in the panic
    // message rather than using a bare `unwrap`.
    let mut entity_mesh = entity
        .get_mut::<Mesh3d>()
        .expect("`ForwardDecal` requires `Mesh3d`, so it should be present");
    // Only replace the mesh handle if the mesh handle is defaulted.
    if **entity_mesh == Handle::default() {
        entity_mesh.0 = decal_mesh;
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/ssr/mod.rs | crates/bevy_pbr/src/ssr/mod.rs | //! Screen space reflections implemented via raymarching.
use bevy_app::{App, Plugin};
use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_core_pipeline::{
core_3d::{
graph::{Core3d, Node3d},
DEPTH_TEXTURE_SAMPLING_SUPPORTED,
},
prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass},
FullscreenShader,
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{Has, QueryItem, With},
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{lifetimeless::Read, Commands, Query, Res, ResMut},
world::World,
};
use bevy_image::BevyDefault as _;
use bevy_light::EnvironmentMapLight;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
diagnostic::RecordDiagnostics,
extract_component::{ExtractComponent, ExtractComponentPlugin},
render_graph::{
NodeRunError, RenderGraph, RenderGraphContext, RenderGraphExt, ViewNode, ViewNodeRunner,
},
render_resource::{
binding_types, AddressMode, BindGroupEntries, BindGroupLayoutDescriptor,
BindGroupLayoutEntries, CachedRenderPipelineId, ColorTargetState, ColorWrites,
DynamicUniformBuffer, FilterMode, FragmentState, Operations, PipelineCache,
RenderPassColorAttachment, RenderPassDescriptor, RenderPipelineDescriptor, Sampler,
SamplerBindingType, SamplerDescriptor, ShaderStages, ShaderType, SpecializedRenderPipeline,
SpecializedRenderPipelines, TextureFormat, TextureSampleType,
},
renderer::{RenderAdapter, RenderContext, RenderDevice, RenderQueue},
view::{ExtractedView, Msaa, ViewTarget, ViewUniformOffset},
Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::{load_shader_library, Shader};
use bevy_utils::{once, prelude::default};
use tracing::info;
use crate::{
binding_arrays_are_usable, graph::NodePbr, ExtractedAtmosphere, MeshPipelineViewLayoutKey,
MeshPipelineViewLayouts, MeshViewBindGroup, RenderViewLightProbes,
ViewEnvironmentMapUniformOffset, ViewFogUniformOffset, ViewLightProbesUniformOffset,
ViewLightsUniformOffset,
};
/// Enables screen-space reflections for a camera.
///
/// Screen-space reflections are currently only supported with deferred rendering.
///
/// Add the [`ScreenSpaceReflections`] component to a camera to turn the effect
/// on for that view.
pub struct ScreenSpaceReflectionsPlugin;
/// Add this component to a camera to enable *screen-space reflections* (SSR).
///
/// Screen-space reflections currently require deferred rendering in order to
/// appear. Therefore, they also need the [`DepthPrepass`] and [`DeferredPrepass`]
/// components, which are inserted automatically,
/// but deferred rendering itself is not automatically enabled.
///
/// SSR currently performs no roughness filtering for glossy reflections, so
/// only very smooth surfaces will reflect objects in screen space. You can
/// adjust the `perceptual_roughness_threshold` in order to tune the threshold
/// below which screen-space reflections will be traced.
///
/// As with all screen-space techniques, SSR can only reflect objects on screen.
/// When objects leave the camera, they will disappear from reflections.
/// An alternative that doesn't suffer from this problem is the combination of
/// a [`LightProbe`](bevy_light::LightProbe) and [`EnvironmentMapLight`]. The advantage of SSR is
/// that it can reflect all objects, not just static ones.
///
/// SSR is an approximation technique and produces artifacts in some situations.
/// Hand-tuning the settings in this component will likely be useful.
///
/// Screen-space reflections are presently unsupported on WebGL 2 because of a
/// bug whereby Naga doesn't generate correct GLSL when sampling depth buffers,
/// which is required for screen-space raymarching.
///
/// During extraction this component is converted into a
/// [`ScreenSpaceReflectionsUniform`] for upload to the GPU.
#[derive(Clone, Copy, Component, Reflect)]
#[reflect(Component, Default, Clone)]
#[require(DepthPrepass, DeferredPrepass)]
#[doc(alias = "Ssr")]
pub struct ScreenSpaceReflections {
    /// The maximum PBR roughness level that will enable screen space
    /// reflections.
    pub perceptual_roughness_threshold: f32,
    /// When marching the depth buffer, we only have 2.5D information and don't
    /// know how thick surfaces are. We shall assume that the depth buffer
    /// fragments are cuboids with a constant thickness defined by this
    /// parameter.
    pub thickness: f32,
    /// The number of steps to be taken at regular intervals to find an initial
    /// intersection. Must not be zero.
    ///
    /// Higher values result in higher-quality reflections, because the
    /// raymarching shader is less likely to miss objects. However, they take
    /// more GPU time.
    pub linear_steps: u32,
    /// Exponent to be applied in the linear part of the march.
    ///
    /// A value of 1.0 will result in equidistant steps, and higher values will
    /// compress the earlier steps, and expand the later ones. This might be
    /// desirable in order to get more detail close to objects.
    ///
    /// For optimal performance, this should be a small unsigned integer, such
    /// as 1 or 2.
    pub linear_march_exponent: f32,
    /// Number of steps in a bisection (binary search) to perform once the
    /// linear search has found an intersection. Helps narrow down the hit,
    /// increasing the chance of the secant method finding an accurate hit
    /// point.
    pub bisection_steps: u32,
    /// Approximate the root position using the secant method—by solving for
    /// line-line intersection between the ray approach rate and the surface
    /// gradient.
    pub use_secant: bool,
}
/// A version of [`ScreenSpaceReflections`] for upload to the GPU.
///
/// For more information on these fields, see the corresponding documentation in
/// [`ScreenSpaceReflections`].
#[derive(Clone, Copy, Component, ShaderType)]
pub struct ScreenSpaceReflectionsUniform {
    perceptual_roughness_threshold: f32,
    thickness: f32,
    linear_steps: u32,
    linear_march_exponent: f32,
    bisection_steps: u32,
    /// A boolean converted to a `u32`.
    use_secant: u32,
}
/// The node in the render graph that traces screen space reflections.
#[derive(Default)]
pub struct ScreenSpaceReflectionsNode;
/// Identifies which screen space reflections render pipeline a view needs.
#[derive(Component, Deref, DerefMut)]
pub struct ScreenSpaceReflectionsPipelineId(pub CachedRenderPipelineId);
/// Information relating to the render pipeline for the screen space reflections
/// shader.
#[derive(Resource)]
pub struct ScreenSpaceReflectionsPipeline {
    /// The cached mesh-view bind group layouts, shared with the mesh pipeline.
    mesh_view_layouts: MeshPipelineViewLayouts,
    /// Linear-filtering sampler used for the scene color texture.
    color_sampler: Sampler,
    /// Linear-filtering sampler used for the depth texture.
    depth_linear_sampler: Sampler,
    /// Nearest-filtering sampler used for the depth texture.
    depth_nearest_sampler: Sampler,
    /// Layout of the SSR-specific bind group (bound at index 2 by the node).
    bind_group_layout: BindGroupLayoutDescriptor,
    /// Whether binding arrays are usable on this platform; enables the
    /// `MULTIPLE_LIGHT_PROBES_IN_ARRAY` shader define during specialization.
    binding_arrays_are_usable: bool,
    /// The shared fullscreen-triangle vertex state.
    fullscreen_shader: FullscreenShader,
    /// Handle to `ssr.wgsl`, used as the fragment shader.
    fragment_shader: Handle<Shader>,
}
/// A GPU buffer that stores the screen space reflection settings for each view.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct ScreenSpaceReflectionsBuffer(pub DynamicUniformBuffer<ScreenSpaceReflectionsUniform>);
/// A component that stores the offset within the
/// [`ScreenSpaceReflectionsBuffer`] for each view.
#[derive(Component, Default, Deref, DerefMut)]
pub struct ViewScreenSpaceReflectionsUniformOffset(u32);
/// Identifies a specific configuration of the SSR pipeline shader.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ScreenSpaceReflectionsPipelineKey {
    /// The mesh-view layout variant (prepass flags etc.) to specialize for.
    mesh_pipeline_view_key: MeshPipelineViewLayoutKey,
    /// Whether the view target is HDR, which selects the output format.
    is_hdr: bool,
    /// Whether the view has environment-map light probes.
    has_environment_maps: bool,
    /// Whether the view has an atmosphere.
    has_atmosphere: bool,
}
impl Plugin for ScreenSpaceReflectionsPlugin {
    fn build(&self, app: &mut App) {
        // Make the SSR and raymarching shader code importable.
        load_shader_library!(app, "ssr.wgsl");
        load_shader_library!(app, "raymarch.wgsl");
        app.add_plugins(ExtractComponentPlugin::<ScreenSpaceReflections>::default());
        // The rest of the setup only applies to the render sub-app.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<ScreenSpaceReflectionsBuffer>()
            .init_resource::<SpecializedRenderPipelines<ScreenSpaceReflectionsPipeline>>()
            .add_systems(
                RenderStartup,
                (
                    init_screen_space_reflections_pipeline,
                    add_screen_space_reflections_render_graph_edges,
                ),
            )
            .add_systems(Render, prepare_ssr_pipelines.in_set(RenderSystems::Prepare))
            .add_systems(
                Render,
                prepare_ssr_settings.in_set(RenderSystems::PrepareResources),
            )
            // Note: we add this node here but then we add edges in
            // `add_screen_space_reflections_render_graph_edges`.
            .add_render_graph_node::<ViewNodeRunner<ScreenSpaceReflectionsNode>>(
                Core3d,
                NodePbr::ScreenSpaceReflections,
            );
    }
}
/// Wires the SSR node into the `Core3d` render graph.
///
/// SSR must run before the main opaque pass, and after the deferred lighting
/// pass when that node is present in the graph.
fn add_screen_space_reflections_render_graph_edges(mut render_graph: ResMut<RenderGraph>) {
    let subgraph = render_graph.sub_graph_mut(Core3d);
    subgraph.add_node_edge(NodePbr::ScreenSpaceReflections, Node3d::MainOpaquePass);
    if subgraph
        .get_node_state(NodePbr::DeferredLightingPass)
        .is_ok()
    {
        subgraph.add_node_edge(
            NodePbr::DeferredLightingPass,
            NodePbr::ScreenSpaceReflections,
        );
    }
}
impl Default for ScreenSpaceReflections {
    // Reasonable default values.
    //
    // These are from
    // <https://gist.github.com/h3r2tic/9c8356bdaefbe80b1a22ae0aaee192db?permalink_comment_id=4552149#gistcomment-4552149>.
    fn default() -> Self {
        // Fields listed in the same order as the struct declaration.
        Self {
            perceptual_roughness_threshold: 0.1,
            thickness: 0.25,
            linear_steps: 16,
            linear_march_exponent: 1.0,
            bisection_steps: 4,
            use_secant: true,
        }
    }
}
impl ViewNode for ScreenSpaceReflectionsNode {
    type ViewQuery = (
        Read<ViewTarget>,
        Read<ViewUniformOffset>,
        Read<ViewLightsUniformOffset>,
        Read<ViewFogUniformOffset>,
        Read<ViewLightProbesUniformOffset>,
        Read<ViewScreenSpaceReflectionsUniformOffset>,
        Read<ViewEnvironmentMapUniformOffset>,
        Read<MeshViewBindGroup>,
        Read<ScreenSpaceReflectionsPipelineId>,
    );
    /// Records the SSR fullscreen pass for a single view: reads the current
    /// scene color, raymarches reflections in the fragment shader, and writes
    /// the result to the view's post-process destination texture.
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        (
            view_target,
            view_uniform_offset,
            view_lights_offset,
            view_fog_offset,
            view_light_probes_offset,
            view_ssr_offset,
            view_environment_map_offset,
            view_bind_group,
            ssr_pipeline_id,
        ): QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Grab the render pipeline. If it hasn't finished compiling yet,
        // skip the pass for this frame.
        let pipeline_cache = world.resource::<PipelineCache>();
        let Some(render_pipeline) = pipeline_cache.get_render_pipeline(**ssr_pipeline_id) else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        // Set up a standard pair of postprocessing textures.
        let postprocess = view_target.post_process_write();
        // Create the bind group for this view.
        let ssr_pipeline = world.resource::<ScreenSpaceReflectionsPipeline>();
        let ssr_bind_group = render_context.render_device().create_bind_group(
            "SSR bind group",
            &pipeline_cache.get_bind_group_layout(&ssr_pipeline.bind_group_layout),
            &BindGroupEntries::sequential((
                postprocess.source,
                &ssr_pipeline.color_sampler,
                &ssr_pipeline.depth_linear_sampler,
                &ssr_pipeline.depth_nearest_sampler,
            )),
        );
        // Build the SSR render pass.
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("ssr"),
            color_attachments: &[Some(RenderPassColorAttachment {
                view: postprocess.destination,
                depth_slice: None,
                resolve_target: None,
                ops: Operations::default(),
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "ssr");
        // Set bind groups. Group 0 is the mesh-view bind group with its
        // dynamic uniform offsets; the offset order must match the layout.
        render_pass.set_render_pipeline(render_pipeline);
        render_pass.set_bind_group(
            0,
            &view_bind_group.main,
            &[
                view_uniform_offset.offset,
                view_lights_offset.offset,
                view_fog_offset.offset,
                **view_light_probes_offset,
                **view_ssr_offset,
                **view_environment_map_offset,
            ],
        );
        render_pass.set_bind_group(1, &view_bind_group.binding_array, &[]);
        // Perform the SSR render pass. Drawing 3 vertices produces the
        // fullscreen triangle generated by the fullscreen vertex shader.
        render_pass.set_bind_group(2, &ssr_bind_group, &[]);
        render_pass.draw(0..3, 0..1);
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// Creates the [`ScreenSpaceReflectionsPipeline`] resource at render-app
/// startup: the SSR bind group layout, the three samplers the shader needs,
/// and the shader handles.
pub fn init_screen_space_reflections_pipeline(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_adapter: Res<RenderAdapter>,
    mesh_view_layouts: Res<MeshPipelineViewLayouts>,
    fullscreen_shader: Res<FullscreenShader>,
    asset_server: Res<AssetServer>,
) {
    // Create the bind group layout. The entry order must match the
    // `BindGroupEntries::sequential` call in the SSR node.
    let bind_group_layout = BindGroupLayoutDescriptor::new(
        "SSR bind group layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                binding_types::texture_2d(TextureSampleType::Float { filterable: true }),
                binding_types::sampler(SamplerBindingType::Filtering),
                binding_types::sampler(SamplerBindingType::Filtering),
                binding_types::sampler(SamplerBindingType::NonFiltering),
            ),
        ),
    );
    // Create the samplers we need.
    let color_sampler = render_device.create_sampler(&SamplerDescriptor {
        label: "SSR color sampler".into(),
        address_mode_u: AddressMode::ClampToEdge,
        address_mode_v: AddressMode::ClampToEdge,
        mag_filter: FilterMode::Linear,
        min_filter: FilterMode::Linear,
        ..default()
    });
    let depth_linear_sampler = render_device.create_sampler(&SamplerDescriptor {
        label: "SSR depth linear sampler".into(),
        address_mode_u: AddressMode::ClampToEdge,
        address_mode_v: AddressMode::ClampToEdge,
        mag_filter: FilterMode::Linear,
        min_filter: FilterMode::Linear,
        ..default()
    });
    let depth_nearest_sampler = render_device.create_sampler(&SamplerDescriptor {
        label: "SSR depth nearest sampler".into(),
        address_mode_u: AddressMode::ClampToEdge,
        address_mode_v: AddressMode::ClampToEdge,
        mag_filter: FilterMode::Nearest,
        min_filter: FilterMode::Nearest,
        ..default()
    });
    commands.insert_resource(ScreenSpaceReflectionsPipeline {
        mesh_view_layouts: mesh_view_layouts.clone(),
        color_sampler,
        depth_linear_sampler,
        depth_nearest_sampler,
        bind_group_layout,
        binding_arrays_are_usable: binding_arrays_are_usable(&render_device, &render_adapter),
        fullscreen_shader: fullscreen_shader.clone(),
        // Even though ssr was loaded using load_shader_library, we can still access it like a
        // normal embedded asset (so we can use it as both a library or a kernel).
        fragment_shader: load_embedded_asset!(asset_server.as_ref(), "ssr.wgsl"),
    });
}
/// Sets up screen space reflection pipelines for each applicable view.
///
/// A view is applicable when it has an SSR uniform plus both the depth and
/// deferred prepasses. The resulting pipeline ID is inserted on the view
/// entity as a [`ScreenSpaceReflectionsPipelineId`].
pub fn prepare_ssr_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<ScreenSpaceReflectionsPipeline>>,
    ssr_pipeline: Res<ScreenSpaceReflectionsPipeline>,
    views: Query<
        (
            Entity,
            &ExtractedView,
            Has<RenderViewLightProbes<EnvironmentMapLight>>,
            Has<NormalPrepass>,
            Has<MotionVectorPrepass>,
            Has<ExtractedAtmosphere>,
        ),
        (
            With<ScreenSpaceReflectionsUniform>,
            With<DepthPrepass>,
            With<DeferredPrepass>,
        ),
    >,
) {
    for (
        entity,
        extracted_view,
        has_environment_maps,
        has_normal_prepass,
        has_motion_vector_prepass,
        has_atmosphere,
    ) in &views
    {
        // SSR is only supported in the deferred pipeline, which has no MSAA
        // support. Thus we can assume MSAA is off.
        let mut mesh_pipeline_view_key = MeshPipelineViewLayoutKey::from(Msaa::Off)
            | MeshPipelineViewLayoutKey::DEPTH_PREPASS
            | MeshPipelineViewLayoutKey::DEFERRED_PREPASS;
        // Fold in the optional prepass/atmosphere flags for this view.
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::NORMAL_PREPASS,
            has_normal_prepass,
        );
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS,
            has_motion_vector_prepass,
        );
        mesh_pipeline_view_key.set(MeshPipelineViewLayoutKey::ATMOSPHERE, has_atmosphere);
        // Build the pipeline.
        let pipeline_id = pipelines.specialize(
            &pipeline_cache,
            &ssr_pipeline,
            ScreenSpaceReflectionsPipelineKey {
                mesh_pipeline_view_key,
                is_hdr: extracted_view.hdr,
                has_environment_maps,
                has_atmosphere,
            },
        );
        // Note which pipeline ID was used.
        commands
            .entity(entity)
            .insert(ScreenSpaceReflectionsPipelineId(pipeline_id));
    }
}
/// Gathers up screen space reflection settings for each applicable view and
/// writes them into a GPU buffer.
///
/// Every extracted view receives a [`ViewScreenSpaceReflectionsUniformOffset`]
/// component; views without SSR get a placeholder offset of 0.
pub fn prepare_ssr_settings(
    mut commands: Commands,
    views: Query<(Entity, Option<&ScreenSpaceReflectionsUniform>), With<ExtractedView>>,
    mut ssr_settings_buffer: ResMut<ScreenSpaceReflectionsBuffer>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // Bail out if no writer can be obtained (e.g. there are no views to
    // allocate for).
    let Some(mut writer) =
        ssr_settings_buffer.get_writer(views.iter().len(), &render_device, &render_queue)
    else {
        return;
    };
    for (view, ssr_uniform) in views.iter() {
        let uniform_offset = match ssr_uniform {
            // This view doesn't use SSR; point it at offset 0 as a dummy.
            None => 0,
            Some(ssr_uniform) => writer.write(ssr_uniform),
        };
        commands
            .entity(view)
            .insert(ViewScreenSpaceReflectionsUniformOffset(uniform_offset));
    }
}
impl ExtractComponent for ScreenSpaceReflections {
    type QueryData = Read<ScreenSpaceReflections>;
    type QueryFilter = ();
    type Out = ScreenSpaceReflectionsUniform;
    /// Converts the main-world [`ScreenSpaceReflections`] settings into the
    /// GPU uniform placed on the render-world view entity.
    ///
    /// Returns `None` (disabling SSR for the view) on platforms where depth
    /// texture sampling isn't supported correctly.
    fn extract_component(settings: QueryItem<'_, '_, Self::QueryData>) -> Option<Self::Out> {
        if !DEPTH_TEXTURE_SAMPLING_SUPPORTED {
            // `once!` ensures the message is logged a single time, not per
            // frame per view.
            once!(info!(
                "Disabling screen-space reflections on this platform because depth textures \
                aren't supported correctly"
            ));
            return None;
        }
        Some((*settings).into())
    }
}
impl SpecializedRenderPipeline for ScreenSpaceReflectionsPipeline {
    type Key = ScreenSpaceReflectionsPipelineKey;
    /// Builds the fullscreen SSR render pipeline for one view configuration.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // The mesh view layout is selected by the prepasses/features present
        // on the view (encoded in the key).
        let layout = self
            .mesh_view_layouts
            .get_view_layout(key.mesh_pipeline_view_key);
        let layout = vec![
            layout.main_layout.clone(),
            layout.binding_array_layout.clone(),
            self.bind_group_layout.clone(),
        ];
        // Shader defs that are always on for SSR, plus optional features below.
        let mut shader_defs = vec![
            "DEPTH_PREPASS".into(),
            "DEFERRED_PREPASS".into(),
            "SCREEN_SPACE_REFLECTIONS".into(),
        ];
        if key.has_environment_maps {
            shader_defs.push("ENVIRONMENT_MAP".into());
        }
        if self.binding_arrays_are_usable {
            shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into());
        }
        if key.has_atmosphere {
            shader_defs.push("ATMOSPHERE".into());
        }
        // The depth-sampler path is compiled out on wasm32 targets.
        #[cfg(not(target_arch = "wasm32"))]
        shader_defs.push("USE_DEPTH_SAMPLERS".into());
        RenderPipelineDescriptor {
            label: Some("SSR pipeline".into()),
            layout,
            vertex: self.fullscreen_shader.to_vertex_state(),
            fragment: Some(FragmentState {
                shader: self.fragment_shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    // Match the view target's texture format (HDR or default)
                    // so the pass can write directly to it.
                    format: if key.is_hdr {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    blend: None,
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            ..default()
        }
    }
}
impl From<ScreenSpaceReflections> for ScreenSpaceReflectionsUniform {
fn from(settings: ScreenSpaceReflections) -> Self {
Self {
perceptual_roughness_threshold: settings.perceptual_roughness_threshold,
thickness: settings.thickness,
linear_steps: settings.linear_steps,
linear_march_exponent: settings.linear_march_exponent,
bisection_steps: settings.bisection_steps,
use_secant: settings.use_secant as u32,
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/ssao/mod.rs | crates/bevy_pbr/src/ssao/mod.rs | use crate::NodePbr;
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
use bevy_camera::{Camera, Camera3d};
use bevy_core_pipeline::{
core_3d::graph::{Core3d, Node3d},
prepass::{DepthPrepass, NormalPrepass, ViewPrepassTextures},
};
use bevy_ecs::{
prelude::{Component, Entity},
query::{Has, QueryItem, With},
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::ToExtents;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
camera::{ExtractedCamera, TemporalJitter},
diagnostic::RecordDiagnostics,
extract_component::ExtractComponent,
globals::{GlobalsBuffer, GlobalsUniform},
render_graph::{NodeRunError, RenderGraphContext, RenderGraphExt, ViewNode, ViewNodeRunner},
render_resource::{
binding_types::{
sampler, texture_2d, texture_depth_2d, texture_storage_2d, uniform_buffer,
},
*,
},
renderer::{RenderAdapter, RenderContext, RenderDevice, RenderQueue},
sync_component::SyncComponentPlugin,
sync_world::RenderEntity,
texture::{CachedTexture, TextureCache},
view::{Msaa, ViewUniform, ViewUniformOffset, ViewUniforms},
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use bevy_shader::{load_shader_library, Shader, ShaderDefVal};
use bevy_utils::prelude::default;
use core::mem;
use tracing::{error, warn};
/// Plugin for screen space ambient occlusion.
///
/// Registers the SSAO shaders, pipelines, per-view preparation systems, and
/// the render-graph node that runs between the prepasses and the main pass.
pub struct ScreenSpaceAmbientOcclusionPlugin;
impl Plugin for ScreenSpaceAmbientOcclusionPlugin {
    fn build(&self, app: &mut App) {
        // Shared WGSL utilities plus one shader per compute pass.
        load_shader_library!(app, "ssao_utils.wgsl");
        embedded_asset!(app, "preprocess_depth.wgsl");
        embedded_asset!(app, "ssao.wgsl");
        embedded_asset!(app, "spatial_denoise.wgsl");
        app.add_plugins(SyncComponentPlugin::<ScreenSpaceAmbientOcclusion>::default());
    }
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // The depth-preprocess pass binds 5 storage textures (one per mip) in
        // a single dispatch, so skip setup on hardware that can't bind 5.
        if render_app
            .world()
            .resource::<RenderDevice>()
            .limits()
            .max_storage_textures_per_shader_stage
            < 5
        {
            warn!("ScreenSpaceAmbientOcclusionPlugin not loaded. GPU lacks support: Limits::max_storage_textures_per_shader_stage is less than 5.");
            return;
        }
        render_app
            .init_resource::<SsaoPipelines>()
            .init_resource::<SpecializedComputePipelines<SsaoPipelines>>()
            .add_systems(ExtractSchedule, extract_ssao_settings)
            .add_systems(
                Render,
                (
                    prepare_ssao_pipelines.in_set(RenderSystems::Prepare),
                    prepare_ssao_textures.in_set(RenderSystems::PrepareResources),
                    prepare_ssao_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                ),
            )
            .add_render_graph_node::<ViewNodeRunner<SsaoNode>>(
                Core3d,
                NodePbr::ScreenSpaceAmbientOcclusion,
            )
            .add_render_graph_edges(
                Core3d,
                (
                    // END_PRE_PASSES -> SCREEN_SPACE_AMBIENT_OCCLUSION -> MAIN_PASS
                    Node3d::EndPrepasses,
                    NodePbr::ScreenSpaceAmbientOcclusion,
                    Node3d::StartMainPass,
                ),
            );
    }
}
/// Component to apply screen space ambient occlusion to a 3d camera.
///
/// Screen space ambient occlusion (SSAO) approximates small-scale,
/// local occlusion of _indirect_ diffuse light between objects, based on what's visible on-screen.
/// SSAO does not apply to direct lighting, such as point or directional lights.
///
/// This darkens creases, e.g. on staircases, and gives nice contact shadows
/// where objects meet, giving entities a more "grounded" feel.
///
/// # Usage Notes
///
/// Requires that you add [`ScreenSpaceAmbientOcclusionPlugin`] to your app.
///
/// Adding this component also requires (and auto-inserts) a depth and a
/// normal prepass on the camera.
///
/// It strongly recommended that you use SSAO in conjunction with
/// TAA (`TemporalAntiAliasing`).
/// Doing so greatly reduces SSAO noise.
///
/// SSAO is not supported on `WebGL2`, and is not currently supported on `WebGPU`.
#[derive(Component, ExtractComponent, Reflect, PartialEq, Clone, Debug)]
#[reflect(Component, Debug, Default, PartialEq, Clone)]
#[require(DepthPrepass, NormalPrepass)]
#[doc(alias = "Ssao")]
pub struct ScreenSpaceAmbientOcclusion {
    /// Quality of the SSAO effect.
    ///
    /// Defaults to [`ScreenSpaceAmbientOcclusionQualityLevel::High`].
    pub quality_level: ScreenSpaceAmbientOcclusionQualityLevel,
    /// A constant estimated thickness of objects.
    ///
    /// This value is used to decide how far behind an object a ray of light needs to be in order
    /// to pass behind it. Any ray closer than that will be occluded.
    ///
    /// Defaults to `0.25`.
    pub constant_object_thickness: f32,
}
impl Default for ScreenSpaceAmbientOcclusion {
fn default() -> Self {
Self {
quality_level: ScreenSpaceAmbientOcclusionQualityLevel::default(),
constant_object_thickness: 0.25,
}
}
}
/// Selects how many samples the SSAO shader takes per pixel.
///
/// See [`Self::sample_counts`] for the concrete slice/sample counts each
/// preset maps to.
#[derive(Reflect, PartialEq, Eq, Hash, Clone, Copy, Default, Debug)]
#[reflect(PartialEq, Hash, Clone, Default)]
pub enum ScreenSpaceAmbientOcclusionQualityLevel {
    /// 1 slice, 2 samples per slice side (4 samples per pixel).
    Low,
    /// 2 slices, 2 samples per slice side (8 samples per pixel).
    Medium,
    /// 3 slices, 3 samples per slice side (18 samples per pixel).
    #[default]
    High,
    /// 9 slices, 3 samples per slice side (54 samples per pixel).
    Ultra,
    /// Fully user-specified sample counts.
    Custom {
        /// Higher slice count means less noise, but worse performance.
        slice_count: u32,
        /// Samples per slice side is also tweakable, but recommended to be left at 2 or 3.
        samples_per_slice_side: u32,
    },
}
impl ScreenSpaceAmbientOcclusionQualityLevel {
fn sample_counts(&self) -> (u32, u32) {
match self {
Self::Low => (1, 2), // 4 spp (1 * (2 * 2)), plus optional temporal samples
Self::Medium => (2, 2), // 8 spp (2 * (2 * 2)), plus optional temporal samples
Self::High => (3, 3), // 18 spp (3 * (3 * 2)), plus optional temporal samples
Self::Ultra => (9, 3), // 54 spp (9 * (3 * 2)), plus optional temporal samples
Self::Custom {
slice_count: slices,
samples_per_slice_side,
} => (*slices, *samples_per_slice_side),
}
}
}
/// Render-graph node that encodes the three SSAO compute passes
/// (depth preprocess, SSAO, spatial denoise) for each view.
#[derive(Default)]
struct SsaoNode {}
impl ViewNode for SsaoNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static SsaoPipelineId,
        &'static SsaoBindGroups,
        &'static ViewUniformOffset,
    );
    /// Encodes the SSAO compute work for one view. Silently skips the view
    /// (returning `Ok`) if the camera has no viewport size or if any of the
    /// three pipelines hasn't finished compiling yet.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (camera, pipeline_id, bind_groups, view_uniform_offset): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let pipelines = world.resource::<SsaoPipelines>();
        let pipeline_cache = world.resource::<PipelineCache>();
        let (
            Some(camera_size),
            Some(preprocess_depth_pipeline),
            Some(spatial_denoise_pipeline),
            Some(ssao_pipeline),
        ) = (
            camera.physical_viewport_size,
            pipeline_cache.get_compute_pipeline(pipelines.preprocess_depth_pipeline),
            pipeline_cache.get_compute_pipeline(pipelines.spatial_denoise_pipeline),
            pipeline_cache.get_compute_pipeline(pipeline_id.0),
        )
        else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let command_encoder = render_context.command_encoder();
        command_encoder.push_debug_group("ssao");
        let time_span = diagnostics.time_span(command_encoder, "ssao");
        {
            // Pass 1: downsample the prepass depth into the mip pyramid.
            // The dispatch covers the viewport in 16x16-pixel tiles.
            let mut preprocess_depth_pass =
                command_encoder.begin_compute_pass(&ComputePassDescriptor {
                    label: Some("ssao_preprocess_depth"),
                    timestamp_writes: None,
                });
            preprocess_depth_pass.set_pipeline(preprocess_depth_pipeline);
            preprocess_depth_pass.set_bind_group(0, &bind_groups.preprocess_depth_bind_group, &[]);
            preprocess_depth_pass.set_bind_group(
                1,
                &bind_groups.common_bind_group,
                &[view_uniform_offset.offset],
            );
            preprocess_depth_pass.dispatch_workgroups(
                camera_size.x.div_ceil(16),
                camera_size.y.div_ceil(16),
                1,
            );
        }
        {
            // Pass 2: compute the noisy ambient-occlusion term
            // (8x8-pixel tiles).
            let mut ssao_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
                label: Some("ssao"),
                timestamp_writes: None,
            });
            ssao_pass.set_pipeline(ssao_pipeline);
            ssao_pass.set_bind_group(0, &bind_groups.ssao_bind_group, &[]);
            ssao_pass.set_bind_group(
                1,
                &bind_groups.common_bind_group,
                &[view_uniform_offset.offset],
            );
            ssao_pass.dispatch_workgroups(camera_size.x.div_ceil(8), camera_size.y.div_ceil(8), 1);
        }
        {
            // Pass 3: spatially denoise the noisy AO into the final texture.
            let mut spatial_denoise_pass =
                command_encoder.begin_compute_pass(&ComputePassDescriptor {
                    label: Some("ssao_spatial_denoise"),
                    timestamp_writes: None,
                });
            spatial_denoise_pass.set_pipeline(spatial_denoise_pipeline);
            spatial_denoise_pass.set_bind_group(0, &bind_groups.spatial_denoise_bind_group, &[]);
            spatial_denoise_pass.set_bind_group(
                1,
                &bind_groups.common_bind_group,
                &[view_uniform_offset.offset],
            );
            spatial_denoise_pass.dispatch_workgroups(
                camera_size.x.div_ceil(8),
                camera_size.y.div_ceil(8),
                1,
            );
        }
        time_span.end(command_encoder);
        command_encoder.pop_debug_group();
        Ok(())
    }
}
/// Pipelines, bind group layouts, and static GPU resources shared by all SSAO
/// views. Constructed once via [`FromWorld`].
#[derive(Resource)]
struct SsaoPipelines {
    // The preprocess/denoise pipelines are fixed; the main SSAO pipeline is
    // specialized per view (see the `SpecializedComputePipeline` impl).
    preprocess_depth_pipeline: CachedComputePipelineId,
    spatial_denoise_pipeline: CachedComputePipelineId,
    // Bind group layouts for each pass; `common` is shared by all three.
    common_bind_group_layout: BindGroupLayoutDescriptor,
    preprocess_depth_bind_group_layout: BindGroupLayoutDescriptor,
    ssao_bind_group_layout: BindGroupLayoutDescriptor,
    spatial_denoise_bind_group_layout: BindGroupLayoutDescriptor,
    // 64x64 Hilbert-curve index lookup table
    // (see `generate_hilbert_index_lut`).
    hilbert_index_lut: TextureView,
    point_clamp_sampler: Sampler,
    linear_clamp_sampler: Sampler,
    // Handle to `ssao.wgsl`, the main SSAO shader.
    shader: Handle<Shader>,
    // R16Float when the adapter supports it as a storage texture, else
    // R32Float.
    depth_format: TextureFormat,
}
impl FromWorld for SsaoPipelines {
    /// Creates all shared SSAO GPU resources: the Hilbert LUT texture, the
    /// samplers, the bind group layouts, and the two fixed compute pipelines.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();
        let render_queue = world.resource::<RenderQueue>();
        let pipeline_cache = world.resource::<PipelineCache>();
        // Detect the depth format support: prefer R16Float if the adapter
        // allows it as a storage texture, otherwise fall back to R32Float.
        let render_adapter = world.resource::<RenderAdapter>();
        let depth_format = if render_adapter
            .get_texture_format_features(TextureFormat::R16Float)
            .allowed_usages
            .contains(TextureUsages::STORAGE_BINDING)
        {
            TextureFormat::R16Float
        } else {
            TextureFormat::R32Float
        };
        // Upload the precomputed 64x64 Hilbert-curve index LUT.
        let hilbert_index_lut = render_device
            .create_texture_with_data(
                render_queue,
                &(TextureDescriptor {
                    label: Some("ssao_hilbert_index_lut"),
                    size: Extent3d {
                        width: HILBERT_WIDTH as u32,
                        height: HILBERT_WIDTH as u32,
                        depth_or_array_layers: 1,
                    },
                    mip_level_count: 1,
                    sample_count: 1,
                    dimension: TextureDimension::D2,
                    format: TextureFormat::R16Uint,
                    usage: TextureUsages::TEXTURE_BINDING,
                    view_formats: &[],
                }),
                TextureDataOrder::default(),
                bytemuck::cast_slice(&generate_hilbert_index_lut()),
            )
            .create_view(&TextureViewDescriptor::default());
        // Two clamp-to-edge samplers: nearest-filtered and linear-filtered.
        let point_clamp_sampler = render_device.create_sampler(&SamplerDescriptor {
            min_filter: FilterMode::Nearest,
            mag_filter: FilterMode::Nearest,
            mipmap_filter: FilterMode::Nearest,
            address_mode_u: AddressMode::ClampToEdge,
            address_mode_v: AddressMode::ClampToEdge,
            ..Default::default()
        });
        let linear_clamp_sampler = render_device.create_sampler(&SamplerDescriptor {
            min_filter: FilterMode::Linear,
            mag_filter: FilterMode::Linear,
            mipmap_filter: FilterMode::Nearest,
            address_mode_u: AddressMode::ClampToEdge,
            address_mode_v: AddressMode::ClampToEdge,
            ..Default::default()
        });
        // Group 1 in every pass: both samplers plus the per-view uniform
        // (dynamic offset).
        let common_bind_group_layout = BindGroupLayoutDescriptor::new(
            "ssao_common_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    sampler(SamplerBindingType::NonFiltering),
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<ViewUniform>(true),
                ),
            ),
        );
        // Prepass depth in, five storage-texture mip levels out.
        let preprocess_depth_bind_group_layout = BindGroupLayoutDescriptor::new(
            "ssao_preprocess_depth_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    texture_depth_2d(),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                ),
            ),
        );
        // Inputs (preprocessed depth, normals, Hilbert LUT), outputs (noisy
        // AO, depth differences), plus globals and the thickness uniform.
        let ssao_bind_group_layout = BindGroupLayoutDescriptor::new(
            "ssao_ssao_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    texture_2d(TextureSampleType::Float { filterable: false }),
                    texture_2d(TextureSampleType::Uint),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                    texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::WriteOnly),
                    uniform_buffer::<GlobalsUniform>(false),
                    uniform_buffer::<f32>(false),
                ),
            ),
        );
        // Noisy AO and depth differences in, denoised AO out.
        let spatial_denoise_bind_group_layout = BindGroupLayoutDescriptor::new(
            "ssao_spatial_denoise_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    texture_2d(TextureSampleType::Float { filterable: false }),
                    texture_2d(TextureSampleType::Uint),
                    texture_storage_2d(depth_format, StorageTextureAccess::WriteOnly),
                ),
            ),
        );
        // Shader def shared by both fixed pipelines (and the specialized SSAO
        // pipeline) so all shaders agree on the storage texture format.
        let mut shader_defs = Vec::new();
        if depth_format == TextureFormat::R16Float {
            shader_defs.push("USE_R16FLOAT".into());
        }
        let preprocess_depth_pipeline =
            pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
                label: Some("ssao_preprocess_depth_pipeline".into()),
                layout: vec![
                    preprocess_depth_bind_group_layout.clone(),
                    common_bind_group_layout.clone(),
                ],
                shader: load_embedded_asset!(world, "preprocess_depth.wgsl"),
                shader_defs: shader_defs.clone(),
                ..default()
            });
        let spatial_denoise_pipeline =
            pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
                label: Some("ssao_spatial_denoise_pipeline".into()),
                layout: vec![
                    spatial_denoise_bind_group_layout.clone(),
                    common_bind_group_layout.clone(),
                ],
                shader: load_embedded_asset!(world, "spatial_denoise.wgsl"),
                shader_defs,
                ..default()
            });
        Self {
            preprocess_depth_pipeline,
            spatial_denoise_pipeline,
            common_bind_group_layout,
            preprocess_depth_bind_group_layout,
            ssao_bind_group_layout,
            spatial_denoise_bind_group_layout,
            hilbert_index_lut,
            point_clamp_sampler,
            linear_clamp_sampler,
            shader: load_embedded_asset!(world, "ssao.wgsl"),
            depth_format,
        }
    }
}
/// Specialization key for the main SSAO compute pipeline.
#[derive(PartialEq, Eq, Hash, Clone)]
struct SsaoPipelineKey {
    // Determines the SLICE_COUNT / SAMPLES_PER_SLICE_SIDE shader defs.
    quality_level: ScreenSpaceAmbientOcclusionQualityLevel,
    // Whether the view has a `TemporalJitter` component.
    temporal_jitter: bool,
}
impl SpecializedComputePipeline for SsaoPipelines {
    type Key = SsaoPipelineKey;
    /// Builds the main SSAO compute pipeline for the given key, baking the
    /// sample counts and feature flags in as shader defs.
    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let (slices, samples_per_side) = key.quality_level.sample_counts();
        let mut shader_defs = Vec::with_capacity(4);
        shader_defs.push(ShaderDefVal::Int("SLICE_COUNT".to_string(), slices as i32));
        shader_defs.push(ShaderDefVal::Int(
            "SAMPLES_PER_SLICE_SIDE".to_string(),
            samples_per_side as i32,
        ));
        if key.temporal_jitter {
            shader_defs.push("TEMPORAL_JITTER".into());
        }
        // Must agree with the def set in `SsaoPipelines::from_world` so every
        // pass uses the same storage texture format.
        if self.depth_format == TextureFormat::R16Float {
            shader_defs.push("USE_R16FLOAT".into());
        }
        ComputePipelineDescriptor {
            label: Some("ssao_ssao_pipeline".into()),
            layout: vec![
                self.ssao_bind_group_layout.clone(),
                self.common_bind_group_layout.clone(),
            ],
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}
/// Copies each active camera's [`ScreenSpaceAmbientOcclusion`] settings into
/// the render world during extraction.
///
/// Cameras with MSAA enabled are skipped with an error (SSAO requires
/// `Msaa::Off`), and the component is removed from inactive cameras so stale
/// settings don't linger.
fn extract_ssao_settings(
    mut commands: Commands,
    cameras: Extract<
        Query<
            (RenderEntity, &Camera, &ScreenSpaceAmbientOcclusion, &Msaa),
            (With<Camera3d>, With<DepthPrepass>, With<NormalPrepass>),
        >,
    >,
) {
    for (entity, camera, ssao_settings, msaa) in &cameras {
        if *msaa != Msaa::Off {
            error!(
                "SSAO is being used which requires Msaa::Off, but Msaa is currently set to Msaa::{:?}",
                *msaa
            );
            // Skip only the offending camera; a `return` here would abort
            // extraction for every remaining camera in the query.
            continue;
        }
        let mut entity_commands = commands
            .get_entity(entity)
            .expect("SSAO entity wasn't synced.");
        if camera.is_active {
            entity_commands.insert(ssao_settings.clone());
        } else {
            entity_commands.remove::<ScreenSpaceAmbientOcclusion>();
        }
    }
}
/// Per-view GPU resources created by `prepare_ssao_textures` and bound by
/// `prepare_ssao_bind_groups`.
#[derive(Component)]
pub struct ScreenSpaceAmbientOcclusionResources {
    // 5-mip downsampled depth pyramid written by the preprocess pass.
    preprocessed_depth_texture: CachedTexture,
    ssao_noisy_texture: CachedTexture, // Pre-spatially denoised texture
    pub screen_space_ambient_occlusion_texture: CachedTexture, // Spatially denoised texture
    // Written by the SSAO pass, read by the spatial denoise pass.
    depth_differences_texture: CachedTexture,
    // Uniform holding `ScreenSpaceAmbientOcclusion::constant_object_thickness`.
    thickness_buffer: Buffer,
}
/// Allocates the per-view SSAO working textures and the thickness uniform
/// buffer, sized to each camera's physical viewport.
fn prepare_ssao_textures(
    mut commands: Commands,
    mut texture_cache: ResMut<TextureCache>,
    render_device: Res<RenderDevice>,
    pipelines: Res<SsaoPipelines>,
    views: Query<(Entity, &ExtractedCamera, &ScreenSpaceAmbientOcclusion)>,
) {
    for (entity, camera, ssao_settings) in &views {
        let Some(viewport_size) = camera.physical_viewport_size else {
            continue;
        };
        let size = viewport_size.to_extents();
        // All SSAO working textures share the same size, dimension, and
        // usage; only the label, format, and mip count differ.
        let mut new_texture = |label, format, mip_level_count| {
            texture_cache.get(
                &render_device,
                TextureDescriptor {
                    label: Some(label),
                    size,
                    mip_level_count,
                    sample_count: 1,
                    dimension: TextureDimension::D2,
                    format,
                    usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
                    view_formats: &[],
                },
            )
        };
        let preprocessed_depth_texture =
            new_texture("ssao_preprocessed_depth_texture", pipelines.depth_format, 5);
        let ssao_noisy_texture = new_texture("ssao_noisy_texture", pipelines.depth_format, 1);
        let ssao_texture = new_texture("ssao_texture", pipelines.depth_format, 1);
        let depth_differences_texture =
            new_texture("ssao_depth_differences_texture", TextureFormat::R32Uint, 1);
        // Single-f32 uniform read by the SSAO shader.
        let thickness_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
            label: Some("thickness_buffer"),
            contents: &ssao_settings.constant_object_thickness.to_le_bytes(),
            usage: BufferUsages::UNIFORM,
        });
        commands
            .entity(entity)
            .insert(ScreenSpaceAmbientOcclusionResources {
                preprocessed_depth_texture,
                ssao_noisy_texture,
                screen_space_ambient_occlusion_texture: ssao_texture,
                depth_differences_texture,
                thickness_buffer,
            });
    }
}
/// Per-view cached ID of the specialized main SSAO compute pipeline.
#[derive(Component)]
struct SsaoPipelineId(CachedComputePipelineId);
/// Specializes the main SSAO compute pipeline for each view and records its
/// cached ID on the view entity.
fn prepare_ssao_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedComputePipelines<SsaoPipelines>>,
    pipeline: Res<SsaoPipelines>,
    views: Query<(Entity, &ScreenSpaceAmbientOcclusion, Has<TemporalJitter>)>,
) {
    for (entity, settings, temporal_jitter) in &views {
        let key = SsaoPipelineKey {
            quality_level: settings.quality_level,
            temporal_jitter,
        };
        let pipeline_id = pipelines.specialize(&pipeline_cache, &pipeline, key);
        commands.entity(entity).insert(SsaoPipelineId(pipeline_id));
    }
}
/// Per-view bind groups for the three SSAO compute passes.
#[derive(Component)]
struct SsaoBindGroups {
    // Bound at group 1 in all three passes.
    common_bind_group: BindGroup,
    preprocess_depth_bind_group: BindGroup,
    ssao_bind_group: BindGroup,
    spatial_denoise_bind_group: BindGroup,
}
/// Builds the per-view bind groups for the three SSAO compute passes, wiring
/// the textures allocated in `prepare_ssao_textures` to the layouts declared
/// in `SsaoPipelines`.
fn prepare_ssao_bind_groups(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    pipelines: Res<SsaoPipelines>,
    view_uniforms: Res<ViewUniforms>,
    global_uniforms: Res<GlobalsBuffer>,
    pipeline_cache: Res<PipelineCache>,
    views: Query<(
        Entity,
        &ScreenSpaceAmbientOcclusionResources,
        &ViewPrepassTextures,
    )>,
) {
    // Bail for the frame if the view/globals uniform buffers aren't ready.
    let (Some(view_uniforms), Some(globals_uniforms)) = (
        view_uniforms.uniforms.binding(),
        global_uniforms.buffer.binding(),
    ) else {
        return;
    };
    for (entity, ssao_resources, prepass_textures) in &views {
        let common_bind_group = render_device.create_bind_group(
            "ssao_common_bind_group",
            &pipeline_cache.get_bind_group_layout(&pipelines.common_bind_group_layout),
            &BindGroupEntries::sequential((
                &pipelines.point_clamp_sampler,
                &pipelines.linear_clamp_sampler,
                view_uniforms.clone(),
            )),
        );
        // One single-mip view per level of the preprocessed depth pyramid,
        // so the preprocess pass can write each mip as a storage texture.
        let create_depth_view = |mip_level| {
            ssao_resources
                .preprocessed_depth_texture
                .texture
                .create_view(&TextureViewDescriptor {
                    label: Some("ssao_preprocessed_depth_texture_mip_view"),
                    base_mip_level: mip_level,
                    format: Some(pipelines.depth_format),
                    dimension: Some(TextureViewDimension::D2),
                    mip_level_count: Some(1),
                    ..default()
                })
        };
        let preprocess_depth_bind_group = render_device.create_bind_group(
            "ssao_preprocess_depth_bind_group",
            &pipeline_cache.get_bind_group_layout(&pipelines.preprocess_depth_bind_group_layout),
            &BindGroupEntries::sequential((
                prepass_textures.depth_view().unwrap(),
                &create_depth_view(0),
                &create_depth_view(1),
                &create_depth_view(2),
                &create_depth_view(3),
                &create_depth_view(4),
            )),
        );
        // Entry order must match `ssao_bind_group_layout`.
        let ssao_bind_group = render_device.create_bind_group(
            "ssao_ssao_bind_group",
            &pipeline_cache.get_bind_group_layout(&pipelines.ssao_bind_group_layout),
            &BindGroupEntries::sequential((
                &ssao_resources.preprocessed_depth_texture.default_view,
                prepass_textures.normal_view().unwrap(),
                &pipelines.hilbert_index_lut,
                &ssao_resources.ssao_noisy_texture.default_view,
                &ssao_resources.depth_differences_texture.default_view,
                globals_uniforms.clone(),
                ssao_resources.thickness_buffer.as_entire_binding(),
            )),
        );
        let spatial_denoise_bind_group = render_device.create_bind_group(
            "ssao_spatial_denoise_bind_group",
            &pipeline_cache.get_bind_group_layout(&pipelines.spatial_denoise_bind_group_layout),
            &BindGroupEntries::sequential((
                &ssao_resources.ssao_noisy_texture.default_view,
                &ssao_resources.depth_differences_texture.default_view,
                &ssao_resources
                    .screen_space_ambient_occlusion_texture
                    .default_view,
            )),
        );
        commands.entity(entity).insert(SsaoBindGroups {
            common_bind_group,
            preprocess_depth_bind_group,
            ssao_bind_group,
            spatial_denoise_bind_group,
        });
    }
}
/// Precomputes the 64x64 Hilbert-curve index table uploaded as
/// `ssao_hilbert_index_lut`; entry `[x][y]` is `hilbert_index(x, y)`.
fn generate_hilbert_index_lut() -> [[u16; 64]; 64] {
    let mut lut = [[0u16; 64]; 64];
    for (x, row) in lut.iter_mut().enumerate() {
        for (y, slot) in row.iter_mut().enumerate() {
            *slot = hilbert_index(x as u16, y as u16);
        }
    }
    lut
}
// https://www.shadertoy.com/view/3tB3z3
const HILBERT_WIDTH: u16 = 64;
/// Returns the index along a Hilbert curve covering a
/// `HILBERT_WIDTH` x `HILBERT_WIDTH` grid for the cell at `(x, y)`.
fn hilbert_index(mut x: u16, mut y: u16) -> u16 {
    let mut index = 0;
    // Walk from the coarsest quadrant subdivision down to single cells.
    let mut cell_size: u16 = HILBERT_WIDTH / 2;
    while cell_size > 0 {
        let quadrant_x = u16::from(x & cell_size > 0);
        let quadrant_y = u16::from(y & cell_size > 0);
        index += cell_size * cell_size * ((3 * quadrant_x) ^ quadrant_y);
        // Rotate/flip the coordinate frame so the curve stays continuous
        // across quadrants.
        if quadrant_y == 0 {
            if quadrant_x == 1 {
                x = HILBERT_WIDTH - 1 - x;
                y = HILBERT_WIDTH - 1 - y;
            }
            mem::swap(&mut x, &mut y);
        }
        cell_size /= 2;
    }
    index
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/material_shade_nodes.rs | crates/bevy_pbr/src/meshlet/material_shade_nodes.rs | use super::{
material_pipeline_prepare::{
MeshletViewMaterialsDeferredGBufferPrepass, MeshletViewMaterialsMainOpaquePass,
MeshletViewMaterialsPrepass,
},
resource_manager::{MeshletViewBindGroups, MeshletViewResources},
InstanceManager,
};
use crate::{
MeshViewBindGroup, PrepassViewBindGroup, ViewEnvironmentMapUniformOffset, ViewFogUniformOffset,
ViewLightProbesUniformOffset, ViewLightsUniformOffset, ViewScreenSpaceReflectionsUniformOffset,
};
use bevy_camera::MainPassResolutionOverride;
use bevy_camera::Viewport;
use bevy_core_pipeline::prepass::{
MotionVectorPrepass, PreviousViewUniformOffset, ViewPrepassTextures,
};
use bevy_ecs::{
query::{Has, QueryItem},
world::World,
};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_resource::{
LoadOp, Operations, PipelineCache, RenderPassDepthStencilAttachment, RenderPassDescriptor,
StoreOp,
},
renderer::RenderContext,
view::{ViewTarget, ViewUniformOffset},
};
/// Fullscreen shading pass based on the visibility buffer generated from rasterizing meshlets.
///
/// Draws one fullscreen triangle per material present in the scene.
#[derive(Default)]
pub struct MeshletMainOpaquePass3dNode;
impl ViewNode for MeshletMainOpaquePass3dNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ViewTarget,
        &'static MeshViewBindGroup,
        &'static ViewUniformOffset,
        &'static ViewLightsUniformOffset,
        &'static ViewFogUniformOffset,
        &'static ViewLightProbesUniformOffset,
        &'static ViewScreenSpaceReflectionsUniformOffset,
        &'static ViewEnvironmentMapUniformOffset,
        Option<&'static MainPassResolutionOverride>,
        &'static MeshletViewMaterialsMainOpaquePass,
        &'static MeshletViewBindGroups,
        &'static MeshletViewResources,
    );
    /// Encodes the meshlet opaque shading pass for one view: a depth-tested
    /// fullscreen triangle per material, each masked by the material depth
    /// texture.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            camera,
            target,
            mesh_view_bind_group,
            view_uniform_offset,
            view_lights_offset,
            view_fog_offset,
            view_light_probes_offset,
            view_ssr_offset,
            view_environment_map_offset,
            resolution_override,
            meshlet_view_materials,
            meshlet_view_bind_groups,
            meshlet_view_resources,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        if meshlet_view_materials.is_empty() {
            return Ok(());
        }
        // Skip the pass entirely if any required resource is unavailable.
        let (
            Some(instance_manager),
            Some(pipeline_cache),
            Some(meshlet_material_depth),
            Some(meshlet_material_shade_bind_group),
        ) = (
            world.get_resource::<InstanceManager>(),
            world.get_resource::<PipelineCache>(),
            meshlet_view_resources.material_depth.as_ref(),
            meshlet_view_bind_groups.material_shade.as_ref(),
        )
        else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("meshlet_material_opaque_3d_pass"),
            color_attachments: &[Some(target.get_color_attachment())],
            // The material depth texture was written earlier; load it so the
            // per-material depth test can discard non-matching pixels.
            depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
                view: &meshlet_material_depth.default_view,
                depth_ops: Some(Operations {
                    load: LoadOp::Load,
                    store: StoreOp::Store,
                }),
                stencil_ops: None,
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "meshlet_material_opaque_3d_pass");
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_pass.set_camera_viewport(&viewport);
        }
        // Group 0: view uniforms (with dynamic offsets), group 1: binding
        // arrays, group 2: meshlet visibility-buffer data, group 3: material.
        render_pass.set_bind_group(
            0,
            &mesh_view_bind_group.main,
            &[
                view_uniform_offset.offset,
                view_lights_offset.offset,
                view_fog_offset.offset,
                **view_light_probes_offset,
                **view_ssr_offset,
                **view_environment_map_offset,
            ],
        );
        render_pass.set_bind_group(1, &mesh_view_bind_group.binding_array, &[]);
        render_pass.set_bind_group(2, meshlet_material_shade_bind_group, &[]);
        // 1 fullscreen triangle draw per material
        for (material_id, material_pipeline_id, material_bind_group) in
            meshlet_view_materials.iter()
        {
            if instance_manager.material_present_in_scene(material_id)
                && let Some(material_pipeline) =
                    pipeline_cache.get_render_pipeline(*material_pipeline_id)
            {
                // The vertex index range encodes the material ID (3 vertices
                // per material).
                let x = *material_id * 3;
                render_pass.set_render_pipeline(material_pipeline);
                render_pass.set_bind_group(3, material_bind_group, &[]);
                render_pass.draw(x..(x + 3), 0..1);
            }
        }
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// Fullscreen pass to generate prepass textures based on the visibility buffer generated from rasterizing meshlets.
///
/// Draws one fullscreen triangle per material present in the scene.
#[derive(Default)]
pub struct MeshletPrepassNode;
impl ViewNode for MeshletPrepassNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ViewPrepassTextures,
        &'static ViewUniformOffset,
        &'static PreviousViewUniformOffset,
        Option<&'static MainPassResolutionOverride>,
        Has<MotionVectorPrepass>,
        &'static MeshletViewMaterialsPrepass,
        &'static MeshletViewBindGroups,
        &'static MeshletViewResources,
    );
    /// Encodes the meshlet prepass for one view, writing normal and (if
    /// enabled) motion-vector textures with one fullscreen triangle per
    /// material.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            camera,
            view_prepass_textures,
            view_uniform_offset,
            previous_view_uniform_offset,
            resolution_override,
            view_has_motion_vector_prepass,
            meshlet_view_materials,
            meshlet_view_bind_groups,
            meshlet_view_resources,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        if meshlet_view_materials.is_empty() {
            return Ok(());
        }
        // Skip the pass entirely if any required resource is unavailable.
        let (
            Some(prepass_view_bind_group),
            Some(instance_manager),
            Some(pipeline_cache),
            Some(meshlet_material_depth),
            Some(meshlet_material_shade_bind_group),
        ) = (
            world.get_resource::<PrepassViewBindGroup>(),
            world.get_resource::<InstanceManager>(),
            world.get_resource::<PipelineCache>(),
            meshlet_view_resources.material_depth.as_ref(),
            meshlet_view_bind_groups.material_shade.as_ref(),
        )
        else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let color_attachments = vec![
            view_prepass_textures
                .normal
                .as_ref()
                .map(|normals_texture| normals_texture.get_attachment()),
            view_prepass_textures
                .motion_vectors
                .as_ref()
                .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()),
            // Use None in place of Deferred attachments
            None,
            None,
        ];
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("meshlet_material_prepass"),
            color_attachments: &color_attachments,
            // Load the previously written material depth so the per-material
            // depth test can discard non-matching pixels.
            depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
                view: &meshlet_material_depth.default_view,
                depth_ops: Some(Operations {
                    load: LoadOp::Load,
                    store: StoreOp::Store,
                }),
                stencil_ops: None,
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "meshlet_material_prepass");
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_pass.set_camera_viewport(&viewport);
        }
        // Group 0: the prepass view bind group variant matching whether this
        // view writes motion vectors (which needs the previous view uniform).
        if view_has_motion_vector_prepass {
            render_pass.set_bind_group(
                0,
                prepass_view_bind_group.motion_vectors.as_ref().unwrap(),
                &[
                    view_uniform_offset.offset,
                    previous_view_uniform_offset.offset,
                ],
            );
        } else {
            render_pass.set_bind_group(
                0,
                prepass_view_bind_group.no_motion_vectors.as_ref().unwrap(),
                &[view_uniform_offset.offset],
            );
        }
        render_pass.set_bind_group(1, &prepass_view_bind_group.empty_bind_group, &[]);
        render_pass.set_bind_group(2, meshlet_material_shade_bind_group, &[]);
        // 1 fullscreen triangle draw per material
        for (material_id, material_pipeline_id, material_bind_group) in
            meshlet_view_materials.iter()
        {
            if instance_manager.material_present_in_scene(material_id)
                && let Some(material_pipeline) =
                    pipeline_cache.get_render_pipeline(*material_pipeline_id)
            {
                let x = *material_id * 3;
                render_pass.set_render_pipeline(material_pipeline);
                // The material bind group belongs at index 3: binding it at
                // index 2 would clobber the meshlet material-shade bind group
                // set just above. This matches MeshletMainOpaquePass3dNode.
                render_pass.set_bind_group(3, material_bind_group, &[]);
                render_pass.draw(x..(x + 3), 0..1);
            }
        }
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// Fullscreen pass to generate a gbuffer based on the visibility buffer generated from rasterizing meshlets.
///
/// Draws one fullscreen triangle per material present in the scene.
#[derive(Default)]
pub struct MeshletDeferredGBufferPrepassNode;
impl ViewNode for MeshletDeferredGBufferPrepassNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ViewPrepassTextures,
        &'static ViewUniformOffset,
        &'static PreviousViewUniformOffset,
        Option<&'static MainPassResolutionOverride>,
        Has<MotionVectorPrepass>,
        &'static MeshletViewMaterialsDeferredGBufferPrepass,
        &'static MeshletViewBindGroups,
        &'static MeshletViewResources,
    );

    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            camera,
            view_prepass_textures,
            view_uniform_offset,
            previous_view_uniform_offset,
            resolution_override,
            view_has_motion_vector_prepass,
            meshlet_view_materials,
            meshlet_view_bind_groups,
            meshlet_view_resources,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        // Nothing to draw for this view.
        if meshlet_view_materials.is_empty() {
            return Ok(());
        }

        // Bail out quietly if any required resource isn't available yet
        // (e.g. during the first frames, before everything is prepared).
        let (
            Some(prepass_view_bind_group),
            Some(instance_manager),
            Some(pipeline_cache),
            Some(meshlet_material_depth),
            Some(meshlet_material_shade_bind_group),
        ) = (
            world.get_resource::<PrepassViewBindGroup>(),
            world.get_resource::<InstanceManager>(),
            world.get_resource::<PipelineCache>(),
            meshlet_view_resources.material_depth.as_ref(),
            meshlet_view_bind_groups.material_shade.as_ref(),
        )
        else {
            return Ok(());
        };

        // G-buffer attachments; any the view doesn't have stay `None`.
        let color_attachments = vec![
            view_prepass_textures
                .normal
                .as_ref()
                .map(|normals_texture| normals_texture.get_attachment()),
            view_prepass_textures
                .motion_vectors
                .as_ref()
                .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()),
            view_prepass_textures
                .deferred
                .as_ref()
                .map(|deferred_texture| deferred_texture.get_attachment()),
            view_prepass_textures
                .deferred_lighting_pass_id
                .as_ref()
                .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()),
        ];
        let diagnostics = render_context.diagnostic_recorder();
        // Depth is loaded (not cleared): the material-depth texture was written
        // by the earlier visibility-buffer raster work and is used here to
        // restrict each material's fullscreen draw to its own pixels.
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("meshlet_material_deferred_prepass"),
            color_attachments: &color_attachments,
            depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
                view: &meshlet_material_depth.default_view,
                depth_ops: Some(Operations {
                    load: LoadOp::Load,
                    store: StoreOp::Store,
                }),
                stencil_ops: None,
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span =
            diagnostics.pass_span(&mut render_pass, "meshlet_material_deferred_prepass");
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_pass.set_camera_viewport(&viewport);
        }

        // Group 0: view uniforms; layout differs depending on whether the view
        // has a motion-vector prepass (which needs the previous view uniforms).
        if view_has_motion_vector_prepass {
            render_pass.set_bind_group(
                0,
                prepass_view_bind_group.motion_vectors.as_ref().unwrap(),
                &[
                    view_uniform_offset.offset,
                    previous_view_uniform_offset.offset,
                ],
            );
        } else {
            render_pass.set_bind_group(
                0,
                prepass_view_bind_group.no_motion_vectors.as_ref().unwrap(),
                &[view_uniform_offset.offset],
            );
        }
        render_pass.set_bind_group(1, &prepass_view_bind_group.empty_bind_group, &[]);
        // Group 2 is rebound per material inside the loop below; this initial
        // binding covers the meshlet shading inputs.
        render_pass.set_bind_group(2, meshlet_material_shade_bind_group, &[]);

        // 1 fullscreen triangle draw per material
        for (material_id, material_pipeline_id, material_bind_group) in
            meshlet_view_materials.iter()
        {
            if instance_manager.material_present_in_scene(material_id)
                && let Some(material_pipeline) =
                    pipeline_cache.get_render_pipeline(*material_pipeline_id)
            {
                // Vertex range encodes the material ID: vertices [3*id, 3*id+3).
                let x = *material_id * 3;
                render_pass.set_render_pipeline(material_pipeline);
                render_pass.set_bind_group(2, material_bind_group, &[]);
                render_pass.draw(x..(x + 3), 0..1);
            }
        }
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/persistent_buffer_impls.rs | crates/bevy_pbr/src/meshlet/persistent_buffer_impls.rs | use crate::meshlet::asset::{BvhNode, MeshletCullData};
use super::{asset::Meshlet, persistent_buffer::PersistentGpuBufferable};
use alloc::sync::Arc;
use bevy_math::Vec2;
use bevy_render::render_resource::BufferAddress;
impl PersistentGpuBufferable for Arc<[BvhNode]> {
    /// Metadata = index of this mesh's first meshlet in the shared meshlet buffer.
    type Metadata = u32;

    fn size_in_bytes(&self) -> usize {
        self.len() * size_of::<BvhNode>()
    }

    fn write_bytes_le(
        &self,
        base_meshlet_index: Self::Metadata,
        buffer_slice: &mut [u8],
        buffer_offset: BufferAddress,
    ) {
        const SIZE: usize = size_of::<BvhNode>();
        for (i, &node) in self.iter().enumerate() {
            // Rebase the node's child offsets from mesh-local to global buffer
            // indices before serializing it into the destination slice.
            let bytes: [u8; SIZE] =
                bytemuck::cast(node.offset_aabbs(base_meshlet_index, buffer_offset));
            buffer_slice[i * SIZE..(i + 1) * SIZE].copy_from_slice(&bytes);
        }
    }
}
impl BvhNode {
    /// Rewrites each child's `child_offset` from a mesh-local index to a global
    /// one: interior (BVH node) children are rebased by this slice's position in
    /// the BVH node buffer, leaf children by the mesh's first meshlet index.
    fn offset_aabbs(mut self, base_meshlet_index: u32, buffer_offset: BufferAddress) -> Self {
        let size = size_of::<BvhNode>();
        // `buffer_offset` is in bytes; convert it to an index in whole nodes.
        let base_bvh_node_index = (buffer_offset / size as u64) as u32;
        for i in 0..self.aabbs.len() {
            self.aabbs[i].child_offset += if self.child_is_bvh_node(i) {
                base_bvh_node_index
            } else {
                base_meshlet_index
            };
        }
        self
    }

    /// A child count of `u8::MAX` is the sentinel marking an interior child
    /// (a BVH node rather than a run of meshlets).
    fn child_is_bvh_node(&self, i: usize) -> bool {
        self.child_counts[i] == u8::MAX
    }
}
impl PersistentGpuBufferable for Arc<[Meshlet]> {
    /// Metadata = byte offsets of this mesh's vertex position data, vertex
    /// attribute data, and index data within their respective shared buffers.
    type Metadata = (u64, u64, u64);

    fn size_in_bytes(&self) -> usize {
        self.len() * size_of::<Meshlet>()
    }

    fn write_bytes_le(
        &self,
        (vertex_position_offset, vertex_attribute_offset, index_offset): Self::Metadata,
        buffer_slice: &mut [u8],
        _: BufferAddress,
    ) {
        // Loop-invariant; also usable as an array length below.
        const SIZE: usize = size_of::<Meshlet>();
        // Positions are bit-addressed (hence the *8 bytes->bits conversion),
        // attributes are u32-addressed, indices are byte-addressed.
        let vertex_position_offset = (vertex_position_offset * 8) as u32;
        let vertex_attribute_offset = (vertex_attribute_offset as usize / size_of::<u32>()) as u32;
        let index_offset = index_offset as u32;
        for (i, meshlet) in self.iter().enumerate() {
            let byte_start = i * SIZE;
            // Serialize a copy of the meshlet with its start offsets rebased
            // into the shared buffers.
            let bytes = bytemuck::cast::<_, [u8; SIZE]>(Meshlet {
                start_vertex_position_bit: meshlet.start_vertex_position_bit
                    + vertex_position_offset,
                start_vertex_attribute_id: meshlet.start_vertex_attribute_id
                    + vertex_attribute_offset,
                start_index_id: meshlet.start_index_id + index_offset,
                ..*meshlet
            });
            // `copy_from_slice` is the idiomatic memcpy form for `Copy` elements.
            buffer_slice[byte_start..byte_start + SIZE].copy_from_slice(&bytes);
        }
    }
}
impl PersistentGpuBufferable for Arc<[MeshletCullData]> {
    /// Cull data needs no rebasing, so there is no metadata.
    type Metadata = ();

    fn size_in_bytes(&self) -> usize {
        self.len() * size_of::<MeshletCullData>()
    }

    fn write_bytes_le(&self, _: Self::Metadata, buffer_slice: &mut [u8], _: BufferAddress) {
        // Straight memcpy of the raw bytes; `copy_from_slice` is the idiomatic
        // form for `Copy` (here `u8`) elements.
        buffer_slice.copy_from_slice(bytemuck::cast_slice(self));
    }
}
impl PersistentGpuBufferable for Arc<[u8]> {
    /// Raw bytes need no rebasing, so there is no metadata.
    type Metadata = ();

    fn size_in_bytes(&self) -> usize {
        self.len()
    }

    fn write_bytes_le(&self, _: Self::Metadata, buffer_slice: &mut [u8], _: BufferAddress) {
        // `copy_from_slice` is the idiomatic memcpy form for `u8`.
        buffer_slice.copy_from_slice(self);
    }
}
impl PersistentGpuBufferable for Arc<[u32]> {
    /// Plain u32 data needs no rebasing, so there is no metadata.
    type Metadata = ();

    fn size_in_bytes(&self) -> usize {
        self.len() * size_of::<u32>()
    }

    fn write_bytes_le(&self, _: Self::Metadata, buffer_slice: &mut [u8], _: BufferAddress) {
        // Straight memcpy of the raw bytes via `copy_from_slice`.
        buffer_slice.copy_from_slice(bytemuck::cast_slice(self));
    }
}
impl PersistentGpuBufferable for Arc<[Vec2]> {
    /// Plain Vec2 data needs no rebasing, so there is no metadata.
    type Metadata = ();

    fn size_in_bytes(&self) -> usize {
        self.len() * size_of::<Vec2>()
    }

    fn write_bytes_le(&self, _: Self::Metadata, buffer_slice: &mut [u8], _: BufferAddress) {
        // Straight memcpy of the raw bytes via `copy_from_slice`.
        buffer_slice.copy_from_slice(bytemuck::cast_slice(self));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/instance_manager.rs | crates/bevy_pbr/src/meshlet/instance_manager.rs | use super::{meshlet_mesh_manager::MeshletMeshManager, MeshletMesh, MeshletMesh3d};
use crate::DUMMY_MESH_MATERIAL;
use crate::{
meshlet::asset::MeshletAabb, MaterialBindingId, MeshFlags, MeshTransforms, MeshUniform,
PreviousGlobalTransform, RenderMaterialBindings, RenderMaterialInstances,
};
use bevy_asset::{AssetEvent, AssetServer, Assets, UntypedAssetId};
use bevy_camera::visibility::RenderLayers;
use bevy_ecs::{
entity::{Entities, Entity, EntityHashMap},
message::MessageReader,
query::Has,
resource::Resource,
system::{Local, Query, Res, ResMut, SystemState},
};
use bevy_light::{NotShadowCaster, NotShadowReceiver};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_render::{render_resource::StorageBuffer, sync_world::MainEntity, MainWorld};
use bevy_transform::components::GlobalTransform;
use core::ops::DerefMut;
/// Manages data for each entity with a [`MeshletMesh`].
///
/// All the `instance_*` collections are parallel arrays indexed by instance;
/// `add_instance` appends to every one of them in lockstep.
#[derive(Resource)]
pub struct InstanceManager {
    /// Amount of instances in the scene.
    pub scene_instance_count: u32,
    /// The max BVH depth of any instance in the scene. This is used to control the number of
    /// dependent dispatches emitted for BVH traversal.
    pub max_bvh_depth: u32,
    /// Per-instance [`MainEntity`], [`RenderLayers`], and [`NotShadowCaster`].
    pub instances: Vec<(MainEntity, RenderLayers, bool)>,
    /// Per-instance [`MeshUniform`].
    pub instance_uniforms: StorageBuffer<Vec<MeshUniform>>,
    /// Per-instance model-space AABB.
    pub instance_aabbs: StorageBuffer<Vec<MeshletAabb>>,
    /// Per-instance material ID.
    pub instance_material_ids: StorageBuffer<Vec<u32>>,
    /// Per-instance index to the root node of the instance's BVH.
    pub instance_bvh_root_nodes: StorageBuffer<Vec<u32>>,
    /// Per-view per-instance visibility bit. Used for [`RenderLayers`] and [`NotShadowCaster`] support.
    pub view_instance_visibility: EntityHashMap<StorageBuffer<Vec<u32>>>,
    /// Most recently issued material ID (`get_material_id` pre-increments, so
    /// real IDs start at 1; 0 is the placeholder pushed by `add_instance`).
    next_material_id: u32,
    /// Map of material asset to material ID.
    material_id_lookup: HashMap<UntypedAssetId, u32>,
    /// Set of material IDs used in the scene.
    material_ids_present_in_scene: HashSet<u32>,
}
impl InstanceManager {
    /// Creates an empty manager with labeled (but not yet allocated) GPU buffers.
    pub fn new() -> Self {
        Self {
            scene_instance_count: 0,
            max_bvh_depth: 0,
            instances: Vec::new(),
            instance_uniforms: {
                let mut buffer = StorageBuffer::default();
                buffer.set_label(Some("meshlet_instance_uniforms"));
                buffer
            },
            instance_aabbs: {
                let mut buffer = StorageBuffer::default();
                buffer.set_label(Some("meshlet_instance_aabbs"));
                buffer
            },
            instance_material_ids: {
                let mut buffer = StorageBuffer::default();
                buffer.set_label(Some("meshlet_instance_material_ids"));
                buffer
            },
            instance_bvh_root_nodes: {
                let mut buffer = StorageBuffer::default();
                buffer.set_label(Some("meshlet_instance_bvh_root_nodes"));
                buffer
            },
            view_instance_visibility: EntityHashMap::default(),
            next_material_id: 0,
            material_id_lookup: HashMap::default(),
            material_ids_present_in_scene: HashSet::default(),
        }
    }

    /// Registers one meshlet mesh instance for this frame, building its
    /// [`MeshUniform`] and appending to every per-instance array in lockstep.
    pub fn add_instance(
        &mut self,
        instance: MainEntity,
        root_bvh_node: u32,
        aabb: MeshletAabb,
        bvh_depth: u32,
        transform: &GlobalTransform,
        previous_transform: Option<&PreviousGlobalTransform>,
        render_layers: Option<&RenderLayers>,
        mesh_material_ids: &RenderMaterialInstances,
        render_material_bindings: &RenderMaterialBindings,
        not_shadow_receiver: bool,
        not_shadow_caster: bool,
    ) {
        // Build a MeshUniform for the instance
        let transform = transform.affine();
        // Fall back to the current transform when no previous one exists.
        let previous_transform = previous_transform.map(|t| t.0).unwrap_or(transform);
        let mut flags = if not_shadow_receiver {
            MeshFlags::empty()
        } else {
            MeshFlags::SHADOW_RECEIVER
        };
        if transform.matrix3.determinant().is_sign_positive() {
            flags |= MeshFlags::SIGN_DETERMINANT_MODEL_3X3;
        }
        let transforms = MeshTransforms {
            world_from_local: (&transform).into(),
            previous_world_from_local: (&previous_transform).into(),
            flags: flags.bits(),
        };
        let mesh_material = mesh_material_ids.mesh_material(instance);
        let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() {
            render_material_bindings
                .get(&mesh_material)
                .cloned()
                .unwrap_or_default()
        } else {
            // Use a dummy binding ID if the mesh has no material
            MaterialBindingId::default()
        };
        let mesh_uniform = MeshUniform::new(
            &transforms,
            0,
            mesh_material_binding_id.slot,
            None,
            None,
            None,
        );

        // Append instance data (all the parallel arrays must stay the same length).
        self.instances.push((
            instance,
            // `unwrap_or_default()` instead of `unwrap_or(RenderLayers::default())`:
            // avoids eagerly constructing the default when layers are present.
            render_layers.cloned().unwrap_or_default(),
            not_shadow_caster,
        ));
        self.instance_uniforms.get_mut().push(mesh_uniform);
        self.instance_aabbs.get_mut().push(aabb);
        // Placeholder; patched later by `queue_material_meshlet_meshes`.
        self.instance_material_ids.get_mut().push(0);
        self.instance_bvh_root_nodes.get_mut().push(root_bvh_node);
        self.scene_instance_count += 1;
        self.max_bvh_depth = self.max_bvh_depth.max(bvh_depth);
    }

    /// Get the material ID for a [`crate::Material`], allocating a fresh ID
    /// (starting at 1) the first time an asset is seen.
    pub fn get_material_id(&mut self, material_asset_id: UntypedAssetId) -> u32 {
        *self
            .material_id_lookup
            .entry(material_asset_id)
            .or_insert_with(|| {
                self.next_material_id += 1;
                self.next_material_id
            })
    }

    /// Returns whether at least one entity in the scene uses the given material ID.
    pub fn material_present_in_scene(&self, material_id: &u32) -> bool {
        self.material_ids_present_in_scene.contains(material_id)
    }

    /// Clears all per-frame state; per-view visibility buffers are kept only
    /// for view entities that still exist.
    pub fn reset(&mut self, entities: &Entities) {
        self.scene_instance_count = 0;
        self.max_bvh_depth = 0;
        self.instances.clear();
        self.instance_uniforms.get_mut().clear();
        self.instance_aabbs.get_mut().clear();
        self.instance_material_ids.get_mut().clear();
        self.instance_bvh_root_nodes.get_mut().clear();
        self.view_instance_visibility
            .retain(|view_entity, _| entities.contains(*view_entity));
        self.view_instance_visibility
            .values_mut()
            .for_each(|b| b.get_mut().clear());
        self.next_material_id = 0;
        self.material_id_lookup.clear();
        self.material_ids_present_in_scene.clear();
    }
}
/// Extracts every main-world entity with a [`MeshletMesh3d`] into the render
/// world's [`InstanceManager`], queueing GPU uploads for meshlet mesh assets
/// that aren't resident yet and freeing space for modified/unused ones.
pub fn extract_meshlet_mesh_entities(
    mut meshlet_mesh_manager: ResMut<MeshletMeshManager>,
    mut instance_manager: ResMut<InstanceManager>,
    // TODO: Replace main_world and system_state when Extract<ResMut<Assets<MeshletMesh>>> is possible
    mut main_world: ResMut<MainWorld>,
    mesh_material_ids: Res<RenderMaterialInstances>,
    render_material_bindings: Res<RenderMaterialBindings>,
    mut system_state: Local<
        Option<
            SystemState<(
                Query<(
                    Entity,
                    &MeshletMesh3d,
                    &GlobalTransform,
                    Option<&PreviousGlobalTransform>,
                    Option<&RenderLayers>,
                    Has<NotShadowReceiver>,
                    Has<NotShadowCaster>,
                )>,
                Res<AssetServer>,
                ResMut<Assets<MeshletMesh>>,
                MessageReader<AssetEvent<MeshletMesh>>,
            )>,
        >,
    >,
    render_entities: &Entities,
) {
    // Get instances query; the SystemState is lazily built on first run and
    // cached in the Local afterwards.
    if system_state.is_none() {
        *system_state = Some(SystemState::new(&mut main_world));
    }
    let system_state = system_state.as_mut().unwrap();
    let (instances_query, asset_server, mut assets, mut asset_events) =
        system_state.get_mut(&mut main_world);

    // Reset per-frame data
    instance_manager.reset(render_entities);

    // Free GPU buffer space for any modified or dropped MeshletMesh assets
    for asset_event in asset_events.read() {
        if let AssetEvent::Unused { id } | AssetEvent::Modified { id } = asset_event {
            meshlet_mesh_manager.remove(id);
        }
    }

    // Iterate over every instance
    // TODO: Switch to change events to not upload every instance every frame.
    for (
        instance,
        meshlet_mesh,
        transform,
        previous_transform,
        render_layers,
        not_shadow_receiver,
        not_shadow_caster,
    ) in &instances_query
    {
        // Skip instances with an unloaded MeshletMesh asset
        // TODO: This is a semi-expensive check
        if asset_server.is_managed(meshlet_mesh.id())
            && !asset_server.is_loaded_with_dependencies(meshlet_mesh.id())
        {
            continue;
        }

        // Upload the instance's MeshletMesh asset data if not already done
        let (root_bvh_node, aabb, bvh_depth) =
            meshlet_mesh_manager.queue_upload_if_needed(meshlet_mesh.id(), &mut assets);

        // Add the instance's data to the instance manager
        instance_manager.add_instance(
            instance.into(),
            root_bvh_node,
            aabb,
            bvh_depth,
            transform,
            previous_transform,
            render_layers,
            &mesh_material_ids,
            &render_material_bindings,
            not_shadow_receiver,
            not_shadow_caster,
        );
    }
}
/// For each entity in the scene, record what material ID its material was assigned in the `prepare_material_meshlet_meshes` systems,
/// and note that the material is used by at least one entity in the scene.
pub fn queue_material_meshlet_meshes(
mut instance_manager: ResMut<InstanceManager>,
render_material_instances: Res<RenderMaterialInstances>,
) {
let instance_manager = instance_manager.deref_mut();
for (i, (instance, _, _)) in instance_manager.instances.iter().enumerate() {
if let Some(material_instance) = render_material_instances.instances.get(instance)
&& let Some(material_id) = instance_manager
.material_id_lookup
.get(&material_instance.asset_id)
{
instance_manager
.material_ids_present_in_scene
.insert(*material_id);
instance_manager.instance_material_ids.get_mut()[i] = *material_id;
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs | crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs | use super::{
pipelines::MeshletPipelines,
resource_manager::{MeshletViewBindGroups, MeshletViewResources},
};
use crate::{
meshlet::resource_manager::ResourceManager, LightEntity, ShadowView, ViewLightEntities,
};
use bevy_color::LinearRgba;
use bevy_core_pipeline::prepass::PreviousViewUniformOffset;
use bevy_ecs::{
query::QueryState,
world::{FromWorld, World},
};
use bevy_math::UVec2;
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_resource::*,
renderer::RenderContext,
view::{ViewDepthTexture, ViewUniformOffset},
};
/// Rasterize meshlets into a depth buffer, and optional visibility buffer + material depth buffer for shading passes.
pub struct MeshletVisibilityBufferRasterPassNode {
    // Cached query for camera views (gets the full raster + resolve treatment).
    main_view_query: QueryState<(
        &'static ExtractedCamera,
        &'static ViewDepthTexture,
        &'static ViewUniformOffset,
        &'static PreviousViewUniformOffset,
        &'static MeshletViewBindGroups,
        &'static MeshletViewResources,
        &'static ViewLightEntities,
    )>,
    // Cached query for shadow-casting light views (depth-only resolve).
    view_light_query: QueryState<(
        &'static ShadowView,
        &'static LightEntity,
        &'static ViewUniformOffset,
        &'static PreviousViewUniformOffset,
        &'static MeshletViewBindGroups,
        &'static MeshletViewResources,
    )>,
}
impl FromWorld for MeshletVisibilityBufferRasterPassNode {
    fn from_world(world: &mut World) -> Self {
        // Build the cached query states up front; `Node::update` keeps their
        // archetypes fresh each frame.
        let main_view_query = QueryState::new(world);
        let view_light_query = QueryState::new(world);
        Self {
            main_view_query,
            view_light_query,
        }
    }
}
impl Node for MeshletVisibilityBufferRasterPassNode {
    fn update(&mut self, world: &mut World) {
        // Keep the cached query states in sync with newly created archetypes.
        self.main_view_query.update_archetypes(world);
        self.view_light_query.update_archetypes(world);
    }

    // TODO: Reuse compute/render passes between logical passes where possible, as they're expensive
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        // Skip views that don't have the meshlet components prepared.
        let Ok((
            camera,
            view_depth,
            view_offset,
            previous_view_offset,
            meshlet_view_bind_groups,
            meshlet_view_resources,
            lights,
        )) = self.main_view_query.get_manual(world, graph.view_entity())
        else {
            return Ok(());
        };
        // All pipelines must be compiled before any meshlet work can run.
        let Some((
            clear_visibility_buffer_pipeline,
            clear_visibility_buffer_shadow_view_pipeline,
            first_instance_cull_pipeline,
            second_instance_cull_pipeline,
            first_bvh_cull_pipeline,
            second_bvh_cull_pipeline,
            first_meshlet_cull_pipeline,
            second_meshlet_cull_pipeline,
            downsample_depth_first_pipeline,
            downsample_depth_second_pipeline,
            downsample_depth_first_shadow_view_pipeline,
            downsample_depth_second_shadow_view_pipeline,
            visibility_buffer_software_raster_pipeline,
            visibility_buffer_software_raster_shadow_view_pipeline,
            visibility_buffer_hardware_raster_pipeline,
            visibility_buffer_hardware_raster_shadow_view_pipeline,
            visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline,
            resolve_depth_pipeline,
            resolve_depth_shadow_view_pipeline,
            resolve_material_depth_pipeline,
            remap_1d_to_2d_dispatch_pipeline,
            fill_counts_pipeline,
        )) = MeshletPipelines::get(world)
        else {
            return Ok(());
        };

        let diagnostics = render_context.diagnostic_recorder();

        render_context
            .command_encoder()
            .push_debug_group("meshlet_visibility_buffer_raster");
        let time_span = diagnostics.time_span(
            render_context.command_encoder(),
            "meshlet_visibility_buffer_raster",
        );

        // Reset the shared previous-cluster-count buffer before any view runs.
        let resource_manager = world.get_resource::<ResourceManager>().unwrap();
        render_context.command_encoder().clear_buffer(
            &resource_manager.visibility_buffer_raster_cluster_prev_counts,
            0,
            None,
        );

        clear_visibility_buffer_pass(
            render_context,
            &meshlet_view_bind_groups.clear_visibility_buffer,
            clear_visibility_buffer_pipeline,
            meshlet_view_resources.view_size,
        );

        // First pass: cull (instances -> BVH -> meshlets) and rasterize.
        render_context
            .command_encoder()
            .push_debug_group("meshlet_first_pass");
        first_cull(
            render_context,
            meshlet_view_bind_groups,
            meshlet_view_resources,
            view_offset,
            previous_view_offset,
            first_instance_cull_pipeline,
            first_bvh_cull_pipeline,
            first_meshlet_cull_pipeline,
            remap_1d_to_2d_dispatch_pipeline,
        );
        raster_pass(
            true,
            render_context,
            &meshlet_view_resources.visibility_buffer_software_raster_indirect_args,
            &meshlet_view_resources.visibility_buffer_hardware_raster_indirect_args,
            &meshlet_view_resources.dummy_render_target.default_view,
            meshlet_view_bind_groups,
            view_offset,
            visibility_buffer_software_raster_pipeline,
            visibility_buffer_hardware_raster_pipeline,
            fill_counts_pipeline,
            Some(camera),
            meshlet_view_resources.rightmost_slot,
        );
        render_context.command_encoder().pop_debug_group();

        // Rebuild the depth pyramid from the first pass's output so the second
        // pass can occlusion-test against it.
        meshlet_view_resources.depth_pyramid.downsample_depth(
            "downsample_depth",
            render_context,
            meshlet_view_resources.view_size,
            &meshlet_view_bind_groups.downsample_depth,
            downsample_depth_first_pipeline,
            downsample_depth_second_pipeline,
        );

        // Second pass: re-cull and rasterize against the updated pyramid.
        render_context
            .command_encoder()
            .push_debug_group("meshlet_second_pass");
        second_cull(
            render_context,
            meshlet_view_bind_groups,
            meshlet_view_resources,
            view_offset,
            previous_view_offset,
            second_instance_cull_pipeline,
            second_bvh_cull_pipeline,
            second_meshlet_cull_pipeline,
            remap_1d_to_2d_dispatch_pipeline,
        );
        raster_pass(
            false,
            render_context,
            &meshlet_view_resources.visibility_buffer_software_raster_indirect_args,
            &meshlet_view_resources.visibility_buffer_hardware_raster_indirect_args,
            &meshlet_view_resources.dummy_render_target.default_view,
            meshlet_view_bind_groups,
            view_offset,
            visibility_buffer_software_raster_pipeline,
            visibility_buffer_hardware_raster_pipeline,
            fill_counts_pipeline,
            Some(camera),
            meshlet_view_resources.rightmost_slot,
        );
        render_context.command_encoder().pop_debug_group();

        // Copy the rasterized depth into the view's depth texture, and write
        // the material depth used by the material shading passes.
        resolve_depth(
            render_context,
            view_depth.get_attachment(StoreOp::Store),
            meshlet_view_bind_groups,
            resolve_depth_pipeline,
            camera,
        );
        resolve_material_depth(
            render_context,
            meshlet_view_resources,
            meshlet_view_bind_groups,
            resolve_material_depth_pipeline,
            camera,
        );
        // Final pyramid rebuild so next frame's first pass starts from this
        // frame's complete depth.
        meshlet_view_resources.depth_pyramid.downsample_depth(
            "downsample_depth",
            render_context,
            meshlet_view_resources.view_size,
            &meshlet_view_bind_groups.downsample_depth,
            downsample_depth_first_pipeline,
            downsample_depth_second_pipeline,
        );
        render_context.command_encoder().pop_debug_group();

        // Repeat the whole two-pass pipeline for each shadow-casting light view
        // (depth only; no material depth resolve).
        for light_entity in &lights.lights {
            let Ok((
                shadow_view,
                light_type,
                view_offset,
                previous_view_offset,
                meshlet_view_bind_groups,
                meshlet_view_resources,
            )) = self.view_light_query.get_manual(world, *light_entity)
            else {
                continue;
            };

            // Directional light views use the unclipped-depth hardware raster variant.
            let shadow_visibility_buffer_hardware_raster_pipeline =
                if let LightEntity::Directional { .. } = light_type {
                    visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline
                } else {
                    visibility_buffer_hardware_raster_shadow_view_pipeline
                };

            render_context.command_encoder().push_debug_group(&format!(
                "meshlet_visibility_buffer_raster: {}",
                shadow_view.pass_name
            ));
            let time_span_shadow = diagnostics.time_span(
                render_context.command_encoder(),
                shadow_view.pass_name.clone(),
            );

            clear_visibility_buffer_pass(
                render_context,
                &meshlet_view_bind_groups.clear_visibility_buffer,
                clear_visibility_buffer_shadow_view_pipeline,
                meshlet_view_resources.view_size,
            );

            render_context
                .command_encoder()
                .push_debug_group("meshlet_first_pass");
            first_cull(
                render_context,
                meshlet_view_bind_groups,
                meshlet_view_resources,
                view_offset,
                previous_view_offset,
                first_instance_cull_pipeline,
                first_bvh_cull_pipeline,
                first_meshlet_cull_pipeline,
                remap_1d_to_2d_dispatch_pipeline,
            );
            raster_pass(
                true,
                render_context,
                &meshlet_view_resources.visibility_buffer_software_raster_indirect_args,
                &meshlet_view_resources.visibility_buffer_hardware_raster_indirect_args,
                &meshlet_view_resources.dummy_render_target.default_view,
                meshlet_view_bind_groups,
                view_offset,
                visibility_buffer_software_raster_shadow_view_pipeline,
                shadow_visibility_buffer_hardware_raster_pipeline,
                fill_counts_pipeline,
                None,
                meshlet_view_resources.rightmost_slot,
            );
            render_context.command_encoder().pop_debug_group();

            meshlet_view_resources.depth_pyramid.downsample_depth(
                "downsample_depth",
                render_context,
                meshlet_view_resources.view_size,
                &meshlet_view_bind_groups.downsample_depth,
                downsample_depth_first_shadow_view_pipeline,
                downsample_depth_second_shadow_view_pipeline,
            );

            render_context
                .command_encoder()
                .push_debug_group("meshlet_second_pass");
            second_cull(
                render_context,
                meshlet_view_bind_groups,
                meshlet_view_resources,
                view_offset,
                previous_view_offset,
                second_instance_cull_pipeline,
                second_bvh_cull_pipeline,
                second_meshlet_cull_pipeline,
                remap_1d_to_2d_dispatch_pipeline,
            );
            raster_pass(
                false,
                render_context,
                &meshlet_view_resources.visibility_buffer_software_raster_indirect_args,
                &meshlet_view_resources.visibility_buffer_hardware_raster_indirect_args,
                &meshlet_view_resources.dummy_render_target.default_view,
                meshlet_view_bind_groups,
                view_offset,
                visibility_buffer_software_raster_shadow_view_pipeline,
                shadow_visibility_buffer_hardware_raster_pipeline,
                fill_counts_pipeline,
                None,
                meshlet_view_resources.rightmost_slot,
            );
            render_context.command_encoder().pop_debug_group();

            resolve_depth(
                render_context,
                shadow_view.depth_attachment.get_attachment(StoreOp::Store),
                meshlet_view_bind_groups,
                resolve_depth_shadow_view_pipeline,
                // NOTE(review): this is the *main view's* camera, not a per-light
                // one — confirm shadow views are not meant to apply their own viewport.
                camera,
            );
            meshlet_view_resources.depth_pyramid.downsample_depth(
                "downsample_depth",
                render_context,
                meshlet_view_resources.view_size,
                &meshlet_view_bind_groups.downsample_depth,
                downsample_depth_first_shadow_view_pipeline,
                downsample_depth_second_shadow_view_pipeline,
            );
            render_context.command_encoder().pop_debug_group();
            time_span_shadow.end(render_context.command_encoder());
        }
        time_span.end(render_context.command_encoder());
        Ok(())
    }
}
// TODO: Replace this with vkCmdClearColorImage once wgpu supports it
fn clear_visibility_buffer_pass(
    render_context: &mut RenderContext,
    clear_visibility_buffer_bind_group: &BindGroup,
    clear_visibility_buffer_pipeline: &ComputePipeline,
    view_size: UVec2,
) {
    // One workgroup per 16x16 pixel tile, rounded up to cover the whole view.
    let workgroups_x = view_size.x.div_ceil(16);
    let workgroups_y = view_size.y.div_ceil(16);
    let mut pass = render_context
        .command_encoder()
        .begin_compute_pass(&ComputePassDescriptor {
            label: Some("clear_visibility_buffer"),
            timestamp_writes: None,
        });
    pass.set_pipeline(clear_visibility_buffer_pipeline);
    pass.set_push_constants(0, bytemuck::bytes_of(&view_size));
    pass.set_bind_group(0, clear_visibility_buffer_bind_group, &[]);
    pass.dispatch_workgroups(workgroups_x, workgroups_y, 1);
}
/// First-pass culling chain: instances, then BVH nodes level by level, then
/// individual meshlets.
fn first_cull(
    render_context: &mut RenderContext,
    meshlet_view_bind_groups: &MeshletViewBindGroups,
    meshlet_view_resources: &MeshletViewResources,
    view_offset: &ViewUniformOffset,
    previous_view_offset: &PreviousViewUniformOffset,
    first_instance_cull_pipeline: &ComputePipeline,
    first_bvh_cull_pipeline: &ComputePipeline,
    first_meshlet_cull_pipeline: &ComputePipeline,
    remap_1d_to_2d_pipeline: Option<&ComputePipeline>,
) {
    // One thread per instance, 128 threads per workgroup.
    let workgroups = meshlet_view_resources.scene_instance_count.div_ceil(128);
    cull_pass(
        "meshlet_first_instance_cull",
        render_context,
        &meshlet_view_bind_groups.first_instance_cull,
        view_offset,
        previous_view_offset,
        first_instance_cull_pipeline,
        &[meshlet_view_resources.scene_instance_count],
    )
    .dispatch_workgroups(workgroups, 1, 1);

    // BVH culling ping-pongs between front/back queues, one indirect dispatch
    // per BVH level (max_bvh_depth bounds the number of dependent dispatches).
    render_context
        .command_encoder()
        .push_debug_group("meshlet_first_bvh_cull");
    let mut ping = true;
    for _ in 0..meshlet_view_resources.max_bvh_depth {
        cull_pass(
            "meshlet_first_bvh_cull_dispatch",
            render_context,
            if ping {
                &meshlet_view_bind_groups.first_bvh_cull_ping
            } else {
                &meshlet_view_bind_groups.first_bvh_cull_pong
            },
            view_offset,
            previous_view_offset,
            first_bvh_cull_pipeline,
            &[ping as u32, meshlet_view_resources.rightmost_slot],
        )
        .dispatch_workgroups_indirect(
            if ping {
                &meshlet_view_resources.first_bvh_cull_dispatch_front
            } else {
                &meshlet_view_resources.first_bvh_cull_dispatch_back
            },
            0,
        );
        // Reset the just-consumed queue's count and indirect dispatch args
        // (first 4 bytes of each) so the queue can be reused.
        render_context.command_encoder().clear_buffer(
            if ping {
                &meshlet_view_resources.first_bvh_cull_count_front
            } else {
                &meshlet_view_resources.first_bvh_cull_count_back
            },
            0,
            Some(4),
        );
        render_context.command_encoder().clear_buffer(
            if ping {
                &meshlet_view_resources.first_bvh_cull_dispatch_front
            } else {
                &meshlet_view_resources.first_bvh_cull_dispatch_back
            },
            0,
            Some(4),
        );
        ping = !ping;
    }
    render_context.command_encoder().pop_debug_group();

    // Cull the meshlets that survived BVH traversal (GPU-driven workgroup count).
    let mut pass = cull_pass(
        "meshlet_first_meshlet_cull",
        render_context,
        &meshlet_view_bind_groups.first_meshlet_cull,
        view_offset,
        previous_view_offset,
        first_meshlet_cull_pipeline,
        &[meshlet_view_resources.rightmost_slot],
    );
    pass.dispatch_workgroups_indirect(&meshlet_view_resources.front_meshlet_cull_dispatch, 0);
    // No-op when the remap pipeline/bind group isn't present for this platform.
    remap_1d_to_2d(
        pass,
        remap_1d_to_2d_pipeline,
        meshlet_view_bind_groups.remap_1d_to_2d_dispatch.as_ref(),
    );
}
/// Second-pass culling chain; mirrors [`first_cull`] but every stage is
/// dispatched indirectly from GPU-written args, and the queue buffers aren't
/// cleared (this is the last use of them this frame).
fn second_cull(
    render_context: &mut RenderContext,
    meshlet_view_bind_groups: &MeshletViewBindGroups,
    meshlet_view_resources: &MeshletViewResources,
    view_offset: &ViewUniformOffset,
    previous_view_offset: &PreviousViewUniformOffset,
    second_instance_cull_pipeline: &ComputePipeline,
    second_bvh_cull_pipeline: &ComputePipeline,
    second_meshlet_cull_pipeline: &ComputePipeline,
    remap_1d_to_2d_pipeline: Option<&ComputePipeline>,
) {
    // Instance cull: workgroup count comes from the GPU-written `second_pass_dispatch`.
    cull_pass(
        "meshlet_second_instance_cull",
        render_context,
        &meshlet_view_bind_groups.second_instance_cull,
        view_offset,
        previous_view_offset,
        second_instance_cull_pipeline,
        &[meshlet_view_resources.scene_instance_count],
    )
    .dispatch_workgroups_indirect(&meshlet_view_resources.second_pass_dispatch, 0);

    // BVH culling ping-pongs between front/back queues, one dispatch per level.
    render_context
        .command_encoder()
        .push_debug_group("meshlet_second_bvh_cull");
    let mut ping = true;
    for _ in 0..meshlet_view_resources.max_bvh_depth {
        cull_pass(
            "meshlet_second_bvh_cull_dispatch",
            render_context,
            if ping {
                &meshlet_view_bind_groups.second_bvh_cull_ping
            } else {
                &meshlet_view_bind_groups.second_bvh_cull_pong
            },
            view_offset,
            previous_view_offset,
            second_bvh_cull_pipeline,
            &[ping as u32, meshlet_view_resources.rightmost_slot],
        )
        .dispatch_workgroups_indirect(
            if ping {
                &meshlet_view_resources.second_bvh_cull_dispatch_front
            } else {
                &meshlet_view_resources.second_bvh_cull_dispatch_back
            },
            0,
        );
        ping = !ping;
    }
    render_context.command_encoder().pop_debug_group();

    // Cull the surviving meshlets.
    let mut pass = cull_pass(
        "meshlet_second_meshlet_cull",
        render_context,
        &meshlet_view_bind_groups.second_meshlet_cull,
        view_offset,
        previous_view_offset,
        second_meshlet_cull_pipeline,
        &[meshlet_view_resources.rightmost_slot],
    );
    pass.dispatch_workgroups_indirect(&meshlet_view_resources.back_meshlet_cull_dispatch, 0);
    // No-op when the remap pipeline/bind group isn't present for this platform.
    remap_1d_to_2d(
        pass,
        remap_1d_to_2d_pipeline,
        meshlet_view_bind_groups.remap_1d_to_2d_dispatch.as_ref(),
    );
}
/// Begins a compute pass pre-configured for a culling stage: pipeline set,
/// view uniforms bound at group 0, and push constants uploaded. The caller
/// issues the dispatch on the returned pass.
fn cull_pass<'a>(
    label: &'static str,
    render_context: &'a mut RenderContext,
    bind_group: &'a BindGroup,
    view_offset: &'a ViewUniformOffset,
    previous_view_offset: &'a PreviousViewUniformOffset,
    pipeline: &'a ComputePipeline,
    push_constants: &[u32],
) -> ComputePass<'a> {
    let mut pass = render_context
        .command_encoder()
        .begin_compute_pass(&ComputePassDescriptor {
            label: Some(label),
            timestamp_writes: None,
        });
    let dynamic_offsets = [view_offset.offset, previous_view_offset.offset];
    pass.set_pipeline(pipeline);
    pass.set_bind_group(0, bind_group, &dynamic_offsets);
    pass.set_push_constants(0, bytemuck::cast_slice(push_constants));
    pass
}
/// Runs the single-workgroup 1D→2D dispatch-args remap on `pass`.
/// Does nothing unless both the pipeline and its bind group are present.
fn remap_1d_to_2d(
    mut pass: ComputePass,
    pipeline: Option<&ComputePipeline>,
    bind_group: Option<&BindGroup>,
) {
    let Some(pipeline) = pipeline else { return };
    let Some(bind_group) = bind_group else { return };
    pass.set_pipeline(pipeline);
    pass.set_bind_group(0, bind_group, &[]);
    pass.dispatch_workgroups(1, 1, 1);
}
/// Rasterizes the selected clusters: an indirect software (compute) raster
/// dispatch, then an indirect hardware raster draw, then a single-workgroup
/// pass that fills the cluster count buffers.
fn raster_pass(
    first_pass: bool,
    render_context: &mut RenderContext,
    visibility_buffer_software_raster_indirect_args: &Buffer,
    visibility_buffer_hardware_raster_indirect_args: &Buffer,
    dummy_render_target: &TextureView,
    meshlet_view_bind_groups: &MeshletViewBindGroups,
    view_offset: &ViewUniformOffset,
    visibility_buffer_software_raster_pipeline: &ComputePipeline,
    visibility_buffer_hardware_raster_pipeline: &RenderPipeline,
    fill_counts_pipeline: &ComputePipeline,
    camera: Option<&ExtractedCamera>,
    raster_cluster_rightmost_slot: u32,
) {
    // Software rasterization via compute, indirect workgroup count.
    let mut software_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some(if first_pass {
                    "raster_software_first"
                } else {
                    "raster_software_second"
                }),
                timestamp_writes: None,
            });
    software_pass.set_pipeline(visibility_buffer_software_raster_pipeline);
    software_pass.set_bind_group(
        0,
        &meshlet_view_bind_groups.visibility_buffer_raster,
        &[view_offset.offset],
    );
    software_pass.dispatch_workgroups_indirect(visibility_buffer_software_raster_indirect_args, 0);
    drop(software_pass);

    // Hardware rasterization via indirect draw. The color attachment is a
    // dummy target (cleared, then immediately discarded).
    let mut hardware_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
        label: Some(if first_pass {
            "raster_hardware_first"
        } else {
            "raster_hardware_second"
        }),
        color_attachments: &[Some(RenderPassColorAttachment {
            view: dummy_render_target,
            depth_slice: None,
            resolve_target: None,
            ops: Operations {
                load: LoadOp::Clear(LinearRgba::BLACK.into()),
                store: StoreOp::Discard,
            },
        })],
        depth_stencil_attachment: None,
        timestamp_writes: None,
        occlusion_query_set: None,
    });
    // Shadow views pass `camera: None` and therefore skip the viewport.
    if let Some(viewport) = camera.and_then(|camera| camera.viewport.as_ref()) {
        hardware_pass.set_camera_viewport(viewport);
    }
    hardware_pass.set_render_pipeline(visibility_buffer_hardware_raster_pipeline);
    hardware_pass.set_push_constants(
        ShaderStages::VERTEX,
        0,
        &raster_cluster_rightmost_slot.to_le_bytes(),
    );
    hardware_pass.set_bind_group(
        0,
        &meshlet_view_bind_groups.visibility_buffer_raster,
        &[view_offset.offset],
    );
    hardware_pass.draw_indirect(visibility_buffer_hardware_raster_indirect_args, 0);
    drop(hardware_pass);

    // Single-workgroup pass over the `fill_counts` bind group to record the
    // cluster counts for this raster pass.
    let mut fill_counts_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some("fill_counts"),
                timestamp_writes: None,
            });
    fill_counts_pass.set_pipeline(fill_counts_pipeline);
    fill_counts_pass.set_bind_group(0, &meshlet_view_bind_groups.fill_counts, &[]);
    fill_counts_pass.dispatch_workgroups(1, 1, 1);
}
fn resolve_depth(
render_context: &mut RenderContext,
depth_stencil_attachment: RenderPassDepthStencilAttachment,
meshlet_view_bind_groups: &MeshletViewBindGroups,
resolve_depth_pipeline: &RenderPipeline,
camera: &ExtractedCamera,
) {
let mut resolve_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
label: Some("resolve_depth"),
color_attachments: &[],
depth_stencil_attachment: Some(depth_stencil_attachment),
timestamp_writes: None,
occlusion_query_set: None,
});
if let Some(viewport) = &camera.viewport {
resolve_pass.set_camera_viewport(viewport);
}
resolve_pass.set_render_pipeline(resolve_depth_pipeline);
resolve_pass.set_bind_group(0, &meshlet_view_bind_groups.resolve_depth, &[]);
resolve_pass.draw(0..3, 0..1);
}
fn resolve_material_depth(
render_context: &mut RenderContext,
meshlet_view_resources: &MeshletViewResources,
meshlet_view_bind_groups: &MeshletViewBindGroups,
resolve_material_depth_pipeline: &RenderPipeline,
camera: &ExtractedCamera,
) {
if let (Some(material_depth), Some(resolve_material_depth_bind_group)) = (
meshlet_view_resources.material_depth.as_ref(),
meshlet_view_bind_groups.resolve_material_depth.as_ref(),
) {
let mut resolve_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
label: Some("resolve_material_depth"),
color_attachments: &[],
depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
view: &material_depth.default_view,
depth_ops: Some(Operations {
load: LoadOp::Clear(0.0),
store: StoreOp::Store,
}),
stencil_ops: None,
}),
timestamp_writes: None,
occlusion_query_set: None,
});
if let Some(viewport) = &camera.viewport {
resolve_pass.set_camera_viewport(viewport);
}
resolve_pass.set_render_pipeline(resolve_material_depth_pipeline);
resolve_pass.set_bind_group(0, resolve_material_depth_bind_group, &[]);
resolve_pass.draw(0..3, 0..1);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/asset.rs | crates/bevy_pbr/src/meshlet/asset.rs | use alloc::sync::Arc;
use bevy_asset::{
io::{Reader, Writer},
saver::{AssetSaver, SavedAsset},
Asset, AssetLoader, AsyncReadExt, AsyncWriteExt, LoadContext,
};
use bevy_math::{Vec2, Vec3};
use bevy_reflect::TypePath;
use bevy_render::render_resource::ShaderType;
use bevy_tasks::block_on;
use bytemuck::{Pod, Zeroable};
use lz4_flex::frame::{FrameDecoder, FrameEncoder};
use std::io::{Read, Write};
use thiserror::Error;
/// Unique identifier for the [`MeshletMesh`] asset format.
const MESHLET_MESH_ASSET_MAGIC: u64 = 1717551717668;
/// The current version of the [`MeshletMesh`] asset format.
pub const MESHLET_MESH_ASSET_VERSION: u64 = 3;
/// A mesh that has been pre-processed into multiple small clusters of triangles called meshlets.
///
/// A [`bevy_mesh::Mesh`] can be converted to a [`MeshletMesh`] using `MeshletMesh::from_mesh` when the `meshlet_processor` cargo feature is enabled.
/// The conversion step is very slow, and is meant to be ran once ahead of time, and not during runtime. This type of mesh is not suitable for
/// dynamically generated geometry.
///
/// There are restrictions on the [`Material`](`crate::Material`) functionality that can be used with this type of mesh.
/// * Materials have no control over the vertex shader or vertex attributes.
/// * Materials must be opaque. Transparent, alpha masked, and transmissive materials are not supported.
/// * Do not use normal maps baked from higher-poly geometry. Use the high-poly geometry directly and skip the normal map.
/// * If additional detail is needed, a smaller tiling normal map not baked from a mesh is ok.
/// * Material shaders must not use builtin functions that automatically calculate derivatives <https://gpuweb.github.io/gpuweb/wgsl/#derivatives>.
/// * Performing manual arithmetic on texture coordinates (UVs) is forbidden. Use the chain-rule version of arithmetic functions instead (TODO: not yet implemented).
/// * Limited control over [`bevy_render::render_resource::RenderPipelineDescriptor`] attributes.
/// * Materials must use the [`Material::meshlet_mesh_fragment_shader`](`crate::Material::meshlet_mesh_fragment_shader`) method (and similar variants for prepass/deferred shaders)
/// which requires certain shader patterns that differ from the regular material shaders.
///
/// See also [`MeshletMesh3d`](`super::MeshletMesh3d`) and [`MeshletPlugin`](`super::MeshletPlugin`).
#[derive(Asset, TypePath, Clone)]
pub struct MeshletMesh {
/// Quantized and bitstream-packed vertex positions for meshlet vertices.
pub(crate) vertex_positions: Arc<[u32]>,
/// Octahedral-encoded and 2x16snorm packed normals for meshlet vertices.
pub(crate) vertex_normals: Arc<[u32]>,
/// Uncompressed vertex texture coordinates for meshlet vertices.
pub(crate) vertex_uvs: Arc<[Vec2]>,
/// Triangle indices for meshlets.
pub(crate) indices: Arc<[u8]>,
/// The BVH8 used for culling and LOD selection of the meshlets. The root is at index 0.
pub(crate) bvh: Arc<[BvhNode]>,
/// The list of meshlets making up this mesh.
pub(crate) meshlets: Arc<[Meshlet]>,
/// Spherical bounding volumes.
pub(crate) meshlet_cull_data: Arc<[MeshletCullData]>,
/// The tight AABB of the meshlet mesh, used for frustum and occlusion culling at the instance
/// level.
pub(crate) aabb: MeshletAabb,
/// The depth of the culling BVH, used to determine the number of dispatches at runtime.
pub(crate) bvh_depth: u32,
}
/// A single BVH8 node in the BVH used for culling and LOD selection of a [`MeshletMesh`].
#[derive(Copy, Clone, Default, Pod, Zeroable)]
#[repr(C)]
pub struct BvhNode {
/// The tight AABBs of this node's children, used for frustum and occlusion during BVH
/// traversal.
pub aabbs: [MeshletAabbErrorOffset; 8],
/// The LOD bounding spheres of this node's children, used for LOD selection during BVH
/// traversal.
pub lod_bounds: [MeshletBoundingSphere; 8],
/// If `u8::MAX`, it indicates that the child of each children is a BVH node, otherwise it is the number of meshlets in the group.
pub child_counts: [u8; 8],
pub _padding: [u32; 2],
}
/// A single meshlet within a [`MeshletMesh`].
#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C)]
pub struct Meshlet {
/// The bit offset within the parent mesh's [`MeshletMesh::vertex_positions`] buffer where the vertex positions for this meshlet begin.
pub start_vertex_position_bit: u32,
/// The offset within the parent mesh's [`MeshletMesh::vertex_normals`] and [`MeshletMesh::vertex_uvs`] buffers
/// where non-position vertex attributes for this meshlet begin.
pub start_vertex_attribute_id: u32,
/// The offset within the parent mesh's [`MeshletMesh::indices`] buffer where the indices for this meshlet begin.
pub start_index_id: u32,
/// The amount of vertices in this meshlet (minus one to fit 256 in a u8).
pub vertex_count_minus_one: u8,
/// The amount of triangles in this meshlet.
pub triangle_count: u8,
/// Unused.
pub padding: u16,
/// Number of bits used to store the X channel of vertex positions within this meshlet.
pub bits_per_vertex_position_channel_x: u8,
/// Number of bits used to store the Y channel of vertex positions within this meshlet.
pub bits_per_vertex_position_channel_y: u8,
/// Number of bits used to store the Z channel of vertex positions within this meshlet.
pub bits_per_vertex_position_channel_z: u8,
/// Power of 2 factor used to quantize vertex positions within this meshlet.
pub vertex_position_quantization_factor: u8,
/// Minimum quantized X channel value of vertex positions within this meshlet.
pub min_vertex_position_channel_x: f32,
/// Minimum quantized Y channel value of vertex positions within this meshlet.
pub min_vertex_position_channel_y: f32,
/// Minimum quantized Z channel value of vertex positions within this meshlet.
pub min_vertex_position_channel_z: f32,
}
/// Bounding spheres used for culling and choosing level of detail for a [`Meshlet`].
#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C)]
pub struct MeshletCullData {
/// Tight bounding box, used for frustum and occlusion culling for this meshlet.
pub aabb: MeshletAabbErrorOffset,
/// Bounding sphere used for determining if this meshlet's group is at the correct level of detail for a given view.
pub lod_group_sphere: MeshletBoundingSphere,
}
/// An axis-aligned bounding box used for a [`Meshlet`].
#[derive(Copy, Clone, Default, Pod, Zeroable, ShaderType)]
#[repr(C)]
pub struct MeshletAabb {
pub center: Vec3,
pub half_extent: Vec3,
}
// An axis-aligned bounding box used for a [`Meshlet`].
#[derive(Copy, Clone, Default, Pod, Zeroable, ShaderType)]
#[repr(C)]
pub struct MeshletAabbErrorOffset {
pub center: Vec3,
pub error: f32,
pub half_extent: Vec3,
pub child_offset: u32,
}
/// A spherical bounding volume used for a [`Meshlet`].
#[derive(Copy, Clone, Default, Pod, Zeroable)]
#[repr(C)]
pub struct MeshletBoundingSphere {
pub center: Vec3,
pub radius: f32,
}
/// An [`AssetSaver`] for `.meshlet_mesh` [`MeshletMesh`] assets.
#[derive(TypePath)]
pub struct MeshletMeshSaver;
impl AssetSaver for MeshletMeshSaver {
type Asset = MeshletMesh;
type Settings = ();
type OutputLoader = MeshletMeshLoader;
type Error = MeshletMeshSaveOrLoadError;
async fn save(
&self,
writer: &mut Writer,
asset: SavedAsset<'_, MeshletMesh>,
_settings: &(),
) -> Result<(), MeshletMeshSaveOrLoadError> {
// Write asset magic number
writer
.write_all(&MESHLET_MESH_ASSET_MAGIC.to_le_bytes())
.await?;
// Write asset version
writer
.write_all(&MESHLET_MESH_ASSET_VERSION.to_le_bytes())
.await?;
writer.write_all(bytemuck::bytes_of(&asset.aabb)).await?;
writer
.write_all(bytemuck::bytes_of(&asset.bvh_depth))
.await?;
// Compress and write asset data
let mut writer = FrameEncoder::new(AsyncWriteSyncAdapter(writer));
write_slice(&asset.vertex_positions, &mut writer)?;
write_slice(&asset.vertex_normals, &mut writer)?;
write_slice(&asset.vertex_uvs, &mut writer)?;
write_slice(&asset.indices, &mut writer)?;
write_slice(&asset.bvh, &mut writer)?;
write_slice(&asset.meshlets, &mut writer)?;
write_slice(&asset.meshlet_cull_data, &mut writer)?;
// BUG: Flushing helps with an async_fs bug, but it still fails sometimes. https://github.com/smol-rs/async-fs/issues/45
// ERROR bevy_asset::server: Failed to load asset with asset loader MeshletMeshLoader: failed to fill whole buffer
writer.flush()?;
writer.finish()?;
Ok(())
}
}
/// An [`AssetLoader`] for `.meshlet_mesh` [`MeshletMesh`] assets.
#[derive(TypePath)]
pub struct MeshletMeshLoader;
impl AssetLoader for MeshletMeshLoader {
type Asset = MeshletMesh;
type Settings = ();
type Error = MeshletMeshSaveOrLoadError;
async fn load(
&self,
reader: &mut dyn Reader,
_settings: &(),
_load_context: &mut LoadContext<'_>,
) -> Result<MeshletMesh, MeshletMeshSaveOrLoadError> {
// Load and check magic number
let magic = async_read_u64(reader).await?;
if magic != MESHLET_MESH_ASSET_MAGIC {
return Err(MeshletMeshSaveOrLoadError::WrongFileType);
}
// Load and check asset version
let version = async_read_u64(reader).await?;
if version != MESHLET_MESH_ASSET_VERSION {
return Err(MeshletMeshSaveOrLoadError::WrongVersion { found: version });
}
let mut bytes = [0u8; size_of::<MeshletAabb>()];
reader.read_exact(&mut bytes).await?;
let aabb = bytemuck::cast(bytes);
let mut bytes = [0u8; size_of::<u32>()];
reader.read_exact(&mut bytes).await?;
let bvh_depth = u32::from_le_bytes(bytes);
// Load and decompress asset data
let reader = &mut FrameDecoder::new(AsyncReadSyncAdapter(reader));
let vertex_positions = read_slice(reader)?;
let vertex_normals = read_slice(reader)?;
let vertex_uvs = read_slice(reader)?;
let indices = read_slice(reader)?;
let bvh = read_slice(reader)?;
let meshlets = read_slice(reader)?;
let meshlet_cull_data = read_slice(reader)?;
Ok(MeshletMesh {
vertex_positions,
vertex_normals,
vertex_uvs,
indices,
bvh,
meshlets,
meshlet_cull_data,
aabb,
bvh_depth,
})
}
fn extensions(&self) -> &[&str] {
&["meshlet_mesh"]
}
}
#[derive(Error, Debug)]
pub enum MeshletMeshSaveOrLoadError {
#[error("file was not a MeshletMesh asset")]
WrongFileType,
#[error("expected asset version {MESHLET_MESH_ASSET_VERSION} but found version {found}")]
WrongVersion { found: u64 },
#[error("failed to compress or decompress asset data")]
CompressionOrDecompression(#[from] lz4_flex::frame::Error),
#[error(transparent)]
Io(#[from] std::io::Error),
}
async fn async_read_u64(reader: &mut dyn Reader) -> Result<u64, std::io::Error> {
let mut bytes = [0u8; 8];
reader.read_exact(&mut bytes).await?;
Ok(u64::from_le_bytes(bytes))
}
fn read_u64(reader: &mut dyn Read) -> Result<u64, std::io::Error> {
let mut bytes = [0u8; 8];
reader.read_exact(&mut bytes)?;
Ok(u64::from_le_bytes(bytes))
}
fn write_slice<T: Pod>(
field: &[T],
writer: &mut dyn Write,
) -> Result<(), MeshletMeshSaveOrLoadError> {
writer.write_all(&(field.len() as u64).to_le_bytes())?;
writer.write_all(bytemuck::cast_slice(field))?;
Ok(())
}
fn read_slice<T: Pod>(reader: &mut dyn Read) -> Result<Arc<[T]>, std::io::Error> {
let len = read_u64(reader)? as usize;
let mut data: Arc<[T]> = core::iter::repeat_with(T::zeroed).take(len).collect();
let slice = Arc::get_mut(&mut data).unwrap();
reader.read_exact(bytemuck::cast_slice_mut(slice))?;
Ok(data)
}
// TODO: Use async for everything and get rid of this adapter
struct AsyncWriteSyncAdapter<'a>(&'a mut Writer);
impl Write for AsyncWriteSyncAdapter<'_> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
block_on(self.0.write(buf))
}
fn flush(&mut self) -> std::io::Result<()> {
block_on(self.0.flush())
}
}
// TODO: Use async for everything and get rid of this adapter
struct AsyncReadSyncAdapter<'a>(&'a mut dyn Reader);
impl Read for AsyncReadSyncAdapter<'_> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
block_on(self.0.read(buf))
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs | crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs | use super::{
instance_manager::InstanceManager, pipelines::MeshletPipelines,
resource_manager::ResourceManager,
};
use crate::*;
use bevy_camera::{Camera3d, Projection};
use bevy_core_pipeline::{
prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass},
tonemapping::{DebandDither, Tonemapping},
};
use bevy_derive::{Deref, DerefMut};
use bevy_light::{EnvironmentMapLight, IrradianceVolume, ShadowFilteringMethod};
use bevy_mesh::VertexBufferLayout;
use bevy_mesh::{Mesh, MeshVertexBufferLayout, MeshVertexBufferLayoutRef, MeshVertexBufferLayouts};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_render::erased_render_asset::ErasedRenderAssets;
use bevy_render::{camera::TemporalJitter, render_resource::*, view::ExtractedView};
use bevy_utils::default;
use core::any::{Any, TypeId};
/// A list of `(Material ID, Pipeline, BindGroup)` for a view for use in [`MeshletMainOpaquePass3dNode`](`super::MeshletMainOpaquePass3dNode`).
#[derive(Component, Deref, DerefMut, Default)]
pub struct MeshletViewMaterialsMainOpaquePass(pub Vec<(u32, CachedRenderPipelineId, BindGroup)>);
/// Prepare [`Material`] pipelines for [`MeshletMesh`](`super::MeshletMesh`) entities for use in [`MeshletMainOpaquePass3dNode`](`super::MeshletMainOpaquePass3dNode`),
/// and register the material with [`InstanceManager`].
pub fn prepare_material_meshlet_meshes_main_opaque_pass(
resource_manager: ResMut<ResourceManager>,
mut instance_manager: ResMut<InstanceManager>,
mut cache: Local<HashMap<(MeshPipelineKey, TypeId), CachedRenderPipelineId>>,
pipeline_cache: Res<PipelineCache>,
material_pipeline: Res<MaterialPipeline>,
mesh_pipeline: Res<MeshPipeline>,
render_materials: Res<ErasedRenderAssets<PreparedMaterial>>,
meshlet_pipelines: Res<MeshletPipelines>,
render_material_instances: Res<RenderMaterialInstances>,
material_bind_group_allocators: Res<MaterialBindGroupAllocators>,
mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>,
mut views: Query<
(
&mut MeshletViewMaterialsMainOpaquePass,
&ExtractedView,
Option<&Tonemapping>,
Option<&DebandDither>,
Option<&ShadowFilteringMethod>,
(Has<ScreenSpaceAmbientOcclusion>, Has<DistanceFog>),
(
Has<NormalPrepass>,
Has<DepthPrepass>,
Has<MotionVectorPrepass>,
Has<DeferredPrepass>,
),
Has<TemporalJitter>,
Option<&Projection>,
Has<RenderViewLightProbes<EnvironmentMapLight>>,
Has<RenderViewLightProbes<IrradianceVolume>>,
),
With<Camera3d>,
>,
) {
let fake_vertex_buffer_layout = &fake_vertex_buffer_layout(&mut mesh_vertex_buffer_layouts);
for (
mut materials,
view,
tonemapping,
dither,
shadow_filter_method,
(ssao, distance_fog),
(normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass),
temporal_jitter,
projection,
has_environment_maps,
has_irradiance_volumes,
) in &mut views
{
let mut view_key =
MeshPipelineKey::from_msaa_samples(1) | MeshPipelineKey::from_hdr(view.hdr);
if normal_prepass {
view_key |= MeshPipelineKey::NORMAL_PREPASS;
}
if depth_prepass {
view_key |= MeshPipelineKey::DEPTH_PREPASS;
}
if motion_vector_prepass {
view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
}
if deferred_prepass {
view_key |= MeshPipelineKey::DEFERRED_PREPASS;
}
if temporal_jitter {
view_key |= MeshPipelineKey::TEMPORAL_JITTER;
}
if has_environment_maps {
view_key |= MeshPipelineKey::ENVIRONMENT_MAP;
}
if has_irradiance_volumes {
view_key |= MeshPipelineKey::IRRADIANCE_VOLUME;
}
if let Some(projection) = projection {
view_key |= match projection {
Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE,
Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC,
Projection::Custom(_) => MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD,
};
}
match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) {
ShadowFilteringMethod::Hardware2x2 => {
view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2;
}
ShadowFilteringMethod::Gaussian => {
view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN;
}
ShadowFilteringMethod::Temporal => {
view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL;
}
}
if !view.hdr {
if let Some(tonemapping) = tonemapping {
view_key |= MeshPipelineKey::TONEMAP_IN_SHADER;
view_key |= tonemapping_pipeline_key(*tonemapping);
}
if let Some(DebandDither::Enabled) = dither {
view_key |= MeshPipelineKey::DEBAND_DITHER;
}
}
if ssao {
view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION;
}
if distance_fog {
view_key |= MeshPipelineKey::DISTANCE_FOG;
}
view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList);
for material_id in render_material_instances
.instances
.values()
.map(|instance| instance.asset_id)
.collect::<HashSet<_>>()
{
let Some(material) = render_materials.get(material_id) else {
continue;
};
if material.properties.render_method != OpaqueRendererMethod::Forward
|| material.properties.alpha_mode != AlphaMode::Opaque
|| material.properties.reads_view_transmission_texture
{
continue;
}
let erased_key = ErasedMaterialPipelineKey {
mesh_key: view_key,
material_key: material.properties.material_key.clone(),
type_id: material_id.type_id(),
};
let material_pipeline_specializer = MaterialPipelineSpecializer {
pipeline: material_pipeline.clone(),
properties: material.properties.clone(),
};
let Ok(material_pipeline_descriptor) =
material_pipeline_specializer.specialize(erased_key, fake_vertex_buffer_layout)
else {
continue;
};
let material_fragment = material_pipeline_descriptor.fragment.unwrap();
let mut shader_defs = material_fragment.shader_defs;
shader_defs.push("MESHLET_MESH_MATERIAL_PASS".into());
let layout = mesh_pipeline.get_view_layout(view_key.into());
let layout = vec![
layout.main_layout.clone(),
layout.binding_array_layout.clone(),
resource_manager.material_shade_bind_group_layout.clone(),
material
.properties
.material_layout
.as_ref()
.unwrap()
.clone(),
];
let pipeline_descriptor = RenderPipelineDescriptor {
label: material_pipeline_descriptor.label,
layout,
push_constant_ranges: vec![],
vertex: VertexState {
shader: meshlet_pipelines.meshlet_mesh_material.clone(),
shader_defs: shader_defs.clone(),
entry_point: material_pipeline_descriptor.vertex.entry_point,
buffers: Vec::new(),
},
primitive: PrimitiveState::default(),
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth16Unorm,
depth_write_enabled: false,
depth_compare: CompareFunction::Equal,
stencil: StencilState::default(),
bias: DepthBiasState::default(),
}),
multisample: MultisampleState::default(),
fragment: Some(FragmentState {
shader: match material.properties.get_shader(MeshletFragmentShader) {
Some(shader) => shader.clone(),
None => meshlet_pipelines.meshlet_mesh_material.clone(),
},
shader_defs,
entry_point: material_fragment.entry_point,
targets: material_fragment.targets,
}),
zero_initialize_workgroup_memory: false,
};
let type_id = material_id.type_id();
let Some(material_bind_group_allocator) = material_bind_group_allocators.get(&type_id)
else {
continue;
};
let material_id = instance_manager.get_material_id(material_id);
let pipeline_id = *cache.entry((view_key, type_id)).or_insert_with(|| {
pipeline_cache.queue_render_pipeline(pipeline_descriptor.clone())
});
let Some(material_bind_group) =
material_bind_group_allocator.get(material.binding.group)
else {
continue;
};
let Some(bind_group) = material_bind_group.bind_group() else {
continue;
};
materials.push((material_id, pipeline_id, (*bind_group).clone()));
}
}
}
/// A list of `(Material ID, Pipeline, BindGroup)` for a view for use in [`MeshletPrepassNode`](`super::MeshletPrepassNode`).
#[derive(Component, Deref, DerefMut, Default)]
pub struct MeshletViewMaterialsPrepass(pub Vec<(u32, CachedRenderPipelineId, BindGroup)>);
/// A list of `(Material ID, Pipeline, BindGroup)` for a view for use in [`MeshletDeferredGBufferPrepassNode`](`super::MeshletDeferredGBufferPrepassNode`).
#[derive(Component, Deref, DerefMut, Default)]
pub struct MeshletViewMaterialsDeferredGBufferPrepass(
pub Vec<(u32, CachedRenderPipelineId, BindGroup)>,
);
/// Prepare [`Material`] pipelines for [`MeshletMesh`](`super::MeshletMesh`) entities for use in [`MeshletPrepassNode`](`super::MeshletPrepassNode`),
/// and [`MeshletDeferredGBufferPrepassNode`](`super::MeshletDeferredGBufferPrepassNode`) and register the material with [`InstanceManager`].
pub fn prepare_material_meshlet_meshes_prepass(
resource_manager: ResMut<ResourceManager>,
mut instance_manager: ResMut<InstanceManager>,
mut cache: Local<HashMap<(MeshPipelineKey, TypeId), CachedRenderPipelineId>>,
pipeline_cache: Res<PipelineCache>,
prepass_pipeline: Res<PrepassPipeline>,
material_bind_group_allocators: Res<MaterialBindGroupAllocators>,
render_materials: Res<ErasedRenderAssets<PreparedMaterial>>,
meshlet_pipelines: Res<MeshletPipelines>,
render_material_instances: Res<RenderMaterialInstances>,
mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>,
mut views: Query<
(
&mut MeshletViewMaterialsPrepass,
&mut MeshletViewMaterialsDeferredGBufferPrepass,
&ExtractedView,
AnyOf<(&NormalPrepass, &MotionVectorPrepass, &DeferredPrepass)>,
),
With<Camera3d>,
>,
) {
let fake_vertex_buffer_layout = &fake_vertex_buffer_layout(&mut mesh_vertex_buffer_layouts);
for (
mut materials,
mut deferred_materials,
view,
(normal_prepass, motion_vector_prepass, deferred_prepass),
) in &mut views
{
let mut view_key =
MeshPipelineKey::from_msaa_samples(1) | MeshPipelineKey::from_hdr(view.hdr);
if normal_prepass.is_some() {
view_key |= MeshPipelineKey::NORMAL_PREPASS;
}
if motion_vector_prepass.is_some() {
view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
}
view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList);
for material_id in render_material_instances
.instances
.values()
.map(|instance| instance.asset_id)
.collect::<HashSet<_>>()
{
let Some(material) = render_materials.get(material_id) else {
continue;
};
let Some(material_bind_group_allocator) =
material_bind_group_allocators.get(&material_id.type_id())
else {
continue;
};
if material.properties.alpha_mode != AlphaMode::Opaque
|| material.properties.reads_view_transmission_texture
{
continue;
}
let material_wants_deferred = matches!(
material.properties.render_method,
OpaqueRendererMethod::Deferred
);
if deferred_prepass.is_some() && material_wants_deferred {
view_key |= MeshPipelineKey::DEFERRED_PREPASS;
} else if normal_prepass.is_none() && motion_vector_prepass.is_none() {
continue;
}
let erased_key = ErasedMaterialPipelineKey {
mesh_key: view_key,
material_key: material.properties.material_key.clone(),
type_id: material_id.type_id(),
};
let material_pipeline_specializer = PrepassPipelineSpecializer {
pipeline: prepass_pipeline.clone(),
properties: material.properties.clone(),
};
let Ok(material_pipeline_descriptor) =
material_pipeline_specializer.specialize(erased_key, fake_vertex_buffer_layout)
else {
continue;
};
let material_fragment = material_pipeline_descriptor.fragment.unwrap();
let mut shader_defs = material_fragment.shader_defs;
shader_defs.push("MESHLET_MESH_MATERIAL_PASS".into());
let view_layout = if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
prepass_pipeline.view_layout_motion_vectors.clone()
} else {
prepass_pipeline.view_layout_no_motion_vectors.clone()
};
let fragment_shader = if view_key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
material
.properties
.get_shader(MeshletDeferredFragmentShader)
.unwrap_or(meshlet_pipelines.meshlet_mesh_material.clone())
} else {
material
.properties
.get_shader(MeshletPrepassFragmentShader)
.unwrap_or(meshlet_pipelines.meshlet_mesh_material.clone())
};
let entry_point = if fragment_shader == meshlet_pipelines.meshlet_mesh_material {
material_fragment.entry_point.clone()
} else {
None
};
let pipeline_descriptor = RenderPipelineDescriptor {
label: material_pipeline_descriptor.label,
layout: vec![
view_layout,
prepass_pipeline.empty_layout.clone(),
resource_manager.material_shade_bind_group_layout.clone(),
material
.properties
.material_layout
.as_ref()
.unwrap()
.clone(),
],
vertex: VertexState {
shader: meshlet_pipelines.meshlet_mesh_material.clone(),
shader_defs: shader_defs.clone(),
entry_point: material_pipeline_descriptor.vertex.entry_point,
..default()
},
primitive: PrimitiveState::default(),
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth16Unorm,
depth_write_enabled: false,
depth_compare: CompareFunction::Equal,
stencil: StencilState::default(),
bias: DepthBiasState::default(),
}),
fragment: Some(FragmentState {
shader: fragment_shader,
shader_defs,
entry_point,
targets: material_fragment.targets,
}),
..default()
};
let material_id = instance_manager.get_material_id(material_id);
let pipeline_id = *cache
.entry((view_key, material_id.type_id()))
.or_insert_with(|| {
pipeline_cache.queue_render_pipeline(pipeline_descriptor.clone())
});
let Some(material_bind_group) =
material_bind_group_allocator.get(material.binding.group)
else {
continue;
};
let Some(bind_group) = material_bind_group.bind_group() else {
continue;
};
let item = (material_id, pipeline_id, (*bind_group).clone());
if view_key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
deferred_materials.push(item);
} else {
materials.push(item);
}
}
}
}
// Meshlet materials don't use a traditional vertex buffer, but the material specialization requires one.
fn fake_vertex_buffer_layout(layouts: &mut MeshVertexBufferLayouts) -> MeshVertexBufferLayoutRef {
layouts.insert(MeshVertexBufferLayout::new(
vec![
Mesh::ATTRIBUTE_POSITION.id,
Mesh::ATTRIBUTE_NORMAL.id,
Mesh::ATTRIBUTE_UV_0.id,
Mesh::ATTRIBUTE_TANGENT.id,
],
VertexBufferLayout {
array_stride: 48,
step_mode: VertexStepMode::Vertex,
attributes: vec![
VertexAttribute {
format: Mesh::ATTRIBUTE_POSITION.format,
offset: 0,
shader_location: 0,
},
VertexAttribute {
format: Mesh::ATTRIBUTE_NORMAL.format,
offset: 12,
shader_location: 1,
},
VertexAttribute {
format: Mesh::ATTRIBUTE_UV_0.format,
offset: 24,
shader_location: 2,
},
VertexAttribute {
format: Mesh::ATTRIBUTE_TANGENT.format,
offset: 32,
shader_location: 3,
},
],
},
))
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/mod.rs | crates/bevy_pbr/src/meshlet/mod.rs | //! Render high-poly 3d meshes using an efficient GPU-driven method. See [`MeshletPlugin`] and [`MeshletMesh`] for details.
mod asset;
#[cfg(feature = "meshlet_processor")]
mod from_mesh;
mod instance_manager;
mod material_pipeline_prepare;
mod material_shade_nodes;
mod meshlet_mesh_manager;
mod persistent_buffer;
mod persistent_buffer_impls;
mod pipelines;
mod resource_manager;
mod visibility_buffer_raster_node;
pub mod graph {
use bevy_render::render_graph::RenderLabel;
#[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
pub enum NodeMeshlet {
VisibilityBufferRasterPass,
Prepass,
DeferredPrepass,
MainOpaquePass,
}
}
pub(crate) use self::{
instance_manager::{queue_material_meshlet_meshes, InstanceManager},
material_pipeline_prepare::{
prepare_material_meshlet_meshes_main_opaque_pass, prepare_material_meshlet_meshes_prepass,
},
};
pub use self::asset::{
MeshletMesh, MeshletMeshLoader, MeshletMeshSaver, MESHLET_MESH_ASSET_VERSION,
};
#[cfg(feature = "meshlet_processor")]
pub use self::from_mesh::{
MeshToMeshletMeshConversionError, MESHLET_DEFAULT_VERTEX_POSITION_QUANTIZATION_FACTOR,
};
use self::{
graph::NodeMeshlet,
instance_manager::extract_meshlet_mesh_entities,
material_pipeline_prepare::{
MeshletViewMaterialsDeferredGBufferPrepass, MeshletViewMaterialsMainOpaquePass,
MeshletViewMaterialsPrepass,
},
material_shade_nodes::{
MeshletDeferredGBufferPrepassNode, MeshletMainOpaquePass3dNode, MeshletPrepassNode,
},
meshlet_mesh_manager::perform_pending_meshlet_mesh_writes,
pipelines::*,
resource_manager::{
prepare_meshlet_per_frame_resources, prepare_meshlet_view_bind_groups, ResourceManager,
},
visibility_buffer_raster_node::MeshletVisibilityBufferRasterPassNode,
};
use crate::{
graph::NodePbr, meshlet::meshlet_mesh_manager::init_meshlet_mesh_manager,
PreviousGlobalTransform,
};
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, AssetApp, AssetId, Handle};
use bevy_camera::visibility::{self, Visibility, VisibilityClass};
use bevy_core_pipeline::{
core_3d::graph::{Core3d, Node3d},
prepass::{DeferredPrepass, MotionVectorPrepass, NormalPrepass},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::Has,
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res},
};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
render_graph::{RenderGraphExt, ViewNodeRunner},
renderer::RenderDevice,
settings::WgpuFeatures,
view::{prepare_view_targets, Msaa},
ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::load_shader_library;
use bevy_transform::components::Transform;
use derive_more::From;
use tracing::error;
/// Provides a plugin for rendering large amounts of high-poly 3d meshes using an efficient GPU-driven method. See also [`MeshletMesh`].
///
/// Rendering dense scenes made of high-poly meshes with thousands or millions of triangles is extremely expensive in Bevy's standard renderer.
/// Once meshes are pre-processed into a [`MeshletMesh`], this plugin can render these kinds of scenes very efficiently.
///
/// In comparison to Bevy's standard renderer:
/// * Much more efficient culling. Meshlets can be culled individually, instead of all or nothing culling for entire meshes at a time.
///   Additionally, occlusion culling can eliminate meshlets that would cause overdraw.
/// * Much more efficient batching. All geometry can be rasterized in a single draw.
/// * Scales better with large amounts of dense geometry and overdraw. Bevy's standard renderer will bottleneck sooner.
/// * Near-seamless level of detail (LOD).
/// * Much greater base overhead. Rendering will be slower and use more memory than Bevy's standard renderer
///   with small amounts of geometry and overdraw.
/// * Requires preprocessing meshes. See [`MeshletMesh`] for details.
/// * Limitations on the kinds of materials you can use. See [`MeshletMesh`] for details.
///
/// This plugin requires a fairly recent GPU that supports [`WgpuFeatures::TEXTURE_INT64_ATOMIC`].
/// The full set of required features is available from [`MeshletPlugin::required_wgpu_features`];
/// a missing feature aborts the app at render startup.
///
/// This plugin currently works only on the Vulkan and Metal backends.
///
/// This plugin is not compatible with [`Msaa`]. Any camera rendering a [`MeshletMesh`] must have
/// [`Msaa`] set to [`Msaa::Off`].
///
/// Mixing forward+prepass and deferred rendering for opaque materials is not currently supported when using this plugin.
/// You must use one or the other by setting [`crate::DefaultOpaqueRendererMethod`].
/// Do not override [`crate::Material::opaque_render_method`] for any material when using this plugin.
///
/// 
pub struct MeshletPlugin {
    /// The maximum amount of clusters that can be processed at once,
    /// used to control the size of a pre-allocated GPU buffer.
    ///
    /// If this number is too low, you'll see rendering artifacts like missing or blinking meshes.
    ///
    /// Each cluster slot costs 4 bytes of VRAM.
    ///
    /// Must not be greater than 2^25 (checked at plugin build time).
    pub cluster_buffer_slots: u32,
}
impl MeshletPlugin {
    /// [`WgpuFeatures`] required for this plugin to function.
    pub fn required_wgpu_features() -> WgpuFeatures {
        // Build the feature set by unioning each required flag; equivalent to
        // `|`-ing the flags together.
        WgpuFeatures::TEXTURE_INT64_ATOMIC
            .union(WgpuFeatures::TEXTURE_ATOMIC)
            .union(WgpuFeatures::SHADER_INT64)
            .union(WgpuFeatures::SUBGROUP)
            .union(WgpuFeatures::DEPTH_CLIP_CONTROL)
            .union(WgpuFeatures::PUSH_CONSTANTS)
    }
}
impl Plugin for MeshletPlugin {
    fn build(&self, app: &mut App) {
        // Enforced at compile time: only little-endian targets are supported.
        #[cfg(target_endian = "big")]
        compile_error!("MeshletPlugin is only supported on little-endian processors.");

        // The cluster buffers are pre-allocated from this value (see the field
        // docs), so reject configurations past the documented 2^25 cap.
        if self.cluster_buffer_slots > 2_u32.pow(25) {
            error!("MeshletPlugin::cluster_buffer_slots must not be greater than 2^25.");
            std::process::exit(1);
        }

        // Register shared shader libraries and the embedded shader assets used
        // by every meshlet pipeline.
        load_shader_library!(app, "meshlet_bindings.wgsl");
        load_shader_library!(app, "visibility_buffer_resolve.wgsl");
        load_shader_library!(app, "meshlet_cull_shared.wgsl");
        embedded_asset!(app, "clear_visibility_buffer.wgsl");
        embedded_asset!(app, "cull_instances.wgsl");
        embedded_asset!(app, "cull_bvh.wgsl");
        embedded_asset!(app, "cull_clusters.wgsl");
        embedded_asset!(app, "visibility_buffer_software_raster.wgsl");
        embedded_asset!(app, "visibility_buffer_hardware_raster.wgsl");
        embedded_asset!(app, "meshlet_mesh_material.wgsl");
        embedded_asset!(app, "resolve_render_targets.wgsl");
        embedded_asset!(app, "remap_1d_to_2d_dispatch.wgsl");
        embedded_asset!(app, "fill_counts.wgsl");
        app.init_asset::<MeshletMesh>()
            .register_asset_loader(MeshletMeshLoader);

        // Everything below lives in the render sub-app; bail if it's absent
        // (e.g. headless configurations).
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // Create a variable here so we can move-capture it.
        let cluster_buffer_slots = self.cluster_buffer_slots;
        let init_resource_manager_system =
            move |mut commands: Commands, render_device: Res<RenderDevice>| {
                commands
                    .insert_resource(ResourceManager::new(cluster_buffer_slots, &render_device));
            };

        render_app
            // Register the meshlet render graph nodes...
            .add_render_graph_node::<MeshletVisibilityBufferRasterPassNode>(
                Core3d,
                NodeMeshlet::VisibilityBufferRasterPass,
            )
            .add_render_graph_node::<ViewNodeRunner<MeshletPrepassNode>>(
                Core3d,
                NodeMeshlet::Prepass,
            )
            .add_render_graph_node::<ViewNodeRunner<MeshletDeferredGBufferPrepassNode>>(
                Core3d,
                NodeMeshlet::DeferredPrepass,
            )
            .add_render_graph_node::<ViewNodeRunner<MeshletMainOpaquePass3dNode>>(
                Core3d,
                NodeMeshlet::MainOpaquePass,
            )
            // ...and wire them into the Core3d graph: raster before shadows,
            // prepasses before EndPrepasses, opaque shading inside the main pass.
            .add_render_graph_edges(
                Core3d,
                (
                    NodeMeshlet::VisibilityBufferRasterPass,
                    NodePbr::EarlyShadowPass,
                    //
                    NodeMeshlet::Prepass,
                    //
                    NodeMeshlet::DeferredPrepass,
                    Node3d::EndPrepasses,
                    //
                    Node3d::StartMainPass,
                    NodeMeshlet::MainOpaquePass,
                    Node3d::MainOpaquePass,
                    Node3d::EndMainPass,
                ),
            )
            .insert_resource(InstanceManager::new())
            .add_systems(
                RenderStartup,
                (
                    // Feature check runs first; pipelines depend on the
                    // resource manager's bind group layouts, hence the chain.
                    check_meshlet_features,
                    (
                        (init_resource_manager_system, init_meshlet_pipelines).chain(),
                        init_meshlet_mesh_manager,
                    ),
                )
                    .chain(),
            )
            .add_systems(ExtractSchedule, extract_meshlet_mesh_entities)
            .add_systems(
                Render,
                (
                    perform_pending_meshlet_mesh_writes.in_set(RenderSystems::PrepareAssets),
                    configure_meshlet_views
                        .after(prepare_view_targets)
                        .in_set(RenderSystems::ManageViews),
                    prepare_meshlet_per_frame_resources.in_set(RenderSystems::PrepareResources),
                    prepare_meshlet_view_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                    queue_material_meshlet_meshes.in_set(RenderSystems::QueueMeshes),
                    prepare_material_meshlet_meshes_main_opaque_pass
                        .in_set(RenderSystems::QueueMeshes)
                        .before(queue_material_meshlet_meshes),
                ),
            );
    }
}
/// Aborts the process at render startup if the GPU lacks any of
/// [`MeshletPlugin::required_wgpu_features`].
fn check_meshlet_features(render_device: Res<RenderDevice>) {
    let required = MeshletPlugin::required_wgpu_features();
    let missing = required.difference(render_device.features());
    if !missing.is_empty() {
        error!(
            "MeshletPlugin can't be used. GPU lacks support for required features: {:?}.",
            missing
        );
        std::process::exit(1);
    }
}
/// The meshlet mesh equivalent of [`bevy_mesh::Mesh3d`].
///
/// Wraps a [`Handle<MeshletMesh>`] and derefs to it. Requires the usual spatial
/// and visibility components, and registers itself as a visibility class so
/// visible instances can be extracted for rendering.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)]
#[reflect(Component, Default, Clone, PartialEq)]
#[require(Transform, PreviousGlobalTransform, Visibility, VisibilityClass)]
#[component(on_add = visibility::add_visibility_class::<MeshletMesh3d>)]
pub struct MeshletMesh3d(pub Handle<MeshletMesh>);
impl From<MeshletMesh3d> for AssetId<MeshletMesh> {
    /// Extracts the [`AssetId`] of the wrapped [`MeshletMesh`] handle.
    fn from(mesh: MeshletMesh3d) -> Self {
        // Access the inner handle directly rather than via `Deref`.
        mesh.0.id()
    }
}
impl From<&MeshletMesh3d> for AssetId<MeshletMesh> {
    /// Extracts the [`AssetId`] of the wrapped [`MeshletMesh`] handle.
    fn from(mesh: &MeshletMesh3d) -> Self {
        // Access the inner handle directly rather than via `Deref`.
        mesh.0.id()
    }
}
/// Validates each extracted 3D view for meshlet rendering and attaches the
/// per-view material lists consumed by the meshlet passes.
///
/// Exits the process if any view has MSAA enabled, since the meshlet renderer
/// does not support MSAA (see [`MeshletPlugin`] docs).
///
/// Note: the query is read-only, so it no longer needs `mut` — we only read
/// components and issue insertions through `Commands`.
fn configure_meshlet_views(
    views_3d: Query<(
        Entity,
        &Msaa,
        Has<NormalPrepass>,
        Has<MotionVectorPrepass>,
        Has<DeferredPrepass>,
    )>,
    mut commands: Commands,
) {
    for (entity, msaa, normal_prepass, motion_vector_prepass, deferred_prepass) in &views_3d {
        if *msaa != Msaa::Off {
            error!("MeshletPlugin can't be used with MSAA. Add Msaa::Off to your camera to use this plugin.");
            std::process::exit(1);
        }

        // Views without any prepass only need main-opaque-pass materials;
        // views with a prepass also get prepass/deferred material lists.
        if !(normal_prepass || motion_vector_prepass || deferred_prepass) {
            commands
                .entity(entity)
                .insert(MeshletViewMaterialsMainOpaquePass::default());
        } else {
            // TODO: Should we add both Prepass and DeferredGBufferPrepass materials here, and in other systems/nodes?
            commands.entity(entity).insert((
                MeshletViewMaterialsMainOpaquePass::default(),
                MeshletViewMaterialsPrepass::default(),
                MeshletViewMaterialsDeferredGBufferPrepass::default(),
            ));
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/pipelines.rs | crates/bevy_pbr/src/meshlet/pipelines.rs | use super::resource_manager::ResourceManager;
use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_core_pipeline::{
core_3d::CORE_3D_DEPTH_FORMAT, mip_generation::DownsampleShaders, FullscreenShader,
};
use bevy_ecs::{
resource::Resource,
system::{Commands, Res},
world::World,
};
use bevy_render::render_resource::*;
use bevy_shader::Shader;
use bevy_utils::default;
/// Cached pipeline IDs (plus the material shader handle) for every pass of the
/// meshlet renderer, queued by [`init_meshlet_pipelines`].
///
/// The `*_shadow_view` variants are the same passes compiled without the
/// `MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT` shader def, for shadow map
/// views.
#[derive(Resource)]
pub struct MeshletPipelines {
    // Visibility buffer clearing (compute).
    clear_visibility_buffer: CachedComputePipelineId,
    clear_visibility_buffer_shadow_view: CachedComputePipelineId,
    // Two-phase culling at instance, BVH, and meshlet (cluster) granularity.
    first_instance_cull: CachedComputePipelineId,
    second_instance_cull: CachedComputePipelineId,
    first_bvh_cull: CachedComputePipelineId,
    second_bvh_cull: CachedComputePipelineId,
    first_meshlet_cull: CachedComputePipelineId,
    second_meshlet_cull: CachedComputePipelineId,
    // Depth pyramid downsampling (compute).
    downsample_depth_first: CachedComputePipelineId,
    downsample_depth_second: CachedComputePipelineId,
    downsample_depth_first_shadow_view: CachedComputePipelineId,
    downsample_depth_second_shadow_view: CachedComputePipelineId,
    // Visibility buffer rasterization: compute (software) and render
    // (hardware) variants.
    visibility_buffer_software_raster: CachedComputePipelineId,
    visibility_buffer_software_raster_shadow_view: CachedComputePipelineId,
    visibility_buffer_hardware_raster: CachedRenderPipelineId,
    visibility_buffer_hardware_raster_shadow_view: CachedRenderPipelineId,
    visibility_buffer_hardware_raster_shadow_view_unclipped: CachedRenderPipelineId,
    // Fullscreen resolve passes writing depth / material depth.
    resolve_depth: CachedRenderPipelineId,
    resolve_depth_shadow_view: CachedRenderPipelineId,
    resolve_material_depth: CachedRenderPipelineId,
    // Only present when 1D dispatches must be remapped to 2D (see
    // `ResourceManager`); gates the MESHLET_2D_DISPATCH shader def.
    remap_1d_to_2d_dispatch: Option<CachedComputePipelineId>,
    fill_counts: CachedComputePipelineId,
    // Shader handle used when building meshlet material pipelines.
    pub(crate) meshlet_mesh_material: Handle<Shader>,
}
/// `RenderStartup` system that queues every compute and render pipeline used by
/// the meshlet renderer with the [`PipelineCache`], and stores the resulting
/// IDs in a [`MeshletPipelines`] resource.
///
/// Runs after `ResourceManager` is initialized, since all bind group layouts
/// come from it. Pipeline compilation itself happens asynchronously in the
/// pipeline cache.
pub fn init_meshlet_pipelines(
    mut commands: Commands,
    resource_manager: Res<ResourceManager>,
    fullscreen_shader: Res<FullscreenShader>,
    downsample_shaders: Res<DownsampleShaders>,
    pipeline_cache: Res<PipelineCache>,
    asset_server: Res<AssetServer>,
) {
    // Clone the bind group layouts out of the resource manager so they can be
    // moved into the pipeline descriptors below.
    let clear_visibility_buffer_bind_group_layout = resource_manager
        .clear_visibility_buffer_bind_group_layout
        .clone();
    let clear_visibility_buffer_shadow_view_bind_group_layout = resource_manager
        .clear_visibility_buffer_shadow_view_bind_group_layout
        .clone();
    let first_instance_cull_bind_group_layout = resource_manager
        .first_instance_cull_bind_group_layout
        .clone();
    let second_instance_cull_bind_group_layout = resource_manager
        .second_instance_cull_bind_group_layout
        .clone();
    let first_bvh_cull_bind_group_layout =
        resource_manager.first_bvh_cull_bind_group_layout.clone();
    let second_bvh_cull_bind_group_layout =
        resource_manager.second_bvh_cull_bind_group_layout.clone();
    let first_meshlet_cull_bind_group_layout = resource_manager
        .first_meshlet_cull_bind_group_layout
        .clone();
    let second_meshlet_cull_bind_group_layout = resource_manager
        .second_meshlet_cull_bind_group_layout
        .clone();
    let downsample_depth_layout = resource_manager.downsample_depth_bind_group_layout.clone();
    let downsample_depth_shadow_view_layout = resource_manager
        .downsample_depth_shadow_view_bind_group_layout
        .clone();
    let visibility_buffer_raster_layout = resource_manager
        .visibility_buffer_raster_bind_group_layout
        .clone();
    let visibility_buffer_raster_shadow_view_layout = resource_manager
        .visibility_buffer_raster_shadow_view_bind_group_layout
        .clone();
    let resolve_depth_layout = resource_manager.resolve_depth_bind_group_layout.clone();
    let resolve_depth_shadow_view_layout = resource_manager
        .resolve_depth_shadow_view_bind_group_layout
        .clone();
    let resolve_material_depth_layout = resource_manager
        .resolve_material_depth_bind_group_layout
        .clone();
    // `None` when no 1D->2D dispatch remapping is needed; its presence also
    // selects the MESHLET_2D_DISPATCH shader def below.
    let remap_1d_to_2d_dispatch_layout = resource_manager
        .remap_1d_to_2d_dispatch_bind_group_layout
        .clone();
    let downsample_depth_shader = downsample_shaders.depth.clone();
    let vertex_state = fullscreen_shader.to_vertex_state();
    let fill_counts_layout = resource_manager.fill_counts_bind_group_layout.clone();

    // Load the embedded shader assets registered by `MeshletPlugin::build`.
    let clear_visibility_buffer =
        load_embedded_asset!(asset_server.as_ref(), "clear_visibility_buffer.wgsl");
    let cull_instances = load_embedded_asset!(asset_server.as_ref(), "cull_instances.wgsl");
    let cull_bvh = load_embedded_asset!(asset_server.as_ref(), "cull_bvh.wgsl");
    let cull_clusters = load_embedded_asset!(asset_server.as_ref(), "cull_clusters.wgsl");
    let visibility_buffer_software_raster = load_embedded_asset!(
        asset_server.as_ref(),
        "visibility_buffer_software_raster.wgsl"
    );
    let visibility_buffer_hardware_raster = load_embedded_asset!(
        asset_server.as_ref(),
        "visibility_buffer_hardware_raster.wgsl"
    );
    let resolve_render_targets =
        load_embedded_asset!(asset_server.as_ref(), "resolve_render_targets.wgsl");
    let remap_1d_to_2d_dispatch =
        load_embedded_asset!(asset_server.as_ref(), "remap_1d_to_2d_dispatch.wgsl");
    let fill_counts = load_embedded_asset!(asset_server.as_ref(), "fill_counts.wgsl");
    let meshlet_mesh_material =
        load_embedded_asset!(asset_server.as_ref(), "meshlet_mesh_material.wgsl");

    // Queue every pipeline; IDs are resolved later via `MeshletPipelines::get`.
    commands.insert_resource(MeshletPipelines {
        clear_visibility_buffer: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_clear_visibility_buffer_pipeline".into()),
            layout: vec![clear_visibility_buffer_bind_group_layout],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..8,
            }],
            shader: clear_visibility_buffer.clone(),
            shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()],
            ..default()
        }),
        clear_visibility_buffer_shadow_view: pipeline_cache.queue_compute_pipeline(
            ComputePipelineDescriptor {
                label: Some("meshlet_clear_visibility_buffer_shadow_view_pipeline".into()),
                layout: vec![clear_visibility_buffer_shadow_view_bind_group_layout],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..8,
                }],
                shader: clear_visibility_buffer,
                ..default()
            },
        ),
        first_instance_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_first_instance_cull_pipeline".into()),
            layout: vec![first_instance_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: cull_instances.clone(),
            shader_defs: vec![
                "MESHLET_INSTANCE_CULLING_PASS".into(),
                "MESHLET_FIRST_CULLING_PASS".into(),
            ],
            ..default()
        }),
        second_instance_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_second_instance_cull_pipeline".into()),
            layout: vec![second_instance_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: cull_instances,
            shader_defs: vec![
                "MESHLET_INSTANCE_CULLING_PASS".into(),
                "MESHLET_SECOND_CULLING_PASS".into(),
            ],
            ..default()
        }),
        first_bvh_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_first_bvh_cull_pipeline".into()),
            layout: vec![first_bvh_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..8,
            }],
            shader: cull_bvh.clone(),
            shader_defs: vec![
                "MESHLET_BVH_CULLING_PASS".into(),
                "MESHLET_FIRST_CULLING_PASS".into(),
            ],
            ..default()
        }),
        second_bvh_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_second_bvh_cull_pipeline".into()),
            layout: vec![second_bvh_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..8,
            }],
            shader: cull_bvh,
            shader_defs: vec![
                "MESHLET_BVH_CULLING_PASS".into(),
                "MESHLET_SECOND_CULLING_PASS".into(),
            ],
            ..default()
        }),
        first_meshlet_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_first_meshlet_cull_pipeline".into()),
            layout: vec![first_meshlet_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: cull_clusters.clone(),
            shader_defs: vec![
                "MESHLET_CLUSTER_CULLING_PASS".into(),
                "MESHLET_FIRST_CULLING_PASS".into(),
            ],
            ..default()
        }),
        second_meshlet_cull: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_second_meshlet_cull_pipeline".into()),
            layout: vec![second_meshlet_cull_bind_group_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: cull_clusters,
            shader_defs: vec![
                "MESHLET_CLUSTER_CULLING_PASS".into(),
                "MESHLET_SECOND_CULLING_PASS".into(),
            ],
            ..default()
        }),
        downsample_depth_first: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_downsample_depth_first_pipeline".into()),
            layout: vec![downsample_depth_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: downsample_depth_shader.clone(),
            shader_defs: vec![
                "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(),
                "MESHLET".into(),
            ],
            entry_point: Some("downsample_depth_first".into()),
            ..default()
        }),
        downsample_depth_second: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_downsample_depth_second_pipeline".into()),
            layout: vec![downsample_depth_layout.clone()],
            push_constant_ranges: vec![PushConstantRange {
                stages: ShaderStages::COMPUTE,
                range: 0..4,
            }],
            shader: downsample_depth_shader.clone(),
            shader_defs: vec![
                "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(),
                "MESHLET".into(),
            ],
            entry_point: Some("downsample_depth_second".into()),
            ..default()
        }),
        downsample_depth_first_shadow_view: pipeline_cache.queue_compute_pipeline(
            ComputePipelineDescriptor {
                label: Some("meshlet_downsample_depth_first_pipeline".into()),
                layout: vec![downsample_depth_shadow_view_layout.clone()],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }],
                shader: downsample_depth_shader.clone(),
                shader_defs: vec!["MESHLET".into()],
                entry_point: Some("downsample_depth_first".into()),
                ..default()
            },
        ),
        downsample_depth_second_shadow_view: pipeline_cache.queue_compute_pipeline(
            ComputePipelineDescriptor {
                label: Some("meshlet_downsample_depth_second_pipeline".into()),
                layout: vec![downsample_depth_shadow_view_layout],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }],
                shader: downsample_depth_shader,
                shader_defs: vec!["MESHLET".into()],
                entry_point: Some("downsample_depth_second".into()),
                zero_initialize_workgroup_memory: false,
            },
        ),
        visibility_buffer_software_raster: pipeline_cache.queue_compute_pipeline(
            ComputePipelineDescriptor {
                label: Some("meshlet_visibility_buffer_software_raster_pipeline".into()),
                layout: vec![visibility_buffer_raster_layout.clone()],
                push_constant_ranges: vec![],
                shader: visibility_buffer_software_raster.clone(),
                shader_defs: vec![
                    "MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into(),
                    "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(),
                    // Empty string def is a no-op when remapping is disabled.
                    if remap_1d_to_2d_dispatch_layout.is_some() {
                        "MESHLET_2D_DISPATCH"
                    } else {
                        ""
                    }
                    .into(),
                ],
                ..default()
            },
        ),
        visibility_buffer_software_raster_shadow_view: pipeline_cache.queue_compute_pipeline(
            ComputePipelineDescriptor {
                label: Some(
                    "meshlet_visibility_buffer_software_raster_shadow_view_pipeline".into(),
                ),
                layout: vec![visibility_buffer_raster_shadow_view_layout.clone()],
                push_constant_ranges: vec![],
                shader: visibility_buffer_software_raster,
                shader_defs: vec![
                    "MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into(),
                    if remap_1d_to_2d_dispatch_layout.is_some() {
                        "MESHLET_2D_DISPATCH"
                    } else {
                        ""
                    }
                    .into(),
                ],
                ..default()
            },
        ),
        visibility_buffer_hardware_raster: pipeline_cache.queue_render_pipeline(
            RenderPipelineDescriptor {
                label: Some("meshlet_visibility_buffer_hardware_raster_pipeline".into()),
                layout: vec![visibility_buffer_raster_layout.clone()],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::VERTEX,
                    range: 0..4,
                }],
                vertex: VertexState {
                    shader: visibility_buffer_hardware_raster.clone(),
                    shader_defs: vec![
                        "MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into(),
                        "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(),
                    ],
                    ..default()
                },
                fragment: Some(FragmentState {
                    shader: visibility_buffer_hardware_raster.clone(),
                    shader_defs: vec![
                        "MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into(),
                        "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(),
                    ],
                    // Dummy color target: writes are masked off entirely.
                    targets: vec![Some(ColorTargetState {
                        format: TextureFormat::R8Uint,
                        blend: None,
                        write_mask: ColorWrites::empty(),
                    })],
                    ..default()
                }),
                ..default()
            },
        ),
        visibility_buffer_hardware_raster_shadow_view: pipeline_cache.queue_render_pipeline(
            RenderPipelineDescriptor {
                label: Some(
                    "meshlet_visibility_buffer_hardware_raster_shadow_view_pipeline".into(),
                ),
                layout: vec![visibility_buffer_raster_shadow_view_layout.clone()],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::VERTEX,
                    range: 0..4,
                }],
                vertex: VertexState {
                    shader: visibility_buffer_hardware_raster.clone(),
                    shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into()],
                    ..default()
                },
                fragment: Some(FragmentState {
                    shader: visibility_buffer_hardware_raster.clone(),
                    shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into()],
                    targets: vec![Some(ColorTargetState {
                        format: TextureFormat::R8Uint,
                        blend: None,
                        write_mask: ColorWrites::empty(),
                    })],
                    ..default()
                }),
                ..default()
            },
        ),
        visibility_buffer_hardware_raster_shadow_view_unclipped: pipeline_cache
            .queue_render_pipeline(RenderPipelineDescriptor {
                label: Some(
                    "meshlet_visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline"
                        .into(),
                ),
                layout: vec![visibility_buffer_raster_shadow_view_layout],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::VERTEX,
                    range: 0..4,
                }],
                vertex: VertexState {
                    shader: visibility_buffer_hardware_raster.clone(),
                    shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into()],
                    ..default()
                },
                fragment: Some(FragmentState {
                    shader: visibility_buffer_hardware_raster,
                    shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS".into()],
                    targets: vec![Some(ColorTargetState {
                        format: TextureFormat::R8Uint,
                        blend: None,
                        write_mask: ColorWrites::empty(),
                    })],
                    ..default()
                }),
                ..default()
            }),
        resolve_depth: pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor {
            label: Some("meshlet_resolve_depth_pipeline".into()),
            layout: vec![resolve_depth_layout],
            vertex: vertex_state.clone(),
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                depth_write_enabled: true,
                depth_compare: CompareFunction::Always,
                stencil: StencilState::default(),
                bias: DepthBiasState::default(),
            }),
            fragment: Some(FragmentState {
                shader: resolve_render_targets.clone(),
                shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()],
                entry_point: Some("resolve_depth".into()),
                ..default()
            }),
            ..default()
        }),
        resolve_depth_shadow_view: pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor {
            label: Some("meshlet_resolve_depth_pipeline".into()),
            layout: vec![resolve_depth_shadow_view_layout],
            vertex: vertex_state.clone(),
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                depth_write_enabled: true,
                depth_compare: CompareFunction::Always,
                stencil: StencilState::default(),
                bias: DepthBiasState::default(),
            }),
            fragment: Some(FragmentState {
                shader: resolve_render_targets.clone(),
                entry_point: Some("resolve_depth".into()),
                ..default()
            }),
            ..default()
        }),
        resolve_material_depth: pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor {
            label: Some("meshlet_resolve_material_depth_pipeline".into()),
            layout: vec![resolve_material_depth_layout],
            vertex: vertex_state,
            primitive: PrimitiveState::default(),
            depth_stencil: Some(DepthStencilState {
                format: TextureFormat::Depth16Unorm,
                depth_write_enabled: true,
                depth_compare: CompareFunction::Always,
                stencil: StencilState::default(),
                bias: DepthBiasState::default(),
            }),
            fragment: Some(FragmentState {
                shader: resolve_render_targets,
                shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()],
                entry_point: Some("resolve_material_depth".into()),
                targets: vec![],
            }),
            ..default()
        }),
        fill_counts: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("meshlet_fill_counts_pipeline".into()),
            layout: vec![fill_counts_layout],
            shader: fill_counts,
            shader_defs: vec![if remap_1d_to_2d_dispatch_layout.is_some() {
                "MESHLET_2D_DISPATCH"
            } else {
                ""
            }
            .into()],
            ..default()
        }),
        // Only queued when the remap layout exists (large cluster counts).
        remap_1d_to_2d_dispatch: remap_1d_to_2d_dispatch_layout.map(|layout| {
            pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
                label: Some("meshlet_remap_1d_to_2d_dispatch_pipeline".into()),
                layout: vec![layout],
                push_constant_ranges: vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }],
                shader: remap_1d_to_2d_dispatch,
                ..default()
            })
        }),
        meshlet_mesh_material,
    });
}
impl MeshletPipelines {
    /// Fetches every compiled pipeline from the world's [`PipelineCache`].
    ///
    /// Returns `None` until all queued pipelines have finished compiling (or if
    /// either resource is missing). The tuple is ordered exactly as the fields
    /// of [`MeshletPipelines`] are declared; callers destructure positionally,
    /// so do not reorder entries. The `Option<&ComputePipeline>` entry is the
    /// 1D->2D dispatch remap pipeline, which is only queued on some setups.
    pub fn get(
        world: &World,
    ) -> Option<(
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &ComputePipeline,
        &RenderPipeline,
        &RenderPipeline,
        &RenderPipeline,
        &RenderPipeline,
        &RenderPipeline,
        &RenderPipeline,
        Option<&ComputePipeline>,
        &ComputePipeline,
    )> {
        let pipeline_cache = world.get_resource::<PipelineCache>()?;
        let pipeline = world.get_resource::<Self>()?;
        Some((
            pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer)?,
            pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer_shadow_view)?,
            pipeline_cache.get_compute_pipeline(pipeline.first_instance_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.second_instance_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.first_bvh_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.second_bvh_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.first_meshlet_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.second_meshlet_cull)?,
            pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_first)?,
            pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_second)?,
            pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_first_shadow_view)?,
            pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_second_shadow_view)?,
            pipeline_cache.get_compute_pipeline(pipeline.visibility_buffer_software_raster)?,
            pipeline_cache
                .get_compute_pipeline(pipeline.visibility_buffer_software_raster_shadow_view)?,
            pipeline_cache.get_render_pipeline(pipeline.visibility_buffer_hardware_raster)?,
            pipeline_cache
                .get_render_pipeline(pipeline.visibility_buffer_hardware_raster_shadow_view)?,
            pipeline_cache.get_render_pipeline(
                pipeline.visibility_buffer_hardware_raster_shadow_view_unclipped,
            )?,
            pipeline_cache.get_render_pipeline(pipeline.resolve_depth)?,
            pipeline_cache.get_render_pipeline(pipeline.resolve_depth_shadow_view)?,
            pipeline_cache.get_render_pipeline(pipeline.resolve_material_depth)?,
            // If the remap pipeline was queued, it must also be compiled;
            // otherwise pass through `None`.
            match pipeline.remap_1d_to_2d_dispatch {
                Some(id) => Some(pipeline_cache.get_compute_pipeline(id)?),
                None => None,
            },
            pipeline_cache.get_compute_pipeline(pipeline.fill_counts)?,
        ))
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/resource_manager.rs | crates/bevy_pbr/src/meshlet/resource_manager.rs | use super::{instance_manager::InstanceManager, meshlet_mesh_manager::MeshletMeshManager};
use crate::ShadowView;
use bevy_camera::{visibility::RenderLayers, Camera3d};
use bevy_core_pipeline::{
mip_generation::experimental::depth::{self, ViewDepthPyramid},
prepass::{PreviousViewData, PreviousViewUniforms},
};
use bevy_ecs::{
component::Component,
entity::{Entity, EntityHashMap},
query::AnyOf,
resource::Resource,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::ToExtents;
use bevy_math::{UVec2, Vec4Swizzles};
use bevy_render::{
render_resource::*,
renderer::{RenderDevice, RenderQueue},
texture::{CachedTexture, TextureCache},
view::{ExtractedView, ViewUniform, ViewUniforms},
};
use binding_types::*;
use core::iter;
/// Manages per-view and per-cluster GPU resources for [`MeshletPlugin`](`super::MeshletPlugin`).
#[derive(Resource)]
pub struct ResourceManager {
    /// Intermediate buffer of cluster IDs for use with rasterizing the visibility buffer
    visibility_buffer_raster_clusters: Buffer,
    /// Intermediate buffer of previous counts of clusters in rasterizer buckets
    pub visibility_buffer_raster_cluster_prev_counts: Buffer,
    /// Intermediate buffer of count of clusters to software rasterize
    software_raster_cluster_count: Buffer,
    /// BVH traversal queues
    bvh_traversal_queues: [Buffer; 2],
    /// Cluster cull candidate queue
    cluster_cull_candidate_queue: Buffer,
    /// Rightmost slot index of [`Self::visibility_buffer_raster_clusters`], [`Self::bvh_traversal_queues`], and [`Self::cluster_cull_candidate_queue`]
    cull_queue_rightmost_slot: u32,
    /// Second pass instance candidates
    second_pass_candidates: Option<Buffer>,
    /// Sampler for a depth pyramid
    depth_pyramid_sampler: Sampler,
    /// Dummy texture view for binding depth pyramids with less than the maximum amount of mips
    depth_pyramid_dummy_texture: TextureView,
    // TODO
    // NOTE(review): presumably last frame's depth pyramid views keyed by view
    // entity, for occlusion culling — confirm against the per-frame prepare
    // system before relying on this.
    previous_depth_pyramids: EntityHashMap<TextureView>,

    // Bind group layouts
    //
    // One layout per meshlet pipeline; cloned by `init_meshlet_pipelines` when
    // queueing the corresponding pipelines.
    pub clear_visibility_buffer_bind_group_layout: BindGroupLayoutDescriptor,
    pub clear_visibility_buffer_shadow_view_bind_group_layout: BindGroupLayoutDescriptor,
    pub first_instance_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub second_instance_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub first_bvh_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub second_bvh_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub first_meshlet_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub second_meshlet_cull_bind_group_layout: BindGroupLayoutDescriptor,
    pub visibility_buffer_raster_bind_group_layout: BindGroupLayoutDescriptor,
    pub visibility_buffer_raster_shadow_view_bind_group_layout: BindGroupLayoutDescriptor,
    pub downsample_depth_bind_group_layout: BindGroupLayoutDescriptor,
    pub downsample_depth_shadow_view_bind_group_layout: BindGroupLayoutDescriptor,
    pub resolve_depth_bind_group_layout: BindGroupLayoutDescriptor,
    pub resolve_depth_shadow_view_bind_group_layout: BindGroupLayoutDescriptor,
    pub resolve_material_depth_bind_group_layout: BindGroupLayoutDescriptor,
    pub material_shade_bind_group_layout: BindGroupLayoutDescriptor,
    pub fill_counts_bind_group_layout: BindGroupLayoutDescriptor,
    /// `Some` only when the cluster count exceeds the device's max compute
    /// workgroups per dimension, requiring 1D->2D dispatch remapping.
    pub remap_1d_to_2d_dispatch_bind_group_layout: Option<BindGroupLayoutDescriptor>,
}
impl ResourceManager {
    /// Creates the app-wide meshlet GPU buffers, sampler, dummy textures, and all
    /// bind group layouts used by the meshlet culling/raster/shade passes.
    ///
    /// `cluster_buffer_slots` is the capacity (in clusters) of the cull/raster
    /// queues; each queue slot stores a `(u32, u32)` instance+index ID, and the
    /// rightmost usable slot index is `cluster_buffer_slots - 1`.
    pub fn new(cluster_buffer_slots: u32, render_device: &RenderDevice) -> Self {
        // If more clusters can exist than fit in a single 1D compute dispatch,
        // indirect dispatches must be remapped from 1D to 2D; this also adds an
        // extra binding to the fill-counts layout below.
        let needs_dispatch_remap =
            cluster_buffer_slots > render_device.limits().max_compute_workgroups_per_dimension;
        // The IDs are a (u32, u32) of instance and index.
        let cull_queue_size = 2 * cluster_buffer_slots as u64 * size_of::<u32>() as u64;
        Self {
            visibility_buffer_raster_clusters: render_device.create_buffer(&BufferDescriptor {
                label: Some("meshlet_visibility_buffer_raster_clusters"),
                size: cull_queue_size,
                usage: BufferUsages::STORAGE,
                mapped_at_creation: false,
            }),
            // Two u32 counters (see the struct field docs).
            visibility_buffer_raster_cluster_prev_counts: render_device.create_buffer(
                &BufferDescriptor {
                    label: Some("meshlet_visibility_buffer_raster_cluster_prev_counts"),
                    size: size_of::<u32>() as u64 * 2,
                    usage: BufferUsages::STORAGE | BufferUsages::COPY_DST,
                    mapped_at_creation: false,
                },
            ),
            // Single u32 counter.
            software_raster_cluster_count: render_device.create_buffer(&BufferDescriptor {
                label: Some("meshlet_software_raster_cluster_count"),
                size: size_of::<u32>() as u64,
                usage: BufferUsages::STORAGE,
                mapped_at_creation: false,
            }),
            // Ping/pong pair of queues for BVH traversal.
            bvh_traversal_queues: [
                render_device.create_buffer(&BufferDescriptor {
                    label: Some("meshlet_bvh_traversal_queue_0"),
                    size: cull_queue_size,
                    usage: BufferUsages::STORAGE,
                    mapped_at_creation: false,
                }),
                render_device.create_buffer(&BufferDescriptor {
                    label: Some("meshlet_bvh_traversal_queue_1"),
                    size: cull_queue_size,
                    usage: BufferUsages::STORAGE,
                    mapped_at_creation: false,
                }),
            ],
            cluster_cull_candidate_queue: render_device.create_buffer(&BufferDescriptor {
                label: Some("meshlet_cluster_cull_candidate_queue"),
                size: cull_queue_size,
                usage: BufferUsages::STORAGE,
                mapped_at_creation: false,
            }),
            cull_queue_rightmost_slot: cluster_buffer_slots - 1,
            // Allocated lazily per frame once the scene instance count is known.
            second_pass_candidates: None,
            depth_pyramid_sampler: render_device.create_sampler(&SamplerDescriptor {
                label: Some("meshlet_depth_pyramid_sampler"),
                ..SamplerDescriptor::default()
            }),
            depth_pyramid_dummy_texture: depth::create_depth_pyramid_dummy_texture(
                render_device,
                "meshlet_depth_pyramid_dummy_texture",
                "meshlet_depth_pyramid_dummy_texture_view",
            ),
            previous_depth_pyramids: EntityHashMap::default(),
            // TODO: Buffer min sizes
            //
            // NOTE: the `*_shadow_view` layout variants below are identical to their
            // counterparts except that the visibility buffer binding is R32Uint
            // instead of R64Uint.
            clear_visibility_buffer_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_clear_visibility_buffer_bind_group_layout",
                &BindGroupLayoutEntries::single(
                    ShaderStages::COMPUTE,
                    texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::WriteOnly),
                ),
            ),
            clear_visibility_buffer_shadow_view_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_clear_visibility_buffer_shadow_view_bind_group_layout",
                &BindGroupLayoutEntries::single(
                    ShaderStages::COMPUTE,
                    texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::WriteOnly),
                ),
            ),
            // Culling layouts: a depth pyramid texture, view + previous-view
            // uniforms, then a pass-specific mix of read-only and read-write
            // storage buffers. Binding order must match the culling shaders.
            first_instance_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_first_instance_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                    ),
                ),
            ),
            second_instance_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_second_instance_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                    ),
                ),
            ),
            first_bvh_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_first_bvh_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                    ),
                ),
            ),
            second_bvh_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_second_bvh_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                    ),
                ),
            ),
            first_meshlet_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_first_meshlet_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                    ),
                ),
            ),
            second_meshlet_cull_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_second_meshlet_culling_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::COMPUTE,
                    (
                        texture_2d(TextureSampleType::Float { filterable: false }),
                        uniform_buffer::<ViewUniform>(true),
                        uniform_buffer::<PreviousViewData>(true),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                    ),
                ),
            ),
            // Depth downsample layout: the source visibility buffer, 12 mip
            // outputs (mip 6 is read-write, the rest write-only), and a
            // non-filtering sampler.
            downsample_depth_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_downsample_depth_bind_group_layout",
                &BindGroupLayoutEntries::sequential(ShaderStages::COMPUTE, {
                    let write_only_r32float = || {
                        texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly)
                    };
                    (
                        texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        texture_storage_2d(
                            TextureFormat::R32Float,
                            StorageTextureAccess::ReadWrite,
                        ),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        sampler(SamplerBindingType::NonFiltering),
                    )
                }),
            ),
            downsample_depth_shadow_view_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_downsample_depth_shadow_view_bind_group_layout",
                &BindGroupLayoutEntries::sequential(ShaderStages::COMPUTE, {
                    let write_only_r32float = || {
                        texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly)
                    };
                    (
                        texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        texture_storage_2d(
                            TextureFormat::R32Float,
                            StorageTextureAccess::ReadWrite,
                        ),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        write_only_r32float(),
                        sampler(SamplerBindingType::NonFiltering),
                    )
                }),
            ),
            // Raster layout: shared by the vertex/fragment hardware path and the
            // compute software path, hence the combined shader-stage visibility.
            // The visibility buffer is bound with atomic access.
            visibility_buffer_raster_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_visibility_buffer_raster_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE,
                    (
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::Atomic),
                        uniform_buffer::<ViewUniform>(true),
                    ),
                ),
            ),
            visibility_buffer_raster_shadow_view_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_visibility_buffer_raster_shadow_view_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE,
                    (
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::Atomic),
                        uniform_buffer::<ViewUniform>(true),
                    ),
                ),
            ),
            resolve_depth_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_resolve_depth_bind_group_layout",
                &BindGroupLayoutEntries::single(
                    ShaderStages::FRAGMENT,
                    texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly),
                ),
            ),
            resolve_depth_shadow_view_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_resolve_depth_shadow_view_bind_group_layout",
                &BindGroupLayoutEntries::single(
                    ShaderStages::FRAGMENT,
                    texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly),
                ),
            ),
            resolve_material_depth_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_resolve_material_depth_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::FRAGMENT,
                    (
                        texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                    ),
                ),
            ),
            material_shade_bind_group_layout: BindGroupLayoutDescriptor::new(
                "meshlet_mesh_material_shade_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::FRAGMENT,
                    (
                        texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                        storage_buffer_read_only_sized(false, None),
                    ),
                ),
            ),
            // When dispatch remapping is needed, fill-counts takes one extra
            // storage buffer binding.
            fill_counts_bind_group_layout: if needs_dispatch_remap {
                BindGroupLayoutDescriptor::new(
                    "meshlet_fill_counts_bind_group_layout",
                    &BindGroupLayoutEntries::sequential(
                        ShaderStages::COMPUTE,
                        (
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                        ),
                    ),
                )
            } else {
                BindGroupLayoutDescriptor::new(
                    "meshlet_fill_counts_bind_group_layout",
                    &BindGroupLayoutEntries::sequential(
                        ShaderStages::COMPUTE,
                        (
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                        ),
                    ),
                )
            },
            // Only present when 1D dispatches can overflow the workgroup limit.
            remap_1d_to_2d_dispatch_bind_group_layout: needs_dispatch_remap.then(|| {
                BindGroupLayoutDescriptor::new(
                    "meshlet_remap_1d_to_2d_dispatch_bind_group_layout",
                    &BindGroupLayoutEntries::sequential(
                        ShaderStages::COMPUTE,
                        (
                            storage_buffer_sized(false, None),
                            storage_buffer_sized(false, None),
                        ),
                    ),
                )
            }),
        }
    }
}
// ------------ TODO: Everything under here needs to be rewritten and cached ------------
/// Per-view GPU resources for meshlet rendering, rebuilt each frame by
/// `prepare_meshlet_per_frame_resources`.
#[derive(Component)]
pub struct MeshletViewResources {
    /// Total number of meshlet instances in the scene (copied from `InstanceManager`).
    pub scene_instance_count: u32,
    /// Rightmost usable slot index of the cull/raster queues for this view.
    // NOTE(review): presumably mirrors `ResourceManager::cull_queue_rightmost_slot` — confirm.
    pub rightmost_slot: u32,
    pub max_bvh_depth: u32,
    /// Bitmask buffer; bit i is set when instance i is hidden for this view
    /// (render-layer mismatch, or a non-shadow-caster in a shadow view).
    instance_visibility: Buffer,
    /// R8Uint render attachment with no real contents.
    // TODO(upstream): exists only because wgpu requires at least one attachment.
    pub dummy_render_target: CachedTexture,
    /// Per-pixel visibility IDs; R64Uint for camera views, R32Uint for shadow views.
    pub visibility_buffer: CachedTexture,
    /// u32 counter, initialized to 0 each frame.
    pub second_pass_count: Buffer,
    /// `DispatchIndirectArgs { x: 0, y: 1, z: 1 }` indirect dispatch buffer.
    pub second_pass_dispatch: Buffer,
    /// Candidate-instance bitmask shared via `ResourceManager::second_pass_candidates`.
    pub second_pass_candidates: Buffer,
    // Front/back count + indirect-dispatch buffer pairs, plus a queue, for each
    // of the two BVH culling passes. Counts start at 0; dispatches at (0, 1, 1).
    pub first_bvh_cull_count_front: Buffer,
    pub first_bvh_cull_dispatch_front: Buffer,
    pub first_bvh_cull_count_back: Buffer,
    pub first_bvh_cull_dispatch_back: Buffer,
    pub first_bvh_cull_queue: Buffer,
    pub second_bvh_cull_count_front: Buffer,
    pub second_bvh_cull_dispatch_front: Buffer,
    pub second_bvh_cull_count_back: Buffer,
    pub second_bvh_cull_dispatch_back: Buffer,
    pub second_bvh_cull_queue: Buffer,
    // Same pattern for the meshlet-level culling passes.
    pub front_meshlet_cull_count: Buffer,
    pub front_meshlet_cull_dispatch: Buffer,
    pub back_meshlet_cull_count: Buffer,
    pub back_meshlet_cull_dispatch: Buffer,
    pub meshlet_cull_queue: Buffer,
    /// Indirect args for the software (compute) raster dispatch.
    pub visibility_buffer_software_raster_indirect_args: Buffer,
    /// Indirect args for the hardware (vertex/fragment) raster draw.
    pub visibility_buffer_hardware_raster_indirect_args: Buffer,
    /// This view's depth pyramid for occlusion culling.
    pub depth_pyramid: ViewDepthPyramid,
    /// Last frame's depth pyramid view, used for two-pass occlusion culling.
    previous_depth_pyramid: TextureView,
    /// Material-ID depth texture; only present for camera (non-shadow) views.
    pub material_depth: Option<CachedTexture>,
    pub view_size: UVec2,
    /// True when this view is a camera view rather than a `ShadowView`.
    not_shadow_view: bool,
}
/// Per-view bind groups for the meshlet passes, each matching the
/// correspondingly-named layout in `ResourceManager`.
#[derive(Component)]
pub struct MeshletViewBindGroups {
    pub clear_visibility_buffer: BindGroup,
    pub first_instance_cull: BindGroup,
    pub second_instance_cull: BindGroup,
    // Ping/pong pairs: BVH traversal alternates between the two queues in
    // `ResourceManager::bvh_traversal_queues`, so each pass needs a bind group
    // for either queue order.
    pub first_bvh_cull_ping: BindGroup,
    pub first_bvh_cull_pong: BindGroup,
    pub second_bvh_cull_ping: BindGroup,
    pub second_bvh_cull_pong: BindGroup,
    pub first_meshlet_cull: BindGroup,
    pub second_meshlet_cull: BindGroup,
    pub downsample_depth: BindGroup,
    pub visibility_buffer_raster: BindGroup,
    pub resolve_depth: BindGroup,
    /// `None` for shadow views (no material depth texture).
    pub resolve_material_depth: Option<BindGroup>,
    /// `None` for shadow views.
    pub material_shade: Option<BindGroup>,
    /// Only present when the 1D→2D dispatch remap is needed (see
    /// `ResourceManager::remap_1d_to_2d_dispatch_bind_group_layout`).
    pub remap_1d_to_2d_dispatch: Option<BindGroup>,
    pub fill_counts: BindGroup,
}
// TODO: Cache things per-view and skip running this system / optimize this system
pub fn prepare_meshlet_per_frame_resources(
mut resource_manager: ResMut<ResourceManager>,
mut instance_manager: ResMut<InstanceManager>,
views: Query<(
Entity,
&ExtractedView,
Option<&RenderLayers>,
AnyOf<(&Camera3d, &ShadowView)>,
)>,
mut texture_cache: ResMut<TextureCache>,
render_queue: Res<RenderQueue>,
render_device: Res<RenderDevice>,
mut commands: Commands,
) {
if instance_manager.scene_instance_count == 0 {
return;
}
let instance_manager = instance_manager.as_mut();
// TODO: Move this and the submit to a separate system and remove pub from the fields
instance_manager
.instance_uniforms
.write_buffer(&render_device, &render_queue);
instance_manager
.instance_aabbs
.write_buffer(&render_device, &render_queue);
instance_manager
.instance_material_ids
.write_buffer(&render_device, &render_queue);
instance_manager
.instance_bvh_root_nodes
.write_buffer(&render_device, &render_queue);
let needed_buffer_size = 4 * instance_manager.scene_instance_count as u64;
let second_pass_candidates = match &mut resource_manager.second_pass_candidates {
Some(buffer) if buffer.size() >= needed_buffer_size => buffer.clone(),
slot => {
let buffer = render_device.create_buffer(&BufferDescriptor {
label: Some("meshlet_second_pass_candidates"),
size: needed_buffer_size,
usage: BufferUsages::STORAGE,
mapped_at_creation: false,
});
*slot = Some(buffer.clone());
buffer
}
};
for (view_entity, view, render_layers, (_, shadow_view)) in &views {
let not_shadow_view = shadow_view.is_none();
let instance_visibility = instance_manager
.view_instance_visibility
.entry(view_entity)
.or_insert_with(|| {
let mut buffer = StorageBuffer::default();
buffer.set_label(Some("meshlet_view_instance_visibility"));
buffer
});
for (instance_index, (_, layers, not_shadow_caster)) in
instance_manager.instances.iter().enumerate()
{
// If either the layers don't match the view's layers or this is a shadow view
// and the instance is not a shadow caster, hide the instance for this view
if !render_layers
.unwrap_or(&RenderLayers::default())
.intersects(layers)
|| (shadow_view.is_some() && *not_shadow_caster)
{
let vec = instance_visibility.get_mut();
let index = instance_index / 32;
let bit = instance_index - index * 32;
if vec.len() <= index {
vec.extend(iter::repeat_n(0, index - vec.len() + 1));
}
vec[index] |= 1 << bit;
}
}
instance_visibility.write_buffer(&render_device, &render_queue);
let instance_visibility = instance_visibility.buffer().unwrap().clone();
// TODO: Remove this once wgpu allows render passes with no attachments
let dummy_render_target = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("meshlet_dummy_render_target"),
size: view.viewport.zw().to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::R8Uint,
usage: TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
},
);
let visibility_buffer = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("meshlet_visibility_buffer"),
size: view.viewport.zw().to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: if not_shadow_view {
TextureFormat::R64Uint
} else {
TextureFormat::R32Uint
},
usage: TextureUsages::STORAGE_ATOMIC | TextureUsages::STORAGE_BINDING,
view_formats: &[],
},
);
let second_pass_count = render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_second_pass_count"),
contents: bytemuck::bytes_of(&0u32),
usage: BufferUsages::STORAGE,
});
let second_pass_dispatch = render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_second_pass_dispatch"),
contents: DispatchIndirectArgs { x: 0, y: 1, z: 1 }.as_bytes(),
usage: BufferUsages::STORAGE | BufferUsages::INDIRECT,
});
let first_bvh_cull_count_front =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_first_bvh_cull_count_front"),
contents: bytemuck::bytes_of(&0u32),
usage: BufferUsages::STORAGE | BufferUsages::COPY_DST,
});
let first_bvh_cull_dispatch_front =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_first_bvh_cull_dispatch_front"),
contents: DispatchIndirectArgs { x: 0, y: 1, z: 1 }.as_bytes(),
usage: BufferUsages::STORAGE | BufferUsages::INDIRECT | BufferUsages::COPY_DST,
});
let first_bvh_cull_count_back =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_first_bvh_cull_count_back"),
contents: bytemuck::bytes_of(&0u32),
usage: BufferUsages::STORAGE | BufferUsages::COPY_DST,
});
let first_bvh_cull_dispatch_back =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_first_bvh_cull_dispatch_back"),
contents: DispatchIndirectArgs { x: 0, y: 1, z: 1 }.as_bytes(),
usage: BufferUsages::STORAGE | BufferUsages::INDIRECT | BufferUsages::COPY_DST,
});
let second_bvh_cull_count_front =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_second_bvh_cull_count_front"),
contents: bytemuck::bytes_of(&0u32),
usage: BufferUsages::STORAGE | BufferUsages::COPY_DST,
});
let second_bvh_cull_dispatch_front =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_second_bvh_cull_dispatch_front"),
contents: DispatchIndirectArgs { x: 0, y: 1, z: 1 }.as_bytes(),
usage: BufferUsages::STORAGE | BufferUsages::INDIRECT | BufferUsages::COPY_DST,
});
let second_bvh_cull_count_back =
render_device.create_buffer_with_data(&BufferInitDescriptor {
label: Some("meshlet_second_bvh_cull_count_back"),
contents: bytemuck::bytes_of(&0u32),
usage: BufferUsages::STORAGE | BufferUsages::COPY_DST,
});
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/from_mesh.rs | crates/bevy_pbr/src/meshlet/from_mesh.rs | use crate::meshlet::asset::{MeshletAabb, MeshletAabbErrorOffset, MeshletCullData};
use super::asset::{BvhNode, Meshlet, MeshletBoundingSphere, MeshletMesh};
use alloc::borrow::Cow;
use bevy_math::{
bounding::{Aabb3d, BoundingSphere, BoundingVolume},
ops::log2,
IVec3, Isometry3d, Vec2, Vec3, Vec3A, Vec3Swizzles,
};
use bevy_mesh::{Indices, Mesh};
use bevy_platform::collections::HashMap;
use bevy_render::render_resource::PrimitiveTopology;
use bevy_tasks::{AsyncComputeTaskPool, ParallelSlice};
use bitvec::{order::Lsb0, vec::BitVec, view::BitView};
use core::{f32, ops::Range};
use itertools::Itertools;
use meshopt::{
build_meshlets, ffi::meshopt_Meshlet, generate_position_remap,
simplify_with_attributes_and_locks, Meshlets, SimplifyOptions, VertexDataAdapter,
};
use metis::{option::Opt, Graph};
use smallvec::SmallVec;
use thiserror::Error;
use tracing::debug_span;
// Aim to have 8 meshlets per group
const TARGET_MESHLETS_PER_GROUP: usize = 8;
// Reject groups that keep over 60% of their original triangles. We'd much rather render a few
// extra triangles than create too many meshlets, increasing cull overhead.
const SIMPLIFICATION_FAILURE_PERCENTAGE: f32 = 0.60;
/// Default vertex position quantization factor for use with [`MeshletMesh::from_mesh`].
///
/// Snaps vertices to the nearest 1/16th of a centimeter (1/2^4).
pub const MESHLET_DEFAULT_VERTEX_POSITION_QUANTIZATION_FACTOR: u8 = 4;
// Quantization operates in centimeters while positions are in meters.
const CENTIMETERS_PER_METER: f32 = 100.0;
impl MeshletMesh {
    /// Process a [`Mesh`] to generate a [`MeshletMesh`].
    ///
    /// This process is very slow, and should be done ahead of time, and not at runtime.
    ///
    /// # Requirements
    ///
    /// This function requires the `meshlet_processor` cargo feature.
    ///
    /// The input mesh must:
    /// 1. Use [`PrimitiveTopology::TriangleList`]
    /// 2. Use indices
    /// 3. Have the exact following set of vertex attributes: `{POSITION, NORMAL, UV_0}` (tangents can be used in material shaders, but are calculated at runtime and are not stored in the mesh)
    ///
    /// # Vertex precision
    ///
    /// `vertex_position_quantization_factor` is the amount of precision to use when quantizing vertex positions.
    ///
    /// Vertices are snapped to the nearest (1/2^x)th of a centimeter, where x = `vertex_position_quantization_factor`.
    /// E.g. if x = 4, then vertices are snapped to the nearest 1/2^4 = 1/16th of a centimeter.
    ///
    /// Use [`MESHLET_DEFAULT_VERTEX_POSITION_QUANTIZATION_FACTOR`] as a default, adjusting lower to save memory and disk space, and higher to prevent artifacts if needed.
    ///
    /// To ensure that two different meshes do not have cracks between them when placed directly next to each other:
    /// * Use the same quantization factor when converting each mesh to a meshlet mesh
    /// * Ensure that their [`bevy_transform::components::Transform::translation`]s are a multiple of 1/2^x centimeters (note that translations are in meters)
    /// * Ensure that their [`bevy_transform::components::Transform::scale`]s are the same
    /// * Ensure that their [`bevy_transform::components::Transform::rotation`]s are a multiple of 90 degrees
    pub fn from_mesh(
        mesh: &Mesh,
        vertex_position_quantization_factor: u8,
    ) -> Result<Self, MeshToMeshletMeshConversionError> {
        let s = debug_span!("build meshlet mesh");
        let _e = s.enter();
        // Validate mesh format
        let indices = validate_input_mesh(mesh)?;
        // Get meshlet vertices
        let vertex_buffer = mesh.create_packed_vertex_buffer_data();
        let vertex_stride = mesh.get_vertex_size() as usize;
        let vertices = VertexDataAdapter::new(&vertex_buffer, vertex_stride, 0).unwrap();
        // NOTE(review): this slices only bytes 12..16 (a single f32) of the first
        // vertex's normal; the simplifier presumably reads subsequent normals via
        // `vertex_stride` from this base pointer — confirm against
        // `simplify_meshlet_group`'s use of `vertex_normals`.
        let vertex_normals = bytemuck::cast_slice(&vertex_buffer[12..16]);
        // Generate a position-only vertex buffer for determining triangle/meshlet connectivity
        let position_only_vertex_remap = generate_position_remap(&vertices);
        // Split the mesh into an initial list of meshlets (LOD 0)
        let (mut meshlets, mut cull_data) =
            compute_meshlets(&indices, &vertices, &position_only_vertex_remap, None);
        let mut vertex_locks = vec![false; vertices.vertex_count];
        // Build further LODs
        let mut bvh = BvhBuilder::default();
        let mut all_groups = Vec::new();
        let mut simplification_queue: Vec<_> = (0..meshlets.len() as u32).collect();
        // Groups that failed to simplify; retried when enough neighbors succeed.
        let mut stuck = Vec::new();
        while !simplification_queue.is_empty() {
            let s = debug_span!("simplify lod", meshlets = simplification_queue.len());
            let _e = s.enter();
            // For each meshlet build a list of connected meshlets (meshlets that share a vertex)
            let connected_meshlets_per_meshlet = find_connected_meshlets(
                &simplification_queue,
                &meshlets,
                &position_only_vertex_remap,
            );
            // Group meshlets into roughly groups of size TARGET_MESHLETS_PER_GROUP,
            // grouping meshlets with a high number of shared vertices
            let groups = group_meshlets(
                &simplification_queue,
                &cull_data,
                &connected_meshlets_per_meshlet,
            );
            simplification_queue.clear();
            // Lock borders between groups to prevent cracks when simplifying
            lock_group_borders(
                &mut vertex_locks,
                &groups,
                &meshlets,
                &position_only_vertex_remap,
            );
            // Simplify each group in parallel; Err(group) means the group could
            // not be simplified and is a candidate for the `stuck` list.
            let simplified = groups.par_chunk_map(AsyncComputeTaskPool::get(), 1, |_, groups| {
                let mut group = groups[0].clone();
                // If the group only has a single meshlet we can't simplify it
                if group.meshlets.len() == 1 {
                    return Err(group);
                }
                let s = debug_span!("simplify group", meshlets = group.meshlets.len());
                let _e = s.enter();
                // Simplify the group to ~50% triangle count
                let Some((simplified_group_indices, mut group_error)) = simplify_meshlet_group(
                    &group,
                    &meshlets,
                    &vertices,
                    vertex_normals,
                    vertex_stride,
                    &vertex_locks,
                ) else {
                    // Couldn't simplify the group enough
                    return Err(group);
                };
                // Force the group error to be atleast as large as all of its constituent meshlet's
                // individual errors.
                for &id in group.meshlets.iter() {
                    group_error = group_error.max(cull_data[id as usize].error);
                }
                group.parent_error = group_error;
                // Build new meshlets using the simplified group
                let new_meshlets = compute_meshlets(
                    &simplified_group_indices,
                    &vertices,
                    &position_only_vertex_remap,
                    Some((group.lod_bounds, group.parent_error)),
                );
                Ok((group, new_meshlets))
            });
            let first_group = all_groups.len() as u32;
            let mut passed_tris = 0;
            let mut stuck_tris = 0;
            for group in simplified {
                match group {
                    Ok((group, (new_meshlets, new_cull_data))) => {
                        // Append the simplified meshlets and queue them for the next LOD.
                        let start = meshlets.len();
                        merge_meshlets(&mut meshlets, new_meshlets);
                        cull_data.extend(new_cull_data);
                        let end = meshlets.len();
                        let new_meshlet_ids = start as u32..end as u32;
                        passed_tris += triangles_in_meshlets(&meshlets, new_meshlet_ids.clone());
                        simplification_queue.extend(new_meshlet_ids);
                        all_groups.push(group);
                    }
                    Err(group) => {
                        stuck_tris +=
                            triangles_in_meshlets(&meshlets, group.meshlets.iter().copied());
                        stuck.push(group);
                    }
                }
            }
            // If we have enough triangles that passed, we can retry simplifying the stuck
            // meshlets.
            if passed_tris > stuck_tris / 3 {
                simplification_queue.extend(stuck.drain(..).flat_map(|group| group.meshlets));
            }
            bvh.add_lod(first_group, &all_groups);
        }
        // If there's any stuck meshlets left, add another LOD level with only them
        if !stuck.is_empty() {
            let first_group = all_groups.len() as u32;
            all_groups.extend(stuck);
            bvh.add_lod(first_group, &all_groups);
        }
        let (bvh, aabb, depth) = bvh.build(&mut meshlets, all_groups, &mut cull_data);
        // Copy vertex attributes per meshlet and compress
        let mut vertex_positions = BitVec::<u32, Lsb0>::new();
        let mut vertex_normals = Vec::new();
        let mut vertex_uvs = Vec::new();
        let mut bevy_meshlets = Vec::with_capacity(meshlets.len());
        for (i, meshlet) in meshlets.meshlets.iter().enumerate() {
            build_and_compress_per_meshlet_vertex_data(
                meshlet,
                meshlets.get(i).vertices,
                &vertex_buffer,
                vertex_stride,
                &mut vertex_positions,
                &mut vertex_normals,
                &mut vertex_uvs,
                &mut bevy_meshlets,
                vertex_position_quantization_factor,
            );
        }
        // Zero any trailing bits in the final partially-filled word.
        vertex_positions.set_uninitialized(false);
        Ok(Self {
            vertex_positions: vertex_positions.into_vec().into(),
            vertex_normals: vertex_normals.into(),
            vertex_uvs: vertex_uvs.into(),
            indices: meshlets.triangles.into(),
            bvh: bvh.into(),
            meshlets: bevy_meshlets.into(),
            meshlet_cull_data: cull_data
                .into_iter()
                .map(|cull_data| MeshletCullData {
                    aabb: aabb_to_meshlet(cull_data.aabb, cull_data.error, 0),
                    lod_group_sphere: sphere_to_meshlet(cull_data.lod_group_sphere),
                })
                .collect(),
            aabb,
            bvh_depth: depth,
        })
    }
}
/// Checks that `mesh` meets the meshlet-conversion requirements and returns its
/// index list as `u32`s (borrowed for `U32` indices, converted for `U16`).
fn validate_input_mesh(mesh: &Mesh) -> Result<Cow<'_, [u32]>, MeshToMeshletMeshConversionError> {
    // Only indexed triangle lists are supported.
    if mesh.primitive_topology() != PrimitiveTopology::TriangleList {
        return Err(MeshToMeshletMeshConversionError::WrongMeshPrimitiveTopology);
    }

    // The vertex layout must be exactly {POSITION, NORMAL, UV_0}, in that order.
    let expected_attribute_ids = [
        Mesh::ATTRIBUTE_POSITION.id,
        Mesh::ATTRIBUTE_NORMAL.id,
        Mesh::ATTRIBUTE_UV_0.id,
    ];
    let attributes_match = mesh
        .attributes()
        .map(|(attribute, _)| attribute.id)
        .eq(expected_attribute_ids);
    if !attributes_match {
        return Err(MeshToMeshletMeshConversionError::WrongMeshVertexAttributes(
            mesh.attributes()
                .map(|(attribute, _)| format!("{attribute:?}"))
                .collect(),
        ));
    }

    match mesh.indices() {
        Some(Indices::U32(indices)) => Ok(Cow::Borrowed(indices.as_slice())),
        Some(Indices::U16(indices)) => Ok(indices.iter().copied().map(u32::from).collect()),
        _ => Err(MeshToMeshletMeshConversionError::MeshMissingIndices),
    }
}
/// Sums the triangle counts of the meshlets identified by `ids`.
/// Each meshlet's triangle list is a flat index buffer, 3 indices per triangle.
fn triangles_in_meshlets(meshlets: &Meshlets, ids: impl IntoIterator<Item = u32>) -> u32 {
    let mut total = 0u32;
    for meshlet_id in ids {
        total += meshlets.get(meshlet_id as usize).triangles.len() as u32 / 3;
    }
    total
}
/// Partitions `indices` into meshlets of at most 128 triangles / 256 vertices,
/// returning the meshlets plus per-meshlet culling data (AABB, LOD group sphere,
/// and error, seeded from `prev_lod_data` when building a simplified LOD level).
///
/// Triangles are first partitioned with METIS, weighting edges by shared vertex
/// count (via the position-only remap, so co-located vertices count as one),
/// then each partition is converted to meshlets with meshopt.
fn compute_meshlets(
    indices: &[u32],
    vertices: &VertexDataAdapter,
    position_only_vertex_remap: &[u32],
    prev_lod_data: Option<(BoundingSphere, f32)>,
) -> (Meshlets, Vec<TempMeshletCullData>) {
    // For each vertex, build a list of all triangles that use it
    let mut vertices_to_triangles = vec![Vec::new(); position_only_vertex_remap.len()];
    for (i, index) in indices.iter().enumerate() {
        let vertex_id = position_only_vertex_remap[*index as usize];
        let vertex_to_triangles = &mut vertices_to_triangles[vertex_id as usize];
        vertex_to_triangles.push(i / 3);
    }
    // For each triangle pair, count how many vertices they share
    let mut triangle_pair_to_shared_vertex_count = <HashMap<_, _>>::default();
    for vertex_triangle_ids in vertices_to_triangles {
        for (triangle_id1, triangle_id2) in vertex_triangle_ids.into_iter().tuple_combinations() {
            // Key on the (min, max) ordering so each unordered pair is counted once.
            let count = triangle_pair_to_shared_vertex_count
                .entry((
                    triangle_id1.min(triangle_id2),
                    triangle_id1.max(triangle_id2),
                ))
                .or_insert(0);
            *count += 1;
        }
    }
    // For each triangle, gather all other triangles that share at least one vertex along with their shared vertex count
    let triangle_count = indices.len() / 3;
    let mut connected_triangles_per_triangle = vec![Vec::new(); triangle_count];
    for ((triangle_id1, triangle_id2), shared_vertex_count) in triangle_pair_to_shared_vertex_count
    {
        // We record both id1->id2 and id2->id1 as adjacency is symmetrical
        connected_triangles_per_triangle[triangle_id1].push((triangle_id2, shared_vertex_count));
        connected_triangles_per_triangle[triangle_id2].push((triangle_id1, shared_vertex_count));
    }
    // The order of triangles depends on hash traversal order; to produce deterministic results, sort them
    // TODO: Wouldn't it be faster to use a `BTreeMap` above instead of `HashMap` + sorting?
    for list in connected_triangles_per_triangle.iter_mut() {
        list.sort_unstable();
    }
    // Flatten the adjacency lists into METIS CSR form: xadj offsets into
    // adjncy (neighbor ids), with adjwgt holding the matching edge weights.
    let mut xadj = Vec::with_capacity(triangle_count + 1);
    let mut adjncy = Vec::new();
    let mut adjwgt = Vec::new();
    for connected_triangles in connected_triangles_per_triangle {
        xadj.push(adjncy.len() as i32);
        for (connected_triangle_id, shared_vertex_count) in connected_triangles {
            adjncy.push(connected_triangle_id as i32);
            adjwgt.push(shared_vertex_count);
            // TODO: Additional weight based on triangle center spatial proximity?
        }
    }
    xadj.push(adjncy.len() as i32);
    let mut options = [-1; metis::NOPTIONS];
    // Fixed seed for deterministic partitioning.
    options[metis::option::Seed::INDEX] = 17;
    options[metis::option::UFactor::INDEX] = 1; // Important that there's very little imbalance between partitions
    let mut meshlet_per_triangle = vec![0; triangle_count];
    let partition_count = triangle_count.div_ceil(126); // Need to undershoot to prevent METIS from going over 128 triangles per meshlet
    Graph::new(1, partition_count as i32, &xadj, &adjncy)
        .unwrap()
        .set_options(&options)
        .set_adjwgt(&adjwgt)
        .part_recursive(&mut meshlet_per_triangle)
        .unwrap();
    // Scatter each triangle's 3 indices into its assigned partition's index list.
    let mut indices_per_meshlet = vec![Vec::new(); partition_count];
    for (triangle_id, meshlet) in meshlet_per_triangle.into_iter().enumerate() {
        let meshlet_indices = &mut indices_per_meshlet[meshlet as usize];
        let base_index = triangle_id * 3;
        meshlet_indices.extend_from_slice(&indices[base_index..(base_index + 3)]);
    }
    // Use meshopt to build meshlets from the sets of triangles
    let mut meshlets = Meshlets {
        meshlets: Vec::new(),
        vertices: Vec::new(),
        triangles: Vec::new(),
    };
    let mut cull_data = Vec::new();
    // Reads the 12-byte (Vec3) position of vertex `v` straight out of the
    // packed vertex buffer behind the adapter.
    let get_vertex = |&v: &u32| {
        *bytemuck::from_bytes::<Vec3>(
            &vertices.reader.get_ref()
                [vertices.position_offset + v as usize * vertices.vertex_stride..][..12],
        )
    };
    for meshlet_indices in &indices_per_meshlet {
        let meshlet = build_meshlets(meshlet_indices, vertices, 256, 128, 0.0);
        for meshlet in meshlet.iter() {
            // LOD 0 meshlets compute their own bounds with zero error; simplified
            // LODs inherit the group's bounds and error from `prev_lod_data`.
            let (lod_group_sphere, error) = prev_lod_data.unwrap_or_else(|| {
                let bounds = meshopt::compute_meshlet_bounds(meshlet, vertices);
                (BoundingSphere::new(bounds.center, bounds.radius), 0.0)
            });
            cull_data.push(TempMeshletCullData {
                aabb: Aabb3d::from_point_cloud(
                    Isometry3d::IDENTITY,
                    meshlet.vertices.iter().map(get_vertex),
                ),
                lod_group_sphere,
                error,
            });
        }
        merge_meshlets(&mut meshlets, meshlet);
    }
    (meshlets, cull_data)
}
/// For every meshlet in the simplification queue, finds every other queued
/// meshlet that shares at least one (position-deduplicated) vertex with it.
///
/// Returns, per queue slot, a sorted list of `(other_queue_index, shared_vertex_count)`.
fn find_connected_meshlets(
    simplification_queue: &[u32],
    meshlets: &Meshlets,
    position_only_vertex_remap: &[u32],
) -> Vec<Vec<(usize, usize)>> {
    // Step 1: for each position-only vertex, record which queued meshlets touch it
    let mut meshlets_per_vertex = vec![Vec::new(); position_only_vertex_remap.len()];
    for (queue_index, &meshlet_id) in simplification_queue.iter().enumerate() {
        let meshlet = meshlets.get(meshlet_id as _);
        for &index in meshlet.triangles {
            let vertex = position_only_vertex_remap[meshlet.vertices[index as usize] as usize];
            let users = &mut meshlets_per_vertex[vertex as usize];
            // Meshlets are visited in queue order, so checking the tail is
            // enough to dedupe repeated uses of a vertex within one meshlet
            if users.last() != Some(&queue_index) {
                users.push(queue_index);
            }
        }
    }
    // Step 2: tally shared-vertex counts per unordered meshlet pair
    let mut shared_counts = <HashMap<_, _>>::default();
    for users in meshlets_per_vertex {
        for (a, b) in users.into_iter().tuple_combinations() {
            *shared_counts.entry((a.min(b), a.max(b))).or_insert(0) += 1;
        }
    }
    // Step 3: expand the pair counts into symmetric per-meshlet adjacency lists
    let mut connectivity = vec![Vec::new(); simplification_queue.len()];
    for ((a, b), count) in shared_counts {
        connectivity[a].push((b, count));
        connectivity[b].push((a, count));
    }
    // Hash iteration order is nondeterministic; sort for reproducible output
    // TODO: Wouldn't it be faster to use a `BTreeMap` above instead of `HashMap` + sorting?
    for neighbors in &mut connectivity {
        neighbors.sort_unstable();
    }
    connectivity
}
// METIS manual: https://github.com/KarypisLab/METIS/blob/e0f1b88b8efcb24ffa0ec55eabb78fbe61e58ae7/manual/manual.pdf
/// Partitions the queued meshlets into groups of roughly
/// [`TARGET_MESHLETS_PER_GROUP`] using METIS recursive graph partitioning,
/// with edges weighted by shared vertex count so that meshlets sharing many
/// vertices land in the same group.
fn group_meshlets(
    simplification_queue: &[u32],
    meshlet_cull_data: &[TempMeshletCullData],
    connected_meshlets_per_meshlet: &[Vec<(usize, usize)>],
) -> Vec<TempMeshletGroup> {
    // Build the graph in METIS's CSR layout: xadj[i]..xadj[i+1] indexes into
    // adjncy (neighbor ids) and adjwgt (edge weights) for meshlet i.
    let mut xadj = Vec::with_capacity(simplification_queue.len() + 1);
    let mut adjncy = Vec::new();
    let mut adjwgt = Vec::new();
    for connected_meshlets in connected_meshlets_per_meshlet {
        xadj.push(adjncy.len() as i32);
        for (connected_meshlet_id, shared_vertex_count) in connected_meshlets {
            adjncy.push(*connected_meshlet_id as i32);
            adjwgt.push(*shared_vertex_count as i32);
            // TODO: Additional weight based on meshlet spatial proximity
        }
    }
    xadj.push(adjncy.len() as i32);
    let mut options = [-1; metis::NOPTIONS];
    options[metis::option::Seed::INDEX] = 17; // Fixed seed keeps partitioning deterministic
    options[metis::option::UFactor::INDEX] = 200;
    let mut group_per_meshlet = vec![0; simplification_queue.len()];
    let partition_count = simplification_queue
        .len()
        .div_ceil(TARGET_MESHLETS_PER_GROUP); // TODO: Nanite uses groups of 8-32, probably based on some kind of heuristic
    Graph::new(1, partition_count as i32, &xadj, &adjncy)
        .unwrap()
        .set_options(&options)
        .set_adjwgt(&adjwgt)
        .part_recursive(&mut group_per_meshlet)
        .unwrap();
    // Gather meshlets into their assigned groups, merging bounds as we go
    let mut groups = vec![TempMeshletGroup::default(); partition_count];
    for (i, meshlet_group) in group_per_meshlet.into_iter().enumerate() {
        let group = &mut groups[meshlet_group as usize];
        let meshlet_id = simplification_queue[i];
        group.meshlets.push(meshlet_id);
        let data = &meshlet_cull_data[meshlet_id as usize];
        group.aabb = group.aabb.merge(&data.aabb);
        group.lod_bounds = merge_spheres(group.lod_bounds, data.lod_group_sphere);
    }
    groups
}
/// Marks (locks) every vertex that lies on a border between two meshlet
/// groups, so that simplification cannot move group boundaries.
fn lock_group_borders(
    vertex_locks: &mut [bool],
    groups: &[TempMeshletGroup],
    meshlets: &Meshlets,
    position_only_vertex_remap: &[u32],
) {
    // Claim state per position-only vertex:
    // UNCLAIMED = untouched, LOCKED = touched by 2+ groups, else the claiming group id.
    const UNCLAIMED: i32 = -1;
    const LOCKED: i32 = -2;
    let mut claims = vec![UNCLAIMED; position_only_vertex_remap.len()];
    // Iterate over position-only based vertices of all meshlets in all groups
    for (group_id, group) in groups.iter().enumerate() {
        let group_id = group_id as i32;
        for &meshlet_id in &group.meshlets {
            let meshlet = meshlets.get(meshlet_id as usize);
            for &index in meshlet.triangles {
                let vertex =
                    position_only_vertex_remap[meshlet.vertices[index as usize] as usize] as usize;
                let claim = &mut claims[vertex];
                if *claim == UNCLAIMED || *claim == group_id {
                    // Unclaimed, or already claimed by this same group: claim it
                    *claim = group_id;
                } else {
                    // A different group already claimed it (or it's locked): lock it
                    *claim = LOCKED;
                }
            }
        }
    }
    // Propagate lock state back to the full (non-position-deduplicated) vertices
    for (vertex, lock) in vertex_locks.iter_mut().enumerate() {
        *lock = claims[position_only_vertex_remap[vertex] as usize] == LOCKED;
    }
}
/// Simplifies the combined geometry of a meshlet group to roughly half its
/// triangle count, returning the simplified index buffer and the resulting
/// absolute simplification error.
///
/// Returns `None` if meshopt could not shrink the index count to at most
/// [`SIMPLIFICATION_FAILURE_PERCENTAGE`] of the original.
fn simplify_meshlet_group(
    group: &TempMeshletGroup,
    meshlets: &Meshlets,
    vertices: &VertexDataAdapter<'_>,
    vertex_normals: &[f32],
    vertex_stride: usize,
    vertex_locks: &[bool],
) -> Option<(Vec<u32>, f32)> {
    // Build a new index buffer into the mesh vertex data by combining all meshlet data in the group
    let group_indices = group
        .meshlets
        .iter()
        .flat_map(|&meshlet_id| {
            let meshlet = meshlets.get(meshlet_id as _);
            meshlet
                .triangles
                .iter()
                .map(|&meshlet_index| meshlet.vertices[meshlet_index as usize])
        })
        .collect::<Vec<_>>();
    // Simplify the group to ~50% triangle count
    let mut error = 0.0;
    // Normal attributes are weighted at 0.5 per channel; locked (group-border)
    // vertices must stay fixed so neighboring groups remain watertight.
    let simplified_group_indices = simplify_with_attributes_and_locks(
        &group_indices,
        vertices,
        vertex_normals,
        &[0.5; 3],
        vertex_stride,
        vertex_locks,
        group_indices.len() / 2,
        f32::MAX,
        SimplifyOptions::Sparse | SimplifyOptions::ErrorAbsolute,
        Some(&mut error),
    );
    // Check if we were able to simplify
    if simplified_group_indices.len() as f32 / group_indices.len() as f32
        > SIMPLIFICATION_FAILURE_PERCENTAGE
    {
        return None;
    }
    Some((simplified_group_indices, error))
}
/// Appends all meshlets from `merge` into `meshlets`, rebasing their vertex
/// and triangle offsets past the existing data.
fn merge_meshlets(meshlets: &mut Meshlets, merge: Meshlets) {
    let base_vertex = meshlets.vertices.len() as u32;
    let base_triangle = meshlets.triangles.len() as u32;
    meshlets.vertices.extend_from_slice(&merge.vertices);
    meshlets.triangles.extend_from_slice(&merge.triangles);
    for mut meshlet in merge.meshlets {
        // Offsets were relative to `merge`'s buffers; rebase onto the combined buffers
        meshlet.vertex_offset += base_vertex;
        meshlet.triangle_offset += base_triangle;
        meshlets.meshlets.push(meshlet);
    }
}
/// Packs one meshlet's vertex data into the compressed runtime formats:
/// bit-packed quantized positions, octahedral-encoded normals, and raw UVs,
/// then records the resulting [`Meshlet`] metadata.
fn build_and_compress_per_meshlet_vertex_data(
    meshlet: &meshopt_Meshlet,
    meshlet_vertex_ids: &[u32],
    vertex_buffer: &[u8],
    vertex_stride: usize,
    vertex_positions: &mut BitVec<u32, Lsb0>,
    vertex_normals: &mut Vec<u32>,
    vertex_uvs: &mut Vec<Vec2>,
    meshlets: &mut Vec<Meshlet>,
    vertex_position_quantization_factor: u8,
) {
    let start_vertex_position_bit = vertex_positions.len() as u32;
    let start_vertex_attribute_id = vertex_normals.len() as u32;
    // Fixed-point scale: 2^quantization_factor steps per centimeter
    let quantization_factor =
        (1 << vertex_position_quantization_factor) as f32 * CENTIMETERS_PER_METER;
    let mut min_quantized_position_channels = IVec3::MAX;
    let mut max_quantized_position_channels = IVec3::MIN;
    // Lossy vertex compression
    // 256 matches the max vertex count passed to `build_meshlets`, so a fixed array suffices
    let mut quantized_positions = [IVec3::ZERO; 256];
    for (i, vertex_id) in meshlet_vertex_ids.iter().enumerate() {
        // Load source vertex attributes
        // Source layout per vertex: position f32x3 | normal f32x3 | uv f32x2
        let vertex_id_byte = *vertex_id as usize * vertex_stride;
        let vertex_data = &vertex_buffer[vertex_id_byte..(vertex_id_byte + vertex_stride)];
        let position = Vec3::from_slice(bytemuck::cast_slice(&vertex_data[0..12]));
        let normal = Vec3::from_slice(bytemuck::cast_slice(&vertex_data[12..24]));
        let uv = Vec2::from_slice(bytemuck::cast_slice(&vertex_data[24..32]));
        // Copy uncompressed UV
        vertex_uvs.push(uv);
        // Compress normal
        vertex_normals.push(pack2x16snorm(octahedral_encode(normal)));
        // Quantize position to a fixed-point IVec3 (+0.5 rounds to nearest)
        let quantized_position = (position * quantization_factor + 0.5).as_ivec3();
        quantized_positions[i] = quantized_position;
        // Compute per X/Y/Z-channel quantized position min/max for this meshlet
        min_quantized_position_channels = min_quantized_position_channels.min(quantized_position);
        max_quantized_position_channels = max_quantized_position_channels.max(quantized_position);
    }
    // Calculate bits needed to encode each quantized vertex position channel based on the range of each channel
    let range = max_quantized_position_channels - min_quantized_position_channels + 1;
    let bits_per_vertex_position_channel_x = log2(range.x as f32).ceil() as u8;
    let bits_per_vertex_position_channel_y = log2(range.y as f32).ceil() as u8;
    let bits_per_vertex_position_channel_z = log2(range.z as f32).ceil() as u8;
    // Lossless encoding of vertex positions in the minimum number of bits per channel
    for quantized_position in quantized_positions.iter().take(meshlet_vertex_ids.len()) {
        // Remap [range_min, range_max] IVec3 to [0, range_max - range_min] UVec3
        let position = (quantized_position - min_quantized_position_channels).as_uvec3();
        // Store as a packed bitstream (low bits first, per channel)
        vertex_positions.extend_from_bitslice(
            &position.x.view_bits::<Lsb0>()[..bits_per_vertex_position_channel_x as usize],
        );
        vertex_positions.extend_from_bitslice(
            &position.y.view_bits::<Lsb0>()[..bits_per_vertex_position_channel_y as usize],
        );
        vertex_positions.extend_from_bitslice(
            &position.z.view_bits::<Lsb0>()[..bits_per_vertex_position_channel_z as usize],
        );
    }
    // Record the metadata needed to decode this meshlet on the GPU
    meshlets.push(Meshlet {
        start_vertex_position_bit,
        start_vertex_attribute_id,
        start_index_id: meshlet.triangle_offset,
        vertex_count_minus_one: (meshlet.vertex_count - 1) as u8,
        triangle_count: meshlet.triangle_count as u8,
        padding: 0,
        bits_per_vertex_position_channel_x,
        bits_per_vertex_position_channel_y,
        bits_per_vertex_position_channel_z,
        vertex_position_quantization_factor,
        min_vertex_position_channel_x: min_quantized_position_channels.x as f32,
        min_vertex_position_channel_y: min_quantized_position_channels.y as f32,
        min_vertex_position_channel_z: min_quantized_position_channels.z as f32,
    });
}
/// Computes a bounding sphere enclosing both `a` and `b`.
///
/// If one sphere already contains the other (or one is degenerate, or the
/// centers coincide), the larger sphere is returned unchanged; otherwise the
/// minimal enclosing sphere of the pair is constructed.
fn merge_spheres(a: BoundingSphere, b: BoundingSphere) -> BoundingSphere {
    let small = a.radius().min(b.radius());
    let big = a.radius().max(b.radius());
    let dist = a.center.distance(b.center);
    if dist + small <= big || small == 0.0 || dist == 0.0 {
        // One sphere swallows the other; keep the bigger of the two
        if a.radius() > b.radius() {
            a
        } else {
            b
        }
    } else {
        // The merged sphere spans from the far side of one sphere to the far
        // side of the other along the line between the centers
        let radius = (small + big + dist) / 2.0;
        let center =
            (a.center + b.center + (a.radius() - b.radius()) * (a.center - b.center) / dist) / 2.0;
        BoundingSphere::new(center, radius)
    }
}
/// Per-meshlet culling data gathered during mesh conversion, before being
/// packed into the final asset format.
#[derive(Copy, Clone)]
struct TempMeshletCullData {
    /// Axis-aligned bounding box of the meshlet's vertices.
    aabb: Aabb3d,
    /// Bounding sphere of the meshlet's LOD group.
    lod_group_sphere: BoundingSphere,
    /// Simplification error; 0.0 for base-LOD meshlets, otherwise carried
    /// over from the previous LOD's data (see the meshlet build step).
    error: f32,
}
/// A group of meshlets simplified together, built by [`group_meshlets`].
#[derive(Clone)]
struct TempMeshletGroup {
    /// Merged AABB of all meshlets in the group.
    aabb: Aabb3d,
    /// Merged LOD bounding sphere of all meshlets in the group.
    lod_bounds: BoundingSphere,
    /// Error of the group's simplified parent; `f32::MAX` until one is
    /// assigned (see `Default`).
    parent_error: f32,
    /// Ids of the meshlets belonging to this group.
    meshlets: SmallVec<[u32; TARGET_MESHLETS_PER_GROUP]>,
}
impl Default for TempMeshletGroup {
    /// An empty group with degenerate bounds that any merge will replace.
    fn default() -> Self {
        Self {
            aabb: aabb_default(), // Default AABB to merge into
            lod_bounds: BoundingSphere::new(Vec3A::ZERO, 0.0),
            parent_error: f32::MAX, // Sentinel: no parent error assigned yet
            meshlets: SmallVec::new(),
        }
    }
}
// All the BVH build code was stolen from https://github.com/SparkyPotato/radiance/blob/4aa17a3a5be7a0466dc69713e249bbcee9f46057/crates/rad-renderer/src/assets/mesh/virtual_mesh.rs because it works and I'm lazy and don't want to reimplement it
/// A node of the in-progress meshlet BVH.
struct TempBvhNode {
    /// Index of the meshlet group this leaf represents, or `u32::MAX` for
    /// interior nodes.
    group: u32,
    /// Bounding box of this node's contents.
    aabb: Aabb3d,
    /// Indices into [`BvhBuilder::nodes`] of this node's children (BVH8: up to 8).
    children: SmallVec<[u32; 8]>,
}
/// Incrementally builds the meshlet BVH, one LOD level at a time.
#[derive(Default)]
struct BvhBuilder {
    /// Flat pool of temporary BVH nodes.
    nodes: Vec<TempBvhNode>,
    /// Per LOD, the range of indices into `nodes` holding that LOD's leaves.
    lods: Vec<Range<u32>>,
}
impl BvhBuilder {
    /// Registers the groups of one LOD level as BVH leaves.
    ///
    /// `offset` is the index of the first group of this LOD within
    /// `all_groups`; everything from there on is added.
    fn add_lod(&mut self, offset: u32, all_groups: &[TempMeshletGroup]) {
        let first = self.nodes.len() as u32;
        self.nodes.extend(
            all_groups
                .iter()
                .enumerate()
                // `enumerate` runs before `skip`, so `i` below is the
                // absolute index into `all_groups`
                .skip(offset as _)
                .map(|(i, group)| TempBvhNode {
                    group: i as u32,
                    aabb: group.aabb,
                    children: SmallVec::new(),
                }),
        );
        let end = self.nodes.len() as u32;
        // Skip LODs that contributed no groups
        if first != end {
            self.lods.push(first..end);
        }
    }
fn surface_area(&self, nodes: &[u32]) -> f32 {
nodes
.iter()
.map(|&x| self.nodes[x as usize].aabb)
.reduce(|a, b| a.merge(&b))
.expect("cannot find surface area of zero nodes")
.visible_area()
}
    /// Reorders `nodes` so that slicing them by `splits` yields 8 spatially
    /// coherent child sets, approximately minimizing surface-area-heuristic
    /// (SAH) cost.
    ///
    /// `splits[k]` is the number of nodes destined for child slot `k`.
    fn sort_nodes_by_sah(&self, nodes: &mut [u32], splits: [usize; 8]) {
        // We use a BVH8, so just recursively binary split 3 times for near-optimal SAH
        for i in 0..3 {
            let parts = 1 << i; // 2^i
            let nodes_per_split = 8 >> i; // 8 / 2^i
            let half_count = nodes_per_split / 2;
            let mut offset = 0;
            for p in 0..parts {
                let first = p * nodes_per_split;
                // Total node counts destined for the left (s0) and right (s1)
                // halves of this partition, per the requested split sizes
                let mut s0 = 0;
                let mut s1 = 0;
                for i in 0..half_count {
                    s0 += splits[first + i];
                    s1 += splits[first + half_count + i];
                }
                let c = s0 + s1;
                let nodes = &mut nodes[offset..(offset + c)];
                offset += c;
                // Try splitting along each axis (sorted by AABB center) and
                // keep the axis with the lowest combined surface area
                let mut cost = f32::MAX;
                let mut axis = 0;
                let key = |x, ax| self.nodes[x as usize].aabb.center()[ax];
                for ax in 0..3 {
                    nodes.sort_unstable_by(|&x, &y| key(x, ax).partial_cmp(&key(y, ax)).unwrap());
                    let (left, right) = nodes.split_at(s0);
                    let c = self.surface_area(left) + self.surface_area(right);
                    if c < cost {
                        axis = ax;
                        cost = c;
                    }
                }
                // The slice is currently sorted by axis 2 (the last one
                // tried); only re-sort if a different axis won
                if axis != 2 {
                    nodes.sort_unstable_by(|&x, &y| {
                        key(x, axis).partial_cmp(&key(y, axis)).unwrap()
                    });
                }
            }
        }
    }
fn build_temp_inner(&mut self, nodes: &mut [u32], optimize: bool) -> u32 {
let count = nodes.len();
if count == 1 {
nodes[0]
} else if count <= 8 {
let i = self.nodes.len();
self.nodes.push(TempBvhNode {
group: u32::MAX,
aabb: aabb_default(),
children: nodes.iter().copied().collect(),
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/persistent_buffer.rs | crates/bevy_pbr/src/meshlet/persistent_buffer.rs | use bevy_render::{
render_resource::{
BindingResource, Buffer, BufferAddress, BufferDescriptor, BufferUsages,
CommandEncoderDescriptor, COPY_BUFFER_ALIGNMENT,
},
renderer::{RenderDevice, RenderQueue},
};
use core::{num::NonZero, ops::Range};
use range_alloc::RangeAllocator;
/// Wrapper for a GPU buffer holding a large amount of data that persists across frames.
///
/// Writes are queued CPU-side via [`Self::queue_write`] and flushed with
/// [`Self::perform_writes`]; freed ranges are recycled by a range allocator.
pub struct PersistentGpuBuffer<T: PersistentGpuBufferable> {
    /// Debug label for the buffer.
    label: &'static str,
    /// Handle to the GPU buffer.
    buffer: Buffer,
    /// Tracks free slices of the buffer.
    allocation_planner: RangeAllocator<BufferAddress>,
    /// Queue of pending writes, and associated metadata:
    /// `(data, metadata, destination byte range)`.
    write_queue: Vec<(T, T::Metadata, Range<BufferAddress>)>,
}
impl<T: PersistentGpuBufferable> PersistentGpuBuffer<T> {
    /// Create a new persistent buffer.
    ///
    /// The underlying GPU buffer starts at size 0 and is grown on demand by
    /// [`Self::perform_writes`].
    pub fn new(label: &'static str, render_device: &RenderDevice) -> Self {
        Self {
            label,
            buffer: render_device.create_buffer(&BufferDescriptor {
                label: Some(label),
                size: 0,
                usage: BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
                mapped_at_creation: false,
            }),
            allocation_planner: RangeAllocator::new(0..0),
            write_queue: Vec::new(),
        }
    }
    /// Queue an item of type T to be added to the buffer, returning the byte range within the buffer that it will be located at.
    pub fn queue_write(&mut self, data: T, metadata: T::Metadata) -> Range<BufferAddress> {
        let data_size = data.size_in_bytes() as u64;
        // Writes must respect wgpu's copy alignment
        debug_assert!(data_size.is_multiple_of(COPY_BUFFER_ALIGNMENT));
        // Fast path: an existing free range fits the data
        if let Ok(buffer_slice) = self.allocation_planner.allocate_range(data_size) {
            self.write_queue
                .push((data, metadata, buffer_slice.clone()));
            return buffer_slice;
        }
        // No room: grow the planned capacity by max(2x current size, data
        // size). The GPU buffer itself is only resized later, in
        // `perform_writes`, via `expand_buffer`.
        let buffer_size = self.allocation_planner.initial_range();
        let double_buffer_size = (buffer_size.end - buffer_size.start) * 2;
        let new_size = double_buffer_size.max(data_size);
        self.allocation_planner.grow_to(buffer_size.end + new_size);
        let buffer_slice = self.allocation_planner.allocate_range(data_size).unwrap();
        self.write_queue
            .push((data, metadata, buffer_slice.clone()));
        buffer_slice
    }
    /// Upload all pending data to the GPU buffer.
    pub fn perform_writes(&mut self, render_queue: &RenderQueue, render_device: &RenderDevice) {
        // If queue_write() outgrew the GPU buffer, reallocate and copy first
        if self.allocation_planner.initial_range().end > self.buffer.size() {
            self.expand_buffer(render_device, render_queue);
        }
        let queue_count = self.write_queue.len();
        for (data, metadata, buffer_slice) in self.write_queue.drain(..) {
            let buffer_slice_size =
                NonZero::<u64>::new(buffer_slice.end - buffer_slice.start).unwrap();
            // Serialize directly into the queue's staging view for this slice
            let mut buffer_view = render_queue
                .write_buffer_with(&self.buffer, buffer_slice.start, buffer_slice_size)
                .unwrap();
            data.write_bytes_le(metadata, &mut buffer_view, buffer_slice.start);
        }
        // If this flush used under 30% of the queue's capacity, drop the
        // allocation so a one-off large frame doesn't pin memory forever
        let queue_saturation = queue_count as f32 / self.write_queue.capacity() as f32;
        if queue_saturation < 0.3 {
            self.write_queue = Vec::new();
        }
    }
    /// Mark a section of the GPU buffer as no longer needed.
    ///
    /// The range becomes available for reuse by future `queue_write` calls.
    pub fn mark_slice_unused(&mut self, buffer_slice: Range<BufferAddress>) {
        self.allocation_planner.free_range(buffer_slice);
    }
    /// Bind the entire buffer as a shader resource.
    pub fn binding(&self) -> BindingResource<'_> {
        self.buffer.as_entire_binding()
    }
    /// Expand the buffer by creating a new buffer and copying old data over.
    fn expand_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
        // New capacity matches whatever the allocation planner has grown to
        let size = self.allocation_planner.initial_range();
        let new_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(self.label),
            size: size.end - size.start,
            usage: BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("persistent_gpu_buffer_expand"),
        });
        // Preserve existing contents by copying the old buffer in full
        command_encoder.copy_buffer_to_buffer(&self.buffer, 0, &new_buffer, 0, self.buffer.size());
        render_queue.submit([command_encoder.finish()]);
        self.buffer = new_buffer;
    }
}
/// A trait representing data that can be written to a [`PersistentGpuBuffer`].
pub trait PersistentGpuBufferable {
    /// Additional metadata associated with each item, made available during `write_bytes_le`
    /// (such as base offsets of sibling buffers).
    type Metadata;
    /// The size in bytes of `self`. This will determine the size of the buffer passed into
    /// `write_bytes_le`.
    ///
    /// All data written must be in a multiple of `wgpu::COPY_BUFFER_ALIGNMENT` bytes. Failure to do so will
    /// result in a panic when using [`PersistentGpuBuffer`].
    fn size_in_bytes(&self) -> usize;
    /// Convert `self` + `metadata` into bytes (little-endian), and write to the provided buffer slice.
    /// Any bytes not written to in the slice will be zeroed out when uploaded to the GPU.
    ///
    /// `buffer_offset` is the absolute byte offset of `buffer_slice` within
    /// the full GPU buffer.
    fn write_bytes_le(
        &self,
        metadata: Self::Metadata,
        buffer_slice: &mut [u8],
        buffer_offset: BufferAddress,
    );
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs | crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs | use crate::meshlet::asset::{BvhNode, MeshletAabb, MeshletCullData};
use super::{asset::Meshlet, persistent_buffer::PersistentGpuBuffer, MeshletMesh};
use alloc::sync::Arc;
use bevy_asset::{AssetId, Assets};
use bevy_ecs::{
resource::Resource,
system::{Commands, Res, ResMut},
};
use bevy_math::Vec2;
use bevy_platform::collections::HashMap;
use bevy_render::{
render_resource::BufferAddress,
renderer::{RenderDevice, RenderQueue},
};
use core::ops::Range;
/// Manages uploading [`MeshletMesh`] asset data to the GPU.
#[derive(Resource)]
pub struct MeshletMeshManager {
    // One persistent GPU buffer per kind of per-mesh data.
    pub vertex_positions: PersistentGpuBuffer<Arc<[u32]>>,
    pub vertex_normals: PersistentGpuBuffer<Arc<[u32]>>,
    pub vertex_uvs: PersistentGpuBuffer<Arc<[Vec2]>>,
    pub indices: PersistentGpuBuffer<Arc<[u8]>>,
    pub bvh_nodes: PersistentGpuBuffer<Arc<[BvhNode]>>,
    pub meshlets: PersistentGpuBuffer<Arc<[Meshlet]>>,
    pub meshlet_cull_data: PersistentGpuBuffer<Arc<[MeshletCullData]>>,
    // Per uploaded asset: its 7 buffer slices (in field declaration order),
    // its AABB, and its BVH depth.
    meshlet_mesh_slices:
        HashMap<AssetId<MeshletMesh>, ([Range<BufferAddress>; 7], MeshletAabb, u32)>,
}
/// Creates the [`MeshletMeshManager`] resource with empty GPU buffers.
pub fn init_meshlet_mesh_manager(mut commands: Commands, render_device: Res<RenderDevice>) {
    commands.insert_resource(MeshletMeshManager {
        vertex_positions: PersistentGpuBuffer::new("meshlet_vertex_positions", &render_device),
        vertex_normals: PersistentGpuBuffer::new("meshlet_vertex_normals", &render_device),
        vertex_uvs: PersistentGpuBuffer::new("meshlet_vertex_uvs", &render_device),
        indices: PersistentGpuBuffer::new("meshlet_indices", &render_device),
        bvh_nodes: PersistentGpuBuffer::new("meshlet_bvh_nodes", &render_device),
        meshlets: PersistentGpuBuffer::new("meshlets", &render_device),
        meshlet_cull_data: PersistentGpuBuffer::new("meshlet_cull_data", &render_device),
        meshlet_mesh_slices: HashMap::default(),
    });
}
impl MeshletMeshManager {
    // Returns the index of the root BVH node, as well as the depth of the BVH.
    /// Queues the asset's data for GPU upload the first time it is seen, and
    /// returns `(root BVH node index, mesh AABB, BVH depth)`.
    pub fn queue_upload_if_needed(
        &mut self,
        asset_id: AssetId<MeshletMesh>,
        assets: &mut Assets<MeshletMesh>,
    ) -> (u32, MeshletAabb, u32) {
        let queue_meshlet_mesh = |asset_id: &AssetId<MeshletMesh>| {
            // The asset is removed from CPU-side storage here; after upload
            // the data lives only on the GPU (and in the returned Arcs)
            let meshlet_mesh = assets.remove_untracked(*asset_id).expect(
                "MeshletMesh asset was already unloaded but is not registered with MeshletMeshManager",
            );
            let vertex_positions_slice = self
                .vertex_positions
                .queue_write(Arc::clone(&meshlet_mesh.vertex_positions), ());
            let vertex_normals_slice = self
                .vertex_normals
                .queue_write(Arc::clone(&meshlet_mesh.vertex_normals), ());
            let vertex_uvs_slice = self
                .vertex_uvs
                .queue_write(Arc::clone(&meshlet_mesh.vertex_uvs), ());
            let indices_slice = self
                .indices
                .queue_write(Arc::clone(&meshlet_mesh.indices), ());
            // Meshlet uploads need the base offsets of the buffers they index into
            let meshlets_slice = self.meshlets.queue_write(
                Arc::clone(&meshlet_mesh.meshlets),
                (
                    vertex_positions_slice.start,
                    vertex_normals_slice.start,
                    indices_slice.start,
                ),
            );
            // Element index (not byte offset) of this mesh's first meshlet,
            // passed as metadata to the BVH node upload
            let base_meshlet_index = (meshlets_slice.start / size_of::<Meshlet>() as u64) as u32;
            let bvh_node_slice = self
                .bvh_nodes
                .queue_write(Arc::clone(&meshlet_mesh.bvh), base_meshlet_index);
            let meshlet_cull_data_slice = self
                .meshlet_cull_data
                .queue_write(Arc::clone(&meshlet_mesh.meshlet_cull_data), ());
            (
                [
                    vertex_positions_slice,
                    vertex_normals_slice,
                    vertex_uvs_slice,
                    indices_slice,
                    bvh_node_slice,
                    meshlets_slice,
                    meshlet_cull_data_slice,
                ],
                meshlet_mesh.aabb,
                meshlet_mesh.bvh_depth,
            )
        };
        // If the MeshletMesh asset has not been uploaded to the GPU yet, queue it for uploading
        let ([_, _, _, _, bvh_node_slice, _, _], aabb, bvh_depth) = self
            .meshlet_mesh_slices
            .entry(asset_id)
            .or_insert_with_key(queue_meshlet_mesh)
            .clone();
        (
            // Convert the BVH slice's byte offset into the root node's element index
            (bvh_node_slice.start / size_of::<BvhNode>() as u64) as u32,
            aabb,
            bvh_depth,
        )
    }
    /// Frees all GPU buffer ranges held by the given asset so the space can
    /// be reused by later uploads. No-op if the asset was never uploaded.
    pub fn remove(&mut self, asset_id: &AssetId<MeshletMesh>) {
        if let Some((
            [vertex_positions_slice, vertex_normals_slice, vertex_uvs_slice, indices_slice, bvh_node_slice, meshlets_slice, meshlet_cull_data_slice],
            _,
            _,
        )) = self.meshlet_mesh_slices.remove(asset_id)
        {
            self.vertex_positions
                .mark_slice_unused(vertex_positions_slice);
            self.vertex_normals.mark_slice_unused(vertex_normals_slice);
            self.vertex_uvs.mark_slice_unused(vertex_uvs_slice);
            self.indices.mark_slice_unused(indices_slice);
            self.bvh_nodes.mark_slice_unused(bvh_node_slice);
            self.meshlets.mark_slice_unused(meshlets_slice);
            self.meshlet_cull_data
                .mark_slice_unused(meshlet_cull_data_slice);
        }
    }
}
/// Upload all newly queued [`MeshletMesh`] asset data to the GPU.
///
/// Flushes the pending write queue of every buffer owned by
/// [`MeshletMeshManager`].
pub fn perform_pending_meshlet_mesh_writes(
    mut meshlet_mesh_manager: ResMut<MeshletMeshManager>,
    render_queue: Res<RenderQueue>,
    render_device: Res<RenderDevice>,
) {
    meshlet_mesh_manager
        .vertex_positions
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .vertex_normals
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .vertex_uvs
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .indices
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .bvh_nodes
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .meshlets
        .perform_writes(&render_queue, &render_device);
    meshlet_mesh_manager
        .meshlet_cull_data
        .perform_writes(&render_queue, &render_device);
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/volumetric_fog/render.rs | crates/bevy_pbr/src/volumetric_fog/render.rs | //! Rendering of fog volumes.
use core::array;
use bevy_asset::{load_embedded_asset, AssetId, AssetServer, Handle};
use bevy_camera::Camera3d;
use bevy_color::ColorToComponents as _;
use bevy_core_pipeline::prepass::{
DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass,
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{Has, QueryItem, With},
resource::Resource,
system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut},
world::World,
};
use bevy_image::{BevyDefault, Image};
use bevy_light::{FogVolume, VolumetricFog, VolumetricLight};
use bevy_math::{vec4, Affine3A, Mat4, Vec3, Vec3A, Vec4};
use bevy_mesh::{Mesh, MeshVertexBufferLayoutRef};
use bevy_render::{
diagnostic::RecordDiagnostics,
mesh::{allocator::MeshAllocator, RenderMesh, RenderMeshBufferInfo},
render_asset::RenderAssets,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_resource::{
binding_types::{
sampler, texture_3d, texture_depth_2d, texture_depth_2d_multisampled, uniform_buffer,
},
BindGroupLayoutDescriptor, BindGroupLayoutEntries, BindingResource, BlendComponent,
BlendFactor, BlendOperation, BlendState, CachedRenderPipelineId, ColorTargetState,
ColorWrites, DynamicBindGroupEntries, DynamicUniformBuffer, Face, FragmentState, LoadOp,
Operations, PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor,
RenderPipelineDescriptor, SamplerBindingType, ShaderStages, ShaderType,
SpecializedRenderPipeline, SpecializedRenderPipelines, StoreOp, TextureFormat,
TextureSampleType, TextureUsages, VertexState,
},
renderer::{RenderContext, RenderDevice, RenderQueue},
sync_world::RenderEntity,
texture::GpuImage,
view::{ExtractedView, Msaa, ViewDepthTexture, ViewTarget, ViewUniformOffset},
Extract,
};
use bevy_shader::Shader;
use bevy_transform::components::GlobalTransform;
use bevy_utils::prelude::default;
use bitflags::bitflags;
use crate::{
ExtractedAtmosphere, MeshPipelineViewLayoutKey, MeshPipelineViewLayouts, MeshViewBindGroup,
ViewEnvironmentMapUniformOffset, ViewFogUniformOffset, ViewLightProbesUniformOffset,
ViewLightsUniformOffset, ViewScreenSpaceReflectionsUniformOffset,
};
use super::FogAssets;
bitflags! {
    /// Flags that describe the bind group layout used to render volumetric fog.
    ///
    /// Every combination of these flags corresponds to one precompiled layout
    /// in [`VolumetricFogPipeline::volumetric_view_bind_group_layouts`].
    #[derive(Clone, Copy, PartialEq)]
    struct VolumetricFogBindGroupLayoutKey: u8 {
        /// The framebuffer is multisampled.
        const MULTISAMPLED = 0x1;
        /// The volumetric fog has a 3D voxel density texture.
        const DENSITY_TEXTURE = 0x2;
    }
}
bitflags! {
    /// Flags that describe the rasterization pipeline used to render volumetric
    /// fog.
    ///
    /// Forms the flag portion of [`VolumetricFogPipelineKey`] when
    /// specializing the render pipeline.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct VolumetricFogPipelineKeyFlags: u8 {
        /// The view's color format has high dynamic range.
        const HDR = 0x1;
        /// The volumetric fog has a 3D voxel density texture.
        const DENSITY_TEXTURE = 0x2;
    }
}
/// The total number of bind group layouts.
///
/// This is the total number of combinations of all
/// [`VolumetricFogBindGroupLayoutKey`] flags: the all-bits-set value plus one.
const VOLUMETRIC_FOG_BIND_GROUP_LAYOUT_COUNT: usize =
    VolumetricFogBindGroupLayoutKey::all().bits() as usize + 1;
/// A matrix that converts from local 1×1×1 space to UVW 3D density texture
/// space.
///
/// A pure translation by (0.5, 0.5, 0.5): it maps the unit cube centered on
/// the origin into the [0, 1]³ texture coordinate range.
static UVW_FROM_LOCAL: Mat4 = Mat4::from_cols(
    vec4(1.0, 0.0, 0.0, 0.0),
    vec4(0.0, 1.0, 0.0, 0.0),
    vec4(0.0, 0.0, 1.0, 0.0),
    vec4(0.5, 0.5, 0.5, 1.0),
);
/// The GPU pipeline for the volumetric fog postprocessing effect.
#[derive(Resource)]
pub struct VolumetricFogPipeline {
    /// A reference to the shared set of mesh pipeline view layouts.
    mesh_view_layouts: MeshPipelineViewLayouts,
    /// All bind group layouts.
    ///
    /// Since there aren't too many of these, we precompile them all: one per
    /// combination of [`VolumetricFogBindGroupLayoutKey`] flags, indexed by
    /// the key's bits.
    volumetric_view_bind_group_layouts:
        [BindGroupLayoutDescriptor; VOLUMETRIC_FOG_BIND_GROUP_LAYOUT_COUNT],
    /// The shader asset handle.
    shader: Handle<Shader>,
}
/// The two render pipelines that we use for fog volumes: one for when a 3D
/// density texture is present and one for when it isn't.
///
/// Both are ids into the pipeline cache, specialized per view.
#[derive(Component)]
pub struct ViewVolumetricFogPipelines {
    /// The render pipeline that we use when no density texture is present, and
    /// the density distribution is uniform.
    pub textureless: CachedRenderPipelineId,
    /// The render pipeline that we use when a density texture is present.
    pub textured: CachedRenderPipelineId,
}
/// The node in the render graph, part of the postprocessing stack, that
/// implements volumetric fog.
///
/// Derives `Default` so it can be constructed without arguments.
#[derive(Default)]
pub struct VolumetricFogNode;
/// Identifies a single specialization of the volumetric fog shader.
///
/// Used as the key type for [`SpecializedRenderPipelines`].
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct VolumetricFogPipelineKey {
    /// The layout of the view, which is needed for the raymarching.
    mesh_pipeline_view_key: MeshPipelineViewLayoutKey,
    /// The vertex buffer layout of the primitive.
    ///
    /// Both planes (used when the camera is inside the fog volume) and cubes
    /// (used when the camera is outside the fog volume) use identical vertex
    /// buffer layouts, so we only need one of them.
    vertex_buffer_layout: MeshVertexBufferLayoutRef,
    /// Flags that specify features on the pipeline key.
    flags: VolumetricFogPipelineKeyFlags,
}
/// The same as [`VolumetricFog`] and [`FogVolume`], but formatted for
/// the GPU.
///
/// See the documentation of those structures for more information on these
/// fields.
#[derive(ShaderType)]
pub struct VolumetricFogUniform {
    /// Transform from the fog volume's local 1×1×1 space to clip space.
    clip_from_local: Mat4,
    /// The transform from world space to 3D density texture UVW space.
    uvw_from_world: Mat4,
    /// View-space plane equations of the far faces of the fog volume cuboid.
    ///
    /// The vector takes the form V = (N, -N⋅Q), where N is the normal of the
    /// plane and Q is any point in it, in view space. The equation of the plane
    /// for homogeneous point P = (Px, Py, Pz, Pw) is V⋅P = 0.
    far_planes: [Vec4; 3],
    // The fields below mirror the identically-named settings on
    // [`VolumetricFog`] / [`FogVolume`]; see those types for semantics.
    fog_color: Vec3,
    light_tint: Vec3,
    ambient_color: Vec3,
    ambient_intensity: f32,
    step_count: u32,
    /// The radius of a sphere that bounds the fog volume in view space.
    bounding_radius: f32,
    absorption: f32,
    scattering: f32,
    density: f32,
    density_texture_offset: Vec3,
    scattering_asymmetry: f32,
    light_intensity: f32,
    jitter_strength: f32,
}
/// Specifies the offset within the [`VolumetricFogUniformBuffer`] of the
/// [`VolumetricFogUniform`] for a specific view.
///
/// Holds one [`ViewFogVolume`] entry per fog volume visible to the view.
#[derive(Component, Deref, DerefMut)]
pub struct ViewVolumetricFog(Vec<ViewFogVolume>);
/// Information that the render world needs to maintain about each fog volume.
pub struct ViewFogVolume {
    /// The 3D voxel density texture for this volume, if present.
    ///
    /// `None` means the volume has a uniform density distribution.
    density_texture: Option<AssetId<Image>>,
    /// The offset of this view's [`VolumetricFogUniform`] structure within the
    /// [`VolumetricFogUniformBuffer`].
    uniform_buffer_offset: u32,
    /// True if the camera is outside the fog volume; false if it's inside the
    /// fog volume.
    exterior: bool,
}
/// The GPU buffer that stores the [`VolumetricFogUniform`] data.
///
/// One uniform is written per (view, fog volume) pair each frame; the
/// per-volume offsets are recorded in [`ViewFogVolume::uniform_buffer_offset`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct VolumetricFogUniformBuffer(pub DynamicUniformBuffer<VolumetricFogUniform>);
/// A `RenderStartup` system that builds the [`VolumetricFogPipeline`] resource.
///
/// It creates one bind group layout for every combination of
/// `VolumetricFogBindGroupLayoutKey` bits (multisampled × density texture),
/// loads the embedded volumetric fog shader, and inserts the resulting
/// pipeline resource.
pub fn init_volumetric_fog_pipeline(
    mut commands: Commands,
    mesh_view_layouts: Res<MeshPipelineViewLayouts>,
    asset_server: Res<AssetServer>,
) {
    // Create the bind group layout entries common to all bind group
    // layouts.
    let base_bind_group_layout_entries = &BindGroupLayoutEntries::single(
        ShaderStages::VERTEX_FRAGMENT,
        // `volumetric_fog`: binding 0, the dynamic uniform buffer.
        uniform_buffer::<VolumetricFogUniform>(true),
    );

    // For every combination of `VolumetricFogBindGroupLayoutKey` bits,
    // create a bind group layout.
    let bind_group_layouts = array::from_fn(|bits| {
        let flags = VolumetricFogBindGroupLayoutKey::from_bits_retain(bits as u8);

        let mut bind_group_layout_entries = base_bind_group_layout_entries.to_vec();

        // `depth_texture`: binding 1. The entry's multisample state must match
        // the view's depth texture, hence the MULTISAMPLED flag.
        bind_group_layout_entries.extend_from_slice(&BindGroupLayoutEntries::with_indices(
            ShaderStages::FRAGMENT,
            ((
                1,
                if flags.contains(VolumetricFogBindGroupLayoutKey::MULTISAMPLED) {
                    texture_depth_2d_multisampled()
                } else {
                    texture_depth_2d()
                },
            ),),
        ));

        // `density_texture` and `density_sampler`: bindings 2 and 3, present
        // only for layouts serving fog volumes with a 3D density texture.
        if flags.contains(VolumetricFogBindGroupLayoutKey::DENSITY_TEXTURE) {
            bind_group_layout_entries.extend_from_slice(&BindGroupLayoutEntries::with_indices(
                ShaderStages::FRAGMENT,
                (
                    (2, texture_3d(TextureSampleType::Float { filterable: true })),
                    (3, sampler(SamplerBindingType::Filtering)),
                ),
            ));
        }

        // Create the bind group layout.
        let description = flags.bind_group_layout_description();
        BindGroupLayoutDescriptor::new(description, &bind_group_layout_entries)
    });

    commands.insert_resource(VolumetricFogPipeline {
        mesh_view_layouts: mesh_view_layouts.clone(),
        volumetric_view_bind_group_layouts: bind_group_layouts,
        shader: load_embedded_asset!(asset_server.as_ref(), "volumetric_fog.wgsl"),
    });
}
/// Extracts [`VolumetricFog`], [`FogVolume`], and [`VolumetricLight`]s
/// from the main world to the render world.
pub fn extract_volumetric_fog(
    mut commands: Commands,
    view_targets: Extract<Query<(RenderEntity, &VolumetricFog)>>,
    fog_volumes: Extract<Query<(RenderEntity, &FogVolume, &GlobalTransform)>>,
    volumetric_lights: Extract<Query<(RenderEntity, &VolumetricLight)>>,
) {
    // With no volumetric lights in the scene there's nothing to render, so
    // strip the volumetric components off previously-synced render entities.
    if volumetric_lights.is_empty() {
        // TODO: needs better way to handle clean up in render world
        for (render_entity, _) in view_targets.iter() {
            commands
                .entity(render_entity)
                .remove::<(VolumetricFog, ViewVolumetricFogPipelines, ViewVolumetricFog)>();
        }
        for (render_entity, _, _) in fog_volumes.iter() {
            commands.entity(render_entity).remove::<FogVolume>();
        }
        return;
    }

    // Mirror each component onto its synced render-world entity.
    for (render_entity, fog) in view_targets.iter() {
        let mut entity_commands = commands
            .get_entity(render_entity)
            .expect("Volumetric fog entity wasn't synced.");
        entity_commands.insert(*fog);
    }
    for (render_entity, volume, volume_transform) in fog_volumes.iter() {
        let mut entity_commands = commands
            .get_entity(render_entity)
            .expect("Fog volume entity wasn't synced.");
        entity_commands.insert(((*volume).clone(), *volume_transform));
    }
    for (render_entity, light) in volumetric_lights.iter() {
        let mut entity_commands = commands
            .get_entity(render_entity)
            .expect("Volumetric light entity wasn't synced.");
        entity_commands.insert(*light);
    }
}
impl ViewNode for VolumetricFogNode {
type ViewQuery = (
Read<ViewTarget>,
Read<ViewDepthTexture>,
Read<ViewVolumetricFogPipelines>,
Read<ViewUniformOffset>,
Read<ViewLightsUniformOffset>,
Read<ViewFogUniformOffset>,
Read<ViewLightProbesUniformOffset>,
Read<ViewVolumetricFog>,
Read<MeshViewBindGroup>,
Read<ViewScreenSpaceReflectionsUniformOffset>,
Read<Msaa>,
Read<ViewEnvironmentMapUniformOffset>,
);
fn run<'w>(
&self,
_: &mut RenderGraphContext,
render_context: &mut RenderContext<'w>,
(
view_target,
view_depth_texture,
view_volumetric_lighting_pipelines,
view_uniform_offset,
view_lights_offset,
view_fog_offset,
view_light_probes_offset,
view_fog_volumes,
view_bind_group,
view_ssr_offset,
msaa,
view_environment_map_offset,
): QueryItem<'w, '_, Self::ViewQuery>,
world: &'w World,
) -> Result<(), NodeRunError> {
let pipeline_cache = world.resource::<PipelineCache>();
let volumetric_lighting_pipeline = world.resource::<VolumetricFogPipeline>();
let volumetric_lighting_uniform_buffers = world.resource::<VolumetricFogUniformBuffer>();
let image_assets = world.resource::<RenderAssets<GpuImage>>();
let mesh_allocator = world.resource::<MeshAllocator>();
// Fetch the uniform buffer and binding.
let (
Some(textureless_pipeline),
Some(textured_pipeline),
Some(volumetric_lighting_uniform_buffer_binding),
) = (
pipeline_cache.get_render_pipeline(view_volumetric_lighting_pipelines.textureless),
pipeline_cache.get_render_pipeline(view_volumetric_lighting_pipelines.textured),
volumetric_lighting_uniform_buffers.binding(),
)
else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
render_context
.command_encoder()
.push_debug_group("volumetric_lighting");
let time_span =
diagnostics.time_span(render_context.command_encoder(), "volumetric_lighting");
let fog_assets = world.resource::<FogAssets>();
let render_meshes = world.resource::<RenderAssets<RenderMesh>>();
for view_fog_volume in view_fog_volumes.iter() {
// If the camera is outside the fog volume, pick the cube mesh;
// otherwise, pick the plane mesh. In the latter case we'll be
// effectively rendering a full-screen quad.
let mesh_handle = if view_fog_volume.exterior {
fog_assets.cube_mesh.clone()
} else {
fog_assets.plane_mesh.clone()
};
let Some(vertex_buffer_slice) = mesh_allocator.mesh_vertex_slice(&mesh_handle.id())
else {
continue;
};
let density_image = view_fog_volume
.density_texture
.and_then(|density_texture| image_assets.get(density_texture));
// Pick the right pipeline, depending on whether a density texture
// is present or not.
let pipeline = if density_image.is_some() {
textured_pipeline
} else {
textureless_pipeline
};
// This should always succeed, but if the asset was unloaded don't
// panic.
let Some(render_mesh) = render_meshes.get(&mesh_handle) else {
return Ok(());
};
// Create the bind group for the view.
//
// TODO: Cache this.
let mut bind_group_layout_key = VolumetricFogBindGroupLayoutKey::empty();
bind_group_layout_key.set(
VolumetricFogBindGroupLayoutKey::MULTISAMPLED,
!matches!(*msaa, Msaa::Off),
);
// Create the bind group entries. The ones relating to the density
// texture will only be filled in if that texture is present.
let mut bind_group_entries = DynamicBindGroupEntries::sequential((
volumetric_lighting_uniform_buffer_binding.clone(),
BindingResource::TextureView(view_depth_texture.view()),
));
if let Some(density_image) = density_image {
bind_group_layout_key.insert(VolumetricFogBindGroupLayoutKey::DENSITY_TEXTURE);
bind_group_entries = bind_group_entries.extend_sequential((
BindingResource::TextureView(&density_image.texture_view),
BindingResource::Sampler(&density_image.sampler),
));
}
let volumetric_view_bind_group_layout = &volumetric_lighting_pipeline
.volumetric_view_bind_group_layouts[bind_group_layout_key.bits() as usize];
let volumetric_view_bind_group = render_context.render_device().create_bind_group(
None,
&pipeline_cache.get_bind_group_layout(volumetric_view_bind_group_layout),
&bind_group_entries,
);
let render_pass_descriptor = RenderPassDescriptor {
label: Some("volumetric lighting pass"),
color_attachments: &[Some(RenderPassColorAttachment {
view: view_target.main_texture_view(),
depth_slice: None,
resolve_target: None,
ops: Operations {
load: LoadOp::Load,
store: StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
};
let mut render_pass = render_context
.command_encoder()
.begin_render_pass(&render_pass_descriptor);
render_pass.set_vertex_buffer(0, *vertex_buffer_slice.buffer.slice(..));
render_pass.set_pipeline(pipeline);
render_pass.set_bind_group(
0,
&view_bind_group.main,
&[
view_uniform_offset.offset,
view_lights_offset.offset,
view_fog_offset.offset,
**view_light_probes_offset,
**view_ssr_offset,
**view_environment_map_offset,
],
);
render_pass.set_bind_group(
1,
&volumetric_view_bind_group,
&[view_fog_volume.uniform_buffer_offset],
);
// Draw elements or arrays, as appropriate.
match &render_mesh.buffer_info {
RenderMeshBufferInfo::Indexed {
index_format,
count,
} => {
let Some(index_buffer_slice) =
mesh_allocator.mesh_index_slice(&mesh_handle.id())
else {
continue;
};
render_pass
.set_index_buffer(*index_buffer_slice.buffer.slice(..), *index_format);
render_pass.draw_indexed(
index_buffer_slice.range.start..(index_buffer_slice.range.start + count),
vertex_buffer_slice.range.start as i32,
0..1,
);
}
RenderMeshBufferInfo::NonIndexed => {
render_pass.draw(vertex_buffer_slice.range, 0..1);
}
}
}
time_span.end(render_context.command_encoder());
render_context.command_encoder().pop_debug_group();
Ok(())
}
}
impl SpecializedRenderPipeline for VolumetricFogPipeline {
    type Key = VolumetricFogPipelineKey;

    /// Builds the render pipeline descriptor for one specialization of the
    /// volumetric fog shader, as selected by `key` (MSAA, atmosphere, density
    /// texture, and HDR target).
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // We always use hardware 2x2 filtering for sampling the shadow map; the
        // more accurate versions with percentage-closer filtering aren't worth
        // the overhead.
        let mut shader_defs = vec!["SHADOW_FILTER_METHOD_HARDWARE_2X2".into()];

        // We need a separate layout for MSAA and non-MSAA, as well as one for
        // the presence or absence of the density texture.
        let mut bind_group_layout_key = VolumetricFogBindGroupLayoutKey::empty();
        bind_group_layout_key.set(
            VolumetricFogBindGroupLayoutKey::MULTISAMPLED,
            key.mesh_pipeline_view_key
                .contains(MeshPipelineViewLayoutKey::MULTISAMPLED),
        );
        bind_group_layout_key.set(
            VolumetricFogBindGroupLayoutKey::DENSITY_TEXTURE,
            key.flags
                .contains(VolumetricFogPipelineKeyFlags::DENSITY_TEXTURE),
        );
        // The layouts are indexed by the raw flag bits.
        let volumetric_view_bind_group_layout =
            self.volumetric_view_bind_group_layouts[bind_group_layout_key.bits() as usize].clone();

        // Both the cube and plane have the same vertex layout, so we don't need
        // to distinguish between the two.
        let vertex_format = key
            .vertex_buffer_layout
            .0
            .get_layout(&[Mesh::ATTRIBUTE_POSITION.at_shader_location(0)])
            .expect("Failed to get vertex layout for volumetric fog hull");

        // Forward the key's feature flags to the shader as `#ifdef`s.
        if key
            .mesh_pipeline_view_key
            .contains(MeshPipelineViewLayoutKey::MULTISAMPLED)
        {
            shader_defs.push("MULTISAMPLED".into());
        }
        if key
            .mesh_pipeline_view_key
            .contains(MeshPipelineViewLayoutKey::ATMOSPHERE)
        {
            shader_defs.push("ATMOSPHERE".into());
        }
        if key
            .flags
            .contains(VolumetricFogPipelineKeyFlags::DENSITY_TEXTURE)
        {
            shader_defs.push("DENSITY_TEXTURE".into());
        }

        // Group 0 is the standard mesh view layout; group 1 is ours.
        let layout = self
            .mesh_view_layouts
            .get_view_layout(key.mesh_pipeline_view_key);
        let layout = vec![
            layout.main_layout.clone(),
            volumetric_view_bind_group_layout.clone(),
        ];

        RenderPipelineDescriptor {
            label: Some("volumetric lighting pipeline".into()),
            layout,
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![vertex_format],
                ..default()
            },
            primitive: PrimitiveState {
                cull_mode: Some(Face::Back),
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format: if key.flags.contains(VolumetricFogPipelineKeyFlags::HDR) {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    // Blend on top of what's already in the framebuffer. Doing
                    // the alpha blending with the hardware blender allows us to
                    // avoid having to use intermediate render targets.
                    blend: Some(BlendState {
                        color: BlendComponent {
                            src_factor: BlendFactor::One,
                            dst_factor: BlendFactor::OneMinusSrcAlpha,
                            operation: BlendOperation::Add,
                        },
                        alpha: BlendComponent {
                            src_factor: BlendFactor::Zero,
                            dst_factor: BlendFactor::One,
                            operation: BlendOperation::Add,
                        },
                    }),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            ..default()
        }
    }
}
/// Specializes volumetric fog pipelines for all views with that effect enabled.
///
/// Each qualifying view receives a [`ViewVolumetricFogPipelines`] component
/// holding both the textureless variant and the density-textured variant.
pub fn prepare_volumetric_fog_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<VolumetricFogPipeline>>,
    volumetric_lighting_pipeline: Res<VolumetricFogPipeline>,
    fog_assets: Res<FogAssets>,
    view_targets: Query<
        (
            Entity,
            &ExtractedView,
            &Msaa,
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
            Has<ExtractedAtmosphere>,
        ),
        With<VolumetricFog>,
    >,
    meshes: Res<RenderAssets<RenderMesh>>,
) {
    // The plane and cube hulls share a vertex layout, so the plane's suffices.
    let Some(plane_mesh) = meshes.get(&fog_assets.plane_mesh) else {
        // There's an off chance that the mesh won't be prepared yet if `RenderAssetBytesPerFrame` limiting is in use.
        return;
    };

    for (
        entity,
        view,
        msaa,
        normal_prepass,
        depth_prepass,
        motion_vector_prepass,
        deferred_prepass,
        atmosphere,
    ) in view_targets.iter()
    {
        // Create a mesh pipeline view layout key corresponding to the view.
        let mut mesh_pipeline_view_key = MeshPipelineViewLayoutKey::from(*msaa);
        mesh_pipeline_view_key.set(MeshPipelineViewLayoutKey::NORMAL_PREPASS, normal_prepass);
        mesh_pipeline_view_key.set(MeshPipelineViewLayoutKey::DEPTH_PREPASS, depth_prepass);
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS,
            motion_vector_prepass,
        );
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::DEFERRED_PREPASS,
            deferred_prepass,
        );
        mesh_pipeline_view_key.set(MeshPipelineViewLayoutKey::ATMOSPHERE, atmosphere);

        let mut textureless_flags = VolumetricFogPipelineKeyFlags::empty();
        textureless_flags.set(VolumetricFogPipelineKeyFlags::HDR, view.hdr);

        // Specialize the pipeline.
        let textureless_pipeline_key = VolumetricFogPipelineKey {
            mesh_pipeline_view_key,
            vertex_buffer_layout: plane_mesh.layout.clone(),
            flags: textureless_flags,
        };
        let textureless_pipeline_id = pipelines.specialize(
            &pipeline_cache,
            &volumetric_lighting_pipeline,
            textureless_pipeline_key.clone(),
        );
        // The textured variant differs only by the `DENSITY_TEXTURE` flag.
        let textured_pipeline_id = pipelines.specialize(
            &pipeline_cache,
            &volumetric_lighting_pipeline,
            VolumetricFogPipelineKey {
                flags: textureless_pipeline_key.flags
                    | VolumetricFogPipelineKeyFlags::DENSITY_TEXTURE,
                ..textureless_pipeline_key
            },
        );

        commands.entity(entity).insert(ViewVolumetricFogPipelines {
            textureless: textureless_pipeline_id,
            textured: textured_pipeline_id,
        });
    }
}
/// A system that converts [`VolumetricFog`] into [`VolumetricFogUniform`]s.
///
/// One uniform is written per (view, fog volume) pair, and each view entity
/// receives a [`ViewVolumetricFog`] component recording the resulting buffer
/// offsets.
pub fn prepare_volumetric_fog_uniforms(
    mut commands: Commands,
    mut volumetric_lighting_uniform_buffer: ResMut<VolumetricFogUniformBuffer>,
    view_targets: Query<(Entity, &ExtractedView, &VolumetricFog)>,
    fog_volumes: Query<(Entity, &FogVolume, &GlobalTransform)>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut local_from_world_matrices: Local<Vec<Affine3A>>,
) {
    // Do this up front to avoid O(n^2) matrix inversion.
    local_from_world_matrices.clear();
    for (_, _, fog_transform) in fog_volumes.iter() {
        local_from_world_matrices.push(fog_transform.affine().inverse());
    }

    let uniform_count = view_targets.iter().len() * local_from_world_matrices.len();

    let Some(mut writer) =
        volumetric_lighting_uniform_buffer.get_writer(uniform_count, &render_device, &render_queue)
    else {
        return;
    };

    for (view_entity, extracted_view, volumetric_fog) in view_targets.iter() {
        let world_from_view = extracted_view.world_from_view.affine();

        let mut view_fog_volumes = vec![];

        // `fog_volumes` and `local_from_world_matrices` were built in the same
        // iteration order, so zipping pairs each volume with its inverse.
        for ((_, fog_volume, _), local_from_world) in
            fog_volumes.iter().zip(local_from_world_matrices.iter())
        {
            // Calculate the transforms to and from 1×1×1 local space.
            let local_from_view = *local_from_world * world_from_view;
            let view_from_local = local_from_view.inverse();

            // Determine whether the camera is inside or outside the volume, and
            // calculate the clip space transform.
            let interior = camera_is_inside_fog_volume(&local_from_view);
            let hull_clip_from_local = calculate_fog_volume_clip_from_local_transforms(
                interior,
                &extracted_view.clip_from_view,
                &view_from_local,
            );

            // Calculate the radius of the sphere that bounds the fog volume.
            let bounding_radius = view_from_local
                .transform_vector3a(Vec3A::splat(0.5))
                .length();

            // Write out our uniform.
            let uniform_buffer_offset = writer.write(&VolumetricFogUniform {
                clip_from_local: hull_clip_from_local,
                uvw_from_world: UVW_FROM_LOCAL * *local_from_world,
                far_planes: get_far_planes(&view_from_local),
                fog_color: fog_volume.fog_color.to_linear().to_vec3(),
                light_tint: fog_volume.light_tint.to_linear().to_vec3(),
                ambient_color: volumetric_fog.ambient_color.to_linear().to_vec3(),
                ambient_intensity: volumetric_fog.ambient_intensity,
                step_count: volumetric_fog.step_count,
                bounding_radius,
                absorption: fog_volume.absorption,
                scattering: fog_volume.scattering,
                density: fog_volume.density_factor,
                density_texture_offset: fog_volume.density_texture_offset,
                scattering_asymmetry: fog_volume.scattering_asymmetry,
                light_intensity: fog_volume.light_intensity,
                jitter_strength: volumetric_fog.jitter,
            });

            view_fog_volumes.push(ViewFogVolume {
                uniform_buffer_offset,
                exterior: !interior,
                density_texture: fog_volume.density_texture.as_ref().map(Handle::id),
            });
        }

        commands
            .entity(view_entity)
            .insert(ViewVolumetricFog(view_fog_volumes));
    }
}
/// A system that marks all view depth textures as readable in shaders.
///
/// The volumetric lighting pass needs to do this, and it doesn't happen by
/// default.
pub fn prepare_view_depth_textures_for_volumetric_fog(
    mut view_targets: Query<&mut Camera3d>,
    fog_volumes: Query<&VolumetricFog>,
) {
    // Only necessary when at least one view actually has volumetric fog.
    if fog_volumes.is_empty() {
        return;
    }

    let binding_usage_bits = TextureUsages::TEXTURE_BINDING.bits();
    for mut camera_3d in view_targets.iter_mut() {
        camera_3d.depth_texture_usages.0 |= binding_usage_bits;
    }
}
/// Computes the view-space plane equations of the up-to-three faces of the
/// 1×1×1 fog volume cuboid whose normals point away from the camera.
///
/// Each returned vector has the form V = (N, -N⋅Q), where N is the view-space
/// plane normal and Q is a point on the plane, so a homogeneous point P lies
/// on the plane when V⋅P = 0 (see [`VolumetricFogUniform::far_planes`]).
/// Unfilled slots remain `Vec4::ZERO`.
fn get_far_planes(view_from_local: &Affine3A) -> [Vec4; 3] {
    let (mut far_planes, mut next_index) = ([Vec4::ZERO; 3], 0);

    for &local_normal in &[
        Vec3A::X,
        Vec3A::NEG_X,
        Vec3A::Y,
        Vec3A::NEG_Y,
        Vec3A::Z,
        Vec3A::NEG_Z,
    ] {
        let view_normal = view_from_local
            .transform_vector3a(local_normal)
            .normalize_or_zero();

        // Only faces whose normals point away from the camera (+Z in view
        // space) count as far faces.
        if view_normal.z <= 0.0 {
            continue;
        }

        // The face center lies half a unit along the *opposite* of the local
        // normal, since the cube is centered on the origin.
        let view_position = view_from_local.transform_point3a(-local_normal * 0.5);
        let plane_coords = view_normal.extend(-view_normal.dot(view_position));

        far_planes[next_index] = plane_coords;
        next_index += 1;
        // At most three faces of a convex cuboid can qualify. Stop once the
        // array is full; the previous `continue` here was a no-op as the last
        // statement of the loop and would have permitted an out-of-bounds
        // write if a fourth face ever passed the test above.
        if next_index == far_planes.len() {
            break;
        }
    }

    far_planes
}
impl VolumetricFogBindGroupLayoutKey {
    /// Creates an appropriate debug description for the bind group layout with
    /// these flags.
    fn bind_group_layout_description(&self) -> String {
        const BASE_DESCRIPTION: &str = "volumetric lighting view bind group layout";

        // No flags set: just the base label.
        if self.is_empty() {
            return BASE_DESCRIPTION.to_owned();
        }

        // Otherwise append a parenthesized, comma-separated flag list.
        let mut flag_names: Vec<&str> = Vec::new();
        for flag in self.iter() {
            if flag == VolumetricFogBindGroupLayoutKey::DENSITY_TEXTURE {
                flag_names.push("density texture");
            } else if flag == VolumetricFogBindGroupLayoutKey::MULTISAMPLED {
                flag_names.push("multisampled");
            }
        }
        format!("{} ({})", BASE_DESCRIPTION, flag_names.join(", "))
    }
}
/// Given the transform from the view to the 1×1×1 cube in local fog volume
/// space, returns true if the camera is inside the volume.
fn camera_is_inside_fog_volume(local_from_view: &Affine3A) -> bool {
local_from_view
.translation
.abs()
.cmple(Vec3A::splat(0.5))
.all()
}
/// Given the local transforms, returns the matrix that transforms model space
/// to clip space.
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/volumetric_fog/mod.rs | crates/bevy_pbr/src/volumetric_fog/mod.rs | //! Volumetric fog and volumetric lighting, also known as light shafts or god
//! rays.
//!
//! This module implements a more physically-accurate, but slower, form of fog
//! than the [`crate::fog`] module does. Notably, this *volumetric fog* allows
//! for light beams from directional lights to shine through, creating what is
//! known as *light shafts* or *god rays*.
//!
//! To add volumetric fog to a scene, add [`bevy_light::VolumetricFog`] to the
//! camera, and add [`bevy_light::VolumetricLight`] to directional lights that you wish to
//! be volumetric. [`bevy_light::VolumetricFog`] features numerous settings that
//! allow you to define the accuracy of the simulation, as well as the look of
//! the fog. Currently, only interaction with directional lights that have
//! shadow maps is supported. Note that the overhead of the effect scales
//! directly with the number of directional lights in use, so apply
//! [`bevy_light::VolumetricLight`] sparingly for the best results.
//!
//! The overall algorithm, which is implemented as a postprocessing effect, is a
//! combination of the techniques described in [Scratchapixel] and [this blog
//! post]. It uses raymarching in screen space, transformed into shadow map
//! space for sampling and combined with physically-based modeling of absorption
//! and scattering. Bevy employs the widely-used [Henyey-Greenstein phase
//! function] to model asymmetry; this essentially allows light shafts to fade
//! into and out of existence as the user views them.
//!
//! [Scratchapixel]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/intro-volume-rendering.html
//!
//! [this blog post]: https://www.alexandre-pestana.com/volumetric-lights/
//!
//! [Henyey-Greenstein phase function]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions#TheHenyeyndashGreensteinPhaseFunction
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, Assets, Handle};
use bevy_core_pipeline::core_3d::{
graph::{Core3d, Node3d},
prepare_core_3d_depth_textures,
};
use bevy_ecs::{resource::Resource, schedule::IntoScheduleConfigs as _};
use bevy_light::FogVolume;
use bevy_math::{
primitives::{Cuboid, Plane3d},
Vec2, Vec3,
};
use bevy_mesh::{Mesh, Meshable};
use bevy_render::{
render_graph::{RenderGraphExt, ViewNodeRunner},
render_resource::SpecializedRenderPipelines,
sync_component::SyncComponentPlugin,
ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
};
use render::{VolumetricFogNode, VolumetricFogPipeline, VolumetricFogUniformBuffer};
use crate::{graph::NodePbr, volumetric_fog::render::init_volumetric_fog_pipeline};
pub mod render;
/// A plugin that implements volumetric fog.
///
/// Registers the volumetric fog render-world resources, systems, and render
/// graph node (see the [`Plugin`] implementation below).
pub struct VolumetricFogPlugin;
/// The hull meshes that the volumetric fog pass rasterizes.
#[derive(Resource)]
pub struct FogAssets {
    // The plane mesh, drawn when the camera is inside a fog volume.
    plane_mesh: Handle<Mesh>,
    // The cube mesh, drawn when the camera is outside a fog volume.
    cube_mesh: Handle<Mesh>,
}
impl Plugin for VolumetricFogPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "volumetric_fog.wgsl");

        // Create the hull meshes up front in the main world; the render world
        // refers to them through `FogAssets`.
        let mut meshes = app.world_mut().resource_mut::<Assets<Mesh>>();
        let plane_mesh = meshes.add(Plane3d::new(Vec3::Z, Vec2::ONE).mesh());
        let cube_mesh = meshes.add(Cuboid::new(1.0, 1.0, 1.0).mesh());

        app.add_plugins(SyncComponentPlugin::<FogVolume>::default());

        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        render_app
            .insert_resource(FogAssets {
                plane_mesh,
                cube_mesh,
            })
            .init_resource::<SpecializedRenderPipelines<VolumetricFogPipeline>>()
            .init_resource::<VolumetricFogUniformBuffer>()
            .add_systems(RenderStartup, init_volumetric_fog_pipeline)
            .add_systems(ExtractSchedule, render::extract_volumetric_fog)
            .add_systems(
                Render,
                (
                    render::prepare_volumetric_fog_pipelines.in_set(RenderSystems::Prepare),
                    render::prepare_volumetric_fog_uniforms.in_set(RenderSystems::Prepare),
                    // Must run before the depth textures are created so the
                    // extra TEXTURE_BINDING usage flag takes effect.
                    render::prepare_view_depth_textures_for_volumetric_fog
                        .in_set(RenderSystems::Prepare)
                        .before(prepare_core_3d_depth_textures),
                ),
            )
            .add_render_graph_node::<ViewNodeRunner<VolumetricFogNode>>(
                Core3d,
                NodePbr::VolumetricFog,
            )
            .add_render_graph_edges(
                Core3d,
                // Volumetric fog should run after the main pass but before bloom, so
                // we order it at the start of post processing.
                (
                    Node3d::EndMainPass,
                    NodePbr::VolumetricFog,
                    Node3d::StartMainPassPostProcessing,
                ),
            );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/light_probe/irradiance_volume.rs | crates/bevy_pbr/src/light_probe/irradiance_volume.rs | //! Irradiance volumes, also known as voxel global illumination.
//!
//! An *irradiance volume* is a cuboid voxel region consisting of
//! regularly-spaced precomputed samples of diffuse indirect light. They're
//! ideal if you have a dynamic object such as a character that can move about
//! static non-moving geometry such as a level in a game, and you want that
//! dynamic object to be affected by the light bouncing off that static
//! geometry.
//!
//! To use irradiance volumes, you need to precompute, or *bake*, the indirect
//! light in your scene. Bevy doesn't currently come with a way to do this.
//! Fortunately, [Blender] provides a [baking tool] as part of the Eevee
//! renderer, and its irradiance volumes are compatible with those used by Bevy.
//! The [`bevy-baked-gi`] project provides a tool, `export-blender-gi`, that can
//! extract the baked irradiance volumes from the Blender `.blend` file and
//! package them up into a `.ktx2` texture for use by the engine. See the
//! documentation in the `bevy-baked-gi` project for more details on this
//! workflow.
//!
//! Like all light probes in Bevy, irradiance volumes are 1×1×1 cubes, centered
//! on the origin, that can be arbitrarily scaled, rotated, and positioned in a
//! scene with the [`bevy_transform::components::Transform`] component. The 3D
//! voxel grid will be stretched to fill the interior of the cube, with linear
//! interpolation, and the illumination from the irradiance volume will apply to
//! all fragments within that bounding region.
//!
//! Bevy's irradiance volumes are based on Valve's [*ambient cubes*] as used in
//! *Half-Life 2* ([Mitchell 2006, slide 27]). These encode a single color of
//! light from the six 3D cardinal directions and blend the sides together
//! according to the surface normal. For an explanation of why ambient cubes
//! were chosen over spherical harmonics, see [Why ambient cubes?] below.
//!
//! If you wish to use a tool other than `export-blender-gi` to produce the
//! irradiance volumes, you'll need to pack the irradiance volumes in the
//! following format. The irradiance volume of resolution *(Rx, Ry, Rz)* is
//! expected to be a 3D texture of dimensions *(Rx, 2Ry, 3Rz)*. The unnormalized
//! texture coordinate *(s, t, p)* of the voxel at coordinate *(x, y, z)* with
//! side *S* ∈ *{-X, +X, -Y, +Y, -Z, +Z}* is as follows:
//!
//! ```text
//! s = x
//!
//! t = y + ⎰ 0 if S ∈ {-X, -Y, -Z}
//! ⎱ Ry if S ∈ {+X, +Y, +Z}
//!
//! ⎧ 0 if S ∈ {-X, +X}
//! p = z + ⎨ Rz if S ∈ {-Y, +Y}
//! ⎩ 2Rz if S ∈ {-Z, +Z}
//! ```
//!
//! Visually, in a left-handed coordinate system with Y up, viewed from the
//! right, the 3D texture looks like a stacked series of voxel grids, one for
//! each cube side, in this order:
//!
//! | **+X** | **+Y** | **+Z** |
//! | ------ | ------ | ------ |
//! | **-X** | **-Y** | **-Z** |
//!
//! A terminology note: Other engines may refer to irradiance volumes as *voxel
//! global illumination*, *VXGI*, or simply as *light probes*. Sometimes *light
//! probe* refers to what Bevy calls a reflection probe. In Bevy, *light probe*
//! is a generic term that encompasses all cuboid bounding regions that capture
//! indirect illumination, whether based on voxels or not.
//!
//! Note that, if binding arrays aren't supported (e.g. on WebGPU or WebGL 2),
//! then only the closest irradiance volume to the view will be taken into
//! account during rendering. The required `wgpu` features are
//! [`bevy_render::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY`] and
//! [`bevy_render::settings::WgpuFeatures::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING`].
//!
//! ## Why ambient cubes?
//!
//! This section describes the motivation behind the decision to use ambient
//! cubes in Bevy. It's not needed to use the feature; feel free to skip it
//! unless you're interested in its internal design.
//!
//! Bevy uses *Half-Life 2*-style ambient cubes (usually abbreviated as *HL2*)
//! as the representation of irradiance for light probes instead of the
//! more-popular spherical harmonics (*SH*). This might seem to be a surprising
//! choice, but it turns out to work well for the specific case of voxel
//! sampling on the GPU. Spherical harmonics have two problems that make them
//! less ideal for this use case:
//!
//! 1. The level 1 spherical harmonic coefficients can be negative. That
//! prevents the use of the efficient [RGB9E5 texture format], which only
//! encodes unsigned floating point numbers, and forces the use of the
//! less-efficient [RGBA16F format] if hardware interpolation is desired.
//!
//! 2. As an alternative to RGBA16F, level 1 spherical harmonics can be
//! normalized and scaled to the SH0 base color, as [Frostbite] does. This
//! allows them to be packed in standard LDR RGBA8 textures. However, this
//! prevents the use of hardware trilinear filtering, as the nonuniform scale
//! factor means that hardware interpolation no longer produces correct results.
//! The 8 texture fetches needed to interpolate between voxels can be upwards of
//! twice as slow as the hardware interpolation.
//!
//! The following chart summarizes the costs and benefits of ambient cubes,
//! level 1 spherical harmonics, and level 2 spherical harmonics:
//!
//! | Technique | HW-interpolated samples | Texel fetches | Bytes per voxel | Quality |
//! | ------------------------ | ----------------------- | ------------- | --------------- | ------- |
//! | Ambient cubes | 3 | 0 | 24 | Medium |
//! | Level 1 SH, compressed | 0 | 36 | 16 | Low |
//! | Level 1 SH, uncompressed | 4 | 0 | 24 | Low |
//! | Level 2 SH, compressed | 0 | 72 | 28 | High |
//! | Level 2 SH, uncompressed | 9 | 0 | 54 | High |
//!
//! (Note that the number of bytes per voxel can be reduced using various
//! texture compression methods, but the overall ratios remain similar.)
//!
//! From these data, we can see that ambient cubes balance fast lookups (from
//! leveraging hardware interpolation) with relatively-small storage
//! requirements and acceptable quality. Hence, they were chosen for irradiance
//! volumes in Bevy.
//!
//! [*ambient cubes*]: https://advances.realtimerendering.com/s2006/Mitchell-ShadingInValvesSourceEngine.pdf
//!
//! [spherical harmonics]: https://en.wikipedia.org/wiki/Spherical_harmonic_lighting
//!
//! [RGB9E5 texture format]: https://www.khronos.org/opengl/wiki/Small_Float_Formats#RGB9_E5
//!
//! [RGBA16F format]: https://www.khronos.org/opengl/wiki/Small_Float_Formats#Low-bitdepth_floats
//!
//! [Frostbite]: https://media.contentapi.ea.com/content/dam/eacom/frostbite/files/gdc2018-precomputedgiobalilluminationinfrostbite.pdf#page=53
//!
//! [Mitchell 2006, slide 27]: https://advances.realtimerendering.com/s2006/Mitchell-ShadingInValvesSourceEngine.pdf#page=27
//!
//! [Blender]: http://blender.org/
//!
//! [baking tool]: https://docs.blender.org/manual/en/latest/render/eevee/light_probes/volume.html
//!
//! [`bevy-baked-gi`]: https://github.com/pcwalton/bevy-baked-gi
//!
//! [Why ambient cubes?]: #why-ambient-cubes
use bevy_image::Image;
use bevy_light::IrradianceVolume;
use bevy_render::{
render_asset::RenderAssets,
render_resource::{
binding_types, BindGroupLayoutEntryBuilder, Sampler, SamplerBindingType, TextureSampleType,
TextureView,
},
renderer::{RenderAdapter, RenderDevice},
texture::{FallbackImage, GpuImage},
};
use core::{num::NonZero, ops::Deref};
use bevy_asset::AssetId;
use crate::{
add_cubemap_texture_view, binding_arrays_are_usable, RenderViewLightProbes,
MAX_VIEW_LIGHT_PROBES,
};
use super::LightProbeComponent;
/// On WebGL and WebGPU, we must disable irradiance volumes, as otherwise we can
/// overflow the number of texture bindings when deferred rendering is in use
/// (see issue #11885).
///
/// In other words, irradiance volumes are enabled on every non-Wasm target.
pub(crate) const IRRADIANCE_VOLUMES_ARE_USABLE: bool = cfg!(not(target_arch = "wasm32"));
/// All the bind group entries necessary for PBR shaders to access the
/// irradiance volumes exposed to a view.
pub(crate) enum RenderViewIrradianceVolumeBindGroupEntries<'a> {
    /// The version used when binding arrays aren't available on the current platform.
    ///
    /// In this case only one irradiance volume — the closest to the view —
    /// can be bound (see the module documentation).
    Single {
        /// The texture view of the closest light probe.
        texture_view: &'a TextureView,

        /// A sampler used to sample voxels of the irradiance volume.
        sampler: &'a Sampler,
    },

    /// The version used when binding arrays are available on the current
    /// platform.
    Multiple {
        /// A texture view of the voxels of each irradiance volume, in the same
        /// order that they are supplied to the view (i.e. in the same order as
        /// `binding_index_to_cubemap` in [`RenderViewLightProbes`]).
        ///
        /// This is a vector of `wgpu::TextureView`s. But we don't want to import
        /// `wgpu` in this crate, so we refer to it indirectly like this.
        texture_views: Vec<&'a <TextureView as Deref>::Target>,

        /// A sampler used to sample voxels of the irradiance volumes.
        sampler: &'a Sampler,
    },
}
impl<'a> RenderViewIrradianceVolumeBindGroupEntries<'a> {
/// Looks up and returns the bindings for any irradiance volumes visible in
/// the view, as well as the sampler.
pub(crate) fn get(
render_view_irradiance_volumes: Option<&RenderViewLightProbes<IrradianceVolume>>,
images: &'a RenderAssets<GpuImage>,
fallback_image: &'a FallbackImage,
render_device: &RenderDevice,
render_adapter: &RenderAdapter,
) -> RenderViewIrradianceVolumeBindGroupEntries<'a> {
if binding_arrays_are_usable(render_device, render_adapter) {
RenderViewIrradianceVolumeBindGroupEntries::get_multiple(
render_view_irradiance_volumes,
images,
fallback_image,
)
} else {
RenderViewIrradianceVolumeBindGroupEntries::single(
render_view_irradiance_volumes,
images,
fallback_image,
)
}
}
/// Looks up and returns the bindings for any irradiance volumes visible in
/// the view, as well as the sampler. This is the version used when binding
/// arrays are available on the current platform.
fn get_multiple(
render_view_irradiance_volumes: Option<&RenderViewLightProbes<IrradianceVolume>>,
images: &'a RenderAssets<GpuImage>,
fallback_image: &'a FallbackImage,
) -> RenderViewIrradianceVolumeBindGroupEntries<'a> {
let mut texture_views = vec![];
let mut sampler = None;
if let Some(irradiance_volumes) = render_view_irradiance_volumes {
for &cubemap_id in &irradiance_volumes.binding_index_to_textures {
add_cubemap_texture_view(
&mut texture_views,
&mut sampler,
cubemap_id,
images,
fallback_image,
);
}
}
// Pad out the bindings to the size of the binding array using fallback
// textures. This is necessary on D3D12 and Metal.
texture_views.resize(MAX_VIEW_LIGHT_PROBES, &*fallback_image.d3.texture_view);
RenderViewIrradianceVolumeBindGroupEntries::Multiple {
texture_views,
sampler: sampler.unwrap_or(&fallback_image.d3.sampler),
}
}
/// Looks up and returns the bindings for any irradiance volumes visible in
/// the view, as well as the sampler. This is the version used when binding
/// arrays aren't available on the current platform.
fn single(
render_view_irradiance_volumes: Option<&RenderViewLightProbes<IrradianceVolume>>,
images: &'a RenderAssets<GpuImage>,
fallback_image: &'a FallbackImage,
) -> RenderViewIrradianceVolumeBindGroupEntries<'a> {
if let Some(irradiance_volumes) = render_view_irradiance_volumes
&& let Some(irradiance_volume) = irradiance_volumes.render_light_probes.first()
&& irradiance_volume.texture_index >= 0
&& let Some(image_id) = irradiance_volumes
.binding_index_to_textures
.get(irradiance_volume.texture_index as usize)
&& let Some(image) = images.get(*image_id)
{
return RenderViewIrradianceVolumeBindGroupEntries::Single {
texture_view: &image.texture_view,
sampler: &image.sampler,
};
}
RenderViewIrradianceVolumeBindGroupEntries::Single {
texture_view: &fallback_image.d3.texture_view,
sampler: &fallback_image.d3.sampler,
}
}
}
/// Returns the bind group layout entries for the voxel texture and sampler
/// respectively.
pub(crate) fn get_bind_group_layout_entries(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> [BindGroupLayoutEntryBuilder; 2] {
    let base_binding = binding_types::texture_3d(TextureSampleType::Float { filterable: true });

    // When binding arrays are available, the voxel textures are exposed as an
    // array of `MAX_VIEW_LIGHT_PROBES` bindings rather than a single binding.
    let texture_3d_binding = if binding_arrays_are_usable(render_device, render_adapter) {
        base_binding.count(NonZero::<u32>::new(MAX_VIEW_LIGHT_PROBES as _).unwrap())
    } else {
        base_binding
    };

    [
        texture_3d_binding,
        binding_types::sampler(SamplerBindingType::Filtering),
    ]
}
impl LightProbeComponent for IrradianceVolume {
    type AssetId = AssetId<Image>;

    // Irradiance volumes can't be attached to the view, so we store nothing
    // here.
    type ViewLightProbeInfo = ();

    /// Returns the ID of the voxel texture, or `None` if it hasn't been
    /// uploaded to the GPU yet.
    fn id(&self, image_assets: &RenderAssets<GpuImage>) -> Option<Self::AssetId> {
        // The volume isn't renderable until its voxel texture is resident on
        // the GPU, so don't report an ID before then.
        image_assets.get(&self.voxels)?;
        Some(self.voxels.id())
    }

    /// Returns the scale factor applied to light sampled from the voxel
    /// texture.
    fn intensity(&self) -> f32 {
        self.intensity
    }

    fn affects_lightmapped_mesh_diffuse(&self) -> bool {
        self.affects_lightmapped_meshes
    }

    /// Irradiance volumes have no view-attached variant, so this just returns
    /// an empty per-view light probe list.
    fn create_render_view_light_probes(
        _: Option<&Self>,
        _: &RenderAssets<GpuImage>,
    ) -> RenderViewLightProbes<Self> {
        RenderViewLightProbes::new()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/light_probe/environment_map.rs | crates/bevy_pbr/src/light_probe/environment_map.rs | //! Environment maps and reflection probes.
//!
//! An *environment map* consists of a pair of diffuse and specular cubemaps
//! that together reflect the static surrounding area of a region in space. When
//! available, the PBR shader uses these to apply diffuse light and calculate
//! specular reflections.
//!
//! Environment maps come in two flavors, depending on what other components the
//! entities they're attached to have:
//!
//! 1. If attached to a view, they represent the objects located a very far
//! distance from the view, in a similar manner to a skybox. Essentially, these
//! *view environment maps* represent a higher-quality replacement for
//! [`AmbientLight`](bevy_light::AmbientLight) for outdoor scenes. The indirect light from such
//! environment maps is added to every point of the scene, including
//! interior enclosed areas.
//!
//! 2. If attached to a [`bevy_light::LightProbe`], environment maps represent the immediate
//! surroundings of a specific location in the scene. These types of
//! environment maps are known as *reflection probes*.
//!
//! Typically, environment maps are static (i.e. "baked", calculated ahead of
//! time) and so only reflect fixed static geometry. The environment maps must
//! be pre-filtered into a pair of cubemaps, one for the diffuse component and
//! one for the specular component, according to the [split-sum approximation].
//! To pre-filter your environment map, you can use the [glTF IBL Sampler] or
//! its [artist-friendly UI]. The diffuse map uses the Lambertian distribution,
//! while the specular map uses the GGX distribution.
//!
//! The Khronos Group has [several pre-filtered environment maps] available for
//! you to use.
//!
//! Currently, reflection probes (i.e. environment maps attached to light
//! probes) use binding arrays (also known as bindless textures) and
//! consequently aren't supported on WebGL2 or WebGPU. Reflection probes are
//! also unsupported if GLSL is in use, due to `naga` limitations. Environment
//! maps attached to views are, however, supported on all platforms.
//!
//! [split-sum approximation]: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf
//!
//! [glTF IBL Sampler]: https://github.com/KhronosGroup/glTF-IBL-Sampler
//!
//! [artist-friendly UI]: https://github.com/pcwalton/gltf-ibl-sampler-egui
//!
//! [several pre-filtered environment maps]: https://github.com/KhronosGroup/glTF-Sample-Environments
use bevy_asset::AssetId;
use bevy_ecs::{query::QueryItem, system::lifetimeless::Read};
use bevy_image::Image;
use bevy_light::EnvironmentMapLight;
use bevy_render::{
extract_instances::ExtractInstance,
render_asset::RenderAssets,
render_resource::{
binding_types::{self, uniform_buffer},
BindGroupLayoutEntryBuilder, Sampler, SamplerBindingType, ShaderStages, TextureSampleType,
TextureView,
},
renderer::{RenderAdapter, RenderDevice},
texture::{FallbackImage, GpuImage},
};
use core::{num::NonZero, ops::Deref};
use crate::{
add_cubemap_texture_view, binding_arrays_are_usable, EnvironmentMapUniform,
MAX_VIEW_LIGHT_PROBES,
};
use super::{LightProbeComponent, RenderViewLightProbes};
/// Like [`EnvironmentMapLight`], but contains asset IDs instead of handles.
///
/// This is for use in the render app. Because it derives `Copy`, `Eq`, and
/// `Hash`, it can be used directly as a key when mapping environment maps to
/// binding indices.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct EnvironmentMapIds {
    /// The blurry image that represents diffuse radiance surrounding a region.
    pub(crate) diffuse: AssetId<Image>,
    /// The typically-sharper, mipmapped image that represents specular radiance
    /// surrounding a region.
    pub(crate) specular: AssetId<Image>,
}
/// All the bind group entries necessary for PBR shaders to access the
/// environment maps exposed to a view.
pub(crate) enum RenderViewEnvironmentMapBindGroupEntries<'a> {
    /// The version used when binding arrays aren't available on the current
    /// platform. Only a single environment map can be bound.
    Single {
        /// The texture view of the view's diffuse cubemap.
        diffuse_texture_view: &'a TextureView,
        /// The texture view of the view's specular cubemap.
        specular_texture_view: &'a TextureView,
        /// The sampler used to sample both `diffuse_texture_view` and
        /// `specular_texture_view`.
        sampler: &'a Sampler,
    },
    /// The version used when binding arrays are available on the current
    /// platform.
    Multiple {
        /// A texture view of each diffuse cubemap, in the same order that they are
        /// supplied to the view (i.e. in the same order as
        /// `binding_index_to_textures` in [`RenderViewLightProbes`]).
        ///
        /// This is a vector of `wgpu::TextureView`s. But we don't want to import
        /// `wgpu` in this crate, so we refer to it indirectly like this.
        diffuse_texture_views: Vec<&'a <TextureView as Deref>::Target>,
        /// As above, but for specular cubemaps.
        specular_texture_views: Vec<&'a <TextureView as Deref>::Target>,
        /// The sampler used to sample elements of both `diffuse_texture_views` and
        /// `specular_texture_views`.
        sampler: &'a Sampler,
    },
}
/// Information about the environment map attached to the view, if any. This is
/// a global environment map that lights everything visible in the view, as
/// opposed to a light probe which affects only a specific area.
pub struct EnvironmentMapViewLightProbeInfo {
    /// The index of the diffuse and specular cubemaps in the binding arrays.
    ///
    /// This is -1 when the view has no environment map (see the `Default`
    /// impl).
    pub(crate) cubemap_index: i32,
    /// The smallest mip level of the specular cubemap.
    pub(crate) smallest_specular_mip_level: u32,
    /// The scale factor applied to the diffuse and specular light in the
    /// cubemap. This is in units of cd/m² (candela per square meter).
    pub(crate) intensity: f32,
    /// Whether this environment map affects the diffuse lighting of
    /// lightmapped meshes.
    pub(crate) affects_lightmapped_mesh_diffuse: bool,
}
impl ExtractInstance for EnvironmentMapIds {
type QueryData = Read<EnvironmentMapLight>;
type QueryFilter = ();
fn extract(item: QueryItem<'_, '_, Self::QueryData>) -> Option<Self> {
Some(EnvironmentMapIds {
diffuse: item.diffuse_map.id(),
specular: item.specular_map.id(),
})
}
}
/// Returns the bind group layout entries for the environment map diffuse and
/// specular binding arrays respectively, in addition to the sampler and the
/// per-view environment map uniform.
pub(crate) fn get_bind_group_layout_entries(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> [BindGroupLayoutEntryBuilder; 4] {
    let base_binding = binding_types::texture_cube(TextureSampleType::Float { filterable: true });

    // With binding-array support, each cubemap slot becomes an array of
    // `MAX_VIEW_LIGHT_PROBES` textures rather than a single binding.
    let texture_cube_binding = if binding_arrays_are_usable(render_device, render_adapter) {
        base_binding.count(NonZero::<u32>::new(MAX_VIEW_LIGHT_PROBES as _).unwrap())
    } else {
        base_binding
    };

    [
        texture_cube_binding,
        texture_cube_binding,
        binding_types::sampler(SamplerBindingType::Filtering),
        uniform_buffer::<EnvironmentMapUniform>(true).visibility(ShaderStages::FRAGMENT),
    ]
}
impl<'a> RenderViewEnvironmentMapBindGroupEntries<'a> {
    /// Looks up and returns the bindings for the environment map diffuse and
    /// specular binding arrays respectively, as well as the sampler.
    ///
    /// When binding arrays are usable, this returns the `Multiple` variant
    /// with fully-padded binding arrays; otherwise it returns the `Single`
    /// variant containing at most the first environment map supplied to the
    /// view, falling back to dummy cubemaps when none is available.
    pub(crate) fn get(
        render_view_environment_maps: Option<&RenderViewLightProbes<EnvironmentMapLight>>,
        images: &'a RenderAssets<GpuImage>,
        fallback_image: &'a FallbackImage,
        render_device: &RenderDevice,
        render_adapter: &RenderAdapter,
    ) -> RenderViewEnvironmentMapBindGroupEntries<'a> {
        if binding_arrays_are_usable(render_device, render_adapter) {
            let mut diffuse_texture_views = vec![];
            let mut specular_texture_views = vec![];
            let mut sampler = None;
            if let Some(environment_maps) = render_view_environment_maps {
                for &cubemap_id in &environment_maps.binding_index_to_textures {
                    add_cubemap_texture_view(
                        &mut diffuse_texture_views,
                        &mut sampler,
                        cubemap_id.diffuse,
                        images,
                        fallback_image,
                    );
                    add_cubemap_texture_view(
                        &mut specular_texture_views,
                        &mut sampler,
                        cubemap_id.specular,
                        images,
                        fallback_image,
                    );
                }
            }
            // Pad out the bindings to the size of the binding array using fallback
            // textures. This is necessary on D3D12 and Metal.
            diffuse_texture_views.resize(MAX_VIEW_LIGHT_PROBES, &*fallback_image.cube.texture_view);
            specular_texture_views
                .resize(MAX_VIEW_LIGHT_PROBES, &*fallback_image.cube.texture_view);
            return RenderViewEnvironmentMapBindGroupEntries::Multiple {
                diffuse_texture_views,
                specular_texture_views,
                sampler: sampler.unwrap_or(&fallback_image.cube.sampler),
            };
        }
        // Binding arrays aren't available: bind only the first environment map
        // supplied to the view, and only if both of its cubemaps are loaded.
        if let Some(environment_maps) = render_view_environment_maps
            && let Some(cubemap) = environment_maps.binding_index_to_textures.first()
            && let (Some(diffuse_image), Some(specular_image)) =
                (images.get(cubemap.diffuse), images.get(cubemap.specular))
        {
            return RenderViewEnvironmentMapBindGroupEntries::Single {
                diffuse_texture_view: &diffuse_image.texture_view,
                specular_texture_view: &specular_image.texture_view,
                sampler: &diffuse_image.sampler,
            };
        }
        // Fall back to dummy cubemaps and the fallback sampler.
        RenderViewEnvironmentMapBindGroupEntries::Single {
            diffuse_texture_view: &fallback_image.cube.texture_view,
            specular_texture_view: &fallback_image.cube.texture_view,
            sampler: &fallback_image.cube.sampler,
        }
    }
}
impl LightProbeComponent for EnvironmentMapLight {
    type AssetId = EnvironmentMapIds;

    // Information needed to render with the environment map attached to the
    // view.
    type ViewLightProbeInfo = EnvironmentMapViewLightProbeInfo;

    /// Returns the IDs of the diffuse and specular cubemaps, or `None` if
    /// either of them hasn't been uploaded to the GPU yet.
    fn id(&self, image_assets: &RenderAssets<GpuImage>) -> Option<Self::AssetId> {
        // A half-loaded environment map isn't renderable, so require both
        // cubemaps to be resident on the GPU before reporting an ID.
        image_assets.get(&self.diffuse_map)?;
        image_assets.get(&self.specular_map)?;
        Some(EnvironmentMapIds {
            diffuse: self.diffuse_map.id(),
            specular: self.specular_map.id(),
        })
    }

    /// Returns the scale factor applied to light sampled from the cubemaps.
    fn intensity(&self) -> f32 {
        self.intensity
    }

    fn affects_lightmapped_mesh_diffuse(&self) -> bool {
        self.affects_lightmapped_mesh_diffuse
    }

    /// Creates the per-view light probe list, recording the view's own
    /// environment map (if any and if fully loaded) along with its smallest
    /// specular mip level, intensity, and lightmapped-mesh flag.
    fn create_render_view_light_probes(
        view_component: Option<&EnvironmentMapLight>,
        image_assets: &RenderAssets<GpuImage>,
    ) -> RenderViewLightProbes<Self> {
        let mut render_view_light_probes = RenderViewLightProbes::new();

        // Find the index of the cubemap associated with the view, and determine
        // its smallest mip level. Skip this entirely if either cubemap hasn't
        // loaded yet.
        if let Some(EnvironmentMapLight {
            diffuse_map: diffuse_map_handle,
            specular_map: specular_map_handle,
            intensity,
            affects_lightmapped_mesh_diffuse,
            ..
        }) = view_component
            && let (Some(_), Some(specular_map)) = (
                image_assets.get(diffuse_map_handle),
                image_assets.get(specular_map_handle),
            )
        {
            render_view_light_probes.view_light_probe_info = EnvironmentMapViewLightProbeInfo {
                cubemap_index: render_view_light_probes.get_or_insert_cubemap(&EnvironmentMapIds {
                    diffuse: diffuse_map_handle.id(),
                    specular: specular_map_handle.id(),
                }) as i32,
                smallest_specular_mip_level: specular_map.mip_level_count - 1,
                intensity: *intensity,
                affects_lightmapped_mesh_diffuse: *affects_lightmapped_mesh_diffuse,
            };
        }

        render_view_light_probes
    }
}
impl Default for EnvironmentMapViewLightProbeInfo {
fn default() -> Self {
Self {
cubemap_index: -1,
smallest_specular_mip_level: 0,
intensity: 1.0,
affects_lightmapped_mesh_diffuse: true,
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/light_probe/mod.rs | crates/bevy_pbr/src/light_probe/mod.rs | //! Light probes for baked global illumination.
use bevy_app::{App, Plugin};
use bevy_asset::AssetId;
use bevy_camera::{
primitives::{Aabb, Frustum},
Camera3d,
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::With,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Local, Query, Res, ResMut},
};
use bevy_image::Image;
use bevy_light::{EnvironmentMapLight, IrradianceVolume, LightProbe};
use bevy_math::{Affine3A, FloatOrd, Mat4, Vec3A, Vec4};
use bevy_platform::collections::HashMap;
use bevy_render::{
extract_instances::ExtractInstancesPlugin,
render_asset::RenderAssets,
render_resource::{DynamicUniformBuffer, Sampler, ShaderType, TextureView},
renderer::{RenderAdapter, RenderAdapterInfo, RenderDevice, RenderQueue, WgpuWrapper},
settings::WgpuFeatures,
sync_world::RenderEntity,
texture::{FallbackImage, GpuImage},
view::ExtractedView,
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use bevy_shader::load_shader_library;
use bevy_transform::{components::Transform, prelude::GlobalTransform};
use tracing::error;
use core::{hash::Hash, ops::Deref};
use crate::{
generate::EnvironmentMapGenerationPlugin, light_probe::environment_map::EnvironmentMapIds,
};
pub mod environment_map;
pub mod generate;
pub mod irradiance_volume;
/// The maximum number of each type of light probe that each view will consider.
///
/// Because the fragment shader does a linear search through the list for each
/// fragment, this number needs to be relatively small. It's also the length of
/// the texture binding arrays, which must be padded out to exactly this size.
pub const MAX_VIEW_LIGHT_PROBES: usize = 8;
/// How many texture bindings are used in the fragment shader, *not* counting
/// environment maps or irradiance volumes.
const STANDARD_MATERIAL_FRAGMENT_SHADER_MIN_TEXTURE_BINDINGS: usize = 16;
/// Adds support for light probes: cuboid bounding regions that apply global
/// illumination to objects within them.
///
/// This also adds support for view environment maps: diffuse and specular
/// cubemaps applied to all objects that a view renders.
pub struct LightProbePlugin;
/// A GPU type that stores information about a light probe.
#[derive(Clone, Copy, ShaderType, Default)]
struct RenderLightProbe {
    /// The transform from the world space to the model space. This is used to
    /// efficiently check for bounding box intersection.
    ///
    /// Only three rows of the transposed matrix are stored; the shader
    /// transposes them to recover the full transform (see [`LightProbeInfo`]).
    light_from_world_transposed: [Vec4; 3],
    /// The index of the texture or textures in the appropriate binding array or
    /// arrays.
    ///
    /// For example, for reflection probes this is the index of the cubemap in
    /// the diffuse and specular texture arrays.
    texture_index: i32,
    /// Scale factor applied to the light generated by this light probe.
    ///
    /// See the comment in [`EnvironmentMapLight`] for details.
    intensity: f32,
    /// Whether this light probe adds to the diffuse contribution of the
    /// irradiance for meshes with lightmaps.
    ///
    /// Stored as a `u32` flag (nonzero = true).
    affects_lightmapped_mesh_diffuse: u32,
}
/// A per-view shader uniform that specifies all the light probes that the view
/// takes into account.
#[derive(ShaderType)]
pub struct LightProbesUniform {
    /// The list of applicable reflection probes, sorted from nearest to the
    /// camera to the farthest away from the camera.
    reflection_probes: [RenderLightProbe; MAX_VIEW_LIGHT_PROBES],
    /// The list of applicable irradiance volumes, sorted from nearest to the
    /// camera to the farthest away from the camera.
    irradiance_volumes: [RenderLightProbe; MAX_VIEW_LIGHT_PROBES],
    /// The number of reflection probes in the list.
    reflection_probe_count: i32,
    /// The number of irradiance volumes in the list.
    irradiance_volume_count: i32,
    /// The index of the diffuse and specular environment maps associated with
    /// the view itself. This is used as a fallback if no reflection probe in
    /// the list contains the fragment. Set to -1 if the view has no
    /// environment map.
    view_cubemap_index: i32,
    /// The smallest valid mipmap level for the specular environment cubemap
    /// associated with the view.
    smallest_specular_mip_level_for_view: u32,
    /// The intensity of the environment cubemap associated with the view.
    ///
    /// See the comment in [`EnvironmentMapLight`] for details.
    intensity_for_view: f32,
    /// Whether the environment map attached to the view affects the diffuse
    /// lighting for lightmapped meshes.
    ///
    /// This will be 1 if the map does affect lightmapped meshes or 0 otherwise.
    view_environment_map_affects_lightmapped_mesh_diffuse: u32,
}
/// A GPU buffer that stores information about all light probes.
///
/// There's one [`LightProbesUniform`] per view, addressed via the dynamic
/// offset in [`ViewLightProbesUniformOffset`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct LightProbesBuffer(DynamicUniformBuffer<LightProbesUniform>);
/// A component attached to each camera in the render world that stores the
/// index of the [`LightProbesUniform`] in the [`LightProbesBuffer`].
#[derive(Component, Default, Deref, DerefMut)]
pub struct ViewLightProbesUniformOffset(u32);
/// Information that [`gather_light_probes`] keeps about each light probe.
///
/// This information is parameterized by the [`LightProbeComponent`] type. This
/// will either be [`EnvironmentMapLight`] for reflection probes or
/// [`IrradianceVolume`] for irradiance volumes.
struct LightProbeInfo<C>
where
    C: LightProbeComponent,
{
    // The transform from world space to light probe space.
    // Stored as the transpose of the inverse transform to compress the structure
    // on the GPU (from 4 `Vec4`s to 3 `Vec4`s). The shader will transpose it
    // to recover the original inverse transform.
    light_from_world: [Vec4; 3],
    // The transform from light probe space to world space.
    world_from_light: Affine3A,
    // Scale factor applied to the diffuse and specular light generated by this
    // reflection probe.
    //
    // See the comment in [`EnvironmentMapLight`] for details.
    intensity: f32,
    // Whether this light probe adds to the diffuse contribution of the
    // irradiance for meshes with lightmaps.
    affects_lightmapped_mesh_diffuse: bool,
    // The IDs of all assets associated with this light probe.
    //
    // Because each type of light probe component may reference different types
    // of assets (e.g. a reflection probe references two cubemap assets while an
    // irradiance volume references a single 3D texture asset), this is generic.
    // See [`LightProbeComponent::AssetId`].
    asset_id: C::AssetId,
}
/// A component, part of the render world, that stores the mapping from asset ID
/// or IDs to the texture index in the appropriate binding arrays.
///
/// Cubemap textures belonging to environment maps are collected into binding
/// arrays, and the index of each texture is presented to the shader for runtime
/// lookup. 3D textures belonging to irradiance volumes are likewise collected
/// into binding arrays, and the shader accesses the 3D texture by index.
///
/// This component is attached to each view in the render world, because each
/// view may have a different set of light probes that it considers and therefore
/// the texture indices are per-view.
#[derive(Component, Default)]
pub struct RenderViewLightProbes<C>
where
    C: LightProbeComponent,
{
    /// The list of environment maps presented to the shader, in order.
    binding_index_to_textures: Vec<C::AssetId>,
    /// The reverse of `binding_index_to_textures`: a map from the texture ID to
    /// the index in `binding_index_to_textures`.
    cubemap_to_binding_index: HashMap<C::AssetId, u32>,
    /// Information about each light probe, ready for upload to the GPU, sorted
    /// in order from closest to the camera to farthest.
    ///
    /// Note that this is not necessarily ordered by binding index. So don't
    /// write code like
    /// `render_light_probes[cubemap_to_binding_index[asset_id]]`; instead
    /// search for the light probe with the appropriate binding index in this
    /// array.
    render_light_probes: Vec<RenderLightProbe>,
    /// Information needed to render the light probe attached directly to the
    /// view, if applicable.
    ///
    /// A light probe attached directly to a view represents a "global" light
    /// probe that affects all objects not in the bounding region of any light
    /// probe. Currently, the only light probe type that supports this is the
    /// [`EnvironmentMapLight`].
    view_light_probe_info: C::ViewLightProbeInfo,
}
/// A trait implemented by all components that represent light probes.
///
/// Currently, the two light probe types are [`EnvironmentMapLight`] and
/// [`IrradianceVolume`], for reflection probes and irradiance volumes
/// respectively.
///
/// Most light probe systems are written to be generic over the type of light
/// probe. This allows much of the code to be shared and enables easy addition
/// of more light probe types (e.g. real-time reflection planes) in the future.
pub trait LightProbeComponent: Send + Sync + Component + Sized {
    /// Holds [`AssetId`]s of the texture or textures that this light probe
    /// references.
    ///
    /// This can just be [`AssetId`] if the light probe only references one
    /// texture. If it references multiple textures, it will be a structure
    /// containing those asset IDs.
    ///
    /// It's used as a hash map key when mapping textures to binding indices,
    /// hence the `Clone + Eq + Hash` bounds.
    type AssetId: Send + Sync + Clone + Eq + Hash;
    /// If the light probe can be attached to the view itself (as opposed to a
    /// cuboid region within the scene), this contains the information that will
    /// be passed to the GPU in order to render it. Otherwise, this will be
    /// `()`.
    ///
    /// Currently, only reflection probes (i.e. [`EnvironmentMapLight`]) can be
    /// attached directly to views.
    type ViewLightProbeInfo: Send + Sync + Default;
    /// Returns the asset ID or asset IDs of the texture or textures referenced
    /// by this light probe, or `None` if those assets aren't loaded yet.
    fn id(&self, image_assets: &RenderAssets<GpuImage>) -> Option<Self::AssetId>;
    /// Returns the intensity of this light probe.
    ///
    /// This is a scaling factor that will be multiplied by the value or values
    /// sampled from the texture.
    fn intensity(&self) -> f32;
    /// Returns true if this light probe contributes diffuse lighting to meshes
    /// with lightmaps or false otherwise.
    fn affects_lightmapped_mesh_diffuse(&self) -> bool;
    /// Creates an instance of [`RenderViewLightProbes`] containing all the
    /// information needed to render this light probe.
    ///
    /// This is called for every light probe in view every frame.
    fn create_render_view_light_probes(
        view_component: Option<&Self>,
        image_assets: &RenderAssets<GpuImage>,
    ) -> RenderViewLightProbes<Self>;
}
/// The uniform struct extracted from [`EnvironmentMapLight`].
/// Will be available for use in the Environment Map shader.
#[derive(Component, ShaderType, Clone)]
pub struct EnvironmentMapUniform {
    /// The world space transformation matrix of the sample ray for environment cubemaps.
    ///
    /// This is the inverse of the environment map's rotation (see
    /// [`gather_environment_map_uniform`]).
    transform: Mat4,
}
impl Default for EnvironmentMapUniform {
    /// The default uniform applies no rotation to sample directions.
    fn default() -> Self {
        EnvironmentMapUniform {
            transform: Mat4::IDENTITY,
        }
    }
}
/// A GPU buffer that stores the environment map settings for each view.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct EnvironmentMapUniformBuffer(pub DynamicUniformBuffer<EnvironmentMapUniform>);
/// A component that stores the offset within the
/// [`EnvironmentMapUniformBuffer`] for each view.
#[derive(Component, Default, Deref, DerefMut)]
pub struct ViewEnvironmentMapUniformOffset(u32);
impl Plugin for LightProbePlugin {
    fn build(&self, app: &mut App) {
        // Shader libraries used by the PBR shaders for light probe lookup.
        load_shader_library!(app, "light_probe.wgsl");
        load_shader_library!(app, "environment_map.wgsl");
        load_shader_library!(app, "irradiance_volume.wgsl");
        app.add_plugins((
            EnvironmentMapGenerationPlugin,
            ExtractInstancesPlugin::<EnvironmentMapIds>::new(),
        ));
        // The rest of the setup applies only to the render world, if present.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // Light probes are gathered per view during extraction and uploaded to
        // GPU buffers during resource preparation.
        render_app
            .init_resource::<LightProbesBuffer>()
            .init_resource::<EnvironmentMapUniformBuffer>()
            .add_systems(ExtractSchedule, gather_environment_map_uniform)
            .add_systems(ExtractSchedule, gather_light_probes::<EnvironmentMapLight>)
            .add_systems(ExtractSchedule, gather_light_probes::<IrradianceVolume>)
            .add_systems(
                Render,
                (upload_light_probes, prepare_environment_uniform_buffer)
                    .in_set(RenderSystems::PrepareResources),
            );
    }
}
/// Extracts [`EnvironmentMapLight`] from views and creates [`EnvironmentMapUniform`] for them.
///
/// Compared to the `ExtractComponentPlugin`, this implementation will create a default instance
/// if one does not already exist.
fn gather_environment_map_uniform(
    view_query: Extract<Query<(RenderEntity, Option<&EnvironmentMapLight>), With<Camera3d>>>,
    mut commands: Commands,
) {
    for (view_entity, environment_map_light) in view_query.iter() {
        // Store the inverse of the environment map's rotation; the identity
        // matrix serves as the default when the view has no environment map.
        let environment_map_uniform = environment_map_light.map_or_else(
            EnvironmentMapUniform::default,
            |light| EnvironmentMapUniform {
                transform: Transform::from_rotation(light.rotation)
                    .to_matrix()
                    .inverse(),
            },
        );
        commands
            .get_entity(view_entity)
            .expect("Environment map light entity wasn't synced.")
            .insert(environment_map_uniform);
    }
}
/// Gathers up all light probes of a single type in the scene and assigns them
/// to views, performing frustum culling and distance sorting in the process.
///
/// This runs once per light probe type `C` (reflection probes and irradiance
/// volumes). The two `Local` vectors are scratch buffers reused across frames
/// to avoid per-frame allocation.
fn gather_light_probes<C>(
    image_assets: Res<RenderAssets<GpuImage>>,
    light_probe_query: Extract<Query<(&GlobalTransform, &C), With<LightProbe>>>,
    view_query: Extract<
        Query<(RenderEntity, &GlobalTransform, &Frustum, Option<&C>), With<Camera3d>>,
    >,
    mut reflection_probes: Local<Vec<LightProbeInfo<C>>>,
    mut view_reflection_probes: Local<Vec<LightProbeInfo<C>>>,
    mut commands: Commands,
) where
    C: LightProbeComponent,
{
    // Create [`LightProbeInfo`] for every light probe in the scene.
    reflection_probes.clear();
    reflection_probes.extend(
        light_probe_query
            .iter()
            .filter_map(|query_row| LightProbeInfo::new(query_row, &image_assets)),
    );
    // Build up the light probes uniform and the key table.
    for (view_entity, view_transform, view_frustum, view_component) in view_query.iter() {
        // Cull light probes outside the view frustum.
        view_reflection_probes.clear();
        view_reflection_probes.extend(
            reflection_probes
                .iter()
                .filter(|light_probe_info| light_probe_info.frustum_cull(view_frustum))
                .cloned(),
        );
        // Sort by distance to camera.
        view_reflection_probes.sort_by_cached_key(|light_probe_info| {
            light_probe_info.camera_distance_sort_key(view_transform)
        });
        // Create the light probes list.
        let mut render_view_light_probes =
            C::create_render_view_light_probes(view_component, &image_assets);
        // Gather up the light probes in the list.
        render_view_light_probes.maybe_gather_light_probes(&view_reflection_probes);
        // Record the per-view light probes. If there are none, remove any
        // stale component left over from a previous frame.
        if render_view_light_probes.is_empty() {
            commands
                .get_entity(view_entity)
                .expect("View entity wasn't synced.")
                .remove::<RenderViewLightProbes<C>>();
        } else {
            commands
                .get_entity(view_entity)
                .expect("View entity wasn't synced.")
                .insert(render_view_light_probes);
        }
    }
}
/// Gathers up environment map settings for each applicable view and
/// writes them into a GPU buffer.
pub fn prepare_environment_uniform_buffer(
    mut commands: Commands,
    views: Query<(Entity, Option<&EnvironmentMapUniform>), With<ExtractedView>>,
    mut environment_uniform_buffer: ResMut<EnvironmentMapUniformBuffer>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // Size the buffer for one uniform per view; bail if there's nothing to
    // write.
    let view_count = views.iter().len();
    let Some(mut writer) =
        environment_uniform_buffer.get_writer(view_count, &render_device, &render_queue)
    else {
        return;
    };

    for (view, maybe_uniform) in &views {
        // Views without an environment map uniform get offset 0.
        let uniform_offset = maybe_uniform.map_or(0, |uniform| writer.write(uniform));
        commands
            .entity(view)
            .insert(ViewEnvironmentMapUniformOffset(uniform_offset));
    }
}
// A system that runs after [`gather_light_probes`] and populates the GPU
// uniforms with the results.
//
// Note that, unlike [`gather_light_probes`], this system is not generic over
// the type of light probe. It collects light probes of all types together into
// a single structure, ready to be passed to the shader.
fn upload_light_probes(
    mut commands: Commands,
    views: Query<Entity, With<ExtractedView>>,
    mut light_probes_buffer: ResMut<LightProbesBuffer>,
    mut view_light_probes_query: Query<(
        Option<&RenderViewLightProbes<EnvironmentMapLight>>,
        Option<&RenderViewLightProbes<IrradianceVolume>>,
    )>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // If there are no views, bail.
    if views.is_empty() {
        return;
    }
    // Initialize the uniform buffer writer. Bail instead of panicking if the
    // writer can't be created, for consistency with
    // `prepare_environment_uniform_buffer`.
    let Some(mut writer) =
        light_probes_buffer.get_writer(views.iter().len(), &render_device, &render_queue)
    else {
        return;
    };
    // Process each view.
    for view_entity in views.iter() {
        let Ok((render_view_environment_maps, render_view_irradiance_volumes)) =
            view_light_probes_query.get_mut(view_entity)
        else {
            error!("Failed to find `RenderViewLightProbes` for the view!");
            continue;
        };
        // Initialize the uniform with only the view environment map, if there
        // is one. Counts are clamped to `MAX_VIEW_LIGHT_PROBES`, the size of
        // the fixed-length uniform arrays.
        let mut light_probes_uniform = LightProbesUniform {
            reflection_probes: [RenderLightProbe::default(); MAX_VIEW_LIGHT_PROBES],
            irradiance_volumes: [RenderLightProbe::default(); MAX_VIEW_LIGHT_PROBES],
            reflection_probe_count: render_view_environment_maps
                .map(RenderViewLightProbes::len)
                .unwrap_or_default()
                .min(MAX_VIEW_LIGHT_PROBES) as i32,
            irradiance_volume_count: render_view_irradiance_volumes
                .map(RenderViewLightProbes::len)
                .unwrap_or_default()
                .min(MAX_VIEW_LIGHT_PROBES) as i32,
            // -1 signals "no view cubemap" to the shader.
            view_cubemap_index: render_view_environment_maps
                .map(|maps| maps.view_light_probe_info.cubemap_index)
                .unwrap_or(-1),
            smallest_specular_mip_level_for_view: render_view_environment_maps
                .map(|maps| maps.view_light_probe_info.smallest_specular_mip_level)
                .unwrap_or(0),
            intensity_for_view: render_view_environment_maps
                .map(|maps| maps.view_light_probe_info.intensity)
                .unwrap_or(1.0),
            view_environment_map_affects_lightmapped_mesh_diffuse: render_view_environment_maps
                .map(|maps| maps.view_light_probe_info.affects_lightmapped_mesh_diffuse as u32)
                .unwrap_or(1),
        };
        // Add any environment maps that [`gather_light_probes`] found to the
        // uniform.
        if let Some(render_view_environment_maps) = render_view_environment_maps {
            render_view_environment_maps.add_to_uniform(
                &mut light_probes_uniform.reflection_probes,
                &mut light_probes_uniform.reflection_probe_count,
            );
        }
        // Add any irradiance volumes that [`gather_light_probes`] found to the
        // uniform.
        if let Some(render_view_irradiance_volumes) = render_view_irradiance_volumes {
            render_view_irradiance_volumes.add_to_uniform(
                &mut light_probes_uniform.irradiance_volumes,
                &mut light_probes_uniform.irradiance_volume_count,
            );
        }
        // Queue the view's uniforms to be written to the GPU.
        let uniform_offset = writer.write(&light_probes_uniform);
        commands
            .entity(view_entity)
            .insert(ViewLightProbesUniformOffset(uniform_offset));
    }
}
impl Default for LightProbesUniform {
    /// Returns a uniform representing "no light probes": zeroed probe arrays,
    /// zero counts, no view cubemap, and unit intensity.
    fn default() -> Self {
        Self {
            reflection_probes: [RenderLightProbe::default(); MAX_VIEW_LIGHT_PROBES],
            irradiance_volumes: [RenderLightProbe::default(); MAX_VIEW_LIGHT_PROBES],
            reflection_probe_count: 0,
            irradiance_volume_count: 0,
            // -1 means the view has no environment map cubemap of its own,
            // matching the sentinel used by `upload_light_probes`.
            view_cubemap_index: -1,
            smallest_specular_mip_level_for_view: 0,
            intensity_for_view: 1.0,
            view_environment_map_affects_lightmapped_mesh_diffuse: 1,
        }
    }
}
impl<C> LightProbeInfo<C>
where
    C: LightProbeComponent,
{
    /// Given the set of light probe components, constructs and returns
    /// [`LightProbeInfo`]. This is done for every light probe in the scene
    /// every frame.
    ///
    /// Returns `None` when the probe's asset isn't resident on the GPU yet.
    fn new(
        (light_probe_transform, environment_map): (&GlobalTransform, &C),
        image_assets: &RenderAssets<GpuImage>,
    ) -> Option<LightProbeInfo<C>> {
        let asset_id = environment_map.id(image_assets)?;
        let world_from_light = light_probe_transform.affine();
        // The shader consumes the inverse transform transposed, stored as its
        // first three rows.
        let inverse_transposed = Mat4::from(world_from_light.inverse()).transpose();
        Some(LightProbeInfo {
            world_from_light,
            light_from_world: [
                inverse_transposed.x_axis,
                inverse_transposed.y_axis,
                inverse_transposed.z_axis,
            ],
            asset_id,
            intensity: environment_map.intensity(),
            affects_lightmapped_mesh_diffuse: environment_map.affects_lightmapped_mesh_diffuse(),
        })
    }

    /// Returns true if this light probe is in the viewing frustum of the camera
    /// or false if it isn't.
    fn frustum_cull(&self, view_frustum: &Frustum) -> bool {
        // In its own space, a light probe is a unit cube centered at the
        // origin; transform that cube by `world_from_light` and test it.
        let unit_cube = Aabb {
            center: Vec3A::default(),
            half_extents: Vec3A::splat(0.5),
        };
        view_frustum.intersects_obb(&unit_cube, &self.world_from_light, true, false)
    }

    /// Returns the squared distance from this light probe to the camera,
    /// suitable for distance sorting.
    fn camera_distance_sort_key(&self, view_transform: &GlobalTransform) -> FloatOrd {
        let to_camera = self.world_from_light.translation - view_transform.translation_vec3a();
        FloatOrd(to_camera.length_squared())
    }
}
impl<C> RenderViewLightProbes<C>
where
    C: LightProbeComponent,
{
    /// Creates a new empty list of light probes.
    fn new() -> RenderViewLightProbes<C> {
        RenderViewLightProbes {
            binding_index_to_textures: Vec::new(),
            cubemap_to_binding_index: HashMap::default(),
            render_light_probes: Vec::new(),
            view_light_probe_info: C::ViewLightProbeInfo::default(),
        }
    }

    /// Returns true if there are no light probes in the list.
    pub(crate) fn is_empty(&self) -> bool {
        self.binding_index_to_textures.is_empty()
    }

    /// Returns the number of light probes in the list.
    pub(crate) fn len(&self) -> usize {
        self.binding_index_to_textures.len()
    }

    /// Adds a cubemap to the list of bindings, if it wasn't there already, and
    /// returns its index within that list.
    pub(crate) fn get_or_insert_cubemap(&mut self, cubemap_id: &C::AssetId) -> u32 {
        // Borrow the texture list separately so the closure below can push to
        // it while the map entry is held.
        let textures = &mut self.binding_index_to_textures;
        *self
            .cubemap_to_binding_index
            .entry((*cubemap_id).clone())
            .or_insert_with(|| {
                textures.push((*cubemap_id).clone());
                (textures.len() - 1) as u32
            })
    }

    /// Adds all the light probes in this structure to the supplied array, which
    /// is expected to be shipped to the GPU.
    fn add_to_uniform(
        &self,
        render_light_probes: &mut [RenderLightProbe; MAX_VIEW_LIGHT_PROBES],
        render_light_probe_count: &mut i32,
    ) {
        let count = self.render_light_probes.len();
        render_light_probes[..count].copy_from_slice(&self.render_light_probes);
        *render_light_probe_count = count as i32;
    }

    /// Gathers up all light probes of the given type in the scene and records
    /// them in this structure.
    fn maybe_gather_light_probes(&mut self, light_probes: &[LightProbeInfo<C>]) {
        // Only the first `MAX_VIEW_LIGHT_PROBES` probes fit in the uniform.
        for light_probe in light_probes.iter().take(MAX_VIEW_LIGHT_PROBES) {
            // Determine the index of the cubemap in the binding array.
            let texture_index = self.get_or_insert_cubemap(&light_probe.asset_id) as i32;
            // Write in the light probe data.
            self.render_light_probes.push(RenderLightProbe {
                light_from_world_transposed: light_probe.light_from_world,
                texture_index,
                intensity: light_probe.intensity,
                affects_lightmapped_mesh_diffuse: u32::from(
                    light_probe.affects_lightmapped_mesh_diffuse,
                ),
            });
        }
    }
}
impl<C> Clone for LightProbeInfo<C>
where
    C: LightProbeComponent,
{
    // Implemented by hand, presumably to avoid the `C: Clone` bound that
    // `#[derive(Clone)]` would impose — TODO confirm.
    fn clone(&self) -> Self {
        Self {
            world_from_light: self.world_from_light,
            light_from_world: self.light_from_world,
            asset_id: self.asset_id.clone(),
            intensity: self.intensity,
            affects_lightmapped_mesh_diffuse: self.affects_lightmapped_mesh_diffuse,
        }
    }
}
/// Adds a diffuse or specular texture view to the `texture_views` list, and
/// populates `sampler` if this is the first such view.
pub(crate) fn add_cubemap_texture_view<'a>(
    texture_views: &mut Vec<&'a <TextureView as Deref>::Target>,
    sampler: &mut Option<&'a Sampler>,
    image_id: AssetId<Image>,
    images: &'a RenderAssets<GpuImage>,
    fallback_image: &'a FallbackImage,
) {
    let Some(image) = images.get(image_id) else {
        // The cubemap isn't loaded yet; substitute the fallback cube view.
        texture_views.push(&*fallback_image.cube.texture_view);
        return;
    };
    // The first real cubemap we encounter supplies the sampler.
    sampler.get_or_insert(&image.sampler);
    texture_views.push(&*image.texture_view);
}
/// Many things can go wrong when attempting to use texture binding arrays
/// (a.k.a. bindless textures). This function checks for these pitfalls:
///
/// 1. If GLSL support is enabled at the feature level, then in debug mode
/// `naga_oil` will attempt to compile all shader modules under GLSL to check
/// validity of names, even if GLSL isn't actually used. This will cause a crash
/// if binding arrays are enabled, because binding arrays are currently
/// unimplemented in the GLSL backend of Naga. Therefore, we disable binding
/// arrays if the `shader_format_glsl` feature is present.
///
/// 2. If there aren't enough texture bindings available to accommodate all the
/// binding arrays, the driver will panic. So we also bail out if there aren't
/// enough texture bindings available in the fragment shader.
///
/// 3. If binding arrays aren't supported on the hardware, then we obviously
/// can't use them. Adreno <= 610 claims to support bindless, but seems to be
/// too buggy to be usable.
///
/// 4. If binding arrays are supported on the hardware, but they can only be
/// accessed by uniform indices, that's not good enough, and we bail out.
///
/// If binding arrays aren't usable, we disable reflection probes and limit the
/// number of irradiance volumes in the scene to 1.
pub(crate) fn binding_arrays_are_usable(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> bool {
    let adapter_info = RenderAdapterInfo(WgpuWrapper::new(render_adapter.get_info()));

    // Pitfall 1: Naga's GLSL backend can't compile binding arrays.
    if cfg!(feature = "shader_format_glsl") {
        return false;
    }

    // Pitfall 3: Adreno ≤ 610 is too buggy for bindless despite claiming support.
    if bevy_render::get_adreno_model(&adapter_info).is_some_and(|model| model <= 610) {
        return false;
    }

    // Pitfall 2: ensure enough bindings exist for all of our binding arrays.
    let required_bindings =
        (STANDARD_MATERIAL_FRAGMENT_SHADER_MIN_TEXTURE_BINDINGS + MAX_VIEW_LIGHT_PROBES) as u32;
    if render_device.limits().max_storage_textures_per_shader_stage < required_bindings {
        return false;
    }

    // Pitfall 4: require binding arrays *and* non-uniform indexing support.
    render_device.features().contains(
        WgpuFeatures::TEXTURE_BINDING_ARRAY
            | WgpuFeatures::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
    )
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/light_probe/generate.rs | crates/bevy_pbr/src/light_probe/generate.rs | //! Like [`EnvironmentMapLight`], but filtered in realtime from a cubemap.
//!
//! An environment map needs to be processed to be able to support uses beyond a simple skybox,
//! such as reflections, and ambient light contribution.
//! This process is called filtering, and can either be done ahead of time (prefiltering), or
//! in realtime, although at a reduced quality. Prefiltering is preferred, but not always possible:
//! sometimes you only gain access to an environment map at runtime, for whatever reason.
//! Typically this is from realtime reflection probes, but can also be from other sources.
//!
//! In any case, Bevy supports both modes of filtering.
//! This module provides realtime filtering via [`bevy_light::GeneratedEnvironmentMapLight`].
//! For prefiltered environment maps, see [`bevy_light::EnvironmentMapLight`].
//! These components are intended to be added to a camera.
use bevy_app::{App, Plugin, Update};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Assets, RenderAssetUsages};
use bevy_core_pipeline::{
core_3d::graph::{Core3d, Node3d},
mip_generation::{self, DownsampleShaders, DownsamplingConstants},
};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{QueryState, With, Without},
resource::Resource,
schedule::IntoScheduleConfigs,
system::{lifetimeless::Read, Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::Image;
use bevy_math::{Quat, UVec2, Vec2};
use bevy_render::{
diagnostic::RecordDiagnostics,
render_asset::RenderAssets,
render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt, RenderLabel},
render_resource::{
binding_types::*, AddressMode, BindGroup, BindGroupEntries, BindGroupLayoutDescriptor,
BindGroupLayoutEntries, CachedComputePipelineId, ComputePassDescriptor,
ComputePipelineDescriptor, DownlevelFlags, Extent3d, FilterMode, PipelineCache, Sampler,
SamplerBindingType, SamplerDescriptor, ShaderStages, ShaderType, StorageTextureAccess,
Texture, TextureAspect, TextureDescriptor, TextureDimension, TextureFormat,
TextureSampleType, TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension,
UniformBuffer,
},
renderer::{RenderAdapter, RenderContext, RenderDevice, RenderQueue},
settings::WgpuFeatures,
sync_component::SyncComponentPlugin,
sync_world::RenderEntity,
texture::{CachedTexture, GpuImage, TextureCache},
Extract, ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
};
// Implementation: generate diffuse and specular cubemaps required by PBR
// from a given high-res cubemap by
//
// 1. Copying the base mip (level 0) of the source cubemap into an intermediate
// storage texture.
// 2. Generating mipmaps using [single-pass down-sampling] (SPD).
// 3. Convolving the mip chain twice:
// * a [Lambertian convolution] for the 32 × 32 diffuse cubemap
// * a [GGX convolution], once per mip level, for the specular cubemap.
//
// [single-pass down-sampling]: https://gpuopen.com/fidelityfx-spd/
// [Lambertian convolution]: https://bruop.github.io/ibl/#:~:text=Lambertian%20Diffuse%20Component
// [GGX convolution]: https://gpuopen.com/download/Bounded_VNDF_Sampling_for_Smith-GGX_Reflections.pdf
use bevy_light::{EnvironmentMapLight, GeneratedEnvironmentMapLight};
use bevy_shader::ShaderDefVal;
use core::cmp::min;
use tracing::info;
use crate::Bluenoise;
/// Labels for the environment map generation nodes
#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash, RenderLabel)]
pub enum GeneratorNode {
    /// Builds the intermediate cubemap's mip chain via single-pass downsampling.
    Downsampling,
    /// Convolves the mip chain into the diffuse and specular output cubemaps.
    Filtering,
}
/// Stores the bind group layouts for the environment map generation pipelines
#[derive(Resource)]
pub struct GeneratorBindGroupLayouts {
    /// Layout for the first SPD pass (mips 1–6, or the whole 1–12 chain when
    /// the device supports a combined bind group).
    pub downsampling_first: BindGroupLayoutDescriptor,
    /// Layout for the second SPD pass (mips 7–12, reading mip 6 as input).
    pub downsampling_second: BindGroupLayoutDescriptor,
    /// Layout for the GGX (specular/radiance) convolution pass.
    pub radiance: BindGroupLayoutDescriptor,
    /// Layout for the Lambertian (diffuse/irradiance) convolution pass.
    pub irradiance: BindGroupLayoutDescriptor,
    /// Layout for the pass that copies the source cubemap into mip 0.
    pub copy: BindGroupLayoutDescriptor,
}
/// Samplers for the environment map generation pipelines
#[derive(Resource)]
pub struct GeneratorSamplers {
    /// Trilinear (linear min/mag/mip) sampler with clamp-to-edge addressing,
    /// shared by all generation passes.
    pub linear: Sampler,
}
/// Pipelines for the environment map generation pipelines
#[derive(Resource)]
pub struct GeneratorPipelines {
    /// SPD pass producing the first half of the mip chain.
    pub downsample_first: CachedComputePipelineId,
    /// SPD pass producing the second half of the mip chain.
    pub downsample_second: CachedComputePipelineId,
    /// Copies (and format-converts) the source cubemap into mip 0.
    pub copy: CachedComputePipelineId,
    /// GGX convolution generating the specular (radiance) map.
    pub radiance: CachedComputePipelineId,
    /// Lambertian convolution generating the diffuse (irradiance) map.
    pub irradiance: CachedComputePipelineId,
}
/// Configuration for downsampling strategy based on device limits
#[derive(Resource, Clone, Copy, Debug, PartialEq, Eq)]
pub struct DownsamplingConfig {
    // True when the device can bind ≥12 storage textures and use read-write
    // storage textures, letting both SPD passes share one combined bind group.
    pub combine_bind_group: bool,
}
pub struct EnvironmentMapGenerationPlugin;
impl Plugin for EnvironmentMapGenerationPlugin {
    // Setup is deferred to `finish` so that the `RenderDevice`/`RenderAdapter`
    // resources are available for the capability checks below.
    fn build(&self, _: &mut App) {}
    fn finish(&self, app: &mut App) {
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            let adapter = render_app.world().resource::<RenderAdapter>();
            let device = render_app.world().resource::<RenderDevice>();
            // Cubemap SPD requires at least 6 storage textures
            let limit_support = device.limits().max_storage_textures_per_shader_stage >= 6
                && device.limits().max_compute_workgroup_storage_size != 0
                && device.limits().max_compute_workgroup_size_x != 0;
            let downlevel_support = adapter
                .get_downlevel_capabilities()
                .flags
                .contains(DownlevelFlags::COMPUTE_SHADERS);
            // Bail out entirely on platforms without compute support; the rest
            // of the plugin is a no-op in that case.
            if !limit_support || !downlevel_support {
                info!("Disabling EnvironmentMapGenerationPlugin because compute is not supported on this platform. This is safe to ignore if you are not using EnvironmentMapGenerationPlugin.");
                return;
            }
        } else {
            return;
        }
        embedded_asset!(app, "environment_filter.wgsl");
        embedded_asset!(app, "copy.wgsl");
        app.add_plugins(SyncComponentPlugin::<GeneratedEnvironmentMapLight>::default())
            .add_systems(Update, generate_environment_map_light);
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            // Downsampling then filtering run between the prepasses and the
            // main pass.
            .add_render_graph_node::<DownsamplingNode>(Core3d, GeneratorNode::Downsampling)
            .add_render_graph_node::<FilteringNode>(Core3d, GeneratorNode::Filtering)
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::EndPrepasses,
                    GeneratorNode::Downsampling,
                    GeneratorNode::Filtering,
                    Node3d::StartMainPass,
                ),
            )
            .add_systems(
                ExtractSchedule,
                extract_generated_environment_map_entities.after(generate_environment_map_light),
            )
            .add_systems(
                Render,
                prepare_generated_environment_map_bind_groups
                    .in_set(RenderSystems::PrepareBindGroups),
            )
            .add_systems(
                Render,
                prepare_generated_environment_map_intermediate_textures
                    .in_set(RenderSystems::PrepareResources),
            )
            .add_systems(
                RenderStartup,
                initialize_generated_environment_map_resources,
            );
    }
}
/// Initializes all render-world resources used by the environment-map generator once on
/// [`bevy_render::RenderStartup`].
///
/// Creates the bind group layouts, the shared linear sampler, and queues the
/// five compute pipelines (two SPD downsampling passes, the radiance and
/// irradiance convolutions, and the mip-0 copy), then inserts them all as
/// render-world resources along with the chosen [`DownsamplingConfig`].
pub fn initialize_generated_environment_map_resources(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_adapter: Res<RenderAdapter>,
    pipeline_cache: Res<PipelineCache>,
    asset_server: Res<AssetServer>,
    downsample_shaders: Res<DownsampleShaders>,
) {
    // Combine the bind group and use read-write storage if it is supported
    let combine_bind_group =
        mip_generation::can_combine_downsampling_bind_groups(&render_adapter, &render_device);
    // Output mips are write-only
    let mips =
        texture_storage_2d_array(TextureFormat::Rgba16Float, StorageTextureAccess::WriteOnly);
    // Bind group layouts. NOTE: entry order here must match the bind group
    // creation in `prepare_generated_environment_map_bind_groups` and the
    // shader bindings.
    let (downsampling_first, downsampling_second) = if combine_bind_group {
        // One big bind group layout containing all outputs 1–12
        let downsampling = BindGroupLayoutDescriptor::new(
            "downsampling_bind_group_layout_combined",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<DownsamplingConstants>(false),
                    texture_2d_array(TextureSampleType::Float { filterable: true }),
                    mips, // 1
                    mips, // 2
                    mips, // 3
                    mips, // 4
                    mips, // 5
                    // Mip 6 is read-write: the second half of the combined
                    // pass reads it back as its input (cf. the split layout
                    // below, where it's bound as a sampled texture instead).
                    texture_storage_2d_array(
                        TextureFormat::Rgba16Float,
                        StorageTextureAccess::ReadWrite,
                    ), // 6
                    mips, // 7
                    mips, // 8
                    mips, // 9
                    mips, // 10
                    mips, // 11
                    mips, // 12
                ),
            ),
        );
        (downsampling.clone(), downsampling)
    } else {
        // Split layout: first pass outputs 1–6, second pass outputs 7–12 (input mip6 read-only)
        let downsampling_first = BindGroupLayoutDescriptor::new(
            "downsampling_first_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<DownsamplingConstants>(false),
                    // Input mip 0
                    texture_2d_array(TextureSampleType::Float { filterable: true }),
                    mips, // 1
                    mips, // 2
                    mips, // 3
                    mips, // 4
                    mips, // 5
                    mips, // 6
                ),
            ),
        );
        let downsampling_second = BindGroupLayoutDescriptor::new(
            "downsampling_second_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<DownsamplingConstants>(false),
                    // Input mip 6
                    texture_2d_array(TextureSampleType::Float { filterable: true }),
                    mips, // 7
                    mips, // 8
                    mips, // 9
                    mips, // 10
                    mips, // 11
                    mips, // 12
                ),
            ),
        );
        (downsampling_first, downsampling_second)
    };
    let radiance = BindGroupLayoutDescriptor::new(
        "radiance_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                // Source environment cubemap
                texture_2d_array(TextureSampleType::Float { filterable: true }),
                sampler(SamplerBindingType::Filtering), // Source sampler
                // Output specular map
                texture_storage_2d_array(
                    TextureFormat::Rgba16Float,
                    StorageTextureAccess::WriteOnly,
                ),
                uniform_buffer::<FilteringConstants>(false), // Uniforms
                texture_2d_array(TextureSampleType::Float { filterable: true }), // Blue noise texture
            ),
        ),
    );
    let irradiance = BindGroupLayoutDescriptor::new(
        "irradiance_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                // Source environment cubemap
                texture_2d_array(TextureSampleType::Float { filterable: true }),
                sampler(SamplerBindingType::Filtering), // Source sampler
                // Output irradiance map
                texture_storage_2d_array(
                    TextureFormat::Rgba16Float,
                    StorageTextureAccess::WriteOnly,
                ),
                uniform_buffer::<FilteringConstants>(false), // Uniforms
                texture_2d_array(TextureSampleType::Float { filterable: true }), // Blue noise texture
            ),
        ),
    );
    let copy = BindGroupLayoutDescriptor::new(
        "copy_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                // Source cubemap
                texture_2d_array(TextureSampleType::Float { filterable: true }),
                // Destination mip0
                texture_storage_2d_array(
                    TextureFormat::Rgba16Float,
                    StorageTextureAccess::WriteOnly,
                ),
            ),
        ),
    );
    let layouts = GeneratorBindGroupLayouts {
        downsampling_first,
        downsampling_second,
        radiance,
        irradiance,
        copy,
    };
    // Samplers
    let linear = render_device.create_sampler(&SamplerDescriptor {
        label: Some("generator_linear_sampler"),
        address_mode_u: AddressMode::ClampToEdge,
        address_mode_v: AddressMode::ClampToEdge,
        address_mode_w: AddressMode::ClampToEdge,
        mag_filter: FilterMode::Linear,
        min_filter: FilterMode::Linear,
        mipmap_filter: FilterMode::Linear,
        ..Default::default()
    });
    let samplers = GeneratorSamplers { linear };
    // Pipelines: build the shader-def list shared by all pipelines from the
    // device's capabilities.
    let features = render_device.features();
    let mut shader_defs = vec![];
    if features.contains(WgpuFeatures::SUBGROUP) {
        shader_defs.push(ShaderDefVal::Int("SUBGROUP_SUPPORT".into(), 1));
    }
    if combine_bind_group {
        shader_defs.push(ShaderDefVal::Int("COMBINE_BIND_GROUP".into(), 1));
    }
    shader_defs.push(ShaderDefVal::Bool("ARRAY_TEXTURE".into(), true));
    #[cfg(feature = "bluenoise_texture")]
    {
        shader_defs.push(ShaderDefVal::Int("HAS_BLUE_NOISE".into(), 1));
    }
    let env_filter_shader = load_embedded_asset!(asset_server.as_ref(), "environment_filter.wgsl");
    let copy_shader = load_embedded_asset!(asset_server.as_ref(), "copy.wgsl");
    let downsampling_shader = downsample_shaders
        .general
        .get(&TextureFormat::Rgba16Float)
        .expect("Mip generation shader should exist in the general downsampling shader table");
    // First pass for base mip levels (0-5)
    let downsample_first = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
        label: Some("downsampling_first_pipeline".into()),
        layout: vec![layouts.downsampling_first.clone()],
        push_constant_ranges: vec![],
        shader: downsampling_shader.clone(),
        shader_defs: {
            // FIRST_PASS/SECOND_PASS only matter in the split-bind-group path.
            let mut defs = shader_defs.clone();
            if !combine_bind_group {
                defs.push(ShaderDefVal::Int("FIRST_PASS".into(), 1));
            }
            defs
        },
        entry_point: Some("downsample_first".into()),
        zero_initialize_workgroup_memory: false,
    });
    let downsample_second = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
        label: Some("downsampling_second_pipeline".into()),
        layout: vec![layouts.downsampling_second.clone()],
        push_constant_ranges: vec![],
        shader: downsampling_shader.clone(),
        shader_defs: {
            let mut defs = shader_defs.clone();
            if !combine_bind_group {
                defs.push(ShaderDefVal::Int("SECOND_PASS".into(), 1));
            }
            defs
        },
        entry_point: Some("downsample_second".into()),
        zero_initialize_workgroup_memory: false,
    });
    // Radiance map for specular environment maps
    let radiance = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
        label: Some("radiance_pipeline".into()),
        layout: vec![layouts.radiance.clone()],
        push_constant_ranges: vec![],
        shader: env_filter_shader.clone(),
        shader_defs: shader_defs.clone(),
        entry_point: Some("generate_radiance_map".into()),
        zero_initialize_workgroup_memory: false,
    });
    // Irradiance map for diffuse environment maps
    let irradiance = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
        label: Some("irradiance_pipeline".into()),
        layout: vec![layouts.irradiance.clone()],
        push_constant_ranges: vec![],
        shader: env_filter_shader,
        shader_defs: shader_defs.clone(),
        entry_point: Some("generate_irradiance_map".into()),
        zero_initialize_workgroup_memory: false,
    });
    // Copy pipeline handles format conversion and populates mip0 when formats differ
    let copy_pipeline = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
        label: Some("copy_pipeline".into()),
        layout: vec![layouts.copy.clone()],
        push_constant_ranges: vec![],
        shader: copy_shader,
        shader_defs: vec![],
        entry_point: Some("copy".into()),
        zero_initialize_workgroup_memory: false,
    });
    let pipelines = GeneratorPipelines {
        downsample_first,
        downsample_second,
        radiance,
        irradiance,
        copy: copy_pipeline,
    };
    // Insert all resources into the render world
    commands.insert_resource(layouts);
    commands.insert_resource(samplers);
    commands.insert_resource(pipelines);
    commands.insert_resource(DownsamplingConfig { combine_bind_group });
}
/// Extracts entities with a [`GeneratedEnvironmentMapLight`] into the render
/// world once their source cubemap and both destination maps are resident on
/// the GPU; entities with missing images are retried on later frames.
pub fn extract_generated_environment_map_entities(
    query: Extract<
        Query<(
            RenderEntity,
            &GeneratedEnvironmentMapLight,
            &EnvironmentMapLight,
        )>,
    >,
    mut commands: Commands,
    render_images: Res<RenderAssets<GpuImage>>,
) {
    for (entity, filtered_env_map, env_map_light) in query.iter() {
        // Skip until the source cubemap has been uploaded to the GPU.
        let Some(env_map) = render_images.get(&filtered_env_map.environment_map) else {
            continue;
        };
        // Both destination maps must also be resident before we can filter.
        // (Destructuring avoids the `is_none()` + `unwrap()` pattern.)
        let (Some(diffuse_map), Some(specular_map)) = (
            render_images.get(&env_map_light.diffuse_map),
            render_images.get(&env_map_light.specular_map),
        ) else {
            continue;
        };
        let render_filtered_env_map = RenderEnvironmentMap {
            environment_map: env_map.clone(),
            diffuse_map: diffuse_map.clone(),
            specular_map: specular_map.clone(),
            intensity: filtered_env_map.intensity,
            rotation: filtered_env_map.rotation,
            affects_lightmapped_mesh_diffuse: filtered_env_map.affects_lightmapped_mesh_diffuse,
        };
        commands
            .get_entity(entity)
            .expect("Entity not synced to render world")
            .insert(render_filtered_env_map);
    }
}
// A render-world specific version of FilteredEnvironmentMapLight.
// NOTE(review): the original comment said "uses CachedTexture", but the fields
// are `GpuImage` — the comment appears stale; confirm against history.
#[derive(Component, Clone)]
pub struct RenderEnvironmentMap {
    // The unfiltered source cubemap.
    pub environment_map: GpuImage,
    // Destination for the diffuse (irradiance) convolution.
    pub diffuse_map: GpuImage,
    // Destination for the specular (radiance) convolution.
    pub specular_map: GpuImage,
    // Copied from `GeneratedEnvironmentMapLight::intensity`.
    pub intensity: f32,
    // Copied from `GeneratedEnvironmentMapLight::rotation`.
    pub rotation: Quat,
    // Copied from `GeneratedEnvironmentMapLight::affects_lightmapped_mesh_diffuse`.
    pub affects_lightmapped_mesh_diffuse: bool,
}
/// Per-entity scratch texture used while generating an environment map.
#[derive(Component)]
pub struct IntermediateTextures {
    // Full-mip-chain cubemap that the SPD passes write and filtering reads.
    pub environment_map: CachedTexture,
}
/// Returns the total number of mip levels for the provided square texture size.
/// `size` must be a power of two greater than zero. For example, `size = 512` → `9`.
#[inline]
fn compute_mip_count(size: u32) -> u32 {
debug_assert!(size.is_power_of_two());
32 - size.leading_zeros()
}
/// Prepares textures needed for single pass downsampling
pub fn prepare_generated_environment_map_intermediate_textures(
    light_probes: Query<(Entity, &RenderEnvironmentMap)>,
    render_device: Res<RenderDevice>,
    mut texture_cache: ResMut<TextureCache>,
    mut commands: Commands,
) {
    for (entity, env_map_light) in &light_probes {
        // The intermediate cubemap matches the source's size and carries a
        // full mip chain.
        let size = env_map_light.environment_map.size.width;
        let descriptor = TextureDescriptor {
            label: Some("intermediate_environment_map"),
            size: Extent3d {
                width: size,
                height: size,
                depth_or_array_layers: 6, // One layer per cubemap face
            },
            mip_level_count: compute_mip_count(size),
            sample_count: 1,
            dimension: TextureDimension::D2,
            format: TextureFormat::Rgba16Float,
            usage: TextureUsages::TEXTURE_BINDING
                | TextureUsages::STORAGE_BINDING
                | TextureUsages::COPY_DST,
            view_formats: &[],
        };
        let environment_map = texture_cache.get(&render_device, descriptor);
        commands
            .entity(entity)
            .insert(IntermediateTextures { environment_map });
    }
}
/// Constants for filtering
#[derive(Clone, Copy, ShaderType)]
#[repr(C)]
pub struct FilteringConstants {
    // Mip level being written, as a float for the shader.
    mip_level: f32,
    // Number of samples taken per texel during convolution.
    sample_count: u32,
    // GGX roughness for this mip (0.0 at mip 0; 1.0 for the irradiance pass).
    roughness: f32,
    // log2 of the blue-noise texture's width/height (both powers of two);
    // presumably used by the shader for cheap wrap-around indexing — confirm
    // in environment_filter.wgsl.
    noise_size_bits: UVec2,
}
/// Stores bind groups for the environment map generation pipelines
#[derive(Component)]
pub struct GeneratorBindGroups {
    // First SPD pass (or the combined pass, on capable devices).
    pub downsampling_first: BindGroup,
    // Second SPD pass; identical to the first in combined mode.
    pub downsampling_second: BindGroup,
    pub radiance: Vec<BindGroup>, // One per mip level
    pub irradiance: BindGroup,
    // Copies the source cubemap into mip 0 of the intermediate texture.
    pub copy: BindGroup,
}
/// Prepares bind groups for environment map generation pipelines
pub fn prepare_generated_environment_map_bind_groups(
light_probes: Query<
(Entity, &IntermediateTextures, &RenderEnvironmentMap),
With<RenderEnvironmentMap>,
>,
render_device: Res<RenderDevice>,
pipeline_cache: Res<PipelineCache>,
queue: Res<RenderQueue>,
layouts: Res<GeneratorBindGroupLayouts>,
samplers: Res<GeneratorSamplers>,
render_images: Res<RenderAssets<GpuImage>>,
bluenoise: Res<Bluenoise>,
config: Res<DownsamplingConfig>,
mut commands: Commands,
) {
// Skip until the blue-noise texture is available to avoid panicking.
// The system will retry next frame once the asset has loaded.
let Some(stbn_texture) = render_images.get(&bluenoise.texture) else {
return;
};
assert!(stbn_texture.size.width.is_power_of_two());
assert!(stbn_texture.size.height.is_power_of_two());
let noise_size_bits = UVec2::new(
stbn_texture.size.width.trailing_zeros(),
stbn_texture.size.height.trailing_zeros(),
);
for (entity, textures, env_map_light) in &light_probes {
// Determine mip chain based on input size
let base_size = env_map_light.environment_map.size.width;
let mip_count = compute_mip_count(base_size);
let last_mip = mip_count - 1;
let env_map_texture = env_map_light.environment_map.texture.clone();
// Create downsampling constants
let downsampling_constants = DownsamplingConstants {
mips: mip_count - 1, // Number of mips we are generating (excluding mip 0)
inverse_input_size: Vec2::new(1.0 / base_size as f32, 1.0 / base_size as f32),
_padding: 0,
};
let mut downsampling_constants_buffer = UniformBuffer::from(downsampling_constants);
downsampling_constants_buffer.write_buffer(&render_device, &queue);
let input_env_map_first = env_map_texture.clone().create_view(&TextureViewDescriptor {
dimension: Some(TextureViewDimension::D2Array),
..Default::default()
});
// Utility closure to get a unique storage view for a given mip level.
let mip_storage = |level: u32| {
if level <= last_mip {
create_storage_view(&textures.environment_map.texture, level, &render_device)
} else {
// Return a fresh 1×1 placeholder view so each binding has its own sub-resource and cannot alias.
create_placeholder_storage_view(&render_device)
}
};
// Depending on device limits, build either a combined or split bind group layout
let (downsampling_first_bind_group, downsampling_second_bind_group) =
if config.combine_bind_group {
// Combined layout expects destinations 1–12 in both bind groups
let bind_group = render_device.create_bind_group(
"downsampling_bind_group_combined_first",
&pipeline_cache.get_bind_group_layout(&layouts.downsampling_first),
&BindGroupEntries::sequential((
&samplers.linear,
&downsampling_constants_buffer,
&input_env_map_first,
&mip_storage(1),
&mip_storage(2),
&mip_storage(3),
&mip_storage(4),
&mip_storage(5),
&mip_storage(6),
&mip_storage(7),
&mip_storage(8),
&mip_storage(9),
&mip_storage(10),
&mip_storage(11),
&mip_storage(12),
)),
);
(bind_group.clone(), bind_group)
} else {
// Split path requires a separate view for mip6 input
let input_env_map_second =
textures
.environment_map
.texture
.create_view(&TextureViewDescriptor {
dimension: Some(TextureViewDimension::D2Array),
base_mip_level: min(6, last_mip),
mip_level_count: Some(1),
..Default::default()
});
// Split layout (current behavior)
let first = render_device.create_bind_group(
"downsampling_first_bind_group",
&pipeline_cache.get_bind_group_layout(&layouts.downsampling_first),
&BindGroupEntries::sequential((
&samplers.linear,
&downsampling_constants_buffer,
&input_env_map_first,
&mip_storage(1),
&mip_storage(2),
&mip_storage(3),
&mip_storage(4),
&mip_storage(5),
&mip_storage(6),
)),
);
let second = render_device.create_bind_group(
"downsampling_second_bind_group",
&pipeline_cache.get_bind_group_layout(&layouts.downsampling_second),
&BindGroupEntries::sequential((
&samplers.linear,
&downsampling_constants_buffer,
&input_env_map_second,
&mip_storage(7),
&mip_storage(8),
&mip_storage(9),
&mip_storage(10),
&mip_storage(11),
&mip_storage(12),
)),
);
(first, second)
};
// create a 2d array view of the bluenoise texture
let stbn_texture_view = stbn_texture
.texture
.clone()
.create_view(&TextureViewDescriptor {
dimension: Some(TextureViewDimension::D2Array),
..Default::default()
});
// Create radiance map bind groups for each mip level
let num_mips = mip_count as usize;
let mut radiance_bind_groups = Vec::with_capacity(num_mips);
for mip in 0..num_mips {
// Calculate roughness from 0.0 (mip 0) to 0.889 (mip 8)
// We don't need roughness=1.0 as a mip level because it's handled by the separate diffuse irradiance map
let roughness = mip as f32 / (num_mips - 1) as f32;
let sample_count = 32u32 * 2u32.pow((roughness * 4.0) as u32);
let radiance_constants = FilteringConstants {
mip_level: mip as f32,
sample_count,
roughness,
noise_size_bits,
};
let mut radiance_constants_buffer = UniformBuffer::from(radiance_constants);
radiance_constants_buffer.write_buffer(&render_device, &queue);
let mip_storage_view = create_storage_view(
&env_map_light.specular_map.texture,
mip as u32,
&render_device,
);
let bind_group = render_device.create_bind_group(
Some(format!("radiance_bind_group_mip_{mip}").as_str()),
&pipeline_cache.get_bind_group_layout(&layouts.radiance),
&BindGroupEntries::sequential((
&textures.environment_map.default_view,
&samplers.linear,
&mip_storage_view,
&radiance_constants_buffer,
&stbn_texture_view,
)),
);
radiance_bind_groups.push(bind_group);
}
// Create irradiance bind group
let irradiance_constants = FilteringConstants {
mip_level: 0.0,
// 32 phi, 32 theta = 1024 samples total
sample_count: 1024,
roughness: 1.0,
noise_size_bits,
};
let mut irradiance_constants_buffer = UniformBuffer::from(irradiance_constants);
irradiance_constants_buffer.write_buffer(&render_device, &queue);
// create a 2d array view
let irradiance_map =
env_map_light
.diffuse_map
.texture
.create_view(&TextureViewDescriptor {
dimension: Some(TextureViewDimension::D2Array),
..Default::default()
});
let irradiance_bind_group = render_device.create_bind_group(
"irradiance_bind_group",
&pipeline_cache.get_bind_group_layout(&layouts.irradiance),
&BindGroupEntries::sequential((
&textures.environment_map.default_view,
&samplers.linear,
&irradiance_map,
&irradiance_constants_buffer,
&stbn_texture_view,
)),
);
// Create copy bind group (source env map → destination mip0)
let src_view = env_map_light
.environment_map
.texture
.create_view(&TextureViewDescriptor {
dimension: Some(TextureViewDimension::D2Array),
..Default::default()
});
let dst_view = create_storage_view(&textures.environment_map.texture, 0, &render_device);
let copy_bind_group = render_device.create_bind_group(
"copy_bind_group",
&pipeline_cache.get_bind_group_layout(&layouts.copy),
&BindGroupEntries::with_indices(((0, &src_view), (1, &dst_view))),
);
commands.entity(entity).insert(GeneratorBindGroups {
downsampling_first: downsampling_first_bind_group,
downsampling_second: downsampling_second_bind_group,
radiance: radiance_bind_groups,
irradiance: irradiance_bind_group,
copy: copy_bind_group,
});
}
}
/// Helper function to create a storage texture view for a specific mip level
fn create_storage_view(texture: &Texture, mip: u32, _render_device: &RenderDevice) -> TextureView {
    // Label each view with its mip index so it is identifiable in debuggers.
    let label = format!("storage_view_mip_{mip}");
    let descriptor = TextureViewDescriptor {
        label: Some(label.as_str()),
        format: Some(texture.format()),
        // Expose all array layers of this single mip as a 2D array view.
        dimension: Some(TextureViewDimension::D2Array),
        aspect: TextureAspect::All,
        base_mip_level: mip,
        mip_level_count: Some(1),
        base_array_layer: 0,
        array_layer_count: Some(texture.depth_or_array_layers()),
        // The view is only ever bound as a writable storage texture.
        usage: Some(TextureUsages::STORAGE_BINDING),
    };
    texture.create_view(&descriptor)
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/light.rs | crates/bevy_pbr/src/render/light.rs | use crate::*;
use bevy_asset::UntypedAssetId;
use bevy_camera::primitives::{
face_index_to_name, CascadesFrusta, CubeMapFace, CubemapFrusta, Frustum, HalfSpace,
CUBE_MAP_FACES,
};
use bevy_camera::visibility::{
CascadesVisibleEntities, CubemapVisibleEntities, RenderLayers, ViewVisibility,
VisibleMeshEntities,
};
use bevy_camera::Camera3d;
use bevy_color::ColorToComponents;
use bevy_core_pipeline::core_3d::CORE_3D_DEPTH_FORMAT;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::change_detection::Tick;
use bevy_ecs::system::SystemChangeTick;
use bevy_ecs::{
entity::{EntityHashMap, EntityHashSet},
prelude::*,
system::lifetimeless::Read,
};
use bevy_light::cascade::Cascade;
use bevy_light::cluster::assign::{calculate_cluster_factors, ClusterableObjectType};
use bevy_light::cluster::GlobalVisibleClusterableObjects;
use bevy_light::SunDisk;
use bevy_light::{
spot_light_clip_from_view, spot_light_world_from_view, AmbientLight, CascadeShadowConfig,
Cascades, DirectionalLight, DirectionalLightShadowMap, GlobalAmbientLight, NotShadowCaster,
PointLight, PointLightShadowMap, ShadowFilteringMethod, SpotLight, VolumetricLight,
};
use bevy_math::{ops, Mat4, UVec4, Vec3, Vec3Swizzles, Vec4, Vec4Swizzles};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_platform::hash::FixedHasher;
use bevy_render::erased_render_asset::ErasedRenderAssets;
use bevy_render::experimental::occlusion_culling::{
OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities,
};
use bevy_render::sync_world::MainEntityHashMap;
use bevy_render::{
batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport},
camera::SortedCameras,
mesh::allocator::MeshAllocator,
view::{NoIndirectDrawing, RetainedViewEntity},
};
use bevy_render::{
diagnostic::RecordDiagnostics,
mesh::RenderMesh,
render_asset::RenderAssets,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::*,
render_resource::*,
renderer::{RenderContext, RenderDevice, RenderQueue},
texture::*,
view::ExtractedView,
Extract,
};
use bevy_render::{
mesh::allocator::SlabId,
sync_world::{MainEntity, RenderEntity},
};
use bevy_transform::{components::GlobalTransform, prelude::Transform};
use bevy_utils::default;
use core::{hash::Hash, ops::Range};
use decal::clustered::RenderClusteredDecals;
#[cfg(feature = "trace")]
use tracing::info_span;
use tracing::{error, warn};
/// Render-world representation of a point light or spot light, produced by
/// [`extract_lights`]. Spot lights reuse this type with `spot_light_angles`
/// set to `Some`.
#[derive(Component)]
pub struct ExtractedPointLight {
    /// Light color in linear RGB.
    pub color: LinearRgba,
    /// luminous intensity in lumens per steradian
    pub intensity: f32,
    /// Copied from `PointLight::range` / `SpotLight::range`.
    pub range: f32,
    /// Copied from `PointLight::radius` / `SpotLight::radius`.
    pub radius: f32,
    /// World-space transform of the light at extraction time.
    pub transform: GlobalTransform,
    /// Whether shadow mapping is enabled for this light.
    pub shadows_enabled: bool,
    /// Copied from the light's `shadow_depth_bias`.
    pub shadow_depth_bias: f32,
    /// The light's `shadow_normal_bias`, pre-scaled by shadow-map texel size
    /// and `SQRT_2` during extraction.
    pub shadow_normal_bias: f32,
    /// Near plane used when rendering this light's shadow map.
    pub shadow_map_near_z: f32,
    /// `Some((inner_angle, outer_angle))` for spot lights, `None` for point lights.
    pub spot_light_angles: Option<(f32, f32)>,
    /// True if the source entity had a `VolumetricLight` component.
    pub volumetric: bool,
    /// True only when the `experimental_pbr_pcss` feature is enabled and the
    /// light opted into soft shadows; always false otherwise.
    pub soft_shadows_enabled: bool,
    /// whether this point light contributes diffuse light to lightmapped meshes
    pub affects_lightmapped_mesh_diffuse: bool,
}
/// Render-world representation of a directional light, produced by
/// [`extract_lights`].
#[derive(Component, Debug)]
pub struct ExtractedDirectionalLight {
    /// Light color in linear RGB.
    pub color: LinearRgba,
    /// Copied from `DirectionalLight::illuminance`.
    pub illuminance: f32,
    /// World-space transform of the light at extraction time.
    pub transform: GlobalTransform,
    /// Whether shadow mapping is enabled for this light.
    pub shadows_enabled: bool,
    /// True if the source entity had a `VolumetricLight` component.
    pub volumetric: bool,
    /// whether this directional light contributes diffuse light to lightmapped
    /// meshes
    pub affects_lightmapped_mesh_diffuse: bool,
    /// Copied from the light's `shadow_depth_bias`.
    pub shadow_depth_bias: f32,
    /// The light's `shadow_normal_bias`, pre-scaled by `SQRT_2` during
    /// extraction.
    pub shadow_normal_bias: f32,
    /// Cascade configuration for this light's shadow cascades.
    pub cascade_shadow_config: CascadeShadowConfig,
    /// Per-view cascades, keyed by render-world view entity.
    pub cascades: EntityHashMap<Vec<Cascade>>,
    /// Per-view cascade frusta, keyed by render-world view entity.
    pub frusta: EntityHashMap<Vec<Frustum>>,
    /// Render layers this light belongs to (default layers if none were set).
    pub render_layers: RenderLayers,
    /// PCSS soft shadow size; `None` unless the `experimental_pbr_pcss`
    /// feature is enabled and the light set one.
    pub soft_shadow_size: Option<f32>,
    /// True if this light is using two-phase occlusion culling.
    pub occlusion_culling: bool,
    /// Angular size taken from the light's `SunDisk` (or its default).
    pub sun_disk_angular_size: f32,
    /// Intensity taken from the light's `SunDisk` (or its default).
    pub sun_disk_intensity: f32,
}
// NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_view_types.wgsl!
bitflags::bitflags! {
    /// GPU-side flag bits for point/spot lights.
    #[repr(transparent)]
    struct PointLightFlags: u32 {
        /// Shadow mapping is enabled for this light.
        const SHADOWS_ENABLED            = 1 << 0;
        /// The spot light's local Y axis points in the negative direction.
        const SPOT_LIGHT_Y_NEGATIVE      = 1 << 1;
        /// The light participates in volumetric lighting.
        const VOLUMETRIC                 = 1 << 2;
        /// The light contributes diffuse light to lightmapped meshes.
        const AFFECTS_LIGHTMAPPED_MESH_DIFFUSE = 1 << 3;
        const NONE                       = 0;
        const UNINITIALIZED              = 0xFFFF;
    }
}
/// Per-cascade data for a directional light, uploaded to the GPU as part of
/// [`GpuDirectionalLight`].
#[derive(Copy, Clone, ShaderType, Default, Debug)]
pub struct GpuDirectionalCascade {
    // Transforms world-space positions into this cascade's clip space.
    clip_from_world: Mat4,
    // Shadow-map texel size for this cascade.
    texel_size: f32,
    // Far boundary of this cascade.
    far_bound: f32,
}
/// GPU-side representation of a directional light, stored in the
/// [`GpuLights`] uniform.
#[derive(Copy, Clone, ShaderType, Default, Debug)]
pub struct GpuDirectionalLight {
    // All cascades; only the first `num_cascades` entries are meaningful.
    cascades: [GpuDirectionalCascade; MAX_CASCADES_PER_LIGHT],
    color: Vec4,
    // Unit vector pointing from the surface toward the light.
    dir_to_light: Vec3,
    // Bits from `DirectionalLightFlags`.
    flags: u32,
    soft_shadow_size: f32,
    shadow_depth_bias: f32,
    shadow_normal_bias: f32,
    num_cascades: u32,
    cascades_overlap_proportion: f32,
    // First layer of this light's cascades in the shadow-map texture array.
    depth_texture_base_index: u32,
    decal_index: u32,
    sun_disk_angular_size: f32,
    sun_disk_intensity: f32,
}
// NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_view_types.wgsl!
bitflags::bitflags! {
    /// GPU-side flag bits for directional lights.
    #[repr(transparent)]
    struct DirectionalLightFlags: u32 {
        /// Shadow mapping is enabled for this light.
        const SHADOWS_ENABLED                   = 1 << 0;
        /// The light participates in volumetric lighting.
        const VOLUMETRIC                        = 1 << 1;
        /// The light contributes diffuse light to lightmapped meshes.
        const AFFECTS_LIGHTMAPPED_MESH_DIFFUSE  = 1 << 2;
        const NONE                              = 0;
        const UNINITIALIZED                     = 0xFFFF;
    }
}
/// Per-view uniform data describing all lights affecting the view; written by
/// the light-preparation system into [`LightMeta::view_gpu_lights`].
#[derive(Copy, Clone, Debug, ShaderType)]
pub struct GpuLights {
    directional_lights: [GpuDirectionalLight; MAX_DIRECTIONAL_LIGHTS],
    ambient_color: Vec4,
    // xyz are x/y/z cluster dimensions and w is the number of clusters
    cluster_dimensions: UVec4,
    // xy are vec2<f32>(cluster_dimensions.xy) / vec2<f32>(view.width, view.height)
    // z is cluster_dimensions.z / log(far / near)
    // w is cluster_dimensions.z * log(near) / log(far / near)
    cluster_factors: Vec4,
    // Number of valid entries in `directional_lights`.
    n_directional_lights: u32,
    // offset from spot light's light index to spot light's shadow map index
    spot_light_shadowmap_offset: i32,
    ambient_light_affects_lightmapped_meshes: u32,
}
// NOTE: When running bevy on Adreno GPU chipsets in WebGL, any value above 1 will result in a crash
// when loading the wgsl "pbr_functions.wgsl" in the function apply_fog.
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
pub const MAX_DIRECTIONAL_LIGHTS: usize = 1;
/// Maximum number of directional lights the shader uniforms can hold.
#[cfg(any(
    not(feature = "webgl"),
    not(target_arch = "wasm32"),
    feature = "webgpu"
))]
pub const MAX_DIRECTIONAL_LIGHTS: usize = 10;
/// Maximum number of shadow cascades each directional light can have.
#[cfg(any(
    not(feature = "webgl"),
    not(target_arch = "wasm32"),
    feature = "webgpu"
))]
pub const MAX_CASCADES_PER_LIGHT: usize = 4;
// WebGL (non-WebGPU) builds are limited to a single cascade per light.
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
pub const MAX_CASCADES_PER_LIGHT: usize = 1;
/// The samplers used when sampling shadow maps, created by
/// [`init_shadow_samplers`].
#[derive(Resource, Clone)]
pub struct ShadowSamplers {
    /// Comparison sampler for point-light shadow cubemaps.
    pub point_light_comparison_sampler: Sampler,
    /// Plain linear sampler for point-light shadows, used by PCSS.
    #[cfg(feature = "experimental_pbr_pcss")]
    pub point_light_linear_sampler: Sampler,
    /// Comparison sampler for directional/spot shadow maps.
    pub directional_light_comparison_sampler: Sampler,
    /// Plain linear sampler for directional shadows, used by PCSS.
    #[cfg(feature = "experimental_pbr_pcss")]
    pub directional_light_linear_sampler: Sampler,
}
/// Creates the shadow-map samplers and inserts them as the [`ShadowSamplers`]
/// resource.
pub fn init_shadow_samplers(mut commands: Commands, render_device: Res<RenderDevice>) {
    // Every shadow sampler shares clamped addressing and bilinear filtering;
    // the variants differ only in whether a depth-comparison function is set.
    let base_descriptor = SamplerDescriptor {
        address_mode_u: AddressMode::ClampToEdge,
        address_mode_v: AddressMode::ClampToEdge,
        address_mode_w: AddressMode::ClampToEdge,
        mag_filter: FilterMode::Linear,
        min_filter: FilterMode::Linear,
        mipmap_filter: FilterMode::Nearest,
        ..default()
    };
    let comparison_descriptor = SamplerDescriptor {
        compare: Some(CompareFunction::GreaterEqual),
        ..base_descriptor
    };
    commands.insert_resource(ShadowSamplers {
        point_light_comparison_sampler: render_device.create_sampler(&comparison_descriptor),
        #[cfg(feature = "experimental_pbr_pcss")]
        point_light_linear_sampler: render_device.create_sampler(&base_descriptor),
        directional_light_comparison_sampler: render_device.create_sampler(&comparison_descriptor),
        #[cfg(feature = "experimental_pbr_pcss")]
        directional_light_linear_sampler: render_device.create_sampler(&base_descriptor),
    });
}
// This is needed because of the orphan rule not allowing implementing
// foreign trait ExtractComponent on foreign type ShadowFilteringMethod
/// Copies [`ShadowFilteringMethod`] components from the main world into the
/// render world each frame.
pub fn extract_shadow_filtering_method(
    mut commands: Commands,
    mut previous_len: Local<usize>,
    query: Extract<Query<(RenderEntity, &ShadowFilteringMethod)>>,
) {
    // Reserve based on last frame's count to avoid reallocating when the
    // number of entities is stable.
    let mut extracted = Vec::with_capacity(*previous_len);
    extracted.extend(query.iter().map(|(entity, method)| (entity, *method)));
    *previous_len = extracted.len();
    commands.try_insert_batch(extracted);
}
// This is needed because of the orphan rule not allowing implementing
// foreign trait ExtractResource on foreign type AmbientLight
/// Mirrors the main world's [`GlobalAmbientLight`] resource into the render
/// world, cloning it only when it changed.
pub fn extract_ambient_light_resource(
    mut commands: Commands,
    main_resource: Extract<Option<Res<GlobalAmbientLight>>>,
    target_resource: Option<ResMut<GlobalAmbientLight>>,
) {
    let Some(main_resource) = main_resource.as_ref() else {
        return;
    };
    match target_resource {
        // The render world already has a copy: refresh it only on change.
        Some(mut existing) => {
            if main_resource.is_changed() {
                *existing = (*main_resource).clone();
            }
        }
        // No copy yet: insert one.
        None => commands.insert_resource((*main_resource).clone()),
    }
}
// This is needed because of the orphan rule not allowing implementing
// foreign trait ExtractComponent on foreign type AmbientLight
/// Copies per-entity [`AmbientLight`] components from the main world into the
/// render world each frame.
pub fn extract_ambient_light(
    mut commands: Commands,
    mut previous_len: Local<usize>,
    query: Extract<Query<(RenderEntity, &AmbientLight)>>,
) {
    // Reserve based on last frame's count to avoid reallocating when the
    // number of entities is stable.
    let mut extracted = Vec::with_capacity(*previous_len);
    extracted.extend(
        query
            .iter()
            .map(|(entity, ambient_light)| (entity, ambient_light.clone())),
    );
    *previous_len = extracted.len();
    commands.try_insert_batch(extracted);
}
/// Extracts point, spot, and directional lights from the main world into the
/// render world, along with their frusta and visible-mesh-entity lists.
pub fn extract_lights(
    mut commands: Commands,
    point_light_shadow_map: Extract<Res<PointLightShadowMap>>,
    directional_light_shadow_map: Extract<Res<DirectionalLightShadowMap>>,
    global_visible_clusterable: Extract<Res<GlobalVisibleClusterableObjects>>,
    previous_point_lights: Query<
        Entity,
        (
            With<RenderCubemapVisibleEntities>,
            With<ExtractedPointLight>,
        ),
    >,
    previous_spot_lights: Query<
        Entity,
        (With<RenderVisibleMeshEntities>, With<ExtractedPointLight>),
    >,
    point_lights: Extract<
        Query<(
            Entity,
            RenderEntity,
            &PointLight,
            &CubemapVisibleEntities,
            &GlobalTransform,
            &ViewVisibility,
            &CubemapFrusta,
            Option<&VolumetricLight>,
        )>,
    >,
    spot_lights: Extract<
        Query<(
            Entity,
            RenderEntity,
            &SpotLight,
            &VisibleMeshEntities,
            &GlobalTransform,
            &ViewVisibility,
            &Frustum,
            Option<&VolumetricLight>,
        )>,
    >,
    directional_lights: Extract<
        Query<
            (
                Entity,
                RenderEntity,
                &DirectionalLight,
                &CascadesVisibleEntities,
                &Cascades,
                &CascadeShadowConfig,
                &CascadesFrusta,
                &GlobalTransform,
                &ViewVisibility,
                Option<&RenderLayers>,
                Option<&VolumetricLight>,
                Has<OcclusionCulling>,
                Option<&SunDisk>,
            ),
            Without<SpotLight>,
        >,
    >,
    mapper: Extract<Query<RenderEntity>>,
    mut previous_point_lights_len: Local<usize>,
    mut previous_spot_lights_len: Local<usize>,
) {
    // NOTE: These shadow map resources are extracted here as they are used here too so this avoids
    // races between scheduling of ExtractResourceSystems and this system.
    if point_light_shadow_map.is_changed() {
        commands.insert_resource(point_light_shadow_map.clone());
    }
    if directional_light_shadow_map.is_changed() {
        commands.insert_resource(directional_light_shadow_map.clone());
    }
    // Clear previous visible entities for all point/spot lights as they might not be in the
    // `global_visible_clusterable` list anymore.
    commands.try_insert_batch(
        previous_point_lights
            .iter()
            .map(|render_entity| (render_entity, RenderCubemapVisibleEntities::default()))
            .collect::<Vec<_>>(),
    );
    commands.try_insert_batch(
        previous_spot_lights
            .iter()
            .map(|render_entity| (render_entity, RenderVisibleMeshEntities::default()))
            .collect::<Vec<_>>(),
    );
    // This is the point light shadow map texel size for one face of the cube as a distance of 1.0
    // world unit from the light.
    // point_light_texel_size = 2.0 * 1.0 * tan(PI / 4.0) / cube face width in texels
    // PI / 4.0 is half the cube face fov, tan(PI / 4.0) = 1.0, so this simplifies to:
    // point_light_texel_size = 2.0 / cube face width in texels
    // NOTE: When using various PCF kernel sizes, this will need to be adjusted, according to:
    // https://catlikecoding.com/unity/tutorials/custom-srp/point-and-spot-shadows/
    let point_light_texel_size = 2.0 / point_light_shadow_map.size as f32;
    // Extract point lights that are in the globally visible clusterable set.
    let mut point_lights_values = Vec::with_capacity(*previous_point_lights_len);
    for entity in global_visible_clusterable.iter().copied() {
        let Ok((
            main_entity,
            render_entity,
            point_light,
            cubemap_visible_entities,
            transform,
            view_visibility,
            frusta,
            volumetric_light,
        )) = point_lights.get(entity)
        else {
            continue;
        };
        if !view_visibility.get() {
            continue;
        }
        // Remap each cube face's visible entities into render-world entities.
        let render_cubemap_visible_entities = RenderCubemapVisibleEntities {
            data: cubemap_visible_entities
                .iter()
                .map(|v| create_render_visible_mesh_entities(&mapper, v))
                .collect::<Vec<_>>()
                .try_into()
                .unwrap(),
        };
        let extracted_point_light = ExtractedPointLight {
            color: point_light.color.into(),
            // NOTE: Map from luminous power in lumens to luminous intensity in lumens per steradian
            // for a point light. See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower
            // for details.
            intensity: point_light.intensity / (4.0 * core::f32::consts::PI),
            range: point_light.range,
            radius: point_light.radius,
            transform: *transform,
            shadows_enabled: point_light.shadows_enabled,
            shadow_depth_bias: point_light.shadow_depth_bias,
            // The factor of SQRT_2 is for the worst-case diagonal offset
            shadow_normal_bias: point_light.shadow_normal_bias
                * point_light_texel_size
                * core::f32::consts::SQRT_2,
            shadow_map_near_z: point_light.shadow_map_near_z,
            spot_light_angles: None,
            volumetric: volumetric_light.is_some(),
            affects_lightmapped_mesh_diffuse: point_light.affects_lightmapped_mesh_diffuse,
            #[cfg(feature = "experimental_pbr_pcss")]
            soft_shadows_enabled: point_light.soft_shadows_enabled,
            #[cfg(not(feature = "experimental_pbr_pcss"))]
            soft_shadows_enabled: false,
        };
        point_lights_values.push((
            render_entity,
            (
                extracted_point_light,
                render_cubemap_visible_entities,
                (*frusta).clone(),
                MainEntity::from(main_entity),
            ),
        ));
    }
    *previous_point_lights_len = point_lights_values.len();
    commands.try_insert_batch(point_lights_values);
    // Extract spot lights; they are stored as `ExtractedPointLight` with
    // `spot_light_angles` set.
    let mut spot_lights_values = Vec::with_capacity(*previous_spot_lights_len);
    for entity in global_visible_clusterable.iter().copied() {
        if let Ok((
            main_entity,
            render_entity,
            spot_light,
            visible_entities,
            transform,
            view_visibility,
            frustum,
            volumetric_light,
        )) = spot_lights.get(entity)
        {
            if !view_visibility.get() {
                continue;
            }
            let render_visible_entities =
                create_render_visible_mesh_entities(&mapper, visible_entities);
            // Spot-light shadow maps live in the directional shadow atlas, so
            // the texel size derives from that map's resolution.
            let texel_size =
                2.0 * ops::tan(spot_light.outer_angle) / directional_light_shadow_map.size as f32;
            spot_lights_values.push((
                render_entity,
                (
                    ExtractedPointLight {
                        color: spot_light.color.into(),
                        // NOTE: Map from luminous power in lumens to luminous intensity in lumens per steradian
                        // for a point light. See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower
                        // for details.
                        // Note: Filament uses a divisor of PI for spot lights. We choose to use the same 4*PI divisor
                        // in both cases so that toggling between point light and spot light keeps lit areas lit equally,
                        // which seems least surprising for users
                        intensity: spot_light.intensity / (4.0 * core::f32::consts::PI),
                        range: spot_light.range,
                        radius: spot_light.radius,
                        transform: *transform,
                        shadows_enabled: spot_light.shadows_enabled,
                        shadow_depth_bias: spot_light.shadow_depth_bias,
                        // The factor of SQRT_2 is for the worst-case diagonal offset
                        shadow_normal_bias: spot_light.shadow_normal_bias
                            * texel_size
                            * core::f32::consts::SQRT_2,
                        shadow_map_near_z: spot_light.shadow_map_near_z,
                        spot_light_angles: Some((spot_light.inner_angle, spot_light.outer_angle)),
                        volumetric: volumetric_light.is_some(),
                        affects_lightmapped_mesh_diffuse: spot_light
                            .affects_lightmapped_mesh_diffuse,
                        #[cfg(feature = "experimental_pbr_pcss")]
                        soft_shadows_enabled: spot_light.soft_shadows_enabled,
                        #[cfg(not(feature = "experimental_pbr_pcss"))]
                        soft_shadows_enabled: false,
                    },
                    render_visible_entities,
                    *frustum,
                    MainEntity::from(main_entity),
                ),
            ));
        }
    }
    *previous_spot_lights_len = spot_lights_values.len();
    commands.try_insert_batch(spot_lights_values);
    // Extract all directional lights, including their per-view cascades,
    // frusta, and visible entities.
    for (
        main_entity,
        entity,
        directional_light,
        visible_entities,
        cascades,
        cascade_config,
        frusta,
        transform,
        view_visibility,
        maybe_layers,
        volumetric_light,
        occlusion_culling,
        sun_disk,
    ) in &directional_lights
    {
        if !view_visibility.get() {
            commands
                .get_entity(entity)
                .expect("Light entity wasn't synced.")
                .remove::<(ExtractedDirectionalLight, RenderCascadesVisibleEntities)>();
            continue;
        }
        // TODO: update in place instead of reinserting.
        let mut extracted_cascades = EntityHashMap::default();
        let mut extracted_frusta = EntityHashMap::default();
        let mut cascade_visible_entities = EntityHashMap::default();
        for (e, v) in cascades.cascades.iter() {
            if let Ok(entity) = mapper.get(*e) {
                extracted_cascades.insert(entity, v.clone());
            } else {
                break;
            }
        }
        for (e, v) in frusta.frusta.iter() {
            if let Ok(entity) = mapper.get(*e) {
                extracted_frusta.insert(entity, v.clone());
            } else {
                break;
            }
        }
        for (e, v) in visible_entities.entities.iter() {
            if let Ok(entity) = mapper.get(*e) {
                cascade_visible_entities.insert(
                    entity,
                    v.iter()
                        .map(|v| create_render_visible_mesh_entities(&mapper, v))
                        .collect(),
                );
            } else {
                break;
            }
        }
        commands
            .get_entity(entity)
            .expect("Light entity wasn't synced.")
            .insert((
                ExtractedDirectionalLight {
                    color: directional_light.color.into(),
                    illuminance: directional_light.illuminance,
                    transform: *transform,
                    volumetric: volumetric_light.is_some(),
                    affects_lightmapped_mesh_diffuse: directional_light
                        .affects_lightmapped_mesh_diffuse,
                    #[cfg(feature = "experimental_pbr_pcss")]
                    soft_shadow_size: directional_light.soft_shadow_size,
                    #[cfg(not(feature = "experimental_pbr_pcss"))]
                    soft_shadow_size: None,
                    shadows_enabled: directional_light.shadows_enabled,
                    shadow_depth_bias: directional_light.shadow_depth_bias,
                    // The factor of SQRT_2 is for the worst-case diagonal offset
                    shadow_normal_bias: directional_light.shadow_normal_bias
                        * core::f32::consts::SQRT_2,
                    cascade_shadow_config: cascade_config.clone(),
                    cascades: extracted_cascades,
                    frusta: extracted_frusta,
                    render_layers: maybe_layers.unwrap_or_default().clone(),
                    occlusion_culling,
                    sun_disk_angular_size: sun_disk.unwrap_or_default().angular_size,
                    sun_disk_intensity: sun_disk.unwrap_or_default().intensity,
                },
                RenderCascadesVisibleEntities {
                    entities: cascade_visible_entities,
                },
                MainEntity::from(main_entity),
            ));
    }
}
/// Maps a main-world [`VisibleMeshEntities`] list into render-world entities,
/// substituting [`Entity::PLACEHOLDER`] for entities with no synced
/// render-world counterpart.
fn create_render_visible_mesh_entities(
    mapper: &Extract<Query<RenderEntity>>,
    visible_entities: &VisibleMeshEntities,
) -> RenderVisibleMeshEntities {
    // Pair each main-world entity with its render-world counterpart.
    let to_render_pair = |main_entity| {
        let render_entity = mapper.get(*main_entity).unwrap_or(Entity::PLACEHOLDER);
        (render_entity, MainEntity::from(*main_entity))
    };
    RenderVisibleMeshEntities {
        entities: visible_entities.iter().map(to_render_pair).collect(),
    }
}
#[derive(Component, Default, Deref, DerefMut)]
/// Component automatically attached to a light entity to track light-view entities
/// for each view.
///
/// Maps each view entity to the per-view shadow entities spawned for this
/// light; those entities are despawned in [`remove_light_view_entities`].
pub struct LightViewEntities(EntityHashMap<Vec<Entity>>);
// TODO: using required component
/// Observer that attaches an empty [`LightViewEntities`] component to every
/// newly-added extracted light.
pub(crate) fn add_light_view_entities(
    add: On<Add, (ExtractedDirectionalLight, ExtractedPointLight)>,
    mut commands: Commands,
) {
    let Ok(mut entity_commands) = commands.get_entity(add.entity) else {
        return;
    };
    entity_commands.insert(LightViewEntities::default());
}
/// Removes [`LightViewEntities`] when light is removed. See [`add_light_view_entities`].
pub(crate) fn extracted_light_removed(
    remove: On<Remove, (ExtractedDirectionalLight, ExtractedPointLight)>,
    mut commands: Commands,
) {
    let Ok(mut entity_commands) = commands.get_entity(remove.entity) else {
        return;
    };
    entity_commands.try_remove::<LightViewEntities>();
}
/// Observer that despawns every per-view entity recorded in a light's
/// [`LightViewEntities`] when that component is removed.
pub(crate) fn remove_light_view_entities(
    remove: On<Remove, LightViewEntities>,
    query: Query<&LightViewEntities>,
    mut commands: Commands,
) {
    let Ok(entities) = query.get(remove.entity) else {
        return;
    };
    // Despawn all per-view entities across every view this light was part of.
    for light_view_entity in entities.0.values().flatten().copied() {
        if let Ok(mut entity_commands) = commands.get_entity(light_view_entity) {
            entity_commands.despawn();
        }
    }
}
/// Per-shadow-pass view data: the depth attachment rendered into and a label
/// for the render pass.
#[derive(Component)]
pub struct ShadowView {
    /// The depth texture attachment this shadow view renders into.
    pub depth_attachment: DepthAttachment,
    /// Label used for the shadow render pass.
    pub pass_name: String,
}
/// Per-view shadow-map textures and the views used to bind them in shaders.
#[derive(Component)]
pub struct ViewShadowBindings {
    /// Cube-array depth texture holding point-light shadow maps.
    pub point_light_depth_texture: Texture,
    pub point_light_depth_texture_view: TextureView,
    /// Array depth texture holding directional/spot shadow maps.
    pub directional_light_depth_texture: Texture,
    pub directional_light_depth_texture_view: TextureView,
}
/// A component that holds the shadow cascade views for all shadow cascades
/// associated with a camera.
///
/// Note: Despite the name, this component actually holds the shadow cascade
/// views, not the lights themselves.
#[derive(Component)]
pub struct ViewLightEntities {
    /// The shadow cascade views for all shadow cascades associated with a
    /// camera.
    ///
    /// Note: Despite the name, this component actually holds the shadow cascade
    /// views, not the lights themselves.
    pub lights: Vec<Entity>,
}
/// Dynamic-uniform offset of this view's [`GpuLights`] entry within
/// [`LightMeta::view_gpu_lights`].
#[derive(Component)]
pub struct ViewLightsUniformOffset {
    pub offset: u32,
}
/// Global resource holding the per-view [`GpuLights`] uniform buffer.
#[derive(Resource, Default)]
pub struct LightMeta {
    /// One [`GpuLights`] entry per view, indexed by [`ViewLightsUniformOffset`].
    pub view_gpu_lights: DynamicUniformBuffer<GpuLights>,
}
/// Identifies which light (and which sub-view of it) a shadow view entity
/// belongs to.
#[derive(Component)]
pub enum LightEntity {
    /// A cascade of a directional light.
    Directional {
        light_entity: Entity,
        cascade_index: usize,
    },
    /// One cube face of a point light.
    Point {
        light_entity: Entity,
        face_index: usize,
    },
    /// A spot light (single shadow view).
    Spot {
        light_entity: Entity,
    },
}
pub fn prepare_lights(
mut commands: Commands,
mut texture_cache: ResMut<TextureCache>,
(render_device, render_queue): (Res<RenderDevice>, Res<RenderQueue>),
mut global_light_meta: ResMut<GlobalClusterableObjectMeta>,
mut light_meta: ResMut<LightMeta>,
views: Query<
(
Entity,
MainEntity,
&ExtractedView,
&ExtractedClusterConfig,
Option<&RenderLayers>,
Has<NoIndirectDrawing>,
Option<&AmbientLight>,
),
With<Camera3d>,
>,
ambient_light: Res<GlobalAmbientLight>,
point_light_shadow_map: Res<PointLightShadowMap>,
directional_light_shadow_map: Res<DirectionalLightShadowMap>,
mut shadow_render_phases: ResMut<ViewBinnedRenderPhases<Shadow>>,
(
mut max_directional_lights_warning_emitted,
mut max_cascades_per_light_warning_emitted,
mut live_shadow_mapping_lights,
): (Local<bool>, Local<bool>, Local<HashSet<RetainedViewEntity>>),
point_lights: Query<(
Entity,
&MainEntity,
&ExtractedPointLight,
AnyOf<(&CubemapFrusta, &Frustum)>,
)>,
directional_lights: Query<(Entity, &MainEntity, &ExtractedDirectionalLight)>,
mut light_view_entities: Query<&mut LightViewEntities>,
sorted_cameras: Res<SortedCameras>,
(gpu_preprocessing_support, decals): (
Res<GpuPreprocessingSupport>,
Option<Res<RenderClusteredDecals>>,
),
) {
let views_iter = views.iter();
let views_count = views_iter.len();
let Some(mut view_gpu_lights_writer) =
light_meta
.view_gpu_lights
.get_writer(views_count, &render_device, &render_queue)
else {
return;
};
// Pre-calculate for PointLights
let cube_face_rotations = CUBE_MAP_FACES
.iter()
.map(|CubeMapFace { target, up }| Transform::IDENTITY.looking_at(*target, *up))
.collect::<Vec<_>>();
global_light_meta.entity_to_index.clear();
let mut point_lights: Vec<_> = point_lights.iter().collect::<Vec<_>>();
let mut directional_lights: Vec<_> = directional_lights.iter().collect::<Vec<_>>();
#[cfg(any(
not(feature = "webgl"),
not(target_arch = "wasm32"),
feature = "webgpu"
))]
let max_texture_array_layers = render_device.limits().max_texture_array_layers as usize;
#[cfg(any(
not(feature = "webgl"),
not(target_arch = "wasm32"),
feature = "webgpu"
))]
let max_texture_cubes = max_texture_array_layers / 6;
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
let max_texture_array_layers = 1;
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
let max_texture_cubes = 1;
if !*max_directional_lights_warning_emitted && directional_lights.len() > MAX_DIRECTIONAL_LIGHTS
{
warn!(
"The amount of directional lights of {} is exceeding the supported limit of {}.",
directional_lights.len(),
MAX_DIRECTIONAL_LIGHTS
);
*max_directional_lights_warning_emitted = true;
}
if !*max_cascades_per_light_warning_emitted
&& directional_lights
.iter()
.any(|(_, _, light)| light.cascade_shadow_config.bounds.len() > MAX_CASCADES_PER_LIGHT)
{
warn!(
"The number of cascades configured for a directional light exceeds the supported limit of {}.",
MAX_CASCADES_PER_LIGHT
);
*max_cascades_per_light_warning_emitted = true;
}
let point_light_count = point_lights
.iter()
.filter(|light| light.2.spot_light_angles.is_none())
.count();
let point_light_volumetric_enabled_count = point_lights
.iter()
.filter(|(_, _, light, _)| light.volumetric && light.spot_light_angles.is_none())
.count()
.min(max_texture_cubes);
let point_light_shadow_maps_count = point_lights
.iter()
.filter(|light| light.2.shadows_enabled && light.2.spot_light_angles.is_none())
.count()
.min(max_texture_cubes);
let directional_volumetric_enabled_count = directional_lights
.iter()
.take(MAX_DIRECTIONAL_LIGHTS)
.filter(|(_, _, light)| light.volumetric)
.count()
.min(max_texture_array_layers / MAX_CASCADES_PER_LIGHT);
let directional_shadow_enabled_count = directional_lights
.iter()
.take(MAX_DIRECTIONAL_LIGHTS)
.filter(|(_, _, light)| light.shadows_enabled)
.count()
.min(max_texture_array_layers / MAX_CASCADES_PER_LIGHT);
let spot_light_count = point_lights
.iter()
.filter(|(_, _, light, _)| light.spot_light_angles.is_some())
.count()
.min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT);
let spot_light_volumetric_enabled_count = point_lights
.iter()
.filter(|(_, _, light, _)| light.volumetric && light.spot_light_angles.is_some())
.count()
.min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT);
let spot_light_shadow_maps_count = point_lights
.iter()
.filter(|(_, _, light, _)| light.shadows_enabled && light.spot_light_angles.is_some())
.count()
.min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT);
// Sort lights by
// - point-light vs spot-light, so that we can iterate point lights and spot lights in contiguous blocks in the fragment shader,
// - then those with shadows enabled first, so that the index can be used to render at most `point_light_shadow_maps_count`
// point light shadows and `spot_light_shadow_maps_count` spot light shadow maps,
// - then by entity as a stable key to ensure that a consistent set of lights are chosen if the light count limit is exceeded.
point_lights.sort_by_cached_key(|(entity, _, light, _)| {
(
point_or_spot_light_to_clusterable(light).ordering(),
*entity,
)
});
// Sort lights by
// - those with volumetric (and shadows) enabled first, so that the
// volumetric lighting pass can quickly find the volumetric lights;
// - then those with shadows enabled second, so that the index can be used
// to render at most `directional_light_shadow_maps_count` directional light
// shadows
// - then by entity as a stable key to ensure that a consistent set of
// lights are chosen if the light count limit is exceeded.
// - because entities are unique, we can use `sort_unstable_by_key`
// and still end up with a stable order.
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/mesh_view_bindings.rs | crates/bevy_pbr/src/render/mesh_view_bindings.rs | use alloc::sync::Arc;
use bevy_core_pipeline::{
core_3d::ViewTransmissionTexture,
oit::{resolve::is_oit_supported, OitBuffers, OrderIndependentTransparencySettings},
prepass::ViewPrepassTextures,
tonemapping::{
get_lut_bind_group_layout_entries, get_lut_bindings, Tonemapping, TonemappingLuts,
},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::Has,
resource::Resource,
system::{Commands, Query, Res},
world::{FromWorld, World},
};
use bevy_image::BevyDefault as _;
use bevy_light::{EnvironmentMapLight, IrradianceVolume};
use bevy_math::Vec4;
use bevy_render::{
globals::{GlobalsBuffer, GlobalsUniform},
render_asset::RenderAssets,
render_resource::{binding_types::*, *},
renderer::{RenderAdapter, RenderDevice},
texture::{FallbackImage, FallbackImageMsaa, FallbackImageZero, GpuImage},
view::{
Msaa, RenderVisibilityRanges, ViewUniform, ViewUniforms,
VISIBILITY_RANGES_STORAGE_BUFFER_COUNT,
},
};
use core::{array, num::NonZero};
use crate::{
decal::{
self,
clustered::{
DecalsBuffer, RenderClusteredDecals, RenderViewClusteredDecalBindGroupEntries,
},
},
environment_map::{self, RenderViewEnvironmentMapBindGroupEntries},
irradiance_volume::{
self, RenderViewIrradianceVolumeBindGroupEntries, IRRADIANCE_VOLUMES_ARE_USABLE,
},
prepass,
resources::{AtmosphereBuffer, AtmosphereData, AtmosphereSampler, AtmosphereTextures},
EnvironmentMapUniformBuffer, ExtractedAtmosphere, FogMeta, GlobalClusterableObjectMeta,
GpuClusterableObjects, GpuFog, GpuLights, LightMeta, LightProbesBuffer, LightProbesUniform,
MeshPipeline, MeshPipelineKey, RenderViewLightProbes, ScreenSpaceAmbientOcclusionResources,
ScreenSpaceReflectionsBuffer, ScreenSpaceReflectionsUniform, ShadowSamplers,
ViewClusterBindings, ViewShadowBindings, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT,
};
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
use bevy_render::render_resource::binding_types::texture_cube;
#[cfg(debug_assertions)]
use {crate::MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES, bevy_utils::once, tracing::warn};
/// The set of bind group layouts the mesh pipeline uses for one view,
/// specialized for a single combination of [`MeshPipelineViewLayoutKey`] flags.
#[derive(Clone)]
pub struct MeshPipelineViewLayout {
    /// Layout for the main view bind group (view/light uniforms, shadow maps,
    /// prepass textures, etc.).
    pub main_layout: BindGroupLayoutDescriptor,
    /// Layout for the bind group that holds binding arrays (environment maps,
    /// irradiance volumes, clustered decals).
    pub binding_array_layout: BindGroupLayoutDescriptor,
    /// A deliberately empty layout, for group slots that must be bound but
    /// carry no resources.
    pub empty_layout: BindGroupLayoutDescriptor,
    // Debug-only count of texture bindings; `get_view_layout` uses it to warn
    // before `wgpu::Limits::max_sampled_textures_per_shader_stage` is hit.
    #[cfg(debug_assertions)]
    pub texture_count: usize,
}
bitflags::bitflags! {
    /// A key that uniquely identifies a [`MeshPipelineViewLayout`].
    ///
    /// Used to generate all possible layouts for the mesh pipeline in [`generate_view_layouts`],
    /// so special care must be taken to not add too many flags, as the number of possible layouts
    /// will grow exponentially.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    #[repr(transparent)]
    pub struct MeshPipelineViewLayoutKey: u32 {
        // Each flag toggles a group of bindings in the generated layout; see
        // `layout_entries` for exactly which bindings each flag controls.
        const MULTISAMPLED = 1 << 0;
        const DEPTH_PREPASS = 1 << 1;
        const NORMAL_PREPASS = 1 << 2;
        const MOTION_VECTOR_PREPASS = 1 << 3;
        const DEFERRED_PREPASS = 1 << 4;
        const OIT_ENABLED = 1 << 5;
        const ATMOSPHERE = 1 << 6;
    }
}
impl MeshPipelineViewLayoutKey {
// The number of possible layouts
pub const COUNT: usize = Self::all().bits() as usize + 1;
/// Builds a unique label for each layout based on the flags
pub fn label(&self) -> String {
use MeshPipelineViewLayoutKey as Key;
format!(
"mesh_view_layout{}{}{}{}{}{}{}",
if self.contains(Key::MULTISAMPLED) {
"_multisampled"
} else {
Default::default()
},
if self.contains(Key::DEPTH_PREPASS) {
"_depth"
} else {
Default::default()
},
if self.contains(Key::NORMAL_PREPASS) {
"_normal"
} else {
Default::default()
},
if self.contains(Key::MOTION_VECTOR_PREPASS) {
"_motion"
} else {
Default::default()
},
if self.contains(Key::DEFERRED_PREPASS) {
"_deferred"
} else {
Default::default()
},
if self.contains(Key::OIT_ENABLED) {
"_oit"
} else {
Default::default()
},
if self.contains(Key::ATMOSPHERE) {
"_atmosphere"
} else {
Default::default()
},
)
}
}
impl From<MeshPipelineKey> for MeshPipelineViewLayoutKey {
    /// Projects the full pipeline key down to the subset of flags that affect
    /// the view bind group layout.
    fn from(value: MeshPipelineKey) -> Self {
        let mut key = Self::empty();
        key.set(Self::MULTISAMPLED, value.msaa_samples() > 1);
        key.set(
            Self::DEPTH_PREPASS,
            value.contains(MeshPipelineKey::DEPTH_PREPASS),
        );
        key.set(
            Self::NORMAL_PREPASS,
            value.contains(MeshPipelineKey::NORMAL_PREPASS),
        );
        key.set(
            Self::MOTION_VECTOR_PREPASS,
            value.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS),
        );
        key.set(
            Self::DEFERRED_PREPASS,
            value.contains(MeshPipelineKey::DEFERRED_PREPASS),
        );
        key.set(
            Self::OIT_ENABLED,
            value.contains(MeshPipelineKey::OIT_ENABLED),
        );
        key.set(
            Self::ATMOSPHERE,
            value.contains(MeshPipelineKey::ATMOSPHERE),
        );
        key
    }
}
impl From<Msaa> for MeshPipelineViewLayoutKey {
fn from(value: Msaa) -> Self {
let mut result = MeshPipelineViewLayoutKey::empty();
if value.samples() > 1 {
result |= MeshPipelineViewLayoutKey::MULTISAMPLED;
}
result
}
}
impl From<Option<&ViewPrepassTextures>> for MeshPipelineViewLayoutKey {
fn from(value: Option<&ViewPrepassTextures>) -> Self {
let mut result = MeshPipelineViewLayoutKey::empty();
if let Some(prepass_textures) = value {
if prepass_textures.depth.is_some() {
result |= MeshPipelineViewLayoutKey::DEPTH_PREPASS;
}
if prepass_textures.normal.is_some() {
result |= MeshPipelineViewLayoutKey::NORMAL_PREPASS;
}
if prepass_textures.motion_vectors.is_some() {
result |= MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS;
}
if prepass_textures.deferred.is_some() {
result |= MeshPipelineViewLayoutKey::DEFERRED_PREPASS;
}
}
result
}
}
/// Builds a bind group layout entry for either a uniform or a storage buffer,
/// depending on `buffer_binding_type`.
pub(crate) fn buffer_layout(
    buffer_binding_type: BufferBindingType,
    has_dynamic_offset: bool,
    min_binding_size: Option<NonZero<u64>>,
) -> BindGroupLayoutEntryBuilder {
    match buffer_binding_type {
        BufferBindingType::Uniform => {
            uniform_buffer_sized(has_dynamic_offset, min_binding_size)
        }
        BufferBindingType::Storage { read_only: true } => {
            storage_buffer_read_only_sized(has_dynamic_offset, min_binding_size)
        }
        BufferBindingType::Storage { read_only: false } => {
            storage_buffer_sized(has_dynamic_offset, min_binding_size)
        }
    }
}
/// Returns the appropriate bind group layout vec based on the parameters
///
/// Index 0 of the returned array is the main view layout (uniforms, shadow
/// maps, prepass textures, OIT and atmosphere resources, selected by
/// `layout_key` and compile-time platform features); index 1 is the layout for
/// the binding arrays (environment maps, irradiance volumes, clustered
/// decals). The binding numbers here must match the ones used when creating
/// the bind groups in `prepare_mesh_view_bind_groups`.
fn layout_entries(
    clustered_forward_buffer_binding_type: BufferBindingType,
    visibility_ranges_buffer_binding_type: BufferBindingType,
    layout_key: MeshPipelineViewLayoutKey,
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
) -> [Vec<BindGroupLayoutEntry>; 2] {
    // EnvironmentMapLight
    let environment_map_entries =
        environment_map::get_bind_group_layout_entries(render_device, render_adapter);
    // Bindings 0..=17 are present regardless of `layout_key`.
    let mut entries = DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::FRAGMENT,
        (
            // View
            (
                0,
                uniform_buffer::<ViewUniform>(true).visibility(ShaderStages::VERTEX_FRAGMENT),
            ),
            // Lights
            (1, uniform_buffer::<GpuLights>(true)),
            // Point Shadow Texture Cube Array
            (
                2,
                #[cfg(all(
                    not(target_abi = "sim"),
                    any(
                        not(feature = "webgl"),
                        not(target_arch = "wasm32"),
                        feature = "webgpu"
                    )
                ))]
                texture_cube_array(TextureSampleType::Depth),
                #[cfg(any(
                    target_abi = "sim",
                    all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu"))
                ))]
                texture_cube(TextureSampleType::Depth),
            ),
            // Point Shadow Texture Array Comparison Sampler
            (3, sampler(SamplerBindingType::Comparison)),
            // Point Shadow Texture Array Linear Sampler
            #[cfg(feature = "experimental_pbr_pcss")]
            (4, sampler(SamplerBindingType::Filtering)),
            // Directional Shadow Texture Array
            (
                5,
                #[cfg(any(
                    not(feature = "webgl"),
                    not(target_arch = "wasm32"),
                    feature = "webgpu"
                ))]
                texture_2d_array(TextureSampleType::Depth),
                #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
                texture_2d(TextureSampleType::Depth),
            ),
            // Directional Shadow Texture Array Comparison Sampler
            (6, sampler(SamplerBindingType::Comparison)),
            // Directional Shadow Texture Array Linear Sampler
            #[cfg(feature = "experimental_pbr_pcss")]
            (7, sampler(SamplerBindingType::Filtering)),
            // PointLights
            (
                8,
                buffer_layout(
                    clustered_forward_buffer_binding_type,
                    false,
                    Some(GpuClusterableObjects::min_size(
                        clustered_forward_buffer_binding_type,
                    )),
                ),
            ),
            // ClusteredLightIndexLists
            (
                9,
                buffer_layout(
                    clustered_forward_buffer_binding_type,
                    false,
                    Some(
                        ViewClusterBindings::min_size_clusterable_object_index_lists(
                            clustered_forward_buffer_binding_type,
                        ),
                    ),
                ),
            ),
            // ClusterOffsetsAndCounts
            (
                10,
                buffer_layout(
                    clustered_forward_buffer_binding_type,
                    false,
                    Some(ViewClusterBindings::min_size_cluster_offsets_and_counts(
                        clustered_forward_buffer_binding_type,
                    )),
                ),
            ),
            // Globals
            (
                11,
                uniform_buffer::<GlobalsUniform>(false).visibility(ShaderStages::VERTEX_FRAGMENT),
            ),
            // Fog
            (12, uniform_buffer::<GpuFog>(true)),
            // Light probes
            (13, uniform_buffer::<LightProbesUniform>(true)),
            // Visibility ranges
            (
                14,
                buffer_layout(
                    visibility_ranges_buffer_binding_type,
                    false,
                    Some(Vec4::min_size()),
                )
                .visibility(ShaderStages::VERTEX),
            ),
            // Screen space reflection settings
            (15, uniform_buffer::<ScreenSpaceReflectionsUniform>(true)),
            // Screen space ambient occlusion texture
            (
                16,
                texture_2d(TextureSampleType::Float { filterable: false }),
            ),
            (17, environment_map_entries[3]),
        ),
    );
    // Tonemapping
    let tonemapping_lut_entries = get_lut_bind_group_layout_entries();
    entries = entries.extend_with_indices((
        (18, tonemapping_lut_entries[0]),
        (19, tonemapping_lut_entries[1]),
    ));
    // Prepass
    // Bindings 20..=23; skipped on WebGL when the view is multisampled (WebGL
    // can't combine a multisampled depth texture with these bindings — see
    // the matching check in `prepare_mesh_view_bind_groups`).
    if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32")))
        || (cfg!(all(feature = "webgl", target_arch = "wasm32"))
            && !layout_key.contains(MeshPipelineViewLayoutKey::MULTISAMPLED))
    {
        for (entry, binding) in prepass::get_bind_group_layout_entries(layout_key)
            .iter()
            .zip([20, 21, 22, 23])
        {
            if let Some(entry) = entry {
                entries = entries.extend_with_indices(((binding as u32, *entry),));
            }
        }
    }
    // View Transmission Texture
    entries = entries.extend_with_indices((
        (
            24,
            texture_2d(TextureSampleType::Float { filterable: true }),
        ),
        (25, sampler(SamplerBindingType::Filtering)),
    ));
    // OIT
    if layout_key.contains(MeshPipelineViewLayoutKey::OIT_ENABLED) {
        // Check if we can use OIT. This is a hack to avoid errors on webgl --
        // the OIT plugin will warn the user that OIT is not supported on their
        // platform, so we don't need to do it here.
        if is_oit_supported(render_adapter, render_device, false) {
            entries = entries.extend_with_indices((
                // oit_layers
                (26, storage_buffer_sized(false, None)),
                // oit_layer_ids,
                (27, storage_buffer_sized(false, None)),
                // oit_layer_count
                (
                    28,
                    uniform_buffer::<OrderIndependentTransparencySettings>(true),
                ),
            ));
        }
    }
    // Atmosphere
    if layout_key.contains(MeshPipelineViewLayoutKey::ATMOSPHERE) {
        entries = entries.extend_with_indices((
            // transmittance LUT
            (
                29,
                texture_2d(TextureSampleType::Float { filterable: true }),
            ),
            (30, sampler(SamplerBindingType::Filtering)),
            // atmosphere data buffer
            (31, storage_buffer_read_only::<AtmosphereData>(false)),
        ));
    }
    // Second layout: the binding arrays, kept in their own bind group.
    let mut binding_array_entries = DynamicBindGroupLayoutEntries::new(ShaderStages::FRAGMENT);
    binding_array_entries = binding_array_entries.extend_with_indices((
        (0, environment_map_entries[0]),
        (1, environment_map_entries[1]),
        (2, environment_map_entries[2]),
    ));
    // Irradiance volumes
    if IRRADIANCE_VOLUMES_ARE_USABLE {
        let irradiance_volume_entries =
            irradiance_volume::get_bind_group_layout_entries(render_device, render_adapter);
        binding_array_entries = binding_array_entries.extend_with_indices((
            (3, irradiance_volume_entries[0]),
            (4, irradiance_volume_entries[1]),
        ));
    }
    // Clustered decals
    if let Some(clustered_decal_entries) =
        decal::clustered::get_bind_group_layout_entries(render_device, render_adapter)
    {
        binding_array_entries = binding_array_entries.extend_with_indices((
            (5, clustered_decal_entries[0]),
            (6, clustered_decal_entries[1]),
            (7, clustered_decal_entries[2]),
        ));
    }
    [entries.to_vec(), binding_array_entries.to_vec()]
}
/// Stores the view layouts for every combination of pipeline keys.
///
/// This is wrapped in an [`Arc`] so that it can be efficiently cloned and
/// placed inside specializable pipeline types.
// The array is indexed directly by `MeshPipelineViewLayoutKey::bits()`; see
// `get_view_layout`.
#[derive(Resource, Clone, Deref, DerefMut)]
pub struct MeshPipelineViewLayouts(
    pub Arc<[MeshPipelineViewLayout; MeshPipelineViewLayoutKey::COUNT]>,
);
impl FromWorld for MeshPipelineViewLayouts {
fn from_world(world: &mut World) -> Self {
// Generates all possible view layouts for the mesh pipeline, based on all combinations of
// [`MeshPipelineViewLayoutKey`] flags.
let render_device = world.resource::<RenderDevice>();
let render_adapter = world.resource::<RenderAdapter>();
let clustered_forward_buffer_binding_type = render_device
.get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT);
let visibility_ranges_buffer_binding_type = render_device
.get_supported_read_only_binding_type(VISIBILITY_RANGES_STORAGE_BUFFER_COUNT);
Self(Arc::new(array::from_fn(|i| {
let key = MeshPipelineViewLayoutKey::from_bits_truncate(i as u32);
let entries = layout_entries(
clustered_forward_buffer_binding_type,
visibility_ranges_buffer_binding_type,
key,
render_device,
render_adapter,
);
#[cfg(debug_assertions)]
let texture_count: usize = entries
.iter()
.flat_map(|e| {
e.iter()
.filter(|entry| matches!(entry.ty, BindingType::Texture { .. }))
})
.count();
MeshPipelineViewLayout {
main_layout: BindGroupLayoutDescriptor::new(key.label(), &entries[0]),
binding_array_layout: BindGroupLayoutDescriptor::new(
format!("{}_binding_array", key.label()),
&entries[1],
),
empty_layout: BindGroupLayoutDescriptor::new(format!("{}_empty", key.label()), &[]),
#[cfg(debug_assertions)]
texture_count,
}
})))
}
}
impl MeshPipelineViewLayouts {
    /// Looks up the pre-generated layout for `layout_key`.
    ///
    /// The layouts array is indexed directly by the key's bit pattern, so
    /// every possible key maps to a valid slot.
    pub fn get_view_layout(
        &self,
        layout_key: MeshPipelineViewLayoutKey,
    ) -> &MeshPipelineViewLayout {
        let index = layout_key.bits() as usize;
        let layout = &self[index];
        #[cfg(debug_assertions)]
        if layout.texture_count > MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES {
            // Issue our own warning here because Naga's error message is a bit cryptic in this situation
            once!(warn!("Too many textures in mesh pipeline view layout, this might cause us to hit `wgpu::Limits::max_sampled_textures_per_shader_stage` in some environments."));
        }
        layout
    }
}
/// Generates all possible view layouts for the mesh pipeline, based on all combinations of
/// [`MeshPipelineViewLayoutKey`] flags.
///
/// The returned array is indexed by the key's bit pattern, i.e.
/// `layouts[key.bits() as usize]` is the layout for `key`.
pub fn generate_view_layouts(
    render_device: &RenderDevice,
    render_adapter: &RenderAdapter,
    clustered_forward_buffer_binding_type: BufferBindingType,
    visibility_ranges_buffer_binding_type: BufferBindingType,
) -> [MeshPipelineViewLayout; MeshPipelineViewLayoutKey::COUNT] {
    array::from_fn(|i| {
        let key = MeshPipelineViewLayoutKey::from_bits_truncate(i as u32);
        let entries = layout_entries(
            clustered_forward_buffer_binding_type,
            visibility_ranges_buffer_binding_type,
            key,
            render_device,
            render_adapter,
        );
        // Debug-only: count texture bindings across both entry lists so
        // `get_view_layout` can warn before device limits are exceeded.
        #[cfg(debug_assertions)]
        let texture_count: usize = entries
            .iter()
            .flat_map(|e| {
                e.iter()
                    .filter(|entry| matches!(entry.ty, BindingType::Texture { .. }))
            })
            .count();
        MeshPipelineViewLayout {
            main_layout: BindGroupLayoutDescriptor::new(key.label(), &entries[0]),
            binding_array_layout: BindGroupLayoutDescriptor::new(
                format!("{}_binding_array", key.label()),
                &entries[1],
            ),
            empty_layout: BindGroupLayoutDescriptor::new(format!("{}_empty", key.label()), &[]),
            #[cfg(debug_assertions)]
            texture_count,
        }
    })
}
/// The per-view bind groups created by [`prepare_mesh_view_bind_groups`].
#[derive(Component)]
pub struct MeshViewBindGroup {
    /// Bind group matching [`MeshPipelineViewLayout::main_layout`].
    pub main: BindGroup,
    /// Bind group matching [`MeshPipelineViewLayout::binding_array_layout`].
    pub binding_array: BindGroup,
    /// Bind group matching [`MeshPipelineViewLayout::empty_layout`].
    pub empty: BindGroup,
}
/// Creates the [`MeshViewBindGroup`]s for every extracted view.
///
/// The binding indices used here must stay in sync with the layouts produced
/// by `layout_entries` for the same [`MeshPipelineViewLayoutKey`]. If any of
/// the globally shared uniform buffers has no binding yet, the system does
/// nothing for this frame.
pub fn prepare_mesh_view_bind_groups(
    mut commands: Commands,
    (render_device, pipeline_cache, render_adapter): (
        Res<RenderDevice>,
        Res<PipelineCache>,
        Res<RenderAdapter>,
    ),
    mesh_pipeline: Res<MeshPipeline>,
    shadow_samplers: Res<ShadowSamplers>,
    (light_meta, global_light_meta): (Res<LightMeta>, Res<GlobalClusterableObjectMeta>),
    fog_meta: Res<FogMeta>,
    (view_uniforms, environment_map_uniform): (Res<ViewUniforms>, Res<EnvironmentMapUniformBuffer>),
    views: Query<(
        Entity,
        &ViewShadowBindings,
        &ViewClusterBindings,
        &Msaa,
        Option<&ScreenSpaceAmbientOcclusionResources>,
        Option<&ViewPrepassTextures>,
        Option<&ViewTransmissionTexture>,
        &Tonemapping,
        Option<&RenderViewLightProbes<EnvironmentMapLight>>,
        Option<&RenderViewLightProbes<IrradianceVolume>>,
        Has<OrderIndependentTransparencySettings>,
        Option<&AtmosphereTextures>,
        Has<ExtractedAtmosphere>,
    )>,
    (images, mut fallback_images, fallback_image, fallback_image_zero): (
        Res<RenderAssets<GpuImage>>,
        FallbackImageMsaa,
        Res<FallbackImage>,
        Res<FallbackImageZero>,
    ),
    globals_buffer: Res<GlobalsBuffer>,
    tonemapping_luts: Res<TonemappingLuts>,
    light_probes_buffer: Res<LightProbesBuffer>,
    visibility_ranges: Res<RenderVisibilityRanges>,
    ssr_buffer: Res<ScreenSpaceReflectionsBuffer>,
    oit_buffers: Res<OitBuffers>,
    (decals_buffer, render_decals, atmosphere_buffer, atmosphere_sampler): (
        Res<DecalsBuffer>,
        Res<RenderClusteredDecals>,
        Option<Res<AtmosphereBuffer>>,
        Option<Res<AtmosphereSampler>>,
    ),
) {
    // Proceed only if every shared buffer already has a GPU binding.
    if let (
        Some(view_binding),
        Some(light_binding),
        Some(clusterable_objects_binding),
        Some(globals),
        Some(fog_binding),
        Some(light_probes_binding),
        Some(visibility_ranges_buffer),
        Some(ssr_binding),
        Some(environment_map_binding),
    ) = (
        view_uniforms.uniforms.binding(),
        light_meta.view_gpu_lights.binding(),
        global_light_meta.gpu_clusterable_objects.binding(),
        globals_buffer.buffer.binding(),
        fog_meta.gpu_fogs.binding(),
        light_probes_buffer.binding(),
        visibility_ranges.buffer().buffer(),
        ssr_buffer.binding(),
        environment_map_uniform.binding(),
    ) {
        for (
            entity,
            shadow_bindings,
            cluster_bindings,
            msaa,
            ssao_resources,
            prepass_textures,
            transmission_texture,
            tonemapping,
            render_view_environment_maps,
            render_view_irradiance_volumes,
            has_oit,
            atmosphere_textures,
            has_atmosphere,
        ) in &views
        {
            // Use a single-sample fallback texture when the view has no SSAO output.
            let fallback_ssao = fallback_images
                .image_for_samplecount(1, TextureFormat::bevy_default())
                .texture_view
                .clone();
            let ssao_view = ssao_resources
                .map(|t| &t.screen_space_ambient_occlusion_texture.default_view)
                .unwrap_or(&fallback_ssao);
            // Recompute this view's layout key; it must match the key used to
            // build the layout in `layout_entries`.
            let mut layout_key = MeshPipelineViewLayoutKey::from(*msaa)
                | MeshPipelineViewLayoutKey::from(prepass_textures);
            if has_oit {
                layout_key |= MeshPipelineViewLayoutKey::OIT_ENABLED;
            }
            if has_atmosphere {
                layout_key |= MeshPipelineViewLayoutKey::ATMOSPHERE;
            }
            let layout = mesh_pipeline.get_view_layout(layout_key);
            let mut entries = DynamicBindGroupEntries::new_with_indices((
                (0, view_binding.clone()),
                (1, light_binding.clone()),
                (2, &shadow_bindings.point_light_depth_texture_view),
                (3, &shadow_samplers.point_light_comparison_sampler),
                #[cfg(feature = "experimental_pbr_pcss")]
                (4, &shadow_samplers.point_light_linear_sampler),
                (5, &shadow_bindings.directional_light_depth_texture_view),
                (6, &shadow_samplers.directional_light_comparison_sampler),
                #[cfg(feature = "experimental_pbr_pcss")]
                (7, &shadow_samplers.directional_light_linear_sampler),
                (8, clusterable_objects_binding.clone()),
                (
                    9,
                    cluster_bindings
                        .clusterable_object_index_lists_binding()
                        .unwrap(),
                ),
                (10, cluster_bindings.offsets_and_counts_binding().unwrap()),
                (11, globals.clone()),
                (12, fog_binding.clone()),
                (13, light_probes_binding.clone()),
                (14, visibility_ranges_buffer.as_entire_binding()),
                (15, ssr_binding.clone()),
                (16, ssao_view),
            ));
            entries = entries.extend_with_indices(((17, environment_map_binding.clone()),));
            let lut_bindings =
                get_lut_bindings(&images, &tonemapping_luts, tonemapping, &fallback_image);
            entries = entries.extend_with_indices(((18, lut_bindings.0), (19, lut_bindings.1)));
            // When using WebGL, we can't have a depth texture with multisampling
            let prepass_bindings;
            if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) || msaa.samples() == 1
            {
                prepass_bindings = prepass::get_bindings(prepass_textures);
                // Only the prepass textures that actually exist get a binding.
                for (binding, index) in prepass_bindings
                    .iter()
                    .map(Option::as_ref)
                    .zip([20, 21, 22, 23])
                    .flat_map(|(b, i)| b.map(|b| (b, i)))
                {
                    entries = entries.extend_with_indices(((index, binding),));
                }
            };
            // Transmission falls back to the zeroed fallback image when the
            // view has no transmission texture.
            let transmission_view = transmission_texture
                .map(|transmission| &transmission.view)
                .unwrap_or(&fallback_image_zero.texture_view);
            let transmission_sampler = transmission_texture
                .map(|transmission| &transmission.sampler)
                .unwrap_or(&fallback_image_zero.sampler);
            entries =
                entries.extend_with_indices(((24, transmission_view), (25, transmission_sampler)));
            if has_oit
                && let (
                    Some(oit_layers_binding),
                    Some(oit_layer_ids_binding),
                    Some(oit_settings_binding),
                ) = (
                    oit_buffers.layers.binding(),
                    oit_buffers.layer_ids.binding(),
                    oit_buffers.settings.binding(),
                )
            {
                entries = entries.extend_with_indices((
                    (26, oit_layers_binding.clone()),
                    (27, oit_layer_ids_binding.clone()),
                    (28, oit_settings_binding.clone()),
                ));
            }
            if has_atmosphere
                && let Some(atmosphere_textures) = atmosphere_textures
                && let Some(atmosphere_buffer) = atmosphere_buffer.as_ref()
                && let Some(atmosphere_sampler) = atmosphere_sampler.as_ref()
                && let Some(atmosphere_buffer_binding) = atmosphere_buffer.buffer.binding()
            {
                entries = entries.extend_with_indices((
                    (29, &atmosphere_textures.transmittance_lut.default_view),
                    (30, &***atmosphere_sampler),
                    (31, atmosphere_buffer_binding),
                ));
            }
            // Second bind group: the binding arrays (environment maps,
            // irradiance volumes, clustered decals).
            let mut entries_binding_array = DynamicBindGroupEntries::new();
            let environment_map_bind_group_entries = RenderViewEnvironmentMapBindGroupEntries::get(
                render_view_environment_maps,
                &images,
                &fallback_image,
                &render_device,
                &render_adapter,
            );
            match environment_map_bind_group_entries {
                RenderViewEnvironmentMapBindGroupEntries::Single {
                    diffuse_texture_view,
                    specular_texture_view,
                    sampler,
                } => {
                    entries_binding_array = entries_binding_array.extend_with_indices((
                        (0, diffuse_texture_view),
                        (1, specular_texture_view),
                        (2, sampler),
                    ));
                }
                RenderViewEnvironmentMapBindGroupEntries::Multiple {
                    ref diffuse_texture_views,
                    ref specular_texture_views,
                    sampler,
                } => {
                    entries_binding_array = entries_binding_array.extend_with_indices((
                        (0, diffuse_texture_views.as_slice()),
                        (1, specular_texture_views.as_slice()),
                        (2, sampler),
                    ));
                }
            }
            let irradiance_volume_bind_group_entries = if IRRADIANCE_VOLUMES_ARE_USABLE {
                Some(RenderViewIrradianceVolumeBindGroupEntries::get(
                    render_view_irradiance_volumes,
                    &images,
                    &fallback_image,
                    &render_device,
                    &render_adapter,
                ))
            } else {
                None
            };
            match irradiance_volume_bind_group_entries {
                Some(RenderViewIrradianceVolumeBindGroupEntries::Single {
                    texture_view,
                    sampler,
                }) => {
                    entries_binding_array = entries_binding_array
                        .extend_with_indices(((3, texture_view), (4, sampler)));
                }
                Some(RenderViewIrradianceVolumeBindGroupEntries::Multiple {
                    ref texture_views,
                    sampler,
                }) => {
                    entries_binding_array = entries_binding_array
                        .extend_with_indices(((3, texture_views.as_slice()), (4, sampler)));
                }
                None => {}
            }
            let decal_bind_group_entries = RenderViewClusteredDecalBindGroupEntries::get(
                &render_decals,
                &decals_buffer,
                &images,
                &fallback_image,
                &render_device,
                &render_adapter,
            );
            // Add the decal bind group entries.
            if let Some(ref render_view_decal_bind_group_entries) = decal_bind_group_entries {
                entries_binding_array = entries_binding_array.extend_with_indices((
                    // `clustered_decals`
                    (
                        5,
                        render_view_decal_bind_group_entries
                            .decals
                            .as_entire_binding(),
                    ),
                    // `clustered_decal_textures`
                    (
                        6,
                        render_view_decal_bind_group_entries
                            .texture_views
                            .as_slice(),
                    ),
                    // `clustered_decal_sampler`
                    (7, render_view_decal_bind_group_entries.sampler),
                ));
            }
            commands.entity(entity).insert(MeshViewBindGroup {
                main: render_device.create_bind_group(
                    "mesh_view_bind_group",
                    &pipeline_cache.get_bind_group_layout(&layout.main_layout),
                    &entries,
                ),
                binding_array: render_device.create_bind_group(
                    "mesh_view_bind_group_binding_array",
                    &pipeline_cache.get_bind_group_layout(&layout.binding_array_layout),
                    &entries_binding_array,
                ),
                empty: render_device.create_bind_group(
                    "mesh_view_bind_group_empty",
                    &pipeline_cache.get_bind_group_layout(&layout.empty_layout),
                    &[],
                ),
            });
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/morph.rs | crates/bevy_pbr/src/render/morph.rs | use core::{iter, mem};
use bevy_camera::visibility::ViewVisibility;
use bevy_ecs::prelude::*;
use bevy_mesh::morph::{MeshMorphWeights, MAX_MORPH_WEIGHTS};
use bevy_render::sync_world::MainEntityHashMap;
use bevy_render::{
batching::NoAutomaticBatching,
render_resource::{BufferUsages, RawBufferVec},
renderer::{RenderDevice, RenderQueue},
Extract,
};
use bytemuck::NoUninit;
/// The offset of one mesh's morph weights within the active
/// [`MorphUniforms`] buffer.
#[derive(Component)]
pub struct MorphIndex {
    // Byte offset: `start_element * size_of::<f32>()`; see `extract_morphs`.
    pub index: u32,
}
/// Maps each mesh affected by morph targets to the applicable offset within the
/// [`MorphUniforms`] buffer.
///
/// We store both the current frame's mapping and the previous frame's mapping
/// for the purposes of motion vector calculation.
// Both maps are swapped and the current one rebuilt every frame by
// `extract_morphs`.
#[derive(Default, Resource)]
pub struct MorphIndices {
    /// Maps each entity with a morphed mesh to the appropriate offset within
    /// [`MorphUniforms::current_buffer`].
    pub current: MainEntityHashMap<MorphIndex>,
    /// Maps each entity with a morphed mesh to the appropriate offset within
    /// [`MorphUniforms::prev_buffer`].
    pub prev: MainEntityHashMap<MorphIndex>,
}
/// The GPU buffers containing morph weights for all meshes with morph targets.
///
/// This is double-buffered: we store the weights of the previous frame in
/// addition to those of the current frame. This is for motion vector
/// calculation. Every frame, we swap buffers and reuse the morph target weight
/// buffer from two frames ago for the current frame.
#[derive(Resource)]
pub struct MorphUniforms {
    /// The morph weights for the current frame.
    pub current_buffer: RawBufferVec<f32>,
    /// The morph weights for the previous frame.
    pub prev_buffer: RawBufferVec<f32>,
}
impl Default for MorphUniforms {
    fn default() -> Self {
        // Both buffers are created with UNIFORM usage; their contents are
        // uploaded by `prepare_morphs`.
        Self {
            current_buffer: RawBufferVec::new(BufferUsages::UNIFORM),
            prev_buffer: RawBufferVec::new(BufferUsages::UNIFORM),
        }
    }
}
/// Uploads the current frame's morph weights to the GPU.
pub fn prepare_morphs(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut uniform: ResMut<MorphUniforms>,
) {
    let buffer = &mut uniform.current_buffer;
    let len = buffer.len();
    // Nothing to upload if no morphed mesh was extracted this frame.
    if len == 0 {
        return;
    }
    buffer.reserve(len, &render_device);
    buffer.write_buffer(&render_device, &render_queue);
    // `prev_buffer` was uploaded last frame and its contents should still be
    // resident on the GPU, so it needs no write here.
}
/// Returns true when one of `step` and `target` evenly divides the other, so
/// that advancing a buffer in `step`-byte elements can land exactly on a
/// `target`-byte boundary.
const fn can_align(step: usize, target: usize) -> bool {
    target.is_multiple_of(step) || step.is_multiple_of(target)
}
// The minimum dynamic-offset alignment wgpu requires, in bytes.
const WGPU_MIN_ALIGN: usize = 256;

/// Align a [`RawBufferVec`] to `N` bytes by padding the end with `T::default()` values.
fn add_to_alignment<T: NoUninit + Default>(buffer: &mut RawBufferVec<T>) {
    let n = WGPU_MIN_ALIGN;
    let t_size = size_of::<T>();
    if !can_align(n, t_size) {
        // This panic is stripped at compile time, due to n, t_size and can_align being const
        panic!(
            "RawBufferVec should contain only types with a size multiple or divisible by {n}, \
            {} has a size of {t_size}, which is neither multiple or divisible by {n}",
            core::any::type_name::<T>()
        );
    }
    // How many bytes the buffer currently extends past the last boundary.
    let misalignment = (t_size * buffer.len()) % n;
    if misalignment == 0 {
        return;
    }
    // Append default values until the next `n`-byte boundary.
    let pad_elements = (n - misalignment) / t_size;
    buffer.extend(iter::repeat_with(T::default).take(pad_elements));
}
// Notes on implementation: see comment on top of the extract_skins system in skin module.
// This works similarly, but for `f32` instead of `Mat4`
pub fn extract_morphs(
    morph_indices: ResMut<MorphIndices>,
    uniform: ResMut<MorphUniforms>,
    query: Extract<Query<(Entity, &ViewVisibility, &MeshMorphWeights)>>,
) {
    // Borrow check workaround.
    let (morph_indices, uniform) = (morph_indices.into_inner(), uniform.into_inner());
    // Swap buffers. We need to keep the previous frame's buffer around for the
    // purposes of motion vector computation.
    mem::swap(&mut morph_indices.current, &mut morph_indices.prev);
    mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer);
    // Rebuild the current frame's data from scratch; `prev` keeps last frame's.
    morph_indices.current.clear();
    uniform.current_buffer.clear();
    for (entity, view_visibility, morph_weights) in &query {
        // Skip meshes that aren't visible in any view this frame.
        if !view_visibility.get() {
            continue;
        }
        let start = uniform.current_buffer.len();
        let weights = morph_weights.weights();
        // Pad with zeros (or truncate) to exactly MAX_MORPH_WEIGHTS entries.
        let legal_weights = weights
            .iter()
            .chain(iter::repeat(&0.0))
            .take(MAX_MORPH_WEIGHTS)
            .copied();
        uniform.current_buffer.extend(legal_weights);
        // Pad so the next entry starts on a WGPU_MIN_ALIGN (256-byte) boundary.
        add_to_alignment::<f32>(&mut uniform.current_buffer);
        // Record this mesh's byte offset into the current buffer.
        let index = (start * size_of::<f32>()) as u32;
        morph_indices
            .current
            .insert(entity.into(), MorphIndex { index });
    }
}
// NOTE: Because morph targets require per-morph target texture bindings, they cannot
// currently be batched.
pub fn no_automatic_morph_batching(
    mut commands: Commands,
    query: Query<Entity, (With<MeshMorphWeights>, Without<NoAutomaticBatching>)>,
) {
    // Tag every morphed mesh that isn't already opted out of batching.
    query.iter().for_each(|entity| {
        commands.entity(entity).try_insert(NoAutomaticBatching);
    });
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/gpu_preprocess.rs | crates/bevy_pbr/src/render/gpu_preprocess.rs | //! GPU mesh preprocessing.
//!
//! This is an optional pass that uses a compute shader to reduce the amount of
//! data that has to be transferred from the CPU to the GPU. When enabled,
//! instead of transferring [`MeshUniform`]s to the GPU, we transfer the smaller
//! [`MeshInputUniform`]s instead and use the GPU to calculate the remaining
//! derived fields in [`MeshUniform`].
use core::num::{NonZero, NonZeroU64};
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
use bevy_core_pipeline::{
core_3d::graph::{Core3d, Node3d},
mip_generation::experimental::depth::ViewDepthPyramid,
prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
prelude::resource_exists,
query::{Has, Or, QueryState, With, Without},
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{lifetimeless::Read, Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_render::{
batching::gpu_preprocessing::{
BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingMode,
GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, IndirectParametersIndexed,
IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem,
PreprocessWorkItemBuffers, UntypedPhaseBatchedInstanceBuffers,
UntypedPhaseIndirectParametersBuffers,
},
diagnostic::RecordDiagnostics,
experimental::occlusion_culling::OcclusionCulling,
render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt},
render_resource::{
binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindingResource, Buffer,
BufferBinding, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec,
ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
TextureSampleType, UninitBufferVec,
},
renderer::{RenderContext, RenderDevice, RenderQueue},
settings::WgpuFeatures,
view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
Render, RenderApp, RenderSystems,
};
use bevy_shader::Shader;
use bevy_utils::{default, TypeIdMap};
use bitflags::bitflags;
use smallvec::{smallvec, SmallVec};
use tracing::warn;
use crate::{
graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
};
use super::{ShadowView, ViewLightEntities};
/// The GPU workgroup size.
// NOTE(review): presumably this matches the `@workgroup_size` declared in the
// mesh-preprocessing WGSL shader — confirm against the shader source.
const WORKGROUP_SIZE: usize = 64;
/// A plugin that builds mesh uniforms on GPU.
///
/// This will only be added if the platform supports compute shaders (e.g. not
/// on WebGL 2).
pub struct GpuMeshPreprocessPlugin {
    /// Whether we're building [`MeshUniform`]s on GPU.
    ///
    /// This requires compute shader support and so will be forcibly disabled if
    /// the platform doesn't support those.
    pub use_gpu_instance_buffer_builder: bool,
}
/// The render node that clears out the GPU-side indirect metadata buffers.
///
/// This is only used when indirect drawing is enabled.
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;
/// The render node for the first mesh preprocessing pass.
///
/// This pass runs a compute shader to cull meshes outside the view frustum (if
/// that wasn't done by the CPU), cull meshes that weren't visible last frame
/// (if occlusion culling is on), transform them, and, if indirect drawing is
/// on, populate indirect draw parameter metadata for the subsequent
/// [`EarlyPrepassBuildIndirectParametersNode`].
pub struct EarlyGpuPreprocessNode {
    // All preprocessable views; bind groups and uniform offsets are optional
    // because a view may not have had them prepared on a given frame.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    // NOTE(review): appears to map a main view to its lights' shadow views —
    // confirm in the node's `run` implementation (not visible in this chunk).
    main_view_query: QueryState<Read<ViewLightEntities>>,
}
/// The render node for the second mesh preprocessing pass.
///
/// This pass runs a compute shader to cull meshes outside the view frustum (if
/// that wasn't done by the CPU), cull meshes that were neither visible last
/// frame nor visible this frame (if occlusion culling is on), transform them,
/// and, if indirect drawing is on, populate the indirect draw parameter
/// metadata for the subsequent [`LatePrepassBuildIndirectParametersNode`].
pub struct LateGpuPreprocessNode {
    // Per the filters, only views that draw indirectly with both occlusion
    // culling and a depth prepass participate in the late pass.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}
/// The render node for the part of the indirect parameter building pass that
/// draws the meshes visible from the previous frame.
///
/// This node runs a compute shader on the output of the
/// [`EarlyGpuPreprocessNode`] in order to transform the
/// [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct EarlyPrepassBuildIndirectParametersNode {
    // Indirect-drawing views that have either a depth prepass or are shadow
    // views, per the filters below.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}
/// The render node for the part of the indirect parameter building pass that
/// draws the meshes that are potentially visible on this frame but weren't
/// visible on the previous frame.
///
/// This node runs a compute shader on the output of the
/// [`LateGpuPreprocessNode`] in order to transform the
/// [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct LatePrepassBuildIndirectParametersNode {
    /// Same filter as [`EarlyPrepassBuildIndirectParametersNode`], but
    /// additionally restricted to views with occlusion culling enabled, since
    /// the late phase only exists under two-phase occlusion culling.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}
/// The render node for the part of the indirect parameter building pass that
/// draws all meshes, both those that are newly-visible on this frame and those
/// that were visible last frame.
///
/// This node runs a compute shader on the output of the
/// [`EarlyGpuPreprocessNode`] and [`LateGpuPreprocessNode`] in order to
/// transform the [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct MainBuildIndirectParametersNode {
    /// All indirect-drawing views that haven't opted out of GPU preprocessing;
    /// no prepass or occlusion-culling requirement here, since the main phase
    /// applies to every indirect view.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}
/// The compute shader pipelines for the GPU mesh preprocessing and indirect
/// parameter building passes.
///
/// Each contained [`PreprocessPipeline`] and
/// [`BuildIndirectParametersPipeline`] starts out with its `pipeline_id`
/// unset; the IDs are filled in by `prepare_preprocess_pipelines`.
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// The pipeline used for CPU culling. This pipeline doesn't populate
    /// indirect parameter metadata.
    pub direct_preprocess: PreprocessPipeline,
    /// The pipeline used for mesh preprocessing when GPU frustum culling is in
    /// use, but occlusion culling isn't.
    ///
    /// This pipeline populates indirect parameter metadata.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the first phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the second phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline that builds indirect draw parameters for indexed meshes,
    /// when frustum culling is enabled but occlusion culling *isn't* enabled.
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline that builds indirect draw parameters for non-indexed
    /// meshes, when frustum culling is enabled but occlusion culling *isn't*
    /// enabled.
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Compute shader pipelines for the early prepass phase that draws meshes
    /// visible in the previous frame.
    pub early_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the late prepass phase that draws meshes
    /// that weren't visible in the previous frame, but became visible this
    /// frame.
    pub late_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the main color phase.
    pub main_phase: PreprocessPhasePipelines,
}
/// Compute shader pipelines for a specific phase: early, late, or main.
///
/// The distinction between these phases is relevant for occlusion culling.
/// [`PreprocessPipelines`] holds one of these per phase.
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// The pipeline that resets the indirect draw counts used in
    /// `multi_draw_indirect_count` to 0 in preparation for a new pass.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// The pipeline used for indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into indexed indirect
    /// parameters.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline used for non-indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into non-indexed
    /// indirect parameters.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}
/// The pipeline for the GPU mesh preprocessing shader (`mesh_preprocess.wgsl`).
pub struct PreprocessPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`; until then it's
    /// `None` and nodes that need the pipeline skip their work.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
/// The pipeline for the batch set count reset shader
/// (`reset_indirect_batch_sets.wgsl`).
///
/// This shader resets the indirect batch set count to 0 for each view. It runs
/// in between every phase (early, late, and main).
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
/// The pipeline for the indirect parameter building shader
/// (`build_indirect_params.wgsl`).
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
// Both pipeline key types are declared in a single `bitflags!` invocation.
bitflags! {
    /// Specifies variants of the mesh preprocessing shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// Whether GPU frustum culling is in use.
        ///
        /// This `#define`'s `FRUSTUM_CULLING` in the shader.
        const FRUSTUM_CULLING = 1;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This `#define`'s `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 2;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This `#define`'s `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 4;
    }

    /// Specifies variants of the indirect parameter building shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Whether the indirect parameter building shader is processing indexed
        /// meshes (those that have index buffers).
        ///
        /// This defines `INDEXED` in the shader.
        const INDEXED = 1;
        /// Whether the GPU and driver supports `multi_draw_indirect_count`.
        ///
        /// This defines `MULTI_DRAW_INDIRECT_COUNT_SUPPORTED` in the shader.
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This `#define`'s `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 4;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This `#define`'s `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 8;
        /// Whether this is the late phase of GPU two-phase occlusion culling.
        ///
        /// This `#define`'s `LATE_PHASE` in the shader.
        const LATE_PHASE = 16;
        /// Whether this is the phase that runs after the early and late phases,
        /// and right before the main drawing logic, when GPU two-phase
        /// occlusion culling is in use.
        ///
        /// This `#define`'s `MAIN_PHASE` in the shader.
        const MAIN_PHASE = 32;
    }
}
/// The compute shader bind group for the mesh preprocessing pass for each
/// render phase.
///
/// This goes on the view. It maps the [`core::any::TypeId`] of a render phase
/// (e.g. [`bevy_core_pipeline::core_3d::Opaque3d`]) to the
/// [`PhasePreprocessBindGroups`] for that phase.
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);
/// The compute shader bind group for the mesh preprocessing step for a single
/// render phase on a single view.
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The bind group used for the single invocation of the compute shader when
    /// indirect drawing is *not* being used.
    ///
    /// Because direct drawing doesn't require splitting the meshes into indexed
    /// and non-indexed meshes, there's only one bind group in this case.
    Direct(BindGroup),
    /// The bind groups used for the compute shader when indirect drawing is
    /// being used, but occlusion culling isn't being used.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, there are two bind groups here.
    IndirectFrustumCulling {
        /// The bind group for indexed meshes.
        indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes.
        non_indexed: Option<BindGroup>,
    },
    /// The bind groups used for the compute shader when both indirect drawing
    /// and occlusion culling are being used.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, and because occlusion culling requires splitting
    /// this phase into early and late versions, there are four bind groups
    /// here.
    IndirectOcclusionCulling {
        /// The bind group for indexed meshes during the early mesh
        /// preprocessing phase.
        early_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the early mesh
        /// preprocessing phase.
        early_non_indexed: Option<BindGroup>,
        /// The bind group for indexed meshes during the late mesh preprocessing
        /// phase.
        late_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the late mesh
        /// preprocessing phase.
        late_non_indexed: Option<BindGroup>,
    },
}
/// The bind groups for the compute shaders that reset indirect draw counts and
/// build indirect parameters.
///
/// There's one set of bind group for each phase. Phases are keyed off their
/// [`core::any::TypeId`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);
impl BuildIndirectParametersBindGroups {
/// Creates a new, empty [`BuildIndirectParametersBindGroups`] table.
pub fn new() -> BuildIndirectParametersBindGroups {
Self::default()
}
}
/// The per-phase set of bind groups for the compute shaders that reset indirect
/// draw counts and build indirect parameters.
///
/// Each bind group is `None` until prepared; consumers must handle the absent
/// case.
pub struct PhaseBuildIndirectParametersBindGroups {
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// non-indexed meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for indexed
    /// meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for
    /// non-indexed meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}
/// Marker component that stops the GPU preprocessing nodes from attempting to
/// generate buffers for this view.
///
/// This is useful to avoid duplicating effort when the bind group is shared
/// between views.
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;
impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        // Register the compute shaders used by the preprocessing and indirect
        // parameter building passes.
        embedded_asset!(app, "mesh_preprocess.wgsl");
        embedded_asset!(app, "reset_indirect_batch_sets.wgsl");
        embedded_asset!(app, "build_indirect_params.wgsl");
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // This plugin does nothing if GPU instance buffer building isn't in
        // use.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }
        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSystems::Prepare),
                    // Bind groups can only be prepared once the batched
                    // instance buffers resource exists.
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSystems::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSystems::PrepareResourcesFlush),
                ),
            )
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            // Main node ordering: clear metadata, run the early
            // preprocess/prepass chain, downsample depth, run the late chain,
            // then build final indirect parameters before the main pass.
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            // The shadow passes are ordered into the same chains via extra
            // edges.
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                )
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                )
            );
    }
}
impl Node for ClearIndirectParametersMetadataNode {
    /// Records `clear_buffer` commands that zero the GPU-side indirect
    /// metadata for every phase before the preprocessing shaders run.
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // If the resource is absent (e.g. indirect drawing disabled), there's
        // nothing to clear.
        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
        else {
            return Ok(());
        };
        // Clear out each indexed and non-indexed GPU-side buffer.
        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer()
            {
                // Only the in-use prefix (`batch_count` metadata structs) is
                // cleared, not the buffer's whole capacity.
                render_context.command_encoder().clear_buffer(
                    indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    non_indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
        }
        Ok(())
    }
}
impl FromWorld for EarlyGpuPreprocessNode {
fn from_world(world: &mut World) -> Self {
Self {
view_query: QueryState::new(world),
main_view_query: QueryState::new(world),
}
}
}
impl Node for EarlyGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        // Keep the cached query states in sync with newly-created archetypes.
        self.view_query.update_archetypes(world);
        self.main_view_query.update_archetypes(world);
    }

    /// Records one compute pass that dispatches the early mesh preprocessing
    /// shader for the graph's view and all of its attached light views.
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();
        // Grab the [`BatchedInstanceBuffers`].
        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();
        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("early_mesh_preprocessing"),
                    timestamp_writes: None,
                });
        let pass_span = diagnostics.pass_span(&mut compute_pass, "early_mesh_preprocessing");
        // Process the graph's own view plus any light views attached to it.
        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
        all_views.push(graph.view_entity());
        if let Ok(shadow_cascade_views) =
            self.main_view_query.get_manual(world, graph.view_entity())
        {
            all_views.extend(shadow_cascade_views.lights.iter().copied());
        }
        // Run the compute passes.
        for view_entity in all_views {
            let Ok((
                view,
                bind_groups,
                view_uniform_offset,
                no_indirect_drawing,
                occlusion_culling,
            )) = self.view_query.get_manual(world, view_entity)
            else {
                continue;
            };
            // Skip views whose resources haven't been prepared yet.
            let Some(bind_groups) = bind_groups else {
                continue;
            };
            let Some(view_uniform_offset) = view_uniform_offset else {
                continue;
            };
            // Select the right pipeline, depending on whether GPU culling is in
            // use.
            let maybe_pipeline_id = if no_indirect_drawing {
                preprocess_pipelines.direct_preprocess.pipeline_id
            } else if occlusion_culling {
                preprocess_pipelines
                    .early_gpu_occlusion_culling_preprocess
                    .pipeline_id
            } else {
                preprocess_pipelines
                    .gpu_frustum_culling_preprocess
                    .pipeline_id
            };
            // Fetch the pipeline.
            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                continue;
            };
            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                // This will happen while the pipeline is being compiled and is fine.
                continue;
            };
            compute_pass.set_pipeline(preprocess_pipeline);
            // Loop over each render phase.
            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                // Grab the work item buffers for this view.
                let Some(work_item_buffers) = batched_phase_instance_buffers
                    .work_item_buffers
                    .get(&view.retained_view_entity)
                else {
                    continue;
                };
                // Fetch the bind group for the render phase.
                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                    continue;
                };
                // Make sure the mesh preprocessing shader has access to the
                // view info it needs to do culling and motion vector
                // computation.
                let dynamic_offsets = [view_uniform_offset.offset];
                // Are we drawing directly or indirectly?
                match *phase_bind_groups {
                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
                        // Invoke the mesh preprocessing shader to transform
                        // meshes only, but not cull.
                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                        else {
                            continue;
                        };
                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                        // Don't dispatch an empty workload.
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }
                    // The early phase uses the same dispatch logic for both
                    // indirect variants; only the early bind groups of the
                    // occlusion-culling variant are consumed here.
                    PhasePreprocessBindGroups::IndirectFrustumCulling {
                        indexed: ref maybe_indexed_bind_group,
                        non_indexed: ref maybe_non_indexed_bind_group,
                    }
                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        early_indexed: ref maybe_indexed_bind_group,
                        early_non_indexed: ref maybe_non_indexed_bind_group,
                        ..
                    } => {
                        // Invoke the mesh preprocessing shader to transform and
                        // cull the meshes.
                        let PreprocessWorkItemBuffers::Indirect {
                            indexed: indexed_buffer,
                            non_indexed: non_indexed_buffer,
                            ..
                        } = work_item_buffers
                        else {
                            continue;
                        };
                        // Transform and cull indexed meshes if there are any.
                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                            // Under occlusion culling, pass the offset of the
                            // late indirect parameters for indexed meshes via
                            // push constants, before binding and dispatching.
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                                );
                            }
                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                        // Transform and cull non-indexed meshes if there are any.
                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_non_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(
                                        &late_indirect_parameters_non_indexed_offset,
                                    ),
                                );
                            }
                            compute_pass.set_bind_group(
                                0,
                                non_indexed_bind_group,
                                &dynamic_offsets,
                            );
                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                    }
                }
            }
        }
        pass_span.end(&mut compute_pass);
        Ok(())
    }
}
impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
fn from_world(world: &mut World) -> Self {
Self {
view_query: QueryState::new(world),
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/mesh.rs | crates/bevy_pbr/src/render/mesh.rs | use crate::{
material_bind_groups::{MaterialBindGroupIndex, MaterialBindGroupSlot},
resources::write_atmosphere_buffer,
};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetId};
use bevy_camera::{
primitives::Aabb,
visibility::{NoFrustumCulling, RenderLayers, ViewVisibility, VisibilityRange},
Camera, Camera3d, Projection,
};
use bevy_core_pipeline::{
core_3d::{AlphaMask3d, Opaque3d, Transmissive3d, Transparent3d, CORE_3D_DEPTH_FORMAT},
deferred::{AlphaMask3dDeferred, Opaque3dDeferred},
oit::{prepare_oit_buffers, OrderIndependentTransparencySettingsOffset},
prepass::MotionVectorPrepass,
};
use bevy_derive::{Deref, DerefMut};
use bevy_diagnostic::FrameCount;
use bevy_ecs::{
entity::EntityHashSet,
prelude::*,
query::{QueryData, ROQueryItem},
relationship::RelationshipSourceCollection,
system::{lifetimeless::*, SystemParamItem, SystemState},
};
use bevy_image::{BevyDefault, ImageSampler, TextureFormatPixelInfo};
use bevy_light::{
EnvironmentMapLight, IrradianceVolume, NotShadowCaster, NotShadowReceiver,
ShadowFilteringMethod, TransmittedShadowReceiver,
};
use bevy_math::{Affine3, Rect, UVec2, Vec3, Vec4};
use bevy_mesh::{
skinning::SkinnedMesh, BaseMeshPipelineKey, Mesh, Mesh3d, MeshTag, MeshVertexBufferLayoutRef,
VertexAttributeDescriptor,
};
use bevy_platform::collections::{hash_map::Entry, HashMap};
use bevy_render::{
batching::{
gpu_preprocessing::{
self, GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
IndirectParametersCpuMetadata, IndirectParametersIndexed, IndirectParametersNonIndexed,
InstanceInputUniformBuffer, UntypedPhaseIndirectParametersBuffers,
},
no_gpu_preprocessing, GetBatchData, GetFullBatchData, NoAutomaticBatching,
},
mesh::{allocator::MeshAllocator, RenderMesh, RenderMeshBufferInfo},
render_asset::RenderAssets,
render_phase::{
BinnedRenderPhasePlugin, InputUniformIndex, PhaseItem, PhaseItemExtraIndex, RenderCommand,
RenderCommandResult, SortedRenderPhasePlugin, TrackedRenderPass,
},
render_resource::*,
renderer::{RenderAdapter, RenderDevice, RenderQueue},
sync_world::MainEntityHashSet,
texture::{DefaultImageSampler, GpuImage},
view::{
self, NoIndirectDrawing, RenderVisibilityRanges, RetainedViewEntity, ViewTarget,
ViewUniformOffset,
},
Extract,
};
use bevy_shader::{load_shader_library, Shader, ShaderDefVal, ShaderSettings};
use bevy_transform::components::GlobalTransform;
use bevy_utils::{default, Parallel, TypeIdMap};
use core::any::TypeId;
use core::mem::size_of;
use material_bind_groups::MaterialBindingId;
use tracing::{error, warn};
use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE;
use crate::{
render::{
morph::{
extract_morphs, no_automatic_morph_batching, prepare_morphs, MorphIndices,
MorphUniforms,
},
skin::no_automatic_skin_batching,
},
*,
};
use bevy_core_pipeline::oit::OrderIndependentTransparencySettings;
use bevy_core_pipeline::prepass::{DeferredPrepass, DepthPrepass, NormalPrepass};
use bevy_core_pipeline::tonemapping::{DebandDither, Tonemapping};
use bevy_ecs::change_detection::Tick;
use bevy_ecs::system::SystemChangeTick;
use bevy_render::camera::TemporalJitter;
use bevy_render::prelude::Msaa;
use bevy_render::sync_world::{MainEntity, MainEntityHashMap};
use bevy_render::view::ExtractedView;
use bevy_render::RenderSystems::PrepareAssets;
use bytemuck::{Pod, Zeroable};
use nonmax::{NonMaxU16, NonMaxU32};
use smallvec::{smallvec, SmallVec};
use static_assertions::const_assert_eq;
/// Provides support for rendering 3D meshes.
pub struct MeshRenderPlugin {
    /// Whether we're building [`MeshUniform`]s on GPU.
    ///
    /// This requires compute shader support and so will be forcibly disabled if
    /// the platform doesn't support those (see `finish`, which also checks
    /// [`GpuPreprocessingSupport`]).
    pub use_gpu_instance_buffer_builder: bool,
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
impl MeshRenderPlugin {
/// Creates a new [`MeshRenderPlugin`] with the given debug flags.
pub fn new(debug_flags: RenderDebugFlags) -> MeshRenderPlugin {
MeshRenderPlugin {
use_gpu_instance_buffer_builder: false,
debug_flags,
}
}
}
/// How many textures are allowed in the view bind group layout (`@group(0)`) before
/// broader compatibility with WebGL and WebGPU is at risk, due to the minimum guaranteed
/// values for `MAX_TEXTURE_IMAGE_UNITS` (in WebGL) and `maxSampledTexturesPerShaderStage` (in WebGPU),
/// currently both at 16.
///
/// We use 10 here because it still leaves us, in a worst case scenario, with 6 textures for the other bind groups.
///
/// See: <https://gpuweb.github.io/gpuweb/#limits>
// NOTE(review): only compiled in debug builds — presumably consumed by a
// debug-only assertion elsewhere in this file; confirm before relying on it.
#[cfg(debug_assertions)]
pub const MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES: usize = 10;
impl Plugin for MeshRenderPlugin {
    fn build(&self, app: &mut App) {
        load_shader_library!(app, "forward_io.wgsl");
        // `mesh_view_types.wgsl` needs the light-count limits baked in as
        // shader defs.
        load_shader_library!(app, "mesh_view_types.wgsl", |settings| *settings =
            ShaderSettings {
                shader_defs: vec![
                    ShaderDefVal::UInt(
                        "MAX_DIRECTIONAL_LIGHTS".into(),
                        MAX_DIRECTIONAL_LIGHTS as u32
                    ),
                    ShaderDefVal::UInt(
                        "MAX_CASCADES_PER_LIGHT".into(),
                        MAX_CASCADES_PER_LIGHT as u32,
                    )
                ]
            });
        load_shader_library!(app, "mesh_view_bindings.wgsl");
        load_shader_library!(app, "mesh_types.wgsl");
        load_shader_library!(app, "mesh_functions.wgsl");
        load_shader_library!(app, "skinning.wgsl");
        load_shader_library!(app, "morph.wgsl");
        load_shader_library!(app, "occlusion_culling.wgsl");
        embedded_asset!(app, "mesh.wgsl");
        // Everything below requires the render sub-app.
        if app.get_sub_app(RenderApp).is_none() {
            return;
        }
        app.add_systems(
            PostUpdate,
            (no_automatic_skin_batching, no_automatic_morph_batching),
        )
        // Register the binned/sorted render phases this plugin draws into.
        .add_plugins((
            BinnedRenderPhasePlugin::<Opaque3d, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<AlphaMask3d, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<Shadow, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<Opaque3dDeferred, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<AlphaMask3dDeferred, MeshPipeline>::new(self.debug_flags),
            SortedRenderPhasePlugin::<Transmissive3d, MeshPipeline>::new(self.debug_flags),
            SortedRenderPhasePlugin::<Transparent3d, MeshPipeline>::new(self.debug_flags),
        ));
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<MorphUniforms>()
                .init_resource::<MorphIndices>()
                .init_resource::<MeshCullingDataBuffer>()
                .init_resource::<RenderMaterialInstances>()
                .configure_sets(
                    ExtractSchedule,
                    MeshExtractionSystems
                        .after(view::extract_visibility_ranges)
                        .after(late_sweep_material_instances),
                )
                .add_systems(
                    ExtractSchedule,
                    (
                        extract_skins,
                        extract_morphs,
                        gpu_preprocessing::clear_batched_gpu_instance_buffers::<MeshPipeline>
                            .before(MeshExtractionSystems),
                    ),
                )
                .add_systems(
                    Render,
                    (
                        set_mesh_motion_vector_flags.in_set(RenderSystems::PrepareMeshes),
                        prepare_skins.in_set(RenderSystems::PrepareResources),
                        prepare_morphs.in_set(RenderSystems::PrepareResources),
                        prepare_mesh_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                        prepare_mesh_view_bind_groups
                            .in_set(RenderSystems::PrepareBindGroups)
                            .after(prepare_oit_buffers)
                            .after(write_atmosphere_buffer),
                        no_gpu_preprocessing::clear_batched_cpu_instance_buffers::<MeshPipeline>
                            .in_set(RenderSystems::Cleanup)
                            .after(RenderSystems::Render),
                    ),
                );
        }
    }

    fn finish(&self, app: &mut App) {
        let mut mesh_bindings_shader_defs = Vec::with_capacity(1);
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<ViewKeyCache>()
                .init_resource::<ViewSpecializationTicks>()
                .init_resource::<GpuPreprocessingSupport>()
                .init_resource::<SkinUniforms>()
                .add_systems(
                    Render,
                    check_views_need_specialization.in_set(PrepareAssets),
                );
            // GPU instance buffer building requires both the plugin opt-in and
            // platform support.
            let gpu_preprocessing_support =
                render_app.world().resource::<GpuPreprocessingSupport>();
            let use_gpu_instance_buffer_builder =
                self.use_gpu_instance_buffer_builder && gpu_preprocessing_support.is_available();
            let render_mesh_instances = RenderMeshInstances::new(use_gpu_instance_buffer_builder);
            render_app.insert_resource(render_mesh_instances);
            if use_gpu_instance_buffer_builder {
                // GPU path: extract meshes for GPU building and write the
                // batched instance buffers each frame.
                render_app
                    .init_resource::<gpu_preprocessing::BatchedInstanceBuffers<
                        MeshUniform,
                        MeshInputUniform
                    >>()
                    .init_resource::<RenderMeshInstanceGpuQueues>()
                    .init_resource::<MeshesToReextractNextFrame>()
                    .add_systems(
                        ExtractSchedule,
                        extract_meshes_for_gpu_building.in_set(MeshExtractionSystems),
                    )
                    .add_systems(
                        Render,
                        (
                            gpu_preprocessing::write_batched_instance_buffers::<MeshPipeline>
                                .in_set(RenderSystems::PrepareResourcesFlush),
                            gpu_preprocessing::delete_old_work_item_buffers::<MeshPipeline>
                                .in_set(RenderSystems::PrepareResources),
                            collect_meshes_for_gpu_building
                                .in_set(RenderSystems::PrepareMeshes)
                                // This must be before
                                // `set_mesh_motion_vector_flags` so it doesn't
                                // overwrite those flags.
                                .before(set_mesh_motion_vector_flags),
                        ),
                    );
            } else {
                // CPU path: build and write the batched instance buffer on the
                // CPU instead.
                let render_device = render_app.world().resource::<RenderDevice>();
                let cpu_batched_instance_buffer = no_gpu_preprocessing::BatchedInstanceBuffer::<
                    MeshUniform,
                >::new(&render_device.limits());
                render_app
                    .insert_resource(cpu_batched_instance_buffer)
                    .add_systems(
                        ExtractSchedule,
                        extract_meshes_for_cpu_building.in_set(MeshExtractionSystems),
                    )
                    .add_systems(
                        Render,
                        no_gpu_preprocessing::write_batched_instance_buffer::<MeshPipeline>
                            .in_set(RenderSystems::PrepareResourcesFlush),
                    );
            };
            let render_device = render_app.world().resource::<RenderDevice>();
            if let Some(per_object_buffer_batch_size) =
                GpuArrayBuffer::<MeshUniform>::batch_size(&render_device.limits())
            {
                mesh_bindings_shader_defs.push(ShaderDefVal::UInt(
                    "PER_OBJECT_BUFFER_BATCH_SIZE".into(),
                    per_object_buffer_batch_size,
                ));
            }
            render_app
                .init_resource::<MeshPipelineViewLayouts>()
                .init_resource::<MeshPipeline>();
        }
        // Load the mesh_bindings shader module here as it depends on runtime information about
        // whether storage buffers are supported, or the maximum uniform buffer binding size.
        load_shader_library!(app, "mesh_bindings.wgsl", move |settings| *settings =
            ShaderSettings {
                shader_defs: mesh_bindings_shader_defs.clone(),
            });
    }
}
/// A cache of the last [`MeshPipelineKey`] computed for each view, keyed by the
/// view's [`RetainedViewEntity`].
///
/// Written by [`check_views_need_specialization`]; lets the renderer detect
/// when a view's key changed and meshes need respecialization — confirm
/// against the readers of this resource.
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewKeyCache(HashMap<RetainedViewEntity, MeshPipelineKey>);
/// Per-view change ticks, keyed by [`RetainedViewEntity`].
///
/// Written by [`check_views_need_specialization`] — presumably records the
/// tick at which each view last required respecialization; verify against the
/// systems that read this resource.
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewSpecializationTicks(HashMap<RetainedViewEntity, Tick>);
/// Recomputes the [`MeshPipelineKey`] for every extracted view and records, in
/// [`ViewKeyCache`] and [`ViewSpecializationTicks`], which views changed this
/// frame.
///
/// The key is assembled from the view's MSAA/HDR settings, the enabled
/// prepasses, tonemapping/dither state, the shadow filtering method, the
/// projection kind, and the presence of optional rendering features (SSAO,
/// distance fog, light probes, OIT, atmosphere, screen-space specular
/// transmission). The cache entry and its tick are only rewritten when the
/// newly computed key differs from the cached one.
pub fn check_views_need_specialization(
    mut view_key_cache: ResMut<ViewKeyCache>,
    mut view_specialization_ticks: ResMut<ViewSpecializationTicks>,
    // Every item in this query is read-only (`&T`, `Option<&T>`, `Has<T>`),
    // so the query doesn't need to be mutable.
    views: Query<(
        &ExtractedView,
        &Msaa,
        Option<&Tonemapping>,
        Option<&DebandDither>,
        Option<&ShadowFilteringMethod>,
        Has<ScreenSpaceAmbientOcclusion>,
        (
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
        ),
        Option<&Camera3d>,
        Has<TemporalJitter>,
        Option<&Projection>,
        Has<DistanceFog>,
        (
            Has<RenderViewLightProbes<EnvironmentMapLight>>,
            Has<RenderViewLightProbes<IrradianceVolume>>,
        ),
        Has<OrderIndependentTransparencySettings>,
        Has<ExtractedAtmosphere>,
    )>,
    ticks: SystemChangeTick,
) {
    for (
        view,
        msaa,
        tonemapping,
        dither,
        shadow_filter_method,
        ssao,
        (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass),
        camera_3d,
        temporal_jitter,
        projection,
        distance_fog,
        (has_environment_maps, has_irradiance_volumes),
        has_oit,
        has_atmosphere,
    ) in views.iter()
    {
        // Start from the MSAA/HDR base key, then OR in a bit per feature.
        let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples())
            | MeshPipelineKey::from_hdr(view.hdr);
        if normal_prepass {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }
        if depth_prepass {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }
        if motion_vector_prepass {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }
        if deferred_prepass {
            view_key |= MeshPipelineKey::DEFERRED_PREPASS;
        }
        if temporal_jitter {
            view_key |= MeshPipelineKey::TEMPORAL_JITTER;
        }
        if has_environment_maps {
            view_key |= MeshPipelineKey::ENVIRONMENT_MAP;
        }
        if has_irradiance_volumes {
            view_key |= MeshPipelineKey::IRRADIANCE_VOLUME;
        }
        if has_oit {
            view_key |= MeshPipelineKey::OIT_ENABLED;
        }
        if has_atmosphere {
            view_key |= MeshPipelineKey::ATMOSPHERE;
        }
        if view.invert_culling {
            view_key |= MeshPipelineKey::INVERT_CULLING;
        }
        if let Some(projection) = projection {
            view_key |= match projection {
                Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE,
                Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC,
                Projection::Custom(_) => MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD,
            };
        }
        match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) {
            ShadowFilteringMethod::Hardware2x2 => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2;
            }
            ShadowFilteringMethod::Gaussian => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN;
            }
            ShadowFilteringMethod::Temporal => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL;
            }
        }
        // Tonemapping and dithering only apply to non-HDR views here.
        if !view.hdr {
            if let Some(tonemapping) = tonemapping {
                view_key |= MeshPipelineKey::TONEMAP_IN_SHADER;
                view_key |= tonemapping_pipeline_key(*tonemapping);
            }
            if let Some(DebandDither::Enabled) = dither {
                view_key |= MeshPipelineKey::DEBAND_DITHER;
            }
        }
        if ssao {
            view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION;
        }
        if distance_fog {
            view_key |= MeshPipelineKey::DISTANCE_FOG;
        }
        if let Some(camera_3d) = camera_3d {
            view_key |= screen_space_specular_transmission_pipeline_key(
                camera_3d.screen_space_specular_transmission_quality,
            );
        }
        // Probe the cache through the immutable `Deref` (`get`, not `get_mut`)
        // so that `ResMut` change detection is not triggered on frames where
        // no view key changed; we only deref mutably when we insert below.
        if !view_key_cache
            .get(&view.retained_view_entity)
            .is_some_and(|current_key| *current_key == view_key)
        {
            view_key_cache.insert(view.retained_view_entity, view_key);
            view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run());
        }
    }
}
/// The CPU-side transform state for a single mesh instance.
#[derive(Component)]
pub struct MeshTransforms {
    /// The model-to-world transform for the current frame.
    pub world_from_local: Affine3,
    /// The model-to-world transform from the previous frame; copied into
    /// [`MeshUniform::previous_world_from_local`] by [`MeshUniform::new`].
    pub previous_world_from_local: Affine3,
    /// Packed [`MeshFlags`] bits for this instance.
    pub flags: u32,
}
/// The per-instance mesh data uploaded to the GPU.
///
/// Built from [`MeshTransforms`] and the associated binding bookkeeping by
/// [`MeshUniform::new`].
#[derive(ShaderType, Clone)]
pub struct MeshUniform {
    // Affine 4x3 matrices transposed to 3x4
    pub world_from_local: [Vec4; 3],
    pub previous_world_from_local: [Vec4; 3],
    // 3x3 matrix packed in mat2x4 and f32 as:
    //   [0].xyz, [1].x,
    //   [1].yz, [2].xy
    //   [2].z
    pub local_from_world_transpose_a: [Vec4; 2],
    pub local_from_world_transpose_b: f32,
    /// Packed [`MeshFlags`] bits.
    pub flags: u32,
    // Four 16-bit unsigned normalized UV values packed into a `UVec2`:
    //
    //                         <--- MSB                   LSB --->
    //                         +---- min v ----+ +---- min u ----+
    //     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    //                         +---- max v ----+ +---- max u ----+
    //     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    //
    // (MSB: most significant bit; LSB: least significant bit.)
    pub lightmap_uv_rect: UVec2,
    /// The index of this mesh's first vertex in the vertex buffer.
    ///
    /// Multiple meshes can be packed into a single vertex buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
    /// this mesh in that buffer.
    pub first_vertex_index: u32,
    /// The current skin index, or `u32::MAX` if there's no skin.
    pub current_skin_index: u32,
    /// The material and lightmap indices, packed into 32 bits.
    ///
    /// Low 16 bits: index of the material inside the bind group data.
    /// High 16 bits: index of the lightmap in the binding array.
    pub material_and_lightmap_bind_group_slot: u32,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Padding.
    pub pad: u32,
}
/// Information that has to be transferred from CPU to GPU in order to produce
/// the full [`MeshUniform`].
///
/// This is essentially a subset of the fields in [`MeshUniform`] above.
///
/// Derives `Pod`/`Zeroable` with a `#[repr(C)]` layout so instances can be
/// copied byte-wise into GPU buffers.
#[derive(ShaderType, Pod, Zeroable, Clone, Copy, Default, Debug)]
#[repr(C)]
pub struct MeshInputUniform {
    /// Affine 4x3 matrix transposed to 3x4.
    pub world_from_local: [Vec4; 3],
    /// Four 16-bit unsigned normalized UV values packed into a `UVec2`:
    ///
    /// ```text
    ///                         <--- MSB                   LSB --->
    ///                         +---- min v ----+ +---- min u ----+
    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    ///                         +---- max v ----+ +---- max u ----+
    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    ///
    /// (MSB: most significant bit; LSB: least significant bit.)
    /// ```
    pub lightmap_uv_rect: UVec2,
    /// Various [`MeshFlags`].
    pub flags: u32,
    /// The index of this mesh's [`MeshInputUniform`] in the previous frame's
    /// buffer, if applicable.
    ///
    /// This is used for TAA. If not present, this will be `u32::MAX`.
    pub previous_input_index: u32,
    /// The index of this mesh's first vertex in the vertex buffer.
    ///
    /// Multiple meshes can be packed into a single vertex buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
    /// this mesh in that buffer.
    pub first_vertex_index: u32,
    /// The index of this mesh's first index in the index buffer, if any.
    ///
    /// Multiple meshes can be packed into a single index buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first index in
    /// this mesh in that buffer.
    ///
    /// If this mesh isn't indexed, this value is ignored.
    pub first_index_index: u32,
    /// For an indexed mesh, the number of indices that make it up; for a
    /// non-indexed mesh, the number of vertices in it.
    pub index_count: u32,
    /// The current skin index, or `u32::MAX` if there's no skin.
    pub current_skin_index: u32,
    /// The material and lightmap indices, packed into 32 bits.
    ///
    /// Low 16 bits: index of the material inside the bind group data.
    /// High 16 bits: index of the lightmap in the binding array.
    pub material_and_lightmap_bind_group_slot: u32,
    /// The number of the frame on which this [`MeshInputUniform`] was built.
    ///
    /// This is used to validate the previous transform and skin. If this
    /// [`MeshInputUniform`] wasn't updated on this frame, then we know that
    /// neither this mesh's transform nor that of its joints have been updated
    /// on this frame, and therefore the transforms of both this mesh and its
    /// joints must be identical to those for the previous frame.
    pub timestamp: u32,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Padding.
    pub pad: u32,
}
/// Information about each mesh instance needed to cull it on GPU.
///
/// This consists of its axis-aligned bounding box (AABB).
///
/// `Pod`/`Zeroable` with `#[repr(C)]` so the data can be copied byte-wise
/// into GPU buffers.
#[derive(ShaderType, Pod, Zeroable, Clone, Copy, Default)]
#[repr(C)]
pub struct MeshCullingData {
    /// The 3D center of the AABB in model space, padded with an extra unused
    /// float value.
    pub aabb_center: Vec4,
    /// The 3D extents of the AABB in model space, divided by two, padded with
    /// an extra unused float value.
    pub aabb_half_extents: Vec4,
}
/// A GPU buffer that holds the information needed to cull meshes on GPU.
///
/// At the moment, this simply holds each mesh's AABB
/// (one [`MeshCullingData`] per mesh).
///
/// To avoid wasting CPU time in the CPU culling case, this buffer will be empty
/// if GPU culling isn't in use.
#[derive(Resource, Deref, DerefMut)]
pub struct MeshCullingDataBuffer(RawBufferVec<MeshCullingData>);
impl MeshUniform {
    /// Packs a [`MeshTransforms`] (plus the associated binding and skin
    /// bookkeeping) into the GPU-facing [`MeshUniform`] layout.
    ///
    /// * `maybe_lightmap` supplies the lightmap slot and UV rectangle when the
    ///   mesh is lightmapped; without one, the slot field is stored as
    ///   `u16::MAX`.
    /// * `current_skin_index` of `None` is encoded as `u32::MAX`.
    /// * A missing `tag` is encoded as `0`.
    pub fn new(
        mesh_transforms: &MeshTransforms,
        first_vertex_index: u32,
        material_bind_group_slot: MaterialBindGroupSlot,
        maybe_lightmap: Option<(LightmapSlotIndex, Rect)>,
        current_skin_index: Option<u32>,
        tag: Option<u32>,
    ) -> Self {
        // 3x3 inverse transpose, packed as two `Vec4`s plus a trailing `f32`.
        let (inv_transpose_a, inv_transpose_b) =
            mesh_transforms.world_from_local.inverse_transpose_3x3();
        // `u16::MAX` in the high half marks "no lightmap".
        let lightmap_slot: u16 = maybe_lightmap.map_or(u16::MAX, |(slot, _)| slot.into());
        // Material slot in the low 16 bits, lightmap slot in the high 16.
        let packed_slots =
            u32::from(material_bind_group_slot) | ((lightmap_slot as u32) << 16);
        Self {
            world_from_local: mesh_transforms.world_from_local.to_transpose(),
            previous_world_from_local: mesh_transforms.previous_world_from_local.to_transpose(),
            lightmap_uv_rect: pack_lightmap_uv_rect(maybe_lightmap.map(|(_, uv_rect)| uv_rect)),
            local_from_world_transpose_a: inv_transpose_a,
            local_from_world_transpose_b: inv_transpose_b,
            flags: mesh_transforms.flags,
            first_vertex_index,
            current_skin_index: current_skin_index.unwrap_or(u32::MAX),
            material_and_lightmap_bind_group_slot: packed_slots,
            tag: tag.unwrap_or(0),
            pad: 0,
        }
    }
}
// NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_types.wgsl!
bitflags::bitflags! {
    /// Various flags and tightly-packed values on a mesh.
    ///
    /// Flags grow from the top bit down; other values grow from the bottom bit
    /// up.
    #[repr(transparent)]
    pub struct MeshFlags: u32 {
        /// Bitmask for the 16-bit index into the LOD array.
        ///
        /// This will be `u16::MAX` if this mesh has no LOD.
        const LOD_INDEX_MASK              = (1 << 16) - 1;
        /// Disables frustum culling for this mesh.
        ///
        /// This corresponds to the
        /// [`bevy_render::view::visibility::NoFrustumCulling`] component.
        const NO_FRUSTUM_CULLING          = 1 << 28;
        /// The mesh receives shadows (set unless the instance opts out; see
        /// [`MeshFlags::from_components`]).
        const SHADOW_RECEIVER             = 1 << 29;
        /// The mesh receives shadows transmitted through other surfaces (see
        /// the `transmitted_receiver` input to [`MeshFlags::from_components`]).
        const TRANSMITTED_SHADOW_RECEIVER = 1 << 30;
        // Indicates the sign of the determinant of the 3x3 model matrix. If the sign is positive,
        // then the flag should be set, else it should not be set.
        const SIGN_DETERMINANT_MODEL_3X3  = 1 << 31;
        /// No flags set.
        const NONE                        = 0;
        /// Sentinel value with every bit set, named for flag data that has not
        /// been initialized yet.
        const UNINITIALIZED               = 0xFFFFFFFF;
    }
}
impl MeshFlags {
    /// The first bit of the LOD index.
    pub const LOD_INDEX_SHIFT: u32 = 0;

    /// Assembles the packed [`MeshFlags`] word for one mesh instance from its
    /// transform and per-instance settings.
    fn from_components(
        transform: &GlobalTransform,
        lod_index: Option<NonMaxU16>,
        no_frustum_culling: bool,
        not_shadow_receiver: bool,
        transmitted_receiver: bool,
    ) -> MeshFlags {
        let mut flags = MeshFlags::empty();
        // Shadow reception is on by default; instances opt *out* of it.
        if !not_shadow_receiver {
            flags |= MeshFlags::SHADOW_RECEIVER;
        }
        if no_frustum_culling {
            flags |= MeshFlags::NO_FRUSTUM_CULLING;
        }
        if transmitted_receiver {
            flags |= MeshFlags::TRANSMITTED_SHADOW_RECEIVER;
        }
        // Record the handedness of the model matrix: set iff the determinant
        // of its 3x3 part is positive.
        if transform.affine().matrix3.determinant().is_sign_positive() {
            flags |= MeshFlags::SIGN_DETERMINANT_MODEL_3X3;
        }
        // `u16::MAX` in the LOD field means "no LOD".
        let lod_bits = lod_index.map_or(u16::MAX, u16::from);
        flags | MeshFlags::from_bits_retain((lod_bits as u32) << MeshFlags::LOD_INDEX_SHIFT)
    }
}
bitflags::bitflags! {
    /// Various useful flags for [`RenderMeshInstance`]s.
    ///
    /// Packed into a single byte (`u8`).
    #[derive(Clone, Copy)]
    pub struct RenderMeshInstanceFlags: u8 {
        /// The mesh casts shadows.
        const SHADOW_CASTER           = 1 << 0;
        /// The mesh can participate in automatic batching.
        const AUTOMATIC_BATCHING      = 1 << 1;
        /// The mesh had a transform last frame and so is eligible for motion
        /// vector computation.
        const HAS_PREVIOUS_TRANSFORM  = 1 << 2;
        /// The mesh had a skin last frame and so that skin should be taken into
        /// account for motion vector computation.
        const HAS_PREVIOUS_SKIN       = 1 << 3;
        /// The mesh had morph targets last frame and so they should be taken
        /// into account for motion vector computation.
        const HAS_PREVIOUS_MORPH      = 1 << 4;
    }
}
/// CPU data that the render world keeps for each entity, when *not* using GPU
/// mesh uniform building.
///
/// Dereferences to the embedded [`RenderMeshInstanceShared`].
#[derive(Deref, DerefMut)]
pub struct RenderMeshInstanceCpu {
    /// Data shared between both the CPU mesh uniform building and the GPU mesh
    /// uniform building paths.
    #[deref]
    pub shared: RenderMeshInstanceShared,
    /// The transform of the mesh.
    ///
    /// This will be written into the [`MeshUniform`] at the appropriate time.
    pub transforms: MeshTransforms,
}
/// CPU data that the render world needs to keep for each entity that contains a
/// mesh when using GPU mesh uniform building.
///
/// Dereferences to the embedded [`RenderMeshInstanceShared`].
#[derive(Deref, DerefMut)]
pub struct RenderMeshInstanceGpu {
    /// Data shared between both the CPU mesh uniform building and the GPU mesh
    /// uniform building paths.
    #[deref]
    pub shared: RenderMeshInstanceShared,
    /// The representative position of the mesh instance in world-space.
    ///
    /// This world-space center is used as a spatial proxy for view-dependent
    /// operations such as distance computation and render-order sorting.
    pub center: Vec3,
    /// The index of the [`MeshInputUniform`] in the buffer.
    pub current_uniform_index: NonMaxU32,
}
/// CPU data that the render world needs to keep about each entity that contains
/// a mesh.
///
/// This is the part common to both the CPU ([`RenderMeshInstanceCpu`]) and GPU
/// ([`RenderMeshInstanceGpu`]) mesh-uniform-building paths.
pub struct RenderMeshInstanceShared {
    /// The [`AssetId`] of the mesh.
    pub mesh_asset_id: AssetId<Mesh>,
    /// A slot for the material bind group index.
    pub material_bindings_index: MaterialBindingId,
    /// Various flags.
    pub flags: RenderMeshInstanceFlags,
    /// Index of the slab that the lightmap resides in, if a lightmap is
    /// present.
    pub lightmap_slab_index: Option<LightmapSlabIndex>,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Render layers that this mesh instance belongs to.
    pub render_layers: Option<RenderLayers>,
    /// A representative position of the mesh instance in local space,
    /// derived from its axis-aligned bounding box.
    ///
    /// This value is typically used as a spatial proxy for operations such as
    /// view-dependent sorting (e.g., transparent object ordering).
    pub center: Vec3,
}
/// Information that is gathered during the parallel portion of mesh extraction
/// when GPU mesh uniform building is enabled.
///
/// From this, the [`MeshInputUniform`] and [`RenderMeshInstanceGpu`] are
/// prepared.
pub struct RenderMeshInstanceGpuBuilder {
    /// Data that will be placed on the [`RenderMeshInstanceGpu`].
    pub shared: RenderMeshInstanceShared,
    /// The current transform.
    pub world_from_local: Affine3,
    /// Four 16-bit unsigned normalized UV values packed into a [`UVec2`]:
    ///
    /// ```text
    ///                         <--- MSB                   LSB --->
    ///                         +---- min v ----+ +---- min u ----+
    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    ///                         +---- max v ----+ +---- max u ----+
    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    ///
    /// (MSB: most significant bit; LSB: least significant bit.)
    /// ```
    pub lightmap_uv_rect: UVec2,
    /// The index of the previous mesh input, or `None` if there was no
    /// corresponding [`MeshInputUniform`] in the previous frame's buffer.
    pub previous_input_index: Option<NonMaxU32>,
    /// Various flags.
    pub mesh_flags: MeshFlags,
}
/// The per-thread queues used during [`extract_meshes_for_gpu_building`].
///
/// There are two varieties of these: one for when culling happens on CPU and
/// one for when culling happens on GPU. Having the two varieties avoids wasting
/// space if GPU culling is disabled.
#[derive(Default)]
pub enum RenderMeshInstanceGpuQueue {
/// The default value.
///
/// This becomes [`RenderMeshInstanceGpuQueue::CpuCulling`] or
/// [`RenderMeshInstanceGpuQueue::GpuCulling`] once extraction starts.
#[default]
None,
/// The version of [`RenderMeshInstanceGpuQueue`] that omits the
/// [`MeshCullingData`], so that we don't waste space when GPU
/// culling is disabled.
CpuCulling {
/// Stores GPU data for each entity that became visible or changed in
/// such a way that necessitates updating the [`MeshInputUniform`] (e.g.
/// changed transform).
changed: Vec<(MainEntity, RenderMeshInstanceGpuBuilder)>,
/// Stores the IDs of entities that became invisible this frame.
removed: Vec<MainEntity>,
},
/// The version of [`RenderMeshInstanceGpuQueue`] that contains the
/// [`MeshCullingData`], used when any view has GPU culling
/// enabled.
GpuCulling {
/// Stores GPU data for each entity that became visible or changed in
/// such a way that necessitates updating the [`MeshInputUniform`] (e.g.
/// changed transform).
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/mod.rs | crates/bevy_pbr/src/render/mod.rs | mod fog;
mod gpu_preprocess;
mod light;
pub(crate) mod mesh;
mod mesh_bindings;
mod mesh_view_bindings;
mod morph;
pub(crate) mod skin;
pub use fog::*;
pub use gpu_preprocess::*;
pub use light::*;
pub use mesh::*;
pub use mesh_bindings::MeshLayouts;
pub use mesh_view_bindings::*;
pub use morph::*;
pub use skin::{extract_skins, prepare_skins, skins_use_uniform_buffers, SkinUniforms, MAX_JOINTS};
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/mesh_bindings.rs | crates/bevy_pbr/src/render/mesh_bindings.rs | //! Bind group layout related definitions for the mesh pipeline.
use bevy_math::Mat4;
use bevy_mesh::morph::MAX_MORPH_WEIGHTS;
use bevy_render::{
render_resource::*,
renderer::{RenderAdapter, RenderDevice},
};
use crate::{binding_arrays_are_usable, render::skin::MAX_JOINTS, LightmapSlab};
/// The size in bytes of a single morph-target weight (`f32`).
const MORPH_WEIGHT_SIZE: usize = size_of::<f32>();
/// This is used to allocate buffers.
/// The correctness of the value depends on the GPU/platform.
/// The current value is chosen because it is guaranteed to work everywhere.
/// To allow for bigger values, a check must be made for the limits
/// of the GPU at runtime, which would mean not using consts anymore.
pub const MORPH_BUFFER_SIZE: usize = MAX_MORPH_WEIGHTS * MORPH_WEIGHT_SIZE;
/// The size in bytes of a single joint matrix (`Mat4`).
const JOINT_SIZE: usize = size_of::<Mat4>();
/// The size in bytes of a joint-matrix buffer holding [`MAX_JOINTS`] joints.
pub(crate) const JOINT_BUFFER_SIZE: usize = MAX_JOINTS * JOINT_SIZE;
/// Builders for the individual bind group *layout* entries used by the mesh
/// bind groups.
mod layout_entry {
    use core::num::NonZeroU32;

    use super::{JOINT_BUFFER_SIZE, MORPH_BUFFER_SIZE};
    use crate::{render::skin, MeshUniform, LIGHTMAPS_PER_SLAB};
    use bevy_render::{
        render_resource::{
            binding_types::{
                sampler, storage_buffer_read_only_sized, texture_2d, texture_3d,
                uniform_buffer_sized,
            },
            BindGroupLayoutEntryBuilder, BufferSize, GpuArrayBuffer, SamplerBindingType,
            ShaderStages, TextureSampleType,
        },
        settings::WgpuLimits,
    };

    /// The lightmap slab size as a `NonZeroU32` binding-array count.
    fn slab_count() -> NonZeroU32 {
        NonZeroU32::new(LIGHTMAPS_PER_SLAB as u32).unwrap()
    }

    /// The entry for the mesh uniform / storage buffer (bound at index 0 in
    /// every mesh layout).
    pub(super) fn model(limits: &WgpuLimits) -> BindGroupLayoutEntryBuilder {
        GpuArrayBuffer::<MeshUniform>::binding_layout(limits)
            .visibility(ShaderStages::VERTEX_FRAGMENT)
    }

    /// The entry for a joint-matrix buffer: a uniform buffer on platforms
    /// where skins use them, otherwise a read-only storage buffer.
    pub(super) fn skinning(limits: &WgpuLimits) -> BindGroupLayoutEntryBuilder {
        let size = BufferSize::new(JOINT_BUFFER_SIZE as u64);
        match skin::skins_use_uniform_buffers(limits) {
            true => uniform_buffer_sized(true, size),
            false => storage_buffer_read_only_sized(false, size),
        }
    }

    /// The entry for a morph-weights uniform buffer.
    pub(super) fn weights() -> BindGroupLayoutEntryBuilder {
        uniform_buffer_sized(true, BufferSize::new(MORPH_BUFFER_SIZE as u64))
    }

    /// The entry for the morph-targets texture.
    pub(super) fn targets() -> BindGroupLayoutEntryBuilder {
        texture_3d(TextureSampleType::Float { filterable: false })
    }

    /// The entry for a single lightmap texture (non-binding-array path).
    pub(super) fn lightmaps_texture_view() -> BindGroupLayoutEntryBuilder {
        texture_2d(TextureSampleType::Float { filterable: true }).visibility(ShaderStages::FRAGMENT)
    }

    /// The entry for a single lightmap sampler (non-binding-array path).
    pub(super) fn lightmaps_sampler() -> BindGroupLayoutEntryBuilder {
        sampler(SamplerBindingType::Filtering).visibility(ShaderStages::FRAGMENT)
    }

    /// The entry for the lightmap texture binding array (binding-array path).
    pub(super) fn lightmaps_texture_view_array() -> BindGroupLayoutEntryBuilder {
        texture_2d(TextureSampleType::Float { filterable: true })
            .visibility(ShaderStages::FRAGMENT)
            .count(slab_count())
    }

    /// The entry for the lightmap sampler binding array (binding-array path).
    pub(super) fn lightmaps_sampler_array() -> BindGroupLayoutEntryBuilder {
        sampler(SamplerBindingType::Filtering)
            .visibility(ShaderStages::FRAGMENT)
            .count(slab_count())
    }
}
/// Builders for the individual [`BindGroupEntry`]s of the mesh bind groups.
mod entry {
    use super::{JOINT_BUFFER_SIZE, MORPH_BUFFER_SIZE};
    use crate::render::skin;
    use bevy_render::{
        render_resource::{
            BindGroupEntry, BindingResource, Buffer, BufferBinding, BufferSize, Sampler,
            TextureView, WgpuSampler, WgpuTextureView,
        },
        renderer::RenderDevice,
    };

    /// Builds a buffer entry at `binding`, bound from offset 0 with the given
    /// size (`None` binds the whole buffer).
    fn entry(binding: u32, size: Option<u64>, buffer: &Buffer) -> BindGroupEntry<'_> {
        let size = size.map(|size| BufferSize::new(size).unwrap());
        BindGroupEntry {
            binding,
            resource: BindingResource::Buffer(BufferBinding {
                buffer,
                offset: 0,
                size,
            }),
        }
    }

    /// Builds a texture-view entry at `binding`.
    fn texture_entry(binding: u32, texture: &TextureView) -> BindGroupEntry<'_> {
        BindGroupEntry {
            binding,
            resource: BindingResource::TextureView(texture),
        }
    }

    /// The mesh model uniform entry.
    pub(super) fn model(binding: u32, resource: BindingResource) -> BindGroupEntry {
        BindGroupEntry { binding, resource }
    }

    /// A joint-matrix buffer entry. On the uniform-buffer skinning path the
    /// binding is sized to `JOINT_BUFFER_SIZE`; the storage path binds the
    /// whole buffer.
    pub(super) fn skinning<'a>(
        render_device: &RenderDevice,
        binding: u32,
        buffer: &'a Buffer,
    ) -> BindGroupEntry<'a> {
        let size = skin::skins_use_uniform_buffers(&render_device.limits())
            .then_some(JOINT_BUFFER_SIZE as u64);
        entry(binding, size, buffer)
    }

    /// A morph-weights buffer entry, sized to `MORPH_BUFFER_SIZE`.
    pub(super) fn weights(binding: u32, buffer: &Buffer) -> BindGroupEntry<'_> {
        entry(binding, Some(MORPH_BUFFER_SIZE as u64), buffer)
    }

    /// The morph-targets texture entry.
    pub(super) fn targets(binding: u32, texture: &TextureView) -> BindGroupEntry<'_> {
        texture_entry(binding, texture)
    }

    /// A single lightmap texture entry (non-binding-array path).
    pub(super) fn lightmaps_texture_view(
        binding: u32,
        texture: &TextureView,
    ) -> BindGroupEntry<'_> {
        texture_entry(binding, texture)
    }

    /// A single lightmap sampler entry (non-binding-array path).
    pub(super) fn lightmaps_sampler(binding: u32, sampler: &Sampler) -> BindGroupEntry<'_> {
        BindGroupEntry {
            binding,
            resource: BindingResource::Sampler(sampler),
        }
    }

    /// The lightmap texture binding-array entry.
    pub(super) fn lightmaps_texture_view_array<'a>(
        binding: u32,
        textures: &'a [&'a WgpuTextureView],
    ) -> BindGroupEntry<'a> {
        BindGroupEntry {
            binding,
            resource: BindingResource::TextureViewArray(textures),
        }
    }

    /// The lightmap sampler binding-array entry.
    pub(super) fn lightmaps_sampler_array<'a>(
        binding: u32,
        samplers: &'a [&'a WgpuSampler],
    ) -> BindGroupEntry<'a> {
        BindGroupEntry {
            binding,
            resource: BindingResource::SamplerArray(samplers),
        }
    }
}
/// All possible [`BindGroupLayout`]s in bevy's default mesh shader (`mesh.wgsl`).
///
/// Built once by [`MeshLayouts::new`]; the bind-group constructor methods on
/// this type pick the matching layout.
#[derive(Clone)]
pub struct MeshLayouts {
    /// The mesh model uniform (transform) and nothing else.
    pub model_only: BindGroupLayoutDescriptor,
    /// Includes the lightmap texture and uniform.
    pub lightmapped: BindGroupLayoutDescriptor,
    /// Also includes the uniform for skinning
    pub skinned: BindGroupLayoutDescriptor,
    /// Like [`MeshLayouts::skinned`], but includes slots for the previous
    /// frame's joint matrices, so that we can compute motion vectors.
    pub skinned_motion: BindGroupLayoutDescriptor,
    /// Also includes the uniform and [`MorphAttributes`] for morph targets.
    ///
    /// [`MorphAttributes`]: bevy_mesh::morph::MorphAttributes
    pub morphed: BindGroupLayoutDescriptor,
    /// Like [`MeshLayouts::morphed`], but includes a slot for the previous
    /// frame's morph weights, so that we can compute motion vectors.
    pub morphed_motion: BindGroupLayoutDescriptor,
    /// Also includes both uniforms for skinning and morph targets, also the
    /// morph target [`MorphAttributes`] binding.
    ///
    /// [`MorphAttributes`]: bevy_mesh::morph::MorphAttributes
    pub morphed_skinned: BindGroupLayoutDescriptor,
    /// Like [`MeshLayouts::morphed_skinned`], but includes slots for the
    /// previous frame's joint matrices and morph weights, so that we can
    /// compute motion vectors.
    pub morphed_skinned_motion: BindGroupLayoutDescriptor,
}
impl MeshLayouts {
    /// Prepare the layouts used by the default bevy [`Mesh`].
    ///
    /// [`Mesh`]: bevy_mesh::Mesh
    pub fn new(render_device: &RenderDevice, render_adapter: &RenderAdapter) -> Self {
        MeshLayouts {
            model_only: Self::model_only_layout(render_device),
            lightmapped: Self::lightmapped_layout(render_device, render_adapter),
            skinned: Self::skinned_layout(render_device),
            skinned_motion: Self::skinned_motion_layout(render_device),
            morphed: Self::morphed_layout(render_device),
            morphed_motion: Self::morphed_motion_layout(render_device),
            morphed_skinned: Self::morphed_skinned_layout(render_device),
            morphed_skinned_motion: Self::morphed_skinned_motion_layout(render_device),
        }
    }
    // ---------- create individual BindGroupLayouts ----------
    /// Creates the layout for plain meshes: just the model uniform at
    /// binding 0 (its visibility is set by the entry itself).
    fn model_only_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "mesh_layout",
            &BindGroupLayoutEntries::single(
                ShaderStages::empty(),
                layout_entry::model(&render_device.limits()),
            ),
        )
    }
    /// Creates the layout for skinned meshes.
    fn skinned_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "skinned_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's joint matrix buffer.
                    (1, layout_entry::skinning(&render_device.limits())),
                ),
            ),
        )
    }
    /// Creates the layout for skinned meshes with the infrastructure to compute
    /// motion vectors.
    fn skinned_motion_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "skinned_motion_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's joint matrix buffer.
                    (1, layout_entry::skinning(&render_device.limits())),
                    // The previous frame's joint matrix buffer.
                    (6, layout_entry::skinning(&render_device.limits())),
                ),
            ),
        )
    }
    /// Creates the layout for meshes with morph targets.
    fn morphed_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "morphed_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's morph weight buffer.
                    (2, layout_entry::weights()),
                    (3, layout_entry::targets()),
                ),
            ),
        )
    }
    /// Creates the layout for meshes with morph targets and the infrastructure
    /// to compute motion vectors.
    fn morphed_motion_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            // This label previously duplicated `morphed_layout`'s
            // "morphed_mesh_layout", which made the two layouts
            // indistinguishable in debuggers/validation output; it now follows
            // the `*_motion_mesh_layout` naming used by the other variants.
            "morphed_motion_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's morph weight buffer.
                    (2, layout_entry::weights()),
                    (3, layout_entry::targets()),
                    // The previous frame's morph weight buffer.
                    (7, layout_entry::weights()),
                ),
            ),
        )
    }
    /// Creates the bind group layout for meshes with both skins and morph
    /// targets.
    fn morphed_skinned_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "morphed_skinned_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's joint matrix buffer.
                    (1, layout_entry::skinning(&render_device.limits())),
                    // The current frame's morph weight buffer.
                    (2, layout_entry::weights()),
                    (3, layout_entry::targets()),
                ),
            ),
        )
    }
    /// Creates the bind group layout for meshes with both skins and morph
    /// targets, in addition to the infrastructure to compute motion vectors.
    fn morphed_skinned_motion_layout(render_device: &RenderDevice) -> BindGroupLayoutDescriptor {
        BindGroupLayoutDescriptor::new(
            "morphed_skinned_motion_mesh_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::VERTEX,
                (
                    (0, layout_entry::model(&render_device.limits())),
                    // The current frame's joint matrix buffer.
                    (1, layout_entry::skinning(&render_device.limits())),
                    // The current frame's morph weight buffer.
                    (2, layout_entry::weights()),
                    (3, layout_entry::targets()),
                    // The previous frame's joint matrix buffer.
                    (6, layout_entry::skinning(&render_device.limits())),
                    // The previous frame's morph weight buffer.
                    (7, layout_entry::weights()),
                ),
            ),
        )
    }
    /// Creates the layout for lightmapped meshes.
    ///
    /// Uses the binding-array variants of the lightmap entries when binding
    /// arrays are usable on this device/adapter, and the single-texture
    /// fallback otherwise.
    fn lightmapped_layout(
        render_device: &RenderDevice,
        render_adapter: &RenderAdapter,
    ) -> BindGroupLayoutDescriptor {
        if binding_arrays_are_usable(render_device, render_adapter) {
            BindGroupLayoutDescriptor::new(
                "lightmapped_mesh_layout",
                &BindGroupLayoutEntries::with_indices(
                    ShaderStages::VERTEX,
                    (
                        (0, layout_entry::model(&render_device.limits())),
                        (4, layout_entry::lightmaps_texture_view_array()),
                        (5, layout_entry::lightmaps_sampler_array()),
                    ),
                ),
            )
        } else {
            BindGroupLayoutDescriptor::new(
                "lightmapped_mesh_layout",
                &BindGroupLayoutEntries::with_indices(
                    ShaderStages::VERTEX,
                    (
                        (0, layout_entry::model(&render_device.limits())),
                        (4, layout_entry::lightmaps_texture_view()),
                        (5, layout_entry::lightmaps_sampler()),
                    ),
                ),
            )
        }
    }
    // ---------- BindGroup methods ----------
    /// Creates the bind group for plain meshes (no skin, morph targets, or
    /// lightmap).
    pub fn model_only(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
    ) -> BindGroup {
        render_device.create_bind_group(
            "model_only_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.model_only),
            &[entry::model(0, model.clone())],
        )
    }
    /// Creates the bind group for lightmapped meshes.
    ///
    /// When `bindless_lightmaps` is true, binds the slab's whole texture and
    /// sampler arrays; otherwise binds the slab's first lightmap directly.
    pub fn lightmapped(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        lightmap_slab: &LightmapSlab,
        bindless_lightmaps: bool,
    ) -> BindGroup {
        if bindless_lightmaps {
            let (texture_views, samplers) = lightmap_slab.build_binding_arrays();
            render_device.create_bind_group(
                "lightmapped_mesh_bind_group",
                &pipeline_cache.get_bind_group_layout(&self.lightmapped),
                &[
                    entry::model(0, model.clone()),
                    entry::lightmaps_texture_view_array(4, &texture_views),
                    entry::lightmaps_sampler_array(5, &samplers),
                ],
            )
        } else {
            let (texture_view, sampler) = lightmap_slab.bindings_for_first_lightmap();
            render_device.create_bind_group(
                "lightmapped_mesh_bind_group",
                &pipeline_cache.get_bind_group_layout(&self.lightmapped),
                &[
                    entry::model(0, model.clone()),
                    entry::lightmaps_texture_view(4, texture_view),
                    entry::lightmaps_sampler(5, sampler),
                ],
            )
        }
    }
    /// Creates the bind group for skinned meshes with no morph targets.
    pub fn skinned(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_skin: &Buffer,
    ) -> BindGroup {
        render_device.create_bind_group(
            "skinned_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.skinned),
            &[
                entry::model(0, model.clone()),
                entry::skinning(render_device, 1, current_skin),
            ],
        )
    }
    /// Creates the bind group for skinned meshes with no morph targets, with
    /// the infrastructure to compute motion vectors.
    ///
    /// `current_skin` is the buffer of joint matrices for this frame;
    /// `prev_skin` is the buffer for the previous frame. The latter is used for
    /// motion vector computation. If there is no such applicable buffer,
    /// `current_skin` and `prev_skin` will reference the same buffer.
    pub fn skinned_motion(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_skin: &Buffer,
        prev_skin: &Buffer,
    ) -> BindGroup {
        render_device.create_bind_group(
            "skinned_motion_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.skinned_motion),
            &[
                entry::model(0, model.clone()),
                entry::skinning(render_device, 1, current_skin),
                entry::skinning(render_device, 6, prev_skin),
            ],
        )
    }
    /// Creates the bind group for meshes with no skins but morph targets.
    pub fn morphed(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_weights: &Buffer,
        targets: &TextureView,
    ) -> BindGroup {
        render_device.create_bind_group(
            "morphed_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.morphed),
            &[
                entry::model(0, model.clone()),
                entry::weights(2, current_weights),
                entry::targets(3, targets),
            ],
        )
    }
    /// Creates the bind group for meshes with no skins but morph targets, in
    /// addition to the infrastructure to compute motion vectors.
    ///
    /// `current_weights` is the buffer of morph weights for this frame;
    /// `prev_weights` is the buffer for the previous frame. The latter is used
    /// for motion vector computation. If there is no such applicable buffer,
    /// `current_weights` and `prev_weights` will reference the same buffer.
    pub fn morphed_motion(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_weights: &Buffer,
        targets: &TextureView,
        prev_weights: &Buffer,
    ) -> BindGroup {
        render_device.create_bind_group(
            "morphed_motion_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.morphed_motion),
            &[
                entry::model(0, model.clone()),
                entry::weights(2, current_weights),
                entry::targets(3, targets),
                entry::weights(7, prev_weights),
            ],
        )
    }
    /// Creates the bind group for meshes with skins and morph targets.
    pub fn morphed_skinned(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_skin: &Buffer,
        current_weights: &Buffer,
        targets: &TextureView,
    ) -> BindGroup {
        render_device.create_bind_group(
            "morphed_skinned_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.morphed_skinned),
            &[
                entry::model(0, model.clone()),
                entry::skinning(render_device, 1, current_skin),
                entry::weights(2, current_weights),
                entry::targets(3, targets),
            ],
        )
    }
    /// Creates the bind group for meshes with skins and morph targets, in
    /// addition to the infrastructure to compute motion vectors.
    ///
    /// See the documentation for [`MeshLayouts::skinned_motion`] and
    /// [`MeshLayouts::morphed_motion`] above for more information about the
    /// `current_skin`, `prev_skin`, `current_weights`, and `prev_weights`
    /// buffers.
    pub fn morphed_skinned_motion(
        &self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        model: &BindingResource,
        current_skin: &Buffer,
        current_weights: &Buffer,
        targets: &TextureView,
        prev_skin: &Buffer,
        prev_weights: &Buffer,
    ) -> BindGroup {
        render_device.create_bind_group(
            "morphed_skinned_motion_mesh_bind_group",
            &pipeline_cache.get_bind_group_layout(&self.morphed_skinned_motion),
            &[
                entry::model(0, model.clone()),
                entry::skinning(render_device, 1, current_skin),
                entry::weights(2, current_weights),
                entry::targets(3, targets),
                entry::skinning(render_device, 6, prev_skin),
                entry::weights(7, prev_weights),
            ],
        )
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/fog.rs | crates/bevy_pbr/src/render/fog.rs | use bevy_app::{App, Plugin};
use bevy_color::{ColorToComponents, LinearRgba};
use bevy_ecs::prelude::*;
use bevy_math::{Vec3, Vec4};
use bevy_render::{
extract_component::ExtractComponentPlugin,
render_resource::{DynamicUniformBuffer, ShaderType},
renderer::{RenderDevice, RenderQueue},
view::ExtractedView,
Render, RenderApp, RenderSystems,
};
use bevy_shader::load_shader_library;
use crate::{DistanceFog, FogFalloff};
/// The GPU-side representation of the fog configuration that's sent as a uniform to the shader
///
/// Populated per view by [`prepare_fog`]. The `be`/`bi` fields are overloaded
/// by falloff mode: linear packs `(start, end, 0)` into `be`; the exponential
/// modes pack `(density, 0, 0)` into `be`; atmospheric stores extinction in
/// `be` and inscattering in `bi`.
#[derive(Copy, Clone, ShaderType, Default, Debug)]
pub struct GpuFog {
    /// Fog color
    base_color: Vec4,
    /// The color used for the fog where the view direction aligns with directional lights
    directional_light_color: Vec4,
    /// Allocated differently depending on fog mode.
    /// See `mesh_view_types.wgsl` for a detailed explanation
    be: Vec3,
    /// The exponent applied to the directional light alignment calculation
    directional_light_exponent: f32,
    /// Allocated differently depending on fog mode.
    /// See `mesh_view_types.wgsl` for a detailed explanation
    bi: Vec3,
    /// Unsigned int representation of the active fog falloff mode
    /// (one of the `GPU_FOG_MODE_*` constants below)
    mode: u32,
}
// Important: These must be kept in sync with `mesh_view_types.wgsl`
// They are the values written into `GpuFog::mode` by `prepare_fog`.
/// No fog; used when a view has no `DistanceFog` component.
const GPU_FOG_MODE_OFF: u32 = 0;
/// Linear falloff between a start and end distance.
const GPU_FOG_MODE_LINEAR: u32 = 1;
/// Exponential falloff controlled by a density value.
const GPU_FOG_MODE_EXPONENTIAL: u32 = 2;
/// Exponential-squared falloff controlled by a density value.
const GPU_FOG_MODE_EXPONENTIAL_SQUARED: u32 = 3;
/// Atmospheric fog with separate extinction and inscattering components.
const GPU_FOG_MODE_ATMOSPHERIC: u32 = 4;
/// Metadata for fog
///
/// Holds one [`GpuFog`] entry per extracted view; each view's slot in the
/// dynamic uniform buffer is recorded in its [`ViewFogUniformOffset`]
/// component.
#[derive(Default, Resource)]
pub struct FogMeta {
    pub gpu_fogs: DynamicUniformBuffer<GpuFog>,
}
/// Prepares fog metadata and writes the fog-related uniform buffers to the GPU
pub fn prepare_fog(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
mut fog_meta: ResMut<FogMeta>,
views: Query<(Entity, Option<&DistanceFog>), With<ExtractedView>>,
) {
let views_iter = views.iter();
let view_count = views_iter.len();
let Some(mut writer) = fog_meta
.gpu_fogs
.get_writer(view_count, &render_device, &render_queue)
else {
return;
};
for (entity, fog) in views_iter {
let gpu_fog = if let Some(fog) = fog {
match &fog.falloff {
FogFalloff::Linear { start, end } => GpuFog {
mode: GPU_FOG_MODE_LINEAR,
base_color: LinearRgba::from(fog.color).to_vec4(),
directional_light_color: LinearRgba::from(fog.directional_light_color)
.to_vec4(),
directional_light_exponent: fog.directional_light_exponent,
be: Vec3::new(*start, *end, 0.0),
..Default::default()
},
FogFalloff::Exponential { density } => GpuFog {
mode: GPU_FOG_MODE_EXPONENTIAL,
base_color: LinearRgba::from(fog.color).to_vec4(),
directional_light_color: LinearRgba::from(fog.directional_light_color)
.to_vec4(),
directional_light_exponent: fog.directional_light_exponent,
be: Vec3::new(*density, 0.0, 0.0),
..Default::default()
},
FogFalloff::ExponentialSquared { density } => GpuFog {
mode: GPU_FOG_MODE_EXPONENTIAL_SQUARED,
base_color: LinearRgba::from(fog.color).to_vec4(),
directional_light_color: LinearRgba::from(fog.directional_light_color)
.to_vec4(),
directional_light_exponent: fog.directional_light_exponent,
be: Vec3::new(*density, 0.0, 0.0),
..Default::default()
},
FogFalloff::Atmospheric {
extinction,
inscattering,
} => GpuFog {
mode: GPU_FOG_MODE_ATMOSPHERIC,
base_color: LinearRgba::from(fog.color).to_vec4(),
directional_light_color: LinearRgba::from(fog.directional_light_color)
.to_vec4(),
directional_light_exponent: fog.directional_light_exponent,
be: *extinction,
bi: *inscattering,
},
}
} else {
// If no fog is added to a camera, by default it's off
GpuFog {
mode: GPU_FOG_MODE_OFF,
..Default::default()
}
};
// This is later read by `SetMeshViewBindGroup<I>`
commands.entity(entity).insert(ViewFogUniformOffset {
offset: writer.write(&gpu_fog),
});
}
}
/// Inserted on each `Entity` with an `ExtractedView` to keep track of its offset
/// in the `gpu_fogs` `DynamicUniformBuffer` within `FogMeta`
///
/// Written by [`prepare_fog`]; the offset is the dynamic-uniform offset to
/// bind when rendering that view.
#[derive(Component)]
pub struct ViewFogUniformOffset {
    pub offset: u32,
}
/// A plugin that consolidates fog extraction, preparation and related resources/assets
///
/// Registers the `fog.wgsl` shader library, extracts `DistanceFog` components
/// into the render world, and schedules [`prepare_fog`].
pub struct FogPlugin;
impl Plugin for FogPlugin {
    fn build(&self, app: &mut App) {
        // Make `fog.wgsl` importable by other shaders.
        load_shader_library!(app, "fog.wgsl");
        // Copy `DistanceFog` components from the main world each frame.
        app.add_plugins(ExtractComponentPlugin::<DistanceFog>::default());
        // The render sub-app may be absent (e.g. headless); skip silently then.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<FogMeta>()
                .add_systems(Render, prepare_fog.in_set(RenderSystems::PrepareResources));
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/render/skin.rs | crates/bevy_pbr/src/render/skin.rs | use core::mem::{self, size_of};
use std::sync::OnceLock;
use bevy_asset::{prelude::AssetChanged, Assets};
use bevy_camera::visibility::ViewVisibility;
use bevy_ecs::prelude::*;
use bevy_math::Mat4;
use bevy_mesh::skinning::{SkinnedMesh, SkinnedMeshInverseBindposes};
use bevy_platform::collections::hash_map::Entry;
use bevy_render::render_resource::{Buffer, BufferDescriptor};
use bevy_render::settings::WgpuLimits;
use bevy_render::sync_world::{MainEntity, MainEntityHashMap, MainEntityHashSet};
use bevy_render::{
batching::NoAutomaticBatching,
render_resource::BufferUsages,
renderer::{RenderDevice, RenderQueue},
Extract,
};
use bevy_transform::prelude::GlobalTransform;
use offset_allocator::{Allocation, Allocator};
use smallvec::SmallVec;
use tracing::error;
/// Maximum number of joints supported for skinned meshes.
///
/// It is used to allocate buffers.
/// The correctness of the value depends on the GPU/platform.
/// The current value is chosen because it is guaranteed to work everywhere.
/// To allow for bigger values, a check must be made for the limits
/// of the GPU at runtime, which would mean not using consts anymore.
pub const MAX_JOINTS: usize = 256;

/// The total number of joints we support.
///
/// This is 256 GiB worth of joint matrices, which we will never hit under any
/// reasonable circumstances.
///
/// NOTE(review): this value is passed to `Allocator::new`, and this module
/// drives the allocator in 4-joint units (see `JOINTS_PER_ALLOCATION_UNIT`
/// and `SkinUniformInfo::offset`), so the figure works out as
/// 2^30 units x 4 joints x 64 bytes = 256 GiB.
const MAX_TOTAL_JOINTS: u32 = 1024 * 1024 * 1024;

/// The number of joints that we allocate at a time.
///
/// Some hardware requires that uniforms be allocated on 256-byte boundaries, so
/// we need to allocate 4 64-byte matrices at a time to satisfy alignment
/// requirements.
const JOINTS_PER_ALLOCATION_UNIT: u32 = (256 / size_of::<Mat4>()) as u32;

/// The maximum ratio of the number of entities whose transforms changed to the
/// total number of joints before we re-extract all joints.
///
/// We use this as a heuristic to decide whether it's worth switching over to
/// fine-grained detection to determine which skins need extraction. If the
/// number of changed entities is over this threshold, we skip change detection
/// and simply re-extract the transforms of all joints.
const JOINT_EXTRACTION_THRESHOLD_FACTOR: f64 = 0.25;
/// The location of the first joint matrix in the skin uniform buffer.
///
/// One matrix is 64 bytes (`Mat4`), so the byte offset is always
/// `index * 64`; see [`SkinByteOffset::from_index`].
#[derive(Clone, Copy)]
pub struct SkinByteOffset {
    /// The byte offset of the first joint matrix.
    pub byte_offset: u32,
}
impl SkinByteOffset {
    /// Converts an offset expressed in whole joint matrices into a byte
    /// offset within the skin uniform buffer.
    const fn from_index(index: usize) -> Self {
        let byte_offset = (index * size_of::<Mat4>()) as u32;
        SkinByteOffset { byte_offset }
    }

    /// Returns this skin index in elements (not bytes).
    ///
    /// Each element is a 4x4 matrix.
    pub fn index(&self) -> u32 {
        self.byte_offset / (size_of::<Mat4>() as u32)
    }
}
/// The GPU buffers containing joint matrices for all skinned meshes.
///
/// This is double-buffered: we store the joint matrices of each mesh for the
/// previous frame in addition to those of each mesh for the current frame. This
/// is for motion vector calculation. Every frame, we swap buffers and overwrite
/// the joint matrix buffer from two frames ago with the data for the current
/// frame.
///
/// Notes on implementation: see comment on top of the `extract_skins` system.
#[derive(Resource)]
pub struct SkinUniforms {
    /// The CPU-side buffer that stores the joint matrices for skinned meshes in
    /// the current frame.
    pub current_staging_buffer: Vec<Mat4>,
    /// The GPU-side buffer that stores the joint matrices for skinned meshes in
    /// the current frame.
    pub current_buffer: Buffer,
    /// The GPU-side buffer that stores the joint matrices for skinned meshes in
    /// the previous frame.
    pub prev_buffer: Buffer,
    /// The offset allocator that manages the placement of the joints within the
    /// [`Self::current_buffer`].
    ///
    /// Allocations are made in units of [`JOINTS_PER_ALLOCATION_UNIT`] joints
    /// (4 matrices = 256 bytes) to satisfy uniform alignment requirements.
    allocator: Allocator,
    /// Allocation information that we keep about each skin.
    skin_uniform_info: MainEntityHashMap<SkinUniformInfo>,
    /// Maps each joint entity to the skins it's associated with.
    ///
    /// We use this in conjunction with change detection to only update the
    /// skins that need updating each frame.
    ///
    /// Note that conceptually this is a hash map of sets, but we use a
    /// [`SmallVec`] to avoid allocations for the vast majority of the cases in
    /// which each bone belongs to exactly one skin.
    joint_to_skins: MainEntityHashMap<SmallVec<[MainEntity; 1]>>,
    /// The total number of joints in the scene.
    ///
    /// We use this as part of our heuristic to decide whether to use
    /// fine-grained change detection.
    total_joints: usize,
}
impl FromWorld for SkinUniforms {
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();

        // Platforms without storage buffers fall back to uniform bindings
        // with dynamic offsets.
        let usage = if skins_use_uniform_buffers(&render_device.limits()) {
            BufferUsages::UNIFORM | BufferUsages::COPY_DST
        } else {
            BufferUsages::STORAGE | BufferUsages::COPY_DST
        };

        // Both buffers start at the minimum size (one full uniform binding's
        // worth of matrices) and are grown on demand by `prepare_skins`. They
        // are swapped every frame for motion-vector double buffering.
        let descriptor = BufferDescriptor {
            label: Some("skin uniform buffer"),
            size: (MAX_JOINTS * size_of::<Mat4>()) as u64,
            usage,
            mapped_at_creation: false,
        };

        Self {
            current_staging_buffer: Vec::new(),
            current_buffer: render_device.create_buffer(&descriptor),
            prev_buffer: render_device.create_buffer(&descriptor),
            allocator: Allocator::new(MAX_TOTAL_JOINTS),
            skin_uniform_info: MainEntityHashMap::default(),
            joint_to_skins: MainEntityHashMap::default(),
            total_joints: 0,
        }
    }
}
impl SkinUniforms {
    /// Returns the current offset in joints of the skin in the buffer.
    pub fn skin_index(&self, skin: MainEntity) -> Option<u32> {
        Some(self.skin_uniform_info.get(&skin)?.offset())
    }

    /// Returns the current offset in bytes of the skin in the buffer.
    pub fn skin_byte_offset(&self, skin: MainEntity) -> Option<SkinByteOffset> {
        let info = self.skin_uniform_info.get(&skin)?;
        Some(SkinByteOffset::from_index(info.offset() as usize))
    }

    /// Returns an iterator over all skins in the scene.
    pub fn all_skins(&self) -> impl Iterator<Item = &MainEntity> {
        self.skin_uniform_info.keys()
    }
}
/// Allocation information about each skin.
struct SkinUniformInfo {
    /// The allocation of the joints within the [`SkinUniforms::current_buffer`].
    ///
    /// The allocation's offset is in 4-joint units; see
    /// [`SkinUniformInfo::offset`] for the conversion to joints.
    allocation: Allocation,
    /// The entities that comprise the joints.
    joints: Vec<MainEntity>,
}
impl SkinUniformInfo {
    /// The offset in joints within the [`SkinUniforms::current_staging_buffer`].
    ///
    /// The allocator hands out 4-joint units, so the unit offset is scaled to
    /// a joint offset here.
    fn offset(&self) -> u32 {
        JOINTS_PER_ALLOCATION_UNIT * self.allocation.offset
    }
}
/// Returns true if skinning must use uniforms (and dynamic offsets) because
/// storage buffers aren't supported on the current platform.
///
/// NOTE(review): the answer is computed once, from the `limits` passed on the
/// first call, and cached in a process-wide `OnceLock`; later calls ignore
/// their `limits` argument. This assumes a single render device per process —
/// confirm if multiple devices with different limits ever become possible.
pub fn skins_use_uniform_buffers(limits: &WgpuLimits) -> bool {
    static SKINS_USE_UNIFORM_BUFFERS: OnceLock<bool> = OnceLock::new();
    *SKINS_USE_UNIFORM_BUFFERS.get_or_init(|| limits.max_storage_buffers_per_shader_stage == 0)
}
/// Uploads the buffers containing the joints to the GPU.
///
/// Runs after `extract_skins` has filled `current_staging_buffer`. Swaps the
/// current/previous GPU buffers (for motion vectors), grows both buffers if
/// the staging data no longer fits, and uploads the staging data.
pub fn prepare_skins(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    uniform: ResMut<SkinUniforms>,
) {
    let uniform = uniform.into_inner();

    // No skins in the scene this frame; nothing to upload.
    if uniform.current_staging_buffer.is_empty() {
        return;
    }

    // Swap current and previous buffers.
    //
    // After the swap, `current_buffer` holds two-frames-old data that's about
    // to be overwritten, and `prev_buffer` holds last frame's data.
    mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer);

    // Resize the buffers if necessary. Include extra space equal to `MAX_JOINTS`
    // because we need to be able to bind a full uniform buffer's worth of data
    // if skins use uniform buffers on this platform.
    let needed_size = (uniform.current_staging_buffer.len() as u64 + MAX_JOINTS as u64)
        * size_of::<Mat4>() as u64;
    if uniform.current_buffer.size() < needed_size {
        let mut new_size = uniform.current_buffer.size();
        while new_size < needed_size {
            // 1.5× growth factor.
            // The multiple-of-4 rounding keeps the size valid for
            // `write_buffer`, which requires 4-byte-aligned sizes.
            new_size = (new_size + new_size / 2).next_multiple_of(4);
        }

        // Create the new buffers.
        let buffer_usages = if skins_use_uniform_buffers(&render_device.limits()) {
            BufferUsages::UNIFORM
        } else {
            BufferUsages::STORAGE
        } | BufferUsages::COPY_DST;
        uniform.current_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some("skin uniform buffer"),
            usage: buffer_usages,
            size: new_size,
            mapped_at_creation: false,
        });
        uniform.prev_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some("skin uniform buffer"),
            usage: buffer_usages,
            size: new_size,
            mapped_at_creation: false,
        });

        // We've created a new `prev_buffer` but we don't have the previous joint
        // data needed to fill it out correctly. Use the current joint data
        // instead.
        //
        // TODO: This is a bug - will cause motion blur to ignore joint movement
        // for one frame.
        render_queue.write_buffer(
            &uniform.prev_buffer,
            0,
            bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]),
        );
    }

    // Write the data from `uniform.current_staging_buffer` into
    // `uniform.current_buffer`.
    render_queue.write_buffer(
        &uniform.current_buffer,
        0,
        bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]),
    );

    // We don't need to write `uniform.prev_buffer` because we already wrote it
    // last frame, and the data should still be on the GPU.
}
// Notes on implementation:
// We define the uniform binding as an array<mat4x4<f32>, N> in the shader,
// where N is the maximum number of Mat4s we can fit in the uniform binding,
// which may be as little as 16kB or 64kB. But, we may not need all N.
// We may only need, for example, 10.
//
// If we used uniform buffers ‘normally’ then we would have to write a full
// binding of data for each dynamic offset binding, which is wasteful, makes
// the buffer much larger than it needs to be, and uses more memory bandwidth
// to transfer the data, which then costs frame time So @superdump came up
// with this design: just bind data at the specified offset and interpret
// the data at that offset as an array<T, N> regardless of what is there.
//
// So instead of writing N Mat4s when you only need 10, you write 10, and
// then pad up to the next dynamic offset alignment. Then write the next.
// And for the last dynamic offset binding, make sure there is a full binding
// of data after it so that the buffer is of size
// `last dynamic offset` + `array<mat4x4<f32>>`.
//
// Then when binding the first dynamic offset, the first 10 entries in the array
// are what you expect, but if you read the 11th you’re reading ‘invalid’ data
// which could be padding or could be from the next binding.
//
// In this way, we can pack ‘variable sized arrays’ into uniform buffer bindings
// which normally only support fixed size arrays. You just have to make sure
// in the shader that you only read the values that are valid for that binding.
/// Extracts skinned-mesh data from the main world into [`SkinUniforms`].
///
/// Three phases: (1) allocate/free buffer space for skins whose visibility,
/// joint list, or inverse-bindpose asset changed; (2) write updated joint
/// matrices into the CPU staging buffer; (3) free skins whose `SkinnedMesh`
/// component was removed. See the "Notes on implementation" comment above for
/// why the uniform buffer is laid out the way it is.
pub fn extract_skins(
    skin_uniforms: ResMut<SkinUniforms>,
    skinned_meshes: Extract<Query<(Entity, &SkinnedMesh)>>,
    changed_skinned_meshes: Extract<
        Query<
            (Entity, &ViewVisibility, &SkinnedMesh),
            Or<(
                Changed<ViewVisibility>,
                Changed<SkinnedMesh>,
                AssetChanged<SkinnedMesh>,
            )>,
        >,
    >,
    skinned_mesh_inverse_bindposes: Extract<Res<Assets<SkinnedMeshInverseBindposes>>>,
    changed_transforms: Extract<Query<(Entity, &GlobalTransform), Changed<GlobalTransform>>>,
    joints: Extract<Query<&GlobalTransform>>,
    mut removed_skinned_meshes_query: Extract<RemovedComponents<SkinnedMesh>>,
) {
    let skin_uniforms = skin_uniforms.into_inner();

    // Find skins that have become visible or invisible on this frame. Allocate,
    // reallocate, or free space for them as necessary.
    add_or_delete_skins(
        skin_uniforms,
        &changed_skinned_meshes,
        &skinned_mesh_inverse_bindposes,
        &joints,
    );

    // Extract the transforms for all joints from the scene, and write them into
    // the staging buffer at the appropriate spot.
    extract_joints(
        skin_uniforms,
        &skinned_meshes,
        &changed_skinned_meshes,
        &skinned_mesh_inverse_bindposes,
        &changed_transforms,
        &joints,
    );

    // Delete skins that became invisible.
    for skinned_mesh_entity in removed_skinned_meshes_query.read() {
        // Only remove a skin if we didn't pick it up in `add_or_delete_skins`.
        // It's possible that a necessary component was removed and re-added in
        // the same frame.
        if !changed_skinned_meshes.contains(skinned_mesh_entity) {
            remove_skin(skin_uniforms, skinned_mesh_entity.into());
        }
    }
}
/// Searches for all skins that have become visible or invisible this frame and
/// allocates or frees buffer space for them as necessary.
fn add_or_delete_skins(
    skin_uniforms: &mut SkinUniforms,
    changed_skinned_meshes: &Query<
        (Entity, &ViewVisibility, &SkinnedMesh),
        Or<(
            Changed<ViewVisibility>,
            Changed<SkinnedMesh>,
            AssetChanged<SkinnedMesh>,
        )>,
    >,
    skinned_mesh_inverse_bindposes: &Assets<SkinnedMeshInverseBindposes>,
    joints: &Query<&GlobalTransform>,
) {
    // Every skinned mesh whose (1) visibility, (2) joint entities (part of
    // `SkinnedMesh`), or (3) associated `SkinnedMeshInverseBindposes` asset
    // changed gets its allocation rebuilt from scratch.
    for (entity, view_visibility, skinned_mesh) in changed_skinned_meshes {
        let main_entity = MainEntity::from(entity);

        // Throw away last frame's allocation, if there was one.
        remove_skin(skin_uniforms, main_entity);

        // Invisible skins don't get an allocation at all.
        if !(*view_visibility).get() {
            continue;
        }

        // (Re)allocate space for the skin and populate its joint matrices.
        add_skin(
            main_entity,
            skinned_mesh,
            skin_uniforms,
            skinned_mesh_inverse_bindposes,
            joints,
        );
    }
}
/// Extracts the global transforms of all joints and updates the staging buffer
/// as necessary.
fn extract_joints(
    skin_uniforms: &mut SkinUniforms,
    skinned_meshes: &Query<(Entity, &SkinnedMesh)>,
    changed_skinned_meshes: &Query<
        (Entity, &ViewVisibility, &SkinnedMesh),
        Or<(
            Changed<ViewVisibility>,
            Changed<SkinnedMesh>,
            AssetChanged<SkinnedMesh>,
        )>,
    >,
    skinned_mesh_inverse_bindposes: &Assets<SkinnedMeshInverseBindposes>,
    changed_transforms: &Query<(Entity, &GlobalTransform), Changed<GlobalTransform>>,
    joints: &Query<&GlobalTransform>,
) {
    // If the number of entities that changed transforms exceeds a certain
    // fraction (currently 25%) of the total joints in the scene, then skip
    // fine-grained change detection.
    //
    // Note that this is a crude heuristic, for performance reasons. It doesn't
    // consider the ratio of modified *joints* to total joints, only the ratio
    // of modified *entities* to total joints. Thus in the worst case we might
    // end up re-extracting all skins even though none of the joints changed.
    // But making the heuristic finer-grained would make it slower to evaluate,
    // and we don't want to lose performance.
    let threshold =
        (skin_uniforms.total_joints as f64 * JOINT_EXTRACTION_THRESHOLD_FACTOR).floor() as usize;

    // `nth(threshold)` succeeds iff more than `threshold` entities changed;
    // this avoids counting the whole (possibly huge) iterator.
    if changed_transforms.iter().nth(threshold).is_some() {
        // Go ahead and re-extract all skins in the scene.
        for (skin_entity, skin) in skinned_meshes {
            extract_joints_for_skin(
                skin_entity.into(),
                skin,
                skin_uniforms,
                changed_skinned_meshes,
                skinned_mesh_inverse_bindposes,
                joints,
            );
        }
        return;
    }

    // Use fine-grained change detection to figure out only the skins that need
    // to have their joints re-extracted.
    let dirty_skins: MainEntityHashSet = changed_transforms
        .iter()
        .flat_map(|(joint, _)| skin_uniforms.joint_to_skins.get(&MainEntity::from(joint)))
        .flat_map(|skin_joint_mappings| skin_joint_mappings.iter())
        .copied()
        .collect();

    // Re-extract the joints for only those skins.
    for skin_entity in dirty_skins {
        let Ok((_, skin)) = skinned_meshes.get(*skin_entity) else {
            continue;
        };
        extract_joints_for_skin(
            skin_entity,
            skin,
            skin_uniforms,
            changed_skinned_meshes,
            skinned_mesh_inverse_bindposes,
            joints,
        );
    }
}
/// Extracts all joints for a single skin and writes their transforms into the
/// CPU staging buffer.
///
/// Joints whose `GlobalTransform` can't be fetched are skipped, leaving their
/// previous matrix in the staging buffer untouched.
fn extract_joints_for_skin(
    skin_entity: MainEntity,
    skin: &SkinnedMesh,
    skin_uniforms: &mut SkinUniforms,
    changed_skinned_meshes: &Query<
        (Entity, &ViewVisibility, &SkinnedMesh),
        Or<(
            Changed<ViewVisibility>,
            Changed<SkinnedMesh>,
            AssetChanged<SkinnedMesh>,
        )>,
    >,
    skinned_mesh_inverse_bindposes: &Assets<SkinnedMeshInverseBindposes>,
    joints: &Query<&GlobalTransform>,
) {
    // If we initialized the skin this frame, we already populated all
    // the joints (in `add_skin`), so there's no need to populate them again.
    if changed_skinned_meshes.contains(*skin_entity) {
        return;
    }

    // Fetch information about the skin. A skin without allocation info or
    // without a loaded inverse-bindposes asset is silently skipped.
    let Some(skin_uniform_info) = skin_uniforms.skin_uniform_info.get(&skin_entity) else {
        return;
    };
    let Some(skinned_mesh_inverse_bindposes) =
        skinned_mesh_inverse_bindposes.get(&skin.inverse_bindposes)
    else {
        return;
    };

    // Calculate and write in the new joint matrices.
    // `zip` stops at the shorter of (joints, bindposes), so mismatched lengths
    // simply leave trailing entries untouched.
    for (joint_index, (&joint, skinned_mesh_inverse_bindpose)) in skin
        .joints
        .iter()
        .zip(skinned_mesh_inverse_bindposes.iter())
        .enumerate()
    {
        let Ok(joint_transform) = joints.get(joint) else {
            continue;
        };

        let joint_matrix = joint_transform.affine() * *skinned_mesh_inverse_bindpose;
        skin_uniforms.current_staging_buffer[skin_uniform_info.offset() as usize + joint_index] =
            joint_matrix;
    }
}
/// Allocates space for a new skin in the buffers, and populates its joints.
///
/// On allocator exhaustion, logs an error and leaves the skin unregistered.
fn add_skin(
    skinned_mesh_entity: MainEntity,
    skinned_mesh: &SkinnedMesh,
    skin_uniforms: &mut SkinUniforms,
    skinned_mesh_inverse_bindposes: &Assets<SkinnedMeshInverseBindposes>,
    joints: &Query<&GlobalTransform>,
) {
    // Allocate space for the joints.
    // The allocator works in 4-joint units, hence the `div_ceil`.
    let Some(allocation) = skin_uniforms.allocator.allocate(
        skinned_mesh
            .joints
            .len()
            .div_ceil(JOINTS_PER_ALLOCATION_UNIT as usize) as u32,
    ) else {
        error!(
            "Out of space for skin: {:?}. Tried to allocate space for {:?} joints.",
            skinned_mesh_entity,
            skinned_mesh.joints.len()
        );
        return;
    };

    // Store that allocation.
    let skin_uniform_info = SkinUniformInfo {
        allocation,
        joints: skinned_mesh
            .joints
            .iter()
            .map(|entity| MainEntity::from(*entity))
            .collect(),
    };

    let skinned_mesh_inverse_bindposes =
        skinned_mesh_inverse_bindposes.get(&skinned_mesh.inverse_bindposes);

    for (joint_index, &joint) in skinned_mesh.joints.iter().enumerate() {
        // Calculate the initial joint matrix.
        // If the asset isn't loaded yet or the joint has no transform, fall
        // back to the identity matrix rather than failing.
        let skinned_mesh_inverse_bindpose =
            skinned_mesh_inverse_bindposes.and_then(|skinned_mesh_inverse_bindposes| {
                skinned_mesh_inverse_bindposes.get(joint_index)
            });
        let joint_matrix = match (skinned_mesh_inverse_bindpose, joints.get(joint)) {
            (Some(skinned_mesh_inverse_bindpose), Ok(transform)) => {
                transform.affine() * *skinned_mesh_inverse_bindpose
            }
            _ => Mat4::IDENTITY,
        };

        // Write in the new joint matrix, growing the staging buffer if
        // necessary.
        let buffer_index = skin_uniform_info.offset() as usize + joint_index;
        if skin_uniforms.current_staging_buffer.len() < buffer_index + 1 {
            skin_uniforms
                .current_staging_buffer
                .resize(buffer_index + 1, Mat4::IDENTITY);
        }
        skin_uniforms.current_staging_buffer[buffer_index] = joint_matrix;

        // Record the inverse mapping from the joint back to the skin. We use
        // this in order to perform fine-grained joint extraction.
        skin_uniforms
            .joint_to_skins
            .entry(MainEntity::from(joint))
            .or_default()
            .push(skinned_mesh_entity);
    }

    // Record the number of joints.
    skin_uniforms.total_joints += skinned_mesh.joints.len();

    skin_uniforms
        .skin_uniform_info
        .insert(skinned_mesh_entity, skin_uniform_info);
}
/// Deallocates a skin and removes it from the [`SkinUniforms`].
fn remove_skin(skin_uniforms: &mut SkinUniforms, skinned_mesh_entity: MainEntity) {
    // A skin that was never registered (or was already removed) is a no-op.
    let Some(info) = skin_uniforms.skin_uniform_info.remove(&skinned_mesh_entity) else {
        return;
    };

    // Give the skin's joint-matrix region back to the allocator.
    skin_uniforms.allocator.free(info.allocation);

    // Unlink every joint from this skin, and drop map entries that become
    // empty so `joint_to_skins` doesn't accumulate dead joints.
    for &joint in &info.joints {
        let Entry::Occupied(mut occupied) = skin_uniforms.joint_to_skins.entry(joint) else {
            continue;
        };
        occupied.get_mut().retain(|skin| *skin != skinned_mesh_entity);
        if occupied.get().is_empty() {
            occupied.remove();
        }
    }

    // Keep the global joint count (used by the extraction heuristic) accurate.
    skin_uniforms.total_joints -= info.joints.len();
}
/// Disables automatic batching for skinned meshes on platforms where skinning
/// uses uniform buffers.
///
/// The skinned joints uniform buffer has to be bound at a dynamic offset per
/// entity and so cannot currently be batched on WebGL 2.
pub fn no_automatic_skin_batching(
    mut commands: Commands,
    query: Query<Entity, (With<SkinnedMesh>, Without<NoAutomaticBatching>)>,
    render_device: Res<RenderDevice>,
) {
    // Storage-buffer platforms don't need dynamic offsets, so batching is fine
    // there and nothing needs to be marked.
    if skins_use_uniform_buffers(&render_device.limits()) {
        for entity in &query {
            commands.entity(entity).try_insert(NoAutomaticBatching);
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/atmosphere/node.rs | crates/bevy_pbr/src/atmosphere/node.rs | use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{query::QueryItem, system::lifetimeless::Read, world::World};
use bevy_math::{UVec2, Vec3Swizzles};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
extract_component::DynamicUniformIndex,
render_graph::{NodeRunError, RenderGraphContext, RenderLabel, ViewNode},
render_resource::{ComputePass, ComputePassDescriptor, PipelineCache, RenderPassDescriptor},
renderer::RenderContext,
view::{ViewTarget, ViewUniformOffset},
};
use crate::{resources::GpuAtmosphere, ViewLightsUniformOffset};
use super::{
resources::{
AtmosphereBindGroups, AtmosphereLutPipelines, AtmosphereTransformsOffset,
RenderSkyPipelineId,
},
GpuAtmosphereSettings,
};
/// Render-graph labels for the atmosphere passes.
#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash, RenderLabel)]
pub enum AtmosphereNode {
    /// Computes the atmosphere lookup tables (see [`AtmosphereLutsNode`]).
    RenderLuts,
    /// Composites the sky onto the view target (see [`RenderSkyNode`]).
    RenderSky,
    /// Generates the atmosphere environment map.
    Environment,
}
/// Render-graph node that dispatches the compute passes which fill the four
/// atmosphere lookup tables (transmittance, multiscattering, sky-view, and
/// aerial-view).
#[derive(Default)]
pub(super) struct AtmosphereLutsNode {}
impl ViewNode for AtmosphereLutsNode {
    type ViewQuery = (
        Read<GpuAtmosphereSettings>,
        Read<AtmosphereBindGroups>,
        Read<DynamicUniformIndex<GpuAtmosphere>>,
        Read<DynamicUniformIndex<GpuAtmosphereSettings>>,
        Read<AtmosphereTransformsOffset>,
        Read<ViewUniformOffset>,
        Read<ViewLightsUniformOffset>,
    );

    // Runs the four LUT compute passes in dependency order: transmittance,
    // then multiscattering, then sky-view, then aerial-view.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            settings,
            bind_groups,
            atmosphere_uniforms_offset,
            settings_uniforms_offset,
            atmosphere_transforms_offset,
            view_uniforms_offset,
            lights_uniforms_offset,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let pipelines = world.resource::<AtmosphereLutPipelines>();
        let pipeline_cache = world.resource::<PipelineCache>();

        // If any pipeline is still compiling, skip this frame silently.
        let (
            Some(transmittance_lut_pipeline),
            Some(multiscattering_lut_pipeline),
            Some(sky_view_lut_pipeline),
            Some(aerial_view_lut_pipeline),
        ) = (
            pipeline_cache.get_compute_pipeline(pipelines.transmittance_lut),
            pipeline_cache.get_compute_pipeline(pipelines.multiscattering_lut),
            pipeline_cache.get_compute_pipeline(pipelines.sky_view_lut),
            pipeline_cache.get_compute_pipeline(pipelines.aerial_view_lut),
        )
        else {
            return Ok(());
        };

        let diagnostics = render_context.diagnostic_recorder();

        let command_encoder = render_context.command_encoder();

        // All four LUTs share a single compute pass (and a single GPU timing
        // span).
        let mut luts_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
            label: Some("atmosphere_luts"),
            timestamp_writes: None,
        });
        let pass_span = diagnostics.pass_span(&mut luts_pass, "atmosphere_luts");

        // Helper: dispatch enough 16x16 workgroups to cover a 2D texture.
        fn dispatch_2d(compute_pass: &mut ComputePass, size: UVec2) {
            const WORKGROUP_SIZE: u32 = 16;
            let workgroups_x = size.x.div_ceil(WORKGROUP_SIZE);
            let workgroups_y = size.y.div_ceil(WORKGROUP_SIZE);
            compute_pass.dispatch_workgroups(workgroups_x, workgroups_y, 1);
        }

        // Transmittance LUT
        luts_pass.set_pipeline(transmittance_lut_pipeline);
        luts_pass.set_bind_group(
            0,
            &bind_groups.transmittance_lut,
            &[
                atmosphere_uniforms_offset.index(),
                settings_uniforms_offset.index(),
            ],
        );
        dispatch_2d(&mut luts_pass, settings.transmittance_lut_size);

        // Multiscattering LUT
        //
        // Note: dispatched with one workgroup per texel (not via
        // `dispatch_2d`); its shader parallelizes over samples within a texel.
        luts_pass.set_pipeline(multiscattering_lut_pipeline);
        luts_pass.set_bind_group(
            0,
            &bind_groups.multiscattering_lut,
            &[
                atmosphere_uniforms_offset.index(),
                settings_uniforms_offset.index(),
            ],
        );
        luts_pass.dispatch_workgroups(
            settings.multiscattering_lut_size.x,
            settings.multiscattering_lut_size.y,
            1,
        );

        // Sky View LUT
        luts_pass.set_pipeline(sky_view_lut_pipeline);
        luts_pass.set_bind_group(
            0,
            &bind_groups.sky_view_lut,
            &[
                atmosphere_uniforms_offset.index(),
                settings_uniforms_offset.index(),
                atmosphere_transforms_offset.index(),
                view_uniforms_offset.offset,
                lights_uniforms_offset.offset,
            ],
        );
        dispatch_2d(&mut luts_pass, settings.sky_view_lut_size);

        // Aerial View LUT
        //
        // The LUT is 3D; the dispatch covers its xy extent and the shader
        // handles the depth dimension.
        luts_pass.set_pipeline(aerial_view_lut_pipeline);
        luts_pass.set_bind_group(
            0,
            &bind_groups.aerial_view_lut,
            &[
                atmosphere_uniforms_offset.index(),
                settings_uniforms_offset.index(),
                view_uniforms_offset.offset,
                lights_uniforms_offset.offset,
            ],
        );
        dispatch_2d(&mut luts_pass, settings.aerial_view_lut_size.xy());

        pass_span.end(&mut luts_pass);

        Ok(())
    }
}
/// Render-graph node that draws a fullscreen triangle compositing the sky
/// onto the view target, using the LUTs computed by [`AtmosphereLutsNode`].
#[derive(Default)]
pub(super) struct RenderSkyNode;
impl ViewNode for RenderSkyNode {
    type ViewQuery = (
        Read<ExtractedCamera>,
        Read<AtmosphereBindGroups>,
        Read<ViewTarget>,
        Read<DynamicUniformIndex<GpuAtmosphere>>,
        Read<DynamicUniformIndex<GpuAtmosphereSettings>>,
        Read<AtmosphereTransformsOffset>,
        Read<ViewUniformOffset>,
        Read<ViewLightsUniformOffset>,
        Read<RenderSkyPipelineId>,
        Option<Read<MainPassResolutionOverride>>,
    );

    fn run<'w>(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        (
            camera,
            atmosphere_bind_groups,
            view_target,
            atmosphere_uniforms_offset,
            settings_uniforms_offset,
            atmosphere_transforms_offset,
            view_uniforms_offset,
            lights_uniforms_offset,
            render_sky_pipeline_id,
            resolution_override,
        ): QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let pipeline_cache = world.resource::<PipelineCache>();
        // The pipeline may still be compiling; skip the frame silently.
        let Some(render_sky_pipeline) =
            pipeline_cache.get_render_pipeline(render_sky_pipeline_id.0)
        else {
            return Ok(());
        }; //TODO: warning

        let diagnostics = render_context.diagnostic_recorder();

        // Single color attachment, no depth: the sky is composited over
        // whatever is already in the view target.
        let mut render_sky_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("render_sky"),
            color_attachments: &[Some(view_target.get_color_attachment())],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_sky_pass, "render_sky");

        // Respect the camera viewport (possibly shrunk by a resolution
        // override).
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_sky_pass.set_camera_viewport(&viewport);
        }

        render_sky_pass.set_render_pipeline(render_sky_pipeline);
        render_sky_pass.set_bind_group(
            0,
            &atmosphere_bind_groups.render_sky,
            &[
                atmosphere_uniforms_offset.index(),
                settings_uniforms_offset.index(),
                atmosphere_transforms_offset.index(),
                view_uniforms_offset.offset,
                lights_uniforms_offset.offset,
            ],
        );
        // Fullscreen triangle: 3 vertices, positions generated in the vertex
        // shader.
        render_sky_pass.draw(0..3, 0..1);

        pass_span.end(&mut render_sky_pass);

        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/atmosphere/environment.rs | crates/bevy_pbr/src/atmosphere/environment.rs | use crate::{
resources::{
AtmosphereSampler, AtmosphereTextures, AtmosphereTransform, AtmosphereTransforms,
AtmosphereTransformsOffset, GpuAtmosphere,
},
ExtractedAtmosphere, GpuAtmosphereSettings, GpuLights, LightMeta, ViewLightsUniformOffset,
};
use bevy_asset::{load_embedded_asset, AssetServer, Assets, Handle, RenderAssetUsages};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{QueryState, With, Without},
resource::Resource,
system::{lifetimeless::Read, Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::Image;
use bevy_light::{AtmosphereEnvironmentMapLight, GeneratedEnvironmentMapLight};
use bevy_math::{Quat, UVec2};
use bevy_render::{
extract_component::{ComponentUniforms, DynamicUniformIndex, ExtractComponent},
render_asset::RenderAssets,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_resource::{binding_types::*, *},
renderer::{RenderContext, RenderDevice},
texture::{CachedTexture, GpuImage},
view::{ViewUniform, ViewUniformOffset, ViewUniforms},
};
use bevy_utils::default;
use tracing::warn;
// Render world representation of an environment map light for the atmosphere
#[derive(Component, ExtractComponent, Clone)]
pub struct AtmosphereEnvironmentMap {
pub environment_map: Handle<Image>,
pub size: UVec2,
}
#[derive(Component)]
pub struct AtmosphereProbeTextures {
pub environment: TextureView,
pub transmittance_lut: CachedTexture,
pub multiscattering_lut: CachedTexture,
pub sky_view_lut: CachedTexture,
pub aerial_view_lut: CachedTexture,
}
#[derive(Component)]
pub(crate) struct AtmosphereProbeBindGroups {
pub environment: BindGroup,
}
#[derive(Resource)]
pub struct AtmosphereProbeLayouts {
pub environment: BindGroupLayoutDescriptor,
}
#[derive(Resource)]
pub struct AtmosphereProbePipeline {
pub environment: CachedComputePipelineId,
}
/// Creates the bind-group layout for the probe environment compute pass and
/// inserts it as the [`AtmosphereProbeLayouts`] resource.
///
/// Binding indices mirror the other atmosphere passes so the same shader
/// binding scheme applies: 0-4 uniforms, 8-12 LUTs + sampler, 13 output.
pub fn init_atmosphere_probe_layout(mut commands: Commands) {
    let environment = BindGroupLayoutDescriptor::new(
        "environment_bind_group_layout",
        &BindGroupLayoutEntries::with_indices(
            ShaderStages::COMPUTE,
            (
                // uniforms
                (0, uniform_buffer::<GpuAtmosphere>(true)),
                (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                (2, uniform_buffer::<AtmosphereTransform>(true)),
                (3, uniform_buffer::<ViewUniform>(true)),
                (4, uniform_buffer::<GpuLights>(true)),
                // atmosphere luts and sampler
                (8, texture_2d(TextureSampleType::default())), // transmittance
                (9, texture_2d(TextureSampleType::default())), // multiscattering
                (10, texture_2d(TextureSampleType::default())), // sky view
                (11, texture_3d(TextureSampleType::default())), // aerial view
                (12, sampler(SamplerBindingType::Filtering)),
                // output 2D array texture
                (
                    13,
                    texture_storage_2d_array(
                        TextureFormat::Rgba16Float,
                        StorageTextureAccess::WriteOnly,
                    ),
                ),
            ),
        ),
    );
    commands.insert_resource(AtmosphereProbeLayouts { environment });
}
/// Builds the environment bind group for every probe that already has its
/// [`AtmosphereProbeTextures`] prepared.
///
/// NOTE(review): the `.unwrap()`s below assume every uniform buffer has been
/// written before this system runs — verify the system ordering guarantees
/// this; a missing buffer here would panic rather than skip the frame.
pub(super) fn prepare_atmosphere_probe_bind_groups(
    probes: Query<(Entity, &AtmosphereProbeTextures), With<AtmosphereEnvironmentMap>>,
    render_device: Res<RenderDevice>,
    layouts: Res<AtmosphereProbeLayouts>,
    atmosphere_sampler: Res<AtmosphereSampler>,
    view_uniforms: Res<ViewUniforms>,
    lights_uniforms: Res<LightMeta>,
    atmosphere_transforms: Res<AtmosphereTransforms>,
    atmosphere_uniforms: Res<ComponentUniforms<GpuAtmosphere>>,
    settings_uniforms: Res<ComponentUniforms<GpuAtmosphereSettings>>,
    pipeline_cache: Res<PipelineCache>,
    mut commands: Commands,
) {
    for (entity, textures) in &probes {
        let environment = render_device.create_bind_group(
            "environment_bind_group",
            &pipeline_cache.get_bind_group_layout(&layouts.environment),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_uniforms.binding().unwrap()),
                (1, settings_uniforms.binding().unwrap()),
                (2, atmosphere_transforms.uniforms().binding().unwrap()),
                (3, view_uniforms.uniforms.binding().unwrap()),
                (4, lights_uniforms.view_gpu_lights.binding().unwrap()),
                // atmosphere luts and sampler
                (8, &textures.transmittance_lut.default_view),
                (9, &textures.multiscattering_lut.default_view),
                (10, &textures.sky_view_lut.default_view),
                (11, &textures.aerial_view_lut.default_view),
                (12, &**atmosphere_sampler),
                // output 2D array texture
                (13, &textures.environment),
            )),
        );
        commands
            .entity(entity)
            .insert(AtmosphereProbeBindGroups { environment });
    }
}
/// Inserts [`AtmosphereProbeTextures`] on probes that don't have them yet,
/// creating a 2D-array view over the probe's cubemap and borrowing the LUTs
/// from the first atmosphere view.
pub(super) fn prepare_probe_textures(
    view_textures: Query<&AtmosphereTextures, With<ExtractedAtmosphere>>,
    probes: Query<
        (Entity, &AtmosphereEnvironmentMap),
        (
            With<AtmosphereEnvironmentMap>,
            Without<AtmosphereProbeTextures>,
        ),
    >,
    gpu_images: Res<RenderAssets<GpuImage>>,
    mut commands: Commands,
) {
    for (probe, render_env_map) in &probes {
        // The GPU image is prepared asynchronously and may not exist yet on
        // the first frame(s); skip instead of panicking. The
        // `Without<AtmosphereProbeTextures>` filter keeps this probe in the
        // query, so it is retried next frame.
        let Some(environment) = gpu_images.get(&render_env_map.environment_map) else {
            continue;
        };
        // Create a 2D-array view so the compute shader can write each of the
        // six cubemap faces as a separate array layer.
        let environment_view = environment.texture.create_view(&TextureViewDescriptor {
            dimension: Some(TextureViewDimension::D2Array),
            ..Default::default()
        });
        // Get the first view entity's textures to borrow
        if let Some(view_textures) = view_textures.iter().next() {
            commands.entity(probe).insert(AtmosphereProbeTextures {
                environment: environment_view,
                transmittance_lut: view_textures.transmittance_lut.clone(),
                multiscattering_lut: view_textures.multiscattering_lut.clone(),
                sky_view_lut: view_textures.sky_view_lut.clone(),
                aerial_view_lut: view_textures.aerial_view_lut.clone(),
            });
        }
    }
}
/// Queues the probe environment compute pipeline; compilation finishes
/// asynchronously in the [`PipelineCache`].
pub fn init_atmosphere_probe_pipeline(
    pipeline_cache: Res<PipelineCache>,
    layouts: Res<AtmosphereProbeLayouts>,
    asset_server: Res<AssetServer>,
    mut commands: Commands,
) {
    let shader = load_embedded_asset!(asset_server.as_ref(), "environment.wgsl");
    let descriptor = ComputePipelineDescriptor {
        label: Some("environment_pipeline".into()),
        layout: vec![layouts.environment.clone()],
        shader,
        ..default()
    };
    let environment = pipeline_cache.queue_compute_pipeline(descriptor);
    commands.insert_resource(AtmosphereProbePipeline { environment });
}
// Ensure power-of-two dimensions to avoid edge update issues on cubemap faces
pub fn validate_environment_map_size(size: UVec2) -> UVec2 {
    // Round each axis up to the next power of two, clamping zero to one.
    let new_size = UVec2::new(
        size.x.max(1).next_power_of_two(),
        size.y.max(1).next_power_of_two(),
    );
    if new_size == size {
        size
    } else {
        warn!(
            "Non-power-of-two AtmosphereEnvironmentMapLight size {}, correcting to {new_size}",
            size
        );
        new_size
    }
}
/// For each entity with an [`AtmosphereEnvironmentMapLight`] but no render
/// map yet, allocates a cubemap image and attaches both the render-world
/// component and a [`GeneratedEnvironmentMapLight`] referencing it.
pub fn prepare_atmosphere_probe_components(
    probes: Query<(Entity, &AtmosphereEnvironmentMapLight), (Without<AtmosphereEnvironmentMap>,)>,
    mut commands: Commands,
    mut images: ResMut<Assets<Image>>,
) {
    for (entity, env_map_light) in &probes {
        // Create a cubemap image in the main world that we can reference
        let new_size = validate_environment_map_size(env_map_light.size);
        let mut environment_image = Image::new_fill(
            Extent3d {
                width: new_size.x,
                height: new_size.y,
                depth_or_array_layers: 6, // one layer per cubemap face
            },
            TextureDimension::D2,
            // 8 bytes = one Rgba16Float texel (4 channels x 2 bytes), zeroed.
            &[0; 8],
            TextureFormat::Rgba16Float,
            RenderAssetUsages::all(),
        );
        // Sample as a cubemap even though storage is a 6-layer 2D array.
        environment_image.texture_view_descriptor = Some(TextureViewDescriptor {
            dimension: Some(TextureViewDimension::Cube),
            ..Default::default()
        });
        // STORAGE_BINDING: the probe compute pass writes it;
        // TEXTURE_BINDING/COPY_SRC: downstream lighting reads/copies it.
        environment_image.texture_descriptor.usage = TextureUsages::TEXTURE_BINDING
            | TextureUsages::STORAGE_BINDING
            | TextureUsages::COPY_SRC;
        // Add the image to assets to get a handle
        let environment_handle = images.add(environment_image);
        commands.entity(entity).insert(AtmosphereEnvironmentMap {
            environment_map: environment_handle.clone(),
            size: new_size,
        });
        commands
            .entity(entity)
            .insert(GeneratedEnvironmentMapLight {
                environment_map: environment_handle,
                intensity: env_map_light.intensity,
                rotation: Quat::IDENTITY,
                affects_lightmapped_mesh_diffuse: env_map_light.affects_lightmapped_mesh_diffuse,
            });
    }
}
/// Render-graph node that runs the probe environment compute pass, writing
/// the atmosphere into each probe's cubemap.
pub(super) struct EnvironmentNode {
    // Dynamic-uniform offsets looked up from the view this node runs for.
    main_view_query: QueryState<(
        Read<DynamicUniformIndex<GpuAtmosphere>>,
        Read<DynamicUniformIndex<GpuAtmosphereSettings>>,
        Read<AtmosphereTransformsOffset>,
        Read<ViewUniformOffset>,
        Read<ViewLightsUniformOffset>,
    )>,
    // All probes with prepared bind groups, iterated each run.
    probe_query: QueryState<(
        Read<AtmosphereProbeBindGroups>,
        Read<AtmosphereEnvironmentMap>,
    )>,
}
impl FromWorld for EnvironmentNode {
    /// Builds the node's cached query states from the render world.
    fn from_world(world: &mut World) -> Self {
        let main_view_query = QueryState::new(world);
        let probe_query = QueryState::new(world);
        Self {
            main_view_query,
            probe_query,
        }
    }
}
impl Node for EnvironmentNode {
    fn update(&mut self, world: &mut World) {
        self.main_view_query.update_archetypes(world);
        self.probe_query.update_archetypes(world);
    }

    /// Dispatches the environment compute shader once per probe, covering all
    /// six cubemap faces.
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let pipeline_cache = world.resource::<PipelineCache>();
        let pipelines = world.resource::<AtmosphereProbePipeline>();
        let view_entity = graph.view_entity();
        // The pipeline compiles asynchronously; skip the frame until ready.
        let Some(environment_pipeline) = pipeline_cache.get_compute_pipeline(pipelines.environment)
        else {
            return Ok(());
        };
        let Ok((
            atmosphere_uniforms_offset,
            settings_uniforms_offset,
            atmosphere_transforms_offset,
            view_uniforms_offset,
            lights_uniforms_offset,
        )) = self.main_view_query.get_manual(world, view_entity)
        else {
            return Ok(());
        };
        for (bind_groups, env_map_light) in self.probe_query.iter_manual(world) {
            let mut pass =
                render_context
                    .command_encoder()
                    .begin_compute_pass(&ComputePassDescriptor {
                        label: Some("environment_pass"),
                        timestamp_writes: None,
                    });
            pass.set_pipeline(environment_pipeline);
            pass.set_bind_group(
                0,
                &bind_groups.environment,
                &[
                    atmosphere_uniforms_offset.index(),
                    settings_uniforms_offset.index(),
                    atmosphere_transforms_offset.index(),
                    view_uniforms_offset.offset,
                    lights_uniforms_offset.offset,
                ],
            );
            // Round up so faces smaller than (or not a multiple of) the 8x8
            // workgroup tile are still fully covered; plain `size / 8` would
            // dispatch zero workgroups for faces under 8 pixels. Excess
            // invocations write out of bounds, which `textureStore` discards.
            pass.dispatch_workgroups(
                env_map_light.size.x.div_ceil(8),
                env_map_light.size.y.div_ceil(8),
                6, // 6 cubemap faces
            );
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/atmosphere/resources.rs | crates/bevy_pbr/src/atmosphere/resources.rs | use crate::{
ExtractedAtmosphere, GpuLights, GpuScatteringMedium, LightMeta, ScatteringMedium,
ScatteringMediumSampler,
};
use bevy_asset::{load_embedded_asset, AssetId, Handle};
use bevy_camera::{Camera, Camera3d};
use bevy_core_pipeline::FullscreenShader;
use bevy_derive::Deref;
use bevy_ecs::{
component::Component,
entity::Entity,
error::BevyError,
query::With,
resource::Resource,
system::{Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::ToExtents;
use bevy_math::{Affine3A, Mat4, Vec3, Vec3A};
use bevy_render::{
extract_component::ComponentUniforms,
render_asset::RenderAssets,
render_resource::{binding_types::*, *},
renderer::{RenderDevice, RenderQueue},
texture::{CachedTexture, TextureCache},
view::{ExtractedView, Msaa, ViewDepthTexture, ViewUniform, ViewUniforms},
};
use bevy_shader::Shader;
use bevy_utils::default;
use super::GpuAtmosphereSettings;
/// Bind-group layout descriptors for the four atmosphere LUT compute passes.
#[derive(Resource)]
pub(crate) struct AtmosphereBindGroupLayouts {
    pub transmittance_lut: BindGroupLayoutDescriptor,
    pub multiscattering_lut: BindGroupLayoutDescriptor,
    pub sky_view_lut: BindGroupLayoutDescriptor,
    pub aerial_view_lut: BindGroupLayoutDescriptor,
}
/// Layouts plus shader handles for the final render-sky fragment pass
/// (one layout each for the non-MSAA and MSAA depth-texture variants).
#[derive(Resource)]
pub(crate) struct RenderSkyBindGroupLayouts {
    pub render_sky: BindGroupLayoutDescriptor,
    pub render_sky_msaa: BindGroupLayoutDescriptor,
    pub fullscreen_shader: FullscreenShader,
    pub fragment_shader: Handle<Shader>,
}
impl AtmosphereBindGroupLayouts {
    /// Creates the layout descriptors for the four LUT compute passes.
    ///
    /// Binding indices are shared across all atmosphere shaders:
    /// 0-4 uniforms, 5-7 scattering-medium LUTs + sampler, 8-12 previously
    /// computed atmosphere LUTs + sampler, 13 the pass's output storage
    /// texture. Each pass only declares the subset it uses.
    pub fn new() -> Self {
        let transmittance_lut = BindGroupLayoutDescriptor::new(
            "transmittance_lut_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::COMPUTE,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // transmittance lut storage texture
                    (
                        13,
                        texture_storage_2d(
                            TextureFormat::Rgba16Float,
                            StorageTextureAccess::WriteOnly,
                        ),
                    ),
                ),
            ),
        );
        let multiscattering_lut = BindGroupLayoutDescriptor::new(
            "multiscattering_lut_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::COMPUTE,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // atmosphere luts and sampler
                    (8, texture_2d(TextureSampleType::default())), // transmittance
                    (12, sampler(SamplerBindingType::Filtering)),
                    // multiscattering lut storage texture
                    (
                        13,
                        texture_storage_2d(
                            TextureFormat::Rgba16Float,
                            StorageTextureAccess::WriteOnly,
                        ),
                    ),
                ),
            ),
        );
        let sky_view_lut = BindGroupLayoutDescriptor::new(
            "sky_view_lut_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::COMPUTE,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    (2, uniform_buffer::<AtmosphereTransform>(true)),
                    (3, uniform_buffer::<ViewUniform>(true)),
                    (4, uniform_buffer::<GpuLights>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // atmosphere luts and sampler
                    (8, texture_2d(TextureSampleType::default())), // transmittance
                    (9, texture_2d(TextureSampleType::default())), // multiscattering
                    (12, sampler(SamplerBindingType::Filtering)),
                    // sky view lut storage texture
                    (
                        13,
                        texture_storage_2d(
                            TextureFormat::Rgba16Float,
                            StorageTextureAccess::WriteOnly,
                        ),
                    ),
                ),
            ),
        );
        let aerial_view_lut = BindGroupLayoutDescriptor::new(
            "aerial_view_lut_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::COMPUTE,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    (3, uniform_buffer::<ViewUniform>(true)),
                    (4, uniform_buffer::<GpuLights>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // atmosphere luts and sampler
                    (8, texture_2d(TextureSampleType::default())), // transmittance
                    (9, texture_2d(TextureSampleType::default())), // multiscattering
                    (12, sampler(SamplerBindingType::Filtering)),
                    // aerial view lut storage texture (3D froxel volume)
                    (
                        13,
                        texture_storage_3d(
                            TextureFormat::Rgba16Float,
                            StorageTextureAccess::WriteOnly,
                        ),
                    ),
                ),
            ),
        );
        Self {
            transmittance_lut,
            multiscattering_lut,
            sky_view_lut,
            aerial_view_lut,
        }
    }
}
impl FromWorld for RenderSkyBindGroupLayouts {
    /// Builds the render-sky fragment layouts. The two layouts are identical
    /// except for binding 13: a plain depth texture vs. a multisampled one,
    /// selected at bind-group creation time based on the view's MSAA setting.
    fn from_world(world: &mut World) -> Self {
        let render_sky = BindGroupLayoutDescriptor::new(
            "render_sky_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::FRAGMENT,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    (2, uniform_buffer::<AtmosphereTransform>(true)),
                    (3, uniform_buffer::<ViewUniform>(true)),
                    (4, uniform_buffer::<GpuLights>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // atmosphere luts and sampler
                    (8, texture_2d(TextureSampleType::default())), // transmittance
                    (9, texture_2d(TextureSampleType::default())), // multiscattering
                    (10, texture_2d(TextureSampleType::default())), // sky view
                    (11, texture_3d(TextureSampleType::default())), // aerial view
                    (12, sampler(SamplerBindingType::Filtering)),
                    // view depth texture
                    (13, texture_2d(TextureSampleType::Depth)),
                ),
            ),
        );
        let render_sky_msaa = BindGroupLayoutDescriptor::new(
            "render_sky_msaa_bind_group_layout",
            &BindGroupLayoutEntries::with_indices(
                ShaderStages::FRAGMENT,
                (
                    (0, uniform_buffer::<GpuAtmosphere>(true)),
                    (1, uniform_buffer::<GpuAtmosphereSettings>(true)),
                    (2, uniform_buffer::<AtmosphereTransform>(true)),
                    (3, uniform_buffer::<ViewUniform>(true)),
                    (4, uniform_buffer::<GpuLights>(true)),
                    // scattering medium luts and sampler
                    (5, texture_2d(TextureSampleType::default())),
                    (6, texture_2d(TextureSampleType::default())),
                    (7, sampler(SamplerBindingType::Filtering)),
                    // atmosphere luts and sampler
                    (8, texture_2d(TextureSampleType::default())), // transmittance
                    (9, texture_2d(TextureSampleType::default())), // multiscattering
                    (10, texture_2d(TextureSampleType::default())), // sky view
                    (11, texture_3d(TextureSampleType::default())), // aerial view
                    (12, sampler(SamplerBindingType::Filtering)),
                    // view depth texture
                    (13, texture_2d_multisampled(TextureSampleType::Depth)),
                ),
            ),
        );
        Self {
            render_sky,
            render_sky_msaa,
            fullscreen_shader: world.resource::<FullscreenShader>().clone(),
            fragment_shader: load_embedded_asset!(world, "render_sky.wgsl"),
        }
    }
}
/// Shared linear-filtering sampler used to read the atmosphere LUTs.
#[derive(Resource, Deref)]
pub struct AtmosphereSampler(Sampler);
impl FromWorld for AtmosphereSampler {
    /// Creates the LUT sampler: bilinear within a mip. The LUTs have a single
    /// mip level, so the mipmap filter is effectively unused.
    fn from_world(world: &mut World) -> Self {
        let descriptor = SamplerDescriptor {
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            mipmap_filter: FilterMode::Nearest,
            ..Default::default()
        };
        let device = world.resource::<RenderDevice>();
        Self(device.create_sampler(&descriptor))
    }
}
/// Cached compute pipeline ids for the four atmosphere LUT passes.
#[derive(Resource)]
pub(crate) struct AtmosphereLutPipelines {
    pub transmittance_lut: CachedComputePipelineId,
    pub multiscattering_lut: CachedComputePipelineId,
    pub sky_view_lut: CachedComputePipelineId,
    pub aerial_view_lut: CachedComputePipelineId,
}
impl FromWorld for AtmosphereLutPipelines {
    /// Queues all four LUT compute pipelines; they compile asynchronously in
    /// the [`PipelineCache`].
    fn from_world(world: &mut World) -> Self {
        let pipeline_cache = world.resource::<PipelineCache>();
        let layouts = world.resource::<AtmosphereBindGroupLayouts>();
        let transmittance_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("transmittance_lut_pipeline".into()),
            layout: vec![layouts.transmittance_lut.clone()],
            shader: load_embedded_asset!(world, "transmittance_lut.wgsl"),
            ..default()
        });
        let multiscattering_lut =
            pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
                label: Some("multi_scattering_lut_pipeline".into()),
                layout: vec![layouts.multiscattering_lut.clone()],
                shader: load_embedded_asset!(world, "multiscattering_lut.wgsl"),
                ..default()
            });
        let sky_view_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("sky_view_lut_pipeline".into()),
            layout: vec![layouts.sky_view_lut.clone()],
            shader: load_embedded_asset!(world, "sky_view_lut.wgsl"),
            ..default()
        });
        let aerial_view_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor {
            label: Some("aerial_view_lut_pipeline".into()),
            layout: vec![layouts.aerial_view_lut.clone()],
            shader: load_embedded_asset!(world, "aerial_view_lut.wgsl"),
            ..default()
        });
        Self {
            transmittance_lut,
            multiscattering_lut,
            sky_view_lut,
            aerial_view_lut,
        }
    }
}
/// Per-view id of the specialized render-sky pipeline.
#[derive(Component)]
pub(crate) struct RenderSkyPipelineId(pub CachedRenderPipelineId);
/// Specialization key for the render-sky pipeline: MSAA sample count and
/// whether the device supports dual-source blending.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub(crate) struct RenderSkyPipelineKey {
    pub msaa_samples: u32,
    pub dual_source_blending: bool,
}
impl SpecializedRenderPipeline for RenderSkyBindGroupLayouts {
    type Key = RenderSkyPipelineKey;
    /// Builds the fullscreen render-sky pipeline descriptor for a given MSAA
    /// count / dual-source-blending combination.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let mut shader_defs = Vec::new();
        if key.msaa_samples > 1 {
            shader_defs.push("MULTISAMPLED".into());
        }
        if key.dual_source_blending {
            shader_defs.push("DUAL_SOURCE_BLENDING".into());
        }
        // With dual-source blending the shader's second color output (Src1)
        // modulates the destination; otherwise fall back to the fragment's
        // alpha channel. NOTE(review): exact semantics depend on
        // render_sky.wgsl's outputs — confirm against the shader.
        let dst_factor = if key.dual_source_blending {
            BlendFactor::Src1
        } else {
            BlendFactor::SrcAlpha
        };
        RenderPipelineDescriptor {
            label: Some(format!("render_sky_pipeline_{}", key.msaa_samples).into()),
            // Pick the layout variant matching the depth texture's sampling
            // (multisampled vs. not).
            layout: vec![if key.msaa_samples == 1 {
                self.render_sky.clone()
            } else {
                self.render_sky_msaa.clone()
            }],
            vertex: self.fullscreen_shader.to_vertex_state(),
            fragment: Some(FragmentState {
                shader: self.fragment_shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format: TextureFormat::Rgba16Float,
                    blend: Some(BlendState {
                        color: BlendComponent {
                            src_factor: BlendFactor::One,
                            dst_factor,
                            operation: BlendOperation::Add,
                        },
                        // Leave destination alpha untouched.
                        alpha: BlendComponent {
                            src_factor: BlendFactor::Zero,
                            dst_factor: BlendFactor::One,
                            operation: BlendOperation::Add,
                        },
                    }),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            multisample: MultisampleState {
                count: key.msaa_samples,
                ..default()
            },
            ..default()
        }
    }
}
/// Specializes (or fetches the cached) render-sky pipeline for every
/// atmosphere camera and records its id on the view entity.
pub(super) fn queue_render_sky_pipelines(
    views: Query<(Entity, &Msaa), (With<Camera>, With<ExtractedAtmosphere>)>,
    pipeline_cache: Res<PipelineCache>,
    layouts: Res<RenderSkyBindGroupLayouts>,
    mut specializer: ResMut<SpecializedRenderPipelines<RenderSkyBindGroupLayouts>>,
    render_device: Res<RenderDevice>,
    mut commands: Commands,
) {
    // Device features don't change per view; query them once.
    let dual_source_blending = render_device
        .features()
        .contains(WgpuFeatures::DUAL_SOURCE_BLENDING);
    for (entity, msaa) in &views {
        let key = RenderSkyPipelineKey {
            msaa_samples: msaa.samples(),
            dual_source_blending,
        };
        let id = specializer.specialize(&pipeline_cache, &layouts, key);
        commands.entity(entity).insert(RenderSkyPipelineId(id));
    }
}
/// Per-view atmosphere LUT textures (the aerial view LUT is a 3D texture;
/// the rest are 2D).
#[derive(Component)]
pub struct AtmosphereTextures {
    pub transmittance_lut: CachedTexture,
    pub multiscattering_lut: CachedTexture,
    pub sky_view_lut: CachedTexture,
    pub aerial_view_lut: CachedTexture,
}
pub(super) fn prepare_atmosphere_textures(
views: Query<(Entity, &GpuAtmosphereSettings), With<ExtractedAtmosphere>>,
render_device: Res<RenderDevice>,
mut texture_cache: ResMut<TextureCache>,
mut commands: Commands,
) {
for (entity, lut_settings) in &views {
let transmittance_lut = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("transmittance_lut"),
size: lut_settings.transmittance_lut_size.to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::Rgba16Float,
usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
);
let multiscattering_lut = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("multiscattering_lut"),
size: lut_settings.multiscattering_lut_size.to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::Rgba16Float,
usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
);
let sky_view_lut = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("sky_view_lut"),
size: lut_settings.sky_view_lut_size.to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D2,
format: TextureFormat::Rgba16Float,
usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
);
let aerial_view_lut = texture_cache.get(
&render_device,
TextureDescriptor {
label: Some("aerial_view_lut"),
size: lut_settings.aerial_view_lut_size.to_extents(),
mip_level_count: 1,
sample_count: 1,
dimension: TextureDimension::D3,
format: TextureFormat::Rgba16Float,
usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING,
view_formats: &[],
},
);
commands.entity(entity).insert({
AtmosphereTextures {
transmittance_lut,
multiscattering_lut,
sky_view_lut,
aerial_view_lut,
}
});
}
}
#[derive(Copy, Clone, Debug, thiserror::Error)]
#[error("ScatteringMedium missing with id {0:?}: make sure the asset was not removed.")]
struct ScatteringMediumMissingError(AssetId<ScatteringMedium>);
/// The shader-uniform representation of an Atmosphere.
#[derive(Clone, Component, ShaderType)]
pub struct GpuAtmosphere {
    //TODO: rename to Planet later?
    // Albedo of the planet surface used for ground bounce lighting.
    pub ground_albedo: Vec3,
    // Planet surface radius (bottom of the atmosphere shell).
    pub bottom_radius: f32,
    // Outer radius of the atmosphere shell.
    pub top_radius: f32,
}
/// Mirrors each [`ExtractedAtmosphere`] into its GPU-uniform component so the
/// [`ComponentUniforms`] systems can upload it.
pub fn prepare_atmosphere_uniforms(
    mut commands: Commands,
    atmospheres: Query<(Entity, &ExtractedAtmosphere)>,
) -> Result<(), BevyError> {
    for (entity, atmosphere) in &atmospheres {
        let uniform = GpuAtmosphere {
            ground_albedo: atmosphere.ground_albedo,
            bottom_radius: atmosphere.bottom_radius,
            top_radius: atmosphere.top_radius,
        };
        commands.entity(entity).insert(uniform);
    }
    Ok(())
}
/// Dynamic uniform buffer holding one [`AtmosphereTransform`] per
/// atmosphere view, rewritten each frame.
#[derive(Resource, Default)]
pub struct AtmosphereTransforms {
    uniforms: DynamicUniformBuffer<AtmosphereTransform>,
}
impl AtmosphereTransforms {
    /// Read-only access to the underlying uniform buffer (for binding).
    #[inline]
    pub fn uniforms(&self) -> &DynamicUniformBuffer<AtmosphereTransform> {
        &self.uniforms
    }
}
/// Per-view transform from atmosphere space to world space.
#[derive(ShaderType)]
pub struct AtmosphereTransform {
    world_from_atmosphere: Mat4,
}
/// Dynamic-uniform offset of a view's [`AtmosphereTransform`] within
/// [`AtmosphereTransforms`].
#[derive(Component)]
pub struct AtmosphereTransformsOffset {
    index: u32,
}
impl AtmosphereTransformsOffset {
    /// The dynamic offset to pass when binding the transforms buffer.
    #[inline]
    pub fn index(&self) -> u32 {
        self.index
    }
}
/// Writes one atmosphere-space transform per view: a frame whose Y axis is
/// world-up and whose Z axis is the camera's horizontal forward, positioned at
/// the camera.
pub(super) fn prepare_atmosphere_transforms(
    views: Query<(Entity, &ExtractedView), (With<ExtractedAtmosphere>, With<Camera3d>)>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut atmo_uniforms: ResMut<AtmosphereTransforms>,
    mut commands: Commands,
) {
    let atmo_count = views.iter().len();
    let Some(mut writer) =
        atmo_uniforms
            .uniforms
            .get_writer(atmo_count, &render_device, &render_queue)
    else {
        return;
    };
    for (entity, view) in &views {
        let world_from_view = view.world_from_view.affine();
        let camera_z = world_from_view.matrix3.z_axis;
        let camera_y = world_from_view.matrix3.y_axis;
        // Project the camera's forward axis onto the horizontal plane; if the
        // camera looks straight up/down, fall back to its Y axis instead.
        // NOTE(review): the fallback `normalize()` can still produce NaNs if
        // camera_y is also vertical (e.g. unusual roll) — confirm upstream
        // constraints rule this out.
        let atmo_z = camera_z
            .with_y(0.0)
            .try_normalize()
            .unwrap_or_else(|| camera_y.with_y(0.0).normalize());
        let atmo_y = Vec3A::Y;
        let atmo_x = atmo_y.cross(atmo_z).normalize();
        let world_from_atmosphere =
            Affine3A::from_cols(atmo_x, atmo_y, atmo_z, world_from_view.translation);
        let world_from_atmosphere = Mat4::from(world_from_atmosphere);
        commands.entity(entity).insert(AtmosphereTransformsOffset {
            index: writer.write(&AtmosphereTransform {
                world_from_atmosphere,
            }),
        });
    }
}
/// Per-view bind groups for the four LUT compute passes and the render-sky
/// fragment pass.
#[derive(Component)]
pub(crate) struct AtmosphereBindGroups {
    pub transmittance_lut: BindGroup,
    pub multiscattering_lut: BindGroup,
    pub sky_view_lut: BindGroup,
    pub aerial_view_lut: BindGroup,
    pub render_sky: BindGroup,
}
/// Which shared uniform buffer was missing when building bind groups.
#[derive(Copy, Clone, Debug, thiserror::Error)]
enum AtmosphereBindGroupError {
    #[error("Failed to prepare atmosphere bind groups. Atmosphere uniform buffer missing")]
    Atmosphere,
    #[error(
        "Failed to prepare atmosphere bind groups. AtmosphereTransforms uniform buffer missing"
    )]
    Transforms,
    #[error("Failed to prepare atmosphere bind groups. AtmosphereSettings uniform buffer missing")]
    Settings,
    #[error("Failed to prepare atmosphere bind groups. View uniform buffer missing")]
    ViewUniforms,
    #[error("Failed to prepare atmosphere bind groups. Light uniform buffer missing")]
    LightUniforms,
}
/// Builds the per-view bind groups for the four LUT compute passes and the
/// render-sky pass.
///
/// Errors if any shared uniform buffer has not been written this frame, or if
/// a view's scattering-medium asset is missing.
pub(super) fn prepare_atmosphere_bind_groups(
    views: Query<
        (
            Entity,
            &ExtractedAtmosphere,
            &AtmosphereTextures,
            &ViewDepthTexture,
            &Msaa,
        ),
        (With<Camera3d>, With<ExtractedAtmosphere>),
    >,
    render_device: Res<RenderDevice>,
    layouts: Res<AtmosphereBindGroupLayouts>,
    render_sky_layouts: Res<RenderSkyBindGroupLayouts>,
    atmosphere_sampler: Res<AtmosphereSampler>,
    view_uniforms: Res<ViewUniforms>,
    lights_uniforms: Res<LightMeta>,
    atmosphere_transforms: Res<AtmosphereTransforms>,
    atmosphere_uniforms: Res<ComponentUniforms<GpuAtmosphere>>,
    settings_uniforms: Res<ComponentUniforms<GpuAtmosphereSettings>>,
    gpu_media: Res<RenderAssets<GpuScatteringMedium>>,
    medium_sampler: Res<ScatteringMediumSampler>,
    pipeline_cache: Res<PipelineCache>,
    mut commands: Commands,
) -> Result<(), BevyError> {
    // No atmosphere views: the shared buffers may legitimately be unwritten,
    // so bail out before the binding checks below.
    if views.iter().len() == 0 {
        return Ok(());
    }
    let atmosphere_binding = atmosphere_uniforms
        .binding()
        .ok_or(AtmosphereBindGroupError::Atmosphere)?;
    let transforms_binding = atmosphere_transforms
        .uniforms()
        .binding()
        .ok_or(AtmosphereBindGroupError::Transforms)?;
    let settings_binding = settings_uniforms
        .binding()
        .ok_or(AtmosphereBindGroupError::Settings)?;
    let view_binding = view_uniforms
        .uniforms
        .binding()
        .ok_or(AtmosphereBindGroupError::ViewUniforms)?;
    let lights_binding = lights_uniforms
        .view_gpu_lights
        .binding()
        .ok_or(AtmosphereBindGroupError::LightUniforms)?;
    for (entity, atmosphere, textures, view_depth_texture, msaa) in &views {
        let gpu_medium = gpu_media
            .get(atmosphere.medium)
            .ok_or(ScatteringMediumMissingError(atmosphere.medium))?;
        let transmittance_lut = render_device.create_bind_group(
            "transmittance_lut_bind_group",
            &pipeline_cache.get_bind_group_layout(&layouts.transmittance_lut),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_binding.clone()),
                (1, settings_binding.clone()),
                // scattering medium luts and sampler
                (5, &gpu_medium.density_lut_view),
                (6, &gpu_medium.scattering_lut_view),
                (7, medium_sampler.sampler()),
                // transmittance lut storage texture
                (13, &textures.transmittance_lut.default_view),
            )),
        );
        let multiscattering_lut = render_device.create_bind_group(
            "multiscattering_lut_bind_group",
            &pipeline_cache.get_bind_group_layout(&layouts.multiscattering_lut),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_binding.clone()),
                (1, settings_binding.clone()),
                // scattering medium luts and sampler
                (5, &gpu_medium.density_lut_view),
                (6, &gpu_medium.scattering_lut_view),
                (7, medium_sampler.sampler()),
                // atmosphere luts and sampler
                (8, &textures.transmittance_lut.default_view),
                (12, &**atmosphere_sampler),
                // multiscattering lut storage texture
                (13, &textures.multiscattering_lut.default_view),
            )),
        );
        let sky_view_lut = render_device.create_bind_group(
            "sky_view_lut_bind_group",
            &pipeline_cache.get_bind_group_layout(&layouts.sky_view_lut),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_binding.clone()),
                (1, settings_binding.clone()),
                (2, transforms_binding.clone()),
                (3, view_binding.clone()),
                (4, lights_binding.clone()),
                // scattering medium luts and sampler
                (5, &gpu_medium.density_lut_view),
                (6, &gpu_medium.scattering_lut_view),
                (7, medium_sampler.sampler()),
                // atmosphere luts and sampler
                (8, &textures.transmittance_lut.default_view),
                (9, &textures.multiscattering_lut.default_view),
                (12, &**atmosphere_sampler),
                // sky view lut storage texture
                (13, &textures.sky_view_lut.default_view),
            )),
        );
        let aerial_view_lut = render_device.create_bind_group(
            // Fixed label: this previously read "sky_view_lut_bind_group",
            // which mislabeled this bind group in debuggers/captures.
            "aerial_view_lut_bind_group",
            &pipeline_cache.get_bind_group_layout(&layouts.aerial_view_lut),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_binding.clone()),
                (1, settings_binding.clone()),
                (3, view_binding.clone()),
                (4, lights_binding.clone()),
                // scattering medium luts and sampler
                (5, &gpu_medium.density_lut_view),
                (6, &gpu_medium.scattering_lut_view),
                (7, medium_sampler.sampler()),
                // atmosphere luts and sampler
                (8, &textures.transmittance_lut.default_view),
                (9, &textures.multiscattering_lut.default_view),
                (12, &**atmosphere_sampler),
                // aerial view lut storage texture
                (13, &textures.aerial_view_lut.default_view),
            )),
        );
        let render_sky = render_device.create_bind_group(
            "render_sky_bind_group",
            // The depth texture binding differs between MSAA and non-MSAA.
            &pipeline_cache.get_bind_group_layout(if *msaa == Msaa::Off {
                &render_sky_layouts.render_sky
            } else {
                &render_sky_layouts.render_sky_msaa
            }),
            &BindGroupEntries::with_indices((
                // uniforms
                (0, atmosphere_binding.clone()),
                (1, settings_binding.clone()),
                (2, transforms_binding.clone()),
                (3, view_binding.clone()),
                (4, lights_binding.clone()),
                // scattering medium luts and sampler
                (5, &gpu_medium.density_lut_view),
                (6, &gpu_medium.scattering_lut_view),
                (7, medium_sampler.sampler()),
                // atmosphere luts and sampler
                (8, &textures.transmittance_lut.default_view),
                (9, &textures.multiscattering_lut.default_view),
                (10, &textures.sky_view_lut.default_view),
                (11, &textures.aerial_view_lut.default_view),
                (12, &**atmosphere_sampler),
                // view depth texture
                (13, view_depth_texture.view()),
            )),
        );
        commands.entity(entity).insert(AtmosphereBindGroups {
            transmittance_lut,
            multiscattering_lut,
            sky_view_lut,
            aerial_view_lut,
            render_sky,
        });
    }
    Ok(())
}
/// CPU-side layout of the combined atmosphere parameters that are uploaded to
/// the GPU as a single storage buffer (see [`AtmosphereBuffer`]).
#[derive(ShaderType)]
#[repr(C)]
pub(crate) struct AtmosphereData {
    // Planet shape / albedo parameters for the active atmosphere camera.
    pub atmosphere: GpuAtmosphere,
    // LUT sizes and sample counts controlling LUT generation.
    pub settings: GpuAtmosphereSettings,
}
/// Inserts the [`AtmosphereBuffer`] resource, pre-filled with zeroed
/// placeholder data. Real per-frame values are written by
/// `write_atmosphere_buffer`.
pub fn init_atmosphere_buffer(mut commands: Commands) {
    // Placeholder contents; these are overwritten before rendering.
    let placeholder = AtmosphereData {
        atmosphere: GpuAtmosphere {
            ground_albedo: Vec3::ZERO,
            bottom_radius: 0.0,
            top_radius: 0.0,
        },
        settings: GpuAtmosphereSettings::default(),
    };
    let buffer = StorageBuffer::from(placeholder);
    commands.insert_resource(AtmosphereBuffer { buffer });
}
/// Render-world resource holding the GPU storage buffer with the combined
/// atmosphere parameters ([`AtmosphereData`]).
#[derive(Resource)]
pub struct AtmosphereBuffer {
    // Filled and uploaded each frame by `write_atmosphere_buffer`.
    pub(crate) buffer: StorageBuffer<AtmosphereData>,
}
/// Copies the prepared atmosphere parameters of the atmosphere camera into
/// [`AtmosphereBuffer`] and schedules the GPU upload.
pub(crate) fn write_atmosphere_buffer(
    device: Res<RenderDevice>,
    queue: Res<RenderQueue>,
    atmosphere_entity: Query<(&GpuAtmosphere, &GpuAtmosphereSettings), With<Camera3d>>,
    mut atmosphere_buffer: ResMut<AtmosphereBuffer>,
) {
    // NOTE(review): `single()` fails — and the write is silently skipped —
    // when zero or more than one atmosphere camera exists; confirm the
    // intended multi-camera behavior.
    let Ok((atmosphere, settings)) = atmosphere_entity.single() else {
        return;
    };
    atmosphere_buffer.buffer.set(AtmosphereData {
        atmosphere: atmosphere.clone(),
        settings: settings.clone(),
    });
    // Performs the actual buffer write on the queue.
    atmosphere_buffer.buffer.write_buffer(&device, &queue);
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_pbr/src/atmosphere/mod.rs | crates/bevy_pbr/src/atmosphere/mod.rs | //! Procedural Atmospheric Scattering.
//!
//! This plugin implements [Hillaire's 2020 paper](https://sebh.github.io/publications/egsr2020.pdf)
//! on real-time atmospheric scattering. While it *will* work simply as a
//! procedural skybox, it also does much more. It supports dynamic
//! time-of-day, multiple directional lights, and since it's applied as a post-processing
//! effect *on top* of the existing skybox, a starry skybox would automatically
//! show based on the time of day. Scattering in front of terrain (similar
//! to distance fog, but more complex) is handled as well, and takes into
//! account the directional light color and direction.
//!
//! Adding the [`Atmosphere`] component to a 3d camera will enable the effect,
//! which by default is set to look similar to Earth's atmosphere. See the
//! documentation on the component itself for information regarding its fields.
//!
//! Performance-wise, the effect should be fairly cheap since the LUTs (Look
//! Up Tables) that encode most of the data are small, and take advantage of the
//! fact that the atmosphere is symmetric. Performance is also proportional to
//! the number of directional lights in the scene. In order to tune
//! performance more finely, the [`AtmosphereSettings`] camera component
//! manages the size of each LUT and the sample count for each ray.
//!
//! Given how similar it is to [`crate::volumetric_fog`], it might be expected
//! that these two modules would work together well. However for now using both
//! at once is untested, and might not be physically accurate. These may be
//! integrated into a single module in the future.
//!
//! On web platforms, atmosphere rendering will look slightly different. Specifically, when calculating how light travels
//! through the atmosphere, we use a simpler averaging technique instead of the more
//! complex blending operations. This difference will be resolved for WebGPU in a future release.
//!
//! [Shadertoy]: https://www.shadertoy.com/view/slSXRW
//!
//! [Unreal Engine Implementation]: https://github.com/sebh/UnrealEngineSkyAtmosphere
mod environment;
mod node;
pub mod resources;
use bevy_app::{App, Plugin, Update};
use bevy_asset::{embedded_asset, AssetId, Assets, Handle};
use bevy_camera::Camera3d;
use bevy_core_pipeline::core_3d::graph::Node3d;
use bevy_ecs::{
component::Component,
query::{Changed, QueryItem, With},
resource::Resource,
schedule::IntoScheduleConfigs,
system::{lifetimeless::Read, Query},
};
use bevy_math::{UVec2, UVec3, Vec3};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
extract_component::UniformComponentPlugin,
render_resource::{DownlevelFlags, ShaderType, SpecializedRenderPipelines},
view::Hdr,
RenderStartup,
};
use bevy_render::{
extract_component::{ExtractComponent, ExtractComponentPlugin},
render_graph::{RenderGraphExt, ViewNodeRunner},
render_resource::{TextureFormat, TextureUsages},
renderer::RenderAdapter,
Render, RenderApp, RenderSystems,
};
use bevy_core_pipeline::core_3d::graph::Core3d;
use bevy_shader::load_shader_library;
use environment::{
init_atmosphere_probe_layout, init_atmosphere_probe_pipeline,
prepare_atmosphere_probe_bind_groups, prepare_atmosphere_probe_components,
prepare_probe_textures, AtmosphereEnvironmentMap, EnvironmentNode,
};
use resources::{
prepare_atmosphere_transforms, prepare_atmosphere_uniforms, queue_render_sky_pipelines,
AtmosphereTransforms, GpuAtmosphere, RenderSkyBindGroupLayouts,
};
use tracing::warn;
use crate::{
medium::ScatteringMedium,
resources::{init_atmosphere_buffer, write_atmosphere_buffer},
};
use self::{
node::{AtmosphereLutsNode, AtmosphereNode, RenderSkyNode},
resources::{
prepare_atmosphere_bind_groups, prepare_atmosphere_textures, AtmosphereBindGroupLayouts,
AtmosphereLutPipelines, AtmosphereSampler,
},
};
/// Registers the atmosphere shaders, extraction/uniform plugins, resources,
/// and render graph nodes that implement atmospheric scattering.
#[doc(hidden)]
pub struct AtmospherePlugin;
impl Plugin for AtmospherePlugin {
    fn build(&self, app: &mut App) {
        // Shader libraries shared by the LUT passes and the sky pass.
        load_shader_library!(app, "types.wgsl");
        load_shader_library!(app, "functions.wgsl");
        load_shader_library!(app, "bruneton_functions.wgsl");
        load_shader_library!(app, "bindings.wgsl");
        // Entry-point shaders: one per LUT, plus the final sky/environment passes.
        embedded_asset!(app, "transmittance_lut.wgsl");
        embedded_asset!(app, "multiscattering_lut.wgsl");
        embedded_asset!(app, "sky_view_lut.wgsl");
        embedded_asset!(app, "aerial_view_lut.wgsl");
        embedded_asset!(app, "render_sky.wgsl");
        embedded_asset!(app, "environment.wgsl");
        app.add_plugins((
            ExtractComponentPlugin::<Atmosphere>::default(),
            ExtractComponentPlugin::<GpuAtmosphereSettings>::default(),
            ExtractComponentPlugin::<AtmosphereEnvironmentMap>::default(),
            UniformComponentPlugin::<GpuAtmosphere>::default(),
            UniformComponentPlugin::<GpuAtmosphereSettings>::default(),
        ))
        .add_systems(Update, prepare_atmosphere_probe_components);
        // Pre-register an Earth-like scattering medium so users get a sensible
        // default atmosphere without building one themselves.
        let world = app.world_mut();
        let earthlike_medium = world
            .resource_mut::<Assets<ScatteringMedium>>()
            .add(ScatteringMedium::earthlike(256, 256));
        world.insert_resource(EarthlikeAtmosphere(Atmosphere::earthlike(earthlike_medium)));
    }
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        let render_adapter = render_app.world().resource::<RenderAdapter>();
        // The LUTs are generated in compute shaders; bail out (with a warning)
        // on hardware that cannot run them.
        if !render_adapter
            .get_downlevel_capabilities()
            .flags
            .contains(DownlevelFlags::COMPUTE_SHADERS)
        {
            warn!("AtmospherePlugin not loaded. GPU lacks support for compute shaders.");
            return;
        }
        // The LUT passes write Rgba16Float storage textures; also required.
        if !render_adapter
            .get_texture_format_features(TextureFormat::Rgba16Float)
            .allowed_usages
            .contains(TextureUsages::STORAGE_BINDING)
        {
            warn!("AtmospherePlugin not loaded. GPU lacks support: TextureFormat::Rgba16Float does not support TextureUsages::STORAGE_BINDING.");
            return;
        }
        render_app
            .insert_resource(AtmosphereBindGroupLayouts::new())
            .init_resource::<RenderSkyBindGroupLayouts>()
            .init_resource::<AtmosphereSampler>()
            .init_resource::<AtmosphereLutPipelines>()
            .init_resource::<AtmosphereTransforms>()
            .init_resource::<SpecializedRenderPipelines<RenderSkyBindGroupLayouts>>()
            .add_systems(
                RenderStartup,
                // Chained: the probe pipeline depends on the probe layout.
                (
                    init_atmosphere_probe_layout,
                    init_atmosphere_probe_pipeline,
                    init_atmosphere_buffer,
                )
                    .chain(),
            )
            .add_systems(
                Render,
                (
                    configure_camera_depth_usages.in_set(RenderSystems::ManageViews),
                    queue_render_sky_pipelines.in_set(RenderSystems::Queue),
                    prepare_atmosphere_textures.in_set(RenderSystems::PrepareResources),
                    prepare_probe_textures
                        .in_set(RenderSystems::PrepareResources)
                        .after(prepare_atmosphere_textures),
                    prepare_atmosphere_uniforms
                        .before(RenderSystems::PrepareResources)
                        .after(RenderSystems::PrepareAssets),
                    prepare_atmosphere_probe_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                    prepare_atmosphere_transforms.in_set(RenderSystems::PrepareResources),
                    prepare_atmosphere_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                    write_atmosphere_buffer.in_set(RenderSystems::PrepareResources),
                ),
            )
            .add_render_graph_node::<ViewNodeRunner<AtmosphereLutsNode>>(
                Core3d,
                AtmosphereNode::RenderLuts,
            )
            .add_render_graph_edges(
                Core3d,
                (
                    // END_PRE_PASSES -> RENDER_LUTS -> MAIN_PASS
                    Node3d::EndPrepasses,
                    AtmosphereNode::RenderLuts,
                    Node3d::StartMainPass,
                ),
            )
            .add_render_graph_node::<ViewNodeRunner<RenderSkyNode>>(
                Core3d,
                AtmosphereNode::RenderSky,
            )
            .add_render_graph_node::<EnvironmentNode>(Core3d, AtmosphereNode::Environment)
            .add_render_graph_edges(
                Core3d,
                (
                    // Sky is composited between the opaque and transparent passes.
                    Node3d::MainOpaquePass,
                    AtmosphereNode::RenderSky,
                    Node3d::MainTransparentPass,
                ),
            );
    }
}
/// Resource holding a ready-made Earth-like [`Atmosphere`], registered by
/// [`AtmospherePlugin`] during `build`.
#[derive(Resource)]
pub struct EarthlikeAtmosphere(Atmosphere);
impl EarthlikeAtmosphere {
    /// Returns a clone of the stored Earth-like atmosphere.
    pub fn get(&self) -> Atmosphere {
        self.0.clone()
    }
}
/// Enables atmospheric scattering for an HDR camera.
///
/// Requires [`AtmosphereSettings`] and [`Hdr`], which are inserted
/// automatically via `#[require]`. See [`Atmosphere::earthlike`] for an
/// Earth-like preset.
#[derive(Clone, Component)]
#[require(AtmosphereSettings, Hdr)]
pub struct Atmosphere {
    /// Radius of the planet
    ///
    /// units: m
    pub bottom_radius: f32,
    /// Radius at which we consider the atmosphere to 'end' for our
    /// calculations (from center of planet)
    ///
    /// units: m
    pub top_radius: f32,
    /// An approximation of the average albedo (or color, roughly) of the
    /// planet's surface. This is used when calculating multiscattering.
    ///
    /// units: N/A
    pub ground_albedo: Vec3,
    /// A handle to a [`ScatteringMedium`], which describes the substance
    /// of the atmosphere and how it scatters light.
    pub medium: Handle<ScatteringMedium>,
}
impl Atmosphere {
    /// Constructs an atmosphere approximating Earth's: a 6,360 km-radius
    /// planet with a 100 km-thick atmosphere and an average ground albedo
    /// of 0.3, using `medium` as the scattering substance.
    pub fn earthlike(medium: Handle<ScatteringMedium>) -> Self {
        // Radii are in meters, measured from the planet's center.
        let (bottom_radius, top_radius) = (6_360_000.0, 6_460_000.0);
        Self {
            bottom_radius,
            top_radius,
            ground_albedo: Vec3::splat(0.3),
            medium,
        }
    }
}
impl ExtractComponent for Atmosphere {
    type QueryData = Read<Atmosphere>;
    type QueryFilter = With<Camera3d>;
    type Out = ExtractedAtmosphere;
    /// Copies the atmosphere parameters into the render world, demoting the
    /// strong medium handle to a bare asset id.
    fn extract_component(atmosphere: QueryItem<'_, '_, Self::QueryData>) -> Option<Self::Out> {
        let extracted = ExtractedAtmosphere {
            bottom_radius: atmosphere.bottom_radius,
            top_radius: atmosphere.top_radius,
            ground_albedo: atmosphere.ground_albedo,
            medium: atmosphere.medium.id(),
        };
        Some(extracted)
    }
}
/// The render-world representation of an `Atmosphere`, but which
/// hasn't been converted into shader uniforms yet.
///
/// Mirrors [`Atmosphere`], with the medium handle replaced by its
/// [`AssetId`]. Produced by `Atmosphere`'s `ExtractComponent` impl.
#[derive(Clone, Component)]
pub struct ExtractedAtmosphere {
    // units: m (see the matching fields on `Atmosphere`)
    pub bottom_radius: f32,
    pub top_radius: f32,
    pub ground_albedo: Vec3,
    pub medium: AssetId<ScatteringMedium>,
}
/// This component controls the resolution of the atmosphere LUTs, and
/// how many samples are used when computing them.
///
/// The transmittance LUT stores the transmittance from a point in the
/// atmosphere to the outer edge of the atmosphere in any direction,
/// parametrized by the point's radius and the cosine of the zenith angle
/// of the ray.
///
/// The multiscattering LUT stores the factor representing luminance scattered
/// towards the camera with scattering order >2, parametrized by the point's radius
/// and the cosine of the zenith angle of the sun.
///
/// The sky-view lut is essentially the actual skybox, storing the light scattered
/// towards the camera in every direction with a cubemap.
///
/// The aerial-view lut is a 3d LUT fit to the view frustum, which stores the luminance
/// scattered towards the camera at each point (RGB channels), alongside the average
/// transmittance to that point (A channel).
///
/// See [`AtmosphereSettings::default`] for the recommended baseline values.
#[derive(Clone, Component, Reflect)]
#[reflect(Clone, Default)]
pub struct AtmosphereSettings {
    /// The size of the transmittance LUT
    pub transmittance_lut_size: UVec2,
    /// The size of the multiscattering LUT
    pub multiscattering_lut_size: UVec2,
    /// The size of the sky-view LUT.
    pub sky_view_lut_size: UVec2,
    /// The size of the aerial-view LUT.
    pub aerial_view_lut_size: UVec3,
    /// The number of points to sample along each ray when
    /// computing the transmittance LUT
    pub transmittance_lut_samples: u32,
    /// The number of rays to sample when computing each
    /// pixel of the multiscattering LUT
    pub multiscattering_lut_dirs: u32,
    /// The number of points to sample when integrating along each
    /// multiscattering ray
    pub multiscattering_lut_samples: u32,
    /// The number of points to sample along each ray when
    /// computing the sky-view LUT.
    pub sky_view_lut_samples: u32,
    /// The number of points to sample for each slice along the z-axis
    /// of the aerial-view LUT.
    pub aerial_view_lut_samples: u32,
    /// The maximum distance from the camera to evaluate the
    /// aerial view LUT. The slices along the z-axis of the
    /// texture will be distributed linearly from the camera
    /// to this value.
    ///
    /// units: m
    pub aerial_view_lut_max_distance: f32,
    /// A conversion factor between scene units and meters, used to
    /// ensure correctness at different length scales.
    pub scene_units_to_m: f32,
    /// The number of points to sample for each fragment when the using
    /// ray marching to render the sky
    pub sky_max_samples: u32,
    /// The rendering method to use for the atmosphere.
    pub rendering_method: AtmosphereMode,
}
impl Default for AtmosphereSettings {
    /// Baseline LUT resolutions and sample counts, tuned for real-time use.
    fn default() -> Self {
        Self {
            // LUT resolutions.
            transmittance_lut_size: UVec2::new(256, 128),
            multiscattering_lut_size: UVec2::new(32, 32),
            sky_view_lut_size: UVec2::new(400, 200),
            aerial_view_lut_size: UVec3::new(32, 32, 32),
            // Per-LUT sample counts.
            transmittance_lut_samples: 40,
            multiscattering_lut_dirs: 64,
            multiscattering_lut_samples: 20,
            sky_view_lut_samples: 16,
            aerial_view_lut_samples: 10,
            // Distance/scale parameters (meters).
            aerial_view_lut_max_distance: 3.2e4,
            scene_units_to_m: 1.0,
            // Raymarched-mode sample budget and default rendering method.
            sky_max_samples: 16,
            rendering_method: AtmosphereMode::LookupTexture,
        }
    }
}
/// GPU-uniform mirror of [`AtmosphereSettings`], with `rendering_method`
/// lowered to a `u32`.
///
/// NOTE: derives `ShaderType`, so the field order here must stay in sync
/// with the WGSL-side struct — do not reorder fields.
#[derive(Clone, Component, Reflect, ShaderType)]
#[reflect(Default)]
pub struct GpuAtmosphereSettings {
    pub transmittance_lut_size: UVec2,
    pub multiscattering_lut_size: UVec2,
    pub sky_view_lut_size: UVec2,
    pub aerial_view_lut_size: UVec3,
    pub transmittance_lut_samples: u32,
    pub multiscattering_lut_dirs: u32,
    pub multiscattering_lut_samples: u32,
    pub sky_view_lut_samples: u32,
    pub aerial_view_lut_samples: u32,
    pub aerial_view_lut_max_distance: f32,
    pub scene_units_to_m: f32,
    pub sky_max_samples: u32,
    // `AtmosphereMode` discriminant (0 = LookupTexture, 1 = Raymarched).
    pub rendering_method: u32,
}
impl Default for GpuAtmosphereSettings {
    /// The GPU form of [`AtmosphereSettings::default`].
    fn default() -> Self {
        Self::from(AtmosphereSettings::default())
    }
}
impl From<AtmosphereSettings> for GpuAtmosphereSettings {
fn from(s: AtmosphereSettings) -> Self {
Self {
transmittance_lut_size: s.transmittance_lut_size,
multiscattering_lut_size: s.multiscattering_lut_size,
sky_view_lut_size: s.sky_view_lut_size,
aerial_view_lut_size: s.aerial_view_lut_size,
transmittance_lut_samples: s.transmittance_lut_samples,
multiscattering_lut_dirs: s.multiscattering_lut_dirs,
multiscattering_lut_samples: s.multiscattering_lut_samples,
sky_view_lut_samples: s.sky_view_lut_samples,
aerial_view_lut_samples: s.aerial_view_lut_samples,
aerial_view_lut_max_distance: s.aerial_view_lut_max_distance,
scene_units_to_m: s.scene_units_to_m,
sky_max_samples: s.sky_max_samples,
rendering_method: s.rendering_method as u32,
}
}
}
impl ExtractComponent for GpuAtmosphereSettings {
type QueryData = Read<AtmosphereSettings>;
type QueryFilter = (With<Camera3d>, With<Atmosphere>);
type Out = GpuAtmosphereSettings;
fn extract_component(item: QueryItem<'_, '_, Self::QueryData>) -> Option<Self::Out> {
Some(item.clone().into())
}
}
/// Ensures atmosphere cameras allocate their depth texture with
/// `TEXTURE_BINDING`, since the render-sky pass samples scene depth.
fn configure_camera_depth_usages(
    mut cameras: Query<&mut Camera3d, (Changed<Camera3d>, With<ExtractedAtmosphere>)>,
) {
    for mut camera in &mut cameras {
        // OR the usage bit in, preserving whatever usages were already set.
        camera.depth_texture_usages.0 |= TextureUsages::TEXTURE_BINDING.bits();
    }
}
/// Selects how the atmosphere is rendered. Choose based on scene scale and
/// volumetric shadow quality, and based on performance needs.
//
// `repr(u32)` with explicit discriminants: the values are passed to the GPU
// via `GpuAtmosphereSettings::rendering_method`, so they must not change.
#[repr(u32)]
// `Debug`/`PartialEq`/`Eq` added so the mode can be logged and compared;
// this is a backward-compatible widening of the derive set.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Reflect)]
pub enum AtmosphereMode {
    /// High-performance solution tailored to scenes that are mostly inside of the atmosphere.
    /// Uses a set of lookup textures to approximate scattering integration.
    /// Slightly less accurate for very long-distance/space views (lighting precision
    /// tapers as the camera moves far from the scene origin) and for sharp volumetric
    /// (cloud/fog) shadows.
    #[default]
    LookupTexture = 0,
    /// Slower, more accurate rendering method for any type of scene.
    /// Integrates the scattering numerically with raymarching and produces sharp volumetric
    /// (cloud/fog) shadows.
    /// Best for cinematic shots, planets seen from orbit, and scenes requiring
    /// accurate long-distance lighting.
    Raymarched = 1,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos_render/src/pipeline_3d.rs | crates/bevy_gizmos_render/src/pipeline_3d.rs | use crate::{
init_line_gizmo_uniform_bind_group_layout, line_gizmo_vertex_buffer_layouts,
line_joint_gizmo_vertex_buffer_layouts, DrawLineGizmo, DrawLineJointGizmo, GizmoRenderSystems,
GpuLineGizmo, LineGizmoUniformBindgroupLayout, SetLineGizmoBindGroup,
};
use bevy_app::{App, Plugin};
use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_camera::visibility::RenderLayers;
use bevy_core_pipeline::{
core_3d::{Transparent3d, CORE_3D_DEPTH_FORMAT},
oit::OrderIndependentTransparencySettings,
prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass},
};
use bevy_gizmos::config::{GizmoLineJoint, GizmoLineStyle, GizmoMeshConfig};
use bevy_ecs::{
error::BevyError,
prelude::Entity,
query::Has,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::BevyDefault as _;
use bevy_pbr::{ExtractedAtmosphere, MeshPipeline, MeshPipelineKey, SetMeshViewBindGroup};
use bevy_render::{
render_asset::{prepare_assets, RenderAssets},
render_phase::{
AddRenderCommand, DrawFunctions, PhaseItemExtraIndex, SetItemPipeline,
ViewSortedRenderPhases,
},
render_resource::*,
view::{ExtractedView, Msaa, ViewTarget},
Render, RenderApp, RenderSystems,
};
use bevy_render::{sync_world::MainEntity, RenderStartup};
use bevy_shader::Shader;
use bevy_utils::default;
use tracing::error;
/// Registers the render commands, pipelines, and queue systems that draw 3d
/// line gizmos into the [`Transparent3d`] phase.
pub struct LineGizmo3dPlugin;
impl Plugin for LineGizmo3dPlugin {
    fn build(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .add_render_command::<Transparent3d, DrawLineGizmo3d>()
            .add_render_command::<Transparent3d, DrawLineGizmo3dStrip>()
            .add_render_command::<Transparent3d, DrawLineJointGizmo3d>()
            .init_resource::<SpecializedRenderPipelines<LineJointGizmoPipeline>>()
            .configure_sets(
                Render,
                GizmoRenderSystems::QueueLineGizmos3d.in_set(RenderSystems::Queue),
            )
            .add_systems(
                RenderStartup,
                // The line pipeline embeds the gizmo uniform bind group
                // layout, so that layout must be initialized first.
                init_line_gizmo_pipelines.after(init_line_gizmo_uniform_bind_group_layout),
            )
            .add_systems(
                Render,
                // Queuing needs the prepared `GpuLineGizmo` assets.
                (queue_line_gizmos_3d, queue_line_joint_gizmos_3d)
                    .in_set(GizmoRenderSystems::QueueLineGizmos3d)
                    .after(prepare_assets::<GpuLineGizmo>),
            );
    }
}
/// Specialized render-pipeline cache for 3d line gizmos (lists and strips).
#[derive(Resource)]
struct LineGizmoPipeline {
    variants: Variants<RenderPipeline, LineGizmoPipelineSpecializer>,
}
/// Builds the base descriptors for the line and line-joint gizmo pipelines
/// and inserts them as resources. Runs at `RenderStartup`, after the gizmo
/// uniform bind group layout exists.
fn init_line_gizmo_pipelines(
    mut commands: Commands,
    mesh_pipeline: Res<MeshPipeline>,
    uniform_bind_group_layout: Res<LineGizmoUniformBindgroupLayout>,
    asset_server: Res<AssetServer>,
) {
    let line_shader = load_embedded_asset!(asset_server.as_ref(), "lines.wgsl");
    let variants_line = Variants::new(
        LineGizmoPipelineSpecializer {
            mesh_pipeline: mesh_pipeline.clone(),
        },
        RenderPipelineDescriptor {
            label: Some("LineGizmo 3d Pipeline".into()),
            vertex: VertexState {
                shader: line_shader.clone(),
                ..default()
            },
            fragment: Some(FragmentState {
                shader: line_shader,
                ..default()
            }),
            layout: vec![
                // Slot 0 is replaced with the view layout during
                // specialization (see `LineGizmoPipelineSpecializer`).
                Default::default(), // placeholder
                uniform_bind_group_layout.layout.clone(),
            ],
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                depth_write_enabled: true,
                // Reverse-Z depth: nearer fragments have greater depth values.
                depth_compare: CompareFunction::Greater,
                stencil: StencilState::default(),
                bias: DepthBiasState::default(),
            }),
            ..default()
        },
    );
    commands.insert_resource(LineGizmoPipeline {
        variants: variants_line,
    });
    commands.insert_resource(LineJointGizmoPipeline {
        mesh_pipeline: mesh_pipeline.clone(),
        uniform_layout: uniform_bind_group_layout.layout.clone(),
        shader: load_embedded_asset!(asset_server.as_ref(), "line_joints.wgsl"),
    });
}
/// Specializes the base line-gizmo pipeline descriptor per view/config key.
struct LineGizmoPipelineSpecializer {
    // Needed to look up the per-view bind group layout.
    mesh_pipeline: MeshPipeline,
}
/// Cache key for a specialized line-gizmo pipeline variant.
#[derive(PartialEq, Eq, Hash, Clone, SpecializerKey)]
struct LineGizmoPipelineKey {
    view_key: MeshPipelineKey,
    // true for line strips, false for line lists (different vertex layouts).
    strip: bool,
    // Whether line width scales with perspective.
    perspective: bool,
    line_style: GizmoLineStyle,
}
impl Specializer<RenderPipeline> for LineGizmoPipelineSpecializer {
    type Key = LineGizmoPipelineKey;
    /// Mutates the base descriptor in place for the given key: view layout,
    /// vertex buffers, MSAA count, shader defs, fragment entry point, and
    /// color target.
    fn specialize(
        &self,
        key: Self::Key,
        descriptor: &mut RenderPipelineDescriptor,
    ) -> Result<Canonical<Self::Key>, BevyError> {
        let view_layout = self
            .mesh_pipeline
            .get_view_layout(key.view_key.into())
            .clone();
        // Fills the placeholder layout slot set up in `init_line_gizmo_pipelines`.
        descriptor.set_layout(0, view_layout.main_layout.clone());
        descriptor.vertex.buffers = line_gizmo_vertex_buffer_layouts(key.strip);
        descriptor.multisample.count = key.view_key.msaa_samples();
        let fragment = descriptor.fragment_mut()?;
        #[cfg(feature = "webgl")]
        fragment.shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        if key.perspective {
            fragment.shader_defs.push("PERSPECTIVE".into());
        }
        let format = if key.view_key.contains(MeshPipelineKey::HDR) {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        // Each line style has its own fragment entry point in lines.wgsl.
        let fragment_entry_point = match key.line_style {
            GizmoLineStyle::Solid => "fragment_solid",
            GizmoLineStyle::Dotted => "fragment_dotted",
            GizmoLineStyle::Dashed { .. } => "fragment_dashed",
            // NOTE(review): `GizmoLineStyle` appears non-exhaustive here;
            // a new style will panic until an entry point is added.
            _ => unimplemented!(),
        };
        fragment.entry_point = Some(fragment_entry_point.into());
        fragment.set_target(
            0,
            ColorTargetState {
                format,
                blend: Some(BlendState::ALPHA_BLENDING),
                write_mask: ColorWrites::ALL,
            },
        );
        Ok(key)
    }
}
/// Base data for specializing line-joint gizmo pipelines
/// (see `queue_line_joint_gizmos_3d`).
#[derive(Clone, Resource)]
struct LineJointGizmoPipeline {
    mesh_pipeline: MeshPipeline,
    uniform_layout: BindGroupLayoutDescriptor,
    shader: Handle<Shader>,
}
/// Cache key for a specialized line-joint pipeline variant.
#[derive(PartialEq, Eq, Hash, Clone)]
struct LineJointGizmoPipelineKey {
    view_key: MeshPipelineKey,
    perspective: bool,
    joints: GizmoLineJoint,
}
impl SpecializedRenderPipeline for LineJointGizmoPipeline {
    type Key = LineJointGizmoPipelineKey;
    /// Builds a complete pipeline descriptor for the given joint style and
    /// view configuration.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let mut shader_defs = vec![
            #[cfg(feature = "webgl")]
            "SIXTEEN_BYTE_ALIGNMENT".into(),
        ];
        if key.perspective {
            shader_defs.push("PERSPECTIVE".into());
        }
        let format = if key.view_key.contains(MeshPipelineKey::HDR) {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        let view_layout = self
            .mesh_pipeline
            .get_view_layout(key.view_key.into())
            .clone();
        let layout = vec![view_layout.main_layout.clone(), self.uniform_layout.clone()];
        // `None` has no shader entry point; callers are expected to filter it
        // out before specializing (see `queue_line_joint_gizmos_3d`).
        if key.joints == GizmoLineJoint::None {
            error!("There is no entry point for line joints with GizmoLineJoints::None. Please consider aborting the drawing process before reaching this stage.");
        };
        let entry_point = match key.joints {
            GizmoLineJoint::Miter => "vertex_miter",
            GizmoLineJoint::Round(_) => "vertex_round",
            // `None` falls back to bevel so we still produce a valid pipeline.
            GizmoLineJoint::None | GizmoLineJoint::Bevel => "vertex_bevel",
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                entry_point: Some(entry_point.into()),
                shader_defs: shader_defs.clone(),
                buffers: line_joint_gizmo_vertex_buffer_layouts(),
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format,
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout,
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                depth_write_enabled: true,
                // Reverse-Z depth comparison, matching the line pipeline.
                depth_compare: CompareFunction::Greater,
                stencil: StencilState::default(),
                bias: DepthBiasState::default(),
            }),
            multisample: MultisampleState {
                count: key.view_key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some("LineJointGizmo 3d Pipeline".into()),
            ..default()
        }
    }
}
// Render command sequence for line lists: bind pipeline + view + gizmo
// uniform, then issue the non-strip draw.
type DrawLineGizmo3d = (
    SetItemPipeline,
    SetMeshViewBindGroup<0>,
    SetLineGizmoBindGroup<1>,
    DrawLineGizmo<false>,
);
// As above, but for line strips.
type DrawLineGizmo3dStrip = (
    SetItemPipeline,
    SetMeshViewBindGroup<0>,
    SetLineGizmoBindGroup<1>,
    DrawLineGizmo<true>,
);
// Render command sequence for the joint geometry between strip segments.
type DrawLineJointGizmo3d = (
    SetItemPipeline,
    SetMeshViewBindGroup<0>,
    SetLineGizmoBindGroup<1>,
    DrawLineJointGizmo,
);
/// Queues line-list and line-strip gizmo draws into each view's
/// [`Transparent3d`] phase, specializing a pipeline per (view, config) key.
fn queue_line_gizmos_3d(
    draw_functions: Res<DrawFunctions<Transparent3d>>,
    mut pipeline: ResMut<LineGizmoPipeline>,
    pipeline_cache: Res<PipelineCache>,
    line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>,
    line_gizmo_assets: Res<RenderAssets<GpuLineGizmo>>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<Transparent3d>>,
    views: Query<(
        &ExtractedView,
        &Msaa,
        Option<&RenderLayers>,
        (
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
            Has<OrderIndependentTransparencySettings>,
            Has<ExtractedAtmosphere>,
        ),
    )>,
) -> Result<(), BevyError> {
    let draw_function = draw_functions.read().get_id::<DrawLineGizmo3d>().unwrap();
    let draw_function_strip = draw_functions
        .read()
        .get_id::<DrawLineGizmo3dStrip>()
        .unwrap();
    for (
        view,
        msaa,
        render_layers,
        (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass, oit, atmosphere),
    ) in &views
    {
        // Views without a transparent phase (e.g. non-3d cameras) are skipped.
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        let render_layers = render_layers.unwrap_or_default();
        // Fold every view property that affects pipeline compilation into the key.
        let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples())
            | MeshPipelineKey::from_hdr(view.hdr);
        if normal_prepass {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }
        if depth_prepass {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }
        if motion_vector_prepass {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }
        if deferred_prepass {
            view_key |= MeshPipelineKey::DEFERRED_PREPASS;
        }
        if oit {
            view_key |= MeshPipelineKey::OIT_ENABLED;
        }
        if atmosphere {
            view_key |= MeshPipelineKey::ATMOSPHERE;
        }
        for (entity, main_entity, config) in &line_gizmos {
            // Only draw gizmos whose render layers overlap the view's.
            if !config.render_layers.intersects(render_layers) {
                continue;
            }
            let Some(line_gizmo) = line_gizmo_assets.get(&config.handle) else {
                continue;
            };
            // NOTE(review): lists queue at > 0 vertices while strips require
            // >= 2; a single-vertex list draws nothing visible — confirm the
            // asymmetry is intended.
            if line_gizmo.list_vertex_count > 0 {
                let pipeline = pipeline.variants.specialize(
                    &pipeline_cache,
                    LineGizmoPipelineKey {
                        view_key,
                        strip: false,
                        perspective: config.line_perspective,
                        line_style: config.line_style,
                    },
                )?;
                // distance 0: gizmos are not depth-sorted against each other.
                transparent_phase.add(Transparent3d {
                    entity: (entity, *main_entity),
                    draw_function,
                    pipeline,
                    distance: 0.,
                    batch_range: 0..1,
                    extra_index: PhaseItemExtraIndex::None,
                    // NOTE(review): confirm `indexed: true` is correct for
                    // these vertex-pulled line draws.
                    indexed: true,
                });
            }
            if line_gizmo.strip_vertex_count >= 2 {
                let pipeline = pipeline.variants.specialize(
                    &pipeline_cache,
                    LineGizmoPipelineKey {
                        view_key,
                        strip: true,
                        perspective: config.line_perspective,
                        line_style: config.line_style,
                    },
                )?;
                transparent_phase.add(Transparent3d {
                    entity: (entity, *main_entity),
                    draw_function: draw_function_strip,
                    pipeline,
                    distance: 0.,
                    batch_range: 0..1,
                    extra_index: PhaseItemExtraIndex::None,
                    indexed: true,
                });
            }
        }
    }
    Ok(())
}
/// Queues the joint geometry between line-strip segments into each view's
/// [`Transparent3d`] phase.
///
/// NOTE(review): unlike `queue_line_gizmos_3d`, this view key omits the
/// OIT and atmosphere flags — confirm whether that divergence is intended.
fn queue_line_joint_gizmos_3d(
    draw_functions: Res<DrawFunctions<Transparent3d>>,
    pipeline: Res<LineJointGizmoPipeline>,
    mut pipelines: ResMut<SpecializedRenderPipelines<LineJointGizmoPipeline>>,
    pipeline_cache: Res<PipelineCache>,
    line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>,
    line_gizmo_assets: Res<RenderAssets<GpuLineGizmo>>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<Transparent3d>>,
    views: Query<(
        &ExtractedView,
        &Msaa,
        Option<&RenderLayers>,
        (
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
        ),
    )>,
) {
    let draw_function = draw_functions
        .read()
        .get_id::<DrawLineJointGizmo3d>()
        .unwrap();
    for (
        view,
        msaa,
        render_layers,
        (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass),
    ) in &views
    {
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        let render_layers = render_layers.unwrap_or_default();
        // Fold view properties affecting pipeline compilation into the key.
        let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples())
            | MeshPipelineKey::from_hdr(view.hdr);
        if normal_prepass {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }
        if depth_prepass {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }
        if motion_vector_prepass {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }
        if deferred_prepass {
            view_key |= MeshPipelineKey::DEFERRED_PREPASS;
        }
        for (entity, main_entity, config) in &line_gizmos {
            if !config.render_layers.intersects(render_layers) {
                continue;
            }
            let Some(line_gizmo) = line_gizmo_assets.get(&config.handle) else {
                continue;
            };
            // Joints only exist between interior strip vertices, so at least
            // 3 vertices (and a joint style) are required.
            if line_gizmo.strip_vertex_count < 3 || config.line_joints == GizmoLineJoint::None {
                continue;
            }
            let pipeline = pipelines.specialize(
                &pipeline_cache,
                &pipeline,
                LineJointGizmoPipelineKey {
                    view_key,
                    perspective: config.line_perspective,
                    joints: config.line_joints,
                },
            );
            // distance 0: gizmos are not depth-sorted against each other.
            transparent_phase.add(Transparent3d {
                entity: (entity, *main_entity),
                draw_function,
                pipeline,
                distance: 0.,
                batch_range: 0..1,
                extra_index: PhaseItemExtraIndex::None,
                indexed: true,
            });
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos_render/src/retained.rs | crates/bevy_gizmos_render/src/retained.rs | //! This module is for 'retained' alternatives to the 'immediate mode' [`Gizmos`](bevy_gizmos::gizmos::Gizmos) system parameter.
use crate::LineGizmoUniform;
use bevy_camera::visibility::RenderLayers;
use bevy_gizmos::retained::Gizmo;
use bevy_math::Affine3;
use bevy_render::sync_world::{MainEntity, TemporaryRenderEntity};
use bevy_utils::once;
use tracing::warn;
use {
bevy_ecs::{
entity::Entity,
system::{Commands, Local, Query},
},
bevy_gizmos::config::GizmoLineJoint,
bevy_render::Extract,
bevy_transform::components::GlobalTransform,
};
use bevy_gizmos::config::GizmoLineStyle;
/// Extracts retained [`Gizmo`] entities into the render world as
/// `LineGizmoUniform`s plus (when a renderer feature is enabled) a
/// `GizmoMeshConfig`, spawned as temporary render entities.
pub(crate) fn extract_linegizmos(
    mut commands: Commands,
    // Last frame's count, used to pre-size this frame's allocation.
    mut previous_len: Local<usize>,
    query: Extract<Query<(Entity, &Gizmo, &GlobalTransform, Option<&RenderLayers>)>>,
) {
    let mut values = Vec::with_capacity(*previous_len);
    #[cfg_attr(
        not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
        expect(
            unused_variables,
            reason = "`render_layers` is unused when bevy_pbr and bevy_sprite_render are both disabled."
        )
    )]
    for (entity, gizmo, transform, render_layers) in &query {
        // Only round joints carry a resolution; other styles use 0.
        let joints_resolution = if let GizmoLineJoint::Round(resolution) = gizmo.line_config.joints
        {
            resolution
        } else {
            0
        };
        // Dashed lines carry user-provided scales; warn (once per call site)
        // on non-positive values but still pass them through unchanged.
        let (gap_scale, line_scale) = if let GizmoLineStyle::Dashed {
            gap_scale,
            line_scale,
        } = gizmo.line_config.style
        {
            if gap_scale <= 0.0 {
                once!(warn!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero"));
            }
            if line_scale <= 0.0 {
                once!(warn!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero"));
            }
            (gap_scale, line_scale)
        } else {
            (1.0, 1.0)
        };
        values.push((
            LineGizmoUniform {
                // Transposed affine, matching the shader-side layout.
                world_from_local: Affine3::from(&transform.affine()).to_transpose(),
                line_width: gizmo.line_config.width,
                depth_bias: gizmo.depth_bias,
                joints_resolution,
                gap_scale,
                line_scale,
                #[cfg(feature = "webgl")]
                _padding: Default::default(),
            },
            #[cfg(any(feature = "bevy_pbr", feature = "bevy_sprite_render"))]
            bevy_gizmos::config::GizmoMeshConfig {
                line_perspective: gizmo.line_config.perspective,
                line_style: gizmo.line_config.style,
                line_joints: gizmo.line_config.joints,
                render_layers: render_layers.cloned().unwrap_or_default(),
                handle: gizmo.handle.clone(),
            },
            MainEntity::from(entity),
            // Despawned at the end of the frame; re-extracted next frame.
            TemporaryRenderEntity,
        ));
    }
    *previous_len = values.len();
    commands.spawn_batch(values);
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos_render/src/lib.rs | crates/bevy_gizmos_render/src/lib.rs | #![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
//! This crate renders `bevy_gizmos` with `bevy_render`.
/// System set label for the systems handling the rendering of gizmos.
#[derive(SystemSet, Clone, Debug, Hash, PartialEq, Eq)]
pub enum GizmoRenderSystems {
/// Adds gizmos to the [`Transparent2d`](bevy_core_pipeline::core_2d::Transparent2d) render phase
#[cfg(feature = "bevy_sprite_render")]
QueueLineGizmos2d,
/// Adds gizmos to the [`Transparent3d`](bevy_core_pipeline::core_3d::Transparent3d) render phase
#[cfg(feature = "bevy_pbr")]
QueueLineGizmos3d,
}
pub mod retained;
#[cfg(feature = "bevy_sprite_render")]
mod pipeline_2d;
#[cfg(feature = "bevy_pbr")]
mod pipeline_3d;
use bevy_app::{App, Plugin};
use bevy_ecs::{
resource::Resource,
schedule::{IntoScheduleConfigs, SystemSet},
system::Res,
};
use {bevy_gizmos::config::GizmoMeshConfig, bevy_mesh::VertexBufferLayout};
use {
crate::retained::extract_linegizmos,
bevy_asset::AssetId,
bevy_ecs::{
component::Component,
entity::Entity,
query::ROQueryItem,
system::{
lifetimeless::{Read, SRes},
Commands, SystemParamItem,
},
},
bevy_math::{Affine3, Affine3A, Vec4},
bevy_render::{
extract_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin},
render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets},
render_phase::{PhaseItem, RenderCommand, RenderCommandResult, TrackedRenderPass},
render_resource::{
binding_types::uniform_buffer, BindGroup, BindGroupEntries, BindGroupLayoutEntries,
Buffer, BufferInitDescriptor, BufferUsages, ShaderStages, ShaderType, VertexFormat,
},
renderer::RenderDevice,
sync_world::{MainEntity, TemporaryRenderEntity},
Extract, ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
},
bytemuck::cast_slice,
};
use bevy_render::render_resource::{
BindGroupLayoutDescriptor, PipelineCache, VertexAttribute, VertexStepMode,
};
use bevy_gizmos::{
config::{GizmoConfigStore, GizmoLineJoint},
GizmoAsset, GizmoHandles,
};
/// A [`Plugin`] that provides an immediate mode drawing api for visual debugging.
///
/// Requires to be loaded after [`PbrPlugin`](bevy_pbr::PbrPlugin) or [`SpriteRenderPlugin`](bevy_sprite_render::SpriteRenderPlugin).
#[derive(Default)]
pub struct GizmoRenderPlugin;
impl Plugin for GizmoRenderPlugin {
    fn build(&self, app: &mut App) {
        // Register the WGSL sources so the 2D/3D pipelines can load them as embedded assets.
        {
            use bevy_asset::embedded_asset;
            embedded_asset!(app, "lines.wgsl");
            embedded_asset!(app, "line_joints.wgsl");
        }
        // Per-gizmo shader uniforms and GPU vertex buffers are managed by these plugins.
        app.add_plugins(UniformComponentPlugin::<LineGizmoUniform>::default())
            .add_plugins(RenderAssetPlugin::<GpuLineGizmo>::default());
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.add_systems(RenderStartup, init_line_gizmo_uniform_bind_group_layout);
            render_app.add_systems(
                Render,
                prepare_line_gizmo_bind_group.in_set(RenderSystems::PrepareBindGroups),
            );
            // Pull both immediate-mode and retained gizmo data into the render world.
            render_app.add_systems(ExtractSchedule, (extract_gizmo_data, extract_linegizmos));
            // The 2D/3D draw pipelines are only added when the corresponding renderer
            // plugin was loaded first; otherwise warn about the likely ordering mistake.
            #[cfg(feature = "bevy_sprite_render")]
            if app.is_plugin_added::<bevy_sprite_render::SpriteRenderPlugin>() {
                app.add_plugins(pipeline_2d::LineGizmo2dPlugin);
            } else {
                tracing::warn!("bevy_sprite_render feature is enabled but bevy_sprite_render::SpriteRenderPlugin was not detected. Are you sure you loaded GizmoPlugin after SpriteRenderPlugin?");
            }
            #[cfg(feature = "bevy_pbr")]
            if app.is_plugin_added::<bevy_pbr::PbrPlugin>() {
                app.add_plugins(pipeline_3d::LineGizmo3dPlugin);
            } else {
                tracing::warn!("bevy_pbr feature is enabled but bevy_pbr::PbrPlugin was not detected. Are you sure you loaded GizmoPlugin after PbrPlugin?");
            }
        } else {
            tracing::warn!("bevy_render feature is enabled but RenderApp was not detected. Are you sure you loaded GizmoPlugin after RenderPlugin?");
        }
    }
}
/// Creates the bind group layout for the per-gizmo [`LineGizmoUniform`] and
/// stores it as the [`LineGizmoUniformBindgroupLayout`] resource.
fn init_line_gizmo_uniform_bind_group_layout(mut commands: Commands) {
    // A single dynamically-offset uniform buffer, visible to the vertex stage only.
    let entries = BindGroupLayoutEntries::single(
        ShaderStages::VERTEX,
        uniform_buffer::<LineGizmoUniform>(true),
    );
    commands.insert_resource(LineGizmoUniformBindgroupLayout {
        layout: BindGroupLayoutDescriptor::new("LineGizmoUniform layout", &entries),
    });
}
/// Extracts immediate-mode gizmo data from the main world, spawning one render
/// entity (carrying a [`LineGizmoUniform`]) per enabled gizmo config group.
fn extract_gizmo_data(
    mut commands: Commands,
    handles: Extract<Res<GizmoHandles>>,
    config: Extract<Res<GizmoConfigStore>>,
) {
    use bevy_gizmos::config::GizmoLineStyle;
    use bevy_utils::once;
    use tracing::warn;
    for (group_type_id, handle) in handles.handles() {
        // Skip groups with no registered config or that are disabled.
        let Some((config, _)) = config.get_config_dyn(group_type_id) else {
            continue;
        };
        if !config.enabled {
            continue;
        }
        #[cfg_attr(
            not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
            expect(
                unused_variables,
                reason = "`handle` is unused when bevy_pbr and bevy_sprite_render are both disabled."
            )
        )]
        // No handle means this group has nothing drawn for it.
        let Some(handle) = handle
        else {
            continue;
        };
        // Round joints are the only joint type with a shader-side resolution.
        let joints_resolution = if let GizmoLineJoint::Round(resolution) = config.line.joints {
            resolution
        } else {
            0
        };
        let (gap_scale, line_scale) = if let GizmoLineStyle::Dashed {
            gap_scale,
            line_scale,
        } = config.line.style
        {
            // Warn (once per call site) about non-positive scales, but still
            // forward the configured values unchanged.
            if gap_scale <= 0.0 {
                once!(warn!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero."));
            }
            if line_scale <= 0.0 {
                once!(warn!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero."));
            }
            (gap_scale, line_scale)
        } else {
            // Non-dashed styles: benign defaults for the unused uniform fields.
            (1.0, 1.0)
        };
        commands.spawn((
            LineGizmoUniform {
                // Identity transform: immediate-mode vertex positions are used as-is.
                world_from_local: Affine3::from(&Affine3A::IDENTITY).to_transpose(),
                line_width: config.line.width,
                depth_bias: config.depth_bias,
                joints_resolution,
                gap_scale,
                line_scale,
                #[cfg(feature = "webgl")]
                _padding: Default::default(),
            },
            #[cfg(any(feature = "bevy_pbr", feature = "bevy_sprite_render"))]
            GizmoMeshConfig {
                line_perspective: config.line.perspective,
                line_style: config.line.style,
                line_joints: config.line.joints,
                render_layers: config.render_layers.clone(),
                handle: handle.clone(),
            },
            // The immediate mode API does not have a main world entity to refer to,
            // but we do need MainEntity on this render entity for the systems to find it.
            MainEntity::from(Entity::PLACEHOLDER),
            TemporaryRenderEntity,
        ));
    }
}
/// Per-gizmo uniform data consumed by `lines.wgsl` / `line_joints.wgsl`.
#[derive(Component, ShaderType, Clone, Copy)]
struct LineGizmoUniform {
    // Transposed affine world-from-local transform, packed as three rows.
    world_from_local: [Vec4; 3],
    line_width: f32,
    depth_bias: f32,
    // Only used if the current config's `line_joints` is set to `GizmoLineJoint::Round(_)`
    joints_resolution: u32,
    // Only used if the current config's `line_style` is set to `GizmoLineStyle::Dashed{_}`
    gap_scale: f32,
    line_scale: f32,
    /// WebGL2 structs must be 16 byte aligned.
    #[cfg(feature = "webgl")]
    _padding: bevy_math::Vec3,
}
#[cfg_attr(
not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
expect(
dead_code,
reason = "fields are unused when bevy_pbr and bevy_sprite_render are both disabled."
)
)]
#[derive(Debug, Clone)]
struct GpuLineGizmo {
list_position_buffer: Buffer,
list_color_buffer: Buffer,
list_vertex_count: u32,
strip_position_buffer: Buffer,
strip_color_buffer: Buffer,
strip_vertex_count: u32,
}
impl RenderAsset for GpuLineGizmo {
    type SourceAsset = GizmoAsset;
    type Param = SRes<RenderDevice>;

    /// Uploads a [`GizmoAsset`]'s CPU-side line-list and line-strip vertex data
    /// into GPU vertex buffers.
    ///
    /// Never fails: an empty gizmo simply produces zero-length buffers.
    fn prepare_asset(
        gizmo: Self::SourceAsset,
        _: AssetId<Self::SourceAsset>,
        render_device: &mut SystemParamItem<Self::Param>,
        _: Option<&Self>,
    ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
        // Fetch the vertex data once instead of once per buffer/field.
        let buffer = gizmo.buffer();
        let list_position_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
            usage: BufferUsages::VERTEX,
            label: Some("LineGizmo Position Buffer"),
            contents: cast_slice(&buffer.list_positions),
        });
        let list_color_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
            usage: BufferUsages::VERTEX,
            label: Some("LineGizmo Color Buffer"),
            contents: cast_slice(&buffer.list_colors),
        });
        let strip_position_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
            usage: BufferUsages::VERTEX,
            label: Some("LineGizmo Strip Position Buffer"),
            contents: cast_slice(&buffer.strip_positions),
        });
        let strip_color_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
            usage: BufferUsages::VERTEX,
            label: Some("LineGizmo Strip Color Buffer"),
            contents: cast_slice(&buffer.strip_colors),
        });
        Ok(GpuLineGizmo {
            list_position_buffer,
            list_color_buffer,
            list_vertex_count: buffer.list_positions.len() as u32,
            strip_position_buffer,
            strip_color_buffer,
            strip_vertex_count: buffer.strip_positions.len() as u32,
        })
    }
}
#[derive(Resource)]
struct LineGizmoUniformBindgroupLayout {
layout: BindGroupLayoutDescriptor,
}
#[cfg_attr(
not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
expect(
dead_code,
reason = "fields are unused when bevy_pbr and bevy_sprite_render are both disabled."
)
)]
#[derive(Resource)]
struct LineGizmoUniformBindgroup {
bindgroup: BindGroup,
}
/// Creates the [`LineGizmoUniformBindgroup`] resource for this frame, binding
/// the `LineGizmoUniform` component uniform buffer.
fn prepare_line_gizmo_bind_group(
    mut commands: Commands,
    line_gizmo_uniform_layout: Res<LineGizmoUniformBindgroupLayout>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    line_gizmo_uniforms: Res<ComponentUniforms<LineGizmoUniform>>,
) {
    // Nothing to bind until at least one uniform has been written.
    let Some(binding) = line_gizmo_uniforms.uniforms().binding() else {
        return;
    };
    let bindgroup = render_device.create_bind_group(
        "LineGizmoUniform bindgroup",
        &pipeline_cache.get_bind_group_layout(&line_gizmo_uniform_layout.layout),
        &BindGroupEntries::single(binding),
    );
    commands.insert_resource(LineGizmoUniformBindgroup { bindgroup });
}
#[cfg_attr(
    not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
    expect(
        dead_code,
        reason = "struct is not constructed when bevy_pbr and bevy_sprite_render are both disabled."
    )
)]
/// Render command that binds the [`LineGizmoUniform`] bind group at slot `I`,
/// using the item's dynamic uniform offset.
struct SetLineGizmoBindGroup<const I: usize>;
impl<const I: usize, P: PhaseItem> RenderCommand<P> for SetLineGizmoBindGroup<I> {
    type Param = SRes<LineGizmoUniformBindgroup>;
    type ViewQuery = ();
    type ItemQuery = Read<DynamicUniformIndex<LineGizmoUniform>>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: ROQueryItem<'w, '_, Self::ViewQuery>,
        uniform_index: Option<ROQueryItem<'w, '_, Self::ItemQuery>>,
        bind_group: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // An item without a uniform index has nothing for this pass to bind.
        let Some(uniform_index) = uniform_index else {
            return RenderCommandResult::Skip;
        };
        pass.set_bind_group(
            I,
            &bind_group.into_inner().bindgroup,
            &[uniform_index.index()],
        );
        RenderCommandResult::Success
    }
}
#[cfg_attr(
    not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
    expect(
        dead_code,
        reason = "struct is not constructed when bevy_pbr and bevy_sprite_render are both disabled."
    )
)]
/// Render command that draws a line gizmo's segments as instanced draws.
///
/// `STRIP = true` uses the strip buffers (consecutive vertices form segments);
/// `STRIP = false` uses the list buffers (each vertex pair forms one segment).
struct DrawLineGizmo<const STRIP: bool>;
impl<P: PhaseItem, const STRIP: bool> RenderCommand<P> for DrawLineGizmo<STRIP> {
    type Param = SRes<RenderAssets<GpuLineGizmo>>;
    type ViewQuery = ();
    type ItemQuery = Read<GizmoMeshConfig>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: ROQueryItem<'w, '_, Self::ViewQuery>,
        config: Option<ROQueryItem<'w, '_, Self::ItemQuery>>,
        line_gizmos: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let Some(config) = config else {
            return RenderCommandResult::Skip;
        };
        // The GPU buffers may not have been prepared yet this frame.
        let Some(line_gizmo) = line_gizmos.into_inner().get(&config.handle) else {
            return RenderCommandResult::Skip;
        };
        let vertex_count = if STRIP {
            line_gizmo.strip_vertex_count
        } else {
            line_gizmo.list_vertex_count
        };
        // A line segment needs two endpoints; nothing to draw otherwise.
        if vertex_count < 2 {
            return RenderCommandResult::Success;
        }
        let instances = if STRIP {
            // For strips, each instance reads vertex i (slots 0/2) and vertex i + 1
            // (slots 1/3) by binding the same buffer twice, offset by one element.
            let item_size = VertexFormat::Float32x3.size();
            let buffer_size = line_gizmo.strip_position_buffer.size() - item_size;
            pass.set_vertex_buffer(0, line_gizmo.strip_position_buffer.slice(..buffer_size));
            pass.set_vertex_buffer(1, line_gizmo.strip_position_buffer.slice(item_size..));
            let item_size = VertexFormat::Float32x4.size();
            let buffer_size = line_gizmo.strip_color_buffer.size() - item_size;
            pass.set_vertex_buffer(2, line_gizmo.strip_color_buffer.slice(..buffer_size));
            pass.set_vertex_buffer(3, line_gizmo.strip_color_buffer.slice(item_size..));
            // n strip vertices form n - 1 segments.
            vertex_count - 1
        } else {
            // For lists, both endpoints live interleaved in one buffer
            // (the layout uses double stride; see line_gizmo_vertex_buffer_layouts).
            pass.set_vertex_buffer(0, line_gizmo.list_position_buffer.slice(..));
            pass.set_vertex_buffer(1, line_gizmo.list_color_buffer.slice(..));
            vertex_count / 2
        };
        // 6 vertices per instance — presumably a two-triangle quad per segment
        // (see lines.wgsl for the actual expansion).
        pass.draw(0..6, 0..instances);
        RenderCommandResult::Success
    }
}
#[cfg_attr(
    not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
    expect(
        dead_code,
        reason = "struct is not constructed when bevy_pbr and bevy_sprite_render are both disabled."
    )
)]
/// Render command that draws joint geometry (miter, round, or bevel) between
/// consecutive segments of a line strip.
struct DrawLineJointGizmo;
impl<P: PhaseItem> RenderCommand<P> for DrawLineJointGizmo {
    type Param = SRes<RenderAssets<GpuLineGizmo>>;
    type ViewQuery = ();
    type ItemQuery = Read<GizmoMeshConfig>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: ROQueryItem<'w, '_, Self::ViewQuery>,
        config: Option<ROQueryItem<'w, '_, Self::ItemQuery>>,
        line_gizmos: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let Some(config) = config else {
            return RenderCommandResult::Skip;
        };
        let Some(line_gizmo) = line_gizmos.into_inner().get(&config.handle) else {
            return RenderCommandResult::Skip;
        };
        // A joint exists only between two segments, i.e. needs >= 3 strip vertices.
        if line_gizmo.strip_vertex_count <= 2 {
            return RenderCommandResult::Success;
        };
        if config.line_joints == GizmoLineJoint::None {
            return RenderCommandResult::Success;
        };
        let instances = {
            // Bind the strip position buffer three times, offset by 0/1/2 elements,
            // so each instance reads the (a, b, c) vertex triple around joint `b`.
            let item_size = VertexFormat::Float32x3.size();
            // position_a
            let buffer_size_a = line_gizmo.strip_position_buffer.size() - item_size * 2;
            pass.set_vertex_buffer(0, line_gizmo.strip_position_buffer.slice(..buffer_size_a));
            // position_b
            let buffer_size_b = line_gizmo.strip_position_buffer.size() - item_size;
            pass.set_vertex_buffer(
                1,
                line_gizmo
                    .strip_position_buffer
                    .slice(item_size..buffer_size_b),
            );
            // position_c
            pass.set_vertex_buffer(2, line_gizmo.strip_position_buffer.slice(item_size * 2..));
            // color
            let item_size = VertexFormat::Float32x4.size();
            let buffer_size = line_gizmo.strip_color_buffer.size() - item_size;
            // This corresponds to the color of position_b, hence starts from `item_size`
            pass.set_vertex_buffer(
                3,
                line_gizmo.strip_color_buffer.slice(item_size..buffer_size),
            );
            // n strip vertices have n - 2 interior joints.
            line_gizmo.strip_vertex_count - 2
        };
        // Per-instance vertex count depends on the joint type.
        let vertices = match config.line_joints {
            GizmoLineJoint::None => unreachable!(),
            GizmoLineJoint::Miter => 6,
            GizmoLineJoint::Round(resolution) => resolution * 3,
            GizmoLineJoint::Bevel => 3,
        };
        pass.draw(0..vertices, 0..instances);
        RenderCommandResult::Success
    }
}
#[cfg_attr(
    not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
    expect(
        dead_code,
        reason = "function is unused when bevy_pbr and bevy_sprite_render are both disabled."
    )
)]
/// Builds the instance-rate vertex buffer layouts for line gizmo segments.
///
/// Shader locations 0/1 are a segment's two endpoint positions and 2/3 the
/// corresponding colors. For strips (`strip == true`) the two endpoints come
/// from two bindings of the same buffer offset by one element, so four
/// single-attribute layouts are returned; for lists the endpoints are
/// interleaved in one buffer, so two double-stride layouts are returned.
fn line_gizmo_vertex_buffer_layouts(strip: bool) -> Vec<VertexBufferLayout> {
    use VertexFormat::*;
    let mut position_layout = VertexBufferLayout {
        array_stride: Float32x3.size(),
        step_mode: VertexStepMode::Instance,
        attributes: vec![VertexAttribute {
            format: Float32x3,
            offset: 0,
            shader_location: 0,
        }],
    };
    let mut color_layout = VertexBufferLayout {
        array_stride: Float32x4.size(),
        step_mode: VertexStepMode::Instance,
        attributes: vec![VertexAttribute {
            format: Float32x4,
            offset: 0,
            shader_location: 2,
        }],
    };
    if strip {
        vec![
            position_layout.clone(),
            {
                // Same layout rebound at location 1 (buffer bound offset by one element).
                position_layout.attributes[0].shader_location = 1;
                position_layout
            },
            color_layout.clone(),
            {
                color_layout.attributes[0].shader_location = 3;
                color_layout
            },
        ]
    } else {
        // List buffers interleave both endpoints: double the stride and add a
        // second attribute per buffer.
        position_layout.array_stride *= 2;
        position_layout.attributes.push(VertexAttribute {
            format: Float32x3,
            offset: Float32x3.size(),
            shader_location: 1,
        });
        color_layout.array_stride *= 2;
        color_layout.attributes.push(VertexAttribute {
            format: Float32x4,
            offset: Float32x4.size(),
            shader_location: 3,
        });
        vec![position_layout, color_layout]
    }
}
#[cfg_attr(
    not(any(feature = "bevy_pbr", feature = "bevy_sprite_render")),
    expect(
        dead_code,
        reason = "function is unused when bevy_pbr and bevy_sprite_render are both disabled."
    )
)]
/// Builds the four instance-rate vertex buffer layouts used to draw line
/// joints: three position bindings (locations 0/1/2 — the vertex triple
/// around a joint) and one color binding (location 3).
fn line_joint_gizmo_vertex_buffer_layouts() -> Vec<VertexBufferLayout> {
    use VertexFormat::*;
    let mut position_layout = VertexBufferLayout {
        array_stride: Float32x3.size(),
        step_mode: VertexStepMode::Instance,
        attributes: vec![VertexAttribute {
            format: Float32x3,
            offset: 0,
            shader_location: 0,
        }],
    };
    let color_layout = VertexBufferLayout {
        array_stride: Float32x4.size(),
        step_mode: VertexStepMode::Instance,
        attributes: vec![VertexAttribute {
            format: Float32x4,
            offset: 0,
            shader_location: 3,
        }],
    };
    vec![
        position_layout.clone(),
        {
            // Same layout rebound at locations 1 and 2 for the offset buffer bindings.
            position_layout.attributes[0].shader_location = 1;
            position_layout.clone()
        },
        {
            position_layout.attributes[0].shader_location = 2;
            position_layout
        },
        // Last use: move instead of the redundant `.clone()` the original had.
        color_layout,
    ]
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_gizmos_render/src/pipeline_2d.rs | crates/bevy_gizmos_render/src/pipeline_2d.rs | use crate::{
init_line_gizmo_uniform_bind_group_layout, line_gizmo_vertex_buffer_layouts,
line_joint_gizmo_vertex_buffer_layouts, DrawLineGizmo, DrawLineJointGizmo, GizmoRenderSystems,
GpuLineGizmo, LineGizmoUniformBindgroupLayout, SetLineGizmoBindGroup,
};
use bevy_app::{App, Plugin};
use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_camera::visibility::RenderLayers;
use bevy_core_pipeline::core_2d::{Transparent2d, CORE_2D_DEPTH_FORMAT};
use bevy_gizmos::config::{GizmoLineJoint, GizmoLineStyle, GizmoMeshConfig};
use bevy_ecs::{
prelude::Entity,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::BevyDefault as _;
use bevy_math::FloatOrd;
use bevy_render::{
render_asset::{prepare_assets, RenderAssets},
render_phase::{
AddRenderCommand, DrawFunctions, PhaseItemExtraIndex, SetItemPipeline,
ViewSortedRenderPhases,
},
render_resource::*,
view::{ExtractedView, Msaa, ViewTarget},
Render, RenderApp, RenderSystems,
};
use bevy_render::{sync_world::MainEntity, RenderStartup};
use bevy_shader::Shader;
use bevy_sprite_render::{
init_mesh_2d_pipeline, Mesh2dPipeline, Mesh2dPipelineKey, SetMesh2dViewBindGroup,
};
use bevy_utils::default;
use tracing::error;
pub struct LineGizmo2dPlugin;
impl Plugin for LineGizmo2dPlugin {
fn build(&self, app: &mut App) {
let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
render_app
.add_render_command::<Transparent2d, DrawLineGizmo2d>()
.add_render_command::<Transparent2d, DrawLineGizmo2dStrip>()
.add_render_command::<Transparent2d, DrawLineJointGizmo2d>()
.init_resource::<SpecializedRenderPipelines<LineGizmoPipeline>>()
.init_resource::<SpecializedRenderPipelines<LineJointGizmoPipeline>>()
.configure_sets(
Render,
GizmoRenderSystems::QueueLineGizmos2d
.in_set(RenderSystems::Queue)
.ambiguous_with(bevy_sprite_render::queue_sprites)
.ambiguous_with(
bevy_sprite_render::queue_material2d_meshes::<
bevy_sprite_render::ColorMaterial,
>,
),
)
.add_systems(
RenderStartup,
init_line_gizmo_pipelines
.after(init_line_gizmo_uniform_bind_group_layout)
.after(init_mesh_2d_pipeline),
)
.add_systems(
Render,
queue_line_and_joint_gizmos_2d
.in_set(GizmoRenderSystems::QueueLineGizmos2d)
.after(prepare_assets::<GpuLineGizmo>),
);
}
}
#[derive(Clone, Resource)]
struct LineGizmoPipeline {
mesh_pipeline: Mesh2dPipeline,
uniform_layout: BindGroupLayoutDescriptor,
shader: Handle<Shader>,
}
/// Builds the 2D line and line-joint gizmo pipeline resources, sharing the
/// 2D mesh pipeline state and the gizmo uniform bind group layout.
fn init_line_gizmo_pipelines(
    mut commands: Commands,
    mesh_2d_pipeline: Res<Mesh2dPipeline>,
    uniform_bind_group_layout: Res<LineGizmoUniformBindgroupLayout>,
    asset_server: Res<AssetServer>,
) {
    // Both pipelines share the same mesh pipeline state and uniform layout.
    let mesh_pipeline = mesh_2d_pipeline.clone();
    let uniform_layout = uniform_bind_group_layout.layout.clone();
    commands.insert_resource(LineGizmoPipeline {
        mesh_pipeline: mesh_pipeline.clone(),
        uniform_layout: uniform_layout.clone(),
        shader: load_embedded_asset!(asset_server.as_ref(), "lines.wgsl"),
    });
    commands.insert_resource(LineJointGizmoPipeline {
        mesh_pipeline,
        uniform_layout,
        shader: load_embedded_asset!(asset_server.as_ref(), "line_joints.wgsl"),
    });
}
#[derive(PartialEq, Eq, Hash, Clone)]
struct LineGizmoPipelineKey {
mesh_key: Mesh2dPipelineKey,
strip: bool,
line_style: GizmoLineStyle,
}
impl SpecializedRenderPipeline for LineGizmoPipeline {
    type Key = LineGizmoPipelineKey;
    /// Builds the 2D line-segment pipeline variant for the given key
    /// (HDR/MSAA view settings, strip vs. list buffers, line style).
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // Match the view target's texture format (HDR or the default format).
        let format = if key.mesh_key.contains(Mesh2dPipelineKey::HDR) {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        let shader_defs = vec![
            #[cfg(feature = "webgl")]
            "SIXTEEN_BYTE_ALIGNMENT".into(),
        ];
        // Group 0: 2D mesh view bindings; group 1: LineGizmoUniform.
        let layout = vec![
            self.mesh_pipeline.view_layout.clone(),
            self.uniform_layout.clone(),
        ];
        // The configured line style selects the fragment entry point in lines.wgsl.
        let fragment_entry_point = match key.line_style {
            GizmoLineStyle::Solid => "fragment_solid",
            GizmoLineStyle::Dotted => "fragment_dotted",
            GizmoLineStyle::Dashed { .. } => "fragment_dashed",
            _ => unimplemented!(),
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: line_gizmo_vertex_buffer_layouts(key.strip),
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                entry_point: Some(fragment_entry_point.into()),
                targets: vec![Some(ColorTargetState {
                    format,
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
            }),
            layout,
            // Depth test is `Always` and writes are disabled: gizmos do not
            // occlude (or get occluded via the depth buffer by) other 2D content.
            depth_stencil: Some(DepthStencilState {
                format: CORE_2D_DEPTH_FORMAT,
                depth_write_enabled: false,
                depth_compare: CompareFunction::Always,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: key.mesh_key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some("LineGizmo Pipeline 2D".into()),
            ..default()
        }
    }
}
#[derive(Clone, Resource)]
struct LineJointGizmoPipeline {
mesh_pipeline: Mesh2dPipeline,
uniform_layout: BindGroupLayoutDescriptor,
shader: Handle<Shader>,
}
#[derive(PartialEq, Eq, Hash, Clone)]
struct LineJointGizmoPipelineKey {
mesh_key: Mesh2dPipelineKey,
joints: GizmoLineJoint,
}
impl SpecializedRenderPipeline for LineJointGizmoPipeline {
    type Key = LineJointGizmoPipelineKey;
    /// Builds the 2D line-joint pipeline variant for the given key
    /// (HDR/MSAA view settings and joint type).
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // Match the view target's texture format (HDR or the default format).
        let format = if key.mesh_key.contains(Mesh2dPipelineKey::HDR) {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };
        let shader_defs = vec![
            #[cfg(feature = "webgl")]
            "SIXTEEN_BYTE_ALIGNMENT".into(),
        ];
        // Group 0: 2D mesh view bindings; group 1: LineGizmoUniform.
        let layout = vec![
            self.mesh_pipeline.view_layout.clone(),
            self.uniform_layout.clone(),
        ];
        // `None` joints should have been filtered out during queueing; log loudly
        // but still produce a (bevel) pipeline rather than panic.
        if key.joints == GizmoLineJoint::None {
            error!("There is no entry point for line joints with GizmoLineJoints::None. Please consider aborting the drawing process before reaching this stage.");
        };
        // The joint type selects the vertex entry point in line_joints.wgsl.
        let entry_point = match key.joints {
            GizmoLineJoint::Miter => "vertex_miter",
            GizmoLineJoint::Round(_) => "vertex_round",
            GizmoLineJoint::None | GizmoLineJoint::Bevel => "vertex_bevel",
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                entry_point: Some(entry_point.into()),
                shader_defs: shader_defs.clone(),
                buffers: line_joint_gizmo_vertex_buffer_layouts(),
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format,
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout,
            primitive: PrimitiveState::default(),
            // Depth test is `Always` and writes are disabled, same as the
            // segment pipeline above.
            depth_stencil: Some(DepthStencilState {
                format: CORE_2D_DEPTH_FORMAT,
                depth_write_enabled: false,
                depth_compare: CompareFunction::Always,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: key.mesh_key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some("LineJointGizmo Pipeline 2D".into()),
            ..default()
        }
    }
}
type DrawLineGizmo2d = (
SetItemPipeline,
SetMesh2dViewBindGroup<0>,
SetLineGizmoBindGroup<1>,
DrawLineGizmo<false>,
);
type DrawLineGizmo2dStrip = (
SetItemPipeline,
SetMesh2dViewBindGroup<0>,
SetLineGizmoBindGroup<1>,
DrawLineGizmo<true>,
);
type DrawLineJointGizmo2d = (
SetItemPipeline,
SetMesh2dViewBindGroup<0>,
SetLineGizmoBindGroup<1>,
DrawLineJointGizmo,
);
/// Queues 2D line-segment and joint draw calls for every extracted gizmo into
/// each view's `Transparent2d` phase, specializing pipelines per view settings
/// (MSAA/HDR) and per gizmo config (strip vs. list, style, joint type).
fn queue_line_and_joint_gizmos_2d(
    draw_functions: Res<DrawFunctions<Transparent2d>>,
    line_gizmo_pipeline: Res<LineGizmoPipeline>,
    line_joint_gizmo_pipeline: Res<LineJointGizmoPipeline>,
    mut line_gizmo_pipelines: ResMut<SpecializedRenderPipelines<LineGizmoPipeline>>,
    mut line_joint_gizmo_pipelines: ResMut<SpecializedRenderPipelines<LineJointGizmoPipeline>>,
    pipeline_cache: Res<PipelineCache>,
    line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>,
    line_gizmo_assets: Res<RenderAssets<GpuLineGizmo>>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<Transparent2d>>,
    mut views: Query<(&ExtractedView, &Msaa, Option<&RenderLayers>)>,
) {
    let draw_function = draw_functions.read().get_id::<DrawLineGizmo2d>().unwrap();
    let draw_line_function_strip = draw_functions
        .read()
        .get_id::<DrawLineGizmo2dStrip>()
        .unwrap();
    let draw_line_joint_function = draw_functions
        .read()
        .get_id::<DrawLineJointGizmo2d>()
        .unwrap();
    for (view, msaa, render_layers) in &mut views {
        // Views without a Transparent2d phase don't draw gizmos.
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        let mesh_key = Mesh2dPipelineKey::from_msaa_samples(msaa.samples())
            | Mesh2dPipelineKey::from_hdr(view.hdr);
        let render_layers = render_layers.unwrap_or_default();
        for (entity, main_entity, config) in &line_gizmos {
            // Only queue gizmos that share a render layer with this view.
            if !config.render_layers.intersects(render_layers) {
                continue;
            }
            // GPU buffers may not be prepared yet for this asset.
            let Some(line_gizmo) = line_gizmo_assets.get(&config.handle) else {
                continue;
            };
            // Draw lines
            if line_gizmo.list_vertex_count > 0 {
                let pipeline = line_gizmo_pipelines.specialize(
                    &pipeline_cache,
                    &line_gizmo_pipeline,
                    LineGizmoPipelineKey {
                        mesh_key,
                        strip: false,
                        line_style: config.line_style,
                    },
                );
                // `sort_key: INFINITY` sorts gizmos after all finite-keyed
                // transparent items.
                transparent_phase.add(Transparent2d {
                    entity: (entity, *main_entity),
                    draw_function,
                    pipeline,
                    sort_key: FloatOrd(f32::INFINITY),
                    batch_range: 0..1,
                    extra_index: PhaseItemExtraIndex::None,
                    extracted_index: usize::MAX,
                    indexed: false,
                });
            }
            if line_gizmo.strip_vertex_count >= 2 {
                let pipeline = line_gizmo_pipelines.specialize(
                    &pipeline_cache,
                    &line_gizmo_pipeline,
                    LineGizmoPipelineKey {
                        mesh_key,
                        strip: true,
                        line_style: config.line_style,
                    },
                );
                transparent_phase.add(Transparent2d {
                    entity: (entity, *main_entity),
                    draw_function: draw_line_function_strip,
                    pipeline,
                    sort_key: FloatOrd(f32::INFINITY),
                    batch_range: 0..1,
                    extra_index: PhaseItemExtraIndex::None,
                    extracted_index: usize::MAX,
                    indexed: false,
                });
            }
            // Draw line joints
            // Joints need at least 3 strip vertices and an actual joint type.
            if line_gizmo.strip_vertex_count < 3 || config.line_joints == GizmoLineJoint::None {
                continue;
            }
            let pipeline = line_joint_gizmo_pipelines.specialize(
                &pipeline_cache,
                &line_joint_gizmo_pipeline,
                LineJointGizmoPipelineKey {
                    mesh_key,
                    joints: config.line_joints,
                },
            );
            transparent_phase.add(Transparent2d {
                entity: (entity, *main_entity),
                draw_function: draw_line_joint_function,
                pipeline,
                sort_key: FloatOrd(f32::INFINITY),
                batch_range: 0..1,
                extra_index: PhaseItemExtraIndex::None,
                extracted_index: usize::MAX,
                indexed: false,
            });
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_utils/src/debug_info.rs | crates/bevy_utils/src/debug_info.rs | use crate::cfg;
cfg::alloc! {
use alloc::{borrow::Cow, fmt, string::String};
}
#[cfg(feature = "debug")]
use core::any::type_name;
use core::ops::Deref;
use disqualified::ShortName;
#[cfg(not(feature = "debug"))]
const FEATURE_DISABLED: &str = "Enable the debug feature to see the name";
/// Wrapper to help debugging ECS issues. This is used to display the names of systems, components, ...
///
/// * If the `debug` feature is enabled, the actual name will be used
/// * If it is disabled, a string mentioning the disabled feature will be used
#[derive(Clone, PartialEq, Eq)]
pub struct DebugName {
#[cfg(feature = "debug")]
name: Cow<'static, str>,
}
cfg::alloc! {
impl fmt::Display for DebugName {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
// Deref to `str`, which will use `FEATURE_DISABLED` if necessary
write!(f, "{}", &**self)
}
}
impl fmt::Debug for DebugName {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
// Deref to `str`, which will use `FEATURE_DISABLED` if necessary
write!(f, "{:?}", &**self)
}
}
}
impl DebugName {
/// Create a new `DebugName` from a `&str`
///
/// The value will be ignored if the `debug` feature is not enabled
#[cfg_attr(
not(feature = "debug"),
expect(
unused_variables,
reason = "The value will be ignored if the `debug` feature is not enabled"
)
)]
pub const fn borrowed(value: &'static str) -> Self {
DebugName {
#[cfg(feature = "debug")]
name: Cow::Borrowed(value),
}
}
cfg::alloc! {
/// Create a new `DebugName` from a `String`
///
/// The value will be ignored if the `debug` feature is not enabled
#[cfg_attr(
not(feature = "debug"),
expect(
unused_variables,
reason = "The value will be ignored if the `debug` feature is not enabled"
)
)]
pub fn owned(value: String) -> Self {
DebugName {
#[cfg(feature = "debug")]
name: Cow::Owned(value),
}
}
}
/// Create a new `DebugName` from a type by using its [`core::any::type_name`]
///
/// The value will be ignored if the `debug` feature is not enabled
pub fn type_name<T>() -> Self {
DebugName {
#[cfg(feature = "debug")]
name: Cow::Borrowed(type_name::<T>()),
}
}
/// Get the [`ShortName`] corresponding to this debug name
///
/// The value will be a static string if the `debug` feature is not enabled
pub fn shortname(&self) -> ShortName<'_> {
#[cfg(feature = "debug")]
return ShortName(self.name.as_ref());
#[cfg(not(feature = "debug"))]
return ShortName(FEATURE_DISABLED);
}
/// Return the string hold by this `DebugName`
///
/// This is intended for debugging purpose, and only available if the `debug` feature is enabled
#[cfg(feature = "debug")]
pub fn as_string(&self) -> String {
self.name.clone().into_owned()
}
}
impl Deref for DebugName {
    type Target = str;
    // Returns the stored name when the `debug` feature is enabled; otherwise a
    // fixed placeholder explaining that the feature is disabled.
    fn deref(&self) -> &Self::Target {
        #[cfg(feature = "debug")]
        return &self.name;
        #[cfg(not(feature = "debug"))]
        return FEATURE_DISABLED;
    }
}
cfg::alloc! {
impl From<Cow<'static, str>> for DebugName {
#[cfg_attr(
not(feature = "debug"),
expect(
unused_variables,
reason = "The value will be ignored if the `debug` feature is not enabled"
)
)]
fn from(value: Cow<'static, str>) -> Self {
Self {
#[cfg(feature = "debug")]
name: value,
}
}
}
impl From<String> for DebugName {
fn from(value: String) -> Self {
Self::owned(value)
}
}
impl From<DebugName> for Cow<'static, str> {
#[cfg_attr(
not(feature = "debug"),
expect(
unused_variables,
reason = "The value will be ignored if the `debug` feature is not enabled"
)
)]
fn from(value: DebugName) -> Self {
#[cfg(feature = "debug")]
{
value.name
}
#[cfg(not(feature = "debug"))]
{
Cow::Borrowed(FEATURE_DISABLED)
}
}
}
}
impl From<&'static str> for DebugName {
fn from(value: &'static str) -> Self {
Self::borrowed(value)
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_utils/src/lib.rs | crates/bevy_utils/src/lib.rs | #![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
#![no_std]
//! General utilities for first-party [Bevy] engine crates.
//!
//! [Bevy]: https://bevy.org/
/// Configuration information for this crate.
pub mod cfg {
pub(crate) use bevy_platform::cfg::*;
pub use bevy_platform::cfg::{alloc, std};
define_alias! {
#[cfg(feature = "parallel")] => {
/// Indicates the `Parallel` type is available.
parallel
}
}
}
cfg::std! {
extern crate std;
}
cfg::alloc! {
extern crate alloc;
mod map;
pub use map::*;
}
cfg::parallel! {
mod parallel_queue;
pub use parallel_queue::*;
}
/// The utilities prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
pub use crate::debug_info::DebugName;
pub use crate::default;
pub use disqualified::ShortName;
}
mod debug_info;
mod default;
mod once;
#[doc(hidden)]
pub use once::OnceFlag;
pub use default::default;
use core::mem::ManuallyDrop;
/// A guard that invokes a closure when it is dropped.
/// This can be used to ensure that cleanup code is run even in case of a panic.
///
/// Note that this only works for panics that [unwind](https://doc.rust-lang.org/nomicon/unwinding.html)
/// -- any code within `OnDrop` will be skipped if a panic does not unwind.
/// In most cases, this will just work.
///
/// Calling [`core::mem::forget`] on the guard defuses it, which enables
/// `try ... catch`-style patterns:
///
/// # Examples
///
/// ```
/// # use bevy_utils::OnDrop;
/// # fn test_panic(do_panic: bool, log: impl FnOnce(&str)) {
/// // Printed when `_catch` is dropped — including via an unwinding panic.
/// let _catch = OnDrop::new(|| log("Oops, a panic occurred and this function didn't complete!"));
///
/// // Some code that may panic...
/// // ...
/// # if do_panic { panic!() }
///
/// // Defuse the guard on the success path, so the message is only printed
/// // when a panic actually occurs.
/// core::mem::forget(_catch);
/// # }
/// #
/// # test_panic(false, |_| unreachable!());
/// # let mut did_log = false;
/// # std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
/// #     test_panic(true, |_| did_log = true);
/// # }));
/// # assert!(did_log);
/// ```
pub struct OnDrop<F: FnOnce()> {
    // `ManuallyDrop` lets `drop` move the closure out exactly once.
    callback: ManuallyDrop<F>,
}
impl<F: FnOnce()> OnDrop<F> {
    /// Returns a guard that invokes `callback` when dropped.
    pub fn new(callback: F) -> Self {
        let callback = ManuallyDrop::new(callback);
        Self { callback }
    }
}
impl<F: FnOnce()> Drop for OnDrop<F> {
    fn drop(&mut self) {
        #![expect(
            unsafe_code,
            reason = "Taking from a ManuallyDrop requires unsafe code."
        )]
        // SAFETY: `self` is currently being dropped and can never be observed
        // again, so the callback may be moved out of the `ManuallyDrop` once.
        let cleanup = unsafe { ManuallyDrop::take(&mut self.callback) };
        cleanup();
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.