repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/erased_render_asset.rs
crates/bevy_render/src/erased_render_asset.rs
use crate::{ render_resource::AsBindGroupError, ExtractSchedule, MainWorld, Render, RenderApp, RenderSystems, Res, }; use bevy_app::{App, Plugin, SubApp}; use bevy_asset::RenderAssetUsages; use bevy_asset::{Asset, AssetEvent, AssetId, Assets, UntypedAssetId}; use bevy_ecs::{ prelude::{Commands, IntoScheduleConfigs, MessageReader, ResMut, Resource}, schedule::{ScheduleConfigs, SystemSet}, system::{ScheduleSystem, StaticSystemParam, SystemParam, SystemParamItem, SystemState}, world::{FromWorld, Mut}, }; use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::render_asset::RenderAssetBytesPerFrameLimiter; use core::marker::PhantomData; use thiserror::Error; use tracing::{debug, error}; #[derive(Debug, Error)] pub enum PrepareAssetError<E: Send + Sync + 'static> { #[error("Failed to prepare asset")] RetryNextUpdate(E), #[error("Failed to build bind group: {0}")] AsBindGroupError(AsBindGroupError), } /// The system set during which we extract modified assets to the render world. #[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)] pub struct AssetExtractionSystems; /// Describes how an asset gets extracted and prepared for rendering. /// /// In the [`ExtractSchedule`] step the [`ErasedRenderAsset::SourceAsset`] is transferred /// from the "main world" into the "render world". /// /// After that in the [`RenderSystems::PrepareAssets`] step the extracted asset /// is transformed into its GPU-representation of type [`ErasedRenderAsset`]. pub trait ErasedRenderAsset: Send + Sync + 'static { /// The representation of the asset in the "main world". type SourceAsset: Asset + Clone; /// The target representation of the asset in the "render world". type ErasedAsset: Send + Sync + 'static + Sized; /// Specifies all ECS data required by [`ErasedRenderAsset::prepare_asset`]. /// /// For convenience use the [`lifetimeless`](bevy_ecs::system::lifetimeless) [`SystemParam`]. 
type Param: SystemParam; /// Whether or not to unload the asset after extracting it to the render world. #[inline] fn asset_usage(_source_asset: &Self::SourceAsset) -> RenderAssetUsages { RenderAssetUsages::default() } /// Size of the data the asset will upload to the gpu. Specifying a return value /// will allow the asset to be throttled via [`RenderAssetBytesPerFrameLimiter`]. #[inline] #[expect( unused_variables, reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." )] fn byte_len(erased_asset: &Self::SourceAsset) -> Option<usize> { None } /// Prepares the [`ErasedRenderAsset::SourceAsset`] for the GPU by transforming it into a [`ErasedRenderAsset`]. /// /// ECS data may be accessed via `param`. fn prepare_asset( source_asset: Self::SourceAsset, asset_id: AssetId<Self::SourceAsset>, param: &mut SystemParamItem<Self::Param>, ) -> Result<Self::ErasedAsset, PrepareAssetError<Self::SourceAsset>>; /// Called whenever the [`ErasedRenderAsset::SourceAsset`] has been removed. /// /// You can implement this method if you need to access ECS data (via /// `_param`) in order to perform cleanup tasks when the asset is removed. /// /// The default implementation does nothing. fn unload_asset( _source_asset: AssetId<Self::SourceAsset>, _param: &mut SystemParamItem<Self::Param>, ) { } } /// This plugin extracts the changed assets from the "app world" into the "render world" /// and prepares them for the GPU. They can then be accessed from the [`ErasedRenderAssets`] resource. /// /// Therefore it sets up the [`ExtractSchedule`] and /// [`RenderSystems::PrepareAssets`] steps for the specified [`ErasedRenderAsset`]. /// /// The `AFTER` generic parameter can be used to specify that `A::prepare_asset` should not be run until /// `prepare_assets::<AFTER>` has completed. 
This allows the `prepare_asset` function to depend on another /// prepared [`ErasedRenderAsset`], for example `Mesh::prepare_asset` relies on `ErasedRenderAssets::<GpuImage>` for morph /// targets, so the plugin is created as `ErasedRenderAssetPlugin::<RenderMesh, GpuImage>::default()`. pub struct ErasedRenderAssetPlugin< A: ErasedRenderAsset, AFTER: ErasedRenderAssetDependency + 'static = (), > { phantom: PhantomData<fn() -> (A, AFTER)>, } impl<A: ErasedRenderAsset, AFTER: ErasedRenderAssetDependency + 'static> Default for ErasedRenderAssetPlugin<A, AFTER> { fn default() -> Self { Self { phantom: Default::default(), } } } impl<A: ErasedRenderAsset, AFTER: ErasedRenderAssetDependency + 'static> Plugin for ErasedRenderAssetPlugin<A, AFTER> { fn build(&self, app: &mut App) { app.init_resource::<CachedExtractErasedRenderAssetSystemState<A>>(); } fn finish(&self, app: &mut App) { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app .init_resource::<ExtractedAssets<A>>() .init_resource::<ErasedRenderAssets<A::ErasedAsset>>() .init_resource::<PrepareNextFrameAssets<A>>() .add_systems( ExtractSchedule, extract_erased_render_asset::<A>.in_set(AssetExtractionSystems), ); AFTER::register_system( render_app, prepare_erased_assets::<A>.in_set(RenderSystems::PrepareAssets), ); } } } // helper to allow specifying dependencies between render assets pub trait ErasedRenderAssetDependency { fn register_system(render_app: &mut SubApp, system: ScheduleConfigs<ScheduleSystem>); } impl ErasedRenderAssetDependency for () { fn register_system(render_app: &mut SubApp, system: ScheduleConfigs<ScheduleSystem>) { render_app.add_systems(Render, system); } } impl<A: ErasedRenderAsset> ErasedRenderAssetDependency for A { fn register_system(render_app: &mut SubApp, system: ScheduleConfigs<ScheduleSystem>) { render_app.add_systems(Render, system.after(prepare_erased_assets::<A>)); } } /// Temporarily stores the extracted and removed assets of the current frame. 
#[derive(Resource)] pub struct ExtractedAssets<A: ErasedRenderAsset> { /// The assets extracted this frame. /// /// These are assets that were either added or modified this frame. pub extracted: Vec<(AssetId<A::SourceAsset>, A::SourceAsset)>, /// IDs of the assets that were removed this frame. /// /// These assets will not be present in [`ExtractedAssets::extracted`]. pub removed: HashSet<AssetId<A::SourceAsset>>, /// IDs of the assets that were modified this frame. pub modified: HashSet<AssetId<A::SourceAsset>>, /// IDs of the assets that were added this frame. pub added: HashSet<AssetId<A::SourceAsset>>, } impl<A: ErasedRenderAsset> Default for ExtractedAssets<A> { fn default() -> Self { Self { extracted: Default::default(), removed: Default::default(), modified: Default::default(), added: Default::default(), } } } /// Stores all GPU representations ([`ErasedRenderAsset`]) /// of [`ErasedRenderAsset::SourceAsset`] as long as they exist. #[derive(Resource)] pub struct ErasedRenderAssets<ERA>(HashMap<UntypedAssetId, ERA>); impl<ERA> Default for ErasedRenderAssets<ERA> { fn default() -> Self { Self(Default::default()) } } impl<ERA> ErasedRenderAssets<ERA> { pub fn get(&self, id: impl Into<UntypedAssetId>) -> Option<&ERA> { self.0.get(&id.into()) } pub fn get_mut(&mut self, id: impl Into<UntypedAssetId>) -> Option<&mut ERA> { self.0.get_mut(&id.into()) } pub fn insert(&mut self, id: impl Into<UntypedAssetId>, value: ERA) -> Option<ERA> { self.0.insert(id.into(), value) } pub fn remove(&mut self, id: impl Into<UntypedAssetId>) -> Option<ERA> { self.0.remove(&id.into()) } pub fn iter(&self) -> impl Iterator<Item = (UntypedAssetId, &ERA)> { self.0.iter().map(|(k, v)| (*k, v)) } pub fn iter_mut(&mut self) -> impl Iterator<Item = (UntypedAssetId, &mut ERA)> { self.0.iter_mut().map(|(k, v)| (*k, v)) } } #[derive(Resource)] struct CachedExtractErasedRenderAssetSystemState<A: ErasedRenderAsset> { state: SystemState<( MessageReader<'static, 'static, 
AssetEvent<A::SourceAsset>>, ResMut<'static, Assets<A::SourceAsset>>, )>, } impl<A: ErasedRenderAsset> FromWorld for CachedExtractErasedRenderAssetSystemState<A> { fn from_world(world: &mut bevy_ecs::world::World) -> Self { Self { state: SystemState::new(world), } } } /// This system extracts all created or modified assets of the corresponding [`ErasedRenderAsset::SourceAsset`] type /// into the "render world". pub(crate) fn extract_erased_render_asset<A: ErasedRenderAsset>( mut commands: Commands, mut main_world: ResMut<MainWorld>, ) { main_world.resource_scope( |world, mut cached_state: Mut<CachedExtractErasedRenderAssetSystemState<A>>| { let (mut events, mut assets) = cached_state.state.get_mut(world); let mut needs_extracting = <HashSet<_>>::default(); let mut removed = <HashSet<_>>::default(); let mut modified = <HashSet<_>>::default(); for event in events.read() { #[expect( clippy::match_same_arms, reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon." )] match event { AssetEvent::Added { id } => { needs_extracting.insert(*id); } AssetEvent::Modified { id } => { needs_extracting.insert(*id); modified.insert(*id); } AssetEvent::Removed { .. } => { // We don't care that the asset was removed from Assets<T> in the main world. // An asset is only removed from ErasedRenderAssets<T> when its last handle is dropped (AssetEvent::Unused). } AssetEvent::Unused { id } => { needs_extracting.remove(id); modified.remove(id); removed.insert(*id); } AssetEvent::LoadedWithDependencies { .. 
} => { // TODO: handle this } } } let mut extracted_assets = Vec::new(); let mut added = <HashSet<_>>::default(); for id in needs_extracting.drain() { if let Some(asset) = assets.get(id) { let asset_usage = A::asset_usage(asset); if asset_usage.contains(RenderAssetUsages::RENDER_WORLD) { if asset_usage == RenderAssetUsages::RENDER_WORLD { if let Some(asset) = assets.remove(id) { extracted_assets.push((id, asset)); added.insert(id); } } else { extracted_assets.push((id, asset.clone())); added.insert(id); } } } } commands.insert_resource(ExtractedAssets::<A> { extracted: extracted_assets, removed, modified, added, }); cached_state.state.apply(world); }, ); } // TODO: consider storing inside system? /// All assets that should be prepared next frame. #[derive(Resource)] pub struct PrepareNextFrameAssets<A: ErasedRenderAsset> { assets: Vec<(AssetId<A::SourceAsset>, A::SourceAsset)>, } impl<A: ErasedRenderAsset> Default for PrepareNextFrameAssets<A> { fn default() -> Self { Self { assets: Default::default(), } } } /// This system prepares all assets of the corresponding [`ErasedRenderAsset::SourceAsset`] type /// which where extracted this frame for the GPU. 
pub fn prepare_erased_assets<A: ErasedRenderAsset>( mut extracted_assets: ResMut<ExtractedAssets<A>>, mut render_assets: ResMut<ErasedRenderAssets<A::ErasedAsset>>, mut prepare_next_frame: ResMut<PrepareNextFrameAssets<A>>, param: StaticSystemParam<<A as ErasedRenderAsset>::Param>, bpf: Res<RenderAssetBytesPerFrameLimiter>, ) { let mut wrote_asset_count = 0; let mut param = param.into_inner(); let queued_assets = core::mem::take(&mut prepare_next_frame.assets); for (id, extracted_asset) in queued_assets { if extracted_assets.removed.contains(&id) || extracted_assets.added.contains(&id) { // skip previous frame's assets that have been removed or updated continue; } let write_bytes = if let Some(size) = A::byte_len(&extracted_asset) { // we could check if available bytes > byte_len here, but we want to make some // forward progress even if the asset is larger than the max bytes per frame. // this way we always write at least one (sized) asset per frame. // in future we could also consider partial asset uploads. if bpf.exhausted() { prepare_next_frame.assets.push((id, extracted_asset)); continue; } size } else { 0 }; match A::prepare_asset(extracted_asset, id, &mut param) { Ok(prepared_asset) => { render_assets.insert(id, prepared_asset); bpf.write_bytes(write_bytes); wrote_asset_count += 1; } Err(PrepareAssetError::RetryNextUpdate(extracted_asset)) => { prepare_next_frame.assets.push((id, extracted_asset)); } Err(PrepareAssetError::AsBindGroupError(e)) => { error!( "{} Bind group construction failed: {e}", core::any::type_name::<A>() ); } } } for removed in extracted_assets.removed.drain() { render_assets.remove(removed); A::unload_asset(removed, &mut param); } for (id, extracted_asset) in extracted_assets.extracted.drain(..) { // we remove previous here to ensure that if we are updating the asset then // any users will not see the old asset after a new asset is extracted, // even if the new asset is not yet ready or we are out of bytes to write. 
render_assets.remove(id); let write_bytes = if let Some(size) = A::byte_len(&extracted_asset) { if bpf.exhausted() { prepare_next_frame.assets.push((id, extracted_asset)); continue; } size } else { 0 }; match A::prepare_asset(extracted_asset, id, &mut param) { Ok(prepared_asset) => { render_assets.insert(id, prepared_asset); bpf.write_bytes(write_bytes); wrote_asset_count += 1; } Err(PrepareAssetError::RetryNextUpdate(extracted_asset)) => { prepare_next_frame.assets.push((id, extracted_asset)); } Err(PrepareAssetError::AsBindGroupError(e)) => { error!( "{} Bind group construction failed: {e}", core::any::type_name::<A>() ); } } } if bpf.exhausted() && !prepare_next_frame.assets.is_empty() { debug!( "{} write budget exhausted with {} assets remaining (wrote {})", core::any::type_name::<A>(), prepare_next_frame.assets.len(), wrote_asset_count ); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/pipelined_rendering.rs
crates/bevy_render/src/pipelined_rendering.rs
use async_channel::{Receiver, Sender}; use bevy_app::{App, AppExit, AppLabel, Plugin, SubApp}; use bevy_ecs::{ resource::Resource, schedule::MainThreadExecutor, world::{Mut, World}, }; use bevy_tasks::ComputeTaskPool; use crate::RenderApp; /// A Label for the sub app that runs the parts of pipelined rendering that need to run on the main thread. /// /// The Main schedule of this app can be used to run logic after the render schedule starts, but /// before I/O processing. This can be useful for something like frame pacing. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, AppLabel)] pub struct RenderExtractApp; /// Channels used by the main app to send and receive the render app. #[derive(Resource)] pub struct RenderAppChannels { app_to_render_sender: Sender<SubApp>, render_to_app_receiver: Receiver<SubApp>, render_app_in_render_thread: bool, } impl RenderAppChannels { /// Create a `RenderAppChannels` from a [`async_channel::Receiver`] and [`async_channel::Sender`] pub fn new( app_to_render_sender: Sender<SubApp>, render_to_app_receiver: Receiver<SubApp>, ) -> Self { Self { app_to_render_sender, render_to_app_receiver, render_app_in_render_thread: false, } } /// Send the `render_app` to the rendering thread. pub fn send_blocking(&mut self, render_app: SubApp) { self.app_to_render_sender.send_blocking(render_app).unwrap(); self.render_app_in_render_thread = true; } /// Receive the `render_app` from the rendering thread. /// Return `None` if the render thread has panicked. pub async fn recv(&mut self) -> Option<SubApp> { let render_app = self.render_to_app_receiver.recv().await.ok()?; self.render_app_in_render_thread = false; Some(render_app) } } impl Drop for RenderAppChannels { fn drop(&mut self) { if self.render_app_in_render_thread { // Any non-send data in the render world was initialized on the main thread. // So on dropping the main world and ending the app, we block and wait for // the render world to return to drop it. 
Which allows the non-send data // drop methods to run on the correct thread. self.render_to_app_receiver.recv_blocking().ok(); } } } /// The [`PipelinedRenderingPlugin`] can be added to your application to enable pipelined rendering. /// /// This moves rendering into a different thread, so that the Nth frame's rendering can /// be run at the same time as the N + 1 frame's simulation. /// /// ```text /// |--------------------|--------------------|--------------------|--------------------| /// | simulation thread | frame 1 simulation | frame 2 simulation | frame 3 simulation | /// |--------------------|--------------------|--------------------|--------------------| /// | rendering thread | | frame 1 rendering | frame 2 rendering | /// |--------------------|--------------------|--------------------|--------------------| /// ``` /// /// The plugin is dependent on the [`RenderApp`] added by [`crate::RenderPlugin`] and so must /// be added after that plugin. If it is not added after, the plugin will do nothing. /// /// A single frame of execution looks something like below /// /// ```text /// |---------------------------------------------------------------------------| /// | | | RenderExtractApp schedule | winit events | main schedule | /// | sync | extract |----------------------------------------------------------| /// | | | extract commands | rendering schedule | /// |---------------------------------------------------------------------------| /// ``` /// /// - `sync` is the step where the entity-entity mapping between the main and render world is updated. /// This is run on the main app's thread. For more information checkout [`SyncWorldPlugin`]. /// - `extract` is the step where data is copied from the main world to the render world. /// This is run on the main app's thread. /// - On the render thread, we first apply the `extract commands`. This is not run during extract, so the /// main schedule can start sooner. /// - Then the `rendering schedule` is run. 
See [`RenderSystems`](crate::RenderSystems) for the standard steps in this process. /// - In parallel to the rendering thread the [`RenderExtractApp`] schedule runs. By /// default, this schedule is empty. But it is useful if you need something to run before I/O processing. /// - Next all the `winit events` are processed. /// - And finally the `main app schedule` is run. /// - Once both the `main app schedule` and the `render schedule` are finished running, `extract` is run again. /// /// [`SyncWorldPlugin`]: crate::sync_world::SyncWorldPlugin #[derive(Default)] pub struct PipelinedRenderingPlugin; impl Plugin for PipelinedRenderingPlugin { fn build(&self, app: &mut App) { // Don't add RenderExtractApp if RenderApp isn't initialized. if app.get_sub_app(RenderApp).is_none() { return; } app.insert_resource(MainThreadExecutor::new()); let mut sub_app = SubApp::new(); sub_app.set_extract(renderer_extract); app.insert_sub_app(RenderExtractApp, sub_app); } // Sets up the render thread and inserts resources into the main app used for controlling the render thread. fn cleanup(&self, app: &mut App) { // skip setting up when headless if app.get_sub_app(RenderExtractApp).is_none() { return; } let (app_to_render_sender, app_to_render_receiver) = async_channel::bounded::<SubApp>(1); let (render_to_app_sender, render_to_app_receiver) = async_channel::bounded::<SubApp>(1); let mut render_app = app .remove_sub_app(RenderApp) .expect("Unable to get RenderApp. 
Another plugin may have removed the RenderApp before PipelinedRenderingPlugin"); // clone main thread executor to render world let executor = app.world().get_resource::<MainThreadExecutor>().unwrap(); render_app.world_mut().insert_resource(executor.clone()); render_to_app_sender.send_blocking(render_app).unwrap(); app.insert_resource(RenderAppChannels::new( app_to_render_sender, render_to_app_receiver, )); std::thread::spawn(move || { #[cfg(feature = "trace")] let _span = tracing::info_span!("render thread").entered(); let compute_task_pool = ComputeTaskPool::get(); loop { // run a scope here to allow main world to use this thread while it's waiting for the render app let sent_app = compute_task_pool .scope(|s| { s.spawn(async { app_to_render_receiver.recv().await }); }) .pop(); let Some(Ok(mut render_app)) = sent_app else { break; }; { #[cfg(feature = "trace")] let _sub_app_span = tracing::info_span!("sub app", name = ?RenderApp).entered(); render_app.update(); } if render_to_app_sender.send_blocking(render_app).is_err() { break; } } tracing::debug!("exiting pipelined rendering thread"); }); } } // This function waits for the rendering world to be received, // runs extract, and then sends the rendering world back to the render thread. fn renderer_extract(app_world: &mut World, _world: &mut World) { app_world.resource_scope(|world, main_thread_executor: Mut<MainThreadExecutor>| { world.resource_scope(|world, mut render_channels: Mut<RenderAppChannels>| { // we use a scope here to run any main thread tasks that the render world still needs to run // while we wait for the render world to be received. if let Some(mut render_app) = ComputeTaskPool::get() .scope_with_executor(true, Some(&*main_thread_executor.0), |s| { s.spawn(async { render_channels.recv().await }); }) .pop() .unwrap() { render_app.extract(world); render_channels.send_blocking(render_app); } else { // Renderer thread panicked world.write_message(AppExit::error()); } }); }); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/pipeline_specializer.rs
crates/bevy_render/src/render_resource/pipeline_specializer.rs
use crate::render_resource::{ CachedComputePipelineId, CachedRenderPipelineId, ComputePipelineDescriptor, PipelineCache, RenderPipelineDescriptor, }; use bevy_ecs::resource::Resource; use bevy_mesh::{MeshVertexBufferLayoutRef, MissingVertexAttributeError, VertexBufferLayout}; use bevy_platform::{ collections::{ hash_map::{Entry, RawEntryMut, VacantEntry}, HashMap, }, hash::FixedHasher, }; use bevy_utils::default; use core::{fmt::Debug, hash::Hash}; use thiserror::Error; use tracing::error; /// A trait that allows constructing different variants of a render pipeline from a key. /// /// Note: This is intended for modifying your pipeline descriptor on the basis of a key. If your key /// contains no data then you don't need to specialize. For example, if you are using the /// [`AsBindGroup`](crate::render_resource::AsBindGroup) without the `#[bind_group_data]` attribute, /// you don't need to specialize. Instead, create the pipeline directly from [`PipelineCache`] and /// store its ID. /// /// See [`SpecializedRenderPipelines`] for more info. pub trait SpecializedRenderPipeline { /// The key that defines each "variant" of the render pipeline. type Key: Clone + Hash + PartialEq + Eq; /// Construct a new render pipeline based on the provided key. fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor; } /// A convenience cache for creating different variants of a render pipeline based on some key. /// /// Some render pipelines may need to be configured differently depending on the exact situation. /// This cache allows constructing different render pipelines for each situation based on a key, /// making it easy to A) construct the necessary pipelines, and B) reuse already constructed /// pipelines. /// /// Note: This is intended for modifying your pipeline descriptor on the basis of a key. If your key /// contains no data then you don't need to specialize. 
For example, if you are using the /// [`AsBindGroup`](crate::render_resource::AsBindGroup) without the `#[bind_group_data]` attribute, /// you don't need to specialize. Instead, create the pipeline directly from [`PipelineCache`] and /// store its ID. #[derive(Resource)] pub struct SpecializedRenderPipelines<S: SpecializedRenderPipeline> { cache: HashMap<S::Key, CachedRenderPipelineId>, } impl<S: SpecializedRenderPipeline> Default for SpecializedRenderPipelines<S> { fn default() -> Self { Self { cache: default() } } } impl<S: SpecializedRenderPipeline> SpecializedRenderPipelines<S> { /// Get or create a specialized instance of the pipeline corresponding to `key`. pub fn specialize( &mut self, cache: &PipelineCache, pipeline_specializer: &S, key: S::Key, ) -> CachedRenderPipelineId { *self.cache.entry(key.clone()).or_insert_with(|| { let descriptor = pipeline_specializer.specialize(key); cache.queue_render_pipeline(descriptor) }) } } /// A trait that allows constructing different variants of a compute pipeline from a key. /// /// Note: This is intended for modifying your pipeline descriptor on the basis of a key. If your key /// contains no data then you don't need to specialize. For example, if you are using the /// [`AsBindGroup`](crate::render_resource::AsBindGroup) without the `#[bind_group_data]` attribute, /// you don't need to specialize. Instead, create the pipeline directly from [`PipelineCache`] and /// store its ID. /// /// See [`SpecializedComputePipelines`] for more info. pub trait SpecializedComputePipeline { /// The key that defines each "variant" of the compute pipeline. type Key: Clone + Hash + PartialEq + Eq; /// Construct a new compute pipeline based on the provided key. fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor; } /// A convenience cache for creating different variants of a compute pipeline based on some key. /// /// Some compute pipelines may need to be configured differently depending on the exact situation. 
/// This cache allows constructing different compute pipelines for each situation based on a key, /// making it easy to A) construct the necessary pipelines, and B) reuse already constructed /// pipelines. /// /// Note: This is intended for modifying your pipeline descriptor on the basis of a key. If your key /// contains no data then you don't need to specialize. For example, if you are using the /// [`AsBindGroup`](crate::render_resource::AsBindGroup) without the `#[bind_group_data]` attribute, /// you don't need to specialize. Instead, create the pipeline directly from [`PipelineCache`] and /// store its ID. #[derive(Resource)] pub struct SpecializedComputePipelines<S: SpecializedComputePipeline> { cache: HashMap<S::Key, CachedComputePipelineId>, } impl<S: SpecializedComputePipeline> Default for SpecializedComputePipelines<S> { fn default() -> Self { Self { cache: default() } } } impl<S: SpecializedComputePipeline> SpecializedComputePipelines<S> { /// Get or create a specialized instance of the pipeline corresponding to `key`. pub fn specialize( &mut self, cache: &PipelineCache, specialize_pipeline: &S, key: S::Key, ) -> CachedComputePipelineId { *self.cache.entry(key.clone()).or_insert_with(|| { let descriptor = specialize_pipeline.specialize(key); cache.queue_compute_pipeline(descriptor) }) } } /// A trait that allows constructing different variants of a render pipeline from a key and the /// particular mesh's vertex buffer layout. /// /// See [`SpecializedMeshPipelines`] for more info. pub trait SpecializedMeshPipeline { /// The key that defines each "variant" of the render pipeline. type Key: Clone + Hash + PartialEq + Eq; /// Construct a new render pipeline based on the provided key and vertex layout. /// /// The returned pipeline descriptor should have a single vertex buffer, which is derived from /// `layout`. 
fn specialize( &self, key: Self::Key, layout: &MeshVertexBufferLayoutRef, ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError>; } /// A cache of different variants of a render pipeline based on a key and the particular mesh's /// vertex buffer layout. #[derive(Resource)] pub struct SpecializedMeshPipelines<S: SpecializedMeshPipeline> { mesh_layout_cache: HashMap<(MeshVertexBufferLayoutRef, S::Key), CachedRenderPipelineId>, vertex_layout_cache: VertexLayoutCache<S>, } type VertexLayoutCache<S> = HashMap< VertexBufferLayout, HashMap<<S as SpecializedMeshPipeline>::Key, CachedRenderPipelineId>, >; impl<S: SpecializedMeshPipeline> Default for SpecializedMeshPipelines<S> { fn default() -> Self { Self { mesh_layout_cache: Default::default(), vertex_layout_cache: Default::default(), } } } impl<S: SpecializedMeshPipeline> SpecializedMeshPipelines<S> { /// Construct a new render pipeline based on the provided key and the mesh's vertex buffer /// layout. #[inline] pub fn specialize( &mut self, cache: &PipelineCache, pipeline_specializer: &S, key: S::Key, layout: &MeshVertexBufferLayoutRef, ) -> Result<CachedRenderPipelineId, SpecializedMeshPipelineError> { return match self.mesh_layout_cache.entry((layout.clone(), key.clone())) { Entry::Occupied(entry) => Ok(*entry.into_mut()), Entry::Vacant(entry) => specialize_slow( &mut self.vertex_layout_cache, cache, pipeline_specializer, key, layout, entry, ), }; #[cold] fn specialize_slow<S>( vertex_layout_cache: &mut VertexLayoutCache<S>, cache: &PipelineCache, specialize_pipeline: &S, key: S::Key, layout: &MeshVertexBufferLayoutRef, entry: VacantEntry< (MeshVertexBufferLayoutRef, S::Key), CachedRenderPipelineId, FixedHasher, >, ) -> Result<CachedRenderPipelineId, SpecializedMeshPipelineError> where S: SpecializedMeshPipeline, { let descriptor = specialize_pipeline .specialize(key.clone(), layout) .map_err(|mut err| { { let SpecializedMeshPipelineError::MissingVertexAttribute(err) = &mut err; err.pipeline_type = 
Some(core::any::type_name::<S>()); } err })?; // Different MeshVertexBufferLayouts can produce the same final VertexBufferLayout // We want compatible vertex buffer layouts to use the same pipelines, so we must "deduplicate" them let layout_map = match vertex_layout_cache .raw_entry_mut() .from_key(&descriptor.vertex.buffers[0]) { RawEntryMut::Occupied(entry) => entry.into_mut(), RawEntryMut::Vacant(entry) => { entry .insert(descriptor.vertex.buffers[0].clone(), Default::default()) .1 } }; Ok(*entry.insert(match layout_map.entry(key) { Entry::Occupied(entry) => { if cfg!(debug_assertions) { let stored_descriptor = cache.get_render_pipeline_descriptor(*entry.get()); if stored_descriptor != &descriptor { error!( "The cached pipeline descriptor for {} is not \ equal to the generated descriptor for the given key. \ This means the SpecializePipeline implementation uses \ unused' MeshVertexBufferLayout information to specialize \ the pipeline. This is not allowed because it would invalidate \ the pipeline cache.", core::any::type_name::<S>() ); } } *entry.into_mut() } Entry::Vacant(entry) => *entry.insert(cache.queue_render_pipeline(descriptor)), })) } } } #[derive(Error, Debug)] pub enum SpecializedMeshPipelineError { #[error(transparent)] MissingVertexAttribute(#[from] MissingVertexAttributeError), }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/batched_uniform_buffer.rs
crates/bevy_render/src/render_resource/batched_uniform_buffer.rs
use super::{GpuArrayBufferIndex, GpuArrayBufferable}; use crate::{ render_resource::DynamicUniformBuffer, renderer::{RenderDevice, RenderQueue}, }; use core::{marker::PhantomData, num::NonZero}; use encase::{ private::{ArrayMetadata, BufferMut, Metadata, RuntimeSizedArray, WriteInto, Writer}, ShaderType, }; use nonmax::NonMaxU32; use wgpu::{BindingResource, Limits}; // 1MB else we will make really large arrays on macOS which reports very large // `max_uniform_buffer_binding_size`. On macOS this ends up being the minimum // size of the uniform buffer as well as the size of each chunk of data at a // dynamic offset. #[cfg(any( not(feature = "webgl"), not(target_arch = "wasm32"), feature = "webgpu" ))] const MAX_REASONABLE_UNIFORM_BUFFER_BINDING_SIZE: u32 = 1 << 20; // WebGL2 quirk: using uniform buffers larger than 4KB will cause extremely // long shader compilation times, so the limit needs to be lower on WebGL2. // This is due to older shader compilers/GPUs that don't support dynamically // indexing uniform buffers, and instead emulate it with large switch statements // over buffer indices that take a long time to compile. #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] const MAX_REASONABLE_UNIFORM_BUFFER_BINDING_SIZE: u32 = 1 << 12; /// Similar to [`DynamicUniformBuffer`], except every N elements (depending on size) /// are grouped into a batch as an `array<T, N>` in WGSL. /// /// This reduces the number of rebindings required due to having to pass dynamic /// offsets to bind group commands, and if indices into the array can be passed /// in via other means, it enables batching of draw commands. pub struct BatchedUniformBuffer<T: GpuArrayBufferable> { // Batches of fixed-size arrays of T are written to this buffer so that // each batch in a fixed-size array can be bound at a dynamic offset. 
uniforms: DynamicUniformBuffer<MaxCapacityArray<Vec<T>>>, // A batch of T are gathered into this `MaxCapacityArray` until it is full, // then it is written into the `DynamicUniformBuffer`, cleared, and new T // are gathered here, and so on for each batch. temp: MaxCapacityArray<Vec<T>>, current_offset: u32, dynamic_offset_alignment: u32, } impl<T: GpuArrayBufferable> BatchedUniformBuffer<T> { pub fn batch_size(limits: &Limits) -> usize { (limits .max_uniform_buffer_binding_size .min(MAX_REASONABLE_UNIFORM_BUFFER_BINDING_SIZE) as u64 / T::min_size().get()) as usize } pub fn new(limits: &Limits) -> Self { let capacity = Self::batch_size(limits); let alignment = limits.min_uniform_buffer_offset_alignment; Self { uniforms: DynamicUniformBuffer::new_with_alignment(alignment as u64), temp: MaxCapacityArray(Vec::with_capacity(capacity), capacity), current_offset: 0, dynamic_offset_alignment: alignment, } } #[inline] pub fn size(&self) -> NonZero<u64> { self.temp.size() } pub fn clear(&mut self) { self.uniforms.clear(); self.current_offset = 0; self.temp.0.clear(); } pub fn push(&mut self, component: T) -> GpuArrayBufferIndex<T> { let result = GpuArrayBufferIndex { index: self.temp.0.len() as u32, dynamic_offset: NonMaxU32::new(self.current_offset), element_type: PhantomData, }; self.temp.0.push(component); if self.temp.0.len() == self.temp.1 { self.flush(); } result } pub fn flush(&mut self) { self.uniforms.push(&self.temp); self.current_offset += align_to_next(self.temp.size().get(), self.dynamic_offset_alignment as u64) as u32; self.temp.0.clear(); } pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { if !self.temp.0.is_empty() { self.flush(); } self.uniforms.write_buffer(device, queue); } #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { let mut binding = self.uniforms.binding(); if let Some(BindingResource::Buffer(binding)) = &mut binding { // MaxCapacityArray is runtime-sized so can't use T::min_size() binding.size = 
Some(self.size()); } binding } } #[inline] fn align_to_next(value: u64, alignment: u64) -> u64 { debug_assert!(alignment.is_power_of_two()); ((value - 1) | (alignment - 1)) + 1 } // ---------------------------------------------------------------------------- // MaxCapacityArray was implemented by Teodor Tanasoaia for encase. It was // copied here as it was not yet included in an encase release and it is // unclear if it is the correct long-term solution for encase. #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)] struct MaxCapacityArray<T>(T, usize); impl<T> ShaderType for MaxCapacityArray<T> where T: ShaderType<ExtraMetadata = ArrayMetadata>, { type ExtraMetadata = ArrayMetadata; const METADATA: Metadata<Self::ExtraMetadata> = T::METADATA; fn size(&self) -> NonZero<u64> { Self::METADATA.stride().mul(self.1.max(1) as u64).0 } } impl<T> WriteInto for MaxCapacityArray<T> where T: WriteInto + RuntimeSizedArray, { fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) { debug_assert!(self.0.len() <= self.1); self.0.write_into(writer); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/buffer_vec.rs
crates/bevy_render/src/render_resource/buffer_vec.rs
use core::{iter, marker::PhantomData}; use crate::{ render_resource::Buffer, renderer::{RenderDevice, RenderQueue}, }; use bytemuck::{must_cast_slice, NoUninit}; use encase::{ internal::{WriteInto, Writer}, ShaderType, }; use thiserror::Error; use wgpu::{BindingResource, BufferAddress, BufferUsages}; use super::GpuArrayBufferable; /// A structure for storing raw bytes that have already been properly formatted /// for use by the GPU. /// /// "Properly formatted" means that item data already meets the alignment and padding /// requirements for how it will be used on the GPU. The item type must implement [`NoUninit`] /// for its data representation to be directly copyable. /// /// Index, vertex, and instance-rate vertex buffers have no alignment nor padding requirements and /// so this helper type is a good choice for them. /// /// The contained data is stored in system RAM. Calling [`reserve`](RawBufferVec::reserve) /// allocates VRAM from the [`RenderDevice`]. /// [`write_buffer`](RawBufferVec::write_buffer) queues copying of the data /// from system RAM to VRAM. /// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`] /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) pub struct RawBufferVec<T: NoUninit> { values: Vec<T>, buffer: Option<Buffer>, capacity: usize, item_size: usize, buffer_usage: BufferUsages, label: Option<String>, changed: bool, } impl<T: NoUninit> RawBufferVec<T> { /// Creates a new [`RawBufferVec`] with the given [`BufferUsages`]. 
pub const fn new(buffer_usage: BufferUsages) -> Self { Self { values: Vec::new(), buffer: None, capacity: 0, item_size: size_of::<T>(), buffer_usage, label: None, changed: false, } } /// Returns a handle to the buffer, if the data has been uploaded. #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } /// Returns the binding for the buffer if the data has been uploaded. #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer( self.buffer()?.as_entire_buffer_binding(), )) } /// Returns the amount of space that the GPU will use before reallocating. #[inline] pub fn capacity(&self) -> usize { self.capacity } /// Returns the number of items that have been pushed to this buffer. #[inline] pub fn len(&self) -> usize { self.values.len() } /// Returns true if the buffer is empty. #[inline] pub fn is_empty(&self) -> bool { self.values.is_empty() } /// Adds a new value and returns its index. pub fn push(&mut self, value: T) -> usize { let index = self.values.len(); self.values.push(value); index } pub fn append(&mut self, other: &mut RawBufferVec<T>) { self.values.append(&mut other.values); } /// Returns the value at the given index. pub fn get(&self, index: u32) -> Option<&T> { self.values.get(index as usize) } /// Sets the value at the given index. /// /// The index must be less than [`RawBufferVec::len`]. pub fn set(&mut self, index: u32, value: T) { self.values[index as usize] = value; } /// Preallocates space for `count` elements in the internal CPU-side buffer. /// /// Unlike [`RawBufferVec::reserve`], this doesn't have any effect on the GPU buffer. pub fn reserve_internal(&mut self, count: usize) { self.values.reserve(count); } /// Changes the debugging label of the buffer. /// /// The next time the buffer is updated (via [`reserve`](Self::reserve)), Bevy will inform /// the driver of the new label. 
pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.changed = true; } self.label = label; } /// Returns the label pub fn get_label(&self) -> Option<&str> { self.label.as_deref() } /// Creates a [`Buffer`] on the [`RenderDevice`] with size /// at least `size_of::<T>() * capacity`, unless a such a buffer already exists. /// /// If a [`Buffer`] exists, but is too small, references to it will be discarded, /// and a new [`Buffer`] will be created. Any previously created [`Buffer`]s /// that are no longer referenced will be deleted by the [`RenderDevice`] /// once it is done using them (typically 1-2 frames). /// /// In addition to any [`BufferUsages`] provided when /// the `RawBufferVec` was created, the buffer on the [`RenderDevice`] /// is marked as [`BufferUsages::COPY_DST`](BufferUsages). pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) { let size = self.item_size * capacity; if capacity > self.capacity || (self.changed && size > 0) { self.capacity = capacity; self.buffer = Some(device.create_buffer(&wgpu::BufferDescriptor { label: self.label.as_deref(), size: size as BufferAddress, usage: BufferUsages::COPY_DST | self.buffer_usage, mapped_at_creation: false, })); self.changed = false; } } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. /// /// Before queuing the write, a [`reserve`](RawBufferVec::reserve) operation /// is executed. pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { if self.values.is_empty() { return; } self.reserve(self.values.len(), device); if let Some(buffer) = &self.buffer { let range = 0..self.item_size * self.values.len(); let bytes: &[u8] = must_cast_slice(&self.values); queue.write_buffer(buffer, 0, &bytes[range]); } } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. 
/// /// If the buffer is not initialized on the GPU or the range is bigger than the capacity it will /// return an error. You'll need to either reserve a new buffer which will lose data on the GPU /// or create a new buffer and copy the old data to it. /// /// This will only write the data contained in the given range. It is useful if you only want /// to update a part of the buffer. pub fn write_buffer_range( &mut self, render_queue: &RenderQueue, range: core::ops::Range<usize>, ) -> Result<(), WriteBufferRangeError> { if self.values.is_empty() { return Err(WriteBufferRangeError::NoValuesToUpload); } if range.end > self.item_size * self.capacity { return Err(WriteBufferRangeError::RangeBiggerThanBuffer); } if let Some(buffer) = &self.buffer { // Cast only the bytes we need to write let bytes: &[u8] = must_cast_slice(&self.values[range.start..range.end]); render_queue.write_buffer(buffer, (range.start * self.item_size) as u64, bytes); Ok(()) } else { Err(WriteBufferRangeError::BufferNotInitialized) } } /// Reduces the length of the buffer. pub fn truncate(&mut self, len: usize) { self.values.truncate(len); } /// Removes all elements from the buffer. pub fn clear(&mut self) { self.values.clear(); } /// Removes and returns the last element in the buffer. pub fn pop(&mut self) -> Option<T> { self.values.pop() } pub fn values(&self) -> &Vec<T> { &self.values } pub fn values_mut(&mut self) -> &mut Vec<T> { &mut self.values } } impl<T> RawBufferVec<T> where T: NoUninit + Default, { pub fn grow_set(&mut self, index: u32, value: T) { while index as usize + 1 > self.len() { self.values.push(T::default()); } self.values[index as usize] = value; } } impl<T: NoUninit> Extend<T> for RawBufferVec<T> { #[inline] fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) { self.values.extend(iter); } } /// Like [`RawBufferVec`], but doesn't require that the data type `T` be /// [`NoUninit`]. 
/// /// This is a high-performance data structure that you should use whenever /// possible if your data is more complex than is suitable for [`RawBufferVec`]. /// The [`ShaderType`] trait from the `encase` library is used to ensure that /// the data is correctly aligned for use by the GPU. /// /// For performance reasons, unlike [`RawBufferVec`], this type doesn't allow /// CPU access to the data after it's been added via [`BufferVec::push`]. If you /// need CPU access to the data, consider another type, such as /// [`StorageBuffer`][super::StorageBuffer]. /// /// Other options for storing GPU-accessible data are: /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`] /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) pub struct BufferVec<T> where T: ShaderType + WriteInto, { data: Vec<u8>, buffer: Option<Buffer>, capacity: usize, buffer_usage: BufferUsages, label: Option<String>, label_changed: bool, phantom: PhantomData<T>, } impl<T> BufferVec<T> where T: ShaderType + WriteInto, { /// Creates a new [`BufferVec`] with the given [`BufferUsages`]. pub const fn new(buffer_usage: BufferUsages) -> Self { Self { data: vec![], buffer: None, capacity: 0, buffer_usage, label: None, label_changed: false, phantom: PhantomData, } } /// Returns a handle to the buffer, if the data has been uploaded. #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } /// Returns the binding for the buffer if the data has been uploaded. #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer( self.buffer()?.as_entire_buffer_binding(), )) } /// Returns the amount of space that the GPU will use before reallocating. 
#[inline] pub fn capacity(&self) -> usize { self.capacity } /// Returns the number of items that have been pushed to this buffer. #[inline] pub fn len(&self) -> usize { self.data.len() / u64::from(T::min_size()) as usize } /// Returns true if the buffer is empty. #[inline] pub fn is_empty(&self) -> bool { self.data.is_empty() } /// Adds a new value and returns its index. pub fn push(&mut self, value: T) -> usize { let element_size = u64::from(T::min_size()) as usize; let offset = self.data.len(); // TODO: Consider using unsafe code to push uninitialized, to prevent // the zeroing. It shows up in profiles. self.data.extend(iter::repeat_n(0, element_size)); // Take a slice of the new data for `write_into` to use. This is // important: it hoists the bounds check up here so that the compiler // can eliminate all the bounds checks that `write_into` will emit. let mut dest = &mut self.data[offset..(offset + element_size)]; value.write_into(&mut Writer::new(&value, &mut dest, 0).unwrap()); offset / u64::from(T::min_size()) as usize } /// Changes the debugging label of the buffer. /// /// The next time the buffer is updated (via [`Self::reserve`]), Bevy will inform /// the driver of the new label. pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.label_changed = true; } self.label = label; } /// Returns the label pub fn get_label(&self) -> Option<&str> { self.label.as_deref() } /// Creates a [`Buffer`] on the [`RenderDevice`] with size /// at least `size_of::<T>() * capacity`, unless such a buffer already exists. /// /// If a [`Buffer`] exists, but is too small, references to it will be discarded, /// and a new [`Buffer`] will be created. Any previously created [`Buffer`]s /// that are no longer referenced will be deleted by the [`RenderDevice`] /// once it is done using them (typically 1-2 frames). 
/// /// In addition to any [`BufferUsages`] provided when /// the `BufferVec` was created, the buffer on the [`RenderDevice`] /// is marked as [`BufferUsages::COPY_DST`](BufferUsages). pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) { if capacity <= self.capacity && !self.label_changed { return; } self.capacity = capacity; let size = u64::from(T::min_size()) as usize * capacity; self.buffer = Some(device.create_buffer(&wgpu::BufferDescriptor { label: self.label.as_deref(), size: size as BufferAddress, usage: BufferUsages::COPY_DST | self.buffer_usage, mapped_at_creation: false, })); self.label_changed = false; } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. /// /// Before queuing the write, a [`reserve`](BufferVec::reserve) operation is /// executed. pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { if self.data.is_empty() { return; } self.reserve(self.data.len() / u64::from(T::min_size()) as usize, device); let Some(buffer) = &self.buffer else { return }; queue.write_buffer(buffer, 0, &self.data); } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. /// /// If the buffer is not initialized on the GPU or the range is bigger than the capacity it will /// return an error. You'll need to either reserve a new buffer which will lose data on the GPU /// or create a new buffer and copy the old data to it. /// /// This will only write the data contained in the given range. It is useful if you only want /// to update a part of the buffer. 
pub fn write_buffer_range( &mut self, render_queue: &RenderQueue, range: core::ops::Range<usize>, ) -> Result<(), WriteBufferRangeError> { if self.data.is_empty() { return Err(WriteBufferRangeError::NoValuesToUpload); } let item_size = u64::from(T::min_size()) as usize; if range.end > item_size * self.capacity { return Err(WriteBufferRangeError::RangeBiggerThanBuffer); } if let Some(buffer) = &self.buffer { let bytes = &self.data[range.start..range.end]; render_queue.write_buffer(buffer, (range.start * item_size) as u64, bytes); Ok(()) } else { Err(WriteBufferRangeError::BufferNotInitialized) } } /// Reduces the length of the buffer. pub fn truncate(&mut self, len: usize) { self.data.truncate(u64::from(T::min_size()) as usize * len); } /// Removes all elements from the buffer. pub fn clear(&mut self) { self.data.clear(); } } /// Like a [`BufferVec`], but only reserves space on the GPU for elements /// instead of initializing them CPU-side. /// /// This type is useful when you're accumulating "output slots" for a GPU /// compute shader to write into. /// /// The type `T` need not be [`NoUninit`], unlike [`RawBufferVec`]; it only has to /// be [`GpuArrayBufferable`]. pub struct UninitBufferVec<T> where T: GpuArrayBufferable, { buffer: Option<Buffer>, len: usize, capacity: usize, item_size: usize, buffer_usage: BufferUsages, label: Option<String>, label_changed: bool, phantom: PhantomData<T>, } impl<T> UninitBufferVec<T> where T: GpuArrayBufferable, { /// Creates a new [`UninitBufferVec`] with the given [`BufferUsages`]. pub const fn new(buffer_usage: BufferUsages) -> Self { Self { len: 0, buffer: None, capacity: 0, item_size: size_of::<T>(), buffer_usage, label: None, label_changed: false, phantom: PhantomData, } } /// Returns the buffer, if allocated. #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } /// Returns the binding for the buffer if the data has been uploaded. 
#[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer( self.buffer()?.as_entire_buffer_binding(), )) } /// Reserves space for one more element in the buffer and returns its index. pub fn add(&mut self) -> usize { self.add_multiple(1) } /// Reserves space for the given number of elements in the buffer and /// returns the index of the first one. pub fn add_multiple(&mut self, count: usize) -> usize { let index = self.len; self.len += count; index } /// Returns true if no elements have been added to this [`UninitBufferVec`]. pub fn is_empty(&self) -> bool { self.len == 0 } /// Removes all elements from the buffer. pub fn clear(&mut self) { self.len = 0; } /// Returns the length of the buffer. pub fn len(&self) -> usize { self.len } /// Materializes the buffer on the GPU with space for `capacity` elements. /// /// If the buffer is already big enough, this function doesn't reallocate /// the buffer. pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) { if capacity <= self.capacity && !self.label_changed { return; } self.capacity = capacity; let size = self.item_size * capacity; self.buffer = Some(device.create_buffer(&wgpu::BufferDescriptor { label: self.label.as_deref(), size: size as BufferAddress, usage: BufferUsages::COPY_DST | self.buffer_usage, mapped_at_creation: false, })); self.label_changed = false; } /// Materializes the buffer on the GPU, with an appropriate size for the /// elements that have been pushed so far. 
pub fn write_buffer(&mut self, device: &RenderDevice) { if !self.is_empty() { self.reserve(self.len, device); } } } /// Error returned when `write_buffer_range` fails /// /// See [`RawBufferVec::write_buffer_range`] [`BufferVec::write_buffer_range`] #[derive(Debug, Eq, PartialEq, Copy, Clone, Error)] pub enum WriteBufferRangeError { #[error("the range is bigger than the capacity of the buffer")] RangeBiggerThanBuffer, #[error("the gpu buffer is not initialized")] BufferNotInitialized, #[error("there are no values to upload")] NoValuesToUpload, }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/pipeline_cache.rs
crates/bevy_render/src/render_resource/pipeline_cache.rs
use crate::{ render_resource::*, renderer::{RenderAdapter, RenderDevice, WgpuWrapper}, Extract, }; use alloc::{borrow::Cow, sync::Arc}; use bevy_asset::{AssetEvent, AssetId, Assets, Handle}; use bevy_ecs::{ message::MessageReader, resource::Resource, system::{Res, ResMut}, }; use bevy_platform::collections::{HashMap, HashSet}; use bevy_shader::{ CachedPipelineId, PipelineCacheError, Shader, ShaderCache, ShaderCacheSource, ShaderDefVal, ValidateShader, }; use bevy_tasks::Task; use bevy_utils::default; use core::{future::Future, hash::Hash, mem}; use std::sync::{Mutex, PoisonError}; use tracing::error; use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout}; /// A descriptor for a [`Pipeline`]. /// /// Used to store a heterogenous collection of render and compute pipeline descriptors together. #[derive(Debug)] pub enum PipelineDescriptor { RenderPipelineDescriptor(Box<RenderPipelineDescriptor>), ComputePipelineDescriptor(Box<ComputePipelineDescriptor>), } /// A pipeline defining the data layout and shader logic for a specific GPU task. /// /// Used to store a heterogenous collection of render and compute pipelines together. #[derive(Debug)] pub enum Pipeline { RenderPipeline(RenderPipeline), ComputePipeline(ComputePipeline), } /// Index of a cached render pipeline in a [`PipelineCache`]. #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)] pub struct CachedRenderPipelineId(CachedPipelineId); impl CachedRenderPipelineId { /// An invalid cached render pipeline index, often used to initialize a variable. pub const INVALID: Self = CachedRenderPipelineId(usize::MAX); #[inline] pub fn id(&self) -> usize { self.0 } } /// Index of a cached compute pipeline in a [`PipelineCache`]. #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] pub struct CachedComputePipelineId(CachedPipelineId); impl CachedComputePipelineId { /// An invalid cached compute pipeline index, often used to initialize a variable. 
pub const INVALID: Self = CachedComputePipelineId(usize::MAX); #[inline] pub fn id(&self) -> usize { self.0 } } pub struct CachedPipeline { pub descriptor: PipelineDescriptor, pub state: CachedPipelineState, } /// State of a cached pipeline inserted into a [`PipelineCache`]. #[cfg_attr( not(target_arch = "wasm32"), expect( clippy::large_enum_variant, reason = "See https://github.com/bevyengine/bevy/issues/19220" ) )] #[derive(Debug)] pub enum CachedPipelineState { /// The pipeline GPU object is queued for creation. Queued, /// The pipeline GPU object is being created. Creating(Task<Result<Pipeline, PipelineCacheError>>), /// The pipeline GPU object was created successfully and is available (allocated on the GPU). Ok(Pipeline), /// An error occurred while trying to create the pipeline GPU object. Err(PipelineCacheError), } impl CachedPipelineState { /// Convenience method to "unwrap" a pipeline state into its underlying GPU object. /// /// # Returns /// /// The method returns the allocated pipeline GPU object. /// /// # Panics /// /// This method panics if the pipeline GPU object is not available, either because it is /// pending creation or because an error occurred while attempting to create GPU object. pub fn unwrap(&self) -> &Pipeline { match self { CachedPipelineState::Ok(pipeline) => pipeline, CachedPipelineState::Queued => { panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.") } CachedPipelineState::Creating(..) => { panic!("Pipeline has not been compiled yet. 
It is still in the 'Creating' state.") } CachedPipelineState::Err(err) => panic!("{}", err), } } } type LayoutCacheKey = (Vec<BindGroupLayoutId>, Vec<PushConstantRange>); #[derive(Default)] struct LayoutCache { layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>, } impl LayoutCache { fn get( &mut self, render_device: &RenderDevice, bind_group_layouts: &[BindGroupLayout], push_constant_ranges: Vec<PushConstantRange>, ) -> Arc<WgpuWrapper<PipelineLayout>> { let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect(); self.layouts .entry((bind_group_ids, push_constant_ranges)) .or_insert_with_key(|(_, push_constant_ranges)| { let bind_group_layouts = bind_group_layouts .iter() .map(BindGroupLayout::value) .collect::<Vec<_>>(); Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout( &PipelineLayoutDescriptor { bind_group_layouts: &bind_group_layouts, push_constant_ranges, ..default() }, ))) }) .clone() } } #[expect( clippy::result_large_err, reason = "See https://github.com/bevyengine/bevy/issues/19220" )] fn load_module( render_device: &RenderDevice, shader_source: ShaderCacheSource, validate_shader: &ValidateShader, ) -> Result<WgpuWrapper<ShaderModule>, PipelineCacheError> { let shader_source = match shader_source { #[cfg(feature = "shader_format_spirv")] ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data), #[cfg(not(feature = "shader_format_spirv"))] ShaderCacheSource::SpirV(_) => { unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders") } ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)), #[cfg(not(feature = "decoupled_naga"))] ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)), }; let module_descriptor = ShaderModuleDescriptor { label: None, source: shader_source, }; render_device .wgpu_device() .push_error_scope(wgpu::ErrorFilter::Validation); let shader_module = WgpuWrapper::new(match validate_shader { ValidateShader::Enabled => { 
render_device.create_and_validate_shader_module(module_descriptor) } // SAFETY: we are interfacing with shader code, which may contain undefined behavior, // such as indexing out of bounds. // The checks required are prohibitively expensive and a poor default for game engines. ValidateShader::Disabled => unsafe { render_device.create_shader_module(module_descriptor) }, }); let error = render_device.wgpu_device().pop_error_scope(); // `now_or_never` will return Some if the future is ready and None otherwise. // On native platforms, wgpu will yield the error immediately while on wasm it may take longer since the browser APIs are asynchronous. // So to keep the complexity of the ShaderCache low, we will only catch this error early on native platforms, // and on wasm the error will be handled by wgpu and crash the application. if let Some(Some(wgpu::Error::Validation { description, .. })) = bevy_tasks::futures::now_or_never(error) { return Err(PipelineCacheError::CreateShaderModule(description)); } Ok(shader_module) } #[derive(Default)] struct BindGroupLayoutCache { bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>, } impl BindGroupLayoutCache { fn get( &mut self, render_device: &RenderDevice, descriptor: BindGroupLayoutDescriptor, ) -> BindGroupLayout { self.bgls .entry(descriptor) .or_insert_with_key(|descriptor| { render_device .create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries) }) .clone() } } /// Cache for render and compute pipelines. /// /// The cache stores existing render and compute pipelines allocated on the GPU, as well as /// pending creation. Pipelines inserted into the cache are identified by a unique ID, which /// can be used to retrieve the actual GPU object once it's ready. The creation of the GPU /// pipeline object is deferred to the [`RenderSystems::Render`] step, just before the render /// graph starts being processed, as this requires access to the GPU. 
/// /// Note that the cache does not perform automatic deduplication of identical pipelines. It is /// up to the user not to insert the same pipeline twice to avoid wasting GPU resources. /// /// [`RenderSystems::Render`]: crate::RenderSystems::Render #[derive(Resource)] pub struct PipelineCache { layout_cache: Arc<Mutex<LayoutCache>>, bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>, shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>, device: RenderDevice, pipelines: Vec<CachedPipeline>, waiting_pipelines: HashSet<CachedPipelineId>, new_pipelines: Mutex<Vec<CachedPipeline>>, global_shader_defs: Vec<ShaderDefVal>, /// If `true`, disables asynchronous pipeline compilation. /// This has no effect on macOS, wasm, or without the `multi_threaded` feature. synchronous_pipeline_compilation: bool, } impl PipelineCache { /// Returns an iterator over the pipelines in the pipeline cache. pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> { self.pipelines.iter() } /// Returns a iterator of the IDs of all currently waiting pipelines. pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ { self.waiting_pipelines.iter().copied() } /// Create a new pipeline cache associated with the given render device. 
pub fn new( device: RenderDevice, render_adapter: RenderAdapter, synchronous_pipeline_compilation: bool, ) -> Self { let mut global_shader_defs = Vec::new(); #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] { global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into()); global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into()); global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into()); } if cfg!(target_abi = "sim") { global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into()); } global_shader_defs.push(ShaderDefVal::UInt( String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"), device.limits().max_storage_buffers_per_shader_stage, )); Self { shader_cache: Arc::new(Mutex::new(ShaderCache::new( device.features(), render_adapter.get_downlevel_capabilities().flags, load_module, ))), device, layout_cache: default(), bindgroup_layout_cache: default(), waiting_pipelines: default(), new_pipelines: default(), pipelines: default(), global_shader_defs, synchronous_pipeline_compilation, } } /// Get the state of a cached render pipeline. /// /// See [`PipelineCache::queue_render_pipeline()`]. #[inline] pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState { // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` self.pipelines .get(id.0) .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the state of a cached compute pipeline. /// /// See [`PipelineCache::queue_compute_pipeline()`]. #[inline] pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState { // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` self.pipelines .get(id.0) .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the render pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_render_pipeline()`]. /// /// **Note**: Be careful calling this method. 
It will panic if called with a pipeline that /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. #[inline] pub fn get_render_pipeline_descriptor( &self, id: CachedRenderPipelineId, ) -> &RenderPipelineDescriptor { match &self.pipelines[id.0].descriptor { PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor, PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(), } } /// Get the compute pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_compute_pipeline()`]. /// /// **Note**: Be careful calling this method. It will panic if called with a pipeline that /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. #[inline] pub fn get_compute_pipeline_descriptor( &self, id: CachedComputePipelineId, ) -> &ComputePipelineDescriptor { match &self.pipelines[id.0].descriptor { PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(), PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor, } } /// Try to retrieve a render pipeline GPU object from a cached ID. /// /// # Returns /// /// This method returns a successfully created render pipeline if any, or `None` if the pipeline /// was not created yet or if there was an error during creation. You can check the actual creation /// state with [`PipelineCache::get_render_pipeline_state()`]. #[inline] pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> { if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) = &self.pipelines.get(id.0)?.state { Some(pipeline) } else { None } } /// Wait for a render pipeline to finish compiling. 
#[inline] pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) { if self.pipelines.len() <= id.0 { self.process_queue(); } let state = &mut self.pipelines[id.0].state; if let CachedPipelineState::Creating(task) = state { *state = match bevy_tasks::block_on(task) { Ok(p) => CachedPipelineState::Ok(p), Err(e) => CachedPipelineState::Err(e), }; } } /// Try to retrieve a compute pipeline GPU object from a cached ID. /// /// # Returns /// /// This method returns a successfully created compute pipeline if any, or `None` if the pipeline /// was not created yet or if there was an error during creation. You can check the actual creation /// state with [`PipelineCache::get_compute_pipeline_state()`]. #[inline] pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> { if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) = &self.pipelines.get(id.0)?.state { Some(pipeline) } else { None } } /// Insert a render pipeline into the cache, and queue its creation. /// /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with /// an already cached pipeline. /// /// # Returns /// /// This method returns the unique render shader ID of the cached pipeline, which can be used to query /// the caching state with [`get_render_pipeline_state()`] and to retrieve the created GPU pipeline once /// it's ready with [`get_render_pipeline()`]. 
/// /// [`get_render_pipeline_state()`]: PipelineCache::get_render_pipeline_state /// [`get_render_pipeline()`]: PipelineCache::get_render_pipeline pub fn queue_render_pipeline( &self, descriptor: RenderPipelineDescriptor, ) -> CachedRenderPipelineId { let mut new_pipelines = self .new_pipelines .lock() .unwrap_or_else(PoisonError::into_inner); let id = CachedRenderPipelineId(self.pipelines.len() + new_pipelines.len()); new_pipelines.push(CachedPipeline { descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)), state: CachedPipelineState::Queued, }); id } /// Insert a compute pipeline into the cache, and queue its creation. /// /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with /// an already cached pipeline. /// /// # Returns /// /// This method returns the unique compute shader ID of the cached pipeline, which can be used to query /// the caching state with [`get_compute_pipeline_state()`] and to retrieve the created GPU pipeline once /// it's ready with [`get_compute_pipeline()`]. 
/// /// [`get_compute_pipeline_state()`]: PipelineCache::get_compute_pipeline_state /// [`get_compute_pipeline()`]: PipelineCache::get_compute_pipeline pub fn queue_compute_pipeline( &self, descriptor: ComputePipelineDescriptor, ) -> CachedComputePipelineId { let mut new_pipelines = self .new_pipelines .lock() .unwrap_or_else(PoisonError::into_inner); let id = CachedComputePipelineId(self.pipelines.len() + new_pipelines.len()); new_pipelines.push(CachedPipeline { descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)), state: CachedPipelineState::Queued, }); id } pub fn get_bind_group_layout( &self, bind_group_layout_descriptor: &BindGroupLayoutDescriptor, ) -> BindGroupLayout { self.bindgroup_layout_cache .lock() .unwrap() .get(&self.device, bind_group_layout_descriptor.clone()) } fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) { let mut shader_cache = self.shader_cache.lock().unwrap(); let pipelines_to_queue = shader_cache.set_shader(id, shader); for cached_pipeline in pipelines_to_queue { self.pipelines[cached_pipeline].state = CachedPipelineState::Queued; self.waiting_pipelines.insert(cached_pipeline); } } fn remove_shader(&mut self, shader: AssetId<Shader>) { let mut shader_cache = self.shader_cache.lock().unwrap(); let pipelines_to_queue = shader_cache.remove(shader); for cached_pipeline in pipelines_to_queue { self.pipelines[cached_pipeline].state = CachedPipelineState::Queued; self.waiting_pipelines.insert(cached_pipeline); } } fn start_create_render_pipeline( &mut self, id: CachedPipelineId, descriptor: RenderPipelineDescriptor, ) -> CachedPipelineState { let device = self.device.clone(); let shader_cache = self.shader_cache.clone(); let layout_cache = self.layout_cache.clone(); let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap(); let bind_group_layout = descriptor .layout .iter() .map(|bind_group_layout_descriptor| { bindgroup_layout_cache.get(&self.device, 
bind_group_layout_descriptor.clone()) }) .collect::<Vec<_>>(); create_pipeline_task( async move { let mut shader_cache = shader_cache.lock().unwrap(); let mut layout_cache = layout_cache.lock().unwrap(); let vertex_module = match shader_cache.get( &device, id, descriptor.vertex.shader.id(), &descriptor.vertex.shader_defs, ) { Ok(module) => module, Err(err) => return Err(err), }; let fragment_module = match &descriptor.fragment { Some(fragment) => { match shader_cache.get( &device, id, fragment.shader.id(), &fragment.shader_defs, ) { Ok(module) => Some(module), Err(err) => return Err(err), } } None => None, }; let layout = if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { None } else { Some(layout_cache.get( &device, &bind_group_layout, descriptor.push_constant_ranges.to_vec(), )) }; drop((shader_cache, layout_cache)); let vertex_buffer_layouts = descriptor .vertex .buffers .iter() .map(|layout| RawVertexBufferLayout { array_stride: layout.array_stride, attributes: &layout.attributes, step_mode: layout.step_mode, }) .collect::<Vec<_>>(); let fragment_data = descriptor.fragment.as_ref().map(|fragment| { ( fragment_module.unwrap(), fragment.entry_point.as_deref(), fragment.targets.as_slice(), ) }); // TODO: Expose the rest of this somehow let compilation_options = PipelineCompilationOptions { constants: &[], zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory, }; let descriptor = RawRenderPipelineDescriptor { multiview: None, depth_stencil: descriptor.depth_stencil.clone(), label: descriptor.label.as_deref(), layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }), multisample: descriptor.multisample, primitive: descriptor.primitive, vertex: RawVertexState { buffers: &vertex_buffer_layouts, entry_point: descriptor.vertex.entry_point.as_deref(), module: &vertex_module, // TODO: Should this be the same as the fragment compilation options? 
compilation_options: compilation_options.clone(), }, fragment: fragment_data .as_ref() .map(|(module, entry_point, targets)| RawFragmentState { entry_point: entry_point.as_deref(), module, targets, // TODO: Should this be the same as the vertex compilation options? compilation_options, }), cache: None, }; Ok(Pipeline::RenderPipeline( device.create_render_pipeline(&descriptor), )) }, self.synchronous_pipeline_compilation, ) } fn start_create_compute_pipeline( &mut self, id: CachedPipelineId, descriptor: ComputePipelineDescriptor, ) -> CachedPipelineState { let device = self.device.clone(); let shader_cache = self.shader_cache.clone(); let layout_cache = self.layout_cache.clone(); let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap(); let bind_group_layout = descriptor .layout .iter() .map(|bind_group_layout_descriptor| { bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone()) }) .collect::<Vec<_>>(); create_pipeline_task( async move { let mut shader_cache = shader_cache.lock().unwrap(); let mut layout_cache = layout_cache.lock().unwrap(); let compute_module = match shader_cache.get( &device, id, descriptor.shader.id(), &descriptor.shader_defs, ) { Ok(module) => module, Err(err) => return Err(err), }; let layout = if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { None } else { Some(layout_cache.get( &device, &bind_group_layout, descriptor.push_constant_ranges.to_vec(), )) }; drop((shader_cache, layout_cache)); let descriptor = RawComputePipelineDescriptor { label: descriptor.label.as_deref(), layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }), module: &compute_module, entry_point: descriptor.entry_point.as_deref(), // TODO: Expose the rest of this somehow compilation_options: PipelineCompilationOptions { constants: &[], zero_initialize_workgroup_memory: descriptor .zero_initialize_workgroup_memory, }, cache: None, }; Ok(Pipeline::ComputePipeline( 
device.create_compute_pipeline(&descriptor), )) }, self.synchronous_pipeline_compilation, ) } /// Process the pipeline queue and create all pending pipelines if possible. /// /// This is generally called automatically during the [`RenderSystems::Render`] step, but can /// be called manually to force creation at a different time. /// /// [`RenderSystems::Render`]: crate::RenderSystems::Render pub fn process_queue(&mut self) { let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines); let mut pipelines = mem::take(&mut self.pipelines); { let mut new_pipelines = self .new_pipelines .lock() .unwrap_or_else(PoisonError::into_inner); for new_pipeline in new_pipelines.drain(..) { let id = pipelines.len(); pipelines.push(new_pipeline); waiting_pipelines.insert(id); } } for id in waiting_pipelines { self.process_pipeline(&mut pipelines[id], id); } self.pipelines = pipelines; } fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) { match &mut cached_pipeline.state { CachedPipelineState::Queued => { cached_pipeline.state = match &cached_pipeline.descriptor { PipelineDescriptor::RenderPipelineDescriptor(descriptor) => { self.start_create_render_pipeline(id, *descriptor.clone()) } PipelineDescriptor::ComputePipelineDescriptor(descriptor) => { self.start_create_compute_pipeline(id, *descriptor.clone()) } }; } CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) { Some(Ok(pipeline)) => { cached_pipeline.state = CachedPipelineState::Ok(pipeline); return; } Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), _ => (), }, CachedPipelineState::Err(err) => match err { // Retry PipelineCacheError::ShaderNotLoaded(_) | PipelineCacheError::ShaderImportNotYetAvailable => { cached_pipeline.state = CachedPipelineState::Queued; } // Shader could not be processed ... 
retrying won't help PipelineCacheError::ProcessShaderError(err) => { let error_detail = err.emit_to_string(&self.shader_cache.lock().unwrap().composer); if std::env::var("VERBOSE_SHADER_ERROR") .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false")) { error!("{}", pipeline_error_context(cached_pipeline)); } error!("failed to process shader error:\n{}", error_detail); return; } PipelineCacheError::CreateShaderModule(description) => { error!("failed to create shader module: {}", description); return; } }, CachedPipelineState::Ok(_) => return, } // Retry self.waiting_pipelines.insert(id); } pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) { cache.process_queue(); } pub(crate) fn extract_shaders( mut cache: ResMut<Self>, shaders: Extract<Res<Assets<Shader>>>, mut events: Extract<MessageReader<AssetEvent<Shader>>>, ) { for event in events.read() { #[expect( clippy::match_same_arms, reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon." )] match event { // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held AssetEvent::Added { id } | AssetEvent::Modified { id } => { if let Some(shader) = shaders.get(*id) { let mut shader = shader.clone(); shader.shader_defs.extend(cache.global_shader_defs.clone()); cache.set_shader(*id, shader); } } AssetEvent::Removed { id } => cache.remove_shader(*id), AssetEvent::Unused { .. } => {} AssetEvent::LoadedWithDependencies { .. 
} => { // TODO: handle this } } } } } fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String { fn format( shader: &Handle<Shader>, entry: &Option<Cow<'static, str>>, shader_defs: &[ShaderDefVal], ) -> String { let source = match shader.path() { Some(path) => path.path().to_string_lossy().to_string(), None => String::new(), }; let entry = match entry { Some(entry) => entry.to_string(), None => String::new(), }; let shader_defs = shader_defs .iter() .flat_map(|def| match def { ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()), ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")), ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")), _ => None, }) .collect::<Vec<_>>() .join(", "); format!("{source}:{entry}\nshader defs: {shader_defs}") } match &cached_pipeline.descriptor { PipelineDescriptor::RenderPipelineDescriptor(desc) => { let vert = &desc.vertex; let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs); let Some(frag) = desc.fragment.as_ref() else { return vert_str; };
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/bind_group.rs
crates/bevy_render/src/render_resource/bind_group.rs
use crate::{ define_atomic_id, render_asset::RenderAssets, render_resource::{BindGroupLayout, Buffer, PipelineCache, Sampler, TextureView}, renderer::{RenderDevice, WgpuWrapper}, texture::GpuImage, }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::system::{SystemParam, SystemParamItem}; use bevy_render::render_resource::BindGroupLayoutDescriptor; pub use bevy_render_macros::AsBindGroup; use core::ops::Deref; use encase::ShaderType; use thiserror::Error; use wgpu::{ BindGroupEntry, BindGroupLayoutEntry, BindingResource, SamplerBindingType, TextureViewDimension, }; use super::{BindlessDescriptor, BindlessSlabResourceLimit}; define_atomic_id!(BindGroupId); /// Bind groups are responsible for binding render resources (e.g. buffers, textures, samplers) /// to a [`TrackedRenderPass`](crate::render_phase::TrackedRenderPass). /// This makes them accessible in the pipeline (shaders) as uniforms. /// /// This is a lightweight thread-safe wrapper around wgpu's own [`BindGroup`](wgpu::BindGroup), /// which can be cloned as needed to workaround lifetime management issues. It may be converted /// from and dereferences to wgpu's [`BindGroup`](wgpu::BindGroup). /// /// Can be created via [`RenderDevice::create_bind_group`](RenderDevice::create_bind_group). #[derive(Clone, Debug)] pub struct BindGroup { id: BindGroupId, value: WgpuWrapper<wgpu::BindGroup>, } impl BindGroup { /// Returns the [`BindGroupId`] representing the unique ID of the bind group. 
#[inline] pub fn id(&self) -> BindGroupId { self.id } } impl PartialEq for BindGroup { fn eq(&self, other: &Self) -> bool { self.id == other.id } } impl Eq for BindGroup {} impl core::hash::Hash for BindGroup { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.id.0.hash(state); } } impl From<wgpu::BindGroup> for BindGroup { fn from(value: wgpu::BindGroup) -> Self { BindGroup { id: BindGroupId::new(), value: WgpuWrapper::new(value), } } } impl<'a> From<&'a BindGroup> for Option<&'a wgpu::BindGroup> { fn from(value: &'a BindGroup) -> Self { Some(value.deref()) } } impl<'a> From<&'a mut BindGroup> for Option<&'a wgpu::BindGroup> { fn from(value: &'a mut BindGroup) -> Self { Some(&*value) } } impl Deref for BindGroup { type Target = wgpu::BindGroup; #[inline] fn deref(&self) -> &Self::Target { &self.value } } /// Converts a value to a [`BindGroup`] with a given [`BindGroupLayout`], which can then be used in Bevy shaders. /// This trait can be derived (and generally should be). Read on for details and examples. /// /// This is an opinionated trait that is intended to make it easy to generically /// convert a type into a [`BindGroup`]. It provides access to specific render resources, /// such as [`RenderAssets<GpuImage>`] and [`crate::texture::FallbackImage`]. If a type has a [`Handle<Image>`](bevy_asset::Handle), /// these can be used to retrieve the corresponding [`Texture`](crate::render_resource::Texture) resource. /// /// [`AsBindGroup::as_bind_group`] is intended to be called once, then the result cached somewhere. It is generally /// ok to do "expensive" work here, such as creating a [`Buffer`] for a uniform. /// /// If for some reason a [`BindGroup`] cannot be created yet (for example, the [`Texture`](crate::render_resource::Texture) /// for an [`Image`](bevy_image::Image) hasn't loaded yet), just return [`AsBindGroupError::RetryNextUpdate`], which signals that the caller /// should retry again later. 
/// /// # Deriving /// /// This trait can be derived. Field attributes like `uniform` and `texture` are used to define which fields should be bindings, /// what their binding type is, and what index they should be bound at: /// /// ``` /// # use bevy_render::render_resource::*; /// # use bevy_image::Image; /// # use bevy_color::LinearRgba; /// # use bevy_asset::Handle; /// # use bevy_render::storage::ShaderStorageBuffer; /// /// #[derive(AsBindGroup)] /// struct CoolMaterial { /// #[uniform(0)] /// color: LinearRgba, /// #[texture(1)] /// #[sampler(2)] /// color_texture: Handle<Image>, /// #[storage(3, read_only)] /// storage_buffer: Handle<ShaderStorageBuffer>, /// #[storage(4, read_only, buffer)] /// raw_buffer: Buffer, /// #[storage_texture(5)] /// storage_texture: Handle<Image>, /// } /// ``` /// /// In WGSL shaders, the binding would look like this: /// /// ```wgsl /// @group(#{MATERIAL_BIND_GROUP}) @binding(0) var<uniform> color: vec4<f32>; /// @group(#{MATERIAL_BIND_GROUP}) @binding(1) var color_texture: texture_2d<f32>; /// @group(#{MATERIAL_BIND_GROUP}) @binding(2) var color_sampler: sampler; /// @group(#{MATERIAL_BIND_GROUP}) @binding(3) var<storage> storage_buffer: array<f32>; /// @group(#{MATERIAL_BIND_GROUP}) @binding(4) var<storage> raw_buffer: array<f32>; /// @group(#{MATERIAL_BIND_GROUP}) @binding(5) var storage_texture: texture_storage_2d<rgba8unorm, read_write>; /// ``` /// Note that the "group" index is determined by the usage context. It is not defined in [`AsBindGroup`]. For example, in Bevy material bind groups /// are generally bound to group 2. /// /// The following field-level attributes are supported: /// /// ## `uniform(BINDING_INDEX)` /// /// * The field will be converted to a shader-compatible type using the [`ShaderType`] trait, written to a [`Buffer`], and bound as a uniform. /// [`ShaderType`] is implemented for most math types already, such as [`f32`], [`Vec4`](bevy_math::Vec4), and /// [`LinearRgba`](bevy_color::LinearRgba). 
It can also be derived for custom structs. /// /// ## `texture(BINDING_INDEX, arguments)` /// /// * This field's [`Handle<Image>`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) /// GPU resource, which will be bound as a texture in shaders. The field will be assumed to implement [`Into<Option<Handle<Image>>>`]. In practice, /// most fields should be a [`Handle<Image>`](bevy_asset::Handle) or [`Option<Handle<Image>>`]. If the value of an [`Option<Handle<Image>>`] is /// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `sampler` binding attribute /// (with a different binding index) if a binding of the sampler for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |-----------------------|-------------------------------------------------------------------------|----------------------| /// | `dimension` = "..." | `"1d"`, `"2d"`, `"2d_array"`, `"3d"`, `"cube"`, `"cube_array"` | `"2d"` | /// | `sample_type` = "..." | `"float"`, `"depth"`, `"s_int"` or `"u_int"` | `"float"` | /// | `filterable` = ... | `true`, `false` | `true` | /// | `multisampled` = ... | `true`, `false` | `false` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | /// /// ## `storage_texture(BINDING_INDEX, arguments)` /// /// * This field's [`Handle<Image>`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) /// GPU resource, which will be bound as a storage texture in shaders. The field will be assumed to implement [`Into<Option<Handle<Image>>>`]. In practice, /// most fields should be a [`Handle<Image>`](bevy_asset::Handle) or [`Option<Handle<Image>>`]. If the value of an [`Option<Handle<Image>>`] is /// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. 
/// /// | Arguments | Values | Default | /// |------------------------|--------------------------------------------------------------------------------------------|---------------| /// | `dimension` = "..." | `"1d"`, `"2d"`, `"2d_array"`, `"3d"`, `"cube"`, `"cube_array"` | `"2d"` | /// | `image_format` = ... | any member of [`TextureFormat`](crate::render_resource::TextureFormat) | `Rgba8Unorm` | /// | `access` = ... | any member of [`StorageTextureAccess`](crate::render_resource::StorageTextureAccess) | `ReadWrite` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `compute` | /// /// ## `sampler(BINDING_INDEX, arguments)` /// /// * This field's [`Handle<Image>`](bevy_asset::Handle) will be used to look up the matching [`Sampler`] GPU /// resource, which will be bound as a sampler in shaders. The field will be assumed to implement [`Into<Option<Handle<Image>>>`]. In practice, /// most fields should be a [`Handle<Image>`](bevy_asset::Handle) or [`Option<Handle<Image>>`]. If the value of an [`Option<Handle<Image>>`] is /// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `texture` binding attribute /// (with a different binding index) if a binding of the texture for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |------------------------|-------------------------------------------------------------------------|------------------------| /// | `sampler_type` = "..." | `"filtering"`, `"non_filtering"`, `"comparison"`. | `"filtering"` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | /// /// ## `storage(BINDING_INDEX, arguments)` /// /// * The field's [`Handle<Storage>`](bevy_asset::Handle) will be used to look /// up the matching [`Buffer`] GPU resource, which will be bound as a storage /// buffer in shaders. 
If the `storage` attribute is used, the field is expected /// a raw buffer, and the buffer will be bound as a storage buffer in shaders. /// In bindless mode, `binding_array()` argument that specifies the binding /// number of the resulting storage buffer binding array must be present. /// /// | Arguments | Values | Default | /// |------------------------|-------------------------------------------------------------------------|------------------------| /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | /// | `read_only` | if present then value is true, otherwise false | `false` | /// | `buffer` | if present then the field will be assumed to be a raw wgpu buffer | | /// | `binding_array(...)` | the binding number of the binding array, for bindless mode | bindless mode disabled | /// /// Note that fields without field-level binding attributes will be ignored. /// ``` /// # use bevy_render::{render_resource::AsBindGroup}; /// # use bevy_color::LinearRgba; /// # use bevy_asset::Handle; /// #[derive(AsBindGroup)] /// struct CoolMaterial { /// #[uniform(0)] /// color: LinearRgba, /// this_field_is_ignored: String, /// } /// ``` /// /// As mentioned above, [`Option<Handle<Image>>`] is also supported: /// ``` /// # use bevy_asset::Handle; /// # use bevy_color::LinearRgba; /// # use bevy_image::Image; /// # use bevy_render::render_resource::AsBindGroup; /// #[derive(AsBindGroup)] /// struct CoolMaterial { /// #[uniform(0)] /// color: LinearRgba, /// #[texture(1)] /// #[sampler(2)] /// color_texture: Option<Handle<Image>>, /// } /// ``` /// This is useful if you want a texture to be optional. When the value is [`None`], the [`crate::texture::FallbackImage`] will be used for the binding instead, which defaults /// to "pure white". 
/// /// Field uniforms with the same index will be combined into a single binding: /// ``` /// # use bevy_render::{render_resource::AsBindGroup}; /// # use bevy_color::LinearRgba; /// #[derive(AsBindGroup)] /// struct CoolMaterial { /// #[uniform(0)] /// color: LinearRgba, /// #[uniform(0)] /// roughness: f32, /// } /// ``` /// /// In WGSL shaders, the binding would look like this: /// ```wgsl /// struct CoolMaterial { /// color: vec4<f32>, /// roughness: f32, /// }; /// /// @group(#{MATERIAL_BIND_GROUP}) @binding(0) var<uniform> material: CoolMaterial; /// ``` /// /// Some less common scenarios will require "struct-level" attributes. These are the currently supported struct-level attributes: /// ## `uniform(BINDING_INDEX, ConvertedShaderType)` /// /// * This also creates a [`Buffer`] using [`ShaderType`] and binds it as a /// uniform, much like the field-level `uniform` attribute. The difference is /// that the entire [`AsBindGroup`] value is converted to `ConvertedShaderType`, /// which must implement [`ShaderType`], instead of a specific field /// implementing [`ShaderType`]. This is useful if more complicated conversion /// logic is required, or when using bindless mode (see below). The conversion /// is done using the [`AsBindGroupShaderType<ConvertedShaderType>`] trait, /// which is automatically implemented if `&Self` implements /// [`Into<ConvertedShaderType>`]. Outside of bindless mode, only use /// [`AsBindGroupShaderType`] if access to resources like /// [`RenderAssets<GpuImage>`] is required. /// /// * In bindless mode (see `bindless(COUNT)`), this attribute becomes /// `uniform(BINDLESS_INDEX, ConvertedShaderType, /// binding_array(BINDING_INDEX))`. The resulting uniform buffers will be /// available in the shader as a binding array at the given `BINDING_INDEX`. The /// `BINDLESS_INDEX` specifies the offset of the buffer in the bindless index /// table. 
/// /// For example, suppose that the material slot is stored in a variable named /// `slot`, the bindless index table is named `material_indices`, and that the /// first field (index 0) of the bindless index table type is named /// `material`. Then specifying `#[uniform(0, StandardMaterialUniform, /// binding_array(10)]` will create a binding array buffer declared in the /// shader as `var<storage> material_array: /// binding_array<StandardMaterialUniform>` and accessible as /// `material_array[material_indices[slot].material]`. /// /// ## `data(BINDING_INDEX, ConvertedShaderType, binding_array(BINDING_INDEX))` /// /// * This is very similar to `uniform(BINDING_INDEX, ConvertedShaderType, /// binding_array(BINDING_INDEX)` and in fact is identical if bindless mode /// isn't being used. The difference is that, in bindless mode, the `data` /// attribute produces a single buffer containing an array, not an array of /// buffers. For example, suppose you had the following declaration: /// /// ```ignore /// #[uniform(0, StandardMaterialUniform, binding_array(10))] /// struct StandardMaterial { ... } /// ``` /// /// In bindless mode, this will produce a binding matching the following WGSL /// declaration: /// /// ```wgsl /// @group(#{MATERIAL_BIND_GROUP}) @binding(10) var<storage> material_array: binding_array<StandardMaterial>; /// ``` /// /// On the other hand, if you write this declaration: /// /// ```ignore /// #[data(0, StandardMaterialUniform, binding_array(10))] /// struct StandardMaterial { ... } /// ``` /// /// Then Bevy produces a binding that matches this WGSL declaration instead: /// /// ```wgsl /// @group(#{MATERIAL_BIND_GROUP}) @binding(10) var<storage> material_array: array<StandardMaterial>; /// ``` /// /// * Just as with the structure-level `uniform` attribute, Bevy converts the /// entire [`AsBindGroup`] to `ConvertedShaderType`, using the /// [`AsBindGroupShaderType<ConvertedShaderType>`] trait. 
/// /// * In non-bindless mode, the structure-level `data` attribute is the same as /// the structure-level `uniform` attribute and produces a single uniform buffer /// in the shader. The above example would result in a binding that looks like /// this in WGSL in non-bindless mode: /// /// ```wgsl /// @group(#{MATERIAL_BIND_GROUP}) @binding(0) var<uniform> material: StandardMaterial; /// ``` /// /// * For efficiency reasons, `data` is generally preferred over `uniform` /// unless you need to place your data in individual buffers. /// /// ## `bind_group_data(DataType)` /// /// * The [`AsBindGroup`] type will be converted to some `DataType` using [`Into<DataType>`] and stored /// as [`AsBindGroup::Data`] as part of the [`AsBindGroup::as_bind_group`] call. This is useful if data needs to be stored alongside /// the generated bind group, such as a unique identifier for a material's bind group. The most common use case for this attribute /// is "shader pipeline specialization". See [`SpecializedRenderPipeline`](crate::render_resource::SpecializedRenderPipeline). /// /// ## `bindless` /// /// * This switch enables *bindless resources*, which changes the way Bevy /// supplies resources (textures, and samplers) to the shader. When bindless /// resources are enabled, and the current platform supports them, Bevy will /// allocate textures, and samplers into *binding arrays*, separated based on /// type and will supply your shader with indices into those arrays. /// * Bindless textures and samplers are placed into the appropriate global /// array defined in `bevy_render::bindless` (`bindless.wgsl`). /// * Bevy doesn't currently support bindless buffers, except for those created /// with the `uniform(BINDLESS_INDEX, ConvertedShaderType, /// binding_array(BINDING_INDEX))` attribute. If you need to include a buffer in /// your object, and you can't create the data in that buffer with the `uniform` /// attribute, consider a non-bindless object instead. 
/// * If bindless mode is enabled, the `BINDLESS` definition will be /// available. Because not all platforms support bindless resources, you /// should check for the presence of this definition via `#ifdef` and fall /// back to standard bindings if it isn't present. /// * By default, in bindless mode, binding 0 becomes the *bindless index /// table*, which is an array of structures, each of which contains as many /// fields of type `u32` as the highest binding number in the structure /// annotated with `#[derive(AsBindGroup)]`. Again by default, the *i*th field /// of the bindless index table contains the index of the resource with binding /// *i* within the appropriate binding array. /// * In the case of materials, the index of the applicable table within the /// bindless index table list corresponding to the mesh currently being drawn /// can be retrieved with /// `mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu`. /// * You can limit the size of the bindless slabs to N resources with the /// `limit(N)` declaration. For example, `#[bindless(limit(16))]` ensures that /// each slab will have no more than 16 total resources in it. If you don't /// specify a limit, Bevy automatically picks a reasonable one for the current /// platform. /// * The `index_table(range(M..N), binding(B))` declaration allows you to /// customize the layout of the bindless index table. This is useful for /// materials that are composed of multiple bind groups, such as /// `ExtendedMaterial`. In such cases, there will be multiple bindless index /// tables, so they can't both be assigned to binding 0 or their bindings will /// conflict. /// - The `binding(B)` attribute of the `index_table` attribute allows you to /// customize the binding (`@binding(B)`, in the shader) at which the index /// table will be bound. 
/// - The `range(M, N)` attribute of the `index_table` attribute allows you to /// change the mapping from the field index in the bindless index table to the /// bindless index. Instead of the field at index $i$ being mapped to the /// bindless index $i$, with the `range(M, N)` attribute the field at index /// $i$ in the bindless index table is mapped to the bindless index $i$ + M. /// The size of the index table will be set to N - M. Note that this may /// result in the table being too small to contain all the bindless bindings. /// * The purpose of bindless mode is to improve performance by reducing /// state changes. By grouping resources together into binding arrays, Bevy /// doesn't have to modify GPU state as often, decreasing API and driver /// overhead. /// * See the `shaders/shader_material_bindless` example for an example of how /// to use bindless mode. See the `shaders/extended_material_bindless` example /// for a more exotic example of bindless mode that demonstrates the /// `index_table` attribute. /// * The following diagram illustrates how bindless mode works using a subset /// of `StandardMaterial`: /// /// ```text /// Shader Bindings Sampler Binding Array /// +----+-----------------------------+ +-----------+-----------+-----+ /// +---| 0 | material_indices | +->| sampler 0 | sampler 1 | ... | /// | +----+-----------------------------+ | +-----------+-----------+-----+ /// | | 1 | bindless_samplers_filtering +--+ ^ /// | +----+-----------------------------+ +-------------------------------+ /// | | .. | ... | | /// | +----+-----------------------------+ Texture Binding Array | /// | | 5 | bindless_textures_2d +--+ +-----------+-----------+-----+ | /// | +----+-----------------------------+ +->| texture 0 | texture 1 | ... | | /// | | .. | ... 
| +-----------+-----------+-----+ | /// | +----+-----------------------------+ ^ | /// | + 10 | material_array +--+ +---------------------------+ | /// | +----+-----------------------------+ | | | /// | | Buffer Binding Array | | /// | | +----------+----------+-----+ | | /// | +->| buffer 0 | buffer 1 | ... | | | /// | Material Bindless Indices +----------+----------+-----+ | | /// | +----+-----------------------------+ ^ | | /// +-->| 0 | material +----------+ | | /// +----+-----------------------------+ | | /// | 1 | base_color_texture +---------------------------------------+ | /// +----+-----------------------------+ | /// | 2 | base_color_sampler +-------------------------------------------+ /// +----+-----------------------------+ /// | .. | ... | /// +----+-----------------------------+ /// ``` /// /// The previous `CoolMaterial` example illustrating "combining multiple field-level uniform attributes with the same binding index" can /// also be equivalently represented with a single struct-level uniform attribute: /// ``` /// # use bevy_render::{render_resource::{AsBindGroup, ShaderType}}; /// # use bevy_color::LinearRgba; /// #[derive(AsBindGroup)] /// #[uniform(0, CoolMaterialUniform)] /// struct CoolMaterial { /// color: LinearRgba, /// roughness: f32, /// } /// /// #[derive(ShaderType)] /// struct CoolMaterialUniform { /// color: LinearRgba, /// roughness: f32, /// } /// /// impl From<&CoolMaterial> for CoolMaterialUniform { /// fn from(material: &CoolMaterial) -> CoolMaterialUniform { /// CoolMaterialUniform { /// color: material.color, /// roughness: material.roughness, /// } /// } /// } /// ``` /// /// Setting `bind_group_data` looks like this: /// ``` /// # use bevy_render::{render_resource::AsBindGroup}; /// # use bevy_color::LinearRgba; /// #[derive(AsBindGroup)] /// #[bind_group_data(CoolMaterialKey)] /// struct CoolMaterial { /// #[uniform(0)] /// color: LinearRgba, /// is_shaded: bool, /// } /// /// // Materials keys are intended to be small, 
cheap to hash, and
/// // uniquely identify a specific material permutation.
/// #[repr(C)]
/// #[derive(Copy, Clone, Hash, Eq, PartialEq)]
/// struct CoolMaterialKey {
///     is_shaded: bool,
/// }
///
/// impl From<&CoolMaterial> for CoolMaterialKey {
///     fn from(material: &CoolMaterial) -> CoolMaterialKey {
///         CoolMaterialKey {
///             is_shaded: material.is_shaded,
///         }
///     }
/// }
/// ```
pub trait AsBindGroup {
    /// Data that will be stored alongside the "prepared" bind group.
    type Data: Send + Sync;

    /// The ECS [`SystemParam`] data required while preparing the bind group;
    /// passed to [`AsBindGroup::as_bind_group`] and
    /// [`AsBindGroup::unprepared_bind_group`].
    type Param: SystemParam + 'static;

    /// The number of slots per bind group, if bindless mode is enabled.
    ///
    /// If this bind group doesn't use bindless, then this will be `None`.
    ///
    /// Note that the *actual* slot count may be different from this value, due
    /// to platform limitations. For example, if bindless resources aren't
    /// supported on this platform, the actual slot count will be 1.
    fn bindless_slot_count() -> Option<BindlessSlabResourceLimit> {
        None
    }

    /// True if the hardware *actually* supports bindless textures for this
    /// type, taking the device and driver capabilities into account.
    ///
    /// If this type doesn't use bindless textures, then the return value from
    /// this function is meaningless.
    fn bindless_supported(_: &RenderDevice) -> bool {
        true
    }

    /// The debug label used when creating bind groups and bind group layouts
    /// for this type.
    fn label() -> &'static str;

    /// Creates a bind group for `self` matching the layout defined in [`AsBindGroup::bind_group_layout`].
    fn as_bind_group(
        &self,
        layout_descriptor: &BindGroupLayoutDescriptor,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        param: &mut SystemParamItem<'_, '_, Self::Param>,
    ) -> Result<PreparedBindGroup, AsBindGroupError> {
        // Resolve the concrete layout from the pipeline cache, then collect
        // the (binding index, resource) pairs into bind group entries.
        let layout = &pipeline_cache.get_bind_group_layout(layout_descriptor);
        let UnpreparedBindGroup { bindings } =
            Self::unprepared_bind_group(self, layout, render_device, param, false)?;
        let entries = bindings
            .iter()
            .map(|(index, binding)| BindGroupEntry {
                binding: *index,
                resource: binding.get_binding(),
            })
            .collect::<Vec<_>>();

        let bind_group = render_device.create_bind_group(Self::label(), layout, &entries);

        // Keep the owned resources alive alongside the bind group that
        // references them.
        Ok(PreparedBindGroup {
            bindings,
            bind_group,
        })
    }

    /// Produces the [`AsBindGroup::Data`] stored alongside the prepared bind
    /// group (see the struct-level `bind_group_data(DataType)` attribute).
    fn bind_group_data(&self) -> Self::Data;

    /// Returns a vec of (binding index, `OwnedBindingResource`).
    ///
    /// In cases where `OwnedBindingResource` is not available (as for bindless
    /// texture arrays currently), an implementor may return
    /// `AsBindGroupError::CreateBindGroupDirectly` from this function and
    /// instead define `as_bind_group` directly. This may prevent certain
    /// features, such as bindless mode, from working correctly.
    ///
    /// Set `force_no_bindless` to true to require that bindless textures *not*
    /// be used. `ExtendedMaterial` uses this in order to ensure that the base
    /// material doesn't use bindless mode if the extension doesn't.
    fn unprepared_bind_group(
        &self,
        layout: &BindGroupLayout,
        render_device: &RenderDevice,
        param: &mut SystemParamItem<'_, '_, Self::Param>,
        force_no_bindless: bool,
    ) -> Result<UnpreparedBindGroup, AsBindGroupError>;

    /// Creates the bind group layout matching all bind groups returned by
    /// [`AsBindGroup::as_bind_group`]
    fn bind_group_layout(render_device: &RenderDevice) -> BindGroupLayout
    where
        Self: Sized,
    {
        render_device.create_bind_group_layout(
            Self::label(),
            &Self::bind_group_layout_entries(render_device, false),
        )
    }

    /// Creates the bind group layout descriptor matching all bind groups returned by
    /// [`AsBindGroup::as_bind_group`]
    /// TODO: we only need `RenderDevice` to determine if bindless is supported
    fn bind_group_layout_descriptor(render_device: &RenderDevice) -> BindGroupLayoutDescriptor
    where
        Self: Sized,
    {
        BindGroupLayoutDescriptor {
            label: Self::label().into(),
            entries: Self::bind_group_layout_entries(render_device, false),
        }
    }

    /// Returns a vec of bind group layout entries.
    ///
    /// Set `force_no_bindless` to true to require that bindless textures *not*
    /// be used. `ExtendedMaterial` uses this in order to ensure that the base
    /// material doesn't use bindless mode if the extension doesn't.
    fn bind_group_layout_entries(
        render_device: &RenderDevice,
        force_no_bindless: bool,
    ) -> Vec<BindGroupLayoutEntry>
    where
        Self: Sized;

    /// The [`BindlessDescriptor`] describing this type's bindless layout, if
    /// it uses bindless resources. The default implementation returns `None`.
    fn bindless_descriptor() -> Option<BindlessDescriptor> {
        None
    }
}

/// An error that occurs during [`AsBindGroup::as_bind_group`] calls.
#[derive(Debug, Error)]
pub enum AsBindGroupError {
    /// The bind group could not be generated. Try again next frame.
    #[error("The bind group could not be generated")]
    RetryNextUpdate,
    /// The implementor opted out of `unprepared_bind_group`; callers must use
    /// `as_bind_group` directly.
    #[error("Create the bind group via `as_bind_group()` instead")]
    CreateBindGroupDirectly,
    /// A supplied image sampler did not match the sampler type required by
    /// the binding.
    #[error("At binding index {0}, the provided image sampler `{1}` does not match the required sampler type(s) `{2}`.")]
    InvalidSamplerType(u32, String, String),
}

/// A prepared bind group returned as a result of [`AsBindGroup::as_bind_group`].
pub struct PreparedBindGroup {
    pub bindings: BindingResources,
    pub bind_group: BindGroup,
}

/// A map containing `OwnedBindingResource`s, keyed by the target binding index
pub struct UnpreparedBindGroup {
    pub bindings: BindingResources,
}

/// A pair of binding index and binding resource, used as part of
/// [`PreparedBindGroup`] and [`UnpreparedBindGroup`].
#[derive(Deref, DerefMut)]
pub struct BindingResources(pub Vec<(u32, OwnedBindingResource)>);

/// An owned binding resource of any type (ex: a [`Buffer`], [`TextureView`], etc).
/// This is used by types like [`PreparedBindGroup`] to hold a single list of all
/// render resources used by bindings.
#[derive(Debug)]
pub enum OwnedBindingResource {
    Buffer(Buffer),
    TextureView(TextureViewDimension, TextureView),
    Sampler(SamplerBindingType, Sampler),
    Data(OwnedData),
}

/// Data that will be copied into a GPU buffer.
///
/// This corresponds to the `#[data]` attribute in `AsBindGroup`.
#[derive(Debug, Deref, DerefMut)]
pub struct OwnedData(pub Vec<u8>);

impl OwnedBindingResource {
    /// Creates a [`BindingResource`] reference to this
    /// [`OwnedBindingResource`].
    ///
    /// Note that this operation panics if passed a
    /// [`OwnedBindingResource::Data`], because [`OwnedData`] doesn't itself
    /// correspond to any binding and instead requires the
    /// `MaterialBindGroupAllocator` to pack it into a buffer.
    pub fn get_binding(&self) -> BindingResource<'_> {
        match self {
            OwnedBindingResource::Buffer(buffer) => buffer.as_entire_binding(),
            OwnedBindingResource::TextureView(_, view) => BindingResource::TextureView(view),
            OwnedBindingResource::Sampler(_, sampler) => BindingResource::Sampler(sampler),
            OwnedBindingResource::Data(_) => panic!("`OwnedData` has no binding resource"),
        }
    }
}

/// Converts a value to a [`ShaderType`] for use in a bind group.
///
/// This is automatically implemented for references that implement [`Into`].
/// Generally normal [`Into`] / [`From`] impls should be preferred, but
/// sometimes additional runtime metadata is required.
/// This exists largely to make some [`AsBindGroup`] use cases easier.
pub trait AsBindGroupShaderType<T: ShaderType> {
    /// Return the `T` [`ShaderType`] for `self`. When used in [`AsBindGroup`]
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/bind_group_layout.rs
crates/bevy_render/src/render_resource/bind_group_layout.rs
use crate::{define_atomic_id, renderer::WgpuWrapper}; use core::ops::Deref; define_atomic_id!(BindGroupLayoutId); /// Bind group layouts define the interface of resources (e.g. buffers, textures, samplers) /// for a shader. The actual resource binding is done via a [`BindGroup`](super::BindGroup). /// /// This is a lightweight thread-safe wrapper around wgpu's own [`BindGroupLayout`](wgpu::BindGroupLayout), /// which can be cloned as needed to workaround lifetime management issues. It may be converted /// from and dereferences to wgpu's [`BindGroupLayout`](wgpu::BindGroupLayout). /// /// Can be created via [`RenderDevice::create_bind_group_layout`](crate::renderer::RenderDevice::create_bind_group_layout). #[derive(Clone, Debug)] pub struct BindGroupLayout { id: BindGroupLayoutId, value: WgpuWrapper<wgpu::BindGroupLayout>, } impl PartialEq for BindGroupLayout { fn eq(&self, other: &Self) -> bool { self.id == other.id } } impl Eq for BindGroupLayout {} impl core::hash::Hash for BindGroupLayout { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.id.0.hash(state); } } impl BindGroupLayout { /// Returns the [`BindGroupLayoutId`] representing the unique ID of the bind group layout. #[inline] pub fn id(&self) -> BindGroupLayoutId { self.id } #[inline] pub fn value(&self) -> &wgpu::BindGroupLayout { &self.value } } impl From<wgpu::BindGroupLayout> for BindGroupLayout { fn from(value: wgpu::BindGroupLayout) -> Self { BindGroupLayout { id: BindGroupLayoutId::new(), value: WgpuWrapper::new(value), } } } impl Deref for BindGroupLayout { type Target = wgpu::BindGroupLayout; #[inline] fn deref(&self) -> &Self::Target { &self.value } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/specializer.rs
crates/bevy_render/src/render_resource/specializer.rs
use super::{
    CachedComputePipelineId, CachedRenderPipelineId, ComputePipeline, ComputePipelineDescriptor,
    PipelineCache, RenderPipeline, RenderPipelineDescriptor,
};
use bevy_ecs::error::BevyError;
use bevy_platform::{
    collections::{
        hash_map::{Entry, VacantEntry},
        HashMap,
    },
    hash::FixedHasher,
};
use core::{hash::Hash, marker::PhantomData};
use tracing::error;
use variadics_please::all_tuples;

pub use bevy_render_macros::{Specializer, SpecializerKey};

/// Defines a type that is able to be "specialized" and cached by creating and transforming
/// its descriptor type. This is implemented for [`RenderPipeline`] and [`ComputePipeline`], and
/// likely will not have much utility for other types.
///
/// See docs on [`Specializer`] for more info.
pub trait Specializable {
    /// The descriptor from which new variants of this resource are queued.
    type Descriptor: PartialEq + Clone + Send + Sync;
    /// The id handed back by the [`PipelineCache`] for a queued descriptor.
    type CachedId: Clone + Send + Sync;
    /// Queues creation of the resource described by `descriptor`, returning its cached id.
    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId;
    /// Looks up the descriptor that `id` was queued with.
    fn get_descriptor(pipeline_cache: &PipelineCache, id: Self::CachedId) -> &Self::Descriptor;
}

impl Specializable for RenderPipeline {
    type Descriptor = RenderPipelineDescriptor;
    type CachedId = CachedRenderPipelineId;

    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
        pipeline_cache.queue_render_pipeline(descriptor)
    }

    fn get_descriptor(
        pipeline_cache: &PipelineCache,
        id: CachedRenderPipelineId,
    ) -> &Self::Descriptor {
        pipeline_cache.get_render_pipeline_descriptor(id)
    }
}

impl Specializable for ComputePipeline {
    type Descriptor = ComputePipelineDescriptor;
    type CachedId = CachedComputePipelineId;

    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
        pipeline_cache.queue_compute_pipeline(descriptor)
    }

    fn get_descriptor(
        pipeline_cache: &PipelineCache,
        id: CachedComputePipelineId,
    ) -> &Self::Descriptor {
        pipeline_cache.get_compute_pipeline_descriptor(id)
    }
}

/// Defines a type capable of "specializing" values of a type T.
/// /// Specialization is the process of generating variants of a type T /// from small hashable keys, and specializers themselves can be /// thought of as [pure functions] from the key type to `T`, that /// [memoize] their results based on the key. /// /// <div class="warning"> /// Because specialization is designed for use with render and compute /// pipelines, specializers act on <i>descriptors</i> of <code>T</code> rather /// than produce <code>T</code> itself, but the above comparison is still valid. /// </div> /// /// Since compiling render and compute pipelines can be so slow, /// specialization allows a Bevy app to detect when it would compile /// a duplicate pipeline and reuse what's already in the cache. While /// pipelines could all be memoized hashing each whole descriptor, this /// would be much slower and could still create duplicates. In contrast, /// memoizing groups of *related* pipelines based on a small hashable /// key is much faster. See the docs on [`SpecializerKey`] for more info. /// /// ## Composing Specializers /// /// This trait can be derived with `#[derive(Specializer)]` for structs whose /// fields all implement [`Specializer`]. This allows for composing multiple /// specializers together, and makes encapsulation and separating concerns /// between specializers much nicer. 
One could make individual specializers /// for common operations and place them in entirely separate modules, then /// compose them together with a single `#[derive]` /// /// ```rust /// # use bevy_ecs::error::BevyError; /// # use bevy_render::render_resource::Specializer; /// # use bevy_render::render_resource::SpecializerKey; /// # use bevy_render::render_resource::RenderPipeline; /// # use bevy_render::render_resource::RenderPipelineDescriptor; /// struct A; /// struct B; /// #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)] /// struct BKey { contrived_number: u32 }; /// /// impl Specializer<RenderPipeline> for A { /// type Key = (); /// /// fn specialize( /// &self, /// key: (), /// descriptor: &mut RenderPipelineDescriptor /// ) -> Result<(), BevyError> { /// # let _ = descriptor; /// // mutate the descriptor here /// Ok(key) /// } /// } /// /// impl Specializer<RenderPipeline> for B { /// type Key = BKey; /// /// fn specialize( /// &self, /// key: BKey, /// descriptor: &mut RenderPipelineDescriptor /// ) -> Result<BKey, BevyError> { /// # let _ = descriptor; /// // mutate the descriptor here /// Ok(key) /// } /// } /// /// #[derive(Specializer)] /// #[specialize(RenderPipeline)] /// struct C { /// #[key(default)] /// a: A, /// b: B, /// } /// /// /* /// The generated implementation: /// impl Specializer<RenderPipeline> for C { /// type Key = BKey; /// fn specialize( /// &self, /// key: Self::Key, /// descriptor: &mut RenderPipelineDescriptor /// ) -> Result<Canonical<Self::Key>, BevyError> { /// let _ = self.a.specialize((), descriptor); /// let key = self.b.specialize(key, descriptor); /// Ok(key) /// } /// } /// */ /// ``` /// /// The key type for a composed specializer will be a tuple of the keys /// of each field, and their specialization logic will be applied in field /// order. 
Since derive macros can't have generic parameters, the derive macro
/// requires an additional `#[specialize(..targets)]` attribute to specify a
/// list of types to target for the implementation. `#[specialize(all)]` is
/// also allowed, and will generate a fully generic implementation at the cost
/// of slightly worse error messages.
///
/// Additionally, each field can optionally take a `#[key]` attribute to
/// specify a "key override". This will hide that field's key from being
/// exposed by the wrapper, and always use the value given by the attribute.
/// Values for this attribute may either be `default` which will use the key's
/// [`Default`] implementation, or a valid rust expression of the key type.
///
/// [pure functions]: https://en.wikipedia.org/wiki/Pure_function
/// [memoize]: https://en.wikipedia.org/wiki/Memoization
pub trait Specializer<T: Specializable>: Send + Sync + 'static {
    /// The small hashable key from which a variant of `T` is derived.
    type Key: SpecializerKey;

    /// Mutates `descriptor` according to `key`, returning the canonical form
    /// of the key (see [`SpecializerKey`]).
    fn specialize(
        &self,
        key: Self::Key,
        descriptor: &mut T::Descriptor,
    ) -> Result<Canonical<Self::Key>, BevyError>;
}

// TODO: update docs for `SpecializerKey` with a more concrete example
// once we've migrated mesh layout specialization

/// Defines a type that is able to be used as a key for [`Specializer`]s
///
/// <div class = "warning">
/// <strong>Most types should implement this trait with the included derive macro.</strong> <br/>
/// This generates a "canonical" key type, with <code>IS_CANONICAL = true</code>, and <code>Canonical = Self</code>
/// </div>
///
/// ## What's a "canonical" key?
///
/// The specialization API memoizes pipelines based on the hash of each key, but this
/// can still produce duplicates. For example, if one used a list of vertex attributes
/// as a key, even if all the same attributes were present they could be in any order.
/// In each case, though the keys would be "different" they would produce the same
/// pipeline.
///
/// To address this, during specialization keys are processed into a [canonical]
/// (or "standard") form that represents the actual descriptor that was produced.
/// In the previous example, that would be the final `VertexBufferLayout` contained
/// by the pipeline descriptor. This new key is used by [`Variants`] to
/// perform additional checks for duplicates, but only if required. If a key is
/// canonical from the start, then there's no need.
///
/// For implementors: the main property of a canonical key is that if two keys hash
/// differently, they should nearly always produce different descriptors.
///
/// [canonical]: https://en.wikipedia.org/wiki/Canonicalization
pub trait SpecializerKey: Clone + Hash + Eq {
    /// Denotes whether this key is canonical or not. This must be `true`
    /// if and only if `Canonical = Self`.
    const IS_CANONICAL: bool;

    /// The canonical key type to convert this into during specialization.
    type Canonical: Hash + Eq;
}

/// Shorthand for the canonical form of a [`SpecializerKey`].
pub type Canonical<T> = <T as SpecializerKey>::Canonical;

// The unit specializer: has no key and leaves the descriptor untouched.
impl<T: Specializable> Specializer<T> for () {
    type Key = ();

    fn specialize(
        &self,
        _key: Self::Key,
        _descriptor: &mut T::Descriptor,
    ) -> Result<(), BevyError> {
        Ok(())
    }
}

// `PhantomData` specializes exactly like `()`: a no-op.
impl<T: Specializable, V: Send + Sync + 'static> Specializer<T> for PhantomData<V> {
    type Key = ();

    fn specialize(
        &self,
        _key: Self::Key,
        _descriptor: &mut T::Descriptor,
    ) -> Result<(), BevyError> {
        Ok(())
    }
}

// Tuples of keys are themselves keys; a tuple is canonical only when every
// element is canonical.
macro_rules! impl_specialization_key_tuple {
    ($(#[$meta:meta])* $($T:ident),*) => {
        $(#[$meta])*
        impl <$($T: SpecializerKey),*> SpecializerKey for ($($T,)*) {
            const IS_CANONICAL: bool = true $(&& <$T as SpecializerKey>::IS_CANONICAL)*;
            type Canonical = ($(Canonical<$T>,)*);
        }
    };
}

all_tuples!(
    #[doc(fake_variadic)]
    impl_specialization_key_tuple,
    0,
    12,
    T
);

/// A cache for variants of a resource type created by a specializer.
/// At most one resource will be created for each key.
pub struct Variants<T: Specializable, S: Specializer<T>> {
    specializer: S,
    base_descriptor: T::Descriptor,
    /// Maps the user-facing key directly to a cached id (the fast path).
    primary_cache: HashMap<S::Key, T::CachedId>,
    /// Maps the canonical form of a key to a cached id; used to detect
    /// distinct keys that produce identical descriptors.
    secondary_cache: HashMap<Canonical<S::Key>, T::CachedId>,
}

impl<T: Specializable, S: Specializer<T>> Variants<T, S> {
    /// Creates a new [`Variants`] from a [`Specializer`] and a base descriptor.
    #[inline]
    pub fn new(specializer: S, base_descriptor: T::Descriptor) -> Self {
        Self {
            specializer,
            base_descriptor,
            primary_cache: Default::default(),
            secondary_cache: Default::default(),
        }
    }

    /// Specializes a resource given the [`Specializer`]'s key type.
    #[inline]
    pub fn specialize(
        &mut self,
        pipeline_cache: &PipelineCache,
        key: S::Key,
    ) -> Result<T::CachedId, BevyError> {
        // Fast path: this exact key has been specialized before.
        let entry = self.primary_cache.entry(key.clone());
        match entry {
            Entry::Occupied(entry) => Ok(entry.get().clone()),
            Entry::Vacant(entry) => Self::specialize_slow(
                &self.specializer,
                self.base_descriptor.clone(),
                pipeline_cache,
                key,
                entry,
                &mut self.secondary_cache,
            ),
        }
    }

    // Cache-miss path: runs the specializer, deduplicates via the canonical
    // key when necessary, and fills both caches.
    #[cold]
    fn specialize_slow(
        specializer: &S,
        base_descriptor: T::Descriptor,
        pipeline_cache: &PipelineCache,
        key: S::Key,
        primary_entry: VacantEntry<S::Key, T::CachedId, FixedHasher>,
        secondary_cache: &mut HashMap<Canonical<S::Key>, T::CachedId>,
    ) -> Result<T::CachedId, BevyError> {
        let mut descriptor = base_descriptor.clone();
        let canonical_key = specializer.specialize(key.clone(), &mut descriptor)?;

        // if the whole key is canonical, the secondary cache isn't needed.
        if <S::Key as SpecializerKey>::IS_CANONICAL {
            return Ok(primary_entry
                .insert(<T as Specializable>::queue(pipeline_cache, descriptor))
                .clone());
        }

        let id = match secondary_cache.entry(canonical_key) {
            Entry::Occupied(entry) => {
                // Another key already produced this canonical form: reuse its
                // pipeline. In debug builds, verify the descriptors actually
                // match before reusing.
                if cfg!(debug_assertions) {
                    let stored_descriptor =
                        <T as Specializable>::get_descriptor(pipeline_cache, entry.get().clone());
                    if &descriptor != stored_descriptor {
                        error!(
                            "Invalid Specializer<{}> impl for {}: the cached descriptor \
                            is not equal to the generated descriptor for the given key. \
                            This means the Specializer implementation uses unused information \
                            from the key to specialize the pipeline. This is not allowed \
                            because it would invalidate the cache.",
                            core::any::type_name::<T>(),
                            core::any::type_name::<S>()
                        );
                    }
                }
                entry.into_mut().clone()
            }
            Entry::Vacant(entry) => entry
                .insert(<T as Specializable>::queue(pipeline_cache, descriptor))
                .clone(),
        };

        // Record the id under the original key too, so the next lookup takes
        // the fast path.
        primary_entry.insert(id.clone());
        Ok(id)
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/storage_buffer.rs
crates/bevy_render/src/render_resource/storage_buffer.rs
use core::marker::PhantomData; use super::Buffer; use crate::renderer::{RenderDevice, RenderQueue}; use encase::{ internal::WriteInto, DynamicStorageBuffer as DynamicStorageBufferWrapper, ShaderType, StorageBuffer as StorageBufferWrapper, }; use wgpu::{util::BufferInitDescriptor, BindingResource, BufferBinding, BufferSize, BufferUsages}; use super::IntoBinding; /// Stores data to be transferred to the GPU and made accessible to shaders as a storage buffer. /// /// Storage buffers can be made available to shaders in some combination of read/write mode, and can store large amounts of data. /// Note however that WebGL2 does not support storage buffers, so consider alternative options in this case. /// /// Storage buffers can store runtime-sized arrays, but only if they are the last field in a structure. /// /// The contained data is stored in system RAM. [`write_buffer`](StorageBuffer::write_buffer) queues /// copying of the data from system RAM to VRAM. Storage buffers must conform to [std430 alignment/padding requirements], which /// is automatically enforced by this structure. 
/// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`] /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// /// [std430 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-storage pub struct StorageBuffer<T: ShaderType> { value: T, scratch: StorageBufferWrapper<Vec<u8>>, buffer: Option<Buffer>, label: Option<String>, changed: bool, buffer_usage: BufferUsages, last_written_size: Option<BufferSize>, } impl<T: ShaderType> From<T> for StorageBuffer<T> { fn from(value: T) -> Self { Self { value, scratch: StorageBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, last_written_size: None, } } } impl<T: ShaderType + Default> Default for StorageBuffer<T> { fn default() -> Self { Self { value: T::default(), scratch: StorageBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, last_written_size: None, } } } impl<T: ShaderType + WriteInto> StorageBuffer<T> { #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer(BufferBinding { buffer: self.buffer()?, offset: 0, size: self.last_written_size, })) } pub fn set(&mut self, value: T) { self.value = value; } pub fn get(&self) -> &T { &self.value } pub fn get_mut(&mut self) -> &mut T { &mut self.value } pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.changed = true; } self.label = label; } pub fn 
get_label(&self) -> Option<&str> { self.label.as_deref() } /// Add more [`BufferUsages`] to the buffer. /// /// This method only allows addition of flags to the default usage flags. /// /// The default values for buffer usage are `BufferUsages::COPY_DST` and `BufferUsages::STORAGE`. pub fn add_usages(&mut self, usage: BufferUsages) { self.buffer_usage |= usage; self.changed = true; } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. /// /// If there is no GPU-side buffer allocated to hold the data currently stored, or if a GPU-side buffer previously /// allocated does not have enough capacity, a new GPU-side buffer is created. pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { self.scratch.write(&self.value).unwrap(); let capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = self.scratch.as_ref().len() as u64; if capacity < size || self.changed { self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { label: self.label.as_deref(), usage: self.buffer_usage, contents: self.scratch.as_ref(), })); self.changed = false; } else if let Some(buffer) = &self.buffer { queue.write_buffer(buffer, 0, self.scratch.as_ref()); } self.last_written_size = BufferSize::new(size); } } impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a StorageBuffer<T> { #[inline] fn into_binding(self) -> BindingResource<'a> { self.binding().expect("Failed to get buffer") } } /// Stores data to be transferred to the GPU and made accessible to shaders as a dynamic storage buffer. /// /// This is just a [`StorageBuffer`], but also allows you to set dynamic offsets. /// /// Dynamic storage buffers can be made available to shaders in some combination of read/write mode, and can store large amounts /// of data. Note however that WebGL2 does not support storage buffers, so consider alternative options in this case. 
Dynamic /// storage buffers support multiple separate bindings at dynamic byte offsets and so have a /// [`push`](DynamicStorageBuffer::push) method. /// /// The contained data is stored in system RAM. [`write_buffer`](DynamicStorageBuffer::write_buffer) /// queues copying of the data from system RAM to VRAM. The data within a storage buffer binding must conform to /// [std430 alignment/padding requirements]. `DynamicStorageBuffer` takes care of serializing the inner type to conform to /// these requirements. Each item [`push`](DynamicStorageBuffer::push)ed into this structure /// will additionally be aligned to meet dynamic offset alignment requirements. /// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`StorageBuffer`] /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// /// [std430 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-storage pub struct DynamicStorageBuffer<T: ShaderType> { scratch: DynamicStorageBufferWrapper<Vec<u8>>, buffer: Option<Buffer>, label: Option<String>, changed: bool, buffer_usage: BufferUsages, last_written_size: Option<BufferSize>, _marker: PhantomData<fn() -> T>, } impl<T: ShaderType> Default for DynamicStorageBuffer<T> { fn default() -> Self { Self { scratch: DynamicStorageBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::STORAGE, last_written_size: None, _marker: PhantomData, } } } impl<T: ShaderType + WriteInto> DynamicStorageBuffer<T> { #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { 
Some(BindingResource::Buffer(BufferBinding { buffer: self.buffer()?, offset: 0, size: self.last_written_size, })) } #[inline] pub fn is_empty(&self) -> bool { self.scratch.as_ref().is_empty() } #[inline] pub fn push(&mut self, value: T) -> u32 { self.scratch.write(&value).unwrap() as u32 } pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.changed = true; } self.label = label; } pub fn get_label(&self) -> Option<&str> { self.label.as_deref() } /// Add more [`BufferUsages`] to the buffer. /// /// This method only allows addition of flags to the default usage flags. /// /// The default values for buffer usage are `BufferUsages::COPY_DST` and `BufferUsages::STORAGE`. pub fn add_usages(&mut self, usage: BufferUsages) { self.buffer_usage |= usage; self.changed = true; } #[inline] pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { let capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = self.scratch.as_ref().len() as u64; if capacity < size || (self.changed && size > 0) { self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { label: self.label.as_deref(), usage: self.buffer_usage, contents: self.scratch.as_ref(), })); self.changed = false; } else if let Some(buffer) = &self.buffer { queue.write_buffer(buffer, 0, self.scratch.as_ref()); } self.last_written_size = BufferSize::new(size); } #[inline] pub fn clear(&mut self) { self.scratch.as_mut().clear(); self.scratch.set_offset(0); } } impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a DynamicStorageBuffer<T> { #[inline] fn into_binding(self) -> BindingResource<'a> { self.binding().expect("Failed to get buffer") } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/texture.rs
crates/bevy_render/src/render_resource/texture.rs
use crate::define_atomic_id; use crate::renderer::WgpuWrapper; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::resource::Resource; use core::ops::Deref; define_atomic_id!(TextureId); /// A GPU-accessible texture. /// /// May be converted from and dereferences to a wgpu [`Texture`](wgpu::Texture). /// Can be created via [`RenderDevice::create_texture`](crate::renderer::RenderDevice::create_texture). /// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) #[derive(Clone, Debug)] pub struct Texture { id: TextureId, value: WgpuWrapper<wgpu::Texture>, } impl Texture { /// Returns the [`TextureId`]. #[inline] pub fn id(&self) -> TextureId { self.id } /// Creates a view of this texture. pub fn create_view(&self, desc: &wgpu::TextureViewDescriptor) -> TextureView { TextureView::from(self.value.create_view(desc)) } } impl From<wgpu::Texture> for Texture { fn from(value: wgpu::Texture) -> Self { Texture { id: TextureId::new(), value: WgpuWrapper::new(value), } } } impl Deref for Texture { type Target = wgpu::Texture; #[inline] fn deref(&self) -> &Self::Target { &self.value } } define_atomic_id!(TextureViewId); /// Describes a [`Texture`] with its associated metadata required by a pipeline or [`BindGroup`](super::BindGroup). 
#[derive(Clone, Debug)] pub struct TextureView { id: TextureViewId, value: WgpuWrapper<wgpu::TextureView>, } pub struct SurfaceTexture { value: WgpuWrapper<wgpu::SurfaceTexture>, } impl SurfaceTexture { pub fn present(self) { self.value.into_inner().present(); } } impl TextureView { /// Returns the [`TextureViewId`]. #[inline] pub fn id(&self) -> TextureViewId { self.id } } impl From<wgpu::TextureView> for TextureView { fn from(value: wgpu::TextureView) -> Self { TextureView { id: TextureViewId::new(), value: WgpuWrapper::new(value), } } } impl From<wgpu::SurfaceTexture> for SurfaceTexture { fn from(value: wgpu::SurfaceTexture) -> Self { SurfaceTexture { value: WgpuWrapper::new(value), } } } impl Deref for TextureView { type Target = wgpu::TextureView; #[inline] fn deref(&self) -> &Self::Target { &self.value } } impl Deref for SurfaceTexture { type Target = wgpu::SurfaceTexture; #[inline] fn deref(&self) -> &Self::Target { &self.value } } define_atomic_id!(SamplerId); /// A Sampler defines how a pipeline will sample from a [`TextureView`]. /// They define image filters (including anisotropy) and address (wrapping) modes, among other things. /// /// May be converted from and dereferences to a wgpu [`Sampler`](wgpu::Sampler). /// Can be created via [`RenderDevice::create_sampler`](crate::renderer::RenderDevice::create_sampler). #[derive(Clone, Debug)] pub struct Sampler { id: SamplerId, value: WgpuWrapper<wgpu::Sampler>, } impl Sampler { /// Returns the [`SamplerId`]. #[inline] pub fn id(&self) -> SamplerId { self.id } } impl From<wgpu::Sampler> for Sampler { fn from(value: wgpu::Sampler) -> Self { Sampler { id: SamplerId::new(), value: WgpuWrapper::new(value), } } } impl Deref for Sampler { type Target = wgpu::Sampler; #[inline] fn deref(&self) -> &Self::Target { &self.value } } /// A rendering resource for the default image sampler which is set during renderer /// initialization. 
/// /// The [`ImagePlugin`](bevy_image::ImagePlugin) can be set during app initialization to change the default /// image sampler. #[derive(Resource, Debug, Clone, Deref, DerefMut)] pub struct DefaultImageSampler(pub(crate) Sampler);
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/uniform_buffer.rs
crates/bevy_render/src/render_resource/uniform_buffer.rs
use core::{marker::PhantomData, num::NonZero}; use crate::{ render_resource::Buffer, renderer::{RenderDevice, RenderQueue}, }; use encase::{ internal::{AlignmentValue, BufferMut, WriteInto}, DynamicUniformBuffer as DynamicUniformBufferWrapper, ShaderType, UniformBuffer as UniformBufferWrapper, }; use wgpu::{ util::BufferInitDescriptor, BindingResource, BufferBinding, BufferDescriptor, BufferUsages, }; use super::IntoBinding; /// Stores data to be transferred to the GPU and made accessible to shaders as a uniform buffer. /// /// Uniform buffers are available to shaders on a read-only basis. Uniform buffers are commonly used to make available to shaders /// parameters that are constant during shader execution, and are best used for data that is relatively small in size as they are /// only guaranteed to support up to 16kB per binding. /// /// The contained data is stored in system RAM. [`write_buffer`](UniformBuffer::write_buffer) queues /// copying of the data from system RAM to VRAM. Data in uniform buffers must follow [std140 alignment/padding requirements], /// which is automatically enforced by this structure. Per the WGPU spec, uniform buffers cannot store runtime-sized array /// (vectors), or structures with fields that are vectors. 
/// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`] /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// /// [std140 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-uniform pub struct UniformBuffer<T: ShaderType> { value: T, scratch: UniformBufferWrapper<Vec<u8>>, buffer: Option<Buffer>, label: Option<String>, changed: bool, buffer_usage: BufferUsages, } impl<T: ShaderType> From<T> for UniformBuffer<T> { fn from(value: T) -> Self { Self { value, scratch: UniformBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, } } } impl<T: ShaderType + Default> Default for UniformBuffer<T> { fn default() -> Self { Self { value: T::default(), scratch: UniformBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, } } } impl<T: ShaderType + WriteInto> UniformBuffer<T> { #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer( self.buffer()?.as_entire_buffer_binding(), )) } /// Set the data the buffer stores. pub fn set(&mut self, value: T) { self.value = value; } pub fn get(&self) -> &T { &self.value } pub fn get_mut(&mut self) -> &mut T { &mut self.value } pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.changed = true; } self.label = label; } pub fn get_label(&self) -> Option<&str> { self.label.as_deref() } /// Add more [`BufferUsages`] to the buffer. 
/// /// This method only allows addition of flags to the default usage flags. /// /// The default values for buffer usage are `BufferUsages::COPY_DST` and `BufferUsages::UNIFORM`. pub fn add_usages(&mut self, usage: BufferUsages) { self.buffer_usage |= usage; self.changed = true; } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`], if a GPU-side backing buffer already exists. /// /// If a GPU-side buffer does not already exist for this data, such a buffer is initialized with currently /// available data. pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { self.scratch.write(&self.value).unwrap(); if self.changed || self.buffer.is_none() { self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { label: self.label.as_deref(), usage: self.buffer_usage, contents: self.scratch.as_ref(), })); self.changed = false; } else if let Some(buffer) = &self.buffer { queue.write_buffer(buffer, 0, self.scratch.as_ref()); } } } impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a UniformBuffer<T> { #[inline] fn into_binding(self) -> BindingResource<'a> { self.buffer() .expect("Failed to get buffer") .as_entire_buffer_binding() .into_binding() } } /// Stores data to be transferred to the GPU and made accessible to shaders as a dynamic uniform buffer. /// /// Dynamic uniform buffers are available to shaders on a read-only basis. Dynamic uniform buffers are commonly used to make /// available to shaders runtime-sized arrays of parameters that are otherwise constant during shader execution, and are best /// suited to data that is relatively small in size as they are only guaranteed to support up to 16kB per binding. /// /// The contained data is stored in system RAM. [`write_buffer`](DynamicUniformBuffer::write_buffer) queues /// copying of the data from system RAM to VRAM. 
Data in uniform buffers must follow [std140 alignment/padding requirements], /// which is automatically enforced by this structure. Per the WGPU spec, uniform buffers cannot store runtime-sized array /// (vectors), or structures with fields that are vectors. /// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`] /// /// [std140 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-uniform pub struct DynamicUniformBuffer<T: ShaderType> { scratch: DynamicUniformBufferWrapper<Vec<u8>>, buffer: Option<Buffer>, label: Option<String>, changed: bool, buffer_usage: BufferUsages, _marker: PhantomData<fn() -> T>, } impl<T: ShaderType> Default for DynamicUniformBuffer<T> { fn default() -> Self { Self { scratch: DynamicUniformBufferWrapper::new(Vec::new()), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, _marker: PhantomData, } } } impl<T: ShaderType + WriteInto> DynamicUniformBuffer<T> { pub fn new_with_alignment(alignment: u64) -> Self { Self { scratch: DynamicUniformBufferWrapper::new_with_alignment(Vec::new(), alignment), buffer: None, label: None, changed: false, buffer_usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, _marker: PhantomData, } } #[inline] pub fn buffer(&self) -> Option<&Buffer> { self.buffer.as_ref() } #[inline] pub fn binding(&self) -> Option<BindingResource<'_>> { Some(BindingResource::Buffer(BufferBinding { buffer: self.buffer()?, offset: 0, size: Some(T::min_size()), })) } #[inline] pub fn is_empty(&self) -> bool { self.scratch.as_ref().is_empty() } /// Push data into the 
`DynamicUniformBuffer`'s internal vector (residing on system RAM). #[inline] pub fn push(&mut self, value: &T) -> u32 { self.scratch.write(value).unwrap() as u32 } pub fn set_label(&mut self, label: Option<&str>) { let label = label.map(str::to_string); if label != self.label { self.changed = true; } self.label = label; } pub fn get_label(&self) -> Option<&str> { self.label.as_deref() } /// Add more [`BufferUsages`] to the buffer. /// /// This method only allows addition of flags to the default usage flags. /// /// The default values for buffer usage are `BufferUsages::COPY_DST` and `BufferUsages::UNIFORM`. pub fn add_usages(&mut self, usage: BufferUsages) { self.buffer_usage |= usage; self.changed = true; } /// Creates a writer that can be used to directly write elements into the target buffer. /// /// This method uses less memory and performs fewer memory copies using over [`push`] and [`write_buffer`]. /// /// `max_count` *must* be greater than or equal to the number of elements that are to be written to the buffer, or /// the writer will panic while writing. Dropping the writer will schedule the buffer write into the provided /// [`RenderQueue`]. /// /// If there is no GPU-side buffer allocated to hold the data currently stored, or if a GPU-side buffer previously /// allocated does not have enough capacity to hold `max_count` elements, a new GPU-side buffer is created. /// /// Returns `None` if there is no allocated GPU-side buffer, and `max_count` is 0. /// /// [`push`]: Self::push /// [`write_buffer`]: Self::write_buffer #[inline] pub fn get_writer<'a>( &'a mut self, max_count: usize, device: &RenderDevice, queue: &'a RenderQueue, ) -> Option<DynamicUniformBufferWriter<T>> { let alignment = if cfg!(target_abi = "sim") { // On iOS simulator on silicon macs, metal validation check that the host OS alignment // is respected, but the device reports the correct value for iOS, which is smaller. // Use the larger value. 
// See https://github.com/gfx-rs/wgpu/issues/7057 - remove if it's not needed anymore. AlignmentValue::new(256) } else { AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64) }; let mut capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = alignment .round_up(T::min_size().get()) .checked_mul(max_count as u64) .unwrap(); if capacity < size || (self.changed && size > 0) { let buffer = device.create_buffer(&BufferDescriptor { label: self.label.as_deref(), usage: self.buffer_usage, size, mapped_at_creation: false, }); capacity = buffer.size(); self.buffer = Some(buffer); self.changed = false; } if let Some(buffer) = self.buffer.as_deref() { let buffer_view = queue .write_buffer_with(buffer, 0, NonZero::<u64>::new(buffer.size())?) .unwrap(); Some(DynamicUniformBufferWriter { buffer: encase::DynamicUniformBuffer::new_with_alignment( QueueWriteBufferViewWrapper { capacity: capacity as usize, buffer_view, }, alignment.get(), ), _marker: PhantomData, }) } else { None } } /// Queues writing of data from system RAM to VRAM using the [`RenderDevice`] /// and the provided [`RenderQueue`]. /// /// If there is no GPU-side buffer allocated to hold the data currently stored, or if a GPU-side buffer previously /// allocated does not have enough capacity, a new GPU-side buffer is created. 
#[inline] pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { let capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = self.scratch.as_ref().len() as u64; if capacity < size || (self.changed && size > 0) { self.buffer = Some(device.create_buffer_with_data(&BufferInitDescriptor { label: self.label.as_deref(), usage: self.buffer_usage, contents: self.scratch.as_ref(), })); self.changed = false; } else if let Some(buffer) = &self.buffer { queue.write_buffer(buffer, 0, self.scratch.as_ref()); } } #[inline] pub fn clear(&mut self) { self.scratch.as_mut().clear(); self.scratch.set_offset(0); } } /// A writer that can be used to directly write elements into the target buffer. /// /// For more information, see [`DynamicUniformBuffer::get_writer`]. pub struct DynamicUniformBufferWriter<T> { buffer: encase::DynamicUniformBuffer<QueueWriteBufferViewWrapper>, _marker: PhantomData<fn() -> T>, } impl<T: ShaderType + WriteInto> DynamicUniformBufferWriter<T> { pub fn write(&mut self, value: &T) -> u32 { self.buffer.write(value).unwrap() as u32 } } /// A wrapper to work around the orphan rule so that [`wgpu::QueueWriteBufferView`] can implement /// [`BufferMut`]. struct QueueWriteBufferViewWrapper { buffer_view: wgpu::QueueWriteBufferView, // Must be kept separately and cannot be retrieved from buffer_view, as the read-only access will // invoke a panic. capacity: usize, } impl BufferMut for QueueWriteBufferViewWrapper { #[inline] fn capacity(&self) -> usize { self.capacity } #[inline] fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) { self.buffer_view.write(offset, val); } #[inline] fn write_slice(&mut self, offset: usize, val: &[u8]) { self.buffer_view.write_slice(offset, val); } } impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a DynamicUniformBuffer<T> { #[inline] fn into_binding(self) -> BindingResource<'a> { self.binding().unwrap() } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/bind_group_entries.rs
crates/bevy_render/src/render_resource/bind_group_entries.rs
use variadics_please::all_tuples_with_size; use wgpu::{BindGroupEntry, BindingResource}; use super::{Sampler, TextureView}; /// Helper for constructing bindgroups. /// /// Allows constructing the descriptor's entries as: /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &BindGroupEntries::with_indices(( /// (2, &my_sampler), /// (3, my_uniform), /// )), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &[ /// BindGroupEntry { /// binding: 2, /// resource: BindingResource::Sampler(&my_sampler), /// }, /// BindGroupEntry { /// binding: 3, /// resource: my_uniform, /// }, /// ], /// ); /// ``` /// /// or /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &BindGroupEntries::sequential(( /// &my_sampler, /// my_uniform, /// )), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &[ /// BindGroupEntry { /// binding: 0, /// resource: BindingResource::Sampler(&my_sampler), /// }, /// BindGroupEntry { /// binding: 1, /// resource: my_uniform, /// }, /// ], /// ); /// ``` /// /// or /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &BindGroupEntries::single(my_uniform), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group( /// "my_bind_group", /// &my_layout, /// &[ /// BindGroupEntry { /// binding: 0, /// resource: my_uniform, /// }, /// ], /// ); /// ``` pub struct BindGroupEntries<'b, const N: usize = 1> { entries: [BindGroupEntry<'b>; N], } impl<'b, const N: usize> BindGroupEntries<'b, N> { #[inline] pub 
fn sequential(resources: impl IntoBindingArray<'b, N>) -> Self { let mut i = 0; Self { entries: resources.into_array().map(|resource| { let binding = i; i += 1; BindGroupEntry { binding, resource } }), } } #[inline] pub fn with_indices(indexed_resources: impl IntoIndexedBindingArray<'b, N>) -> Self { Self { entries: indexed_resources .into_array() .map(|(binding, resource)| BindGroupEntry { binding, resource }), } } } impl<'b> BindGroupEntries<'b, 1> { pub fn single(resource: impl IntoBinding<'b>) -> [BindGroupEntry<'b>; 1] { [BindGroupEntry { binding: 0, resource: resource.into_binding(), }] } } impl<'b, const N: usize> core::ops::Deref for BindGroupEntries<'b, N> { type Target = [BindGroupEntry<'b>]; fn deref(&self) -> &[BindGroupEntry<'b>] { &self.entries } } pub trait IntoBinding<'a> { fn into_binding(self) -> BindingResource<'a>; } impl<'a> IntoBinding<'a> for &'a TextureView { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::TextureView(self) } } impl<'a> IntoBinding<'a> for &'a wgpu::TextureView { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::TextureView(self) } } impl<'a> IntoBinding<'a> for &'a [&'a wgpu::TextureView] { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::TextureViewArray(self) } } impl<'a> IntoBinding<'a> for &'a Sampler { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::Sampler(self) } } impl<'a> IntoBinding<'a> for &'a [&'a wgpu::Sampler] { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::SamplerArray(self) } } impl<'a> IntoBinding<'a> for BindingResource<'a> { #[inline] fn into_binding(self) -> BindingResource<'a> { self } } impl<'a> IntoBinding<'a> for wgpu::BufferBinding<'a> { #[inline] fn into_binding(self) -> BindingResource<'a> { BindingResource::Buffer(self) } } impl<'a> IntoBinding<'a> for &'a [wgpu::BufferBinding<'a>] { #[inline] fn into_binding(self) -> BindingResource<'a> { 
BindingResource::BufferArray(self) } } pub trait IntoBindingArray<'b, const N: usize> { fn into_array(self) -> [BindingResource<'b>; N]; } macro_rules! impl_to_binding_slice { ($N: expr, $(#[$meta:meta])* $(($T: ident, $I: ident)),*) => { $(#[$meta])* impl<'b, $($T: IntoBinding<'b>),*> IntoBindingArray<'b, $N> for ($($T,)*) { #[inline] fn into_array(self) -> [BindingResource<'b>; $N] { let ($($I,)*) = self; [$($I.into_binding(), )*] } } } } all_tuples_with_size!( #[doc(fake_variadic)] impl_to_binding_slice, 1, 32, T, s ); pub trait IntoIndexedBindingArray<'b, const N: usize> { fn into_array(self) -> [(u32, BindingResource<'b>); N]; } macro_rules! impl_to_indexed_binding_slice { ($N: expr, $(($T: ident, $S: ident, $I: ident)),*) => { impl<'b, $($T: IntoBinding<'b>),*> IntoIndexedBindingArray<'b, $N> for ($((u32, $T),)*) { #[inline] fn into_array(self) -> [(u32, BindingResource<'b>); $N] { let ($(($S, $I),)*) = self; [$(($S, $I.into_binding())), *] } } } } all_tuples_with_size!(impl_to_indexed_binding_slice, 1, 32, T, n, s); pub struct DynamicBindGroupEntries<'b> { entries: Vec<BindGroupEntry<'b>>, } impl<'b> Default for DynamicBindGroupEntries<'b> { fn default() -> Self { Self::new() } } impl<'b> DynamicBindGroupEntries<'b> { pub fn sequential<const N: usize>(entries: impl IntoBindingArray<'b, N>) -> Self { Self { entries: entries .into_array() .into_iter() .enumerate() .map(|(ix, resource)| BindGroupEntry { binding: ix as u32, resource, }) .collect(), } } pub fn extend_sequential<const N: usize>( mut self, entries: impl IntoBindingArray<'b, N>, ) -> Self { let start = self.entries.last().unwrap().binding + 1; self.entries.extend( entries .into_array() .into_iter() .enumerate() .map(|(ix, resource)| BindGroupEntry { binding: start + ix as u32, resource, }), ); self } pub fn new_with_indices<const N: usize>(entries: impl IntoIndexedBindingArray<'b, N>) -> Self { Self { entries: entries .into_array() .into_iter() .map(|(binding, resource)| BindGroupEntry { binding, 
resource }) .collect(), } } pub fn new() -> Self { Self { entries: Vec::new(), } } pub fn extend_with_indices<const N: usize>( mut self, entries: impl IntoIndexedBindingArray<'b, N>, ) -> Self { self.entries.extend( entries .into_array() .into_iter() .map(|(binding, resource)| BindGroupEntry { binding, resource }), ); self } } impl<'b> core::ops::Deref for DynamicBindGroupEntries<'b> { type Target = [BindGroupEntry<'b>]; fn deref(&self) -> &[BindGroupEntry<'b>] { &self.entries } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/bindless.rs
crates/bevy_render/src/render_resource/bindless.rs
//! Types and functions relating to bindless resources. use alloc::borrow::Cow; use core::{ num::{NonZeroU32, NonZeroU64}, ops::Range, }; use bevy_derive::{Deref, DerefMut}; use wgpu::{ BindGroupLayoutEntry, SamplerBindingType, ShaderStages, TextureSampleType, TextureViewDimension, }; use crate::render_resource::binding_types::storage_buffer_read_only_sized; use super::binding_types::{ sampler, texture_1d, texture_2d, texture_2d_array, texture_3d, texture_cube, texture_cube_array, }; /// The default value for the number of resources that can be stored in a slab /// on this platform. /// /// See the documentation for [`BindlessSlabResourceLimit`] for more /// information. #[cfg(any(target_os = "macos", target_os = "ios"))] pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 64; /// The default value for the number of resources that can be stored in a slab /// on this platform. /// /// See the documentation for [`BindlessSlabResourceLimit`] for more /// information. #[cfg(not(any(target_os = "macos", target_os = "ios")))] pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 2048; /// The binding numbers for the built-in binding arrays of each bindless /// resource type. /// /// In the case of materials, the material allocator manages these binding /// arrays. /// /// `bindless.wgsl` contains declarations of these arrays for use in your /// shaders. If you change these, make sure to update that file as well. 
pub static BINDING_NUMBERS: [(BindlessResourceType, BindingNumber); 9] = [ (BindlessResourceType::SamplerFiltering, BindingNumber(1)), (BindlessResourceType::SamplerNonFiltering, BindingNumber(2)), (BindlessResourceType::SamplerComparison, BindingNumber(3)), (BindlessResourceType::Texture1d, BindingNumber(4)), (BindlessResourceType::Texture2d, BindingNumber(5)), (BindlessResourceType::Texture2dArray, BindingNumber(6)), (BindlessResourceType::Texture3d, BindingNumber(7)), (BindlessResourceType::TextureCube, BindingNumber(8)), (BindlessResourceType::TextureCubeArray, BindingNumber(9)), ]; /// The maximum number of resources that can be stored in a slab. /// /// This limit primarily exists in order to work around `wgpu` performance /// problems involving large numbers of bindless resources. Also, some /// platforms, such as Metal, currently enforce limits on the number of /// resources in use. /// /// This corresponds to `LIMIT` in the `#[bindless(LIMIT)]` attribute when /// deriving [`crate::render_resource::AsBindGroup`]. #[derive(Clone, Copy, Default, PartialEq, Debug)] pub enum BindlessSlabResourceLimit { /// Allows the renderer to choose a reasonable value for the resource limit /// based on the platform. /// /// This value has been tuned, so you should default to this value unless /// you have special platform-specific considerations that prevent you from /// using it. #[default] Auto, /// A custom value for the resource limit. /// /// Bevy will allocate no more than this number of resources in a slab, /// unless exceeding this value is necessary in order to allocate at all /// (i.e. unless the number of bindless resources in your bind group exceeds /// this value), in which case Bevy can exceed it. Custom(u32), } /// Information about the bindless resources in this object. /// /// The material bind group allocator uses this descriptor in order to create /// and maintain bind groups. 
The fields within this bindless descriptor are /// [`Cow`]s in order to support both the common case in which the fields are /// simply `static` constants and the more unusual case in which the fields are /// dynamically generated efficiently. An example of the latter case is /// `ExtendedMaterial`, which needs to assemble a bindless descriptor from those /// of the base material and the material extension at runtime. /// /// This structure will only be present if this object is bindless. pub struct BindlessDescriptor { /// The bindless resource types that this object uses, in order of bindless /// index. /// /// The resource assigned to binding index 0 will be at index 0, the /// resource assigned to binding index will be at index 1 in this array, and /// so on. Unused binding indices are set to [`BindlessResourceType::None`]. pub resources: Cow<'static, [BindlessResourceType]>, /// The [`BindlessBufferDescriptor`] for each bindless buffer that this /// object uses. /// /// The order of this array is irrelevant. pub buffers: Cow<'static, [BindlessBufferDescriptor]>, /// The [`BindlessIndexTableDescriptor`]s describing each bindless index /// table. /// /// This list must be sorted by the first bindless index. pub index_tables: Cow<'static, [BindlessIndexTableDescriptor]>, } /// The type of potentially-bindless resource. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum BindlessResourceType { /// No bindless resource. /// /// This is used as a placeholder to fill holes in the /// [`BindlessDescriptor::resources`] list. None, /// A storage buffer. Buffer, /// A filtering sampler. SamplerFiltering, /// A non-filtering sampler (nearest neighbor). SamplerNonFiltering, /// A comparison sampler (typically used for shadow maps). SamplerComparison, /// A 1D texture. Texture1d, /// A 2D texture. Texture2d, /// A 2D texture array. /// /// Note that this differs from a binding array. 2D texture arrays must all /// have the same size and format. 
Texture2dArray, /// A 3D texture. Texture3d, /// A cubemap texture. TextureCube, /// A cubemap texture array. /// /// Note that this differs from a binding array. Cubemap texture arrays must /// all have the same size and format. TextureCubeArray, /// Multiple instances of plain old data concatenated into a single buffer. /// /// This corresponds to the `#[data]` declaration in /// [`crate::render_resource::AsBindGroup`]. /// /// Note that this resource doesn't itself map to a GPU-level binding /// resource and instead depends on the `MaterialBindGroupAllocator` to /// create a binding resource for it. DataBuffer, } /// Describes a bindless buffer. /// /// Unlike samplers and textures, each buffer in a bind group gets its own /// unique bind group entry. That is, there isn't any `bindless_buffers` binding /// array to go along with `bindless_textures_2d`, /// `bindless_samplers_filtering`, etc. Therefore, this descriptor contains two /// indices: the *binding number* and the *bindless index*. The binding number /// is the `@binding` number used in the shader, while the bindless index is the /// index of the buffer in the bindless index table (which is itself /// conventionally bound to binding number 0). /// /// When declaring the buffer in a derived implementation /// [`crate::render_resource::AsBindGroup`] with syntax like /// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, /// bindless(BINDING_NUMBER)]`, the bindless index is `BINDLESS_INDEX`, and the /// binding number is `BINDING_NUMBER`. Note the order. #[derive(Clone, Copy, Debug)] pub struct BindlessBufferDescriptor { /// The actual binding number of the buffer. /// /// This is declared with `@binding` in WGSL. When deriving /// [`crate::render_resource::AsBindGroup`], this is the `BINDING_NUMBER` in /// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, /// bindless(BINDING_NUMBER)]`. pub binding_number: BindingNumber, /// The index of the buffer in the bindless index table. 
/// /// In the shader, this is the index into the table bound to binding 0. When /// deriving [`crate::render_resource::AsBindGroup`], this is the /// `BINDLESS_INDEX` in `#[uniform(BINDLESS_INDEX, StandardMaterialUniform, /// bindless(BINDING_NUMBER)]`. pub bindless_index: BindlessIndex, /// The size of the buffer in bytes, if known. pub size: Option<usize>, } /// Describes the layout of the bindless index table, which maps bindless /// indices to indices within the binding arrays. #[derive(Clone)] pub struct BindlessIndexTableDescriptor { /// The range of bindless indices that this descriptor covers. pub indices: Range<BindlessIndex>, /// The binding at which the index table itself will be bound. /// /// By default, this is binding 0, but it can be changed with the /// `#[bindless(index_table(binding(B)))]` attribute. pub binding_number: BindingNumber, } /// The index of the actual binding in the bind group. /// /// This is the value specified in WGSL as `@binding`. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Deref, DerefMut)] pub struct BindingNumber(pub u32); /// The index in the bindless index table. /// /// This table is conventionally bound to binding number 0. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Hash, Debug, Deref, DerefMut)] pub struct BindlessIndex(pub u32); /// Creates the bind group layout entries common to all shaders that use /// bindless bind groups. /// /// `bindless_resource_count` specifies the total number of bindless resources. /// `bindless_slab_resource_limit` specifies the resolved /// [`BindlessSlabResourceLimit`] value. 
pub fn create_bindless_bind_group_layout_entries( bindless_index_table_length: u32, bindless_slab_resource_limit: u32, bindless_index_table_binding_number: BindingNumber, ) -> Vec<BindGroupLayoutEntry> { let bindless_slab_resource_limit = NonZeroU32::new(bindless_slab_resource_limit).expect("Bindless slot count must be nonzero"); // The maximum size of a binding array is the // `bindless_slab_resource_limit`, which would occur if all of the bindless // resources were of the same type. So we create our binding arrays with // that size. vec![ // Start with the bindless index table, bound to binding number 0. storage_buffer_read_only_sized( false, NonZeroU64::new(bindless_index_table_length as u64 * size_of::<u32>() as u64), ) .build( *bindless_index_table_binding_number, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), // Continue with the common bindless resource arrays. sampler(SamplerBindingType::Filtering) .count(bindless_slab_resource_limit) .build( 1, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), sampler(SamplerBindingType::NonFiltering) .count(bindless_slab_resource_limit) .build( 2, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), sampler(SamplerBindingType::Comparison) .count(bindless_slab_resource_limit) .build( 3, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_1d(TextureSampleType::Float { filterable: true }) .count(bindless_slab_resource_limit) .build( 4, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_2d(TextureSampleType::Float { filterable: true }) .count(bindless_slab_resource_limit) .build( 5, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_2d_array(TextureSampleType::Float { filterable: true }) .count(bindless_slab_resource_limit) .build( 6, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_3d(TextureSampleType::Float { filterable: true }) 
.count(bindless_slab_resource_limit) .build( 7, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_cube(TextureSampleType::Float { filterable: true }) .count(bindless_slab_resource_limit) .build( 8, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), texture_cube_array(TextureSampleType::Float { filterable: true }) .count(bindless_slab_resource_limit) .build( 9, ShaderStages::FRAGMENT | ShaderStages::VERTEX | ShaderStages::COMPUTE, ), ] } impl BindlessSlabResourceLimit { /// Determines the actual bindless slab resource limit on this platform. pub fn resolve(&self) -> u32 { match *self { BindlessSlabResourceLimit::Auto => AUTO_BINDLESS_SLAB_RESOURCE_LIMIT, BindlessSlabResourceLimit::Custom(limit) => limit, } } } impl BindlessResourceType { /// Returns the binding number for the common array of this resource type. /// /// For example, if you pass `BindlessResourceType::Texture2d`, this will /// return 5, in order to match the `@group(2) @binding(5) var /// bindless_textures_2d: binding_array<texture_2d<f32>>` declaration in /// `bindless.wgsl`. /// /// Not all resource types have fixed binding numbers. If you call /// [`Self::binding_number`] on such a resource type, it returns `None`. /// /// Note that this returns a static reference to the binding number, not the /// binding number itself. This is to conform to an idiosyncratic API in /// `wgpu` whereby binding numbers for binding arrays are taken by `&u32` /// *reference*, not by `u32` value. 
pub fn binding_number(&self) -> Option<&'static BindingNumber> { match BINDING_NUMBERS.binary_search_by_key(self, |(key, _)| *key) { Ok(binding_number) => Some(&BINDING_NUMBERS[binding_number].1), Err(_) => None, } } } impl From<TextureViewDimension> for BindlessResourceType { fn from(texture_view_dimension: TextureViewDimension) -> Self { match texture_view_dimension { TextureViewDimension::D1 => BindlessResourceType::Texture1d, TextureViewDimension::D2 => BindlessResourceType::Texture2d, TextureViewDimension::D2Array => BindlessResourceType::Texture2dArray, TextureViewDimension::Cube => BindlessResourceType::TextureCube, TextureViewDimension::CubeArray => BindlessResourceType::TextureCubeArray, TextureViewDimension::D3 => BindlessResourceType::Texture3d, } } } impl From<SamplerBindingType> for BindlessResourceType { fn from(sampler_binding_type: SamplerBindingType) -> Self { match sampler_binding_type { SamplerBindingType::Filtering => BindlessResourceType::SamplerFiltering, SamplerBindingType::NonFiltering => BindlessResourceType::SamplerNonFiltering, SamplerBindingType::Comparison => BindlessResourceType::SamplerComparison, } } } impl From<u32> for BindlessIndex { fn from(value: u32) -> Self { Self(value) } } impl From<u32> for BindingNumber { fn from(value: u32) -> Self { Self(value) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/bind_group_layout_entries.rs
crates/bevy_render/src/render_resource/bind_group_layout_entries.rs
use core::num::NonZero; use variadics_please::all_tuples_with_size; use wgpu::{BindGroupLayoutEntry, BindingType, ShaderStages}; /// Helper for constructing bind group layouts. /// /// Allows constructing the layout's entries as: /// ```ignore (render_device cannot be easily accessed) /// let layout = render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &BindGroupLayoutEntries::with_indices( /// // The layout entries will only be visible in the fragment stage /// ShaderStages::FRAGMENT, /// ( /// // Screen texture /// (2, texture_2d(TextureSampleType::Float { filterable: true })), /// // Sampler /// (3, sampler(SamplerBindingType::Filtering)), /// ), /// ), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// let layout = render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &[ /// // Screen texture /// BindGroupLayoutEntry { /// binding: 2, /// visibility: ShaderStages::FRAGMENT, /// ty: BindingType::Texture { /// sample_type: TextureSampleType::Float { filterable: true }, /// view_dimension: TextureViewDimension::D2, /// multisampled: false, /// }, /// count: None, /// }, /// // Sampler /// BindGroupLayoutEntry { /// binding: 3, /// visibility: ShaderStages::FRAGMENT, /// ty: BindingType::Sampler(SamplerBindingType::Filtering), /// count: None, /// }, /// ], /// ); /// ``` /// /// or /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &BindGroupLayoutEntries::sequential( /// ShaderStages::FRAGMENT, /// ( /// // Screen texture /// texture_2d(TextureSampleType::Float { filterable: true }), /// // Sampler /// sampler(SamplerBindingType::Filtering), /// ), /// ), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// let layout = render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &[ /// // Screen texture /// BindGroupLayoutEntry { /// 
binding: 0, /// visibility: ShaderStages::FRAGMENT, /// ty: BindingType::Texture { /// sample_type: TextureSampleType::Float { filterable: true }, /// view_dimension: TextureViewDimension::D2, /// multisampled: false, /// }, /// count: None, /// }, /// // Sampler /// BindGroupLayoutEntry { /// binding: 1, /// visibility: ShaderStages::FRAGMENT, /// ty: BindingType::Sampler(SamplerBindingType::Filtering), /// count: None, /// }, /// ], /// ); /// ``` /// /// or /// /// ```ignore (render_device cannot be easily accessed) /// render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &BindGroupLayoutEntries::single( /// ShaderStages::FRAGMENT, /// texture_2d(TextureSampleType::Float { filterable: true }), /// ), /// ); /// ``` /// /// instead of /// /// ```ignore (render_device cannot be easily accessed) /// let layout = render_device.create_bind_group_layout( /// "my_bind_group_layout", /// &[ /// BindGroupLayoutEntry { /// binding: 0, /// visibility: ShaderStages::FRAGMENT, /// ty: BindingType::Texture { /// sample_type: TextureSampleType::Float { filterable: true }, /// view_dimension: TextureViewDimension::D2, /// multisampled: false, /// }, /// count: None, /// }, /// ], /// ); /// ``` #[derive(Clone, Copy)] pub struct BindGroupLayoutEntryBuilder { ty: BindingType, visibility: Option<ShaderStages>, count: Option<NonZero<u32>>, } impl BindGroupLayoutEntryBuilder { pub fn visibility(mut self, visibility: ShaderStages) -> Self { self.visibility = Some(visibility); self } pub fn count(mut self, count: NonZero<u32>) -> Self { self.count = Some(count); self } pub fn build(&self, binding: u32, default_visibility: ShaderStages) -> BindGroupLayoutEntry { BindGroupLayoutEntry { binding, ty: self.ty, visibility: self.visibility.unwrap_or(default_visibility), count: self.count, } } } pub struct BindGroupLayoutEntries<const N: usize> { entries: [BindGroupLayoutEntry; N], } impl<const N: usize> BindGroupLayoutEntries<N> { #[inline] pub fn sequential( 
default_visibility: ShaderStages, entries_ext: impl IntoBindGroupLayoutEntryBuilderArray<N>, ) -> Self { let mut i = 0; Self { entries: entries_ext.into_array().map(|entry| { let binding = i; i += 1; entry.build(binding, default_visibility) }), } } #[inline] pub fn with_indices( default_visibility: ShaderStages, indexed_entries: impl IntoIndexedBindGroupLayoutEntryBuilderArray<N>, ) -> Self { Self { entries: indexed_entries .into_array() .map(|(binding, entry)| entry.build(binding, default_visibility)), } } } impl BindGroupLayoutEntries<1> { pub fn single( visibility: ShaderStages, resource: impl IntoBindGroupLayoutEntryBuilder, ) -> [BindGroupLayoutEntry; 1] { [resource .into_bind_group_layout_entry_builder() .build(0, visibility)] } } impl<const N: usize> core::ops::Deref for BindGroupLayoutEntries<N> { type Target = [BindGroupLayoutEntry]; fn deref(&self) -> &[BindGroupLayoutEntry] { &self.entries } } pub trait IntoBindGroupLayoutEntryBuilder { fn into_bind_group_layout_entry_builder(self) -> BindGroupLayoutEntryBuilder; } impl IntoBindGroupLayoutEntryBuilder for BindingType { fn into_bind_group_layout_entry_builder(self) -> BindGroupLayoutEntryBuilder { BindGroupLayoutEntryBuilder { ty: self, visibility: None, count: None, } } } impl IntoBindGroupLayoutEntryBuilder for BindGroupLayoutEntry { fn into_bind_group_layout_entry_builder(self) -> BindGroupLayoutEntryBuilder { if self.binding != u32::MAX { tracing::warn!("The BindGroupLayoutEntries api ignores the binding index when converting a raw wgpu::BindGroupLayoutEntry. 
You can ignore this warning by setting it to u32::MAX."); } BindGroupLayoutEntryBuilder { ty: self.ty, visibility: Some(self.visibility), count: self.count, } } } impl IntoBindGroupLayoutEntryBuilder for BindGroupLayoutEntryBuilder { fn into_bind_group_layout_entry_builder(self) -> BindGroupLayoutEntryBuilder { self } } pub trait IntoBindGroupLayoutEntryBuilderArray<const N: usize> { fn into_array(self) -> [BindGroupLayoutEntryBuilder; N]; } macro_rules! impl_to_binding_type_slice { ($N: expr, $(#[$meta:meta])* $(($T: ident, $I: ident)),*) => { $(#[$meta])* impl<$($T: IntoBindGroupLayoutEntryBuilder),*> IntoBindGroupLayoutEntryBuilderArray<$N> for ($($T,)*) { #[inline] fn into_array(self) -> [BindGroupLayoutEntryBuilder; $N] { let ($($I,)*) = self; [$($I.into_bind_group_layout_entry_builder(), )*] } } } } all_tuples_with_size!( #[doc(fake_variadic)] impl_to_binding_type_slice, 1, 32, T, s ); pub trait IntoIndexedBindGroupLayoutEntryBuilderArray<const N: usize> { fn into_array(self) -> [(u32, BindGroupLayoutEntryBuilder); N]; } macro_rules! 
impl_to_indexed_binding_type_slice { ($N: expr, $(($T: ident, $S: ident, $I: ident)),*) => { impl<$($T: IntoBindGroupLayoutEntryBuilder),*> IntoIndexedBindGroupLayoutEntryBuilderArray<$N> for ($((u32, $T),)*) { #[inline] fn into_array(self) -> [(u32, BindGroupLayoutEntryBuilder); $N] { let ($(($S, $I),)*) = self; [$(($S, $I.into_bind_group_layout_entry_builder())), *] } } } } all_tuples_with_size!(impl_to_indexed_binding_type_slice, 1, 32, T, n, s); impl<const N: usize> IntoBindGroupLayoutEntryBuilderArray<N> for [BindGroupLayoutEntry; N] { fn into_array(self) -> [BindGroupLayoutEntryBuilder; N] { self.map(IntoBindGroupLayoutEntryBuilder::into_bind_group_layout_entry_builder) } } pub struct DynamicBindGroupLayoutEntries { default_visibility: ShaderStages, entries: Vec<BindGroupLayoutEntry>, } impl DynamicBindGroupLayoutEntries { pub fn sequential<const N: usize>( default_visibility: ShaderStages, entries: impl IntoBindGroupLayoutEntryBuilderArray<N>, ) -> Self { Self { default_visibility, entries: entries .into_array() .into_iter() .enumerate() .map(|(ix, resource)| resource.build(ix as u32, default_visibility)) .collect(), } } pub fn extend_sequential<const N: usize>( mut self, entries: impl IntoBindGroupLayoutEntryBuilderArray<N>, ) -> Self { let start = self.entries.last().unwrap().binding + 1; self.entries.extend( entries .into_array() .into_iter() .enumerate() .map(|(ix, resource)| resource.build(start + ix as u32, self.default_visibility)), ); self } pub fn new_with_indices<const N: usize>( default_visibility: ShaderStages, entries: impl IntoIndexedBindGroupLayoutEntryBuilderArray<N>, ) -> Self { Self { default_visibility, entries: entries .into_array() .into_iter() .map(|(binding, resource)| resource.build(binding, default_visibility)) .collect(), } } pub fn new(default_visibility: ShaderStages) -> Self { Self { default_visibility, entries: Vec::new(), } } pub fn extend_with_indices<const N: usize>( mut self, entries: impl 
IntoIndexedBindGroupLayoutEntryBuilderArray<N>, ) -> Self { self.entries.extend( entries .into_array() .into_iter() .map(|(binding, resource)| resource.build(binding, self.default_visibility)), ); self } } impl core::ops::Deref for DynamicBindGroupLayoutEntries { type Target = [BindGroupLayoutEntry]; fn deref(&self) -> &[BindGroupLayoutEntry] { &self.entries } } pub mod binding_types { use crate::render_resource::{ BufferBindingType, SamplerBindingType, TextureSampleType, TextureViewDimension, }; use core::num::NonZero; use encase::ShaderType; use wgpu::{StorageTextureAccess, TextureFormat}; use super::*; pub fn storage_buffer<T: ShaderType>(has_dynamic_offset: bool) -> BindGroupLayoutEntryBuilder { storage_buffer_sized(has_dynamic_offset, Some(T::min_size())) } pub fn storage_buffer_sized( has_dynamic_offset: bool, min_binding_size: Option<NonZero<u64>>, ) -> BindGroupLayoutEntryBuilder { BindingType::Buffer { ty: BufferBindingType::Storage { read_only: false }, has_dynamic_offset, min_binding_size, } .into_bind_group_layout_entry_builder() } pub fn storage_buffer_read_only<T: ShaderType>( has_dynamic_offset: bool, ) -> BindGroupLayoutEntryBuilder { storage_buffer_read_only_sized(has_dynamic_offset, Some(T::min_size())) } pub fn storage_buffer_read_only_sized( has_dynamic_offset: bool, min_binding_size: Option<NonZero<u64>>, ) -> BindGroupLayoutEntryBuilder { BindingType::Buffer { ty: BufferBindingType::Storage { read_only: true }, has_dynamic_offset, min_binding_size, } .into_bind_group_layout_entry_builder() } pub fn uniform_buffer<T: ShaderType>(has_dynamic_offset: bool) -> BindGroupLayoutEntryBuilder { uniform_buffer_sized(has_dynamic_offset, Some(T::min_size())) } pub fn uniform_buffer_sized( has_dynamic_offset: bool, min_binding_size: Option<NonZero<u64>>, ) -> BindGroupLayoutEntryBuilder { BindingType::Buffer { ty: BufferBindingType::Uniform, has_dynamic_offset, min_binding_size, } .into_bind_group_layout_entry_builder() } pub fn texture_1d(sample_type: 
TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D1, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_2d(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D2, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_2d_multisampled(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D2, multisampled: true, } .into_bind_group_layout_entry_builder() } pub fn texture_2d_array(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D2Array, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_2d_array_multisampled( sample_type: TextureSampleType, ) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D2Array, multisampled: true, } .into_bind_group_layout_entry_builder() } pub fn texture_depth_2d() -> BindGroupLayoutEntryBuilder { texture_2d(TextureSampleType::Depth).into_bind_group_layout_entry_builder() } pub fn texture_depth_2d_multisampled() -> BindGroupLayoutEntryBuilder { texture_2d_multisampled(TextureSampleType::Depth).into_bind_group_layout_entry_builder() } pub fn texture_cube(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::Cube, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_cube_multisampled( sample_type: TextureSampleType, ) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::Cube, multisampled: true, } .into_bind_group_layout_entry_builder() } pub fn texture_cube_array(sample_type: TextureSampleType) -> 
BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::CubeArray, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_cube_array_multisampled( sample_type: TextureSampleType, ) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::CubeArray, multisampled: true, } .into_bind_group_layout_entry_builder() } pub fn texture_3d(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D3, multisampled: false, } .into_bind_group_layout_entry_builder() } pub fn texture_3d_multisampled(sample_type: TextureSampleType) -> BindGroupLayoutEntryBuilder { BindingType::Texture { sample_type, view_dimension: TextureViewDimension::D3, multisampled: true, } .into_bind_group_layout_entry_builder() } pub fn sampler(sampler_binding_type: SamplerBindingType) -> BindGroupLayoutEntryBuilder { BindingType::Sampler(sampler_binding_type).into_bind_group_layout_entry_builder() } pub fn texture_storage_2d( format: TextureFormat, access: StorageTextureAccess, ) -> BindGroupLayoutEntryBuilder { BindingType::StorageTexture { access, format, view_dimension: TextureViewDimension::D2, } .into_bind_group_layout_entry_builder() } pub fn texture_storage_2d_array( format: TextureFormat, access: StorageTextureAccess, ) -> BindGroupLayoutEntryBuilder { BindingType::StorageTexture { access, format, view_dimension: TextureViewDimension::D2Array, } .into_bind_group_layout_entry_builder() } pub fn texture_storage_3d( format: TextureFormat, access: StorageTextureAccess, ) -> BindGroupLayoutEntryBuilder { BindingType::StorageTexture { access, format, view_dimension: TextureViewDimension::D3, } .into_bind_group_layout_entry_builder() } pub fn acceleration_structure() -> BindGroupLayoutEntryBuilder { BindingType::AccelerationStructure { vertex_return: false, } .into_bind_group_layout_entry_builder() } pub 
fn acceleration_structure_vertex_return() -> BindGroupLayoutEntryBuilder { BindingType::AccelerationStructure { vertex_return: true, } .into_bind_group_layout_entry_builder() } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/mod.rs
crates/bevy_render/src/render_resource/mod.rs
mod batched_uniform_buffer; mod bind_group; mod bind_group_entries; mod bind_group_layout; mod bind_group_layout_entries; mod bindless; mod buffer; mod buffer_vec; mod gpu_array_buffer; mod pipeline; mod pipeline_cache; mod pipeline_specializer; pub mod resource_macros; mod specializer; mod storage_buffer; mod texture; mod uniform_buffer; pub use bind_group::*; pub use bind_group_entries::*; pub use bind_group_layout::*; pub use bind_group_layout_entries::*; pub use bindless::*; pub use buffer::*; pub use buffer_vec::*; pub use gpu_array_buffer::*; pub use pipeline::*; pub use pipeline_cache::*; pub use pipeline_specializer::*; pub use specializer::*; pub use storage_buffer::*; pub use texture::*; pub use uniform_buffer::*; // TODO: decide where re-exports should go pub use wgpu::{ util::{ BufferInitDescriptor, DispatchIndirectArgs, DrawIndexedIndirectArgs, DrawIndirectArgs, TextureDataOrder, }, AccelerationStructureFlags, AccelerationStructureGeometryFlags, AccelerationStructureUpdateMode, AdapterInfo as WgpuAdapterInfo, AddressMode, AstcBlock, AstcChannel, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutEntry, BindingResource, BindingType, Blas, BlasBuildEntry, BlasGeometries, BlasGeometrySizeDescriptors, BlasTriangleGeometry, BlasTriangleGeometrySizeDescriptor, BlendComponent, BlendFactor, BlendOperation, BlendState, BufferAddress, BufferAsyncError, BufferBinding, BufferBindingType, BufferDescriptor, BufferSize, BufferUsages, ColorTargetState, ColorWrites, CommandEncoder, CommandEncoderDescriptor, CompareFunction, ComputePass, ComputePassDescriptor, ComputePipelineDescriptor as RawComputePipelineDescriptor, CreateBlasDescriptor, CreateTlasDescriptor, DepthBiasState, DepthStencilState, DownlevelFlags, Extent3d, Face, Features as WgpuFeatures, FilterMode, FragmentState as RawFragmentState, FrontFace, ImageSubresourceRange, IndexFormat, Limits as WgpuLimits, LoadOp, MapMode, MultisampleState, Operations, Origin3d, PipelineCompilationOptions, PipelineLayout, 
PipelineLayoutDescriptor, PollType, PolygonMode, PrimitiveState, PrimitiveTopology, PushConstantRange, RenderPassColorAttachment, RenderPassDepthStencilAttachment, RenderPassDescriptor, RenderPipelineDescriptor as RawRenderPipelineDescriptor, Sampler as WgpuSampler, SamplerBindingType, SamplerDescriptor, ShaderModule, ShaderModuleDescriptor, ShaderSource, ShaderStages, StencilFaceState, StencilOperation, StencilState, StorageTextureAccess, StoreOp, TexelCopyBufferInfo, TexelCopyBufferLayout, TexelCopyTextureInfo, TextureAspect, TextureDescriptor, TextureDimension, TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType, TextureUsages, TextureView as WgpuTextureView, TextureViewDescriptor, TextureViewDimension, Tlas, TlasInstance, VertexAttribute, VertexBufferLayout as RawVertexBufferLayout, VertexFormat, VertexState as RawVertexState, VertexStepMode, COPY_BUFFER_ALIGNMENT, }; pub mod encase { pub use bevy_encase_derive::ShaderType; pub use encase::*; } pub use self::encase::{ShaderSize, ShaderType}; pub use naga::ShaderStage;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/pipeline.rs
crates/bevy_render/src/render_resource/pipeline.rs
use crate::define_atomic_id; use crate::renderer::WgpuWrapper; use alloc::borrow::Cow; use bevy_asset::Handle; use bevy_mesh::VertexBufferLayout; use bevy_shader::{Shader, ShaderDefVal}; use core::iter; use core::ops::Deref; use thiserror::Error; use wgpu::{ BindGroupLayoutEntry, ColorTargetState, DepthStencilState, MultisampleState, PrimitiveState, PushConstantRange, }; define_atomic_id!(RenderPipelineId); /// A [`RenderPipeline`] represents a graphics pipeline and its stages (shaders), bindings and vertex buffers. /// /// May be converted from and dereferences to a wgpu [`RenderPipeline`](wgpu::RenderPipeline). /// Can be created via [`RenderDevice::create_render_pipeline`](crate::renderer::RenderDevice::create_render_pipeline). #[derive(Clone, Debug)] pub struct RenderPipeline { id: RenderPipelineId, value: WgpuWrapper<wgpu::RenderPipeline>, } impl RenderPipeline { #[inline] pub fn id(&self) -> RenderPipelineId { self.id } } impl From<wgpu::RenderPipeline> for RenderPipeline { fn from(value: wgpu::RenderPipeline) -> Self { RenderPipeline { id: RenderPipelineId::new(), value: WgpuWrapper::new(value), } } } impl Deref for RenderPipeline { type Target = wgpu::RenderPipeline; #[inline] fn deref(&self) -> &Self::Target { &self.value } } define_atomic_id!(ComputePipelineId); /// A [`ComputePipeline`] represents a compute pipeline and its single shader stage. /// /// May be converted from and dereferences to a wgpu [`ComputePipeline`](wgpu::ComputePipeline). /// Can be created via [`RenderDevice::create_compute_pipeline`](crate::renderer::RenderDevice::create_compute_pipeline). #[derive(Clone, Debug)] pub struct ComputePipeline { id: ComputePipelineId, value: WgpuWrapper<wgpu::ComputePipeline>, } impl ComputePipeline { /// Returns the [`ComputePipelineId`]. 
#[inline] pub fn id(&self) -> ComputePipelineId { self.id } } impl From<wgpu::ComputePipeline> for ComputePipeline { fn from(value: wgpu::ComputePipeline) -> Self { ComputePipeline { id: ComputePipelineId::new(), value: WgpuWrapper::new(value), } } } impl Deref for ComputePipeline { type Target = wgpu::ComputePipeline; #[inline] fn deref(&self) -> &Self::Target { &self.value } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct BindGroupLayoutDescriptor { /// Debug label of the bind group layout descriptor. This will show up in graphics debuggers for easy identification. pub label: Cow<'static, str>, pub entries: Vec<BindGroupLayoutEntry>, } impl BindGroupLayoutDescriptor { pub fn new(label: impl Into<Cow<'static, str>>, entries: &[BindGroupLayoutEntry]) -> Self { Self { label: label.into(), entries: entries.into(), } } } /// Describes a render (graphics) pipeline. #[derive(Clone, Debug, PartialEq, Default)] pub struct RenderPipelineDescriptor { /// Debug label of the pipeline. This will show up in graphics debuggers for easy identification. pub label: Option<Cow<'static, str>>, /// The layout of bind groups for this pipeline. pub layout: Vec<BindGroupLayoutDescriptor>, /// The push constant ranges for this pipeline. /// Supply an empty vector if the pipeline doesn't use push constants. pub push_constant_ranges: Vec<PushConstantRange>, /// The compiled vertex stage, its entry point, and the input buffers layout. pub vertex: VertexState, /// The properties of the pipeline at the primitive assembly and rasterization level. pub primitive: PrimitiveState, /// The effect of draw calls on the depth and stencil aspects of the output target, if any. pub depth_stencil: Option<DepthStencilState>, /// The multi-sampling properties of the pipeline. pub multisample: MultisampleState, /// The compiled fragment stage, its entry point, and the color targets. pub fragment: Option<FragmentState>, /// Whether to zero-initialize workgroup memory by default. 
If you're not sure, set this to true. /// If this is false, reading from workgroup variables before writing to them will result in garbage values. pub zero_initialize_workgroup_memory: bool, } #[derive(Copy, Clone, Debug, Error)] #[error("RenderPipelineDescriptor has no FragmentState configured")] pub struct NoFragmentStateError; impl RenderPipelineDescriptor { pub fn fragment_mut(&mut self) -> Result<&mut FragmentState, NoFragmentStateError> { self.fragment.as_mut().ok_or(NoFragmentStateError) } pub fn set_layout(&mut self, index: usize, layout: BindGroupLayoutDescriptor) { filling_set_at(&mut self.layout, index, bevy_utils::default(), layout); } } #[derive(Clone, Debug, Eq, PartialEq, Default)] pub struct VertexState { /// The compiled shader module for this stage. pub shader: Handle<Shader>, pub shader_defs: Vec<ShaderDefVal>, /// The name of the entry point in the compiled shader, or `None` if the default entry point /// is used. pub entry_point: Option<Cow<'static, str>>, /// The format of any vertex buffers used with this pipeline. pub buffers: Vec<VertexBufferLayout>, } /// Describes the fragment process in a render pipeline. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct FragmentState { /// The compiled shader module for this stage. pub shader: Handle<Shader>, pub shader_defs: Vec<ShaderDefVal>, /// The name of the entry point in the compiled shader, or `None` if the default entry point /// is used. pub entry_point: Option<Cow<'static, str>>, /// The color state of the render targets. pub targets: Vec<Option<ColorTargetState>>, } impl FragmentState { pub fn set_target(&mut self, index: usize, target: ColorTargetState) { filling_set_at(&mut self.targets, index, None, Some(target)); } } /// Describes a compute pipeline. 
#[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ComputePipelineDescriptor { pub label: Option<Cow<'static, str>>, pub layout: Vec<BindGroupLayoutDescriptor>, pub push_constant_ranges: Vec<PushConstantRange>, /// The compiled shader module for this stage. pub shader: Handle<Shader>, pub shader_defs: Vec<ShaderDefVal>, /// The name of the entry point in the compiled shader, or `None` if the default entry point /// is used. pub entry_point: Option<Cow<'static, str>>, /// Whether to zero-initialize workgroup memory by default. If you're not sure, set this to true. /// If this is false, reading from workgroup variables before writing to them will result in garbage values. pub zero_initialize_workgroup_memory: bool, } // utility function to set a value at the specified index, extending with // a filler value if the index is out of bounds. fn filling_set_at<T: Clone>(vec: &mut Vec<T>, index: usize, filler: T, value: T) { let num_to_fill = (index + 1).saturating_sub(vec.len()); vec.extend(iter::repeat_n(filler, num_to_fill)); vec[index] = value; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/buffer.rs
crates/bevy_render/src/render_resource/buffer.rs
use crate::define_atomic_id; use crate::renderer::WgpuWrapper; use core::ops::{Deref, RangeBounds}; define_atomic_id!(BufferId); #[derive(Clone, Debug)] pub struct Buffer { id: BufferId, value: WgpuWrapper<wgpu::Buffer>, } impl Buffer { #[inline] pub fn id(&self) -> BufferId { self.id } pub fn slice(&self, bounds: impl RangeBounds<wgpu::BufferAddress>) -> BufferSlice<'_> { BufferSlice { id: self.id, value: self.value.slice(bounds), } } #[inline] pub fn unmap(&self) { self.value.unmap(); } } impl From<wgpu::Buffer> for Buffer { fn from(value: wgpu::Buffer) -> Self { Buffer { id: BufferId::new(), value: WgpuWrapper::new(value), } } } impl Deref for Buffer { type Target = wgpu::Buffer; #[inline] fn deref(&self) -> &Self::Target { &self.value } } #[derive(Clone, Debug)] pub struct BufferSlice<'a> { id: BufferId, value: wgpu::BufferSlice<'a>, } impl<'a> BufferSlice<'a> { #[inline] pub fn id(&self) -> BufferId { self.id } } impl<'a> Deref for BufferSlice<'a> { type Target = wgpu::BufferSlice<'a>; #[inline] fn deref(&self) -> &Self::Target { &self.value } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/resource_macros.rs
crates/bevy_render/src/render_resource/resource_macros.rs
#[macro_export] macro_rules! define_atomic_id { ($atomic_id_type:ident) => { #[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)] pub struct $atomic_id_type(core::num::NonZero<u32>); impl $atomic_id_type { #[expect( clippy::new_without_default, reason = "Implementing the `Default` trait on atomic IDs would imply that two `<AtomicIdType>::default()` equal each other. By only implementing `new()`, we indicate that each atomic ID created will be unique." )] pub fn new() -> Self { use core::sync::atomic::{AtomicU32, Ordering}; static COUNTER: AtomicU32 = AtomicU32::new(1); let counter = COUNTER.fetch_add(1, Ordering::Relaxed); Self(core::num::NonZero::<u32>::new(counter).unwrap_or_else(|| { panic!( "The system ran out of unique `{}`s.", stringify!($atomic_id_type) ); })) } } impl From<$atomic_id_type> for core::num::NonZero<u32> { fn from(value: $atomic_id_type) -> Self { value.0 } } impl From<core::num::NonZero<u32>> for $atomic_id_type { fn from(value: core::num::NonZero<u32>) -> Self { Self(value) } } }; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_resource/gpu_array_buffer.rs
crates/bevy_render/src/render_resource/gpu_array_buffer.rs
use super::{ binding_types::{storage_buffer_read_only, uniform_buffer_sized}, BindGroupLayoutEntryBuilder, BufferVec, }; use crate::{ render_resource::batched_uniform_buffer::BatchedUniformBuffer, renderer::{RenderDevice, RenderQueue}, }; use bevy_ecs::{prelude::Component, resource::Resource}; use core::marker::PhantomData; use encase::{private::WriteInto, ShaderSize, ShaderType}; use nonmax::NonMaxU32; use wgpu::{BindingResource, BufferUsages, Limits}; /// Trait for types able to go in a [`GpuArrayBuffer`]. pub trait GpuArrayBufferable: ShaderType + ShaderSize + WriteInto + Clone {} impl<T: ShaderType + ShaderSize + WriteInto + Clone> GpuArrayBufferable for T {} /// Stores an array of elements to be transferred to the GPU and made accessible to shaders as a read-only array. /// /// On platforms that support storage buffers, this is equivalent to /// [`BufferVec<T>`]. Otherwise, this falls back to a dynamic offset /// uniform buffer with the largest array of T that fits within a uniform buffer /// binding (within reasonable limits). 
/// /// Other options for storing GPU-accessible data are: /// * [`BufferVec`] /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) /// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// * [`UniformBuffer`](crate::render_resource::UniformBuffer) #[derive(Resource)] pub enum GpuArrayBuffer<T: GpuArrayBufferable> { Uniform(BatchedUniformBuffer<T>), Storage(BufferVec<T>), } impl<T: GpuArrayBufferable> GpuArrayBuffer<T> { pub fn new(limits: &Limits) -> Self { if limits.max_storage_buffers_per_shader_stage == 0 { GpuArrayBuffer::Uniform(BatchedUniformBuffer::new(limits)) } else { GpuArrayBuffer::Storage(BufferVec::new(BufferUsages::STORAGE)) } } pub fn clear(&mut self) { match self { GpuArrayBuffer::Uniform(buffer) => buffer.clear(), GpuArrayBuffer::Storage(buffer) => buffer.clear(), } } pub fn push(&mut self, value: T) -> GpuArrayBufferIndex<T> { match self { GpuArrayBuffer::Uniform(buffer) => buffer.push(value), GpuArrayBuffer::Storage(buffer) => { let index = buffer.push(value) as u32; GpuArrayBufferIndex { index, dynamic_offset: None, element_type: PhantomData, } } } } pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) { match self { GpuArrayBuffer::Uniform(buffer) => buffer.write_buffer(device, queue), GpuArrayBuffer::Storage(buffer) => buffer.write_buffer(device, queue), } } pub fn binding_layout(limits: &Limits) -> BindGroupLayoutEntryBuilder { if limits.max_storage_buffers_per_shader_stage == 0 { uniform_buffer_sized( true, // BatchedUniformBuffer uses a MaxCapacityArray that is runtime-sized, so we use // None here and let wgpu figure out the size. 
None, ) } else { storage_buffer_read_only::<T>(false) } } pub fn binding(&self) -> Option<BindingResource<'_>> { match self { GpuArrayBuffer::Uniform(buffer) => buffer.binding(), GpuArrayBuffer::Storage(buffer) => buffer.binding(), } } pub fn batch_size(limits: &Limits) -> Option<u32> { if limits.max_storage_buffers_per_shader_stage == 0 { Some(BatchedUniformBuffer::<T>::batch_size(limits) as u32) } else { None } } } /// An index into a [`GpuArrayBuffer`] for a given element. #[derive(Component, Clone)] pub struct GpuArrayBufferIndex<T: GpuArrayBufferable> { /// The index to use in a shader into the array. pub index: u32, /// The dynamic offset to use when setting the bind group in a pass. /// Only used on platforms that don't support storage buffers. pub dynamic_offset: Option<NonMaxU32>, pub element_type: PhantomData<T>, }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/renderer/wgpu_wrapper.rs
crates/bevy_render/src/renderer/wgpu_wrapper.rs
/// A wrapper to safely make `wgpu` types Send / Sync on web with atomics enabled. /// /// On web with `atomics` enabled the inner value can only be accessed /// or dropped on the `wgpu` thread or else a panic will occur. /// On other platforms the wrapper simply contains the wrapped value. #[derive(Debug, Clone)] pub struct WgpuWrapper<T>( #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] T, #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] send_wrapper::SendWrapper<T>, ); // SAFETY: SendWrapper is always Send + Sync. #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] #[expect(unsafe_code, reason = "Blanket-impl Send requires unsafe.")] unsafe impl<T> Send for WgpuWrapper<T> {} #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] #[expect(unsafe_code, reason = "Blanket-impl Sync requires unsafe.")] unsafe impl<T> Sync for WgpuWrapper<T> {} impl<T> WgpuWrapper<T> { /// Constructs a new instance of `WgpuWrapper` which will wrap the specified value. pub fn new(t: T) -> Self { #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] return Self(t); #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] return Self(send_wrapper::SendWrapper::new(t)); } /// Unwraps the value. pub fn into_inner(self) -> T { #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] return self.0; #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] return self.0.take(); } } impl<T> core::ops::Deref for WgpuWrapper<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } impl<T> core::ops::DerefMut for WgpuWrapper<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/renderer/raw_vulkan_init.rs
crates/bevy_render/src/renderer/raw_vulkan_init.rs
use alloc::sync::Arc; use bevy_ecs::resource::Resource; use bevy_platform::collections::HashSet; use core::any::{Any, TypeId}; use thiserror::Error; use wgpu::{ hal::api::Vulkan, Adapter, Device, DeviceDescriptor, Instance, InstanceDescriptor, Queue, }; /// When the `raw_vulkan_init` feature is enabled, these settings will be used to configure the raw vulkan instance. #[derive(Resource, Default, Clone)] pub struct RawVulkanInitSettings { // SAFETY: this must remain private to ensure that registering callbacks is unsafe create_instance_callbacks: Vec< Arc< dyn Fn( &mut wgpu::hal::vulkan::CreateInstanceCallbackArgs, &mut AdditionalVulkanFeatures, ) + Send + Sync, >, >, // SAFETY: this must remain private to ensure that registering callbacks is unsafe create_device_callbacks: Vec< Arc< dyn Fn( &mut wgpu::hal::vulkan::CreateDeviceCallbackArgs, &wgpu::hal::vulkan::Adapter, &mut AdditionalVulkanFeatures, ) + Send + Sync, >, >, } impl RawVulkanInitSettings { /// Adds a new Vulkan create instance callback. See [`wgpu::hal::vulkan::Instance::init_with_callback`] for details. /// /// # Safety /// - Callback must not remove features. /// - Callback must not change anything to what the instance does not support. pub unsafe fn add_create_instance_callback( &mut self, callback: impl Fn(&mut wgpu::hal::vulkan::CreateInstanceCallbackArgs, &mut AdditionalVulkanFeatures) + Send + Sync + 'static, ) { self.create_instance_callbacks.push(Arc::new(callback)); } /// Adds a new Vulkan create device callback. See [`wgpu::hal::vulkan::Adapter::open_with_callback`] for details. /// /// # Safety /// - Callback must not remove features. /// - Callback must not change anything to what the device does not support. 
pub unsafe fn add_create_device_callback( &mut self, callback: impl Fn( &mut wgpu::hal::vulkan::CreateDeviceCallbackArgs, &wgpu::hal::vulkan::Adapter, &mut AdditionalVulkanFeatures, ) + Send + Sync + 'static, ) { self.create_device_callbacks.push(Arc::new(callback)); } } pub(crate) fn create_raw_vulkan_instance( instance_descriptor: &InstanceDescriptor, settings: &RawVulkanInitSettings, additional_features: &mut AdditionalVulkanFeatures, ) -> Instance { // SAFETY: Registering callbacks is unsafe. Callback authors promise not to remove features // or change the instance to something it does not support unsafe { wgpu::hal::vulkan::Instance::init_with_callback( &wgpu::hal::InstanceDescriptor { name: "wgpu", flags: instance_descriptor.flags, memory_budget_thresholds: instance_descriptor.memory_budget_thresholds, backend_options: instance_descriptor.backend_options.clone(), }, Some(Box::new(|mut args| { for callback in &settings.create_instance_callbacks { (callback)(&mut args, additional_features); } })), ) .map(|raw_instance| Instance::from_hal::<Vulkan>(raw_instance)) .unwrap_or_else(|_| Instance::new(instance_descriptor)) } } pub(crate) async fn create_raw_device( adapter: &Adapter, device_descriptor: &DeviceDescriptor<'_>, settings: &RawVulkanInitSettings, additional_features: &mut AdditionalVulkanFeatures, ) -> Result<(Device, Queue), CreateRawVulkanDeviceError> { // SAFETY: Registering callbacks is unsafe. 
Callback authors promise not to remove features // or change the adapter to something it does not support unsafe { let Some(raw_adapter) = adapter.as_hal::<Vulkan>() else { return Ok(adapter.request_device(device_descriptor).await?); }; let open_device = raw_adapter.open_with_callback( device_descriptor.required_features, &device_descriptor.memory_hints, Some(Box::new(|mut args| { for callback in &settings.create_device_callbacks { (callback)(&mut args, &raw_adapter, additional_features); } })), )?; Ok(adapter.create_device_from_hal::<Vulkan>(open_device, device_descriptor)?) } } #[derive(Error, Debug)] pub(crate) enum CreateRawVulkanDeviceError { #[error(transparent)] RequestDeviceError(#[from] wgpu::RequestDeviceError), #[error(transparent)] DeviceError(#[from] wgpu::hal::DeviceError), } /// A list of additional Vulkan features that are supported by the current wgpu instance / adapter. This is populated /// by callbacks defined in [`RawVulkanInitSettings`] #[derive(Resource, Default, Clone)] pub struct AdditionalVulkanFeatures(HashSet<TypeId>); impl AdditionalVulkanFeatures { pub fn insert<T: Any>(&mut self) { self.0.insert(TypeId::of::<T>()); } pub fn has<T: Any>(&self) -> bool { self.0.contains(&TypeId::of::<T>()) } pub fn remove<T: Any>(&mut self) { self.0.remove(&TypeId::of::<T>()); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/renderer/render_device.rs
crates/bevy_render/src/renderer/render_device.rs
use super::RenderQueue; use crate::render_resource::{ BindGroup, BindGroupLayout, Buffer, ComputePipeline, RawRenderPipelineDescriptor, RenderPipeline, Sampler, Texture, }; use crate::renderer::WgpuWrapper; use bevy_ecs::resource::Resource; use wgpu::{ util::DeviceExt, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BufferAsyncError, BufferBindingType, PollError, PollStatus, }; /// This GPU device is responsible for the creation of most rendering and compute resources. #[derive(Resource, Clone)] pub struct RenderDevice { device: WgpuWrapper<wgpu::Device>, } impl From<wgpu::Device> for RenderDevice { fn from(device: wgpu::Device) -> Self { Self::new(WgpuWrapper::new(device)) } } impl RenderDevice { pub fn new(device: WgpuWrapper<wgpu::Device>) -> Self { Self { device } } /// List all [`Features`](wgpu::Features) that may be used with this device. /// /// Functions may panic if you use unsupported features. #[inline] pub fn features(&self) -> wgpu::Features { self.device.features() } /// List all [`Limits`](wgpu::Limits) that were requested of this device. /// /// If any of these limits are exceeded, functions may panic. #[inline] pub fn limits(&self) -> wgpu::Limits { self.device.limits() } /// Creates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. /// /// # Safety /// /// Creates a shader module with user-customizable runtime checks which allows shaders to /// perform operations which can lead to undefined behavior like indexing out of bounds, /// To avoid UB, ensure any unchecked shaders are sound! /// This method should never be called for user-supplied shaders. 
#[inline] pub unsafe fn create_shader_module( &self, desc: wgpu::ShaderModuleDescriptor, ) -> wgpu::ShaderModule { #[cfg(feature = "spirv_shader_passthrough")] match &desc.source { wgpu::ShaderSource::SpirV(source) if self .features() .contains(wgpu::Features::EXPERIMENTAL_PASSTHROUGH_SHADERS) => { // SAFETY: // This call passes binary data to the backend as-is and can potentially result in a driver crash or bogus behavior. // No attempt is made to ensure that data is valid SPIR-V. unsafe { self.device.create_shader_module_passthrough( wgpu::ShaderModuleDescriptorPassthrough { label: desc.label, spirv: Some(source.clone()), ..Default::default() }, ) } } // SAFETY: // // This call passes binary data to the backend as-is and can potentially result in a driver crash or bogus behavior. // No attempt is made to ensure that data is valid SPIR-V. _ => unsafe { self.device .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) }, } #[cfg(not(feature = "spirv_shader_passthrough"))] // SAFETY: the caller is responsible for upholding the safety requirements unsafe { self.device .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) } } /// Creates and validates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. /// /// See [`ValidateShader`](bevy_shader::ValidateShader) for more information on the tradeoffs involved with shader validation. #[inline] pub fn create_and_validate_shader_module( &self, desc: wgpu::ShaderModuleDescriptor, ) -> wgpu::ShaderModule { #[cfg(feature = "spirv_shader_passthrough")] match &desc.source { wgpu::ShaderSource::SpirV(_source) => panic!("no safety checks are performed for spirv shaders. use `create_shader_module` instead"), _ => self.device.create_shader_module(desc), } #[cfg(not(feature = "spirv_shader_passthrough"))] self.device.create_shader_module(desc) } /// Check for resource cleanups and mapping callbacks. 
/// /// Return `true` if the queue is empty, or `false` if there are more queue /// submissions still in flight. (Note that, unless access to the [`wgpu::Queue`] is /// coordinated somehow, this information could be out of date by the time /// the caller receives it. `Queue`s can be shared between threads, so /// other threads could submit new work at any time.) /// /// no-op on the web, device is automatically polled. #[inline] pub fn poll(&self, maintain: wgpu::PollType) -> Result<PollStatus, PollError> { self.device.poll(maintain) } /// Creates an empty [`CommandEncoder`](wgpu::CommandEncoder). #[inline] pub fn create_command_encoder( &self, desc: &wgpu::CommandEncoderDescriptor, ) -> wgpu::CommandEncoder { self.device.create_command_encoder(desc) } /// Creates an empty [`RenderBundleEncoder`](wgpu::RenderBundleEncoder). #[inline] pub fn create_render_bundle_encoder( &self, desc: &wgpu::RenderBundleEncoderDescriptor, ) -> wgpu::RenderBundleEncoder<'_> { self.device.create_render_bundle_encoder(desc) } /// Creates a new [`BindGroup`](wgpu::BindGroup). #[inline] pub fn create_bind_group<'a>( &self, label: impl Into<wgpu::Label<'a>>, layout: &'a BindGroupLayout, entries: &'a [BindGroupEntry<'a>], ) -> BindGroup { let wgpu_bind_group = self.device.create_bind_group(&BindGroupDescriptor { label: label.into(), layout, entries, }); BindGroup::from(wgpu_bind_group) } /// Creates a [`BindGroupLayout`](wgpu::BindGroupLayout). #[inline] pub fn create_bind_group_layout<'a>( &self, label: impl Into<wgpu::Label<'a>>, entries: &'a [BindGroupLayoutEntry], ) -> BindGroupLayout { BindGroupLayout::from( self.device .create_bind_group_layout(&BindGroupLayoutDescriptor { label: label.into(), entries, }), ) } /// Creates a [`PipelineLayout`](wgpu::PipelineLayout). #[inline] pub fn create_pipeline_layout( &self, desc: &wgpu::PipelineLayoutDescriptor, ) -> wgpu::PipelineLayout { self.device.create_pipeline_layout(desc) } /// Creates a [`RenderPipeline`]. 
#[inline] pub fn create_render_pipeline(&self, desc: &RawRenderPipelineDescriptor) -> RenderPipeline { let wgpu_render_pipeline = self.device.create_render_pipeline(desc); RenderPipeline::from(wgpu_render_pipeline) } /// Creates a [`ComputePipeline`]. #[inline] pub fn create_compute_pipeline( &self, desc: &wgpu::ComputePipelineDescriptor, ) -> ComputePipeline { let wgpu_compute_pipeline = self.device.create_compute_pipeline(desc); ComputePipeline::from(wgpu_compute_pipeline) } /// Creates a [`Buffer`]. pub fn create_buffer(&self, desc: &wgpu::BufferDescriptor) -> Buffer { let wgpu_buffer = self.device.create_buffer(desc); Buffer::from(wgpu_buffer) } /// Creates a [`Buffer`] and initializes it with the specified data. pub fn create_buffer_with_data(&self, desc: &wgpu::util::BufferInitDescriptor) -> Buffer { let wgpu_buffer = self.device.create_buffer_init(desc); Buffer::from(wgpu_buffer) } /// Creates a new [`Texture`] and initializes it with the specified data. /// /// `desc` specifies the general format of the texture. /// `data` is the raw data. pub fn create_texture_with_data( &self, render_queue: &RenderQueue, desc: &wgpu::TextureDescriptor, order: wgpu::util::TextureDataOrder, data: &[u8], ) -> Texture { let wgpu_texture = self.device .create_texture_with_data(render_queue.as_ref(), desc, order, data); Texture::from(wgpu_texture) } /// Creates a new [`Texture`]. /// /// `desc` specifies the general format of the texture. pub fn create_texture(&self, desc: &wgpu::TextureDescriptor) -> Texture { let wgpu_texture = self.device.create_texture(desc); Texture::from(wgpu_texture) } /// Creates a new [`Sampler`]. /// /// `desc` specifies the behavior of the sampler. pub fn create_sampler(&self, desc: &wgpu::SamplerDescriptor) -> Sampler { let wgpu_sampler = self.device.create_sampler(desc); Sampler::from(wgpu_sampler) } /// Initializes [`Surface`](wgpu::Surface) for presentation. 
/// /// # Panics /// /// - A old [`SurfaceTexture`](wgpu::SurfaceTexture) is still alive referencing an old surface. /// - Texture format requested is unsupported on the surface. pub fn configure_surface(&self, surface: &wgpu::Surface, config: &wgpu::SurfaceConfiguration) { surface.configure(&self.device, config); } /// Returns the wgpu [`Device`](wgpu::Device). pub fn wgpu_device(&self) -> &wgpu::Device { &self.device } pub fn map_buffer( &self, buffer: &wgpu::BufferSlice, map_mode: wgpu::MapMode, callback: impl FnOnce(Result<(), BufferAsyncError>) + Send + 'static, ) { buffer.map_async(map_mode, callback); } // Rounds up `row_bytes` to be a multiple of [`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`]. pub const fn align_copy_bytes_per_row(row_bytes: usize) -> usize { let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize; // If row_bytes is aligned calculate a value just under the next aligned value. // Otherwise calculate a value greater than the next aligned value. let over_aligned = row_bytes + align - 1; // Round the number *down* to the nearest aligned value. (over_aligned / align) * align } pub fn get_supported_read_only_binding_type( &self, buffers_per_shader_stage: u32, ) -> BufferBindingType { if self.limits().max_storage_buffers_per_shader_stage >= buffers_per_shader_stage { BufferBindingType::Storage { read_only: true } } else { BufferBindingType::Uniform } } } #[cfg(test)] mod tests { use super::*; #[test] fn align_copy_bytes_per_row() { // Test for https://github.com/bevyengine/bevy/issues/16992 let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize; assert_eq!(RenderDevice::align_copy_bytes_per_row(0), 0); assert_eq!(RenderDevice::align_copy_bytes_per_row(1), align); assert_eq!(RenderDevice::align_copy_bytes_per_row(align + 1), align * 2); assert_eq!(RenderDevice::align_copy_bytes_per_row(align), align); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/renderer/graph_runner.rs
crates/bevy_render/src/renderer/graph_runner.rs
use bevy_ecs::{prelude::Entity, world::World}; use bevy_platform::collections::HashMap; #[cfg(feature = "trace")] use tracing::info_span; use alloc::{borrow::Cow, collections::VecDeque}; use smallvec::{smallvec, SmallVec}; use thiserror::Error; use crate::{ diagnostic::internal::{DiagnosticsRecorder, RenderDiagnosticsMutex}, render_graph::{ Edge, InternedRenderLabel, InternedRenderSubGraph, NodeRunError, NodeState, RenderGraph, RenderGraphContext, SlotLabel, SlotType, SlotValue, }, renderer::{RenderContext, RenderDevice}, }; /// The [`RenderGraphRunner`] is responsible for executing a [`RenderGraph`]. /// /// It will run all nodes in the graph sequentially in the correct order (defined by the edges). /// Each [`Node`](crate::render_graph::Node) can run any arbitrary code, but will generally /// either send directly a [`CommandBuffer`] or a task that will asynchronously generate a [`CommandBuffer`] /// /// After running the graph, the [`RenderGraphRunner`] will execute in parallel all the tasks to get /// an ordered list of [`CommandBuffer`]s to execute. These [`CommandBuffer`] will be submitted to the GPU /// sequentially in the order that the tasks were submitted. 
(which is the order of the [`RenderGraph`]) /// /// [`CommandBuffer`]: wgpu::CommandBuffer pub(crate) struct RenderGraphRunner; #[derive(Error, Debug)] pub enum RenderGraphRunnerError { #[error(transparent)] NodeRunError(#[from] NodeRunError), #[error("node output slot not set (index {slot_index}, name {slot_name})")] EmptyNodeOutputSlot { type_name: &'static str, slot_index: usize, slot_name: Cow<'static, str>, }, #[error("graph '{sub_graph:?}' could not be run because slot '{slot_name}' at index {slot_index} has no value")] MissingInput { slot_index: usize, slot_name: Cow<'static, str>, sub_graph: Option<InternedRenderSubGraph>, }, #[error("attempted to use the wrong type for input slot")] MismatchedInputSlotType { slot_index: usize, label: SlotLabel, expected: SlotType, actual: SlotType, }, #[error( "node (name: '{node_name:?}') has {slot_count} input slots, but was provided {value_count} values" )] MismatchedInputCount { node_name: InternedRenderLabel, slot_count: usize, value_count: usize, }, } impl RenderGraphRunner { pub fn run( graph: &RenderGraph, render_device: RenderDevice, mut diagnostics_recorder: Option<DiagnosticsRecorder>, queue: &wgpu::Queue, world: &World, finalizer: impl FnOnce(&mut wgpu::CommandEncoder), ) -> Result<Option<DiagnosticsRecorder>, RenderGraphRunnerError> { if let Some(recorder) = &mut diagnostics_recorder { recorder.begin_frame(); } let mut render_context = RenderContext::new(render_device, diagnostics_recorder); Self::run_graph(graph, None, &mut render_context, world, &[], None, None)?; finalizer(render_context.command_encoder()); let (render_device, mut diagnostics_recorder) = { let (commands, render_device, diagnostics_recorder) = render_context.finish(); #[cfg(feature = "trace")] let _span = info_span!("submit_graph_commands").entered(); queue.submit(commands); (render_device, diagnostics_recorder) }; if let Some(recorder) = &mut diagnostics_recorder { let render_diagnostics_mutex = 
world.resource::<RenderDiagnosticsMutex>().0.clone(); recorder.finish_frame(&render_device, move |diagnostics| { *render_diagnostics_mutex.lock().expect("lock poisoned") = Some(diagnostics); }); } Ok(diagnostics_recorder) } /// Runs the [`RenderGraph`] and all its sub-graphs sequentially, making sure that all nodes are /// run in the correct order. (a node only runs when all its dependencies have finished running) fn run_graph<'w>( graph: &RenderGraph, sub_graph: Option<InternedRenderSubGraph>, render_context: &mut RenderContext<'w>, world: &'w World, inputs: &[SlotValue], view_entity: Option<Entity>, debug_group: Option<String>, ) -> Result<(), RenderGraphRunnerError> { let mut node_outputs: HashMap<InternedRenderLabel, SmallVec<[SlotValue; 4]>> = HashMap::default(); #[cfg(feature = "trace")] let span = if let Some(render_label) = &sub_graph { let name = format!("{render_label:?}"); if let Some(debug_group) = debug_group.as_ref() { info_span!("run_graph", name = name, debug_group = debug_group) } else { info_span!("run_graph", name = name) } } else { info_span!("run_graph", name = "main_graph") }; #[cfg(feature = "trace")] let _guard = span.enter(); if let Some(debug_group) = debug_group.as_ref() { // wgpu 27 changed the debug_group validation which makes it impossible to have // a debug_group that spans multiple command encoders. 
// // <https://github.com/gfx-rs/wgpu/pull/8048> // // For now, we use a debug_marker as a workaround render_context .command_encoder() .insert_debug_marker(&format!("Start {debug_group}")); } // Queue up nodes without inputs, which can be run immediately let mut node_queue: VecDeque<&NodeState> = graph .iter_nodes() .filter(|node| node.input_slots.is_empty()) .collect(); // pass inputs into the graph if let Some(input_node) = graph.get_input_node() { let mut input_values: SmallVec<[SlotValue; 4]> = SmallVec::new(); for (i, input_slot) in input_node.input_slots.iter().enumerate() { if let Some(input_value) = inputs.get(i) { if input_slot.slot_type != input_value.slot_type() { return Err(RenderGraphRunnerError::MismatchedInputSlotType { slot_index: i, actual: input_value.slot_type(), expected: input_slot.slot_type, label: input_slot.name.clone().into(), }); } input_values.push(input_value.clone()); } else { return Err(RenderGraphRunnerError::MissingInput { slot_index: i, slot_name: input_slot.name.clone(), sub_graph, }); } } node_outputs.insert(input_node.label, input_values); for (_, node_state) in graph .iter_node_outputs(input_node.label) .expect("node exists") { node_queue.push_front(node_state); } } 'handle_node: while let Some(node_state) = node_queue.pop_back() { // skip nodes that are already processed if node_outputs.contains_key(&node_state.label) { continue; } let mut slot_indices_and_inputs: SmallVec<[(usize, SlotValue); 4]> = SmallVec::new(); // check if all dependencies have finished running for (edge, input_node) in graph .iter_node_inputs(node_state.label) .expect("node is in graph") { match edge { Edge::SlotEdge { output_index, input_index, .. } => { if let Some(outputs) = node_outputs.get(&input_node.label) { slot_indices_and_inputs .push((*input_index, outputs[*output_index].clone())); } else { node_queue.push_front(node_state); continue 'handle_node; } } Edge::NodeEdge { .. 
} => { if !node_outputs.contains_key(&input_node.label) { node_queue.push_front(node_state); continue 'handle_node; } } } } // construct final sorted input list slot_indices_and_inputs.sort_by_key(|(index, _)| *index); let inputs: SmallVec<[SlotValue; 4]> = slot_indices_and_inputs .into_iter() .map(|(_, value)| value) .collect(); if inputs.len() != node_state.input_slots.len() { return Err(RenderGraphRunnerError::MismatchedInputCount { node_name: node_state.label, slot_count: node_state.input_slots.len(), value_count: inputs.len(), }); } let mut outputs: SmallVec<[Option<SlotValue>; 4]> = smallvec![None; node_state.output_slots.len()]; { let mut context = RenderGraphContext::new(graph, node_state, &inputs, &mut outputs); if let Some(view_entity) = view_entity { context.set_view_entity(view_entity); } { #[cfg(feature = "trace")] let _span = info_span!("node", name = node_state.type_name).entered(); node_state.node.run(&mut context, render_context, world)?; } for run_sub_graph in context.finish() { let sub_graph = graph .get_sub_graph(run_sub_graph.sub_graph) .expect("sub graph exists because it was validated when queued."); Self::run_graph( sub_graph, Some(run_sub_graph.sub_graph), render_context, world, &run_sub_graph.inputs, run_sub_graph.view_entity, run_sub_graph.debug_group, )?; } } let mut values: SmallVec<[SlotValue; 4]> = SmallVec::new(); for (i, output) in outputs.into_iter().enumerate() { if let Some(value) = output { values.push(value); } else { let empty_slot = node_state.output_slots.get_slot(i).unwrap(); return Err(RenderGraphRunnerError::EmptyNodeOutputSlot { type_name: node_state.type_name, slot_index: i, slot_name: empty_slot.name.clone(), }); } } node_outputs.insert(node_state.label, values); for (_, node_state) in graph .iter_node_outputs(node_state.label) .expect("node exists") { node_queue.push_front(node_state); } } if let Some(debug_group) = debug_group { render_context .command_encoder() .insert_debug_marker(&format!("End {debug_group}")); } 
Ok(()) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/renderer/mod.rs
crates/bevy_render/src/renderer/mod.rs
mod graph_runner; #[cfg(feature = "raw_vulkan_init")] pub mod raw_vulkan_init; mod render_device; mod wgpu_wrapper; pub use graph_runner::*; pub use render_device::*; pub use wgpu_wrapper::WgpuWrapper; use crate::{ diagnostic::{internal::DiagnosticsRecorder, RecordDiagnostics}, render_graph::RenderGraph, render_phase::TrackedRenderPass, render_resource::RenderPassDescriptor, settings::{RenderResources, WgpuSettings, WgpuSettingsPriority}, view::{ExtractedWindows, ViewTarget}, }; use alloc::sync::Arc; use bevy_camera::NormalizedRenderTarget; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{prelude::*, system::SystemState}; use bevy_platform::time::Instant; use bevy_render::camera::ExtractedCamera; use bevy_time::TimeSender; use bevy_window::RawHandleWrapperHolder; use tracing::{debug, error, info, info_span, warn}; use wgpu::{ Adapter, AdapterInfo, Backends, CommandBuffer, CommandEncoder, DeviceType, Instance, Queue, RequestAdapterOptions, Trace, }; /// Updates the [`RenderGraph`] with all of its nodes and then runs it to render the entire frame. pub fn render_system( world: &mut World, state: &mut SystemState<Query<(&ViewTarget, &ExtractedCamera)>>, ) { world.resource_scope(|world, mut graph: Mut<RenderGraph>| { graph.update(world); }); let diagnostics_recorder = world.remove_resource::<DiagnosticsRecorder>(); let graph = world.resource::<RenderGraph>(); let render_device = world.resource::<RenderDevice>(); let render_queue = world.resource::<RenderQueue>(); let res = RenderGraphRunner::run( graph, render_device.clone(), // TODO: is this clone really necessary? 
diagnostics_recorder, &render_queue.0, world, |encoder| { crate::view::screenshot::submit_screenshot_commands(world, encoder); crate::gpu_readback::submit_readback_commands(world, encoder); }, ); match res { Ok(Some(diagnostics_recorder)) => { world.insert_resource(diagnostics_recorder); } Ok(None) => {} Err(e) => { error!("Error running render graph:"); { let mut src: &dyn core::error::Error = &e; loop { error!("> {}", src); match src.source() { Some(s) => src = s, None => break, } } } panic!("Error running render graph: {e}"); } } { let _span = info_span!("present_frames").entered(); world.resource_scope(|world, mut windows: Mut<ExtractedWindows>| { let views = state.get(world); for window in windows.values_mut() { let view_needs_present = views.iter().any(|(view_target, camera)| { matches!( camera.target, Some(NormalizedRenderTarget::Window(w)) if w.entity() == window.entity ) && view_target.needs_present() }); if view_needs_present || window.needs_initial_present { window.present(); window.needs_initial_present = false; } } }); #[cfg(feature = "tracing-tracy")] tracing::event!( tracing::Level::INFO, message = "finished frame", tracy.frame_mark = true ); } crate::view::screenshot::collect_screenshots(world); // update the time and send it to the app world let time_sender = world.resource::<TimeSender>(); if let Err(error) = time_sender.0.try_send(Instant::now()) { match error { bevy_time::TrySendError::Full(_) => { panic!("The TimeSender channel should always be empty during render. You might need to add the bevy::core::time_system to your app."); } bevy_time::TrySendError::Disconnected(_) => { // ignore disconnected errors, the main world probably just got dropped during shutdown } } } } /// This queue is used to enqueue tasks for the GPU to execute asynchronously. #[derive(Resource, Clone, Deref, DerefMut)] pub struct RenderQueue(pub Arc<WgpuWrapper<Queue>>); /// The handle to the physical device being used for rendering. /// See [`Adapter`] for more info. 
#[derive(Resource, Clone, Debug, Deref, DerefMut)] pub struct RenderAdapter(pub Arc<WgpuWrapper<Adapter>>); /// The GPU instance is used to initialize the [`RenderQueue`] and [`RenderDevice`], /// as well as to create [`WindowSurfaces`](crate::view::window::WindowSurfaces). #[derive(Resource, Clone, Deref, DerefMut)] pub struct RenderInstance(pub Arc<WgpuWrapper<Instance>>); /// The [`AdapterInfo`] of the adapter in use by the renderer. #[derive(Resource, Clone, Deref, DerefMut)] pub struct RenderAdapterInfo(pub WgpuWrapper<AdapterInfo>); const GPU_NOT_FOUND_ERROR_MESSAGE: &str = if cfg!(target_os = "linux") { "Unable to find a GPU! Make sure you have installed required drivers! For extra information, see: https://github.com/bevyengine/bevy/blob/latest/docs/linux_dependencies.md" } else { "Unable to find a GPU! Make sure you have installed required drivers!" }; #[cfg(not(target_family = "wasm"))] fn find_adapter_by_name( instance: &Instance, options: &WgpuSettings, compatible_surface: Option<&wgpu::Surface<'_>>, adapter_name: &str, ) -> Option<Adapter> { for adapter in instance.enumerate_adapters(options.backends.expect( "The `backends` field of `WgpuSettings` must be set to use a specific adapter.", )) { tracing::trace!("Checking adapter: {:?}", adapter.get_info()); let info = adapter.get_info(); if let Some(surface) = compatible_surface && !adapter.is_surface_supported(surface) { continue; } if info .name .to_lowercase() .contains(&adapter_name.to_lowercase()) { return Some(adapter); } } None } /// Initializes the renderer by retrieving and preparing the GPU instance, device and queue /// for the specified backend. 
pub async fn initialize_renderer( backends: Backends, primary_window: Option<RawHandleWrapperHolder>, options: &WgpuSettings, #[cfg(feature = "raw_vulkan_init")] raw_vulkan_init_settings: raw_vulkan_init::RawVulkanInitSettings, ) -> RenderResources { let instance_descriptor = wgpu::InstanceDescriptor { backends, flags: options.instance_flags, memory_budget_thresholds: options.instance_memory_budget_thresholds, backend_options: wgpu::BackendOptions { gl: wgpu::GlBackendOptions { gles_minor_version: options.gles3_minor_version, fence_behavior: wgpu::GlFenceBehavior::Normal, }, dx12: wgpu::Dx12BackendOptions { shader_compiler: options.dx12_shader_compiler.clone(), presentation_system: wgpu::wgt::Dx12SwapchainKind::from_env().unwrap_or_default(), latency_waitable_object: wgpu::wgt::Dx12UseFrameLatencyWaitableObject::from_env() .unwrap_or_default(), }, noop: wgpu::NoopBackendOptions { enable: false }, }, }; #[cfg(not(feature = "raw_vulkan_init"))] let instance = Instance::new(&instance_descriptor); #[cfg(feature = "raw_vulkan_init")] let mut additional_vulkan_features = raw_vulkan_init::AdditionalVulkanFeatures::default(); #[cfg(feature = "raw_vulkan_init")] let instance = raw_vulkan_init::create_raw_vulkan_instance( &instance_descriptor, &raw_vulkan_init_settings, &mut additional_vulkan_features, ); let surface = primary_window.and_then(|wrapper| { let maybe_handle = wrapper .0 .lock() .expect("Couldn't get the window handle in time for renderer initialization"); if let Some(wrapper) = maybe_handle.as_ref() { // SAFETY: Plugins should be set up on the main thread. 
let handle = unsafe { wrapper.get_handle() }; Some( instance .create_surface(handle) .expect("Failed to create wgpu surface"), ) } else { None } }); let force_fallback_adapter = std::env::var("WGPU_FORCE_FALLBACK_ADAPTER") .map_or(options.force_fallback_adapter, |v| { !(v.is_empty() || v == "0" || v == "false") }); let desired_adapter_name = std::env::var("WGPU_ADAPTER_NAME") .as_deref() .map_or(options.adapter_name.clone(), |x| Some(x.to_lowercase())); let request_adapter_options = RequestAdapterOptions { power_preference: options.power_preference, compatible_surface: surface.as_ref(), force_fallback_adapter, }; #[cfg(not(target_family = "wasm"))] let mut selected_adapter = desired_adapter_name.and_then(|adapter_name| { find_adapter_by_name( &instance, options, request_adapter_options.compatible_surface, &adapter_name, ) }); #[cfg(target_family = "wasm")] let mut selected_adapter = None; #[cfg(target_family = "wasm")] if desired_adapter_name.is_some() { warn!("Choosing an adapter is not supported on wasm."); } if selected_adapter.is_none() { debug!( "Searching for adapter with options: {:?}", request_adapter_options ); selected_adapter = instance .request_adapter(&request_adapter_options) .await .ok(); } let adapter = selected_adapter.expect(GPU_NOT_FOUND_ERROR_MESSAGE); let adapter_info = adapter.get_info(); info!("{:?}", adapter_info); if adapter_info.device_type == DeviceType::Cpu { warn!( "The selected adapter is using a driver that only supports software rendering. \ This is likely to be very slow. 
See https://bevy.org/learn/errors/b0006/" ); } // Maybe get features and limits based on what is supported by the adapter/backend let mut features = wgpu::Features::empty(); let mut limits = options.limits.clone(); if matches!(options.priority, WgpuSettingsPriority::Functionality) { features = adapter.features(); if adapter_info.device_type == DeviceType::DiscreteGpu { // `MAPPABLE_PRIMARY_BUFFERS` can have a significant, negative performance impact for // discrete GPUs due to having to transfer data across the PCI-E bus and so it // should not be automatically enabled in this case. It is however beneficial for // integrated GPUs. features.remove(wgpu::Features::MAPPABLE_PRIMARY_BUFFERS); } limits = adapter.limits(); } // Enforce the disabled features if let Some(disabled_features) = options.disabled_features { features.remove(disabled_features); } // NOTE: |= is used here to ensure that any explicitly-enabled features are respected. features |= options.features; // Enforce the limit constraints if let Some(constrained_limits) = options.constrained_limits.as_ref() { // NOTE: Respect the configured limits as an 'upper bound'. This means for 'max' limits, we // take the minimum of the calculated limits according to the adapter/backend and the // specified max_limits. For 'min' limits, take the maximum instead. This is intended to // err on the side of being conservative. We can't claim 'higher' limits that are supported // but we can constrain to 'lower' limits. 
limits = wgpu::Limits { max_texture_dimension_1d: limits .max_texture_dimension_1d .min(constrained_limits.max_texture_dimension_1d), max_texture_dimension_2d: limits .max_texture_dimension_2d .min(constrained_limits.max_texture_dimension_2d), max_texture_dimension_3d: limits .max_texture_dimension_3d .min(constrained_limits.max_texture_dimension_3d), max_texture_array_layers: limits .max_texture_array_layers .min(constrained_limits.max_texture_array_layers), max_bind_groups: limits .max_bind_groups .min(constrained_limits.max_bind_groups), max_dynamic_uniform_buffers_per_pipeline_layout: limits .max_dynamic_uniform_buffers_per_pipeline_layout .min(constrained_limits.max_dynamic_uniform_buffers_per_pipeline_layout), max_dynamic_storage_buffers_per_pipeline_layout: limits .max_dynamic_storage_buffers_per_pipeline_layout .min(constrained_limits.max_dynamic_storage_buffers_per_pipeline_layout), max_sampled_textures_per_shader_stage: limits .max_sampled_textures_per_shader_stage .min(constrained_limits.max_sampled_textures_per_shader_stage), max_samplers_per_shader_stage: limits .max_samplers_per_shader_stage .min(constrained_limits.max_samplers_per_shader_stage), max_storage_buffers_per_shader_stage: limits .max_storage_buffers_per_shader_stage .min(constrained_limits.max_storage_buffers_per_shader_stage), max_storage_textures_per_shader_stage: limits .max_storage_textures_per_shader_stage .min(constrained_limits.max_storage_textures_per_shader_stage), max_uniform_buffers_per_shader_stage: limits .max_uniform_buffers_per_shader_stage .min(constrained_limits.max_uniform_buffers_per_shader_stage), max_binding_array_elements_per_shader_stage: limits .max_binding_array_elements_per_shader_stage .min(constrained_limits.max_binding_array_elements_per_shader_stage), max_binding_array_sampler_elements_per_shader_stage: limits .max_binding_array_sampler_elements_per_shader_stage .min(constrained_limits.max_binding_array_sampler_elements_per_shader_stage), 
max_uniform_buffer_binding_size: limits .max_uniform_buffer_binding_size .min(constrained_limits.max_uniform_buffer_binding_size), max_storage_buffer_binding_size: limits .max_storage_buffer_binding_size .min(constrained_limits.max_storage_buffer_binding_size), max_vertex_buffers: limits .max_vertex_buffers .min(constrained_limits.max_vertex_buffers), max_vertex_attributes: limits .max_vertex_attributes .min(constrained_limits.max_vertex_attributes), max_vertex_buffer_array_stride: limits .max_vertex_buffer_array_stride .min(constrained_limits.max_vertex_buffer_array_stride), max_push_constant_size: limits .max_push_constant_size .min(constrained_limits.max_push_constant_size), min_uniform_buffer_offset_alignment: limits .min_uniform_buffer_offset_alignment .max(constrained_limits.min_uniform_buffer_offset_alignment), min_storage_buffer_offset_alignment: limits .min_storage_buffer_offset_alignment .max(constrained_limits.min_storage_buffer_offset_alignment), max_inter_stage_shader_components: limits .max_inter_stage_shader_components .min(constrained_limits.max_inter_stage_shader_components), max_compute_workgroup_storage_size: limits .max_compute_workgroup_storage_size .min(constrained_limits.max_compute_workgroup_storage_size), max_compute_invocations_per_workgroup: limits .max_compute_invocations_per_workgroup .min(constrained_limits.max_compute_invocations_per_workgroup), max_compute_workgroup_size_x: limits .max_compute_workgroup_size_x .min(constrained_limits.max_compute_workgroup_size_x), max_compute_workgroup_size_y: limits .max_compute_workgroup_size_y .min(constrained_limits.max_compute_workgroup_size_y), max_compute_workgroup_size_z: limits .max_compute_workgroup_size_z .min(constrained_limits.max_compute_workgroup_size_z), max_compute_workgroups_per_dimension: limits .max_compute_workgroups_per_dimension .min(constrained_limits.max_compute_workgroups_per_dimension), max_buffer_size: limits .max_buffer_size .min(constrained_limits.max_buffer_size), 
max_bindings_per_bind_group: limits .max_bindings_per_bind_group .min(constrained_limits.max_bindings_per_bind_group), max_non_sampler_bindings: limits .max_non_sampler_bindings .min(constrained_limits.max_non_sampler_bindings), max_blas_primitive_count: limits .max_blas_primitive_count .min(constrained_limits.max_blas_primitive_count), max_blas_geometry_count: limits .max_blas_geometry_count .min(constrained_limits.max_blas_geometry_count), max_tlas_instance_count: limits .max_tlas_instance_count .min(constrained_limits.max_tlas_instance_count), max_color_attachments: limits .max_color_attachments .min(constrained_limits.max_color_attachments), max_color_attachment_bytes_per_sample: limits .max_color_attachment_bytes_per_sample .min(constrained_limits.max_color_attachment_bytes_per_sample), min_subgroup_size: limits .min_subgroup_size .max(constrained_limits.min_subgroup_size), max_subgroup_size: limits .max_subgroup_size .min(constrained_limits.max_subgroup_size), max_acceleration_structures_per_shader_stage: limits .max_acceleration_structures_per_shader_stage .min(constrained_limits.max_acceleration_structures_per_shader_stage), max_task_workgroup_total_count: limits .max_task_workgroup_total_count .min(constrained_limits.max_task_workgroup_total_count), max_task_workgroups_per_dimension: limits .max_task_workgroups_per_dimension .min(constrained_limits.max_task_workgroups_per_dimension), max_mesh_output_layers: limits .max_mesh_output_layers .min(constrained_limits.max_mesh_output_layers), max_mesh_multiview_count: limits .max_mesh_multiview_count .min(constrained_limits.max_mesh_multiview_count), }; } let device_descriptor = wgpu::DeviceDescriptor { label: options.device_label.as_ref().map(AsRef::as_ref), required_features: features, required_limits: limits, // SAFETY: TODO, see https://github.com/bevyengine/bevy/issues/22082 experimental_features: unsafe { wgpu::ExperimentalFeatures::enabled() }, memory_hints: options.memory_hints.clone(), // See 
https://github.com/gfx-rs/wgpu/issues/5974 trace: Trace::Off, }; #[cfg(not(feature = "raw_vulkan_init"))] let (device, queue) = adapter.request_device(&device_descriptor).await.unwrap(); #[cfg(feature = "raw_vulkan_init")] let (device, queue) = raw_vulkan_init::create_raw_device( &adapter, &device_descriptor, &raw_vulkan_init_settings, &mut additional_vulkan_features, ) .await .unwrap(); debug!("Configured wgpu adapter Limits: {:#?}", device.limits()); debug!("Configured wgpu adapter Features: {:#?}", device.features()); RenderResources( RenderDevice::from(device), RenderQueue(Arc::new(WgpuWrapper::new(queue))), RenderAdapterInfo(WgpuWrapper::new(adapter_info)), RenderAdapter(Arc::new(WgpuWrapper::new(adapter))), RenderInstance(Arc::new(WgpuWrapper::new(instance))), #[cfg(feature = "raw_vulkan_init")] additional_vulkan_features, ) } /// The context with all information required to interact with the GPU. /// /// The [`RenderDevice`] is used to create render resources and the /// [`CommandEncoder`] is used to record a series of GPU operations. pub struct RenderContext<'w> { render_device: RenderDevice, command_encoder: Option<CommandEncoder>, command_buffer_queue: Vec<QueuedCommandBuffer<'w>>, diagnostics_recorder: Option<Arc<DiagnosticsRecorder>>, } impl<'w> RenderContext<'w> { /// Creates a new [`RenderContext`] from a [`RenderDevice`]. pub fn new( render_device: RenderDevice, diagnostics_recorder: Option<DiagnosticsRecorder>, ) -> Self { Self { render_device, command_encoder: None, command_buffer_queue: Vec::new(), diagnostics_recorder: diagnostics_recorder.map(Arc::new), } } /// Gets the underlying [`RenderDevice`]. pub fn render_device(&self) -> &RenderDevice { &self.render_device } /// Gets the diagnostics recorder, used to track elapsed time and pipeline statistics /// of various render and compute passes. pub fn diagnostic_recorder(&self) -> impl RecordDiagnostics + use<> { self.diagnostics_recorder.clone() } /// Gets the current [`CommandEncoder`]. 
pub fn command_encoder(&mut self) -> &mut CommandEncoder { self.command_encoder.get_or_insert_with(|| { self.render_device .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()) }) } pub(crate) fn has_commands(&mut self) -> bool { self.command_encoder.is_some() || !self.command_buffer_queue.is_empty() } /// Creates a new [`TrackedRenderPass`] for the context, /// configured using the provided `descriptor`. pub fn begin_tracked_render_pass<'a>( &'a mut self, descriptor: RenderPassDescriptor<'_>, ) -> TrackedRenderPass<'a> { // Cannot use command_encoder() as we need to split the borrow on self let command_encoder = self.command_encoder.get_or_insert_with(|| { self.render_device .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()) }); let render_pass = command_encoder.begin_render_pass(&descriptor); TrackedRenderPass::new(&self.render_device, render_pass) } /// Append a [`CommandBuffer`] to the command buffer queue. /// /// If present, this will flush the currently unflushed [`CommandEncoder`] /// into a [`CommandBuffer`] into the queue before appending the provided /// buffer. pub fn add_command_buffer(&mut self, command_buffer: CommandBuffer) { self.flush_encoder(); self.command_buffer_queue .push(QueuedCommandBuffer::Ready(command_buffer)); } /// Append a function that will generate a [`CommandBuffer`] to the /// command buffer queue, to be ran later. /// /// If present, this will flush the currently unflushed [`CommandEncoder`] /// into a [`CommandBuffer`] into the queue before appending the provided /// buffer. 
pub fn add_command_buffer_generation_task( &mut self, #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] task: impl FnOnce(RenderDevice) -> CommandBuffer + 'w + Send, #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] task: impl FnOnce(RenderDevice) -> CommandBuffer + 'w, ) { self.flush_encoder(); self.command_buffer_queue .push(QueuedCommandBuffer::Task(Box::new(task))); } /// Finalizes and returns the queue of [`CommandBuffer`]s. /// /// This function will wait until all command buffer generation tasks are complete /// by running them in parallel (where supported). /// /// The [`CommandBuffer`]s will be returned in the order that they were added. pub fn finish( mut self, ) -> ( Vec<CommandBuffer>, RenderDevice, Option<DiagnosticsRecorder>, ) { self.flush_encoder(); let mut command_buffers = Vec::with_capacity(self.command_buffer_queue.len()); #[cfg(feature = "trace")] let _command_buffer_generation_tasks_span = info_span!("command_buffer_generation_tasks").entered(); #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] { let mut task_based_command_buffers = bevy_tasks::ComputeTaskPool::get().scope(|task_pool| { for (i, queued_command_buffer) in self.command_buffer_queue.into_iter().enumerate() { match queued_command_buffer { QueuedCommandBuffer::Ready(command_buffer) => { command_buffers.push((i, command_buffer)); } QueuedCommandBuffer::Task(command_buffer_generation_task) => { let render_device = self.render_device.clone(); task_pool.spawn(async move { (i, command_buffer_generation_task(render_device)) }); } } } }); command_buffers.append(&mut task_based_command_buffers); } #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] for (i, queued_command_buffer) in self.command_buffer_queue.into_iter().enumerate() { match queued_command_buffer { QueuedCommandBuffer::Ready(command_buffer) => { command_buffers.push((i, command_buffer)); } QueuedCommandBuffer::Task(command_buffer_generation_task) => { let 
render_device = self.render_device.clone(); command_buffers.push((i, command_buffer_generation_task(render_device))); } } } #[cfg(feature = "trace")] drop(_command_buffer_generation_tasks_span); command_buffers.sort_unstable_by_key(|(i, _)| *i); let mut command_buffers = command_buffers .into_iter() .map(|(_, cb)| cb) .collect::<Vec<CommandBuffer>>(); let mut diagnostics_recorder = self.diagnostics_recorder.take().map(|v| { Arc::try_unwrap(v) .ok() .expect("diagnostic recorder shouldn't be held longer than necessary") }); if let Some(recorder) = &mut diagnostics_recorder { let mut command_encoder = self .render_device .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); recorder.resolve(&mut command_encoder); command_buffers.push(command_encoder.finish()); } (command_buffers, self.render_device, diagnostics_recorder) } fn flush_encoder(&mut self) { if let Some(encoder) = self.command_encoder.take() { self.command_buffer_queue .push(QueuedCommandBuffer::Ready(encoder.finish())); } } } enum QueuedCommandBuffer<'w> { Ready(CommandBuffer), #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] Task(Box<dyn FnOnce(RenderDevice) -> CommandBuffer + 'w + Send>), #[cfg(all(target_arch = "wasm32", target_feature = "atomics"))] Task(Box<dyn FnOnce(RenderDevice) -> CommandBuffer + 'w>), }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/app.rs
crates/bevy_render/src/render_graph/app.rs
use bevy_app::{App, SubApp}; use bevy_ecs::world::{FromWorld, World}; use tracing::warn; use super::{IntoRenderNodeArray, Node, RenderGraph, RenderLabel, RenderSubGraph}; /// Adds common [`RenderGraph`] operations to [`SubApp`] (and [`App`]). pub trait RenderGraphExt { // Add a sub graph to the [`RenderGraph`] fn add_render_sub_graph(&mut self, sub_graph: impl RenderSubGraph) -> &mut Self; /// Add a [`Node`] to the [`RenderGraph`]: /// * Create the [`Node`] using the [`FromWorld`] implementation /// * Add it to the graph fn add_render_graph_node<T: Node + FromWorld>( &mut self, sub_graph: impl RenderSubGraph, node_label: impl RenderLabel, ) -> &mut Self; /// Automatically add the required node edges based on the given ordering fn add_render_graph_edges<const N: usize>( &mut self, sub_graph: impl RenderSubGraph, edges: impl IntoRenderNodeArray<N>, ) -> &mut Self; /// Add node edge to the specified graph fn add_render_graph_edge( &mut self, sub_graph: impl RenderSubGraph, output_node: impl RenderLabel, input_node: impl RenderLabel, ) -> &mut Self; } impl RenderGraphExt for World { fn add_render_graph_node<T: Node + FromWorld>( &mut self, sub_graph: impl RenderSubGraph, node_label: impl RenderLabel, ) -> &mut Self { let sub_graph = sub_graph.intern(); let node = T::from_world(self); let mut render_graph = self.get_resource_mut::<RenderGraph>().expect( "RenderGraph not found. Make sure you are using add_render_graph_node on the RenderApp", ); if let Some(graph) = render_graph.get_sub_graph_mut(sub_graph) { graph.add_node(node_label, node); } else { warn!( "Tried adding a render graph node to {sub_graph:?} but the sub graph doesn't exist" ); } self } #[track_caller] fn add_render_graph_edges<const N: usize>( &mut self, sub_graph: impl RenderSubGraph, edges: impl IntoRenderNodeArray<N>, ) -> &mut Self { let sub_graph = sub_graph.intern(); let mut render_graph = self.get_resource_mut::<RenderGraph>().expect( "RenderGraph not found. 
Make sure you are using add_render_graph_edges on the RenderApp", ); if let Some(graph) = render_graph.get_sub_graph_mut(sub_graph) { graph.add_node_edges(edges); } else { warn!( "Tried adding render graph edges to {sub_graph:?} but the sub graph doesn't exist" ); } self } fn add_render_graph_edge( &mut self, sub_graph: impl RenderSubGraph, output_node: impl RenderLabel, input_node: impl RenderLabel, ) -> &mut Self { let sub_graph = sub_graph.intern(); let mut render_graph = self.get_resource_mut::<RenderGraph>().expect( "RenderGraph not found. Make sure you are using add_render_graph_edge on the RenderApp", ); if let Some(graph) = render_graph.get_sub_graph_mut(sub_graph) { graph.add_node_edge(output_node, input_node); } else { warn!( "Tried adding a render graph edge to {sub_graph:?} but the sub graph doesn't exist" ); } self } fn add_render_sub_graph(&mut self, sub_graph: impl RenderSubGraph) -> &mut Self { let mut render_graph = self.get_resource_mut::<RenderGraph>().expect( "RenderGraph not found. 
Make sure you are using add_render_sub_graph on the RenderApp", ); render_graph.add_sub_graph(sub_graph, RenderGraph::default()); self } } impl RenderGraphExt for SubApp { fn add_render_graph_node<T: Node + FromWorld>( &mut self, sub_graph: impl RenderSubGraph, node_label: impl RenderLabel, ) -> &mut Self { World::add_render_graph_node::<T>(self.world_mut(), sub_graph, node_label); self } fn add_render_graph_edge( &mut self, sub_graph: impl RenderSubGraph, output_node: impl RenderLabel, input_node: impl RenderLabel, ) -> &mut Self { World::add_render_graph_edge(self.world_mut(), sub_graph, output_node, input_node); self } #[track_caller] fn add_render_graph_edges<const N: usize>( &mut self, sub_graph: impl RenderSubGraph, edges: impl IntoRenderNodeArray<N>, ) -> &mut Self { World::add_render_graph_edges(self.world_mut(), sub_graph, edges); self } fn add_render_sub_graph(&mut self, sub_graph: impl RenderSubGraph) -> &mut Self { World::add_render_sub_graph(self.world_mut(), sub_graph); self } } impl RenderGraphExt for App { fn add_render_graph_node<T: Node + FromWorld>( &mut self, sub_graph: impl RenderSubGraph, node_label: impl RenderLabel, ) -> &mut Self { World::add_render_graph_node::<T>(self.world_mut(), sub_graph, node_label); self } fn add_render_graph_edge( &mut self, sub_graph: impl RenderSubGraph, output_node: impl RenderLabel, input_node: impl RenderLabel, ) -> &mut Self { World::add_render_graph_edge(self.world_mut(), sub_graph, output_node, input_node); self } fn add_render_graph_edges<const N: usize>( &mut self, sub_graph: impl RenderSubGraph, edges: impl IntoRenderNodeArray<N>, ) -> &mut Self { World::add_render_graph_edges(self.world_mut(), sub_graph, edges); self } fn add_render_sub_graph(&mut self, sub_graph: impl RenderSubGraph) -> &mut Self { World::add_render_sub_graph(self.world_mut(), sub_graph); self } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/node.rs
crates/bevy_render/src/render_graph/node.rs
use crate::{ render_graph::{ Edge, InputSlotError, OutputSlotError, RenderGraphContext, RenderGraphError, RunSubGraphError, SlotInfo, SlotInfos, }, render_phase::DrawError, renderer::RenderContext, }; pub use bevy_ecs::label::DynEq; use bevy_ecs::{ define_label, intern::Interned, query::{QueryItem, QueryState, ReadOnlyQueryData}, world::{FromWorld, World}, }; use core::fmt::Debug; use downcast_rs::{impl_downcast, Downcast}; use thiserror::Error; use variadics_please::all_tuples_with_size; pub use bevy_render_macros::RenderLabel; use super::{InternedRenderSubGraph, RenderSubGraph}; define_label!( #[diagnostic::on_unimplemented( note = "consider annotating `{Self}` with `#[derive(RenderLabel)]`" )] /// A strongly-typed class of labels used to identify a [`Node`] in a render graph. RenderLabel, RENDER_LABEL_INTERNER ); /// A shorthand for `Interned<dyn RenderLabel>`. pub type InternedRenderLabel = Interned<dyn RenderLabel>; pub trait IntoRenderNodeArray<const N: usize> { fn into_array(self) -> [InternedRenderLabel; N]; } impl<const N: usize> IntoRenderNodeArray<N> for Vec<InternedRenderLabel> { fn into_array(self) -> [InternedRenderLabel; N] { self.try_into().unwrap() } } macro_rules! impl_render_label_tuples { ($N: expr, $(#[$meta:meta])* $(($T: ident, $I: ident)),*) => { $(#[$meta])* impl<$($T: RenderLabel),*> IntoRenderNodeArray<$N> for ($($T,)*) { #[inline] fn into_array(self) -> [InternedRenderLabel; $N] { let ($($I,)*) = self; [$($I.intern(), )*] } } } } all_tuples_with_size!( #[doc(fake_variadic)] impl_render_label_tuples, 1, 32, T, l ); /// A render node that can be added to a [`RenderGraph`](super::RenderGraph). /// /// Nodes are the fundamental part of the graph and used to extend its functionality, by /// generating draw calls and/or running subgraphs. /// They are added via the `render_graph::add_node(my_node)` method. 
/// /// To determine their position in the graph and ensure that all required dependencies (inputs) /// are already executed, [`Edges`](Edge) are used. /// /// A node can produce outputs used as dependencies by other nodes. /// Those inputs and outputs are called slots and are the default way of passing render data /// inside the graph. For more information see [`SlotType`](super::SlotType). pub trait Node: Downcast + Send + Sync + 'static { /// Specifies the required input slots for this node. /// They will then be available during the run method inside the [`RenderGraphContext`]. fn input(&self) -> Vec<SlotInfo> { Vec::new() } /// Specifies the produced output slots for this node. /// They can then be passed one inside [`RenderGraphContext`] during the run method. fn output(&self) -> Vec<SlotInfo> { Vec::new() } /// Updates internal node state using the current render [`World`] prior to the run method. fn update(&mut self, _world: &mut World) {} /// Runs the graph node logic, issues draw calls, updates the output slots and /// optionally queues up subgraphs for execution. The graph data, input and output values are /// passed via the [`RenderGraphContext`]. fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, world: &'w World, ) -> Result<(), NodeRunError>; } impl_downcast!(Node); #[derive(Error, Debug, Eq, PartialEq)] pub enum NodeRunError { #[error("encountered an input slot error")] InputSlotError(#[from] InputSlotError), #[error("encountered an output slot error")] OutputSlotError(#[from] OutputSlotError), #[error("encountered an error when running a sub-graph")] RunSubGraphError(#[from] RunSubGraphError), #[error("encountered an error when executing draw command")] DrawError(#[from] DrawError), } /// A collection of input and output [`Edges`](Edge) for a [`Node`]. 
#[derive(Debug)] pub struct Edges { label: InternedRenderLabel, input_edges: Vec<Edge>, output_edges: Vec<Edge>, } impl Edges { /// Returns all "input edges" (edges going "in") for this node . #[inline] pub fn input_edges(&self) -> &[Edge] { &self.input_edges } /// Returns all "output edges" (edges going "out") for this node . #[inline] pub fn output_edges(&self) -> &[Edge] { &self.output_edges } /// Returns this node's label. #[inline] pub fn label(&self) -> InternedRenderLabel { self.label } /// Adds an edge to the `input_edges` if it does not already exist. pub(crate) fn add_input_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> { if self.has_input_edge(&edge) { return Err(RenderGraphError::EdgeAlreadyExists(edge)); } self.input_edges.push(edge); Ok(()) } /// Removes an edge from the `input_edges` if it exists. pub(crate) fn remove_input_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> { if let Some(index) = self.input_edges.iter().position(|e| *e == edge) { self.input_edges.swap_remove(index); Ok(()) } else { Err(RenderGraphError::EdgeDoesNotExist(edge)) } } /// Adds an edge to the `output_edges` if it does not already exist. pub(crate) fn add_output_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> { if self.has_output_edge(&edge) { return Err(RenderGraphError::EdgeAlreadyExists(edge)); } self.output_edges.push(edge); Ok(()) } /// Removes an edge from the `output_edges` if it exists. pub(crate) fn remove_output_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> { if let Some(index) = self.output_edges.iter().position(|e| *e == edge) { self.output_edges.swap_remove(index); Ok(()) } else { Err(RenderGraphError::EdgeDoesNotExist(edge)) } } /// Checks whether the input edge already exists. pub fn has_input_edge(&self, edge: &Edge) -> bool { self.input_edges.contains(edge) } /// Checks whether the output edge already exists. 
pub fn has_output_edge(&self, edge: &Edge) -> bool { self.output_edges.contains(edge) } /// Searches the `input_edges` for a [`Edge::SlotEdge`], /// which `input_index` matches the `index`; pub fn get_input_slot_edge(&self, index: usize) -> Result<&Edge, RenderGraphError> { self.input_edges .iter() .find(|e| { if let Edge::SlotEdge { input_index, .. } = e { *input_index == index } else { false } }) .ok_or(RenderGraphError::UnconnectedNodeInputSlot { input_slot: index, node: self.label, }) } /// Searches the `output_edges` for a [`Edge::SlotEdge`], /// which `output_index` matches the `index`; pub fn get_output_slot_edge(&self, index: usize) -> Result<&Edge, RenderGraphError> { self.output_edges .iter() .find(|e| { if let Edge::SlotEdge { output_index, .. } = e { *output_index == index } else { false } }) .ok_or(RenderGraphError::UnconnectedNodeOutputSlot { output_slot: index, node: self.label, }) } } /// The internal representation of a [`Node`], with all data required /// by the [`RenderGraph`](super::RenderGraph). /// /// The `input_slots` and `output_slots` are provided by the `node`. pub struct NodeState { pub label: InternedRenderLabel, /// The name of the type that implements [`Node`]. pub type_name: &'static str, pub node: Box<dyn Node>, pub input_slots: SlotInfos, pub output_slots: SlotInfos, pub edges: Edges, } impl Debug for NodeState { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { writeln!(f, "{:?} ({})", self.label, self.type_name) } } impl NodeState { /// Creates an [`NodeState`] without edges, but the `input_slots` and `output_slots` /// are provided by the `node`. pub fn new<T>(label: InternedRenderLabel, node: T) -> Self where T: Node, { NodeState { label, input_slots: node.input().into(), output_slots: node.output().into(), node: Box::new(node), type_name: core::any::type_name::<T>(), edges: Edges { label, input_edges: Vec::new(), output_edges: Vec::new(), }, } } /// Retrieves the [`Node`]. 
pub fn node<T>(&self) -> Result<&T, RenderGraphError> where T: Node, { self.node .downcast_ref::<T>() .ok_or(RenderGraphError::WrongNodeType) } /// Retrieves the [`Node`] mutably. pub fn node_mut<T>(&mut self) -> Result<&mut T, RenderGraphError> where T: Node, { self.node .downcast_mut::<T>() .ok_or(RenderGraphError::WrongNodeType) } /// Validates that each input slot corresponds to an input edge. pub fn validate_input_slots(&self) -> Result<(), RenderGraphError> { for i in 0..self.input_slots.len() { self.edges.get_input_slot_edge(i)?; } Ok(()) } /// Validates that each output slot corresponds to an output edge. pub fn validate_output_slots(&self) -> Result<(), RenderGraphError> { for i in 0..self.output_slots.len() { self.edges.get_output_slot_edge(i)?; } Ok(()) } } /// A [`Node`] without any inputs, outputs and subgraphs, which does nothing when run. /// Used (as a label) to bundle multiple dependencies into one inside /// the [`RenderGraph`](super::RenderGraph). #[derive(Default)] pub struct EmptyNode; impl Node for EmptyNode { fn run( &self, _graph: &mut RenderGraphContext, _render_context: &mut RenderContext, _world: &World, ) -> Result<(), NodeRunError> { Ok(()) } } /// A [`RenderGraph`](super::RenderGraph) [`Node`] that runs the configured subgraph once. /// This makes it easier to insert sub-graph runs into a graph. pub struct RunGraphOnViewNode { sub_graph: InternedRenderSubGraph, } impl RunGraphOnViewNode { pub fn new<T: RenderSubGraph>(sub_graph: T) -> Self { Self { sub_graph: sub_graph.intern(), } } } impl Node for RunGraphOnViewNode { fn run( &self, graph: &mut RenderGraphContext, _render_context: &mut RenderContext, _world: &World, ) -> Result<(), NodeRunError> { graph.run_sub_graph(self.sub_graph, vec![], Some(graph.view_entity()), None)?; Ok(()) } } /// This trait should be used instead of the [`Node`] trait when making a render node that runs on a view. 
/// /// It is intended to be used with [`ViewNodeRunner`] pub trait ViewNode { /// The query that will be used on the view entity. /// It is guaranteed to run on the view entity, so there's no need for a filter type ViewQuery: ReadOnlyQueryData; /// Updates internal node state using the current render [`World`] prior to the run method. fn update(&mut self, _world: &mut World) {} /// Runs the graph node logic, issues draw calls, updates the output slots and /// optionally queues up subgraphs for execution. The graph data, input and output values are /// passed via the [`RenderGraphContext`]. fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, view_query: QueryItem<'w, '_, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError>; } /// This [`Node`] can be used to run any [`ViewNode`]. /// It will take care of updating the view query in `update()` and running the query in `run()`. /// /// This [`Node`] exists to help reduce boilerplate when making a render node that runs on a view. pub struct ViewNodeRunner<N: ViewNode> { view_query: QueryState<N::ViewQuery>, node: N, } impl<N: ViewNode> ViewNodeRunner<N> { pub fn new(node: N, world: &mut World) -> Self { Self { view_query: world.query_filtered(), node, } } } impl<N: ViewNode + FromWorld> FromWorld for ViewNodeRunner<N> { fn from_world(world: &mut World) -> Self { Self::new(N::from_world(world), world) } } impl<T> Node for ViewNodeRunner<T> where T: ViewNode + Send + Sync + 'static, { fn update(&mut self, world: &mut World) { self.view_query.update_archetypes(world); self.node.update(world); } fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, world: &'w World, ) -> Result<(), NodeRunError> { let Ok(view) = self.view_query.get_manual(world, graph.view_entity()) else { return Ok(()); }; ViewNode::run(&self.node, graph, render_context, view, world)?; Ok(()) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/edge.rs
crates/bevy_render/src/render_graph/edge.rs
use super::InternedRenderLabel; /// An edge, which connects two [`Nodes`](super::Node) in /// a [`RenderGraph`](crate::render_graph::RenderGraph). /// /// They are used to describe the ordering (which node has to run first) /// and may be of two kinds: [`NodeEdge`](Self::NodeEdge) and [`SlotEdge`](Self::SlotEdge). /// /// Edges are added via the [`RenderGraph::add_node_edge`] and the /// [`RenderGraph::add_slot_edge`] methods. /// /// The former simply states that the `output_node` has to be run before the `input_node`, /// while the later connects an output slot of the `output_node` /// with an input slot of the `input_node` to pass additional data along. /// For more information see [`SlotType`](super::SlotType). /// /// [`RenderGraph::add_node_edge`]: crate::render_graph::RenderGraph::add_node_edge /// [`RenderGraph::add_slot_edge`]: crate::render_graph::RenderGraph::add_slot_edge #[derive(Clone, Debug, Eq, PartialEq)] pub enum Edge { /// An edge describing to ordering of both nodes (`output_node` before `input_node`) /// and connecting the output slot at the `output_index` of the `output_node` /// with the slot at the `input_index` of the `input_node`. SlotEdge { input_node: InternedRenderLabel, input_index: usize, output_node: InternedRenderLabel, output_index: usize, }, /// An edge describing to ordering of both nodes (`output_node` before `input_node`). NodeEdge { input_node: InternedRenderLabel, output_node: InternedRenderLabel, }, } impl Edge { /// Returns the id of the `input_node`. pub fn get_input_node(&self) -> InternedRenderLabel { match self { Edge::SlotEdge { input_node, .. } | Edge::NodeEdge { input_node, .. } => *input_node, } } /// Returns the id of the `output_node`. pub fn get_output_node(&self) -> InternedRenderLabel { match self { Edge::SlotEdge { output_node, .. } | Edge::NodeEdge { output_node, .. } => *output_node, } } } #[derive(PartialEq, Eq)] pub enum EdgeExistence { Exists, DoesNotExist, }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/node_slot.rs
crates/bevy_render/src/render_graph/node_slot.rs
use alloc::borrow::Cow; use bevy_ecs::entity::Entity; use core::fmt; use derive_more::derive::From; use crate::render_resource::{Buffer, Sampler, TextureView}; /// A value passed between render [`Nodes`](super::Node). /// Corresponds to the [`SlotType`] specified in the [`RenderGraph`](super::RenderGraph). /// /// Slots can have four different types of values: /// [`Buffer`], [`TextureView`], [`Sampler`] and [`Entity`]. /// /// These values do not contain the actual render data, but only the ids to retrieve them. #[derive(Debug, Clone, From)] pub enum SlotValue { /// A GPU-accessible [`Buffer`]. Buffer(Buffer), /// A [`TextureView`] describes a texture used in a pipeline. TextureView(TextureView), /// A texture [`Sampler`] defines how a pipeline will sample from a [`TextureView`]. Sampler(Sampler), /// An entity from the ECS. Entity(Entity), } impl SlotValue { /// Returns the [`SlotType`] of this value. pub fn slot_type(&self) -> SlotType { match self { SlotValue::Buffer(_) => SlotType::Buffer, SlotValue::TextureView(_) => SlotType::TextureView, SlotValue::Sampler(_) => SlotType::Sampler, SlotValue::Entity(_) => SlotType::Entity, } } } /// Describes the render resources created (output) or used (input) by /// the render [`Nodes`](super::Node). /// /// This should not be confused with [`SlotValue`], which actually contains the passed data. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum SlotType { /// A GPU-accessible [`Buffer`]. Buffer, /// A [`TextureView`] describes a texture used in a pipeline. TextureView, /// A texture [`Sampler`] defines how a pipeline will sample from a [`TextureView`]. Sampler, /// An entity from the ECS. 
Entity, } impl fmt::Display for SlotType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match self { SlotType::Buffer => "Buffer", SlotType::TextureView => "TextureView", SlotType::Sampler => "Sampler", SlotType::Entity => "Entity", }; f.write_str(s) } } /// A [`SlotLabel`] is used to reference a slot by either its name or index /// inside the [`RenderGraph`](super::RenderGraph). #[derive(Debug, Clone, Eq, PartialEq, From)] pub enum SlotLabel { Index(usize), Name(Cow<'static, str>), } impl From<&SlotLabel> for SlotLabel { fn from(value: &SlotLabel) -> Self { value.clone() } } impl From<String> for SlotLabel { fn from(value: String) -> Self { SlotLabel::Name(value.into()) } } impl From<&'static str> for SlotLabel { fn from(value: &'static str) -> Self { SlotLabel::Name(value.into()) } } /// The internal representation of a slot, which specifies its [`SlotType`] and name. #[derive(Clone, Debug)] pub struct SlotInfo { pub name: Cow<'static, str>, pub slot_type: SlotType, } impl SlotInfo { pub fn new(name: impl Into<Cow<'static, str>>, slot_type: SlotType) -> Self { SlotInfo { name: name.into(), slot_type, } } } /// A collection of input or output [`SlotInfos`](SlotInfo) for /// a [`NodeState`](super::NodeState). #[derive(Default, Debug)] pub struct SlotInfos { slots: Vec<SlotInfo>, } impl<T: IntoIterator<Item = SlotInfo>> From<T> for SlotInfos { fn from(slots: T) -> Self { SlotInfos { slots: slots.into_iter().collect(), } } } impl SlotInfos { /// Returns the count of slots. #[inline] pub fn len(&self) -> usize { self.slots.len() } /// Returns true if there are no slots. #[inline] pub fn is_empty(&self) -> bool { self.slots.is_empty() } /// Retrieves the [`SlotInfo`] for the provided label. pub fn get_slot(&self, label: impl Into<SlotLabel>) -> Option<&SlotInfo> { let label = label.into(); let index = self.get_slot_index(label)?; self.slots.get(index) } /// Retrieves the [`SlotInfo`] for the provided label mutably. 
pub fn get_slot_mut(&mut self, label: impl Into<SlotLabel>) -> Option<&mut SlotInfo> { let label = label.into(); let index = self.get_slot_index(label)?; self.slots.get_mut(index) } /// Retrieves the index (inside input or output slots) of the slot for the provided label. pub fn get_slot_index(&self, label: impl Into<SlotLabel>) -> Option<usize> { let label = label.into(); match label { SlotLabel::Index(index) => Some(index), SlotLabel::Name(ref name) => self.slots.iter().position(|s| s.name == *name), } } /// Returns an iterator over the slot infos. pub fn iter(&self) -> impl Iterator<Item = &SlotInfo> { self.slots.iter() } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/mod.rs
crates/bevy_render/src/render_graph/mod.rs
mod app; mod camera_driver_node; mod context; mod edge; mod graph; mod node; mod node_slot; pub use app::*; pub use camera_driver_node::*; pub use context::*; pub use edge::*; pub use graph::*; pub use node::*; pub use node_slot::*; use thiserror::Error; #[derive(Error, Debug, Eq, PartialEq)] pub enum RenderGraphError { #[error("node {0:?} does not exist")] InvalidNode(InternedRenderLabel), #[error("output node slot does not exist")] InvalidOutputNodeSlot(SlotLabel), #[error("input node slot does not exist")] InvalidInputNodeSlot(SlotLabel), #[error("node does not match the given type")] WrongNodeType, #[error("attempted to connect output slot {output_slot} from node {output_node:?} to incompatible input slot {input_slot} from node {input_node:?}")] MismatchedNodeSlots { output_node: InternedRenderLabel, output_slot: usize, input_node: InternedRenderLabel, input_slot: usize, }, #[error("attempted to add an edge that already exists")] EdgeAlreadyExists(Edge), #[error("attempted to remove an edge that does not exist")] EdgeDoesNotExist(Edge), #[error("node {node:?} has an unconnected input slot {input_slot}")] UnconnectedNodeInputSlot { node: InternedRenderLabel, input_slot: usize, }, #[error("node {node:?} has an unconnected output slot {output_slot}")] UnconnectedNodeOutputSlot { node: InternedRenderLabel, output_slot: usize, }, #[error("node {node:?} input slot {input_slot} already occupied by {occupied_by_node:?}")] NodeInputSlotAlreadyOccupied { node: InternedRenderLabel, input_slot: usize, occupied_by_node: InternedRenderLabel, }, }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/context.rs
crates/bevy_render/src/render_graph/context.rs
use crate::{ render_graph::{NodeState, RenderGraph, SlotInfos, SlotLabel, SlotType, SlotValue}, render_resource::{Buffer, Sampler, TextureView}, }; use alloc::borrow::Cow; use bevy_ecs::{entity::Entity, intern::Interned}; use thiserror::Error; use super::{InternedRenderSubGraph, RenderLabel, RenderSubGraph}; /// A command that signals the graph runner to run the sub graph corresponding to the `sub_graph` /// with the specified `inputs` next. pub struct RunSubGraph { pub sub_graph: InternedRenderSubGraph, pub inputs: Vec<SlotValue>, pub view_entity: Option<Entity>, pub debug_group: Option<String>, } /// The context with all graph information required to run a [`Node`](super::Node). /// This context is created for each node by the render graph runner. /// /// The slot input can be read from here and the outputs must be written back to the context for /// passing them onto the next node. /// /// Sub graphs can be queued for running by adding a [`RunSubGraph`] command to the context. /// After the node has finished running the graph runner is responsible for executing the sub graphs. pub struct RenderGraphContext<'a> { graph: &'a RenderGraph, node: &'a NodeState, inputs: &'a [SlotValue], outputs: &'a mut [Option<SlotValue>], run_sub_graphs: Vec<RunSubGraph>, /// The `view_entity` associated with the render graph being executed /// This is optional because you aren't required to have a `view_entity` for a node. /// For example, compute shader nodes don't have one. /// It should always be set when the [`RenderGraph`] is running on a View. view_entity: Option<Entity>, } impl<'a> RenderGraphContext<'a> { /// Creates a new render graph context for the `node`. pub fn new( graph: &'a RenderGraph, node: &'a NodeState, inputs: &'a [SlotValue], outputs: &'a mut [Option<SlotValue>], ) -> Self { Self { graph, node, inputs, outputs, run_sub_graphs: Vec::new(), view_entity: None, } } /// Returns the input slot values for the node. 
#[inline] pub fn inputs(&self) -> &[SlotValue] { self.inputs } /// Returns the [`SlotInfos`] of the inputs. pub fn input_info(&self) -> &SlotInfos { &self.node.input_slots } /// Returns the [`SlotInfos`] of the outputs. pub fn output_info(&self) -> &SlotInfos { &self.node.output_slots } /// Retrieves the input slot value referenced by the `label`. pub fn get_input(&self, label: impl Into<SlotLabel>) -> Result<&SlotValue, InputSlotError> { let label = label.into(); let index = self .input_info() .get_slot_index(label.clone()) .ok_or(InputSlotError::InvalidSlot(label))?; Ok(&self.inputs[index]) } // TODO: should this return an Arc or a reference? /// Retrieves the input slot value referenced by the `label` as a [`TextureView`]. pub fn get_input_texture( &self, label: impl Into<SlotLabel>, ) -> Result<&TextureView, InputSlotError> { let label = label.into(); match self.get_input(label.clone())? { SlotValue::TextureView(value) => Ok(value), value => Err(InputSlotError::MismatchedSlotType { label, actual: value.slot_type(), expected: SlotType::TextureView, }), } } /// Retrieves the input slot value referenced by the `label` as a [`Sampler`]. pub fn get_input_sampler( &self, label: impl Into<SlotLabel>, ) -> Result<&Sampler, InputSlotError> { let label = label.into(); match self.get_input(label.clone())? { SlotValue::Sampler(value) => Ok(value), value => Err(InputSlotError::MismatchedSlotType { label, actual: value.slot_type(), expected: SlotType::Sampler, }), } } /// Retrieves the input slot value referenced by the `label` as a [`Buffer`]. pub fn get_input_buffer(&self, label: impl Into<SlotLabel>) -> Result<&Buffer, InputSlotError> { let label = label.into(); match self.get_input(label.clone())? { SlotValue::Buffer(value) => Ok(value), value => Err(InputSlotError::MismatchedSlotType { label, actual: value.slot_type(), expected: SlotType::Buffer, }), } } /// Retrieves the input slot value referenced by the `label` as an [`Entity`]. 
pub fn get_input_entity(&self, label: impl Into<SlotLabel>) -> Result<Entity, InputSlotError> { let label = label.into(); match self.get_input(label.clone())? { SlotValue::Entity(value) => Ok(*value), value => Err(InputSlotError::MismatchedSlotType { label, actual: value.slot_type(), expected: SlotType::Entity, }), } } /// Sets the output slot value referenced by the `label`. pub fn set_output( &mut self, label: impl Into<SlotLabel>, value: impl Into<SlotValue>, ) -> Result<(), OutputSlotError> { let label = label.into(); let value = value.into(); let slot_index = self .output_info() .get_slot_index(label.clone()) .ok_or_else(|| OutputSlotError::InvalidSlot(label.clone()))?; let slot = self .output_info() .get_slot(slot_index) .expect("slot is valid"); if value.slot_type() != slot.slot_type { return Err(OutputSlotError::MismatchedSlotType { label, actual: slot.slot_type, expected: value.slot_type(), }); } self.outputs[slot_index] = Some(value); Ok(()) } pub fn view_entity(&self) -> Entity { self.view_entity.unwrap() } pub fn get_view_entity(&self) -> Option<Entity> { self.view_entity } pub fn set_view_entity(&mut self, view_entity: Entity) { self.view_entity = Some(view_entity); } /// Queues up a sub graph for execution after the node has finished running. 
pub fn run_sub_graph( &mut self, name: impl RenderSubGraph, inputs: Vec<SlotValue>, view_entity: Option<Entity>, debug_group: Option<String>, ) -> Result<(), RunSubGraphError> { let name = name.intern(); let sub_graph = self .graph .get_sub_graph(name) .ok_or(RunSubGraphError::MissingSubGraph(name))?; if let Some(input_node) = sub_graph.get_input_node() { for (i, input_slot) in input_node.input_slots.iter().enumerate() { if let Some(input_value) = inputs.get(i) { if input_slot.slot_type != input_value.slot_type() { return Err(RunSubGraphError::MismatchedInputSlotType { graph_name: name, slot_index: i, actual: input_value.slot_type(), expected: input_slot.slot_type, label: input_slot.name.clone().into(), }); } } else { return Err(RunSubGraphError::MissingInput { slot_index: i, slot_name: input_slot.name.clone(), graph_name: name, }); } } } else if !inputs.is_empty() { return Err(RunSubGraphError::SubGraphHasNoInputs(name)); } self.run_sub_graphs.push(RunSubGraph { sub_graph: name, inputs, view_entity, debug_group, }); Ok(()) } /// Returns a human-readable label for this node, for debugging purposes. pub fn label(&self) -> Interned<dyn RenderLabel> { self.node.label } /// Finishes the context for this [`Node`](super::Node) by /// returning the sub graphs to run next. 
pub fn finish(self) -> Vec<RunSubGraph> { self.run_sub_graphs } } #[derive(Error, Debug, Eq, PartialEq)] pub enum RunSubGraphError { #[error("attempted to run sub-graph `{0:?}`, but it does not exist")] MissingSubGraph(InternedRenderSubGraph), #[error("attempted to pass inputs to sub-graph `{0:?}`, which has no input slots")] SubGraphHasNoInputs(InternedRenderSubGraph), #[error("sub graph (name: `{graph_name:?}`) could not be run because slot `{slot_name}` at index {slot_index} has no value")] MissingInput { slot_index: usize, slot_name: Cow<'static, str>, graph_name: InternedRenderSubGraph, }, #[error("attempted to use the wrong type for input slot")] MismatchedInputSlotType { graph_name: InternedRenderSubGraph, slot_index: usize, label: SlotLabel, expected: SlotType, actual: SlotType, }, } #[derive(Error, Debug, Eq, PartialEq)] pub enum OutputSlotError { #[error("output slot `{0:?}` does not exist")] InvalidSlot(SlotLabel), #[error("attempted to output a value of type `{actual}` to output slot `{label:?}`, which has type `{expected}`")] MismatchedSlotType { label: SlotLabel, expected: SlotType, actual: SlotType, }, } #[derive(Error, Debug, Eq, PartialEq)] pub enum InputSlotError { #[error("input slot `{0:?}` does not exist")] InvalidSlot(SlotLabel), #[error("attempted to retrieve a value of type `{actual}` from input slot `{label:?}`, which has type `{expected}`")] MismatchedSlotType { label: SlotLabel, expected: SlotType, actual: SlotType, }, }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/graph.rs
crates/bevy_render/src/render_graph/graph.rs
use crate::{ render_graph::{ Edge, Node, NodeRunError, NodeState, RenderGraphContext, RenderGraphError, RenderLabel, SlotInfo, SlotLabel, }, renderer::RenderContext, }; use bevy_ecs::{define_label, intern::Interned, prelude::World, resource::Resource}; use bevy_platform::collections::HashMap; use core::fmt::Debug; use super::{EdgeExistence, InternedRenderLabel, IntoRenderNodeArray}; pub use bevy_render_macros::RenderSubGraph; define_label!( #[diagnostic::on_unimplemented( note = "consider annotating `{Self}` with `#[derive(RenderSubGraph)]`" )] /// A strongly-typed class of labels used to identify a [`SubGraph`] in a render graph. RenderSubGraph, RENDER_SUB_GRAPH_INTERNER ); /// A shorthand for `Interned<dyn RenderSubGraph>`. pub type InternedRenderSubGraph = Interned<dyn RenderSubGraph>; /// The render graph configures the modular and re-usable render logic. /// /// It is a retained and stateless (nodes themselves may have their own internal state) structure, /// which can not be modified while it is executed by the graph runner. /// /// The render graph runner is responsible for executing the entire graph each frame. /// It will execute each node in the graph in the correct order, based on the edges between the nodes. /// /// It consists of three main components: [`Nodes`](Node), [`Edges`](Edge) /// and [`Slots`](super::SlotType). /// /// Nodes are responsible for generating draw calls and operating on input and output slots. /// Edges specify the order of execution for nodes and connect input and output slots together. /// Slots describe the render resources created or used by the nodes. /// /// Additionally a render graph can contain multiple sub graphs, which are run by the /// corresponding nodes. Every render graph can have its own optional input node. /// /// ## Example /// Here is a simple render graph example with two nodes connected by a node edge. 
/// ```ignore /// # TODO: Remove when #10645 is fixed /// # use bevy_app::prelude::*; /// # use bevy_ecs::prelude::World; /// # use bevy_render::render_graph::{RenderGraph, RenderLabel, Node, RenderGraphContext, NodeRunError}; /// # use bevy_render::renderer::RenderContext; /// # /// #[derive(RenderLabel)] /// enum Labels { /// A, /// B, /// } /// /// # struct MyNode; /// # /// # impl Node for MyNode { /// # fn run(&self, graph: &mut RenderGraphContext, render_context: &mut RenderContext, world: &World) -> Result<(), NodeRunError> { /// # unimplemented!() /// # } /// # } /// # /// let mut graph = RenderGraph::default(); /// graph.add_node(Labels::A, MyNode); /// graph.add_node(Labels::B, MyNode); /// graph.add_node_edge(Labels::B, Labels::A); /// ``` #[derive(Resource, Default)] pub struct RenderGraph { nodes: HashMap<InternedRenderLabel, NodeState>, sub_graphs: HashMap<InternedRenderSubGraph, RenderGraph>, } /// The label for the input node of a graph. Used to connect other nodes to it. #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] pub struct GraphInput; impl RenderGraph { /// Updates all nodes and sub graphs of the render graph. Should be called before executing it. pub fn update(&mut self, world: &mut World) { for node in self.nodes.values_mut() { node.node.update(world); } for sub_graph in self.sub_graphs.values_mut() { sub_graph.update(world); } } /// Creates an [`GraphInputNode`] with the specified slots if not already present. pub fn set_input(&mut self, inputs: Vec<SlotInfo>) { assert!( matches!( self.get_node_state(GraphInput), Err(RenderGraphError::InvalidNode(_)) ), "Graph already has an input node" ); self.add_node(GraphInput, GraphInputNode { inputs }); } /// Returns the [`NodeState`] of the input node of this graph. /// /// # See also /// /// - [`input_node`](Self::input_node) for an unchecked version. 
#[inline] pub fn get_input_node(&self) -> Option<&NodeState> { self.get_node_state(GraphInput).ok() } /// Returns the [`NodeState`] of the input node of this graph. /// /// # Panics /// /// Panics if there is no input node set. /// /// # See also /// /// - [`get_input_node`](Self::get_input_node) for a version which returns an [`Option`] instead. #[inline] pub fn input_node(&self) -> &NodeState { self.get_input_node().unwrap() } /// Adds the `node` with the `label` to the graph. /// If the label is already present replaces it instead. pub fn add_node<T>(&mut self, label: impl RenderLabel, node: T) where T: Node, { let label = label.intern(); let node_state = NodeState::new(label, node); self.nodes.insert(label, node_state); } /// Add `node_edge`s based on the order of the given `edges` array. /// /// Defining an edge that already exists is not considered an error with this api. /// It simply won't create a new edge. #[track_caller] pub fn add_node_edges<const N: usize>(&mut self, edges: impl IntoRenderNodeArray<N>) { for window in edges.into_array().windows(2) { let [a, b] = window else { break; }; if let Err(err) = self.try_add_node_edge(*a, *b) { match err { // Already existing edges are very easy to produce with this api // and shouldn't cause a panic RenderGraphError::EdgeAlreadyExists(_) => {} _ => panic!("{err}"), } } } } /// Removes the `node` with the `label` from the graph. /// If the label does not exist, nothing happens. pub fn remove_node(&mut self, label: impl RenderLabel) -> Result<(), RenderGraphError> { let label = label.intern(); if let Some(node_state) = self.nodes.remove(&label) { // Remove all edges from other nodes to this one. Note that as we're removing this // node, we don't need to remove its input edges for input_edge in node_state.edges.input_edges() { match input_edge { Edge::SlotEdge { output_node, .. 
} | Edge::NodeEdge { input_node: _, output_node, } => { if let Ok(output_node) = self.get_node_state_mut(*output_node) { output_node.edges.remove_output_edge(input_edge.clone())?; } } } } // Remove all edges from this node to other nodes. Note that as we're removing this // node, we don't need to remove its output edges for output_edge in node_state.edges.output_edges() { match output_edge { Edge::SlotEdge { output_node: _, output_index: _, input_node, input_index: _, } | Edge::NodeEdge { output_node: _, input_node, } => { if let Ok(input_node) = self.get_node_state_mut(*input_node) { input_node.edges.remove_input_edge(output_edge.clone())?; } } } } } Ok(()) } /// Retrieves the [`NodeState`] referenced by the `label`. pub fn get_node_state(&self, label: impl RenderLabel) -> Result<&NodeState, RenderGraphError> { let label = label.intern(); self.nodes .get(&label) .ok_or(RenderGraphError::InvalidNode(label)) } /// Retrieves the [`NodeState`] referenced by the `label` mutably. pub fn get_node_state_mut( &mut self, label: impl RenderLabel, ) -> Result<&mut NodeState, RenderGraphError> { let label = label.intern(); self.nodes .get_mut(&label) .ok_or(RenderGraphError::InvalidNode(label)) } /// Retrieves the [`Node`] referenced by the `label`. pub fn get_node<T>(&self, label: impl RenderLabel) -> Result<&T, RenderGraphError> where T: Node, { self.get_node_state(label).and_then(|n| n.node()) } /// Retrieves the [`Node`] referenced by the `label` mutably. pub fn get_node_mut<T>(&mut self, label: impl RenderLabel) -> Result<&mut T, RenderGraphError> where T: Node, { self.get_node_state_mut(label).and_then(|n| n.node_mut()) } /// Adds the [`Edge::SlotEdge`] to the graph. This guarantees that the `output_node` /// is run before the `input_node` and also connects the `output_slot` to the `input_slot`. /// /// Fails if any invalid [`RenderLabel`]s or [`SlotLabel`]s are given. /// /// # See also /// /// - [`add_slot_edge`](Self::add_slot_edge) for an infallible version. 
pub fn try_add_slot_edge( &mut self, output_node: impl RenderLabel, output_slot: impl Into<SlotLabel>, input_node: impl RenderLabel, input_slot: impl Into<SlotLabel>, ) -> Result<(), RenderGraphError> { let output_slot = output_slot.into(); let input_slot = input_slot.into(); let output_node = output_node.intern(); let input_node = input_node.intern(); let output_index = self .get_node_state(output_node)? .output_slots .get_slot_index(output_slot.clone()) .ok_or(RenderGraphError::InvalidOutputNodeSlot(output_slot))?; let input_index = self .get_node_state(input_node)? .input_slots .get_slot_index(input_slot.clone()) .ok_or(RenderGraphError::InvalidInputNodeSlot(input_slot))?; let edge = Edge::SlotEdge { output_node, output_index, input_node, input_index, }; self.validate_edge(&edge, EdgeExistence::DoesNotExist)?; { let output_node = self.get_node_state_mut(output_node)?; output_node.edges.add_output_edge(edge.clone())?; } let input_node = self.get_node_state_mut(input_node)?; input_node.edges.add_input_edge(edge)?; Ok(()) } /// Adds the [`Edge::SlotEdge`] to the graph. This guarantees that the `output_node` /// is run before the `input_node` and also connects the `output_slot` to the `input_slot`. /// /// # Panics /// /// Any invalid [`RenderLabel`]s or [`SlotLabel`]s are given. /// /// # See also /// /// - [`try_add_slot_edge`](Self::try_add_slot_edge) for a fallible version. pub fn add_slot_edge( &mut self, output_node: impl RenderLabel, output_slot: impl Into<SlotLabel>, input_node: impl RenderLabel, input_slot: impl Into<SlotLabel>, ) { self.try_add_slot_edge(output_node, output_slot, input_node, input_slot) .unwrap(); } /// Removes the [`Edge::SlotEdge`] from the graph. If any nodes or slots do not exist then /// nothing happens. 
pub fn remove_slot_edge( &mut self, output_node: impl RenderLabel, output_slot: impl Into<SlotLabel>, input_node: impl RenderLabel, input_slot: impl Into<SlotLabel>, ) -> Result<(), RenderGraphError> { let output_slot = output_slot.into(); let input_slot = input_slot.into(); let output_node = output_node.intern(); let input_node = input_node.intern(); let output_index = self .get_node_state(output_node)? .output_slots .get_slot_index(output_slot.clone()) .ok_or(RenderGraphError::InvalidOutputNodeSlot(output_slot))?; let input_index = self .get_node_state(input_node)? .input_slots .get_slot_index(input_slot.clone()) .ok_or(RenderGraphError::InvalidInputNodeSlot(input_slot))?; let edge = Edge::SlotEdge { output_node, output_index, input_node, input_index, }; self.validate_edge(&edge, EdgeExistence::Exists)?; { let output_node = self.get_node_state_mut(output_node)?; output_node.edges.remove_output_edge(edge.clone())?; } let input_node = self.get_node_state_mut(input_node)?; input_node.edges.remove_input_edge(edge)?; Ok(()) } /// Adds the [`Edge::NodeEdge`] to the graph. This guarantees that the `output_node` /// is run before the `input_node`. /// /// Fails if any invalid [`RenderLabel`] is given. /// /// # See also /// /// - [`add_node_edge`](Self::add_node_edge) for an infallible version. pub fn try_add_node_edge( &mut self, output_node: impl RenderLabel, input_node: impl RenderLabel, ) -> Result<(), RenderGraphError> { let output_node = output_node.intern(); let input_node = input_node.intern(); let edge = Edge::NodeEdge { output_node, input_node, }; self.validate_edge(&edge, EdgeExistence::DoesNotExist)?; { let output_node = self.get_node_state_mut(output_node)?; output_node.edges.add_output_edge(edge.clone())?; } let input_node = self.get_node_state_mut(input_node)?; input_node.edges.add_input_edge(edge)?; Ok(()) } /// Adds the [`Edge::NodeEdge`] to the graph. This guarantees that the `output_node` /// is run before the `input_node`. 
///
/// # Panics
///
/// Panics if any invalid [`RenderLabel`] is given.
///
/// # See also
///
/// - [`try_add_node_edge`](Self::try_add_node_edge) for a fallible version.
pub fn add_node_edge(&mut self, output_node: impl RenderLabel, input_node: impl RenderLabel) {
    self.try_add_node_edge(output_node, input_node).unwrap();
}

/// Removes the [`Edge::NodeEdge`] from the graph.
/// Fails if either node does not exist, or if the edge is not present.
pub fn remove_node_edge(
    &mut self,
    output_node: impl RenderLabel,
    input_node: impl RenderLabel,
) -> Result<(), RenderGraphError> {
    let output_node = output_node.intern();
    let input_node = input_node.intern();

    let edge = Edge::NodeEdge {
        output_node,
        input_node,
    };

    // Removal requires the edge to be present on both endpoints.
    self.validate_edge(&edge, EdgeExistence::Exists)?;

    {
        // Scoped so the mutable borrow of the output node ends before the
        // input node is borrowed mutably below.
        let output_node = self.get_node_state_mut(output_node)?;
        output_node.edges.remove_output_edge(edge.clone())?;
    }
    let input_node = self.get_node_state_mut(input_node)?;
    input_node.edges.remove_input_edge(edge)?;

    Ok(())
}

/// Verifies that the edge existence is as expected and
/// checks that slot edges are connected correctly.
pub fn validate_edge(
    &mut self,
    edge: &Edge,
    should_exist: EdgeExistence,
) -> Result<(), RenderGraphError> {
    // Existence check is symmetric: `Exists` demands the edge is present,
    // `DoesNotExist` demands it is absent.
    if should_exist == EdgeExistence::Exists && !self.has_edge(edge) {
        return Err(RenderGraphError::EdgeDoesNotExist(edge.clone()));
    } else if should_exist == EdgeExistence::DoesNotExist && self.has_edge(edge) {
        return Err(RenderGraphError::EdgeAlreadyExists(edge.clone()));
    }

    match *edge {
        Edge::SlotEdge {
            output_node,
            output_index,
            input_node,
            input_index,
        } => {
            let output_node_state = self.get_node_state(output_node)?;
            let input_node_state = self.get_node_state(input_node)?;

            let output_slot = output_node_state
                .output_slots
                .get_slot(output_index)
                .ok_or(RenderGraphError::InvalidOutputNodeSlot(SlotLabel::Index(
                    output_index,
                )))?;
            let input_slot = input_node_state.input_slots.get_slot(input_index).ok_or(
                RenderGraphError::InvalidInputNodeSlot(SlotLabel::Index(input_index)),
            )?;

            // An input slot may only be fed by a single slot edge. When adding a
            // new edge, reject it if some other edge already targets this input.
            if let Some(Edge::SlotEdge {
                output_node: current_output_node,
                ..
            }) = input_node_state.edges.input_edges().iter().find(|e| {
                if let Edge::SlotEdge {
                    input_index: current_input_index,
                    ..
                } = e
                {
                    input_index == *current_input_index
                } else {
                    false
                }
            }) && should_exist == EdgeExistence::DoesNotExist
            {
                return Err(RenderGraphError::NodeInputSlotAlreadyOccupied {
                    node: input_node,
                    input_slot: input_index,
                    occupied_by_node: *current_output_node,
                });
            }

            // Connected slots must carry the same resource type.
            if output_slot.slot_type != input_slot.slot_type {
                return Err(RenderGraphError::MismatchedNodeSlots {
                    output_node,
                    output_slot: output_index,
                    input_node,
                    input_slot: input_index,
                });
            }
        }
        Edge::NodeEdge { .. } => { /* nothing to validate here */ }
    }

    Ok(())
}

/// Checks whether the `edge` already exists in the graph.
pub fn has_edge(&self, edge: &Edge) -> bool {
    let output_node_state = self.get_node_state(edge.get_output_node());
    let input_node_state = self.get_node_state(edge.get_input_node());
    // The edge only counts as present when BOTH endpoints record it.
    if let Ok(output_node_state) = output_node_state
        && output_node_state.edges.output_edges().contains(edge)
        && let Ok(input_node_state) = input_node_state
        && input_node_state.edges.input_edges().contains(edge)
    {
        return true;
    }

    false
}

/// Returns an iterator over the [`NodeStates`](NodeState).
pub fn iter_nodes(&self) -> impl Iterator<Item = &NodeState> {
    self.nodes.values()
}

/// Returns an iterator over the [`NodeStates`](NodeState), that allows modifying each value.
pub fn iter_nodes_mut(&mut self) -> impl Iterator<Item = &mut NodeState> {
    self.nodes.values_mut()
}

/// Returns an iterator over the sub graphs.
pub fn iter_sub_graphs(&self) -> impl Iterator<Item = (InternedRenderSubGraph, &RenderGraph)> {
    self.sub_graphs.iter().map(|(name, graph)| (*name, graph))
}

/// Returns an iterator over the sub graphs, that allows modifying each value.
pub fn iter_sub_graphs_mut(
    &mut self,
) -> impl Iterator<Item = (InternedRenderSubGraph, &mut RenderGraph)> {
    self.sub_graphs
        .iter_mut()
        .map(|(name, graph)| (*name, graph))
}

/// Returns an iterator over a tuple of the input edges and the corresponding output nodes
/// for the node referenced by the label.
pub fn iter_node_inputs(
    &self,
    label: impl RenderLabel,
) -> Result<impl Iterator<Item = (&Edge, &NodeState)>, RenderGraphError> {
    let node = self.get_node_state(label)?;
    // `unwrap` is sound here: every recorded edge references nodes that are
    // present in the graph (edges are only added through the validated APIs).
    Ok(node
        .edges
        .input_edges()
        .iter()
        .map(|edge| (edge, edge.get_output_node()))
        .map(move |(edge, output_node)| (edge, self.get_node_state(output_node).unwrap())))
}

/// Returns an iterator over a tuple of the output edges and the corresponding input nodes
/// for the node referenced by the label.
pub fn iter_node_outputs(
    &self,
    label: impl RenderLabel,
) -> Result<impl Iterator<Item = (&Edge, &NodeState)>, RenderGraphError> {
    let node = self.get_node_state(label)?;
    // `unwrap` is sound: edges only ever reference nodes present in the graph.
    Ok(node
        .edges
        .output_edges()
        .iter()
        .map(|edge| (edge, edge.get_input_node()))
        .map(move |(edge, input_node)| (edge, self.get_node_state(input_node).unwrap())))
}

/// Adds the `sub_graph` with the `label` to the graph.
/// If the label is already present replaces it instead.
pub fn add_sub_graph(&mut self, label: impl RenderSubGraph, sub_graph: RenderGraph) {
    self.sub_graphs.insert(label.intern(), sub_graph);
}

/// Removes the `sub_graph` with the `label` from the graph.
/// If the label does not exist then nothing happens.
pub fn remove_sub_graph(&mut self, label: impl RenderSubGraph) {
    self.sub_graphs.remove(&label.intern());
}

/// Retrieves the sub graph corresponding to the `label`.
pub fn get_sub_graph(&self, label: impl RenderSubGraph) -> Option<&RenderGraph> {
    self.sub_graphs.get(&label.intern())
}

/// Retrieves the sub graph corresponding to the `label` mutably.
pub fn get_sub_graph_mut(&mut self, label: impl RenderSubGraph) -> Option<&mut RenderGraph> {
    self.sub_graphs.get_mut(&label.intern())
}

/// Retrieves the sub graph corresponding to the `label`.
///
/// # Panics
///
/// Panics if any invalid subgraph label is given.
///
/// # See also
///
/// - [`get_sub_graph`](Self::get_sub_graph) for a fallible version.
pub fn sub_graph(&self, label: impl RenderSubGraph) -> &RenderGraph {
    let label = label.intern();
    self.sub_graphs
        .get(&label)
        .unwrap_or_else(|| panic!("Subgraph {label:?} not found"))
}

/// Retrieves the sub graph corresponding to the `label` mutably.
///
/// # Panics
///
/// Panics if any invalid subgraph label is given.
///
/// # See also
///
/// - [`get_sub_graph_mut`](Self::get_sub_graph_mut) for a fallible version.
pub fn sub_graph_mut(&mut self, label: impl RenderSubGraph) -> &mut RenderGraph {
    let label = label.intern();
    self.sub_graphs
        .get_mut(&label)
        .unwrap_or_else(|| panic!("Subgraph {label:?} not found"))
}
}

impl Debug for RenderGraph {
    // Renders every node with its input and output slots, one node per line.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        for node in self.iter_nodes() {
            writeln!(f, "{:?}", node.label)?;
            writeln!(f, " in: {:?}", node.input_slots)?;
            writeln!(f, " out: {:?}", node.output_slots)?;
        }

        Ok(())
    }
}

/// A [`Node`] which acts as an entry point for a [`RenderGraph`] with custom inputs.
/// It has the same input and output slots and simply copies them over when run.
pub struct GraphInputNode {
    // Slot layout shared by both the input and output side of this node.
    inputs: Vec<SlotInfo>,
}

impl Node for GraphInputNode {
    fn input(&self) -> Vec<SlotInfo> {
        self.inputs.clone()
    }

    fn output(&self) -> Vec<SlotInfo> {
        self.inputs.clone()
    }

    // Forwards every graph input value unchanged into the matching output slot.
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext,
        _world: &World,
    ) -> Result<(), NodeRunError> {
        for i in 0..graph.inputs().len() {
            let input = graph.inputs()[i].clone();
            graph.set_output(i, input)?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        render_graph::{
            node::IntoRenderNodeArray, Edge, InternedRenderLabel, Node, NodeRunError,
            RenderGraph, RenderGraphContext, RenderGraphError, RenderLabel, SlotInfo, SlotType,
        },
        renderer::RenderContext,
    };
    use bevy_ecs::world::{FromWorld, World};
    use bevy_platform::collections::HashSet;

    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
    enum TestLabel {
        A,
        B,
        C,
        D,
    }

    /// A no-op node with a configurable number of texture-view slots.
    #[derive(Debug)]
    struct TestNode {
        inputs: Vec<SlotInfo>,
        outputs: Vec<SlotInfo>,
    }

    impl TestNode {
        pub fn new(inputs: usize, outputs: usize) -> Self {
            TestNode {
                inputs: (0..inputs)
                    .map(|i| SlotInfo::new(format!("in_{i}"), SlotType::TextureView))
                    .collect(),
                outputs: (0..outputs)
                    .map(|i| SlotInfo::new(format!("out_{i}"), SlotType::TextureView))
                    .collect(),
            }
        }
    }

    impl Node for TestNode {
        fn input(&self) -> Vec<SlotInfo> {
            self.inputs.clone()
        }

        fn output(&self) -> Vec<SlotInfo> {
            self.outputs.clone()
        }

        fn run(
            &self,
            _: &mut RenderGraphContext,
            _: &mut RenderContext,
            _: &World,
        ) -> Result<(), NodeRunError> {
            Ok(())
        }
    }

    /// Collects the labels of all nodes feeding into `label`.
    fn input_nodes(label: impl RenderLabel, graph: &RenderGraph) -> HashSet<InternedRenderLabel> {
        graph
            .iter_node_inputs(label)
            .unwrap()
            .map(|(_edge, node)| node.label)
            .collect::<HashSet<InternedRenderLabel>>()
    }

    /// Collects the labels of all nodes fed by `label`.
    fn output_nodes(label: impl RenderLabel, graph: &RenderGraph) -> HashSet<InternedRenderLabel> {
        graph
            .iter_node_outputs(label)
            .unwrap()
            .map(|(_edge, node)| node.label)
            .collect::<HashSet<InternedRenderLabel>>()
    }

    #[test]
    fn test_graph_edges() {
        let mut graph = RenderGraph::default();
        graph.add_node(TestLabel::A, TestNode::new(0, 1));
        graph.add_node(TestLabel::B, TestNode::new(0, 1));
        graph.add_node(TestLabel::C, TestNode::new(1, 1));
        graph.add_node(TestLabel::D, TestNode::new(1, 0));

        graph.add_slot_edge(TestLabel::A, "out_0", TestLabel::C, "in_0");
        graph.add_node_edge(TestLabel::B, TestLabel::C);
        graph.add_slot_edge(TestLabel::C, 0, TestLabel::D, 0);

        assert!(
            input_nodes(TestLabel::A, &graph).is_empty(),
            "A has no inputs"
        );
        assert_eq!(
            output_nodes(TestLabel::A, &graph),
            HashSet::from_iter((TestLabel::C,).into_array()),
            "A outputs to C"
        );

        assert!(
            input_nodes(TestLabel::B, &graph).is_empty(),
            "B has no inputs"
        );
        assert_eq!(
            output_nodes(TestLabel::B, &graph),
            HashSet::from_iter((TestLabel::C,).into_array()),
            "B outputs to C"
        );

        assert_eq!(
            input_nodes(TestLabel::C, &graph),
            HashSet::from_iter((TestLabel::A, TestLabel::B).into_array()),
            "A and B input to C"
        );
        assert_eq!(
            output_nodes(TestLabel::C, &graph),
            HashSet::from_iter((TestLabel::D,).into_array()),
            "C outputs to D"
        );

        assert_eq!(
            input_nodes(TestLabel::D, &graph),
            HashSet::from_iter((TestLabel::C,).into_array()),
            "C inputs to D"
        );
        assert!(
            output_nodes(TestLabel::D, &graph).is_empty(),
            "D has no outputs"
        );
    }

    #[test]
    fn test_get_node_typed() {
        struct MyNode {
            value: usize,
        }

        impl Node for MyNode {
            fn run(
                &self,
                _: &mut RenderGraphContext,
                _: &mut RenderContext,
                _: &World,
            ) -> Result<(), NodeRunError> {
                Ok(())
            }
        }

        let mut graph = RenderGraph::default();

        graph.add_node(TestLabel::A, MyNode { value: 42 });

        let node: &MyNode = graph.get_node(TestLabel::A).unwrap();
        assert_eq!(node.value, 42, "node value matches");

        let result: Result<&TestNode, RenderGraphError> = graph.get_node(TestLabel::A);
        assert_eq!(
            result.unwrap_err(),
            RenderGraphError::WrongNodeType,
            "expect a wrong node type error"
        );
    }

    #[test]
    fn test_slot_already_occupied() {
        let mut graph = RenderGraph::default();

        graph.add_node(TestLabel::A, TestNode::new(0, 1));
        graph.add_node(TestLabel::B, TestNode::new(0, 1));
        graph.add_node(TestLabel::C, TestNode::new(1, 1));

        graph.add_slot_edge(TestLabel::A, 0, TestLabel::C, 0);
        assert_eq!(
            graph.try_add_slot_edge(TestLabel::B, 0, TestLabel::C, 0),
            Err(RenderGraphError::NodeInputSlotAlreadyOccupied {
                node: TestLabel::C.intern(),
                input_slot: 0,
                occupied_by_node: TestLabel::A.intern(),
            }),
            "Adding to a slot that is already occupied should return an error"
        );
    }

    #[test]
    fn test_edge_already_exists() {
        let mut graph = RenderGraph::default();

        graph.add_node(TestLabel::A, TestNode::new(0, 1));
        graph.add_node(TestLabel::B, TestNode::new(1, 0));

        graph.add_slot_edge(TestLabel::A, 0, TestLabel::B, 0);
        assert_eq!(
            graph.try_add_slot_edge(TestLabel::A, 0, TestLabel::B, 0),
            Err(RenderGraphError::EdgeAlreadyExists(Edge::SlotEdge {
                output_node: TestLabel::A.intern(),
                output_index: 0,
                input_node: TestLabel::B.intern(),
                input_index: 0,
            })),
            "Adding to a duplicate edge should return an error"
        );
    }

    #[test]
    fn test_add_node_edges() {
        struct SimpleNode;
        impl Node for SimpleNode {
            fn run(
                &self,
                _graph: &mut RenderGraphContext,
                _render_context: &mut RenderContext,
                _world: &World,
            ) -> Result<(), NodeRunError> {
                Ok(())
            }
        }
        impl FromWorld for SimpleNode {
            fn from_world(_world: &mut World) -> Self {
                Self
            }
        }

        let mut graph = RenderGraph::default();
        graph.add_node(TestLabel::A, SimpleNode);
        graph.add_node(TestLabel::B, SimpleNode);
        graph.add_node(TestLabel::C, SimpleNode);

        // `add_node_edges` chains consecutive labels: A -> B -> C.
        graph.add_node_edges((TestLabel::A, TestLabel::B, TestLabel::C));

        assert_eq!(
            output_nodes(TestLabel::A, &graph),
            HashSet::from_iter((TestLabel::B,).into_array()),
            "A -> B"
        );
        assert_eq!(
            input_nodes(TestLabel::B, &graph),
            HashSet::from_iter((TestLabel::A,).into_array()),
            "A -> B"
        );
        assert_eq!(
            output_nodes(TestLabel::B, &graph),
            HashSet::from_iter((TestLabel::C,).into_array()),
            "B -> C"
        );
        assert_eq!(
            input_nodes(TestLabel::C, &graph),
            HashSet::from_iter((TestLabel::B,).into_array()),
            "B -> C"
        );
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_graph/camera_driver_node.rs
crates/bevy_render/src/render_graph/camera_driver_node.rs
use crate::{
    camera::{ExtractedCamera, SortedCameras},
    render_graph::{Node, NodeRunError, RenderGraphContext},
    renderer::RenderContext,
    view::ExtractedWindows,
};
use bevy_camera::{ClearColor, NormalizedRenderTarget};
use bevy_ecs::{entity::ContainsEntity, prelude::QueryState, world::World};
use bevy_platform::collections::HashSet;
use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor, StoreOp};

/// The [`Node`] that runs the render (sub)graph of every extracted camera, in
/// [`SortedCameras`] order, and clears window surfaces no camera rendered to.
pub struct CameraDriverNode {
    // Cached query over all extracted cameras; refreshed in `update`.
    cameras: QueryState<&'static ExtractedCamera>,
}

impl CameraDriverNode {
    pub fn new(world: &mut World) -> Self {
        Self {
            cameras: world.query(),
        }
    }
}

impl Node for CameraDriverNode {
    fn update(&mut self, world: &mut World) {
        // Keep the cached query in sync with newly created archetypes so
        // `get_manual` in `run` can see every camera entity.
        self.cameras.update_archetypes(world);
    }
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let sorted_cameras = world.resource::<SortedCameras>();
        let windows = world.resource::<ExtractedWindows>();
        // Windows that at least one camera rendered to this frame.
        let mut camera_windows = <HashSet<_>>::default();
        for sorted_camera in &sorted_cameras.0 {
            let Ok(camera) = self.cameras.get_manual(world, sorted_camera.entity) else {
                continue;
            };

            let mut run_graph = true;
            if let Some(NormalizedRenderTarget::Window(window_ref)) = camera.target {
                let window_entity = window_ref.entity();
                if windows
                    .windows
                    .get(&window_entity)
                    .is_some_and(|w| w.physical_width > 0 && w.physical_height > 0)
                {
                    camera_windows.insert(window_entity);
                } else {
                    // The window doesn't exist anymore or zero-sized so we don't need to run the graph
                    run_graph = false;
                }
            }
            if run_graph {
                graph.run_sub_graph(
                    camera.render_graph,
                    vec![],
                    Some(sorted_camera.entity),
                    Some(format!(
                        "Camera {} ({})",
                        sorted_camera.order, sorted_camera.entity
                    )),
                )?;
            }
        }

        let clear_color_global = world.resource::<ClearColor>();

        // wgpu (and some backends) require doing work for swap chains if you call `get_current_texture()` and `present()`
        // This ensures that Bevy doesn't crash, even when there are no cameras (and therefore no work submitted).
        for (id, window) in world.resource::<ExtractedWindows>().iter() {
            if camera_windows.contains(id) && render_context.has_commands() {
                continue;
            }

            let Some(swap_chain_texture) = &window.swap_chain_texture_view else {
                continue;
            };

            #[cfg(feature = "trace")]
            let _span = tracing::info_span!("no_camera_clear_pass").entered();
            let pass_descriptor = RenderPassDescriptor {
                label: Some("no_camera_clear_pass"),
                color_attachments: &[Some(RenderPassColorAttachment {
                    view: swap_chain_texture,
                    depth_slice: None,
                    resolve_target: None,
                    ops: Operations {
                        // Clear with the global clear color so the surface is
                        // presented in a defined state even with no cameras.
                        load: LoadOp::Clear(clear_color_global.to_linear().into()),
                        store: StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            };

            render_context
                .command_encoder()
                .begin_render_pass(&pass_descriptor);
        }

        Ok(())
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_phase/draw.rs
crates/bevy_render/src/render_phase/draw.rs
use crate::render_phase::{PhaseItem, TrackedRenderPass};
use bevy_app::{App, SubApp};
use bevy_ecs::{
    entity::Entity,
    query::{QueryEntityError, QueryState, ROQueryItem, ReadOnlyQueryData},
    resource::Resource,
    system::{ReadOnlySystemParam, SystemParam, SystemParamItem, SystemState},
    world::World,
};
use bevy_utils::TypeIdMap;
use core::{any::TypeId, fmt::Debug, hash::Hash};
use std::sync::{PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard};
use thiserror::Error;
use variadics_please::all_tuples;

/// A draw function used to draw [`PhaseItem`]s.
///
/// The draw function can retrieve and query the required ECS data from the render world.
///
/// This trait can either be implemented directly or implicitly composed out of multiple modular
/// [`RenderCommand`]s. For more details and an example see the [`RenderCommand`] documentation.
pub trait Draw<P: PhaseItem>: Send + Sync + 'static {
    /// Prepares the draw function to be used. This is called once and only once before the phase
    /// begins. There may be zero or more [`draw`](Draw::draw) calls following a call to this function.
    /// Implementing this is optional.
    #[expect(
        unused_variables,
        reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion."
    )]
    fn prepare(&mut self, world: &'_ World) {}

    /// Draws a [`PhaseItem`] by issuing zero or more `draw` calls via the [`TrackedRenderPass`].
    fn draw<'w>(
        &mut self,
        world: &'w World,
        pass: &mut TrackedRenderPass<'w>,
        view: Entity,
        item: &P,
    ) -> Result<(), DrawError>;
}

/// Errors a [`Draw`] function can return while drawing a [`PhaseItem`].
#[derive(Error, Debug, PartialEq, Eq)]
pub enum DrawError {
    /// A composed [`RenderCommand`] returned a failure with the given reason.
    #[error("Failed to execute render command {0:?}")]
    RenderCommandFailure(&'static str),
    // Fixed garbled message: was "Failed to get execute view query".
    /// The view entity exists but did not match the view query.
    #[error("Failed to execute view query")]
    InvalidViewQuery,
    /// The view entity has been despawned or never existed.
    #[error("View entity not found")]
    ViewEntityNotFound,
}

// TODO: make this generic?
/// An identifier for a [`Draw`] function stored in [`DrawFunctions`].
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct DrawFunctionId(u32);

/// Stores all [`Draw`] functions for the [`PhaseItem`] type.
///
/// For retrieval, the [`Draw`] functions are mapped to their respective [`TypeId`]s.
pub struct DrawFunctionsInternal<P: PhaseItem> {
    pub draw_functions: Vec<Box<dyn Draw<P>>>,
    pub indices: TypeIdMap<DrawFunctionId>,
}

impl<P: PhaseItem> DrawFunctionsInternal<P> {
    /// Prepares all draw functions. This is called once and only once before the phase begins.
    pub fn prepare(&mut self, world: &World) {
        for function in &mut self.draw_functions {
            function.prepare(world);
        }
    }

    /// Adds the [`Draw`] function and maps it to its own type.
    pub fn add<T: Draw<P>>(&mut self, draw_function: T) -> DrawFunctionId {
        self.add_with::<T, T>(draw_function)
    }

    /// Adds the [`Draw`] function and maps it to the type `T`
    pub fn add_with<T: 'static, D: Draw<P>>(&mut self, draw_function: D) -> DrawFunctionId {
        // Ids are dense indices into `draw_functions`; `try_into` guards the
        // (practically unreachable) case of more than u32::MAX functions.
        let id = DrawFunctionId(self.draw_functions.len().try_into().unwrap());
        self.draw_functions.push(Box::new(draw_function));
        self.indices.insert(TypeId::of::<T>(), id);
        id
    }

    /// Retrieves the [`Draw`] function corresponding to the `id` mutably.
    pub fn get_mut(&mut self, id: DrawFunctionId) -> Option<&mut dyn Draw<P>> {
        self.draw_functions.get_mut(id.0 as usize).map(|f| &mut **f)
    }

    /// Retrieves the id of the [`Draw`] function corresponding to their associated type `T`.
    pub fn get_id<T: 'static>(&self) -> Option<DrawFunctionId> {
        self.indices.get(&TypeId::of::<T>()).copied()
    }

    /// Retrieves the id of the [`Draw`] function corresponding to their associated type `T`.
    ///
    /// Panicking wrapper for [`Self::get_id()`]
    ///
    /// ## Panics
    /// If the id doesn't exist, this function will panic.
pub fn id<T: 'static>(&self) -> DrawFunctionId {
    self.get_id::<T>().unwrap_or_else(|| {
        panic!(
            "Draw function {} not found for {}",
            core::any::type_name::<T>(),
            core::any::type_name::<P>()
        )
    })
}
}

/// Stores all draw functions for the [`PhaseItem`] type hidden behind a reader-writer lock.
///
/// To access them the [`DrawFunctions::read`] and [`DrawFunctions::write`] methods are used.
#[derive(Resource)]
pub struct DrawFunctions<P: PhaseItem> {
    internal: RwLock<DrawFunctionsInternal<P>>,
}

impl<P: PhaseItem> Default for DrawFunctions<P> {
    fn default() -> Self {
        Self {
            internal: RwLock::new(DrawFunctionsInternal {
                draw_functions: Vec::new(),
                indices: Default::default(),
            }),
        }
    }
}

impl<P: PhaseItem> DrawFunctions<P> {
    /// Accesses the draw functions in read mode.
    pub fn read(&self) -> RwLockReadGuard<'_, DrawFunctionsInternal<P>> {
        // Lock poisoning is ignored: a panic in another thread does not leave
        // the draw-function list in an invalid state.
        self.internal.read().unwrap_or_else(PoisonError::into_inner)
    }

    /// Accesses the draw functions in write mode.
    pub fn write(&self) -> RwLockWriteGuard<'_, DrawFunctionsInternal<P>> {
        self.internal
            .write()
            .unwrap_or_else(PoisonError::into_inner)
    }
}

/// [`RenderCommand`]s are modular standardized pieces of render logic that can be composed into
/// [`Draw`] functions.
///
/// To turn a stateless render command into a usable draw function it has to be wrapped by a
/// [`RenderCommandState`].
/// This is done automatically when registering a render command as a [`Draw`] function via the
/// [`AddRenderCommand::add_render_command`] method.
///
/// Compared to the draw function the required ECS data is fetched automatically
/// (by the [`RenderCommandState`]) from the render world.
/// Therefore the three types [`Param`](RenderCommand::Param),
/// [`ViewQuery`](RenderCommand::ViewQuery) and
/// [`ItemQuery`](RenderCommand::ItemQuery) are used.
/// They specify which information is required to execute the render command.
///
/// Multiple render commands can be combined together by wrapping them in a tuple.
///
/// # Example
///
/// The `DrawMaterial` draw function is created from the following render command
/// tuple. Const generics are used to set specific bind group locations:
///
/// ```
/// # use bevy_render::render_phase::SetItemPipeline;
/// # struct SetMeshViewBindGroup<const N: usize>;
/// # struct SetMeshViewBindingArrayBindGroup<const N: usize>;
/// # struct SetMeshBindGroup<const N: usize>;
/// # struct SetMaterialBindGroup<M, const N: usize>(std::marker::PhantomData<M>);
/// # struct DrawMesh;
/// pub type DrawMaterial<M> = (
///     SetItemPipeline,
///     SetMeshViewBindGroup<0>,
///     SetMeshViewBindingArrayBindGroup<1>,
///     SetMeshBindGroup<2>,
///     SetMaterialBindGroup<M, 3>,
///     DrawMesh,
/// );
/// ```
pub trait RenderCommand<P: PhaseItem> {
    /// Specifies the general ECS data (e.g. resources) required by [`RenderCommand::render`].
    ///
    /// When fetching resources, note that, due to lifetime limitations of the `Deref` trait,
    /// [`SRes::into_inner`] must be called on each [`SRes`] reference in the
    /// [`RenderCommand::render`] method, instead of being automatically dereferenced as is the
    /// case in normal `systems`.
    ///
    /// All parameters have to be read only.
    ///
    /// [`SRes`]: bevy_ecs::system::lifetimeless::SRes
    /// [`SRes::into_inner`]: bevy_ecs::system::lifetimeless::SRes::into_inner
    type Param: SystemParam + 'static;
    /// Specifies the ECS data of the view entity required by [`RenderCommand::render`].
    ///
    /// The view entity refers to the camera, or shadow-casting light, etc. from which the phase
    /// item will be rendered from.
    /// All components have to be accessed read only.
    type ViewQuery: ReadOnlyQueryData;
    /// Specifies the ECS data of the item entity required by [`RenderCommand::render`].
    ///
    /// The item is the entity that will be rendered for the corresponding view.
    /// All components have to be accessed read only.
    ///
    /// For efficiency reasons, Bevy doesn't always extract entities to the
    /// render world; for instance, entities that simply consist of meshes are
    /// often not extracted. If the entity doesn't exist in the render world,
    /// the supplied query data will be `None`.
    type ItemQuery: ReadOnlyQueryData;

    /// Renders a [`PhaseItem`] by recording commands (e.g. setting pipelines, binding bind groups,
    /// issuing draw calls, etc.) via the [`TrackedRenderPass`].
    fn render<'w>(
        item: &P,
        view: ROQueryItem<'w, '_, Self::ViewQuery>,
        entity: Option<ROQueryItem<'w, '_, Self::ItemQuery>>,
        param: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult;
}

/// The result of a [`RenderCommand`].
#[derive(Debug)]
pub enum RenderCommandResult {
    Success,
    Skip,
    Failure(&'static str),
}

// Implements `RenderCommand` for tuples of render commands: each sub-command is
// executed in order, short-circuiting on the first `Skip` or `Failure`.
macro_rules! render_command_tuple_impl {
    ($(#[$meta:meta])* $(($name: ident, $view: ident, $entity: ident)),*) => {
        $(#[$meta])*
        impl<P: PhaseItem, $($name: RenderCommand<P>),*> RenderCommand<P> for ($($name,)*) {
            type Param = ($($name::Param,)*);
            type ViewQuery = ($($name::ViewQuery,)*);
            type ItemQuery = ($($name::ItemQuery,)*);

            #[expect(
                clippy::allow_attributes,
                reason = "We are in a macro; as such, `non_snake_case` may not always lint."
            )]
            #[allow(
                non_snake_case,
                reason = "Parameter and variable names are provided by the macro invocation, not by us."
            )]
            fn render<'w>(
                _item: &P,
                ($($view,)*): ROQueryItem<'w, '_, Self::ViewQuery>,
                maybe_entities: Option<ROQueryItem<'w, '_, Self::ItemQuery>>,
                ($($name,)*): SystemParamItem<'w, '_, Self::Param>,
                _pass: &mut TrackedRenderPass<'w>,
            ) -> RenderCommandResult {
                match maybe_entities {
                    None => {
                        $(
                            match $name::render(_item, $view, None, $name, _pass) {
                                RenderCommandResult::Skip => return RenderCommandResult::Skip,
                                RenderCommandResult::Failure(reason) => return RenderCommandResult::Failure(reason),
                                _ => {},
                            }
                        )*
                    }
                    Some(($($entity,)*)) => {
                        $(
                            match $name::render(_item, $view, Some($entity), $name, _pass) {
                                RenderCommandResult::Skip => return RenderCommandResult::Skip,
                                RenderCommandResult::Failure(reason) => return RenderCommandResult::Failure(reason),
                                _ => {},
                            }
                        )*
                    }
                }
                RenderCommandResult::Success
            }
        }
    };
}

all_tuples!(
    #[doc(fake_variadic)]
    render_command_tuple_impl,
    0,
    15,
    C,
    V,
    E
);

/// Wraps a [`RenderCommand`] into a state so that it can be used as a [`Draw`] function.
///
/// The [`RenderCommand::Param`], [`RenderCommand::ViewQuery`] and
/// [`RenderCommand::ItemQuery`] are fetched from the ECS and passed to the command.
pub struct RenderCommandState<P: PhaseItem + 'static, C: RenderCommand<P>> {
    state: SystemState<C::Param>,
    view: QueryState<C::ViewQuery>,
    entity: QueryState<C::ItemQuery>,
}

impl<P: PhaseItem, C: RenderCommand<P>> RenderCommandState<P, C> {
    /// Creates a new [`RenderCommandState`] for the [`RenderCommand`].
    pub fn new(world: &mut World) -> Self {
        Self {
            state: SystemState::new(world),
            view: world.query(),
            entity: world.query(),
        }
    }
}

impl<P: PhaseItem, C: RenderCommand<P> + Send + Sync + 'static> Draw<P> for RenderCommandState<P, C>
where
    C::Param: ReadOnlySystemParam,
{
    /// Prepares the render command to be used. This is called once and only once before the phase
    /// begins. There may be zero or more [`draw`](RenderCommandState::draw) calls following a call to this function.
    fn prepare(&mut self, world: &'_ World) {
        // Refresh cached query states so `get_manual` below can see entities in
        // archetypes created since the last phase.
        self.view.update_archetypes(world);
        self.entity.update_archetypes(world);
    }

    /// Fetches the ECS parameters for the wrapped [`RenderCommand`] and then renders it.
    fn draw<'w>(
        &mut self,
        world: &'w World,
        pass: &mut TrackedRenderPass<'w>,
        view: Entity,
        item: &P,
    ) -> Result<(), DrawError> {
        let param = self.state.get(world);
        let view = match self.view.get_manual(world, view) {
            Ok(view) => view,
            Err(err) => match err {
                QueryEntityError::NotSpawned(_) => return Err(DrawError::ViewEntityNotFound),
                QueryEntityError::QueryDoesNotMatch(_, _)
                | QueryEntityError::AliasedMutability(_) => {
                    return Err(DrawError::InvalidViewQuery)
                }
            },
        };
        // Item entities may legitimately be absent from the render world, so a
        // failed lookup is mapped to `None` rather than an error.
        let entity = self.entity.get_manual(world, item.entity()).ok();
        match C::render(item, view, entity, param, pass) {
            RenderCommandResult::Success | RenderCommandResult::Skip => Ok(()),
            RenderCommandResult::Failure(reason) => Err(DrawError::RenderCommandFailure(reason)),
        }
    }
}

/// Registers a [`RenderCommand`] as a [`Draw`] function.
/// They are stored inside the [`DrawFunctions`] resource of the app.
pub trait AddRenderCommand {
    /// Adds the [`RenderCommand`] for the specified render phase to the app.
    fn add_render_command<P: PhaseItem, C: RenderCommand<P> + Send + Sync + 'static>(
        &mut self,
    ) -> &mut Self
    where
        C::Param: ReadOnlySystemParam;
}

impl AddRenderCommand for SubApp {
    fn add_render_command<P: PhaseItem, C: RenderCommand<P> + Send + Sync + 'static>(
        &mut self,
    ) -> &mut Self
    where
        C::Param: ReadOnlySystemParam,
    {
        let draw_function = RenderCommandState::<P, C>::new(self.world_mut());
        let draw_functions = self
            .world()
            .get_resource::<DrawFunctions<P>>()
            .unwrap_or_else(|| {
                panic!(
                    "DrawFunctions<{}> must be added to the world as a resource \
                     before adding render commands to it",
                    core::any::type_name::<P>(),
                );
            });
        draw_functions.write().add_with::<C, _>(draw_function);
        self
    }
}

impl AddRenderCommand for App {
    fn add_render_command<P: PhaseItem, C: RenderCommand<P> + Send + Sync + 'static>(
        &mut self,
    ) -> &mut Self
    where
        C::Param: ReadOnlySystemParam,
    {
        // Render commands always live in the main sub-app's world.
        SubApp::add_render_command::<P, C>(self.main_mut());
        self
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_phase/draw_state.rs
crates/bevy_render/src/render_phase/draw_state.rs
use crate::{
    diagnostic::internal::{Pass, PassKind, WritePipelineStatistics, WriteTimestamp},
    render_resource::{
        BindGroup, BindGroupId, Buffer, BufferId, BufferSlice, RenderPipeline, RenderPipelineId,
        ShaderStages,
    },
    renderer::RenderDevice,
};
use bevy_camera::Viewport;
use bevy_color::LinearRgba;
use bevy_utils::default;
use core::ops::Range;
use wgpu::{IndexFormat, QuerySet, RenderPass};

#[cfg(feature = "detailed_trace")]
use tracing::trace;

// Identity of a bound buffer slice: buffer id plus offset and size.
type BufferSliceKey = (BufferId, wgpu::BufferAddress, wgpu::BufferSize);

/// Tracks the state of a [`TrackedRenderPass`].
///
/// This is used to skip redundant operations on the [`TrackedRenderPass`] (e.g. setting an already
/// set pipeline, binding an already bound bind group). These operations can otherwise be fairly
/// costly due to IO to the GPU, so deduplicating these calls results in a speedup.
#[derive(Debug, Default)]
struct DrawState {
    // Currently bound pipeline, if any.
    pipeline: Option<RenderPipelineId>,
    // Per-slot bind group id plus its dynamic offsets.
    bind_groups: Vec<(Option<BindGroupId>, Vec<u32>)>,
    /// List of vertex buffers by [`BufferId`], offset, and size. See [`DrawState::buffer_slice_key`]
    vertex_buffers: Vec<Option<BufferSliceKey>>,
    index_buffer: Option<(BufferSliceKey, IndexFormat)>,

    /// Stores whether this state is populated or empty for quick state invalidation
    stores_state: bool,
}

impl DrawState {
    /// Marks the `pipeline` as bound.
    fn set_pipeline(&mut self, pipeline: RenderPipelineId) {
        // TODO: do these need to be cleared?
        // self.bind_groups.clear();
        // self.vertex_buffers.clear();
        // self.index_buffer = None;
        self.pipeline = Some(pipeline);
        self.stores_state = true;
    }

    /// Checks, whether the `pipeline` is already bound.
    fn is_pipeline_set(&self, pipeline: RenderPipelineId) -> bool {
        self.pipeline == Some(pipeline)
    }

    /// Marks the `bind_group` as bound to the `index`.
    fn set_bind_group(&mut self, index: usize, bind_group: BindGroupId, dynamic_indices: &[u32]) {
        // Reuse the slot's existing Vec allocation for the dynamic offsets.
        let group = &mut self.bind_groups[index];
        group.0 = Some(bind_group);
        group.1.clear();
        group.1.extend(dynamic_indices);
        self.stores_state = true;
    }

    /// Checks, whether the `bind_group` is already bound to the `index`.
    fn is_bind_group_set(
        &self,
        index: usize,
        bind_group: BindGroupId,
        dynamic_indices: &[u32],
    ) -> bool {
        if let Some(current_bind_group) = self.bind_groups.get(index) {
            // Both the group id and the dynamic offsets must match.
            current_bind_group.0 == Some(bind_group) && dynamic_indices == current_bind_group.1
        } else {
            false
        }
    }

    /// Marks the vertex `buffer` as bound to the `index`.
    fn set_vertex_buffer(&mut self, index: usize, buffer_slice: BufferSlice) {
        self.vertex_buffers[index] = Some(self.buffer_slice_key(&buffer_slice));
        self.stores_state = true;
    }

    /// Checks, whether the vertex `buffer` is already bound to the `index`.
    fn is_vertex_buffer_set(&self, index: usize, buffer_slice: &BufferSlice) -> bool {
        if let Some(current) = self.vertex_buffers.get(index) {
            *current == Some(self.buffer_slice_key(buffer_slice))
        } else {
            false
        }
    }

    /// Returns the value used for checking whether `BufferSlice`s are equivalent.
    fn buffer_slice_key(&self, buffer_slice: &BufferSlice) -> BufferSliceKey {
        (
            buffer_slice.id(),
            buffer_slice.offset(),
            buffer_slice.size(),
        )
    }

    /// Marks the index `buffer` as bound.
    fn set_index_buffer(&mut self, buffer_slice: &BufferSlice, index_format: IndexFormat) {
        self.index_buffer = Some((self.buffer_slice_key(buffer_slice), index_format));
        self.stores_state = true;
    }

    /// Checks, whether the index `buffer` is already bound.
fn is_index_buffer_set(&self, buffer: &BufferSlice, index_format: IndexFormat) -> bool { self.index_buffer == Some((self.buffer_slice_key(buffer), index_format)) } /// Resets tracking state pub fn reset_tracking(&mut self) { if !self.stores_state { return; } self.pipeline = None; self.bind_groups.iter_mut().for_each(|val| { val.0 = None; val.1.clear(); }); self.vertex_buffers.iter_mut().for_each(|val| { *val = None; }); self.index_buffer = None; self.stores_state = false; } } /// A [`RenderPass`], which tracks the current pipeline state to skip redundant operations. /// /// It is used to set the current [`RenderPipeline`], [`BindGroup`]s and [`Buffer`]s. /// After all requirements are specified, draw calls can be issued. pub struct TrackedRenderPass<'a> { pass: RenderPass<'a>, state: DrawState, } impl<'a> TrackedRenderPass<'a> { /// Tracks the supplied render pass. pub fn new(device: &RenderDevice, pass: RenderPass<'a>) -> Self { let limits = device.limits(); let max_bind_groups = limits.max_bind_groups as usize; let max_vertex_buffers = limits.max_vertex_buffers as usize; Self { state: DrawState { bind_groups: vec![(None, Vec::new()); max_bind_groups], vertex_buffers: vec![None; max_vertex_buffers], ..default() }, pass, } } /// Returns the wgpu [`RenderPass`]. /// /// Function invalidates internal tracking state, /// some redundant pipeline operations may not be skipped. pub fn wgpu_pass(&mut self) -> &mut RenderPass<'a> { self.state.reset_tracking(); &mut self.pass } /// Sets the active [`RenderPipeline`]. /// /// Subsequent draw calls will exhibit the behavior defined by the `pipeline`. pub fn set_render_pipeline(&mut self, pipeline: &'a RenderPipeline) { #[cfg(feature = "detailed_trace")] trace!("set pipeline: {:?}", pipeline); if self.state.is_pipeline_set(pipeline.id()) { return; } self.pass.set_pipeline(pipeline); self.state.set_pipeline(pipeline.id()); } /// Sets the active bind group for a given bind group index. 
The bind group layout /// in the active pipeline when any `draw()` function is called must match the layout of /// this bind group. /// /// If the bind group have dynamic offsets, provide them in binding order. /// These offsets have to be aligned to [`WgpuLimits::min_uniform_buffer_offset_alignment`](crate::settings::WgpuLimits::min_uniform_buffer_offset_alignment) /// or [`WgpuLimits::min_storage_buffer_offset_alignment`](crate::settings::WgpuLimits::min_storage_buffer_offset_alignment) appropriately. pub fn set_bind_group( &mut self, index: usize, bind_group: &'a BindGroup, dynamic_uniform_indices: &[u32], ) { if self .state .is_bind_group_set(index, bind_group.id(), dynamic_uniform_indices) { #[cfg(feature = "detailed_trace")] trace!( "set bind_group {} (already set): {:?} ({:?})", index, bind_group, dynamic_uniform_indices ); return; } #[cfg(feature = "detailed_trace")] trace!( "set bind_group {}: {:?} ({:?})", index, bind_group, dynamic_uniform_indices ); self.pass .set_bind_group(index as u32, bind_group, dynamic_uniform_indices); self.state .set_bind_group(index, bind_group.id(), dynamic_uniform_indices); } /// Assign a vertex buffer to a slot. /// /// Subsequent calls to [`draw`] and [`draw_indexed`] on this /// [`TrackedRenderPass`] will use `buffer` as one of the source vertex buffers. /// /// The `slot_index` refers to the index of the matching descriptor in /// [`VertexState::buffers`](crate::render_resource::VertexState::buffers). 
/// /// [`draw`]: TrackedRenderPass::draw /// [`draw_indexed`]: TrackedRenderPass::draw_indexed pub fn set_vertex_buffer(&mut self, slot_index: usize, buffer_slice: BufferSlice<'a>) { if self.state.is_vertex_buffer_set(slot_index, &buffer_slice) { #[cfg(feature = "detailed_trace")] trace!( "set vertex buffer {} (already set): {:?} (offset = {}, size = {})", slot_index, buffer_slice.id(), buffer_slice.offset(), buffer_slice.size(), ); return; } #[cfg(feature = "detailed_trace")] trace!( "set vertex buffer {}: {:?} (offset = {}, size = {})", slot_index, buffer_slice.id(), buffer_slice.offset(), buffer_slice.size(), ); self.pass .set_vertex_buffer(slot_index as u32, *buffer_slice); self.state.set_vertex_buffer(slot_index, buffer_slice); } /// Sets the active index buffer. /// /// Subsequent calls to [`TrackedRenderPass::draw_indexed`] will use the buffer referenced by /// `buffer_slice` as the source index buffer. pub fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) { let already_set = self.state.is_index_buffer_set(&buffer_slice, index_format); #[cfg(feature = "detailed_trace")] trace!( "set index buffer{}: {:?} (offset = {}, size = {})", if already_set { " (already set)" } else { "" }, buffer_slice.id(), buffer_slice.offset(), buffer_slice.size(), ); if already_set { return; } self.pass.set_index_buffer(*buffer_slice, index_format); self.state.set_index_buffer(&buffer_slice, index_format); } /// Draws primitives from the active vertex buffer(s). /// /// The active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`]. pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) { #[cfg(feature = "detailed_trace")] trace!("draw: {:?} {:?}", vertices, instances); self.pass.draw(vertices, instances); } /// Draws indexed primitives using the active index buffer and the active vertex buffer(s). 
/// /// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the /// active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`]. pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) { #[cfg(feature = "detailed_trace")] trace!( "draw indexed: {:?} {} {:?}", indices, base_vertex, instances ); self.pass.draw_indexed(indices, base_vertex, instances); } /// Draws primitives from the active vertex buffer(s) based on the contents of the /// `indirect_buffer`. /// /// The active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// The structure expected in `indirect_buffer` is the following: /// /// ``` /// #[repr(C)] /// struct DrawIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. /// first_vertex: u32, // The Index of the first vertex to draw. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. /// } /// ``` pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: u64) { #[cfg(feature = "detailed_trace")] trace!("draw indirect: {:?} {}", indirect_buffer, indirect_offset); self.pass.draw_indirect(indirect_buffer, indirect_offset); } /// Draws indexed primitives using the active index buffer and the active vertex buffers, /// based on the contents of the `indirect_buffer`. /// /// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the /// active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// The structure expected in `indirect_buffer` is the following: /// /// ``` /// #[repr(C)] /// struct DrawIndexedIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. 
/// first_index: u32, // The base index within the index buffer. /// vertex_offset: i32, // The value added to the vertex index before indexing into the vertex buffer. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. /// } /// ``` pub fn draw_indexed_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: u64) { #[cfg(feature = "detailed_trace")] trace!( "draw indexed indirect: {:?} {}", indirect_buffer, indirect_offset ); self.pass .draw_indexed_indirect(indirect_buffer, indirect_offset); } /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of the /// `indirect_buffer`.`count` draw calls are issued. /// /// The active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// `indirect_buffer` should contain `count` tightly packed elements of the following structure: /// /// ``` /// #[repr(C)] /// struct DrawIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. /// first_vertex: u32, // The Index of the first vertex to draw. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. /// } /// ``` pub fn multi_draw_indirect( &mut self, indirect_buffer: &'a Buffer, indirect_offset: u64, count: u32, ) { #[cfg(feature = "detailed_trace")] trace!( "multi draw indirect: {:?} {}, {}x", indirect_buffer, indirect_offset, count ); self.pass .multi_draw_indirect(indirect_buffer, indirect_offset, count); } /// Dispatches multiple draw calls from the active vertex buffer(s) based on the contents of /// the `indirect_buffer`. /// The count buffer is read to determine how many draws to issue. 
/// /// The indirect buffer must be long enough to account for `max_count` draws, however only /// `count` elements will be read, where `count` is the value read from `count_buffer` capped /// at `max_count`. /// /// The active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// `indirect_buffer` should contain `count` tightly packed elements of the following structure: /// /// ``` /// #[repr(C)] /// struct DrawIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. /// first_vertex: u32, // The Index of the first vertex to draw. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. /// } /// ``` pub fn multi_draw_indirect_count( &mut self, indirect_buffer: &'a Buffer, indirect_offset: u64, count_buffer: &'a Buffer, count_offset: u64, max_count: u32, ) { #[cfg(feature = "detailed_trace")] trace!( "multi draw indirect count: {:?} {}, ({:?} {})x, max {}x", indirect_buffer, indirect_offset, count_buffer, count_offset, max_count ); self.pass.multi_draw_indirect_count( indirect_buffer, indirect_offset, count_buffer, count_offset, max_count, ); } /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers, /// based on the contents of the `indirect_buffer`. `count` draw calls are issued. /// /// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the /// active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// `indirect_buffer` should contain `count` tightly packed elements of the following structure: /// /// ``` /// #[repr(C)] /// struct DrawIndexedIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. /// first_index: u32, // The base index within the index buffer. 
/// vertex_offset: i32, // The value added to the vertex index before indexing into the vertex buffer. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. /// } /// ``` pub fn multi_draw_indexed_indirect( &mut self, indirect_buffer: &'a Buffer, indirect_offset: u64, count: u32, ) { #[cfg(feature = "detailed_trace")] trace!( "multi draw indexed indirect: {:?} {}, {}x", indirect_buffer, indirect_offset, count ); self.pass .multi_draw_indexed_indirect(indirect_buffer, indirect_offset, count); } /// Dispatches multiple draw calls from the active index buffer and the active vertex buffers, /// based on the contents of the `indirect_buffer`. /// The count buffer is read to determine how many draws to issue. /// /// The indirect buffer must be long enough to account for `max_count` draws, however only /// `count` elements will be read, where `count` is the value read from `count_buffer` capped /// at `max_count`. /// /// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the /// active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`]. /// /// `indirect_buffer` should contain `count` tightly packed elements of the following structure: /// /// ``` /// #[repr(C)] /// struct DrawIndexedIndirect { /// vertex_count: u32, // The number of vertices to draw. /// instance_count: u32, // The number of instances to draw. /// first_index: u32, // The base index within the index buffer. /// vertex_offset: i32, // The value added to the vertex index before indexing into the vertex buffer. /// first_instance: u32, // The instance ID of the first instance to draw. /// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled. 
/// } /// ``` pub fn multi_draw_indexed_indirect_count( &mut self, indirect_buffer: &'a Buffer, indirect_offset: u64, count_buffer: &'a Buffer, count_offset: u64, max_count: u32, ) { #[cfg(feature = "detailed_trace")] trace!( "multi draw indexed indirect count: {:?} {}, ({:?} {})x, max {}x", indirect_buffer, indirect_offset, count_buffer, count_offset, max_count ); self.pass.multi_draw_indexed_indirect_count( indirect_buffer, indirect_offset, count_buffer, count_offset, max_count, ); } /// Sets the stencil reference. /// /// Subsequent stencil tests will test against this value. pub fn set_stencil_reference(&mut self, reference: u32) { #[cfg(feature = "detailed_trace")] trace!("set stencil reference: {}", reference); self.pass.set_stencil_reference(reference); } /// Sets the scissor region. /// /// Subsequent draw calls will discard any fragments that fall outside this region. pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { #[cfg(feature = "detailed_trace")] trace!("set_scissor_rect: {} {} {} {}", x, y, width, height); self.pass.set_scissor_rect(x, y, width, height); } /// Set push constant data. /// /// `Features::PUSH_CONSTANTS` must be enabled on the device in order to call these functions. pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { #[cfg(feature = "detailed_trace")] trace!( "set push constants: {:?} offset: {} data.len: {}", stages, offset, data.len() ); self.pass.set_push_constants(stages, offset, data); } /// Set the rendering viewport. /// /// Subsequent draw calls will be projected into that viewport. pub fn set_viewport( &mut self, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32, ) { #[cfg(feature = "detailed_trace")] trace!( "set viewport: {} {} {} {} {} {}", x, y, width, height, min_depth, max_depth ); self.pass .set_viewport(x, y, width, height, min_depth, max_depth); } /// Set the rendering viewport to the given camera [`Viewport`]. 
/// /// Subsequent draw calls will be projected into that viewport. pub fn set_camera_viewport(&mut self, viewport: &Viewport) { self.set_viewport( viewport.physical_position.x as f32, viewport.physical_position.y as f32, viewport.physical_size.x as f32, viewport.physical_size.y as f32, viewport.depth.start, viewport.depth.end, ); } /// Insert a single debug marker. /// /// This is a GPU debugging feature. This has no effect on the rendering itself. pub fn insert_debug_marker(&mut self, label: &str) { #[cfg(feature = "detailed_trace")] trace!("insert debug marker: {}", label); self.pass.insert_debug_marker(label); } /// Start a new debug group. /// /// Push a new debug group over the internal stack. Subsequent render commands and debug /// markers are grouped into this new group, until [`pop_debug_group`] is called. /// /// ``` /// # fn example(mut pass: bevy_render::render_phase::TrackedRenderPass<'static>) { /// pass.push_debug_group("Render the car"); /// // [setup pipeline etc...] /// pass.draw(0..64, 0..1); /// pass.pop_debug_group(); /// # } /// ``` /// /// Note that [`push_debug_group`] and [`pop_debug_group`] must always be called in pairs. /// /// This is a GPU debugging feature. This has no effect on the rendering itself. /// /// [`push_debug_group`]: TrackedRenderPass::push_debug_group /// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group pub fn push_debug_group(&mut self, label: &str) { #[cfg(feature = "detailed_trace")] trace!("push_debug_group marker: {}", label); self.pass.push_debug_group(label); } /// End the current debug group. /// /// Subsequent render commands and debug markers are not grouped anymore in /// this group, but in the previous one (if any) or the default top-level one /// if the debug group was the last one on the stack. /// /// Note that [`push_debug_group`] and [`pop_debug_group`] must always be called in pairs. /// /// This is a GPU debugging feature. This has no effect on the rendering itself. 
/// /// [`push_debug_group`]: TrackedRenderPass::push_debug_group /// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group pub fn pop_debug_group(&mut self) { #[cfg(feature = "detailed_trace")] trace!("pop_debug_group"); self.pass.pop_debug_group(); } /// Sets the blend color as used by some of the blending modes. /// /// Subsequent blending tests will test against this value. pub fn set_blend_constant(&mut self, color: LinearRgba) { #[cfg(feature = "detailed_trace")] trace!("set blend constant: {:?}", color); self.pass.set_blend_constant(wgpu::Color::from(color)); } } impl WriteTimestamp for TrackedRenderPass<'_> { fn write_timestamp(&mut self, query_set: &QuerySet, index: u32) { self.pass.write_timestamp(query_set, index); } } impl WritePipelineStatistics for TrackedRenderPass<'_> { fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, index: u32) { self.pass.begin_pipeline_statistics_query(query_set, index); } fn end_pipeline_statistics_query(&mut self) { self.pass.end_pipeline_statistics_query(); } } impl Pass for TrackedRenderPass<'_> { const KIND: PassKind = PassKind::Render; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_phase/rangefinder.rs
crates/bevy_render/src/render_phase/rangefinder.rs
use bevy_math::{Affine3A, Mat4, Vec3, Vec4}; /// A distance calculator for the draw order of [`PhaseItem`](crate::render_phase::PhaseItem)s. pub struct ViewRangefinder3d { view_from_world_row_2: Vec4, } impl ViewRangefinder3d { /// Creates a 3D rangefinder for a view matrix. pub fn from_world_from_view(world_from_view: &Affine3A) -> ViewRangefinder3d { let view_from_world = world_from_view.inverse(); ViewRangefinder3d { view_from_world_row_2: Mat4::from(view_from_world).row(2), } } /// Calculates the distance, or view-space `Z` value, for the given world-space `position`. #[inline] pub fn distance(&self, position: &Vec3) -> f32 { // NOTE: row 2 of the inverse view matrix dotted with the world-space position // gives the z component of the point in view-space self.view_from_world_row_2.dot(position.extend(1.0)) } } #[cfg(test)] mod tests { use super::ViewRangefinder3d; use bevy_math::{Affine3A, Vec3}; #[test] fn distance() { let view_matrix = Affine3A::from_translation(Vec3::new(0.0, 0.0, -1.0)); let rangefinder = ViewRangefinder3d::from_world_from_view(&view_matrix); assert_eq!(rangefinder.distance(&Vec3::new(0., 0., 0.)), 1.0); assert_eq!(rangefinder.distance(&Vec3::new(0., 0., 1.)), 2.0); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/render_phase/mod.rs
crates/bevy_render/src/render_phase/mod.rs
//! The modular rendering abstraction responsible for queuing, preparing, sorting and drawing //! entities as part of separate render phases. //! //! In Bevy each view (camera, or shadow-casting light, etc.) has one or multiple render phases //! (e.g. opaque, transparent, shadow, etc). //! They are used to queue entities for rendering. //! Multiple phases might be required due to different sorting/batching behaviors //! (e.g. opaque: front to back, transparent: back to front) or because one phase depends on //! the rendered texture of the previous phase (e.g. for screen-space reflections). //! //! To draw an entity, a corresponding [`PhaseItem`] has to be added to one or multiple of these //! render phases for each view that it is visible in. //! This must be done in the [`RenderSystems::Queue`]. //! After that the render phase sorts them in the [`RenderSystems::PhaseSort`]. //! Finally the items are rendered using a single [`TrackedRenderPass`], during //! the [`RenderSystems::Render`]. //! //! Therefore each phase item is assigned a [`Draw`] function. //! These set up the state of the [`TrackedRenderPass`] (i.e. select the //! [`RenderPipeline`](crate::render_resource::RenderPipeline), configure the //! [`BindGroup`](crate::render_resource::BindGroup)s, etc.) and then issue a draw call, //! for the corresponding item. //! //! The [`Draw`] function trait can either be implemented directly or such a function can be //! created by composing multiple [`RenderCommand`]s. 
mod draw; mod draw_state; mod rangefinder; use bevy_app::{App, Plugin}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::change_detection::Tick; use bevy_ecs::entity::EntityHash; use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_utils::default; pub use draw::*; pub use draw_state::*; use encase::{internal::WriteInto, ShaderSize}; use fixedbitset::{Block, FixedBitSet}; use indexmap::IndexMap; use nonmax::NonMaxU32; pub use rangefinder::*; use wgpu::Features; use crate::batching::gpu_preprocessing::{ GpuPreprocessingMode, GpuPreprocessingSupport, PhaseBatchedInstanceBuffers, PhaseIndirectParametersBuffers, }; use crate::renderer::RenderDevice; use crate::sync_world::{MainEntity, MainEntityHashMap}; use crate::view::RetainedViewEntity; use crate::RenderDebugFlags; use crate::{ batching::{ self, gpu_preprocessing::{self, BatchedInstanceBuffers}, no_gpu_preprocessing::{self, BatchedInstanceBuffer}, GetFullBatchData, }, render_resource::{CachedRenderPipelineId, GpuArrayBufferIndex, PipelineCache}, Render, RenderApp, RenderSystems, }; use bevy_ecs::intern::Interned; use bevy_ecs::{ define_label, prelude::*, system::{lifetimeless::SRes, SystemParamItem}, }; use bevy_render::renderer::RenderAdapterInfo; pub use bevy_render_macros::ShaderLabel; use core::{fmt::Debug, hash::Hash, iter, marker::PhantomData, ops::Range, slice::SliceIndex}; use smallvec::SmallVec; use tracing::warn; define_label!( #[diagnostic::on_unimplemented( note = "consider annotating `{Self}` with `#[derive(ShaderLabel)]`" )] /// Labels used to uniquely identify types of material shaders ShaderLabel, SHADER_LABEL_INTERNER ); /// A shorthand for `Interned<dyn RenderSubGraph>`. 
pub type InternedShaderLabel = Interned<dyn ShaderLabel>; pub use bevy_render_macros::DrawFunctionLabel; define_label!( #[diagnostic::on_unimplemented( note = "consider annotating `{Self}` with `#[derive(DrawFunctionLabel)]`" )] /// Labels used to uniquely identify types of material shaders DrawFunctionLabel, DRAW_FUNCTION_LABEL_INTERNER ); pub type InternedDrawFunctionLabel = Interned<dyn DrawFunctionLabel>; /// Stores the rendering instructions for a single phase that uses bins in all /// views. /// /// They're cleared out every frame, but storing them in a resource like this /// allows us to reuse allocations. #[derive(Resource, Deref, DerefMut)] pub struct ViewBinnedRenderPhases<BPI>(pub HashMap<RetainedViewEntity, BinnedRenderPhase<BPI>>) where BPI: BinnedPhaseItem; /// A collection of all rendering instructions, that will be executed by the GPU, for a /// single render phase for a single view. /// /// Each view (camera, or shadow-casting light, etc.) can have one or multiple render phases. /// They are used to queue entities for rendering. /// Multiple phases might be required due to different sorting/batching behaviors /// (e.g. opaque: front to back, transparent: back to front) or because one phase depends on /// the rendered texture of the previous phase (e.g. for screen-space reflections). /// All [`PhaseItem`]s are then rendered using a single [`TrackedRenderPass`]. /// The render pass might be reused for multiple phases to reduce GPU overhead. /// /// This flavor of render phase is used for phases in which the ordering is less /// critical: for example, `Opaque3d`. It's generally faster than the /// alternative [`SortedRenderPhase`]. pub struct BinnedRenderPhase<BPI> where BPI: BinnedPhaseItem, { /// The multidrawable bins. /// /// Each batch set key maps to a *batch set*, which in this case is a set of /// meshes that can be drawn together in one multidraw call. Each batch set /// is subdivided into *bins*, each of which represents a particular mesh. 
/// Each bin contains the entity IDs of instances of that mesh. /// /// So, for example, if there are two cubes and a sphere present in the /// scene, we would generally have one batch set containing two bins, /// assuming that the cubes and sphere meshes are allocated together and use /// the same pipeline. The first bin, corresponding to the cubes, will have /// two entities in it. The second bin, corresponding to the sphere, will /// have one entity in it. pub multidrawable_meshes: IndexMap<BPI::BatchSetKey, IndexMap<BPI::BinKey, RenderBin>>, /// The bins corresponding to batchable items that aren't multidrawable. /// /// For multidrawable entities, use `multidrawable_meshes`; for /// unbatchable entities, use `unbatchable_values`. pub batchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, /// The unbatchable bins. /// /// Each entity here is rendered in a separate drawcall. pub unbatchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, /// Items in the bin that aren't meshes at all. /// /// Bevy itself doesn't place anything in this list, but plugins or your app /// can in order to execute custom drawing commands. Draw functions for each /// entity are simply called in order at rendering time. /// /// See the `custom_phase_item` example for an example of how to use this. pub non_mesh_items: IndexMap<(BPI::BatchSetKey, BPI::BinKey), NonMeshEntities>, /// Information on each batch set. /// /// A *batch set* is a set of entities that will be batched together unless /// we're on a platform that doesn't support storage buffers (e.g. WebGL 2) /// and differing dynamic uniform indices force us to break batches. On /// platforms that support storage buffers, a batch set always consists of /// at most one batch. /// /// Multidrawable entities come first, then batchable entities, then /// unbatchable entities. pub(crate) batch_sets: BinnedRenderPhaseBatchSets<BPI::BinKey>, /// The batch and bin key for each entity. 
/// /// We retain these so that, when the entity changes, /// [`Self::sweep_old_entities`] can quickly find the bin it was located in /// and remove it. cached_entity_bin_keys: IndexMap<MainEntity, CachedBinnedEntity<BPI>, EntityHash>, /// The set of indices in [`Self::cached_entity_bin_keys`] that are /// confirmed to be up to date. /// /// Note that each bit in this bit set refers to an *index* in the /// [`IndexMap`] (i.e. a bucket in the hash table). They aren't entity IDs. valid_cached_entity_bin_keys: FixedBitSet, /// The set of entities that changed bins this frame. /// /// An entity will only be present in this list if it was in one bin on the /// previous frame and is in a new bin on this frame. Each list entry /// specifies the bin the entity used to be in. We use this in order to /// remove the entity from the old bin during /// [`BinnedRenderPhase::sweep_old_entities`]. entities_that_changed_bins: Vec<EntityThatChangedBins<BPI>>, /// The gpu preprocessing mode configured for the view this phase is associated /// with. gpu_preprocessing_mode: GpuPreprocessingMode, } /// All entities that share a mesh and a material and can be batched as part of /// a [`BinnedRenderPhase`]. #[derive(Default)] pub struct RenderBin { /// A list of the entities in each bin, along with their cached /// [`InputUniformIndex`]. entities: IndexMap<MainEntity, InputUniformIndex, EntityHash>, } /// Information that we track about an entity that was in one bin on the /// previous frame and is in a different bin this frame. struct EntityThatChangedBins<BPI> where BPI: BinnedPhaseItem, { /// The entity. main_entity: MainEntity, /// The key that identifies the bin that this entity used to be in. old_cached_binned_entity: CachedBinnedEntity<BPI>, } /// Information that we keep about an entity currently within a bin. pub struct CachedBinnedEntity<BPI> where BPI: BinnedPhaseItem, { /// Information that we use to identify a cached entity in a bin. 
pub cached_bin_key: Option<CachedBinKey<BPI>>, /// The last modified tick of the entity. /// /// We use this to detect when the entity needs to be invalidated. pub change_tick: Tick, } /// Information that we use to identify a cached entity in a bin. pub struct CachedBinKey<BPI> where BPI: BinnedPhaseItem, { /// The key of the batch set containing the entity. pub batch_set_key: BPI::BatchSetKey, /// The key of the bin containing the entity. pub bin_key: BPI::BinKey, /// The type of render phase that we use to render the entity: multidraw, /// plain batch, etc. pub phase_type: BinnedRenderPhaseType, } impl<BPI> Clone for CachedBinnedEntity<BPI> where BPI: BinnedPhaseItem, { fn clone(&self) -> Self { CachedBinnedEntity { cached_bin_key: self.cached_bin_key.clone(), change_tick: self.change_tick, } } } impl<BPI> Clone for CachedBinKey<BPI> where BPI: BinnedPhaseItem, { fn clone(&self) -> Self { CachedBinKey { batch_set_key: self.batch_set_key.clone(), bin_key: self.bin_key.clone(), phase_type: self.phase_type, } } } impl<BPI> PartialEq for CachedBinKey<BPI> where BPI: BinnedPhaseItem, { fn eq(&self, other: &Self) -> bool { self.batch_set_key == other.batch_set_key && self.bin_key == other.bin_key && self.phase_type == other.phase_type } } /// How we store and render the batch sets. /// /// Each one of these corresponds to a [`GpuPreprocessingMode`]. pub enum BinnedRenderPhaseBatchSets<BK> { /// Batches are grouped into batch sets based on dynamic uniforms. /// /// This corresponds to [`GpuPreprocessingMode::None`]. DynamicUniforms(Vec<SmallVec<[BinnedRenderPhaseBatch; 1]>>), /// Batches are never grouped into batch sets. /// /// This corresponds to [`GpuPreprocessingMode::PreprocessingOnly`]. Direct(Vec<BinnedRenderPhaseBatch>), /// Batches are grouped together into batch sets based on their ability to /// be multi-drawn together. /// /// This corresponds to [`GpuPreprocessingMode::Culling`]. 
MultidrawIndirect(Vec<BinnedRenderPhaseBatchSet<BK>>), } /// A group of entities that will be batched together into a single multi-draw /// call. pub struct BinnedRenderPhaseBatchSet<BK> { /// The first batch in this batch set. pub(crate) first_batch: BinnedRenderPhaseBatch, /// The key of the bin that the first batch corresponds to. pub(crate) bin_key: BK, /// The number of batches. pub(crate) batch_count: u32, /// The index of the batch set in the GPU buffer. pub(crate) index: u32, } impl<BK> BinnedRenderPhaseBatchSets<BK> { fn clear(&mut self) { match *self { BinnedRenderPhaseBatchSets::DynamicUniforms(ref mut vec) => vec.clear(), BinnedRenderPhaseBatchSets::Direct(ref mut vec) => vec.clear(), BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut vec) => vec.clear(), } } } /// Information about a single batch of entities rendered using binned phase /// items. #[derive(Debug)] pub struct BinnedRenderPhaseBatch { /// An entity that's *representative* of this batch. /// /// Bevy uses this to fetch the mesh. It can be any entity in the batch. pub representative_entity: (Entity, MainEntity), /// The range of instance indices in this batch. pub instance_range: Range<u32>, /// The dynamic offset of the batch. /// /// Note that dynamic offsets are only used on platforms that don't support /// storage buffers. pub extra_index: PhaseItemExtraIndex, } /// Information about the unbatchable entities in a bin. pub struct UnbatchableBinnedEntities { /// The entities. pub entities: MainEntityHashMap<Entity>, /// The GPU array buffer indices of each unbatchable binned entity. pub(crate) buffer_indices: UnbatchableBinnedEntityIndexSet, } /// Information about [`BinnedRenderPhaseType::NonMesh`] entities. pub struct NonMeshEntities { /// The entities. pub entities: MainEntityHashMap<Entity>, } /// Stores instance indices and dynamic offsets for unbatchable entities in a /// binned render phase. 
/// /// This is conceptually `Vec<UnbatchableBinnedEntityDynamicOffset>`, but it /// avoids the overhead of storing dynamic offsets on platforms that support /// them. In other words, this allows a fast path that avoids allocation on /// platforms that aren't WebGL 2. #[derive(Default)] pub(crate) enum UnbatchableBinnedEntityIndexSet { /// There are no unbatchable entities in this bin (yet). #[default] NoEntities, /// The instances for all unbatchable entities in this bin are contiguous, /// and there are no dynamic uniforms. /// /// This is the typical case on platforms other than WebGL 2. We special /// case this to avoid allocation on those platforms. Sparse { /// The range of indices. instance_range: Range<u32>, /// The index of the first indirect instance parameters. /// /// The other indices immediately follow these. first_indirect_parameters_index: Option<NonMaxU32>, }, /// Dynamic uniforms are present for unbatchable entities in this bin. /// /// We fall back to this on WebGL 2. Dense(Vec<UnbatchableBinnedEntityIndices>), } /// The instance index and dynamic offset (if present) for an unbatchable entity. /// /// This is only useful on platforms that don't support storage buffers. #[derive(Clone)] pub(crate) struct UnbatchableBinnedEntityIndices { /// The instance index. pub(crate) instance_index: u32, /// The [`PhaseItemExtraIndex`], if present. pub(crate) extra_index: PhaseItemExtraIndex, } /// Identifies the list within [`BinnedRenderPhase`] that a phase item is to be /// placed in. #[derive(Clone, Copy, PartialEq, Debug)] pub enum BinnedRenderPhaseType { /// The item is a mesh that's eligible for multi-draw indirect rendering and /// can be batched with other meshes of the same type. MultidrawableMesh, /// The item is a mesh that can be batched with other meshes of the same type and /// drawn in a single draw call. BatchableMesh, /// The item is a mesh that's eligible for indirect rendering, but can't be /// batched with other meshes of the same type. 
UnbatchableMesh, /// The item isn't a mesh at all. /// /// Bevy will simply invoke the drawing commands for such items one after /// another, with no further processing. /// /// The engine itself doesn't enqueue any items of this type, but it's /// available for use in your application and/or plugins. NonMesh, } impl<T> From<GpuArrayBufferIndex<T>> for UnbatchableBinnedEntityIndices where T: Clone + ShaderSize + WriteInto, { fn from(value: GpuArrayBufferIndex<T>) -> Self { UnbatchableBinnedEntityIndices { instance_index: value.index, extra_index: PhaseItemExtraIndex::maybe_dynamic_offset(value.dynamic_offset), } } } impl<BPI> Default for ViewBinnedRenderPhases<BPI> where BPI: BinnedPhaseItem, { fn default() -> Self { Self(default()) } } impl<BPI> ViewBinnedRenderPhases<BPI> where BPI: BinnedPhaseItem, { pub fn prepare_for_new_frame( &mut self, retained_view_entity: RetainedViewEntity, gpu_preprocessing: GpuPreprocessingMode, ) { match self.entry(retained_view_entity) { Entry::Occupied(mut entry) => entry.get_mut().prepare_for_new_frame(), Entry::Vacant(entry) => { entry.insert(BinnedRenderPhase::<BPI>::new(gpu_preprocessing)); } } } } /// The index of the uniform describing this object in the GPU buffer, when GPU /// preprocessing is enabled. /// /// For example, for 3D meshes, this is the index of the `MeshInputUniform` in /// the buffer. /// /// This field is ignored if GPU preprocessing isn't in use, such as (currently) /// in the case of 2D meshes. In that case, it can be safely set to /// [`core::default::Default::default`]. #[derive(Clone, Copy, PartialEq, Default, Deref, DerefMut)] #[repr(transparent)] pub struct InputUniformIndex(pub u32); impl<BPI> BinnedRenderPhase<BPI> where BPI: BinnedPhaseItem, { /// Bins a new entity. /// /// The `phase_type` parameter specifies whether the entity is a /// preprocessable mesh and whether it can be binned with meshes of the same /// type. 
pub fn add( &mut self, batch_set_key: BPI::BatchSetKey, bin_key: BPI::BinKey, (entity, main_entity): (Entity, MainEntity), input_uniform_index: InputUniformIndex, mut phase_type: BinnedRenderPhaseType, change_tick: Tick, ) { // If the user has overridden indirect drawing for this view, we need to // force the phase type to be batchable instead. if self.gpu_preprocessing_mode == GpuPreprocessingMode::PreprocessingOnly && phase_type == BinnedRenderPhaseType::MultidrawableMesh { phase_type = BinnedRenderPhaseType::BatchableMesh; } match phase_type { BinnedRenderPhaseType::MultidrawableMesh => { match self.multidrawable_meshes.entry(batch_set_key.clone()) { indexmap::map::Entry::Occupied(mut entry) => { entry .get_mut() .entry(bin_key.clone()) .or_default() .insert(main_entity, input_uniform_index); } indexmap::map::Entry::Vacant(entry) => { let mut new_batch_set = IndexMap::default(); new_batch_set.insert( bin_key.clone(), RenderBin::from_entity(main_entity, input_uniform_index), ); entry.insert(new_batch_set); } } } BinnedRenderPhaseType::BatchableMesh => { match self .batchable_meshes .entry((batch_set_key.clone(), bin_key.clone()).clone()) { indexmap::map::Entry::Occupied(mut entry) => { entry.get_mut().insert(main_entity, input_uniform_index); } indexmap::map::Entry::Vacant(entry) => { entry.insert(RenderBin::from_entity(main_entity, input_uniform_index)); } } } BinnedRenderPhaseType::UnbatchableMesh => { match self .unbatchable_meshes .entry((batch_set_key.clone(), bin_key.clone())) { indexmap::map::Entry::Occupied(mut entry) => { entry.get_mut().entities.insert(main_entity, entity); } indexmap::map::Entry::Vacant(entry) => { let mut entities = MainEntityHashMap::default(); entities.insert(main_entity, entity); entry.insert(UnbatchableBinnedEntities { entities, buffer_indices: default(), }); } } } BinnedRenderPhaseType::NonMesh => { // We don't process these items further. 
match self .non_mesh_items .entry((batch_set_key.clone(), bin_key.clone()).clone()) { indexmap::map::Entry::Occupied(mut entry) => { entry.get_mut().entities.insert(main_entity, entity); } indexmap::map::Entry::Vacant(entry) => { let mut entities = MainEntityHashMap::default(); entities.insert(main_entity, entity); entry.insert(NonMeshEntities { entities }); } } } } // Update the cache. self.update_cache( main_entity, Some(CachedBinKey { batch_set_key, bin_key, phase_type, }), change_tick, ); } /// Inserts an entity into the cache with the given change tick. pub fn update_cache( &mut self, main_entity: MainEntity, cached_bin_key: Option<CachedBinKey<BPI>>, change_tick: Tick, ) { let new_cached_binned_entity = CachedBinnedEntity { cached_bin_key, change_tick, }; let (index, old_cached_binned_entity) = self .cached_entity_bin_keys .insert_full(main_entity, new_cached_binned_entity.clone()); // If the entity changed bins, record its old bin so that we can remove // the entity from it. if let Some(old_cached_binned_entity) = old_cached_binned_entity && old_cached_binned_entity.cached_bin_key != new_cached_binned_entity.cached_bin_key { self.entities_that_changed_bins.push(EntityThatChangedBins { main_entity, old_cached_binned_entity, }); } // Mark the entity as valid. self.valid_cached_entity_bin_keys.grow_and_insert(index); } /// Encodes the GPU commands needed to render all entities in this phase. pub fn render<'w>( &self, render_pass: &mut TrackedRenderPass<'w>, world: &'w World, view: Entity, ) -> Result<(), DrawError> { { let draw_functions = world.resource::<DrawFunctions<BPI>>(); let mut draw_functions = draw_functions.write(); draw_functions.prepare(world); // Make sure to drop the reader-writer lock here to avoid recursive // locks. 
} self.render_batchable_meshes(render_pass, world, view)?; self.render_unbatchable_meshes(render_pass, world, view)?; self.render_non_meshes(render_pass, world, view)?; Ok(()) } /// Renders all batchable meshes queued in this phase. fn render_batchable_meshes<'w>( &self, render_pass: &mut TrackedRenderPass<'w>, world: &'w World, view: Entity, ) -> Result<(), DrawError> { let draw_functions = world.resource::<DrawFunctions<BPI>>(); let mut draw_functions = draw_functions.write(); let render_device = world.resource::<RenderDevice>(); let render_adapter_info = world.resource::<RenderAdapterInfo>(); let multi_draw_indirect_count_supported = render_device .features() .contains(Features::MULTI_DRAW_INDIRECT_COUNT) // TODO: https://github.com/gfx-rs/wgpu/issues/7974 && !matches!(render_adapter_info.backend, wgpu::Backend::Dx12); match self.batch_sets { BinnedRenderPhaseBatchSets::DynamicUniforms(ref batch_sets) => { debug_assert_eq!(self.batchable_meshes.len(), batch_sets.len()); for ((batch_set_key, bin_key), batch_set) in self.batchable_meshes.keys().zip(batch_sets.iter()) { for batch in batch_set { let binned_phase_item = BPI::new( batch_set_key.clone(), bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), batch.extra_index.clone(), ); // Fetch the draw function. let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) else { continue; }; draw_function.draw(world, render_pass, view, &binned_phase_item)?; } } } BinnedRenderPhaseBatchSets::Direct(ref batch_set) => { for (batch, (batch_set_key, bin_key)) in batch_set.iter().zip(self.batchable_meshes.keys()) { let binned_phase_item = BPI::new( batch_set_key.clone(), bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), batch.extra_index.clone(), ); // Fetch the draw function. 
let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) else { continue; }; draw_function.draw(world, render_pass, view, &binned_phase_item)?; } } BinnedRenderPhaseBatchSets::MultidrawIndirect(ref batch_sets) => { for (batch_set_key, batch_set) in self .multidrawable_meshes .keys() .chain( self.batchable_meshes .keys() .map(|(batch_set_key, _)| batch_set_key), ) .zip(batch_sets.iter()) { let batch = &batch_set.first_batch; let batch_set_index = if multi_draw_indirect_count_supported { NonMaxU32::new(batch_set.index) } else { None }; let binned_phase_item = BPI::new( batch_set_key.clone(), batch_set.bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), match batch.extra_index { PhaseItemExtraIndex::None => PhaseItemExtraIndex::None, PhaseItemExtraIndex::DynamicOffset(ref dynamic_offset) => { PhaseItemExtraIndex::DynamicOffset(*dynamic_offset) } PhaseItemExtraIndex::IndirectParametersIndex { ref range, .. } => { PhaseItemExtraIndex::IndirectParametersIndex { range: range.start..(range.start + batch_set.batch_count), batch_set_index, } } }, ); // Fetch the draw function. let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) else { continue; }; draw_function.draw(world, render_pass, view, &binned_phase_item)?; } } } Ok(()) } /// Renders all unbatchable meshes queued in this phase. 
fn render_unbatchable_meshes<'w>( &self, render_pass: &mut TrackedRenderPass<'w>, world: &'w World, view: Entity, ) -> Result<(), DrawError> { let draw_functions = world.resource::<DrawFunctions<BPI>>(); let mut draw_functions = draw_functions.write(); for (batch_set_key, bin_key) in self.unbatchable_meshes.keys() { let unbatchable_entities = &self.unbatchable_meshes[&(batch_set_key.clone(), bin_key.clone())]; for (entity_index, entity) in unbatchable_entities.entities.iter().enumerate() { let unbatchable_dynamic_offset = match &unbatchable_entities.buffer_indices { UnbatchableBinnedEntityIndexSet::NoEntities => { // Shouldn't happen… continue; } UnbatchableBinnedEntityIndexSet::Sparse { instance_range, first_indirect_parameters_index, } => UnbatchableBinnedEntityIndices { instance_index: instance_range.start + entity_index as u32, extra_index: match first_indirect_parameters_index { None => PhaseItemExtraIndex::None, Some(first_indirect_parameters_index) => { let first_indirect_parameters_index_for_entity = u32::from(*first_indirect_parameters_index) + entity_index as u32; PhaseItemExtraIndex::IndirectParametersIndex { range: first_indirect_parameters_index_for_entity ..(first_indirect_parameters_index_for_entity + 1), batch_set_index: None, } } }, }, UnbatchableBinnedEntityIndexSet::Dense(dynamic_offsets) => { dynamic_offsets[entity_index].clone() } }; let binned_phase_item = BPI::new( batch_set_key.clone(), bin_key.clone(), (*entity.1, *entity.0), unbatchable_dynamic_offset.instance_index ..(unbatchable_dynamic_offset.instance_index + 1), unbatchable_dynamic_offset.extra_index, ); // Fetch the draw function. let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) else { continue; }; draw_function.draw(world, render_pass, view, &binned_phase_item)?; } } Ok(()) } /// Renders all objects of type [`BinnedRenderPhaseType::NonMesh`]. /// /// These will have been added by plugins or the application. 
fn render_non_meshes<'w>( &self, render_pass: &mut TrackedRenderPass<'w>, world: &'w World, view: Entity, ) -> Result<(), DrawError> { let draw_functions = world.resource::<DrawFunctions<BPI>>(); let mut draw_functions = draw_functions.write(); for ((batch_set_key, bin_key), non_mesh_entities) in &self.non_mesh_items { for (main_entity, entity) in non_mesh_entities.entities.iter() {
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/internal.rs
crates/bevy_render/src/diagnostic/internal.rs
use alloc::{borrow::Cow, sync::Arc}; use core::{ ops::{DerefMut, Range}, sync::atomic::{AtomicBool, Ordering}, }; use std::thread::{self, ThreadId}; use bevy_diagnostic::{Diagnostic, DiagnosticMeasurement, DiagnosticPath, DiagnosticsStore}; use bevy_ecs::resource::Resource; use bevy_ecs::system::{Res, ResMut}; use bevy_platform::time::Instant; use std::sync::Mutex; use wgpu::{ Buffer, BufferDescriptor, BufferUsages, CommandEncoder, ComputePass, Features, MapMode, PipelineStatisticsTypes, QuerySet, QuerySetDescriptor, QueryType, RenderPass, }; use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue, WgpuWrapper}; use super::RecordDiagnostics; // buffer offset must be divisible by 256, so this constant must be divisible by 32 (=256/8) const MAX_TIMESTAMP_QUERIES: u32 = 256; const MAX_PIPELINE_STATISTICS: u32 = 128; const TIMESTAMP_SIZE: u64 = 8; const PIPELINE_STATISTICS_SIZE: u64 = 40; struct DiagnosticsRecorderInternal { timestamp_period_ns: f32, features: Features, current_frame: Mutex<FrameData>, submitted_frames: Vec<FrameData>, finished_frames: Vec<FrameData>, #[cfg(feature = "tracing-tracy")] tracy_gpu_context: tracy_client::GpuContext, } /// Records diagnostics into [`QuerySet`]'s keeping track of the mapping between /// spans and indices to the corresponding entries in the [`QuerySet`]. #[derive(Resource)] pub struct DiagnosticsRecorder(WgpuWrapper<DiagnosticsRecorderInternal>); impl DiagnosticsRecorder { /// Creates the new `DiagnosticsRecorder`. 
pub fn new( adapter_info: &RenderAdapterInfo, device: &RenderDevice, queue: &RenderQueue, ) -> DiagnosticsRecorder { let features = device.features(); #[cfg(feature = "tracing-tracy")] let tracy_gpu_context = super::tracy_gpu::new_tracy_gpu_context(adapter_info, device, queue); let _ = adapter_info; // Prevent unused variable warnings when tracing-tracy is not enabled DiagnosticsRecorder(WgpuWrapper::new(DiagnosticsRecorderInternal { timestamp_period_ns: queue.get_timestamp_period(), features, current_frame: Mutex::new(FrameData::new( device, features, #[cfg(feature = "tracing-tracy")] tracy_gpu_context.clone(), )), submitted_frames: Vec::new(), finished_frames: Vec::new(), #[cfg(feature = "tracing-tracy")] tracy_gpu_context, })) } fn current_frame_mut(&mut self) -> &mut FrameData { self.0.current_frame.get_mut().expect("lock poisoned") } fn current_frame_lock(&self) -> impl DerefMut<Target = FrameData> + '_ { self.0.current_frame.lock().expect("lock poisoned") } /// Begins recording diagnostics for a new frame. pub fn begin_frame(&mut self) { let internal = &mut self.0; let mut idx = 0; while idx < internal.submitted_frames.len() { let timestamp = internal.timestamp_period_ns; if internal.submitted_frames[idx].run_mapped_callback(timestamp) { let removed = internal.submitted_frames.swap_remove(idx); internal.finished_frames.push(removed); } else { idx += 1; } } self.current_frame_mut().begin(); } /// Copies data from [`QuerySet`]'s to a [`Buffer`], after which it can be downloaded to CPU. /// /// Should be called before [`DiagnosticsRecorder::finish_frame`]. pub fn resolve(&mut self, encoder: &mut CommandEncoder) { self.current_frame_mut().resolve(encoder); } /// Finishes recording diagnostics for the current frame. /// /// The specified `callback` will be invoked when diagnostics become available. /// /// Should be called after [`DiagnosticsRecorder::resolve`], /// and **after** all commands buffers have been queued. 
pub fn finish_frame( &mut self, device: &RenderDevice, callback: impl FnOnce(RenderDiagnostics) + Send + Sync + 'static, ) { #[cfg(feature = "tracing-tracy")] let tracy_gpu_context = self.0.tracy_gpu_context.clone(); let internal = &mut self.0; internal .current_frame .get_mut() .expect("lock poisoned") .finish(callback); // reuse one of the finished frames, if we can let new_frame = match internal.finished_frames.pop() { Some(frame) => frame, None => FrameData::new( device, internal.features, #[cfg(feature = "tracing-tracy")] tracy_gpu_context, ), }; let old_frame = core::mem::replace( internal.current_frame.get_mut().expect("lock poisoned"), new_frame, ); internal.submitted_frames.push(old_frame); } } impl RecordDiagnostics for DiagnosticsRecorder { fn begin_time_span<E: WriteTimestamp>(&self, encoder: &mut E, span_name: Cow<'static, str>) { self.current_frame_lock() .begin_time_span(encoder, span_name); } fn end_time_span<E: WriteTimestamp>(&self, encoder: &mut E) { self.current_frame_lock().end_time_span(encoder); } fn begin_pass_span<P: Pass>(&self, pass: &mut P, span_name: Cow<'static, str>) { self.current_frame_lock().begin_pass(pass, span_name); } fn end_pass_span<P: Pass>(&self, pass: &mut P) { self.current_frame_lock().end_pass(pass); } } struct SpanRecord { thread_id: ThreadId, path_range: Range<usize>, pass_kind: Option<PassKind>, begin_timestamp_index: Option<u32>, end_timestamp_index: Option<u32>, begin_instant: Option<Instant>, end_instant: Option<Instant>, pipeline_statistics_index: Option<u32>, } struct FrameData { timestamps_query_set: Option<QuerySet>, num_timestamps: u32, supports_timestamps_inside_passes: bool, supports_timestamps_inside_encoders: bool, pipeline_statistics_query_set: Option<QuerySet>, num_pipeline_statistics: u32, buffer_size: u64, pipeline_statistics_buffer_offset: u64, resolve_buffer: Option<Buffer>, read_buffer: Option<Buffer>, path_components: Vec<Cow<'static, str>>, open_spans: Vec<SpanRecord>, closed_spans: 
Vec<SpanRecord>, is_mapped: Arc<AtomicBool>, callback: Option<Box<dyn FnOnce(RenderDiagnostics) + Send + Sync + 'static>>, #[cfg(feature = "tracing-tracy")] tracy_gpu_context: tracy_client::GpuContext, } impl FrameData { fn new( device: &RenderDevice, features: Features, #[cfg(feature = "tracing-tracy")] tracy_gpu_context: tracy_client::GpuContext, ) -> FrameData { let wgpu_device = device.wgpu_device(); let mut buffer_size = 0; let timestamps_query_set = if features.contains(Features::TIMESTAMP_QUERY) { buffer_size += u64::from(MAX_TIMESTAMP_QUERIES) * TIMESTAMP_SIZE; Some(wgpu_device.create_query_set(&QuerySetDescriptor { label: Some("timestamps_query_set"), ty: QueryType::Timestamp, count: MAX_TIMESTAMP_QUERIES, })) } else { None }; let pipeline_statistics_buffer_offset = buffer_size; let pipeline_statistics_query_set = if features.contains(Features::PIPELINE_STATISTICS_QUERY) { buffer_size += u64::from(MAX_PIPELINE_STATISTICS) * PIPELINE_STATISTICS_SIZE; Some(wgpu_device.create_query_set(&QuerySetDescriptor { label: Some("pipeline_statistics_query_set"), ty: QueryType::PipelineStatistics(PipelineStatisticsTypes::all()), count: MAX_PIPELINE_STATISTICS, })) } else { None }; let (resolve_buffer, read_buffer) = if buffer_size > 0 { let resolve_buffer = wgpu_device.create_buffer(&BufferDescriptor { label: Some("render_statistics_resolve_buffer"), size: buffer_size, usage: BufferUsages::QUERY_RESOLVE | BufferUsages::COPY_SRC, mapped_at_creation: false, }); let read_buffer = wgpu_device.create_buffer(&BufferDescriptor { label: Some("render_statistics_read_buffer"), size: buffer_size, usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ, mapped_at_creation: false, }); (Some(resolve_buffer), Some(read_buffer)) } else { (None, None) }; FrameData { timestamps_query_set, num_timestamps: 0, supports_timestamps_inside_passes: features .contains(Features::TIMESTAMP_QUERY_INSIDE_PASSES), supports_timestamps_inside_encoders: features 
.contains(Features::TIMESTAMP_QUERY_INSIDE_ENCODERS), pipeline_statistics_query_set, num_pipeline_statistics: 0, buffer_size, pipeline_statistics_buffer_offset, resolve_buffer, read_buffer, path_components: Vec::new(), open_spans: Vec::new(), closed_spans: Vec::new(), is_mapped: Arc::new(AtomicBool::new(false)), callback: None, #[cfg(feature = "tracing-tracy")] tracy_gpu_context, } } fn begin(&mut self) { self.num_timestamps = 0; self.num_pipeline_statistics = 0; self.path_components.clear(); self.open_spans.clear(); self.closed_spans.clear(); } fn write_timestamp( &mut self, encoder: &mut impl WriteTimestamp, is_inside_pass: bool, ) -> Option<u32> { // `encoder.write_timestamp` is unsupported on WebGPU. if !self.supports_timestamps_inside_encoders { return None; } if is_inside_pass && !self.supports_timestamps_inside_passes { return None; } if self.num_timestamps >= MAX_TIMESTAMP_QUERIES { return None; } let set = self.timestamps_query_set.as_ref()?; let index = self.num_timestamps; encoder.write_timestamp(set, index); self.num_timestamps += 1; Some(index) } fn write_pipeline_statistics( &mut self, encoder: &mut impl WritePipelineStatistics, ) -> Option<u32> { if self.num_pipeline_statistics >= MAX_PIPELINE_STATISTICS { return None; } let set = self.pipeline_statistics_query_set.as_ref()?; let index = self.num_pipeline_statistics; encoder.begin_pipeline_statistics_query(set, index); self.num_pipeline_statistics += 1; Some(index) } fn open_span( &mut self, pass_kind: Option<PassKind>, name: Cow<'static, str>, ) -> &mut SpanRecord { let thread_id = thread::current().id(); let parent = self.open_spans.iter().rfind(|v| v.thread_id == thread_id); let path_range = match &parent { Some(parent) if parent.path_range.end == self.path_components.len() => { parent.path_range.start..parent.path_range.end + 1 } Some(parent) => { self.path_components .extend_from_within(parent.path_range.clone()); self.path_components.len() - parent.path_range.len()..self.path_components.len() + 
1 } None => self.path_components.len()..self.path_components.len() + 1, }; self.path_components.push(name); self.open_spans.push(SpanRecord { thread_id, path_range, pass_kind, begin_timestamp_index: None, end_timestamp_index: None, begin_instant: None, end_instant: None, pipeline_statistics_index: None, }); self.open_spans.last_mut().unwrap() } fn close_span(&mut self) -> &mut SpanRecord { let thread_id = thread::current().id(); let iter = self.open_spans.iter(); let (index, _) = iter .enumerate() .rfind(|(_, v)| v.thread_id == thread_id) .unwrap(); let span = self.open_spans.swap_remove(index); self.closed_spans.push(span); self.closed_spans.last_mut().unwrap() } fn begin_time_span(&mut self, encoder: &mut impl WriteTimestamp, name: Cow<'static, str>) { let begin_instant = Instant::now(); let begin_timestamp_index = self.write_timestamp(encoder, false); let span = self.open_span(None, name); span.begin_instant = Some(begin_instant); span.begin_timestamp_index = begin_timestamp_index; } fn end_time_span(&mut self, encoder: &mut impl WriteTimestamp) { let end_timestamp_index = self.write_timestamp(encoder, false); let span = self.close_span(); span.end_timestamp_index = end_timestamp_index; span.end_instant = Some(Instant::now()); } fn begin_pass<P: Pass>(&mut self, pass: &mut P, name: Cow<'static, str>) { let begin_instant = Instant::now(); let begin_timestamp_index = self.write_timestamp(pass, true); let pipeline_statistics_index = self.write_pipeline_statistics(pass); let span = self.open_span(Some(P::KIND), name); span.begin_instant = Some(begin_instant); span.begin_timestamp_index = begin_timestamp_index; span.pipeline_statistics_index = pipeline_statistics_index; } fn end_pass(&mut self, pass: &mut impl Pass) { let end_timestamp_index = self.write_timestamp(pass, true); let span = self.close_span(); span.end_timestamp_index = end_timestamp_index; if span.pipeline_statistics_index.is_some() { pass.end_pipeline_statistics_query(); } span.end_instant = 
Some(Instant::now()); } fn resolve(&mut self, encoder: &mut CommandEncoder) { let Some(resolve_buffer) = &self.resolve_buffer else { return; }; match &self.timestamps_query_set { Some(set) if self.num_timestamps > 0 => { encoder.resolve_query_set(set, 0..self.num_timestamps, resolve_buffer, 0); } _ => {} } match &self.pipeline_statistics_query_set { Some(set) if self.num_pipeline_statistics > 0 => { encoder.resolve_query_set( set, 0..self.num_pipeline_statistics, resolve_buffer, self.pipeline_statistics_buffer_offset, ); } _ => {} } let Some(read_buffer) = &self.read_buffer else { return; }; encoder.copy_buffer_to_buffer(resolve_buffer, 0, read_buffer, 0, self.buffer_size); } fn diagnostic_path(&self, range: &Range<usize>, field: &str) -> DiagnosticPath { DiagnosticPath::from_components( core::iter::once("render") .chain(self.path_components[range.clone()].iter().map(|v| &**v)) .chain(core::iter::once(field)), ) } fn finish(&mut self, callback: impl FnOnce(RenderDiagnostics) + Send + Sync + 'static) { let Some(read_buffer) = &self.read_buffer else { // we still have cpu timings, so let's use them let mut diagnostics = Vec::new(); for span in &self.closed_spans { if let (Some(begin), Some(end)) = (span.begin_instant, span.end_instant) { diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "elapsed_cpu"), suffix: "ms", value: (end - begin).as_secs_f64() * 1000.0, }); } } callback(RenderDiagnostics(diagnostics)); return; }; self.callback = Some(Box::new(callback)); let is_mapped = self.is_mapped.clone(); read_buffer.slice(..).map_async(MapMode::Read, move |res| { if let Err(e) = res { tracing::warn!("Failed to download render statistics buffer: {e}"); return; } is_mapped.store(true, Ordering::Release); }); } // returns true if the frame is considered finished, false otherwise fn run_mapped_callback(&mut self, timestamp_period_ns: f32) -> bool { let Some(read_buffer) = &self.read_buffer else { return true; }; if 
!self.is_mapped.load(Ordering::Acquire) { // need to wait more return false; } let Some(callback) = self.callback.take() else { return true; }; let data = read_buffer.slice(..).get_mapped_range(); let timestamps = data[..(self.num_timestamps * 8) as usize] .chunks(8) .map(|v| u64::from_le_bytes(v.try_into().unwrap())) .collect::<Vec<u64>>(); let start = self.pipeline_statistics_buffer_offset as usize; let len = (self.num_pipeline_statistics as usize) * 40; let pipeline_statistics = data[start..start + len] .chunks(8) .map(|v| u64::from_le_bytes(v.try_into().unwrap())) .collect::<Vec<u64>>(); let mut diagnostics = Vec::new(); for span in &self.closed_spans { if let (Some(begin), Some(end)) = (span.begin_instant, span.end_instant) { diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "elapsed_cpu"), suffix: "ms", value: (end - begin).as_secs_f64() * 1000.0, }); } if let (Some(begin), Some(end)) = (span.begin_timestamp_index, span.end_timestamp_index) { let begin = timestamps[begin as usize] as f64; let end = timestamps[end as usize] as f64; let value = (end - begin) * (timestamp_period_ns as f64) / 1e6; #[cfg(feature = "tracing-tracy")] { // Calling span_alloc() and end_zone() here instead of in open_span() and close_span() means that tracy does not know where each GPU command was recorded on the CPU timeline. // Unfortunately we must do it this way, because tracy does not play nicely with multithreaded command recording. The start/end pairs would get all mixed up. // The GPU spans themselves are still accurate though, and it's probably safe to assume that each GPU span in frame N belongs to the corresponding CPU render node span from frame N-1. 
let name = &self.path_components[span.path_range.clone()].join("/"); let mut tracy_gpu_span = self.tracy_gpu_context.span_alloc(name, "", "", 0).unwrap(); tracy_gpu_span.end_zone(); tracy_gpu_span.upload_timestamp_start(begin as i64); tracy_gpu_span.upload_timestamp_end(end as i64); } diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "elapsed_gpu"), suffix: "ms", value, }); } if let Some(index) = span.pipeline_statistics_index { let index = (index as usize) * 5; if span.pass_kind == Some(PassKind::Render) { diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "vertex_shader_invocations"), suffix: "", value: pipeline_statistics[index] as f64, }); diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "clipper_invocations"), suffix: "", value: pipeline_statistics[index + 1] as f64, }); diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "clipper_primitives_out"), suffix: "", value: pipeline_statistics[index + 2] as f64, }); diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "fragment_shader_invocations"), suffix: "", value: pipeline_statistics[index + 3] as f64, }); } if span.pass_kind == Some(PassKind::Compute) { diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "compute_shader_invocations"), suffix: "", value: pipeline_statistics[index + 4] as f64, }); } } } callback(RenderDiagnostics(diagnostics)); drop(data); read_buffer.unmap(); self.is_mapped.store(false, Ordering::Release); true } } /// Resource which stores render diagnostics of the most recent frame. #[derive(Debug, Default, Clone, Resource)] pub struct RenderDiagnostics(Vec<RenderDiagnostic>); /// A render diagnostic which has been recorded, but not yet stored in [`DiagnosticsStore`]. 
#[derive(Debug, Clone, Resource)]
pub struct RenderDiagnostic {
    // Diagnostic identity within the `DiagnosticsStore`, e.g. `render/<span>/elapsed_cpu`.
    pub path: DiagnosticPath,
    // Unit suffix shown next to the value (e.g. "ms"); empty for plain counters.
    pub suffix: &'static str,
    // The recorded measurement for this frame.
    pub value: f64,
}

/// Stores render diagnostics before they can be synced with the main app.
///
/// This mutex is locked twice per frame:
/// 1. in `PreUpdate`, during [`sync_diagnostics`],
/// 2. after rendering has finished and statistics have been downloaded from GPU.
#[derive(Debug, Default, Clone, Resource)]
pub struct RenderDiagnosticsMutex(pub(crate) Arc<Mutex<Option<RenderDiagnostics>>>);

/// Updates render diagnostics measurements.
///
/// Takes the pending frame's [`RenderDiagnostics`] out of the shared mutex (if any)
/// and folds each entry into the main-world [`DiagnosticsStore`], registering the
/// diagnostic on first sight.
pub fn sync_diagnostics(mutex: Res<RenderDiagnosticsMutex>, mut store: ResMut<DiagnosticsStore>) {
    // `take()` empties the slot, so each frame's diagnostics are consumed exactly once.
    // A poisoned lock is treated the same as "no new diagnostics".
    let Some(diagnostics) = mutex.0.lock().ok().and_then(|mut v| v.take()) else {
        return;
    };

    // All measurements from this batch share one timestamp.
    let time = Instant::now();

    for diagnostic in &diagnostics.0 {
        // Lazily register the diagnostic in the store the first time it is seen.
        if store.get(&diagnostic.path).is_none() {
            store.add(Diagnostic::new(diagnostic.path.clone()).with_suffix(diagnostic.suffix));
        }

        // `unwrap` is safe: the entry was just added above if it was missing.
        store
            .get_mut(&diagnostic.path)
            .unwrap()
            .add_measurement(DiagnosticMeasurement {
                time,
                value: diagnostic.value,
            });
    }
}

/// Abstraction over encoders/passes that can record a GPU timestamp into a query set.
pub trait WriteTimestamp {
    fn write_timestamp(&mut self, query_set: &QuerySet, index: u32);
}

impl WriteTimestamp for CommandEncoder {
    fn write_timestamp(&mut self, query_set: &QuerySet, index: u32) {
        if cfg!(target_os = "macos") {
            // When using tracy (and thus this function), rendering was flickering on macOS Tahoe.
            // See: https://github.com/bevyengine/bevy/issues/22257
            // The issue seems to be triggered when `write_timestamp` is called very close to frame
            // presentation.
            return;
        }
        CommandEncoder::write_timestamp(self, query_set, index);
    }
}

impl WriteTimestamp for RenderPass<'_> {
    fn write_timestamp(&mut self, query_set: &QuerySet, index: u32) {
        RenderPass::write_timestamp(self, query_set, index);
    }
}

impl WriteTimestamp for ComputePass<'_> {
    fn write_timestamp(&mut self, query_set: &QuerySet, index: u32) {
        ComputePass::write_timestamp(self, query_set, index);
    }
}

/// Abstraction over passes that can record pipeline-statistics queries
/// (shader invocation counts, clipper output, etc.).
pub trait WritePipelineStatistics {
    fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, index: u32);

    fn end_pipeline_statistics_query(&mut self);
}

impl WritePipelineStatistics for RenderPass<'_> {
    fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, index: u32) {
        RenderPass::begin_pipeline_statistics_query(self, query_set, index);
    }

    fn end_pipeline_statistics_query(&mut self) {
        RenderPass::end_pipeline_statistics_query(self);
    }
}

impl WritePipelineStatistics for ComputePass<'_> {
    fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, index: u32) {
        ComputePass::begin_pipeline_statistics_query(self, query_set, index);
    }

    fn end_pipeline_statistics_query(&mut self) {
        ComputePass::end_pipeline_statistics_query(self);
    }
}

/// A pass (render or compute) that supports both timestamps and pipeline statistics.
///
/// [`Pass::KIND`] lets the recorder know which subset of pipeline-statistics
/// fields is meaningful for this pass type.
pub trait Pass: WritePipelineStatistics + WriteTimestamp {
    const KIND: PassKind;
}

impl Pass for RenderPass<'_> {
    const KIND: PassKind = PassKind::Render;
}

impl Pass for ComputePass<'_> {
    const KIND: PassKind = PassKind::Compute;
}

/// Whether a diagnostic span was recorded inside a render pass or a compute pass.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum PassKind {
    Render,
    Compute,
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/mesh_allocator_diagnostic_plugin.rs
crates/bevy_render/src/diagnostic/mesh_allocator_diagnostic_plugin.rs
use bevy_app::{Plugin, PreUpdate}; use bevy_diagnostic::{Diagnostic, DiagnosticPath, Diagnostics, RegisterDiagnostic}; use bevy_ecs::{resource::Resource, system::Res}; use bevy_platform::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use crate::{mesh::allocator::MeshAllocator, Extract, ExtractSchedule, RenderApp}; /// Number of meshes allocated by the allocator static MESH_ALLOCATOR_SLABS: DiagnosticPath = DiagnosticPath::const_new("mesh_allocator_slabs"); /// Total size of all slabs static MESH_ALLOCATOR_SLABS_SIZE: DiagnosticPath = DiagnosticPath::const_new("mesh_allocator_slabs_size"); /// Number of meshes allocated into slabs static MESH_ALLOCATOR_ALLOCATIONS: DiagnosticPath = DiagnosticPath::const_new("mesh_allocator_allocations"); pub struct MeshAllocatorDiagnosticPlugin; impl MeshAllocatorDiagnosticPlugin { /// Get the [`DiagnosticPath`] for slab count pub fn slabs_diagnostic_path() -> &'static DiagnosticPath { &MESH_ALLOCATOR_SLABS } /// Get the [`DiagnosticPath`] for total slabs size pub fn slabs_size_diagnostic_path() -> &'static DiagnosticPath { &MESH_ALLOCATOR_SLABS_SIZE } /// Get the [`DiagnosticPath`] for mesh allocations pub fn allocations_diagnostic_path() -> &'static DiagnosticPath { &MESH_ALLOCATOR_ALLOCATIONS } } impl Plugin for MeshAllocatorDiagnosticPlugin { fn build(&self, app: &mut bevy_app::App) { app.register_diagnostic( Diagnostic::new(MESH_ALLOCATOR_SLABS.clone()).with_suffix(" slabs"), ) .register_diagnostic( Diagnostic::new(MESH_ALLOCATOR_SLABS_SIZE.clone()).with_suffix(" bytes"), ) .register_diagnostic( Diagnostic::new(MESH_ALLOCATOR_ALLOCATIONS.clone()).with_suffix(" meshes"), ) .init_resource::<MeshAllocatorMeasurements>() .add_systems(PreUpdate, add_mesh_allocator_measurement); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.add_systems(ExtractSchedule, measure_allocator); } } } #[derive(Debug, Default, Resource)] struct MeshAllocatorMeasurements { slabs: AtomicUsize, slabs_size: AtomicU64, allocations: 
AtomicUsize, } fn add_mesh_allocator_measurement( mut diagnostics: Diagnostics, measurements: Res<MeshAllocatorMeasurements>, ) { diagnostics.add_measurement(&MESH_ALLOCATOR_SLABS, || { measurements.slabs.load(Ordering::Relaxed) as f64 }); diagnostics.add_measurement(&MESH_ALLOCATOR_SLABS_SIZE, || { measurements.slabs_size.load(Ordering::Relaxed) as f64 }); diagnostics.add_measurement(&MESH_ALLOCATOR_ALLOCATIONS, || { measurements.allocations.load(Ordering::Relaxed) as f64 }); } fn measure_allocator( measurements: Extract<Res<MeshAllocatorMeasurements>>, allocator: Res<MeshAllocator>, ) { measurements .slabs .store(allocator.slab_count(), Ordering::Relaxed); measurements .slabs_size .store(allocator.slabs_size(), Ordering::Relaxed); measurements .allocations .store(allocator.allocations(), Ordering::Relaxed); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/mod.rs
crates/bevy_render/src/diagnostic/mod.rs
//! Infrastructure for recording render diagnostics. //! //! For more info, see [`RenderDiagnosticsPlugin`]. mod erased_render_asset_diagnostic_plugin; pub(crate) mod internal; mod mesh_allocator_diagnostic_plugin; mod render_asset_diagnostic_plugin; #[cfg(feature = "tracing-tracy")] mod tracy_gpu; use alloc::{borrow::Cow, sync::Arc}; use core::marker::PhantomData; use bevy_app::{App, Plugin, PreUpdate}; use crate::{renderer::RenderAdapterInfo, RenderApp}; use self::internal::{ sync_diagnostics, DiagnosticsRecorder, Pass, RenderDiagnosticsMutex, WriteTimestamp, }; pub use self::{ erased_render_asset_diagnostic_plugin::ErasedRenderAssetDiagnosticPlugin, mesh_allocator_diagnostic_plugin::MeshAllocatorDiagnosticPlugin, render_asset_diagnostic_plugin::RenderAssetDiagnosticPlugin, }; use crate::renderer::{RenderDevice, RenderQueue}; /// Enables collecting render diagnostics, such as CPU/GPU elapsed time per render pass, /// as well as pipeline statistics (number of primitives, number of shader invocations, etc). /// /// To access the diagnostics, you can use the [`DiagnosticsStore`](bevy_diagnostic::DiagnosticsStore) resource, /// add [`LogDiagnosticsPlugin`](bevy_diagnostic::LogDiagnosticsPlugin), or use [Tracy](https://github.com/bevyengine/bevy/blob/main/docs/profiling.md#tracy-renderqueue). /// /// To record diagnostics in your own passes: /// 1. First, obtain the diagnostic recorder using [`RenderContext::diagnostic_recorder`](crate::renderer::RenderContext::diagnostic_recorder). /// /// It won't do anything unless [`RenderDiagnosticsPlugin`] is present, /// so you're free to omit `#[cfg]` clauses. /// ```ignore /// let diagnostics = render_context.diagnostic_recorder(); /// ``` /// 2. Begin the span inside a command encoder, or a render/compute pass encoder. /// ```ignore /// let time_span = diagnostics.time_span(render_context.command_encoder(), "shadows"); /// ``` /// 3. End the span, providing the same encoder. 
/// ```ignore /// time_span.end(render_context.command_encoder()); /// ``` /// /// # Supported platforms /// Timestamp queries and pipeline statistics are currently supported only on Vulkan and DX12. /// On other platforms (Metal, WebGPU, WebGL2) only CPU time will be recorded. #[derive(Default)] pub struct RenderDiagnosticsPlugin; impl Plugin for RenderDiagnosticsPlugin { fn build(&self, app: &mut App) { let render_diagnostics_mutex = RenderDiagnosticsMutex::default(); app.insert_resource(render_diagnostics_mutex.clone()) .add_systems(PreUpdate, sync_diagnostics); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.insert_resource(render_diagnostics_mutex); } } fn finish(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; let adapter_info = render_app.world().resource::<RenderAdapterInfo>(); let device = render_app.world().resource::<RenderDevice>(); let queue = render_app.world().resource::<RenderQueue>(); render_app.insert_resource(DiagnosticsRecorder::new(adapter_info, device, queue)); } } /// Allows recording diagnostic spans. pub trait RecordDiagnostics: Send + Sync { /// Begin a time span, which will record elapsed CPU and GPU time. /// /// Returns a guard, which will panic on drop unless you end the span. fn time_span<E, N>(&self, encoder: &mut E, name: N) -> TimeSpanGuard<'_, Self, E> where E: WriteTimestamp, N: Into<Cow<'static, str>>, { self.begin_time_span(encoder, name.into()); TimeSpanGuard { recorder: self, marker: PhantomData, } } /// Begin a pass span, which will record elapsed CPU and GPU time, /// as well as pipeline statistics on supported platforms. /// /// Returns a guard, which will panic on drop unless you end the span. 
fn pass_span<P, N>(&self, pass: &mut P, name: N) -> PassSpanGuard<'_, Self, P> where P: Pass, N: Into<Cow<'static, str>>, { let name = name.into(); self.begin_pass_span(pass, name.clone()); PassSpanGuard { recorder: self, name, marker: PhantomData, } } #[doc(hidden)] fn begin_time_span<E: WriteTimestamp>(&self, encoder: &mut E, name: Cow<'static, str>); #[doc(hidden)] fn end_time_span<E: WriteTimestamp>(&self, encoder: &mut E); #[doc(hidden)] fn begin_pass_span<P: Pass>(&self, pass: &mut P, name: Cow<'static, str>); #[doc(hidden)] fn end_pass_span<P: Pass>(&self, pass: &mut P); } /// Guard returned by [`RecordDiagnostics::time_span`]. /// /// Will panic on drop unless [`TimeSpanGuard::end`] is called. pub struct TimeSpanGuard<'a, R: ?Sized, E> { recorder: &'a R, marker: PhantomData<E>, } impl<R: RecordDiagnostics + ?Sized, E: WriteTimestamp> TimeSpanGuard<'_, R, E> { /// End the span. You have to provide the same encoder which was used to begin the span. pub fn end(self, encoder: &mut E) { self.recorder.end_time_span(encoder); core::mem::forget(self); } } impl<R: ?Sized, E> Drop for TimeSpanGuard<'_, R, E> { fn drop(&mut self) { panic!("TimeSpanScope::end was never called") } } /// Guard returned by [`RecordDiagnostics::pass_span`]. /// /// Will panic on drop unless [`PassSpanGuard::end`] is called. pub struct PassSpanGuard<'a, R: ?Sized, P> { recorder: &'a R, name: Cow<'static, str>, marker: PhantomData<P>, } impl<R: RecordDiagnostics + ?Sized, P: Pass> PassSpanGuard<'_, R, P> { /// End the span. You have to provide the same pass which was used to begin the span. 
pub fn end(self, pass: &mut P) { self.recorder.end_pass_span(pass); core::mem::forget(self); } } impl<R: ?Sized, P> Drop for PassSpanGuard<'_, R, P> { fn drop(&mut self) { panic!("PassSpanGuard::end was never called for {}", self.name) } } impl<T: RecordDiagnostics> RecordDiagnostics for Option<Arc<T>> { fn begin_time_span<E: WriteTimestamp>(&self, encoder: &mut E, name: Cow<'static, str>) { if let Some(recorder) = &self { recorder.begin_time_span(encoder, name); } } fn end_time_span<E: WriteTimestamp>(&self, encoder: &mut E) { if let Some(recorder) = &self { recorder.end_time_span(encoder); } } fn begin_pass_span<P: Pass>(&self, pass: &mut P, name: Cow<'static, str>) { if let Some(recorder) = &self { recorder.begin_pass_span(pass, name); } } fn end_pass_span<P: Pass>(&self, pass: &mut P) { if let Some(recorder) = &self { recorder.end_pass_span(pass); } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/tracy_gpu.rs
crates/bevy_render/src/diagnostic/tracy_gpu.rs
use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue};
use tracy_client::{Client, GpuContext, GpuContextType};
use wgpu::{
    Backend, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, MapMode, PollType,
    QuerySetDescriptor, QueryType, QUERY_SIZE,
};

// Creates a Tracy GPU profiling context for the render queue.
//
// The context is seeded with a one-shot GPU timestamp (see `initial_timestamp`) and the
// queue's timestamp period so Tracy can align GPU spans with the CPU timeline.
pub fn new_tracy_gpu_context(
    adapter_info: &RenderAdapterInfo,
    device: &RenderDevice,
    queue: &RenderQueue,
) -> GpuContext {
    // Map the wgpu backend onto the closest Tracy GPU context type; backends without a
    // matching Tracy type fall back to `Invalid`.
    let tracy_gpu_backend = match adapter_info.backend {
        Backend::Vulkan => GpuContextType::Vulkan,
        Backend::Dx12 => GpuContextType::Direct3D12,
        Backend::Gl => GpuContextType::OpenGL,
        Backend::Metal | Backend::BrowserWebGpu | Backend::Noop => GpuContextType::Invalid,
    };

    // NOTE(review): both `unwrap`s assume the Tracy client is running and context creation
    // succeeds — this module is only compiled with the "tracing-tracy" feature, where a
    // missing client is a setup bug.
    let tracy_client = Client::running().unwrap();
    tracy_client
        .new_gpu_context(
            Some("RenderQueue"),
            tracy_gpu_backend,
            initial_timestamp(device, queue),
            queue.get_timestamp_period(),
        )
        .unwrap()
}

// Code copied from https://github.com/Wumpf/wgpu-profiler/blob/f9de342a62cb75f50904a98d11dd2bbeb40ceab8/src/tracy.rs
//
// Records a single GPU timestamp, resolves it into a buffer, reads it back, and returns
// it so Tracy can anchor the GPU timeline. Blocks on the device until the readback completes.
fn initial_timestamp(device: &RenderDevice, queue: &RenderQueue) -> i64 {
    // One-entry timestamp query set for the single calibration sample.
    let query_set = device.wgpu_device().create_query_set(&QuerySetDescriptor {
        label: None,
        ty: QueryType::Timestamp,
        count: 1,
    });

    // Query results must first be resolved into a QUERY_RESOLVE buffer…
    let resolve_buffer = device.create_buffer(&BufferDescriptor {
        label: None,
        size: QUERY_SIZE as _,
        usage: BufferUsages::QUERY_RESOLVE | BufferUsages::COPY_SRC,
        mapped_at_creation: false,
    });

    // …then copied into a MAP_READ buffer for CPU readback.
    let map_buffer = device.create_buffer(&BufferDescriptor {
        label: None,
        size: QUERY_SIZE as _,
        usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    let mut timestamp_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default());
    timestamp_encoder.write_timestamp(&query_set, 0);
    timestamp_encoder.resolve_query_set(&query_set, 0..1, &resolve_buffer, 0);
    // Workaround for https://github.com/gfx-rs/wgpu/issues/6406
    // TODO when that bug is fixed, merge these encoders together again
    let mut copy_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default());
    copy_encoder.copy_buffer_to_buffer(&resolve_buffer, 0, &map_buffer, 0, Some(QUERY_SIZE as _));
    queue.submit([timestamp_encoder.finish(), copy_encoder.finish()]);

    // Block until the map completes; this runs once at startup, so a synchronous wait is fine.
    map_buffer.slice(..).map_async(MapMode::Read, |_| ());
    device
        .poll(PollType::wait_indefinitely())
        .expect("Failed to poll device for map async");
    let view = map_buffer.slice(..).get_mapped_range();
    // The mapped range is exactly QUERY_SIZE (8) bytes: one little-endian u64 timestamp.
    i64::from_le_bytes((*view).try_into().unwrap())
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/erased_render_asset_diagnostic_plugin.rs
crates/bevy_render/src/diagnostic/erased_render_asset_diagnostic_plugin.rs
use core::{any::type_name, marker::PhantomData}; use bevy_app::{Plugin, PreUpdate}; use bevy_diagnostic::{Diagnostic, DiagnosticPath, Diagnostics, RegisterDiagnostic}; use bevy_ecs::{resource::Resource, system::Res}; use bevy_platform::sync::atomic::{AtomicUsize, Ordering}; use crate::{ erased_render_asset::{ErasedRenderAsset, ErasedRenderAssets}, Extract, ExtractSchedule, RenderApp, }; /// Collects diagnostics for a [`ErasedRenderAsset`]. /// /// If the [`ErasedRenderAsset::ErasedAsset`] is shared between other /// [`ErasedRenderAsset`], they all will report the same number. pub struct ErasedRenderAssetDiagnosticPlugin<A: ErasedRenderAsset> { suffix: &'static str, _phantom: PhantomData<A>, } impl<A: ErasedRenderAsset> ErasedRenderAssetDiagnosticPlugin<A> { pub fn new(suffix: &'static str) -> Self { Self { suffix, _phantom: PhantomData, } } pub fn render_asset_diagnostic_path() -> DiagnosticPath { DiagnosticPath::from_components(["erased_render_asset", type_name::<A>()]) } } impl<A: ErasedRenderAsset> Plugin for ErasedRenderAssetDiagnosticPlugin<A> { fn build(&self, app: &mut bevy_app::App) { app.register_diagnostic( Diagnostic::new(Self::render_asset_diagnostic_path()).with_suffix(self.suffix), ) .init_resource::<ErasedRenderAssetMeasurements<A>>() .add_systems(PreUpdate, add_erased_render_asset_measurement::<A>); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.add_systems(ExtractSchedule, measure_erased_render_asset::<A>); } } } #[derive(Debug, Resource)] struct ErasedRenderAssetMeasurements<A: ErasedRenderAsset> { assets: AtomicUsize, _phantom: PhantomData<A>, } impl<A: ErasedRenderAsset> Default for ErasedRenderAssetMeasurements<A> { fn default() -> Self { Self { assets: AtomicUsize::default(), _phantom: PhantomData, } } } fn add_erased_render_asset_measurement<A: ErasedRenderAsset>( mut diagnostics: Diagnostics, measurements: Res<ErasedRenderAssetMeasurements<A>>, ) { diagnostics.add_measurement( 
&ErasedRenderAssetDiagnosticPlugin::<A>::render_asset_diagnostic_path(), || measurements.assets.load(Ordering::Relaxed) as f64, ); } fn measure_erased_render_asset<A: ErasedRenderAsset>( measurements: Extract<Res<ErasedRenderAssetMeasurements<A>>>, assets: Res<ErasedRenderAssets<A::ErasedAsset>>, ) { measurements .assets .store(assets.iter().count(), Ordering::Relaxed); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/diagnostic/render_asset_diagnostic_plugin.rs
crates/bevy_render/src/diagnostic/render_asset_diagnostic_plugin.rs
use core::{any::type_name, marker::PhantomData};

use bevy_app::{Plugin, PreUpdate};
use bevy_diagnostic::{Diagnostic, DiagnosticPath, Diagnostics, RegisterDiagnostic};
use bevy_ecs::{resource::Resource, system::Res};
use bevy_platform::sync::atomic::{AtomicUsize, Ordering};

use crate::{
    render_asset::{RenderAsset, RenderAssets},
    Extract, ExtractSchedule, RenderApp,
};

/// Collects a per-frame count of prepared [`RenderAsset`]s as a diagnostic.
pub struct RenderAssetDiagnosticPlugin<A: RenderAsset> {
    suffix: &'static str,
    _phantom: PhantomData<A>,
}

impl<A: RenderAsset> RenderAssetDiagnosticPlugin<A> {
    /// Creates the plugin with the given unit suffix for the diagnostic readout.
    pub fn new(suffix: &'static str) -> Self {
        RenderAssetDiagnosticPlugin {
            suffix,
            _phantom: PhantomData,
        }
    }

    /// Path of the diagnostic this plugin records, keyed by the asset's type name.
    pub fn render_asset_diagnostic_path() -> DiagnosticPath {
        DiagnosticPath::from_components(["render_asset", type_name::<A>()])
    }
}

impl<A: RenderAsset> Plugin for RenderAssetDiagnosticPlugin<A> {
    fn build(&self, app: &mut bevy_app::App) {
        // Register the diagnostic, the shared counter, and the main-world publisher.
        let diagnostic = Diagnostic::new(Self::render_asset_diagnostic_path());
        app.register_diagnostic(diagnostic.with_suffix(self.suffix));
        app.init_resource::<RenderAssetMeasurements<A>>();
        app.add_systems(PreUpdate, add_render_asset_measurement::<A>);

        // The render world counts the prepared assets during extraction.
        if let Some(render_world) = app.get_sub_app_mut(RenderApp) {
            render_world.add_systems(ExtractSchedule, measure_render_asset::<A>);
        }
    }
}

/// Main-world resource the render world writes the asset count into.
///
/// An atomic is used because extraction only has shared access to the main world.
#[derive(Debug, Resource)]
struct RenderAssetMeasurements<A: RenderAsset> {
    assets: AtomicUsize,
    _phantom: PhantomData<A>,
}

impl<A: RenderAsset> Default for RenderAssetMeasurements<A> {
    fn default() -> Self {
        RenderAssetMeasurements {
            assets: AtomicUsize::new(0),
            _phantom: PhantomData,
        }
    }
}

/// Publishes the latest asset count as a diagnostic measurement.
fn add_render_asset_measurement<A: RenderAsset>(
    mut diagnostics: Diagnostics,
    measurements: Res<RenderAssetMeasurements<A>>,
) {
    let path = RenderAssetDiagnosticPlugin::<A>::render_asset_diagnostic_path();
    let counter = &measurements.assets;
    diagnostics.add_measurement(&path, || counter.load(Ordering::Relaxed) as f64);
}

/// Samples the number of prepared render assets into the measurement resource.
fn measure_render_asset<A: RenderAsset>(
    measurements: Extract<Res<RenderAssetMeasurements<A>>>,
    assets: Res<RenderAssets<A>>,
) {
    let count = assets.iter().count();
    measurements.assets.store(count, Ordering::Relaxed);
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/view/mod.rs
crates/bevy_render/src/view/mod.rs
pub mod visibility; pub mod window; use bevy_camera::{ primitives::Frustum, CameraMainTextureUsages, ClearColor, ClearColorConfig, Exposure, MainPassResolutionOverride, NormalizedRenderTarget, }; use bevy_diagnostic::FrameCount; pub use visibility::*; pub use window::*; use crate::{ camera::{ExtractedCamera, MipBias, NormalizedRenderTargetExt as _, TemporalJitter}, experimental::occlusion_culling::OcclusionCulling, extract_component::ExtractComponentPlugin, render_asset::RenderAssets, render_phase::ViewRangefinder3d, render_resource::{DynamicUniformBuffer, ShaderType, Texture, TextureView}, renderer::{RenderDevice, RenderQueue}, sync_world::MainEntity, texture::{ CachedTexture, ColorAttachment, DepthAttachment, GpuImage, ManualTextureViews, OutputColorAttachment, TextureCache, }, Render, RenderApp, RenderSystems, }; use alloc::sync::Arc; use bevy_app::{App, Plugin}; use bevy_color::LinearRgba; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::prelude::*; use bevy_image::{BevyDefault as _, ToExtents}; use bevy_math::{mat3, vec2, vec3, Mat3, Mat4, UVec4, Vec2, Vec3, Vec4, Vec4Swizzles}; use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render_macros::ExtractComponent; use bevy_shader::load_shader_library; use bevy_transform::components::GlobalTransform; use core::{ ops::Range, sync::atomic::{AtomicUsize, Ordering}, }; use wgpu::{ BufferUsages, RenderPassColorAttachment, RenderPassDepthStencilAttachment, StoreOp, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, }; /// The matrix that converts from the RGB to the LMS color space. 
/// /// To derive this, first we convert from RGB to [CIE 1931 XYZ]: /// /// ```text /// ⎡ X ⎤ ⎡ 0.490 0.310 0.200 ⎤ ⎡ R ⎤ /// ⎢ Y ⎥ = ⎢ 0.177 0.812 0.011 ⎥ ⎢ G ⎥ /// ⎣ Z ⎦ ⎣ 0.000 0.010 0.990 ⎦ ⎣ B ⎦ /// ``` /// /// Then we convert to LMS according to the [CAM16 standard matrix]: /// /// ```text /// ⎡ L ⎤ ⎡ 0.401 0.650 -0.051 ⎤ ⎡ X ⎤ /// ⎢ M ⎥ = ⎢ -0.250 1.204 0.046 ⎥ ⎢ Y ⎥ /// ⎣ S ⎦ ⎣ -0.002 0.049 0.953 ⎦ ⎣ Z ⎦ /// ``` /// /// The resulting matrix is just the concatenation of these two matrices, to do /// the conversion in one step. /// /// [CIE 1931 XYZ]: https://en.wikipedia.org/wiki/CIE_1931_color_space /// [CAM16 standard matrix]: https://en.wikipedia.org/wiki/LMS_color_space static RGB_TO_LMS: Mat3 = mat3( vec3(0.311692, 0.0905138, 0.00764433), vec3(0.652085, 0.901341, 0.0486554), vec3(0.0362225, 0.00814478, 0.943700), ); /// The inverse of the [`RGB_TO_LMS`] matrix, converting from the LMS color /// space back to RGB. static LMS_TO_RGB: Mat3 = mat3( vec3(4.06305, -0.40791, -0.0118812), vec3(-2.93241, 1.40437, -0.0486532), vec3(-0.130646, 0.00353630, 1.0605344), ); /// The [CIE 1931] *xy* chromaticity coordinates of the [D65 white point]. /// /// [CIE 1931]: https://en.wikipedia.org/wiki/CIE_1931_color_space /// [D65 white point]: https://en.wikipedia.org/wiki/Standard_illuminant#D65_values static D65_XY: Vec2 = vec2(0.31272, 0.32903); /// The [D65 white point] in [LMS color space]. 
/// /// [LMS color space]: https://en.wikipedia.org/wiki/LMS_color_space /// [D65 white point]: https://en.wikipedia.org/wiki/Standard_illuminant#D65_values static D65_LMS: Vec3 = vec3(0.975538, 1.01648, 1.08475); pub struct ViewPlugin; impl Plugin for ViewPlugin { fn build(&self, app: &mut App) { load_shader_library!(app, "view.wgsl"); app // NOTE: windows.is_changed() handles cases where a window was resized .add_plugins(( ExtractComponentPlugin::<Hdr>::default(), ExtractComponentPlugin::<Msaa>::default(), ExtractComponentPlugin::<OcclusionCulling>::default(), RenderVisibilityRangePlugin, )); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.add_systems( Render, ( // `TextureView`s need to be dropped before reconfiguring window surfaces. clear_view_attachments .in_set(RenderSystems::ManageViews) .before(create_surfaces), cleanup_view_targets_for_resize .in_set(RenderSystems::ManageViews) .before(create_surfaces), prepare_view_attachments .in_set(RenderSystems::ManageViews) .before(prepare_view_targets) .after(prepare_windows), prepare_view_targets .in_set(RenderSystems::ManageViews) .after(prepare_windows) .after(crate::render_asset::prepare_assets::<GpuImage>) .ambiguous_with(crate::camera::sort_cameras), // doesn't use `sorted_camera_index_for_target` prepare_view_uniforms.in_set(RenderSystems::PrepareResources), ), ); } } fn finish(&self, app: &mut App) { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app .init_resource::<ViewUniforms>() .init_resource::<ViewTargetAttachments>(); } } } /// Component for configuring the number of samples for [Multi-Sample Anti-Aliasing](https://en.wikipedia.org/wiki/Multisample_anti-aliasing) /// for a [`Camera`](bevy_camera::Camera). /// /// Defaults to 4 samples. A higher number of samples results in smoother edges. /// /// Some advanced rendering features may require that MSAA is disabled. /// /// Note that the web currently only supports 1 or 4 samples. 
#[derive( Component, Default, Clone, Copy, ExtractComponent, Reflect, PartialEq, PartialOrd, Eq, Hash, Debug, )] #[reflect(Component, Default, PartialEq, Hash, Debug)] pub enum Msaa { Off = 1, Sample2 = 2, #[default] Sample4 = 4, Sample8 = 8, } impl Msaa { #[inline] pub fn samples(&self) -> u32 { *self as u32 } pub fn from_samples(samples: u32) -> Self { match samples { 1 => Msaa::Off, 2 => Msaa::Sample2, 4 => Msaa::Sample4, 8 => Msaa::Sample8, _ => panic!("Unsupported MSAA sample count: {samples}"), } } } /// If this component is added to a camera, the camera will use an intermediate "high dynamic range" render texture. /// This allows rendering with a wider range of lighting values. However, this does *not* affect /// whether the camera will render with hdr display output (which bevy does not support currently) /// and only affects the intermediate render texture. #[derive( Component, Default, Copy, Clone, ExtractComponent, Reflect, PartialEq, Eq, Hash, Debug, )] #[reflect(Component, Default, PartialEq, Hash, Debug)] pub struct Hdr; /// An identifier for a view that is stable across frames. /// /// We can't use [`Entity`] for this because render world entities aren't /// stable, and we can't use just [`MainEntity`] because some main world views /// extract to multiple render world views. For example, a directional light /// extracts to one render world view per cascade, and a point light extracts to /// one render world view per cubemap face. So we pair the main entity with an /// *auxiliary entity* and a *subview index*, which *together* uniquely identify /// a view in the render world in a way that's stable from frame to frame. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct RetainedViewEntity { /// The main entity that this view corresponds to. pub main_entity: MainEntity, /// Another entity associated with the view entity. /// /// This is currently used for shadow cascades. 
If there are multiple /// cameras, each camera needs to have its own set of shadow cascades. Thus /// the light and subview index aren't themselves enough to uniquely /// identify a shadow cascade: we need the camera that the cascade is /// associated with as well. This entity stores that camera. /// /// If not present, this will be `MainEntity(Entity::PLACEHOLDER)`. pub auxiliary_entity: MainEntity, /// The index of the view corresponding to the entity. /// /// For example, for point lights that cast shadows, this is the index of /// the cubemap face (0 through 5 inclusive). For directional lights, this /// is the index of the cascade. pub subview_index: u32, } impl RetainedViewEntity { /// Creates a new [`RetainedViewEntity`] from the given main world entity, /// auxiliary main world entity, and subview index. /// /// See [`RetainedViewEntity::subview_index`] for an explanation of what /// `auxiliary_entity` and `subview_index` are. pub fn new( main_entity: MainEntity, auxiliary_entity: Option<MainEntity>, subview_index: u32, ) -> Self { Self { main_entity, auxiliary_entity: auxiliary_entity.unwrap_or(Entity::PLACEHOLDER.into()), subview_index, } } } /// Describes a camera in the render world. /// /// Each entity in the main world can potentially extract to multiple subviews, /// each of which has a [`RetainedViewEntity::subview_index`]. For instance, 3D /// cameras extract to both a 3D camera subview with index 0 and a special UI /// subview with index 1. Likewise, point lights with shadows extract to 6 /// subviews, one for each side of the shadow cubemap. #[derive(Component)] pub struct ExtractedView { /// The entity in the main world corresponding to this render world view. 
pub retained_view_entity: RetainedViewEntity, /// Typically a column-major right-handed projection matrix, one of either: /// /// Perspective (infinite reverse z) /// ```text /// f = 1 / tan(fov_y_radians / 2) /// /// ⎡ f / aspect 0 0 0 ⎤ /// ⎢ 0 f 0 0 ⎥ /// ⎢ 0 0 0 near ⎥ /// ⎣ 0 0 -1 0 ⎦ /// ``` /// /// Orthographic /// ```text /// w = right - left /// h = top - bottom /// d = far - near /// cw = -right - left /// ch = -top - bottom /// /// ⎡ 2 / w 0 0 cw / w ⎤ /// ⎢ 0 2 / h 0 ch / h ⎥ /// ⎢ 0 0 1 / d far / d ⎥ /// ⎣ 0 0 0 1 ⎦ /// ``` /// /// `clip_from_view[3][3] == 1.0` is the standard way to check if a projection is orthographic /// /// Glam matrices are column major, so for example getting the near plane of a perspective projection is `clip_from_view[3][2]` /// /// Custom projections are also possible however. pub clip_from_view: Mat4, pub world_from_view: GlobalTransform, // The view-projection matrix. When provided it is used instead of deriving it from // `projection` and `transform` fields, which can be helpful in cases where numerical // stability matters and there is a more direct way to derive the view-projection matrix. pub clip_from_world: Option<Mat4>, pub hdr: bool, // uvec4(origin.x, origin.y, width, height) pub viewport: UVec4, pub color_grading: ColorGrading, /// Whether to switch culling mode so that materials that request backface /// culling cull front faces, and vice versa. /// /// This is typically used for cameras that mirror the world that they /// render across a plane, because doing that flips the winding of each /// polygon. /// /// This setting doesn't affect materials that disable backface culling. pub invert_culling: bool, } impl ExtractedView { /// Creates a 3D rangefinder for a view pub fn rangefinder3d(&self) -> ViewRangefinder3d { ViewRangefinder3d::from_world_from_view(&self.world_from_view.affine()) } } /// Configures filmic color grading parameters to adjust the image appearance. 
/// /// Color grading is applied just before tonemapping for a given /// [`Camera`](bevy_camera::Camera) entity, with the sole exception of the /// `post_saturation` value in [`ColorGradingGlobal`], which is applied after /// tonemapping. #[derive(Component, Reflect, Debug, Default, Clone)] #[reflect(Component, Default, Debug, Clone)] pub struct ColorGrading { /// Filmic color grading values applied to the image as a whole (as opposed /// to individual sections, like shadows and highlights). pub global: ColorGradingGlobal, /// Color grading values that are applied to the darker parts of the image. /// /// The cutoff points can be customized with the /// [`ColorGradingGlobal::midtones_range`] field. pub shadows: ColorGradingSection, /// Color grading values that are applied to the parts of the image with /// intermediate brightness. /// /// The cutoff points can be customized with the /// [`ColorGradingGlobal::midtones_range`] field. pub midtones: ColorGradingSection, /// Color grading values that are applied to the lighter parts of the image. /// /// The cutoff points can be customized with the /// [`ColorGradingGlobal::midtones_range`] field. pub highlights: ColorGradingSection, } /// Filmic color grading values applied to the image as a whole (as opposed to /// individual sections, like shadows and highlights). #[derive(Clone, Debug, Reflect)] #[reflect(Default, Clone)] pub struct ColorGradingGlobal { /// Exposure value (EV) offset, measured in stops. pub exposure: f32, /// An adjustment made to the [CIE 1931] chromaticity *x* value. /// /// Positive values make the colors redder. Negative values make the colors /// bluer. This has no effect on luminance (brightness). /// /// [CIE 1931]: https://en.wikipedia.org/wiki/CIE_1931_color_space#CIE_xy_chromaticity_diagram_and_the_CIE_xyY_color_space pub temperature: f32, /// An adjustment made to the [CIE 1931] chromaticity *y* value. /// /// Positive values make the colors more magenta. 
Negative values make the /// colors greener. This has no effect on luminance (brightness). /// /// [CIE 1931]: https://en.wikipedia.org/wiki/CIE_1931_color_space#CIE_xy_chromaticity_diagram_and_the_CIE_xyY_color_space pub tint: f32, /// An adjustment to the [hue], in radians. /// /// Adjusting this value changes the perceived colors in the image: red to /// yellow to green to blue, etc. It has no effect on the saturation or /// brightness of the colors. /// /// [hue]: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation pub hue: f32, /// Saturation adjustment applied after tonemapping. /// Values below 1.0 desaturate, with a value of 0.0 resulting in a grayscale image /// with luminance defined by ITU-R BT.709 /// Values above 1.0 increase saturation. pub post_saturation: f32, /// The luminance (brightness) ranges that are considered part of the /// "midtones" of the image. /// /// This affects which [`ColorGradingSection`]s apply to which colors. Note /// that the sections smoothly blend into one another, to avoid abrupt /// transitions. /// /// The default value is 0.2 to 0.7. pub midtones_range: Range<f32>, } /// The [`ColorGrading`] structure, packed into the most efficient form for the /// GPU. #[derive(Clone, Copy, Debug, ShaderType)] pub struct ColorGradingUniform { pub balance: Mat3, pub saturation: Vec3, pub contrast: Vec3, pub gamma: Vec3, pub gain: Vec3, pub lift: Vec3, pub midtone_range: Vec2, pub exposure: f32, pub hue: f32, pub post_saturation: f32, } /// A section of color grading values that can be selectively applied to /// shadows, midtones, and highlights. #[derive(Reflect, Debug, Copy, Clone, PartialEq)] #[reflect(Clone, PartialEq)] pub struct ColorGradingSection { /// Values below 1.0 desaturate, with a value of 0.0 resulting in a grayscale image /// with luminance defined by ITU-R BT.709. /// Values above 1.0 increase saturation. pub saturation: f32, /// Adjusts the range of colors. /// /// A value of 1.0 applies no changes. 
Values below 1.0 move the colors more /// toward a neutral gray. Values above 1.0 spread the colors out away from /// the neutral gray. pub contrast: f32, /// A nonlinear luminance adjustment, mainly affecting the high end of the /// range. /// /// This is the *n* exponent in the standard [ASC CDL] formula for color /// correction: /// /// ```text /// out = (i × s + o)ⁿ /// ``` /// /// [ASC CDL]: https://en.wikipedia.org/wiki/ASC_CDL#Combined_Function pub gamma: f32, /// A linear luminance adjustment, mainly affecting the middle part of the /// range. /// /// This is the *s* factor in the standard [ASC CDL] formula for color /// correction: /// /// ```text /// out = (i × s + o)ⁿ /// ``` /// /// [ASC CDL]: https://en.wikipedia.org/wiki/ASC_CDL#Combined_Function pub gain: f32, /// A fixed luminance adjustment, mainly affecting the lower part of the /// range. /// /// This is the *o* term in the standard [ASC CDL] formula for color /// correction: /// /// ```text /// out = (i × s + o)ⁿ /// ``` /// /// [ASC CDL]: https://en.wikipedia.org/wiki/ASC_CDL#Combined_Function pub lift: f32, } impl Default for ColorGradingGlobal { fn default() -> Self { Self { exposure: 0.0, temperature: 0.0, tint: 0.0, hue: 0.0, post_saturation: 1.0, midtones_range: 0.2..0.7, } } } impl Default for ColorGradingSection { fn default() -> Self { Self { saturation: 1.0, contrast: 1.0, gamma: 1.0, gain: 1.0, lift: 0.0, } } } impl ColorGrading { /// Creates a new [`ColorGrading`] instance in which shadows, midtones, and /// highlights all have the same set of color grading values. pub fn with_identical_sections( global: ColorGradingGlobal, section: ColorGradingSection, ) -> ColorGrading { ColorGrading { global, highlights: section, midtones: section, shadows: section, } } /// Returns an iterator that visits the shadows, midtones, and highlights /// sections, in that order. 
pub fn all_sections(&self) -> impl Iterator<Item = &ColorGradingSection> { [&self.shadows, &self.midtones, &self.highlights].into_iter() } /// Applies the given mutating function to the shadows, midtones, and /// highlights sections, in that order. /// /// Returns an array composed of the results of such evaluation, in that /// order. pub fn all_sections_mut(&mut self) -> impl Iterator<Item = &mut ColorGradingSection> { [&mut self.shadows, &mut self.midtones, &mut self.highlights].into_iter() } } #[derive(Clone, ShaderType)] pub struct ViewUniform { pub clip_from_world: Mat4, pub unjittered_clip_from_world: Mat4, pub world_from_clip: Mat4, pub world_from_view: Mat4, pub view_from_world: Mat4, /// Typically a column-major right-handed projection matrix, one of either: /// /// Perspective (infinite reverse z) /// ```text /// f = 1 / tan(fov_y_radians / 2) /// /// ⎡ f / aspect 0 0 0 ⎤ /// ⎢ 0 f 0 0 ⎥ /// ⎢ 0 0 0 near ⎥ /// ⎣ 0 0 -1 0 ⎦ /// ``` /// /// Orthographic /// ```text /// w = right - left /// h = top - bottom /// d = far - near /// cw = -right - left /// ch = -top - bottom /// /// ⎡ 2 / w 0 0 cw / w ⎤ /// ⎢ 0 2 / h 0 ch / h ⎥ /// ⎢ 0 0 1 / d far / d ⎥ /// ⎣ 0 0 0 1 ⎦ /// ``` /// /// `clip_from_view[3][3] == 1.0` is the standard way to check if a projection is orthographic /// /// Glam matrices are column major, so for example getting the near plane of a perspective projection is `clip_from_view[3][2]` /// /// Custom projections are also possible however. pub clip_from_view: Mat4, pub view_from_clip: Mat4, pub world_position: Vec3, pub exposure: f32, // viewport(x_origin, y_origin, width, height) pub viewport: Vec4, pub main_pass_viewport: Vec4, /// 6 world-space half spaces (normal: vec3, distance: f32) ordered left, right, top, bottom, near, far. /// The normal vectors point towards the interior of the frustum. 
/// A half space contains `p` if `normal.dot(p) + distance > 0.` pub frustum: [Vec4; 6], pub color_grading: ColorGradingUniform, pub mip_bias: f32, pub frame_count: u32, } #[derive(Resource)] pub struct ViewUniforms { pub uniforms: DynamicUniformBuffer<ViewUniform>, } impl FromWorld for ViewUniforms { fn from_world(world: &mut World) -> Self { let mut uniforms = DynamicUniformBuffer::default(); uniforms.set_label(Some("view_uniforms_buffer")); let render_device = world.resource::<RenderDevice>(); if render_device.limits().max_storage_buffers_per_shader_stage > 0 { uniforms.add_usages(BufferUsages::STORAGE); } Self { uniforms } } } #[derive(Component)] pub struct ViewUniformOffset { pub offset: u32, } #[derive(Component, Clone)] pub struct ViewTarget { main_textures: MainTargetTextures, main_texture_format: TextureFormat, /// 0 represents `main_textures.a`, 1 represents `main_textures.b` /// This is shared across view targets with the same render target main_texture: Arc<AtomicUsize>, out_texture: OutputColorAttachment, } /// Contains [`OutputColorAttachment`] used for each target present on any view in the current /// frame, after being prepared by [`prepare_view_attachments`]. Users that want to override /// the default output color attachment for a specific target can do so by adding a /// [`OutputColorAttachment`] to this resource before [`prepare_view_targets`] is called. #[derive(Resource, Default, Deref, DerefMut)] pub struct ViewTargetAttachments(HashMap<NormalizedRenderTarget, OutputColorAttachment>); pub struct PostProcessWrite<'a> { pub source: &'a TextureView, pub source_texture: &'a Texture, pub destination: &'a TextureView, pub destination_texture: &'a Texture, } impl From<ColorGrading> for ColorGradingUniform { fn from(component: ColorGrading) -> Self { // Compute the balance matrix that will be used to apply the white // balance adjustment to an RGB color. 
Our general approach will be to // convert both the color and the developer-supplied white point to the // LMS color space, apply the conversion, and then convert back. // // First, we start with the CIE 1931 *xy* values of the standard D65 // illuminant: // <https://en.wikipedia.org/wiki/Standard_illuminant#D65_values> // // We then adjust them based on the developer's requested white balance. let white_point_xy = D65_XY + vec2(-component.global.temperature, component.global.tint); // Convert the white point from CIE 1931 *xy* to LMS. First, we convert to XYZ: // // Y Y // Y = 1 X = ─ x Z = ─ (1 - x - y) // y y // // Then we convert from XYZ to LMS color space, using the CAM16 matrix // from <https://en.wikipedia.org/wiki/LMS_color_space#Later_CIECAMs>: // // ⎡ L ⎤ ⎡ 0.401 0.650 -0.051 ⎤ ⎡ X ⎤ // ⎢ M ⎥ = ⎢ -0.250 1.204 0.046 ⎥ ⎢ Y ⎥ // ⎣ S ⎦ ⎣ -0.002 0.049 0.953 ⎦ ⎣ Z ⎦ // // The following formula is just a simplification of the above. let white_point_lms = vec3(0.701634, 1.15856, -0.904175) + (vec3(-0.051461, 0.045854, 0.953127) + vec3(0.452749, -0.296122, -0.955206) * white_point_xy.x) / white_point_xy.y; // Now that we're in LMS space, perform the white point scaling. let white_point_adjustment = Mat3::from_diagonal(D65_LMS / white_point_lms); // Finally, combine the RGB → LMS → corrected LMS → corrected RGB // pipeline into a single 3×3 matrix. 
let balance = LMS_TO_RGB * white_point_adjustment * RGB_TO_LMS; Self { balance, saturation: vec3( component.shadows.saturation, component.midtones.saturation, component.highlights.saturation, ), contrast: vec3( component.shadows.contrast, component.midtones.contrast, component.highlights.contrast, ), gamma: vec3( component.shadows.gamma, component.midtones.gamma, component.highlights.gamma, ), gain: vec3( component.shadows.gain, component.midtones.gain, component.highlights.gain, ), lift: vec3( component.shadows.lift, component.midtones.lift, component.highlights.lift, ), midtone_range: vec2( component.global.midtones_range.start, component.global.midtones_range.end, ), exposure: component.global.exposure, hue: component.global.hue, post_saturation: component.global.post_saturation, } } } /// Add this component to a camera to disable *indirect mode*. /// /// Indirect mode, automatically enabled on supported hardware, allows Bevy to /// offload transform and cull operations to the GPU, reducing CPU overhead. /// Doing this, however, reduces the amount of control that your app has over /// instancing decisions. In certain circumstances, you may want to disable /// indirect drawing so that your app can manually instance meshes as it sees /// fit. See the `custom_shader_instancing` example. /// /// The vast majority of applications will not need to use this component, as it /// generally reduces rendering performance. /// /// Note: This component should only be added when initially spawning a camera. Adding /// or removing after spawn can result in unspecified behavior. #[derive(Component, Default)] pub struct NoIndirectDrawing; impl ViewTarget { pub const TEXTURE_FORMAT_HDR: TextureFormat = TextureFormat::Rgba16Float; /// Retrieve this target's main texture's color attachment. 
pub fn get_color_attachment(&self) -> RenderPassColorAttachment<'_> { if self.main_texture.load(Ordering::SeqCst) == 0 { self.main_textures.a.get_attachment() } else { self.main_textures.b.get_attachment() } } /// Retrieve this target's "unsampled" main texture's color attachment. pub fn get_unsampled_color_attachment(&self) -> RenderPassColorAttachment<'_> { if self.main_texture.load(Ordering::SeqCst) == 0 { self.main_textures.a.get_unsampled_attachment() } else { self.main_textures.b.get_unsampled_attachment() } } /// The "main" unsampled texture. pub fn main_texture(&self) -> &Texture { if self.main_texture.load(Ordering::SeqCst) == 0 { &self.main_textures.a.texture.texture } else { &self.main_textures.b.texture.texture } } /// The _other_ "main" unsampled texture. /// In most cases you should use [`Self::main_texture`] instead and never this. /// The textures will naturally be swapped when [`Self::post_process_write`] is called. /// /// A use case for this is to be able to prepare a bind group for all main textures /// ahead of time. pub fn main_texture_other(&self) -> &Texture { if self.main_texture.load(Ordering::SeqCst) == 0 { &self.main_textures.b.texture.texture } else { &self.main_textures.a.texture.texture } } /// The "main" unsampled texture. pub fn main_texture_view(&self) -> &TextureView { if self.main_texture.load(Ordering::SeqCst) == 0 { &self.main_textures.a.texture.default_view } else { &self.main_textures.b.texture.default_view } } /// The _other_ "main" unsampled texture view. /// In most cases you should use [`Self::main_texture_view`] instead and never this. /// The textures will naturally be swapped when [`Self::post_process_write`] is called. /// /// A use case for this is to be able to prepare a bind group for all main textures /// ahead of time. 
pub fn main_texture_other_view(&self) -> &TextureView { if self.main_texture.load(Ordering::SeqCst) == 0 { &self.main_textures.b.texture.default_view } else { &self.main_textures.a.texture.default_view } } /// The "main" sampled texture. pub fn sampled_main_texture(&self) -> Option<&Texture> { self.main_textures .a .resolve_target .as_ref() .map(|sampled| &sampled.texture) } /// The "main" sampled texture view. pub fn sampled_main_texture_view(&self) -> Option<&TextureView> { self.main_textures .a .resolve_target .as_ref() .map(|sampled| &sampled.default_view) } #[inline] pub fn main_texture_format(&self) -> TextureFormat { self.main_texture_format } /// Returns `true` if and only if the main texture is [`Self::TEXTURE_FORMAT_HDR`] #[inline] pub fn is_hdr(&self) -> bool { self.main_texture_format == ViewTarget::TEXTURE_FORMAT_HDR } /// The final texture this view will render to. #[inline] pub fn out_texture(&self) -> &TextureView { &self.out_texture.view } pub fn out_texture_color_attachment( &self, clear_color: Option<LinearRgba>, ) -> RenderPassColorAttachment<'_> { self.out_texture.get_attachment(clear_color) } /// Whether the final texture this view will render to needs to be presented. pub fn needs_present(&self) -> bool { self.out_texture.needs_present() } /// The format of the final texture this view will render to #[inline] pub fn out_texture_view_format(&self) -> TextureFormat { self.out_texture.view_format } /// This will start a new "post process write", which assumes that the caller /// will write the [`PostProcessWrite`]'s `source` to the `destination`. /// /// `source` is the "current" main texture. This will internally flip this /// [`ViewTarget`]'s main texture to the `destination` texture, so the caller /// _must_ ensure `source` is copied to `destination`, with or without modifications. /// Failing to do so will cause the current main texture information to be lost. 
pub fn post_process_write(&self) -> PostProcessWrite<'_> { let old_is_a_main_texture = self.main_texture.fetch_xor(1, Ordering::SeqCst); // if the old main texture is a, then the post processing must write from a to b if old_is_a_main_texture == 0 { self.main_textures.b.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.a.texture.default_view, source_texture: &self.main_textures.a.texture.texture, destination: &self.main_textures.b.texture.default_view, destination_texture: &self.main_textures.b.texture.texture, } } else { self.main_textures.a.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.b.texture.default_view, source_texture: &self.main_textures.b.texture.texture, destination: &self.main_textures.a.texture.default_view, destination_texture: &self.main_textures.a.texture.texture, } } } } #[derive(Component)]
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/view/window/screenshot.rs
crates/bevy_render/src/view/window/screenshot.rs
use super::ExtractedWindows; use crate::{ gpu_readback, render_asset::RenderAssets, render_resource::{ binding_types::texture_2d, BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindGroupLayoutEntries, Buffer, BufferUsages, CachedRenderPipelineId, FragmentState, PipelineCache, RenderPipelineDescriptor, SpecializedRenderPipeline, SpecializedRenderPipelines, Texture, TextureUsages, TextureView, VertexState, }, renderer::RenderDevice, texture::{GpuImage, ManualTextureViews, OutputColorAttachment}, view::{prepare_view_attachments, prepare_view_targets, ViewTargetAttachments, WindowSurfaces}, ExtractSchedule, MainWorld, Render, RenderApp, RenderStartup, RenderSystems, }; use alloc::{borrow::Cow, sync::Arc}; use bevy_app::{First, Plugin, Update}; use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Handle, RenderAssetUsages}; use bevy_camera::{ManualTextureViewHandle, NormalizedRenderTarget, RenderTarget}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ entity::EntityHashMap, message::message_update_system, prelude::*, system::SystemState, }; use bevy_image::{Image, TextureFormatPixelInfo, ToExtents}; use bevy_platform::collections::HashSet; use bevy_reflect::Reflect; use bevy_shader::Shader; use bevy_tasks::AsyncComputeTaskPool; use bevy_utils::default; use bevy_window::{PrimaryWindow, WindowRef}; use core::ops::Deref; use std::{ path::Path, sync::{ mpsc::{Receiver, Sender}, Mutex, }, }; use tracing::{error, info, warn}; use wgpu::{CommandEncoder, Extent3d, TextureFormat}; #[derive(EntityEvent, Reflect, Deref, DerefMut, Debug)] #[reflect(Debug)] pub struct ScreenshotCaptured { pub entity: Entity, #[deref] pub image: Image, } /// A component that signals to the renderer to capture a screenshot this frame. /// /// This component should be spawned on a new entity with an observer that will trigger /// with [`ScreenshotCaptured`] when the screenshot is ready. 
/// /// Screenshots are captured asynchronously and may not be available immediately after the frame /// that the component is spawned on. The observer should be used to handle the screenshot when it /// is ready. /// /// Note that the screenshot entity will be despawned after the screenshot is captured and the /// observer is triggered. /// /// # Usage /// /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_render::view::screenshot::{save_to_disk, Screenshot}; /// /// fn take_screenshot(mut commands: Commands) { /// commands.spawn(Screenshot::primary_window()) /// .observe(save_to_disk("screenshot.png")); /// } /// ``` #[derive(Component, Deref, DerefMut, Reflect, Debug)] #[reflect(Component, Debug)] pub struct Screenshot(pub RenderTarget); /// A marker component that indicates that a screenshot is currently being captured. #[derive(Component, Default)] pub struct Capturing; /// A marker component that indicates that a screenshot has been captured, the image is ready, and /// the screenshot entity can be despawned. #[derive(Component, Default)] pub struct Captured; impl Screenshot { /// Capture a screenshot of the provided window entity. pub fn window(window: Entity) -> Self { Self(RenderTarget::Window(WindowRef::Entity(window))) } /// Capture a screenshot of the primary window, if one exists. pub fn primary_window() -> Self { Self(RenderTarget::Window(WindowRef::Primary)) } /// Capture a screenshot of the provided render target image. pub fn image(image: Handle<Image>) -> Self { Self(RenderTarget::Image(image.into())) } /// Capture a screenshot of the provided manual texture view. 
pub fn texture_view(texture_view: ManualTextureViewHandle) -> Self { Self(RenderTarget::TextureView(texture_view)) } } struct ScreenshotPreparedState { pub texture: Texture, pub buffer: Buffer, pub bind_group: BindGroup, pub pipeline_id: CachedRenderPipelineId, pub size: Extent3d, } #[derive(Resource, Deref, DerefMut)] pub struct CapturedScreenshots(pub Arc<Mutex<Receiver<(Entity, Image)>>>); #[derive(Resource, Deref, DerefMut, Default)] struct RenderScreenshotTargets(EntityHashMap<NormalizedRenderTarget>); #[derive(Resource, Deref, DerefMut, Default)] struct RenderScreenshotsPrepared(EntityHashMap<ScreenshotPreparedState>); #[derive(Resource, Deref, DerefMut)] struct RenderScreenshotsSender(Sender<(Entity, Image)>); /// Saves the captured screenshot to disk at the provided path. pub fn save_to_disk(path: impl AsRef<Path>) -> impl FnMut(On<ScreenshotCaptured>) { let path = path.as_ref().to_owned(); move |screenshot_captured| { let img = screenshot_captured.image.clone(); match img.try_into_dynamic() { Ok(dyn_img) => match image::ImageFormat::from_path(&path) { Ok(format) => { // discard the alpha channel which stores brightness values when HDR is enabled to make sure // the screenshot looks right let img = dyn_img.to_rgb8(); #[cfg(not(target_arch = "wasm32"))] match img.save_with_format(&path, format) { Ok(_) => info!("Screenshot saved to {}", path.display()), Err(e) => error!("Cannot save screenshot, IO error: {e}"), } #[cfg(target_arch = "wasm32")] { let save_screenshot = || { use image::EncodableLayout; use wasm_bindgen::{JsCast, JsValue}; let mut image_buffer = std::io::Cursor::new(Vec::new()); img.write_to(&mut image_buffer, format) .map_err(|e| JsValue::from_str(&format!("{e}")))?; let parts = js_sys::Array::of1( &js_sys::Uint8Array::new_from_slice( image_buffer.into_inner().as_bytes(), ) .into(), ); let blob = web_sys::Blob::new_with_u8_array_sequence(&parts)?; let url = web_sys::Url::create_object_url_with_blob(&blob)?; let window = 
web_sys::window().unwrap(); let document = window.document().unwrap(); let link = document.create_element("a")?; link.set_attribute("href", &url)?; link.set_attribute( "download", path.file_name() .and_then(|filename| filename.to_str()) .ok_or_else(|| JsValue::from_str("Invalid filename"))?, )?; let html_element = link.dyn_into::<web_sys::HtmlElement>()?; html_element.click(); web_sys::Url::revoke_object_url(&url)?; Ok::<(), JsValue>(()) }; match (save_screenshot)() { Ok(_) => info!("Screenshot saved to {}", path.display()), Err(e) => error!("Cannot save screenshot, error: {e:?}"), }; } } Err(e) => error!("Cannot save screenshot, requested format not recognized: {e}"), }, Err(e) => error!("Cannot save screenshot, screen format cannot be understood: {e}"), } } } fn clear_screenshots(mut commands: Commands, screenshots: Query<Entity, With<Captured>>) { for entity in screenshots.iter() { commands.entity(entity).despawn(); } } pub fn trigger_screenshots( mut commands: Commands, captured_screenshots: ResMut<CapturedScreenshots>, ) { let captured_screenshots = captured_screenshots.lock().unwrap(); while let Ok((entity, image)) = captured_screenshots.try_recv() { commands.entity(entity).insert(Captured); commands.trigger(ScreenshotCaptured { image, entity }); } } fn extract_screenshots( mut targets: ResMut<RenderScreenshotTargets>, mut main_world: ResMut<MainWorld>, mut system_state: Local< Option< SystemState<( Commands, Query<Entity, With<PrimaryWindow>>, Query<(Entity, &Screenshot), Without<Capturing>>, )>, >, >, mut seen_targets: Local<HashSet<NormalizedRenderTarget>>, ) { if system_state.is_none() { *system_state = Some(SystemState::new(&mut main_world)); } let system_state = system_state.as_mut().unwrap(); let (mut commands, primary_window, screenshots) = system_state.get_mut(&mut main_world); targets.clear(); seen_targets.clear(); let primary_window = primary_window.iter().next(); for (entity, screenshot) in screenshots.iter() { let render_target = 
screenshot.0.clone(); let Some(render_target) = render_target.normalize(primary_window) else { warn!( "Unknown render target for screenshot, skipping: {:?}", render_target ); continue; }; if seen_targets.contains(&render_target) { warn!( "Duplicate render target for screenshot, skipping entity {}: {:?}", entity, render_target ); // If we don't despawn the entity here, it will be captured again in the next frame commands.entity(entity).despawn(); continue; } seen_targets.insert(render_target.clone()); targets.insert(entity, render_target); commands.entity(entity).insert(Capturing); } system_state.apply(&mut main_world); } fn prepare_screenshots( targets: Res<RenderScreenshotTargets>, mut prepared: ResMut<RenderScreenshotsPrepared>, window_surfaces: Res<WindowSurfaces>, render_device: Res<RenderDevice>, screenshot_pipeline: Res<ScreenshotToScreenPipeline>, pipeline_cache: Res<PipelineCache>, mut pipelines: ResMut<SpecializedRenderPipelines<ScreenshotToScreenPipeline>>, images: Res<RenderAssets<GpuImage>>, manual_texture_views: Res<ManualTextureViews>, mut view_target_attachments: ResMut<ViewTargetAttachments>, ) { prepared.clear(); for (entity, target) in targets.iter() { match target { NormalizedRenderTarget::Window(window) => { let window = window.entity(); let Some(surface_data) = window_surfaces.surfaces.get(&window) else { warn!("Unknown window for screenshot, skipping: {}", window); continue; }; let view_format = surface_data .texture_view_format .unwrap_or(surface_data.configuration.format); let size = Extent3d { width: surface_data.configuration.width, height: surface_data.configuration.height, ..default() }; let (texture_view, state) = prepare_screenshot_state( size, view_format, &render_device, &screenshot_pipeline, &pipeline_cache, &mut pipelines, ); prepared.insert(*entity, state); view_target_attachments.insert( target.clone(), OutputColorAttachment::new(texture_view.clone(), view_format), ); } NormalizedRenderTarget::Image(image) => { let 
Some(gpu_image) = images.get(&image.handle) else { warn!("Unknown image for screenshot, skipping: {:?}", image); continue; }; let view_format = gpu_image .texture_view_format .unwrap_or(gpu_image.texture_format); let (texture_view, state) = prepare_screenshot_state( gpu_image.size, view_format, &render_device, &screenshot_pipeline, &pipeline_cache, &mut pipelines, ); prepared.insert(*entity, state); view_target_attachments.insert( target.clone(), OutputColorAttachment::new(texture_view.clone(), view_format), ); } NormalizedRenderTarget::TextureView(texture_view) => { let Some(manual_texture_view) = manual_texture_views.get(texture_view) else { warn!( "Unknown manual texture view for screenshot, skipping: {:?}", texture_view ); continue; }; let view_format = manual_texture_view.view_format; let size = manual_texture_view.size.to_extents(); let (texture_view, state) = prepare_screenshot_state( size, view_format, &render_device, &screenshot_pipeline, &pipeline_cache, &mut pipelines, ); prepared.insert(*entity, state); view_target_attachments.insert( target.clone(), OutputColorAttachment::new(texture_view.clone(), view_format), ); } NormalizedRenderTarget::None { .. } => { // Nothing to screenshot! 
} } } } fn prepare_screenshot_state( size: Extent3d, format: TextureFormat, render_device: &RenderDevice, pipeline: &ScreenshotToScreenPipeline, pipeline_cache: &PipelineCache, pipelines: &mut SpecializedRenderPipelines<ScreenshotToScreenPipeline>, ) -> (TextureView, ScreenshotPreparedState) { let texture = render_device.create_texture(&wgpu::TextureDescriptor { label: Some("screenshot-capture-rendertarget"), size, mip_level_count: 1, sample_count: 1, dimension: wgpu::TextureDimension::D2, format, usage: TextureUsages::RENDER_ATTACHMENT | TextureUsages::COPY_SRC | TextureUsages::TEXTURE_BINDING, view_formats: &[], }); let texture_view = texture.create_view(&Default::default()); let buffer = render_device.create_buffer(&wgpu::BufferDescriptor { label: Some("screenshot-transfer-buffer"), size: gpu_readback::get_aligned_size(size, format.pixel_size().unwrap_or(0) as u32) as u64, usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, mapped_at_creation: false, }); let bind_group = render_device.create_bind_group( "screenshot-to-screen-bind-group", &pipeline_cache.get_bind_group_layout(&pipeline.bind_group_layout), &BindGroupEntries::single(&texture_view), ); let pipeline_id = pipelines.specialize(pipeline_cache, pipeline, format); ( texture_view, ScreenshotPreparedState { texture, buffer, bind_group, pipeline_id, size, }, ) } pub struct ScreenshotPlugin; impl Plugin for ScreenshotPlugin { fn build(&self, app: &mut bevy_app::App) { embedded_asset!(app, "screenshot.wgsl"); let (tx, rx) = std::sync::mpsc::channel(); app.insert_resource(CapturedScreenshots(Arc::new(Mutex::new(rx)))) .add_systems( First, clear_screenshots .after(message_update_system) .before(ApplyDeferred), ) .add_systems(Update, trigger_screenshots); let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app .insert_resource(RenderScreenshotsSender(tx)) .init_resource::<RenderScreenshotTargets>() .init_resource::<RenderScreenshotsPrepared>() 
.init_resource::<SpecializedRenderPipelines<ScreenshotToScreenPipeline>>() .add_systems(RenderStartup, init_screenshot_to_screen_pipeline) .add_systems(ExtractSchedule, extract_screenshots.ambiguous_with_all()) .add_systems( Render, prepare_screenshots .after(prepare_view_attachments) .before(prepare_view_targets) .in_set(RenderSystems::ManageViews), ); } } #[derive(Resource)] pub struct ScreenshotToScreenPipeline { pub bind_group_layout: BindGroupLayoutDescriptor, pub shader: Handle<Shader>, } pub fn init_screenshot_to_screen_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) { let bind_group_layout = BindGroupLayoutDescriptor::new( "screenshot-to-screen-bgl", &BindGroupLayoutEntries::single( wgpu::ShaderStages::FRAGMENT, texture_2d(wgpu::TextureSampleType::Float { filterable: false }), ), ); let shader = load_embedded_asset!(asset_server.as_ref(), "screenshot.wgsl"); commands.insert_resource(ScreenshotToScreenPipeline { bind_group_layout, shader, }); } impl SpecializedRenderPipeline for ScreenshotToScreenPipeline { type Key = TextureFormat; fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor { RenderPipelineDescriptor { label: Some(Cow::Borrowed("screenshot-to-screen")), layout: vec![self.bind_group_layout.clone()], vertex: VertexState { shader: self.shader.clone(), ..default() }, primitive: wgpu::PrimitiveState { cull_mode: Some(wgpu::Face::Back), ..Default::default() }, multisample: Default::default(), fragment: Some(FragmentState { shader: self.shader.clone(), targets: vec![Some(wgpu::ColorTargetState { format: key, blend: None, write_mask: wgpu::ColorWrites::ALL, })], ..default() }), ..default() } } } pub(crate) fn submit_screenshot_commands(world: &World, encoder: &mut CommandEncoder) { let targets = world.resource::<RenderScreenshotTargets>(); let prepared = world.resource::<RenderScreenshotsPrepared>(); let pipelines = world.resource::<PipelineCache>(); let gpu_images = world.resource::<RenderAssets<GpuImage>>(); let windows = 
world.resource::<ExtractedWindows>(); let manual_texture_views = world.resource::<ManualTextureViews>(); for (entity, render_target) in targets.iter() { match render_target { NormalizedRenderTarget::Window(window) => { let window = window.entity(); let Some(window) = windows.get(&window) else { continue; }; let width = window.physical_width; let height = window.physical_height; let Some(texture_format) = window.swap_chain_texture_view_format else { continue; }; let Some(swap_chain_texture_view) = window.swap_chain_texture_view.as_ref() else { continue; }; render_screenshot( encoder, prepared, pipelines, entity, width, height, texture_format, swap_chain_texture_view, ); } NormalizedRenderTarget::Image(image) => { let Some(gpu_image) = gpu_images.get(&image.handle) else { warn!("Unknown image for screenshot, skipping: {:?}", image); continue; }; let width = gpu_image.size.width; let height = gpu_image.size.height; let texture_format = gpu_image.texture_format; let texture_view = gpu_image.texture_view.deref(); render_screenshot( encoder, prepared, pipelines, entity, width, height, texture_format, texture_view, ); } NormalizedRenderTarget::TextureView(texture_view) => { let Some(texture_view) = manual_texture_views.get(texture_view) else { warn!( "Unknown manual texture view for screenshot, skipping: {:?}", texture_view ); continue; }; let width = texture_view.size.x; let height = texture_view.size.y; let texture_format = texture_view.view_format; let texture_view = texture_view.texture_view.deref(); render_screenshot( encoder, prepared, pipelines, entity, width, height, texture_format, texture_view, ); } NormalizedRenderTarget::None { .. } => { // Nothing to screenshot! 
} }; } } fn render_screenshot( encoder: &mut CommandEncoder, prepared: &RenderScreenshotsPrepared, pipelines: &PipelineCache, entity: &Entity, width: u32, height: u32, texture_format: TextureFormat, texture_view: &wgpu::TextureView, ) { if let Some(prepared_state) = &prepared.get(entity) { let extent = Extent3d { width, height, depth_or_array_layers: 1, }; encoder.copy_texture_to_buffer( prepared_state.texture.as_image_copy(), wgpu::TexelCopyBufferInfo { buffer: &prepared_state.buffer, layout: gpu_readback::layout_data(extent, texture_format), }, extent, ); if let Some(pipeline) = pipelines.get_render_pipeline(prepared_state.pipeline_id) { let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { label: Some("screenshot_to_screen_pass"), color_attachments: &[Some(wgpu::RenderPassColorAttachment { view: texture_view, depth_slice: None, resolve_target: None, ops: wgpu::Operations { load: wgpu::LoadOp::Load, store: wgpu::StoreOp::Store, }, })], depth_stencil_attachment: None, timestamp_writes: None, occlusion_query_set: None, }); pass.set_pipeline(pipeline); pass.set_bind_group(0, &prepared_state.bind_group, &[]); pass.draw(0..3, 0..1); } } } pub(crate) fn collect_screenshots(world: &mut World) { #[cfg(feature = "trace")] let _span = tracing::info_span!("collect_screenshots").entered(); let sender = world.resource::<RenderScreenshotsSender>().deref().clone(); let prepared = world.resource::<RenderScreenshotsPrepared>(); for (entity, prepared) in prepared.iter() { let entity = *entity; let sender = sender.clone(); let width = prepared.size.width; let height = prepared.size.height; let texture_format = prepared.texture.format(); let Ok(pixel_size) = texture_format.pixel_size() else { continue; }; let buffer = prepared.buffer.clone(); let finish = async move { let (tx, rx) = async_channel::bounded(1); let buffer_slice = buffer.slice(..); // The polling for this map call is done every frame when the command queue is submitted. 
buffer_slice.map_async(wgpu::MapMode::Read, move |result| { if let Err(err) = result { panic!("{}", err.to_string()); } tx.try_send(()).unwrap(); }); rx.recv().await.unwrap(); let data = buffer_slice.get_mapped_range(); // we immediately move the data to CPU memory to avoid holding the mapped view for long let mut result = Vec::from(&*data); drop(data); if result.len() != ((width * height) as usize * pixel_size) { // Our buffer has been padded because we needed to align to a multiple of 256. // We remove this padding here let initial_row_bytes = width as usize * pixel_size; let buffered_row_bytes = gpu_readback::align_byte_size(width * pixel_size as u32) as usize; let mut take_offset = buffered_row_bytes; let mut place_offset = initial_row_bytes; for _ in 1..height { result.copy_within(take_offset..take_offset + buffered_row_bytes, place_offset); take_offset += buffered_row_bytes; place_offset += initial_row_bytes; } result.truncate(initial_row_bytes * height as usize); } if let Err(e) = sender.send(( entity, Image::new( Extent3d { width, height, depth_or_array_layers: 1, }, wgpu::TextureDimension::D2, result, texture_format, RenderAssetUsages::RENDER_WORLD, ), )) { error!("Failed to send screenshot: {}", e); } }; AsyncComputeTaskPool::get().spawn(finish).detach(); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/view/window/mod.rs
crates/bevy_render/src/view/window/mod.rs
use crate::renderer::WgpuWrapper; use crate::{ render_resource::{SurfaceTexture, TextureView}, renderer::{RenderAdapter, RenderDevice, RenderInstance}, Extract, ExtractSchedule, Render, RenderApp, RenderSystems, }; use bevy_app::{App, Plugin}; use bevy_ecs::{entity::EntityHashMap, prelude::*}; use bevy_platform::collections::HashSet; use bevy_utils::default; use bevy_window::{ CompositeAlphaMode, PresentMode, PrimaryWindow, RawHandleWrapper, Window, WindowClosing, }; use core::{ num::NonZero, ops::{Deref, DerefMut}, }; use tracing::{debug, info, warn}; use wgpu::{ SurfaceConfiguration, SurfaceTargetUnsafe, TextureFormat, TextureUsages, TextureViewDescriptor, }; pub mod screenshot; use screenshot::ScreenshotPlugin; pub struct WindowRenderPlugin; impl Plugin for WindowRenderPlugin { fn build(&self, app: &mut App) { app.add_plugins(ScreenshotPlugin); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app .init_resource::<ExtractedWindows>() .init_resource::<WindowSurfaces>() .add_systems(ExtractSchedule, extract_windows) .add_systems( Render, create_surfaces .run_if(need_surface_configuration) .before(prepare_windows), ) .add_systems(Render, prepare_windows.in_set(RenderSystems::ManageViews)); } } } pub struct ExtractedWindow { /// An entity that contains the components in [`Window`]. pub entity: Entity, pub handle: RawHandleWrapper, pub physical_width: u32, pub physical_height: u32, pub present_mode: PresentMode, pub desired_maximum_frame_latency: Option<NonZero<u32>>, /// Note: this will not always be the swap chain texture view. When taking a screenshot, /// this will point to an alternative texture instead to allow for copying the render result /// to CPU memory. 
pub swap_chain_texture_view: Option<TextureView>, pub swap_chain_texture: Option<SurfaceTexture>, pub swap_chain_texture_format: Option<TextureFormat>, pub swap_chain_texture_view_format: Option<TextureFormat>, pub size_changed: bool, pub present_mode_changed: bool, pub alpha_mode: CompositeAlphaMode, /// Whether this window needs an initial buffer commit. /// /// On Wayland, windows must present at least once before they are shown. /// See <https://wayland.app/protocols/xdg-shell#xdg_surface> pub needs_initial_present: bool, } impl ExtractedWindow { fn set_swapchain_texture(&mut self, frame: wgpu::SurfaceTexture) { self.swap_chain_texture_view_format = Some(frame.texture.format().add_srgb_suffix()); let texture_view_descriptor = TextureViewDescriptor { format: self.swap_chain_texture_view_format, ..default() }; self.swap_chain_texture_view = Some(TextureView::from( frame.texture.create_view(&texture_view_descriptor), )); self.swap_chain_texture = Some(SurfaceTexture::from(frame)); } fn has_swapchain_texture(&self) -> bool { self.swap_chain_texture_view.is_some() && self.swap_chain_texture.is_some() } pub fn present(&mut self) { if let Some(surface_texture) = self.swap_chain_texture.take() { // TODO(clean): winit docs recommends calling pre_present_notify before this. // though `present()` doesn't present the frame, it schedules it to be presented // by wgpu. 
// https://docs.rs/winit/0.29.9/wasm32-unknown-unknown/winit/window/struct.Window.html#method.pre_present_notify surface_texture.present(); } } } #[derive(Default, Resource)] pub struct ExtractedWindows { pub primary: Option<Entity>, pub windows: EntityHashMap<ExtractedWindow>, } impl Deref for ExtractedWindows { type Target = EntityHashMap<ExtractedWindow>; fn deref(&self) -> &Self::Target { &self.windows } } impl DerefMut for ExtractedWindows { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.windows } } fn extract_windows( mut extracted_windows: ResMut<ExtractedWindows>, mut closing: Extract<MessageReader<WindowClosing>>, windows: Extract<Query<(Entity, &Window, &RawHandleWrapper, Option<&PrimaryWindow>)>>, mut removed: Extract<RemovedComponents<RawHandleWrapper>>, mut window_surfaces: ResMut<WindowSurfaces>, ) { for (entity, window, handle, primary) in windows.iter() { if primary.is_some() { extracted_windows.primary = Some(entity); } let (new_width, new_height) = ( window.resolution.physical_width().max(1), window.resolution.physical_height().max(1), ); let extracted_window = extracted_windows.entry(entity).or_insert(ExtractedWindow { entity, handle: handle.clone(), physical_width: new_width, physical_height: new_height, present_mode: window.present_mode, desired_maximum_frame_latency: window.desired_maximum_frame_latency, swap_chain_texture: None, swap_chain_texture_view: None, size_changed: false, swap_chain_texture_format: None, swap_chain_texture_view_format: None, present_mode_changed: false, alpha_mode: window.composite_alpha_mode, needs_initial_present: true, }); if extracted_window.swap_chain_texture.is_none() { // If we called present on the previous swap-chain texture last update, // then drop the swap chain frame here, otherwise we can keep it for the // next update as an optimization. `prepare_windows` will only acquire a new // swap chain texture if needed. 
extracted_window.swap_chain_texture_view = None; } extracted_window.size_changed = new_width != extracted_window.physical_width || new_height != extracted_window.physical_height; extracted_window.present_mode_changed = window.present_mode != extracted_window.present_mode; if extracted_window.size_changed { debug!( "Window size changed from {}x{} to {}x{}", extracted_window.physical_width, extracted_window.physical_height, new_width, new_height ); extracted_window.physical_width = new_width; extracted_window.physical_height = new_height; } if extracted_window.present_mode_changed { debug!( "Window Present Mode changed from {:?} to {:?}", extracted_window.present_mode, window.present_mode ); extracted_window.present_mode = window.present_mode; } } for closing_window in closing.read() { extracted_windows.remove(&closing_window.window); window_surfaces.remove(&closing_window.window); } for removed_window in removed.read() { extracted_windows.remove(&removed_window); window_surfaces.remove(&removed_window); } } struct SurfaceData { // TODO: what lifetime should this be? surface: WgpuWrapper<wgpu::Surface<'static>>, configuration: SurfaceConfiguration, texture_view_format: Option<TextureFormat>, } #[derive(Resource, Default)] pub struct WindowSurfaces { surfaces: EntityHashMap<SurfaceData>, /// List of windows that we have already called the initial `configure_surface` for configured_windows: HashSet<Entity>, } impl WindowSurfaces { fn remove(&mut self, window: &Entity) { self.surfaces.remove(window); self.configured_windows.remove(window); } } /// (re)configures window surfaces, and obtains a swapchain texture for rendering. /// /// NOTE: `get_current_texture` in `prepare_windows` can take a long time if the GPU workload is /// the performance bottleneck. This can be seen in profiles as multiple prepare-set systems all /// taking an unusually long time to complete, and all finishing at about the same time as the /// `prepare_windows` system. 
Improvements in bevy are planned to avoid this happening when it /// should not but it will still happen as it is easy for a user to create a large GPU workload /// relative to the GPU performance and/or CPU workload. /// This can be caused by many reasons, but several of them are: /// - GPU workload is more than your current GPU can manage /// - Error / performance bug in your custom shaders /// - wgpu was unable to detect a proper GPU hardware-accelerated device given the chosen /// [`Backends`](crate::settings::Backends), [`WgpuLimits`](crate::settings::WgpuLimits), /// and/or [`WgpuFeatures`](crate::settings::WgpuFeatures). For example, on Windows currently /// `DirectX 11` is not supported by wgpu 0.12 and so if your GPU/drivers do not support Vulkan, /// it may be that a software renderer called "Microsoft Basic Render Driver" using `DirectX 12` /// will be chosen and performance will be very poor. This is visible in a log message that is /// output during renderer initialization. /// Another alternative is to try to use [`ANGLE`](https://github.com/gfx-rs/wgpu#angle) and /// [`Backends::GL`](crate::settings::Backends::GL) with the `gles` feature enabled if your /// GPU/drivers support `OpenGL 4.3` / `OpenGL ES 3.0` or later. pub fn prepare_windows( mut windows: ResMut<ExtractedWindows>, mut window_surfaces: ResMut<WindowSurfaces>, render_device: Res<RenderDevice>, #[cfg(target_os = "linux")] render_instance: Res<RenderInstance>, ) { for window in windows.windows.values_mut() { let window_surfaces = window_surfaces.deref_mut(); let Some(surface_data) = window_surfaces.surfaces.get(&window.entity) else { continue; }; // We didn't present the previous frame, so we can keep using our existing swapchain texture. if window.has_swapchain_texture() && !window.size_changed && !window.present_mode_changed { continue; } // A recurring issue is hitting `wgpu::SurfaceError::Timeout` on certain Linux // mesa driver implementations. 
This seems to be a quirk of some drivers. // We'd rather keep panicking when not on Linux mesa, because in those case, // the `Timeout` is still probably the symptom of a degraded unrecoverable // application state. // see https://github.com/bevyengine/bevy/pull/5957 // and https://github.com/gfx-rs/wgpu/issues/1218 #[cfg(target_os = "linux")] let may_erroneously_timeout = || { render_instance .enumerate_adapters(wgpu::Backends::VULKAN) .iter() .any(|adapter| { let name = adapter.get_info().name; name.starts_with("Radeon") || name.starts_with("AMD") || name.starts_with("Intel") }) }; let surface = &surface_data.surface; match surface.get_current_texture() { Ok(frame) => { window.set_swapchain_texture(frame); } Err(wgpu::SurfaceError::Outdated) => { render_device.configure_surface(surface, &surface_data.configuration); let frame = match surface.get_current_texture() { Ok(frame) => frame, Err(err) => { // This is a common occurrence on X11 and Xwayland with NVIDIA drivers // when opening and resizing the window. warn!("Couldn't get swap chain texture after configuring. Cause: '{err}'"); continue; } }; window.set_swapchain_texture(frame); } #[cfg(target_os = "linux")] Err(wgpu::SurfaceError::Timeout) if may_erroneously_timeout() => { tracing::trace!( "Couldn't get swap chain texture. This is probably a quirk \ of your Linux GPU driver, so it can be safely ignored." ); } Err(err) => { panic!("Couldn't get swap chain texture, operation unrecoverable: {err}"); } } window.swap_chain_texture_format = Some(surface_data.configuration.format); } } pub fn need_surface_configuration( windows: Res<ExtractedWindows>, window_surfaces: Res<WindowSurfaces>, ) -> bool { for window in windows.windows.values() { if !window_surfaces.configured_windows.contains(&window.entity) || window.size_changed || window.present_mode_changed { return true; } } false } // 2 is wgpu's default/what we've been using so far. 
// 1 is the minimum, but may cause lower framerates due to the cpu waiting for the gpu to finish // all work for the previous frame before starting work on the next frame, which then means the gpu // has to wait for the cpu to finish to start on the next frame. const DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY: u32 = 2; /// Creates window surfaces. pub fn create_surfaces( // By accessing a NonSend resource, we tell the scheduler to put this system on the main thread, // which is necessary for some OS's #[cfg(any(target_os = "macos", target_os = "ios"))] _marker: bevy_ecs::system::NonSendMarker, mut windows: ResMut<ExtractedWindows>, mut window_surfaces: ResMut<WindowSurfaces>, render_instance: Res<RenderInstance>, render_adapter: Res<RenderAdapter>, render_device: Res<RenderDevice>, ) { for window in windows.windows.values_mut() { let data = window_surfaces .surfaces .entry(window.entity) .or_insert_with(|| { let surface_target = SurfaceTargetUnsafe::RawHandle { raw_display_handle: window.handle.get_display_handle(), raw_window_handle: window.handle.get_window_handle(), }; // SAFETY: The window handles in ExtractedWindows will always be valid objects to create surfaces on let surface = unsafe { // NOTE: On some OSes this MUST be called from the main thread. // As of wgpu 0.15, only fallible if the given window is a HTML canvas and obtaining a WebGPU or WebGL2 context fails. render_instance .create_surface_unsafe(surface_target) .expect("Failed to create wgpu surface") }; let caps = surface.get_capabilities(&render_adapter); let present_mode = present_mode(window, &caps); let formats = caps.formats; // For future HDR output support, we'll need to request a format that supports HDR, // but as of wgpu 0.15 that is not yet supported. // Prefer sRGB formats for surfaces, but fall back to first available format if no sRGB formats are available. 
let mut format = *formats.first().expect("No supported formats for surface"); for available_format in formats { // Rgba8UnormSrgb and Bgra8UnormSrgb and the only sRGB formats wgpu exposes that we can use for surfaces. if available_format == TextureFormat::Rgba8UnormSrgb || available_format == TextureFormat::Bgra8UnormSrgb { format = available_format; break; } } let texture_view_format = if !format.is_srgb() { Some(format.add_srgb_suffix()) } else { None }; let configuration = SurfaceConfiguration { format, width: window.physical_width, height: window.physical_height, usage: TextureUsages::RENDER_ATTACHMENT, present_mode, desired_maximum_frame_latency: window .desired_maximum_frame_latency .map(NonZero::<u32>::get) .unwrap_or(DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY), alpha_mode: match window.alpha_mode { CompositeAlphaMode::Auto => wgpu::CompositeAlphaMode::Auto, CompositeAlphaMode::Opaque => wgpu::CompositeAlphaMode::Opaque, CompositeAlphaMode::PreMultiplied => { wgpu::CompositeAlphaMode::PreMultiplied } CompositeAlphaMode::PostMultiplied => { wgpu::CompositeAlphaMode::PostMultiplied } CompositeAlphaMode::Inherit => wgpu::CompositeAlphaMode::Inherit, }, view_formats: match texture_view_format { Some(format) => vec![format], None => vec![], }, }; render_device.configure_surface(&surface, &configuration); SurfaceData { surface: WgpuWrapper::new(surface), configuration, texture_view_format, } }); if window.size_changed || window.present_mode_changed { // normally this is dropped on present but we double check here to be safe as failure to // drop it will cause validation errors in wgpu drop(window.swap_chain_texture.take()); #[cfg_attr( target_arch = "wasm32", expect(clippy::drop_non_drop, reason = "texture views are not drop on wasm") )] drop(window.swap_chain_texture_view.take()); data.configuration.width = window.physical_width; data.configuration.height = window.physical_height; let caps = data.surface.get_capabilities(&render_adapter); 
data.configuration.present_mode = present_mode(window, &caps); render_device.configure_surface(&data.surface, &data.configuration); } window_surfaces.configured_windows.insert(window.entity); } } fn present_mode( window: &mut ExtractedWindow, caps: &wgpu::SurfaceCapabilities, ) -> wgpu::PresentMode { let present_mode = match window.present_mode { PresentMode::Fifo => wgpu::PresentMode::Fifo, PresentMode::FifoRelaxed => wgpu::PresentMode::FifoRelaxed, PresentMode::Mailbox => wgpu::PresentMode::Mailbox, PresentMode::Immediate => wgpu::PresentMode::Immediate, PresentMode::AutoVsync => wgpu::PresentMode::AutoVsync, PresentMode::AutoNoVsync => wgpu::PresentMode::AutoNoVsync, }; let fallbacks = match present_mode { wgpu::PresentMode::AutoVsync => { &[wgpu::PresentMode::FifoRelaxed, wgpu::PresentMode::Fifo][..] } wgpu::PresentMode::AutoNoVsync => &[ wgpu::PresentMode::Immediate, wgpu::PresentMode::Mailbox, wgpu::PresentMode::Fifo, ][..], wgpu::PresentMode::Mailbox => &[ wgpu::PresentMode::Mailbox, wgpu::PresentMode::Immediate, wgpu::PresentMode::Fifo, ][..], // Always end in FIFO to make sure it's always supported x => &[x, wgpu::PresentMode::Fifo][..], }; let new_present_mode = fallbacks .iter() .copied() .find(|fallback| caps.present_modes.contains(fallback)) .unwrap_or_else(|| { unreachable!( "Fallback system failed to choose present mode. \ This is a bug. Mode: {:?}, Options: {:?}", window.present_mode, &caps.present_modes ); }); if new_present_mode != present_mode && fallbacks.contains(&present_mode) { info!("PresentMode {present_mode:?} requested but not available. Falling back to {new_present_mode:?}"); } new_present_mode }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/view/visibility/range.rs
crates/bevy_render/src/view/visibility/range.rs
//! Specific distances from the camera in which entities are visible, also known //! as *hierarchical levels of detail* or *HLOD*s. use super::VisibilityRange; use bevy_app::{App, Plugin}; use bevy_ecs::{ entity::Entity, lifecycle::RemovedComponents, query::Changed, resource::Resource, schedule::IntoScheduleConfigs as _, system::{Query, Res, ResMut}, }; use bevy_math::{vec4, Vec4}; use bevy_platform::collections::HashMap; use bevy_utils::prelude::default; use nonmax::NonMaxU16; use wgpu::{BufferBindingType, BufferUsages}; use crate::{ render_resource::BufferVec, renderer::{RenderDevice, RenderQueue}, sync_world::{MainEntity, MainEntityHashMap}, Extract, ExtractSchedule, Render, RenderApp, RenderSystems, }; /// We need at least 4 storage buffer bindings available to enable the /// visibility range buffer. /// /// Even though we only use one storage buffer, the first 3 available storage /// buffers will go to various light-related buffers. We will grab the fourth /// buffer slot. pub const VISIBILITY_RANGES_STORAGE_BUFFER_COUNT: u32 = 4; /// The size of the visibility ranges buffer in elements (not bytes) when fewer /// than 6 storage buffers are available and we're forced to use a uniform /// buffer instead (most notably, on WebGL 2). const VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE: usize = 64; /// A plugin that enables [`RenderVisibilityRanges`]s, which allow entities to be /// hidden or shown based on distance to the camera. pub struct RenderVisibilityRangePlugin; impl Plugin for RenderVisibilityRangePlugin { fn build(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app .init_resource::<RenderVisibilityRanges>() .add_systems(ExtractSchedule, extract_visibility_ranges) .add_systems( Render, write_render_visibility_ranges.in_set(RenderSystems::PrepareResourcesFlush), ); } } /// Stores information related to [`VisibilityRange`]s in the render world. 
#[derive(Resource)] pub struct RenderVisibilityRanges { /// Information corresponding to each entity. entities: MainEntityHashMap<RenderVisibilityEntityInfo>, /// Maps a [`VisibilityRange`] to its index within the `buffer`. /// /// This map allows us to deduplicate identical visibility ranges, which /// saves GPU memory. range_to_index: HashMap<VisibilityRange, NonMaxU16>, /// The GPU buffer that stores [`VisibilityRange`]s. /// /// Each [`Vec4`] contains the start margin start, start margin end, end /// margin start, and end margin end distances, in that order. buffer: BufferVec<Vec4>, /// True if the buffer has been changed since the last frame and needs to be /// reuploaded to the GPU. buffer_dirty: bool, } /// Per-entity information related to [`VisibilityRange`]s. struct RenderVisibilityEntityInfo { /// The index of the range within the GPU buffer. buffer_index: NonMaxU16, /// True if the range is abrupt: i.e. has no crossfade. is_abrupt: bool, } impl Default for RenderVisibilityRanges { fn default() -> Self { Self { entities: default(), range_to_index: default(), buffer: BufferVec::new( BufferUsages::STORAGE | BufferUsages::UNIFORM | BufferUsages::VERTEX, ), buffer_dirty: true, } } } impl RenderVisibilityRanges { /// Clears out the [`RenderVisibilityRanges`] in preparation for a new /// frame. fn clear(&mut self) { self.entities.clear(); self.range_to_index.clear(); self.buffer.clear(); self.buffer_dirty = true; } /// Inserts a new entity into the [`RenderVisibilityRanges`]. fn insert(&mut self, entity: MainEntity, visibility_range: &VisibilityRange) { // Grab a slot in the GPU buffer, or take the existing one if there // already is one. 
let buffer_index = *self .range_to_index .entry(visibility_range.clone()) .or_insert_with(|| { NonMaxU16::try_from(self.buffer.push(vec4( visibility_range.start_margin.start, visibility_range.start_margin.end, visibility_range.end_margin.start, visibility_range.end_margin.end, )) as u16) .unwrap_or_default() }); self.entities.insert( entity, RenderVisibilityEntityInfo { buffer_index, is_abrupt: visibility_range.is_abrupt(), }, ); } /// Returns the index in the GPU buffer corresponding to the visible range /// for the given entity. /// /// If the entity has no visible range, returns `None`. #[inline] pub fn lod_index_for_entity(&self, entity: MainEntity) -> Option<NonMaxU16> { self.entities.get(&entity).map(|info| info.buffer_index) } /// Returns true if the entity has a visibility range and it isn't abrupt: /// i.e. if it has a crossfade. #[inline] pub fn entity_has_crossfading_visibility_ranges(&self, entity: MainEntity) -> bool { self.entities .get(&entity) .is_some_and(|info| !info.is_abrupt) } /// Returns a reference to the GPU buffer that stores visibility ranges. #[inline] pub fn buffer(&self) -> &BufferVec<Vec4> { &self.buffer } } /// Extracts all [`VisibilityRange`] components from the main world to the /// render world and inserts them into [`RenderVisibilityRanges`]. pub fn extract_visibility_ranges( mut render_visibility_ranges: ResMut<RenderVisibilityRanges>, visibility_ranges_query: Extract<Query<(Entity, &VisibilityRange)>>, changed_ranges_query: Extract<Query<Entity, Changed<VisibilityRange>>>, mut removed_visibility_ranges: Extract<RemovedComponents<VisibilityRange>>, ) { if changed_ranges_query.is_empty() && removed_visibility_ranges.read().next().is_none() { return; } render_visibility_ranges.clear(); for (entity, visibility_range) in visibility_ranges_query.iter() { render_visibility_ranges.insert(entity.into(), visibility_range); } } /// Writes the [`RenderVisibilityRanges`] table to the GPU. 
pub fn write_render_visibility_ranges( render_device: Res<RenderDevice>, render_queue: Res<RenderQueue>, mut render_visibility_ranges: ResMut<RenderVisibilityRanges>, ) { // If there haven't been any changes, early out. if !render_visibility_ranges.buffer_dirty { return; } // Mess with the length of the buffer to meet API requirements if necessary. match render_device.get_supported_read_only_binding_type(VISIBILITY_RANGES_STORAGE_BUFFER_COUNT) { // If we're using a uniform buffer, we must have *exactly* // `VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE` elements. BufferBindingType::Uniform if render_visibility_ranges.buffer.len() > VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE => { render_visibility_ranges .buffer .truncate(VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE); } BufferBindingType::Uniform if render_visibility_ranges.buffer.len() < VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE => { while render_visibility_ranges.buffer.len() < VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE { render_visibility_ranges.buffer.push(default()); } } // Otherwise, if we're using a storage buffer, just ensure there's // something in the buffer, or else it won't get allocated. BufferBindingType::Storage { .. } if render_visibility_ranges.buffer.is_empty() => { render_visibility_ranges.buffer.push(default()); } _ => {} } // Schedule the write. render_visibility_ranges .buffer .write_buffer(&render_device, &render_queue); render_visibility_ranges.buffer_dirty = false; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/view/visibility/mod.rs
crates/bevy_render/src/view/visibility/mod.rs
use core::any::TypeId; use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_utils::TypeIdMap; use crate::sync_world::MainEntity; mod range; use bevy_camera::visibility::*; pub use range::*; /// Collection of entities visible from the current view. /// /// This component is extracted from [`VisibleEntities`]. #[derive(Clone, Component, Default, Debug, Reflect)] #[reflect(Component, Default, Debug, Clone)] pub struct RenderVisibleEntities { #[reflect(ignore, clone)] pub entities: TypeIdMap<Vec<(Entity, MainEntity)>>, } impl RenderVisibleEntities { pub fn get<QF>(&self) -> &[(Entity, MainEntity)] where QF: 'static, { match self.entities.get(&TypeId::of::<QF>()) { Some(entities) => &entities[..], None => &[], } } pub fn iter<QF>(&self) -> impl DoubleEndedIterator<Item = &(Entity, MainEntity)> where QF: 'static, { self.get::<QF>().iter() } pub fn len<QF>(&self) -> usize where QF: 'static, { self.get::<QF>().len() } pub fn is_empty<QF>(&self) -> bool where QF: 'static, { self.get::<QF>().is_empty() } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/fallback_image.rs
crates/bevy_render/src/texture/fallback_image.rs
use crate::{ render_resource::*, renderer::{RenderDevice, RenderQueue}, texture::{DefaultImageSampler, GpuImage}, }; use bevy_asset::RenderAssetUsages; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::{FromWorld, Res, ResMut}, resource::Resource, system::SystemParam, }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; use bevy_platform::collections::HashMap; /// A [`RenderApp`](crate::RenderApp) resource that contains the default "fallback image", /// which can be used in situations where an image was not explicitly defined. The most common /// use case is [`AsBindGroup`] implementations (such as materials) that support optional textures. /// /// Defaults to a 1x1 fully opaque white texture, (1.0, 1.0, 1.0, 1.0) which makes multiplying /// it with other colors a no-op. #[derive(Resource)] pub struct FallbackImage { /// Fallback image for [`TextureViewDimension::D1`]. pub d1: GpuImage, /// Fallback image for [`TextureViewDimension::D2`]. pub d2: GpuImage, /// Fallback image for [`TextureViewDimension::D2Array`]. pub d2_array: GpuImage, /// Fallback image for [`TextureViewDimension::Cube`]. pub cube: GpuImage, /// Fallback image for [`TextureViewDimension::CubeArray`]. pub cube_array: GpuImage, /// Fallback image for [`TextureViewDimension::D3`]. pub d3: GpuImage, } impl FallbackImage { /// Returns the appropriate fallback image for the given texture dimension. 
pub fn get(&self, texture_dimension: TextureViewDimension) -> &GpuImage { match texture_dimension { TextureViewDimension::D1 => &self.d1, TextureViewDimension::D2 => &self.d2, TextureViewDimension::D2Array => &self.d2_array, TextureViewDimension::Cube => &self.cube, TextureViewDimension::CubeArray => &self.cube_array, TextureViewDimension::D3 => &self.d3, } } } /// A [`RenderApp`](crate::RenderApp) resource that contains a _zero-filled_ "fallback image", /// which can be used in place of [`FallbackImage`], when a fully transparent or black fallback /// is required instead of fully opaque white. /// /// Defaults to a 1x1 fully transparent black texture, (0.0, 0.0, 0.0, 0.0) which makes adding /// or alpha-blending it to other colors a no-op. #[derive(Resource, Deref)] pub struct FallbackImageZero(GpuImage); /// A [`RenderApp`](crate::RenderApp) resource that contains a "cubemap fallback image", /// which can be used in situations where an image was not explicitly defined. The most common /// use case is [`AsBindGroup`] implementations (such as materials) that support optional textures. 
#[derive(Resource, Deref)] pub struct FallbackImageCubemap(GpuImage); fn fallback_image_new( render_device: &RenderDevice, render_queue: &RenderQueue, default_sampler: &DefaultImageSampler, format: TextureFormat, dimension: TextureViewDimension, samples: u32, value: u8, ) -> GpuImage { // TODO make this configurable per channel let extents = Extent3d { width: 1, height: 1, depth_or_array_layers: match dimension { TextureViewDimension::Cube | TextureViewDimension::CubeArray => 6, _ => 1, }, }; // We can't create textures with data when it's a depth texture or when using multiple samples let create_texture_with_data = !format.is_depth_stencil_format() && samples == 1; let image_dimension = dimension.compatible_texture_dimension(); let mut image = if create_texture_with_data { let data = vec![value; format.pixel_size().unwrap_or(0)]; Image::new_fill( extents, image_dimension, &data, format, RenderAssetUsages::RENDER_WORLD, ) } else { let mut image = Image::default_uninit(); image.texture_descriptor.dimension = TextureDimension::D2; image.texture_descriptor.size = extents; image.texture_descriptor.format = format; image }; image.texture_descriptor.sample_count = samples; if image_dimension == TextureDimension::D2 { image.texture_descriptor.usage |= TextureUsages::RENDER_ATTACHMENT; } let texture = if create_texture_with_data { render_device.create_texture_with_data( render_queue, &image.texture_descriptor, TextureDataOrder::default(), &image.data.expect("Image has no data"), ) } else { render_device.create_texture(&image.texture_descriptor) }; let texture_view = texture.create_view(&TextureViewDescriptor { dimension: Some(dimension), array_layer_count: Some(extents.depth_or_array_layers), ..TextureViewDescriptor::default() }); let sampler = match image.sampler { ImageSampler::Default => (**default_sampler).clone(), ImageSampler::Descriptor(ref descriptor) => { render_device.create_sampler(&descriptor.as_wgpu()) } }; GpuImage { texture, texture_view, texture_format: 
image.texture_descriptor.format, texture_view_format: image.texture_view_descriptor.and_then(|v| v.format), sampler, size: image.texture_descriptor.size, mip_level_count: image.texture_descriptor.mip_level_count, had_data: true, } } impl FromWorld for FallbackImage { fn from_world(world: &mut bevy_ecs::prelude::World) -> Self { let render_device = world.resource::<RenderDevice>(); let render_queue = world.resource::<RenderQueue>(); let default_sampler = world.resource::<DefaultImageSampler>(); Self { d1: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::D1, 1, 255, ), d2: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::D2, 1, 255, ), d2_array: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::D2Array, 1, 255, ), cube: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::Cube, 1, 255, ), cube_array: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::CubeArray, 1, 255, ), d3: fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::D3, 1, 255, ), } } } impl FromWorld for FallbackImageZero { fn from_world(world: &mut bevy_ecs::prelude::World) -> Self { let render_device = world.resource::<RenderDevice>(); let render_queue = world.resource::<RenderQueue>(); let default_sampler = world.resource::<DefaultImageSampler>(); Self(fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::D2, 1, 0, )) } } impl FromWorld for FallbackImageCubemap { fn from_world(world: &mut bevy_ecs::prelude::World) -> Self { let render_device = world.resource::<RenderDevice>(); let render_queue = world.resource::<RenderQueue>(); let 
default_sampler = world.resource::<DefaultImageSampler>(); Self(fallback_image_new( render_device, render_queue, default_sampler, TextureFormat::bevy_default(), TextureViewDimension::Cube, 1, 255, )) } } /// A Cache of fallback textures that uses the sample count and `TextureFormat` as a key /// /// # WARNING /// Images using MSAA with sample count > 1 are not initialized with data, therefore, /// you shouldn't sample them before writing data to them first. #[derive(Resource, Deref, DerefMut, Default)] pub struct FallbackImageFormatMsaaCache(HashMap<(u32, TextureFormat), GpuImage>); #[derive(SystemParam)] pub struct FallbackImageMsaa<'w> { cache: ResMut<'w, FallbackImageFormatMsaaCache>, render_device: Res<'w, RenderDevice>, render_queue: Res<'w, RenderQueue>, default_sampler: Res<'w, DefaultImageSampler>, } impl<'w> FallbackImageMsaa<'w> { pub fn image_for_samplecount(&mut self, sample_count: u32, format: TextureFormat) -> &GpuImage { self.cache.entry((sample_count, format)).or_insert_with(|| { fallback_image_new( &self.render_device, &self.render_queue, &self.default_sampler, format, TextureViewDimension::D2, sample_count, 255, ) }) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/texture_attachment.rs
crates/bevy_render/src/texture/texture_attachment.rs
use super::CachedTexture; use crate::render_resource::{TextureFormat, TextureView}; use alloc::sync::Arc; use bevy_color::LinearRgba; use core::sync::atomic::{AtomicBool, Ordering}; use wgpu::{ LoadOp, Operations, RenderPassColorAttachment, RenderPassDepthStencilAttachment, StoreOp, }; /// A wrapper for a [`CachedTexture`] that is used as a [`RenderPassColorAttachment`]. #[derive(Clone)] pub struct ColorAttachment { pub texture: CachedTexture, pub resolve_target: Option<CachedTexture>, pub previous_frame_texture: Option<CachedTexture>, clear_color: Option<LinearRgba>, is_first_call: Arc<AtomicBool>, } impl ColorAttachment { pub fn new( texture: CachedTexture, resolve_target: Option<CachedTexture>, previous_frame_texture: Option<CachedTexture>, clear_color: Option<LinearRgba>, ) -> Self { Self { texture, resolve_target, previous_frame_texture, clear_color, is_first_call: Arc::new(AtomicBool::new(true)), } } /// Get this texture view as an attachment. The attachment will be cleared with a value of /// `clear_color` if this is the first time calling this function, otherwise it will be loaded. /// /// The returned attachment will always have writing enabled (`store: StoreOp::Load`). pub fn get_attachment(&self) -> RenderPassColorAttachment<'_> { if let Some(resolve_target) = self.resolve_target.as_ref() { let first_call = self.is_first_call.fetch_and(false, Ordering::SeqCst); RenderPassColorAttachment { view: &resolve_target.default_view, depth_slice: None, resolve_target: Some(&self.texture.default_view), ops: Operations { load: match (self.clear_color, first_call) { (Some(clear_color), true) => LoadOp::Clear(clear_color.into()), (None, _) | (Some(_), false) => LoadOp::Load, }, store: StoreOp::Store, }, } } else { self.get_unsampled_attachment() } } /// Get this texture view as an attachment, without the resolve target. The attachment will be cleared with /// a value of `clear_color` if this is the first time calling this function, otherwise it will be loaded. 
/// /// The returned attachment will always have writing enabled (`store: StoreOp::Load`). pub fn get_unsampled_attachment(&self) -> RenderPassColorAttachment<'_> { let first_call = self.is_first_call.fetch_and(false, Ordering::SeqCst); RenderPassColorAttachment { view: &self.texture.default_view, depth_slice: None, resolve_target: None, ops: Operations { load: match (self.clear_color, first_call) { (Some(clear_color), true) => LoadOp::Clear(clear_color.into()), (None, _) | (Some(_), false) => LoadOp::Load, }, store: StoreOp::Store, }, } } pub(crate) fn mark_as_cleared(&self) { self.is_first_call.store(false, Ordering::SeqCst); } } /// A wrapper for a [`TextureView`] that is used as a depth-only [`RenderPassDepthStencilAttachment`]. #[derive(Clone)] pub struct DepthAttachment { pub view: TextureView, clear_value: Option<f32>, is_first_call: Arc<AtomicBool>, } impl DepthAttachment { pub fn new(view: TextureView, clear_value: Option<f32>) -> Self { Self { view, clear_value, is_first_call: Arc::new(AtomicBool::new(clear_value.is_some())), } } /// Get this texture view as an attachment. The attachment will be cleared with a value of /// `clear_value` if this is the first time calling this function with `store` == [`StoreOp::Store`], /// and a clear value was provided, otherwise it will be loaded. pub fn get_attachment(&self, store: StoreOp) -> RenderPassDepthStencilAttachment<'_> { let first_call = self .is_first_call .fetch_and(store != StoreOp::Store, Ordering::SeqCst); RenderPassDepthStencilAttachment { view: &self.view, depth_ops: Some(Operations { load: if first_call { // If first_call is true, then a clear value will always have been provided in the constructor LoadOp::Clear(self.clear_value.unwrap()) } else { LoadOp::Load }, store, }), stencil_ops: None, } } } /// A wrapper for a [`TextureView`] that is used as a [`RenderPassColorAttachment`] for a view /// target's final output texture. 
#[derive(Clone)] pub struct OutputColorAttachment { pub view: TextureView, pub view_format: TextureFormat, is_first_call: Arc<AtomicBool>, } impl OutputColorAttachment { pub fn new(view: TextureView, view_format: TextureFormat) -> Self { Self { view, view_format, is_first_call: Arc::new(AtomicBool::new(true)), } } /// Get this texture view as an attachment. The attachment will be cleared with a value of /// the provided `clear_color` if this is the first time calling this function, otherwise it /// will be loaded. pub fn get_attachment(&self, clear_color: Option<LinearRgba>) -> RenderPassColorAttachment<'_> { let first_call = self.is_first_call.fetch_and(false, Ordering::SeqCst); RenderPassColorAttachment { view: &self.view, depth_slice: None, resolve_target: None, ops: Operations { load: match (clear_color, first_call) { (Some(clear_color), true) => LoadOp::Clear(clear_color.into()), (None, _) | (Some(_), false) => LoadOp::Load, }, store: StoreOp::Store, }, } } /// Returns `true` if this attachment has been written to by a render pass. // we re-use is_first_call atomic to track usage, which assumes that calls to get_attachment // are always consumed by a render pass that writes to the attachment pub fn needs_present(&self) -> bool { !self.is_first_call.load(Ordering::SeqCst) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/manual_texture_view.rs
crates/bevy_render/src/texture/manual_texture_view.rs
use bevy_camera::ManualTextureViewHandle; use bevy_ecs::{prelude::Component, resource::Resource}; use bevy_image::BevyDefault; use bevy_math::UVec2; use bevy_platform::collections::HashMap; use bevy_render_macros::ExtractResource; use wgpu::TextureFormat; use crate::render_resource::TextureView; /// A manually managed [`TextureView`] for use as a [`bevy_camera::RenderTarget`]. #[derive(Debug, Clone, Component)] pub struct ManualTextureView { pub texture_view: TextureView, pub size: UVec2, pub view_format: TextureFormat, } impl ManualTextureView { pub fn with_default_format(texture_view: TextureView, size: UVec2) -> Self { Self { texture_view, size, view_format: TextureFormat::bevy_default(), } } } /// Resource that stores manually managed [`ManualTextureView`]s for use as a [`RenderTarget`](bevy_camera::RenderTarget). /// This type dereferences to a `HashMap<ManualTextureViewHandle, ManualTextureView>`. /// To add a new texture view, pick a new [`ManualTextureViewHandle`] and insert it into the map. /// Then, to render to the view, set a [`Camera`](bevy_camera::Camera)s `target` to `RenderTarget::TextureView(handle)`. /// ```ignore /// # use bevy_ecs::prelude::*; /// # let mut world = World::default(); /// # world.insert_resource(ManualTextureViews::default()); /// # let texture_view = todo!(); /// let manual_views = world.resource_mut::<ManualTextureViews>(); /// let manual_view = ManualTextureView::with_default_format(texture_view, UVec2::new(1024, 1024)); /// /// // Choose an unused handle value; it's likely only you are inserting manual views. 
/// const MANUAL_VIEW_HANDLE: ManualTextureViewHandle = ManualTextureViewHandle::new(42); /// manual_views.insert(MANUAL_VIEW_HANDLE, manual_view); /// /// // Now you can spawn a Camera that renders to the manual view: /// # use bevy_camera::{Camera, RenderTarget}; /// world.spawn(Camera { /// target: RenderTarget::TextureView(MANUAL_VIEW_HANDLE), /// ..Default::default() /// }); /// ``` /// Bevy will then use the `ManualTextureViews` resource to find your texture view and render to it. #[derive(Default, Clone, Resource, ExtractResource)] pub struct ManualTextureViews(HashMap<ManualTextureViewHandle, ManualTextureView>); impl core::ops::Deref for ManualTextureViews { type Target = HashMap<ManualTextureViewHandle, ManualTextureView>; fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for ManualTextureViews { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/mod.rs
crates/bevy_render/src/texture/mod.rs
mod fallback_image; mod gpu_image; mod manual_texture_view; mod texture_attachment; mod texture_cache; pub use crate::render_resource::DefaultImageSampler; use bevy_image::{CompressedImageFormatSupport, CompressedImageFormats, ImageLoader, ImagePlugin}; pub use fallback_image::*; pub use gpu_image::*; pub use manual_texture_view::*; pub use texture_attachment::*; pub use texture_cache::*; use crate::{ extract_resource::ExtractResourcePlugin, render_asset::RenderAssetPlugin, renderer::RenderDevice, Render, RenderApp, RenderSystems, }; use bevy_app::{App, Plugin}; use bevy_asset::AssetApp; use bevy_ecs::prelude::*; use tracing::warn; #[derive(Default)] pub struct TexturePlugin; impl Plugin for TexturePlugin { fn build(&self, app: &mut App) { app.add_plugins(( RenderAssetPlugin::<GpuImage>::default(), ExtractResourcePlugin::<ManualTextureViews>::default(), )) .init_resource::<ManualTextureViews>(); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.init_resource::<TextureCache>().add_systems( Render, update_texture_cache_system.in_set(RenderSystems::Cleanup), ); } } fn finish(&self, app: &mut App) { if !ImageLoader::SUPPORTED_FORMATS.is_empty() { let supported_compressed_formats = if let Some(resource) = app.world().get_resource::<CompressedImageFormatSupport>() { resource.0 } else { warn!("CompressedImageFormatSupport resource not found. 
It should either be initialized in finish() of \ RenderPlugin, or manually if not using the RenderPlugin or the WGPU backend."); CompressedImageFormats::NONE }; app.register_asset_loader(ImageLoader::new(supported_compressed_formats)); } let default_sampler = app.get_added_plugins::<ImagePlugin>()[0] .default_sampler .clone(); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { let default_sampler = { let device = render_app.world().resource::<RenderDevice>(); device.create_sampler(&default_sampler.as_wgpu()) }; render_app .insert_resource(DefaultImageSampler(default_sampler)) .init_resource::<FallbackImage>() .init_resource::<FallbackImageZero>() .init_resource::<FallbackImageCubemap>() .init_resource::<FallbackImageFormatMsaaCache>(); } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/gpu_image.rs
crates/bevy_render/src/texture/gpu_image.rs
use crate::{ render_asset::{AssetExtractionError, PrepareAssetError, RenderAsset}, render_resource::{DefaultImageSampler, Sampler, Texture, TextureView}, renderer::{RenderDevice, RenderQueue}, }; use bevy_asset::{AssetId, RenderAssetUsages}; use bevy_ecs::system::{lifetimeless::SRes, SystemParamItem}; use bevy_image::{Image, ImageSampler}; use bevy_math::{AspectRatio, UVec2}; use tracing::warn; use wgpu::{Extent3d, TextureFormat, TextureViewDescriptor}; /// The GPU-representation of an [`Image`]. /// Consists of the [`Texture`], its [`TextureView`] and the corresponding [`Sampler`], and the texture's size. #[derive(Debug, Clone)] pub struct GpuImage { pub texture: Texture, pub texture_view: TextureView, pub texture_format: TextureFormat, pub texture_view_format: Option<TextureFormat>, pub sampler: Sampler, pub size: Extent3d, pub mip_level_count: u32, pub had_data: bool, } impl RenderAsset for GpuImage { type SourceAsset = Image; type Param = ( SRes<RenderDevice>, SRes<RenderQueue>, SRes<DefaultImageSampler>, ); #[inline] fn asset_usage(image: &Self::SourceAsset) -> RenderAssetUsages { image.asset_usage } fn take_gpu_data( source: &mut Self::SourceAsset, previous_gpu_asset: Option<&Self>, ) -> Result<Self::SourceAsset, AssetExtractionError> { let data = source.data.take(); // check if this image originally had data and no longer does, that implies it // has already been extracted let valid_upload = data.is_some() || previous_gpu_asset.is_none_or(|prev| !prev.had_data); valid_upload .then(|| Self::SourceAsset { data, ..source.clone() }) .ok_or(AssetExtractionError::AlreadyExtracted) } #[inline] fn byte_len(image: &Self::SourceAsset) -> Option<usize> { image.data.as_ref().map(Vec::len) } /// Converts the extracted image into a [`GpuImage`]. 
    fn prepare_asset(
        image: Self::SourceAsset,
        _: AssetId<Self::SourceAsset>,
        (render_device, render_queue, default_sampler): &mut SystemParamItem<Self::Param>,
        previous_asset: Option<&Self>,
    ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
        // Remember whether CPU-side data was present; `take_gpu_data` consults
        // this on a later extraction to detect double extraction.
        let had_data = image.data.is_some();

        let texture = if let Some(ref data) = image.data {
            // Upload path: allocate the texture and write the pixel data in one call.
            render_device.create_texture_with_data(
                render_queue,
                &image.texture_descriptor,
                image.data_order,
                data,
            )
        } else {
            // No CPU-side data: allocate an uninitialized texture (contents are
            // expected to be produced elsewhere, e.g. rendered into on the GPU).
            let new_texture = render_device.create_texture(&image.texture_descriptor);
            if image.copy_on_resize {
                if let Some(previous) = previous_asset {
                    // Preserve old contents across a resize by copying the
                    // overlapping region from the previous GPU texture.
                    let mut command_encoder =
                        render_device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                            label: Some("copy_image_on_resize"),
                        });
                    // Clamp the copy extent to the smaller of the old and new
                    // sizes on every axis, as required by copy_texture_to_texture.
                    let copy_size = Extent3d {
                        width: image.texture_descriptor.size.width.min(previous.size.width),
                        height: image
                            .texture_descriptor
                            .size
                            .height
                            .min(previous.size.height),
                        depth_or_array_layers: image
                            .texture_descriptor
                            .size
                            .depth_or_array_layers
                            .min(previous.size.depth_or_array_layers),
                    };
                    command_encoder.copy_texture_to_texture(
                        previous.texture.as_image_copy(),
                        new_texture.as_image_copy(),
                        copy_size,
                    );
                    render_queue.submit([command_encoder.finish()]);
                } else {
                    warn!("No previous asset to copy from for image: {:?}", image);
                }
            }
            new_texture
        };

        // Build the view from the image's descriptor, falling back to defaults.
        let texture_view = texture.create_view(
            image
                .texture_view_descriptor
                .as_ref()
                .unwrap_or(&TextureViewDescriptor::default()),
        );
        let sampler = match image.sampler {
            ImageSampler::Default => (***default_sampler).clone(),
            ImageSampler::Descriptor(descriptor) => {
                render_device.create_sampler(&descriptor.as_wgpu())
            }
        };

        Ok(GpuImage {
            texture,
            texture_view,
            texture_format: image.texture_descriptor.format,
            texture_view_format: image.texture_view_descriptor.and_then(|v| v.format),
            sampler,
            size: image.texture_descriptor.size,
            mip_level_count: image.texture_descriptor.mip_level_count,
            had_data,
        })
    }
}

impl GpuImage {
    /// Returns the aspect ratio (width / height) of a 2D image.
#[inline] pub fn aspect_ratio(&self) -> AspectRatio { AspectRatio::try_from_pixels(self.size.width, self.size.height).expect( "Failed to calculate aspect ratio: Image dimensions must be positive, non-zero values", ) } /// Returns the size of a 2D image. #[inline] pub fn size_2d(&self) -> UVec2 { UVec2::new(self.size.width, self.size.height) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/texture/texture_cache.rs
crates/bevy_render/src/texture/texture_cache.rs
use crate::{
    render_resource::{Texture, TextureView},
    renderer::RenderDevice,
};
use bevy_ecs::{prelude::ResMut, resource::Resource};
use bevy_platform::collections::{hash_map::Entry, HashMap};
use wgpu::{TextureDescriptor, TextureViewDescriptor};

/// The internal representation of a [`CachedTexture`] used to track whether it was recently used
/// and is currently taken.
struct CachedTextureMeta {
    /// The cached GPU texture itself.
    texture: Texture,
    /// A default view over the whole texture, created alongside it.
    default_view: TextureView,
    /// Whether this texture has been handed out and not yet released this frame.
    taken: bool,
    /// Number of cache updates (frames) since this texture was last handed out;
    /// entries are evicted once this grows too large.
    frames_since_last_use: usize,
}

/// A cached GPU [`Texture`] with corresponding [`TextureView`].
///
/// This is useful for textures that are created repeatedly (each frame) in the rendering process
/// to reduce the amount of GPU memory allocations.
#[derive(Clone)]
pub struct CachedTexture {
    pub texture: Texture,
    pub default_view: TextureView,
}

/// This resource caches textures that are created repeatedly in the rendering process and
/// are only required for one frame.
#[derive(Resource, Default)]
pub struct TextureCache {
    /// All cached textures, bucketed by descriptor: textures sharing a
    /// descriptor are interchangeable and stored in the same `Vec`.
    textures: HashMap<TextureDescriptor<'static>, Vec<CachedTextureMeta>>,
}

impl TextureCache {
    /// Retrieves a texture that matches the `descriptor`. If no matching one is found a new
    /// [`CachedTexture`] is created.
pub fn get( &mut self, render_device: &RenderDevice, descriptor: TextureDescriptor<'static>, ) -> CachedTexture { match self.textures.entry(descriptor) { Entry::Occupied(mut entry) => { for texture in entry.get_mut().iter_mut() { if !texture.taken { texture.frames_since_last_use = 0; texture.taken = true; return CachedTexture { texture: texture.texture.clone(), default_view: texture.default_view.clone(), }; } } let texture = render_device.create_texture(&entry.key().clone()); let default_view = texture.create_view(&TextureViewDescriptor::default()); entry.get_mut().push(CachedTextureMeta { texture: texture.clone(), default_view: default_view.clone(), frames_since_last_use: 0, taken: true, }); CachedTexture { texture, default_view, } } Entry::Vacant(entry) => { let texture = render_device.create_texture(entry.key()); let default_view = texture.create_view(&TextureViewDescriptor::default()); entry.insert(vec![CachedTextureMeta { texture: texture.clone(), default_view: default_view.clone(), taken: true, frames_since_last_use: 0, }]); CachedTexture { texture, default_view, } } } } /// Returns `true` if the texture cache contains no textures. pub fn is_empty(&self) -> bool { self.textures.is_empty() } /// Updates the cache and only retains recently used textures. pub fn update(&mut self) { self.textures.retain(|_, textures| { for texture in textures.iter_mut() { texture.frames_since_last_use += 1; texture.taken = false; } textures.retain(|texture| texture.frames_since_last_use < 3); !textures.is_empty() }); } } /// Updates the [`TextureCache`] to only retains recently used textures. pub fn update_texture_cache_system(mut texture_cache: ResMut<TextureCache>) { texture_cache.update(); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/batching/gpu_preprocessing.rs
crates/bevy_render/src/batching/gpu_preprocessing.rs
//! Batching functionality when GPU preprocessing is in use. use core::{any::TypeId, marker::PhantomData, mem}; use bevy_app::{App, Plugin}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::Entity, query::{Has, With}, resource::Resource, schedule::IntoScheduleConfigs as _, system::{Query, Res, ResMut, StaticSystemParam}, world::{FromWorld, World}, }; use bevy_encase_derive::ShaderType; use bevy_math::UVec4; use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_utils::{default, TypeIdMap}; use bytemuck::{Pod, Zeroable}; use encase::{internal::WriteInto, ShaderSize}; use indexmap::IndexMap; use nonmax::NonMaxU32; use tracing::{error, info}; use wgpu::{BindingResource, BufferUsages, DownlevelFlags, Features}; use crate::{ experimental::occlusion_culling::OcclusionCulling, render_phase::{ BinnedPhaseItem, BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSet, BinnedRenderPhaseBatchSets, CachedRenderPipelinePhaseItem, PhaseItem, PhaseItemBatchSetKey as _, PhaseItemExtraIndex, RenderBin, SortedPhaseItem, SortedRenderPhase, UnbatchableBinnedEntityIndices, ViewBinnedRenderPhases, ViewSortedRenderPhases, }, render_resource::{Buffer, GpuArrayBufferable, RawBufferVec, UninitBufferVec}, renderer::{RenderAdapter, RenderAdapterInfo, RenderDevice, RenderQueue, WgpuWrapper}, sync_world::MainEntity, view::{ExtractedView, NoIndirectDrawing, RetainedViewEntity}, Render, RenderApp, RenderDebugFlags, RenderSystems, }; use super::{BatchMeta, GetBatchData, GetFullBatchData}; #[derive(Default)] pub struct BatchingPlugin { /// Debugging flags that can optionally be set when constructing the renderer. 
pub debug_flags: RenderDebugFlags, } impl Plugin for BatchingPlugin { fn build(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app .insert_resource(IndirectParametersBuffers::new( self.debug_flags .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), )) .add_systems( Render, write_indirect_parameters_buffers.in_set(RenderSystems::PrepareResourcesFlush), ) .add_systems( Render, clear_indirect_parameters_buffers.in_set(RenderSystems::ManageViews), ); } fn finish(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app.init_resource::<GpuPreprocessingSupport>(); } } /// Records whether GPU preprocessing and/or GPU culling are supported on the /// device. /// /// No GPU preprocessing is supported on WebGL because of the lack of compute /// shader support. GPU preprocessing is supported on DirectX 12, but due to [a /// `wgpu` limitation] GPU culling is not. /// /// [a `wgpu` limitation]: https://github.com/gfx-rs/wgpu/issues/2471 #[derive(Clone, Copy, PartialEq, Resource)] pub struct GpuPreprocessingSupport { /// The maximum amount of GPU preprocessing available on this platform. pub max_supported_mode: GpuPreprocessingMode, } impl GpuPreprocessingSupport { /// Returns true if this GPU preprocessing support level isn't `None`. #[inline] pub fn is_available(&self) -> bool { self.max_supported_mode != GpuPreprocessingMode::None } /// Returns the given GPU preprocessing mode, capped to the current /// preprocessing mode. 
    pub fn min(&self, mode: GpuPreprocessingMode) -> GpuPreprocessingMode {
        // Computes the lattice minimum over None < PreprocessingOnly < Culling.
        match (self.max_supported_mode, mode) {
            // If either side disables preprocessing entirely, so does the result.
            (GpuPreprocessingMode::None, _) | (_, GpuPreprocessingMode::None) => {
                GpuPreprocessingMode::None
            }
            // `Culling` is the maximum mode, so the other operand wins.
            (mode, GpuPreprocessingMode::Culling) | (GpuPreprocessingMode::Culling, mode) => mode,
            (GpuPreprocessingMode::PreprocessingOnly, GpuPreprocessingMode::PreprocessingOnly) => {
                GpuPreprocessingMode::PreprocessingOnly
            }
        }
    }

    /// Returns true if GPU culling is supported on this platform.
    pub fn is_culling_supported(&self) -> bool {
        self.max_supported_mode == GpuPreprocessingMode::Culling
    }
}

/// The amount of GPU preprocessing (compute and indirect draw) that we do.
#[derive(Clone, Copy, PartialEq)]
pub enum GpuPreprocessingMode {
    /// No GPU preprocessing is in use at all.
    ///
    /// This is used when GPU compute isn't available.
    None,

    /// GPU preprocessing is in use, but GPU culling isn't.
    ///
    /// This is used when the [`NoIndirectDrawing`] component is present on the
    /// camera.
    PreprocessingOnly,

    /// Both GPU preprocessing and GPU culling are in use.
    ///
    /// This is used by default.
    Culling,
}

/// The GPU buffers holding the data needed to render batches.
///
/// For example, in the 3D PBR pipeline this holds `MeshUniform`s, which are the
/// `BD` type parameter in that mode.
///
/// We have a separate *buffer data input* type (`BDI`) here, which a compute
/// shader is expected to expand to the full buffer data (`BD`) type. GPU
/// uniform building is generally faster and uses less system RAM to VRAM bus
/// bandwidth, but only implemented for some pipelines (for example, not in the
/// 2D pipeline at present) and only when compute shader is available.
#[derive(Resource)]
pub struct BatchedInstanceBuffers<BD, BDI>
where
    BD: GpuArrayBufferable + Sync + Send + 'static,
    BDI: Pod + Default,
{
    /// The uniform data inputs for the current frame.
    ///
    /// These are uploaded during the extraction phase.
pub current_input_buffer: InstanceInputUniformBuffer<BDI>, /// The uniform data inputs for the previous frame. /// /// The indices don't generally line up between `current_input_buffer` /// and `previous_input_buffer`, because, among other reasons, entities /// can spawn or despawn between frames. Instead, each current buffer /// data input uniform is expected to contain the index of the /// corresponding buffer data input uniform in this list. pub previous_input_buffer: InstanceInputUniformBuffer<BDI>, /// The data needed to render buffers for each phase. /// /// The keys of this map are the type IDs of each phase: e.g. `Opaque3d`, /// `AlphaMask3d`, etc. pub phase_instance_buffers: TypeIdMap<UntypedPhaseBatchedInstanceBuffers<BD>>, } impl<BD, BDI> Default for BatchedInstanceBuffers<BD, BDI> where BD: GpuArrayBufferable + Sync + Send + 'static, BDI: Pod + Sync + Send + Default + 'static, { fn default() -> Self { BatchedInstanceBuffers { current_input_buffer: InstanceInputUniformBuffer::new(), previous_input_buffer: InstanceInputUniformBuffer::new(), phase_instance_buffers: HashMap::default(), } } } /// The GPU buffers holding the data needed to render batches for a single /// phase. /// /// These are split out per phase so that we can run the phases in parallel. /// This is the version of the structure that has a type parameter, which /// enables Bevy's scheduler to run the batching operations for the different /// phases in parallel. /// /// See the documentation for [`BatchedInstanceBuffers`] for more information. #[derive(Resource)] pub struct PhaseBatchedInstanceBuffers<PI, BD> where PI: PhaseItem, BD: GpuArrayBufferable + Sync + Send + 'static, { /// The buffers for this phase. 
pub buffers: UntypedPhaseBatchedInstanceBuffers<BD>, phantom: PhantomData<PI>, } impl<PI, BD> Default for PhaseBatchedInstanceBuffers<PI, BD> where PI: PhaseItem, BD: GpuArrayBufferable + Sync + Send + 'static, { fn default() -> Self { PhaseBatchedInstanceBuffers { buffers: UntypedPhaseBatchedInstanceBuffers::default(), phantom: PhantomData, } } } /// The GPU buffers holding the data needed to render batches for a single /// phase, without a type parameter for that phase. /// /// Since this structure doesn't have a type parameter, it can be placed in /// [`BatchedInstanceBuffers::phase_instance_buffers`]. pub struct UntypedPhaseBatchedInstanceBuffers<BD> where BD: GpuArrayBufferable + Sync + Send + 'static, { /// A storage area for the buffer data that the GPU compute shader is /// expected to write to. /// /// There will be one entry for each index. pub data_buffer: UninitBufferVec<BD>, /// The index of the buffer data in the current input buffer that /// corresponds to each instance. /// /// This is keyed off each view. Each view has a separate buffer. pub work_item_buffers: HashMap<RetainedViewEntity, PreprocessWorkItemBuffers>, /// A buffer that holds the number of indexed meshes that weren't visible in /// the previous frame, when GPU occlusion culling is in use. /// /// There's one set of [`LatePreprocessWorkItemIndirectParameters`] per /// view. Bevy uses this value to determine how many threads to dispatch to /// check meshes that weren't visible next frame to see if they became newly /// visible this frame. pub late_indexed_indirect_parameters_buffer: RawBufferVec<LatePreprocessWorkItemIndirectParameters>, /// A buffer that holds the number of non-indexed meshes that weren't /// visible in the previous frame, when GPU occlusion culling is in use. /// /// There's one set of [`LatePreprocessWorkItemIndirectParameters`] per /// view. 
Bevy uses this value to determine how many threads to dispatch to /// check meshes that weren't visible next frame to see if they became newly /// visible this frame. pub late_non_indexed_indirect_parameters_buffer: RawBufferVec<LatePreprocessWorkItemIndirectParameters>, } /// Holds the GPU buffer of instance input data, which is the data about each /// mesh instance that the CPU provides. /// /// `BDI` is the *buffer data input* type, which the GPU mesh preprocessing /// shader is expected to expand to the full *buffer data* type. pub struct InstanceInputUniformBuffer<BDI> where BDI: Pod + Default, { /// The buffer containing the data that will be uploaded to the GPU. buffer: RawBufferVec<BDI>, /// Indices of slots that are free within the buffer. /// /// When adding data, we preferentially overwrite these slots first before /// growing the buffer itself. free_uniform_indices: Vec<u32>, } impl<BDI> InstanceInputUniformBuffer<BDI> where BDI: Pod + Default, { /// Creates a new, empty buffer. pub fn new() -> InstanceInputUniformBuffer<BDI> { InstanceInputUniformBuffer { buffer: RawBufferVec::new(BufferUsages::STORAGE), free_uniform_indices: vec![], } } /// Clears the buffer and entity list out. pub fn clear(&mut self) { self.buffer.clear(); self.free_uniform_indices.clear(); } /// Returns the [`RawBufferVec`] corresponding to this input uniform buffer. #[inline] pub fn buffer(&self) -> &RawBufferVec<BDI> { &self.buffer } /// Adds a new piece of buffered data to the uniform buffer and returns its /// index. pub fn add(&mut self, element: BDI) -> u32 { match self.free_uniform_indices.pop() { Some(uniform_index) => { self.buffer.values_mut()[uniform_index as usize] = element; uniform_index } None => self.buffer.push(element) as u32, } } /// Removes a piece of buffered data from the uniform buffer. /// /// This simply marks the data as free. 
pub fn remove(&mut self, uniform_index: u32) { self.free_uniform_indices.push(uniform_index); } /// Returns the piece of buffered data at the given index. /// /// Returns [`None`] if the index is out of bounds or the data is removed. pub fn get(&self, uniform_index: u32) -> Option<BDI> { if (uniform_index as usize) >= self.buffer.len() || self.free_uniform_indices.contains(&uniform_index) { None } else { Some(self.get_unchecked(uniform_index)) } } /// Returns the piece of buffered data at the given index. /// Can return data that has previously been removed. /// /// # Panics /// if `uniform_index` is not in bounds of [`Self::buffer`]. pub fn get_unchecked(&self, uniform_index: u32) -> BDI { self.buffer.values()[uniform_index as usize] } /// Stores a piece of buffered data at the given index. /// /// # Panics /// if `uniform_index` is not in bounds of [`Self::buffer`]. pub fn set(&mut self, uniform_index: u32, element: BDI) { self.buffer.values_mut()[uniform_index as usize] = element; } // Ensures that the buffers are nonempty, which the GPU requires before an // upload can take place. pub fn ensure_nonempty(&mut self) { if self.buffer.is_empty() { self.buffer.push(default()); } } /// Returns the number of instances in this buffer. pub fn len(&self) -> usize { self.buffer.len() } /// Returns true if this buffer has no instances or false if it contains any /// instances. pub fn is_empty(&self) -> bool { self.buffer.is_empty() } /// Consumes this [`InstanceInputUniformBuffer`] and returns the raw buffer /// ready to be uploaded to the GPU. pub fn into_buffer(self) -> RawBufferVec<BDI> { self.buffer } } impl<BDI> Default for InstanceInputUniformBuffer<BDI> where BDI: Pod + Default, { fn default() -> Self { Self::new() } } /// The buffer of GPU preprocessing work items for a single view. 
#[cfg_attr( not(target_arch = "wasm32"), expect( clippy::large_enum_variant, reason = "See https://github.com/bevyengine/bevy/issues/19220" ) )] pub enum PreprocessWorkItemBuffers { /// The work items we use if we aren't using indirect drawing. /// /// Because we don't have to separate indexed from non-indexed meshes in /// direct mode, we only have a single buffer here. Direct(RawBufferVec<PreprocessWorkItem>), /// The buffer of work items we use if we are using indirect drawing. /// /// We need to separate out indexed meshes from non-indexed meshes in this /// case because the indirect parameters for these two types of meshes have /// different sizes. Indirect { /// The buffer of work items corresponding to indexed meshes. indexed: RawBufferVec<PreprocessWorkItem>, /// The buffer of work items corresponding to non-indexed meshes. non_indexed: RawBufferVec<PreprocessWorkItem>, /// The work item buffers we use when GPU occlusion culling is in use. gpu_occlusion_culling: Option<GpuOcclusionCullingWorkItemBuffers>, }, } /// The work item buffers we use when GPU occlusion culling is in use. pub struct GpuOcclusionCullingWorkItemBuffers { /// The buffer of work items corresponding to indexed meshes. pub late_indexed: UninitBufferVec<PreprocessWorkItem>, /// The buffer of work items corresponding to non-indexed meshes. pub late_non_indexed: UninitBufferVec<PreprocessWorkItem>, /// The offset into the /// [`UntypedPhaseBatchedInstanceBuffers::late_indexed_indirect_parameters_buffer`] /// where this view's indirect dispatch counts for indexed meshes live. pub late_indirect_parameters_indexed_offset: u32, /// The offset into the /// [`UntypedPhaseBatchedInstanceBuffers::late_non_indexed_indirect_parameters_buffer`] /// where this view's indirect dispatch counts for non-indexed meshes live. pub late_indirect_parameters_non_indexed_offset: u32, } /// A GPU-side data structure that stores the number of workgroups to dispatch /// for the second phase of GPU occlusion culling. 
///
/// The late mesh preprocessing phase checks meshes that weren't visible last
/// frame to determine if they're potentially visible this frame.
#[derive(Clone, Copy, ShaderType, Pod, Zeroable)]
#[repr(C)]
pub struct LatePreprocessWorkItemIndirectParameters {
    /// The number of workgroups to dispatch.
    ///
    /// This will be equal to `work_item_count / 64`, rounded *up*.
    dispatch_x: u32,
    /// The number of workgroups along the abstract Y axis to dispatch: always
    /// 1.
    dispatch_y: u32,
    /// The number of workgroups along the abstract Z axis to dispatch: always
    /// 1.
    dispatch_z: u32,
    /// The actual number of work items.
    ///
    /// The GPU indirect dispatch doesn't read this, but it's used internally to
    /// determine the actual number of work items that exist in the late
    /// preprocessing work item buffer.
    work_item_count: u32,
    /// Padding to 64-byte boundaries for some hardware.
    pad: UVec4,
}

impl Default for LatePreprocessWorkItemIndirectParameters {
    fn default() -> LatePreprocessWorkItemIndirectParameters {
        LatePreprocessWorkItemIndirectParameters {
            // Zero work: nothing to dispatch until the GPU fills this in.
            dispatch_x: 0,
            dispatch_y: 1,
            dispatch_z: 1,
            work_item_count: 0,
            pad: default(),
        }
    }
}

/// Returns the set of work item buffers for the given view, first creating it
/// if necessary.
///
/// Bevy uses work item buffers to tell the mesh preprocessing compute shader
/// which meshes are to be drawn.
///
/// You may need to call this function if you're implementing your own custom
/// render phases. See the `specialized_mesh_pipeline` example.
pub fn get_or_create_work_item_buffer<'a, I>( work_item_buffers: &'a mut HashMap<RetainedViewEntity, PreprocessWorkItemBuffers>, view: RetainedViewEntity, no_indirect_drawing: bool, enable_gpu_occlusion_culling: bool, ) -> &'a mut PreprocessWorkItemBuffers where I: 'static, { let preprocess_work_item_buffers = match work_item_buffers.entry(view) { Entry::Occupied(occupied_entry) => occupied_entry.into_mut(), Entry::Vacant(vacant_entry) => { if no_indirect_drawing { vacant_entry.insert(PreprocessWorkItemBuffers::Direct(RawBufferVec::new( BufferUsages::STORAGE, ))) } else { vacant_entry.insert(PreprocessWorkItemBuffers::Indirect { indexed: RawBufferVec::new(BufferUsages::STORAGE), non_indexed: RawBufferVec::new(BufferUsages::STORAGE), // We fill this in below if `enable_gpu_occlusion_culling` // is set. gpu_occlusion_culling: None, }) } } }; // Initialize the GPU occlusion culling buffers if necessary. if let PreprocessWorkItemBuffers::Indirect { ref mut gpu_occlusion_culling, .. } = *preprocess_work_item_buffers { match ( enable_gpu_occlusion_culling, gpu_occlusion_culling.is_some(), ) { (false, false) | (true, true) => {} (false, true) => { *gpu_occlusion_culling = None; } (true, false) => { *gpu_occlusion_culling = Some(GpuOcclusionCullingWorkItemBuffers { late_indexed: UninitBufferVec::new(BufferUsages::STORAGE), late_non_indexed: UninitBufferVec::new(BufferUsages::STORAGE), late_indirect_parameters_indexed_offset: 0, late_indirect_parameters_non_indexed_offset: 0, }); } } } preprocess_work_item_buffers } /// Initializes work item buffers for a phase in preparation for a new frame. 
pub fn init_work_item_buffers( work_item_buffers: &mut PreprocessWorkItemBuffers, late_indexed_indirect_parameters_buffer: &'_ mut RawBufferVec< LatePreprocessWorkItemIndirectParameters, >, late_non_indexed_indirect_parameters_buffer: &'_ mut RawBufferVec< LatePreprocessWorkItemIndirectParameters, >, ) { // Add the offsets for indirect parameters that the late phase of mesh // preprocessing writes to. if let PreprocessWorkItemBuffers::Indirect { gpu_occlusion_culling: Some(GpuOcclusionCullingWorkItemBuffers { ref mut late_indirect_parameters_indexed_offset, ref mut late_indirect_parameters_non_indexed_offset, .. }), .. } = *work_item_buffers { *late_indirect_parameters_indexed_offset = late_indexed_indirect_parameters_buffer .push(LatePreprocessWorkItemIndirectParameters::default()) as u32; *late_indirect_parameters_non_indexed_offset = late_non_indexed_indirect_parameters_buffer .push(LatePreprocessWorkItemIndirectParameters::default()) as u32; } } impl PreprocessWorkItemBuffers { /// Adds a new work item to the appropriate buffer. /// /// `indexed` specifies whether the work item corresponds to an indexed /// mesh. pub fn push(&mut self, indexed: bool, preprocess_work_item: PreprocessWorkItem) { match *self { PreprocessWorkItemBuffers::Direct(ref mut buffer) => { buffer.push(preprocess_work_item); } PreprocessWorkItemBuffers::Indirect { indexed: ref mut indexed_buffer, non_indexed: ref mut non_indexed_buffer, ref mut gpu_occlusion_culling, } => { if indexed { indexed_buffer.push(preprocess_work_item); } else { non_indexed_buffer.push(preprocess_work_item); } if let Some(ref mut gpu_occlusion_culling) = *gpu_occlusion_culling { if indexed { gpu_occlusion_culling.late_indexed.add(); } else { gpu_occlusion_culling.late_non_indexed.add(); } } } } } /// Clears out the GPU work item buffers in preparation for a new frame. 
pub fn clear(&mut self) { match *self { PreprocessWorkItemBuffers::Direct(ref mut buffer) => { buffer.clear(); } PreprocessWorkItemBuffers::Indirect { indexed: ref mut indexed_buffer, non_indexed: ref mut non_indexed_buffer, ref mut gpu_occlusion_culling, } => { indexed_buffer.clear(); non_indexed_buffer.clear(); if let Some(ref mut gpu_occlusion_culling) = *gpu_occlusion_culling { gpu_occlusion_culling.late_indexed.clear(); gpu_occlusion_culling.late_non_indexed.clear(); gpu_occlusion_culling.late_indirect_parameters_indexed_offset = 0; gpu_occlusion_culling.late_indirect_parameters_non_indexed_offset = 0; } } } } } /// One invocation of the preprocessing shader: i.e. one mesh instance in a /// view. #[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct PreprocessWorkItem { /// The index of the batch input data in the input buffer that the shader /// reads from. pub input_index: u32, /// In direct mode, the index of the mesh uniform; in indirect mode, the /// index of the [`IndirectParametersGpuMetadata`]. /// /// In indirect mode, this is the index of the /// [`IndirectParametersGpuMetadata`] in the /// `IndirectParametersBuffers::indexed_metadata` or /// `IndirectParametersBuffers::non_indexed_metadata`. pub output_or_indirect_parameters_index: u32, } /// The `wgpu` indirect parameters structure that specifies a GPU draw command. /// /// This is the variant for indexed meshes. We generate the instances of this /// structure in the `build_indirect_params.wgsl` compute shader. #[derive(Clone, Copy, Debug, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct IndirectParametersIndexed { /// The number of indices that this mesh has. pub index_count: u32, /// The number of instances we are to draw. pub instance_count: u32, /// The offset of the first index for this mesh in the index buffer slab. pub first_index: u32, /// The offset of the first vertex for this mesh in the vertex buffer slab. 
pub base_vertex: u32, /// The index of the first mesh instance in the `MeshUniform` buffer. pub first_instance: u32, } /// The `wgpu` indirect parameters structure that specifies a GPU draw command. /// /// This is the variant for non-indexed meshes. We generate the instances of /// this structure in the `build_indirect_params.wgsl` compute shader. #[derive(Clone, Copy, Debug, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct IndirectParametersNonIndexed { /// The number of vertices that this mesh has. pub vertex_count: u32, /// The number of instances we are to draw. pub instance_count: u32, /// The offset of the first vertex for this mesh in the vertex buffer slab. pub base_vertex: u32, /// The index of the first mesh instance in the `Mesh` buffer. pub first_instance: u32, } /// A structure, initialized on CPU and read on GPU, that contains metadata /// about each batch. /// /// Each batch will have one instance of this structure. #[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct IndirectParametersCpuMetadata { /// The index of the first instance of this mesh in the array of /// `MeshUniform`s. /// /// Note that this is the *first* output index in this batch. Since each /// instance of this structure refers to arbitrarily many instances, the /// `MeshUniform`s corresponding to this batch span the indices /// `base_output_index..(base_output_index + instance_count)`. pub base_output_index: u32, /// The index of the batch set that this batch belongs to in the /// [`IndirectBatchSet`] buffer. /// /// A *batch set* is a set of meshes that may be multi-drawn together. /// Multiple batches (and therefore multiple instances of /// [`IndirectParametersGpuMetadata`] structures) can be part of the same /// batch set. pub batch_set_index: u32, } /// A structure, written and read GPU, that records how many instances of each /// mesh are actually to be drawn. 
/// /// The GPU mesh preprocessing shader increments the /// [`Self::early_instance_count`] and [`Self::late_instance_count`] as it /// determines that meshes are visible. The indirect parameter building shader /// reads this metadata in order to construct the indirect draw parameters. /// /// Each batch will have one instance of this structure. #[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct IndirectParametersGpuMetadata { /// The index of the first mesh in this batch in the array of /// `MeshInputUniform`s. pub mesh_index: u32, /// The number of instances that were judged visible last frame. /// /// The CPU sets this value to 0, and the GPU mesh preprocessing shader /// increments it as it culls mesh instances. pub early_instance_count: u32, /// The number of instances that have been judged potentially visible this /// frame that weren't in the last frame's potentially visible set. /// /// The CPU sets this value to 0, and the GPU mesh preprocessing shader /// increments it as it culls mesh instances. pub late_instance_count: u32, } /// A structure, shared between CPU and GPU, that holds the number of on-GPU /// indirect draw commands for each *batch set*. /// /// A *batch set* is a set of meshes that may be multi-drawn together. /// /// If the current hardware and driver support `multi_draw_indirect_count`, the /// indirect parameters building shader increments /// [`Self::indirect_parameters_count`] as it generates indirect parameters. The /// `multi_draw_indirect_count` command reads /// [`Self::indirect_parameters_count`] in order to determine how many commands /// belong to each batch set. #[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct IndirectBatchSet { /// The number of indirect parameter commands (i.e. batches) in this batch /// set. /// /// The CPU sets this value to 0 before uploading this structure to GPU. 
The /// indirect parameters building shader increments this value as it creates /// indirect parameters. Then the `multi_draw_indirect_count` command reads /// this value in order to determine how many indirect draw commands to /// process. pub indirect_parameters_count: u32, /// The offset within the `IndirectParametersBuffers::indexed_data` or /// `IndirectParametersBuffers::non_indexed_data` of the first indirect draw /// command for this batch set. /// /// The CPU fills out this value. pub indirect_parameters_base: u32, } /// The buffers containing all the information that indirect draw commands /// (`multi_draw_indirect`, `multi_draw_indirect_count`) use to draw the scene. /// /// In addition to the indirect draw buffers themselves, this structure contains /// the buffers that store [`IndirectParametersGpuMetadata`], which are the /// structures that culling writes to so that the indirect parameter building /// pass can determine how many meshes are actually to be drawn. /// /// These buffers will remain empty if indirect drawing isn't in use. #[derive(Resource, Deref, DerefMut)] pub struct IndirectParametersBuffers { /// A mapping from a phase type ID to the indirect parameters buffers for /// that phase. /// /// Examples of phase type IDs are `Opaque3d` and `AlphaMask3d`. #[deref] pub buffers: TypeIdMap<UntypedPhaseIndirectParametersBuffers>, /// If true, this sets the `COPY_SRC` flag on indirect draw parameters so /// that they can be read back to CPU. /// /// This is a debugging feature that may reduce performance. It primarily /// exists for the `occlusion_culling` example. pub allow_copies_from_indirect_parameter_buffers: bool, } impl IndirectParametersBuffers { /// Initializes a new [`IndirectParametersBuffers`] resource. 
pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> IndirectParametersBuffers { IndirectParametersBuffers { buffers: TypeIdMap::default(), allow_copies_from_indirect_parameter_buffers, } } } /// The buffers containing all the information that indirect draw commands use /// to draw the scene, for a single phase. /// /// This is the version of the structure that has a type parameter, so that the /// batching for different phases can run in parallel. /// /// See the [`IndirectParametersBuffers`] documentation for more information. #[derive(Resource)] pub struct PhaseIndirectParametersBuffers<PI> where PI: PhaseItem, { /// The indirect draw buffers for the phase. pub buffers: UntypedPhaseIndirectParametersBuffers, phantom: PhantomData<PI>, } impl<PI> PhaseIndirectParametersBuffers<PI> where PI: PhaseItem, { pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> Self { PhaseIndirectParametersBuffers { buffers: UntypedPhaseIndirectParametersBuffers::new( allow_copies_from_indirect_parameter_buffers, ), phantom: PhantomData, } } } /// The buffers containing all the information that indirect draw commands use /// to draw the scene, for a single phase. /// /// This is the version of the structure that doesn't have a type parameter, so /// that it can be inserted into [`IndirectParametersBuffers::buffers`] /// /// See the [`IndirectParametersBuffers`] documentation for more information. pub struct UntypedPhaseIndirectParametersBuffers { /// Information that indirect draw commands use to draw indexed meshes in /// the scene. pub indexed: MeshClassIndirectParametersBuffers<IndirectParametersIndexed>, /// Information that indirect draw commands use to draw non-indexed meshes /// in the scene. pub non_indexed: MeshClassIndirectParametersBuffers<IndirectParametersNonIndexed>, } impl UntypedPhaseIndirectParametersBuffers { /// Creates the indirect parameters buffers. pub fn new( allow_copies_from_indirect_parameter_buffers: bool,
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/batching/mod.rs
crates/bevy_render/src/batching/mod.rs
use bevy_ecs::{ component::Component, entity::Entity, system::{ResMut, SystemParam, SystemParamItem}, }; use bytemuck::Pod; use gpu_preprocessing::UntypedPhaseIndirectParametersBuffers; use nonmax::NonMaxU32; use crate::{ render_phase::{ BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, PhaseItemExtraIndex, SortedPhaseItem, SortedRenderPhase, ViewBinnedRenderPhases, }, render_resource::{CachedRenderPipelineId, GpuArrayBufferable}, sync_world::MainEntity, }; pub mod gpu_preprocessing; pub mod no_gpu_preprocessing; /// Add this component to mesh entities to disable automatic batching #[derive(Component, Default, Clone, Copy)] pub struct NoAutomaticBatching; /// Data necessary to be equal for two draw commands to be mergeable /// /// This is based on the following assumptions: /// - Only entities with prepared assets (pipelines, materials, meshes) are /// queued to phases /// - View bindings are constant across a phase for a given draw function as /// phases are per-view /// - `batch_and_prepare_render_phase` is the only system that performs this /// batching and has sole responsibility for preparing the per-object data. /// As such the mesh binding and dynamic offsets are assumed to only be /// variable as a result of the `batch_and_prepare_render_phase` system, e.g. /// due to having to split data across separate uniform bindings within the /// same buffer due to the maximum uniform buffer binding size. #[derive(PartialEq)] struct BatchMeta<T: PartialEq> { /// The pipeline id encompasses all pipeline configuration including vertex /// buffers and layouts, shaders and their specializations, bind group /// layouts, etc. 
pipeline_id: CachedRenderPipelineId, /// The draw function id defines the `RenderCommands` that are called to /// set the pipeline and bindings, and make the draw command draw_function_id: DrawFunctionId, dynamic_offset: Option<NonMaxU32>, user_data: T, } impl<T: PartialEq> BatchMeta<T> { fn new(item: &impl CachedRenderPipelinePhaseItem, user_data: T) -> Self { BatchMeta { pipeline_id: item.cached_pipeline(), draw_function_id: item.draw_function(), dynamic_offset: match item.extra_index() { PhaseItemExtraIndex::DynamicOffset(dynamic_offset) => { NonMaxU32::new(dynamic_offset) } PhaseItemExtraIndex::None | PhaseItemExtraIndex::IndirectParametersIndex { .. } => { None } }, user_data, } } } /// A trait to support getting data used for batching draw commands via phase /// items. /// /// This is a simple version that only allows for sorting, not binning, as well /// as only CPU processing, not GPU preprocessing. For these fancier features, /// see [`GetFullBatchData`]. pub trait GetBatchData { /// The system parameters [`GetBatchData::get_batch_data`] needs in /// order to compute the batch data. type Param: SystemParam + 'static; /// Data used for comparison between phase items. If the pipeline id, draw /// function id, per-instance data buffer dynamic offset and this data /// matches, the draws can be batched. type CompareData: PartialEq; /// The per-instance data to be inserted into the /// [`crate::render_resource::GpuArrayBuffer`] containing these data for all /// instances. type BufferData: GpuArrayBufferable + Sync + Send + 'static; /// Get the per-instance data to be inserted into the /// [`crate::render_resource::GpuArrayBuffer`]. If the instance can be /// batched, also return the data used for comparison when deciding whether /// draws can be batched, else return None for the `CompareData`. /// /// This is only called when building instance data on CPU. 
In the GPU /// instance data building path, we use /// [`GetFullBatchData::get_index_and_compare_data`] instead. fn get_batch_data( param: &SystemParamItem<Self::Param>, query_item: (Entity, MainEntity), ) -> Option<(Self::BufferData, Option<Self::CompareData>)>; } /// A trait to support getting data used for batching draw commands via phase /// items. /// /// This version allows for binning and GPU preprocessing. pub trait GetFullBatchData: GetBatchData { /// The per-instance data that was inserted into the /// [`crate::render_resource::BufferVec`] during extraction. type BufferInputData: Pod + Default + Sync + Send; /// Get the per-instance data to be inserted into the /// [`crate::render_resource::GpuArrayBuffer`]. /// /// This is only called when building uniforms on CPU. In the GPU instance /// buffer building path, we use /// [`GetFullBatchData::get_index_and_compare_data`] instead. fn get_binned_batch_data( param: &SystemParamItem<Self::Param>, query_item: MainEntity, ) -> Option<Self::BufferData>; /// Returns the index of the [`GetFullBatchData::BufferInputData`] that the /// GPU preprocessing phase will use. /// /// We already inserted the [`GetFullBatchData::BufferInputData`] during the /// extraction phase before we got here, so this function shouldn't need to /// look up any render data. If CPU instance buffer building is in use, this /// function will never be called. fn get_index_and_compare_data( param: &SystemParamItem<Self::Param>, query_item: MainEntity, ) -> Option<(NonMaxU32, Option<Self::CompareData>)>; /// Returns the index of the [`GetFullBatchData::BufferInputData`] that the /// GPU preprocessing phase will use. /// /// We already inserted the [`GetFullBatchData::BufferInputData`] during the /// extraction phase before we got here, so this function shouldn't need to /// look up any render data. /// /// This function is currently only called for unbatchable entities when GPU /// instance buffer building is in use. 
For batchable entities, the uniform /// index is written during queuing (e.g. in `queue_material_meshes`). In /// the case of CPU instance buffer building, the CPU writes the uniforms, /// so there's no index to return. fn get_binned_index( param: &SystemParamItem<Self::Param>, query_item: MainEntity, ) -> Option<NonMaxU32>; /// Writes the [`gpu_preprocessing::IndirectParametersGpuMetadata`] /// necessary to draw this batch into the given metadata buffer at the given /// index. /// /// This is only used if GPU culling is enabled (which requires GPU /// preprocessing). /// /// * `indexed` is true if the mesh is indexed or false if it's non-indexed. /// /// * `base_output_index` is the index of the first mesh instance in this /// batch in the `MeshUniform` output buffer. /// /// * `batch_set_index` is the index of the batch set in the /// [`gpu_preprocessing::IndirectBatchSet`] buffer, if this batch belongs to /// a batch set. /// /// * `indirect_parameters_buffers` is the buffer in which to write the /// metadata. /// /// * `indirect_parameters_offset` is the index in that buffer at which to /// write the metadata. fn write_batch_indirect_parameters_metadata( indexed: bool, base_output_index: u32, batch_set_index: Option<NonMaxU32>, indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, indirect_parameters_offset: u32, ); } /// Sorts a render phase that uses bins. pub fn sort_binned_render_phase<BPI>(mut phases: ResMut<ViewBinnedRenderPhases<BPI>>) where BPI: BinnedPhaseItem, { for phase in phases.values_mut() { phase.multidrawable_meshes.sort_unstable_keys(); phase.batchable_meshes.sort_unstable_keys(); phase.unbatchable_meshes.sort_unstable_keys(); phase.non_mesh_items.sort_unstable_keys(); } } /// Batches the items in a sorted render phase. /// /// This means comparing metadata needed to draw each phase item and trying to /// combine the draws into a batch. 
/// /// This is common code factored out from /// [`gpu_preprocessing::batch_and_prepare_sorted_render_phase`] and /// [`no_gpu_preprocessing::batch_and_prepare_sorted_render_phase`]. fn batch_and_prepare_sorted_render_phase<I, GBD>( phase: &mut SortedRenderPhase<I>, mut process_item: impl FnMut(&mut I) -> Option<GBD::CompareData>, ) where I: CachedRenderPipelinePhaseItem + SortedPhaseItem, GBD: GetBatchData, { let items = phase.items.iter_mut().map(|item| { let batch_data = match process_item(item) { Some(compare_data) if I::AUTOMATIC_BATCHING => Some(BatchMeta::new(item, compare_data)), _ => None, }; (item.batch_range_mut(), batch_data) }); items.reduce(|(start_range, prev_batch_meta), (range, batch_meta)| { if batch_meta.is_some() && prev_batch_meta == batch_meta { start_range.end = range.end; (start_range, prev_batch_meta) } else { (range, batch_meta) } }); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/batching/no_gpu_preprocessing.rs
crates/bevy_render/src/batching/no_gpu_preprocessing.rs
//! Batching functionality when GPU preprocessing isn't in use. use bevy_derive::{Deref, DerefMut}; use bevy_ecs::entity::Entity; use bevy_ecs::resource::Resource; use bevy_ecs::system::{Res, ResMut, StaticSystemParam}; use smallvec::{smallvec, SmallVec}; use tracing::error; use wgpu::{BindingResource, Limits}; use crate::{ render_phase::{ BinnedPhaseItem, BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSets, CachedRenderPipelinePhaseItem, PhaseItemExtraIndex, SortedPhaseItem, ViewBinnedRenderPhases, ViewSortedRenderPhases, }, render_resource::{GpuArrayBuffer, GpuArrayBufferable}, renderer::{RenderDevice, RenderQueue}, }; use super::{GetBatchData, GetFullBatchData}; /// The GPU buffers holding the data needed to render batches. /// /// For example, in the 3D PBR pipeline this holds `MeshUniform`s, which are the /// `BD` type parameter in that mode. #[derive(Resource, Deref, DerefMut)] pub struct BatchedInstanceBuffer<BD>(pub GpuArrayBuffer<BD>) where BD: GpuArrayBufferable + Sync + Send + 'static; impl<BD> BatchedInstanceBuffer<BD> where BD: GpuArrayBufferable + Sync + Send + 'static, { /// Creates a new buffer. pub fn new(limits: &Limits) -> Self { BatchedInstanceBuffer(GpuArrayBuffer::new(limits)) } /// Returns the binding of the buffer that contains the per-instance data. /// /// If we're in the GPU instance buffer building mode, this buffer needs to /// be filled in via a compute shader. pub fn instance_data_binding(&self) -> Option<BindingResource<'_>> { self.binding() } } /// A system that clears out the [`BatchedInstanceBuffer`] for the frame. /// /// This needs to run before the CPU batched instance buffers are used. 
pub fn clear_batched_cpu_instance_buffers<GBD>( cpu_batched_instance_buffer: Option<ResMut<BatchedInstanceBuffer<GBD::BufferData>>>, ) where GBD: GetBatchData, { if let Some(mut cpu_batched_instance_buffer) = cpu_batched_instance_buffer { cpu_batched_instance_buffer.clear(); } } /// Batch the items in a sorted render phase, when GPU instance buffer building /// isn't in use. This means comparing metadata needed to draw each phase item /// and trying to combine the draws into a batch. pub fn batch_and_prepare_sorted_render_phase<I, GBD>( batched_instance_buffer: ResMut<BatchedInstanceBuffer<GBD::BufferData>>, mut phases: ResMut<ViewSortedRenderPhases<I>>, param: StaticSystemParam<GBD::Param>, ) where I: CachedRenderPipelinePhaseItem + SortedPhaseItem, GBD: GetBatchData, { let system_param_item = param.into_inner(); // We only process CPU-built batch data in this function. let batched_instance_buffer = batched_instance_buffer.into_inner(); for phase in phases.values_mut() { super::batch_and_prepare_sorted_render_phase::<I, GBD>(phase, |item| { let (buffer_data, compare_data) = GBD::get_batch_data(&system_param_item, (item.entity(), item.main_entity()))?; let buffer_index = batched_instance_buffer.push(buffer_data); let index = buffer_index.index; let (batch_range, extra_index) = item.batch_range_and_extra_index_mut(); *batch_range = index..index + 1; *extra_index = PhaseItemExtraIndex::maybe_dynamic_offset(buffer_index.dynamic_offset); compare_data }); } } /// Creates batches for a render phase that uses bins, when GPU batch data /// building isn't in use. 
pub fn batch_and_prepare_binned_render_phase<BPI, GFBD>( gpu_array_buffer: ResMut<BatchedInstanceBuffer<GFBD::BufferData>>, mut phases: ResMut<ViewBinnedRenderPhases<BPI>>, param: StaticSystemParam<GFBD::Param>, ) where BPI: BinnedPhaseItem, GFBD: GetFullBatchData, { let gpu_array_buffer = gpu_array_buffer.into_inner(); let system_param_item = param.into_inner(); for phase in phases.values_mut() { // Prepare batchables. for bin in phase.batchable_meshes.values_mut() { let mut batch_set: SmallVec<[BinnedRenderPhaseBatch; 1]> = smallvec![]; for main_entity in bin.entities().keys() { let Some(buffer_data) = GFBD::get_binned_batch_data(&system_param_item, *main_entity) else { continue; }; let instance = gpu_array_buffer.push(buffer_data); // If the dynamic offset has changed, flush the batch. // // This is the only time we ever have more than one batch per // bin. Note that dynamic offsets are only used on platforms // with no storage buffers. if !batch_set.last().is_some_and(|batch| { batch.instance_range.end == instance.index && batch.extra_index == PhaseItemExtraIndex::maybe_dynamic_offset(instance.dynamic_offset) }) { batch_set.push(BinnedRenderPhaseBatch { representative_entity: (Entity::PLACEHOLDER, *main_entity), instance_range: instance.index..instance.index, extra_index: PhaseItemExtraIndex::maybe_dynamic_offset( instance.dynamic_offset, ), }); } if let Some(batch) = batch_set.last_mut() { batch.instance_range.end = instance.index + 1; } } match phase.batch_sets { BinnedRenderPhaseBatchSets::DynamicUniforms(ref mut batch_sets) => { batch_sets.push(batch_set); } BinnedRenderPhaseBatchSets::Direct(_) | BinnedRenderPhaseBatchSets::MultidrawIndirect { .. } => { error!( "Dynamic uniform batch sets should be used when GPU preprocessing is off" ); } } } // Prepare unbatchables. 
for unbatchables in phase.unbatchable_meshes.values_mut() { for main_entity in unbatchables.entities.keys() { let Some(buffer_data) = GFBD::get_binned_batch_data(&system_param_item, *main_entity) else { continue; }; let instance = gpu_array_buffer.push(buffer_data); unbatchables.buffer_indices.add(instance.into()); } } } } /// Writes the instance buffer data to the GPU. pub fn write_batched_instance_buffer<GBD>( render_device: Res<RenderDevice>, render_queue: Res<RenderQueue>, mut cpu_batched_instance_buffer: ResMut<BatchedInstanceBuffer<GBD::BufferData>>, ) where GBD: GetBatchData, { cpu_batched_instance_buffer.write_buffer(&render_device, &render_queue); }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/experimental/mod.rs
crates/bevy_render/src/experimental/mod.rs
//! Experimental rendering features. //! //! Experimental features are features with known problems, but are included //! nonetheless for testing purposes. pub mod occlusion_culling;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/experimental/occlusion_culling/mod.rs
crates/bevy_render/src/experimental/occlusion_culling/mod.rs
//! GPU occlusion culling. //! //! See [`OcclusionCulling`] for a detailed description of occlusion culling in //! Bevy. use bevy_app::{App, Plugin}; use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_shader::load_shader_library; use crate::{extract_component::ExtractComponent, render_resource::TextureView}; /// Enables GPU occlusion culling. /// /// See [`OcclusionCulling`] for a detailed description of occlusion culling in /// Bevy. pub struct OcclusionCullingPlugin; impl Plugin for OcclusionCullingPlugin { fn build(&self, app: &mut App) { load_shader_library!(app, "mesh_preprocess_types.wgsl"); } } /// Add this component to a view in order to enable experimental GPU occlusion /// culling. /// /// *Bevy's occlusion culling is currently marked as experimental.* There are /// known issues whereby, in rare circumstances, occlusion culling can result in /// meshes being culled that shouldn't be (i.e. meshes that turn invisible). /// Please try it out and report issues. /// /// *Occlusion culling* allows Bevy to avoid rendering objects that are fully /// behind other opaque or alpha tested objects. This is different from, and /// complements, depth fragment rejection as the `DepthPrepass` enables. While /// depth rejection allows Bevy to avoid rendering *pixels* that are behind /// other objects, the GPU still has to examine those pixels to reject them, /// which requires transforming the vertices of the objects and performing /// skinning if the objects were skinned. Occlusion culling allows the GPU to go /// a step further, avoiding even transforming the vertices of objects that it /// can quickly prove to be behind other objects. /// /// Occlusion culling inherently has some overhead, because Bevy must examine /// the objects' bounding boxes, and create an acceleration structure /// (hierarchical Z-buffer) to perform the occlusion tests. 
Therefore, occlusion /// culling is disabled by default. Only enable it if you measure it to be a /// speedup on your scene. Note that, because Bevy's occlusion culling runs on /// the GPU and is quite efficient, it's rare for occlusion culling to result in /// a significant slowdown. /// /// Occlusion culling currently requires a `DepthPrepass`. If no depth prepass /// is present on the view, the [`OcclusionCulling`] component will be ignored. /// Additionally, occlusion culling is currently incompatible with deferred /// shading; including both `DeferredPrepass` and [`OcclusionCulling`] results /// in unspecified behavior. /// /// The algorithm that Bevy uses is known as [*two-phase occlusion culling*]. /// When you enable occlusion culling, Bevy splits the depth prepass into two: /// an *early* depth prepass and a *late* depth prepass. The early depth prepass /// renders all the meshes that were visible last frame to produce a /// conservative approximation of the depth buffer. Then, after producing an /// acceleration structure known as a hierarchical Z-buffer or depth pyramid, /// Bevy tests the bounding boxes of all meshes against that depth buffer. Those /// that can be quickly proven to be behind the geometry rendered during the /// early depth prepass are skipped entirely. The other potentially-visible /// meshes are rendered during the late prepass, and finally all the visible /// meshes are rendered as usual during the opaque, transparent, etc. passes. /// /// Unlike other occlusion culling systems you may be familiar with, Bevy's /// occlusion culling is fully dynamic and requires no baking step. The CPU /// overhead is minimal. Large skinned meshes and other dynamic objects can /// occlude other objects. 
/// /// [*two-phase occlusion culling*]: /// https://medium.com/@mil_kru/two-pass-occlusion-culling-4100edcad501 #[derive(Component, ExtractComponent, Clone, Copy, Default, Reflect)] #[reflect(Component, Default, Clone)] pub struct OcclusionCulling; /// A render-world component that contains resources necessary to perform /// occlusion culling on any view other than a camera. /// /// Bevy automatically places this component on views created for shadow /// mapping. You don't ordinarily need to add this component yourself. #[derive(Clone, Component)] pub struct OcclusionCullingSubview { /// A texture view of the Z-buffer. pub depth_texture_view: TextureView, /// The size of the texture along both dimensions. /// /// Because [`OcclusionCullingSubview`] is only currently used for shadow /// maps, they're guaranteed to have sizes equal to a power of two, so we /// don't have to store the two dimensions individually here. pub depth_texture_size: u32, } /// A render-world component placed on each camera that stores references to all /// entities other than cameras that need occlusion culling. /// /// Bevy automatically places this component on cameras that are drawing /// shadows, when those shadows come from lights with occlusion culling enabled. /// You don't ordinarily need to add this component yourself. #[derive(Clone, Component)] pub struct OcclusionCullingSubviewEntities(pub Vec<Entity>);
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/mesh/allocator.rs
crates/bevy_render/src/mesh/allocator.rs
//! Manages mesh vertex and index buffers. use alloc::vec::Vec; use bevy_mesh::Indices; use core::{ fmt::{self, Display, Formatter}, ops::Range, }; use nonmax::NonMaxU32; use bevy_app::{App, Plugin}; use bevy_asset::AssetId; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ resource::Resource, schedule::IntoScheduleConfigs as _, system::{Res, ResMut}, world::{FromWorld, World}, }; use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_utils::default; use offset_allocator::{Allocation, Allocator}; use tracing::error; use wgpu::{ BufferDescriptor, BufferSize, BufferUsages, CommandEncoderDescriptor, DownlevelFlags, COPY_BUFFER_ALIGNMENT, }; use crate::{ mesh::{Mesh, MeshVertexBufferLayouts, RenderMesh}, render_asset::{prepare_assets, ExtractedAssets}, render_resource::Buffer, renderer::{RenderAdapter, RenderDevice, RenderQueue}, Render, RenderApp, RenderSystems, }; /// A plugin that manages GPU memory for mesh data. pub struct MeshAllocatorPlugin; /// Manages the assignment of mesh data to GPU buffers. /// /// The Bevy renderer tries to pack vertex and index data for multiple meshes /// together so that multiple meshes can be drawn back-to-back without any /// rebinding. This resource manages these buffers. /// /// Within each slab, or hardware buffer, the underlying allocation algorithm is /// [`offset_allocator`], a Rust port of Sebastian Aaltonen's hard-real-time C++ /// `OffsetAllocator`. Slabs start small and then grow as their contents fill /// up, up to a maximum size limit. To reduce fragmentation, vertex and index /// buffers that are too large bypass this system and receive their own buffers. /// /// The [`MeshAllocatorSettings`] allows you to tune the behavior of the /// allocator for better performance with your application. Most applications /// won't need to change the settings from their default values. #[derive(Resource)] pub struct MeshAllocator { /// Holds all buffers and allocators. 
slabs: HashMap<SlabId, Slab>, /// Maps a layout to the slabs that hold elements of that layout. /// /// This is used when allocating, so that we can find the appropriate slab /// to place an object in. slab_layouts: HashMap<ElementLayout, Vec<SlabId>>, /// Maps mesh asset IDs to the ID of the slabs that hold their vertex data. mesh_id_to_vertex_slab: HashMap<AssetId<Mesh>, SlabId>, /// Maps mesh asset IDs to the ID of the slabs that hold their index data. mesh_id_to_index_slab: HashMap<AssetId<Mesh>, SlabId>, /// The next slab ID to assign. next_slab_id: SlabId, /// Whether we can pack multiple vertex arrays into a single slab on this /// platform. /// /// This corresponds to [`DownlevelFlags::BASE_VERTEX`], which is unset on /// WebGL 2. On this platform, we must give each vertex array its own /// buffer, because we can't adjust the first vertex when we perform a draw. general_vertex_slabs_supported: bool, /// Additional buffer usages to add to any vertex or index buffers created. pub extra_buffer_usages: BufferUsages, } /// Tunable parameters that customize the behavior of the allocator. /// /// Generally, these parameters adjust the tradeoff between memory fragmentation /// and performance. You can adjust them as desired for your application. Most /// applications can stick with the default values. #[derive(Resource)] pub struct MeshAllocatorSettings { /// The minimum size of a slab (hardware buffer), in bytes. /// /// The default value is 1 MiB. pub min_slab_size: u64, /// The maximum size of a slab (hardware buffer), in bytes. /// /// When a slab reaches this limit, a new slab is created. /// /// The default value is 512 MiB. pub max_slab_size: u64, /// The maximum size of vertex or index data that can be placed in a general /// slab, in bytes. /// /// If a mesh has vertex or index data that exceeds this size limit, that /// data is placed in its own slab. This reduces fragmentation, but incurs /// more CPU-side binding overhead when drawing the mesh. 
/// /// The default value is 256 MiB. pub large_threshold: u64, /// The factor by which we scale a slab when growing it. /// /// This value must be greater than 1. Higher values result in more /// fragmentation but fewer expensive copy operations when growing the /// buffer. /// /// The default value is 1.5. pub growth_factor: f64, } impl Default for MeshAllocatorSettings { fn default() -> Self { Self { // 1 MiB min_slab_size: 1024 * 1024, // 512 MiB max_slab_size: 1024 * 1024 * 512, // 256 MiB large_threshold: 1024 * 1024 * 256, // 1.5× growth growth_factor: 1.5, } } } /// The hardware buffer that mesh data lives in, as well as the range within /// that buffer. pub struct MeshBufferSlice<'a> { /// The buffer that the mesh data resides in. pub buffer: &'a Buffer, /// The range of elements within this buffer that the mesh data resides in, /// measured in elements. /// /// This is not a byte range; it's an element range. For vertex data, this /// is measured in increments of a single vertex. (Thus, if a vertex is 32 /// bytes long, then this range is in units of 32 bytes each.) For index /// data, this is measured in increments of a single index value (2 or 4 /// bytes). Draw commands generally take their ranges in elements, not /// bytes, so this is the most convenient unit in this case. pub range: Range<u32>, } /// The index of a single slab. #[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] #[repr(transparent)] pub struct SlabId(pub NonMaxU32); /// Data for a single slab. #[expect( clippy::large_enum_variant, reason = "See https://github.com/bevyengine/bevy/issues/19220" )] enum Slab { /// A slab that can contain multiple objects. General(GeneralSlab), /// A slab that contains a single object. 
LargeObject(LargeObjectSlab), } impl Slab { pub fn buffer_size(&self) -> u64 { match self { Self::General(gs) => gs.buffer.as_ref().map(|buffer| buffer.size()).unwrap_or(0), Self::LargeObject(lo) => lo.buffer.as_ref().map(|buffer| buffer.size()).unwrap_or(0), } } } /// A resizable slab that can contain multiple objects. /// /// This is the normal type of slab used for objects that are below the /// [`MeshAllocatorSettings::large_threshold`]. Slabs are divided into *slots*, /// which are described in detail in the [`ElementLayout`] documentation. struct GeneralSlab { /// The [`Allocator`] that manages the objects in this slab. allocator: Allocator, /// The GPU buffer that backs this slab. /// /// This may be `None` if the buffer hasn't been created yet. We delay /// creation of buffers until allocating all the meshes for a single frame, /// so that we don't needlessly create and resize buffers when many meshes /// load all at once. buffer: Option<Buffer>, /// Allocations that are on the GPU. /// /// The range is in slots. resident_allocations: HashMap<AssetId<Mesh>, SlabAllocation>, /// Allocations that are waiting to be uploaded to the GPU. /// /// The range is in slots. pending_allocations: HashMap<AssetId<Mesh>, SlabAllocation>, /// The layout of a single element (vertex or index). element_layout: ElementLayout, /// The size of this slab in slots. current_slot_capacity: u32, } /// A slab that contains a single object. /// /// Typically, this is for objects that exceed the /// [`MeshAllocatorSettings::large_threshold`]. This is also for objects that /// would ordinarily receive their own slab but can't because of platform /// limitations, most notably vertex arrays on WebGL 2. struct LargeObjectSlab { /// The GPU buffer that backs this slab. /// /// This may be `None` if the buffer hasn't been created yet. buffer: Option<Buffer>, /// The layout of a single element (vertex or index). element_layout: ElementLayout, } /// The type of element that a slab can store. 
#[derive(Clone, Copy, PartialEq, Eq, Hash)] enum ElementClass { /// Data for a vertex. Vertex, /// A vertex index. Index, } /// The results of [`GeneralSlab::grow_if_necessary`]. enum SlabGrowthResult { /// The mesh data already fits in the slab; the slab doesn't need to grow. NoGrowthNeeded, /// The slab needed to grow. /// /// The [`SlabToReallocate`] contains the old capacity of the slab. NeededGrowth(SlabToReallocate), /// The slab wanted to grow but couldn't because it hit its maximum size. CantGrow, } /// Information about the size of individual elements (vertices or indices) /// within a slab. /// /// Slab objects are allocated in units of *slots*. Usually, each element takes /// up one slot, and so elements and slots are equivalent. Occasionally, /// however, a slot may consist of 2 or even 4 elements. This occurs when the /// size of an element isn't divisible by [`COPY_BUFFER_ALIGNMENT`]. When we /// resize buffers, we perform GPU-to-GPU copies to shuffle the existing /// elements into their new positions, and such copies must be on /// [`COPY_BUFFER_ALIGNMENT`] boundaries. Slots solve this problem by /// guaranteeing that the size of an allocation quantum is divisible by both the /// size of an element and [`COPY_BUFFER_ALIGNMENT`], so we can relocate it /// freely. #[derive(Clone, Copy, PartialEq, Eq, Hash)] struct ElementLayout { /// Either a vertex or an index. class: ElementClass, /// The size in bytes of a single element (vertex or index). size: u64, /// The number of elements that make up a single slot. /// /// Usually, this is 1, but it can be different if [`ElementLayout::size`] /// isn't divisible by 4. See the comment in [`ElementLayout`] for more /// details. elements_per_slot: u32, } /// The location of an allocation and the slab it's contained in. struct MeshAllocation { /// The ID of the slab. slab_id: SlabId, /// Holds the actual allocation. slab_allocation: SlabAllocation, } /// An allocation within a slab. 
#[derive(Clone)] struct SlabAllocation { /// The actual [`Allocator`] handle, needed to free the allocation. allocation: Allocation, /// The number of slots that this allocation takes up. slot_count: u32, } /// Holds information about all slabs scheduled to be allocated or reallocated. #[derive(Default, Deref, DerefMut)] struct SlabsToReallocate(HashMap<SlabId, SlabToReallocate>); /// Holds information about a slab that's scheduled to be allocated or /// reallocated. #[derive(Default)] struct SlabToReallocate { /// The capacity of the slab before we decided to grow it. old_slot_capacity: u32, } impl Display for SlabId { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl Plugin for MeshAllocatorPlugin { fn build(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app .init_resource::<MeshAllocatorSettings>() .add_systems( Render, allocate_and_free_meshes .in_set(RenderSystems::PrepareAssets) .before(prepare_assets::<RenderMesh>), ); } fn finish(&self, app: &mut App) { let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; // The `RenderAdapter` isn't available until now, so we can't do this in // [`Plugin::build`]. render_app.init_resource::<MeshAllocator>(); } } impl FromWorld for MeshAllocator { fn from_world(world: &mut World) -> Self { // Note whether we're on WebGL 2. In this case, we must give every // vertex array its own slab. 
let render_adapter = world.resource::<RenderAdapter>(); let general_vertex_slabs_supported = render_adapter .get_downlevel_capabilities() .flags .contains(DownlevelFlags::BASE_VERTEX); Self { slabs: HashMap::default(), slab_layouts: HashMap::default(), mesh_id_to_vertex_slab: HashMap::default(), mesh_id_to_index_slab: HashMap::default(), next_slab_id: default(), general_vertex_slabs_supported, extra_buffer_usages: BufferUsages::empty(), } } } /// A system that processes newly-extracted or newly-removed meshes and writes /// their data into buffers or frees their data as appropriate. pub fn allocate_and_free_meshes( mut mesh_allocator: ResMut<MeshAllocator>, mesh_allocator_settings: Res<MeshAllocatorSettings>, extracted_meshes: Res<ExtractedAssets<RenderMesh>>, mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>, render_device: Res<RenderDevice>, render_queue: Res<RenderQueue>, ) { // Process removed or modified meshes. mesh_allocator.free_meshes(&extracted_meshes); // Process newly-added or modified meshes. mesh_allocator.allocate_meshes( &mesh_allocator_settings, &extracted_meshes, &mut mesh_vertex_buffer_layouts, &render_device, &render_queue, ); } impl MeshAllocator { /// Returns the buffer and range within that buffer of the vertex data for /// the mesh with the given ID. /// /// If the mesh wasn't allocated, returns None. pub fn mesh_vertex_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> { self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_vertex_slab.get(mesh_id)?) } /// Returns the buffer and range within that buffer of the index data for /// the mesh with the given ID. /// /// If the mesh has no index data or wasn't allocated, returns None. pub fn mesh_index_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> { self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_index_slab.get(mesh_id)?) } /// Returns the IDs of the vertex buffer and index buffer respectively for /// the mesh with the given ID. 
/// /// If the mesh wasn't allocated, or has no index data in the case of the /// index buffer, the corresponding element in the returned tuple will be /// None. pub fn mesh_slabs(&self, mesh_id: &AssetId<Mesh>) -> (Option<SlabId>, Option<SlabId>) { ( self.mesh_id_to_vertex_slab.get(mesh_id).cloned(), self.mesh_id_to_index_slab.get(mesh_id).cloned(), ) } /// Get the number of allocated slabs pub fn slab_count(&self) -> usize { self.slabs.len() } /// Get the total size of all allocated slabs pub fn slabs_size(&self) -> u64 { self.slabs.iter().map(|slab| slab.1.buffer_size()).sum() } pub fn allocations(&self) -> usize { self.mesh_id_to_index_slab.len() } /// Given a slab and a mesh with data located with it, returns the buffer /// and range of that mesh data within the slab. fn mesh_slice_in_slab( &self, mesh_id: &AssetId<Mesh>, slab_id: SlabId, ) -> Option<MeshBufferSlice<'_>> { match self.slabs.get(&slab_id)? { Slab::General(general_slab) => { let slab_allocation = general_slab.resident_allocations.get(mesh_id)?; Some(MeshBufferSlice { buffer: general_slab.buffer.as_ref()?, range: (slab_allocation.allocation.offset * general_slab.element_layout.elements_per_slot) ..((slab_allocation.allocation.offset + slab_allocation.slot_count) * general_slab.element_layout.elements_per_slot), }) } Slab::LargeObject(large_object_slab) => { let buffer = large_object_slab.buffer.as_ref()?; Some(MeshBufferSlice { buffer, range: 0..((buffer.size() / large_object_slab.element_layout.size) as u32), }) } } } /// Processes newly-loaded meshes, allocating room in the slabs for their /// mesh data and performing upload operations as appropriate. fn allocate_meshes( &mut self, mesh_allocator_settings: &MeshAllocatorSettings, extracted_meshes: &ExtractedAssets<RenderMesh>, mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts, render_device: &RenderDevice, render_queue: &RenderQueue, ) { let mut slabs_to_grow = SlabsToReallocate::default(); // Allocate. 
for (mesh_id, mesh) in &extracted_meshes.extracted { let vertex_buffer_size = mesh.get_vertex_buffer_size() as u64; if vertex_buffer_size == 0 { continue; } // Allocate vertex data. Note that we can only pack mesh vertex data // together if the platform supports it. let vertex_element_layout = ElementLayout::vertex(mesh_vertex_buffer_layouts, mesh); if self.general_vertex_slabs_supported { self.allocate( mesh_id, vertex_buffer_size, vertex_element_layout, &mut slabs_to_grow, mesh_allocator_settings, ); } else { self.allocate_large(mesh_id, vertex_element_layout); } // Allocate index data. if let (Some(index_buffer_data), Some(index_element_layout)) = (mesh.get_index_buffer_bytes(), ElementLayout::index(mesh)) { self.allocate( mesh_id, index_buffer_data.len() as u64, index_element_layout, &mut slabs_to_grow, mesh_allocator_settings, ); } } // Perform growth. for (slab_id, slab_to_grow) in slabs_to_grow.0 { self.reallocate_slab(render_device, render_queue, slab_id, slab_to_grow); } // Copy new mesh data in. for (mesh_id, mesh) in &extracted_meshes.extracted { self.copy_mesh_vertex_data(mesh_id, mesh, render_device, render_queue); self.copy_mesh_index_data(mesh_id, mesh, render_device, render_queue); } } /// Copies vertex array data from a mesh into the appropriate spot in the /// slab. fn copy_mesh_vertex_data( &mut self, mesh_id: &AssetId<Mesh>, mesh: &Mesh, render_device: &RenderDevice, render_queue: &RenderQueue, ) { let Some(&slab_id) = self.mesh_id_to_vertex_slab.get(mesh_id) else { return; }; // Call the generic function. self.copy_element_data( mesh_id, mesh.get_vertex_buffer_size(), |slice| mesh.write_packed_vertex_buffer_data(slice), BufferUsages::VERTEX, slab_id, render_device, render_queue, ); } /// Copies index array data from a mesh into the appropriate spot in the /// slab. 
fn copy_mesh_index_data( &mut self, mesh_id: &AssetId<Mesh>, mesh: &Mesh, render_device: &RenderDevice, render_queue: &RenderQueue, ) { let Some(&slab_id) = self.mesh_id_to_index_slab.get(mesh_id) else { return; }; let Some(index_data) = mesh.get_index_buffer_bytes() else { return; }; // Call the generic function. self.copy_element_data( mesh_id, index_data.len(), |slice| slice.copy_from_slice(index_data), BufferUsages::INDEX, slab_id, render_device, render_queue, ); } /// A generic function that copies either vertex or index data into a slab. fn copy_element_data( &mut self, mesh_id: &AssetId<Mesh>, len: usize, fill_data: impl Fn(&mut [u8]), buffer_usages: BufferUsages, slab_id: SlabId, render_device: &RenderDevice, render_queue: &RenderQueue, ) { let Some(slab) = self.slabs.get_mut(&slab_id) else { return; }; match *slab { Slab::General(ref mut general_slab) => { let (Some(buffer), Some(allocated_range)) = ( &general_slab.buffer, general_slab.pending_allocations.remove(mesh_id), ) else { return; }; let slot_size = general_slab.element_layout.slot_size(); // round up size to a multiple of the slot size to satisfy wgpu alignment requirements if let Some(size) = BufferSize::new((len as u64).next_multiple_of(slot_size)) { // Write the data in. if let Some(mut buffer) = render_queue.write_buffer_with( buffer, allocated_range.allocation.offset as u64 * slot_size, size, ) { let slice = &mut buffer.as_mut()[..len]; fill_data(slice); } } // Mark the allocation as resident. general_slab .resident_allocations .insert(*mesh_id, allocated_range); } Slab::LargeObject(ref mut large_object_slab) => { debug_assert!(large_object_slab.buffer.is_none()); // Create the buffer and its data in one go. 
let buffer = render_device.create_buffer(&BufferDescriptor { label: Some(&format!( "large mesh slab {} ({}buffer)", slab_id, buffer_usages_to_str(buffer_usages) )), size: len as u64, usage: buffer_usages | BufferUsages::COPY_DST | self.extra_buffer_usages, mapped_at_creation: true, }); { let slice = &mut buffer.slice(..).get_mapped_range_mut()[..len]; fill_data(slice); } buffer.unmap(); large_object_slab.buffer = Some(buffer); } } } /// Frees allocations for meshes that were removed or modified this frame. fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets<RenderMesh>) { let mut empty_slabs = <HashSet<_>>::default(); // TODO: Consider explicitly reusing allocations for changed meshes of the same size let meshes_to_free = extracted_meshes .removed .iter() .chain(extracted_meshes.modified.iter()); for mesh_id in meshes_to_free { if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) { self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs); } if let Some(slab_id) = self.mesh_id_to_index_slab.remove(mesh_id) { self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs); } } for empty_slab in empty_slabs { self.slab_layouts.values_mut().for_each(|slab_ids| { let idx = slab_ids.iter().position(|&slab_id| slab_id == empty_slab); if let Some(idx) = idx { slab_ids.remove(idx); } }); self.slabs.remove(&empty_slab); } } /// Given a slab and the ID of a mesh containing data in it, marks the /// allocation as free. /// /// If this results in the slab becoming empty, this function adds the slab /// to the `empty_slabs` set. 
fn free_allocation_in_slab( &mut self, mesh_id: &AssetId<Mesh>, slab_id: SlabId, empty_slabs: &mut HashSet<SlabId>, ) { let Some(slab) = self.slabs.get_mut(&slab_id) else { return; }; match *slab { Slab::General(ref mut general_slab) => { let Some(slab_allocation) = general_slab .resident_allocations .remove(mesh_id) .or_else(|| general_slab.pending_allocations.remove(mesh_id)) else { return; }; general_slab.allocator.free(slab_allocation.allocation); if general_slab.is_empty() { empty_slabs.insert(slab_id); } } Slab::LargeObject(_) => { empty_slabs.insert(slab_id); } } } /// Allocates space for mesh data with the given byte size and layout in the /// appropriate slab, creating that slab if necessary. fn allocate( &mut self, mesh_id: &AssetId<Mesh>, data_byte_len: u64, layout: ElementLayout, slabs_to_grow: &mut SlabsToReallocate, settings: &MeshAllocatorSettings, ) { let data_element_count = data_byte_len.div_ceil(layout.size) as u32; let data_slot_count = data_element_count.div_ceil(layout.elements_per_slot); // If the mesh data is too large for a slab, give it a slab of its own. if data_slot_count as u64 * layout.slot_size() >= settings.large_threshold.min(settings.max_slab_size) { self.allocate_large(mesh_id, layout); } else { self.allocate_general(mesh_id, data_slot_count, layout, slabs_to_grow, settings); } } /// Allocates space for mesh data with the given slot size and layout in the /// appropriate general slab. fn allocate_general( &mut self, mesh_id: &AssetId<Mesh>, data_slot_count: u32, layout: ElementLayout, slabs_to_grow: &mut SlabsToReallocate, settings: &MeshAllocatorSettings, ) { let candidate_slabs = self.slab_layouts.entry(layout).or_default(); // Loop through the slabs that accept elements of the appropriate type // and try to allocate the mesh inside them. We go with the first one // that succeeds. 
let mut mesh_allocation = None; for &slab_id in &*candidate_slabs { let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else { unreachable!("Slab not found") }; let Some(allocation) = slab.allocator.allocate(data_slot_count) else { continue; }; // Try to fit the object in the slab, growing if necessary. match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) { SlabGrowthResult::NoGrowthNeeded => {} SlabGrowthResult::NeededGrowth(slab_to_reallocate) => { // If we already grew the slab this frame, don't replace the // `SlabToReallocate` entry. We want to keep the entry // corresponding to the size that the slab had at the start // of the frame, so that we can copy only the used portion // of the initial buffer to the new one. if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) { vacant_entry.insert(slab_to_reallocate); } } SlabGrowthResult::CantGrow => continue, } mesh_allocation = Some(MeshAllocation { slab_id, slab_allocation: SlabAllocation { allocation, slot_count: data_slot_count, }, }); break; } // If we still have no allocation, make a new slab. if mesh_allocation.is_none() { let new_slab_id = self.next_slab_id; self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default(); let new_slab = GeneralSlab::new( new_slab_id, &mut mesh_allocation, settings, layout, data_slot_count, ); self.slabs.insert(new_slab_id, Slab::General(new_slab)); candidate_slabs.push(new_slab_id); slabs_to_grow.insert(new_slab_id, SlabToReallocate::default()); } let mesh_allocation = mesh_allocation.expect("Should have been able to allocate"); // Mark the allocation as pending. Don't copy it in just yet; further // meshes loaded this frame may result in its final allocation location // changing. 
if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) { general_slab .pending_allocations .insert(*mesh_id, mesh_allocation.slab_allocation); }; self.record_allocation(mesh_id, mesh_allocation.slab_id, layout.class); } /// Allocates an object into its own dedicated slab. fn allocate_large(&mut self, mesh_id: &AssetId<Mesh>, layout: ElementLayout) { let new_slab_id = self.next_slab_id; self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default(); self.record_allocation(mesh_id, new_slab_id, layout.class); self.slabs.insert( new_slab_id, Slab::LargeObject(LargeObjectSlab { buffer: None, element_layout: layout, }), ); } /// Reallocates a slab that needs to be resized, or allocates a new slab. /// /// This performs the actual growth operation that /// [`GeneralSlab::grow_if_necessary`] scheduled. We do the growth in two /// phases so that, if a slab grows multiple times in the same frame, only /// one new buffer is reallocated, rather than reallocating the buffer /// multiple times. fn reallocate_slab( &mut self, render_device: &RenderDevice, render_queue: &RenderQueue, slab_id: SlabId, slab_to_grow: SlabToReallocate, ) { let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else { error!("Couldn't find slab {} to grow", slab_id); return; }; let old_buffer = slab.buffer.take(); let mut buffer_usages = BufferUsages::COPY_SRC | BufferUsages::COPY_DST; match slab.element_layout.class { ElementClass::Vertex => buffer_usages |= BufferUsages::VERTEX, ElementClass::Index => buffer_usages |= BufferUsages::INDEX, }; // Create the buffer. 
let new_buffer = render_device.create_buffer(&BufferDescriptor { label: Some(&format!( "general mesh slab {} ({}buffer)", slab_id, buffer_usages_to_str(buffer_usages) )), size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(), usage: buffer_usages | self.extra_buffer_usages, mapped_at_creation: false, }); slab.buffer = Some(new_buffer.clone()); let Some(old_buffer) = old_buffer else { return }; // In order to do buffer copies, we need a command encoder. let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { label: Some("slab resize encoder"), }); // Copy the data from the old buffer into the new one. encoder.copy_buffer_to_buffer( &old_buffer, 0, &new_buffer, 0, slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(), ); let command_buffer = encoder.finish(); render_queue.submit([command_buffer]); } /// Records the location of the given newly-allocated mesh data in the /// [`Self::mesh_id_to_vertex_slab`] or [`Self::mesh_id_to_index_slab`] /// tables as appropriate. fn record_allocation( &mut self, mesh_id: &AssetId<Mesh>, slab_id: SlabId, element_class: ElementClass,
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_render/src/mesh/mod.rs
crates/bevy_render/src/mesh/mod.rs
pub mod allocator; use crate::{ render_asset::{ AssetExtractionError, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets, }, texture::GpuImage, RenderApp, }; use allocator::MeshAllocatorPlugin; use bevy_app::{App, Plugin}; use bevy_asset::{AssetId, RenderAssetUsages}; use bevy_ecs::{ prelude::*, system::{ lifetimeless::{SRes, SResMut}, SystemParamItem, }, }; #[cfg(feature = "morph")] use bevy_mesh::morph::{MeshMorphWeights, MorphWeights}; use bevy_mesh::*; use wgpu::IndexFormat; /// Makes sure that [`Mesh`]es are extracted and prepared for the GPU. /// Does *not* add the [`Mesh`] as an asset. Use [`MeshPlugin`] for that. pub struct MeshRenderAssetPlugin; impl Plugin for MeshRenderAssetPlugin { fn build(&self, app: &mut App) { app // 'Mesh' must be prepared after 'Image' as meshes rely on the morph target image being ready .add_plugins(RenderAssetPlugin::<RenderMesh, GpuImage>::default()) .add_plugins(MeshAllocatorPlugin); let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; render_app.init_resource::<MeshVertexBufferLayouts>(); } } /// [Inherit weights](inherit_weights) from glTF mesh parent entity to direct /// bevy mesh child entities (ie: glTF primitive). #[cfg(feature = "morph")] pub struct MorphPlugin; #[cfg(feature = "morph")] impl Plugin for MorphPlugin { fn build(&self, app: &mut App) { app.add_systems( bevy_app::PostUpdate, inherit_weights.in_set(InheritWeightSystems), ); } } /// Bevy meshes are gltf primitives, [`MorphWeights`] on the bevy node entity /// should be inherited by children meshes. /// /// Only direct children are updated, to fulfill the expectations of glTF spec. 
#[cfg(feature = "morph")] pub fn inherit_weights( morph_nodes: Query<(&Children, &MorphWeights), (Without<Mesh3d>, Changed<MorphWeights>)>, mut morph_primitives: Query<&mut MeshMorphWeights, With<Mesh3d>>, ) { for (children, parent_weights) in &morph_nodes { let mut iter = morph_primitives.iter_many_mut(children); while let Some(mut child_weight) = iter.fetch_next() { child_weight.clear_weights(); child_weight.extend_weights(parent_weights.weights()); } } } /// The render world representation of a [`Mesh`]. #[derive(Debug, Clone)] pub struct RenderMesh { /// The number of vertices in the mesh. pub vertex_count: u32, /// Morph targets for the mesh, if present. #[cfg(feature = "morph")] pub morph_targets: Option<crate::render_resource::TextureView>, /// Information about the mesh data buffers, including whether the mesh uses /// indices or not. pub buffer_info: RenderMeshBufferInfo, /// Precomputed pipeline key bits for this mesh. pub key_bits: BaseMeshPipelineKey, /// A reference to the vertex buffer layout. /// /// Combined with [`RenderMesh::buffer_info`], this specifies the complete /// layout of the buffers associated with this mesh. pub layout: MeshVertexBufferLayoutRef, } impl RenderMesh { /// Returns the primitive topology of this mesh (triangles, triangle strips, /// etc.) #[inline] pub fn primitive_topology(&self) -> PrimitiveTopology { self.key_bits.primitive_topology() } /// Returns true if this mesh uses an index buffer or false otherwise. #[inline] pub fn indexed(&self) -> bool { matches!(self.buffer_info, RenderMeshBufferInfo::Indexed { .. }) } } /// The index/vertex buffer info of a [`RenderMesh`]. 
#[derive(Debug, Clone)] pub enum RenderMeshBufferInfo { Indexed { count: u32, index_format: IndexFormat, }, NonIndexed, } impl RenderAsset for RenderMesh { type SourceAsset = Mesh; type Param = ( SRes<RenderAssets<GpuImage>>, SResMut<MeshVertexBufferLayouts>, ); #[inline] fn asset_usage(mesh: &Self::SourceAsset) -> RenderAssetUsages { mesh.asset_usage } fn take_gpu_data( source: &mut Self::SourceAsset, _previous_gpu_asset: Option<&Self>, ) -> Result<Self::SourceAsset, AssetExtractionError> { source .take_gpu_data() .map_err(|_| AssetExtractionError::AlreadyExtracted) } fn byte_len(mesh: &Self::SourceAsset) -> Option<usize> { let mut vertex_size = 0; for attribute_data in mesh.attributes() { let vertex_format = attribute_data.0.format; vertex_size += vertex_format.size() as usize; } let vertex_count = mesh.count_vertices(); let index_bytes = mesh.get_index_buffer_bytes().map(<[_]>::len).unwrap_or(0); Some(vertex_size * vertex_count + index_bytes) } /// Converts the extracted mesh into a [`RenderMesh`]. 
fn prepare_asset( mesh: Self::SourceAsset, _: AssetId<Self::SourceAsset>, (_images, mesh_vertex_buffer_layouts): &mut SystemParamItem<Self::Param>, _: Option<&Self>, ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> { #[cfg(feature = "morph")] let morph_targets = match mesh.morph_targets() { Some(mt) => { let Some(target_image) = _images.get(mt) else { return Err(PrepareAssetError::RetryNextUpdate(mesh)); }; Some(target_image.texture_view.clone()) } None => None, }; let buffer_info = match mesh.indices() { Some(indices) => RenderMeshBufferInfo::Indexed { count: indices.len() as u32, index_format: indices.into(), }, None => RenderMeshBufferInfo::NonIndexed, }; let mesh_vertex_buffer_layout = mesh.get_mesh_vertex_buffer_layout(mesh_vertex_buffer_layouts); let key_bits = BaseMeshPipelineKey::from_primitive_topology(mesh.primitive_topology()); #[cfg(feature = "morph")] let key_bits = if mesh.morph_targets().is_some() { key_bits | BaseMeshPipelineKey::MORPH_TARGETS } else { key_bits }; Ok(RenderMesh { vertex_count: mesh.count_vertices() as u32, buffer_info, key_bits, layout: mesh_vertex_buffer_layout, #[cfg(feature = "morph")] morph_targets, }) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/serde.rs
crates/bevy_scene/src/serde.rs
//! `serde` serialization and deserialization implementation for Bevy scenes. use crate::{DynamicEntity, DynamicScene}; use bevy_ecs::entity::Entity; use bevy_platform::collections::HashSet; use bevy_reflect::{ serde::{ ReflectDeserializer, TypeRegistrationDeserializer, TypedReflectDeserializer, TypedReflectSerializer, }, PartialReflect, ReflectFromReflect, TypeRegistry, }; use core::fmt::Formatter; use serde::{ de::{DeserializeSeed, Error, MapAccess, SeqAccess, Visitor}, ser::{SerializeMap, SerializeStruct}, Deserialize, Deserializer, Serialize, Serializer, }; /// Name of the serialized scene struct type. pub const SCENE_STRUCT: &str = "Scene"; /// Name of the serialized resources field in a scene struct. pub const SCENE_RESOURCES: &str = "resources"; /// Name of the serialized entities field in a scene struct. pub const SCENE_ENTITIES: &str = "entities"; /// Name of the serialized entity struct type. pub const ENTITY_STRUCT: &str = "Entity"; /// Name of the serialized component field in an entity struct. pub const ENTITY_FIELD_COMPONENTS: &str = "components"; /// Serializer for a [`DynamicScene`]. /// /// Helper object defining Bevy's serialize format for a [`DynamicScene`] and implementing /// the [`Serialize`] trait for use with Serde. 
/// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_scene::{DynamicScene, serde::SceneSerializer}; /// # let mut world = World::default(); /// # world.insert_resource(AppTypeRegistry::default()); /// // Get the type registry /// let registry = world.resource::<AppTypeRegistry>(); /// let registry = registry.read(); /// /// // Get a DynamicScene to serialize, for example from the World itself /// let scene = DynamicScene::from_world(&world); /// /// // Create a serializer for that DynamicScene, using the associated TypeRegistry /// let scene_serializer = SceneSerializer::new(&scene, &registry); /// /// // Serialize through any serde-compatible Serializer /// let ron_string = ron::ser::to_string(&scene_serializer); /// ``` pub struct SceneSerializer<'a> { /// The scene to serialize. pub scene: &'a DynamicScene, /// The type registry containing the types present in the scene. pub registry: &'a TypeRegistry, } impl<'a> SceneSerializer<'a> { /// Create a new serializer from a [`DynamicScene`] and an associated [`TypeRegistry`]. /// /// The type registry must contain all types present in the scene. This is generally the case /// if you obtain both the scene and the registry from the same [`World`]. /// /// [`World`]: bevy_ecs::world::World pub fn new(scene: &'a DynamicScene, registry: &'a TypeRegistry) -> Self { SceneSerializer { scene, registry } } } impl<'a> Serialize for SceneSerializer<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct(SCENE_STRUCT, 2)?; state.serialize_field( SCENE_RESOURCES, &SceneMapSerializer { entries: &self.scene.resources, registry: self.registry, }, )?; state.serialize_field( SCENE_ENTITIES, &EntitiesSerializer { entities: &self.scene.entities, registry: self.registry, }, )?; state.end() } } /// Handles serialization of multiple entities as a map of entity id to serialized entity. 
pub struct EntitiesSerializer<'a> { /// The entities to serialize. pub entities: &'a [DynamicEntity], /// Type registry in which the component types used by the entities are registered. pub registry: &'a TypeRegistry, } impl<'a> Serialize for EntitiesSerializer<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_map(Some(self.entities.len()))?; for entity in self.entities { state.serialize_entry( &entity.entity, &EntitySerializer { entity, registry: self.registry, }, )?; } state.end() } } /// Handles entity serialization as a map of component type to component value. pub struct EntitySerializer<'a> { /// The entity to serialize. pub entity: &'a DynamicEntity, /// Type registry in which the component types used by the entity are registered. pub registry: &'a TypeRegistry, } impl<'a> Serialize for EntitySerializer<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct(ENTITY_STRUCT, 1)?; state.serialize_field( ENTITY_FIELD_COMPONENTS, &SceneMapSerializer { entries: &self.entity.components, registry: self.registry, }, )?; state.end() } } /// Handles serializing a list of values with a unique type as a map of type to value. /// /// Used to serialize scene resources in [`SceneSerializer`] and entity components in [`EntitySerializer`]. /// Note that having several entries of the same type in `entries` will lead to an error when using the RON format and /// deserializing through [`SceneMapDeserializer`]. /// /// Note: The entries are sorted by type path before they're serialized. pub struct SceneMapSerializer<'a> { /// List of boxed values of unique type to serialize. pub entries: &'a [Box<dyn PartialReflect>], /// Type registry in which the types used in `entries` are registered. 
pub registry: &'a TypeRegistry, } impl<'a> Serialize for SceneMapSerializer<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_map(Some(self.entries.len()))?; let sorted_entries = { let mut entries = self .entries .iter() .map(|entry| { ( entry.get_represented_type_info().unwrap().type_path(), entry.as_partial_reflect(), ) }) .collect::<Vec<_>>(); entries.sort_by_key(|(type_path, _)| *type_path); entries }; for (type_path, partial_reflect) in sorted_entries { state.serialize_entry( type_path, &TypedReflectSerializer::new(partial_reflect, self.registry), )?; } state.end() } } #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum SceneField { Resources, Entities, } #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum EntityField { Components, } /// Handles scene deserialization. pub struct SceneDeserializer<'a> { /// Type registry in which the components and resources types used in the scene to deserialize are registered. pub type_registry: &'a TypeRegistry, } impl<'a, 'de> DeserializeSeed<'de> for SceneDeserializer<'a> { type Value = DynamicScene; fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( SCENE_STRUCT, &[SCENE_RESOURCES, SCENE_ENTITIES], SceneVisitor { type_registry: self.type_registry, }, ) } } struct SceneVisitor<'a> { pub type_registry: &'a TypeRegistry, } impl<'a, 'de> Visitor<'de> for SceneVisitor<'a> { type Value = DynamicScene; fn expecting(&self, formatter: &mut Formatter) -> core::fmt::Result { formatter.write_str("scene struct") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de>, { let resources = seq .next_element_seed(SceneMapDeserializer { registry: self.type_registry, })? 
.ok_or_else(|| Error::missing_field(SCENE_RESOURCES))?; let entities = seq .next_element_seed(SceneEntitiesDeserializer { type_registry: self.type_registry, })? .ok_or_else(|| Error::missing_field(SCENE_ENTITIES))?; Ok(DynamicScene { resources, entities, }) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: MapAccess<'de>, { let mut resources = None; let mut entities = None; while let Some(key) = map.next_key()? { match key { SceneField::Resources => { if resources.is_some() { return Err(Error::duplicate_field(SCENE_RESOURCES)); } resources = Some(map.next_value_seed(SceneMapDeserializer { registry: self.type_registry, })?); } SceneField::Entities => { if entities.is_some() { return Err(Error::duplicate_field(SCENE_ENTITIES)); } entities = Some(map.next_value_seed(SceneEntitiesDeserializer { type_registry: self.type_registry, })?); } } } let resources = resources.ok_or_else(|| Error::missing_field(SCENE_RESOURCES))?; let entities = entities.ok_or_else(|| Error::missing_field(SCENE_ENTITIES))?; Ok(DynamicScene { resources, entities, }) } } /// Handles deserialization for a collection of entities. pub struct SceneEntitiesDeserializer<'a> { /// Type registry in which the component types used by the entities to deserialize are registered. 
pub type_registry: &'a TypeRegistry, } impl<'a, 'de> DeserializeSeed<'de> for SceneEntitiesDeserializer<'a> { type Value = Vec<DynamicEntity>; fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_map(SceneEntitiesVisitor { type_registry: self.type_registry, }) } } struct SceneEntitiesVisitor<'a> { pub type_registry: &'a TypeRegistry, } impl<'a, 'de> Visitor<'de> for SceneEntitiesVisitor<'a> { type Value = Vec<DynamicEntity>; fn expecting(&self, formatter: &mut Formatter) -> core::fmt::Result { formatter.write_str("map of entities") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: MapAccess<'de>, { let mut entities = Vec::new(); while let Some(entity) = map.next_key::<Entity>()? { let entity = map.next_value_seed(SceneEntityDeserializer { entity, type_registry: self.type_registry, })?; entities.push(entity); } Ok(entities) } } /// Handle deserialization of an entity and its components. pub struct SceneEntityDeserializer<'a> { /// Id of the deserialized entity. pub entity: Entity, /// Type registry in which the component types used by the entity to deserialize are registered. 
pub type_registry: &'a TypeRegistry, } impl<'a, 'de> DeserializeSeed<'de> for SceneEntityDeserializer<'a> { type Value = DynamicEntity; fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( ENTITY_STRUCT, &[ENTITY_FIELD_COMPONENTS], SceneEntityVisitor { entity: self.entity, registry: self.type_registry, }, ) } } struct SceneEntityVisitor<'a> { pub entity: Entity, pub registry: &'a TypeRegistry, } impl<'a, 'de> Visitor<'de> for SceneEntityVisitor<'a> { type Value = DynamicEntity; fn expecting(&self, formatter: &mut Formatter) -> core::fmt::Result { formatter.write_str("entities") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de>, { let components = seq .next_element_seed(SceneMapDeserializer { registry: self.registry, })? .ok_or_else(|| Error::missing_field(ENTITY_FIELD_COMPONENTS))?; Ok(DynamicEntity { entity: self.entity, components, }) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: MapAccess<'de>, { let mut components = None; while let Some(key) = map.next_key()? { match key { EntityField::Components => { if components.is_some() { return Err(Error::duplicate_field(ENTITY_FIELD_COMPONENTS)); } components = Some(map.next_value_seed(SceneMapDeserializer { registry: self.registry, })?); } } } let components = components .take() .ok_or_else(|| Error::missing_field(ENTITY_FIELD_COMPONENTS))?; Ok(DynamicEntity { entity: self.entity, components, }) } } /// Handles deserialization of a sequence of values with unique types. pub struct SceneMapDeserializer<'a> { /// Type registry in which the types of the values to deserialize are registered. 
pub registry: &'a TypeRegistry, } impl<'a, 'de> DeserializeSeed<'de> for SceneMapDeserializer<'a> { type Value = Vec<Box<dyn PartialReflect>>; fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_map(SceneMapVisitor { registry: self.registry, }) } } struct SceneMapVisitor<'a> { pub registry: &'a TypeRegistry, } impl<'a, 'de> Visitor<'de> for SceneMapVisitor<'a> { type Value = Vec<Box<dyn PartialReflect>>; fn expecting(&self, formatter: &mut Formatter) -> core::fmt::Result { formatter.write_str("map of reflect types") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de>, { let mut dynamic_properties = Vec::new(); while let Some(entity) = seq.next_element_seed(ReflectDeserializer::new(self.registry))? { dynamic_properties.push(entity); } Ok(dynamic_properties) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: MapAccess<'de>, { let mut added = <HashSet<_>>::default(); let mut entries = Vec::new(); while let Some(registration) = map.next_key_seed(TypeRegistrationDeserializer::new(self.registry))? { if !added.insert(registration.type_id()) { return Err(Error::custom(format_args!( "duplicate reflect type: `{}`", registration.type_info().type_path(), ))); } let value = map.next_value_seed(TypedReflectDeserializer::new(registration, self.registry))?; // Attempt to convert using FromReflect. 
let value = self .registry .get(registration.type_id()) .and_then(|tr| tr.data::<ReflectFromReflect>()) .and_then(|fr| fr.from_reflect(value.as_partial_reflect())) .map(PartialReflect::into_partial_reflect) .unwrap_or(value); entries.push(value); } Ok(entries) } } #[cfg(test)] mod tests { use crate::{ serde::{SceneDeserializer, SceneSerializer}, DynamicScene, DynamicSceneBuilder, }; use bevy_ecs::{ entity::{Entity, EntityHashMap}, prelude::{Component, ReflectComponent, ReflectResource, Resource, World}, query::{With, Without}, reflect::AppTypeRegistry, world::FromWorld, }; use bevy_reflect::{Reflect, ReflectDeserialize, ReflectSerialize}; use ron; use serde::{de::DeserializeSeed, Deserialize, Serialize}; use std::io::BufReader; #[derive(Component, Reflect, Default)] #[reflect(Component)] struct Foo(i32); #[derive(Component, Reflect, Default)] #[reflect(Component)] struct Bar(i32); #[derive(Component, Reflect, Default)] #[reflect(Component)] struct Baz(i32); // De/serialize as hex. mod qux { use serde::{de::Error, Deserialize, Deserializer, Serializer}; pub fn serialize<S>(value: &u32, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(&format!("{value:X}")) } pub fn deserialize<'de, D>(deserializer: D) -> Result<u32, D::Error> where D: Deserializer<'de>, { u32::from_str_radix(<&str as Deserialize>::deserialize(deserializer)?, 16) .map_err(Error::custom) } } #[derive(Component, Copy, Clone, Reflect, Debug, PartialEq, Serialize, Deserialize)] #[reflect(Component, Serialize, Deserialize)] struct Qux(#[serde(with = "qux")] u32); #[derive(Component, Reflect, Default)] #[reflect(Component)] struct MyComponent { foo: [usize; 3], bar: (f32, f32), baz: MyEnum, } #[derive(Reflect, Default)] enum MyEnum { #[default] Unit, Tuple(String), Struct { value: u32, }, } #[derive(Resource, Reflect, Default)] #[reflect(Resource)] struct MyResource { foo: i32, } #[derive(Clone, Component, Reflect, PartialEq)] #[reflect(Component, PartialEq)] struct 
MyEntityRef(#[entities] Entity); impl FromWorld for MyEntityRef { fn from_world(_world: &mut World) -> Self { Self(Entity::PLACEHOLDER) } } fn create_world() -> World { let mut world = World::new(); let registry = AppTypeRegistry::default(); { let mut registry = registry.write(); registry.register::<Foo>(); registry.register::<Bar>(); registry.register::<Baz>(); registry.register::<Qux>(); registry.register::<MyComponent>(); registry.register::<MyEnum>(); registry.register::<String>(); registry.register_type_data::<String, ReflectSerialize>(); registry.register::<[usize; 3]>(); registry.register::<(f32, f32)>(); registry.register::<MyEntityRef>(); registry.register::<Entity>(); registry.register::<MyResource>(); } world.insert_resource(registry); world } #[test] fn should_serialize() { let mut world = create_world(); let a = world.spawn(Foo(123)).id(); let b = world.spawn((Foo(123), Bar(345))).id(); let c = world.spawn((Foo(123), Bar(345), Baz(789))).id(); world.insert_resource(MyResource { foo: 123 }); let scene = DynamicSceneBuilder::from_world(&world) .extract_entities([a, b, c].into_iter()) .extract_resources() .build(); let expected = r#"( resources: { "bevy_scene::serde::tests::MyResource": ( foo: 123, ), }, entities: { 4294967293: ( components: { "bevy_scene::serde::tests::Bar": (345), "bevy_scene::serde::tests::Baz": (789), "bevy_scene::serde::tests::Foo": (123), }, ), 4294967294: ( components: { "bevy_scene::serde::tests::Bar": (345), "bevy_scene::serde::tests::Foo": (123), }, ), 4294967295: ( components: { "bevy_scene::serde::tests::Foo": (123), }, ), }, )"#; let output = scene .serialize(&world.resource::<AppTypeRegistry>().read()) .unwrap(); assert_eq!(expected, output); } #[test] fn should_deserialize() { let world = create_world(); let input = r#"( resources: { "bevy_scene::serde::tests::MyResource": ( foo: 123, ), }, entities: { 8589934591: ( components: { "bevy_scene::serde::tests::Foo": (123), }, ), 8589934590: ( components: { 
"bevy_scene::serde::tests::Foo": (123), "bevy_scene::serde::tests::Bar": (345), }, ), 8589934589: ( components: { "bevy_scene::serde::tests::Foo": (123), "bevy_scene::serde::tests::Bar": (345), "bevy_scene::serde::tests::Baz": (789), }, ), }, )"#; let mut deserializer = ron::de::Deserializer::from_str(input).unwrap(); let scene_deserializer = SceneDeserializer { type_registry: &world.resource::<AppTypeRegistry>().read(), }; let scene = scene_deserializer.deserialize(&mut deserializer).unwrap(); assert_eq!( 1, scene.resources.len(), "expected `resources` to contain 1 resource" ); assert_eq!( 3, scene.entities.len(), "expected `entities` to contain 3 entities" ); let mut map = EntityHashMap::default(); let mut dst_world = create_world(); scene.write_to_world(&mut dst_world, &mut map).unwrap(); let my_resource = dst_world.get_resource::<MyResource>(); assert!(my_resource.is_some()); let my_resource = my_resource.unwrap(); assert_eq!(my_resource.foo, 123); assert_eq!(3, dst_world.query::<&Foo>().iter(&dst_world).count()); assert_eq!(2, dst_world.query::<&Bar>().iter(&dst_world).count()); assert_eq!(1, dst_world.query::<&Baz>().iter(&dst_world).count()); } fn roundtrip_ron(world: &World) -> (DynamicScene, DynamicScene) { let scene = DynamicScene::from_world(world); let registry = world.resource::<AppTypeRegistry>().read(); let serialized = scene.serialize(&registry).unwrap(); let mut deserializer = ron::de::Deserializer::from_str(&serialized).unwrap(); let scene_deserializer = SceneDeserializer { type_registry: &registry, }; let deserialized_scene = scene_deserializer.deserialize(&mut deserializer).unwrap(); (scene, deserialized_scene) } #[test] fn should_roundtrip_with_later_generations_and_obsolete_references() { let mut world = create_world(); world.spawn_empty().despawn(); let a = world.spawn_empty().id(); let foo = world.spawn(MyEntityRef(a)).insert(Foo(123)).id(); world.despawn(a); world.spawn(MyEntityRef(foo)).insert(Bar(123)); let (scene, deserialized_scene) = 
roundtrip_ron(&world); let mut map = EntityHashMap::default(); let mut dst_world = create_world(); deserialized_scene .write_to_world(&mut dst_world, &mut map) .unwrap(); assert_eq!(2, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); let bar_to_foo = dst_world .query_filtered::<&MyEntityRef, Without<Foo>>() .single(&dst_world) .cloned() .unwrap(); let foo = dst_world .query_filtered::<Entity, With<Foo>>() .single(&dst_world) .unwrap(); assert_eq!(foo, bar_to_foo.0); assert!(dst_world .query_filtered::<&MyEntityRef, With<Foo>>() .iter(&dst_world) .all(|r| world.get_entity(r.0).is_err())); } #[test] fn should_roundtrip_with_custom_serialization() { let mut world = create_world(); let qux = Qux(42); world.spawn(qux); let (scene, deserialized_scene) = roundtrip_ron(&world); assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); let mut world = create_world(); deserialized_scene .write_to_world(&mut world, &mut EntityHashMap::default()) .unwrap(); assert_eq!(&qux, world.query::<&Qux>().single(&world).unwrap()); } #[test] fn should_roundtrip_postcard() { let mut world = create_world(); world.spawn(MyComponent { foo: [1, 2, 3], bar: (1.3, 3.7), baz: MyEnum::Tuple("Hello World!".to_string()), }); let registry = world.resource::<AppTypeRegistry>(); let registry = &registry.read(); let scene = DynamicScene::from_world(&world); let scene_serializer = SceneSerializer::new(&scene, registry); let serialized_scene = postcard::to_allocvec(&scene_serializer).unwrap(); assert_eq!( vec![ 0, 1, 255, 255, 255, 255, 15, 1, 37, 98, 101, 118, 121, 95, 115, 99, 101, 110, 101, 58, 58, 115, 101, 114, 100, 101, 58, 58, 116, 101, 115, 116, 115, 58, 58, 77, 121, 67, 111, 109, 112, 111, 110, 101, 110, 116, 1, 2, 3, 102, 102, 166, 63, 205, 204, 108, 64, 1, 12, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33 ], serialized_scene ); let scene_deserializer = SceneDeserializer { type_registry: registry, }; let 
deserialized_scene = scene_deserializer .deserialize(&mut postcard::Deserializer::from_bytes(&serialized_scene)) .unwrap(); assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); } #[test] fn should_roundtrip_messagepack() { let mut world = create_world(); world.spawn(MyComponent { foo: [1, 2, 3], bar: (1.3, 3.7), baz: MyEnum::Tuple("Hello World!".to_string()), }); let registry = world.resource::<AppTypeRegistry>(); let registry = &registry.read(); let scene = DynamicScene::from_world(&world); let scene_serializer = SceneSerializer::new(&scene, registry); let mut buf = Vec::new(); let mut ser = rmp_serde::Serializer::new(&mut buf); scene_serializer.serialize(&mut ser).unwrap(); assert_eq!( vec![ 146, 128, 129, 206, 255, 255, 255, 255, 145, 129, 217, 37, 98, 101, 118, 121, 95, 115, 99, 101, 110, 101, 58, 58, 115, 101, 114, 100, 101, 58, 58, 116, 101, 115, 116, 115, 58, 58, 77, 121, 67, 111, 109, 112, 111, 110, 101, 110, 116, 147, 147, 1, 2, 3, 146, 202, 63, 166, 102, 102, 202, 64, 108, 204, 205, 129, 165, 84, 117, 112, 108, 101, 172, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33 ], buf ); let scene_deserializer = SceneDeserializer { type_registry: registry, }; let mut reader = BufReader::new(buf.as_slice()); let deserialized_scene = scene_deserializer .deserialize(&mut rmp_serde::Deserializer::new(&mut reader)) .unwrap(); assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); } #[test] fn should_roundtrip_bincode() { let mut world = create_world(); world.spawn(MyComponent { foo: [1, 2, 3], bar: (1.3, 3.7), baz: MyEnum::Tuple("Hello World!".to_string()), }); let registry = world.resource::<AppTypeRegistry>(); let registry = &registry.read(); let scene = DynamicScene::from_world(&world); let config = bincode::config::standard().with_fixed_int_encoding(); let scene_serializer = SceneSerializer::new(&scene, registry); let serialized_scene = 
bincode::serde::encode_to_vec(&scene_serializer, config).unwrap(); assert_eq!( vec![ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0, 0, 0, 0, 0, 0, 98, 101, 118, 121, 95, 115, 99, 101, 110, 101, 58, 58, 115, 101, 114, 100, 101, 58, 58, 116, 101, 115, 116, 115, 58, 58, 77, 121, 67, 111, 109, 112, 111, 110, 101, 110, 116, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 102, 102, 166, 63, 205, 204, 108, 64, 1, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33 ], serialized_scene ); let scene_deserializer = SceneDeserializer { type_registry: registry, }; let (deserialized_scene, _read_bytes) = bincode::serde::seed_decode_from_slice(scene_deserializer, &serialized_scene, config) .unwrap(); assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); } /// A crude equality checker for [`DynamicScene`], used solely for testing purposes. 
fn assert_scene_eq(expected: &DynamicScene, received: &DynamicScene) { assert_eq!( expected.entities.len(), received.entities.len(), "entity count did not match", ); for expected in &expected.entities { let received = received .entities .iter() .find(|dynamic_entity| dynamic_entity.entity == expected.entity) .unwrap_or_else(|| panic!("missing entity (expected: `{}`)", expected.entity)); assert_eq!(expected.entity, received.entity, "entities did not match"); for expected in &expected.components { let received = received .components .iter() .find(|component| { component.get_represented_type_info().unwrap().type_path() == expected.get_represented_type_info().unwrap().type_path() }) .unwrap_or_else(|| { panic!( "missing component (expected: `{}`)", expected.get_represented_type_info().unwrap().type_path() ) }); assert!( expected .reflect_partial_eq(received.as_ref()) .unwrap_or_default(), "components did not match: (expected: `{expected:?}`, received: `{received:?}`)", ); } } } /// These tests just verify that the [`assert_scene_eq`] function is working properly for our tests. 
mod assert_scene_eq_tests { use super::*; #[test] #[should_panic(expected = "entity count did not match")] fn should_panic_when_entity_count_not_eq() { let mut world = create_world(); let scene_a = DynamicScene::from_world(&world); world.spawn(MyComponent { foo: [1, 2, 3], bar: (1.3, 3.7), baz: MyEnum::Unit, }); let scene_b = DynamicScene::from_world(&world); assert_scene_eq(&scene_a, &scene_b); } #[test] #[should_panic(expected = "components did not match")] fn should_panic_when_components_not_eq() { let mut world = create_world(); let entity = world .spawn(MyComponent { foo: [1, 2, 3], bar: (1.3, 3.7), baz: MyEnum::Unit, }) .id(); let scene_a = DynamicScene::from_world(&world); world.entity_mut(entity).insert(MyComponent { foo: [3, 2, 1], bar: (1.3, 3.7), baz: MyEnum::Unit, }); let scene_b = DynamicScene::from_world(&world); assert_scene_eq(&scene_a, &scene_b); } #[test] #[should_panic(expected = "missing component")] fn should_panic_when_missing_component() { let mut world = create_world();
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/lib.rs
crates/bevy_scene/src/lib.rs
#![cfg_attr(docsrs, feature(doc_cfg))] #![doc( html_logo_url = "https://bevy.org/assets/icon.png", html_favicon_url = "https://bevy.org/assets/icon.png" )] //! Provides scene definition, instantiation and serialization/deserialization. //! //! Scenes are collections of entities and their associated components that can be //! instantiated or removed from a world to allow composition. Scenes can be serialized/deserialized, //! for example to save part of the world state to a file. extern crate alloc; mod components; mod dynamic_scene; mod dynamic_scene_builder; mod reflect_utils; mod scene; mod scene_filter; mod scene_loader; mod scene_spawner; #[cfg(feature = "serialize")] pub mod serde; pub use components::*; pub use dynamic_scene::*; pub use dynamic_scene_builder::*; pub use scene::*; pub use scene_filter::*; pub use scene_loader::*; pub use scene_spawner::*; /// The scene prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. pub mod prelude { #[doc(hidden)] pub use crate::{ DynamicScene, DynamicSceneBuilder, DynamicSceneRoot, Scene, SceneFilter, SceneRoot, SceneSpawner, }; } use bevy_app::prelude::*; #[cfg(feature = "serialize")] use {bevy_asset::AssetApp, bevy_ecs::schedule::IntoScheduleConfigs}; /// Plugin that provides scene functionality to an [`App`]. 
#[derive(Default)] pub struct ScenePlugin; #[cfg(feature = "serialize")] impl Plugin for ScenePlugin { fn build(&self, app: &mut App) { app.init_asset::<DynamicScene>() .init_asset::<Scene>() .init_asset_loader::<SceneLoader>() .init_resource::<SceneSpawner>() .add_systems(SpawnScene, (scene_spawner, scene_spawner_system).chain()); // Register component hooks for DynamicSceneRoot app.world_mut() .register_component_hooks::<DynamicSceneRoot>() .on_remove(|mut world, context| { let Some(handle) = world.get::<DynamicSceneRoot>(context.entity) else { return; }; let id = handle.id(); if let Some(&SceneInstance(scene_instance)) = world.get::<SceneInstance>(context.entity) { let Some(mut scene_spawner) = world.get_resource_mut::<SceneSpawner>() else { return; }; if let Some(instance_ids) = scene_spawner.spawned_dynamic_scenes.get_mut(&id) { instance_ids.remove(&scene_instance); } scene_spawner.unregister_instance(scene_instance); } }); // Register component hooks for SceneRoot app.world_mut() .register_component_hooks::<SceneRoot>() .on_remove(|mut world, context| { let Some(handle) = world.get::<SceneRoot>(context.entity) else { return; }; let id = handle.id(); if let Some(&SceneInstance(scene_instance)) = world.get::<SceneInstance>(context.entity) { let Some(mut scene_spawner) = world.get_resource_mut::<SceneSpawner>() else { return; }; if let Some(instance_ids) = scene_spawner.spawned_scenes.get_mut(&id) { instance_ids.remove(&scene_instance); } scene_spawner.unregister_instance(scene_instance); } }); } } #[cfg(not(feature = "serialize"))] impl Plugin for ScenePlugin { fn build(&self, _: &mut App) {} } #[cfg(test)] mod tests { use bevy_app::App; use bevy_asset::{AssetPlugin, Assets}; use bevy_ecs::{ component::Component, entity::Entity, hierarchy::{ChildOf, Children}, reflect::{AppTypeRegistry, ReflectComponent}, world::World, }; use bevy_reflect::Reflect; use crate::{ DynamicScene, DynamicSceneBuilder, DynamicSceneRoot, Scene, ScenePlugin, SceneRoot, }; 
#[derive(Component, Reflect, PartialEq, Debug)] #[reflect(Component)] struct Circle { radius: f32, } #[derive(Component, Reflect, PartialEq, Debug)] #[reflect(Component)] struct Rectangle { width: f32, height: f32, } #[derive(Component, Reflect, PartialEq, Debug)] #[reflect(Component)] struct Triangle { base: f32, height: f32, } #[derive(Component, Reflect)] #[reflect(Component)] struct FinishLine; #[test] fn scene_spawns_and_respawns_after_change() { let mut app = App::new(); app.add_plugins((AssetPlugin::default(), ScenePlugin)) .register_type::<Circle>() .register_type::<Rectangle>() .register_type::<Triangle>() .register_type::<FinishLine>(); let scene_handle = app .world_mut() .resource_mut::<Assets<Scene>>() .reserve_handle(); let scene_entity = app.world_mut().spawn(SceneRoot(scene_handle.clone())).id(); app.update(); assert!(app.world().entity(scene_entity).get::<Children>().is_none()); let mut scene_1 = Scene { world: World::new(), }; let root = scene_1.world.spawn_empty().id(); scene_1.world.spawn(( Rectangle { width: 10.0, height: 5.0, }, FinishLine, ChildOf(root), )); scene_1.world.spawn((Circle { radius: 7.0 }, ChildOf(root))); app.world_mut() .resource_mut::<Assets<Scene>>() .insert(&scene_handle, scene_1) .unwrap(); app.update(); // TODO: multiple updates to avoid debounced asset events. 
See comment on SceneSpawner::debounced_scene_asset_events app.update(); app.update(); app.update(); let child_root = app .world() .entity(scene_entity) .get::<Children>() .and_then(|children| children.first().cloned()) .expect("There should be exactly one child on the scene root"); let children = app .world() .entity(child_root) .get::<Children>() .expect("The child of the scene root should itself have 2 children"); assert_eq!(children.len(), 2); let finish_line = app.world().entity(children[0]); assert_eq!(finish_line.archetype().component_count(), 3); let (rectangle, _, child_of) = finish_line.components::<(&Rectangle, &FinishLine, &ChildOf)>(); assert_eq!( rectangle, &Rectangle { width: 10.0, height: 5.0, } ); assert_eq!(child_of.0, child_root); let circle = app.world().entity(children[1]); assert_eq!(circle.archetype().component_count(), 2); let (circle, child_of) = circle.components::<(&Circle, &ChildOf)>(); assert_eq!(circle, &Circle { radius: 7.0 }); assert_eq!(child_of.0, child_root); // Now that we know our scene contains exactly what we expect, we will change the scene // asset and ensure it contains the new scene results. 
let mut scene_2 = Scene { world: World::new(), }; let root = scene_2.world.spawn_empty().id(); scene_2.world.spawn(( Triangle { base: 1.0, height: 2.0, }, ChildOf(root), )); app.world_mut() .resource_mut::<Assets<Scene>>() .insert(&scene_handle, scene_2) .unwrap(); app.update(); app.update(); let child_root = app .world() .entity(scene_entity) .get::<Children>() .and_then(|children| children.first().cloned()) .expect("There should be exactly one child on the scene root"); let children = app .world() .entity(child_root) .get::<Children>() .expect("The child of the scene root should itself have 2 children"); assert_eq!(children.len(), 1); let triangle = app.world().entity(children[0]); assert_eq!(triangle.archetype().component_count(), 2); let (triangle, child_of) = triangle.components::<(&Triangle, &ChildOf)>(); assert_eq!( triangle, &Triangle { base: 1.0, height: 2.0, } ); assert_eq!(child_of.0, child_root); } #[test] fn dynamic_scene_spawns_and_respawns_after_change() { let mut app = App::new(); app.add_plugins((AssetPlugin::default(), ScenePlugin)) .register_type::<Circle>() .register_type::<Rectangle>() .register_type::<Triangle>() .register_type::<FinishLine>(); let scene_handle = app .world_mut() .resource_mut::<Assets<DynamicScene>>() .reserve_handle(); let scene_entity = app .world_mut() .spawn(DynamicSceneRoot(scene_handle.clone())) .id(); app.update(); assert!(app.world().entity(scene_entity).get::<Children>().is_none()); let create_dynamic_scene = |mut scene: Scene, world: &World| { scene .world .insert_resource(world.resource::<AppTypeRegistry>().clone()); let entities: Vec<Entity> = scene.world.query::<Entity>().iter(&scene.world).collect(); DynamicSceneBuilder::from_world(&scene.world) .extract_entities(entities.into_iter()) .build() }; let mut scene_1 = Scene { world: World::new(), }; let root = scene_1.world.spawn_empty().id(); scene_1.world.spawn(( Rectangle { width: 10.0, height: 5.0, }, FinishLine, ChildOf(root), )); scene_1.world.spawn((Circle { 
radius: 7.0 }, ChildOf(root))); let scene_1 = create_dynamic_scene(scene_1, app.world()); app.world_mut() .resource_mut::<Assets<DynamicScene>>() .insert(&scene_handle, scene_1) .unwrap(); app.update(); // TODO: multiple updates to avoid debounced asset events. See comment on SceneSpawner::debounced_scene_asset_events app.update(); app.update(); app.update(); let child_root = app .world() .entity(scene_entity) .get::<Children>() .and_then(|children| children.first().cloned()) .expect("There should be exactly one child on the scene root"); let children = app .world() .entity(child_root) .get::<Children>() .expect("The child of the scene root should itself have 2 children"); assert_eq!(children.len(), 2); let finish_line = app.world().entity(children[0]); assert_eq!(finish_line.archetype().component_count(), 3); let (rectangle, _, child_of) = finish_line.components::<(&Rectangle, &FinishLine, &ChildOf)>(); assert_eq!( rectangle, &Rectangle { width: 10.0, height: 5.0, } ); assert_eq!(child_of.0, child_root); let circle = app.world().entity(children[1]); assert_eq!(circle.archetype().component_count(), 2); let (circle, child_of) = circle.components::<(&Circle, &ChildOf)>(); assert_eq!(circle, &Circle { radius: 7.0 }); assert_eq!(child_of.0, child_root); // Now that we know our scene contains exactly what we expect, we will change the scene // asset and ensure it contains the new scene results. 
let mut scene_2 = Scene { world: World::new(), }; let root = scene_2.world.spawn_empty().id(); scene_2.world.spawn(( Triangle { base: 1.0, height: 2.0, }, ChildOf(root), )); let scene_2 = create_dynamic_scene(scene_2, app.world()); app.world_mut() .resource_mut::<Assets<DynamicScene>>() .insert(&scene_handle, scene_2) .unwrap(); app.update(); app.update(); let child_root = app .world() .entity(scene_entity) .get::<Children>() .and_then(|children| children.first().cloned()) .expect("There should be exactly one child on the scene root"); let children = app .world() .entity(child_root) .get::<Children>() .expect("The child of the scene root should itself have 2 children"); assert_eq!(children.len(), 1); let triangle = app.world().entity(children[0]); assert_eq!(triangle.archetype().component_count(), 2); let (triangle, child_of) = triangle.components::<(&Triangle, &ChildOf)>(); assert_eq!( triangle, &Triangle { base: 1.0, height: 2.0, } ); assert_eq!(child_of.0, child_root); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/scene_filter.rs
crates/bevy_scene/src/scene_filter.rs
use bevy_platform::collections::{hash_set::IntoIter, HashSet}; use core::any::{Any, TypeId}; /// A filter used to control which types can be added to a [`DynamicScene`]. /// /// This scene filter _can_ be used more generically to represent a filter for any given type; /// however, note that its intended usage with `DynamicScene` only considers [components] and [resources]. /// Adding types that are not a component or resource will have no effect when used with `DynamicScene`. /// /// [`DynamicScene`]: crate::DynamicScene /// [components]: bevy_ecs::prelude::Component /// [resources]: bevy_ecs::prelude::Resource #[derive(Default, Debug, Clone, PartialEq, Eq)] pub enum SceneFilter { /// Represents an unset filter. /// /// This is the equivalent of an empty [`Denylist`] or an [`Allowlist`] containing every type— /// essentially, all types are permissible. /// /// [Allowing] a type will convert this filter to an `Allowlist`. /// Similarly, [denying] a type will convert this filter to a `Denylist`. /// /// [`Denylist`]: SceneFilter::Denylist /// [`Allowlist`]: SceneFilter::Allowlist /// [Allowing]: SceneFilter::allow /// [denying]: SceneFilter::deny #[default] Unset, /// Contains the set of permitted types by their [`TypeId`]. /// /// Types not contained within this set should not be allowed to be saved to an associated [`DynamicScene`]. /// /// [`DynamicScene`]: crate::DynamicScene Allowlist(HashSet<TypeId>), /// Contains the set of prohibited types by their [`TypeId`]. /// /// Types contained within this set should not be allowed to be saved to an associated [`DynamicScene`]. /// /// [`DynamicScene`]: crate::DynamicScene Denylist(HashSet<TypeId>), } impl SceneFilter { /// Creates a filter where all types are allowed. /// /// This is the equivalent of creating an empty [`Denylist`]. /// /// [`Denylist`]: SceneFilter::Denylist pub fn allow_all() -> Self { Self::Denylist(HashSet::default()) } /// Creates a filter where all types are denied. 
/// /// This is the equivalent of creating an empty [`Allowlist`]. /// /// [`Allowlist`]: SceneFilter::Allowlist pub fn deny_all() -> Self { Self::Allowlist(HashSet::default()) } /// Allow the given type, `T`. /// /// If this filter is already set as a [`Denylist`], /// then the given type will be removed from the denied set. /// /// If this filter is [`Unset`], then it will be completely replaced by a new [`Allowlist`]. /// /// [`Denylist`]: SceneFilter::Denylist /// [`Unset`]: SceneFilter::Unset /// [`Allowlist`]: SceneFilter::Allowlist #[must_use] pub fn allow<T: Any>(self) -> Self { self.allow_by_id(TypeId::of::<T>()) } /// Allow the given type. /// /// If this filter is already set as a [`Denylist`], /// then the given type will be removed from the denied set. /// /// If this filter is [`Unset`], then it will be completely replaced by a new [`Allowlist`]. /// /// [`Denylist`]: SceneFilter::Denylist /// [`Unset`]: SceneFilter::Unset /// [`Allowlist`]: SceneFilter::Allowlist #[must_use] pub fn allow_by_id(mut self, type_id: TypeId) -> Self { match &mut self { Self::Unset => { self = Self::Allowlist([type_id].into_iter().collect()); } Self::Allowlist(list) => { list.insert(type_id); } Self::Denylist(list) => { list.remove(&type_id); } } self } /// Deny the given type, `T`. /// /// If this filter is already set as an [`Allowlist`], /// then the given type will be removed from the allowed set. /// /// If this filter is [`Unset`], then it will be completely replaced by a new [`Denylist`]. /// /// [`Allowlist`]: SceneFilter::Allowlist /// [`Unset`]: SceneFilter::Unset /// [`Denylist`]: SceneFilter::Denylist #[must_use] pub fn deny<T: Any>(self) -> Self { self.deny_by_id(TypeId::of::<T>()) } /// Deny the given type. /// /// If this filter is already set as an [`Allowlist`], /// then the given type will be removed from the allowed set. /// /// If this filter is [`Unset`], then it will be completely replaced by a new [`Denylist`]. 
/// /// [`Allowlist`]: SceneFilter::Allowlist /// [`Unset`]: SceneFilter::Unset /// [`Denylist`]: SceneFilter::Denylist #[must_use] pub fn deny_by_id(mut self, type_id: TypeId) -> Self { match &mut self { Self::Unset => self = Self::Denylist([type_id].into_iter().collect()), Self::Allowlist(list) => { list.remove(&type_id); } Self::Denylist(list) => { list.insert(type_id); } } self } /// Returns true if the given type, `T`, is allowed by the filter. /// /// If the filter is [`Unset`], this will always return `true`. /// /// [`Unset`]: SceneFilter::Unset pub fn is_allowed<T: Any>(&self) -> bool { self.is_allowed_by_id(TypeId::of::<T>()) } /// Returns true if the given type is allowed by the filter. /// /// If the filter is [`Unset`], this will always return `true`. /// /// [`Unset`]: SceneFilter::Unset pub fn is_allowed_by_id(&self, type_id: TypeId) -> bool { match self { Self::Unset => true, Self::Allowlist(list) => list.contains(&type_id), Self::Denylist(list) => !list.contains(&type_id), } } /// Returns true if the given type, `T`, is denied by the filter. /// /// If the filter is [`Unset`], this will always return `false`. /// /// [`Unset`]: SceneFilter::Unset pub fn is_denied<T: Any>(&self) -> bool { self.is_denied_by_id(TypeId::of::<T>()) } /// Returns true if the given type is denied by the filter. /// /// If the filter is [`Unset`], this will always return `false`. /// /// [`Unset`]: SceneFilter::Unset pub fn is_denied_by_id(&self, type_id: TypeId) -> bool { !self.is_allowed_by_id(type_id) } /// Returns an iterator over the items in the filter. /// /// If the filter is [`Unset`], this will return an empty iterator. /// /// [`Unset`]: SceneFilter::Unset pub fn iter(&self) -> Box<dyn ExactSizeIterator<Item = &TypeId> + '_> { match self { Self::Unset => Box::new(core::iter::empty()), Self::Allowlist(list) | Self::Denylist(list) => Box::new(list.iter()), } } /// Returns the number of items in the filter. 
/// /// If the filter is [`Unset`], this will always return a length of zero. /// /// [`Unset`]: SceneFilter::Unset pub fn len(&self) -> usize { match self { Self::Unset => 0, Self::Allowlist(list) | Self::Denylist(list) => list.len(), } } /// Returns true if there are zero items in the filter. /// /// If the filter is [`Unset`], this will always return `true`. /// /// [`Unset`]: SceneFilter::Unset pub fn is_empty(&self) -> bool { match self { Self::Unset => true, Self::Allowlist(list) | Self::Denylist(list) => list.is_empty(), } } } impl IntoIterator for SceneFilter { type Item = TypeId; type IntoIter = IntoIter<TypeId>; fn into_iter(self) -> Self::IntoIter { match self { Self::Unset => Default::default(), Self::Allowlist(list) | Self::Denylist(list) => list.into_iter(), } } } #[cfg(test)] mod tests { use super::*; #[test] fn should_set_list_type_if_none() { let filter = SceneFilter::Unset.allow::<i32>(); assert!(matches!(filter, SceneFilter::Allowlist(_))); let filter = SceneFilter::Unset.deny::<i32>(); assert!(matches!(filter, SceneFilter::Denylist(_))); } #[test] fn should_add_to_list() { let filter = SceneFilter::default().allow::<i16>().allow::<i32>(); assert_eq!(2, filter.len()); assert!(filter.is_allowed::<i16>()); assert!(filter.is_allowed::<i32>()); let filter = SceneFilter::default().deny::<i16>().deny::<i32>(); assert_eq!(2, filter.len()); assert!(filter.is_denied::<i16>()); assert!(filter.is_denied::<i32>()); } #[test] fn should_remove_from_list() { let filter = SceneFilter::default() .allow::<i16>() .allow::<i32>() .deny::<i32>(); assert_eq!(1, filter.len()); assert!(filter.is_allowed::<i16>()); assert!(!filter.is_allowed::<i32>()); let filter = SceneFilter::default() .deny::<i16>() .deny::<i32>() .allow::<i32>(); assert_eq!(1, filter.len()); assert!(filter.is_denied::<i16>()); assert!(!filter.is_denied::<i32>()); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/reflect_utils.rs
crates/bevy_scene/src/reflect_utils.rs
use bevy_reflect::{PartialReflect, ReflectFromReflect, TypeRegistration}; /// Attempts to clone a [`PartialReflect`] value using various methods. /// /// This first attempts to clone via [`PartialReflect::reflect_clone`]. /// then falls back to [`ReflectFromReflect::from_reflect`], /// and finally [`PartialReflect::to_dynamic`] if the first two methods fail. /// /// This helps ensure that the original type and type data is retained, /// and only returning a dynamic type if all other methods fail. pub(super) fn clone_reflect_value( value: &dyn PartialReflect, type_registration: &TypeRegistration, ) -> Box<dyn PartialReflect> { value .reflect_clone() .map(PartialReflect::into_partial_reflect) .unwrap_or_else(|_| { type_registration .data::<ReflectFromReflect>() .and_then(|fr| fr.from_reflect(value.as_partial_reflect())) .map(PartialReflect::into_partial_reflect) .unwrap_or_else(|| value.to_dynamic()) }) }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/scene.rs
crates/bevy_scene/src/scene.rs
use core::any::TypeId; use crate::reflect_utils::clone_reflect_value; use crate::{DynamicScene, SceneSpawnError}; use bevy_asset::Asset; use bevy_ecs::{ component::ComponentCloneBehavior, entity::{Entity, EntityHashMap, SceneEntityMapper}, entity_disabling::DefaultQueryFilters, reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, relationship::RelationshipHookMode, world::World, }; use bevy_reflect::TypePath; /// A composition of [`World`] objects. /// /// To spawn a scene, you can use either: /// * [`SceneSpawner::spawn`](crate::SceneSpawner::spawn) /// * adding the [`SceneRoot`](crate::components::SceneRoot) component to an entity. #[derive(Asset, TypePath, Debug)] pub struct Scene { /// The world of the scene, containing its entities and resources. pub world: World, } impl Scene { /// Creates a new scene with the given world. pub fn new(world: World) -> Self { Self { world } } /// Create a new scene from a given dynamic scene. pub fn from_dynamic_scene( dynamic_scene: &DynamicScene, type_registry: &AppTypeRegistry, ) -> Result<Scene, SceneSpawnError> { let mut world = World::new(); let mut entity_map = EntityHashMap::default(); dynamic_scene.write_to_world_with(&mut world, &mut entity_map, type_registry)?; Ok(Self { world }) } /// Clone the scene. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered in the /// provided [`AppTypeRegistry`] or doesn't reflect the [`Component`](bevy_ecs::component::Component) trait. pub fn clone_with(&self, type_registry: &AppTypeRegistry) -> Result<Scene, SceneSpawnError> { let mut new_world = World::new(); let mut entity_map = EntityHashMap::default(); self.write_to_world_with(&mut new_world, &mut entity_map, type_registry)?; Ok(Self { world: new_world }) } /// Write the entities and their corresponding components to the given world. 
/// /// This method will return a [`SceneSpawnError`] if a type either is not registered in the /// provided [`AppTypeRegistry`] or doesn't reflect the [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut EntityHashMap<Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); let self_dqf_id = self .world .components() .get_resource_id(TypeId::of::<DefaultQueryFilters>()); // Resources archetype for (component_id, resource_data) in self.world.storages().resources.iter() { if Some(component_id) == self_dqf_id { continue; } if !resource_data.is_present() { continue; } let component_info = self .world .components() .get_info(component_id) .expect("component_ids in archetypes should have ComponentInfo"); let type_id = component_info .type_id() .expect("reflected resources must have a type_id"); let registration = type_registry .get(type_id) .ok_or_else(|| SceneSpawnError::UnregisteredType { std_type_name: component_info.name(), })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_path: registration.type_info().type_path().to_string(), } })?; reflect_resource.copy(&self.world, world, &type_registry); } // Ensure that all scene entities have been allocated in the destination // world before handling components that may contain references that need mapping. 
for archetype in self.world.archetypes().iter() { for scene_entity in archetype.entities() { entity_map .entry(scene_entity.id()) .or_insert_with(|| world.spawn_empty().id()); } } for archetype in self.world.archetypes().iter() { for scene_entity in archetype.entities() { let entity = *entity_map .get(&scene_entity.id()) .expect("should have previously spawned an entity"); for component_id in archetype.iter_components() { let component_info = self .world .components() .get_info(component_id) .expect("component_ids in archetypes should have ComponentInfo"); if matches!( *component_info.clone_behavior(), ComponentCloneBehavior::Ignore ) { continue; } let registration = type_registry .get(component_info.type_id().unwrap()) .ok_or_else(|| SceneSpawnError::UnregisteredType { std_type_name: component_info.name(), })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_path: registration.type_info().type_path().to_string(), } })?; let Some(component) = reflect_component .reflect(self.world.entity(scene_entity.id())) .map(|component| { clone_reflect_value(component.as_partial_reflect(), registration) }) else { continue; }; // If this component references entities in the scene, // update them to the entities in the world. SceneEntityMapper::world_scope(entity_map, world, |world, mapper| { reflect_component.apply_or_insert_mapped( &mut world.entity_mut(entity), component.as_partial_reflect(), &type_registry, mapper, RelationshipHookMode::Skip, ); }); } } } Ok(()) } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/dynamic_scene.rs
crates/bevy_scene/src/dynamic_scene.rs
use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use bevy_asset::Asset; use bevy_ecs::reflect::{ReflectMapEntities, ReflectResource}; use bevy_ecs::{ entity::{Entity, EntityHashMap, SceneEntityMapper}, reflect::{AppTypeRegistry, ReflectComponent}, world::World, }; use bevy_reflect::{PartialReflect, TypePath}; use crate::reflect_utils::clone_reflect_value; use bevy_ecs::component::ComponentCloneBehavior; use bevy_ecs::relationship::RelationshipHookMode; #[cfg(feature = "serialize")] use {crate::serde::SceneSerializer, bevy_reflect::TypeRegistry, serde::Serialize}; /// A collection of serializable resources and dynamic entities. /// /// Each dynamic entity in the collection contains its own run-time defined set of components. /// To spawn a dynamic scene, you can use either: /// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) /// * adding the [`DynamicSceneRoot`](crate::components::DynamicSceneRoot) component to an entity. /// * using the [`DynamicSceneBuilder`] to construct a `DynamicScene` from `World`. #[derive(Asset, TypePath, Default)] pub struct DynamicScene { /// Resources stored in the dynamic scene. pub resources: Vec<Box<dyn PartialReflect>>, /// Entities contained in the dynamic scene. pub entities: Vec<DynamicEntity>, } /// A reflection-powered serializable representation of an entity and its components. pub struct DynamicEntity { /// The identifier of the entity, unique within a scene (and the world it may have been generated from). /// /// Components that reference this entity must consistently use this identifier. pub entity: Entity, /// A vector of boxed components that belong to the given entity and /// implement the [`PartialReflect`] trait. pub components: Vec<Box<dyn PartialReflect>>, } impl DynamicScene { /// Create a new dynamic scene from a given scene. pub fn from_scene(scene: &Scene) -> Self { Self::from_world(&scene.world) } /// Create a new dynamic scene from a given world. 
pub fn from_world(world: &World) -> Self { DynamicSceneBuilder::from_world(world) .extract_entities( // we do this instead of a query, in order to completely sidestep default query filters. // while we could use `Allow<_>`, this wouldn't account for custom disabled components world .archetypes() .iter() .flat_map(bevy_ecs::archetype::Archetype::entities) .map(bevy_ecs::archetype::ArchetypeEntity::id), ) .extract_resources() .build() } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the provided [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) or [`Resource`](bevy_ecs::prelude::Resource) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut EntityHashMap<Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); // First ensure that every entity in the scene has a corresponding world // entity in the entity map. for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map` // or spawn a new entity with a transiently unique id if there is // no corresponding entry. entity_map .entry(scene_entity.entity) .or_insert_with(|| world.spawn_empty().id()); } for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map`. let entity = *entity_map .get(&scene_entity.entity) .expect("should have previously spawned an empty entity"); // Apply/ add each component to the given entity. 
for component in &scene_entity.components { let type_info = component.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: component.reflect_type_path().to_string(), } })?; let registration = type_registry.get(type_info.type_id()).ok_or_else(|| { SceneSpawnError::UnregisteredButReflectedType { type_path: type_info.type_path().to_string(), } })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_path: type_info.type_path().to_string(), } })?; { let component_id = reflect_component.register_component(world); // SAFETY: we registered the component above. the info exists #[expect(unsafe_code, reason = "this is faster")] let component_info = unsafe { world.components().get_info_unchecked(component_id) }; if matches!( *component_info.clone_behavior(), ComponentCloneBehavior::Ignore ) { continue; } } SceneEntityMapper::world_scope(entity_map, world, |world, mapper| { reflect_component.apply_or_insert_mapped( &mut world.entity_mut(entity), component.as_partial_reflect(), &type_registry, mapper, RelationshipHookMode::Skip, ); }); } } // Insert resources after all entities have been added to the world. // This ensures the entities are available for the resources to reference during mapping. for resource in &self.resources { let type_info = resource.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: resource.reflect_type_path().to_string(), } })?; let registration = type_registry.get(type_info.type_id()).ok_or_else(|| { SceneSpawnError::UnregisteredButReflectedType { type_path: type_info.type_path().to_string(), } })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_path: type_info.type_path().to_string(), } })?; // If this component references entities in the scene, update // them to the entities in the world. 
let mut cloned_resource; let partial_reflect_resource = if let Some(map_entities) = registration.data::<ReflectMapEntities>() { cloned_resource = clone_reflect_value(resource.as_partial_reflect(), registration); SceneEntityMapper::world_scope(entity_map, world, |_, mapper| { map_entities.map_entities(cloned_resource.as_partial_reflect_mut(), mapper); }); cloned_resource.as_partial_reflect() } else { resource.as_partial_reflect() }; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource reflect_resource.apply_or_insert(world, partial_reflect_resource, &type_registry); } Ok(()) } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the world's [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world( &self, world: &mut World, entity_map: &mut EntityHashMap<Entity>, ) -> Result<(), SceneSpawnError> { let registry = world.resource::<AppTypeRegistry>().clone(); self.write_to_world_with(world, entity_map, &registry) } // TODO: move to AssetSaver when it is implemented /// Serialize this dynamic scene into the official Bevy scene format (`.scn` / `.scn.ron`). /// /// The Bevy scene format is based on [Rusty Object Notation (RON)]. It describes the scene /// in a human-friendly format. To deserialize the scene, use the [`SceneLoader`]. /// /// [`SceneLoader`]: crate::SceneLoader /// [Rusty Object Notation (RON)]: https://crates.io/crates/ron #[cfg(feature = "serialize")] pub fn serialize(&self, registry: &TypeRegistry) -> Result<String, ron::Error> { serialize_ron(SceneSerializer::new(self, registry)) } } /// Serialize a given Rust data structure into rust object notation (ron). 
#[cfg(feature = "serialize")] pub fn serialize_ron<S>(serialize: S) -> Result<String, ron::Error> where S: Serialize, { let pretty_config = ron::ser::PrettyConfig::default() .indentor(" ".to_string()) .new_line("\n".to_string()); ron::ser::to_string_pretty(&serialize, pretty_config) } #[cfg(test)] mod tests { use bevy_ecs::{ component::Component, entity::{Entity, EntityHashMap, EntityMapper, MapEntities}, hierarchy::ChildOf, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities, ReflectResource}, resource::Resource, world::World, }; use bevy_reflect::Reflect; use crate::dynamic_scene::DynamicScene; use crate::dynamic_scene_builder::DynamicSceneBuilder; #[derive(Resource, Reflect, MapEntities, Debug)] #[reflect(Resource, MapEntities)] struct TestResource { #[entities] entity_a: Entity, #[entities] entity_b: Entity, } #[test] fn resource_entity_map_maps_entities() { let type_registry = AppTypeRegistry::default(); type_registry.write().register::<TestResource>(); let mut source_world = World::new(); source_world.insert_resource(type_registry.clone()); let original_entity_a = source_world.spawn_empty().id(); let original_entity_b = source_world.spawn_empty().id(); source_world.insert_resource(TestResource { entity_a: original_entity_a, entity_b: original_entity_b, }); // Write the scene. 
let scene = DynamicSceneBuilder::from_world(&source_world) .extract_resources() .extract_entity(original_entity_a) .extract_entity(original_entity_b) .build(); let mut entity_map = EntityHashMap::default(); let mut destination_world = World::new(); destination_world.insert_resource(type_registry); scene .write_to_world(&mut destination_world, &mut entity_map) .unwrap(); let &from_entity_a = entity_map.get(&original_entity_a).unwrap(); let &from_entity_b = entity_map.get(&original_entity_b).unwrap(); let test_resource = destination_world.get_resource::<TestResource>().unwrap(); assert_eq!(from_entity_a, test_resource.entity_a); assert_eq!(from_entity_b, test_resource.entity_b); } #[test] fn components_not_defined_in_scene_should_not_be_affected_by_scene_entity_map() { // Testing that scene reloading applies EntityMap correctly to MapEntities components. // First, we create a simple world with a parent and a child relationship let mut world = World::new(); world.init_resource::<AppTypeRegistry>(); world .resource_mut::<AppTypeRegistry>() .write() .register::<ChildOf>(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); world .entity_mut(original_parent_entity) .add_child(original_child_entity); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(original_parent_entity) .extract_entity(original_child_entity) .build(); let mut entity_map = EntityHashMap::default(); scene.write_to_world(&mut world, &mut entity_map).unwrap(); let &from_scene_parent_entity = entity_map.get(&original_parent_entity).unwrap(); let &from_scene_child_entity = entity_map.get(&original_child_entity).unwrap(); // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child 
world .entity_mut(original_child_entity) .add_child(from_scene_parent_entity); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. // With [`bevy_ecs::hierarchy`], this can cause serious errors and malformed hierarchies. scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( original_parent_entity, world .get_entity(original_child_entity) .unwrap() .get::<ChildOf>() .unwrap() .parent(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( original_child_entity, world .get_entity(from_scene_parent_entity) .unwrap() .get::<ChildOf>() .unwrap() .parent(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( from_scene_parent_entity, world .get_entity(from_scene_child_entity) .unwrap() .get::<ChildOf>() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") .parent(), "something is wrong with this test or the code reloading scenes since the relationship between scene entities is broken" ); } // Regression test for https://github.com/bevyengine/bevy/issues/14300 // Fails before the fix in https://github.com/bevyengine/bevy/pull/15405 #[test] fn no_panic_in_map_entities_after_pending_entity_in_hook() { #[derive(Default, Component, Reflect)] #[reflect(Component)] struct A; #[derive(Component, Reflect)] #[reflect(Component)] struct B(pub Entity); impl MapEntities for B { fn map_entities<E: EntityMapper>(&mut self, entity_mapper: &mut E) { self.0 = entity_mapper.get_mapped(self.0); } } let reg = AppTypeRegistry::default(); { let mut reg_write = reg.write(); reg_write.register::<A>(); reg_write.register::<B>(); } let mut scene_world = World::new(); scene_world.insert_resource(reg.clone()); scene_world.spawn((B(Entity::PLACEHOLDER), A)); let scene = 
DynamicScene::from_world(&scene_world); let mut dst_world = World::new(); dst_world .register_component_hooks::<A>() .on_add(|mut world, _| { world.commands().spawn_empty(); }); dst_world.insert_resource(reg.clone()); // Should not panic. // Prior to fix, the `Entities::alloc` call in // `EntityMapper::map_entity` would panic due to pending entities from the observer // not having been flushed. scene .write_to_world(&mut dst_world, &mut Default::default()) .unwrap(); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/scene_loader.rs
crates/bevy_scene/src/scene_loader.rs
use bevy_ecs::{ reflect::AppTypeRegistry, world::{FromWorld, World}, }; use bevy_reflect::{TypePath, TypeRegistryArc}; #[cfg(feature = "serialize")] use { crate::{serde::SceneDeserializer, DynamicScene}, bevy_asset::{io::Reader, AssetLoader, LoadContext}, serde::de::DeserializeSeed, thiserror::Error, }; /// Asset loader for a Bevy dynamic scene (`.scn` / `.scn.ron`). /// /// The loader handles assets serialized with [`DynamicScene::serialize`]. #[derive(Debug, TypePath)] pub struct SceneLoader { #[cfg_attr( not(feature = "serialize"), expect(dead_code, reason = "only used with `serialize` feature") )] type_registry: TypeRegistryArc, } impl FromWorld for SceneLoader { fn from_world(world: &mut World) -> Self { let type_registry = world.resource::<AppTypeRegistry>(); SceneLoader { type_registry: type_registry.0.clone(), } } } /// Possible errors that can be produced by [`SceneLoader`] #[cfg(feature = "serialize")] #[non_exhaustive] #[derive(Debug, Error)] pub enum SceneLoaderError { /// An [IO Error](std::io::Error) #[error("Error while trying to read the scene file: {0}")] Io(#[from] std::io::Error), /// A [RON Error](ron::error::SpannedError) #[error("Could not parse RON: {0}")] RonSpannedError(#[from] ron::error::SpannedError), } #[cfg(feature = "serialize")] impl AssetLoader for SceneLoader { type Asset = DynamicScene; type Settings = (); type Error = SceneLoaderError; async fn load( &self, reader: &mut dyn Reader, _settings: &(), _load_context: &mut LoadContext<'_>, ) -> Result<Self::Asset, Self::Error> { let mut bytes = Vec::new(); reader.read_to_end(&mut bytes).await?; let mut deserializer = ron::de::Deserializer::from_bytes(&bytes)?; let scene_deserializer = SceneDeserializer { type_registry: &self.type_registry.read(), }; Ok(scene_deserializer .deserialize(&mut deserializer) .map_err(|e| deserializer.span_error(e))?) } fn extensions(&self) -> &[&str] { &["scn", "scn.ron"] } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/dynamic_scene_builder.rs
crates/bevy_scene/src/dynamic_scene_builder.rs
use core::any::TypeId; use crate::reflect_utils::clone_reflect_value; use crate::{DynamicEntity, DynamicScene, SceneFilter}; use alloc::collections::BTreeMap; use bevy_ecs::{ component::{Component, ComponentId}, entity_disabling::DefaultQueryFilters, prelude::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, resource::Resource, world::World, }; use bevy_reflect::PartialReflect; use bevy_utils::default; /// A [`DynamicScene`] builder, used to build a scene from a [`World`] by extracting some entities and resources. /// /// # Component Extraction /// /// By default, all components registered with [`ReflectComponent`] type data in a world's [`AppTypeRegistry`] will be extracted. /// (this type data is added automatically during registration if [`Reflect`] is derived with the `#[reflect(Component)]` attribute). /// This can be changed by [specifying a filter](DynamicSceneBuilder::with_component_filter) or by explicitly /// [allowing](DynamicSceneBuilder::allow_component)/[denying](DynamicSceneBuilder::deny_component) certain components. /// /// Extraction happens immediately and uses the filter as it exists during the time of extraction. /// /// # Resource Extraction /// /// By default, all resources registered with [`ReflectResource`] type data in a world's [`AppTypeRegistry`] will be extracted. /// (this type data is added automatically during registration if [`Reflect`] is derived with the `#[reflect(Resource)]` attribute). /// This can be changed by [specifying a filter](DynamicSceneBuilder::with_resource_filter) or by explicitly /// [allowing](DynamicSceneBuilder::allow_resource)/[denying](DynamicSceneBuilder::deny_resource) certain resources. /// /// Extraction happens immediately and uses the filter as it exists during the time of extraction. /// /// # Entity Order /// /// Extracted entities will always be stored in ascending order based on their [index](Entity::index). 
/// This means that inserting `Entity(1v0)` then `Entity(0v0)` will always result in the entities /// being ordered as `[Entity(0v0), Entity(1v0)]`. /// /// # Example /// ``` /// # use bevy_scene::DynamicSceneBuilder; /// # use bevy_ecs::reflect::AppTypeRegistry; /// # use bevy_ecs::{ /// # component::Component, prelude::Entity, query::With, reflect::ReflectComponent, world::World, /// # }; /// # use bevy_reflect::Reflect; /// # #[derive(Component, Reflect, Default, Eq, PartialEq, Debug)] /// # #[reflect(Component)] /// # struct ComponentA; /// # let mut world = World::default(); /// # world.init_resource::<AppTypeRegistry>(); /// # let entity = world.spawn(ComponentA).id(); /// let dynamic_scene = DynamicSceneBuilder::from_world(&world).extract_entity(entity).build(); /// ``` /// /// [`Reflect`]: bevy_reflect::Reflect pub struct DynamicSceneBuilder<'w> { extracted_resources: BTreeMap<ComponentId, Box<dyn PartialReflect>>, extracted_scene: BTreeMap<Entity, DynamicEntity>, component_filter: SceneFilter, resource_filter: SceneFilter, original_world: &'w World, } impl<'w> DynamicSceneBuilder<'w> { /// Prepare a builder that will extract entities and their component from the given [`World`]. pub fn from_world(world: &'w World) -> Self { Self { extracted_resources: default(), extracted_scene: default(), component_filter: SceneFilter::default(), resource_filter: SceneFilter::default(), original_world: world, } } /// Specify a custom component [`SceneFilter`] to be used with this builder. #[must_use] pub fn with_component_filter(mut self, filter: SceneFilter) -> Self { self.component_filter = filter; self } /// Specify a custom resource [`SceneFilter`] to be used with this builder. #[must_use] pub fn with_resource_filter(mut self, filter: SceneFilter) -> Self { self.resource_filter = filter; self } /// Updates the filter to allow all component and resource types. 
/// /// This is useful for resetting the filter so that types may be selectively denied /// with [`deny_component`](`Self::deny_component`) and [`deny_resource`](`Self::deny_resource`). pub fn allow_all(mut self) -> Self { self.component_filter = SceneFilter::allow_all(); self.resource_filter = SceneFilter::allow_all(); self } /// Updates the filter to deny all component and resource types. /// /// This is useful for resetting the filter so that types may be selectively allowed /// with [`allow_component`](`Self::allow_component`) and [`allow_resource`](`Self::allow_resource`). pub fn deny_all(mut self) -> Self { self.component_filter = SceneFilter::deny_all(); self.resource_filter = SceneFilter::deny_all(); self } /// Allows the given component type, `T`, to be included in the generated scene. /// /// This method may be called multiple times for any number of components. /// /// This is the inverse of [`deny_component`](Self::deny_component). /// If `T` has already been denied, then it will be removed from the denylist. #[must_use] pub fn allow_component<T: Component>(mut self) -> Self { self.component_filter = self.component_filter.allow::<T>(); self } /// Denies the given component type, `T`, from being included in the generated scene. /// /// This method may be called multiple times for any number of components. /// /// This is the inverse of [`allow_component`](Self::allow_component). /// If `T` has already been allowed, then it will be removed from the allowlist. #[must_use] pub fn deny_component<T: Component>(mut self) -> Self { self.component_filter = self.component_filter.deny::<T>(); self } /// Updates the filter to allow all component types. /// /// This is useful for resetting the filter so that types may be selectively [denied]. /// /// [denied]: Self::deny_component #[must_use] pub fn allow_all_components(mut self) -> Self { self.component_filter = SceneFilter::allow_all(); self } /// Updates the filter to deny all component types. 
/// /// This is useful for resetting the filter so that types may be selectively [allowed]. /// /// [allowed]: Self::allow_component #[must_use] pub fn deny_all_components(mut self) -> Self { self.component_filter = SceneFilter::deny_all(); self } /// Allows the given resource type, `T`, to be included in the generated scene. /// /// This method may be called multiple times for any number of resources. /// /// This is the inverse of [`deny_resource`](Self::deny_resource). /// If `T` has already been denied, then it will be removed from the denylist. #[must_use] pub fn allow_resource<T: Resource>(mut self) -> Self { self.resource_filter = self.resource_filter.allow::<T>(); self } /// Denies the given resource type, `T`, from being included in the generated scene. /// /// This method may be called multiple times for any number of resources. /// /// This is the inverse of [`allow_resource`](Self::allow_resource). /// If `T` has already been allowed, then it will be removed from the allowlist. #[must_use] pub fn deny_resource<T: Resource>(mut self) -> Self { self.resource_filter = self.resource_filter.deny::<T>(); self } /// Updates the filter to allow all resource types. /// /// This is useful for resetting the filter so that types may be selectively [denied]. /// /// [denied]: Self::deny_resource #[must_use] pub fn allow_all_resources(mut self) -> Self { self.resource_filter = SceneFilter::allow_all(); self } /// Updates the filter to deny all resource types. /// /// This is useful for resetting the filter so that types may be selectively [allowed]. /// /// [allowed]: Self::allow_resource #[must_use] pub fn deny_all_resources(mut self) -> Self { self.resource_filter = SceneFilter::deny_all(); self } /// Consume the builder, producing a [`DynamicScene`]. /// /// To make sure the dynamic scene doesn't contain entities without any components, call /// [`Self::remove_empty_entities`] before building the scene. 
#[must_use] pub fn build(self) -> DynamicScene { DynamicScene { resources: self.extracted_resources.into_values().collect(), entities: self.extracted_scene.into_values().collect(), } } /// Extract one entity from the builder's [`World`]. /// /// Re-extracting an entity that was already extracted will have no effect. #[must_use] pub fn extract_entity(self, entity: Entity) -> Self { self.extract_entities(core::iter::once(entity)) } /// Despawns all entities with no components. /// /// These were likely created because none of their components were present in the provided type registry upon extraction. #[must_use] pub fn remove_empty_entities(mut self) -> Self { self.extracted_scene .retain(|_, entity| !entity.components.is_empty()); self } /// Extract entities from the builder's [`World`]. /// /// Re-extracting an entity that was already extracted will have no effect. /// /// To control which components are extracted, use the [`allow`] or /// [`deny`] helper methods. /// /// This method may be used to extract entities from a query: /// ``` /// # use bevy_scene::DynamicSceneBuilder; /// # use bevy_ecs::reflect::AppTypeRegistry; /// # use bevy_ecs::{ /// # component::Component, prelude::Entity, query::With, reflect::ReflectComponent, world::World, /// # }; /// # use bevy_reflect::Reflect; /// #[derive(Component, Default, Reflect)] /// #[reflect(Component)] /// struct MyComponent; /// /// # let mut world = World::default(); /// # world.init_resource::<AppTypeRegistry>(); /// # let _entity = world.spawn(MyComponent).id(); /// let mut query = world.query_filtered::<Entity, With<MyComponent>>(); /// /// let scene = DynamicSceneBuilder::from_world(&world) /// .extract_entities(query.iter(&world)) /// .build(); /// ``` /// /// Note that components extracted from queried entities must still pass through the filter if one is set. 
/// /// [`allow`]: Self::allow_component /// [`deny`]: Self::deny_component #[must_use] pub fn extract_entities(mut self, entities: impl Iterator<Item = Entity>) -> Self { let type_registry = self.original_world.resource::<AppTypeRegistry>().read(); for entity in entities { if self.extracted_scene.contains_key(&entity) { continue; } let mut entry = DynamicEntity { entity, components: Vec::new(), }; let original_entity = self.original_world.entity(entity); for &component_id in original_entity.archetype().components().iter() { let mut extract_and_push = || { let type_id = self .original_world .components() .get_info(component_id)? .type_id()?; let is_denied = self.component_filter.is_denied_by_id(type_id); if is_denied { // Component is either in the denylist or _not_ in the allowlist return None; } let type_registration = type_registry.get(type_id)?; let component = type_registration .data::<ReflectComponent>()? .reflect(original_entity)?; let component = clone_reflect_value(component.as_partial_reflect(), type_registration); entry.components.push(component); Some(()) }; extract_and_push(); } self.extracted_scene.insert(entity, entry); } self } /// Extract resources from the builder's [`World`]. /// /// Re-extracting a resource that was already extracted will have no effect. /// /// To control which resources are extracted, use the [`allow_resource`] or /// [`deny_resource`] helper methods. 
/// /// ``` /// # use bevy_scene::DynamicSceneBuilder; /// # use bevy_ecs::reflect::AppTypeRegistry; /// # use bevy_ecs::prelude::{ReflectResource, Resource, World}; /// # use bevy_reflect::Reflect; /// #[derive(Resource, Default, Reflect)] /// #[reflect(Resource)] /// struct MyResource; /// /// # let mut world = World::default(); /// # world.init_resource::<AppTypeRegistry>(); /// world.insert_resource(MyResource); /// /// let mut builder = DynamicSceneBuilder::from_world(&world).extract_resources(); /// let scene = builder.build(); /// ``` /// /// [`allow_resource`]: Self::allow_resource /// [`deny_resource`]: Self::deny_resource #[must_use] pub fn extract_resources(mut self) -> Self { // Don't extract the DefaultQueryFilters resource let original_world_dqf_id = self .original_world .components() .get_valid_resource_id(TypeId::of::<DefaultQueryFilters>()); let type_registry = self.original_world.resource::<AppTypeRegistry>().read(); for (component_id, _) in self.original_world.storages().resources.iter() { if Some(component_id) == original_world_dqf_id { continue; } let mut extract_and_push = || { let type_id = self .original_world .components() .get_info(component_id)? .type_id()?; let is_denied = self.resource_filter.is_denied_by_id(type_id); if is_denied { // Resource is either in the denylist or _not_ in the allowlist return None; } let type_registration = type_registry.get(type_id)?; let resource = type_registration .data::<ReflectResource>()? 
.reflect(self.original_world) .ok()?; let resource = clone_reflect_value(resource.as_partial_reflect(), type_registration); self.extracted_resources.insert(component_id, resource); Some(()) }; extract_and_push(); } drop(type_registry); self } } #[cfg(test)] mod tests { use bevy_ecs::{ component::Component, prelude::{Entity, Resource}, query::With, reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, world::World, }; use bevy_reflect::Reflect; use super::DynamicSceneBuilder; #[derive(Component, Reflect, Default, Eq, PartialEq, Debug)] #[reflect(Component)] struct ComponentA; #[derive(Component, Reflect, Default, Eq, PartialEq, Debug)] #[reflect(Component)] struct ComponentB; #[derive(Resource, Reflect, Default, Eq, PartialEq, Debug)] #[reflect(Resource)] struct ResourceA; #[derive(Resource, Reflect, Default, Eq, PartialEq, Debug)] #[reflect(Resource)] struct ResourceB; #[test] fn extract_one_entity() { let mut world = World::default(); let atr = AppTypeRegistry::default(); atr.write().register::<ComponentA>(); world.insert_resource(atr); let entity = world.spawn((ComponentA, ComponentB)).id(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .build(); assert_eq!(scene.entities.len(), 1); assert_eq!(scene.entities[0].entity, entity); assert_eq!(scene.entities[0].components.len(), 1); assert!(scene.entities[0].components[0].represents::<ComponentA>()); } #[test] fn extract_one_entity_twice() { let mut world = World::default(); let atr = AppTypeRegistry::default(); atr.write().register::<ComponentA>(); world.insert_resource(atr); let entity = world.spawn((ComponentA, ComponentB)).id(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .extract_entity(entity) .build(); assert_eq!(scene.entities.len(), 1); assert_eq!(scene.entities[0].entity, entity); assert_eq!(scene.entities[0].components.len(), 1); assert!(scene.entities[0].components[0].represents::<ComponentA>()); } #[test] fn 
extract_one_entity_two_components() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ComponentA>(); register.register::<ComponentB>(); } world.insert_resource(atr); let entity = world.spawn((ComponentA, ComponentB)).id(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .build(); assert_eq!(scene.entities.len(), 1); assert_eq!(scene.entities[0].entity, entity); assert_eq!(scene.entities[0].components.len(), 2); assert!(scene.entities[0].components[0].represents::<ComponentA>()); assert!(scene.entities[0].components[1].represents::<ComponentB>()); } #[test] fn extract_entity_order() { let mut world = World::default(); world.init_resource::<AppTypeRegistry>(); // Spawn entities in order let entity_a = world.spawn_empty().id(); let entity_b = world.spawn_empty().id(); let entity_c = world.spawn_empty().id(); let entity_d = world.spawn_empty().id(); // Insert entities out of order let builder = DynamicSceneBuilder::from_world(&world) .extract_entity(entity_b) .extract_entities([entity_d, entity_a].into_iter()) .extract_entity(entity_c); let mut entities = builder.build().entities.into_iter(); // Assert entities are ordered assert_eq!(entity_d, entities.next().map(|e| e.entity).unwrap()); assert_eq!(entity_c, entities.next().map(|e| e.entity).unwrap()); assert_eq!(entity_b, entities.next().map(|e| e.entity).unwrap()); assert_eq!(entity_a, entities.next().map(|e| e.entity).unwrap()); } #[test] fn extract_query() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ComponentA>(); register.register::<ComponentB>(); } world.insert_resource(atr); let entity_a_b = world.spawn((ComponentA, ComponentB)).id(); let entity_a = world.spawn(ComponentA).id(); let _entity_b = world.spawn(ComponentB).id(); let mut query = world.query_filtered::<Entity, With<ComponentA>>(); let scene = 
DynamicSceneBuilder::from_world(&world) .extract_entities(query.iter(&world)) .build(); assert_eq!(scene.entities.len(), 2); let mut scene_entities = vec![scene.entities[0].entity, scene.entities[1].entity]; scene_entities.sort(); assert_eq!(scene_entities, [entity_a, entity_a_b]); } #[test] fn remove_componentless_entity() { let mut world = World::default(); let atr = AppTypeRegistry::default(); atr.write().register::<ComponentA>(); world.insert_resource(atr); let entity_a = world.spawn(ComponentA).id(); let entity_b = world.spawn(ComponentB).id(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entities([entity_a, entity_b].into_iter()) .remove_empty_entities() .build(); assert_eq!(scene.entities.len(), 1); assert_eq!(scene.entities[0].entity, entity_a); } #[test] fn extract_one_resource() { let mut world = World::default(); let atr = AppTypeRegistry::default(); atr.write().register::<ResourceA>(); world.insert_resource(atr); world.insert_resource(ResourceA); let scene = DynamicSceneBuilder::from_world(&world) .extract_resources() .build(); assert_eq!(scene.resources.len(), 1); assert!(scene.resources[0].represents::<ResourceA>()); } #[test] fn extract_one_resource_twice() { let mut world = World::default(); let atr = AppTypeRegistry::default(); atr.write().register::<ResourceA>(); world.insert_resource(atr); world.insert_resource(ResourceA); let scene = DynamicSceneBuilder::from_world(&world) .extract_resources() .extract_resources() .build(); assert_eq!(scene.resources.len(), 1); assert!(scene.resources[0].represents::<ResourceA>()); } #[test] fn should_extract_allowed_components() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ComponentA>(); register.register::<ComponentB>(); } world.insert_resource(atr); let entity_a_b = world.spawn((ComponentA, ComponentB)).id(); let entity_a = world.spawn(ComponentA).id(); let entity_b = world.spawn(ComponentB).id(); let scene = 
DynamicSceneBuilder::from_world(&world) .allow_component::<ComponentA>() .extract_entities([entity_a_b, entity_a, entity_b].into_iter()) .build(); assert_eq!(scene.entities.len(), 3); assert!(scene.entities[2].components[0].represents::<ComponentA>()); assert!(scene.entities[1].components[0].represents::<ComponentA>()); assert_eq!(scene.entities[0].components.len(), 0); } #[test] fn should_not_extract_denied_components() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ComponentA>(); register.register::<ComponentB>(); } world.insert_resource(atr); let entity_a_b = world.spawn((ComponentA, ComponentB)).id(); let entity_a = world.spawn(ComponentA).id(); let entity_b = world.spawn(ComponentB).id(); let scene = DynamicSceneBuilder::from_world(&world) .deny_component::<ComponentA>() .extract_entities([entity_a_b, entity_a, entity_b].into_iter()) .build(); assert_eq!(scene.entities.len(), 3); assert!(scene.entities[0].components[0].represents::<ComponentB>()); assert_eq!(scene.entities[1].components.len(), 0); assert!(scene.entities[2].components[0].represents::<ComponentB>()); } #[test] fn should_extract_allowed_resources() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ResourceA>(); register.register::<ResourceB>(); } world.insert_resource(atr); world.insert_resource(ResourceA); world.insert_resource(ResourceB); let scene = DynamicSceneBuilder::from_world(&world) .allow_resource::<ResourceA>() .extract_resources() .build(); assert_eq!(scene.resources.len(), 1); assert!(scene.resources[0].represents::<ResourceA>()); } #[test] fn should_not_extract_denied_resources() { let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<ResourceA>(); register.register::<ResourceB>(); } world.insert_resource(atr); world.insert_resource(ResourceA); 
world.insert_resource(ResourceB); let scene = DynamicSceneBuilder::from_world(&world) .deny_resource::<ResourceA>() .extract_resources() .build(); assert_eq!(scene.resources.len(), 1); assert!(scene.resources[0].represents::<ResourceB>()); } #[test] fn should_use_from_reflect() { #[derive(Component, Reflect)] #[reflect(Component)] struct SomeType(i32); #[derive(Resource, Reflect)] #[reflect(Resource)] struct SomeResource(i32); let mut world = World::default(); let atr = AppTypeRegistry::default(); { let mut register = atr.write(); register.register::<SomeType>(); register.register::<SomeResource>(); } world.insert_resource(atr); world.insert_resource(SomeResource(123)); let entity = world.spawn(SomeType(123)).id(); let scene = DynamicSceneBuilder::from_world(&world) .extract_resources() .extract_entities(vec![entity].into_iter()) .build(); let component = &scene.entities[0].components[0]; assert!(component .try_as_reflect() .expect("component should be concrete due to `FromReflect`") .is::<SomeType>()); let resource = &scene.resources[0]; assert!(resource .try_as_reflect() .expect("resource should be concrete due to `FromReflect`") .is::<SomeResource>()); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/scene_spawner.rs
crates/bevy_scene/src/scene_spawner.rs
use crate::{DynamicScene, Scene}; use bevy_asset::{AssetEvent, AssetId, Assets, Handle}; use bevy_ecs::{ entity::{Entity, EntityHashMap}, event::EntityEvent, hierarchy::ChildOf, message::{MessageCursor, Messages}, reflect::AppTypeRegistry, resource::Resource, world::{Mut, World}, }; use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::Reflect; use bevy_utils::prelude::DebugName; use thiserror::Error; use uuid::Uuid; use crate::{DynamicSceneRoot, SceneRoot}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ change_detection::ResMut, prelude::{Changed, Component, Without}, system::{Commands, Query}, }; /// Triggered on a scene's parent entity when [`SceneInstance`](`crate::SceneInstance`) becomes ready to use. /// /// See also [`On`], [`SceneSpawner::instance_is_ready`]. /// /// [`On`]: bevy_ecs::observer::On #[derive(Clone, Copy, Debug, Eq, PartialEq, EntityEvent, Reflect)] #[reflect(Debug, PartialEq, Clone)] pub struct SceneInstanceReady { /// The entity whose scene instance is ready. pub entity: Entity, /// Instance which has been spawned. pub instance_id: InstanceId, } /// Information about a scene instance. #[derive(Debug)] struct InstanceInfo { /// Mapping of entities from the scene world to the instance world. entity_map: EntityHashMap<Entity>, /// The parent to attach this instance to. parent: Option<Entity>, } /// Unique id identifying a scene instance. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Reflect)] #[reflect(Debug, PartialEq, Hash, Clone)] pub struct InstanceId(Uuid); impl InstanceId { fn new() -> Self { InstanceId(Uuid::new_v4()) } } /// Handles spawning and despawning scenes in the world, either synchronously or batched through the [`scene_spawner_system`]. 
///
/// Synchronous methods: (Scene operations will take effect immediately)
/// - [`spawn_sync`](Self::spawn_sync)
/// - [`spawn_dynamic_sync`](Self::spawn_dynamic_sync)
/// - [`despawn_sync`](Self::despawn_sync)
/// - [`despawn_dynamic_sync`](Self::despawn_dynamic_sync)
/// - [`despawn_instance_sync`](Self::despawn_instance_sync)
/// - [`update_spawned_scenes`](Self::update_spawned_scenes)
/// - [`update_spawned_dynamic_scenes`](Self::update_spawned_dynamic_scenes)
/// - [`spawn_queued_scenes`](Self::spawn_queued_scenes)
/// - [`despawn_queued_scenes`](Self::despawn_queued_scenes)
/// - [`despawn_queued_instances`](Self::despawn_queued_instances)
///
/// Deferred methods: (Scene operations will be processed when the [`scene_spawner_system`] is run)
/// - [`spawn_dynamic`](Self::spawn_dynamic)
/// - [`spawn_dynamic_as_child`](Self::spawn_dynamic_as_child)
/// - [`spawn`](Self::spawn)
/// - [`spawn_as_child`](Self::spawn_as_child)
/// - [`despawn`](Self::despawn)
/// - [`despawn_dynamic`](Self::despawn_dynamic)
/// - [`despawn_instance`](Self::despawn_instance)
#[derive(Default, Resource)]
pub struct SceneSpawner {
    // Instances currently alive, grouped by the (non-dynamic) scene asset they came from.
    pub(crate) spawned_scenes: HashMap<AssetId<Scene>, HashSet<InstanceId>>,
    // Instances currently alive, grouped by the dynamic scene asset they came from.
    pub(crate) spawned_dynamic_scenes: HashMap<AssetId<DynamicScene>, HashSet<InstanceId>>,
    // Per-instance bookkeeping (entity map + optional parent) for every live instance.
    spawned_instances: HashMap<InstanceId, InstanceInfo>,
    // Cursor into the `AssetEvent<Scene>` message stream, advanced by `scene_spawner_system`.
    scene_asset_event_reader: MessageCursor<AssetEvent<Scene>>,
    // TODO: temp fix for https://github.com/bevyengine/bevy/issues/12756 effect on scenes
    // To handle scene hot reloading, they are unloaded/reloaded on asset modifications.
    // When loading several subassets of a scene as is common with gltf, they each trigger a complete asset load,
    // and each will trigger either a created or modified event for the parent asset. This causes the scene to be
    // unloaded, losing its initial setup, and reloaded without it.
    // Debouncing scene asset events let us ignore events that happen less than SCENE_ASSET_AGE_THRESHOLD frames
    // apart and not reload the scene in those cases as it's unlikely to be an actual asset change.
    debounced_scene_asset_events: HashMap<AssetId<Scene>, u32>,
    // Cursor into the `AssetEvent<DynamicScene>` message stream.
    dynamic_scene_asset_event_reader: MessageCursor<AssetEvent<DynamicScene>>,
    // TODO: temp fix for https://github.com/bevyengine/bevy/issues/12756 effect on scenes
    // See debounced_scene_asset_events
    debounced_dynamic_scene_asset_events: HashMap<AssetId<DynamicScene>, u32>,
    // Deferred-spawn queues, drained by `spawn_queued_scenes`.
    scenes_to_spawn: Vec<(Handle<Scene>, InstanceId, Option<Entity>)>,
    dynamic_scenes_to_spawn: Vec<(Handle<DynamicScene>, InstanceId, Option<Entity>)>,
    // Deferred-despawn queues, drained by `despawn_queued_scenes`.
    scenes_to_despawn: Vec<AssetId<Scene>>,
    dynamic_scenes_to_despawn: Vec<AssetId<DynamicScene>>,
    // Deferred instance despawns, drained by `despawn_queued_instances`.
    instances_to_despawn: Vec<InstanceId>,
    // Instances that finished spawning this run; `trigger_scene_ready_events`
    // drains this to fire `SceneInstanceReady`.
    instances_ready: Vec<(InstanceId, Option<Entity>)>,
}

/// Errors that can occur when spawning a scene.
#[derive(Error, Debug)]
pub enum SceneSpawnError {
    /// Scene contains an unregistered component type.
    #[error("scene contains the unregistered component `{type_path}`. consider adding `#[reflect(Component)]` to your type")]
    UnregisteredComponent {
        /// Type of the unregistered component.
        type_path: String,
    },
    /// Scene contains an unregistered resource type.
    #[error("scene contains the unregistered resource `{type_path}`. consider adding `#[reflect(Resource)]` to your type")]
    UnregisteredResource {
        /// Type of the unregistered resource.
        type_path: String,
    },
    /// Scene contains an unregistered type.
    #[error(
        "scene contains the unregistered type `{std_type_name}`. \
        consider reflecting it with `#[derive(Reflect)]` \
        and registering the type using `app.register_type::<T>()`"
    )]
    UnregisteredType {
        /// The [type name](std::any::type_name) for the unregistered type.
        std_type_name: DebugName,
    },
    /// Scene contains an unregistered type which has a `TypePath`.
    // NOTE(review): the trailing double backtick after `register_type::<T>()` in this
    // message looks like a typo, but it is a runtime string — left unchanged here.
    #[error(
        "scene contains the reflected type `{type_path}` but it was not found in the type registry. \
        consider registering the type using `app.register_type::<T>()``"
    )]
    UnregisteredButReflectedType {
        /// The unregistered type.
        type_path: String,
    },
    /// Scene contains a proxy without a represented type.
    #[error("scene contains dynamic type `{type_path}` without a represented type. consider changing this using `set_represented_type`.")]
    NoRepresentedType {
        /// The dynamic instance type.
        type_path: String,
    },
    /// Dynamic scene with the given id does not exist.
    #[error("scene does not exist")]
    NonExistentScene {
        /// Id of the non-existent dynamic scene.
        id: AssetId<DynamicScene>,
    },
    /// Scene with the given id does not exist.
    #[error("scene does not exist")]
    NonExistentRealScene {
        /// Id of the non-existent scene.
        id: AssetId<Scene>,
    },
}

impl SceneSpawner {
    /// Schedule the spawn of a new instance of the provided dynamic scene.
    pub fn spawn_dynamic(&mut self, id: impl Into<Handle<DynamicScene>>) -> InstanceId {
        let instance_id = InstanceId::new();
        self.dynamic_scenes_to_spawn
            .push((id.into(), instance_id, None));
        instance_id
    }

    /// Schedule the spawn of a new instance of the provided dynamic scene as a child of `parent`.
    pub fn spawn_dynamic_as_child(
        &mut self,
        id: impl Into<Handle<DynamicScene>>,
        parent: Entity,
    ) -> InstanceId {
        let instance_id = InstanceId::new();
        self.dynamic_scenes_to_spawn
            .push((id.into(), instance_id, Some(parent)));
        instance_id
    }

    /// Schedule the spawn of a new instance of the provided scene.
    pub fn spawn(&mut self, id: impl Into<Handle<Scene>>) -> InstanceId {
        let instance_id = InstanceId::new();
        self.scenes_to_spawn.push((id.into(), instance_id, None));
        instance_id
    }

    /// Schedule the spawn of a new instance of the provided scene as a child of `parent`.
    pub fn spawn_as_child(&mut self, id: impl Into<Handle<Scene>>, parent: Entity) -> InstanceId {
        let instance_id = InstanceId::new();
        self.scenes_to_spawn
            .push((id.into(), instance_id, Some(parent)));
        instance_id
    }

    /// Schedule the despawn of all instances of the provided scene.
pub fn despawn(&mut self, id: impl Into<AssetId<Scene>>) { self.scenes_to_despawn.push(id.into()); } /// Schedule the despawn of all instances of the provided dynamic scene. pub fn despawn_dynamic(&mut self, id: impl Into<AssetId<DynamicScene>>) { self.dynamic_scenes_to_despawn.push(id.into()); } /// Schedule the despawn of a scene instance, removing all its entities from the world. /// /// Note: this will despawn _all_ entities associated with this instance, including those /// that have been removed from the scene hierarchy. To despawn _only_ entities still in the hierarchy, /// despawn the relevant root entity directly. pub fn despawn_instance(&mut self, instance_id: InstanceId) { self.instances_to_despawn.push(instance_id); } /// This will remove all records of this instance, without despawning any entities. pub fn unregister_instance(&mut self, instance_id: InstanceId) { self.spawned_instances.remove(&instance_id); } /// Immediately despawns all instances of a scene. pub fn despawn_sync( &mut self, world: &mut World, id: impl Into<AssetId<Scene>>, ) -> Result<(), SceneSpawnError> { if let Some(instance_ids) = self.spawned_scenes.remove(&id.into()) { for instance_id in instance_ids { self.despawn_instance_sync(world, &instance_id); } } Ok(()) } /// Immediately despawns all instances of a dynamic scene. pub fn despawn_dynamic_sync( &mut self, world: &mut World, id: impl Into<AssetId<DynamicScene>>, ) -> Result<(), SceneSpawnError> { if let Some(instance_ids) = self.spawned_dynamic_scenes.remove(&id.into()) { for instance_id in instance_ids { self.despawn_instance_sync(world, &instance_id); } } Ok(()) } /// Immediately despawns a scene instance, removing all its entities from the world. 
    pub fn despawn_instance_sync(&mut self, world: &mut World, instance_id: &InstanceId) {
        if let Some(mut instance) = self.spawned_instances.remove(instance_id) {
            Self::despawn_instance_internal(world, &mut instance);
        }
    }

    /// Despawns every entity recorded for `instance` and clears its entity map.
    fn despawn_instance_internal(world: &mut World, instance: &mut InstanceInfo) {
        for &entity in instance.entity_map.values() {
            // Entities may already have been despawned externally; ignore those.
            if let Ok(entity_mut) = world.get_entity_mut(entity) {
                entity_mut.despawn();
            };
        }
        // Just make sure if we reuse `InstanceInfo` for something, we don't reuse the despawned entities.
        instance.entity_map.clear();
    }

    /// Immediately spawns a new instance of the provided dynamic scene.
    pub fn spawn_dynamic_sync(
        &mut self,
        world: &mut World,
        id: impl Into<AssetId<DynamicScene>>,
    ) -> Result<InstanceId, SceneSpawnError> {
        let mut entity_map = EntityHashMap::default();
        let id = id.into();
        // Write the scene into the world first; only register the instance if that succeeds.
        Self::spawn_dynamic_internal(world, id, &mut entity_map)?;
        let instance_id = InstanceId::new();
        self.spawned_instances.insert(
            instance_id,
            InstanceInfo {
                entity_map,
                parent: None,
            },
        );
        let spawned = self.spawned_dynamic_scenes.entry(id).or_default();
        spawned.insert(instance_id);
        // We trigger `SceneInstanceReady` events after processing all scenes
        // SceneSpawner may not be available in the observer.
        self.instances_ready.push((instance_id, None));
        Ok(instance_id)
    }

    /// Looks up the dynamic scene asset and writes its contents into `world`,
    /// recording scene-world -> instance-world entity mappings in `entity_map`.
    fn spawn_dynamic_internal(
        world: &mut World,
        id: AssetId<DynamicScene>,
        entity_map: &mut EntityHashMap<Entity>,
    ) -> Result<(), SceneSpawnError> {
        // `resource_scope` temporarily removes the `Assets<DynamicScene>` resource so the
        // scene can mutate the rest of the world while we hold a reference to it.
        world.resource_scope(|world, scenes: Mut<Assets<DynamicScene>>| {
            let scene = scenes
                .get(id)
                .ok_or(SceneSpawnError::NonExistentScene { id })?;

            scene.write_to_world(world, entity_map)
        })
    }

    /// Immediately spawns a new instance of the provided scene.
    pub fn spawn_sync(
        &mut self,
        world: &mut World,
        id: impl Into<AssetId<Scene>>,
    ) -> Result<InstanceId, SceneSpawnError> {
        let mut entity_map = EntityHashMap::default();
        let id = id.into();
        // Write the scene into the world first; only register the instance if that succeeds.
        Self::spawn_sync_internal(world, id, &mut entity_map)?;
        let instance_id = InstanceId::new();
        self.spawned_instances.insert(
            instance_id,
            InstanceInfo {
                entity_map,
                parent: None,
            },
        );
        let spawned = self.spawned_scenes.entry(id).or_default();
        spawned.insert(instance_id);
        // We trigger `SceneInstanceReady` events after processing all scenes
        // SceneSpawner may not be available in the observer.
        self.instances_ready.push((instance_id, None));
        Ok(instance_id)
    }

    /// Looks up the scene asset and writes its contents into `world`, using the app's
    /// type registry, recording entity mappings in `entity_map`.
    fn spawn_sync_internal(
        world: &mut World,
        id: AssetId<Scene>,
        entity_map: &mut EntityHashMap<Entity>,
    ) -> Result<(), SceneSpawnError> {
        world.resource_scope(|world, scenes: Mut<Assets<Scene>>| {
            let scene = scenes
                .get(id)
                .ok_or(SceneSpawnError::NonExistentRealScene { id })?;

            scene.write_to_world_with(
                world,
                entity_map,
                &world.resource::<AppTypeRegistry>().clone(),
            )
        })
    }

    /// Iterate through all instances of the provided scenes and update those immediately.
    ///
    /// Useful for updating already spawned scene instances after their corresponding scene has been
    /// modified.
    pub fn update_spawned_scenes(
        &mut self,
        world: &mut World,
        scene_ids: &[AssetId<Scene>],
    ) -> Result<(), SceneSpawnError> {
        for id in scene_ids {
            if let Some(spawned_instances) = self.spawned_scenes.get(id) {
                for instance_id in spawned_instances {
                    if let Some(instance_info) = self.spawned_instances.get_mut(instance_id) {
                        // Despawn the scene before respawning it. This is a very heavy operation,
                        // but otherwise, entities may be left behind, or be left in an otherwise
                        // invalid state (e.g., invalid relationships).
                        Self::despawn_instance_internal(world, instance_info);
                        Self::spawn_sync_internal(world, *id, &mut instance_info.entity_map)?;
                        Self::set_scene_instance_parent_sync(world, instance_info);
                        // We trigger `SceneInstanceReady` events after processing all scenes
                        // SceneSpawner may not be available in the observer.
                        self.instances_ready
                            .push((*instance_id, instance_info.parent));
                    }
                }
            }
        }
        Ok(())
    }

    /// Iterate through all instances of the provided dynamic scenes and update those immediately.
    ///
    /// Useful for updating already spawned scene instances after their corresponding dynamic scene
    /// has been modified.
    pub fn update_spawned_dynamic_scenes(
        &mut self,
        world: &mut World,
        scene_ids: &[AssetId<DynamicScene>],
    ) -> Result<(), SceneSpawnError> {
        for id in scene_ids {
            if let Some(spawned_instances) = self.spawned_dynamic_scenes.get(id) {
                for instance_id in spawned_instances {
                    if let Some(instance_info) = self.spawned_instances.get_mut(instance_id) {
                        // Despawn the scene before respawning it. This is a very heavy operation,
                        // but otherwise, entities may be left behind, or be left in an otherwise
                        // invalid state (e.g., invalid relationships).
                        Self::despawn_instance_internal(world, instance_info);
                        Self::spawn_dynamic_internal(world, *id, &mut instance_info.entity_map)?;
                        Self::set_scene_instance_parent_sync(world, instance_info);
                        // We trigger `SceneInstanceReady` events after processing all scenes
                        // SceneSpawner may not be available in the observer.
                        self.instances_ready
                            .push((*instance_id, instance_info.parent));
                    }
                }
            }
        }
        Ok(())
    }

    /// Immediately despawns all scenes scheduled for despawn by despawning their instances.
    pub fn despawn_queued_scenes(&mut self, world: &mut World) -> Result<(), SceneSpawnError> {
        // `mem::take` drains the queue while releasing the borrow on `self`,
        // so `despawn_sync` can be called with `&mut self` inside the loop.
        let scenes_to_despawn = core::mem::take(&mut self.scenes_to_despawn);

        for scene_handle in scenes_to_despawn {
            self.despawn_sync(world, scene_handle)?;
        }

        let scenes_to_despawn = core::mem::take(&mut self.dynamic_scenes_to_despawn);

        for scene_handle in scenes_to_despawn {
            self.despawn_dynamic_sync(world, scene_handle)?;
        }
        Ok(())
    }

    /// Immediately despawns all scene instances scheduled for despawn.
    pub fn despawn_queued_instances(&mut self, world: &mut World) {
        let instances_to_despawn = core::mem::take(&mut self.instances_to_despawn);

        for instance_id in instances_to_despawn {
            self.despawn_instance_sync(world, &instance_id);
        }
    }

    /// Immediately spawns all scenes scheduled for spawn.
    pub fn spawn_queued_scenes(&mut self, world: &mut World) -> Result<(), SceneSpawnError> {
        let scenes_to_spawn = core::mem::take(&mut self.dynamic_scenes_to_spawn);

        for (handle, instance_id, parent) in scenes_to_spawn {
            let mut entity_map = EntityHashMap::default();

            match Self::spawn_dynamic_internal(world, handle.id(), &mut entity_map) {
                Ok(_) => {
                    let instance_info = InstanceInfo { entity_map, parent };
                    Self::set_scene_instance_parent_sync(world, &instance_info);
                    self.spawned_instances.insert(instance_id, instance_info);
                    let spawned = self.spawned_dynamic_scenes.entry(handle.id()).or_default();
                    spawned.insert(instance_id);
                    // We trigger `SceneInstanceReady` events after processing all scenes
                    // SceneSpawner may not be available in the observer.
                    self.instances_ready.push((instance_id, parent));
                }
                // Asset not loaded yet: requeue and retry on a later run.
                Err(SceneSpawnError::NonExistentScene { .. }) => {
                    self.dynamic_scenes_to_spawn
                        .push((handle, instance_id, parent));
                }
                Err(err) => return Err(err),
            }
        }

        let scenes_to_spawn = core::mem::take(&mut self.scenes_to_spawn);

        for (scene_handle, instance_id, parent) in scenes_to_spawn {
            let mut entity_map = EntityHashMap::default();

            match Self::spawn_sync_internal(world, scene_handle.id(), &mut entity_map) {
                Ok(_) => {
                    let instance_info = InstanceInfo { entity_map, parent };
                    Self::set_scene_instance_parent_sync(world, &instance_info);
                    self.spawned_instances.insert(instance_id, instance_info);
                    let spawned = self.spawned_scenes.entry(scene_handle.id()).or_default();
                    spawned.insert(instance_id);
                    // We trigger `SceneInstanceReady` events after processing all scenes
                    // SceneSpawner may not be available in the observer.
                    self.instances_ready.push((instance_id, parent));
                }
                // Asset not loaded yet: requeue and retry on a later run.
                Err(SceneSpawnError::NonExistentRealScene { .. }) => {
                    self.scenes_to_spawn
                        .push((scene_handle, instance_id, parent));
                }
                Err(err) => return Err(err),
            }
        }

        Ok(())
    }

    /// Attaches the root entities of `instance` to its recorded parent, if any.
    fn set_scene_instance_parent_sync(world: &mut World, instance: &InstanceInfo) {
        let Some(parent) = instance.parent else {
            return;
        };
        for &entity in instance.entity_map.values() {
            // Add the `ChildOf` component to the scene root, and update the `Children` component of
            // the scene parent
            if !world
                .get_entity(entity)
                .ok()
                // This will filter only the scene root entity, as all other from the
                // scene have a parent
                // Entities that wouldn't exist anymore are also skipped
                // this case shouldn't happen anyway
                .is_none_or(|entity| entity.contains::<ChildOf>())
            {
                world.entity_mut(parent).add_child(entity);
            }
        }
    }

    /// Drains `instances_ready` and fires a `SceneInstanceReady` event per instance.
    fn trigger_scene_ready_events(&mut self, world: &mut World) {
        for (instance_id, parent) in self.instances_ready.drain(..) {
            if let Some(parent) = parent {
                // Defer via commands otherwise SceneSpawner is not available in the observer.
                world.commands().trigger(SceneInstanceReady {
                    instance_id,
                    entity: parent,
                });
            } else {
                // Defer via commands otherwise SceneSpawner is not available in the observer.
                // TODO: triggering this for PLACEHOLDER is suboptimal, but this scene system is on
                // its way out, so lets avoid breaking people by making a second event.
                world.commands().trigger(SceneInstanceReady {
                    instance_id,
                    entity: Entity::PLACEHOLDER,
                });
            }
        }
    }

    /// Check that a scene instance spawned previously is ready to use
    pub fn instance_is_ready(&self, instance_id: InstanceId) -> bool {
        self.spawned_instances.contains_key(&instance_id)
    }

    /// Get an iterator over the entities in an instance, once it's spawned.
    ///
    /// Before the scene is spawned, the iterator will be empty. Use [`Self::instance_is_ready`]
    /// to check if the instance is ready.
    pub fn iter_instance_entities(
        &'_ self,
        instance_id: InstanceId,
    ) -> impl Iterator<Item = Entity> + '_ {
        self.spawned_instances
            .get(&instance_id)
            .map(|instance| instance.entity_map.values())
            .into_iter()
            .flatten()
            .copied()
    }
}

/// System that handles scheduled scene instance spawning and despawning through a [`SceneSpawner`].
pub fn scene_spawner_system(world: &mut World) {
    world.resource_scope(|world, mut scene_spawner: Mut<SceneSpawner>| {
        // remove any loading instances where parent is deleted
        let is_parent_alive = |parent: &Option<Entity>| {
            parent
                .map(|parent| world.get_entity(parent).is_ok())
                .unwrap_or(true) // If we don't have a parent, then consider the parent alive.
        };
        scene_spawner
            .dynamic_scenes_to_spawn
            .retain(|(_, _, parent)| is_parent_alive(parent));
        scene_spawner
            .scenes_to_spawn
            .retain(|(_, _, parent)| is_parent_alive(parent));

        let scene_asset_events = world.resource::<Messages<AssetEvent<Scene>>>();
        let dynamic_scene_asset_events = world.resource::<Messages<AssetEvent<DynamicScene>>>();

        // Reborrow through `&mut *` so field borrows below can be split.
        let scene_spawner = &mut *scene_spawner;

        // Collect scenes whose assets were modified (and not debounced) so their
        // live instances can be respawned below.
        let mut updated_spawned_scenes = Vec::new();
        for event in scene_spawner
            .scene_asset_event_reader
            .read(scene_asset_events)
        {
            match event {
                AssetEvent::Added { id } => {
                    scene_spawner.debounced_scene_asset_events.insert(*id, 0);
                }
                AssetEvent::Modified { id } => {
                    // Only reload if this asset was not already seen recently (debounce).
                    if scene_spawner
                        .debounced_scene_asset_events
                        .insert(*id, 0)
                        .is_none()
                        && scene_spawner.spawned_scenes.contains_key(id)
                    {
                        updated_spawned_scenes.push(*id);
                    }
                }
                _ => {}
            }
        }

        let mut updated_spawned_dynamic_scenes = Vec::new();
        for event in scene_spawner
            .dynamic_scene_asset_event_reader
            .read(dynamic_scene_asset_events)
        {
            match event {
                AssetEvent::Added { id } => {
                    scene_spawner
                        .debounced_dynamic_scene_asset_events
                        .insert(*id, 0);
                }
                AssetEvent::Modified { id } => {
                    // Only reload if this asset was not already seen recently (debounce).
                    if scene_spawner
                        .debounced_dynamic_scene_asset_events
                        .insert(*id, 0)
                        .is_none()
                        && scene_spawner.spawned_dynamic_scenes.contains_key(id)
                    {
                        updated_spawned_dynamic_scenes.push(*id);
                    }
                }
                _ => {}
            }
        }

        scene_spawner.despawn_queued_scenes(world).unwrap();
        scene_spawner.despawn_queued_instances(world);
        scene_spawner
            .spawn_queued_scenes(world)
            .unwrap_or_else(|err| panic!("{}", err));
        scene_spawner
            .update_spawned_scenes(world, &updated_spawned_scenes)
            .unwrap();
        scene_spawner
            .update_spawned_dynamic_scenes(world, &updated_spawned_dynamic_scenes)
            .unwrap();
        scene_spawner.trigger_scene_ready_events(world);

        // Age the debounce entries; drop those older than the threshold so future
        // modifications to the same asset trigger a reload again.
        const SCENE_ASSET_AGE_THRESHOLD: u32 = 2;
        // NOTE(review): cloning the map just to iterate its keys is an O(n) copy each
        // run; `retain` with an in-place age increment would avoid it — TODO confirm.
        for asset_id in scene_spawner.debounced_scene_asset_events.clone().keys() {
            let age = scene_spawner
                .debounced_scene_asset_events
                .get(asset_id)
                .unwrap();
            if *age > SCENE_ASSET_AGE_THRESHOLD {
                scene_spawner.debounced_scene_asset_events.remove(asset_id);
            } else {
                scene_spawner
                    .debounced_scene_asset_events
                    .insert(*asset_id, *age + 1);
            }
        }
        for asset_id in scene_spawner
            .debounced_dynamic_scene_asset_events
            .clone()
            .keys()
        {
            let age = scene_spawner
                .debounced_dynamic_scene_asset_events
                .get(asset_id)
                .unwrap();
            if *age > SCENE_ASSET_AGE_THRESHOLD {
                scene_spawner
                    .debounced_dynamic_scene_asset_events
                    .remove(asset_id);
            } else {
                scene_spawner
                    .debounced_dynamic_scene_asset_events
                    .insert(*asset_id, *age + 1);
            }
        }
    });
}

/// [`InstanceId`] of a spawned scene. It can be used with the [`SceneSpawner`] to
/// interact with the spawned scene.
#[derive(Component, Deref, DerefMut)]
pub struct SceneInstance(pub(crate) InstanceId);

/// System that will spawn scenes from the [`SceneRoot`] and [`DynamicSceneRoot`] components.
pub fn scene_spawner(
    mut commands: Commands,
    mut scene_to_spawn: Query<
        (Entity, &SceneRoot, Option<&mut SceneInstance>),
        (Changed<SceneRoot>, Without<DynamicSceneRoot>),
    >,
    mut dynamic_scene_to_spawn: Query<
        (Entity, &DynamicSceneRoot, Option<&mut SceneInstance>),
        (Changed<DynamicSceneRoot>, Without<SceneRoot>),
    >,
    mut scene_spawner: ResMut<SceneSpawner>,
) {
    for (entity, scene, instance) in &mut scene_to_spawn {
        let new_instance = scene_spawner.spawn_as_child(scene.0.clone(), entity);
        // If the root already had an instance, queue the old one for despawn and
        // swap in the new id; otherwise attach a fresh `SceneInstance`.
        if let Some(mut old_instance) = instance {
            scene_spawner.despawn_instance(**old_instance);
            *old_instance = SceneInstance(new_instance);
        } else {
            commands.entity(entity).insert(SceneInstance(new_instance));
        }
    }
    for (entity, dynamic_scene, instance) in &mut dynamic_scene_to_spawn {
        let new_instance = scene_spawner.spawn_dynamic_as_child(dynamic_scene.0.clone(), entity);
        if let Some(mut old_instance) = instance {
            scene_spawner.despawn_instance(**old_instance);
            *old_instance = SceneInstance(new_instance);
        } else {
            commands.entity(entity).insert(SceneInstance(new_instance));
        }
    }
}

#[cfg(test)]
mod tests {
    use bevy_app::App;
    use bevy_asset::{AssetPlugin, AssetServer,
Handle}; use bevy_ecs::{ component::Component, hierarchy::Children, observer::On, prelude::ReflectComponent, query::With, system::{Commands, Query, Res, ResMut, RunSystemOnce}, }; use bevy_reflect::Reflect; use crate::{DynamicSceneBuilder, DynamicSceneRoot, ScenePlugin}; use super::*; use crate::{DynamicScene, SceneSpawner}; use bevy_app::ScheduleRunnerPlugin; use bevy_asset::Assets; use bevy_ecs::{ entity::Entity, prelude::{AppTypeRegistry, World}, }; #[derive(Component, Reflect, Default)] #[reflect(Component)] struct ComponentA { pub x: f32, pub y: f32, } #[test] fn spawn_and_delete() { let mut app = App::new(); app.add_plugins(ScheduleRunnerPlugin::default()) .add_plugins(AssetPlugin::default()) .add_plugins(ScenePlugin); app.update(); let mut scene_world = World::new(); // create a new DynamicScene manually let type_registry = app.world().resource::<AppTypeRegistry>().clone(); scene_world.insert_resource(type_registry); scene_world.spawn(ComponentA { x: 3.0, y: 4.0 }); let scene = DynamicScene::from_world(&scene_world); let scene_handle = app .world_mut() .resource_mut::<Assets<DynamicScene>>() .add(scene); // spawn the scene as a child of `entity` using `DynamicSceneRoot` let entity = app .world_mut() .spawn(DynamicSceneRoot(scene_handle.clone())) .id(); // run the app's schedule once, so that the scene gets spawned app.update(); // make sure that the scene was added as a child of the root entity let (scene_entity, scene_component_a) = app .world_mut() .query::<(Entity, &ComponentA)>() .single(app.world()) .unwrap(); assert_eq!(scene_component_a.x, 3.0); assert_eq!(scene_component_a.y, 4.0); assert_eq!( app.world().entity(entity).get::<Children>().unwrap().len(), 1 ); // let's try to delete the scene let mut scene_spawner = app.world_mut().resource_mut::<SceneSpawner>(); scene_spawner.despawn_dynamic(&scene_handle); // run the scene spawner system to despawn the scene app.update(); // the scene entity does not exist anymore 
assert!(app.world().get_entity(scene_entity).is_err()); // the root entity does not have any children anymore assert!(app.world().entity(entity).get::<Children>().is_none()); } #[derive(Reflect, Component, Debug, PartialEq, Eq, Clone, Copy, Default)] #[reflect(Component)] struct A(usize); #[test] fn clone_dynamic_entities() { let mut world = World::default(); // setup let atr = AppTypeRegistry::default(); atr.write().register::<A>(); world.insert_resource(atr); world.insert_resource(Assets::<DynamicScene>::default()); // start test world.spawn(A(42)); assert_eq!(world.query::<&A>().iter(&world).len(), 1); // clone only existing entity let mut scene_spawner = SceneSpawner::default(); let entity = world .query_filtered::<Entity, With<A>>() .single(&world) .unwrap(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .build();
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_scene/src/components.rs
crates/bevy_scene/src/components.rs
use bevy_asset::{AsAssetId, AssetId, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{component::Component, prelude::ReflectComponent};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_transform::components::Transform;
use derive_more::derive::From;

use bevy_camera::visibility::Visibility;

use crate::{DynamicScene, Scene};

/// Adding this component will spawn the scene as a child of that entity.
/// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
#[require(Transform)]
#[require(Visibility)]
pub struct SceneRoot(pub Handle<Scene>);

impl AsAssetId for SceneRoot {
    type Asset = Scene;

    /// The id of the scene asset this root points at.
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        // `self.id()` resolves through the derived `Deref` to the inner `Handle<Scene>`.
        self.id()
    }
}

/// Adding this component will spawn the scene as a child of that entity.
/// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
#[require(Transform)]
#[require(Visibility)]
pub struct DynamicSceneRoot(pub Handle<DynamicScene>);

impl AsAssetId for DynamicSceneRoot {
    type Asset = DynamicScene;

    /// The id of the dynamic scene asset this root points at.
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        // `self.id()` resolves through the derived `Deref` to the inner `Handle<DynamicScene>`.
        self.id()
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/lib.rs
crates/bevy_platform/src/lib.rs
#![cfg_attr(docsrs, feature(doc_cfg))] #![doc( html_logo_url = "https://bevy.org/assets/icon.png", html_favicon_url = "https://bevy.org/assets/icon.png" )] #![no_std] //! Platform compatibility support for first-party [Bevy] engine crates. //! //! [Bevy]: https://bevy.org/ cfg::std! { extern crate std; } cfg::alloc! { extern crate alloc; pub mod collections; } pub mod cell; pub mod cfg; pub mod hash; pub mod sync; pub mod thread; pub mod time; /// Frequently used items which would typically be included in most contexts. /// /// When adding `no_std` support to a crate for the first time, often there's a substantial refactor /// required due to the change in implicit prelude from `std::prelude` to `core::prelude`. /// This unfortunately leaves out many items from `alloc`, even if the crate unconditionally /// includes that crate. /// /// This prelude aims to ease the transition by re-exporting items from `alloc` which would /// otherwise be included in the `std` implicit prelude. pub mod prelude { crate::cfg::alloc! { pub use alloc::{ borrow::ToOwned, boxed::Box, format, string::String, string::ToString, vec, vec::Vec, }; } // Items from `std::prelude` that are missing in this module: // * dbg // * eprint // * eprintln // * is_x86_feature_detected // * print // * println // * thread_local } /// Re-exports of crates that are useful across Bevy. /// Not intended for external crates to use. #[doc(hidden)] pub mod exports { crate::cfg::web! { pub use js_sys; pub use wasm_bindgen; pub use wasm_bindgen_futures; } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/hash.rs
crates/bevy_platform/src/hash.rs
//! Provides replacements for `std::hash` items using [`foldhash`].
//!
//! Also provides some additional items beyond the standard library.

use core::{
    fmt::Debug,
    hash::{BuildHasher, Hash, Hasher},
    marker::PhantomData,
    ops::Deref,
};

pub use foldhash::fast::{FixedState, FoldHasher as DefaultHasher, RandomState};

/// For when you want a deterministic hasher.
///
/// Seed was randomly generated with a fair dice roll. Guaranteed to be random:
/// <https://github.com/bevyengine/bevy/pull/1268/files#r560918426>
const FIXED_HASHER: FixedState =
    FixedState::with_seed(0b1001010111101110000001001100010000000011001001101011001001111000);

/// Deterministic hasher based upon a random but fixed state.
#[derive(Copy, Clone, Default, Debug)]
pub struct FixedHasher;

impl BuildHasher for FixedHasher {
    type Hasher = DefaultHasher<'static>;

    #[inline]
    fn build_hasher(&self) -> Self::Hasher {
        // Every hasher starts from the same fixed seed, making hashes reproducible.
        FIXED_HASHER.build_hasher()
    }
}

/// A pre-hashed value of a specific type. Pre-hashing enables memoization of hashes that are expensive to compute.
///
/// It also enables faster [`PartialEq`] comparisons by short circuiting on hash equality.
/// See [`PassHash`] and [`PassHasher`] for a "pass through" [`BuildHasher`] and [`Hasher`] implementation
/// designed to work with [`Hashed`]
/// See `PreHashMap` for a hashmap pre-configured to use [`Hashed`] keys.
pub struct Hashed<V, S = FixedHasher> {
    // The hash is computed once in `new` and never recomputed.
    hash: u64,
    value: V,
    // Records which `BuildHasher` produced `hash` without storing an instance.
    marker: PhantomData<S>,
}

impl<V: Hash, H: BuildHasher + Default> Hashed<V, H> {
    /// Pre-hashes the given value using the [`BuildHasher`] configured in the [`Hashed`] type.
    pub fn new(value: V) -> Self {
        Self {
            hash: H::default().hash_one(&value),
            value,
            marker: PhantomData,
        }
    }

    /// The pre-computed hash.
    #[inline]
    pub fn hash(&self) -> u64 {
        self.hash
    }
}

impl<V, H> Hash for Hashed<V, H> {
    #[inline]
    fn hash<R: Hasher>(&self, state: &mut R) {
        // Feed only the cached hash; pairs with `PassHasher`, which passes it through.
        state.write_u64(self.hash);
    }
}

impl<V: Hash, H: BuildHasher + Default> From<V> for Hashed<V, H> {
    fn from(value: V) -> Self {
        Self::new(value)
    }
}

impl<V, H> Deref for Hashed<V, H> {
    type Target = V;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<V: PartialEq, H> PartialEq for Hashed<V, H> {
    /// A fast impl of [`PartialEq`] that first checks that `other`'s pre-computed hash
    /// matches this value's pre-computed hash.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.hash == other.hash && self.value.eq(&other.value)
    }
}

impl<V: Debug, H> Debug for Hashed<V, H> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Hashed")
            .field("hash", &self.hash)
            .field("value", &self.value)
            .finish()
    }
}

// Manual `Clone`/`Copy`/`Eq` impls avoid requiring `H` to implement those traits
// (a derive would add bounds on the `PhantomData` parameter too).
impl<V: Clone, H> Clone for Hashed<V, H> {
    #[inline]
    fn clone(&self) -> Self {
        Self {
            hash: self.hash,
            value: self.value.clone(),
            marker: PhantomData,
        }
    }
}

impl<V: Copy, H> Copy for Hashed<V, H> {}

impl<V: Eq, H> Eq for Hashed<V, H> {}

/// A [`BuildHasher`] that results in a [`PassHasher`].
#[derive(Default, Clone)]
pub struct PassHash;

impl BuildHasher for PassHash {
    type Hasher = PassHasher;

    fn build_hasher(&self) -> Self::Hasher {
        PassHasher::default()
    }
}

/// A no-op hash that only works on `u64`s. Will panic if attempting to
/// hash a type containing non-u64 fields.
#[derive(Debug, Default)]
pub struct PassHasher {
    hash: u64,
}

impl Hasher for PassHasher {
    #[inline]
    fn finish(&self) -> u64 {
        self.hash
    }

    fn write(&mut self, _bytes: &[u8]) {
        // Only `write_u64` is supported: this hasher passes a pre-computed
        // hash straight through (see `Hashed`'s `Hash` impl).
        panic!("can only hash u64 using PassHasher");
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.hash = i;
    }
}

/// [`BuildHasher`] for types that already contain a high-quality hash.
#[derive(Clone, Default)]
pub struct NoOpHash;

impl BuildHasher for NoOpHash {
    type Hasher = NoOpHasher;

    fn build_hasher(&self) -> Self::Hasher {
        // Start from zero; callers are expected to supply the hash via `write_u64`.
        NoOpHasher(0)
    }
}

#[doc(hidden)]
pub struct NoOpHasher(u64);

// Intended for types that already carry a high-quality hash: `write_u64`
// stores the caller's hash verbatim instead of mixing it again.
impl Hasher for NoOpHasher {
    fn finish(&self) -> u64 {
        self.0
    }

    fn write(&mut self, bytes: &[u8]) {
        // Consumers should call `write_u64` instead; this byte-wise fallback
        // exists only so applications don't break (it is slower).
        for byte in bytes {
            self.0 = self.0.rotate_left(8).wrapping_add(u64::from(*byte));
        }
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.0 = i;
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/cfg.rs
crates/bevy_platform/src/cfg.rs
//! Provides helpful configuration macros, allowing detection of platform features such as
//! [`alloc`](crate::cfg::alloc) or [`std`](crate::cfg::std) without explicit features.

/// Provides a `match`-like expression similar to [`cfg_if`] and based on the experimental
/// [`cfg_match`].
/// The name `switch` is used to avoid conflict with the `match` keyword.
/// Arms are evaluated top to bottom, and an optional wildcard arm can be provided if no match
/// can be made.
///
/// An arm can either be:
/// - a `cfg(...)` pattern (e.g., `feature = "foo"`)
/// - a wildcard `_`
/// - an alias defined using [`define_alias`]
///
/// Common aliases are provided by [`cfg`](crate::cfg).
/// Note that aliases are evaluated from the context of the defining crate, not the consumer.
///
/// # Examples
///
/// ```
/// # use bevy_platform::cfg;
/// # fn log(_: &str) {}
/// # fn foo(_: &str) {}
/// #
/// cfg::switch! {
///     #[cfg(feature = "foo")] => {
///         foo("We have the `foo` feature!")
///     }
///     cfg::std => {
///         extern crate std;
///         std::println!("No `foo`, but we have `std`!");
///     }
///     _ => {
///         log("Don't have `std` or `foo`")
///     }
/// }
/// ```
///
/// [`cfg_if`]: https://crates.io/crates/cfg-if
/// [`cfg_match`]: https://github.com/rust-lang/rust/issues/115585
#[doc(inline)]
pub use crate::switch;

/// Defines an alias for a particular configuration.
/// This has two advantages over directly using `#[cfg(...)]`:
///
/// 1. Complex configurations can be abbreviated to more meaningful shorthand.
/// 2. Features are evaluated in the context of the _defining_ crate, not the consuming.
///
/// The second advantage is a particularly powerful tool, as it allows consuming crates to use
/// functionality in a defining crate regardless of what crate in the dependency graph enabled the
/// relevant feature.
///
/// For example, consider a crate `foo` that depends on another crate `bar`.
/// `bar` has a feature "`faster_algorithms`".
/// If `bar` defines a "`faster_algorithms`" alias:
///
/// ```ignore
/// define_alias! {
///     #[cfg(feature = "faster_algorithms")] => { faster_algorithms }
/// }
/// ```
///
/// Now, `foo` can gate its usage of those faster algorithms on the alias, avoiding the need to
/// expose its own "`faster_algorithms`" feature.
/// This also avoids the unfortunate situation where one crate activates "`faster_algorithms`" on
/// `bar` without activating that same feature on `foo`.
///
/// Once an alias is defined, there are 4 ways you can use it:
///
/// 1. Evaluate with no contents to return a `bool` indicating if the alias is active.
///    ```
///    # use bevy_platform::cfg;
///    if cfg::std!() {
///        // Have `std`!
///    } else {
///        // No `std`...
///    }
///    ```
/// 2. Pass a single code block which will only be compiled if the alias is active.
///    ```
///    # use bevy_platform::cfg;
///    cfg::std! {
///        // Have `std`!
///        # ()
///    }
///    ```
/// 3. Pass a single `if { ... } else { ... }` expression to conditionally compile either the first
///    or the second code block.
///    ```
///    # use bevy_platform::cfg;
///    cfg::std! {
///        if {
///            // Have `std`!
///        } else {
///            // No `std`...
///        }
///    }
///    ```
/// 4. Use in a [`switch`] arm for more complex conditional compilation.
///    ```
///    # use bevy_platform::cfg;
///    cfg::switch! {
///        cfg::std => {
///            // Have `std`!
///        }
///        cfg::alloc => {
///            // No `std`, but do have `alloc`!
///        }
///        _ => {
///            // No `std` or `alloc`...
///        }
///    }
///    ```
#[doc(inline)]
pub use crate::define_alias;

/// Macro which represents an enabled compilation condition.
#[doc(inline)]
pub use crate::enabled;

/// Macro which represents a disabled compilation condition.
#[doc(inline)]
pub use crate::disabled;

#[doc(hidden)]
#[macro_export]
macro_rules! switch {
    // Braced invocation: strip one level of braces and recurse.
    ({ $($tt:tt)* }) => {{
        $crate::switch! { $($tt)* }
    }};
    // Wildcard arm: unconditionally expand to the arm's body.
    (_ => { $($output:tt)* }) => {
        $($output)*
    };
    // Alias arm: delegate the decision to the alias macro (`enabled!`/`disabled!`),
    // which selects either this arm's body or the expansion of the remaining arms.
    (
        $cond:path => $output:tt
        $($( $rest:tt )+)?
    ) => {
        $cond! {
            if {
                $crate::switch! { _ => $output }
            } else {
                $(
                    $crate::switch! { $($rest)+ }
                )?
            }
        }
    };
    // `#[cfg(...)]` arm: emit the body under the cfg, and the remaining arms under
    // its negation, so at most one arm survives compilation.
    (
        #[cfg($cfg:meta)] => $output:tt
        $($( $rest:tt )+)?
    ) => {
        #[cfg($cfg)]
        $crate::switch! { _ => $output }
        $(
            #[cfg(not($cfg))]
            $crate::switch! { $($rest)+ }
        )?
    };
}

#[doc(hidden)]
#[macro_export]
macro_rules! disabled {
    // Bare invocation: report the condition as inactive.
    () => { false };
    // `if { ... } else { ... }` form: expand to the `else` branch.
    (if { $($p:tt)* } else { $($n:tt)* }) => { $($n)* };
    // Code-block form: suppress the provided code entirely.
    ($($p:tt)*) => {};
}

#[doc(hidden)]
#[macro_export]
macro_rules! enabled {
    // Bare invocation: report the condition as active.
    () => { true };
    // `if { ... } else { ... }` form: expand to the `if` branch.
    (if { $($p:tt)* } else { $($n:tt)* }) => { $($p)* };
    // Code-block form: pass the provided code through unchanged.
    ($($p:tt)*) => { $($p)* };
}

#[doc(hidden)]
#[macro_export]
macro_rules! define_alias {
    // Bare-identifier entry, optionally followed by `, <more entries>`:
    // normalize to the braced form below.
    (
        #[cfg($meta:meta)] => $p:ident
        $(, $( $rest:tt )+)?
    ) => {
        $crate::define_alias! {
            #[cfg($meta)] => { $p }
            $( $($rest)+ )?
        }
    };
    // Bare-identifier entry with a trailing comma: normalize to the braced form.
    (
        #[cfg($meta:meta)] => $p:ident,
        $($( $rest:tt )+)?
    ) => {
        $crate::define_alias! {
            #[cfg($meta)] => { $p }
            $( $($rest)+ )?
        }
    };
    // Braced form: define the alias as a re-export of `enabled!` or `disabled!`,
    // chosen by whether `$meta` is active in *this* crate's build, then recurse
    // on any remaining entries.
    (
        #[cfg($meta:meta)] => {
            $(#[$p_meta:meta])*
            $p:ident
        }
        $($( $rest:tt )+)?
    ) => {
        $crate::switch! {
            #[cfg($meta)] => {
                $(#[$p_meta])*
                #[doc(inline)]
                ///
                #[doc = concat!("This macro passes the provided code because `#[cfg(", stringify!($meta), ")]` is currently active.")]
                pub use $crate::enabled as $p;
            }
            _ => {
                $(#[$p_meta])*
                #[doc(inline)]
                ///
                #[doc = concat!("This macro suppresses the provided code because `#[cfg(", stringify!($meta), ")]` is _not_ currently active.")]
                pub use $crate::disabled as $p;
            }
        }
        $(
            $crate::define_alias! {
                $($rest)+
            }
        )?
    }
}

// The common aliases exported from this module; each expands to `enabled!` or
// `disabled!` based on the corresponding cfg at bevy_platform's own build time.
define_alias! {
    #[cfg(feature = "alloc")] => {
        /// Indicates the `alloc` crate is available and can be used.
        alloc
    }
    #[cfg(feature = "std")] => {
        /// Indicates the `std` crate is available and can be used.
        std
    }
    #[cfg(panic = "unwind")] => {
        /// Indicates that a [`panic`] will be unwound, and can be potentially caught.
        panic_unwind
    }
    #[cfg(panic = "abort")] => {
        /// Indicates that a [`panic`] will lead to an abort, and cannot be caught.
        panic_abort
    }
    #[cfg(all(target_arch = "wasm32", feature = "web"))] => {
        /// Indicates that this target has access to browser APIs.
        web
    }
    #[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] => {
        /// Indicates that this target has access to a native implementation of `Arc`.
        arc
    }
    #[cfg(feature = "critical-section")] => {
        /// Indicates `critical-section` is available.
        critical_section
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/thread.rs
crates/bevy_platform/src/thread.rs
//! Provides `sleep` for all platforms. pub use thread::sleep; crate::cfg::switch! { // TODO: use browser timeouts based on ScheduleRunnerPlugin::build // crate::cfg::web => { ... } crate::cfg::std => { use std::thread; } _ => { mod fallback { use core::{hint::spin_loop, time::Duration}; use crate::time::Instant; /// Puts the current thread to sleep for at least the specified amount of time. /// /// As this is a `no_std` fallback implementation, this will spin the current thread. pub fn sleep(dur: Duration) { let start = Instant::now(); while start.elapsed() < dur { spin_loop(); } } } use fallback as thread; } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/cell/mod.rs
crates/bevy_platform/src/cell/mod.rs
//! Provides cell primitives. //! //! This is a drop-in replacement for `std::cell::SyncCell`/`std::cell::SyncUnsafeCell`. mod sync_cell; mod sync_unsafe_cell; pub use sync_cell::SyncCell; pub use sync_unsafe_cell::SyncUnsafeCell;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/cell/sync_cell.rs
crates/bevy_platform/src/cell/sync_cell.rs
#![expect(unsafe_code, reason = "SyncCell requires unsafe code.")]

//! A reimplementation of the currently unstable [`std::sync::Exclusive`]
//!
//! [`std::sync::Exclusive`]: https://doc.rust-lang.org/nightly/std/sync/struct.Exclusive.html

use core::ptr;

/// A wrapper that makes any wrapped type unconditionally [`Sync`] by handing the
/// inner value out through `&mut self` only.
///
/// See [`Exclusive`](https://github.com/rust-lang/rust/issues/98407) for stdlib's upcoming
/// implementation, which should replace this one entirely.
#[repr(transparent)]
pub struct SyncCell<T: ?Sized> {
    inner: T,
}

// SAFETY: `Sync` only permits multithreaded access through immutable references.
// For `!Sync` payloads, `SyncCell` only ever exposes the payload behind `&mut self`,
// so declaring it `Sync` cannot introduce unsynchronized access to the inner value.
unsafe impl<T: ?Sized> Sync for SyncCell<T> {}

impl<T: Sized> SyncCell<T> {
    /// Wraps `inner` in a new `SyncCell`.
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    /// Consumes the cell and returns the wrapped value.
    pub fn to_inner(cell: Self) -> T {
        cell.inner
    }
}

impl<T: ?Sized> SyncCell<T> {
    /// Returns exclusive access to the wrapped value.
    pub fn get(&mut self) -> &mut T {
        &mut self.inner
    }

    /// Returns shared access to the wrapped value; only available when `T` is
    /// already [`Sync`].
    pub fn read(&self) -> &T
    where
        T: Sync,
    {
        &self.inner
    }

    /// Reinterprets a `&mut T` as a `&mut SyncCell<T>`, skipping construction
    /// via [`new()`](SyncCell::new()).
    pub fn from_mut(value: &'_ mut T) -> &'_ mut SyncCell<T> {
        let cell = ptr::from_mut(value) as *mut SyncCell<T>;
        // SAFETY: `#[repr(transparent)]` gives both references the same layout,
        // and `SyncCell`'s guarantees are `&mut`-agnostic.
        unsafe { &mut *cell }
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/cell/sync_unsafe_cell.rs
crates/bevy_platform/src/cell/sync_unsafe_cell.rs
#![expect(unsafe_code, reason = "SyncUnsafeCell requires unsafe code.")]

//! A reimplementation of the currently unstable [`std::cell::SyncUnsafeCell`]
//!
//! [`std::cell::SyncUnsafeCell`]: https://doc.rust-lang.org/nightly/std/cell/struct.SyncUnsafeCell.html

pub use core::cell::UnsafeCell;

use core::ptr;

/// An [`UnsafeCell`] that additionally implements [`Sync`] when `T` does.
///
/// `UnsafeCell` deliberately opts out of `Sync` to prevent accidental misuse;
/// this wrapper opts back in for the cases where sharing across threads is
/// intentional. All synchronization remains the caller's responsibility, so the
/// type is exactly as unsafe to use as a bare `UnsafeCell`.
///
/// See the [tracking issue](https://github.com/rust-lang/rust/issues/95439) for the upcoming
/// native implementation, which should replace this one entirely (except `from_mut`).
///
/// See [`UnsafeCell`] for details.
#[repr(transparent)]
pub struct SyncUnsafeCell<T: ?Sized> {
    value: UnsafeCell<T>,
}

// SAFETY: `T` is `Sync`; the caller is responsible for upholding Rust's aliasing rules.
unsafe impl<T: ?Sized + Sync> Sync for SyncUnsafeCell<T> {}

impl<T> SyncUnsafeCell<T> {
    /// Wraps `value` in a new cell.
    #[inline]
    pub const fn new(value: T) -> Self {
        Self {
            value: UnsafeCell::new(value),
        }
    }

    /// Consumes the cell, returning the wrapped value.
    #[inline]
    pub fn into_inner(self) -> T {
        self.value.into_inner()
    }
}

impl<T: ?Sized> SyncUnsafeCell<T> {
    /// Returns a raw mutable pointer to the wrapped value.
    ///
    /// The pointer may be cast to any pointer kind; the caller must guarantee
    /// uniqueness (no active references at all) before producing `&mut T`, and
    /// the absence of mutation or mutable aliasing before producing `&T`.
    #[inline]
    pub const fn get(&self) -> *mut T {
        self.value.get()
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Safe because the exclusive (compile-time) borrow of `self` proves we hold
    /// the only reference.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.value.get_mut()
    }

    /// Returns a raw mutable pointer to the wrapped value without creating any
    /// intermediate reference.
    ///
    /// See [`UnsafeCell::get`] for details.
    #[inline]
    pub const fn raw_get(this: *const Self) -> *mut T {
        // `#[repr(transparent)]` on both `SyncUnsafeCell` and `UnsafeCell` means a
        // pointer to the cell is also a valid pointer to `T`; see `UnsafeCell::raw_get`.
        this as *const T as *mut T
    }

    /// Reinterprets a `&mut T` as a `&mut SyncUnsafeCell<T>`.
    #[inline]
    pub fn from_mut(source: &mut T) -> &mut SyncUnsafeCell<T> {
        let cell = ptr::from_mut(source) as *mut SyncUnsafeCell<T>;
        // SAFETY: `cell` originates from a mutable reference, so it is valid for
        // exclusive access, and `#[repr(transparent)]` guarantees `SyncUnsafeCell<T>`
        // shares `T`'s representation.
        unsafe { &mut *cell }
    }
}

impl<T> SyncUnsafeCell<[T]> {
    /// Views a cell-of-slice as a slice-of-cells.
    pub fn as_slice_of_cells(&self) -> &[SyncUnsafeCell<T>] {
        let cells = ptr::from_ref(self) as *const [SyncUnsafeCell<T>];
        // SAFETY: `UnsafeCell<T>` and `SyncUnsafeCell<T>` are both `#[repr(transparent)]`,
        // therefore:
        // - `SyncUnsafeCell<T>` has the same layout as `T`
        // - `SyncUnsafeCell<[T]>` has the same layout as `[T]`
        // - `SyncUnsafeCell<[T]>` has the same layout as `[SyncUnsafeCell<T>]`
        unsafe { &*cells }
    }
}

impl<T: Default> Default for SyncUnsafeCell<T> {
    /// Creates a new `SyncUnsafeCell` holding `T`'s default value.
    fn default() -> SyncUnsafeCell<T> {
        SyncUnsafeCell::new(T::default())
    }
}

impl<T> From<T> for SyncUnsafeCell<T> {
    /// Creates a new `SyncUnsafeCell<T>` containing the given value.
    fn from(value: T) -> SyncUnsafeCell<T> {
        SyncUnsafeCell::new(value)
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/time/mod.rs
crates/bevy_platform/src/time/mod.rs
//! Provides `Instant` for all platforms. pub use time::Instant; crate::cfg::switch! { crate::cfg::web => { use web_time as time; } crate::cfg::std => { use std::time; } _ => { mod fallback; use fallback as time; } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/time/fallback.rs
crates/bevy_platform/src/time/fallback.rs
//! Provides a fallback implementation of `Instant` from the standard library.

#![expect(
    unsafe_code,
    reason = "Instant fallback requires unsafe to allow users to update the internal value"
)]

use crate::sync::atomic::{AtomicPtr, Ordering};
use core::{
    fmt,
    ops::{Add, AddAssign, Sub, SubAssign},
    time::Duration,
};

// Function pointer consulted by `Instant::now`. Stored type-erased in an
// `AtomicPtr<()>` so it can be swapped at runtime via `Instant::set_elapsed`;
// defaults to the arch-specific `unset_getter` below.
static ELAPSED_GETTER: AtomicPtr<()> = AtomicPtr::new(unset_getter as *mut _);

/// Fallback implementation of `Instant` suitable for a `no_std` environment.
///
/// If you are on any of the following target architectures, this is a drop-in replacement:
///
/// - `x86`
/// - `x86_64`
/// - `aarch64`
///
/// On any other architecture, you must call [`Instant::set_elapsed`], providing a method
/// which when called supplies a monotonically increasing count of elapsed nanoseconds relative
/// to some arbitrary point in time.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Instant(Duration);

impl Instant {
    /// Returns an instant corresponding to "now".
    #[must_use]
    pub fn now() -> Instant {
        let getter = ELAPSED_GETTER.load(Ordering::Acquire);
        // SAFETY: Function pointer is always valid
        let getter = unsafe { core::mem::transmute::<*mut (), fn() -> Duration>(getter) };
        Self((getter)())
    }

    /// Provides a function returning the amount of time that has elapsed since execution began.
    /// The getter provided to this method will be used by [`now`](Instant::now).
    ///
    /// # Safety
    ///
    /// - The function provided must accurately represent the elapsed time.
    /// - The function must preserve all invariants of the [`Instant`] type.
    /// - The pointer to the function must be valid whenever [`Instant::now`] is called.
    pub unsafe fn set_elapsed(getter: fn() -> Duration) {
        ELAPSED_GETTER.store(getter as *mut _, Ordering::Release);
    }

    /// Returns the amount of time elapsed from another instant to this one,
    /// or zero duration if that instant is later than this one.
    #[must_use]
    pub fn duration_since(&self, earlier: Instant) -> Duration {
        // Delegates to the saturating variant, matching std's behavior of never
        // producing a negative duration.
        self.saturating_duration_since(earlier)
    }

    /// Returns the amount of time elapsed from another instant to this one,
    /// or None if that instant is later than this one.
    ///
    /// Due to monotonicity bugs, even under correct logical ordering of the passed `Instant`s,
    /// this method can return `None`.
    #[must_use]
    pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
        self.0.checked_sub(earlier.0)
    }

    /// Returns the amount of time elapsed from another instant to this one,
    /// or zero duration if that instant is later than this one.
    #[must_use]
    pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
        self.0.saturating_sub(earlier.0)
    }

    /// Returns the amount of time elapsed since this instant.
    #[must_use]
    pub fn elapsed(&self) -> Duration {
        Instant::now().saturating_duration_since(*self)
    }

    /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
    /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
    /// otherwise.
    pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
        self.0.checked_add(duration).map(Instant)
    }

    /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
    /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
    /// otherwise.
    pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
        self.0.checked_sub(duration).map(Instant)
    }
}

impl Add<Duration> for Instant {
    type Output = Instant;

    /// # Panics
    ///
    /// This function may panic if the resulting point in time cannot be represented by the
    /// underlying data structure. See [`Instant::checked_add`] for a version without panic.
    fn add(self, other: Duration) -> Instant {
        self.checked_add(other)
            .expect("overflow when adding duration to instant")
    }
}

impl AddAssign<Duration> for Instant {
    fn add_assign(&mut self, other: Duration) {
        *self = *self + other;
    }
}

impl Sub<Duration> for Instant {
    type Output = Instant;

    /// # Panics
    ///
    /// Panics if the result would fall before the zero point of the underlying
    /// data structure. See [`Instant::checked_sub`] for a version without panic.
    fn sub(self, other: Duration) -> Instant {
        self.checked_sub(other)
            .expect("overflow when subtracting duration from instant")
    }
}

impl SubAssign<Duration> for Instant {
    fn sub_assign(&mut self, other: Duration) {
        *self = *self - other;
    }
}

impl Sub<Instant> for Instant {
    type Output = Duration;

    /// Returns the amount of time elapsed from another instant to this one,
    /// or zero duration if that instant is later than this one.
    fn sub(self, other: Instant) -> Duration {
        self.duration_since(other)
    }
}

impl fmt::Debug for Instant {
    // Formats as the inner `Duration` since the arbitrary epoch.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

// Default getter installed in `ELAPSED_GETTER`: reads a raw hardware counter on
// the three supported architectures and panics anywhere else.
//
// NOTE(review): `_rdtsc`/`cntvct_el0` count hardware ticks, not nanoseconds, so
// treating the raw value as nanoseconds only yields an approximate timescale —
// confirm this is the intended trade-off for the fallback.
fn unset_getter() -> Duration {
    crate::cfg::switch! {
        #[cfg(target_arch = "x86")] => {
            // SAFETY: standard technique for getting a nanosecond counter on x86
            let nanos = unsafe {
                core::arch::x86::_rdtsc()
            };
            Duration::from_nanos(nanos)
        }
        #[cfg(target_arch = "x86_64")] => {
            // SAFETY: standard technique for getting a nanosecond counter on x86_64
            let nanos = unsafe {
                core::arch::x86_64::_rdtsc()
            };
            Duration::from_nanos(nanos)
        }
        #[cfg(target_arch = "aarch64")] => {
            // SAFETY: standard technique for getting a nanosecond counter of aarch64
            let nanos = unsafe {
                let mut ticks: u64;
                core::arch::asm!("mrs {}, cntvct_el0", out(reg) ticks);
                ticks
            };
            Duration::from_nanos(nanos)
        }
        _ => {
            panic!("An elapsed time getter has not been provided to `Instant`. Please use `Instant::set_elapsed(...)` before calling `Instant::now()`")
        }
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/collections/hash_map.rs
crates/bevy_platform/src/collections/hash_map.rs
//! Provides [`HashMap`] based on [hashbrown]'s implementation. //! Unlike [`hashbrown::HashMap`], [`HashMap`] defaults to [`FixedHasher`] //! instead of [`RandomState`]. //! This provides determinism by default with an acceptable compromise to denial //! of service resistance in the context of a game engine. use core::{ fmt::Debug, hash::{BuildHasher, Hash}, ops::{Deref, DerefMut, Index}, }; use hashbrown::{hash_map as hb, Equivalent}; use crate::hash::FixedHasher; #[cfg(feature = "rayon")] use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; // Re-exports to match `std::collections::hash_map` pub use { crate::hash::{DefaultHasher, RandomState}, hb::{ Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, OccupiedEntry, VacantEntry, Values, ValuesMut, }, }; // Additional items from `hashbrown` pub use hb::{ EntryRef, ExtractIf, OccupiedError, RawEntryBuilder, RawEntryBuilderMut, RawEntryMut, RawOccupiedEntryMut, }; /// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. pub type Entry<'a, K, V, S = FixedHasher> = hb::Entry<'a, K, V, S>; /// New-type for [`HashMap`](hb::HashMap) with [`FixedHasher`] as the default hashing provider. /// Can be trivially converted to and from a [hashbrown] [`HashMap`](hb::HashMap) using [`From`]. /// /// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashMap::new) /// being incompatible with Bevy's choice of default hasher. /// /// Unlike [`hashbrown::HashMap`], [`HashMap`] defaults to [`FixedHasher`] /// instead of [`RandomState`]. /// This provides determinism by default with an acceptable compromise to denial /// of service resistance in the context of a game engine. 
#[repr(transparent)] pub struct HashMap<K, V, S = FixedHasher>(hb::HashMap<K, V, S>); impl<K, V, S> Clone for HashMap<K, V, S> where hb::HashMap<K, V, S>: Clone, { #[inline] fn clone(&self) -> Self { Self(self.0.clone()) } #[inline] fn clone_from(&mut self, source: &Self) { self.0.clone_from(&source.0); } } impl<K, V, S> Debug for HashMap<K, V, S> where hb::HashMap<K, V, S>: Debug, { #[inline] fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { <hb::HashMap<K, V, S> as Debug>::fmt(&self.0, f) } } impl<K, V, S> Default for HashMap<K, V, S> where hb::HashMap<K, V, S>: Default, { #[inline] fn default() -> Self { Self(Default::default()) } } impl<K, V, S> PartialEq for HashMap<K, V, S> where hb::HashMap<K, V, S>: PartialEq, { #[inline] fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<K, V, S> Eq for HashMap<K, V, S> where hb::HashMap<K, V, S>: Eq {} impl<K, V, S, T> FromIterator<T> for HashMap<K, V, S> where hb::HashMap<K, V, S>: FromIterator<T>, { #[inline] fn from_iter<U: IntoIterator<Item = T>>(iter: U) -> Self { Self(FromIterator::from_iter(iter)) } } impl<K, V, S, T> Index<T> for HashMap<K, V, S> where hb::HashMap<K, V, S>: Index<T>, { type Output = <hb::HashMap<K, V, S> as Index<T>>::Output; #[inline] fn index(&self, index: T) -> &Self::Output { self.0.index(index) } } impl<K, V, S> IntoIterator for HashMap<K, V, S> where hb::HashMap<K, V, S>: IntoIterator, { type Item = <hb::HashMap<K, V, S> as IntoIterator>::Item; type IntoIter = <hb::HashMap<K, V, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a, K, V, S> IntoIterator for &'a HashMap<K, V, S> where &'a hb::HashMap<K, V, S>: IntoIterator, { type Item = <&'a hb::HashMap<K, V, S> as IntoIterator>::Item; type IntoIter = <&'a hb::HashMap<K, V, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { (&self.0).into_iter() } } impl<'a, K, V, S> IntoIterator for &'a mut HashMap<K, V, S> where &'a 
mut hb::HashMap<K, V, S>: IntoIterator, { type Item = <&'a mut hb::HashMap<K, V, S> as IntoIterator>::Item; type IntoIter = <&'a mut hb::HashMap<K, V, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { (&mut self.0).into_iter() } } impl<K, V, S, T> Extend<T> for HashMap<K, V, S> where hb::HashMap<K, V, S>: Extend<T>, { #[inline] fn extend<U: IntoIterator<Item = T>>(&mut self, iter: U) { self.0.extend(iter); } } impl<K, V, const N: usize> From<[(K, V); N]> for HashMap<K, V, FixedHasher> where K: Eq + Hash, { fn from(arr: [(K, V); N]) -> Self { arr.into_iter().collect() } } impl<K, V, S> From<hb::HashMap<K, V, S>> for HashMap<K, V, S> { #[inline] fn from(value: hb::HashMap<K, V, S>) -> Self { Self(value) } } impl<K, V, S> From<HashMap<K, V, S>> for hb::HashMap<K, V, S> { #[inline] fn from(value: HashMap<K, V, S>) -> Self { value.0 } } impl<K, V, S> Deref for HashMap<K, V, S> { type Target = hb::HashMap<K, V, S>; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl<K, V, S> DerefMut for HashMap<K, V, S> { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[cfg(feature = "serialize")] impl<K, V, S> serde::Serialize for HashMap<K, V, S> where hb::HashMap<K, V, S>: serde::Serialize, { #[inline] fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error> where T: serde::Serializer, { self.0.serialize(serializer) } } #[cfg(feature = "serialize")] impl<'de, K, V, S> serde::Deserialize<'de> for HashMap<K, V, S> where hb::HashMap<K, V, S>: serde::Deserialize<'de>, { #[inline] fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { Ok(Self(serde::Deserialize::deserialize(deserializer)?)) } } #[cfg(feature = "rayon")] impl<K, V, S, T> FromParallelIterator<T> for HashMap<K, V, S> where hb::HashMap<K, V, S>: FromParallelIterator<T>, T: Send, { fn from_par_iter<P>(par_iter: P) -> Self where P: IntoParallelIterator<Item = T>, { Self(<hb::HashMap<K, V, S> as 
FromParallelIterator<T>>::from_par_iter(par_iter)) } } #[cfg(feature = "rayon")] impl<K, V, S> IntoParallelIterator for HashMap<K, V, S> where hb::HashMap<K, V, S>: IntoParallelIterator, { type Item = <hb::HashMap<K, V, S> as IntoParallelIterator>::Item; type Iter = <hb::HashMap<K, V, S> as IntoParallelIterator>::Iter; fn into_par_iter(self) -> Self::Iter { self.0.into_par_iter() } } #[cfg(feature = "rayon")] impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a HashMap<K, V, S> where &'a hb::HashMap<K, V, S>: IntoParallelIterator, { type Item = <&'a hb::HashMap<K, V, S> as IntoParallelIterator>::Item; type Iter = <&'a hb::HashMap<K, V, S> as IntoParallelIterator>::Iter; fn into_par_iter(self) -> Self::Iter { (&self.0).into_par_iter() } } #[cfg(feature = "rayon")] impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a mut HashMap<K, V, S> where &'a mut hb::HashMap<K, V, S>: IntoParallelIterator, { type Item = <&'a mut hb::HashMap<K, V, S> as IntoParallelIterator>::Item; type Iter = <&'a mut hb::HashMap<K, V, S> as IntoParallelIterator>::Iter; fn into_par_iter(self) -> Self::Iter { (&mut self.0).into_par_iter() } } #[cfg(feature = "rayon")] impl<K, V, S, T> ParallelExtend<T> for HashMap<K, V, S> where hb::HashMap<K, V, S>: ParallelExtend<T>, T: Send, { fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>, { <hb::HashMap<K, V, S> as ParallelExtend<T>>::par_extend(&mut self.0, par_iter); } } impl<K, V> HashMap<K, V, FixedHasher> { /// Creates an empty [`HashMap`]. /// /// Refer to [`new`](hb::HashMap::new) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// // Creates a HashMap with zero capacity. 
/// let map = HashMap::new(); /// # /// # let mut map = map; /// # map.insert(0usize, "foo"); /// # assert_eq!(map.get(&0), Some("foo").as_ref()); /// ``` #[inline] pub const fn new() -> Self { Self::with_hasher(FixedHasher) } /// Creates an empty [`HashMap`] with the specified capacity. /// /// Refer to [`with_capacity`](hb::HashMap::with_capacity) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// // Creates a HashMap with capacity for at least 5 entries. /// let map = HashMap::with_capacity(5); /// # /// # let mut map = map; /// # map.insert(0usize, "foo"); /// # assert_eq!(map.get(&0), Some("foo").as_ref()); /// ``` #[inline] pub fn with_capacity(capacity: usize) -> Self { Self::with_capacity_and_hasher(capacity, FixedHasher) } } impl<K, V, S> HashMap<K, V, S> { /// Creates an empty [`HashMap`] which will use the given hash builder to hash /// keys. /// /// Refer to [`with_hasher`](hb::HashMap::with_hasher) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # use bevy_platform::hash::FixedHasher as SomeHasher; /// // Creates a HashMap with the provided hasher. /// let map = HashMap::with_hasher(SomeHasher); /// # /// # let mut map = map; /// # map.insert(0usize, "foo"); /// # assert_eq!(map.get(&0), Some("foo").as_ref()); /// ``` #[inline] pub const fn with_hasher(hash_builder: S) -> Self { Self(hb::HashMap::with_hasher(hash_builder)) } /// Creates an empty [`HashMap`] with the specified capacity, using `hash_builder` /// to hash the keys. /// /// Refer to [`with_capacity_and_hasher`](hb::HashMap::with_capacity_and_hasher) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # use bevy_platform::hash::FixedHasher as SomeHasher; /// // Creates a HashMap with capacity for 5 entries and the provided hasher. 
/// let map = HashMap::with_capacity_and_hasher(5, SomeHasher); /// # /// # let mut map = map; /// # map.insert(0usize, "foo"); /// # assert_eq!(map.get(&0), Some("foo").as_ref()); /// ``` #[inline] pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { Self(hb::HashMap::with_capacity_and_hasher( capacity, hash_builder, )) } /// Returns a reference to the map's [`BuildHasher`], or `S` parameter. /// /// Refer to [`hasher`](hb::HashMap::hasher) for further details. #[inline] pub fn hasher(&self) -> &S { self.0.hasher() } /// Returns the number of elements the map can hold without reallocating. /// /// Refer to [`capacity`](hb::HashMap::capacity) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let map = HashMap::with_capacity(5); /// /// # let map: HashMap<(), ()> = map; /// # /// assert!(map.capacity() >= 5); /// ``` #[inline] pub fn capacity(&self) -> usize { self.0.capacity() } /// An iterator visiting all keys in arbitrary order. /// The iterator element type is `&'a K`. /// /// Refer to [`keys`](hb::HashMap::keys) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for key in map.keys() { /// // foo, bar, baz /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.keys().count(), 3); /// ``` #[inline] pub fn keys(&self) -> Keys<'_, K, V> { self.0.keys() } /// An iterator visiting all values in arbitrary order. /// The iterator element type is `&'a V`. /// /// Refer to [`values`](hb::HashMap::values) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for key in map.values() { /// // 0, 1, 2 /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.values().count(), 3); /// ``` #[inline] pub fn values(&self) -> Values<'_, K, V> { self.0.values() } /// An iterator visiting all values mutably in arbitrary order. /// The iterator element type is `&'a mut V`. /// /// Refer to [`values`](hb::HashMap::values) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for key in map.values_mut() { /// // 0, 1, 2 /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.values_mut().count(), 3); /// ``` #[inline] pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { self.0.values_mut() } /// An iterator visiting all key-value pairs in arbitrary order. /// The iterator element type is `(&'a K, &'a V)`. /// /// Refer to [`iter`](hb::HashMap::iter) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for (key, value) in map.iter() { /// // ("foo", 0), ("bar", 1), ("baz", 2) /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.iter().count(), 3); /// ``` #[inline] pub fn iter(&self) -> Iter<'_, K, V> { self.0.iter() } /// An iterator visiting all key-value pairs in arbitrary order, /// with mutable references to the values. /// The iterator element type is `(&'a K, &'a mut V)`. /// /// Refer to [`iter_mut`](hb::HashMap::iter_mut) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for (key, value) in map.iter_mut() { /// // ("foo", 0), ("bar", 1), ("baz", 2) /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.iter_mut().count(), 3); /// ``` #[inline] pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { self.0.iter_mut() } /// Returns the number of elements in the map. /// /// Refer to [`len`](hb::HashMap::len) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// assert_eq!(map.len(), 0); /// /// map.insert("foo", 0); /// /// assert_eq!(map.len(), 1); /// ``` #[inline] pub fn len(&self) -> usize { self.0.len() } /// Returns `true` if the map contains no elements. /// /// Refer to [`is_empty`](hb::HashMap::is_empty) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// assert!(map.is_empty()); /// /// map.insert("foo", 0); /// /// assert!(!map.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Clears the map, returning all key-value pairs as an iterator. Keeps the /// allocated memory for reuse. /// /// Refer to [`drain`](hb::HashMap::drain) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for (key, value) in map.drain() { /// // ("foo", 0), ("bar", 1), ("baz", 2) /// // Note that the above order is not guaranteed /// } /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn drain(&mut self) -> Drain<'_, K, V> { self.0.drain() } /// Retains only the elements specified by the predicate. 
Keeps the /// allocated memory for reuse. /// /// Refer to [`retain`](hb::HashMap::retain) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// map.retain(|key, value| *value == 2); /// /// assert_eq!(map.len(), 1); /// ``` #[inline] pub fn retain<F>(&mut self, f: F) where F: FnMut(&K, &mut V) -> bool, { self.0.retain(f); } /// Drains elements which are true under the given predicate, /// and returns an iterator over the removed items. /// /// Refer to [`extract_if`](hb::HashMap::extract_if) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// let extracted = map /// .extract_if(|key, value| *value == 2) /// .collect::<Vec<_>>(); /// /// assert_eq!(map.len(), 2); /// assert_eq!(extracted.len(), 1); /// ``` #[inline] pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool, { self.0.extract_if(f) } /// Clears the map, removing all key-value pairs. Keeps the allocated memory /// for reuse. /// /// Refer to [`clear`](hb::HashMap::clear) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// map.clear(); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn clear(&mut self) { self.0.clear(); } /// Creates a consuming iterator visiting all the keys in arbitrary order. /// The map cannot be used after calling this. /// The iterator element type is `K`. /// /// Refer to [`into_keys`](hb::HashMap::into_keys) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for key in map.into_keys() { /// // "foo", "bar", "baz" /// // Note that the above order is not guaranteed /// } /// ``` #[inline] pub fn into_keys(self) -> IntoKeys<K, V> { self.0.into_keys() } /// Creates a consuming iterator visiting all the values in arbitrary order. /// The map cannot be used after calling this. /// The iterator element type is `V`. /// /// Refer to [`into_values`](hb::HashMap::into_values) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// # /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// for key in map.into_values() { /// // 0, 1, 2 /// // Note that the above order is not guaranteed /// } /// ``` #[inline] pub fn into_values(self) -> IntoValues<K, V> { self.0.into_values() } /// Takes the inner [`HashMap`](hb::HashMap) out of this wrapper. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let map: HashMap<&'static str, usize> = HashMap::new(); /// let map: hashbrown::HashMap<&'static str, usize, _> = map.into_inner(); /// ``` #[inline] pub fn into_inner(self) -> hb::HashMap<K, V, S> { self.0 } } impl<K, V, S> HashMap<K, V, S> where K: Eq + Hash, S: BuildHasher, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the [`HashMap`]. The collection may reserve more space to avoid /// frequent reallocations. /// /// Refer to [`reserve`](hb::HashMap::reserve) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::with_capacity(5); /// /// # let mut map: HashMap<(), ()> = map; /// # /// assert!(map.capacity() >= 5); /// /// map.reserve(10); /// /// assert!(map.capacity() - map.len() >= 10); /// ``` #[inline] pub fn reserve(&mut self, additional: usize) { self.0.reserve(additional); } /// Tries to reserve capacity for at least `additional` more elements to be inserted /// in the given `HashMap<K,V>`. The collection may reserve more space to avoid /// frequent reallocations. /// /// Refer to [`try_reserve`](hb::HashMap::try_reserve) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::with_capacity(5); /// /// # let mut map: HashMap<(), ()> = map; /// # /// assert!(map.capacity() >= 5); /// /// map.try_reserve(10).expect("Out of Memory!"); /// /// assert!(map.capacity() - map.len() >= 10); /// ``` #[inline] pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { self.0.try_reserve(additional) } /// Shrinks the capacity of the map as much as possible. It will drop /// down as much as possible while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// Refer to [`shrink_to_fit`](hb::HashMap::shrink_to_fit) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::with_capacity(5); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// assert!(map.capacity() >= 5); /// /// map.shrink_to_fit(); /// /// assert_eq!(map.capacity(), 3); /// ``` #[inline] pub fn shrink_to_fit(&mut self) { self.0.shrink_to_fit(); } /// Shrinks the capacity of the map with a lower limit. 
It will drop /// down no lower than the supplied limit while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// Refer to [`shrink_to`](hb::HashMap::shrink_to) for further details. #[inline] pub fn shrink_to(&mut self, min_capacity: usize) { self.0.shrink_to(min_capacity); } /// Gets the given key's corresponding entry in the map for in-place manipulation. /// /// Refer to [`entry`](hb::HashMap::entry) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// let value = map.entry("foo").or_insert(0); /// # /// # assert_eq!(*value, 0); /// ``` #[inline] pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S> { self.0.entry(key) } /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. /// /// Refer to [`entry_ref`](hb::HashMap::entry_ref) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// # let mut map: HashMap<&'static str, usize> = map; /// /// let value = map.entry_ref("foo").or_insert(0); /// # /// # assert_eq!(*value, 0); /// ``` #[inline] pub fn entry_ref<'a, 'b, Q>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S> where Q: Hash + Equivalent<K> + ?Sized, { self.0.entry_ref(key) } /// Returns a reference to the value corresponding to the key. /// /// Refer to [`get`](hb::HashMap::get) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.get("foo"), Some(&0)); /// ``` #[inline] pub fn get<Q>(&self, k: &Q) -> Option<&V> where Q: Hash + Equivalent<K> + ?Sized, { self.0.get(k) } /// Returns the key-value pair corresponding to the supplied key. /// /// Refer to [`get_key_value`](hb::HashMap::get_key_value) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.get_key_value("foo"), Some((&"foo", &0))); /// ``` #[inline] pub fn get_key_value<Q>(&self, k: &Q) -> Option<(&K, &V)> where Q: Hash + Equivalent<K> + ?Sized, { self.0.get_key_value(k) } /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. /// /// Refer to [`get_key_value_mut`](hb::HashMap::get_key_value_mut) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.get_key_value_mut("foo"), Some((&"foo", &mut 0))); /// ``` #[inline] pub fn get_key_value_mut<Q>(&mut self, k: &Q) -> Option<(&K, &mut V)> where Q: Hash + Equivalent<K> + ?Sized, { self.0.get_key_value_mut(k) } /// Returns `true` if the map contains a value for the specified key. /// /// Refer to [`contains_key`](hb::HashMap::contains_key) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert!(map.contains_key("foo")); /// ``` #[inline] pub fn contains_key<Q>(&self, k: &Q) -> bool where Q: Hash + Equivalent<K> + ?Sized, { self.0.contains_key(k) } /// Returns a mutable reference to the value corresponding to the key. /// /// Refer to [`get_mut`](hb::HashMap::get_mut) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.get_mut("foo"), Some(&mut 0)); /// ``` #[inline] pub fn get_mut<Q>(&mut self, k: &Q) -> Option<&mut V> where Q: Hash + Equivalent<K> + ?Sized, { self.0.get_mut(k) } /// Attempts to get mutable references to `N` values in the map at once. 
/// /// Refer to [`get_disjoint_mut`](hb::HashMap::get_disjoint_mut) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// let result = map.get_disjoint_mut(["foo", "bar"]); /// /// assert_eq!(result, [Some(&mut 0), Some(&mut 1)]); /// ``` #[inline] pub fn get_disjoint_mut<Q, const N: usize>(&mut self, ks: [&Q; N]) -> [Option<&'_ mut V>; N] where Q: Hash + Equivalent<K> + ?Sized, { self.0.get_disjoint_mut(ks) } /// Attempts to get mutable references to `N` values in the map at once, with immutable /// references to the corresponding keys. /// /// Refer to [`get_disjoint_key_value_mut`](hb::HashMap::get_disjoint_key_value_mut) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// map.insert("bar", 1); /// map.insert("baz", 2); /// /// let result = map.get_disjoint_key_value_mut(["foo", "bar"]); /// /// assert_eq!(result, [Some((&"foo", &mut 0)), Some((&"bar", &mut 1))]); /// ``` #[inline] pub fn get_disjoint_key_value_mut<Q, const N: usize>( &mut self, ks: [&Q; N], ) -> [Option<(&'_ K, &'_ mut V)>; N] where Q: Hash + Equivalent<K> + ?Sized, { self.0.get_disjoint_key_value_mut(ks) } /// Inserts a key-value pair into the map. /// /// Refer to [`insert`](hb::HashMap::insert) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.get("foo"), Some(&0)); /// ``` #[inline] pub fn insert(&mut self, k: K, v: V) -> Option<V> { self.0.insert(k, v) } /// Tries to insert a key-value pair into the map, and returns /// a mutable reference to the value in the entry. /// /// Refer to [`try_insert`](hb::HashMap::try_insert) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.try_insert("foo", 0).unwrap(); /// /// assert!(map.try_insert("foo", 1).is_err()); /// ``` #[inline] pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V, S>> { self.0.try_insert(key, value) } /// Removes a key from the map, returning the value at the key if the key /// was previously in the map. Keeps the allocated memory for reuse. /// /// Refer to [`remove`](hb::HashMap::remove) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.remove("foo"), Some(0)); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn remove<Q>(&mut self, k: &Q) -> Option<V> where Q: Hash + Equivalent<K> + ?Sized, { self.0.remove(k) } /// Removes a key from the map, returning the stored key and value if the /// key was previously in the map. Keeps the allocated memory for reuse. /// /// Refer to [`remove_entry`](hb::HashMap::remove_entry) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// map.insert("foo", 0); /// /// assert_eq!(map.remove_entry("foo"), Some(("foo", 0))); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn remove_entry<Q>(&mut self, k: &Q) -> Option<(K, V)> where Q: Hash + Equivalent<K> + ?Sized, { self.0.remove_entry(k) } /// Returns the total amount of memory allocated internally by the hash /// set, in bytes. /// /// Refer to [`allocation_size`](hb::HashMap::allocation_size) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashMap; /// let mut map = HashMap::new(); /// /// assert_eq!(map.allocation_size(), 0); ///
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
true
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/collections/hash_set.rs
crates/bevy_platform/src/collections/hash_set.rs
//! Provides [`HashSet`] based on [hashbrown]'s implementation. //! Unlike [`hashbrown::HashSet`], [`HashSet`] defaults to [`FixedHasher`] //! instead of [`RandomState`](crate::hash::RandomState). //! This provides determinism by default with an acceptable compromise to denial //! of service resistance in the context of a game engine. use core::{ fmt::Debug, hash::{BuildHasher, Hash}, ops::{ BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Sub, SubAssign, }, }; use hashbrown::{hash_set as hb, Equivalent}; use crate::hash::FixedHasher; #[cfg(feature = "rayon")] use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; // Re-exports to match `std::collections::hash_set` pub use hb::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union}; // Additional items from `hashbrown` pub use hb::{ExtractIf, OccupiedEntry, VacantEntry}; /// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. pub type Entry<'a, T, S = FixedHasher> = hb::Entry<'a, T, S>; /// New-type for [`HashSet`](hb::HashSet) with [`FixedHasher`] as the default hashing provider. /// Can be trivially converted to and from a [hashbrown] [`HashSet`](hb::HashSet) using [`From`]. /// /// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashSet::new) /// being incompatible with Bevy's choice of default hasher. /// /// Unlike [`hashbrown::HashSet`], [`HashSet`] defaults to [`FixedHasher`] /// instead of [`RandomState`](crate::hash::RandomState). /// This provides determinism by default with an acceptable compromise to denial /// of service resistance in the context of a game engine. 
#[repr(transparent)] pub struct HashSet<T, S = FixedHasher>(hb::HashSet<T, S>); impl<T, S> Clone for HashSet<T, S> where hb::HashSet<T, S>: Clone, { #[inline] fn clone(&self) -> Self { Self(self.0.clone()) } #[inline] fn clone_from(&mut self, source: &Self) { self.0.clone_from(&source.0); } } impl<T, S> Debug for HashSet<T, S> where hb::HashSet<T, S>: Debug, { #[inline] fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { <hb::HashSet<T, S> as Debug>::fmt(&self.0, f) } } impl<T, S> Default for HashSet<T, S> where hb::HashSet<T, S>: Default, { #[inline] fn default() -> Self { Self(Default::default()) } } impl<T, S> PartialEq for HashSet<T, S> where hb::HashSet<T, S>: PartialEq, { #[inline] fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T, S> Eq for HashSet<T, S> where hb::HashSet<T, S>: Eq {} impl<T, S, X> FromIterator<X> for HashSet<T, S> where hb::HashSet<T, S>: FromIterator<X>, { #[inline] fn from_iter<U: IntoIterator<Item = X>>(iter: U) -> Self { Self(FromIterator::from_iter(iter)) } } impl<T, S> IntoIterator for HashSet<T, S> where hb::HashSet<T, S>: IntoIterator, { type Item = <hb::HashSet<T, S> as IntoIterator>::Item; type IntoIter = <hb::HashSet<T, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a, T, S> IntoIterator for &'a HashSet<T, S> where &'a hb::HashSet<T, S>: IntoIterator, { type Item = <&'a hb::HashSet<T, S> as IntoIterator>::Item; type IntoIter = <&'a hb::HashSet<T, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { (&self.0).into_iter() } } impl<'a, T, S> IntoIterator for &'a mut HashSet<T, S> where &'a mut hb::HashSet<T, S>: IntoIterator, { type Item = <&'a mut hb::HashSet<T, S> as IntoIterator>::Item; type IntoIter = <&'a mut hb::HashSet<T, S> as IntoIterator>::IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { (&mut self.0).into_iter() } } impl<T, S, X> Extend<X> for HashSet<T, S> where hb::HashSet<T, S>: 
Extend<X>, { #[inline] fn extend<U: IntoIterator<Item = X>>(&mut self, iter: U) { self.0.extend(iter); } } impl<T, const N: usize> From<[T; N]> for HashSet<T, FixedHasher> where T: Eq + Hash, { fn from(value: [T; N]) -> Self { value.into_iter().collect() } } impl<T, S> From<crate::collections::HashMap<T, (), S>> for HashSet<T, S> { #[inline] fn from(value: crate::collections::HashMap<T, (), S>) -> Self { Self(hb::HashSet::from(hashbrown::HashMap::from(value))) } } impl<T, S> From<hb::HashSet<T, S>> for HashSet<T, S> { #[inline] fn from(value: hb::HashSet<T, S>) -> Self { Self(value) } } impl<T, S> From<HashSet<T, S>> for hb::HashSet<T, S> { #[inline] fn from(value: HashSet<T, S>) -> Self { value.0 } } impl<T, S> Deref for HashSet<T, S> { type Target = hb::HashSet<T, S>; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl<T, S> DerefMut for HashSet<T, S> { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[cfg(feature = "serialize")] impl<T, S> serde::Serialize for HashSet<T, S> where hb::HashSet<T, S>: serde::Serialize, { #[inline] fn serialize<U>(&self, serializer: U) -> Result<U::Ok, U::Error> where U: serde::Serializer, { self.0.serialize(serializer) } } #[cfg(feature = "serialize")] impl<'de, T, S> serde::Deserialize<'de> for HashSet<T, S> where hb::HashSet<T, S>: serde::Deserialize<'de>, { #[inline] fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { Ok(Self(serde::Deserialize::deserialize(deserializer)?)) } } #[cfg(feature = "rayon")] impl<T, S, U> FromParallelIterator<U> for HashSet<T, S> where hb::HashSet<T, S>: FromParallelIterator<U>, U: Send, { fn from_par_iter<P>(par_iter: P) -> Self where P: IntoParallelIterator<Item = U>, { Self(<hb::HashSet<T, S> as FromParallelIterator<U>>::from_par_iter(par_iter)) } } #[cfg(feature = "rayon")] impl<T, S> IntoParallelIterator for HashSet<T, S> where hb::HashSet<T, S>: IntoParallelIterator, { type Item = <hb::HashSet<T, S> as 
IntoParallelIterator>::Item; type Iter = <hb::HashSet<T, S> as IntoParallelIterator>::Iter; fn into_par_iter(self) -> Self::Iter { self.0.into_par_iter() } } #[cfg(feature = "rayon")] impl<'a, T: Sync, S> IntoParallelIterator for &'a HashSet<T, S> where &'a hb::HashSet<T, S>: IntoParallelIterator, { type Item = <&'a hb::HashSet<T, S> as IntoParallelIterator>::Item; type Iter = <&'a hb::HashSet<T, S> as IntoParallelIterator>::Iter; fn into_par_iter(self) -> Self::Iter { (&self.0).into_par_iter() } } #[cfg(feature = "rayon")] impl<T, S, U> ParallelExtend<U> for HashSet<T, S> where hb::HashSet<T, S>: ParallelExtend<U>, U: Send, { fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = U>, { <hb::HashSet<T, S> as ParallelExtend<U>>::par_extend(&mut self.0, par_iter); } } impl<T> HashSet<T, FixedHasher> { /// Creates an empty [`HashSet`]. /// /// Refer to [`new`](hb::HashSet::new) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// // Creates a HashSet with zero capacity. /// let map = HashSet::new(); /// # /// # let mut map = map; /// # map.insert("foo"); /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); /// ``` #[inline] pub const fn new() -> Self { Self::with_hasher(FixedHasher) } /// Creates an empty [`HashSet`] with the specified capacity. /// /// Refer to [`with_capacity`](hb::HashSet::with_capacity) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// // Creates a HashSet with capacity for at least 5 entries. /// let map = HashSet::with_capacity(5); /// # /// # let mut map = map; /// # map.insert("foo"); /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); /// ``` #[inline] pub fn with_capacity(capacity: usize) -> Self { Self::with_capacity_and_hasher(capacity, FixedHasher) } } impl<T, S> HashSet<T, S> { /// Returns the number of elements the set can hold without reallocating. 
/// /// Refer to [`capacity`](hb::HashSet::capacity) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let map = HashSet::with_capacity(5); /// /// # let map: HashSet<()> = map; /// # /// assert!(map.capacity() >= 5); /// ``` #[inline] pub fn capacity(&self) -> usize { self.0.capacity() } /// An iterator visiting all elements in arbitrary order. /// The iterator element type is `&'a T`. /// /// Refer to [`iter`](hb::HashSet::iter) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// for value in map.iter() { /// // "foo", "bar", "baz" /// // Note that the above order is not guaranteed /// } /// # /// # assert_eq!(map.iter().count(), 3); /// ``` #[inline] pub fn iter(&self) -> Iter<'_, T> { self.0.iter() } /// Returns the number of elements in the set. /// /// Refer to [`len`](hb::HashSet::len) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// assert_eq!(map.len(), 0); /// /// map.insert("foo"); /// /// assert_eq!(map.len(), 1); /// ``` #[inline] pub fn len(&self) -> usize { self.0.len() } /// Returns `true` if the set contains no elements. /// /// Refer to [`is_empty`](hb::HashSet::is_empty) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// assert!(map.is_empty()); /// /// map.insert("foo"); /// /// assert!(!map.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Clears the set, returning all elements in an iterator. /// /// Refer to [`drain`](hb::HashSet::drain) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// for value in map.drain() { /// // "foo", "bar", "baz" /// // Note that the above order is not guaranteed /// } /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn drain(&mut self) -> Drain<'_, T> { self.0.drain() } /// Retains only the elements specified by the predicate. /// /// Refer to [`retain`](hb::HashSet::retain) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// map.retain(|value| *value == "baz"); /// /// assert_eq!(map.len(), 1); /// ``` #[inline] pub fn retain<F>(&mut self, f: F) where F: FnMut(&T) -> bool, { self.0.retain(f); } /// Drains elements which are true under the given predicate, /// and returns an iterator over the removed items. /// /// Refer to [`extract_if`](hb::HashSet::extract_if) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// let extracted = map /// .extract_if(|value| *value == "baz") /// .collect::<Vec<_>>(); /// /// assert_eq!(map.len(), 2); /// assert_eq!(extracted.len(), 1); /// ``` #[inline] pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F> where F: FnMut(&T) -> bool, { self.0.extract_if(f) } /// Clears the set, removing all values. /// /// Refer to [`clear`](hb::HashSet::clear) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// map.clear(); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn clear(&mut self) { self.0.clear(); } /// Creates a new empty hash set which will use the given hasher to hash /// keys. /// /// Refer to [`with_hasher`](hb::HashSet::with_hasher) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # use bevy_platform::hash::FixedHasher as SomeHasher; /// // Creates a HashSet with the provided hasher. /// let map = HashSet::with_hasher(SomeHasher); /// # /// # let mut map = map; /// # map.insert("foo"); /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); /// ``` #[inline] pub const fn with_hasher(hasher: S) -> Self { Self(hb::HashSet::with_hasher(hasher)) } /// Creates an empty [`HashSet`] with the specified capacity, using /// `hasher` to hash the keys. /// /// Refer to [`with_capacity_and_hasher`](hb::HashSet::with_capacity_and_hasher) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// # use bevy_platform::hash::FixedHasher as SomeHasher; /// // Creates a HashSet with capacity for 5 entries and the provided hasher. /// let map = HashSet::with_capacity_and_hasher(5, SomeHasher); /// # /// # let mut map = map; /// # map.insert("foo"); /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); /// ``` #[inline] pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { Self(hb::HashSet::with_capacity_and_hasher(capacity, hasher)) } /// Returns a reference to the set's [`BuildHasher`]. /// /// Refer to [`hasher`](hb::HashSet::hasher) for further details. #[inline] pub fn hasher(&self) -> &S { self.0.hasher() } /// Takes the inner [`HashSet`](hb::HashSet) out of this wrapper. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let map: HashSet<&'static str> = HashSet::new(); /// let map: hashbrown::HashSet<&'static str, _> = map.into_inner(); /// ``` #[inline] pub fn into_inner(self) -> hb::HashSet<T, S> { self.0 } } impl<T, S> HashSet<T, S> where T: Eq + Hash, S: BuildHasher, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the [`HashSet`]. The collection may reserve more space to avoid /// frequent reallocations. /// /// Refer to [`reserve`](hb::HashSet::reserve) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::with_capacity(5); /// /// # let mut map: HashSet<()> = map; /// # /// assert!(map.capacity() >= 5); /// /// map.reserve(10); /// /// assert!(map.capacity() - map.len() >= 10); /// ``` #[inline] pub fn reserve(&mut self, additional: usize) { self.0.reserve(additional); } /// Tries to reserve capacity for at least `additional` more elements to be inserted /// in the given `HashSet<K,V>`. The collection may reserve more space to avoid /// frequent reallocations. /// /// Refer to [`try_reserve`](hb::HashSet::try_reserve) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::with_capacity(5); /// /// # let mut map: HashSet<()> = map; /// # /// assert!(map.capacity() >= 5); /// /// map.try_reserve(10).expect("Out of Memory!"); /// /// assert!(map.capacity() - map.len() >= 10); /// ``` #[inline] pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { self.0.try_reserve(additional) } /// Shrinks the capacity of the set as much as possible. It will drop /// down as much as possible while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// Refer to [`shrink_to_fit`](hb::HashSet::shrink_to_fit) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::with_capacity(5); /// /// map.insert("foo"); /// map.insert("bar"); /// map.insert("baz"); /// /// assert!(map.capacity() >= 5); /// /// map.shrink_to_fit(); /// /// assert_eq!(map.capacity(), 3); /// ``` #[inline] pub fn shrink_to_fit(&mut self) { self.0.shrink_to_fit(); } /// Shrinks the capacity of the set with a lower limit. It will drop /// down no lower than the supplied limit while maintaining the internal rules /// and possibly leaving some space in accordance with the resize policy. /// /// Refer to [`shrink_to`](hb::HashSet::shrink_to) for further details. #[inline] pub fn shrink_to(&mut self, min_capacity: usize) { self.0.shrink_to(min_capacity); } /// Visits the values representing the difference, /// i.e., the values that are in `self` but not in `other`. /// /// Refer to [`difference`](hb::HashSet::difference) for further details. #[inline] pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S> { self.0.difference(other) } /// Visits the values representing the symmetric difference, /// i.e., the values that are in `self` or in `other` but not in both. /// /// Refer to [`symmetric_difference`](hb::HashSet::symmetric_difference) for further details. #[inline] pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S> { self.0.symmetric_difference(other) } /// Visits the values representing the intersection, /// i.e., the values that are both in `self` and `other`. /// /// Refer to [`intersection`](hb::HashSet::intersection) for further details. #[inline] pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S> { self.0.intersection(other) } /// Visits the values representing the union, /// i.e., all the values in `self` or `other`, without duplicates. /// /// Refer to [`union`](hb::HashSet::union) for further details. 
#[inline] pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S> { self.0.union(other) } /// Returns `true` if the set contains a value. /// /// Refer to [`contains`](hb::HashSet::contains) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert!(map.contains("foo")); /// ``` #[inline] pub fn contains<Q>(&self, value: &Q) -> bool where Q: Hash + Equivalent<T> + ?Sized, { self.0.contains(value) } /// Returns a reference to the value in the set, if any, that is equal to the given value. /// /// Refer to [`get`](hb::HashSet::get) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert_eq!(map.get("foo"), Some(&"foo")); /// ``` #[inline] pub fn get<Q>(&self, value: &Q) -> Option<&T> where Q: Hash + Equivalent<T> + ?Sized, { self.0.get(value) } /// Inserts the given `value` into the set if it is not present, then /// returns a reference to the value in the set. /// /// Refer to [`get_or_insert`](hb::HashSet::get_or_insert) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// assert_eq!(map.get_or_insert("foo"), &"foo"); /// ``` #[inline] pub fn get_or_insert(&mut self, value: T) -> &T { self.0.get_or_insert(value) } /// Inserts a value computed from `f` into the set if the given `value` is /// not present, then returns a reference to the value in the set. /// /// Refer to [`get_or_insert_with`](hb::HashSet::get_or_insert_with) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// assert_eq!(map.get_or_insert_with(&"foo", |_| "foo"), &"foo"); /// ``` #[inline] pub fn get_or_insert_with<Q, F>(&mut self, value: &Q, f: F) -> &T where Q: Hash + Equivalent<T> + ?Sized, F: FnOnce(&Q) -> T, { self.0.get_or_insert_with(value, f) } /// Gets the given value's corresponding entry in the set for in-place manipulation. /// /// Refer to [`entry`](hb::HashSet::entry) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// let value = map.entry("foo").or_insert(); /// # /// # assert_eq!(value, ()); /// ``` #[inline] pub fn entry(&mut self, value: T) -> Entry<'_, T, S> { self.0.entry(value) } /// Returns `true` if `self` has no elements in common with `other`. /// This is equivalent to checking for an empty intersection. /// /// Refer to [`is_disjoint`](hb::HashSet::is_disjoint) for further details. #[inline] pub fn is_disjoint(&self, other: &Self) -> bool { self.0.is_disjoint(other) } /// Returns `true` if the set is a subset of another, /// i.e., `other` contains at least all the values in `self`. /// /// Refer to [`is_subset`](hb::HashSet::is_subset) for further details. #[inline] pub fn is_subset(&self, other: &Self) -> bool { self.0.is_subset(other) } /// Returns `true` if the set is a superset of another, /// i.e., `self` contains at least all the values in `other`. /// /// Refer to [`is_superset`](hb::HashSet::is_superset) for further details. #[inline] pub fn is_superset(&self, other: &Self) -> bool { self.0.is_superset(other) } /// Adds a value to the set. /// /// Refer to [`insert`](hb::HashSet::insert) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert!(map.contains("foo")); /// ``` #[inline] pub fn insert(&mut self, value: T) -> bool { self.0.insert(value) } /// Adds a value to the set, replacing the existing value, if any, that is equal to the given /// one. Returns the replaced value. /// /// Refer to [`replace`](hb::HashSet::replace) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert_eq!(map.replace("foo"), Some("foo")); /// ``` #[inline] pub fn replace(&mut self, value: T) -> Option<T> { self.0.replace(value) } /// Removes a value from the set. Returns whether the value was /// present in the set. /// /// Refer to [`remove`](hb::HashSet::remove) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert!(map.remove("foo")); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn remove<Q>(&mut self, value: &Q) -> bool where Q: Hash + Equivalent<T> + ?Sized, { self.0.remove(value) } /// Removes and returns the value in the set, if any, that is equal to the given one. /// /// Refer to [`take`](hb::HashSet::take) for further details. /// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// map.insert("foo"); /// /// assert_eq!(map.take("foo"), Some("foo")); /// /// assert!(map.is_empty()); /// ``` #[inline] pub fn take<Q>(&mut self, value: &Q) -> Option<T> where Q: Hash + Equivalent<T> + ?Sized, { self.0.take(value) } /// Returns the total amount of memory allocated internally by the hash /// set, in bytes. /// /// Refer to [`allocation_size`](hb::HashSet::allocation_size) for further details. 
/// /// # Examples /// /// ```rust /// # use bevy_platform::collections::HashSet; /// let mut map = HashSet::new(); /// /// assert_eq!(map.allocation_size(), 0); /// /// map.insert("foo"); /// /// assert!(map.allocation_size() >= size_of::<&'static str>()); /// ``` #[inline] pub fn allocation_size(&self) -> usize { self.0.allocation_size() } /// Insert a value the set without checking if the value already exists in the set. /// /// Refer to [`insert_unique_unchecked`](hb::HashSet::insert_unique_unchecked) for further details. /// /// # Safety /// /// This operation is safe if a value does not exist in the set. /// /// However, if a value exists in the set already, the behavior is unspecified: /// this operation may panic, loop forever, or any following operation with the set /// may panic, loop forever or return arbitrary result. /// /// That said, this operation (and following operations) are guaranteed to /// not violate memory safety. /// /// However this operation is still unsafe because the resulting `HashSet` /// may be passed to unsafe code which does expect the set to behave /// correctly, and would cause unsoundness as a result. #[expect( unsafe_code, reason = "re-exporting unsafe method from Hashbrown requires unsafe code" )] #[inline] pub unsafe fn insert_unique_unchecked(&mut self, value: T) -> &T { // SAFETY: safety contract is ensured by the caller. unsafe { self.0.insert_unique_unchecked(value) } } } impl<T, S> BitOr<&HashSet<T, S>> for &HashSet<T, S> where for<'a> &'a hb::HashSet<T, S>: BitOr<&'a hb::HashSet<T, S>, Output = hb::HashSet<T, S>>, { type Output = HashSet<T, S>; /// Returns the union of `self` and `rhs` as a new `HashSet<T, S>`. 
#[inline] fn bitor(self, rhs: &HashSet<T, S>) -> HashSet<T, S> { HashSet(self.0.bitor(&rhs.0)) } } impl<T, S> BitAnd<&HashSet<T, S>> for &HashSet<T, S> where for<'a> &'a hb::HashSet<T, S>: BitAnd<&'a hb::HashSet<T, S>, Output = hb::HashSet<T, S>>, { type Output = HashSet<T, S>; /// Returns the intersection of `self` and `rhs` as a new `HashSet<T, S>`. #[inline] fn bitand(self, rhs: &HashSet<T, S>) -> HashSet<T, S> { HashSet(self.0.bitand(&rhs.0)) } } impl<T, S> BitXor<&HashSet<T, S>> for &HashSet<T, S> where for<'a> &'a hb::HashSet<T, S>: BitXor<&'a hb::HashSet<T, S>, Output = hb::HashSet<T, S>>, { type Output = HashSet<T, S>; /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet<T, S>`. #[inline] fn bitxor(self, rhs: &HashSet<T, S>) -> HashSet<T, S> { HashSet(self.0.bitxor(&rhs.0)) } } impl<T, S> Sub<&HashSet<T, S>> for &HashSet<T, S> where for<'a> &'a hb::HashSet<T, S>: Sub<&'a hb::HashSet<T, S>, Output = hb::HashSet<T, S>>, { type Output = HashSet<T, S>; /// Returns the difference of `self` and `rhs` as a new `HashSet<T, S>`. #[inline] fn sub(self, rhs: &HashSet<T, S>) -> HashSet<T, S> { HashSet(self.0.sub(&rhs.0)) } } impl<T, S> BitOrAssign<&HashSet<T, S>> for HashSet<T, S> where hb::HashSet<T, S>: for<'a> BitOrAssign<&'a hb::HashSet<T, S>>, { /// Modifies this set to contain the union of `self` and `rhs`. #[inline] fn bitor_assign(&mut self, rhs: &HashSet<T, S>) { self.0.bitor_assign(&rhs.0); } } impl<T, S> BitAndAssign<&HashSet<T, S>> for HashSet<T, S> where hb::HashSet<T, S>: for<'a> BitAndAssign<&'a hb::HashSet<T, S>>, { /// Modifies this set to contain the intersection of `self` and `rhs`. #[inline] fn bitand_assign(&mut self, rhs: &HashSet<T, S>) { self.0.bitand_assign(&rhs.0); } } impl<T, S> BitXorAssign<&HashSet<T, S>> for HashSet<T, S> where hb::HashSet<T, S>: for<'a> BitXorAssign<&'a hb::HashSet<T, S>>, { /// Modifies this set to contain the symmetric difference of `self` and `rhs`. 
#[inline] fn bitxor_assign(&mut self, rhs: &HashSet<T, S>) { self.0.bitxor_assign(&rhs.0); } } impl<T, S> SubAssign<&HashSet<T, S>> for HashSet<T, S> where hb::HashSet<T, S>: for<'a> SubAssign<&'a hb::HashSet<T, S>>, { /// Modifies this set to contain the difference of `self` and `rhs`. #[inline] fn sub_assign(&mut self, rhs: &HashSet<T, S>) { self.0.sub_assign(&rhs.0); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/collections/mod.rs
crates/bevy_platform/src/collections/mod.rs
//! Provides [`HashMap`] and [`HashSet`] from [`hashbrown`] with some customized defaults. //! //! Also provides the [`HashTable`] type, which is specific to [`hashbrown`]. pub use hash_map::HashMap; pub use hash_set::HashSet; pub use hash_table::HashTable; pub use hashbrown::Equivalent; pub mod hash_map; pub mod hash_set; pub mod hash_table;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/collections/hash_table.rs
crates/bevy_platform/src/collections/hash_table.rs
//! Provides [`HashTable`] pub use hashbrown::hash_table::{ AbsentEntry, Drain, Entry, ExtractIf, HashTable, IntoIter, Iter, IterHash, IterHashMut, IterMut, OccupiedEntry, VacantEntry, };
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/rwlock.rs
crates/bevy_platform/src/sync/rwlock.rs
//! Provides `RwLock`, `RwLockReadGuard`, `RwLockWriteGuard` pub use implementation::{RwLock, RwLockReadGuard, RwLockWriteGuard}; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { use crate::sync::{LockResult, TryLockError, TryLockResult}; use core::fmt; pub use spin::rwlock::{RwLockReadGuard, RwLockWriteGuard}; /// Fallback implementation of `RwLock` from the standard library. pub struct RwLock<T: ?Sized> { inner: spin::RwLock<T>, } impl<T> RwLock<T> { /// Creates a new instance of an `RwLock<T>` which is unlocked. /// /// See the standard library for further details. pub const fn new(t: T) -> RwLock<T> { Self { inner: spin::RwLock::new(t), } } } impl<T: ?Sized> RwLock<T> { /// Locks this `RwLock` with shared read access, blocking the current thread /// until it can be acquired. /// /// See the standard library for further details. pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> { Ok(self.inner.read()) } /// Attempts to acquire this `RwLock` with shared read access. /// /// See the standard library for further details. pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> { self.inner.try_read().ok_or(TryLockError::WouldBlock) } /// Locks this `RwLock` with exclusive write access, blocking the current /// thread until it can be acquired. /// /// See the standard library for further details. pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> { Ok(self.inner.write()) } /// Attempts to lock this `RwLock` with exclusive write access. /// /// See the standard library for further details. pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> { self.inner.try_write().ok_or(TryLockError::WouldBlock) } /// Determines whether the lock is poisoned. /// /// See the standard library for further details. pub fn is_poisoned(&self) -> bool { false } /// Clear the poisoned state from a lock. /// /// See the standard library for further details. 
pub fn clear_poison(&self) { // no-op } /// Consumes this `RwLock`, returning the underlying data. /// /// See the standard library for further details. pub fn into_inner(self) -> LockResult<T> where T: Sized, { Ok(self.inner.into_inner()) } /// Returns a mutable reference to the underlying data. /// /// See the standard library for further details. pub fn get_mut(&mut self) -> LockResult<&mut T> { Ok(self.inner.get_mut()) } } impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_struct("RwLock"); match self.try_read() { Ok(guard) => { d.field("data", &&*guard); } Err(TryLockError::Poisoned(err)) => { d.field("data", &&**err.get_ref()); } Err(TryLockError::WouldBlock) => { d.field("data", &format_args!("<locked>")); } } d.field("poisoned", &false); d.finish_non_exhaustive() } } impl<T: Default> Default for RwLock<T> { fn default() -> RwLock<T> { RwLock::new(Default::default()) } } impl<T> From<T> for RwLock<T> { fn from(t: T) -> Self { RwLock::new(t) } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/mutex.rs
crates/bevy_platform/src/sync/mutex.rs
//! Provides `Mutex` and `MutexGuard` pub use implementation::{Mutex, MutexGuard}; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { use crate::sync::{LockResult, TryLockError, TryLockResult}; use core::fmt; pub use spin::MutexGuard; /// Fallback implementation of `Mutex` from the standard library. pub struct Mutex<T: ?Sized> { inner: spin::Mutex<T>, } impl<T> Mutex<T> { /// Creates a new mutex in an unlocked state ready for use. /// /// See the standard library for further details. pub const fn new(t: T) -> Self { Self { inner: spin::Mutex::new(t), } } } impl<T: ?Sized> Mutex<T> { /// Acquires a mutex, blocking the current thread until it is able to do so. /// /// See the standard library for further details. pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> { Ok(self.inner.lock()) } /// Attempts to acquire this lock. /// /// See the standard library for further details. pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> { self.inner.try_lock().ok_or(TryLockError::WouldBlock) } /// Determines whether the mutex is poisoned. /// /// See the standard library for further details. pub fn is_poisoned(&self) -> bool { false } /// Clear the poisoned state from a mutex. /// /// See the standard library for further details. pub fn clear_poison(&self) { // no-op } /// Consumes this mutex, returning the underlying data. /// /// See the standard library for further details. pub fn into_inner(self) -> LockResult<T> where T: Sized, { Ok(self.inner.into_inner()) } /// Returns a mutable reference to the underlying data. /// /// See the standard library for further details. 
pub fn get_mut(&mut self) -> LockResult<&mut T> { Ok(self.inner.get_mut()) } } impl<T> From<T> for Mutex<T> { fn from(t: T) -> Self { Mutex::new(t) } } impl<T: Default> Default for Mutex<T> { fn default() -> Mutex<T> { Mutex::new(Default::default()) } } impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_struct("Mutex"); match self.try_lock() { Ok(guard) => { d.field("data", &&*guard); } Err(TryLockError::Poisoned(err)) => { d.field("data", &&**err.get_ref()); } Err(TryLockError::WouldBlock) => { d.field("data", &format_args!("<locked>")); } } d.field("poisoned", &false); d.finish_non_exhaustive() } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/poison.rs
crates/bevy_platform/src/sync/poison.rs
//! Provides `LockResult`, `PoisonError`, `TryLockError`, `TryLockResult` pub use implementation::{LockResult, PoisonError, TryLockError, TryLockResult}; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { use core::{error::Error, fmt}; /// Fallback implementation of `PoisonError` from the standard library. pub struct PoisonError<T> { guard: T, } impl<T> fmt::Debug for PoisonError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PoisonError").finish_non_exhaustive() } } impl<T> fmt::Display for PoisonError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "poisoned lock: another task failed inside".fmt(f) } } impl<T> Error for PoisonError<T> {} impl<T> PoisonError<T> { /// Creates a `PoisonError`. /// /// See the standard library for further details. #[cfg(panic = "unwind")] pub fn new(guard: T) -> PoisonError<T> { PoisonError { guard } } /// Consumes this error indicating that a lock is poisoned, returning the /// underlying guard to allow access regardless. /// /// See the standard library for further details. pub fn into_inner(self) -> T { self.guard } /// Reaches into this error indicating that a lock is poisoned, returning a /// reference to the underlying guard to allow access regardless. /// /// See the standard library for further details. pub fn get_ref(&self) -> &T { &self.guard } /// Reaches into this error indicating that a lock is poisoned, returning a /// mutable reference to the underlying guard to allow access regardless. /// /// See the standard library for further details. pub fn get_mut(&mut self) -> &mut T { &mut self.guard } } /// Fallback implementation of `TryLockError` from the standard library. pub enum TryLockError<T> { /// The lock could not be acquired because another thread failed while holding /// the lock. Poisoned(PoisonError<T>), /// The lock could not be acquired at this time because the operation would /// otherwise block. 
WouldBlock, } impl<T> From<PoisonError<T>> for TryLockError<T> { fn from(err: PoisonError<T>) -> TryLockError<T> { TryLockError::Poisoned(err) } } impl<T> fmt::Debug for TryLockError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f), TryLockError::WouldBlock => "WouldBlock".fmt(f), } } } impl<T> fmt::Display for TryLockError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TryLockError::Poisoned(..) => "poisoned lock: another task failed inside", TryLockError::WouldBlock => "try_lock failed because the operation would block", } .fmt(f) } } impl<T> Error for TryLockError<T> {} /// Fallback implementation of `LockResult` from the standard library. pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>; /// Fallback implementation of `TryLockResult` from the standard library. pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/lazy_lock.rs
crates/bevy_platform/src/sync/lazy_lock.rs
//! Provides `LazyLock` pub use implementation::LazyLock; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { pub use spin::Lazy as LazyLock; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/barrier.rs
crates/bevy_platform/src/sync/barrier.rs
//! Provides `Barrier` and `BarrierWaitResult` pub use implementation::{Barrier, BarrierWaitResult}; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { use core::fmt; /// Fallback implementation of `Barrier` from the standard library. pub struct Barrier { inner: spin::Barrier, } impl Barrier { /// Creates a new barrier that can block a given number of threads. /// /// See the standard library for further details. #[must_use] pub const fn new(n: usize) -> Self { Self { inner: spin::Barrier::new(n), } } /// Blocks the current thread until all threads have rendezvoused here. /// /// See the standard library for further details. pub fn wait(&self) -> BarrierWaitResult { BarrierWaitResult { inner: self.inner.wait(), } } } impl fmt::Debug for Barrier { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Barrier").finish_non_exhaustive() } } /// Fallback implementation of `BarrierWaitResult` from the standard library. pub struct BarrierWaitResult { inner: spin::barrier::BarrierWaitResult, } impl BarrierWaitResult { /// Returns `true` if this thread is the "leader thread" for the call to [`Barrier::wait()`]. /// /// See the standard library for further details. #[must_use] pub fn is_leader(&self) -> bool { self.inner.is_leader() } } impl fmt::Debug for BarrierWaitResult { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BarrierWaitResult") .field("is_leader", &self.is_leader()) .finish() } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/mod.rs
crates/bevy_platform/src/sync/mod.rs
//! Provides various synchronization alternatives to language primitives. //! //! Currently missing from this module are the following items: //! * `Condvar` //! * `WaitTimeoutResult` //! * `mpsc` //! //! Otherwise, this is a drop-in replacement for `std::sync`. pub use barrier::{Barrier, BarrierWaitResult}; pub use lazy_lock::LazyLock; pub use mutex::{Mutex, MutexGuard}; pub use once::{Once, OnceLock, OnceState}; pub use poison::{LockResult, PoisonError, TryLockError, TryLockResult}; pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; crate::cfg::alloc! { pub use arc::{Arc, Weak}; crate::cfg::arc! { if { use alloc::sync as arc; } else { use portable_atomic_util as arc; } } } pub mod atomic; mod barrier; mod lazy_lock; mod mutex; mod once; mod poison; mod rwlock;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/atomic.rs
crates/bevy_platform/src/sync/atomic.rs
//! Provides various atomic alternatives to language primitives. //! //! Certain platforms lack complete atomic support, requiring the use of a fallback //! such as `portable-atomic`. //! Using these types will ensure the correct atomic provider is used without the need for //! feature gates in your own code. pub use atomic_16::{AtomicI16, AtomicU16}; pub use atomic_32::{AtomicI32, AtomicU32}; pub use atomic_64::{AtomicI64, AtomicU64}; pub use atomic_8::{AtomicBool, AtomicI8, AtomicU8}; pub use atomic_ptr::{AtomicIsize, AtomicPtr, AtomicUsize}; pub use core::sync::atomic::Ordering; #[cfg(target_has_atomic = "8")] use core::sync::atomic as atomic_8; #[cfg(not(target_has_atomic = "8"))] use portable_atomic as atomic_8; #[cfg(target_has_atomic = "16")] use core::sync::atomic as atomic_16; #[cfg(not(target_has_atomic = "16"))] use portable_atomic as atomic_16; #[cfg(target_has_atomic = "32")] use core::sync::atomic as atomic_32; #[cfg(not(target_has_atomic = "32"))] use portable_atomic as atomic_32; #[cfg(target_has_atomic = "64")] use core::sync::atomic as atomic_64; #[cfg(not(target_has_atomic = "64"))] use portable_atomic as atomic_64; #[cfg(target_has_atomic = "ptr")] use core::sync::atomic as atomic_ptr; #[cfg(not(target_has_atomic = "ptr"))] use portable_atomic as atomic_ptr;
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_platform/src/sync/once.rs
crates/bevy_platform/src/sync/once.rs
//! Provides `Once`, `OnceState`, `OnceLock` pub use implementation::{Once, OnceLock, OnceState}; #[cfg(feature = "std")] use std::sync as implementation; #[cfg(not(feature = "std"))] mod implementation { use core::{ fmt, panic::{RefUnwindSafe, UnwindSafe}, }; /// Fallback implementation of `OnceLock` from the standard library. pub struct OnceLock<T> { inner: spin::Once<T>, } impl<T> OnceLock<T> { /// Creates a new empty cell. /// /// See the standard library for further details. #[must_use] pub const fn new() -> Self { Self { inner: spin::Once::new(), } } /// Gets the reference to the underlying value. /// /// See the standard library for further details. pub fn get(&self) -> Option<&T> { self.inner.get() } /// Gets the mutable reference to the underlying value. /// /// See the standard library for further details. pub fn get_mut(&mut self) -> Option<&mut T> { self.inner.get_mut() } /// Sets the contents of this cell to `value`. /// /// See the standard library for further details. pub fn set(&self, value: T) -> Result<(), T> { let mut value = Some(value); self.inner.call_once(|| value.take().unwrap()); match value { Some(value) => Err(value), None => Ok(()), } } /// Gets the contents of the cell, initializing it with `f` if the cell /// was empty. /// /// See the standard library for further details. pub fn get_or_init<F>(&self, f: F) -> &T where F: FnOnce() -> T, { self.inner.call_once(f) } /// Consumes the `OnceLock`, returning the wrapped value. Returns /// `None` if the cell was empty. /// /// See the standard library for further details. pub fn into_inner(mut self) -> Option<T> { self.take() } /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state. /// /// See the standard library for further details. 
pub fn take(&mut self) -> Option<T> { if self.inner.is_completed() { let mut inner = spin::Once::new(); core::mem::swap(&mut self.inner, &mut inner); inner.try_into_inner() } else { None } } } impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {} impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {} impl<T> Default for OnceLock<T> { fn default() -> OnceLock<T> { OnceLock::new() } } impl<T: fmt::Debug> fmt::Debug for OnceLock<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_tuple("OnceLock"); match self.get() { Some(v) => d.field(v), None => d.field(&format_args!("<uninit>")), }; d.finish() } } impl<T: Clone> Clone for OnceLock<T> { fn clone(&self) -> OnceLock<T> { let cell = Self::new(); if let Some(value) = self.get() { cell.set(value.clone()).ok().unwrap(); } cell } } impl<T> From<T> for OnceLock<T> { fn from(value: T) -> Self { let cell = Self::new(); cell.set(value).map(move |_| cell).ok().unwrap() } } impl<T: PartialEq> PartialEq for OnceLock<T> { fn eq(&self, other: &OnceLock<T>) -> bool { self.get() == other.get() } } impl<T: Eq> Eq for OnceLock<T> {} /// Fallback implementation of `Once` from the standard library. pub struct Once { inner: OnceLock<()>, } impl Once { /// Creates a new `Once` value. /// /// See the standard library for further details. #[expect(clippy::new_without_default, reason = "matching std::sync::Once")] pub const fn new() -> Self { Self { inner: OnceLock::new(), } } /// Performs an initialization routine once and only once. The given closure /// will be executed if this is the first time `call_once` has been called, /// and otherwise the routine will *not* be invoked. /// /// See the standard library for further details. pub fn call_once<F: FnOnce()>(&self, f: F) { self.inner.get_or_init(f); } /// Performs the same function as [`call_once()`] except ignores poisoning. /// /// See the standard library for further details. 
pub fn call_once_force<F: FnOnce(&OnceState)>(&self, f: F) { const STATE: OnceState = OnceState { _private: () }; self.call_once(move || f(&STATE)); } /// Returns `true` if some [`call_once()`] call has completed /// successfully. Specifically, `is_completed` will return false in /// the following situations: /// * [`call_once()`] was not called at all, /// * [`call_once()`] was called, but has not yet completed, /// * the [`Once`] instance is poisoned /// /// See the standard library for further details. pub fn is_completed(&self) -> bool { self.inner.get().is_some() } } impl RefUnwindSafe for Once {} impl UnwindSafe for Once {} impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Once").finish_non_exhaustive() } } /// Fallback implementation of `OnceState` from the standard library. pub struct OnceState { _private: (), } impl OnceState { /// Returns `true` if the associated [`Once`] was poisoned prior to the /// invocation of the closure passed to [`Once::call_once_force()`]. /// /// See the standard library for further details. pub fn is_poisoned(&self) -> bool { false } } impl fmt::Debug for OnceState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OnceState") .field("poisoned", &self.is_poisoned()) .finish() } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/lib.rs
crates/bevy_light/src/lib.rs
#![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]

use bevy_app::{App, Plugin, PostUpdate};
use bevy_camera::{
    primitives::{Aabb, CascadesFrusta, CubemapFrusta, Frustum, Sphere},
    visibility::{
        CascadesVisibleEntities, CubemapVisibleEntities, InheritedVisibility, NoFrustumCulling,
        RenderLayers, ViewVisibility, VisibilityRange, VisibilitySystems, VisibleEntityRanges,
        VisibleMeshEntities,
    },
    CameraUpdateSystems,
};
use bevy_ecs::{entity::EntityHashSet, prelude::*};
use bevy_math::Vec3A;
use bevy_mesh::Mesh3d;
use bevy_reflect::prelude::*;
use bevy_transform::{components::GlobalTransform, TransformSystems};
use bevy_utils::Parallel;
use core::ops::DerefMut;

pub mod cluster;
pub use cluster::ClusteredDecal;
use cluster::{
    add_clusters, assign::assign_objects_to_clusters, GlobalVisibleClusterableObjects,
    VisibleClusterableObjects,
};
mod ambient_light;
pub use ambient_light::{AmbientLight, GlobalAmbientLight};
// Trait providing `ViewVisibility::set_visible`, used by the visibility-check systems below.
use bevy_camera::visibility::SetViewVisibility;
mod probe;
pub use probe::{
    AtmosphereEnvironmentMapLight, EnvironmentMapLight, GeneratedEnvironmentMapLight,
    IrradianceVolume, LightProbe,
};
mod volumetric;
pub use volumetric::{FogVolume, VolumetricFog, VolumetricLight};
pub mod cascade;
use cascade::{build_directional_light_cascades, clear_directional_light_cascades};
pub use cascade::{CascadeShadowConfig, CascadeShadowConfigBuilder, Cascades};
mod point_light;
pub use point_light::{
    update_point_light_frusta, PointLight, PointLightShadowMap, PointLightTexture,
};
mod spot_light;
pub use spot_light::{
    orthonormalize, spot_light_clip_from_view, spot_light_world_from_view,
    update_spot_light_frusta, SpotLight, SpotLightTexture,
};
mod directional_light;
pub use directional_light::{
    update_directional_light_frusta, DirectionalLight, DirectionalLightShadowMap,
    DirectionalLightTexture, SunDisk,
};

/// The light prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
    #[doc(hidden)]
    pub use crate::{
        light_consts, AmbientLight, DirectionalLight, EnvironmentMapLight,
        GeneratedEnvironmentMapLight, GlobalAmbientLight, LightProbe, PointLight, SpotLight,
    };
}

use crate::directional_light::validate_shadow_map_size;

/// Constants for operating with the light units: lumens, and lux.
pub mod light_consts {
    /// Approximations for converting the wattage of lamps to lumens.
    ///
    /// The **lumen** (symbol: **lm**) is the unit of [luminous flux], a measure
    /// of the total quantity of [visible light] emitted by a source per unit of
    /// time, in the [International System of Units] (SI).
    ///
    /// For more information, see [wikipedia](https://en.wikipedia.org/wiki/Lumen_(unit))
    ///
    /// [luminous flux]: https://en.wikipedia.org/wiki/Luminous_flux
    /// [visible light]: https://en.wikipedia.org/wiki/Visible_light
    /// [International System of Units]: https://en.wikipedia.org/wiki/International_System_of_Units
    pub mod lumens {
        /// Approximate lumens emitted per watt by an LED lamp.
        pub const LUMENS_PER_LED_WATTS: f32 = 90.0;
        /// Approximate lumens emitted per watt by an incandescent lamp.
        pub const LUMENS_PER_INCANDESCENT_WATTS: f32 = 13.8;
        /// Approximate lumens emitted per watt by a halogen lamp.
        pub const LUMENS_PER_HALOGEN_WATTS: f32 = 19.8;

        /// 1,000,000 lumens is a very large "cinema light" capable of registering brightly at Bevy's
        /// default "very overcast day" exposure level. For "indoor lighting" with a lower exposure,
        /// this would be way too bright.
        pub const VERY_LARGE_CINEMA_LIGHT: f32 = 1_000_000.0;
    }

    /// Predefined lux values for several common lighting situations.
    ///
    /// The **lux** (symbol: **lx**) is the unit of [illuminance], or [luminous flux] per unit area,
    /// in the [International System of Units] (SI). It is equal to one lumen per square meter.
    ///
    /// For more information, see [wikipedia](https://en.wikipedia.org/wiki/Lux)
    ///
    /// [illuminance]: https://en.wikipedia.org/wiki/Illuminance
    /// [luminous flux]: https://en.wikipedia.org/wiki/Luminous_flux
    /// [International System of Units]: https://en.wikipedia.org/wiki/International_System_of_Units
    pub mod lux {
        /// The amount of light (lux) in a moonless, overcast night sky. (starlight)
        pub const MOONLESS_NIGHT: f32 = 0.0001;
        /// The amount of light (lux) during a full moon on a clear night.
        pub const FULL_MOON_NIGHT: f32 = 0.05;
        /// The amount of light (lux) during the dark limit of civil twilight under a clear sky.
        pub const CIVIL_TWILIGHT: f32 = 3.4;
        /// The amount of light (lux) in family living room lights.
        pub const LIVING_ROOM: f32 = 50.;
        /// The amount of light (lux) in an office building's hallway/toilet lighting.
        pub const HALLWAY: f32 = 80.;
        /// The amount of light (lux) on a very dark overcast day.
        pub const DARK_OVERCAST_DAY: f32 = 100.;
        /// The amount of light (lux) in an office.
        pub const OFFICE: f32 = 320.;
        /// The amount of light (lux) during sunrise or sunset on a clear day.
        pub const CLEAR_SUNRISE: f32 = 400.;
        /// The amount of light (lux) on an overcast day; typical TV studio lighting
        pub const OVERCAST_DAY: f32 = 1000.;
        /// The amount of light (lux) from ambient daylight (not direct sunlight).
        pub const AMBIENT_DAYLIGHT: f32 = 10_000.;
        /// The amount of light (lux) in full daylight (not direct sun).
        pub const FULL_DAYLIGHT: f32 = 20_000.;
        /// The amount of light (lux) in direct sunlight.
        pub const DIRECT_SUNLIGHT: f32 = 100_000.;
        /// The amount of light (lux) of raw sunlight, not filtered by the atmosphere.
        pub const RAW_SUNLIGHT: f32 = 130_000.;
    }
}

/// Registers the light-related resources and the `PostUpdate` systems that keep
/// clusters, cascades, light frusta, and per-light visibility lists up to date.
#[derive(Default)]
pub struct LightPlugin;

impl Plugin for LightPlugin {
    fn build(&self, app: &mut App) {
        app.init_resource::<GlobalVisibleClusterableObjects>()
            .init_resource::<GlobalAmbientLight>()
            .init_resource::<DirectionalLightShadowMap>()
            .init_resource::<PointLightShadowMap>()
            // Marking a set as ambiguous with itself suppresses order-ambiguity
            // warnings between the systems it contains; see the doc comments on
            // the corresponding `SimulationLightSystems` variants for why this is sound.
            .configure_sets(
                PostUpdate,
                SimulationLightSystems::UpdateDirectionalLightCascades
                    .ambiguous_with(SimulationLightSystems::UpdateDirectionalLightCascades),
            )
            .configure_sets(
                PostUpdate,
                SimulationLightSystems::CheckLightVisibility
                    .ambiguous_with(SimulationLightSystems::CheckLightVisibility),
            )
            .add_systems(
                PostUpdate,
                (
                    validate_shadow_map_size.before(build_directional_light_cascades),
                    add_clusters
                        .in_set(SimulationLightSystems::AddClusters)
                        .after(CameraUpdateSystems),
                    assign_objects_to_clusters
                        .in_set(SimulationLightSystems::AssignLightsToClusters)
                        .after(TransformSystems::Propagate)
                        .after(VisibilitySystems::CheckVisibility)
                        .after(CameraUpdateSystems),
                    clear_directional_light_cascades
                        .in_set(SimulationLightSystems::UpdateDirectionalLightCascades)
                        .after(TransformSystems::Propagate)
                        .after(CameraUpdateSystems),
                    update_directional_light_frusta
                        .in_set(SimulationLightSystems::UpdateLightFrusta)
                        // This must run after CheckVisibility because it relies on `ViewVisibility`
                        .after(VisibilitySystems::CheckVisibility)
                        .after(TransformSystems::Propagate)
                        .after(SimulationLightSystems::UpdateDirectionalLightCascades)
                        // We assume that no entity will be both a directional light and a spot light,
                        // so these systems will run independently of one another.
                        // FIXME: Add an archetype invariant for this https://github.com/bevyengine/bevy/issues/1481.
                        .ambiguous_with(update_spot_light_frusta),
                    update_point_light_frusta
                        .in_set(SimulationLightSystems::UpdateLightFrusta)
                        .after(TransformSystems::Propagate)
                        .after(SimulationLightSystems::AssignLightsToClusters),
                    update_spot_light_frusta
                        .in_set(SimulationLightSystems::UpdateLightFrusta)
                        .after(TransformSystems::Propagate)
                        .after(SimulationLightSystems::AssignLightsToClusters),
                    (
                        check_dir_light_mesh_visibility,
                        check_point_light_mesh_visibility,
                    )
                        .in_set(SimulationLightSystems::CheckLightVisibility)
                        .after(VisibilitySystems::CalculateBounds)
                        .after(TransformSystems::Propagate)
                        .after(SimulationLightSystems::UpdateLightFrusta)
                        // Lights can "see" entities and mark them as visible. This is done to
                        // correctly render shadows for entities that are not in view of a camera,
                        // but must be renderable to cast shadows. Because of this, we need to check
                        // entity visibility and mark as visible before they can be hidden.
                        .after(VisibilitySystems::CheckVisibility)
                        .before(VisibilitySystems::MarkNewlyHiddenEntitiesInvisible),
                    build_directional_light_cascades
                        .in_set(SimulationLightSystems::UpdateDirectionalLightCascades)
                        .after(clear_directional_light_cascades),
                ),
            );
    }
}

/// A convenient alias for `Or<(With<PointLight>, With<SpotLight>,
/// With<DirectionalLight>)>`, for use with [`bevy_camera::visibility::VisibleEntities`].
pub type WithLight = Or<(With<PointLight>, With<SpotLight>, With<DirectionalLight>)>;

/// Add this component to make a [`Mesh3d`] not cast shadows.
#[derive(Debug, Component, Reflect, Default, Clone, PartialEq)]
#[reflect(Component, Default, Debug, Clone, PartialEq)]
pub struct NotShadowCaster;

/// Add this component to make a [`Mesh3d`] not receive shadows.
///
/// **Note:** If you're using diffuse transmission, setting [`NotShadowReceiver`] will
/// cause both “regular” shadows as well as diffusely transmitted shadows to be disabled,
/// even when [`TransmittedShadowReceiver`] is being used.
#[derive(Debug, Component, Reflect, Default)]
#[reflect(Component, Default, Debug)]
pub struct NotShadowReceiver;

/// Add this component to make a [`Mesh3d`] using a PBR material with `StandardMaterial::diffuse_transmission > 0.0`
/// receive shadows on its diffuse transmission lobe. (i.e. its “backside”)
///
/// Not enabled by default, as it requires carefully setting up `StandardMaterial::thickness`
/// (and potentially even baking a thickness texture!) to match the geometry of the mesh, in order to avoid self-shadow artifacts.
///
/// **Note:** Using [`NotShadowReceiver`] overrides this component.
#[derive(Debug, Component, Reflect, Default)]
#[reflect(Component, Default, Debug)]
pub struct TransmittedShadowReceiver;

/// Add this component to a [`Camera3d`](bevy_camera::Camera3d)
/// to control how to anti-alias shadow edges.
///
/// The different modes use different approaches to
/// [Percentage Closer Filtering](https://developer.nvidia.com/gpugems/gpugems/part-ii-lighting-and-shadows/chapter-11-shadow-map-antialiasing).
#[derive(Debug, Component, Reflect, Clone, Copy, PartialEq, Eq, Default)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
pub enum ShadowFilteringMethod {
    /// Hardware 2x2.
    ///
    /// Fast but poor quality.
    Hardware2x2,
    /// Approximates a fixed Gaussian blur, good when TAA isn't in use.
    ///
    /// Good quality, good performance.
    ///
    /// For directional and spot lights, this uses a [method by Ignacio Castaño
    /// for *The Witness*] using 9 samples and smart filtering to achieve the same
    /// as a regular 5x5 filter kernel.
    ///
    /// [method by Ignacio Castaño for *The Witness*]: https://web.archive.org/web/20230210095515/http://the-witness.net/news/2013/09/shadow-mapping-summary-part-1/
    #[default]
    Gaussian,
    /// A randomized filter that varies over time, good when TAA is in use.
    ///
    /// Good quality when used with `TemporalAntiAliasing`
    /// and good performance.
    ///
    /// For directional and spot lights, this uses a [method by Jorge Jimenez for
    /// *Call of Duty: Advanced Warfare*] using 8 samples in spiral pattern,
    /// randomly-rotated by interleaved gradient noise with spatial variation.
    ///
    /// [method by Jorge Jimenez for *Call of Duty: Advanced Warfare*]: https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare/
    Temporal,
}

/// System sets used to run light-related systems.
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
pub enum SimulationLightSystems {
    AddClusters,
    AssignLightsToClusters,
    /// System order ambiguities between systems in this set are ignored:
    /// each [`build_directional_light_cascades`] system is independent of the others,
    /// and should operate on distinct sets of entities.
    UpdateDirectionalLightCascades,
    UpdateLightFrusta,
    /// System order ambiguities between systems in this set are ignored:
    /// the order of systems within this set is irrelevant, as the various visibility-checking systems
    /// assume that their operations are irreversible during the frame.
    CheckLightVisibility,
}

/// Shrinks `visible_entities` so its capacity is roughly no more than twice its
/// length, releasing memory retained from frames with many more visible entities.
fn shrink_entities(visible_entities: &mut Vec<Entity>) {
    // Check that visible entities capacity() is no more than two times greater than len()
    let capacity = visible_entities.capacity();
    // `checked_div` returns `None` when the list is empty, in which case we
    // shrink the allocation away entirely (reserve 0).
    let reserved = capacity
        .checked_div(visible_entities.len())
        .map_or(0, |reserve| {
            if reserve > 2 {
                // Over-reserved by more than 2x: keep roughly twice the length.
                capacity / (reserve / 2)
            } else {
                capacity
            }
        });

    visible_entities.shrink_to(reserved);
}

/// Determines which shadow-casting meshes are visible to each cascade of every
/// shadow-mapping [`DirectionalLight`], filling in [`CascadesVisibleEntities`].
///
/// Meshes found visible from a light are also marked view-visible (via a deferred
/// command) so they are rendered as shadow casters even when no camera sees them.
pub fn check_dir_light_mesh_visibility(
    mut commands: Commands,
    mut directional_lights: Query<
        (
            &DirectionalLight,
            &CascadesFrusta,
            &mut CascadesVisibleEntities,
            Option<&RenderLayers>,
            &ViewVisibility,
        ),
        Without<SpotLight>,
    >,
    visible_entity_query: Query<
        (
            Entity,
            &InheritedVisibility,
            Option<&RenderLayers>,
            Option<&Aabb>,
            Option<&GlobalTransform>,
            Has<VisibilityRange>,
            Has<NoFrustumCulling>,
        ),
        (
            Without<NotShadowCaster>,
            Without<DirectionalLight>,
            With<Mesh3d>,
        ),
    >,
    visible_entity_ranges: Option<Res<VisibleEntityRanges>>,
    mut defer_visible_entities_queue: Local<Parallel<Vec<Entity>>>,
    mut view_visible_entities_queue: Local<Parallel<Vec<Vec<Entity>>>>,
) {
    let visible_entity_ranges = visible_entity_ranges.as_deref();
    for (directional_light, frusta, mut visible_entities, maybe_view_mask, light_view_visibility) in
        &mut directional_lights
    {
        // Reconcile the per-view entity lists with the current set of view frusta:
        // clear lists for views that still exist, and remember views that vanished.
        let mut views_to_remove = Vec::new();
        for (view, cascade_view_entities) in &mut visible_entities.entities {
            match frusta.frusta.get(view) {
                Some(view_frusta) => {
                    cascade_view_entities.resize(view_frusta.len(), Default::default());
                    cascade_view_entities.iter_mut().for_each(|x| x.clear());
                }
                None => views_to_remove.push(*view),
            };
        }
        // Create empty per-cascade lists for views that are new this frame.
        for (view, frusta) in &frusta.frusta {
            visible_entities
                .entities
                .entry(*view)
                .or_insert_with(|| vec![VisibleMeshEntities::default(); frusta.len()]);
        }
        for v in views_to_remove {
            visible_entities.entities.remove(&v);
        }

        // NOTE: If shadow mapping is disabled for the light then it must have no visible entities
        if !directional_light.shadows_enabled || !light_view_visibility.get() {
            continue;
        }

        let view_mask = maybe_view_mask.unwrap_or_default();

        for (view, view_frusta) in &frusta.frusta {
            visible_entity_query.par_iter().for_each_init(
                || {
                    let mut entities = view_visible_entities_queue.borrow_local_mut();
                    entities.resize(view_frusta.len(), Vec::default());
                    (defer_visible_entities_queue.borrow_local_mut(), entities)
                },
                |(defer_visible_entities_local_queue, view_visible_entities_local_queue),
                 (
                    entity,
                    inherited_visibility,
                    maybe_entity_mask,
                    maybe_aabb,
                    maybe_transform,
                    has_visibility_range,
                    has_no_frustum_culling,
                )| {
                    if !inherited_visibility.get() {
                        return;
                    }

                    let entity_mask = maybe_entity_mask.unwrap_or_default();
                    if !view_mask.intersects(entity_mask) {
                        return;
                    }

                    // Check visibility ranges.
                    if has_visibility_range
                        && visible_entity_ranges.is_some_and(|visible_entity_ranges| {
                            !visible_entity_ranges.entity_is_in_range_of_view(entity, *view)
                        })
                    {
                        return;
                    }

                    if let (Some(aabb), Some(transform)) = (maybe_aabb, maybe_transform) {
                        let mut visible = false;
                        for (frustum, frustum_visible_entities) in view_frusta
                            .iter()
                            .zip(view_visible_entities_local_queue.iter_mut())
                        {
                            // Disable near-plane culling, as a shadow caster could lie before the near plane.
                            if !has_no_frustum_culling
                                && !frustum.intersects_obb(aabb, &transform.affine(), false, true)
                            {
                                continue;
                            }
                            visible = true;
                            frustum_visible_entities.push(entity);
                        }
                        if visible {
                            defer_visible_entities_local_queue.push(entity);
                        }
                    } else {
                        // No AABB/transform to cull against: conservatively treat the
                        // entity as visible to every cascade of this view.
                        defer_visible_entities_local_queue.push(entity);
                        for frustum_visible_entities in
                            view_visible_entities_local_queue.iter_mut()
                        {
                            frustum_visible_entities.push(entity);
                        }
                    }
                },
            );
            // collect entities from parallel queue
            for entities in view_visible_entities_queue.iter_mut() {
                visible_entities
                    .entities
                    .get_mut(view)
                    .unwrap()
                    .iter_mut()
                    .zip(entities.iter_mut())
                    .for_each(|(dst, source)| {
                        dst.append(source);
                    });
            }
        }

        for (_, cascade_view_entities) in &mut visible_entities.entities {
            cascade_view_entities
                .iter_mut()
                .map(DerefMut::deref_mut)
                .for_each(shrink_entities);
        }
    }

    // Defer marking view visibility so this system can run in parallel with check_point_light_mesh_visibility
    // TODO: use resource to avoid unnecessary memory alloc
    let mut defer_queue = core::mem::take(defer_visible_entities_queue.deref_mut());
    commands.queue(move |world: &mut World| {
        let mut query = world.query::<&mut ViewVisibility>();
        for entities in defer_queue.iter_mut() {
            let mut iter = query.iter_many_mut(world, entities.iter());
            while let Some(mut view_visibility) = iter.fetch_next() {
                view_visibility.set_visible();
            }
        }
    });
}

/// Determines which shadow-casting meshes are visible to every shadow-mapping
/// [`PointLight`] (one entity list per cubemap face) and [`SpotLight`] listed in
/// [`VisibleClusterableObjects`], marking found meshes view-visible so they are
/// rendered as shadow casters even when no camera sees them.
pub fn check_point_light_mesh_visibility(
    visible_point_lights: Query<&VisibleClusterableObjects>,
    mut point_lights: Query<(
        &PointLight,
        &GlobalTransform,
        &CubemapFrusta,
        &mut CubemapVisibleEntities,
        Option<&RenderLayers>,
    )>,
    mut spot_lights: Query<(
        &SpotLight,
        &GlobalTransform,
        &Frustum,
        &mut VisibleMeshEntities,
        Option<&RenderLayers>,
    )>,
    mut visible_entity_query: Query<
        (
            Entity,
            &InheritedVisibility,
            &mut ViewVisibility,
            Option<&RenderLayers>,
            Option<&Aabb>,
            Option<&GlobalTransform>,
            Has<VisibilityRange>,
            Has<NoFrustumCulling>,
        ),
        (
            Without<NotShadowCaster>,
            Without<DirectionalLight>,
            With<Mesh3d>,
        ),
    >,
    visible_entity_ranges: Option<Res<VisibleEntityRanges>>,
    mut cubemap_visible_entities_queue: Local<Parallel<[Vec<Entity>; 6]>>,
    mut spot_visible_entities_queue: Local<Parallel<Vec<Entity>>>,
    mut checked_lights: Local<EntityHashSet>,
) {
    checked_lights.clear();
    let visible_entity_ranges = visible_entity_ranges.as_deref();
    for visible_lights in &visible_point_lights {
        for light_entity in visible_lights.entities.iter().copied() {
            // A light may appear in multiple clusters; process it only once.
            if !checked_lights.insert(light_entity) {
                continue;
            }

            // Point lights
            if let Ok((
                point_light,
                transform,
                cubemap_frusta,
                mut cubemap_visible_entities,
                maybe_view_mask,
            )) = point_lights.get_mut(light_entity)
            {
                for visible_entities in cubemap_visible_entities.iter_mut() {
                    visible_entities.entities.clear();
                }

                // NOTE: If shadow mapping is disabled for the light then it must have no visible entities
                if !point_light.shadows_enabled {
                    continue;
                }

                let view_mask = maybe_view_mask.unwrap_or_default();
                let light_sphere = Sphere {
                    center: Vec3A::from(transform.translation()),
                    radius: point_light.range,
                };

                visible_entity_query.par_iter_mut().for_each_init(
                    || cubemap_visible_entities_queue.borrow_local_mut(),
                    |cubemap_visible_entities_local_queue,
                     (
                        entity,
                        inherited_visibility,
                        mut view_visibility,
                        maybe_entity_mask,
                        maybe_aabb,
                        maybe_transform,
                        has_visibility_range,
                        has_no_frustum_culling,
                    )| {
                        if !inherited_visibility.get() {
                            return;
                        }

                        let entity_mask = maybe_entity_mask.unwrap_or_default();
                        if !view_mask.intersects(entity_mask) {
                            return;
                        }

                        if has_visibility_range
                            && visible_entity_ranges.is_some_and(|visible_entity_ranges| {
                                !visible_entity_ranges.entity_is_in_range_of_any_view(entity)
                            })
                        {
                            return;
                        }

                        // If we have an aabb and transform, do frustum culling
                        if let (Some(aabb), Some(transform)) = (maybe_aabb, maybe_transform) {
                            let model_to_world = transform.affine();
                            // Do a cheap sphere vs obb test to prune out most meshes outside the sphere of the light
                            if !has_no_frustum_culling
                                && !light_sphere.intersects_obb(aabb, &model_to_world)
                            {
                                return;
                            }

                            // Test the mesh against each of the six cubemap-face frusta.
                            for (frustum, visible_entities) in cubemap_frusta
                                .iter()
                                .zip(cubemap_visible_entities_local_queue.iter_mut())
                            {
                                if has_no_frustum_culling
                                    || frustum.intersects_obb(aabb, &model_to_world, true, true)
                                {
                                    view_visibility.set_visible();
                                    visible_entities.push(entity);
                                }
                            }
                        } else {
                            // No AABB/transform to cull against: conservatively visible
                            // from every cubemap face.
                            view_visibility.set_visible();
                            for visible_entities in
                                cubemap_visible_entities_local_queue.iter_mut()
                            {
                                visible_entities.push(entity);
                            }
                        }
                    },
                );

                for entities in cubemap_visible_entities_queue.iter_mut() {
                    for (dst, source) in
                        cubemap_visible_entities.iter_mut().zip(entities.iter_mut())
                    {
                        dst.entities.append(source);
                    }
                }

                for visible_entities in cubemap_visible_entities.iter_mut() {
                    shrink_entities(visible_entities);
                }
            }

            // Spot lights
            if let Ok((point_light, transform, frustum, mut visible_entities, maybe_view_mask)) =
                spot_lights.get_mut(light_entity)
            {
                visible_entities.clear();

                // NOTE: If shadow mapping is disabled for the light then it must have no visible entities
                if !point_light.shadows_enabled {
                    continue;
                }

                let view_mask = maybe_view_mask.unwrap_or_default();
                let light_sphere = Sphere {
                    center: Vec3A::from(transform.translation()),
                    radius: point_light.range,
                };

                visible_entity_query.par_iter_mut().for_each_init(
                    || spot_visible_entities_queue.borrow_local_mut(),
                    |spot_visible_entities_local_queue,
                     (
                        entity,
                        inherited_visibility,
                        mut view_visibility,
                        maybe_entity_mask,
                        maybe_aabb,
                        maybe_transform,
                        has_visibility_range,
                        has_no_frustum_culling,
                    )| {
                        if !inherited_visibility.get() {
                            return;
                        }

                        let entity_mask = maybe_entity_mask.unwrap_or_default();
                        if !view_mask.intersects(entity_mask) {
                            return;
                        }

                        // Check visibility ranges.
                        if has_visibility_range
                            && visible_entity_ranges.is_some_and(|visible_entity_ranges| {
                                !visible_entity_ranges.entity_is_in_range_of_any_view(entity)
                            })
                        {
                            return;
                        }

                        if let (Some(aabb), Some(transform)) = (maybe_aabb, maybe_transform) {
                            let model_to_world = transform.affine();
                            // Do a cheap sphere vs obb test to prune out most meshes outside the sphere of the light
                            if !has_no_frustum_culling
                                && !light_sphere.intersects_obb(aabb, &model_to_world)
                            {
                                return;
                            }

                            if has_no_frustum_culling
                                || frustum.intersects_obb(aabb, &model_to_world, true, true)
                            {
                                view_visibility.set_visible();
                                spot_visible_entities_local_queue.push(entity);
                            }
                        } else {
                            // No AABB/transform to cull against: conservatively visible.
                            view_visibility.set_visible();
                            spot_visible_entities_local_queue.push(entity);
                        }
                    },
                );

                for entities in spot_visible_entities_queue.iter_mut() {
                    visible_entities.append(entities);
                }

                shrink_entities(visible_entities.deref_mut());
            }
        }
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/volumetric.rs
crates/bevy_light/src/volumetric.rs
use bevy_asset::Handle;
use bevy_camera::visibility::Visibility;
use bevy_color::Color;
use bevy_ecs::prelude::*;
use bevy_image::Image;
use bevy_math::Vec3;
use bevy_reflect::prelude::*;
use bevy_transform::components::Transform;

/// Add this component to a [`DirectionalLight`](crate::DirectionalLight) with a shadow map
/// (`shadows_enabled: true`) to make volumetric fog interact with it.
///
/// This allows the light to generate light shafts/god rays.
#[derive(Clone, Copy, Component, Default, Debug, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct VolumetricLight;

/// When placed on a [`bevy_camera::Camera3d`], enables
/// volumetric fog and volumetric lighting, also known as light shafts or god
/// rays.
///
/// Requires using WebGPU on Wasm builds.
#[derive(Clone, Copy, Component, Debug, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct VolumetricFog {
    /// Color of the ambient light.
    ///
    /// This is separate from Bevy's [`AmbientLight`](crate::AmbientLight) because an
    /// [`EnvironmentMapLight`](crate::EnvironmentMapLight) is
    /// still considered an ambient light for the purposes of volumetric fog. If you're using a
    /// [`EnvironmentMapLight`](crate::EnvironmentMapLight), for best results,
    /// this should be a good approximation of the average color of the environment map.
    ///
    /// Defaults to white.
    pub ambient_color: Color,

    /// The brightness of the ambient light.
    ///
    /// If there's no [`EnvironmentMapLight`](crate::EnvironmentMapLight),
    /// set this to 0.
    ///
    /// Defaults to 0.1.
    pub ambient_intensity: f32,

    /// The maximum distance to offset the ray origin randomly by, in meters.
    ///
    /// This is intended for use with temporal antialiasing. It helps fog look
    /// less blocky by varying the start position of the ray, using interleaved
    /// gradient noise.
    ///
    /// Defaults to 0.0 (no jitter).
    pub jitter: f32,

    /// The number of raymarching steps to perform.
    ///
    /// Higher values produce higher-quality results with less banding, but
    /// reduce performance.
    ///
    /// The default value is 64.
    pub step_count: u32,
}

impl Default for VolumetricFog {
    fn default() -> Self {
        Self {
            step_count: 64,
            // Matches `AmbientLight` defaults.
            ambient_color: Color::WHITE,
            ambient_intensity: 0.1,
            jitter: 0.0,
        }
    }
}

/// A volume of fog placed in the world, rendered when a camera with
/// [`VolumetricFog`] views it. Requires [`Transform`] and [`Visibility`].
#[derive(Clone, Component, Debug, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
#[require(Transform, Visibility)]
pub struct FogVolume {
    /// The color of the fog.
    ///
    /// Note that the fog must be lit by a [`VolumetricLight`] or ambient light
    /// in order for this color to appear.
    ///
    /// Defaults to white.
    pub fog_color: Color,

    /// The density of fog, which measures how dark the fog is.
    ///
    /// The default value is 0.1.
    pub density_factor: f32,

    /// Optional 3D voxel density texture for the fog.
    pub density_texture: Option<Handle<Image>>,

    /// Configurable offset of the density texture in UVW coordinates.
    ///
    /// This can be used to scroll a repeating density texture in a direction over time
    /// to create effects like fog moving in the wind. Make sure to configure the texture
    /// to use `ImageAddressMode::Repeat` if this is your intention.
    ///
    /// Has no effect when no density texture is present.
    ///
    /// The default value is (0, 0, 0).
    pub density_texture_offset: Vec3,

    /// The absorption coefficient, which measures what fraction of light is
    /// absorbed by the fog at each step.
    ///
    /// Increasing this value makes the fog darker.
    ///
    /// The default value is 0.3.
    pub absorption: f32,

    /// The scattering coefficient, which measures the fraction of light that's
    /// scattered toward, and away from, the viewer.
    ///
    /// The default value is 0.3.
    pub scattering: f32,

    /// Measures the fraction of light that's scattered *toward* the camera, as
    /// opposed to *away* from the camera.
    ///
    /// Increasing this value makes light shafts become more prominent when the
    /// camera is facing toward their source and less prominent when the camera
    /// is facing away. Essentially, a high value here means the light shafts
    /// will fade into view as the camera focuses on them and fade away when the
    /// camera is pointing away.
    ///
    /// The default value is 0.5 (see [`FogVolume::default`]).
    pub scattering_asymmetry: f32,

    /// Applies a nonphysical color to the light.
    ///
    /// This can be useful for artistic purposes but is nonphysical.
    ///
    /// The default value is white.
    pub light_tint: Color,

    /// Scales the light by a fixed fraction.
    ///
    /// This can be useful for artistic purposes but is nonphysical.
    ///
    /// The default value is 1.0, which results in no adjustment.
    pub light_intensity: f32,
}

impl Default for FogVolume {
    fn default() -> Self {
        Self {
            absorption: 0.3,
            scattering: 0.3,
            density_factor: 0.1,
            density_texture: None,
            density_texture_offset: Vec3::ZERO,
            scattering_asymmetry: 0.5,
            fog_color: Color::WHITE,
            light_tint: Color::WHITE,
            light_intensity: 1.0,
        }
    }
}
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/cascade.rs
crates/bevy_light/src/cascade.rs
use bevy_camera::{Camera, Projection}; use bevy_ecs::{entity::EntityHashMap, prelude::*}; use bevy_math::{ops, Mat4, Vec3A, Vec4}; use bevy_reflect::prelude::*; use bevy_transform::components::GlobalTransform; use crate::{DirectionalLight, DirectionalLightShadowMap}; /// Controls how cascaded shadow mapping works. /// Prefer using [`CascadeShadowConfigBuilder`] to construct an instance. /// /// ``` /// # use bevy_light::CascadeShadowConfig; /// # use bevy_light::CascadeShadowConfigBuilder; /// # use bevy_utils::default; /// # /// let config: CascadeShadowConfig = CascadeShadowConfigBuilder { /// maximum_distance: 100.0, /// ..default() /// }.into(); /// ``` #[derive(Component, Clone, Debug, Reflect)] #[reflect(Component, Default, Debug, Clone)] pub struct CascadeShadowConfig { /// The (positive) distance to the far boundary of each cascade. pub bounds: Vec<f32>, /// The proportion of overlap each cascade has with the previous cascade. pub overlap_proportion: f32, /// The (positive) distance to the near boundary of the first cascade. pub minimum_distance: f32, } impl Default for CascadeShadowConfig { fn default() -> Self { CascadeShadowConfigBuilder::default().into() } } fn calculate_cascade_bounds( num_cascades: usize, nearest_bound: f32, shadow_maximum_distance: f32, ) -> Vec<f32> { if num_cascades == 1 { return vec![shadow_maximum_distance]; } let base = ops::powf( shadow_maximum_distance / nearest_bound, 1.0 / (num_cascades - 1) as f32, ); (0..num_cascades) .map(|i| nearest_bound * ops::powf(base, i as f32)) .collect() } /// Builder for [`CascadeShadowConfig`]. pub struct CascadeShadowConfigBuilder { /// The number of shadow cascades. /// More cascades increases shadow quality by mitigating perspective aliasing - a phenomenon where areas /// nearer the camera are covered by fewer shadow map texels than areas further from the camera, causing /// blocky looking shadows. 
/// /// This does come at the cost increased rendering overhead, however this overhead is still less /// than if you were to use fewer cascades and much larger shadow map textures to achieve the /// same quality level. /// /// In case rendered geometry covers a relatively narrow and static depth relative to camera, it may /// make more sense to use fewer cascades and a higher resolution shadow map texture as perspective aliasing /// is not as much an issue. Be sure to adjust `minimum_distance` and `maximum_distance` appropriately. pub num_cascades: usize, /// The minimum shadow distance, which can help improve the texel resolution of the first cascade. /// Areas nearer to the camera than this will likely receive no shadows. /// /// NOTE: Due to implementation details, this usually does not impact shadow quality as much as /// `first_cascade_far_bound` and `maximum_distance`. At many view frustum field-of-views, the /// texel resolution of the first cascade is dominated by the width / height of the view frustum plane /// at `first_cascade_far_bound` rather than the depth of the frustum from `minimum_distance` to /// `first_cascade_far_bound`. pub minimum_distance: f32, /// The maximum shadow distance. /// Areas further from the camera than this will likely receive no shadows. pub maximum_distance: f32, /// Sets the far bound of the first cascade, relative to the view origin. /// In-between cascades will be exponentially spaced relative to the maximum shadow distance. /// NOTE: This is ignored if there is only one cascade, the maximum distance takes precedence. pub first_cascade_far_bound: f32, /// Sets the overlap proportion between cascades. /// The overlap is used to make the transition from one cascade's shadow map to the next /// less abrupt by blending between both shadow maps. pub overlap_proportion: f32, } impl CascadeShadowConfigBuilder { /// Returns the cascade config as specified by this builder. 
pub fn build(&self) -> CascadeShadowConfig { assert!( self.num_cascades > 0, "num_cascades must be positive, but was {}", self.num_cascades ); assert!( self.minimum_distance >= 0.0, "maximum_distance must be non-negative, but was {}", self.minimum_distance ); assert!( self.num_cascades == 1 || self.minimum_distance < self.first_cascade_far_bound, "minimum_distance must be less than first_cascade_far_bound, but was {}", self.minimum_distance ); assert!( self.maximum_distance > self.minimum_distance, "maximum_distance must be greater than minimum_distance, but was {}", self.maximum_distance ); assert!( (0.0..1.0).contains(&self.overlap_proportion), "overlap_proportion must be in [0.0, 1.0) but was {}", self.overlap_proportion ); CascadeShadowConfig { bounds: calculate_cascade_bounds( self.num_cascades, self.first_cascade_far_bound, self.maximum_distance, ), overlap_proportion: self.overlap_proportion, minimum_distance: self.minimum_distance, } } } impl Default for CascadeShadowConfigBuilder { fn default() -> Self { // The defaults are chosen to be similar to be Unity, Unreal, and Godot. // Unity: first cascade far bound = 10.05, maximum distance = 150.0 // Unreal Engine 5: maximum distance = 200.0 // Godot: first cascade far bound = 10.0, maximum distance = 100.0 Self { // Currently only support one cascade in WebGL 2. num_cascades: if cfg!(all( feature = "webgl", target_arch = "wasm32", not(feature = "webgpu") )) { 1 } else { 4 }, minimum_distance: 0.1, maximum_distance: 150.0, first_cascade_far_bound: 10.0, overlap_proportion: 0.2, } } } impl From<CascadeShadowConfigBuilder> for CascadeShadowConfig { fn from(builder: CascadeShadowConfigBuilder) -> Self { builder.build() } } #[derive(Component, Clone, Debug, Default, Reflect)] #[reflect(Component, Debug, Default, Clone)] pub struct Cascades { /// Map from a view to the configuration of each of its [`Cascade`]s. 
pub cascades: EntityHashMap<Vec<Cascade>>, } #[derive(Clone, Debug, Default, Reflect)] #[reflect(Clone, Default)] pub struct Cascade { /// The transform of the light, i.e. the view to world matrix. pub world_from_cascade: Mat4, /// The orthographic projection for this cascade. pub clip_from_cascade: Mat4, /// The view-projection matrix for this cascade, converting world space into light clip space. /// Importantly, this is derived and stored separately from `view_transform` and `projection` to /// ensure shadow stability. pub clip_from_world: Mat4, /// Size of each shadow map texel in world units. pub texel_size: f32, } pub fn clear_directional_light_cascades(mut lights: Query<(&DirectionalLight, &mut Cascades)>) { for (directional_light, mut cascades) in lights.iter_mut() { if !directional_light.shadows_enabled { continue; } cascades.cascades.clear(); } } pub fn build_directional_light_cascades( directional_light_shadow_map: Res<DirectionalLightShadowMap>, views: Query<(Entity, &GlobalTransform, &Projection, &Camera)>, mut lights: Query<( &GlobalTransform, &DirectionalLight, &CascadeShadowConfig, &mut Cascades, )>, ) { let views = views .iter() .filter_map(|(entity, transform, projection, camera)| { if camera.is_active { Some((entity, projection, transform.to_matrix())) } else { None } }) .collect::<Vec<_>>(); for (transform, directional_light, cascades_config, mut cascades) in &mut lights { if !directional_light.shadows_enabled { continue; } // It is very important to the numerical and thus visual stability of shadows that // light_to_world has orthogonal upper-left 3x3 and zero translation. // Even though only the direction (i.e. rotation) of the light matters, we don't constrain // users to not change any other aspects of the transform - there's no guarantee // `transform.to_matrix()` will give us a matrix with our desired properties. // Instead, we directly create a good matrix from just the rotation. 
let world_from_light = Mat4::from_quat(transform.rotation()); let light_to_world_inverse = world_from_light.transpose(); for (view_entity, projection, view_to_world) in views.iter().copied() { let camera_to_light_view = light_to_world_inverse * view_to_world; let overlap_factor = 1.0 - cascades_config.overlap_proportion; let far_bounds = cascades_config.bounds.iter(); let near_bounds = [cascades_config.minimum_distance] .into_iter() .chain(far_bounds.clone().map(|bound| overlap_factor * bound)); let view_cascades = near_bounds .zip(far_bounds) .map(|(near_bound, far_bound)| { // Negate bounds as -z is camera forward direction. let corners = projection.get_frustum_corners(-near_bound, -far_bound); calculate_cascade( corners, directional_light_shadow_map.size as f32, world_from_light, camera_to_light_view, ) }) .collect(); cascades.cascades.insert(view_entity, view_cascades); } } } /// Returns a [`Cascade`] for the frustum defined by `frustum_corners`. /// /// The corner vertices should be specified in the following order: /// first the bottom right, top right, top left, bottom left for the near plane, then similar for the far plane. /// /// See this [reference](https://developer.download.nvidia.com/SDK/10.5/opengl/src/cascaded_shadow_maps/doc/cascaded_shadow_maps.pdf) for more details. fn calculate_cascade( frustum_corners: [Vec3A; 8], cascade_texture_size: f32, world_from_light: Mat4, light_from_camera: Mat4, ) -> Cascade { let mut min = Vec3A::splat(f32::MAX); let mut max = Vec3A::splat(f32::MIN); for corner_camera_view in frustum_corners { let corner_light_view = light_from_camera.transform_point3a(corner_camera_view); min = min.min(corner_light_view); max = max.max(corner_light_view); } // NOTE: Use the larger of the frustum slice far plane diagonal and body diagonal lengths as this // will be the maximum possible projection size. Use the ceiling to get an integer which is // very important for floating point stability later. 
It is also important that these are // calculated using the original camera space corner positions for floating point precision // as even though the lengths using corner_light_view above should be the same, precision can // introduce small but significant differences. // NOTE: The size remains the same unless the view frustum or cascade configuration is modified. let body_diagonal = (frustum_corners[0] - frustum_corners[6]).length_squared(); let far_plane_diagonal = (frustum_corners[4] - frustum_corners[6]).length_squared(); let cascade_diameter = body_diagonal.max(far_plane_diagonal).sqrt().ceil(); // NOTE: If we ensure that cascade_texture_size is a power of 2, then as we made cascade_diameter an // integer, cascade_texel_size is then an integer multiple of a power of 2 and can be // exactly represented in a floating point value. let cascade_texel_size = cascade_diameter / cascade_texture_size; // NOTE: For shadow stability it is very important that the near_plane_center is at integer // multiples of the texel size to be exactly representable in a floating point value. let near_plane_center = Vec3A::new( (0.5 * (min.x + max.x) / cascade_texel_size).floor() * cascade_texel_size, (0.5 * (min.y + max.y) / cascade_texel_size).floor() * cascade_texel_size, // NOTE: max.z is the near plane for right-handed y-up max.z, ); // It is critical for `cascade_from_world` to be stable. So rather than forming `world_from_cascade` // and inverting it, which risks instability due to numerical precision, we directly form // `cascade_from_world` as the reference material suggests. 
let world_from_light_transpose = world_from_light.transpose(); let cascade_from_world = Mat4::from_cols( world_from_light_transpose.x_axis, world_from_light_transpose.y_axis, world_from_light_transpose.z_axis, (-near_plane_center).extend(1.0), ); let world_from_cascade = Mat4::from_cols( world_from_light.x_axis, world_from_light.y_axis, world_from_light.z_axis, world_from_light * near_plane_center.extend(1.0), ); // Right-handed orthographic projection, centered at `near_plane_center`. // NOTE: This is different from the reference material, as we use reverse Z. let r = (max.z - min.z).recip(); let clip_from_cascade = Mat4::from_cols( Vec4::new(2.0 / cascade_diameter, 0.0, 0.0, 0.0), Vec4::new(0.0, 2.0 / cascade_diameter, 0.0, 0.0), Vec4::new(0.0, 0.0, r, 0.0), Vec4::new(0.0, 0.0, 1.0, 1.0), ); let clip_from_world = clip_from_cascade * cascade_from_world; Cascade { world_from_cascade, clip_from_cascade, clip_from_world, texel_size: cascade_texel_size, } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/spot_light.rs
crates/bevy_light/src/spot_light.rs
use bevy_asset::Handle; use bevy_camera::{ primitives::Frustum, visibility::{self, Visibility, VisibilityClass, VisibleMeshEntities}, }; use bevy_color::Color; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_math::{Affine3A, Dir3, Mat3, Mat4, Vec3}; use bevy_reflect::prelude::*; use bevy_transform::components::{GlobalTransform, Transform}; use crate::cluster::{ClusterVisibilityClass, GlobalVisibleClusterableObjects}; /// A light that emits light in a given direction from a central point. /// /// Behaves like a point light in a perfectly absorbent housing that /// shines light only in a given direction. The direction is taken from /// the transform, and can be specified with [`Transform::looking_at`](Transform::looking_at). /// /// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`](`crate::DirectionalLightShadowMap`) resource. #[derive(Component, Debug, Clone, Copy, Reflect)] #[reflect(Component, Default, Debug, Clone)] #[require(Frustum, VisibleMeshEntities, Transform, Visibility, VisibilityClass)] #[component(on_add = visibility::add_visibility_class::<ClusterVisibilityClass>)] pub struct SpotLight { /// The color of the light. /// /// By default, this is white. pub color: Color, /// Luminous power in lumens, representing the amount of light emitted by this source in all directions. pub intensity: f32, /// Range in meters that this light illuminates. /// /// Note that this value affects resolution of the shadow maps; generally, the /// higher you set it, the lower-resolution your shadow maps will be. /// Consequently, you should set this value to be only the size that you need. pub range: f32, /// Simulates a light source coming from a spherical volume with the given /// radius. /// /// This affects the size of specular highlights created by this light, as /// well as the soft shadow penumbra size. 
Because of this, large values may /// not produce the intended result -- for example, light radius does not /// affect shadow softness or diffuse lighting. pub radius: f32, /// Whether this light casts shadows. /// /// Note that shadows are rather expensive and become more so with every /// light that casts them. In general, it's best to aggressively limit the /// number of lights with shadows enabled to one or two at most. pub shadows_enabled: bool, /// Whether soft shadows are enabled. /// /// Soft shadows, also known as *percentage-closer soft shadows* or PCSS, /// cause shadows to become blurrier (i.e. their penumbra increases in /// radius) as they extend away from objects. The blurriness of the shadow /// depends on the [`SpotLight::radius`] of the light; larger lights result in larger /// penumbras and therefore blurrier shadows. /// /// Currently, soft shadows are rather noisy if not using the temporal mode. /// If you enable soft shadows, consider choosing /// [`ShadowFilteringMethod::Temporal`] and enabling temporal antialiasing /// (TAA) to smooth the noise out over time. /// /// Note that soft shadows are significantly more expensive to render than /// hard shadows. /// /// [`ShadowFilteringMethod::Temporal`]: crate::ShadowFilteringMethod::Temporal #[cfg(feature = "experimental_pbr_pcss")] pub soft_shadows_enabled: bool, /// Whether this spot light contributes diffuse lighting to meshes with /// lightmaps. /// /// Set this to false if your lightmap baking tool bakes the direct diffuse /// light from this directional light into the lightmaps in order to avoid /// counting the radiance from this light twice. Note that the specular /// portion of the light is always considered, because Bevy currently has no /// means to bake specular light. /// /// By default, this is set to true. pub affects_lightmapped_mesh_diffuse: bool, /// A value that adjusts the tradeoff between self-shadowing artifacts and /// proximity of shadows to their casters. 
/// /// This value frequently must be tuned to the specific scene; this is /// normal and a well-known part of the shadow mapping workflow. If set too /// low, unsightly shadow patterns appear on objects not in shadow as /// objects incorrectly cast shadows on themselves, known as *shadow acne*. /// If set too high, shadows detach from the objects casting them and seem /// to "fly" off the objects, known as *Peter Panning*. pub shadow_depth_bias: f32, /// A bias applied along the direction of the fragment's surface normal. It is scaled to the /// shadow map's texel size so that it can be small close to the camera and gets larger further /// away. pub shadow_normal_bias: f32, /// The distance from the light to the near Z plane in the shadow map. /// /// Objects closer than this distance to the light won't cast shadows. /// Setting this higher increases the shadow map's precision. /// /// This only has an effect if shadows are enabled. pub shadow_map_near_z: f32, /// Angle defining the distance from the spot light direction to the outer limit /// of the light's cone of effect. /// `outer_angle` should be < `PI / 2.0`. /// `PI / 2.0` defines a hemispherical spot light, but shadows become very blocky as the angle /// approaches this limit. pub outer_angle: f32, /// Angle defining the distance from the spot light direction to the inner limit /// of the light's cone of effect. /// Light is attenuated from `inner_angle` to `outer_angle` to give a smooth falloff. /// `inner_angle` should be <= `outer_angle` pub inner_angle: f32, } impl SpotLight { pub const DEFAULT_SHADOW_DEPTH_BIAS: f32 = 0.02; pub const DEFAULT_SHADOW_NORMAL_BIAS: f32 = 1.8; pub const DEFAULT_SHADOW_MAP_NEAR_Z: f32 = 0.1; } impl Default for SpotLight { fn default() -> Self { // a quarter arc attenuating from the center Self { color: Color::WHITE, // 1,000,000 lumens is a very large "cinema light" capable of registering brightly at Bevy's // default "very overcast day" exposure level. 
For "indoor lighting" with a lower exposure, // this would be way too bright. intensity: 1_000_000.0, range: 20.0, radius: 0.0, shadows_enabled: false, affects_lightmapped_mesh_diffuse: true, shadow_depth_bias: Self::DEFAULT_SHADOW_DEPTH_BIAS, shadow_normal_bias: Self::DEFAULT_SHADOW_NORMAL_BIAS, shadow_map_near_z: Self::DEFAULT_SHADOW_MAP_NEAR_Z, inner_angle: 0.0, outer_angle: core::f32::consts::FRAC_PI_4, #[cfg(feature = "experimental_pbr_pcss")] soft_shadows_enabled: false, } } } /// Constructs a right-handed orthonormal basis from a given unit Z vector. /// /// This method of constructing a basis from a [`Vec3`] is used by [`bevy_math::Vec3::any_orthonormal_pair`] // we will also construct it in the fragment shader and need our implementations to match exactly, // so we reproduce it here to avoid a mismatch if glam changes. // See bevy_render/maths.wgsl:orthonormalize pub fn orthonormalize(z_basis: Dir3) -> Mat3 { let sign = 1f32.copysign(z_basis.z); let a = -1.0 / (sign + z_basis.z); let b = z_basis.x * z_basis.y * a; let x_basis = Vec3::new( 1.0 + sign * z_basis.x * z_basis.x * a, sign * b, -sign * z_basis.x, ); let y_basis = Vec3::new(b, sign + z_basis.y * z_basis.y * a, -z_basis.y); Mat3::from_cols(x_basis, y_basis, z_basis.into()) } /// Constructs a right-handed orthonormal basis with translation, using only the forward direction and translation of a given [`GlobalTransform`]. /// /// This is a version of [`orthonormalize`] which also includes translation. 
pub fn spot_light_world_from_view(transform: &GlobalTransform) -> Affine3A { // the matrix z_local (opposite of transform.forward()) let fwd_dir = transform.back(); let basis = orthonormalize(fwd_dir); Affine3A::from_mat3_translation(basis, transform.translation()) } pub fn spot_light_clip_from_view(angle: f32, near_z: f32) -> Mat4 { // spot light projection FOV is 2x the angle from spot light center to outer edge Mat4::perspective_infinite_reverse_rh(angle * 2.0, 1.0, near_z) } /// Add to a [`SpotLight`] to add a light texture effect. /// A texture mask is applied to the light source to modulate its intensity, /// simulating patterns like window shadows, gobo/cookie effects, or soft falloffs. #[derive(Clone, Component, Debug, Reflect)] #[reflect(Component, Debug)] #[require(SpotLight)] pub struct SpotLightTexture { /// The texture image. Only the R channel is read. /// Note the border of the image should be entirely black to avoid leaking light. pub image: Handle<Image>, } pub fn update_spot_light_frusta( global_lights: Res<GlobalVisibleClusterableObjects>, mut views: Query< (Entity, &GlobalTransform, &SpotLight, &mut Frustum), Or<(Changed<GlobalTransform>, Changed<SpotLight>)>, >, ) { for (entity, transform, spot_light, mut frustum) in &mut views { // The frusta are used for culling meshes to the light for shadow mapping // so if shadow mapping is disabled for this light, then the frusta are // not needed. // Also, if the light is not relevant for any cluster, it will not be in the // global lights set and so there is no need to update its frusta. 
if !spot_light.shadows_enabled || !global_lights.entities.contains(&entity) { continue; } // ignore scale because we don't want to effectively scale light radius and range // by applying those as a view transform to shadow map rendering of objects let view_backward = transform.back(); let spot_world_from_view = spot_light_world_from_view(transform); let spot_clip_from_view = spot_light_clip_from_view(spot_light.outer_angle, spot_light.shadow_map_near_z); let clip_from_world = spot_clip_from_view * spot_world_from_view.inverse(); *frustum = Frustum::from_clip_from_world_custom_far( &clip_from_world, &transform.translation(), &view_backward, spot_light.range, ); } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/ambient_light.rs
crates/bevy_light/src/ambient_light.rs
use bevy_camera::Camera; use bevy_color::Color; use bevy_ecs::prelude::*; use bevy_reflect::prelude::*; /// An ambient light, which lights the entire scene equally. /// /// It can be added to a camera to override [`GlobalAmbientLight`], which is the default that is otherwise used. #[derive(Component, Clone, Debug, Reflect)] #[reflect(Component, Debug, Default, Clone)] #[require(Camera)] pub struct AmbientLight { pub color: Color, /// A direct scale factor multiplied with `color` before being passed to the shader. /// /// After applying this multiplier, the resulting value should be in units of [cd/m^2]. /// /// [cd/m^2]: https://en.wikipedia.org/wiki/Candela_per_square_metre pub brightness: f32, /// Whether this ambient light has an effect on meshes with lightmaps. /// /// Set this to false if your lightmap baking tool bakes the ambient light /// into the lightmaps, to avoid rendering that light twice. /// /// By default, this is set to true. pub affects_lightmapped_meshes: bool, } impl Default for AmbientLight { fn default() -> Self { Self { color: Color::WHITE, brightness: 80.0, affects_lightmapped_meshes: true, } } } /// The global ambient light, which lights the entire scene equally. /// /// This resource is inserted by the [`LightPlugin`] and by default it is set to a low ambient light. /// Inserting an [`AmbientLight`] on a camera will override this default. /// /// # Examples /// /// Make ambient light slightly brighter: /// /// ``` /// # use bevy_ecs::system::ResMut; /// # use bevy_light::GlobalAmbientLight; /// fn setup_ambient_light(mut ambient_light: ResMut<GlobalAmbientLight>) { /// ambient_light.brightness = 100.0; /// } /// ``` /// /// [`LightPlugin`]: crate::LightPlugin #[derive(Resource, Clone, Debug, Reflect)] #[reflect(Resource, Debug, Default, Clone)] pub struct GlobalAmbientLight { pub color: Color, /// A direct scale factor multiplied with `color` before being passed to the shader. 
/// /// After applying this multiplier, the resulting value should be in units of [cd/m^2]. /// /// [cd/m^2]: https://en.wikipedia.org/wiki/Candela_per_square_metre pub brightness: f32, /// Whether this ambient light has an effect on meshes with lightmaps. /// /// Set this to false if your lightmap baking tool bakes the ambient light /// into the lightmaps, to avoid rendering that light twice. /// /// By default, this is set to true. pub affects_lightmapped_meshes: bool, } impl Default for GlobalAmbientLight { fn default() -> Self { Self { color: Color::WHITE, brightness: 80.0, affects_lightmapped_meshes: true, } } } impl GlobalAmbientLight { pub const NONE: GlobalAmbientLight = GlobalAmbientLight { color: Color::WHITE, brightness: 0.0, affects_lightmapped_meshes: true, }; }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/directional_light.rs
crates/bevy_light/src/directional_light.rs
use bevy_asset::Handle; use bevy_camera::{ primitives::{CascadesFrusta, Frustum}, visibility::{self, CascadesVisibleEntities, ViewVisibility, Visibility, VisibilityClass}, Camera, }; use bevy_color::Color; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_reflect::prelude::*; use bevy_transform::components::Transform; use tracing::warn; use super::{ cascade::CascadeShadowConfig, cluster::ClusterVisibilityClass, light_consts, Cascades, }; /// A Directional light. /// /// Directional lights don't exist in reality but they are a good /// approximation for light sources VERY far away, like the sun or /// the moon. /// /// The light shines along the forward direction of the entity's transform. With a default transform /// this would be along the negative-Z axis. /// /// Valid values for `illuminance` are: /// /// | Illuminance (lux) | Surfaces illuminated by | /// |-------------------|------------------------------------------------| /// | 0.0001 | Moonless, overcast night sky (starlight) | /// | 0.002 | Moonless clear night sky with airglow | /// | 0.05–0.3 | Full moon on a clear night | /// | 3.4 | Dark limit of civil twilight under a clear sky | /// | 20–50 | Public areas with dark surroundings | /// | 50 | Family living room lights | /// | 80 | Office building hallway/toilet lighting | /// | 100 | Very dark overcast day | /// | 150 | Train station platforms | /// | 320–500 | Office lighting | /// | 400 | Sunrise or sunset on a clear day. | /// | 1000 | Overcast day; typical TV studio lighting | /// | 10,000–25,000 | Full daylight (not direct sun) | /// | 32,000–100,000 | Direct sunlight | /// /// Source: [Wikipedia](https://en.wikipedia.org/wiki/Lux) /// /// ## Shadows /// /// To enable shadows, set the `shadows_enabled` property to `true`. /// /// Shadows are produced via [cascaded shadow maps](https://developer.download.nvidia.com/SDK/10.5/opengl/src/cascaded_shadow_maps/doc/cascaded_shadow_maps.pdf). 
/// /// To modify the cascade setup, such as the number of cascades or the maximum shadow distance, /// change the [`CascadeShadowConfig`] component of the entity with the [`DirectionalLight`]. /// /// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`] resource. #[derive(Component, Debug, Clone, Copy, Reflect)] #[reflect(Component, Default, Debug, Clone)] #[require( Cascades, CascadesFrusta, CascadeShadowConfig, CascadesVisibleEntities, Transform, Visibility, VisibilityClass )] #[component(on_add = visibility::add_visibility_class::<ClusterVisibilityClass>)] pub struct DirectionalLight { /// The color of the light. /// /// By default, this is white. pub color: Color, /// Illuminance in lux (lumens per square meter), representing the amount of /// light projected onto surfaces by this light source. Lux is used here /// instead of lumens because a directional light illuminates all surfaces /// more-or-less the same way (depending on the angle of incidence). Lumens /// can only be specified for light sources which emit light from a specific /// area. pub illuminance: f32, /// Whether this light casts shadows. /// /// Note that shadows are rather expensive and become more so with every /// light that casts them. In general, it's best to aggressively limit the /// number of lights with shadows enabled to one or two at most. pub shadows_enabled: bool, /// Whether soft shadows are enabled, and if so, the size of the light. /// /// Soft shadows, also known as *percentage-closer soft shadows* or PCSS, /// cause shadows to become blurrier (i.e. their penumbra increases in /// radius) as they extend away from objects. The blurriness of the shadow /// depends on the size of the light; larger lights result in larger /// penumbras and therefore blurrier shadows. /// /// Currently, soft shadows are rather noisy if not using the temporal mode. 
/// If you enable soft shadows, consider choosing /// [`ShadowFilteringMethod::Temporal`] and enabling temporal antialiasing /// (TAA) to smooth the noise out over time. /// /// Note that soft shadows are significantly more expensive to render than /// hard shadows. /// /// [`ShadowFilteringMethod::Temporal`]: crate::ShadowFilteringMethod::Temporal #[cfg(feature = "experimental_pbr_pcss")] pub soft_shadow_size: Option<f32>, /// Whether this directional light contributes diffuse lighting to meshes /// with lightmaps. /// /// Set this to false if your lightmap baking tool bakes the direct diffuse /// light from this directional light into the lightmaps in order to avoid /// counting the radiance from this light twice. Note that the specular /// portion of the light is always considered, because Bevy currently has no /// means to bake specular light. /// /// By default, this is set to true. pub affects_lightmapped_mesh_diffuse: bool, /// A value that adjusts the tradeoff between self-shadowing artifacts and /// proximity of shadows to their casters. /// /// This value frequently must be tuned to the specific scene; this is /// normal and a well-known part of the shadow mapping workflow. If set too /// low, unsightly shadow patterns appear on objects not in shadow as /// objects incorrectly cast shadows on themselves, known as *shadow acne*. /// If set too high, shadows detach from the objects casting them and seem /// to "fly" off the objects, known as *Peter Panning*. pub shadow_depth_bias: f32, /// A bias applied along the direction of the fragment's surface normal. It /// is scaled to the shadow map's texel size so that it is automatically /// adjusted to the orthographic projection. 
pub shadow_normal_bias: f32, } impl Default for DirectionalLight { fn default() -> Self { DirectionalLight { color: Color::WHITE, illuminance: light_consts::lux::AMBIENT_DAYLIGHT, shadows_enabled: false, shadow_depth_bias: Self::DEFAULT_SHADOW_DEPTH_BIAS, shadow_normal_bias: Self::DEFAULT_SHADOW_NORMAL_BIAS, affects_lightmapped_mesh_diffuse: true, #[cfg(feature = "experimental_pbr_pcss")] soft_shadow_size: None, } } } impl DirectionalLight { pub const DEFAULT_SHADOW_DEPTH_BIAS: f32 = 0.02; pub const DEFAULT_SHADOW_NORMAL_BIAS: f32 = 1.8; } /// Add to a [`DirectionalLight`] to add a light texture effect. /// A texture mask is applied to the light source to modulate its intensity, /// simulating patterns like window shadows, gobo/cookie effects, or soft falloffs. #[derive(Clone, Component, Debug, Reflect)] #[reflect(Component, Debug)] #[require(DirectionalLight)] pub struct DirectionalLightTexture { /// The texture image. Only the R channel is read. pub image: Handle<Image>, /// Whether to tile the image infinitely, or use only a single tile centered at the light's translation pub tiled: bool, } /// Controls the resolution of [`DirectionalLight`] and [`SpotLight`](crate::SpotLight) shadow maps. /// /// ``` /// # use bevy_app::prelude::*; /// # use bevy_light::DirectionalLightShadowMap; /// App::new() /// .insert_resource(DirectionalLightShadowMap { size: 4096 }); /// ``` #[derive(Resource, Clone, Debug, Reflect)] #[reflect(Resource, Debug, Default, Clone)] pub struct DirectionalLightShadowMap { // The width and height of each cascade. /// /// Must be a power of two to avoid unstable cascade positioning. /// /// Defaults to `2048`. 
pub size: usize, } impl Default for DirectionalLightShadowMap { fn default() -> Self { Self { size: 2048 } } } pub fn validate_shadow_map_size(mut shadow_map: ResMut<DirectionalLightShadowMap>) { if shadow_map.is_changed() && !shadow_map.size.is_power_of_two() { let new_size = shadow_map.size.next_power_of_two(); warn!("Non-power-of-two DirectionalLightShadowMap sizes are not supported, correcting {} to {new_size}", shadow_map.size); shadow_map.size = new_size; } } pub fn update_directional_light_frusta( mut views: Query< ( &Cascades, &DirectionalLight, &ViewVisibility, &mut CascadesFrusta, ), ( // Prevents this query from conflicting with camera queries. Without<Camera>, ), >, ) { for (cascades, directional_light, visibility, mut frusta) in &mut views { // The frustum is used for culling meshes to the light for shadow mapping // so if shadow mapping is disabled for this light, then the frustum is // not needed. if !directional_light.shadows_enabled || !visibility.get() { continue; } frusta.frusta = cascades .cascades .iter() .map(|(view, cascades)| { ( *view, cascades .iter() .map(|c| Frustum::from_clip_from_world(&c.clip_from_world)) .collect::<Vec<_>>(), ) }) .collect(); } } /// Add to a [`DirectionalLight`] to control rendering of the visible solar disk in the sky. /// Affects only the disk’s appearance, not the light’s illuminance or shadows. /// Requires a `bevy::pbr::Atmosphere` component on a [`Camera3d`](bevy_camera::Camera3d) to have any effect. /// /// By default, the atmosphere is rendered with [`SunDisk::EARTH`], which approximates the /// apparent size and brightness of the Sun as seen from Earth. You can also disable the sun /// disk entirely with [`SunDisk::OFF`]. /// /// In order to cause the sun to "glow" and light up the surrounding sky, enable bloom /// in your post-processing pipeline by adding a `Bloom` component to your camera. 
#[derive(Component, Clone)] #[require(DirectionalLight)] pub struct SunDisk { /// The angular size (diameter) of the sun disk in radians, as observed from the scene. pub angular_size: f32, /// Multiplier for the brightness of the sun disk. /// /// `0.0` disables the disk entirely (atmospheric scattering still occurs), /// `1.0` is the default physical intensity, and values `>1.0` overexpose it. pub intensity: f32, } impl SunDisk { /// Earth-like parameters for the sun disk. /// /// Uses the mean apparent size (~32 arcminutes) of the Sun at 1 AU distance /// with default intensity. pub const EARTH: SunDisk = SunDisk { angular_size: 0.00930842, intensity: 1.0, }; /// No visible sun disk. /// /// Keeps scattering and directional light illumination, but hides the disk itself. pub const OFF: SunDisk = SunDisk { angular_size: 0.0, intensity: 0.0, }; } impl Default for SunDisk { fn default() -> Self { Self::EARTH } } impl Default for &SunDisk { fn default() -> Self { &SunDisk::EARTH } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/probe.rs
crates/bevy_light/src/probe.rs
use bevy_asset::Handle; use bevy_camera::visibility::Visibility; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_math::{Quat, UVec2}; use bevy_reflect::prelude::*; use bevy_transform::components::Transform; /// A marker component for a light probe, which is a cuboid region that provides /// global illumination to all fragments inside it. /// /// Note that a light probe will have no effect unless the entity contains some /// kind of illumination, which can either be an [`EnvironmentMapLight`] or an /// [`IrradianceVolume`]. /// /// The light probe range is conceptually a unit cube (1×1×1) centered on the /// origin. The [`Transform`] applied to this entity can scale, rotate, or translate /// that cube so that it contains all fragments that should take this light probe into account. /// /// When multiple sources of indirect illumination can be applied to a fragment, /// the highest-quality one is chosen. Diffuse and specular illumination are /// considered separately, so, for example, Bevy may decide to sample the /// diffuse illumination from an irradiance volume and the specular illumination /// from a reflection probe. From highest priority to lowest priority, the /// ranking is as follows: /// /// | Rank | Diffuse | Specular | /// | ---- | -------------------- | -------------------- | /// | 1 | Lightmap | Lightmap | /// | 2 | Irradiance volume | Reflection probe | /// | 3 | Reflection probe | View environment map | /// | 4 | View environment map | | /// /// Note that ambient light is always added to the diffuse component and does /// not participate in the ranking. That is, ambient light is applied in /// addition to, not instead of, the light sources above. /// /// A terminology note: Unfortunately, there is little agreement across game and /// graphics engines as to what to call the various techniques that Bevy groups /// under the term *light probe*. 
In Bevy, a *light probe* is the generic term /// that encompasses both *reflection probes* and *irradiance volumes*. In /// object-oriented terms, *light probe* is the superclass, and *reflection /// probe* and *irradiance volume* are subclasses. In other engines, you may see /// the term *light probe* refer to an irradiance volume with a single voxel, or /// perhaps some other technique, while in Bevy *light probe* refers not to a /// specific technique but rather to a class of techniques. Developers familiar /// with other engines should be aware of this terminology difference. #[derive(Component, Debug, Clone, Copy, Default, Reflect)] #[reflect(Component, Default, Debug, Clone)] #[require(Transform, Visibility)] pub struct LightProbe; impl LightProbe { /// Creates a new light probe component. #[inline] pub fn new() -> Self { Self } } /// A pair of cubemap textures that represent the surroundings of a specific /// area in space. /// /// See `bevy_pbr::environment_map` for detailed information. #[derive(Clone, Component, Reflect)] #[reflect(Component, Default, Clone)] pub struct EnvironmentMapLight { /// The blurry image that represents diffuse radiance surrounding a region. pub diffuse_map: Handle<Image>, /// The typically-sharper, mipmapped image that represents specular radiance /// surrounding a region. pub specular_map: Handle<Image>, /// Scale factor applied to the diffuse and specular light generated by this component. /// /// After applying this multiplier, the resulting values should /// be in units of [cd/m^2](https://en.wikipedia.org/wiki/Candela_per_square_metre). /// /// See also <https://google.github.io/filament/Filament.html#lighting/imagebasedlights/iblunit>. pub intensity: f32, /// World space rotation applied to the environment light cubemaps. /// This is useful for users who require a different axis, such as the Z-axis, to serve /// as the vertical axis. 
pub rotation: Quat, /// Whether the light from this environment map contributes diffuse lighting /// to meshes with lightmaps. /// /// Set this to false if your lightmap baking tool bakes the diffuse light /// from this environment light into the lightmaps in order to avoid /// counting the radiance from this environment map twice. /// /// By default, this is set to true. pub affects_lightmapped_mesh_diffuse: bool, } impl Default for EnvironmentMapLight { fn default() -> Self { EnvironmentMapLight { diffuse_map: Handle::default(), specular_map: Handle::default(), intensity: 0.0, rotation: Quat::IDENTITY, affects_lightmapped_mesh_diffuse: true, } } } /// A generated environment map that is filtered at runtime. /// /// See `bevy_pbr::light_probe::generate` for detailed information. #[derive(Clone, Component, Reflect)] #[reflect(Component, Default, Clone)] pub struct GeneratedEnvironmentMapLight { /// Source cubemap to be filtered on the GPU, size must be a power of two. pub environment_map: Handle<Image>, /// Scale factor applied to the diffuse and specular light generated by this /// component. Expressed in cd/m² (candela per square meter). pub intensity: f32, /// World-space rotation applied to the cubemap. pub rotation: Quat, /// Whether this light contributes diffuse lighting to meshes that already /// have baked lightmaps. pub affects_lightmapped_mesh_diffuse: bool, } impl Default for GeneratedEnvironmentMapLight { fn default() -> Self { GeneratedEnvironmentMapLight { environment_map: Handle::default(), intensity: 0.0, rotation: Quat::IDENTITY, affects_lightmapped_mesh_diffuse: true, } } } /// Lets the atmosphere contribute environment lighting (reflections and ambient diffuse) to your scene. /// /// Attach this to a [`Camera3d`](bevy_camera::Camera3d) to light the entire view, or to a /// [`LightProbe`] to light only a specific region. 
/// Behind the scenes, this generates an environment map from the atmosphere for image-based lighting /// and inserts a corresponding [`GeneratedEnvironmentMapLight`]. /// /// For HDRI-based lighting, use a preauthored [`EnvironmentMapLight`] or filter one at runtime with /// [`GeneratedEnvironmentMapLight`]. #[derive(Component, Clone)] pub struct AtmosphereEnvironmentMapLight { /// Controls how bright the atmosphere's environment lighting is. /// Increase this value to brighten reflections and ambient diffuse lighting. /// /// The default is `1.0` so that the generated environment lighting matches /// the light intensity of the atmosphere in the scene. pub intensity: f32, /// Whether the diffuse contribution should affect meshes that already have lightmaps. pub affects_lightmapped_mesh_diffuse: bool, /// Cubemap resolution in pixels (must be a power-of-two). pub size: UVec2, } impl Default for AtmosphereEnvironmentMapLight { fn default() -> Self { Self { intensity: 1.0, affects_lightmapped_mesh_diffuse: true, size: UVec2::new(512, 512), } } } /// The component that defines an irradiance volume. /// /// See `bevy_pbr::irradiance_volume` for detailed information. /// /// This component requires the [`LightProbe`] component, and is typically used with /// [`bevy_transform::components::Transform`] to place the volume appropriately. #[derive(Clone, Reflect, Component, Debug)] #[reflect(Component, Default, Debug, Clone)] #[require(LightProbe)] pub struct IrradianceVolume { /// The 3D texture that represents the ambient cubes, encoded in the format /// described in `bevy_pbr::irradiance_volume`. pub voxels: Handle<Image>, /// Scale factor applied to the diffuse and specular light generated by this component. /// /// After applying this multiplier, the resulting values should /// be in units of [cd/m^2](https://en.wikipedia.org/wiki/Candela_per_square_metre). /// /// See also <https://google.github.io/filament/Filament.html#lighting/imagebasedlights/iblunit>. 
pub intensity: f32, /// Whether the light from this irradiance volume has an effect on meshes /// with lightmaps. /// /// Set this to false if your lightmap baking tool bakes the light from this /// irradiance volume into the lightmaps in order to avoid counting the /// irradiance twice. Frequently, applications use irradiance volumes as a /// lower-quality alternative to lightmaps for capturing indirect /// illumination on dynamic objects, and such applications will want to set /// this value to false. /// /// By default, this is set to true. pub affects_lightmapped_meshes: bool, } impl Default for IrradianceVolume { #[inline] fn default() -> Self { IrradianceVolume { voxels: Handle::default(), intensity: 0.0, affects_lightmapped_meshes: true, } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false
bevyengine/bevy
https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_light/src/point_light.rs
crates/bevy_light/src/point_light.rs
use bevy_asset::Handle; use bevy_camera::{ primitives::{CubeMapFace, CubemapFrusta, CubemapLayout, Frustum, CUBE_MAP_FACES}, visibility::{self, CubemapVisibleEntities, Visibility, VisibilityClass}, }; use bevy_color::Color; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_math::Mat4; use bevy_reflect::prelude::*; use bevy_transform::components::{GlobalTransform, Transform}; use crate::{ cluster::{ClusterVisibilityClass, GlobalVisibleClusterableObjects}, light_consts, }; /// A light that emits light in all directions from a central point. /// /// Real-world values for `intensity` (luminous power in lumens) based on the electrical power /// consumption of the type of real-world light are: /// /// | Luminous Power (lumen) (i.e. the intensity member) | Incandescent non-halogen (Watts) | Incandescent halogen (Watts) | Compact fluorescent (Watts) | LED (Watts) | /// |------|-----|----|--------|-------| /// | 200 | 25 | | 3-5 | 3 | /// | 450 | 40 | 29 | 9-11 | 5-8 | /// | 800 | 60 | | 13-15 | 8-12 | /// | 1100 | 75 | 53 | 18-20 | 10-16 | /// | 1600 | 100 | 72 | 24-28 | 14-17 | /// | 2400 | 150 | | 30-52 | 24-30 | /// | 3100 | 200 | | 49-75 | 32 | /// | 4000 | 300 | | 75-100 | 40.5 | /// /// Source: [Wikipedia](https://en.wikipedia.org/wiki/Lumen_(unit)#Lighting) /// /// ## Shadows /// /// To enable shadows, set the `shadows_enabled` property to `true`. /// /// To control the resolution of the shadow maps, use the [`PointLightShadowMap`] resource. #[derive(Component, Debug, Clone, Copy, Reflect)] #[reflect(Component, Default, Debug, Clone)] #[require( CubemapFrusta, CubemapVisibleEntities, Transform, Visibility, VisibilityClass )] #[component(on_add = visibility::add_visibility_class::<ClusterVisibilityClass>)] pub struct PointLight { /// The color of this light source. pub color: Color, /// Luminous power in lumens, representing the amount of light emitted by this source in all directions. pub intensity: f32, /// Cut-off for the light's area-of-effect. 
Fragments outside this range will not be affected by /// this light at all, so it's important to tune this together with `intensity` to prevent hard /// lighting cut-offs. pub range: f32, /// Simulates a light source coming from a spherical volume with the given /// radius. /// /// This affects the size of specular highlights created by this light, as /// well as the soft shadow penumbra size. Because of this, large values may /// not produce the intended result -- for example, light radius does not /// affect shadow softness or diffuse lighting. pub radius: f32, /// Whether this light casts shadows. pub shadows_enabled: bool, /// Whether soft shadows are enabled. /// /// Soft shadows, also known as *percentage-closer soft shadows* or PCSS, /// cause shadows to become blurrier (i.e. their penumbra increases in /// radius) as they extend away from objects. The blurriness of the shadow /// depends on the [`PointLight::radius`] of the light; larger lights result /// in larger penumbras and therefore blurrier shadows. /// /// Currently, soft shadows are rather noisy if not using the temporal mode. /// If you enable soft shadows, consider choosing /// [`ShadowFilteringMethod::Temporal`] and enabling temporal antialiasing /// (TAA) to smooth the noise out over time. /// /// Note that soft shadows are significantly more expensive to render than /// hard shadows. /// /// [`ShadowFilteringMethod::Temporal`]: crate::ShadowFilteringMethod::Temporal #[cfg(feature = "experimental_pbr_pcss")] pub soft_shadows_enabled: bool, /// Whether this point light contributes diffuse lighting to meshes with /// lightmaps. /// /// Set this to false if your lightmap baking tool bakes the direct diffuse /// light from this point light into the lightmaps in order to avoid /// counting the radiance from this light twice. Note that the specular /// portion of the light is always considered, because Bevy currently has no /// means to bake specular light. /// /// By default, this is set to true. 
pub affects_lightmapped_mesh_diffuse: bool, /// A bias used when sampling shadow maps to avoid "shadow-acne", or false shadow occlusions /// that happen as a result of shadow-map fragments not mapping 1:1 to screen-space fragments. /// Too high of a depth bias can lead to shadows detaching from their casters, or /// "peter-panning". This bias can be tuned together with `shadow_normal_bias` to correct shadow /// artifacts for a given scene. pub shadow_depth_bias: f32, /// A bias applied along the direction of the fragment's surface normal. It is scaled to the /// shadow map's texel size so that it can be small close to the camera and gets larger further /// away. pub shadow_normal_bias: f32, /// The distance from the light to near Z plane in the shadow map. /// /// Objects closer than this distance to the light won't cast shadows. /// Setting this higher increases the shadow map's precision. /// /// This only has an effect if shadows are enabled. pub shadow_map_near_z: f32, } impl Default for PointLight { fn default() -> Self { PointLight { color: Color::WHITE, intensity: light_consts::lumens::VERY_LARGE_CINEMA_LIGHT, range: 20.0, radius: 0.0, shadows_enabled: false, affects_lightmapped_mesh_diffuse: true, shadow_depth_bias: Self::DEFAULT_SHADOW_DEPTH_BIAS, shadow_normal_bias: Self::DEFAULT_SHADOW_NORMAL_BIAS, shadow_map_near_z: Self::DEFAULT_SHADOW_MAP_NEAR_Z, #[cfg(feature = "experimental_pbr_pcss")] soft_shadows_enabled: false, } } } impl PointLight { pub const DEFAULT_SHADOW_DEPTH_BIAS: f32 = 0.08; pub const DEFAULT_SHADOW_NORMAL_BIAS: f32 = 0.6; pub const DEFAULT_SHADOW_MAP_NEAR_Z: f32 = 0.1; } /// Add to a [`PointLight`] to add a light texture effect. /// A texture mask is applied to the light source to modulate its intensity, /// simulating patterns like window shadows, gobo/cookie effects, or soft falloffs. #[derive(Clone, Component, Debug, Reflect)] #[reflect(Component, Debug)] #[require(PointLight)] pub struct PointLightTexture { /// The texture image. 
Only the R channel is read. pub image: Handle<Image>, /// The cubemap layout. The image should be a packed cubemap in one of the formats described by the [`CubemapLayout`] enum. pub cubemap_layout: CubemapLayout, } /// Controls the resolution of [`PointLight`] shadow maps. /// /// ``` /// # use bevy_app::prelude::*; /// # use bevy_light::PointLightShadowMap; /// App::new() /// .insert_resource(PointLightShadowMap { size: 2048 }); /// ``` #[derive(Resource, Clone, Debug, Reflect)] #[reflect(Resource, Debug, Default, Clone)] pub struct PointLightShadowMap { /// The width and height of each of the 6 faces of the cubemap. /// /// Defaults to `1024`. pub size: usize, } impl Default for PointLightShadowMap { fn default() -> Self { Self { size: 1024 } } } // NOTE: Run this after assign_lights_to_clusters! pub fn update_point_light_frusta( global_lights: Res<GlobalVisibleClusterableObjects>, mut views: Query<(Entity, &GlobalTransform, &PointLight, &mut CubemapFrusta)>, changed_lights: Query< Entity, ( With<PointLight>, Or<(Changed<GlobalTransform>, Changed<PointLight>)>, ), >, ) { let view_rotations = CUBE_MAP_FACES .iter() .map(|CubeMapFace { target, up }| Transform::IDENTITY.looking_at(*target, *up)) .collect::<Vec<_>>(); for (entity, transform, point_light, mut cubemap_frusta) in &mut views { // If this light hasn't changed, and neither has the set of global_lights, // then we can skip this calculation. if !global_lights.is_changed() && !changed_lights.contains(entity) { continue; } // The frusta are used for culling meshes to the light for shadow mapping // so if shadow mapping is disabled for this light, then the frusta are // not needed. // Also, if the light is not relevant for any cluster, it will not be in the // global lights set and so there is no need to update its frusta. 
if !point_light.shadows_enabled || !global_lights.entities.contains(&entity) { continue; } let clip_from_view = Mat4::perspective_infinite_reverse_rh( core::f32::consts::FRAC_PI_2, 1.0, point_light.shadow_map_near_z, ); // ignore scale because we don't want to effectively scale light radius and range // by applying those as a view transform to shadow map rendering of objects // and ignore rotation because we want the shadow map projections to align with the axes let view_translation = Transform::from_translation(transform.translation()); let view_backward = transform.back(); for (view_rotation, frustum) in view_rotations.iter().zip(cubemap_frusta.iter_mut()) { let world_from_view = view_translation * *view_rotation; let clip_from_world = clip_from_view * world_from_view.compute_affine().inverse(); *frustum = Frustum::from_clip_from_world_custom_far( &clip_from_world, &transform.translation(), &view_backward, point_light.range, ); } } }
rust
Apache-2.0
51a6fedb06a022ab5d39e099413caa882e1b022d
2026-01-04T15:31:59.438636Z
false