repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui/src/widget/text.rs | crates/bevy_ui/src/widget/text.rs | use crate::{
ComputedNode, ComputedUiRenderTargetInfo, ContentSize, FixedMeasure, Measure, MeasureArgs,
Node, NodeMeasure,
};
use bevy_asset::Assets;
use bevy_color::Color;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
change_detection::DetectChanges,
component::Component,
entity::Entity,
query::With,
reflect::ReflectComponent,
system::{Query, Res, ResMut},
world::Ref,
};
use bevy_image::prelude::*;
use bevy_math::Vec2;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_text::{
ComputedTextBlock, CosmicFontSystem, Font, FontAtlasSet, FontHinting, LineBreak, LineHeight,
SwashCache, TextBounds, TextColor, TextError, TextFont, TextLayout, TextLayoutInfo,
TextMeasureInfo, TextPipeline, TextReader, TextRoot, TextSpanAccess, TextWriter,
};
use taffy::style::AvailableSpace;
use tracing::error;
/// UI text system flags.
///
/// Used internally by [`measure_text_system`] and [`text_system`] to schedule text for processing.
#[derive(Component, Debug, Clone, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct TextNodeFlags {
    /// If set then a new measure function for the text node will be created
    /// (by `measure_text_system`).
    needs_measure_fn: bool,
    /// If set then the text will be recomputed (by `text_system`).
    needs_recompute: bool,
}
impl Default for TextNodeFlags {
fn default() -> Self {
Self {
needs_measure_fn: true,
needs_recompute: true,
}
}
}
/// The top-level UI text component.
///
/// Adding [`Text`] to an entity will pull in required components for setting up a UI text node.
///
/// The string in this component is the first 'text span' in a hierarchy of text spans that are collected into
/// a [`ComputedTextBlock`]. See [`TextSpan`](bevy_text::TextSpan) for the component used by children of entities with [`Text`].
///
/// Note that [`Transform`](bevy_transform::components::Transform) on this entity is managed automatically by the UI layout system.
///
///
/// ```
/// # use bevy_asset::Handle;
/// # use bevy_color::Color;
/// # use bevy_color::palettes::basic::BLUE;
/// # use bevy_ecs::world::World;
/// # use bevy_text::{Font, Justify, TextLayout, TextFont, TextColor, TextSpan};
/// # use bevy_ui::prelude::Text;
/// #
/// # let font_handle: Handle<Font> = Default::default();
/// # let mut world = World::default();
/// #
/// // Basic usage.
/// world.spawn(Text::new("hello world!"));
///
/// // With non-default style.
/// world.spawn((
/// Text::new("hello world!"),
/// TextFont {
/// font: font_handle.clone().into(),
/// font_size: 60.0,
/// ..Default::default()
/// },
/// TextColor(BLUE.into()),
/// ));
///
/// // With text justification.
/// world.spawn((
/// Text::new("hello world\nand bevy!"),
/// TextLayout::new_with_justify(Justify::Center)
/// ));
///
/// // With spans
/// world.spawn(Text::new("hello ")).with_children(|parent| {
/// parent.spawn(TextSpan::new("world"));
/// parent.spawn((TextSpan::new("!"), TextColor(BLUE.into())));
/// });
/// ```
#[derive(Component, Debug, Default, Clone, Deref, DerefMut, Reflect, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
#[require(
    Node,
    TextLayout,
    TextFont,
    TextColor,
    LineHeight,
    TextNodeFlags,
    ContentSize,
    // Enable hinting as UI text is normally pixel-aligned.
    FontHinting::Enabled
)]
pub struct Text(/** The string for the root text span of this text block. */ pub String);
impl Text {
    /// Creates a [`Text`] component from anything convertible into a [`String`].
    pub fn new(text: impl Into<String>) -> Self {
        let contents: String = text.into();
        Self(contents)
    }
}
impl TextRoot for Text {}
impl TextSpanAccess for Text {
    /// Returns the span's text as a string slice.
    fn read_span(&self) -> &str {
        self.as_str()
    }
    /// Returns a mutable reference to the span's backing [`String`].
    fn write_span(&mut self) -> &mut String {
        &mut *self
    }
}
impl From<&str> for Text {
    /// Builds a [`Text`] by copying the borrowed string slice.
    fn from(value: &str) -> Self {
        Self(value.to_owned())
    }
}
impl From<String> for Text {
fn from(value: String) -> Self {
Self(value)
}
}
/// Adds a shadow behind text
///
/// Use the `Text2dShadow` component for `Text2d` shadows
#[derive(Component, Copy, Clone, Debug, PartialEq, Reflect)]
#[reflect(Component, Default, Debug, Clone, PartialEq)]
pub struct TextShadow {
    /// Shadow displacement in logical pixels.
    /// With a value of zero the shadow will be hidden directly behind the text.
    pub offset: Vec2,
    /// Color of the shadow.
    pub color: Color,
}
impl Default for TextShadow {
fn default() -> Self {
Self {
offset: Vec2::splat(4.),
color: Color::linear_rgba(0., 0., 0., 0.75),
}
}
}
/// UI alias for [`TextReader`].
pub type TextUiReader<'w, 's> = TextReader<'w, 's, Text>;
/// UI alias for [`TextWriter`].
pub type TextUiWriter<'w, 's> = TextWriter<'w, 's, Text>;
/// Text measurement for UI layout. See [`NodeMeasure`].
pub struct TextMeasure {
    /// Precomputed measurement data, including the min/max content sizes
    /// used to clamp the measured width and height.
    pub info: TextMeasureInfo,
}
impl TextMeasure {
    /// Checks if the cosmic text buffer is needed for measuring the text:
    /// it is required exactly when no explicit height was supplied and the
    /// available width is a definite value.
    #[inline]
    pub const fn needs_buffer(height: Option<f32>, available_width: AvailableSpace) -> bool {
        matches!(
            (height, available_width),
            (None, AvailableSpace::Definite(_))
        )
    }
}
impl Measure for TextMeasure {
    /// Computes the size of the text node under the given constraints.
    ///
    /// Width: the explicit `width` if provided; otherwise derived from
    /// `available_width`, clamped to the measured min/max content widths.
    /// Height: the explicit `height` if provided; otherwise computed by
    /// re-laying-out the text buffer (definite width) or taken from the
    /// precomputed min/max content sizes.
    fn measure(&mut self, measure_args: MeasureArgs, _style: &taffy::Style) -> Vec2 {
        let MeasureArgs {
            width,
            height,
            available_width,
            buffer,
            font_system,
            ..
        } = measure_args;
        let x = width.unwrap_or_else(|| match available_width {
            AvailableSpace::Definite(x) => {
                // It is possible for the "min content width" to be larger than
                // the "max content width" when soft-wrapping right-aligned text
                // and possibly other situations.
                x.max(self.info.min.x).min(self.info.max.x)
            }
            AvailableSpace::MinContent => self.info.min.x,
            AvailableSpace::MaxContent => self.info.max.x,
        });
        height
            .map_or_else(
                || match available_width {
                    AvailableSpace::Definite(_) => {
                        if let Some(buffer) = buffer {
                            // Re-layout at the chosen width to obtain the wrapped height.
                            self.info.compute_size(
                                TextBounds::new_horizontal(x),
                                buffer,
                                font_system,
                            )
                        } else {
                            // `needs_buffer` promised a buffer for this case; log and
                            // fall back to zero size rather than panicking.
                            error!("text measure failed, buffer is missing");
                            Vec2::default()
                        }
                    }
                    AvailableSpace::MinContent => Vec2::new(x, self.info.min.y),
                    AvailableSpace::MaxContent => Vec2::new(x, self.info.max.y),
                },
                |y| Vec2::new(x, y),
            )
            // Round the measured size up to whole pixels.
            .ceil()
    }
}
/// Generates a new [`Measure`] for a text node on changes to its [`Text`] component.
///
/// A `Measure` is used by the UI's layout algorithm to determine the appropriate amount of space
/// to provide for the text given the fonts, the text itself and the constraints of the layout.
///
/// * Measures are regenerated on changes to either [`ComputedTextBlock`] or [`ComputedUiRenderTargetInfo`].
/// * Changes that only modify the colors of a `Text` do not require a new `Measure`. This system
/// is only able to detect that a `Text` component has changed and will regenerate the `Measure` on
/// color changes. This can be expensive, particularly for large blocks of text, and the [`bypass_change_detection`](bevy_ecs::change_detection::DetectChangesMut::bypass_change_detection)
/// method should be called when only changing the `Text`'s colors.
pub fn measure_text_system(
    fonts: Res<Assets<Font>>,
    mut text_query: Query<
        (
            Entity,
            Ref<TextLayout>,
            &mut ContentSize,
            &mut TextNodeFlags,
            &mut ComputedTextBlock,
            Ref<ComputedUiRenderTargetInfo>,
            &ComputedNode,
            Ref<FontHinting>,
        ),
        With<Node>,
    >,
    mut text_reader: TextUiReader,
    mut text_pipeline: ResMut<TextPipeline>,
    mut font_system: ResMut<CosmicFontSystem>,
) {
    for (
        entity,
        block,
        mut content_size,
        mut text_flags,
        mut computed,
        computed_target,
        computed_node,
        hinting,
    ) in &mut text_query
    {
        // Note: the ComputedTextBlock::needs_rerender bool is cleared in create_text_measure().
        //
        // Skip nodes whose measure inputs are unchanged. A new measure is needed when:
        // * the target's scale factor differs from the node's current one
        //   (1e-5 epsilon to ignore tiny scale factor float errors),
        // * the computed text needs a rerender,
        // * a new measure function was requested via `TextNodeFlags`,
        // * the `ContentSize` component was just added, or
        // * the font hinting setting changed.
        if !(1e-5
            < (computed_target.scale_factor() - computed_node.inverse_scale_factor.recip()).abs()
            || computed.needs_rerender()
            || text_flags.needs_measure_fn
            || content_size.is_added()
            || hinting.is_changed())
        {
            continue;
        }
        match text_pipeline.create_text_measure(
            entity,
            fonts.as_ref(),
            text_reader.iter(entity),
            computed_target.scale_factor.into(),
            &block,
            computed.as_mut(),
            &mut font_system,
            *hinting,
        ) {
            Ok(measure) => {
                if block.linebreak == LineBreak::NoWrap {
                    // Unwrappable text always takes its max content size.
                    content_size.set(NodeMeasure::Fixed(FixedMeasure { size: measure.max }));
                } else {
                    content_size.set(NodeMeasure::Text(TextMeasure { info: measure }));
                }
                // Text measure func created successfully, so set `TextNodeFlags` to schedule a recompute
                text_flags.needs_measure_fn = false;
                text_flags.needs_recompute = true;
            }
            Err(TextError::NoSuchFont) => {
                // Font may not have loaded yet; try again next frame.
                text_flags.needs_measure_fn = true;
            }
            Err(
                e @ (TextError::FailedToAddGlyph(_)
                | TextError::FailedToGetGlyphImage(_)
                | TextError::MissingAtlasLayout
                | TextError::MissingAtlasTexture
                | TextError::InconsistentAtlasState),
            ) => {
                // These indicate a broken invariant in the text pipeline, not a
                // recoverable condition.
                panic!("Fatal error when processing text: {e}.");
            }
        };
    }
}
/// Updates the layout and size information for a UI text node on changes to the size value of its [`Node`] component,
/// or when the `needs_recompute` field of [`TextNodeFlags`] is set to true.
/// This information is computed by the [`TextPipeline`] and then stored in [`TextLayoutInfo`].
///
/// ## World Resources
///
/// [`ResMut<Assets<Image>>`](Assets<Image>) -- This system only adds new [`Image`] assets.
/// It does not modify or observe existing ones. The exception is when adding new glyphs to a [`bevy_text::FontAtlas`].
pub fn text_system(
    mut textures: ResMut<Assets<Image>>,
    mut texture_atlases: ResMut<Assets<TextureAtlasLayout>>,
    mut font_atlas_set: ResMut<FontAtlasSet>,
    mut text_pipeline: ResMut<TextPipeline>,
    mut text_query: Query<(
        Ref<ComputedNode>,
        &TextLayout,
        &mut TextLayoutInfo,
        &mut TextNodeFlags,
        &mut ComputedTextBlock,
    )>,
    mut font_system: ResMut<CosmicFontSystem>,
    mut swash_cache: ResMut<SwashCache>,
) {
    for (node, block, mut text_layout_info, mut text_flags, mut computed) in &mut text_query {
        // Only relayout when the node changed or a recompute was scheduled.
        if node.is_changed() || text_flags.needs_recompute {
            // Skip the text node if it is waiting for a new measure func
            if text_flags.needs_measure_fn {
                continue;
            }
            let physical_node_size = if block.linebreak == LineBreak::NoWrap {
                // With `NoWrap` set, no constraints are placed on the width of the text.
                TextBounds::UNBOUNDED
            } else {
                // `scale_factor` is already multiplied by `UiScale`
                TextBounds::new(node.unrounded_size.x, node.unrounded_size.y)
            };
            match text_pipeline.update_text_layout_info(
                &mut text_layout_info,
                &mut font_atlas_set,
                &mut texture_atlases,
                &mut textures,
                &mut computed,
                &mut font_system,
                &mut swash_cache,
                physical_node_size,
                block.justify,
            ) {
                Err(TextError::NoSuchFont) => {
                    // There was an error processing the text layout, try again next frame
                    text_flags.needs_recompute = true;
                }
                Err(
                    e @ (TextError::FailedToAddGlyph(_)
                    | TextError::FailedToGetGlyphImage(_)
                    | TextError::MissingAtlasLayout
                    | TextError::MissingAtlasTexture
                    | TextError::InconsistentAtlasState),
                ) => {
                    // Broken text-pipeline invariant; not recoverable.
                    panic!("Fatal error when processing text: {e}.");
                }
                Ok(()) => {
                    // Record the scale factor and convert the laid-out size
                    // back from physical to logical pixels.
                    text_layout_info.scale_factor = node.inverse_scale_factor().recip();
                    text_layout_info.size *= node.inverse_scale_factor();
                    text_flags.needs_recompute = false;
                }
            }
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui/src/widget/mod.rs | crates/bevy_ui/src/widget/mod.rs | //! This module contains the basic building blocks of Bevy's UI
mod button;
mod image;
mod label;
mod text;
mod viewport;
pub use button::*;
pub use image::*;
pub use label::*;
pub use text::*;
pub use viewport::*;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui/src/widget/label.rs | crates/bevy_ui/src/widget/label.rs | use bevy_ecs::{prelude::Component, reflect::ReflectComponent};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
/// Marker component identifying a UI node as a label (carries no data).
#[derive(Component, Debug, Default, Clone, Copy, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct Label;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/lib.rs | crates/bevy_ui_render/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevyengine.org/assets/icon.png",
html_favicon_url = "https://bevyengine.org/assets/icon.png"
)]
//! Provides rendering functionality for `bevy_ui`.
pub mod box_shadow;
mod color_space;
mod gradient;
mod pipeline;
mod render_pass;
pub mod ui_material;
mod ui_material_pipeline;
pub mod ui_texture_slice_pipeline;
#[cfg(feature = "bevy_ui_debug")]
mod debug_overlay;
use bevy_camera::visibility::InheritedVisibility;
use bevy_camera::{Camera, Camera2d, Camera3d, RenderTarget};
use bevy_reflect::prelude::ReflectDefault;
use bevy_reflect::Reflect;
use bevy_shader::load_shader_library;
use bevy_sprite_render::SpriteAssetEvents;
use bevy_ui::widget::{ImageNode, TextShadow, ViewportNode};
use bevy_ui::{
BackgroundColor, BorderColor, CalculatedClip, ComputedNode, ComputedUiTargetCamera, Display,
Node, Outline, ResolvedBorderRadius, UiGlobalTransform,
};
use bevy_app::prelude::*;
use bevy_asset::{AssetEvent, AssetId, Assets};
use bevy_color::{Alpha, ColorToComponents, LinearRgba};
use bevy_core_pipeline::core_2d::graph::{Core2d, Node2d};
use bevy_core_pipeline::core_3d::graph::{Core3d, Node3d};
use bevy_ecs::prelude::*;
use bevy_ecs::system::SystemParam;
use bevy_image::{prelude::*, TRANSPARENT_IMAGE_HANDLE};
use bevy_math::{Affine2, FloatOrd, Mat4, Rect, UVec4, Vec2};
use bevy_render::{
render_asset::RenderAssets,
render_graph::{Node as RenderGraphNode, NodeRunError, RenderGraph, RenderGraphContext},
render_phase::{
sort_phase_system, AddRenderCommand, DrawFunctions, PhaseItem, PhaseItemExtraIndex,
ViewSortedRenderPhases,
},
render_resource::*,
renderer::{RenderContext, RenderDevice, RenderQueue},
sync_world::{MainEntity, RenderEntity, TemporaryRenderEntity},
texture::GpuImage,
view::{ExtractedView, Hdr, RetainedViewEntity, ViewUniforms},
Extract, ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_sprite::BorderRect;
#[cfg(feature = "bevy_ui_debug")]
pub use debug_overlay::UiDebugOptions;
use color_space::ColorSpacePlugin;
use gradient::GradientPlugin;
use bevy_platform::collections::{HashMap, HashSet};
use bevy_text::{
ComputedTextBlock, PositionedGlyph, Strikethrough, StrikethroughColor, TextBackgroundColor,
TextColor, TextLayoutInfo, Underline, UnderlineColor,
};
use bevy_transform::components::GlobalTransform;
use box_shadow::BoxShadowPlugin;
use bytemuck::{Pod, Zeroable};
use core::ops::Range;
use graph::{NodeUi, SubGraphUi};
pub use pipeline::*;
pub use render_pass::*;
pub use ui_material_pipeline::*;
use ui_texture_slice_pipeline::UiTextureSlicerPlugin;
/// Render-graph labels for the UI rendering sub-graph.
pub mod graph {
    use bevy_render::render_graph::{RenderLabel, RenderSubGraph};
    /// The UI rendering sub-graph, run on the UI view of each camera.
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderSubGraph)]
    pub struct SubGraphUi;
    /// Labels for the nodes of [`SubGraphUi`].
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
    pub enum NodeUi {
        /// The main UI pass node.
        UiPass,
    }
}
pub mod prelude {
#[cfg(feature = "bevy_ui_debug")]
pub use crate::debug_overlay::UiDebugOptions;
pub use crate::{
ui_material::*, ui_material_pipeline::UiMaterialPlugin, BoxShadowSamples, UiAntiAlias,
};
}
/// Local Z offsets of "extracted nodes" for a given entity. These exist to allow rendering multiple "extracted nodes"
/// for a given source entity (ex: render both a background color _and_ a custom material for a given node).
///
/// When possible these offsets should be defined in _this_ module to ensure z-index coordination across contexts.
/// When this is _not_ possible, pick a suitably unique index unlikely to clash with other things (ex: `0.1826823` not `0.1`).
///
/// Offsets should be unique for a given node entity to avoid z fighting.
/// These should pretty much _always_ be larger than -0.5 and smaller than 0.5 to avoid clipping into nodes
/// above / below the current node in the stack.
///
/// A z-index of 0.0 is the baseline, which is used as the primary "background color" of the node.
///
/// Note that nodes "stack" on each other, so a negative offset on the node above could clip _into_
/// a positive offset on a node below.
pub mod stack_z_offsets {
    /// Box shadows; negative so they render behind their node's background.
    pub const BOX_SHADOW: f32 = -0.1;
    /// Baseline: the node's primary background color.
    pub const BACKGROUND_COLOR: f32 = 0.0;
    /// Node borders and outlines.
    pub const BORDER: f32 = 0.01;
    /// Background gradients.
    pub const GRADIENT: f32 = 0.02;
    /// Border gradients.
    pub const BORDER_GRADIENT: f32 = 0.03;
    /// Image nodes.
    pub const IMAGE: f32 = 0.04;
    /// Custom UI materials.
    pub const MATERIAL: f32 = 0.05;
    /// Text glyphs.
    pub const TEXT: f32 = 0.06;
    /// Text strikethrough decorations, drawn over the glyphs.
    pub const TEXT_STRIKETHROUGH: f32 = 0.07;
}
/// System sets for the UI extraction systems, chained in this order in
/// [`ExtractSchedule`] (see `UiRenderPlugin::build`).
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
pub enum RenderUiSystems {
    ExtractCameraViews,
    ExtractBoxShadows,
    ExtractBackgrounds,
    ExtractImages,
    ExtractTextureSlice,
    ExtractBorders,
    ExtractViewportNodes,
    ExtractTextBackgrounds,
    ExtractTextShadows,
    ExtractText,
    ExtractDebug,
    ExtractGradient,
}
/// Marker for controlling whether UI is rendered with or without anti-aliasing
/// in a camera. By default, UI is always anti-aliased.
///
/// **Note:** This does not affect text anti-aliasing. For that, use the `font_smoothing` property of the [`TextFont`](bevy_text::TextFont) component.
///
/// ```
/// use bevy_camera::prelude::*;
/// use bevy_ecs::prelude::*;
/// use bevy_ui::prelude::*;
/// use bevy_ui_render::prelude::*;
///
/// fn spawn_camera(mut commands: Commands) {
/// commands.spawn((
/// Camera2d,
/// // This will cause all UI in this camera to be rendered without
/// // anti-aliasing
/// UiAntiAlias::Off,
/// ));
/// }
/// ```
#[derive(Component, Clone, Copy, Default, Debug, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, PartialEq, Clone)]
pub enum UiAntiAlias {
/// UI will render with anti-aliasing
#[default]
On,
/// UI will render without anti-aliasing
Off,
}
/// Number of shadow samples.
/// A larger value will result in higher quality shadows.
/// Default is 4, values higher than ~10 offer diminishing returns.
///
/// ```
/// use bevy_camera::prelude::*;
/// use bevy_ecs::prelude::*;
/// use bevy_ui::prelude::*;
/// use bevy_ui_render::prelude::*;
///
/// fn spawn_camera(mut commands: Commands) {
/// commands.spawn((
/// Camera2d,
/// BoxShadowSamples(6),
/// ));
/// }
/// ```
#[derive(Component, Clone, Copy, Debug, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, PartialEq, Clone)]
pub struct BoxShadowSamples(pub u32);
impl Default for BoxShadowSamples {
fn default() -> Self {
Self(4)
}
}
/// Plugin that sets up UI rendering: render-world resources, the extraction /
/// queue / prepare systems, and the UI render sub-graph for 2D and 3D cameras.
#[derive(Default)]
pub struct UiRenderPlugin;
impl Plugin for UiRenderPlugin {
    fn build(&self, app: &mut App) {
        load_shader_library!(app, "ui.wgsl");
        #[cfg(feature = "bevy_ui_debug")]
        app.init_resource::<UiDebugOptions>();
        // Everything below targets the render sub-app; bail out when rendering
        // is not enabled.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<SpecializedRenderPipelines<UiPipeline>>()
            .init_resource::<ImageNodeBindGroups>()
            .init_resource::<UiMeta>()
            .init_resource::<ExtractedUiNodes>()
            .allow_ambiguous_resource::<ExtractedUiNodes>()
            .init_resource::<DrawFunctions<TransparentUi>>()
            .init_resource::<ViewSortedRenderPhases<TransparentUi>>()
            .add_render_command::<TransparentUi, DrawUi>()
            // Extraction sets run strictly in this order.
            .configure_sets(
                ExtractSchedule,
                (
                    RenderUiSystems::ExtractCameraViews,
                    RenderUiSystems::ExtractBoxShadows,
                    RenderUiSystems::ExtractBackgrounds,
                    RenderUiSystems::ExtractImages,
                    RenderUiSystems::ExtractTextureSlice,
                    RenderUiSystems::ExtractBorders,
                    RenderUiSystems::ExtractTextBackgrounds,
                    RenderUiSystems::ExtractTextShadows,
                    RenderUiSystems::ExtractText,
                    RenderUiSystems::ExtractDebug,
                )
                    .chain(),
            )
            .add_systems(RenderStartup, init_ui_pipeline)
            .add_systems(
                ExtractSchedule,
                (
                    extract_ui_camera_view.in_set(RenderUiSystems::ExtractCameraViews),
                    extract_uinode_background_colors.in_set(RenderUiSystems::ExtractBackgrounds),
                    extract_uinode_images.in_set(RenderUiSystems::ExtractImages),
                    extract_uinode_borders.in_set(RenderUiSystems::ExtractBorders),
                    extract_viewport_nodes.in_set(RenderUiSystems::ExtractViewportNodes),
                    extract_text_decorations.in_set(RenderUiSystems::ExtractTextBackgrounds),
                    extract_text_shadows.in_set(RenderUiSystems::ExtractTextShadows),
                    extract_text_sections.in_set(RenderUiSystems::ExtractText),
                    #[cfg(feature = "bevy_ui_debug")]
                    debug_overlay::extract_debug_overlay.in_set(RenderUiSystems::ExtractDebug),
                ),
            )
            .add_systems(
                Render,
                (
                    queue_uinodes.in_set(RenderSystems::Queue),
                    sort_phase_system::<TransparentUi>.in_set(RenderSystems::PhaseSort),
                    prepare_uinodes.in_set(RenderSystems::PrepareBindGroups),
                ),
            );
        // Render graph: attach the UI sub-graph and the node that runs it to
        // both the 2D and 3D core graphs, after the main passes and before
        // upscaling.
        render_app
            .world_mut()
            .resource_scope(|world, mut graph: Mut<RenderGraph>| {
                if let Some(graph_2d) = graph.get_sub_graph_mut(Core2d) {
                    let ui_graph_2d = new_ui_graph(world);
                    graph_2d.add_sub_graph(SubGraphUi, ui_graph_2d);
                    graph_2d.add_node(NodeUi::UiPass, RunUiSubgraphOnUiViewNode);
                    graph_2d.add_node_edge(Node2d::EndMainPass, NodeUi::UiPass);
                    graph_2d.add_node_edge(Node2d::EndMainPassPostProcessing, NodeUi::UiPass);
                    graph_2d.add_node_edge(NodeUi::UiPass, Node2d::Upscaling);
                }
                if let Some(graph_3d) = graph.get_sub_graph_mut(Core3d) {
                    let ui_graph_3d = new_ui_graph(world);
                    graph_3d.add_sub_graph(SubGraphUi, ui_graph_3d);
                    graph_3d.add_node(NodeUi::UiPass, RunUiSubgraphOnUiViewNode);
                    graph_3d.add_node_edge(Node3d::EndMainPass, NodeUi::UiPass);
                    graph_3d.add_node_edge(Node3d::EndMainPassPostProcessing, NodeUi::UiPass);
                    graph_3d.add_node_edge(NodeUi::UiPass, Node3d::Upscaling);
                }
            });
        app.add_plugins(UiTextureSlicerPlugin);
        app.add_plugins(ColorSpacePlugin);
        app.add_plugins(GradientPlugin);
        app.add_plugins(BoxShadowPlugin);
    }
}
/// Builds a fresh UI render sub-graph containing the single UI pass node.
fn new_ui_graph(world: &mut World) -> RenderGraph {
    let mut graph = RenderGraph::default();
    graph.add_node(NodeUi::UiPass, UiPassNode::new(world));
    graph
}
/// System parameter for resolving main-world camera entities to their
/// render-world counterparts.
#[derive(SystemParam)]
pub struct UiCameraMap<'w, 's> {
    /// Query yielding the [`RenderEntity`] for a main-world entity.
    mapping: Query<'w, 's, RenderEntity>,
}
impl<'w, 's> UiCameraMap<'w, 's> {
    /// Creates a [`UiCameraMapper`] for performing repeated camera-to-render-entity lookups.
    ///
    /// The last successful mapping is cached to avoid redundant queries.
    pub fn get_mapper(&'w self) -> UiCameraMapper<'w, 's> {
        UiCameraMapper {
            mapping: &self.mapping,
            // Placeholders guarantee the first `map` call misses the cache.
            camera_entity: Entity::PLACEHOLDER,
            render_entity: Entity::PLACEHOLDER,
        }
    }
}
/// Helper for mapping UI target camera entities to their corresponding render entities,
/// with caching to avoid repeated lookups for the same camera.
pub struct UiCameraMapper<'w, 's> {
    /// Query used to resolve a main-world camera entity to its render entity.
    mapping: &'w Query<'w, 's, RenderEntity>,
    /// Cached camera entity from the last successful `map` call.
    camera_entity: Entity,
    /// Cached render entity corresponding to `camera_entity`, from the last
    /// successful `map` call.
    render_entity: Entity,
}
impl<'w, 's> UiCameraMapper<'w, 's> {
    /// Returns the render entity corresponding to the given [`ComputedUiTargetCamera`]'s camera,
    /// or `None` if the target has no camera or no render entity exists for it.
    pub fn map(&mut self, computed_target: &ComputedUiTargetCamera) -> Option<Entity> {
        let camera = computed_target.get()?;
        if camera != self.camera_entity {
            // Cache miss: look up the render entity and remember the pair.
            self.render_entity = self.mapping.get(camera).ok()?;
            self.camera_entity = camera;
        }
        Some(self.render_entity)
    }
    /// Returns the cached camera entity from the last successful `map` call.
    pub fn current_camera(&self) -> Entity {
        self.camera_entity
    }
}
/// A UI node extracted from the main world for rendering.
pub struct ExtractedUiNode {
    /// Sort key: the node's stack index plus a [`stack_z_offsets`] offset.
    pub z_order: f32,
    /// Texture for this node; `AssetId::default()` when untextured.
    pub image: AssetId<Image>,
    /// Clip rectangle taken from `CalculatedClip`, if any.
    pub clip: Option<Rect>,
    /// Render world entity of the extracted camera corresponding to this node's target camera.
    pub extracted_camera_entity: Entity,
    /// The node-specific payload (rect/border or glyph run).
    pub item: ExtractedUiItem,
    /// The main-world entity this node was extracted from.
    pub main_entity: MainEntity,
    /// Temporary render-world entity spawned for this extracted node.
    pub render_entity: Entity,
    /// The node's global transform.
    pub transform: Affine2,
}
/// The type of UI node.
/// This is used to determine how to render the UI node.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum NodeType {
Rect,
Border(u32), // shader flags
}
/// Payload of an [`ExtractedUiNode`]: either a (possibly bordered/textured)
/// rect, or a run of text glyphs.
pub enum ExtractedUiItem {
    Node {
        /// Color of the rect or border.
        color: LinearRgba,
        /// The rect to draw. For images with an atlas/sub-rect this is the
        /// scaled sub-rect (see `extract_uinode_images`).
        rect: Rect,
        /// Scale relating the node size to an atlas/image sub-rect size;
        /// `None` when the full node rect is used.
        atlas_scaling: Option<Vec2>,
        /// Whether the image is flipped horizontally.
        flip_x: bool,
        /// Whether the image is flipped vertically.
        flip_y: bool,
        /// Border radius of the UI node.
        /// Ordering: top left, top right, bottom right, bottom left.
        border_radius: ResolvedBorderRadius,
        /// Border thickness of the UI node.
        /// Ordering: left, top, right, bottom.
        border: BorderRect,
        /// Whether to draw a plain rect or a border (with edge shader flags).
        node_type: NodeType,
    },
    /// A contiguous sequence of text glyphs from the same section
    Glyphs {
        /// Indices into [`ExtractedUiNodes::glyphs`]
        range: Range<usize>,
    },
}
/// A single glyph extracted for UI text rendering.
pub struct ExtractedGlyph {
    /// Color the glyph is tinted with.
    pub color: LinearRgba,
    /// Translation applied to the glyph's rect.
    pub translation: Vec2,
    /// The glyph's rect — presumably its region in the font atlas texture;
    /// NOTE(review): verify against the text extraction systems.
    pub rect: Rect,
}
/// Collection of UI nodes and glyphs extracted from the main world.
#[derive(Resource, Default)]
pub struct ExtractedUiNodes {
    /// The extracted UI nodes.
    pub uinodes: Vec<ExtractedUiNode>,
    /// Extracted glyphs, referenced by `ExtractedUiItem::Glyphs` ranges.
    pub glyphs: Vec<ExtractedGlyph>,
}
impl ExtractedUiNodes {
    /// Empties both buffers, keeping their allocations for reuse.
    pub fn clear(&mut self) {
        self.glyphs.clear();
        self.uinodes.clear();
    }
}
/// A [`RenderGraphNode`] that executes the UI rendering subgraph on the UI
/// view.
struct RunUiSubgraphOnUiViewNode;
impl RenderGraphNode for RunUiSubgraphOnUiViewNode {
    /// Runs the UI sub-graph on the UI view associated with the current view,
    /// doing nothing if the view has no `UiCameraView`.
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        _: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Fetch the UI view; silently succeed if the query or lookup fails,
        // since not every view renders UI.
        let Some(mut render_views) = world.try_query::<&UiCameraView>() else {
            return Ok(());
        };
        let Ok(ui_camera_view) = render_views.get(world, graph.view_entity()) else {
            return Ok(());
        };
        // Run the subgraph on the UI view.
        graph.run_sub_graph(SubGraphUi, vec![], Some(ui_camera_view.0), None)?;
        Ok(())
    }
}
/// Extracts the background-color rect of every visible UI node into
/// [`ExtractedUiNodes`].
pub fn extract_uinode_background_colors(
    mut commands: Commands,
    mut extracted_uinodes: ResMut<ExtractedUiNodes>,
    uinode_query: Extract<
        Query<(
            Entity,
            &ComputedNode,
            &UiGlobalTransform,
            &InheritedVisibility,
            Option<&CalculatedClip>,
            &ComputedUiTargetCamera,
            &BackgroundColor,
        )>,
    >,
    camera_map: Extract<UiCameraMap>,
) {
    let mut camera_mapper = camera_map.get_mapper();
    for (entity, uinode, transform, inherited_visibility, clip, camera, background_color) in
        &uinode_query
    {
        // Skip invisible backgrounds: hidden nodes, fully transparent colors,
        // and zero-sized nodes.
        if !inherited_visibility.get()
            || background_color.0.is_fully_transparent()
            || uinode.is_empty()
        {
            continue;
        }
        // Skip nodes whose target camera has no render-world counterpart.
        let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
            continue;
        };
        extracted_uinodes.uinodes.push(ExtractedUiNode {
            render_entity: commands.spawn(TemporaryRenderEntity).id(),
            z_order: uinode.stack_index as f32 + stack_z_offsets::BACKGROUND_COLOR,
            clip: clip.map(|clip| clip.clip),
            // Untextured: use the default image id.
            image: AssetId::default(),
            extracted_camera_entity,
            transform: transform.into(),
            item: ExtractedUiItem::Node {
                color: background_color.0.into(),
                rect: Rect {
                    min: Vec2::ZERO,
                    max: uinode.size,
                },
                atlas_scaling: None,
                flip_x: false,
                flip_y: false,
                border: uinode.border(),
                border_radius: uinode.border_radius(),
                node_type: NodeType::Rect,
            },
            main_entity: entity.into(),
        });
    }
}
/// Extracts every visible, non-sliced [`ImageNode`] into [`ExtractedUiNodes`],
/// resolving texture-atlas sub-rects and per-node image rects.
pub fn extract_uinode_images(
    mut commands: Commands,
    mut extracted_uinodes: ResMut<ExtractedUiNodes>,
    texture_atlases: Extract<Res<Assets<TextureAtlasLayout>>>,
    uinode_query: Extract<
        Query<(
            Entity,
            &ComputedNode,
            &UiGlobalTransform,
            &InheritedVisibility,
            Option<&CalculatedClip>,
            &ComputedUiTargetCamera,
            &ImageNode,
        )>,
    >,
    camera_map: Extract<UiCameraMap>,
) {
    let mut camera_mapper = camera_map.get_mapper();
    for (entity, uinode, transform, inherited_visibility, clip, camera, image) in &uinode_query {
        // Skip invisible images: hidden nodes, fully transparent tints, the
        // shared transparent placeholder image, sliced image modes (handled by
        // the texture-slice pipeline), and zero-sized nodes.
        if !inherited_visibility.get()
            || image.color.is_fully_transparent()
            || image.image.id() == TRANSPARENT_IMAGE_HANDLE.id()
            || image.image_mode.uses_slices()
            || uinode.is_empty()
        {
            continue;
        }
        let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
            continue;
        };
        // Sub-rect of the texture from the atlas, if the node uses one.
        let atlas_rect = image
            .texture_atlas
            .as_ref()
            .and_then(|s| s.texture_rect(&texture_atlases))
            .map(|r| r.as_rect());
        // Combine the atlas sub-rect with the node's own image rect:
        // an image rect is interpreted relative to the atlas sub-rect.
        let mut rect = match (atlas_rect, image.rect) {
            (None, None) => Rect {
                min: Vec2::ZERO,
                max: uinode.size,
            },
            (None, Some(image_rect)) => image_rect,
            (Some(atlas_rect), None) => atlas_rect,
            (Some(atlas_rect), Some(mut image_rect)) => {
                image_rect.min += atlas_rect.min;
                image_rect.max += atlas_rect.min;
                image_rect
            }
        };
        // When drawing a sub-rect, scale it so it fills the node.
        let atlas_scaling = if atlas_rect.is_some() || image.rect.is_some() {
            let atlas_scaling = uinode.size() / rect.size();
            rect.min *= atlas_scaling;
            rect.max *= atlas_scaling;
            Some(atlas_scaling)
        } else {
            None
        };
        extracted_uinodes.uinodes.push(ExtractedUiNode {
            z_order: uinode.stack_index as f32 + stack_z_offsets::IMAGE,
            render_entity: commands.spawn(TemporaryRenderEntity).id(),
            clip: clip.map(|clip| clip.clip),
            image: image.image.id(),
            extracted_camera_entity,
            transform: transform.into(),
            item: ExtractedUiItem::Node {
                color: image.color.into(),
                rect,
                atlas_scaling,
                flip_x: image.flip_x,
                flip_y: image.flip_y,
                border: uinode.border,
                border_radius: uinode.border_radius,
                node_type: NodeType::Rect,
            },
            main_entity: entity.into(),
        });
    }
}
/// Extracts borders and outlines of visible UI nodes into [`ExtractedUiNodes`].
///
/// Edges with identical colors are merged into a single extracted node using
/// combined shader edge flags, so each distinct color is drawn only once.
pub fn extract_uinode_borders(
    mut commands: Commands,
    mut extracted_uinodes: ResMut<ExtractedUiNodes>,
    uinode_query: Extract<
        Query<(
            Entity,
            &Node,
            &ComputedNode,
            &UiGlobalTransform,
            &InheritedVisibility,
            Option<&CalculatedClip>,
            &ComputedUiTargetCamera,
            AnyOf<(&BorderColor, &Outline)>,
        )>,
    >,
    camera_map: Extract<UiCameraMap>,
) {
    // Borders and outlines are untextured.
    let image = AssetId::<Image>::default();
    let mut camera_mapper = camera_map.get_mapper();
    for (
        entity,
        node,
        computed_node,
        transform,
        inherited_visibility,
        maybe_clip,
        camera,
        (maybe_border_color, maybe_outline),
    ) in &uinode_query
    {
        // Skip invisible borders and removed nodes
        if !inherited_visibility.get() || node.display == Display::None {
            continue;
        }
        let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
            continue;
        };
        // Don't extract borders with zero width along all edges
        if computed_node.border() != BorderRect::ZERO
            && let Some(border_color) = maybe_border_color
        {
            // Per-edge colors in the same order as BORDER_FLAGS below.
            let border_colors = [
                border_color.left.to_linear(),
                border_color.top.to_linear(),
                border_color.right.to_linear(),
                border_color.bottom.to_linear(),
            ];
            const BORDER_FLAGS: [u32; 4] = [
                shader_flags::BORDER_LEFT,
                shader_flags::BORDER_TOP,
                shader_flags::BORDER_RIGHT,
                shader_flags::BORDER_BOTTOM,
            ];
            // Bitset of edges already emitted as part of an earlier node.
            let mut completed_flags = 0;
            for (i, &color) in border_colors.iter().enumerate() {
                if color.is_fully_transparent() {
                    continue;
                }
                let mut border_flags = BORDER_FLAGS[i];
                // Already drawn as part of a same-colored earlier edge.
                if completed_flags & border_flags != 0 {
                    continue;
                }
                // Merge all later edges that share this color into one draw.
                for j in i + 1..4 {
                    if color == border_colors[j] {
                        border_flags |= BORDER_FLAGS[j];
                    }
                }
                completed_flags |= border_flags;
                extracted_uinodes.uinodes.push(ExtractedUiNode {
                    z_order: computed_node.stack_index as f32 + stack_z_offsets::BORDER,
                    image,
                    clip: maybe_clip.map(|clip| clip.clip),
                    extracted_camera_entity,
                    transform: transform.into(),
                    item: ExtractedUiItem::Node {
                        color,
                        rect: Rect {
                            max: computed_node.size(),
                            ..Default::default()
                        },
                        atlas_scaling: None,
                        flip_x: false,
                        flip_y: false,
                        border: computed_node.border(),
                        border_radius: computed_node.border_radius(),
                        node_type: NodeType::Border(border_flags),
                    },
                    main_entity: entity.into(),
                    render_entity: commands.spawn(TemporaryRenderEntity).id(),
                });
            }
        }
        // Outline: rendered as a border with all four edges around the
        // outlined node size. Skip zero-width or fully transparent outlines.
        if computed_node.outline_width() <= 0. {
            continue;
        }
        if let Some(outline) = maybe_outline.filter(|outline| !outline.color.is_fully_transparent())
        {
            let outline_size = computed_node.outlined_node_size();
            extracted_uinodes.uinodes.push(ExtractedUiNode {
                z_order: computed_node.stack_index as f32 + stack_z_offsets::BORDER,
                render_entity: commands.spawn(TemporaryRenderEntity).id(),
                image,
                clip: maybe_clip.map(|clip| clip.clip),
                extracted_camera_entity,
                transform: transform.into(),
                item: ExtractedUiItem::Node {
                    color: outline.color.into(),
                    rect: Rect {
                        max: outline_size,
                        ..Default::default()
                    },
                    atlas_scaling: None,
                    flip_x: false,
                    flip_y: false,
                    border: BorderRect::all(computed_node.outline_width()),
                    border_radius: computed_node.outline_radius(),
                    node_type: NodeType::Border(shader_flags::BORDER_ALL),
                },
                main_entity: entity.into(),
            });
        }
    }
}
/// The UI camera is "moved back" by this many units (plus the [`UI_CAMERA_TRANSFORM_OFFSET`]) and also has a view
/// distance of this many units. This ensures that with a left-handed projection,
/// as ui elements are "stacked on top of each other", they are within the camera's view
/// and have room to grow.
// TODO: Consider computing this value at runtime based on the maximum z-value.
const UI_CAMERA_FAR: f32 = 1000.0;
// This value is subtracted from the far distance for the camera's z-position to ensure nodes at z == 0.0 are rendered
// TODO: Evaluate if we still need this.
const UI_CAMERA_TRANSFORM_OFFSET: f32 = -0.1;
/// The ID of the subview associated with a camera on which UI is to be drawn.
///
/// When UI is present, cameras extract to two views: the main 2D/3D one and a
/// UI one. The main 2D or 3D camera gets subview 0, and the corresponding UI
/// camera gets this subview, 1.
const UI_CAMERA_SUBVIEW: u32 = 1;
/// A render-world component that lives on the main render target view and
/// specifies the corresponding UI view.
///
/// For example, if UI is being rendered to a 3D camera, this component lives on
/// the 3D camera and contains the entity corresponding to the UI view.
#[derive(Component)]
/// Entity id of the temporary render entity with the corresponding extracted UI view.
pub struct UiCameraView(pub Entity);
/// A render-world component that lives on the UI view and specifies the
/// corresponding main render target view.
///
/// For example, if the UI is being rendered to a 3D camera, this component
/// lives on the UI view and contains the entity corresponding to the 3D camera.
///
/// This is the inverse of [`UiCameraView`].
#[derive(Component)]
pub struct UiViewTarget(pub Entity);
/// Extracts all UI elements associated with a camera into the render world.
pub fn extract_ui_camera_view(
mut commands: Commands,
mut transparent_render_phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
query: Extract<
Query<
(
Entity,
RenderEntity,
&Camera,
Has<Hdr>,
Option<&UiAntiAlias>,
Option<&BoxShadowSamples>,
),
Or<(With<Camera2d>, With<Camera3d>)>,
>,
>,
mut live_entities: Local<HashSet<RetainedViewEntity>>,
) {
live_entities.clear();
for (main_entity, render_entity, camera, hdr, ui_anti_alias, shadow_samples) in &query {
// ignore inactive cameras
if !camera.is_active {
commands
.get_entity(render_entity)
.expect("Camera entity wasn't synced.")
.remove::<(UiCameraView, UiAntiAlias, BoxShadowSamples)>();
continue;
}
if let Some(physical_viewport_rect) = camera.physical_viewport_rect() {
// use a projection matrix with the origin in the top left instead of the bottom left that comes with OrthographicProjection
let projection_matrix = Mat4::orthographic_rh(
0.0,
physical_viewport_rect.width() as f32,
physical_viewport_rect.height() as f32,
0.0,
0.0,
UI_CAMERA_FAR,
);
// We use `UI_CAMERA_SUBVIEW` here so as not to conflict with the
// main 3D or 2D camera, which will have subview index 0.
let retained_view_entity =
RetainedViewEntity::new(main_entity.into(), None, UI_CAMERA_SUBVIEW);
// Creates the UI view.
let ui_camera_view = commands
.spawn((
ExtractedView {
retained_view_entity,
clip_from_view: projection_matrix,
world_from_view: GlobalTransform::from_xyz(
0.0,
0.0,
UI_CAMERA_FAR + UI_CAMERA_TRANSFORM_OFFSET,
),
clip_from_world: None,
hdr,
viewport: UVec4::from((
physical_viewport_rect.min,
physical_viewport_rect.size(),
)),
color_grading: Default::default(),
invert_culling: false,
},
// Link to the main camera view.
UiViewTarget(render_entity),
TemporaryRenderEntity,
))
.id();
let mut entity_commands = commands
.get_entity(render_entity)
.expect("Camera entity wasn't synced.");
// Link from the main 2D/3D camera view to the UI view.
entity_commands.insert(UiCameraView(ui_camera_view));
if let Some(ui_anti_alias) = ui_anti_alias {
entity_commands.insert(*ui_anti_alias);
}
if let Some(shadow_samples) = shadow_samples {
entity_commands.insert(*shadow_samples);
}
transparent_render_phases.insert_or_clear(retained_view_entity);
live_entities.insert(retained_view_entity);
}
}
transparent_render_phases.retain(|entity, _| live_entities.contains(entity));
}
pub fn extract_viewport_nodes(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
camera_query: Extract<Query<(&Camera, &RenderTarget)>>,
uinode_query: Extract<
Query<(
Entity,
&ComputedNode,
&UiGlobalTransform,
&InheritedVisibility,
Option<&CalculatedClip>,
&ComputedUiTargetCamera,
&ViewportNode,
)>,
>,
camera_map: Extract<UiCameraMap>,
) {
let mut camera_mapper = camera_map.get_mapper();
for (entity, uinode, transform, inherited_visibility, clip, camera, viewport_node) in
&uinode_query
{
// Skip invisible images
if !inherited_visibility.get() || uinode.is_empty() {
continue;
}
let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
continue;
};
let Some(image) = camera_query
.get(viewport_node.camera)
.ok()
.and_then(|(_, render_target)| render_target.as_image())
else {
continue;
};
extracted_uinodes.uinodes.push(ExtractedUiNode {
z_order: uinode.stack_index as f32 + stack_z_offsets::IMAGE,
render_entity: commands.spawn(TemporaryRenderEntity).id(),
clip: clip.map(|clip| clip.clip),
image: image.id(),
extracted_camera_entity,
transform: transform.into(),
item: ExtractedUiItem::Node {
color: LinearRgba::WHITE,
rect: Rect {
min: Vec2::ZERO,
max: uinode.size,
},
atlas_scaling: None,
flip_x: false,
flip_y: false,
border: uinode.border(),
border_radius: uinode.border_radius(),
node_type: NodeType::Rect,
},
main_entity: entity.into(),
});
}
}
pub fn extract_text_sections(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
texture_atlases: Extract<Res<Assets<TextureAtlasLayout>>>,
uinode_query: Extract<
Query<(
Entity,
&ComputedNode,
&UiGlobalTransform,
&InheritedVisibility,
Option<&CalculatedClip>,
&ComputedUiTargetCamera,
&ComputedTextBlock,
&TextColor,
&TextLayoutInfo,
)>,
>,
text_styles: Extract<Query<&TextColor>>,
camera_map: Extract<UiCameraMap>,
) {
let mut start = extracted_uinodes.glyphs.len();
let mut end = start + 1;
let mut camera_mapper = camera_map.get_mapper();
for (
entity,
uinode,
transform,
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/box_shadow.rs | crates/bevy_ui_render/src/box_shadow.rs | //! Box shadows rendering
use core::{hash::Hash, ops::Range};
use bevy_app::prelude::*;
use bevy_asset::*;
use bevy_camera::visibility::InheritedVisibility;
use bevy_color::{Alpha, ColorToComponents, LinearRgba};
use bevy_ecs::prelude::*;
use bevy_ecs::{
prelude::Component,
system::{
lifetimeless::{Read, SRes},
*,
},
};
use bevy_image::BevyDefault as _;
use bevy_math::{vec2, Affine2, FloatOrd, Rect, Vec2};
use bevy_mesh::VertexBufferLayout;
use bevy_render::sync_world::{MainEntity, TemporaryRenderEntity};
use bevy_render::{
render_phase::*,
render_resource::{binding_types::uniform_buffer, *},
renderer::{RenderDevice, RenderQueue},
view::*,
Extract, ExtractSchedule, Render, RenderSystems,
};
use bevy_render::{RenderApp, RenderStartup};
use bevy_shader::{Shader, ShaderDefVal};
use bevy_ui::{
BoxShadow, CalculatedClip, ComputedNode, ComputedUiRenderTargetInfo, ComputedUiTargetCamera,
ResolvedBorderRadius, UiGlobalTransform, Val,
};
use bevy_utils::default;
use bytemuck::{Pod, Zeroable};
use crate::{BoxShadowSamples, RenderUiSystems, TransparentUi, UiCameraMap};
use super::{stack_z_offsets, UiCameraView, QUAD_INDICES, QUAD_VERTEX_POSITIONS};
/// A plugin that enables the rendering of box shadows.
pub struct BoxShadowPlugin;
impl Plugin for BoxShadowPlugin {
fn build(&self, app: &mut App) {
embedded_asset!(app, "box_shadow.wgsl");
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.add_render_command::<TransparentUi, DrawBoxShadows>()
.init_resource::<ExtractedBoxShadows>()
.init_resource::<BoxShadowMeta>()
.init_resource::<SpecializedRenderPipelines<BoxShadowPipeline>>()
.add_systems(RenderStartup, init_box_shadow_pipeline)
.add_systems(
ExtractSchedule,
extract_shadows.in_set(RenderUiSystems::ExtractBoxShadows),
)
.add_systems(
Render,
(
queue_shadows.in_set(RenderSystems::Queue),
prepare_shadows.in_set(RenderSystems::PrepareBindGroups),
),
);
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct BoxShadowVertex {
position: [f32; 3],
uvs: [f32; 2],
vertex_color: [f32; 4],
size: [f32; 2],
radius: [f32; 4],
blur: f32,
bounds: [f32; 2],
}
#[derive(Component)]
pub struct UiShadowsBatch {
pub range: Range<u32>,
pub camera: Entity,
}
/// Contains the vertices and bind groups to be sent to the GPU
#[derive(Resource)]
pub struct BoxShadowMeta {
vertices: RawBufferVec<BoxShadowVertex>,
indices: RawBufferVec<u32>,
view_bind_group: Option<BindGroup>,
}
impl Default for BoxShadowMeta {
fn default() -> Self {
Self {
vertices: RawBufferVec::new(BufferUsages::VERTEX),
indices: RawBufferVec::new(BufferUsages::INDEX),
view_bind_group: None,
}
}
}
#[derive(Resource)]
pub struct BoxShadowPipeline {
pub view_layout: BindGroupLayoutDescriptor,
pub shader: Handle<Shader>,
}
pub fn init_box_shadow_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
let view_layout = BindGroupLayoutDescriptor::new(
"box_shadow_view_layout",
&BindGroupLayoutEntries::single(
ShaderStages::VERTEX_FRAGMENT,
uniform_buffer::<ViewUniform>(true),
),
);
commands.insert_resource(BoxShadowPipeline {
view_layout,
shader: load_embedded_asset!(asset_server.as_ref(), "box_shadow.wgsl"),
});
}
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct BoxShadowPipelineKey {
pub hdr: bool,
/// Number of samples, a higher value results in better quality shadows.
pub samples: u32,
}
impl SpecializedRenderPipeline for BoxShadowPipeline {
type Key = BoxShadowPipelineKey;
fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
let vertex_layout = VertexBufferLayout::from_vertex_formats(
VertexStepMode::Vertex,
vec![
// position
VertexFormat::Float32x3,
// uv
VertexFormat::Float32x2,
// color
VertexFormat::Float32x4,
// target rect size
VertexFormat::Float32x2,
// corner radius values (top left, top right, bottom right, bottom left)
VertexFormat::Float32x4,
// blur radius
VertexFormat::Float32,
// outer size
VertexFormat::Float32x2,
],
);
let shader_defs = vec![ShaderDefVal::UInt(
"SHADOW_SAMPLES".to_string(),
key.samples,
)];
RenderPipelineDescriptor {
vertex: VertexState {
shader: self.shader.clone(),
shader_defs: shader_defs.clone(),
buffers: vec![vertex_layout],
..default()
},
fragment: Some(FragmentState {
shader: self.shader.clone(),
shader_defs,
targets: vec![Some(ColorTargetState {
format: if key.hdr {
ViewTarget::TEXTURE_FORMAT_HDR
} else {
TextureFormat::bevy_default()
},
blend: Some(BlendState::ALPHA_BLENDING),
write_mask: ColorWrites::ALL,
})],
..default()
}),
layout: vec![self.view_layout.clone()],
label: Some("box_shadow_pipeline".into()),
..default()
}
}
}
/// Description of a shadow to be sorted and queued for rendering
pub struct ExtractedBoxShadow {
pub stack_index: u32,
pub transform: Affine2,
pub bounds: Vec2,
pub clip: Option<Rect>,
pub extracted_camera_entity: Entity,
pub color: LinearRgba,
pub radius: ResolvedBorderRadius,
pub blur_radius: f32,
pub size: Vec2,
pub main_entity: MainEntity,
pub render_entity: Entity,
}
/// List of extracted shadows to be sorted and queued for rendering
#[derive(Resource, Default)]
pub struct ExtractedBoxShadows {
pub box_shadows: Vec<ExtractedBoxShadow>,
}
pub fn extract_shadows(
mut commands: Commands,
mut extracted_box_shadows: ResMut<ExtractedBoxShadows>,
box_shadow_query: Extract<
Query<(
Entity,
&ComputedNode,
&UiGlobalTransform,
&InheritedVisibility,
&BoxShadow,
Option<&CalculatedClip>,
&ComputedUiTargetCamera,
&ComputedUiRenderTargetInfo,
)>,
>,
camera_map: Extract<UiCameraMap>,
) {
let mut mapping = camera_map.get_mapper();
for (entity, uinode, transform, visibility, box_shadow, clip, camera, target) in
&box_shadow_query
{
// Skip if no visible shadows
if !visibility.get() || box_shadow.is_empty() || uinode.is_empty() {
continue;
}
let Some(extracted_camera_entity) = mapping.map(camera) else {
continue;
};
let ui_physical_viewport_size = target.physical_size().as_vec2();
let scale_factor = target.scale_factor();
for drop_shadow in box_shadow.iter() {
if drop_shadow.color.is_fully_transparent() {
continue;
}
let resolve_val = |val, base, scale_factor| match val {
Val::Auto => 0.,
Val::Px(px) => px * scale_factor,
Val::Percent(percent) => percent / 100. * base,
Val::Vw(percent) => percent / 100. * ui_physical_viewport_size.x,
Val::Vh(percent) => percent / 100. * ui_physical_viewport_size.y,
Val::VMin(percent) => percent / 100. * ui_physical_viewport_size.min_element(),
Val::VMax(percent) => percent / 100. * ui_physical_viewport_size.max_element(),
};
let spread_x = resolve_val(drop_shadow.spread_radius, uinode.size().x, scale_factor);
let spread_ratio = (spread_x + uinode.size().x) / uinode.size().x;
let spread = vec2(spread_x, uinode.size().y * spread_ratio - uinode.size().y);
let blur_radius = resolve_val(drop_shadow.blur_radius, uinode.size().x, scale_factor);
let offset = vec2(
resolve_val(drop_shadow.x_offset, uinode.size().x, scale_factor),
resolve_val(drop_shadow.y_offset, uinode.size().y, scale_factor),
);
let shadow_size = uinode.size() + spread;
if shadow_size.cmple(Vec2::ZERO).any() {
continue;
}
let radius = ResolvedBorderRadius {
top_left: uinode.border_radius.top_left * spread_ratio,
top_right: uinode.border_radius.top_right * spread_ratio,
bottom_left: uinode.border_radius.bottom_left * spread_ratio,
bottom_right: uinode.border_radius.bottom_right * spread_ratio,
};
extracted_box_shadows.box_shadows.push(ExtractedBoxShadow {
render_entity: commands.spawn(TemporaryRenderEntity).id(),
stack_index: uinode.stack_index,
transform: Affine2::from(transform) * Affine2::from_translation(offset),
color: drop_shadow.color.into(),
bounds: shadow_size + 6. * blur_radius,
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
radius,
blur_radius,
size: shadow_size,
main_entity: entity.into(),
});
}
}
}
#[expect(
clippy::too_many_arguments,
reason = "it's a system that needs a lot of them"
)]
pub fn queue_shadows(
extracted_box_shadows: ResMut<ExtractedBoxShadows>,
box_shadow_pipeline: Res<BoxShadowPipeline>,
mut pipelines: ResMut<SpecializedRenderPipelines<BoxShadowPipeline>>,
mut transparent_render_phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
mut render_views: Query<(&UiCameraView, Option<&BoxShadowSamples>), With<ExtractedView>>,
camera_views: Query<&ExtractedView>,
pipeline_cache: Res<PipelineCache>,
draw_functions: Res<DrawFunctions<TransparentUi>>,
) {
let draw_function = draw_functions.read().id::<DrawBoxShadows>();
for (index, extracted_shadow) in extracted_box_shadows.box_shadows.iter().enumerate() {
let entity = extracted_shadow.render_entity;
let Ok((default_camera_view, shadow_samples)) =
render_views.get_mut(extracted_shadow.extracted_camera_entity)
else {
continue;
};
let Ok(view) = camera_views.get(default_camera_view.0) else {
continue;
};
let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
else {
continue;
};
let pipeline = pipelines.specialize(
&pipeline_cache,
&box_shadow_pipeline,
BoxShadowPipelineKey {
hdr: view.hdr,
samples: shadow_samples.copied().unwrap_or_default().0,
},
);
transparent_phase.add(TransparentUi {
draw_function,
pipeline,
entity: (entity, extracted_shadow.main_entity),
sort_key: FloatOrd(extracted_shadow.stack_index as f32 + stack_z_offsets::BOX_SHADOW),
batch_range: 0..0,
extra_index: PhaseItemExtraIndex::None,
index,
indexed: true,
});
}
}
pub fn prepare_shadows(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
pipeline_cache: Res<PipelineCache>,
mut ui_meta: ResMut<BoxShadowMeta>,
mut extracted_shadows: ResMut<ExtractedBoxShadows>,
view_uniforms: Res<ViewUniforms>,
box_shadow_pipeline: Res<BoxShadowPipeline>,
mut phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
mut previous_len: Local<usize>,
) {
if let Some(view_binding) = view_uniforms.uniforms.binding() {
let mut batches: Vec<(Entity, UiShadowsBatch)> = Vec::with_capacity(*previous_len);
ui_meta.vertices.clear();
ui_meta.indices.clear();
ui_meta.view_bind_group = Some(render_device.create_bind_group(
"box_shadow_view_bind_group",
&pipeline_cache.get_bind_group_layout(&box_shadow_pipeline.view_layout),
&BindGroupEntries::single(view_binding),
));
// Buffer indexes
let mut vertices_index = 0;
let mut indices_index = 0;
for ui_phase in phases.values_mut() {
for item_index in 0..ui_phase.items.len() {
let item = &mut ui_phase.items[item_index];
let Some(box_shadow) = extracted_shadows
.box_shadows
.get(item.index)
.filter(|n| item.entity() == n.render_entity)
else {
continue;
};
let rect_size = box_shadow.bounds;
// Specify the corners of the node
let positions = QUAD_VERTEX_POSITIONS.map(|pos| {
box_shadow
.transform
.transform_point2(pos * rect_size)
.extend(0.)
});
// Calculate the effect of clipping
// Note: this won't work with rotation/scaling, but that's much more complex (may need more that 2 quads)
let positions_diff = if let Some(clip) = box_shadow.clip {
[
Vec2::new(
f32::max(clip.min.x - positions[0].x, 0.),
f32::max(clip.min.y - positions[0].y, 0.),
),
Vec2::new(
f32::min(clip.max.x - positions[1].x, 0.),
f32::max(clip.min.y - positions[1].y, 0.),
),
Vec2::new(
f32::min(clip.max.x - positions[2].x, 0.),
f32::min(clip.max.y - positions[2].y, 0.),
),
Vec2::new(
f32::max(clip.min.x - positions[3].x, 0.),
f32::min(clip.max.y - positions[3].y, 0.),
),
]
} else {
[Vec2::ZERO; 4]
};
let positions_clipped = [
positions[0] + positions_diff[0].extend(0.),
positions[1] + positions_diff[1].extend(0.),
positions[2] + positions_diff[2].extend(0.),
positions[3] + positions_diff[3].extend(0.),
];
let transformed_rect_size = box_shadow.transform.transform_vector2(rect_size);
// Don't try to cull nodes that have a rotation
// In a rotation around the Z-axis, this value is 0.0 for an angle of 0.0 or π
// In those two cases, the culling check can proceed normally as corners will be on
// horizontal / vertical lines
// For all other angles, bypass the culling check
// This does not properly handles all rotations on all axis
if box_shadow.transform.x_axis[1] == 0.0 {
// Cull nodes that are completely clipped
if positions_diff[0].x - positions_diff[1].x >= transformed_rect_size.x
|| positions_diff[1].y - positions_diff[2].y >= transformed_rect_size.y
{
continue;
}
}
let uvs = [
Vec2::new(positions_diff[0].x, positions_diff[0].y),
Vec2::new(
box_shadow.bounds.x + positions_diff[1].x,
positions_diff[1].y,
),
Vec2::new(
box_shadow.bounds.x + positions_diff[2].x,
box_shadow.bounds.y + positions_diff[2].y,
),
Vec2::new(
positions_diff[3].x,
box_shadow.bounds.y + positions_diff[3].y,
),
]
.map(|pos| pos / box_shadow.bounds);
for i in 0..4 {
ui_meta.vertices.push(BoxShadowVertex {
position: positions_clipped[i].into(),
uvs: uvs[i].into(),
vertex_color: box_shadow.color.to_f32_array(),
size: box_shadow.size.into(),
radius: box_shadow.radius.into(),
blur: box_shadow.blur_radius,
bounds: rect_size.into(),
});
}
for &i in &QUAD_INDICES {
ui_meta.indices.push(indices_index + i as u32);
}
batches.push((
item.entity(),
UiShadowsBatch {
range: vertices_index..vertices_index + 6,
camera: box_shadow.extracted_camera_entity,
},
));
vertices_index += 6;
indices_index += 4;
// shadows are sent to the gpu non-batched
*ui_phase.items[item_index].batch_range_mut() =
item_index as u32..item_index as u32 + 1;
}
}
ui_meta.vertices.write_buffer(&render_device, &render_queue);
ui_meta.indices.write_buffer(&render_device, &render_queue);
*previous_len = batches.len();
commands.try_insert_batch(batches);
}
extracted_shadows.box_shadows.clear();
}
pub type DrawBoxShadows = (SetItemPipeline, SetBoxShadowViewBindGroup<0>, DrawBoxShadow);
pub struct SetBoxShadowViewBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetBoxShadowViewBindGroup<I> {
type Param = SRes<BoxShadowMeta>;
type ViewQuery = Read<ViewUniformOffset>;
type ItemQuery = ();
fn render<'w>(
_item: &P,
view_uniform: &'w ViewUniformOffset,
_entity: Option<()>,
ui_meta: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let Some(view_bind_group) = ui_meta.into_inner().view_bind_group.as_ref() else {
return RenderCommandResult::Failure("view_bind_group not available");
};
pass.set_bind_group(I, view_bind_group, &[view_uniform.offset]);
RenderCommandResult::Success
}
}
pub struct DrawBoxShadow;
impl<P: PhaseItem> RenderCommand<P> for DrawBoxShadow {
type Param = SRes<BoxShadowMeta>;
type ViewQuery = ();
type ItemQuery = Read<UiShadowsBatch>;
#[inline]
fn render<'w>(
_item: &P,
_view: (),
batch: Option<&'w UiShadowsBatch>,
ui_meta: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let Some(batch) = batch else {
return RenderCommandResult::Skip;
};
let ui_meta = ui_meta.into_inner();
let Some(vertices) = ui_meta.vertices.buffer() else {
return RenderCommandResult::Failure("missing vertices to draw ui");
};
let Some(indices) = ui_meta.indices.buffer() else {
return RenderCommandResult::Failure("missing indices to draw ui");
};
// Store the vertices
pass.set_vertex_buffer(0, vertices.slice(..));
// Define how to "connect" the vertices
pass.set_index_buffer(indices.slice(..), IndexFormat::Uint32);
// Draw the vertices
pass.draw_indexed(batch.range.clone(), 0, 0..1);
RenderCommandResult::Success
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/ui_material_pipeline.rs | crates/bevy_ui_render/src/ui_material_pipeline.rs | use crate::ui_material::{MaterialNode, UiMaterial, UiMaterialKey};
use crate::*;
use bevy_asset::*;
use bevy_ecs::{
prelude::{Component, With},
query::ROQueryItem,
system::{
lifetimeless::{Read, SRes},
*,
},
};
use bevy_image::BevyDefault as _;
use bevy_math::{Affine2, FloatOrd, Rect, Vec2};
use bevy_mesh::VertexBufferLayout;
use bevy_render::{
globals::{GlobalsBuffer, GlobalsUniform},
render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets},
render_phase::*,
render_resource::{binding_types::uniform_buffer, *},
renderer::{RenderDevice, RenderQueue},
sync_world::{MainEntity, TemporaryRenderEntity},
view::*,
Extract, ExtractSchedule, Render, RenderSystems,
};
use bevy_render::{RenderApp, RenderStartup};
use bevy_shader::{load_shader_library, Shader, ShaderRef};
use bevy_sprite::BorderRect;
use bevy_utils::default;
use bytemuck::{Pod, Zeroable};
use core::{hash::Hash, marker::PhantomData, ops::Range};
/// Adds the necessary ECS resources and render logic to enable rendering entities using the given
/// [`UiMaterial`] asset type (which includes [`UiMaterial`] types).
pub struct UiMaterialPlugin<M: UiMaterial>(PhantomData<M>);
impl<M: UiMaterial> Default for UiMaterialPlugin<M> {
fn default() -> Self {
Self(Default::default())
}
}
impl<M: UiMaterial> Plugin for UiMaterialPlugin<M>
where
M::Data: PartialEq + Eq + Hash + Clone,
{
fn build(&self, app: &mut App) {
load_shader_library!(app, "ui_vertex_output.wgsl");
embedded_asset!(app, "ui_material.wgsl");
app.init_asset::<M>()
.register_type::<MaterialNode<M>>()
.add_plugins(RenderAssetPlugin::<PreparedUiMaterial<M>>::default());
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.add_render_command::<TransparentUi, DrawUiMaterial<M>>()
.init_resource::<ExtractedUiMaterialNodes<M>>()
.init_resource::<UiMaterialMeta<M>>()
.init_resource::<SpecializedRenderPipelines<UiMaterialPipeline<M>>>()
.add_systems(RenderStartup, init_ui_material_pipeline::<M>)
.add_systems(
ExtractSchedule,
extract_ui_material_nodes::<M>.in_set(RenderUiSystems::ExtractBackgrounds),
)
.add_systems(
Render,
(
queue_ui_material_nodes::<M>.in_set(RenderSystems::Queue),
prepare_uimaterial_nodes::<M>.in_set(RenderSystems::PrepareBindGroups),
),
);
}
}
}
#[derive(Resource)]
pub struct UiMaterialMeta<M: UiMaterial> {
vertices: RawBufferVec<UiMaterialVertex>,
view_bind_group: Option<BindGroup>,
marker: PhantomData<M>,
}
impl<M: UiMaterial> Default for UiMaterialMeta<M> {
fn default() -> Self {
Self {
vertices: RawBufferVec::new(BufferUsages::VERTEX),
view_bind_group: Default::default(),
marker: PhantomData,
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
pub struct UiMaterialVertex {
pub position: [f32; 3],
pub uv: [f32; 2],
pub size: [f32; 2],
pub border: [f32; 4],
pub radius: [f32; 4],
}
// in this [`UiMaterialPipeline`] there is (currently) no batching going on.
// Therefore the [`UiMaterialBatch`] is more akin to a draw call.
#[derive(Component)]
pub struct UiMaterialBatch<M: UiMaterial> {
/// The range of vertices inside the [`UiMaterialMeta`]
pub range: Range<u32>,
pub material: AssetId<M>,
}
/// Render pipeline data for a given [`UiMaterial`]
#[derive(Resource)]
pub struct UiMaterialPipeline<M: UiMaterial> {
pub ui_layout: BindGroupLayoutDescriptor,
pub view_layout: BindGroupLayoutDescriptor,
pub vertex_shader: Handle<Shader>,
pub fragment_shader: Handle<Shader>,
marker: PhantomData<M>,
}
impl<M: UiMaterial> SpecializedRenderPipeline for UiMaterialPipeline<M>
where
M::Data: PartialEq + Eq + Hash + Clone,
{
type Key = UiMaterialKey<M>;
fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
let vertex_layout = VertexBufferLayout::from_vertex_formats(
VertexStepMode::Vertex,
vec![
// position
VertexFormat::Float32x3,
// uv
VertexFormat::Float32x2,
// size
VertexFormat::Float32x2,
// border widths
VertexFormat::Float32x4,
// border radius
VertexFormat::Float32x4,
],
);
let shader_defs = Vec::new();
let mut descriptor = RenderPipelineDescriptor {
vertex: VertexState {
shader: self.vertex_shader.clone(),
shader_defs: shader_defs.clone(),
buffers: vec![vertex_layout],
..default()
},
fragment: Some(FragmentState {
shader: self.fragment_shader.clone(),
shader_defs,
targets: vec![Some(ColorTargetState {
format: if key.hdr {
ViewTarget::TEXTURE_FORMAT_HDR
} else {
TextureFormat::bevy_default()
},
blend: Some(BlendState::ALPHA_BLENDING),
write_mask: ColorWrites::ALL,
})],
..default()
}),
label: Some("ui_material_pipeline".into()),
..default()
};
descriptor.layout = vec![self.view_layout.clone(), self.ui_layout.clone()];
M::specialize(&mut descriptor, key);
descriptor
}
}
pub fn init_ui_material_pipeline<M: UiMaterial>(
mut commands: Commands,
asset_server: Res<AssetServer>,
render_device: Res<RenderDevice>,
) {
let ui_layout = M::bind_group_layout_descriptor(&render_device);
let view_layout = BindGroupLayoutDescriptor::new(
"ui_view_layout",
&BindGroupLayoutEntries::sequential(
ShaderStages::VERTEX_FRAGMENT,
(
uniform_buffer::<ViewUniform>(true),
uniform_buffer::<GlobalsUniform>(false),
),
),
);
let load_default = || load_embedded_asset!(asset_server.as_ref(), "ui_material.wgsl");
commands.insert_resource(UiMaterialPipeline::<M> {
ui_layout,
view_layout,
vertex_shader: match M::vertex_shader() {
ShaderRef::Default => load_default(),
ShaderRef::Handle(handle) => handle,
ShaderRef::Path(path) => asset_server.load(path),
},
fragment_shader: match M::fragment_shader() {
ShaderRef::Default => load_default(),
ShaderRef::Handle(handle) => handle,
ShaderRef::Path(path) => asset_server.load(path),
},
marker: PhantomData,
});
}
pub type DrawUiMaterial<M> = (
SetItemPipeline,
SetMatUiViewBindGroup<M, 0>,
SetUiMaterialBindGroup<M, 1>,
DrawUiMaterialNode<M>,
);
pub struct SetMatUiViewBindGroup<M: UiMaterial, const I: usize>(PhantomData<M>);
impl<P: PhaseItem, M: UiMaterial, const I: usize> RenderCommand<P> for SetMatUiViewBindGroup<M, I> {
type Param = SRes<UiMaterialMeta<M>>;
type ViewQuery = Read<ViewUniformOffset>;
type ItemQuery = ();
fn render<'w>(
_item: &P,
view_uniform: &'w ViewUniformOffset,
_entity: Option<()>,
ui_meta: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
pass.set_bind_group(
I,
ui_meta.into_inner().view_bind_group.as_ref().unwrap(),
&[view_uniform.offset],
);
RenderCommandResult::Success
}
}
pub struct SetUiMaterialBindGroup<M: UiMaterial, const I: usize>(PhantomData<M>);
impl<P: PhaseItem, M: UiMaterial, const I: usize> RenderCommand<P>
for SetUiMaterialBindGroup<M, I>
{
type Param = SRes<RenderAssets<PreparedUiMaterial<M>>>;
type ViewQuery = ();
type ItemQuery = Read<UiMaterialBatch<M>>;
fn render<'w>(
_item: &P,
_view: (),
material_handle: Option<ROQueryItem<'_, '_, Self::ItemQuery>>,
materials: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let Some(material_handle) = material_handle else {
return RenderCommandResult::Skip;
};
let Some(material) = materials.into_inner().get(material_handle.material) else {
return RenderCommandResult::Skip;
};
pass.set_bind_group(I, &material.bind_group, &[]);
RenderCommandResult::Success
}
}
pub struct DrawUiMaterialNode<M>(PhantomData<M>);
impl<P: PhaseItem, M: UiMaterial> RenderCommand<P> for DrawUiMaterialNode<M> {
type Param = SRes<UiMaterialMeta<M>>;
type ViewQuery = ();
type ItemQuery = Read<UiMaterialBatch<M>>;
#[inline]
fn render<'w>(
_item: &P,
_view: (),
batch: Option<&'w UiMaterialBatch<M>>,
ui_meta: SystemParamItem<'w, '_, Self::Param>,
pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
let Some(batch) = batch else {
return RenderCommandResult::Skip;
};
pass.set_vertex_buffer(0, ui_meta.into_inner().vertices.buffer().unwrap().slice(..));
pass.draw(batch.range.clone(), 0..1);
RenderCommandResult::Success
}
}
pub struct ExtractedUiMaterialNode<M: UiMaterial> {
pub stack_index: u32,
pub transform: Affine2,
pub rect: Rect,
pub border: BorderRect,
pub border_radius: [f32; 4],
pub material: AssetId<M>,
pub clip: Option<Rect>,
// Camera to render this UI node to. By the time it is extracted,
// it is defaulted to a single camera if only one exists.
// Nodes with ambiguous camera will be ignored.
pub extracted_camera_entity: Entity,
pub main_entity: MainEntity,
pub render_entity: Entity,
}
#[derive(Resource)]
pub struct ExtractedUiMaterialNodes<M: UiMaterial> {
pub uinodes: Vec<ExtractedUiMaterialNode<M>>,
}
impl<M: UiMaterial> Default for ExtractedUiMaterialNodes<M> {
fn default() -> Self {
Self {
uinodes: Default::default(),
}
}
}
pub fn extract_ui_material_nodes<M: UiMaterial>(
mut commands: Commands,
mut extracted_uinodes: ResMut<ExtractedUiMaterialNodes<M>>,
materials: Extract<Res<Assets<M>>>,
uinode_query: Extract<
Query<(
Entity,
&ComputedNode,
&UiGlobalTransform,
&MaterialNode<M>,
&InheritedVisibility,
Option<&CalculatedClip>,
&ComputedUiTargetCamera,
)>,
>,
camera_map: Extract<UiCameraMap>,
) {
let mut camera_mapper = camera_map.get_mapper();
for (entity, computed_node, transform, handle, inherited_visibility, clip, camera) in
uinode_query.iter()
{
// skip invisible nodes
if !inherited_visibility.get() || computed_node.is_empty() {
continue;
}
// Skip loading materials
if !materials.contains(handle) {
continue;
}
let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
continue;
};
extracted_uinodes.uinodes.push(ExtractedUiMaterialNode {
render_entity: commands.spawn(TemporaryRenderEntity).id(),
stack_index: computed_node.stack_index,
transform: transform.into(),
material: handle.id(),
rect: Rect {
min: Vec2::ZERO,
max: computed_node.size(),
},
border: computed_node.border(),
border_radius: computed_node.border_radius().into(),
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
main_entity: entity.into(),
});
}
}
/// Builds the vertex buffer and per-material [`UiMaterialBatch`]es for every
/// extracted UI material node that was queued into a [`TransparentUi`] phase.
///
/// Consecutive phase items that share the same material asset are merged into a
/// single batch. Vertices are clipped against the node's optional clip rect, and
/// fully clipped axis-aligned nodes are culled. Finally the vertex buffer is
/// uploaded and the batches are inserted onto their phase-item entities.
pub fn prepare_uimaterial_nodes<M: UiMaterial>(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    pipeline_cache: Res<PipelineCache>,
    mut ui_meta: ResMut<UiMaterialMeta<M>>,
    mut extracted_uinodes: ResMut<ExtractedUiMaterialNodes<M>>,
    view_uniforms: Res<ViewUniforms>,
    globals_buffer: Res<GlobalsBuffer>,
    ui_material_pipeline: Res<UiMaterialPipeline<M>>,
    mut phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
    mut previous_len: Local<usize>,
) {
    // Nothing can be prepared until the view and globals uniform buffers exist.
    if let (Some(view_binding), Some(globals_binding)) = (
        view_uniforms.uniforms.binding(),
        globals_buffer.buffer.binding(),
    ) {
        // Seed the capacity from last frame's batch count to avoid regrowth.
        let mut batches: Vec<(Entity, UiMaterialBatch<M>)> = Vec::with_capacity(*previous_len);
        ui_meta.vertices.clear();
        ui_meta.view_bind_group = Some(render_device.create_bind_group(
            "ui_material_view_bind_group",
            &pipeline_cache.get_bind_group_layout(&ui_material_pipeline.view_layout),
            &BindGroupEntries::sequential((view_binding, globals_binding)),
        ));
        // Running vertex index into the shared vertex buffer.
        let mut index = 0;
        for ui_phase in phases.values_mut() {
            let mut batch_item_index = 0;
            let mut batch_shader_handle = AssetId::invalid();
            for item_index in 0..ui_phase.items.len() {
                let item = &mut ui_phase.items[item_index];
                // Only process items that belong to this extraction pass
                // (the entity check filters out other UI phase-item kinds).
                if let Some(extracted_uinode) = extracted_uinodes
                    .uinodes
                    .get(item.index)
                    .filter(|n| item.entity() == n.render_entity)
                {
                    // Continue the current batch only while the material is unchanged.
                    let mut existing_batch = batches
                        .last_mut()
                        .filter(|_| batch_shader_handle == extracted_uinode.material);
                    if existing_batch.is_none() {
                        batch_item_index = item_index;
                        batch_shader_handle = extracted_uinode.material;
                        let new_batch = UiMaterialBatch {
                            range: index..index,
                            material: extracted_uinode.material,
                        };
                        batches.push((item.entity(), new_batch));
                        existing_batch = batches.last_mut();
                    }
                    let uinode_rect = extracted_uinode.rect;
                    let rect_size = uinode_rect.size();
                    // Transform the quad corners into world space.
                    let positions = QUAD_VERTEX_POSITIONS.map(|pos| {
                        extracted_uinode
                            .transform
                            .transform_point2(pos * rect_size)
                            .extend(1.0)
                    });
                    // Per-corner offsets that pull vertices inside the clip rect.
                    let positions_diff = if let Some(clip) = extracted_uinode.clip {
                        [
                            Vec2::new(
                                f32::max(clip.min.x - positions[0].x, 0.),
                                f32::max(clip.min.y - positions[0].y, 0.),
                            ),
                            Vec2::new(
                                f32::min(clip.max.x - positions[1].x, 0.),
                                f32::max(clip.min.y - positions[1].y, 0.),
                            ),
                            Vec2::new(
                                f32::min(clip.max.x - positions[2].x, 0.),
                                f32::min(clip.max.y - positions[2].y, 0.),
                            ),
                            Vec2::new(
                                f32::max(clip.min.x - positions[3].x, 0.),
                                f32::min(clip.max.y - positions[3].y, 0.),
                            ),
                        ]
                    } else {
                        [Vec2::ZERO; 4]
                    };
                    let positions_clipped = [
                        positions[0] + positions_diff[0].extend(0.),
                        positions[1] + positions_diff[1].extend(0.),
                        positions[2] + positions_diff[2].extend(0.),
                        positions[3] + positions_diff[3].extend(0.),
                    ];
                    let transformed_rect_size =
                        extracted_uinode.transform.transform_vector2(rect_size);
                    // Don't try to cull nodes that have a rotation
                    // In a rotation around the Z-axis, this value is 0.0 for an angle of 0.0 or π
                    // In those two cases, the culling check can proceed normally as corners will be on
                    // horizontal / vertical lines
                    // For all other angles, bypass the culling check
                    // This does not properly handles all rotations on all axis
                    if extracted_uinode.transform.x_axis[1] == 0.0 {
                        // Cull nodes that are completely clipped
                        if positions_diff[0].x - positions_diff[1].x >= transformed_rect_size.x
                            || positions_diff[1].y - positions_diff[2].y >= transformed_rect_size.y
                        {
                            continue;
                        }
                    }
                    // UVs shifted by the same clip offsets, normalized by the rect extent.
                    let uvs = [
                        Vec2::new(
                            uinode_rect.min.x + positions_diff[0].x,
                            uinode_rect.min.y + positions_diff[0].y,
                        ),
                        Vec2::new(
                            uinode_rect.max.x + positions_diff[1].x,
                            uinode_rect.min.y + positions_diff[1].y,
                        ),
                        Vec2::new(
                            uinode_rect.max.x + positions_diff[2].x,
                            uinode_rect.max.y + positions_diff[2].y,
                        ),
                        Vec2::new(
                            uinode_rect.min.x + positions_diff[3].x,
                            uinode_rect.max.y + positions_diff[3].y,
                        ),
                    ]
                    .map(|pos| pos / uinode_rect.max);
                    for i in QUAD_INDICES {
                        ui_meta.vertices.push(UiMaterialVertex {
                            position: positions_clipped[i].into(),
                            uv: uvs[i].into(),
                            size: extracted_uinode.rect.size().into(),
                            radius: extracted_uinode.border_radius,
                            border: [
                                extracted_uinode.border.min_inset.x,
                                extracted_uinode.border.min_inset.y,
                                extracted_uinode.border.max_inset.x,
                                extracted_uinode.border.max_inset.y,
                            ],
                        });
                    }
                    index += QUAD_INDICES.len() as u32;
                    existing_batch.unwrap().1.range.end = index;
                    // Grow the batch's item range on the first item of the batch.
                    ui_phase.items[batch_item_index].batch_range_mut().end += 1;
                } else {
                    // A foreign phase item breaks the current batch.
                    batch_shader_handle = AssetId::invalid();
                }
            }
        }
        ui_meta.vertices.write_buffer(&render_device, &render_queue);
        *previous_len = batches.len();
        commands.try_insert_batch(batches);
    }
    extracted_uinodes.uinodes.clear();
}
/// The GPU-ready form of a [`UiMaterial`] asset, produced by [`RenderAsset::prepare_asset`].
pub struct PreparedUiMaterial<T: UiMaterial> {
    /// Binding resources backing `bind_group`; kept alive for as long as the bind group.
    pub bindings: BindingResources,
    /// Bind group used when drawing nodes with this material.
    pub bind_group: BindGroup,
    /// Specialization key data derived from the material's bind group data.
    pub key: T::Data,
}
/// Converts a [`UiMaterial`] asset into its prepared GPU representation.
impl<M: UiMaterial> RenderAsset for PreparedUiMaterial<M> {
    type SourceAsset = M;
    type Param = (
        SRes<RenderDevice>,
        SRes<PipelineCache>,
        SRes<UiMaterialPipeline<M>>,
        M::Param,
    );
    /// Creates the material's bind group.
    ///
    /// Returns `RetryNextUpdate` when dependencies (e.g. textures) are not yet
    /// available, so preparation is re-attempted on a later update.
    fn prepare_asset(
        material: Self::SourceAsset,
        _: AssetId<Self::SourceAsset>,
        (render_device, pipeline_cache, pipeline, material_param): &mut SystemParamItem<
            Self::Param,
        >,
        _: Option<&Self>,
    ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
        let bind_group_data = material.bind_group_data();
        match material.as_bind_group(
            &pipeline.ui_layout.clone(),
            render_device,
            pipeline_cache,
            material_param,
        ) {
            Ok(prepared) => Ok(PreparedUiMaterial {
                bindings: prepared.bindings,
                bind_group: prepared.bind_group,
                key: bind_group_data,
            }),
            Err(AsBindGroupError::RetryNextUpdate) => {
                Err(PrepareAssetError::RetryNextUpdate(material))
            }
            Err(other) => Err(PrepareAssetError::AsBindGroupError(other)),
        }
    }
}
/// Queues every extracted UI material node into its view's [`TransparentUi`] phase.
///
/// Nodes whose material is not yet prepared, or whose camera/view/phase cannot be
/// resolved, are skipped. The sort key is the UI stack index plus the material
/// type's z-offset so materials interleave correctly with other UI primitives.
pub fn queue_ui_material_nodes<M: UiMaterial>(
    extracted_uinodes: Res<ExtractedUiMaterialNodes<M>>,
    draw_functions: Res<DrawFunctions<TransparentUi>>,
    ui_material_pipeline: Res<UiMaterialPipeline<M>>,
    mut pipelines: ResMut<SpecializedRenderPipelines<UiMaterialPipeline<M>>>,
    pipeline_cache: Res<PipelineCache>,
    render_materials: Res<RenderAssets<PreparedUiMaterial<M>>>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
    mut render_views: Query<&UiCameraView, With<ExtractedView>>,
    camera_views: Query<&ExtractedView>,
) where
    M::Data: PartialEq + Eq + Hash + Clone,
{
    let draw_function = draw_functions.read().id::<DrawUiMaterial<M>>();
    for (index, extracted_uinode) in extracted_uinodes.uinodes.iter().enumerate() {
        // Material not prepared yet (still loading): skip this frame.
        let Some(material) = render_materials.get(extracted_uinode.material) else {
            continue;
        };
        let Ok(default_camera_view) =
            render_views.get_mut(extracted_uinode.extracted_camera_entity)
        else {
            continue;
        };
        let Ok(view) = camera_views.get(default_camera_view.0) else {
            continue;
        };
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        // Specialization is cached internally, keyed on hdr + material bind group data.
        let pipeline = pipelines.specialize(
            &pipeline_cache,
            &ui_material_pipeline,
            UiMaterialKey {
                hdr: view.hdr,
                bind_group_data: material.key.clone(),
            },
        );
        // Reserve up-front so the phase item vec doesn't regrow per push.
        if transparent_phase.items.capacity() < extracted_uinodes.uinodes.len() {
            transparent_phase.items.reserve_exact(
                extracted_uinodes.uinodes.len() - transparent_phase.items.capacity(),
            );
        }
        transparent_phase.add(TransparentUi {
            draw_function,
            pipeline,
            entity: (extracted_uinode.render_entity, extracted_uinode.main_entity),
            sort_key: FloatOrd(extracted_uinode.stack_index as f32 + M::stack_z_offset()),
            batch_range: 0..0,
            extra_index: PhaseItemExtraIndex::None,
            index,
            indexed: false,
        });
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/render_pass.rs | crates/bevy_ui_render/src/render_pass.rs | use core::ops::Range;
use super::{ImageNodeBindGroups, UiBatch, UiMeta, UiViewTarget};
use crate::UiCameraView;
use bevy_ecs::{
prelude::*,
system::{lifetimeless::*, SystemParamItem},
};
use bevy_math::FloatOrd;
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::*,
render_phase::*,
render_resource::{CachedRenderPipelineId, RenderPassDescriptor},
renderer::*,
sync_world::MainEntity,
view::*,
};
use tracing::error;
/// Render graph node that encodes the [`TransparentUi`] phase for a view.
pub struct UiPassNode {
    // UI views paired with the target they render to.
    ui_view_query: QueryState<(&'static ExtractedView, &'static UiViewTarget)>,
    // Resolves a UI view target to its color attachment and camera (for the viewport).
    ui_view_target_query: QueryState<(&'static ViewTarget, &'static ExtractedCamera)>,
    // Optional redirect from an input view to a dedicated UI camera view.
    ui_camera_view_query: QueryState<&'static UiCameraView>,
}
impl UiPassNode {
    /// Creates a new `UiPassNode`, building its cached query states from `world`.
    pub fn new(world: &mut World) -> Self {
        let ui_view_query = world.query_filtered();
        let ui_view_target_query = world.query();
        let ui_camera_view_query = world.query();
        Self {
            ui_view_query,
            ui_view_target_query,
            ui_camera_view_query,
        }
    }
}
impl Node for UiPassNode {
    /// Refreshes the cached query states so they see archetypes created this frame.
    fn update(&mut self, world: &mut World) {
        self.ui_view_query.update_archetypes(world);
        self.ui_view_target_query.update_archetypes(world);
        self.ui_camera_view_query.update_archetypes(world);
    }
    /// Encodes a render pass that draws this view's [`TransparentUi`] phase,
    /// bailing out early (without error) whenever the view has no UI to draw.
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        // Extract the UI view.
        let input_view_entity = graph.view_entity();
        let Some(transparent_render_phases) =
            world.get_resource::<ViewSortedRenderPhases<TransparentUi>>()
        else {
            return Ok(());
        };
        // Query the UI view components.
        let Ok((view, ui_view_target)) = self.ui_view_query.get_manual(world, input_view_entity)
        else {
            return Ok(());
        };
        let Ok((target, camera)) = self
            .ui_view_target_query
            .get_manual(world, ui_view_target.0)
        else {
            return Ok(());
        };
        let Some(transparent_phase) = transparent_render_phases.get(&view.retained_view_entity)
        else {
            return Ok(());
        };
        // Skip the pass entirely when there is nothing to draw.
        if transparent_phase.items.is_empty() {
            return Ok(());
        }
        let diagnostics = render_context.diagnostic_recorder();
        // use the UI view entity if it is defined
        let view_entity = if let Ok(ui_camera_view) = self
            .ui_camera_view_query
            .get_manual(world, input_view_entity)
        {
            ui_camera_view.0
        } else {
            input_view_entity
        };
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("ui"),
            color_attachments: &[Some(target.get_unsampled_color_attachment())],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "ui");
        // Restrict rendering to the camera's viewport, if any.
        if let Some(viewport) = camera.viewport.as_ref() {
            render_pass.set_camera_viewport(viewport);
        }
        if let Err(err) = transparent_phase.render(&mut render_pass, world, view_entity) {
            error!("Error encountered while rendering the ui phase {err:?}");
        }
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// A phase item for sorted, back-to-front UI rendering.
pub struct TransparentUi {
    /// Sort key: UI stack index plus a per-primitive-type z-offset.
    pub sort_key: FloatOrd,
    /// Render-world entity paired with the originating main-world entity.
    pub entity: (Entity, MainEntity),
    /// The specialized render pipeline to bind for this item.
    pub pipeline: CachedRenderPipelineId,
    /// The draw function that renders this item.
    pub draw_function: DrawFunctionId,
    /// Range of consecutive phase items merged into one draw (batching).
    pub batch_range: Range<u32>,
    /// Extra per-item data (e.g. dynamic offsets), if any.
    pub extra_index: PhaseItemExtraIndex,
    /// Index into the per-primitive extracted-node buffer.
    pub index: usize,
    /// Whether this item is drawn with an index buffer.
    pub indexed: bool,
}
/// Trivial accessor implementations forwarding to the struct fields.
impl PhaseItem for TransparentUi {
    #[inline]
    fn entity(&self) -> Entity {
        self.entity.0
    }
    // `#[inline]` added for consistency with the other one-line accessors in this impl.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.entity.1
    }
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl SortedPhaseItem for TransparentUi {
    type SortKey = FloatOrd;
    #[inline]
    fn sort_key(&self) -> Self::SortKey {
        self.sort_key
    }
    /// Stable sort: items with equal sort keys keep their insertion order,
    /// which preserves the UI stacking order within a z-layer.
    #[inline]
    fn sort(items: &mut [Self]) {
        items.sort_by_key(SortedPhaseItem::sort_key);
    }
    #[inline]
    fn indexed(&self) -> bool {
        self.indexed
    }
}
impl CachedRenderPipelinePhaseItem for TransparentUi {
    /// The specialized pipeline chosen when this item was queued.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.pipeline
    }
}
/// Render command sequence for a UI batch: set the specialized pipeline,
/// bind the view uniforms (group 0) and the batch's image (group 1),
/// then issue the indexed draw.
pub type DrawUi = (
    SetItemPipeline,
    SetUiViewBindGroup<0>,
    SetUiTextureBindGroup<1>,
    DrawUiNode,
);
/// Render command that binds the view uniform bind group at index `I`.
pub struct SetUiViewBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetUiViewBindGroup<I> {
    type Param = SRes<UiMeta>;
    type ViewQuery = Read<ViewUniformOffset>;
    type ItemQuery = ();
    fn render<'w>(
        _item: &P,
        view_uniform: &'w ViewUniformOffset,
        _entity: Option<()>,
        ui_meta: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // The bind group is created in the prepare stage; fail the draw if it's missing.
        let Some(view_bind_group) = ui_meta.into_inner().view_bind_group.as_ref() else {
            return RenderCommandResult::Failure("view_bind_group not available");
        };
        // The dynamic offset selects this view's slice of the view uniform buffer.
        pass.set_bind_group(I, view_bind_group, &[view_uniform.offset]);
        RenderCommandResult::Success
    }
}
/// Render command that binds the batch's image bind group at index `I`.
pub struct SetUiTextureBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetUiTextureBindGroup<I> {
    type Param = SRes<ImageNodeBindGroups>;
    type ViewQuery = ();
    type ItemQuery = Read<UiBatch>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: (),
        batch: Option<&'w UiBatch>,
        image_bind_groups: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let image_bind_groups = image_bind_groups.into_inner();
        let Some(batch) = batch else {
            return RenderCommandResult::Skip;
        };
        // Fail gracefully instead of panicking if the bind group for this image is
        // missing (e.g. the image asset was removed between prepare and draw), matching
        // the error handling of the sibling render commands.
        let Some(bind_group) = image_bind_groups.values.get(&batch.image) else {
            return RenderCommandResult::Failure("missing bind group for ui batch image");
        };
        pass.set_bind_group(I, bind_group, &[]);
        RenderCommandResult::Success
    }
}
/// Render command that issues the indexed draw call for a UI batch.
pub struct DrawUiNode;
impl<P: PhaseItem> RenderCommand<P> for DrawUiNode {
    type Param = SRes<UiMeta>;
    type ViewQuery = ();
    type ItemQuery = Read<UiBatch>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: (),
        batch: Option<&'w UiBatch>,
        ui_meta: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let Some(batch) = batch else {
            return RenderCommandResult::Skip;
        };
        let ui_meta = ui_meta.into_inner();
        // Both buffers are written in the prepare stage; fail the draw if either is missing.
        let Some(vertices) = ui_meta.vertices.buffer() else {
            return RenderCommandResult::Failure("missing vertices to draw ui");
        };
        let Some(indices) = ui_meta.indices.buffer() else {
            return RenderCommandResult::Failure("missing indices to draw ui");
        };
        // Store the vertices
        pass.set_vertex_buffer(0, vertices.slice(..));
        // Define how to "connect" the vertices
        pass.set_index_buffer(
            indices.slice(..),
            bevy_render::render_resource::IndexFormat::Uint32,
        );
        // Draw the vertices
        pass.draw_indexed(batch.range.clone(), 0, 0..1);
        RenderCommandResult::Success
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/color_space.rs | crates/bevy_ui_render/src/color_space.rs | use bevy_app::{App, Plugin};
use bevy_shader::load_shader_library;
/// A plugin for WGSL color space utility functions
pub struct ColorSpacePlugin;
impl Plugin for ColorSpacePlugin {
    fn build(&self, app: &mut App) {
        // Register the shared WGSL color-space helpers so other UI shaders can
        // `#import` them by path.
        load_shader_library!(app, "color_space.wgsl");
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/ui_material.rs | crates/bevy_ui_render/src/ui_material.rs | use crate::Node;
use bevy_asset::{Asset, AssetId, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{component::Component, reflect::ReflectComponent};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_render::{
extract_component::ExtractComponent,
render_resource::{AsBindGroup, RenderPipelineDescriptor},
};
use bevy_shader::ShaderRef;
use derive_more::derive::From;
/// Materials are used alongside [`UiMaterialPlugin`](crate::UiMaterialPlugin) and [`MaterialNode`]
/// to spawn entities that are rendered with a specific [`UiMaterial`] type. They serve as an easy to use high level
/// way to render `Node` entities with custom shader logic.
///
/// `UiMaterials` must implement [`AsBindGroup`] to define how data will be transferred to the GPU and bound in shaders.
/// [`AsBindGroup`] can be derived, which makes generating bindings straightforward. See the [`AsBindGroup`] docs for details.
///
/// Materials must also implement [`Asset`] so they can be treated as such.
///
/// If you are only using the fragment shader, make sure your shader imports the `UiVertexOutput`
/// from `bevy_ui::ui_vertex_output` and uses it as the input of your fragment shader like the
/// example below does.
///
/// # Example
///
/// Here is a simple [`UiMaterial`] implementation. The [`AsBindGroup`] derive has many features. To see what else is available,
/// check out the [`AsBindGroup`] documentation.
/// ```
/// # use bevy_ui::prelude::*;
/// # use bevy_ecs::prelude::*;
/// # use bevy_image::Image;
/// # use bevy_reflect::TypePath;
/// # use bevy_render::render_resource::AsBindGroup;
/// # use bevy_color::LinearRgba;
/// # use bevy_shader::ShaderRef;
/// # use bevy_asset::{Handle, AssetServer, Assets, Asset};
/// # use bevy_ui_render::prelude::*;
///
/// #[derive(AsBindGroup, Asset, TypePath, Debug, Clone)]
/// pub struct CustomMaterial {
/// // Uniform bindings must implement `ShaderType`, which will be used to convert the value to
/// // its shader-compatible equivalent. Most core math types already implement `ShaderType`.
/// #[uniform(0)]
/// color: LinearRgba,
/// // Images can be bound as textures in shaders. If the Image's sampler is also needed, just
/// // add the sampler attribute with a different binding index.
/// #[texture(1)]
/// #[sampler(2)]
/// color_texture: Handle<Image>,
/// }
///
/// // All functions on `UiMaterial` have default impls. You only need to implement the
/// // functions that are relevant for your material.
/// impl UiMaterial for CustomMaterial {
/// fn fragment_shader() -> ShaderRef {
/// "shaders/custom_material.wgsl".into()
/// }
/// }
///
/// // Spawn an entity using `CustomMaterial`.
/// fn setup(mut commands: Commands, mut materials: ResMut<Assets<CustomMaterial>>, asset_server: Res<AssetServer>) {
/// commands.spawn((
/// MaterialNode(materials.add(CustomMaterial {
/// color: LinearRgba::RED,
/// color_texture: asset_server.load("some_image.png"),
/// })),
/// Node {
/// width: Val::Percent(100.0),
/// ..Default::default()
/// },
/// ));
/// }
/// ```
/// In WGSL shaders, the material's binding would look like this:
///
/// If you only use the fragment shader make sure to import `UiVertexOutput` from
/// `bevy_ui::ui_vertex_output` in your wgsl shader.
/// Also note that bind group 0 is always bound to the [`View Uniform`](bevy_render::view::ViewUniform)
/// and the [`Globals Uniform`](bevy_render::globals::GlobalsUniform).
///
/// ```wgsl
/// #import bevy_ui::ui_vertex_output UiVertexOutput
///
/// struct CustomMaterial {
/// color: vec4<f32>,
/// }
///
/// @group(1) @binding(0)
/// var<uniform> material: CustomMaterial;
/// @group(1) @binding(1)
/// var color_texture: texture_2d<f32>;
/// @group(1) @binding(2)
/// var color_sampler: sampler;
///
/// @fragment
/// fn fragment(in: UiVertexOutput) -> @location(0) vec4<f32> {
///     return material.color * textureSample(color_texture, color_sampler, in.uv);
/// }
/// ```
pub trait UiMaterial: AsBindGroup + Asset + Clone + Sized {
    /// Returns this material's vertex shader. If [`ShaderRef::Default`] is returned, the default UI
    /// vertex shader will be used.
    fn vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the default
    /// UI fragment shader will be used.
    fn fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// The z-offset added to the UI stack index when sorting nodes drawn with this
    /// material against other UI primitives.
    fn stack_z_offset() -> f32 {
        crate::stack_z_offsets::MATERIAL
    }
    /// Customizes the render pipeline descriptor for this material.
    /// The default implementation leaves the descriptor unchanged.
    #[expect(
        unused_variables,
        reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion."
    )]
    #[inline]
    fn specialize(descriptor: &mut RenderPipelineDescriptor, key: UiMaterialKey<Self>) {}
}
/// Pipeline specialization key for a [`UiMaterial`].
pub struct UiMaterialKey<M: UiMaterial> {
    /// Whether the target view renders in HDR (affects the color target format).
    pub hdr: bool,
    /// Material-defined bind group data that participates in specialization.
    pub bind_group_data: M::Data,
}
// Manual impls: deriving would incorrectly require `M: Eq/PartialEq` instead of
// bounding only `M::Data`.
impl<M: UiMaterial> Eq for UiMaterialKey<M> where M::Data: PartialEq {}
impl<M: UiMaterial> PartialEq for UiMaterialKey<M>
where
    M::Data: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        self.hdr == other.hdr && self.bind_group_data == other.bind_group_data
    }
}
// Manual impl: deriving would require `M: Clone` rather than only `M::Data: Clone`.
impl<M: UiMaterial> Clone for UiMaterialKey<M>
where
    M::Data: Clone,
{
    fn clone(&self) -> Self {
        let Self {
            hdr,
            bind_group_data,
        } = self;
        Self {
            hdr: *hdr,
            bind_group_data: bind_group_data.clone(),
        }
    }
}
// Manual impl: deriving would require `M: Hash` rather than only `M::Data: Hash`.
impl<M: UiMaterial> core::hash::Hash for UiMaterialKey<M>
where
    M::Data: core::hash::Hash,
{
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        let Self {
            hdr,
            bind_group_data,
        } = self;
        hdr.hash(state);
        bind_group_data.hash(state);
    }
}
/// Component that renders its UI [`Node`] with the given [`UiMaterial`] asset.
#[derive(
    Component, Clone, Debug, Deref, DerefMut, Reflect, PartialEq, Eq, ExtractComponent, From,
)]
#[reflect(Component, Default)]
#[require(Node)]
pub struct MaterialNode<M: UiMaterial>(pub Handle<M>);
impl<M: UiMaterial> Default for MaterialNode<M> {
    /// A node referencing the default (placeholder) material handle.
    fn default() -> Self {
        Self(Default::default())
    }
}
impl<M: UiMaterial> From<MaterialNode<M>> for AssetId<M> {
    /// Extracts the asset id of the node's material handle.
    fn from(material: MaterialNode<M>) -> Self {
        material.0.id()
    }
}
impl<M: UiMaterial> From<&MaterialNode<M>> for AssetId<M> {
    /// Extracts the asset id of the node's material handle.
    fn from(material: &MaterialNode<M>) -> Self {
        material.0.id()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/pipeline.rs | crates/bevy_ui_render/src/pipeline.rs | use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_ecs::prelude::*;
use bevy_image::BevyDefault as _;
use bevy_mesh::VertexBufferLayout;
use bevy_render::{
render_resource::{
binding_types::{sampler, texture_2d, uniform_buffer},
*,
},
view::{ViewTarget, ViewUniform},
};
use bevy_shader::Shader;
use bevy_utils::default;
/// The default UI render pipeline: layouts and shader for drawing batched UI nodes.
#[derive(Resource)]
pub struct UiPipeline {
    /// Bind group layout for the view uniforms (group 0).
    pub view_layout: BindGroupLayoutDescriptor,
    /// Bind group layout for the node's texture and sampler (group 1).
    pub image_layout: BindGroupLayoutDescriptor,
    /// The embedded `ui.wgsl` shader.
    pub shader: Handle<Shader>,
}
/// Creates the [`UiPipeline`] resource: the view and image bind group layouts and
/// the embedded UI shader. Runs once at render-app startup.
pub fn init_ui_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
    // Group 0: dynamically-offset view uniform, visible to both shader stages.
    let view_layout = BindGroupLayoutDescriptor::new(
        "ui_view_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::VERTEX_FRAGMENT,
            uniform_buffer::<ViewUniform>(true),
        ),
    );
    // Group 1: the node's texture + filtering sampler, fragment-only.
    let image_layout = BindGroupLayoutDescriptor::new(
        "ui_image_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                texture_2d(TextureSampleType::Float { filterable: true }),
                sampler(SamplerBindingType::Filtering),
            ),
        ),
    );
    commands.insert_resource(UiPipeline {
        view_layout,
        image_layout,
        shader: load_embedded_asset!(asset_server.as_ref(), "ui.wgsl"),
    });
}
/// Specialization key for [`UiPipeline`].
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct UiPipelineKey {
    /// Whether the view renders in HDR (selects the color target format).
    pub hdr: bool,
    /// Whether the `ANTI_ALIAS` shader define is enabled.
    pub anti_alias: bool,
}
impl SpecializedRenderPipeline for UiPipeline {
    type Key = UiPipelineKey;
    /// Builds the UI render pipeline descriptor for the given key.
    ///
    /// The vertex layout must match `UiVertex` as written by the prepare stage,
    /// and the color target format follows the view's HDR setting.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let vertex_layout = VertexBufferLayout::from_vertex_formats(
            VertexStepMode::Vertex,
            vec![
                // position
                VertexFormat::Float32x3,
                // uv
                VertexFormat::Float32x2,
                // color
                VertexFormat::Float32x4,
                // mode
                VertexFormat::Uint32,
                // border radius
                VertexFormat::Float32x4,
                // border thickness
                VertexFormat::Float32x4,
                // border size
                VertexFormat::Float32x2,
                // position relative to the center
                VertexFormat::Float32x2,
            ],
        );
        let shader_defs = if key.anti_alias {
            vec!["ANTI_ALIAS".into()]
        } else {
            Vec::new()
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![vertex_layout],
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format: if key.hdr {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout: vec![self.view_layout.clone(), self.image_layout.clone()],
            label: Some("ui_pipeline".into()),
            ..default()
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/ui_texture_slice_pipeline.rs | crates/bevy_ui_render/src/ui_texture_slice_pipeline.rs | use core::{hash::Hash, ops::Range};
use crate::*;
use bevy_asset::*;
use bevy_color::{ColorToComponents, LinearRgba};
use bevy_ecs::{
prelude::Component,
system::{
lifetimeless::{Read, SRes},
*,
},
};
use bevy_image::prelude::*;
use bevy_math::{Affine2, FloatOrd, Rect, Vec2};
use bevy_mesh::VertexBufferLayout;
use bevy_platform::collections::HashMap;
use bevy_render::{
render_asset::RenderAssets,
render_phase::*,
render_resource::{binding_types::uniform_buffer, *},
renderer::{RenderDevice, RenderQueue},
texture::GpuImage,
view::*,
Extract, ExtractSchedule, Render, RenderSystems,
};
use bevy_render::{sync_world::MainEntity, RenderStartup};
use bevy_shader::Shader;
use bevy_sprite::{SliceScaleMode, SpriteImageMode, TextureSlicer};
use bevy_sprite_render::SpriteAssetEvents;
use bevy_ui::widget;
use bevy_utils::default;
use binding_types::{sampler, texture_2d};
use bytemuck::{Pod, Zeroable};
/// Plugin that renders 9-sliced and tiled UI images.
pub struct UiTextureSlicerPlugin;
impl Plugin for UiTextureSlicerPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "ui_texture_slice.wgsl");
        // All rendering work lives in the render sub-app; skip if it doesn't exist
        // (e.g. headless configurations).
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .add_render_command::<TransparentUi, DrawUiTextureSlices>()
                .init_resource::<ExtractedUiTextureSlices>()
                .init_resource::<UiTextureSliceMeta>()
                .init_resource::<UiTextureSliceImageBindGroups>()
                .init_resource::<SpecializedRenderPipelines<UiTextureSlicePipeline>>()
                .add_systems(RenderStartup, init_ui_texture_slice_pipeline)
                .add_systems(
                    ExtractSchedule,
                    extract_ui_texture_slices.in_set(RenderUiSystems::ExtractTextureSlice),
                )
                .add_systems(
                    Render,
                    (
                        queue_ui_slices.in_set(RenderSystems::Queue),
                        prepare_ui_slices.in_set(RenderSystems::PrepareBindGroups),
                    ),
                );
        }
    }
}
/// GPU vertex format for texture-sliced UI nodes.
/// Field order and layout must match the vertex buffer layout declared in
/// `UiTextureSlicePipeline::specialize`.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct UiTextureSliceVertex {
    pub position: [f32; 3],
    pub uv: [f32; 2],
    pub color: [f32; 4],
    /// Normalized texture slicing lines (left, top, right, bottom).
    pub slices: [f32; 4],
    /// Normalized target slicing lines (left, top, right, bottom).
    pub border: [f32; 4],
    /// Repeat values (horizontal side, vertical side, horizontal center, vertical center).
    pub repeat: [f32; 4],
    /// Normalized texture atlas rect (left, top, right, bottom).
    pub atlas: [f32; 4],
}
/// A contiguous run of texture-slice indices sharing the same image, drawn in one call.
#[derive(Component)]
pub struct UiTextureSlicerBatch {
    /// Index range into the shared index buffer.
    pub range: Range<u32>,
    /// The image bound for this batch.
    pub image: AssetId<Image>,
}
/// Shared GPU buffers and view bind group for texture-slice rendering.
#[derive(Resource)]
pub struct UiTextureSliceMeta {
    // Vertex buffer, rebuilt every frame in `prepare_ui_slices`.
    vertices: RawBufferVec<UiTextureSliceVertex>,
    // Index buffer, rebuilt alongside `vertices`.
    indices: RawBufferVec<u32>,
    // View uniform bind group; `None` until the first prepare.
    view_bind_group: Option<BindGroup>,
}
impl Default for UiTextureSliceMeta {
    fn default() -> Self {
        Self {
            vertices: RawBufferVec::new(BufferUsages::VERTEX),
            indices: RawBufferVec::new(BufferUsages::INDEX),
            view_bind_group: None,
        }
    }
}
/// Cache of per-image bind groups, invalidated when the image asset changes.
#[derive(Resource, Default)]
pub struct UiTextureSliceImageBindGroups {
    pub values: HashMap<AssetId<Image>, BindGroup>,
}
/// Render pipeline resource for texture-sliced UI images.
#[derive(Resource)]
pub struct UiTextureSlicePipeline {
    /// Bind group layout for the view uniforms (group 0).
    pub view_layout: BindGroupLayoutDescriptor,
    /// Bind group layout for the image texture and sampler (group 1).
    pub image_layout: BindGroupLayoutDescriptor,
    /// The embedded `ui_texture_slice.wgsl` shader.
    pub shader: Handle<Shader>,
}
/// Creates the [`UiTextureSlicePipeline`] resource at render-app startup.
pub fn init_ui_texture_slice_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
    // Group 0: dynamically-offset view uniform, visible to both shader stages.
    let view_layout = BindGroupLayoutDescriptor::new(
        "ui_texture_slice_view_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::VERTEX_FRAGMENT,
            uniform_buffer::<ViewUniform>(true),
        ),
    );
    // Group 1: the image texture + filtering sampler, fragment-only.
    let image_layout = BindGroupLayoutDescriptor::new(
        "ui_texture_slice_image_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                texture_2d(TextureSampleType::Float { filterable: true }),
                sampler(SamplerBindingType::Filtering),
            ),
        ),
    );
    commands.insert_resource(UiTextureSlicePipeline {
        view_layout,
        image_layout,
        shader: load_embedded_asset!(asset_server.as_ref(), "ui_texture_slice.wgsl"),
    });
}
/// Specialization key for [`UiTextureSlicePipeline`].
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct UiTextureSlicePipelineKey {
    /// Whether the view renders in HDR (selects the color target format).
    pub hdr: bool,
}
impl SpecializedRenderPipeline for UiTextureSlicePipeline {
    type Key = UiTextureSlicePipelineKey;
    /// Builds the texture-slice pipeline descriptor for the given key.
    ///
    /// The vertex layout must match [`UiTextureSliceVertex`] field-for-field.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let vertex_layout = VertexBufferLayout::from_vertex_formats(
            VertexStepMode::Vertex,
            vec![
                // position
                VertexFormat::Float32x3,
                // uv
                VertexFormat::Float32x2,
                // color
                VertexFormat::Float32x4,
                // normalized texture slicing lines (left, top, right, bottom)
                VertexFormat::Float32x4,
                // normalized target slicing lines (left, top, right, bottom)
                VertexFormat::Float32x4,
                // repeat values (horizontal side, vertical side, horizontal center, vertical center)
                VertexFormat::Float32x4,
                // normalized texture atlas rect (left, top, right, bottom)
                VertexFormat::Float32x4,
            ],
        );
        let shader_defs = Vec::new();
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![vertex_layout],
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format: if key.hdr {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout: vec![self.view_layout.clone(), self.image_layout.clone()],
            label: Some("ui_texture_slice_pipeline".into()),
            ..default()
        }
    }
}
/// Per-node data extracted from the main world for texture-slice rendering.
pub struct ExtractedUiTextureSlice {
    /// UI stacking index used for sort keys.
    pub stack_index: u32,
    /// World transform of the node.
    pub transform: Affine2,
    /// Local-space rect of the node (min at the origin).
    pub rect: Rect,
    /// Sub-rect of the image to sample, if an atlas or explicit rect is used.
    pub atlas_rect: Option<Rect>,
    /// The image asset to draw.
    pub image: AssetId<Image>,
    /// Optional clip rect inherited from ancestor nodes.
    pub clip: Option<Rect>,
    /// The render-world camera entity this node targets.
    pub extracted_camera_entity: Entity,
    /// Tint color.
    pub color: LinearRgba,
    /// Slicing/tiling mode for the image.
    pub image_scale_mode: SpriteImageMode,
    pub flip_x: bool,
    pub flip_y: bool,
    /// Reciprocal of the UI scale factor for this node's target.
    pub inverse_scale_factor: f32,
    /// The originating main-world entity.
    pub main_entity: MainEntity,
    /// Temporary render-world entity representing this slice for one frame.
    pub render_entity: Entity,
}
/// Frame buffer of all extracted texture slices, refilled each extract schedule.
#[derive(Resource, Default)]
pub struct ExtractedUiTextureSlices {
    pub slices: Vec<ExtractedUiTextureSlice>,
}
/// Extracts sliced/tiled image nodes into [`ExtractedUiTextureSlices`].
///
/// Only nodes using `NodeImageMode::Sliced` or `NodeImageMode::Tiled` are handled
/// here; other image modes are rendered by the plain image path. Invisible,
/// fully-transparent, and placeholder-image nodes are skipped.
pub fn extract_ui_texture_slices(
    mut commands: Commands,
    mut extracted_ui_slicers: ResMut<ExtractedUiTextureSlices>,
    texture_atlases: Extract<Res<Assets<TextureAtlasLayout>>>,
    slicers_query: Extract<
        Query<(
            Entity,
            &ComputedNode,
            &UiGlobalTransform,
            &InheritedVisibility,
            Option<&CalculatedClip>,
            &ComputedUiTargetCamera,
            &ImageNode,
        )>,
    >,
    camera_map: Extract<UiCameraMap>,
) {
    let mut camera_mapper = camera_map.get_mapper();
    for (entity, uinode, transform, inherited_visibility, clip, camera, image) in &slicers_query {
        // Skip invisible images
        if !inherited_visibility.get()
            || image.color.is_fully_transparent()
            || image.image.id() == TRANSPARENT_IMAGE_HANDLE.id()
        {
            continue;
        }
        // Only sliced and tiled modes belong to this extraction path.
        let image_scale_mode = match image.image_mode.clone() {
            widget::NodeImageMode::Sliced(texture_slicer) => {
                SpriteImageMode::Sliced(texture_slicer)
            }
            widget::NodeImageMode::Tiled {
                tile_x,
                tile_y,
                stretch_value,
            } => SpriteImageMode::Tiled {
                tile_x,
                tile_y,
                stretch_value,
            },
            _ => continue,
        };
        let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
            continue;
        };
        let atlas_rect = image
            .texture_atlas
            .as_ref()
            .and_then(|s| s.texture_rect(&texture_atlases))
            .map(|r| r.as_rect());
        // Combine the atlas cell rect with an optional explicit image rect:
        // the explicit rect is interpreted relative to the atlas cell's origin.
        let atlas_rect = match (atlas_rect, image.rect) {
            (None, None) => None,
            (None, Some(image_rect)) => Some(image_rect),
            (Some(atlas_rect), None) => Some(atlas_rect),
            (Some(atlas_rect), Some(mut image_rect)) => {
                image_rect.min += atlas_rect.min;
                image_rect.max += atlas_rect.min;
                Some(image_rect)
            }
        };
        extracted_ui_slicers.slices.push(ExtractedUiTextureSlice {
            render_entity: commands.spawn(TemporaryRenderEntity).id(),
            stack_index: uinode.stack_index,
            transform: transform.into(),
            color: image.color.into(),
            rect: Rect {
                min: Vec2::ZERO,
                max: uinode.size,
            },
            clip: clip.map(|clip| clip.clip),
            image: image.image.id(),
            extracted_camera_entity,
            image_scale_mode,
            atlas_rect,
            flip_x: image.flip_x,
            flip_y: image.flip_y,
            inverse_scale_factor: uinode.inverse_scale_factor,
            main_entity: entity.into(),
        });
    }
}
/// Queues every extracted texture slice into its view's [`TransparentUi`] phase,
/// specializing the slice pipeline per view.
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of them"
)]
pub fn queue_ui_slices(
    extracted_ui_slicers: ResMut<ExtractedUiTextureSlices>,
    ui_slicer_pipeline: Res<UiTextureSlicePipeline>,
    mut pipelines: ResMut<SpecializedRenderPipelines<UiTextureSlicePipeline>>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
    mut render_views: Query<&UiCameraView, With<ExtractedView>>,
    camera_views: Query<&ExtractedView>,
    pipeline_cache: Res<PipelineCache>,
    draw_functions: Res<DrawFunctions<TransparentUi>>,
) {
    let draw_function = draw_functions.read().id::<DrawUiTextureSlices>();
    for (index, extracted_slicer) in extracted_ui_slicers.slices.iter().enumerate() {
        // Resolve camera -> view -> phase; skip the slice if any link is missing.
        let Ok(default_camera_view) =
            render_views.get_mut(extracted_slicer.extracted_camera_entity)
        else {
            continue;
        };
        let Ok(view) = camera_views.get(default_camera_view.0) else {
            continue;
        };
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        let pipeline = pipelines.specialize(
            &pipeline_cache,
            &ui_slicer_pipeline,
            UiTextureSlicePipelineKey { hdr: view.hdr },
        );
        transparent_phase.add(TransparentUi {
            draw_function,
            pipeline,
            entity: (extracted_slicer.render_entity, extracted_slicer.main_entity),
            // Slices sort with plain images within the UI stack.
            sort_key: FloatOrd(extracted_slicer.stack_index as f32 + stack_z_offsets::IMAGE),
            batch_range: 0..0,
            extra_index: PhaseItemExtraIndex::None,
            index,
            indexed: true,
        });
    }
}
/// Writes vertex/index data for the extracted UI texture slices into GPU
/// buffers and batches consecutive phase items that can share an image bind
/// group, inserting a [`UiTextureSlicerBatch`] on each batch's render entity.
///
/// Note on the counters below: `vertices_index` advances by 6 per quad and is
/// used as the *index*-buffer range stored in `UiTextureSlicerBatch::range`
/// (consumed by `draw_indexed`), while `indices_index` advances by 4 per quad
/// and is the base *vertex* offset added to `QUAD_INDICES` — the names are
/// swapped relative to what they count.
pub fn prepare_ui_slices(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    pipeline_cache: Res<PipelineCache>,
    mut ui_meta: ResMut<UiTextureSliceMeta>,
    mut extracted_slices: ResMut<ExtractedUiTextureSlices>,
    view_uniforms: Res<ViewUniforms>,
    texture_slicer_pipeline: Res<UiTextureSlicePipeline>,
    mut image_bind_groups: ResMut<UiTextureSliceImageBindGroups>,
    gpu_images: Res<RenderAssets<GpuImage>>,
    mut phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
    events: Res<SpriteAssetEvents>,
    mut previous_len: Local<usize>,
) {
    // If an image has changed, the GpuImage has (probably) changed
    for event in &events.images {
        match event {
            AssetEvent::Added { .. } |
            AssetEvent::Unused { .. } |
            // Images don't have dependencies
            AssetEvent::LoadedWithDependencies { .. } => {}
            AssetEvent::Modified { id } | AssetEvent::Removed { id } => {
                // Drop the cached bind group so it is recreated from the new GpuImage.
                image_bind_groups.values.remove(id);
            }
        };
    }
    if let Some(view_binding) = view_uniforms.uniforms.binding() {
        // Sized from last frame's batch count to avoid reallocation.
        let mut batches: Vec<(Entity, UiTextureSlicerBatch)> = Vec::with_capacity(*previous_len);
        ui_meta.vertices.clear();
        ui_meta.indices.clear();
        ui_meta.view_bind_group = Some(render_device.create_bind_group(
            "ui_texture_slice_view_bind_group",
            &pipeline_cache.get_bind_group_layout(&texture_slicer_pipeline.view_layout),
            &BindGroupEntries::single(view_binding),
        ));
        // Buffer indexes
        let mut vertices_index = 0;
        let mut indices_index = 0;
        for ui_phase in phases.values_mut() {
            let mut batch_item_index = 0;
            let mut batch_image_handle = AssetId::invalid();
            let mut batch_image_size = Vec2::ZERO;
            for item_index in 0..ui_phase.items.len() {
                let item = &mut ui_phase.items[item_index];
                // Only process phase items that correspond to one of our extracted slices.
                if let Some(texture_slices) = extracted_slices
                    .slices
                    .get(item.index)
                    .filter(|n| item.entity() == n.render_entity)
                {
                    let mut existing_batch = batches.last_mut();
                    // Start a new batch when there is none yet, or the image changed
                    // (two distinct non-default images can't share a bind group).
                    if batch_image_handle == AssetId::invalid()
                        || existing_batch.is_none()
                        || (batch_image_handle != AssetId::default()
                            && texture_slices.image != AssetId::default()
                            && batch_image_handle != texture_slices.image)
                    {
                        if let Some(gpu_image) = gpu_images.get(texture_slices.image) {
                            batch_item_index = item_index;
                            batch_image_handle = texture_slices.image;
                            batch_image_size = gpu_image.size_2d().as_vec2();
                            let new_batch = UiTextureSlicerBatch {
                                range: vertices_index..vertices_index,
                                image: texture_slices.image,
                            };
                            batches.push((item.entity(), new_batch));
                            image_bind_groups
                                .values
                                .entry(batch_image_handle)
                                .or_insert_with(|| {
                                    render_device.create_bind_group(
                                        "ui_texture_slice_image_layout",
                                        &pipeline_cache.get_bind_group_layout(
                                            &texture_slicer_pipeline.image_layout,
                                        ),
                                        &BindGroupEntries::sequential((
                                            &gpu_image.texture_view,
                                            &gpu_image.sampler,
                                        )),
                                    )
                                });
                            existing_batch = batches.last_mut();
                        } else {
                            // GPU image not uploaded yet; skip this item.
                            continue;
                        }
                    } else if let Some(ref mut existing_batch) = existing_batch
                        && batch_image_handle == AssetId::default()
                        && texture_slices.image != AssetId::default()
                    {
                        // The current batch was untextured; adopt this item's image for it.
                        if let Some(gpu_image) = gpu_images.get(texture_slices.image) {
                            batch_image_handle = texture_slices.image;
                            batch_image_size = gpu_image.size_2d().as_vec2();
                            existing_batch.1.image = texture_slices.image;
                            image_bind_groups
                                .values
                                .entry(batch_image_handle)
                                .or_insert_with(|| {
                                    render_device.create_bind_group(
                                        "ui_texture_slice_image_layout",
                                        &pipeline_cache.get_bind_group_layout(
                                            &texture_slicer_pipeline.image_layout,
                                        ),
                                        &BindGroupEntries::sequential((
                                            &gpu_image.texture_view,
                                            &gpu_image.sampler,
                                        )),
                                    )
                                });
                        } else {
                            continue;
                        }
                    }
                    let uinode_rect = texture_slices.rect;
                    let rect_size = uinode_rect.size();
                    // Specify the corners of the node
                    let positions = QUAD_VERTEX_POSITIONS.map(|pos| {
                        (texture_slices.transform.transform_point2(pos * rect_size)).extend(0.)
                    });
                    // Calculate the effect of clipping
                    // Note: this won't work with rotation/scaling, but that's much more complex (may need more that 2 quads)
                    let positions_diff = if let Some(clip) = texture_slices.clip {
                        [
                            Vec2::new(
                                f32::max(clip.min.x - positions[0].x, 0.),
                                f32::max(clip.min.y - positions[0].y, 0.),
                            ),
                            Vec2::new(
                                f32::min(clip.max.x - positions[1].x, 0.),
                                f32::max(clip.min.y - positions[1].y, 0.),
                            ),
                            Vec2::new(
                                f32::min(clip.max.x - positions[2].x, 0.),
                                f32::min(clip.max.y - positions[2].y, 0.),
                            ),
                            Vec2::new(
                                f32::max(clip.min.x - positions[3].x, 0.),
                                f32::min(clip.max.y - positions[3].y, 0.),
                            ),
                        ]
                    } else {
                        [Vec2::ZERO; 4]
                    };
                    let positions_clipped = [
                        positions[0] + positions_diff[0].extend(0.),
                        positions[1] + positions_diff[1].extend(0.),
                        positions[2] + positions_diff[2].extend(0.),
                        positions[3] + positions_diff[3].extend(0.),
                    ];
                    let transformed_rect_size =
                        texture_slices.transform.transform_vector2(rect_size);
                    // Don't try to cull nodes that have a rotation
                    // In a rotation around the Z-axis, this value is 0.0 for an angle of 0.0 or π
                    // In those two cases, the culling check can proceed normally as corners will be on
                    // horizontal / vertical lines
                    // For all other angles, bypass the culling check
                    // This does not properly handles all rotations on all axis
                    if texture_slices.transform.x_axis[1] == 0.0 {
                        // Cull nodes that are completely clipped
                        if positions_diff[0].x - positions_diff[1].x >= transformed_rect_size.x
                            || positions_diff[1].y - positions_diff[2].y >= transformed_rect_size.y
                        {
                            continue;
                        }
                    }
                    let flags = if texture_slices.image != AssetId::default() {
                        shader_flags::TEXTURED
                    } else {
                        shader_flags::UNTEXTURED
                    };
                    let uvs = if flags == shader_flags::UNTEXTURED {
                        [Vec2::ZERO, Vec2::X, Vec2::ONE, Vec2::Y]
                    } else {
                        // UVs shift with the clip adjustment so the visible texel
                        // window matches the clipped quad.
                        let atlas_extent = uinode_rect.max;
                        [
                            Vec2::new(
                                uinode_rect.min.x + positions_diff[0].x,
                                uinode_rect.min.y + positions_diff[0].y,
                            ),
                            Vec2::new(
                                uinode_rect.max.x + positions_diff[1].x,
                                uinode_rect.min.y + positions_diff[1].y,
                            ),
                            Vec2::new(
                                uinode_rect.max.x + positions_diff[2].x,
                                uinode_rect.max.y + positions_diff[2].y,
                            ),
                            Vec2::new(
                                uinode_rect.min.x + positions_diff[3].x,
                                uinode_rect.max.y + positions_diff[3].y,
                            ),
                        ]
                        .map(|pos| pos / atlas_extent)
                    };
                    let color = texture_slices.color.to_f32_array();
                    // Normalized atlas sub-rect within the GPU image (full image if no atlas).
                    let (image_size, mut atlas) = if let Some(atlas) = texture_slices.atlas_rect {
                        (
                            atlas.size(),
                            [
                                atlas.min.x / batch_image_size.x,
                                atlas.min.y / batch_image_size.y,
                                atlas.max.x / batch_image_size.x,
                                atlas.max.y / batch_image_size.y,
                            ],
                        )
                    } else {
                        (batch_image_size, [0., 0., 1., 1.])
                    };
                    // Flipping swaps the normalized atlas edges.
                    if texture_slices.flip_x {
                        atlas.swap(0, 2);
                    }
                    if texture_slices.flip_y {
                        atlas.swap(1, 3);
                    }
                    let [slices, border, repeat] = compute_texture_slices(
                        image_size,
                        uinode_rect.size() * texture_slices.inverse_scale_factor,
                        &texture_slices.image_scale_mode,
                    );
                    // One quad per slice: 4 vertices, 6 indices.
                    for i in 0..4 {
                        ui_meta.vertices.push(UiTextureSliceVertex {
                            position: positions_clipped[i].into(),
                            uv: uvs[i].into(),
                            color,
                            slices,
                            border,
                            repeat,
                            atlas,
                        });
                    }
                    for &i in &QUAD_INDICES {
                        ui_meta.indices.push(indices_index + i as u32);
                    }
                    vertices_index += 6;
                    indices_index += 4;
                    existing_batch.unwrap().1.range.end = vertices_index;
                    ui_phase.items[batch_item_index].batch_range_mut().end += 1;
                } else {
                    // Foreign phase item (not one of ours): break the current batch.
                    batch_image_handle = AssetId::invalid();
                }
            }
        }
        ui_meta.vertices.write_buffer(&render_device, &render_queue);
        ui_meta.indices.write_buffer(&render_device, &render_queue);
        *previous_len = batches.len();
        commands.try_insert_batch(batches);
    }
    extracted_slices.slices.clear();
}
/// Render command sequence for drawing UI texture slices:
/// set the specialized pipeline, bind the view and image bind groups,
/// then issue the indexed draw.
pub type DrawUiTextureSlices = (
    SetItemPipeline,
    SetSlicerViewBindGroup<0>,
    SetSlicerTextureBindGroup<1>,
    DrawSlicer,
);
/// Render command that binds the UI view-uniform bind group at index `I`,
/// applying the current view's dynamic uniform offset.
pub struct SetSlicerViewBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetSlicerViewBindGroup<I> {
    type Param = SRes<UiTextureSliceMeta>;
    type ViewQuery = Read<ViewUniformOffset>;
    type ItemQuery = ();
    fn render<'w>(
        _item: &P,
        view_uniform: &'w ViewUniformOffset,
        _entity: Option<()>,
        ui_meta: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // The view bind group is created in `prepare_ui_slices`; fail
        // gracefully if it hasn't been prepared this frame.
        let Some(view_bind_group) = ui_meta.into_inner().view_bind_group.as_ref() else {
            return RenderCommandResult::Failure("view_bind_group not available");
        };
        pass.set_bind_group(I, view_bind_group, &[view_uniform.offset]);
        RenderCommandResult::Success
    }
}
/// Render command that binds the image bind group of the current
/// [`UiTextureSlicerBatch`] at bind group index `I`.
pub struct SetSlicerTextureBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetSlicerTextureBindGroup<I> {
    type Param = SRes<UiTextureSliceImageBindGroups>;
    type ViewQuery = ();
    type ItemQuery = Read<UiTextureSlicerBatch>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: (),
        batch: Option<&'w UiTextureSlicerBatch>,
        image_bind_groups: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let image_bind_groups = image_bind_groups.into_inner();
        // Items without a batch component were folded into another batch.
        let Some(batch) = batch else {
            return RenderCommandResult::Skip;
        };
        // Previously this `unwrap`ped the lookup, panicking the render thread
        // if the bind group was never prepared (e.g. the image asset was
        // removed between prepare and render). Report a recoverable failure
        // instead, matching the other render commands in this file.
        let Some(bind_group) = image_bind_groups.values.get(&batch.image) else {
            return RenderCommandResult::Failure("no bind group prepared for the batch image");
        };
        pass.set_bind_group(I, bind_group, &[]);
        RenderCommandResult::Success
    }
}
/// Render command that issues the indexed draw call for a UI texture slice batch.
pub struct DrawSlicer;
impl<P: PhaseItem> RenderCommand<P> for DrawSlicer {
    type Param = SRes<UiTextureSliceMeta>;
    type ViewQuery = ();
    type ItemQuery = Read<UiTextureSlicerBatch>;
    #[inline]
    fn render<'w>(
        _item: &P,
        _view: (),
        batch: Option<&'w UiTextureSlicerBatch>,
        ui_meta: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // Items without a batch component were folded into another batch.
        let Some(batch) = batch else {
            return RenderCommandResult::Skip;
        };
        let meta = ui_meta.into_inner();
        let Some(vertex_buffer) = meta.vertices.buffer() else {
            return RenderCommandResult::Failure("missing vertices to draw ui");
        };
        let Some(index_buffer) = meta.indices.buffer() else {
            return RenderCommandResult::Failure("missing indices to draw ui");
        };
        // Bind the shared vertex/index buffers and draw this batch's index range.
        pass.set_vertex_buffer(0, vertex_buffer.slice(..));
        pass.set_index_buffer(index_buffer.slice(..), IndexFormat::Uint32);
        pass.draw_indexed(batch.range.clone(), 0, 0..1);
        RenderCommandResult::Success
    }
}
/// Computes the normalized nine-patch parameters for a sliced or tiled UI image.
///
/// Returns `[slices, border, repeat]` where:
/// * `slices` are the normalized cut positions within the image (left, top, right, bottom),
/// * `border` are the corresponding normalized cut positions within the target node,
/// * `repeat` holds tiling repeat counts `[side_x, side_y, center_x, center_y]`;
///   a count of `1.` stretches that slice to fill its axis.
///
/// # Panics
/// Panics on [`SpriteImageMode::Auto`] and [`SpriteImageMode::Scale`], which
/// have no slice geometry; callers must not pass those modes.
fn compute_texture_slices(
    image_size: Vec2,
    target_size: Vec2,
    image_scale_mode: &SpriteImageMode,
) -> [[f32; 4]; 3] {
    match image_scale_mode {
        SpriteImageMode::Sliced(TextureSlicer {
            border: border_rect,
            center_scale_mode,
            sides_scale_mode,
            max_corner_scale,
        }) => {
            // Scale-down factor for the corners when the target is smaller
            // than the image, clamped by the configured maximum corner scale.
            let min_coeff = (target_size / image_size)
                .min_element()
                .min(*max_corner_scale);
            // calculate the normalized extents of the nine-patched image slices
            let slices = [
                border_rect.min_inset.x / image_size.x,
                border_rect.min_inset.y / image_size.y,
                1. - border_rect.max_inset.x / image_size.x,
                1. - border_rect.max_inset.y / image_size.y,
            ];
            // calculate the normalized extents of the target slices
            let border = [
                (border_rect.min_inset.x / target_size.x) * min_coeff,
                (border_rect.min_inset.y / target_size.y) * min_coeff,
                1. - (border_rect.max_inset.x / target_size.x) * min_coeff,
                1. - (border_rect.max_inset.y / target_size.y) * min_coeff,
            ];
            // Physical extents of the central (non-corner) strips.
            let image_side_width = image_size.x * (slices[2] - slices[0]);
            let image_side_height = image_size.y * (slices[3] - slices[1]);
            let target_side_width = target_size.x * (border[2] - border[0]);
            let target_side_height = target_size.y * (border[3] - border[1]);
            // compute the number of times to repeat the side and center slices when tiling along each axis
            // if the returned value is `1.` the slice will be stretched to fill the axis.
            let repeat_side_x =
                compute_tiled_subaxis(image_side_width, target_side_width, sides_scale_mode);
            let repeat_side_y =
                compute_tiled_subaxis(image_side_height, target_side_height, sides_scale_mode);
            let repeat_center_x =
                compute_tiled_subaxis(image_side_width, target_side_width, center_scale_mode);
            let repeat_center_y =
                compute_tiled_subaxis(image_side_height, target_side_height, center_scale_mode);
            [
                slices,
                border,
                [
                    repeat_side_x,
                    repeat_side_y,
                    repeat_center_x,
                    repeat_center_y,
                ],
            ]
        }
        SpriteImageMode::Tiled {
            tile_x,
            tile_y,
            stretch_value,
        } => {
            // No nine-patch cuts: the whole image tiles across the node.
            let rx = compute_tiled_axis(*tile_x, image_size.x, target_size.x, *stretch_value);
            let ry = compute_tiled_axis(*tile_y, image_size.y, target_size.y, *stretch_value);
            [[0., 0., 1., 1.], [0., 0., 1., 1.], [1., 1., rx, ry]]
        }
        SpriteImageMode::Auto => {
            // Fixed: the panic message previously named `Stretch` instead of `Auto`.
            unreachable!("Slices can not be computed for SpriteImageMode::Auto")
        }
        SpriteImageMode::Scale(_) => {
            unreachable!("Slices can not be computed for SpriteImageMode::Scale")
        }
    }
}
/// Returns how many times the image tile repeats along one axis.
///
/// When `tile` is false the image is stretched to fill the axis instead,
/// which is expressed as a repeat count of `1.`.
fn compute_tiled_axis(tile: bool, image_extent: f32, target_extent: f32, stretch: f32) -> f32 {
    if !tile {
        return 1.;
    }
    // Each tile covers `image_extent * stretch` units of the target axis.
    target_extent / (image_extent * stretch)
}
/// Returns the repeat count for a nine-patch side/center slice along one axis.
///
/// `Stretch` always yields `1.` (the slice is stretched to fill the axis);
/// `Tile` divides the target extent by the stretched image extent.
fn compute_tiled_subaxis(image_extent: f32, target_extent: f32, mode: &SliceScaleMode) -> f32 {
    match mode {
        SliceScaleMode::Stretch => 1.,
        SliceScaleMode::Tile { stretch_value } => target_extent / (image_extent * stretch_value),
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/debug_overlay.rs | crates/bevy_ui_render/src/debug_overlay.rs | use super::ExtractedUiItem;
use super::ExtractedUiNode;
use super::ExtractedUiNodes;
use super::NodeType;
use super::UiCameraMap;
use crate::shader_flags;
use bevy_asset::AssetId;
use bevy_camera::visibility::InheritedVisibility;
use bevy_color::Hsla;
use bevy_color::LinearRgba;
use bevy_ecs::entity::Entity;
use bevy_ecs::prelude::Component;
use bevy_ecs::prelude::ReflectComponent;
use bevy_ecs::prelude::ReflectResource;
use bevy_ecs::resource::Resource;
use bevy_ecs::system::Commands;
use bevy_ecs::system::Query;
use bevy_ecs::system::Res;
use bevy_ecs::system::ResMut;
use bevy_math::Affine2;
use bevy_math::Rect;
use bevy_math::Vec2;
use bevy_reflect::Reflect;
use bevy_render::sync_world::TemporaryRenderEntity;
use bevy_render::Extract;
use bevy_sprite::BorderRect;
use bevy_ui::ui_transform::UiGlobalTransform;
use bevy_ui::CalculatedClip;
use bevy_ui::ComputedNode;
use bevy_ui::ComputedUiTargetCamera;
use bevy_ui::ResolvedBorderRadius;
use bevy_ui::UiStack;
/// Configuration for the UI debug overlay
///
/// Can be added as both a global `Resource` and locally as a `Component` to individual UI node entities.
/// The local component options override the global resource.
#[derive(Component, Resource, Reflect)]
#[reflect(Component, Resource)]
pub struct UiDebugOptions {
    /// Set to true to enable the UI debug overlay (disabled by default)
    pub enabled: bool,
    /// Show outlines for the border boxes of UI nodes
    pub outline_border_box: bool,
    /// Show outlines for the padding boxes of UI nodes
    pub outline_padding_box: bool,
    /// Show outlines for the content boxes of UI nodes
    pub outline_content_box: bool,
    /// Show outlines for the scrollbar regions of UI nodes
    pub outline_scrollbars: bool,
    /// Width of the overlay's lines in logical pixels
    pub line_width: f32,
    /// Override Color for the overlay's lines.
    /// If `None`, a distinct color is derived from each entity's index.
    pub line_color_override: Option<LinearRgba>,
    /// Show outlines for non-visible UI nodes
    pub show_hidden: bool,
    /// Show outlines for clipped sections of UI nodes
    pub show_clipped: bool,
    /// Draw outlines with sharp corners even if the UI nodes have border radii
    pub ignore_border_radius: bool,
}
impl UiDebugOptions {
    /// Flips the `enabled` flag, turning the debug overlay on or off.
    pub fn toggle(&mut self) {
        self.enabled = !self.enabled;
    }
}
impl Default for UiDebugOptions {
    /// The overlay starts disabled; when enabled, only border-box outlines
    /// are drawn, with 1-pixel lines and no color override.
    fn default() -> Self {
        // Initializers listed in the same order as the struct declaration.
        Self {
            enabled: false,
            outline_border_box: true,
            outline_padding_box: false,
            outline_content_box: false,
            outline_scrollbars: false,
            line_width: 1.,
            line_color_override: None,
            show_hidden: false,
            show_clipped: false,
            ignore_border_radius: false,
        }
    }
}
/// Extract system that emits the outline rectangles for the UI debug overlay.
///
/// For every UI node (subject to the effective [`UiDebugOptions`], with any
/// per-entity component overriding the global resource) this pushes
/// border-style extracted nodes outlining the border box, padding box,
/// content box and scrollbar gutters/thumbs, layered above all regular UI.
pub fn extract_debug_overlay(
    mut commands: Commands,
    debug_options: Extract<Res<UiDebugOptions>>,
    mut extracted_uinodes: ResMut<ExtractedUiNodes>,
    uinode_query: Extract<
        Query<(
            Entity,
            &ComputedNode,
            &UiGlobalTransform,
            &InheritedVisibility,
            Option<&CalculatedClip>,
            &ComputedUiTargetCamera,
            Option<&UiDebugOptions>,
        )>,
    >,
    ui_stack: Extract<Res<UiStack>>,
    camera_map: Extract<UiCameraMap>,
) {
    let mut camera_mapper = camera_map.get_mapper();
    for (entity, uinode, transform, visibility, maybe_clip, computed_target, debug) in &uinode_query
    {
        // Per-entity options take precedence over the global resource.
        let debug_options = debug.unwrap_or(&debug_options);
        if !debug_options.enabled {
            continue;
        }
        if !debug_options.show_hidden && !visibility.get() {
            continue;
        }
        let Some(extracted_camera_entity) = camera_mapper.map(computed_target) else {
            continue;
        };
        // Without an override, derive a stable, visually-distinct color from
        // the entity index.
        let color = debug_options
            .line_color_override
            .unwrap_or_else(|| Hsla::sequential_dispersed(entity.index_u32()).into());
        // Offset past the whole UI stack so overlays draw on top of all nodes.
        let z_order = (ui_stack.uinodes.len() as u32 + uinode.stack_index()) as f32;
        // Line width is given in logical pixels; convert to physical.
        let border = BorderRect::all(debug_options.line_width / uinode.inverse_scale_factor());
        let transform = transform.affine();
        // Shared helper: emit one outline (rect in node-centered coordinates)
        // as a border-type extracted node. Empty rects are skipped.
        let mut push_outline = |rect: Rect, radius: ResolvedBorderRadius| {
            if rect.is_empty() {
                return;
            }
            extracted_uinodes.uinodes.push(ExtractedUiNode {
                render_entity: commands.spawn(TemporaryRenderEntity).id(),
                // Keep all overlays above UI, and nudge each type slightly in Z so ordering is stable.
                z_order,
                clip: maybe_clip
                    .filter(|_| !debug_options.show_clipped)
                    .map(|clip| clip.clip),
                image: AssetId::default(),
                extracted_camera_entity,
                transform: transform * Affine2::from_translation(rect.center()),
                item: ExtractedUiItem::Node {
                    color,
                    rect: Rect {
                        min: Vec2::ZERO,
                        max: rect.size(),
                    },
                    atlas_scaling: None,
                    flip_x: false,
                    flip_y: false,
                    border,
                    border_radius: radius,
                    node_type: NodeType::Border(shader_flags::BORDER_ALL),
                },
                main_entity: entity.into(),
            });
        };
        let border_box = Rect::from_center_size(Vec2::ZERO, uinode.size);
        if debug_options.outline_border_box {
            push_outline(border_box, uinode.border_radius());
        }
        if debug_options.outline_padding_box {
            // Padding box = border box inset by the border widths.
            let mut padding_box = border_box;
            padding_box.min += uinode.border.min_inset;
            padding_box.max -= uinode.border.max_inset;
            push_outline(padding_box, uinode.inner_radius());
        }
        if debug_options.outline_content_box {
            // Content box = border box inset by the full content inset.
            let mut content_box = border_box;
            let content_inset = uinode.content_inset();
            content_box.min += content_inset.min_inset;
            content_box.max -= content_inset.max_inset;
            push_outline(content_box, ResolvedBorderRadius::ZERO);
        }
        if debug_options.outline_scrollbars {
            // Horizontal scrollbar (thickness `scrollbar_size.y`) along the
            // bottom edge; thumb position/extent follow the x scroll offset
            // relative to the content width.
            if 0. <= uinode.scrollbar_size.y {
                let content_inset = uinode.content_inset();
                let half_size = 0.5 * uinode.size;
                let min_x = -half_size.x + content_inset.min_inset.x;
                let max_x = half_size.x - content_inset.max_inset.x - uinode.scrollbar_size.x;
                let max_y = half_size.y - content_inset.max_inset.y;
                let min_y = max_y - uinode.scrollbar_size.y;
                let gutter = Rect {
                    min: Vec2::new(min_x, min_y),
                    max: Vec2::new(max_x, max_y),
                };
                let gutter_length = gutter.size().x;
                let thumb_min =
                    gutter.min.x + gutter_length * uinode.scroll_position.x / uinode.content_size.x;
                let thumb_max = thumb_min + gutter_length * gutter_length / uinode.content_size.x;
                let thumb = Rect {
                    min: Vec2::new(thumb_min, gutter.min.y),
                    max: Vec2::new(thumb_max, gutter.max.y),
                };
                push_outline(gutter, ResolvedBorderRadius::ZERO);
                push_outline(thumb, ResolvedBorderRadius::ZERO);
            }
            // Vertical scrollbar (thickness `scrollbar_size.x`) along the
            // right edge; mirrors the horizontal case on the y axis.
            if 0. <= uinode.scrollbar_size.x {
                let content_inset = uinode.content_inset();
                let half_size = 0.5 * uinode.size;
                let max_x = half_size.x - content_inset.max_inset.x;
                let min_x = max_x - uinode.scrollbar_size.x;
                let min_y = -half_size.y + content_inset.min_inset.y;
                let max_y = half_size.y - content_inset.max_inset.y - uinode.scrollbar_size.y;
                let gutter = Rect {
                    min: Vec2::new(min_x, min_y),
                    max: Vec2::new(max_x, max_y),
                };
                let gutter_length = gutter.size().y;
                let thumb_min =
                    gutter.min.y + gutter_length * uinode.scroll_position.y / uinode.content_size.y;
                let thumb_max = thumb_min + gutter_length * gutter_length / uinode.content_size.y;
                let thumb = Rect {
                    min: Vec2::new(gutter.min.x, thumb_min),
                    max: Vec2::new(gutter.max.x, thumb_max),
                };
                push_outline(gutter, ResolvedBorderRadius::ZERO);
                push_outline(thumb, ResolvedBorderRadius::ZERO);
            }
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_ui_render/src/gradient.rs | crates/bevy_ui_render/src/gradient.rs | use core::{
f32::consts::{FRAC_PI_2, TAU},
hash::Hash,
ops::Range,
};
use super::shader_flags::BORDER_ALL;
use crate::*;
use bevy_asset::*;
use bevy_color::{ColorToComponents, Hsla, Hsva, LinearRgba, Oklaba, Oklcha, Srgba};
use bevy_ecs::{
prelude::Component,
system::{
lifetimeless::{Read, SRes},
*,
},
};
use bevy_image::prelude::*;
use bevy_math::{
ops::{cos, sin},
FloatOrd, Rect, Vec2,
};
use bevy_math::{Affine2, Vec2Swizzles};
use bevy_mesh::VertexBufferLayout;
use bevy_render::{
render_phase::*,
render_resource::{binding_types::uniform_buffer, *},
renderer::{RenderDevice, RenderQueue},
sync_world::TemporaryRenderEntity,
view::*,
Extract, ExtractSchedule, Render, RenderSystems,
};
use bevy_render::{sync_world::MainEntity, RenderStartup};
use bevy_shader::Shader;
use bevy_sprite::BorderRect;
use bevy_ui::{
BackgroundGradient, BorderGradient, ColorStop, ComputedUiRenderTargetInfo, ConicGradient,
Gradient, InterpolationColorSpace, LinearGradient, RadialGradient, ResolvedBorderRadius, Val,
};
use bevy_utils::default;
use bytemuck::{Pod, Zeroable};
/// Plugin adding rendering support for UI node background and border gradients.
pub struct GradientPlugin;
impl Plugin for GradientPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "gradient.wgsl");
        // All gradient rendering lives in the render sub-app, which may be
        // absent (e.g. headless); skip setup in that case.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .add_render_command::<TransparentUi, DrawGradientFns>()
                .init_resource::<ExtractedGradients>()
                .init_resource::<ExtractedColorStops>()
                .init_resource::<GradientMeta>()
                .init_resource::<SpecializedRenderPipelines<GradientPipeline>>()
                .add_systems(RenderStartup, init_gradient_pipeline)
                .add_systems(
                    ExtractSchedule,
                    // Runs after background-color extraction so z-ordering of
                    // extracted items stays consistent.
                    extract_gradients
                        .in_set(RenderUiSystems::ExtractGradient)
                        .after(extract_uinode_background_colors),
                )
                .add_systems(
                    Render,
                    (
                        queue_gradient.in_set(RenderSystems::Queue),
                        prepare_gradient.in_set(RenderSystems::PrepareBindGroups),
                    ),
                );
        }
    }
}
/// Component inserted on render entities marking a run of gradient data that
/// is drawn with a single draw call.
#[derive(Component)]
pub struct GradientBatch {
    // Range of prepared gradient data covered by this batch
    // (presumably indexes into `GradientMeta`'s buffers — prepared elsewhere).
    pub range: Range<u32>,
}
/// CPU-side staging buffers and view bind group used to draw UI gradients.
#[derive(Resource)]
pub struct GradientMeta {
    // Per-vertex gradient data uploaded to a GPU vertex buffer.
    vertices: RawBufferVec<UiGradientVertex>,
    // Triangle indices into `vertices`.
    indices: RawBufferVec<u32>,
    // Bind group for the view uniforms; `None` until prepared.
    view_bind_group: Option<BindGroup>,
}
impl Default for GradientMeta {
    fn default() -> Self {
        Self {
            vertices: RawBufferVec::new(BufferUsages::VERTEX),
            indices: RawBufferVec::new(BufferUsages::INDEX),
            view_bind_group: None,
        }
    }
}
/// Shared resources for the UI gradient render pipeline.
#[derive(Resource)]
pub struct GradientPipeline {
    /// Layout of the view-uniform bind group (bound at group 0).
    pub view_layout: BindGroupLayoutDescriptor,
    /// Handle to the embedded `gradient.wgsl` shader.
    pub shader: Handle<Shader>,
}
/// Startup system that creates the [`GradientPipeline`] resource.
pub fn init_gradient_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
    // Single binding: the view uniform, visible to both shader stages.
    let view_layout = BindGroupLayoutDescriptor::new(
        "ui_gradient_view_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::VERTEX_FRAGMENT,
            uniform_buffer::<ViewUniform>(true),
        ),
    );
    let shader = load_embedded_asset!(asset_server.as_ref(), "gradient.wgsl");
    commands.insert_resource(GradientPipeline { view_layout, shader });
}
/// Length of a linear gradient's axis line for a node of the given size.
///
/// The gradient direction is `(sin(angle), -cos(angle))`; the line length is
/// the distance between the projections of the two extreme node corners onto
/// that direction.
pub fn compute_gradient_line_length(angle: f32, size: Vec2) -> f32 {
    let center = 0.5 * size;
    let dir = Vec2::new(sin(angle), -cos(angle));
    // Select the pair of opposite corners lying furthest along +dir and -dir,
    // based on which quadrant the direction vector points into.
    let (pos_corner, neg_corner) = match (0.0 <= dir.x, dir.y <= 0.0) {
        (true, true) => (size.with_y(0.), size.with_x(0.)),
        (true, false) => (size, Vec2::ZERO),
        (false, true) => (Vec2::ZERO, size),
        (false, false) => (size.with_x(0.), size.with_y(0.)),
    };
    // Distance between the two corner projections onto the gradient line.
    let t_pos = (pos_corner - center).dot(dir);
    let t_neg = (neg_corner - center).dot(dir);
    (t_pos - t_neg).abs()
}
/// Specialization key for [`GradientPipeline`].
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct UiGradientPipelineKey {
    // Whether the shader anti-aliases gradient edges (adds the `ANTI_ALIAS` def).
    anti_alias: bool,
    // Color space the shader interpolates color stops in.
    color_space: InterpolationColorSpace,
    /// Whether the view's render target is HDR (selects the target format).
    pub hdr: bool,
}
impl SpecializedRenderPipeline for GradientPipeline {
    type Key = UiGradientPipelineKey;
    /// Builds the gradient render pipeline for `key`, selecting the
    /// interpolation color space (and optional anti-aliasing) via shader defs
    /// and the color target format via the view's HDR setting.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // Interleaved per-vertex attributes; the order here must stay in sync
        // with the vertex input layout expected by `gradient.wgsl`.
        let vertex_layout = VertexBufferLayout::from_vertex_formats(
            VertexStepMode::Vertex,
            vec![
                // position
                VertexFormat::Float32x3,
                // uv
                VertexFormat::Float32x2,
                // flags
                VertexFormat::Uint32,
                // radius
                VertexFormat::Float32x4,
                // border
                VertexFormat::Float32x4,
                // size
                VertexFormat::Float32x2,
                // point
                VertexFormat::Float32x2,
                // start_point
                VertexFormat::Float32x2,
                // dir
                VertexFormat::Float32x2,
                // start_color
                VertexFormat::Float32x4,
                // start_len
                VertexFormat::Float32,
                // end_len
                VertexFormat::Float32,
                // end color
                VertexFormat::Float32x4,
                // hint
                VertexFormat::Float32,
            ],
        );
        // Map the interpolation color space to its shader def.
        let color_space = match key.color_space {
            InterpolationColorSpace::Oklaba => "IN_OKLAB",
            InterpolationColorSpace::Oklcha => "IN_OKLCH",
            InterpolationColorSpace::OklchaLong => "IN_OKLCH_LONG",
            InterpolationColorSpace::Srgba => "IN_SRGB",
            InterpolationColorSpace::LinearRgba => "IN_LINEAR_RGB",
            InterpolationColorSpace::Hsla => "IN_HSL",
            InterpolationColorSpace::HslaLong => "IN_HSL_LONG",
            InterpolationColorSpace::Hsva => "IN_HSV",
            InterpolationColorSpace::HsvaLong => "IN_HSV_LONG",
        };
        let shader_defs = if key.anti_alias {
            vec![color_space.into(), "ANTI_ALIAS".into()]
        } else {
            vec![color_space.into()]
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![vertex_layout],
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    // HDR views render to a wider target format.
                    format: if key.hdr {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout: vec![self.view_layout.clone()],
            label: Some("ui_gradient_pipeline".into()),
            ..default()
        }
    }
}
/// A gradient whose geometry has been resolved to physical values.
pub enum ResolvedGradient {
    /// Linear gradient along a line at `angle` radians.
    Linear { angle: f32 },
    /// Conic gradient swept around `center`, starting at angle `start`.
    Conic { center: Vec2, start: f32 },
    /// Radial gradient centered at `center` with resolved shape extents `size`.
    Radial { center: Vec2, size: Vec2 },
}
/// A gradient extracted from the main world for rendering.
pub struct ExtractedGradient {
    /// Position of the node in the UI stack (draw order).
    pub stack_index: u32,
    /// World transform of the node.
    pub transform: Affine2,
    /// Local rect of the node (`min` at zero, `max` at the node's size).
    pub rect: Rect,
    /// Inherited clipping rect, if any.
    pub clip: Option<Rect>,
    /// Render-world entity of the target camera.
    pub extracted_camera_entity: Entity,
    /// range into `ExtractedColorStops`
    pub stops_range: Range<usize>,
    /// Whether the gradient fills the node's rect or its border.
    pub node_type: NodeType,
    /// Main-world entity this gradient belongs to.
    pub main_entity: MainEntity,
    /// Temporary render-world entity spawned for this extracted item.
    pub render_entity: Entity,
    /// Border radius of the UI node.
    /// Ordering: top left, top right, bottom right, bottom left.
    pub border_radius: ResolvedBorderRadius,
    /// Border thickness of the UI node.
    /// Ordering: left, top, right, bottom.
    pub border: BorderRect,
    /// Gradient geometry resolved to physical values.
    pub resolved_gradient: ResolvedGradient,
    /// Color space used when interpolating between color stops.
    pub color_space: InterpolationColorSpace,
}
/// All gradients extracted from the main world this frame.
#[derive(Resource, Default)]
pub struct ExtractedGradients {
    /// Extracted gradients, in extraction order.
    pub items: Vec<ExtractedGradient>,
}
/// Flat buffer of color stops for all extracted gradients this frame.
/// Each entry is `(color, position, hint)`; implicit stops are appended with
/// a `f32::NAN` position and filled in by `interpolate_color_stops`.
#[derive(Resource, Default)]
pub struct ExtractedColorStops(pub Vec<(LinearRgba, f32, f32)>);
// Interpolate implicit stops (where position is `f32::NAN`)
// If the first and last stops are implicit set them to the `min` and `max` values
// so that we always have explicit start and end points to interpolate between.
//
// Positions are modified in place; colors and hints are untouched.
fn interpolate_color_stops(stops: &mut [(LinearRgba, f32, f32)], min: f32, max: f32) {
    // Robustness: an empty slice would panic on the unchecked `stops[0]` /
    // `last().unwrap()` accesses below; bail out early instead.
    if stops.is_empty() {
        return;
    }
    if stops[0].1.is_nan() {
        stops[0].1 = min;
    }
    if stops.last().unwrap().1.is_nan() {
        stops.last_mut().unwrap().1 = max;
    }
    // Walk the interior stops; each run of NaN positions is spaced evenly
    // between its explicit neighbors.
    let mut i = 1;
    while i < stops.len() - 1 {
        let point = stops[i].1;
        if point.is_nan() {
            let start = i;
            let mut end = i + 1;
            // Find the end of this run of implicit stops.
            while end < stops.len() - 1 && stops[end].1.is_nan() {
                end += 1;
            }
            let start_point = stops[start - 1].1;
            let end_point = stops[end].1;
            let steps = end - start;
            let step = (end_point - start_point) / (steps + 1) as f32;
            for j in 0..steps {
                stops[i + j].1 = start_point + step * (j + 1) as f32;
            }
            i = end;
        }
        i += 1;
    }
}
/// Resolves a gradient's [`ColorStop`]s to physical positions and appends them
/// to `extracted_color_stops` as `(linear color, position, hint)` entries.
///
/// Explicitly positioned stops are resolved against `length` and sorted;
/// `Val::Auto` stops are appended with a `f32::NAN` position and then filled
/// in by `interpolate_color_stops`. `scratch` is a reusable scratch buffer
/// that is left empty on return.
fn compute_color_stops(
    stops: &[ColorStop],
    scale_factor: f32,
    length: f32,
    target_size: Vec2,
    scratch: &mut Vec<(LinearRgba, f32, f32)>,
    extracted_color_stops: &mut Vec<(LinearRgba, f32, f32)>,
) {
    // resolve the physical distances of explicit stops and sort them
    scratch.extend(stops.iter().filter_map(|stop| {
        stop.point
            .resolve(scale_factor, length, target_size)
            .ok()
            .map(|physical_point| (stop.color.to_linear(), physical_point, stop.hint))
    }));
    scratch.sort_by_key(|(_, point, _)| FloatOrd(*point));
    // Extend the interpolation range so it always covers at least [0, length].
    let min = scratch
        .first()
        .map(|(_, min, _)| *min)
        .unwrap_or(0.)
        .min(0.);
    // get the position of the last explicit stop and use the full length of the gradient if no explicit stops
    let max = scratch
        .last()
        .map(|(_, max, _)| *max)
        .unwrap_or(length)
        .max(length);
    let mut sorted_stops_drain = scratch.drain(..);
    let range_start = extracted_color_stops.len();
    // Fill the extracted color stops buffer
    // NOTE(review): each non-`Auto` stop is paired with the next drained
    // sorted entry, which assumes every non-`Auto` point resolved
    // successfully above — a non-`Auto` stop whose `resolve` failed would
    // desync the two sequences and panic on `unwrap`. Confirm `resolve`
    // cannot fail for non-`Auto` values.
    extracted_color_stops.extend(stops.iter().map(|stop| {
        if stop.point == Val::Auto {
            (stop.color.to_linear(), f32::NAN, stop.hint)
        } else {
            sorted_stops_drain.next().unwrap()
        }
    }));
    interpolate_color_stops(&mut extracted_color_stops[range_start..], min, max);
}
pub fn extract_gradients(
mut commands: Commands,
mut extracted_gradients: ResMut<ExtractedGradients>,
mut extracted_color_stops: ResMut<ExtractedColorStops>,
mut extracted_uinodes: ResMut<ExtractedUiNodes>,
gradients_query: Extract<
Query<(
Entity,
&ComputedNode,
&ComputedUiTargetCamera,
&ComputedUiRenderTargetInfo,
&UiGlobalTransform,
&InheritedVisibility,
Option<&CalculatedClip>,
AnyOf<(&BackgroundGradient, &BorderGradient)>,
)>,
>,
camera_map: Extract<UiCameraMap>,
) {
let mut camera_mapper = camera_map.get_mapper();
let mut sorted_stops = vec![];
for (
entity,
uinode,
camera,
target,
transform,
inherited_visibility,
clip,
(gradient, gradient_border),
) in &gradients_query
{
// Skip invisible images
if !inherited_visibility.get() {
continue;
}
let Some(extracted_camera_entity) = camera_mapper.map(camera) else {
continue;
};
for (gradients, node_type) in [
(gradient.map(|g| &g.0), NodeType::Rect),
(gradient_border.map(|g| &g.0), NodeType::Border(BORDER_ALL)),
]
.iter()
.filter_map(|(g, n)| g.map(|g| (g, *n)))
{
for gradient in gradients.iter() {
if gradient.is_empty() {
continue;
}
if let Some(color) = gradient.get_single() {
// With a single color stop there's no gradient, fill the node with the color
extracted_uinodes.uinodes.push(ExtractedUiNode {
z_order: uinode.stack_index as f32
+ match node_type {
NodeType::Rect => stack_z_offsets::GRADIENT,
NodeType::Border(_) => stack_z_offsets::BORDER_GRADIENT,
},
image: AssetId::default(),
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
transform: transform.into(),
item: ExtractedUiItem::Node {
color: color.into(),
rect: Rect {
min: Vec2::ZERO,
max: uinode.size,
},
atlas_scaling: None,
flip_x: false,
flip_y: false,
border_radius: uinode.border_radius,
border: uinode.border,
node_type,
},
main_entity: entity.into(),
render_entity: commands.spawn(TemporaryRenderEntity).id(),
});
continue;
}
match gradient {
Gradient::Linear(LinearGradient {
color_space,
angle,
stops,
}) => {
let length = compute_gradient_line_length(*angle, uinode.size);
let range_start = extracted_color_stops.0.len();
compute_color_stops(
stops,
target.scale_factor(),
length,
target.physical_size().as_vec2(),
&mut sorted_stops,
&mut extracted_color_stops.0,
);
extracted_gradients.items.push(ExtractedGradient {
render_entity: commands.spawn(TemporaryRenderEntity).id(),
stack_index: uinode.stack_index,
transform: transform.into(),
stops_range: range_start..extracted_color_stops.0.len(),
rect: Rect {
min: Vec2::ZERO,
max: uinode.size,
},
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
main_entity: entity.into(),
node_type,
border_radius: uinode.border_radius,
border: uinode.border,
resolved_gradient: ResolvedGradient::Linear { angle: *angle },
color_space: *color_space,
});
}
Gradient::Radial(RadialGradient {
color_space,
position: center,
shape,
stops,
}) => {
let c = center.resolve(
target.scale_factor(),
uinode.size,
target.physical_size().as_vec2(),
);
let size = shape.resolve(
c,
target.scale_factor(),
uinode.size,
target.physical_size().as_vec2(),
);
let length = size.x;
let range_start = extracted_color_stops.0.len();
compute_color_stops(
stops,
target.scale_factor(),
length,
target.physical_size().as_vec2(),
&mut sorted_stops,
&mut extracted_color_stops.0,
);
extracted_gradients.items.push(ExtractedGradient {
render_entity: commands.spawn(TemporaryRenderEntity).id(),
stack_index: uinode.stack_index,
transform: transform.into(),
stops_range: range_start..extracted_color_stops.0.len(),
rect: Rect {
min: Vec2::ZERO,
max: uinode.size,
},
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
main_entity: entity.into(),
node_type,
border_radius: uinode.border_radius,
border: uinode.border,
resolved_gradient: ResolvedGradient::Radial { center: c, size },
color_space: *color_space,
});
}
Gradient::Conic(ConicGradient {
color_space,
start,
position: center,
stops,
}) => {
let g_start = center.resolve(
target.scale_factor(),
uinode.size,
target.physical_size().as_vec2(),
);
let range_start = extracted_color_stops.0.len();
// sort the explicit stops
sorted_stops.extend(stops.iter().filter_map(|stop| {
stop.angle.map(|angle| {
(stop.color.to_linear(), angle.clamp(0., TAU), stop.hint)
})
}));
sorted_stops.sort_by_key(|(_, angle, _)| FloatOrd(*angle));
let mut sorted_stops_drain = sorted_stops.drain(..);
// fill the extracted stops buffer
extracted_color_stops.0.extend(stops.iter().map(|stop| {
if stop.angle.is_none() {
(stop.color.to_linear(), f32::NAN, stop.hint)
} else {
sorted_stops_drain.next().unwrap()
}
}));
interpolate_color_stops(
&mut extracted_color_stops.0[range_start..],
0.,
TAU,
);
extracted_gradients.items.push(ExtractedGradient {
render_entity: commands.spawn(TemporaryRenderEntity).id(),
stack_index: uinode.stack_index,
transform: transform.into(),
stops_range: range_start..extracted_color_stops.0.len(),
rect: Rect {
min: Vec2::ZERO,
max: uinode.size,
},
clip: clip.map(|clip| clip.clip),
extracted_camera_entity,
main_entity: entity.into(),
node_type,
border_radius: uinode.border_radius,
border: uinode.border,
resolved_gradient: ResolvedGradient::Conic {
start: *start,
center: g_start,
},
color_space: *color_space,
});
}
}
}
}
}
}
#[expect(
clippy::too_many_arguments,
reason = "it's a system that needs a lot of them"
)]
pub fn queue_gradient(
extracted_gradients: ResMut<ExtractedGradients>,
gradients_pipeline: Res<GradientPipeline>,
mut pipelines: ResMut<SpecializedRenderPipelines<GradientPipeline>>,
mut transparent_render_phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
mut render_views: Query<(&UiCameraView, Option<&UiAntiAlias>), With<ExtractedView>>,
camera_views: Query<&ExtractedView>,
pipeline_cache: Res<PipelineCache>,
draw_functions: Res<DrawFunctions<TransparentUi>>,
) {
let draw_function = draw_functions.read().id::<DrawGradientFns>();
for (index, gradient) in extracted_gradients.items.iter().enumerate() {
let Ok((default_camera_view, ui_anti_alias)) =
render_views.get_mut(gradient.extracted_camera_entity)
else {
continue;
};
let Ok(view) = camera_views.get(default_camera_view.0) else {
continue;
};
let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
else {
continue;
};
let pipeline = pipelines.specialize(
&pipeline_cache,
&gradients_pipeline,
UiGradientPipelineKey {
anti_alias: matches!(ui_anti_alias, None | Some(UiAntiAlias::On)),
color_space: gradient.color_space,
hdr: view.hdr,
},
);
transparent_phase.add(TransparentUi {
draw_function,
pipeline,
entity: (gradient.render_entity, gradient.main_entity),
sort_key: FloatOrd(
gradient.stack_index as f32
+ match gradient.node_type {
NodeType::Rect => stack_z_offsets::GRADIENT,
NodeType::Border(_) => stack_z_offsets::BORDER_GRADIENT,
},
),
batch_range: 0..0,
extra_index: PhaseItemExtraIndex::None,
index,
indexed: true,
});
}
}
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct UiGradientVertex {
position: [f32; 3],
uv: [f32; 2],
flags: u32,
radius: [f32; 4],
border: [f32; 4],
size: [f32; 2],
point: [f32; 2],
g_start: [f32; 2],
g_dir: [f32; 2],
start_color: [f32; 4],
start_len: f32,
end_len: f32,
end_color: [f32; 4],
hint: f32,
}
fn convert_color_to_space(color: LinearRgba, space: InterpolationColorSpace) -> [f32; 4] {
match space {
InterpolationColorSpace::Oklaba => {
let oklaba: Oklaba = color.into();
[oklaba.lightness, oklaba.a, oklaba.b, oklaba.alpha]
}
InterpolationColorSpace::Oklcha | InterpolationColorSpace::OklchaLong => {
let oklcha: Oklcha = color.into();
[
oklcha.lightness,
oklcha.chroma,
// The shader expects normalized hues
oklcha.hue / 360.,
oklcha.alpha,
]
}
InterpolationColorSpace::Srgba => {
let srgba: Srgba = color.into();
[srgba.red, srgba.green, srgba.blue, srgba.alpha]
}
InterpolationColorSpace::LinearRgba => color.to_f32_array(),
InterpolationColorSpace::Hsla | InterpolationColorSpace::HslaLong => {
let hsla: Hsla = color.into();
// The shader expects normalized hues
[hsla.hue / 360., hsla.saturation, hsla.lightness, hsla.alpha]
}
InterpolationColorSpace::Hsva | InterpolationColorSpace::HsvaLong => {
let hsva: Hsva = color.into();
// The shader expects normalized hues
[hsva.hue / 360., hsva.saturation, hsva.value, hsva.alpha]
}
}
}
pub fn prepare_gradient(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
pipeline_cache: Res<PipelineCache>,
mut ui_meta: ResMut<GradientMeta>,
mut extracted_gradients: ResMut<ExtractedGradients>,
mut extracted_color_stops: ResMut<ExtractedColorStops>,
view_uniforms: Res<ViewUniforms>,
gradients_pipeline: Res<GradientPipeline>,
mut phases: ResMut<ViewSortedRenderPhases<TransparentUi>>,
mut previous_len: Local<usize>,
) {
if let Some(view_binding) = view_uniforms.uniforms.binding() {
let mut batches: Vec<(Entity, GradientBatch)> = Vec::with_capacity(*previous_len);
ui_meta.vertices.clear();
ui_meta.indices.clear();
ui_meta.view_bind_group = Some(render_device.create_bind_group(
"gradient_view_bind_group",
&pipeline_cache.get_bind_group_layout(&gradients_pipeline.view_layout),
&BindGroupEntries::single(view_binding),
));
// Buffer indexes
let mut vertices_index = 0;
let mut indices_index = 0;
for ui_phase in phases.values_mut() {
for item_index in 0..ui_phase.items.len() {
let item = &mut ui_phase.items[item_index];
if let Some(gradient) = extracted_gradients
.items
.get(item.index)
.filter(|n| item.entity() == n.render_entity)
{
*item.batch_range_mut() = item_index as u32..item_index as u32 + 1;
let uinode_rect = gradient.rect;
let rect_size = uinode_rect.size();
// Specify the corners of the node
let positions = QUAD_VERTEX_POSITIONS.map(|pos| {
gradient
.transform
.transform_point2(pos * rect_size)
.extend(0.)
});
let corner_points = QUAD_VERTEX_POSITIONS.map(|pos| pos * rect_size);
// Calculate the effect of clipping
// Note: this won't work with rotation/scaling, but that's much more complex (may need more that 2 quads)
let positions_diff = if let Some(clip) = gradient.clip {
[
Vec2::new(
f32::max(clip.min.x - positions[0].x, 0.),
f32::max(clip.min.y - positions[0].y, 0.),
),
Vec2::new(
f32::min(clip.max.x - positions[1].x, 0.),
f32::max(clip.min.y - positions[1].y, 0.),
),
Vec2::new(
f32::min(clip.max.x - positions[2].x, 0.),
f32::min(clip.max.y - positions[2].y, 0.),
),
Vec2::new(
f32::max(clip.min.x - positions[3].x, 0.),
f32::min(clip.max.y - positions[3].y, 0.),
),
]
} else {
[Vec2::ZERO; 4]
};
let positions_clipped = [
positions[0] + positions_diff[0].extend(0.),
positions[1] + positions_diff[1].extend(0.),
positions[2] + positions_diff[2].extend(0.),
positions[3] + positions_diff[3].extend(0.),
];
let points = [
corner_points[0] + positions_diff[0],
corner_points[1] + positions_diff[1],
corner_points[2] + positions_diff[2],
corner_points[3] + positions_diff[3],
];
let transformed_rect_size = gradient.transform.transform_vector2(rect_size);
// Don't try to cull nodes that have a rotation
// In a rotation around the Z-axis, this value is 0.0 for an angle of 0.0 or π
// In those two cases, the culling check can proceed normally as corners will be on
// horizontal / vertical lines
// For all other angles, bypass the culling check
// This does not properly handles all rotations on all axis
if gradient.transform.x_axis[1] == 0.0 {
// Cull nodes that are completely clipped
if positions_diff[0].x - positions_diff[1].x >= transformed_rect_size.x
|| positions_diff[1].y - positions_diff[2].y >= transformed_rect_size.y
{
continue;
}
}
let uvs = { [Vec2::ZERO, Vec2::X, Vec2::ONE, Vec2::Y] };
let mut flags = if let NodeType::Border(borders) = gradient.node_type {
borders
} else {
0
};
let (g_start, g_dir, g_flags) = match gradient.resolved_gradient {
ResolvedGradient::Linear { angle } => {
let corner_index = (angle - FRAC_PI_2).rem_euclid(TAU) / FRAC_PI_2;
(
corner_points[corner_index as usize].into(),
// CSS angles increase in a clockwise direction
[sin(angle), -cos(angle)],
0,
)
}
ResolvedGradient::Conic { center, start } => {
(center.into(), [start, 0.], shader_flags::CONIC)
}
ResolvedGradient::Radial { center, size } => (
center.into(),
Vec2::splat(if size.y != 0. { size.x / size.y } else { 1. }).into(),
shader_flags::RADIAL,
),
};
flags |= g_flags;
let range = gradient.stops_range.start..gradient.stops_range.end - 1;
let mut segment_count = 0;
for stop_index in range {
let mut start_stop = extracted_color_stops.0[stop_index];
let end_stop = extracted_color_stops.0[stop_index + 1];
if start_stop.1 == end_stop.1 {
if stop_index == gradient.stops_range.end - 2 {
if 0 < segment_count {
start_stop.0 = LinearRgba::NONE;
}
} else {
continue;
}
}
let start_color =
convert_color_to_space(start_stop.0, gradient.color_space);
let end_color = convert_color_to_space(end_stop.0, gradient.color_space);
let mut stop_flags = flags;
if 0. < start_stop.1
&& (stop_index == gradient.stops_range.start || segment_count == 0)
{
stop_flags |= shader_flags::FILL_START;
}
if stop_index == gradient.stops_range.end - 2 {
stop_flags |= shader_flags::FILL_END;
}
for i in 0..4 {
ui_meta.vertices.push(UiGradientVertex {
position: positions_clipped[i].into(),
uv: uvs[i].into(),
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/lib.rs | crates/bevy_sprite_render/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![forbid(unsafe_code)]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
//! Provides 2D sprite rendering functionality.
extern crate alloc;
mod mesh2d;
mod render;
#[cfg(feature = "bevy_text")]
mod text2d;
mod texture_slice;
mod tilemap_chunk;
/// The sprite prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::{ColorMaterial, MeshMaterial2d};
}
use bevy_shader::load_shader_library;
pub use mesh2d::*;
pub use render::*;
pub(crate) use texture_slice::*;
pub use tilemap_chunk::*;
use bevy_app::prelude::*;
use bevy_asset::{embedded_asset, AssetEventSystems};
use bevy_core_pipeline::core_2d::{AlphaMask2d, Opaque2d, Transparent2d};
use bevy_ecs::prelude::*;
use bevy_image::{prelude::*, TextureAtlasPlugin};
use bevy_mesh::Mesh2d;
use bevy_render::{
batching::sort_binned_render_phase, render_phase::AddRenderCommand,
render_resource::SpecializedRenderPipelines, sync_world::SyncToRenderWorld, ExtractSchedule,
Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_sprite::Sprite;
#[cfg(feature = "bevy_text")]
pub use crate::text2d::extract_text2d_sprite;
/// Adds support for 2D sprite rendering.
#[derive(Default)]
pub struct SpriteRenderPlugin;
/// System set for sprite rendering.
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
pub enum SpriteSystems {
ExtractSprites,
ComputeSlices,
}
impl Plugin for SpriteRenderPlugin {
fn build(&self, app: &mut App) {
load_shader_library!(app, "render/sprite_view_bindings.wgsl");
embedded_asset!(app, "render/sprite.wgsl");
if !app.is_plugin_added::<TextureAtlasPlugin>() {
app.add_plugins(TextureAtlasPlugin);
}
app.add_plugins((
Mesh2dRenderPlugin,
ColorMaterialPlugin,
TilemapChunkPlugin,
TilemapChunkMaterialPlugin,
))
.add_systems(
PostUpdate,
(
compute_slices_on_asset_event.before(AssetEventSystems),
compute_slices_on_sprite_change,
)
.in_set(SpriteSystems::ComputeSlices),
);
app.register_required_components::<Sprite, SyncToRenderWorld>();
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.init_resource::<ImageBindGroups>()
.init_resource::<SpecializedRenderPipelines<SpritePipeline>>()
.init_resource::<SpriteMeta>()
.init_resource::<ExtractedSprites>()
.init_resource::<ExtractedSlices>()
.init_resource::<SpriteAssetEvents>()
.init_resource::<SpriteBatches>()
.add_render_command::<Transparent2d, DrawSprite>()
.add_systems(RenderStartup, init_sprite_pipeline)
.add_systems(
ExtractSchedule,
(
extract_sprites.in_set(SpriteSystems::ExtractSprites),
extract_sprite_events,
#[cfg(feature = "bevy_text")]
extract_text2d_sprite.after(SpriteSystems::ExtractSprites),
),
)
.add_systems(
Render,
(
queue_sprites
.in_set(RenderSystems::Queue)
.ambiguous_with(queue_material2d_meshes::<ColorMaterial>),
prepare_sprite_image_bind_groups.in_set(RenderSystems::PrepareBindGroups),
prepare_sprite_view_bind_groups.in_set(RenderSystems::PrepareBindGroups),
sort_binned_render_phase::<Opaque2d>.in_set(RenderSystems::PhaseSort),
sort_binned_render_phase::<AlphaMask2d>.in_set(RenderSystems::PhaseSort),
),
);
};
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/text2d/mod.rs | crates/bevy_sprite_render/src/text2d/mod.rs | use crate::{
ExtractedSlice, ExtractedSlices, ExtractedSprite, ExtractedSpriteKind, ExtractedSprites,
};
use bevy_asset::{AssetId, Assets};
use bevy_camera::visibility::ViewVisibility;
use bevy_color::LinearRgba;
use bevy_ecs::{
entity::Entity,
query::Has,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::prelude::*;
use bevy_math::Vec2;
use bevy_render::sync_world::TemporaryRenderEntity;
use bevy_render::Extract;
use bevy_sprite::{Anchor, Text2dShadow};
use bevy_text::{
ComputedTextBlock, PositionedGlyph, Strikethrough, StrikethroughColor, TextBackgroundColor,
TextBounds, TextColor, TextLayoutInfo, Underline, UnderlineColor,
};
use bevy_transform::prelude::GlobalTransform;
/// This system extracts the sprites from the 2D text components and adds them to the
/// "render world".
pub fn extract_text2d_sprite(
mut commands: Commands,
mut extracted_sprites: ResMut<ExtractedSprites>,
mut extracted_slices: ResMut<ExtractedSlices>,
texture_atlases: Extract<Res<Assets<TextureAtlasLayout>>>,
text2d_query: Extract<
Query<(
Entity,
&ViewVisibility,
&ComputedTextBlock,
&TextLayoutInfo,
&TextBounds,
&Anchor,
Option<&Text2dShadow>,
&GlobalTransform,
)>,
>,
text_colors: Extract<Query<&TextColor>>,
text_background_colors_query: Extract<Query<&TextBackgroundColor>>,
decoration_query: Extract<
Query<(
&TextColor,
Has<Strikethrough>,
Has<Underline>,
Option<&StrikethroughColor>,
Option<&UnderlineColor>,
)>,
>,
) {
let mut start = extracted_slices.slices.len();
let mut end = start + 1;
for (
main_entity,
view_visibility,
computed_block,
text_layout_info,
text_bounds,
anchor,
maybe_shadow,
global_transform,
) in text2d_query.iter()
{
let scaling = GlobalTransform::from_scale(
Vec2::splat(text_layout_info.scale_factor.recip()).extend(1.),
);
if !view_visibility.get() {
continue;
}
let size = Vec2::new(
text_bounds.width.unwrap_or(text_layout_info.size.x),
text_bounds.height.unwrap_or(text_layout_info.size.y),
);
let top_left = (Anchor::TOP_LEFT.0 - anchor.as_vec()) * size;
for run in text_layout_info.run_geometry.iter() {
let section_entity = computed_block.entities()[run.span_index].entity;
let Ok(text_background_color) = text_background_colors_query.get(section_entity) else {
continue;
};
let render_entity = commands.spawn(TemporaryRenderEntity).id();
let offset = Vec2::new(run.bounds.center().x, -run.bounds.center().y);
let transform = *global_transform
* GlobalTransform::from_translation(top_left.extend(0.))
* scaling
* GlobalTransform::from_translation(offset.extend(0.));
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color: text_background_color.0.into(),
image_handle_id: AssetId::default(),
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Single {
anchor: Vec2::ZERO,
rect: None,
scaling_mode: None,
custom_size: Some(run.bounds.size()),
},
});
}
if let Some(shadow) = maybe_shadow {
let shadow_transform = *global_transform
* GlobalTransform::from_translation((top_left + shadow.offset).extend(0.))
* scaling;
let color = shadow.color.into();
for (
i,
PositionedGlyph {
position,
atlas_info,
..
},
) in text_layout_info.glyphs.iter().enumerate()
{
let rect = texture_atlases
.get(atlas_info.texture_atlas)
.unwrap()
.textures[atlas_info.location.glyph_index]
.as_rect();
extracted_slices.slices.push(ExtractedSlice {
offset: Vec2::new(position.x, -position.y),
rect,
size: rect.size(),
});
if text_layout_info
.glyphs
.get(i + 1)
.is_none_or(|info| info.atlas_info.texture != atlas_info.texture)
{
let render_entity = commands.spawn(TemporaryRenderEntity).id();
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform: shadow_transform,
color,
image_handle_id: atlas_info.texture,
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Slices {
indices: start..end,
},
});
start = end;
}
end += 1;
}
for run in text_layout_info.run_geometry.iter() {
let section_entity = computed_block.entities()[run.span_index].entity;
let Ok((_, has_strikethrough, has_underline, _, _)) =
decoration_query.get(section_entity)
else {
continue;
};
if has_strikethrough {
let render_entity = commands.spawn(TemporaryRenderEntity).id();
let offset = run.strikethrough_position() * Vec2::new(1., -1.);
let transform =
shadow_transform * GlobalTransform::from_translation(offset.extend(0.));
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color,
image_handle_id: AssetId::default(),
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Single {
anchor: Vec2::ZERO,
rect: None,
scaling_mode: None,
custom_size: Some(run.strikethrough_size()),
},
});
}
if has_underline {
let render_entity = commands.spawn(TemporaryRenderEntity).id();
let offset = run.underline_position() * Vec2::new(1., -1.);
let transform =
shadow_transform * GlobalTransform::from_translation(offset.extend(0.));
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color,
image_handle_id: AssetId::default(),
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Single {
anchor: Vec2::ZERO,
rect: None,
scaling_mode: None,
custom_size: Some(run.underline_size()),
},
});
}
}
}
let transform =
*global_transform * GlobalTransform::from_translation(top_left.extend(0.)) * scaling;
let mut color = LinearRgba::WHITE;
let mut current_span = usize::MAX;
for (
i,
PositionedGlyph {
position,
atlas_info,
span_index,
..
},
) in text_layout_info.glyphs.iter().enumerate()
{
if *span_index != current_span {
color = text_colors
.get(
computed_block
.entities()
.get(*span_index)
.map(|t| t.entity)
.unwrap_or(Entity::PLACEHOLDER),
)
.map(|text_color| LinearRgba::from(text_color.0))
.unwrap_or_default();
current_span = *span_index;
}
let rect = texture_atlases
.get(atlas_info.texture_atlas)
.unwrap()
.textures[atlas_info.location.glyph_index]
.as_rect();
extracted_slices.slices.push(ExtractedSlice {
offset: Vec2::new(position.x, -position.y),
rect,
size: rect.size(),
});
if text_layout_info.glyphs.get(i + 1).is_none_or(|info| {
info.span_index != current_span || info.atlas_info.texture != atlas_info.texture
}) {
let render_entity = commands.spawn(TemporaryRenderEntity).id();
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color,
image_handle_id: atlas_info.texture,
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Slices {
indices: start..end,
},
});
start = end;
}
end += 1;
}
for run in text_layout_info.run_geometry.iter() {
let section_entity = computed_block.entities()[run.span_index].entity;
let Ok((
text_color,
has_strike_through,
has_underline,
maybe_strikethrough_color,
maybe_underline_color,
)) = decoration_query.get(section_entity)
else {
continue;
};
if has_strike_through {
let color = maybe_strikethrough_color
.map(|c| c.0)
.unwrap_or(text_color.0)
.to_linear();
let render_entity = commands.spawn(TemporaryRenderEntity).id();
let offset = run.strikethrough_position() * Vec2::new(1., -1.);
let transform = *global_transform
* GlobalTransform::from_translation(top_left.extend(0.))
* scaling
* GlobalTransform::from_translation(offset.extend(0.));
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color,
image_handle_id: AssetId::default(),
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Single {
anchor: Vec2::ZERO,
rect: None,
scaling_mode: None,
custom_size: Some(run.strikethrough_size()),
},
});
}
if has_underline {
let color = maybe_underline_color
.map(|c| c.0)
.unwrap_or(text_color.0)
.to_linear();
let render_entity = commands.spawn(TemporaryRenderEntity).id();
let offset = run.underline_position() * Vec2::new(1., -1.);
let transform = *global_transform
* GlobalTransform::from_translation(top_left.extend(0.))
* scaling
* GlobalTransform::from_translation(offset.extend(0.));
extracted_sprites.sprites.push(ExtractedSprite {
main_entity,
render_entity,
transform,
color,
image_handle_id: AssetId::default(),
flip_x: false,
flip_y: false,
kind: ExtractedSpriteKind::Single {
anchor: Vec2::ZERO,
rect: None,
scaling_mode: None,
custom_size: Some(run.underline_size()),
},
});
}
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/mesh2d/color_material.rs | crates/bevy_sprite_render/src/mesh2d/color_material.rs | use crate::{AlphaMode2d, Material2d, Material2dPlugin};
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, embedded_path, Asset, AssetApp, AssetPath, Assets, Handle};
use bevy_color::{Alpha, Color, ColorToComponents, LinearRgba};
use bevy_image::Image;
use bevy_math::{Affine2, Mat3, Vec4};
use bevy_reflect::prelude::*;
use bevy_render::{render_asset::RenderAssets, render_resource::*, texture::GpuImage};
use bevy_shader::ShaderRef;
#[derive(Default)]
pub struct ColorMaterialPlugin;
impl Plugin for ColorMaterialPlugin {
fn build(&self, app: &mut App) {
embedded_asset!(app, "color_material.wgsl");
app.add_plugins(Material2dPlugin::<ColorMaterial>::default())
.register_asset_reflect::<ColorMaterial>();
// Initialize the default material handle.
app.world_mut()
.resource_mut::<Assets<ColorMaterial>>()
.insert(
&Handle::<ColorMaterial>::default(),
ColorMaterial {
color: Color::srgb(1.0, 0.0, 1.0),
..Default::default()
},
)
.unwrap();
}
}
/// A [2d material](Material2d) that renders [2d meshes](crate::Mesh2d) with a texture tinted by a uniform color
#[derive(Asset, AsBindGroup, Reflect, Debug, Clone)]
#[reflect(Default, Debug, Clone)]
#[uniform(0, ColorMaterialUniform)]
pub struct ColorMaterial {
pub color: Color,
pub alpha_mode: AlphaMode2d,
pub uv_transform: Affine2,
#[texture(1)]
#[sampler(2)]
pub texture: Option<Handle<Image>>,
}
impl ColorMaterial {
/// Creates a new material from a given color
pub fn from_color(color: impl Into<Color>) -> Self {
Self::from(color.into())
}
}
impl Default for ColorMaterial {
fn default() -> Self {
ColorMaterial {
color: Color::WHITE,
uv_transform: Affine2::default(),
texture: None,
// TODO should probably default to AlphaMask once supported?
alpha_mode: AlphaMode2d::Blend,
}
}
}
impl From<Color> for ColorMaterial {
fn from(color: Color) -> Self {
ColorMaterial {
color,
alpha_mode: if color.alpha() < 1.0 {
AlphaMode2d::Blend
} else {
AlphaMode2d::Opaque
},
..Default::default()
}
}
}
impl From<Handle<Image>> for ColorMaterial {
fn from(texture: Handle<Image>) -> Self {
ColorMaterial {
texture: Some(texture),
..Default::default()
}
}
}
// NOTE: These must match the bit flags in bevy_sprite_render/src/mesh2d/color_material.wgsl!
bitflags::bitflags! {
#[repr(transparent)]
pub struct ColorMaterialFlags: u32 {
const TEXTURE = 1 << 0;
/// Bitmask reserving bits for the [`AlphaMode2d`]
/// Values are just sequential values bitshifted into
/// the bitmask, and can range from 0 to 3.
const ALPHA_MODE_RESERVED_BITS = Self::ALPHA_MODE_MASK_BITS << Self::ALPHA_MODE_SHIFT_BITS;
const ALPHA_MODE_OPAQUE = 0 << Self::ALPHA_MODE_SHIFT_BITS;
const ALPHA_MODE_MASK = 1 << Self::ALPHA_MODE_SHIFT_BITS;
const ALPHA_MODE_BLEND = 2 << Self::ALPHA_MODE_SHIFT_BITS;
const NONE = 0;
const UNINITIALIZED = 0xFFFF;
}
}
impl ColorMaterialFlags {
const ALPHA_MODE_MASK_BITS: u32 = 0b11;
const ALPHA_MODE_SHIFT_BITS: u32 = 32 - Self::ALPHA_MODE_MASK_BITS.count_ones();
}
/// The GPU representation of the uniform data of a [`ColorMaterial`].
#[derive(Clone, Default, ShaderType)]
pub struct ColorMaterialUniform {
pub color: Vec4,
pub uv_transform: Mat3,
pub flags: u32,
pub alpha_cutoff: f32,
}
impl AsBindGroupShaderType<ColorMaterialUniform> for ColorMaterial {
fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> ColorMaterialUniform {
let mut flags = ColorMaterialFlags::NONE;
if self.texture.is_some() {
flags |= ColorMaterialFlags::TEXTURE;
}
// Defaults to 0.5 like in 3d
let mut alpha_cutoff = 0.5;
match self.alpha_mode {
AlphaMode2d::Opaque => flags |= ColorMaterialFlags::ALPHA_MODE_OPAQUE,
AlphaMode2d::Mask(c) => {
alpha_cutoff = c;
flags |= ColorMaterialFlags::ALPHA_MODE_MASK;
}
AlphaMode2d::Blend => flags |= ColorMaterialFlags::ALPHA_MODE_BLEND,
};
ColorMaterialUniform {
color: LinearRgba::from(self.color).to_f32_array().into(),
uv_transform: self.uv_transform.into(),
flags: flags.bits(),
alpha_cutoff,
}
}
}
impl Material2d for ColorMaterial {
fn fragment_shader() -> ShaderRef {
ShaderRef::Path(
AssetPath::from_path_buf(embedded_path!("color_material.wgsl")).with_source("embedded"),
)
}
fn alpha_mode(&self) -> AlphaMode2d {
self.alpha_mode
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/mesh2d/mesh.rs | crates/bevy_sprite_render/src/mesh2d/mesh.rs | use bevy_app::Plugin;
use bevy_asset::{embedded_asset, load_embedded_asset, AssetId, AssetServer, Handle};
use bevy_camera::{visibility::ViewVisibility, Camera2d};
use bevy_render::RenderStartup;
use bevy_shader::{load_shader_library, Shader, ShaderDefVal, ShaderSettings};
use crate::{tonemapping_pipeline_key, Material2dBindGroupId};
use bevy_core_pipeline::{
core_2d::{AlphaMask2d, Opaque2d, Transparent2d, CORE_2D_DEPTH_FORMAT},
tonemapping::{
get_lut_bind_group_layout_entries, get_lut_bindings, DebandDither, Tonemapping,
TonemappingLuts,
},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::change_detection::Tick;
use bevy_ecs::system::SystemChangeTick;
use bevy_ecs::{
prelude::*,
query::ROQueryItem,
system::{lifetimeless::*, SystemParamItem},
};
use bevy_image::BevyDefault;
use bevy_math::{Affine3, Vec4};
use bevy_mesh::{Mesh, Mesh2d, MeshTag, MeshVertexBufferLayoutRef};
use bevy_render::prelude::Msaa;
use bevy_render::RenderSystems::PrepareAssets;
use bevy_render::{
batching::{
gpu_preprocessing::IndirectParametersCpuMetadata,
no_gpu_preprocessing::{
self, batch_and_prepare_binned_render_phase, batch_and_prepare_sorted_render_phase,
write_batched_instance_buffer, BatchedInstanceBuffer,
},
GetBatchData, GetFullBatchData, NoAutomaticBatching,
},
globals::{GlobalsBuffer, GlobalsUniform},
mesh::{allocator::MeshAllocator, RenderMesh, RenderMeshBufferInfo},
render_asset::RenderAssets,
render_phase::{
sweep_old_entities, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult,
TrackedRenderPass,
},
render_resource::{binding_types::uniform_buffer, *},
renderer::RenderDevice,
sync_world::{MainEntity, MainEntityHashMap},
texture::{FallbackImage, GpuImage},
view::{ExtractedView, ViewTarget, ViewUniform, ViewUniformOffset, ViewUniforms},
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use bevy_transform::components::GlobalTransform;
use bevy_utils::default;
use nonmax::NonMaxU32;
use tracing::error;
#[derive(Default)]
pub struct Mesh2dRenderPlugin;
impl Plugin for Mesh2dRenderPlugin {
fn build(&self, app: &mut bevy_app::App) {
load_shader_library!(app, "mesh2d_vertex_output.wgsl");
load_shader_library!(app, "mesh2d_view_types.wgsl");
load_shader_library!(app, "mesh2d_view_bindings.wgsl");
load_shader_library!(app, "mesh2d_types.wgsl");
load_shader_library!(app, "mesh2d_functions.wgsl");
embedded_asset!(app, "mesh2d.wgsl");
// These bindings should be loaded as a shader library, but it depends on runtime
// information, so we will load it in a system.
embedded_asset!(app, "mesh2d_bindings.wgsl");
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.init_resource::<ViewKeyCache>()
.init_resource::<RenderMesh2dInstances>()
.init_resource::<SpecializedMeshPipelines<Mesh2dPipeline>>()
.init_resource::<ViewSpecializationTicks>()
.add_systems(
RenderStartup,
(
init_mesh_2d_pipeline,
init_batched_instance_buffer,
load_mesh2d_bindings,
),
)
.add_systems(ExtractSchedule, extract_mesh2d)
.add_systems(
Render,
(
check_views_need_specialization.in_set(PrepareAssets),
(
sweep_old_entities::<Opaque2d>,
sweep_old_entities::<AlphaMask2d>,
)
.in_set(RenderSystems::QueueSweep),
batch_and_prepare_binned_render_phase::<Opaque2d, Mesh2dPipeline>
.in_set(RenderSystems::PrepareResources),
batch_and_prepare_binned_render_phase::<AlphaMask2d, Mesh2dPipeline>
.in_set(RenderSystems::PrepareResources),
batch_and_prepare_sorted_render_phase::<Transparent2d, Mesh2dPipeline>
.in_set(RenderSystems::PrepareResources),
write_batched_instance_buffer::<Mesh2dPipeline>
.in_set(RenderSystems::PrepareResourcesFlush),
prepare_mesh2d_bind_group.in_set(RenderSystems::PrepareBindGroups),
prepare_mesh2d_view_bind_groups.in_set(RenderSystems::PrepareBindGroups),
no_gpu_preprocessing::clear_batched_cpu_instance_buffers::<Mesh2dPipeline>
.in_set(RenderSystems::Cleanup)
.after(RenderSystems::Render),
),
);
}
}
}
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewKeyCache(MainEntityHashMap<Mesh2dPipelineKey>);
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewSpecializationTicks(MainEntityHashMap<Tick>);
pub fn check_views_need_specialization(
mut view_key_cache: ResMut<ViewKeyCache>,
mut view_specialization_ticks: ResMut<ViewSpecializationTicks>,
views: Query<(
&MainEntity,
&ExtractedView,
&Msaa,
Option<&Tonemapping>,
Option<&DebandDither>,
)>,
ticks: SystemChangeTick,
) {
for (view_entity, view, msaa, tonemapping, dither) in &views {
let mut view_key = Mesh2dPipelineKey::from_msaa_samples(msaa.samples())
| Mesh2dPipelineKey::from_hdr(view.hdr);
if !view.hdr {
if let Some(tonemapping) = tonemapping {
view_key |= Mesh2dPipelineKey::TONEMAP_IN_SHADER;
view_key |= tonemapping_pipeline_key(*tonemapping);
}
if let Some(DebandDither::Enabled) = dither {
view_key |= Mesh2dPipelineKey::DEBAND_DITHER;
}
}
if !view_key_cache
.get_mut(view_entity)
.is_some_and(|current_key| *current_key == view_key)
{
view_key_cache.insert(*view_entity, view_key);
view_specialization_ticks.insert(*view_entity, ticks.this_run());
}
}
}
pub fn init_batched_instance_buffer(mut commands: Commands, render_device: Res<RenderDevice>) {
commands.insert_resource(BatchedInstanceBuffer::<Mesh2dUniform>::new(
&render_device.limits(),
));
}
/// Loads the `mesh2d_bindings.wgsl` shader, injecting shader defs derived from
/// the render device's runtime capabilities.
fn load_mesh2d_bindings(render_device: Res<RenderDevice>, asset_server: Res<AssetServer>) {
    let mut mesh_bindings_shader_defs = Vec::with_capacity(1);
    // When the device imposes a per-object batch size, forward it to the shader.
    if let Some(per_object_buffer_batch_size) =
        GpuArrayBuffer::<Mesh2dUniform>::batch_size(&render_device.limits())
    {
        mesh_bindings_shader_defs.push(ShaderDefVal::UInt(
            "PER_OBJECT_BUFFER_BATCH_SIZE".into(),
            per_object_buffer_batch_size,
        ));
    }
    // Load the mesh_bindings shader module here as it depends on runtime information about
    // whether storage buffers are supported, or the maximum uniform buffer binding size.
    let handle: Handle<Shader> = load_embedded_asset!(
        asset_server.as_ref(),
        "mesh2d_bindings.wgsl",
        move |settings| {
            *settings = ShaderSettings {
                shader_defs: mesh_bindings_shader_defs.clone(),
            }
        }
    );
    // Forget the handle so we don't have to store it anywhere, and we keep the embedded asset
    // loaded. Note: This is what happens in `load_shader_library` internally.
    core::mem::forget(handle);
}
/// CPU-side transform data extracted for a 2D mesh instance.
#[derive(Component)]
pub struct Mesh2dTransforms {
    /// World-from-local (model) transform of the mesh.
    pub world_from_local: Affine3,
    /// Bit flags; see [`MeshFlags`].
    pub flags: u32,
}
/// GPU-facing per-instance uniform data for a 2D mesh; built from
/// [`Mesh2dTransforms`] by [`Mesh2dUniform::from_components`].
#[derive(ShaderType, Clone, Copy)]
pub struct Mesh2dUniform {
    // Affine 4x3 matrix transposed to 3x4
    pub world_from_local: [Vec4; 3],
    // 3x3 matrix packed in mat2x4 and f32 as:
    // [0].xyz, [1].x,
    // [1].yz, [2].xy
    // [2].z
    pub local_from_world_transpose_a: [Vec4; 2],
    pub local_from_world_transpose_b: f32,
    /// Bit flags; see [`MeshFlags`].
    pub flags: u32,
    /// User-supplied tag copied from the entity's `MeshTag` (0 if absent).
    pub tag: u32,
}
impl Mesh2dUniform {
    /// Builds the GPU-facing uniform from the CPU-side transforms plus a user tag.
    fn from_components(mesh_transforms: &Mesh2dTransforms, tag: u32) -> Self {
        let world_from_local = &mesh_transforms.world_from_local;
        // Inverse-transpose of the 3x3 part, packed into two vec4s and an f32.
        let (transpose_a, transpose_b) = world_from_local.inverse_transpose_3x3();
        Self {
            world_from_local: world_from_local.to_transpose(),
            local_from_world_transpose_a: transpose_a,
            local_from_world_transpose_b: transpose_b,
            flags: mesh_transforms.flags,
            tag,
        }
    }
}
// NOTE: These must match the bit flags in bevy_sprite_render/src/mesh2d/mesh2d.wgsl!
bitflags::bitflags! {
    /// Per-instance mesh flags mirrored to the shader.
    #[repr(transparent)]
    pub struct MeshFlags: u32 {
        const NONE = 0;
        const UNINITIALIZED = 0xFFFF;
    }
}
/// Render-world data extracted for a single 2D mesh entity.
pub struct RenderMesh2dInstance {
    pub transforms: Mesh2dTransforms,
    pub mesh_asset_id: AssetId<Mesh>,
    pub material_bind_group_id: Material2dBindGroupId,
    /// When `false`, this instance opts out of automatic batching.
    pub automatic_batching: bool,
    /// User-supplied tag (from `MeshTag`), forwarded to the shader.
    pub tag: u32,
}
/// Map from main-world entity to its extracted [`RenderMesh2dInstance`];
/// rebuilt every frame by [`extract_mesh2d`].
#[derive(Default, Resource, Deref, DerefMut)]
pub struct RenderMesh2dInstances(MainEntityHashMap<RenderMesh2dInstance>);
/// Marker component for 2D mesh entities.
#[derive(Component, Default)]
pub struct Mesh2dMarker;
/// Extracts visible [`Mesh2d`] entities from the main world into
/// [`RenderMesh2dInstances`] for this frame.
pub fn extract_mesh2d(
    mut render_mesh_instances: ResMut<RenderMesh2dInstances>,
    query: Extract<
        Query<(
            Entity,
            &ViewVisibility,
            &GlobalTransform,
            &Mesh2d,
            Option<&MeshTag>,
            Has<NoAutomaticBatching>,
        )>,
    >,
) {
    // The map is rebuilt from scratch each frame.
    render_mesh_instances.clear();
    for (entity, view_visibility, transform, handle, tag, no_automatic_batching) in &query {
        // Skip entities not visible to any view.
        if !view_visibility.get() {
            continue;
        }
        render_mesh_instances.insert(
            entity.into(),
            RenderMesh2dInstance {
                transforms: Mesh2dTransforms {
                    world_from_local: (&transform.affine()).into(),
                    flags: MeshFlags::empty().bits(),
                },
                mesh_asset_id: handle.0.id(),
                // Filled in later, once the material bind group is known.
                material_bind_group_id: Material2dBindGroupId::default(),
                automatic_batching: !no_automatic_batching,
                tag: tag.map_or(0, |i| **i),
            },
        );
    }
}
/// Base render pipeline data shared by all 2D mesh materials.
#[derive(Resource, Clone)]
pub struct Mesh2dPipeline {
    /// Bind group layout for per-view data (group 0).
    pub view_layout: BindGroupLayoutDescriptor,
    /// Bind group layout for per-mesh data (group 1).
    pub mesh_layout: BindGroupLayoutDescriptor,
    pub shader: Handle<Shader>,
    /// `Some` when the device batches mesh uniforms in fixed-size chunks.
    pub per_object_buffer_batch_size: Option<u32>,
}
/// Creates the [`Mesh2dPipeline`] resource: view and mesh bind group layouts
/// plus the base 2D mesh shader.
pub fn init_mesh_2d_pipeline(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    asset_server: Res<AssetServer>,
) {
    let tonemapping_lut_entries = get_lut_bind_group_layout_entries();
    // Entry order must match the binding indices the shaders expect:
    // 0 = view uniform, 1 = globals, 2 = LUT texture, 3 = LUT sampler.
    let view_layout = BindGroupLayoutDescriptor::new(
        "mesh2d_view_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::VERTEX_FRAGMENT,
            (
                uniform_buffer::<ViewUniform>(true),
                uniform_buffer::<GlobalsUniform>(false),
                tonemapping_lut_entries[0].visibility(ShaderStages::FRAGMENT),
                tonemapping_lut_entries[1].visibility(ShaderStages::FRAGMENT),
            ),
        ),
    );
    let mesh_layout = BindGroupLayoutDescriptor::new(
        "mesh2d_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::VERTEX_FRAGMENT,
            GpuArrayBuffer::<Mesh2dUniform>::binding_layout(&render_device.limits()),
        ),
    );
    commands.insert_resource(Mesh2dPipeline {
        view_layout,
        mesh_layout,
        per_object_buffer_batch_size: GpuArrayBuffer::<Mesh2dUniform>::batch_size(
            &render_device.limits(),
        ),
        shader: load_embedded_asset!(asset_server.as_ref(), "mesh2d.wgsl"),
    });
}
impl GetBatchData for Mesh2dPipeline {
    type Param = (
        SRes<RenderMesh2dInstances>,
        SRes<RenderAssets<RenderMesh>>,
        SRes<MeshAllocator>,
    );
    /// Two instances may share a batch only if material bind group and mesh match.
    type CompareData = (Material2dBindGroupId, AssetId<Mesh>);
    type BufferData = Mesh2dUniform;
    fn get_batch_data(
        (mesh_instances, _, _): &SystemParamItem<Self::Param>,
        (_entity, main_entity): (Entity, MainEntity),
    ) -> Option<(Self::BufferData, Option<Self::CompareData>)> {
        let mesh_instance = mesh_instances.get(&main_entity)?;
        Some((
            Mesh2dUniform::from_components(&mesh_instance.transforms, mesh_instance.tag),
            // Returning `None` compare data opts this instance out of batching.
            mesh_instance.automatic_batching.then_some((
                mesh_instance.material_bind_group_id,
                mesh_instance.mesh_asset_id,
            )),
        ))
    }
}
impl GetFullBatchData for Mesh2dPipeline {
    type BufferInputData = ();
    fn get_binned_batch_data(
        (mesh_instances, _, _): &SystemParamItem<Self::Param>,
        main_entity: MainEntity,
    ) -> Option<Self::BufferData> {
        let mesh_instance = mesh_instances.get(&main_entity)?;
        Some(Mesh2dUniform::from_components(
            &mesh_instance.transforms,
            mesh_instance.tag,
        ))
    }
    // GPU preprocessing is not implemented for 2D meshes, so the two index
    // accessors below log an error and return `None`.
    fn get_index_and_compare_data(
        _: &SystemParamItem<Self::Param>,
        _query_item: MainEntity,
    ) -> Option<(NonMaxU32, Option<Self::CompareData>)> {
        error!(
            "`get_index_and_compare_data` is only intended for GPU mesh uniform building, \
            but this is not yet implemented for 2d meshes"
        );
        None
    }
    fn get_binned_index(
        _: &SystemParamItem<Self::Param>,
        _query_item: MainEntity,
    ) -> Option<NonMaxU32> {
        error!(
            "`get_binned_index` is only intended for GPU mesh uniform building, \
            but this is not yet implemented for 2d meshes"
        );
        None
    }
    /// Writes CPU-side indirect-draw metadata for a batch into the appropriate
    /// (indexed vs. non-indexed) indirect parameters buffer.
    fn write_batch_indirect_parameters_metadata(
        indexed: bool,
        base_output_index: u32,
        batch_set_index: Option<NonMaxU32>,
        indirect_parameters_buffer: &mut bevy_render::batching::gpu_preprocessing::UntypedPhaseIndirectParametersBuffers,
        indirect_parameters_offset: u32,
    ) {
        // Note that `IndirectParameters` covers both of these structures, even
        // though they actually have distinct layouts. See the comment above that
        // type for more information.
        let indirect_parameters = IndirectParametersCpuMetadata {
            base_output_index,
            // `!0` (all ones) is the sentinel for "no batch set".
            batch_set_index: match batch_set_index {
                None => !0,
                Some(batch_set_index) => u32::from(batch_set_index),
            },
        };
        if indexed {
            indirect_parameters_buffer
                .indexed
                .set(indirect_parameters_offset, indirect_parameters);
        } else {
            indirect_parameters_buffer
                .non_indexed
                .set(indirect_parameters_offset, indirect_parameters);
        }
    }
}
bitflags::bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    #[repr(transparent)]
    // NOTE: Apparently quadro drivers support up to 64x MSAA.
    // MSAA uses the highest 3 bits for the MSAA log2(sample count) to support up to 128x MSAA.
    // FIXME: make normals optional?
    /// Bit-packed key describing everything needed to specialize the 2D mesh
    /// pipeline. Multi-bit fields (MSAA, topology, tonemap method) are packed
    /// at the positions defined in the `impl Mesh2dPipelineKey` block below.
    pub struct Mesh2dPipelineKey: u32 {
        const NONE = 0;
        const HDR = 1 << 0;
        const TONEMAP_IN_SHADER = 1 << 1;
        const DEBAND_DITHER = 1 << 2;
        const BLEND_ALPHA = 1 << 3;
        const MAY_DISCARD = 1 << 4;
        const MSAA_RESERVED_BITS = Self::MSAA_MASK_BITS << Self::MSAA_SHIFT_BITS;
        const PRIMITIVE_TOPOLOGY_RESERVED_BITS = Self::PRIMITIVE_TOPOLOGY_MASK_BITS << Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS;
        const TONEMAP_METHOD_RESERVED_BITS = Self::TONEMAP_METHOD_MASK_BITS << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_NONE = 0 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD = 1 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD_LUMINANCE = 2 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_ACES_FITTED = 3 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_AGX = 4 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM = 5 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_TONY_MC_MAPFACE = 6 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_BLENDER_FILMIC = 7 << Self::TONEMAP_METHOD_SHIFT_BITS;
    }
}
impl Mesh2dPipelineKey {
    // Packed-field layout (from the top of the u32 downward):
    // MSAA occupies the top 3 bits, primitive topology the next 3,
    // tonemap method the 3 below that.
    const MSAA_MASK_BITS: u32 = 0b111;
    const MSAA_SHIFT_BITS: u32 = 32 - Self::MSAA_MASK_BITS.count_ones();
    const PRIMITIVE_TOPOLOGY_MASK_BITS: u32 = 0b111;
    const PRIMITIVE_TOPOLOGY_SHIFT_BITS: u32 = Self::MSAA_SHIFT_BITS - 3;
    const TONEMAP_METHOD_MASK_BITS: u32 = 0b111;
    const TONEMAP_METHOD_SHIFT_BITS: u32 =
        Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS - Self::TONEMAP_METHOD_MASK_BITS.count_ones();
    /// Encodes the MSAA sample count (stored as log2) into the key's top bits.
    pub fn from_msaa_samples(msaa_samples: u32) -> Self {
        let msaa_bits =
            (msaa_samples.trailing_zeros() & Self::MSAA_MASK_BITS) << Self::MSAA_SHIFT_BITS;
        Self::from_bits_retain(msaa_bits)
    }
    /// Sets the `HDR` flag when the view renders to an HDR target.
    pub fn from_hdr(hdr: bool) -> Self {
        if hdr {
            Mesh2dPipelineKey::HDR
        } else {
            Mesh2dPipelineKey::NONE
        }
    }
    /// Decodes the MSAA sample count (inverse of [`Self::from_msaa_samples`]).
    pub fn msaa_samples(&self) -> u32 {
        1 << ((self.bits() >> Self::MSAA_SHIFT_BITS) & Self::MSAA_MASK_BITS)
    }
    /// Encodes a [`PrimitiveTopology`] into the key's topology bits.
    pub fn from_primitive_topology(primitive_topology: PrimitiveTopology) -> Self {
        let primitive_topology_bits = ((primitive_topology as u32)
            & Self::PRIMITIVE_TOPOLOGY_MASK_BITS)
            << Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS;
        Self::from_bits_retain(primitive_topology_bits)
    }
    /// Decodes the stored [`PrimitiveTopology`], falling back to the default
    /// for unrecognized bit patterns.
    pub fn primitive_topology(&self) -> PrimitiveTopology {
        let primitive_topology_bits = (self.bits() >> Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
            & Self::PRIMITIVE_TOPOLOGY_MASK_BITS;
        match primitive_topology_bits {
            x if x == PrimitiveTopology::PointList as u32 => PrimitiveTopology::PointList,
            x if x == PrimitiveTopology::LineList as u32 => PrimitiveTopology::LineList,
            x if x == PrimitiveTopology::LineStrip as u32 => PrimitiveTopology::LineStrip,
            x if x == PrimitiveTopology::TriangleList as u32 => PrimitiveTopology::TriangleList,
            x if x == PrimitiveTopology::TriangleStrip as u32 => PrimitiveTopology::TriangleStrip,
            _ => PrimitiveTopology::default(),
        }
    }
}
impl SpecializedMeshPipeline for Mesh2dPipeline {
    type Key = Mesh2dPipelineKey;
    /// Builds a [`RenderPipelineDescriptor`] for the given key and mesh vertex
    /// layout: selects vertex attributes/shader defs based on which attributes
    /// the mesh provides, then configures tonemapping, blending, and MSAA from
    /// the key bits.
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut shader_defs = Vec::new();
        let mut vertex_attributes = Vec::new();
        // Shader locations 0..=4 are fixed per attribute; each present
        // attribute also enables the matching shader def.
        if layout.0.contains(Mesh::ATTRIBUTE_POSITION) {
            shader_defs.push("VERTEX_POSITIONS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0));
        }
        if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) {
            shader_defs.push("VERTEX_NORMALS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(1));
        }
        if layout.0.contains(Mesh::ATTRIBUTE_UV_0) {
            shader_defs.push("VERTEX_UVS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(2));
        }
        if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) {
            shader_defs.push("VERTEX_TANGENTS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(3));
        }
        if layout.0.contains(Mesh::ATTRIBUTE_COLOR) {
            shader_defs.push("VERTEX_COLORS".into());
            vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(4));
        }
        if key.contains(Mesh2dPipelineKey::TONEMAP_IN_SHADER) {
            shader_defs.push("TONEMAP_IN_SHADER".into());
            // LUT bindings 2 and 3 match the view layout built in
            // `init_mesh_2d_pipeline`.
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
                2,
            ));
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
                3,
            ));
            // Extract the 3-bit tonemap-method field and enable its shader def.
            let method = key.intersection(Mesh2dPipelineKey::TONEMAP_METHOD_RESERVED_BITS);
            match method {
                Mesh2dPipelineKey::TONEMAP_METHOD_NONE => {
                    shader_defs.push("TONEMAP_METHOD_NONE".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_REINHARD => {
                    shader_defs.push("TONEMAP_METHOD_REINHARD".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE => {
                    shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_ACES_FITTED => {
                    shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_AGX => {
                    shader_defs.push("TONEMAP_METHOD_AGX".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM => {
                    shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC => {
                    shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
                }
                Mesh2dPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE => {
                    shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
                }
                _ => {}
            }
            // Debanding is tied to tonemapping in the shader, cannot run without it.
            if key.contains(Mesh2dPipelineKey::DEBAND_DITHER) {
                shader_defs.push("DEBAND_DITHER".into());
            }
        }
        if key.contains(Mesh2dPipelineKey::MAY_DISCARD) {
            shader_defs.push("MAY_DISCARD".into());
        }
        let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?;
        let format = match key.contains(Mesh2dPipelineKey::HDR) {
            true => ViewTarget::TEXTURE_FORMAT_HDR,
            false => TextureFormat::bevy_default(),
        };
        // Transparent pipelines alpha-blend and skip depth writes; opaque
        // pipelines write depth and don't blend.
        let (depth_write_enabled, label, blend);
        if key.contains(Mesh2dPipelineKey::BLEND_ALPHA) {
            label = "transparent_mesh2d_pipeline";
            blend = Some(BlendState::ALPHA_BLENDING);
            depth_write_enabled = false;
        } else {
            label = "opaque_mesh2d_pipeline";
            blend = None;
            depth_write_enabled = true;
        }
        Ok(RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![vertex_buffer_layout],
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format,
                    blend,
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout: vec![self.view_layout.clone(), self.mesh_layout.clone()],
            primitive: PrimitiveState {
                front_face: FrontFace::Ccw,
                cull_mode: None,
                unclipped_depth: false,
                polygon_mode: PolygonMode::Fill,
                conservative: false,
                topology: key.primitive_topology(),
                strip_index_format: None,
            },
            depth_stencil: Some(DepthStencilState {
                format: CORE_2D_DEPTH_FORMAT,
                depth_write_enabled,
                depth_compare: CompareFunction::GreaterEqual,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some(label.into()),
            ..default()
        })
    }
}
/// The bind group holding the batched [`Mesh2dUniform`] instance data.
#[derive(Resource)]
pub struct Mesh2dBindGroup {
    pub value: BindGroup,
}
/// Creates the [`Mesh2dBindGroup`] from the batched instance buffer, if the
/// buffer has data to bind this frame.
pub fn prepare_mesh2d_bind_group(
    mut commands: Commands,
    mesh2d_pipeline: Res<Mesh2dPipeline>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    mesh2d_uniforms: Res<BatchedInstanceBuffer<Mesh2dUniform>>,
) {
    if let Some(binding) = mesh2d_uniforms.instance_data_binding() {
        commands.insert_resource(Mesh2dBindGroup {
            value: render_device.create_bind_group(
                "mesh2d_bind_group",
                &pipeline_cache.get_bind_group_layout(&mesh2d_pipeline.mesh_layout),
                &BindGroupEntries::single(binding),
            ),
        });
    }
}
/// Per-view bind group for 2D mesh rendering (view uniforms, globals, and
/// tonemapping LUTs).
#[derive(Component)]
pub struct Mesh2dViewBindGroup {
    pub value: BindGroup,
}
/// Creates a [`Mesh2dViewBindGroup`] for every extracted 2D camera view.
pub fn prepare_mesh2d_view_bind_groups(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    mesh2d_pipeline: Res<Mesh2dPipeline>,
    view_uniforms: Res<ViewUniforms>,
    views: Query<(Entity, &Tonemapping), (With<ExtractedView>, With<Camera2d>)>,
    globals_buffer: Res<GlobalsBuffer>,
    tonemapping_luts: Res<TonemappingLuts>,
    images: Res<RenderAssets<GpuImage>>,
    fallback_image: Res<FallbackImage>,
) {
    // Both uniform buffers must exist before any bind group can be built.
    let (Some(view_binding), Some(globals)) = (
        view_uniforms.uniforms.binding(),
        globals_buffer.buffer.binding(),
    ) else {
        return;
    };
    for (entity, tonemapping) in &views {
        let lut_bindings =
            get_lut_bindings(&images, &tonemapping_luts, tonemapping, &fallback_image);
        // Entry order matches `view_layout`: view, globals, LUT texture, LUT sampler.
        let view_bind_group = render_device.create_bind_group(
            "mesh2d_view_bind_group",
            &pipeline_cache.get_bind_group_layout(&mesh2d_pipeline.view_layout),
            &BindGroupEntries::sequential((
                view_binding.clone(),
                globals.clone(),
                lut_bindings.0,
                lut_bindings.1,
            )),
        );
        commands.entity(entity).insert(Mesh2dViewBindGroup {
            value: view_bind_group,
        });
    }
}
/// Render command that binds the per-view bind group at slot `I`.
pub struct SetMesh2dViewBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMesh2dViewBindGroup<I> {
    type Param = ();
    type ViewQuery = (Read<ViewUniformOffset>, Read<Mesh2dViewBindGroup>);
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        _item: &P,
        (view_uniform, mesh2d_view_bind_group): ROQueryItem<'w, '_, Self::ViewQuery>,
        _view: Option<()>,
        _param: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // The dynamic offset selects this view's slice of the view uniform buffer.
        pass.set_bind_group(I, &mesh2d_view_bind_group.value, &[view_uniform.offset]);
        RenderCommandResult::Success
    }
}
/// Render command that binds the mesh instance-data bind group at slot `I`.
pub struct SetMesh2dBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMesh2dBindGroup<I> {
    type Param = SRes<Mesh2dBindGroup>;
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        mesh2d_bind_group: SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // At most one dynamic offset is needed; a fixed-size stack array avoids
        // a heap allocation per draw.
        let mut dynamic_offsets: [u32; 1] = Default::default();
        let mut offset_count = 0;
        if let PhaseItemExtraIndex::DynamicOffset(dynamic_offset) = item.extra_index() {
            dynamic_offsets[offset_count] = dynamic_offset;
            offset_count += 1;
        }
        pass.set_bind_group(
            I,
            &mesh2d_bind_group.into_inner().value,
            &dynamic_offsets[..offset_count],
        );
        RenderCommandResult::Success
    }
}
/// Render command that issues the actual draw call for a 2D mesh, using the
/// allocator-provided vertex/index slices.
pub struct DrawMesh2d;
impl<P: PhaseItem> RenderCommand<P> for DrawMesh2d {
    type Param = (
        SRes<RenderAssets<RenderMesh>>,
        SRes<RenderMesh2dInstances>,
        SRes<MeshAllocator>,
    );
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        (meshes, render_mesh2d_instances, mesh_allocator): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let meshes = meshes.into_inner();
        let render_mesh2d_instances = render_mesh2d_instances.into_inner();
        let mesh_allocator = mesh_allocator.into_inner();
        // Any missing instance, GPU mesh, or buffer slice means this item
        // can't be drawn this frame; skip rather than fail.
        let Some(RenderMesh2dInstance { mesh_asset_id, .. }) =
            render_mesh2d_instances.get(&item.main_entity())
        else {
            return RenderCommandResult::Skip;
        };
        let Some(gpu_mesh) = meshes.get(*mesh_asset_id) else {
            return RenderCommandResult::Skip;
        };
        let Some(vertex_buffer_slice) = mesh_allocator.mesh_vertex_slice(mesh_asset_id) else {
            return RenderCommandResult::Skip;
        };
        pass.set_vertex_buffer(0, vertex_buffer_slice.buffer.slice(..));
        let batch_range = item.batch_range();
        match &gpu_mesh.buffer_info {
            RenderMeshBufferInfo::Indexed {
                index_format,
                count,
            } => {
                let Some(index_buffer_slice) = mesh_allocator.mesh_index_slice(mesh_asset_id)
                else {
                    return RenderCommandResult::Skip;
                };
                pass.set_index_buffer(index_buffer_slice.buffer.slice(..), *index_format);
                // Offsets account for the mesh's position inside the shared
                // allocator buffers; instances come from the batch range.
                pass.draw_indexed(
                    index_buffer_slice.range.start..(index_buffer_slice.range.start + count),
                    vertex_buffer_slice.range.start as i32,
                    batch_range.clone(),
                );
            }
            RenderMeshBufferInfo::NonIndexed => {
                pass.draw(vertex_buffer_slice.range, batch_range.clone());
            }
        }
        RenderCommandResult::Success
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/mesh2d/material.rs | crates/bevy_sprite_render/src/mesh2d/material.rs | use crate::{
init_mesh_2d_pipeline, DrawMesh2d, Mesh2d, Mesh2dPipeline, Mesh2dPipelineKey,
RenderMesh2dInstances, SetMesh2dBindGroup, SetMesh2dViewBindGroup, ViewKeyCache,
ViewSpecializationTicks,
};
use bevy_app::{App, Plugin, PostUpdate};
use bevy_asset::prelude::AssetChanged;
use bevy_asset::{AsAssetId, Asset, AssetApp, AssetEventSystems, AssetId, AssetServer, Handle};
use bevy_camera::visibility::ViewVisibility;
use bevy_core_pipeline::{
core_2d::{
AlphaMask2d, AlphaMask2dBinKey, BatchSetKey2d, Opaque2d, Opaque2dBinKey, Transparent2d,
},
tonemapping::Tonemapping,
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::change_detection::Tick;
use bevy_ecs::system::SystemChangeTick;
use bevy_ecs::{
prelude::*,
system::{lifetimeless::SRes, SystemParamItem},
};
use bevy_math::FloatOrd;
use bevy_mesh::MeshVertexBufferLayoutRef;
use bevy_platform::collections::HashMap;
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_render::render_resource::BindGroupLayoutDescriptor;
use bevy_render::{
camera::extract_cameras,
mesh::RenderMesh,
render_asset::{
prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets,
},
render_phase::{
AddRenderCommand, BinnedRenderPhaseType, DrawFunctionId, DrawFunctions, InputUniformIndex,
PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, SetItemPipeline,
TrackedRenderPass, ViewBinnedRenderPhases, ViewSortedRenderPhases,
},
render_resource::{
AsBindGroup, AsBindGroupError, BindGroup, BindGroupId, BindingResources,
CachedRenderPipelineId, PipelineCache, RenderPipelineDescriptor, SpecializedMeshPipeline,
SpecializedMeshPipelineError, SpecializedMeshPipelines,
},
renderer::RenderDevice,
sync_world::{MainEntity, MainEntityHashMap},
view::{ExtractedView, RenderVisibleEntities},
Extract, ExtractSchedule, Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::{Shader, ShaderDefVal, ShaderRef};
use bevy_utils::Parallel;
use core::{hash::Hash, marker::PhantomData};
use derive_more::derive::From;
use tracing::error;
/// Bind group slot used for the material's own bind group (`@group(2)` in WGSL).
pub const MATERIAL_2D_BIND_GROUP_INDEX: usize = 2;
/// Materials are used alongside [`Material2dPlugin`], [`Mesh2d`], and [`MeshMaterial2d`]
/// to spawn entities that are rendered with a specific [`Material2d`] type. They serve as an easy to use high level
/// way to render [`Mesh2d`] entities with custom shader logic.
///
/// Materials must implement [`AsBindGroup`] to define how data will be transferred to the GPU and bound in shaders.
/// [`AsBindGroup`] can be derived, which makes generating bindings straightforward. See the [`AsBindGroup`] docs for details.
///
/// # Example
///
/// Here is a simple [`Material2d`] implementation. The [`AsBindGroup`] derive has many features. To see what else is available,
/// check out the [`AsBindGroup`] documentation.
///
/// ```
/// # use bevy_sprite_render::{Material2d, MeshMaterial2d};
/// # use bevy_ecs::prelude::*;
/// # use bevy_image::Image;
/// # use bevy_reflect::TypePath;
/// # use bevy_mesh::{Mesh, Mesh2d};
/// # use bevy_render::render_resource::AsBindGroup;
/// # use bevy_shader::ShaderRef;
/// # use bevy_color::LinearRgba;
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::{Handle, AssetServer, Assets, Asset};
/// # use bevy_math::primitives::Circle;
/// #
/// #[derive(AsBindGroup, Debug, Clone, Asset, TypePath)]
/// pub struct CustomMaterial {
/// // Uniform bindings must implement `ShaderType`, which will be used to convert the value to
/// // its shader-compatible equivalent. Most core math types already implement `ShaderType`.
/// #[uniform(0)]
/// color: LinearRgba,
/// // Images can be bound as textures in shaders. If the Image's sampler is also needed, just
/// // add the sampler attribute with a different binding index.
/// #[texture(1)]
/// #[sampler(2)]
/// color_texture: Handle<Image>,
/// }
///
/// // All functions on `Material2d` have default impls. You only need to implement the
/// // functions that are relevant for your material.
/// impl Material2d for CustomMaterial {
/// fn fragment_shader() -> ShaderRef {
/// "shaders/custom_material.wgsl".into()
/// }
/// }
///
/// // Spawn an entity with a mesh using `CustomMaterial`.
/// fn setup(
/// mut commands: Commands,
/// mut meshes: ResMut<Assets<Mesh>>,
/// mut materials: ResMut<Assets<CustomMaterial>>,
/// asset_server: Res<AssetServer>,
/// ) {
/// commands.spawn((
/// Mesh2d(meshes.add(Circle::new(50.0))),
/// MeshMaterial2d(materials.add(CustomMaterial {
/// color: RED.into(),
/// color_texture: asset_server.load("some_image.png"),
/// })),
/// ));
/// }
/// ```
///
/// In WGSL shaders, the material's binding would look like this:
///
/// ```wgsl
/// struct CustomMaterial {
/// color: vec4<f32>,
/// }
///
/// @group(2) @binding(0) var<uniform> material: CustomMaterial;
/// @group(2) @binding(1) var color_texture: texture_2d<f32>;
/// @group(2) @binding(2) var color_sampler: sampler;
/// ```
pub trait Material2d: AsBindGroup + Asset + Clone + Sized {
    /// Returns this material's vertex shader. If [`ShaderRef::Default`] is returned, the default mesh vertex shader
    /// will be used.
    fn vertex_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the default mesh fragment shader
    /// will be used.
    fn fragment_shader() -> ShaderRef {
        ShaderRef::Default
    }
    /// Add a bias to the view depth of the mesh which can be used to force a specific render order.
    #[inline]
    fn depth_bias(&self) -> f32 {
        0.0
    }
    /// Returns how this material's base color alpha channel is used for
    /// transparency. Defaults to [`AlphaMode2d::Opaque`].
    fn alpha_mode(&self) -> AlphaMode2d {
        AlphaMode2d::Opaque
    }
    /// Customizes the default [`RenderPipelineDescriptor`].
    #[expect(
        unused_variables,
        reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion."
    )]
    #[inline]
    fn specialize(
        descriptor: &mut RenderPipelineDescriptor,
        layout: &MeshVertexBufferLayoutRef,
        key: Material2dKey<Self>,
    ) -> Result<(), SpecializedMeshPipelineError> {
        Ok(())
    }
}
/// A [material](Material2d) used for rendering a [`Mesh2d`].
///
/// See [`Material2d`] for general information about 2D materials and how to implement your own materials.
///
/// # Example
///
/// ```
/// # use bevy_sprite_render::{ColorMaterial, MeshMaterial2d};
/// # use bevy_ecs::prelude::*;
/// # use bevy_mesh::{Mesh, Mesh2d};
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::Assets;
/// # use bevy_math::primitives::Circle;
/// #
/// // Spawn an entity with a mesh using `ColorMaterial`.
/// fn setup(
/// mut commands: Commands,
/// mut meshes: ResMut<Assets<Mesh>>,
/// mut materials: ResMut<Assets<ColorMaterial>>,
/// ) {
/// commands.spawn((
/// Mesh2d(meshes.add(Circle::new(50.0))),
/// MeshMaterial2d(materials.add(ColorMaterial::from_color(RED))),
/// ));
/// }
/// ```
///
/// [`MeshMaterial2d`]: crate::MeshMaterial2d
#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)]
#[reflect(Component, Default, Clone)]
// Newtype over `Handle<M>`; `Deref`/`DerefMut` expose the handle's API directly.
pub struct MeshMaterial2d<M: Material2d>(pub Handle<M>);
impl<M: Material2d> Default for MeshMaterial2d<M> {
    /// Wraps the default (placeholder) handle for `M`.
    fn default() -> Self {
        Self(Default::default())
    }
}
impl<M: Material2d> PartialEq for MeshMaterial2d<M> {
    // Equality is delegated entirely to the wrapped `Handle<M>`.
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl<M: Material2d> Eq for MeshMaterial2d<M> {}
// Conversions to `AssetId` (by value and by reference) for ergonomic lookups.
impl<M: Material2d> From<MeshMaterial2d<M>> for AssetId<M> {
    fn from(material: MeshMaterial2d<M>) -> Self {
        material.id()
    }
}
impl<M: Material2d> From<&MeshMaterial2d<M>> for AssetId<M> {
    fn from(material: &MeshMaterial2d<M>) -> Self {
        material.id()
    }
}
impl<M: Material2d> AsAssetId for MeshMaterial2d<M> {
    type Asset = M;
    // Lets `AssetChanged<MeshMaterial2d<M>>` queries resolve this component
    // to the asset it points at.
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.id()
    }
}
/// Sets how a 2d material's base color alpha channel is used for transparency.
/// Currently, this only works with [`Mesh2d`]. Sprites are always transparent.
///
/// Returned by [`Material2d::alpha_mode`].
///
/// This is very similar to [`AlphaMode`](bevy_render::alpha::AlphaMode) but this only applies to 2d meshes.
/// We use a separate type because 2d doesn't support all the transparency modes that 3d does.
#[derive(Debug, Default, Reflect, Copy, Clone, PartialEq)]
#[reflect(Default, Debug, Clone)]
pub enum AlphaMode2d {
    /// Base color alpha values are overridden to be fully opaque (1.0).
    #[default]
    Opaque,
    /// Reduce transparency to fully opaque or fully transparent
    /// based on a threshold.
    ///
    /// Compares the base color alpha value to the specified threshold.
    /// If the value is below the threshold,
    /// considers the color to be fully transparent (alpha is set to 0.0).
    /// If it is equal to or above the threshold,
    /// considers the color to be fully opaque (alpha is set to 1.0).
    Mask(f32),
    /// The base color alpha value defines the opacity of the color.
    /// Standard alpha-blending is used to blend the fragment's color
    /// with the color behind it.
    Blend,
}
/// Adds the necessary ECS resources and render logic to enable rendering entities using the given [`Material2d`]
/// asset type (which includes [`Material2d`] types).
pub struct Material2dPlugin<M: Material2d>(PhantomData<M>);
impl<M: Material2d> Default for Material2dPlugin<M> {
    fn default() -> Self {
        Self(Default::default())
    }
}
impl<M: Material2d> Plugin for Material2dPlugin<M>
where
    M::Data: PartialEq + Eq + Hash + Clone,
{
    fn build(&self, app: &mut App) {
        // Main-world setup: register the asset and track entities whose
        // material changed so they can be re-specialized.
        app.init_asset::<M>()
            .init_resource::<EntitiesNeedingSpecialization<M>>()
            .register_type::<MeshMaterial2d<M>>()
            .add_plugins(RenderAssetPlugin::<PreparedMaterial2d<M>>::default())
            .add_systems(
                PostUpdate,
                check_entities_needing_specialization::<M>.after(AssetEventSystems),
            );
        // Render-world setup: draw commands for all three 2D phases, plus
        // extraction, specialization, and queueing systems.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<EntitySpecializationTickPair<M>>()
                .init_resource::<SpecializedMaterial2dPipelineCache<M>>()
                .add_render_command::<Opaque2d, DrawMaterial2d<M>>()
                .add_render_command::<AlphaMask2d, DrawMaterial2d<M>>()
                .add_render_command::<Transparent2d, DrawMaterial2d<M>>()
                .init_resource::<RenderMaterial2dInstances<M>>()
                .init_resource::<SpecializedMeshPipelines<Material2dPipeline<M>>>()
                .add_systems(
                    RenderStartup,
                    // The material pipeline wraps the base mesh pipeline, so
                    // the latter must be initialized first.
                    init_material_2d_pipeline::<M>.after(init_mesh_2d_pipeline),
                )
                .add_systems(
                    ExtractSchedule,
                    (
                        extract_entities_needs_specialization::<M>.after(extract_cameras),
                        extract_mesh_materials_2d::<M>,
                    ),
                )
                .add_systems(
                    Render,
                    (
                        specialize_material2d_meshes::<M>
                            .in_set(RenderSystems::PrepareMeshes)
                            .after(prepare_assets::<PreparedMaterial2d<M>>)
                            .after(prepare_assets::<RenderMesh>),
                        queue_material2d_meshes::<M>
                            .in_set(RenderSystems::QueueMeshes)
                            .after(prepare_assets::<PreparedMaterial2d<M>>),
                    ),
                );
        }
    }
}
/// Render-world map from entity to the material asset it uses; maintained
/// incrementally by [`extract_mesh_materials_2d`].
#[derive(Resource, Deref, DerefMut)]
pub struct RenderMaterial2dInstances<M: Material2d>(MainEntityHashMap<AssetId<M>>);
impl<M: Material2d> Default for RenderMaterial2dInstances<M> {
    fn default() -> Self {
        Self(Default::default())
    }
}
/// Incrementally updates [`RenderMaterial2dInstances`]: adds/updates entries
/// for entities whose visibility or material changed, and removes entries for
/// entities that lost their material component.
pub fn extract_mesh_materials_2d<M: Material2d>(
    mut material_instances: ResMut<RenderMaterial2dInstances<M>>,
    changed_meshes_query: Extract<
        Query<
            (Entity, &ViewVisibility, &MeshMaterial2d<M>),
            Or<(Changed<ViewVisibility>, Changed<MeshMaterial2d<M>>)>,
        >,
    >,
    mut removed_materials_query: Extract<RemovedComponents<MeshMaterial2d<M>>>,
) {
    for (entity, view_visibility, material) in &changed_meshes_query {
        if view_visibility.get() {
            add_mesh_instance(entity, material, &mut material_instances);
        } else {
            // Invisible entities are removed so later stages skip them.
            remove_mesh_instance(entity, &mut material_instances);
        }
    }
    for entity in removed_materials_query.read() {
        // Only queue a mesh for removal if we didn't pick it up above.
        // It's possible that a necessary component was removed and re-added in
        // the same frame.
        if !changed_meshes_query.contains(entity) {
            remove_mesh_instance(entity, &mut material_instances);
        }
    }
    // Adds or updates a mesh instance in the [`RenderMaterial2dInstances`]
    // array.
    fn add_mesh_instance<M>(
        entity: Entity,
        material: &MeshMaterial2d<M>,
        material_instances: &mut RenderMaterial2dInstances<M>,
    ) where
        M: Material2d,
    {
        material_instances.insert(entity.into(), material.id());
    }
    // Removes a mesh instance from the [`RenderMaterial2dInstances`] array.
    fn remove_mesh_instance<M>(
        entity: Entity,
        material_instances: &mut RenderMaterial2dInstances<M>,
    ) where
        M: Material2d,
    {
        material_instances.remove(&MainEntity::from(entity));
    }
}
/// Render pipeline data for a given [`Material2d`]
#[derive(Resource)]
pub struct Material2dPipeline<M: Material2d> {
    /// Base 2D mesh pipeline this material pipeline builds on.
    pub mesh2d_pipeline: Mesh2dPipeline,
    /// Bind group layout for the material's own data (group 2).
    pub material2d_layout: BindGroupLayoutDescriptor,
    /// Custom vertex shader, if the material overrides the default.
    pub vertex_shader: Option<Handle<Shader>>,
    /// Custom fragment shader, if the material overrides the default.
    pub fragment_shader: Option<Handle<Shader>>,
    marker: PhantomData<M>,
}
/// Specialization key for [`Material2dPipeline`]: the base mesh key plus the
/// material's own bind group data.
pub struct Material2dKey<M: Material2d> {
    pub mesh_key: Mesh2dPipelineKey,
    pub bind_group_data: M::Data,
}
// `Eq` holds because key equality below is a full equivalence relation on
// both fields.
impl<M: Material2d> Eq for Material2dKey<M> where M::Data: PartialEq {}
impl<M: Material2d> PartialEq for Material2dKey<M>
where
    M::Data: PartialEq,
{
    /// Keys are equal when both the mesh key and the bind group data match.
    fn eq(&self, other: &Self) -> bool {
        if self.mesh_key != other.mesh_key {
            return false;
        }
        self.bind_group_data == other.bind_group_data
    }
}
impl<M: Material2d> Clone for Material2dKey<M>
where
    M::Data: Clone,
{
    /// Copies the (bit-packed) mesh key and clones the bind group data.
    fn clone(&self) -> Self {
        let Self {
            mesh_key,
            bind_group_data,
        } = self;
        Self {
            mesh_key: *mesh_key,
            bind_group_data: bind_group_data.clone(),
        }
    }
}
impl<M: Material2d> Hash for Material2dKey<M>
where
M::Data: Hash,
{
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
self.mesh_key.hash(state);
self.bind_group_data.hash(state);
}
}
// Hand-written so that no `M: Clone` bound is required: only the stored
// fields are cloned and the `PhantomData` marker is recreated.
impl<M: Material2d> Clone for Material2dPipeline<M> {
    fn clone(&self) -> Self {
        let Self {
            mesh2d_pipeline,
            material2d_layout,
            vertex_shader,
            fragment_shader,
            marker: _,
        } = self;
        Self {
            mesh2d_pipeline: mesh2d_pipeline.clone(),
            material2d_layout: material2d_layout.clone(),
            vertex_shader: vertex_shader.clone(),
            fragment_shader: fragment_shader.clone(),
            marker: PhantomData,
        }
    }
}
impl<M: Material2d> SpecializedMeshPipeline for Material2dPipeline<M>
where
    M::Data: PartialEq + Eq + Hash + Clone,
{
    type Key = Material2dKey<M>;
    /// Builds a render pipeline descriptor for a particular mesh layout and
    /// key, starting from the base 2D mesh pipeline and layering the
    /// material's shaders and bind group layout on top.
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut descriptor = self.mesh2d_pipeline.specialize(key.mesh_key, layout)?;
        // Tell both shader stages which bind group index holds material data.
        descriptor.vertex.shader_defs.push(ShaderDefVal::UInt(
            "MATERIAL_BIND_GROUP".into(),
            MATERIAL_2D_BIND_GROUP_INDEX as u32,
        ));
        if let Some(ref mut fragment) = descriptor.fragment {
            fragment.shader_defs.push(ShaderDefVal::UInt(
                "MATERIAL_BIND_GROUP".into(),
                MATERIAL_2D_BIND_GROUP_INDEX as u32,
            ));
        }
        // `None` means "keep the default mesh 2D shader".
        if let Some(vertex_shader) = &self.vertex_shader {
            descriptor.vertex.shader = vertex_shader.clone();
        }
        if let Some(fragment_shader) = &self.fragment_shader {
            // NOTE(review): assumes the base descriptor always carries a
            // fragment state when a custom fragment shader is set — confirm.
            descriptor.fragment.as_mut().unwrap().shader = fragment_shader.clone();
        }
        // Bind group order must match the draw commands (`DrawMaterial2d`):
        // 0 = view, 1 = mesh, MATERIAL_2D_BIND_GROUP_INDEX = material.
        descriptor.layout = vec![
            self.mesh2d_pipeline.view_layout.clone(),
            self.mesh2d_pipeline.mesh_layout.clone(),
            self.material2d_layout.clone(),
        ];
        // Give the material implementation a final chance to customize.
        M::specialize(&mut descriptor, layout, key)?;
        Ok(descriptor)
    }
}
pub fn init_material_2d_pipeline<M: Material2d>(
mut commands: Commands,
asset_server: Res<AssetServer>,
render_device: Res<RenderDevice>,
mesh_2d_pipeline: Res<Mesh2dPipeline>,
) {
let material2d_layout = M::bind_group_layout_descriptor(&render_device);
commands.insert_resource(Material2dPipeline::<M> {
mesh2d_pipeline: mesh_2d_pipeline.clone(),
material2d_layout,
vertex_shader: match M::vertex_shader() {
ShaderRef::Default => None,
ShaderRef::Handle(handle) => Some(handle),
ShaderRef::Path(path) => Some(asset_server.load(path)),
},
fragment_shader: match M::fragment_shader() {
ShaderRef::Default => None,
ShaderRef::Handle(handle) => Some(handle),
ShaderRef::Path(path) => Some(asset_server.load(path)),
},
marker: PhantomData,
});
}
/// The complete render command sequence for drawing a 2D mesh with material
/// `M`: set the pipeline, bind view (group 0), mesh (group 1) and material
/// (group `MATERIAL_2D_BIND_GROUP_INDEX`), then issue the draw.
pub(super) type DrawMaterial2d<M> = (
    SetItemPipeline,
    SetMesh2dViewBindGroup<0>,
    SetMesh2dBindGroup<1>,
    SetMaterial2dBindGroup<M, MATERIAL_2D_BIND_GROUP_INDEX>,
    DrawMesh2d,
);
/// [`RenderCommand`] that binds the prepared 2D material's bind group at
/// index `I` for the item being drawn.
pub struct SetMaterial2dBindGroup<M: Material2d, const I: usize>(PhantomData<M>);
impl<P: PhaseItem, M: Material2d, const I: usize> RenderCommand<P>
    for SetMaterial2dBindGroup<M, I>
{
    type Param = (
        SRes<RenderAssets<PreparedMaterial2d<M>>>,
        SRes<RenderMaterial2dInstances<M>>,
    );
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        (materials, material_instances): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let materials = materials.into_inner();
        let material_instances = material_instances.into_inner();
        // Entity -> material asset id; absent when the entity was not
        // extracted (e.g. not visible this frame). Skip rather than fail.
        let Some(material_instance) = material_instances.get(&item.main_entity()) else {
            return RenderCommandResult::Skip;
        };
        // The prepared GPU material may not exist yet (asset still pending).
        let Some(material2d) = materials.get(*material_instance) else {
            return RenderCommandResult::Skip;
        };
        pass.set_bind_group(I, &material2d.bind_group, &[]);
        RenderCommandResult::Success
    }
}
/// Maps a material's [`AlphaMode2d`] to the pipeline key bits selecting the
/// corresponding blend/discard behavior.
pub const fn alpha_mode_pipeline_key(alpha_mode: AlphaMode2d) -> Mesh2dPipelineKey {
    match alpha_mode {
        AlphaMode2d::Blend => Mesh2dPipelineKey::BLEND_ALPHA,
        AlphaMode2d::Mask(_) => Mesh2dPipelineKey::MAY_DISCARD,
        // Remaining modes (e.g. `Opaque`) need no special pipeline bits.
        _ => Mesh2dPipelineKey::NONE,
    }
}
/// Maps the camera's [`Tonemapping`] selection to the matching pipeline key
/// bits. The match is exhaustive, so adding a `Tonemapping` variant without
/// updating this function is a compile error.
pub const fn tonemapping_pipeline_key(tonemapping: Tonemapping) -> Mesh2dPipelineKey {
    match tonemapping {
        Tonemapping::None => Mesh2dPipelineKey::TONEMAP_METHOD_NONE,
        Tonemapping::Reinhard => Mesh2dPipelineKey::TONEMAP_METHOD_REINHARD,
        Tonemapping::ReinhardLuminance => Mesh2dPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE,
        Tonemapping::AcesFitted => Mesh2dPipelineKey::TONEMAP_METHOD_ACES_FITTED,
        Tonemapping::AgX => Mesh2dPipelineKey::TONEMAP_METHOD_AGX,
        Tonemapping::SomewhatBoringDisplayTransform => {
            Mesh2dPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM
        }
        Tonemapping::TonyMcMapface => Mesh2dPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE,
        Tonemapping::BlenderFilmic => Mesh2dPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC,
    }
}
/// Extracts specialization bookkeeping for material `M` into the render
/// world: drops ticks/cache entries for entities whose material component was
/// removed, then stamps this run's tick on every entity flagged by
/// [`check_entities_needing_specialization`].
pub fn extract_entities_needs_specialization<M>(
    entities_needing_specialization: Extract<Res<EntitiesNeedingSpecialization<M>>>,
    mut entity_specialization_ticks: ResMut<EntitySpecializationTickPair<M>>,
    mut removed_mesh_material_components: Extract<RemovedComponents<MeshMaterial2d<M>>>,
    mut specialized_material2d_pipeline_cache: ResMut<SpecializedMaterial2dPipelineCache<M>>,
    views: Query<&MainEntity, With<ExtractedView>>,
    ticks: SystemChangeTick,
) where
    M: Material2d,
{
    // Clean up any despawned entities, we do this first in case the removed material was re-added
    // the same frame, thus will appear both in the removed components list and have been added to
    // the `EntitiesNeedingSpecialization` collection by triggering the `Changed` filter
    for entity in removed_mesh_material_components.read() {
        entity_specialization_ticks.remove(&MainEntity::from(entity));
        // Also evict the entity's cached pipeline from every view's cache.
        for view in views {
            if let Some(cache) = specialized_material2d_pipeline_cache.get_mut(view) {
                cache.remove(&MainEntity::from(entity));
            }
        }
    }
    for entity in entities_needing_specialization.iter() {
        // Update the entity's specialization tick with this run's tick
        entity_specialization_ticks.insert((*entity).into(), ticks.this_run());
    }
}
/// Per-material-type collection of entities whose render pipelines must be
/// (re)specialized this frame; filled each frame by
/// [`check_entities_needing_specialization`].
#[derive(Clone, Resource, Deref, DerefMut, Debug)]
pub struct EntitiesNeedingSpecialization<M> {
    /// The entities collected this frame.
    #[deref]
    pub entities: Vec<Entity>,
    // Ties the resource to one material type without storing a value of it.
    _marker: PhantomData<M>,
}
// Manual impl: a derived `Default` would needlessly require `M: Default`.
impl<M> Default for EntitiesNeedingSpecialization<M> {
    fn default() -> Self {
        Self {
            entities: Default::default(),
            _marker: Default::default(),
        }
    }
}
/// Render-world map from main-world entity to the tick at which it last
/// required specialization for material type `M`.
#[derive(Clone, Resource, Deref, DerefMut, Debug)]
pub struct EntitySpecializationTickPair<M> {
    /// entity -> tick of its most recent specialization-triggering change.
    #[deref]
    pub entities: MainEntityHashMap<Tick>,
    // Ties the resource to one material type without storing a value of it.
    _marker: PhantomData<M>,
}
// Manual impl: a derived `Default` would needlessly require `M: Default`.
impl<M> Default for EntitySpecializationTickPair<M> {
    fn default() -> Self {
        Self {
            entities: MainEntityHashMap::default(),
            _marker: Default::default(),
        }
    }
}
/// Stores the [`SpecializedMaterial2dViewPipelineCache`] for each view.
#[derive(Resource, Deref, DerefMut)]
pub struct SpecializedMaterial2dPipelineCache<M> {
    // view_entity -> view pipeline cache
    #[deref]
    map: MainEntityHashMap<SpecializedMaterial2dViewPipelineCache<M>>,
    // Ties the cache to one material type without storing a value of it.
    marker: PhantomData<M>,
}
/// Stores the cached render pipeline ID for each entity in a single view, as
/// well as the last time it was changed.
#[derive(Deref, DerefMut)]
pub struct SpecializedMaterial2dViewPipelineCache<M> {
    // material entity -> (tick, pipeline_id)
    #[deref]
    map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>,
    // Ties the cache to one material type without storing a value of it.
    marker: PhantomData<M>,
}
impl<M> Default for SpecializedMaterial2dPipelineCache<M> {
fn default() -> Self {
Self {
map: HashMap::default(),
marker: PhantomData,
}
}
}
impl<M> Default for SpecializedMaterial2dViewPipelineCache<M> {
fn default() -> Self {
Self {
map: HashMap::default(),
marker: PhantomData,
}
}
}
/// Collects, in parallel, every entity whose 2D mesh or material — the
/// component or the underlying asset — changed this frame into
/// [`EntitiesNeedingSpecialization`].
pub fn check_entities_needing_specialization<M>(
    needs_specialization: Query<
        Entity,
        (
            Or<(
                Changed<Mesh2d>,
                AssetChanged<Mesh2d>,
                Changed<MeshMaterial2d<M>>,
                AssetChanged<MeshMaterial2d<M>>,
            )>,
            With<MeshMaterial2d<M>>,
        ),
    >,
    mut par_local: Local<Parallel<Vec<Entity>>>,
    mut entities_needing_specialization: ResMut<EntitiesNeedingSpecialization<M>>,
) where
    M: Material2d,
{
    // The collection is rebuilt from scratch every frame.
    entities_needing_specialization.clear();
    // Gather into per-thread buffers, then drain them all into the resource.
    needs_specialization
        .par_iter()
        .for_each(|entity| par_local.borrow_local_mut().push(entity));
    par_local.drain_into(&mut entities_needing_specialization);
}
/// For every visible 2D mesh in every active view, (re)specializes the
/// material pipeline when either the view's settings or the entity itself
/// changed since the pipeline was last cached, and records the result in
/// [`SpecializedMaterial2dPipelineCache`].
pub fn specialize_material2d_meshes<M: Material2d>(
    material2d_pipeline: Res<Material2dPipeline<M>>,
    mut pipelines: ResMut<SpecializedMeshPipelines<Material2dPipeline<M>>>,
    pipeline_cache: Res<PipelineCache>,
    (render_meshes, render_materials): (
        Res<RenderAssets<RenderMesh>>,
        Res<RenderAssets<PreparedMaterial2d<M>>>,
    ),
    mut render_mesh_instances: ResMut<RenderMesh2dInstances>,
    render_material_instances: Res<RenderMaterial2dInstances<M>>,
    transparent_render_phases: Res<ViewSortedRenderPhases<Transparent2d>>,
    opaque_render_phases: Res<ViewBinnedRenderPhases<Opaque2d>>,
    alpha_mask_render_phases: Res<ViewBinnedRenderPhases<AlphaMask2d>>,
    views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>,
    view_key_cache: Res<ViewKeyCache>,
    entity_specialization_ticks: Res<EntitySpecializationTickPair<M>>,
    view_specialization_ticks: Res<ViewSpecializationTicks>,
    ticks: SystemChangeTick,
    mut specialized_material_pipeline_cache: ResMut<SpecializedMaterial2dPipelineCache<M>>,
) where
    M::Data: PartialEq + Eq + Hash + Clone,
{
    // Nothing uses this material type; bail out early.
    if render_material_instances.is_empty() {
        return;
    }
    for (view_entity, view, visible_entities) in &views {
        // Skip views with no 2D phase of any kind.
        if !transparent_render_phases.contains_key(&view.retained_view_entity)
            && !opaque_render_phases.contains_key(&view.retained_view_entity)
            && !alpha_mask_render_phases.contains_key(&view.retained_view_entity)
        {
            continue;
        }
        let Some(view_key) = view_key_cache.get(view_entity) else {
            continue;
        };
        // NOTE(review): assumes every view with a cached key also has a
        // specialization tick — confirm this invariant.
        let view_tick = view_specialization_ticks.get(view_entity).unwrap();
        let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache
            .entry(*view_entity)
            .or_default();
        for (_, visible_entity) in visible_entities.iter::<Mesh2d>() {
            let Some(material_asset_id) = render_material_instances.get(visible_entity) else {
                continue;
            };
            let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else {
                continue;
            };
            let Some(entity_tick) = entity_specialization_ticks.get(visible_entity) else {
                error!("{visible_entity:?} is missing specialization tick. Spawning Meshes in PostUpdate or later is currently not fully supported.");
                continue;
            };
            let last_specialized_tick = view_specialized_material_pipeline_cache
                .get(visible_entity)
                .map(|(tick, _)| *tick);
            // Respecialize when never specialized, or when the view or the
            // entity changed since the cached pipeline was produced.
            let needs_specialization = last_specialized_tick.is_none_or(|tick| {
                view_tick.is_newer_than(tick, ticks.this_run())
                    || entity_tick.is_newer_than(tick, ticks.this_run())
            });
            if !needs_specialization {
                continue;
            }
            // Assets may not be prepared yet; retry next frame.
            let Some(material_2d) = render_materials.get(*material_asset_id) else {
                continue;
            };
            let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else {
                continue;
            };
            // Combine view bits, topology bits and material bits into the key.
            let mesh_key = *view_key
                | Mesh2dPipelineKey::from_primitive_topology(mesh.primitive_topology())
                | material_2d.properties.mesh_pipeline_key_bits;
            let pipeline_id = pipelines.specialize(
                &pipeline_cache,
                &material2d_pipeline,
                Material2dKey {
                    mesh_key,
                    bind_group_data: material_2d.key.clone(),
                },
                &mesh.layout,
            );
            let pipeline_id = match pipeline_id {
                Ok(id) => id,
                Err(err) => {
                    // Log and skip: a bad material shouldn't abort the frame.
                    error!("{}", err);
                    continue;
                }
            };
            view_specialized_material_pipeline_cache
                .insert(*visible_entity, (ticks.this_run(), pipeline_id));
        }
    }
}
pub fn queue_material2d_meshes<M: Material2d>(
(render_meshes, render_materials): (
Res<RenderAssets<RenderMesh>>,
Res<RenderAssets<PreparedMaterial2d<M>>>,
),
mut render_mesh_instances: ResMut<RenderMesh2dInstances>,
render_material_instances: Res<RenderMaterial2dInstances<M>>,
mut transparent_render_phases: ResMut<ViewSortedRenderPhases<Transparent2d>>,
mut opaque_render_phases: ResMut<ViewBinnedRenderPhases<Opaque2d>>,
mut alpha_mask_render_phases: ResMut<ViewBinnedRenderPhases<AlphaMask2d>>,
views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>,
specialized_material_pipeline_cache: ResMut<SpecializedMaterial2dPipelineCache<M>>,
) where
M::Data: PartialEq + Eq + Hash + Clone,
{
if render_material_instances.is_empty() {
return;
}
for (view_entity, view, visible_entities) in &views {
let Some(view_specialized_material_pipeline_cache) =
specialized_material_pipeline_cache.get(view_entity)
else {
continue;
};
let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
else {
continue;
};
let Some(opaque_phase) = opaque_render_phases.get_mut(&view.retained_view_entity) else {
continue;
};
let Some(alpha_mask_phase) = alpha_mask_render_phases.get_mut(&view.retained_view_entity)
else {
continue;
};
for (render_entity, visible_entity) in visible_entities.iter::<Mesh2d>() {
let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache
.get(visible_entity)
.map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id))
else {
continue;
};
// Skip the entity if it's cached in a bin and up to date.
if opaque_phase.validate_cached_entity(*visible_entity, current_change_tick)
|| alpha_mask_phase.validate_cached_entity(*visible_entity, current_change_tick)
{
continue;
}
let Some(material_asset_id) = render_material_instances.get(visible_entity) else {
continue;
};
let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else {
continue;
};
let Some(material_2d) = render_materials.get(*material_asset_id) else {
continue;
};
let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else {
continue;
};
mesh_instance.material_bind_group_id = material_2d.get_bind_group_id();
let mesh_z = mesh_instance.transforms.world_from_local.translation.z;
// We don't support multidraw yet for 2D meshes, so we use this
// custom logic to generate the `BinnedRenderPhaseType` instead of
// `BinnedRenderPhaseType::mesh`, which can return
// `BinnedRenderPhaseType::MultidrawableMesh` if the hardware
// supports multidraw.
let binned_render_phase_type = if mesh_instance.automatic_batching {
BinnedRenderPhaseType::BatchableMesh
} else {
BinnedRenderPhaseType::UnbatchableMesh
};
match material_2d.properties.alpha_mode {
AlphaMode2d::Opaque => {
let bin_key = Opaque2dBinKey {
pipeline: pipeline_id,
draw_function: material_2d.properties.draw_function_id,
asset_id: mesh_instance.mesh_asset_id.into(),
material_bind_group_id: material_2d.get_bind_group_id().0,
};
opaque_phase.add(
BatchSetKey2d {
indexed: mesh.indexed(),
},
bin_key,
(*render_entity, *visible_entity),
InputUniformIndex::default(),
binned_render_phase_type,
current_change_tick,
);
}
AlphaMode2d::Mask(_) => {
let bin_key = AlphaMask2dBinKey {
pipeline: pipeline_id,
draw_function: material_2d.properties.draw_function_id,
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/mesh2d/mod.rs | crates/bevy_sprite_render/src/mesh2d/mod.rs | mod color_material;
mod material;
mod mesh;
mod wireframe2d;
pub use color_material::*;
pub use material::*;
pub use mesh::*;
pub use wireframe2d::*;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/mesh2d/wireframe2d.rs | crates/bevy_sprite_render/src/mesh2d/wireframe2d.rs | use crate::{
init_mesh_2d_pipeline, DrawMesh2d, Mesh2dPipeline, Mesh2dPipelineKey, RenderMesh2dInstances,
SetMesh2dBindGroup, SetMesh2dViewBindGroup, ViewKeyCache, ViewSpecializationTicks,
};
use bevy_app::{App, Plugin, PostUpdate, Startup, Update};
use bevy_asset::{
embedded_asset, load_embedded_asset, prelude::AssetChanged, AsAssetId, Asset, AssetApp,
AssetEventSystems, AssetId, AssetServer, Assets, Handle, UntypedAssetId,
};
use bevy_camera::{visibility::ViewVisibility, Camera, Camera2d};
use bevy_color::{Color, ColorToComponents};
use bevy_core_pipeline::core_2d::graph::{Core2d, Node2d};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
change_detection::Tick,
prelude::*,
query::QueryItem,
system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem},
};
use bevy_mesh::{Mesh2d, MeshVertexBufferLayoutRef};
use bevy_platform::{
collections::{HashMap, HashSet},
hash::FixedHasher,
};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
batching::gpu_preprocessing::GpuPreprocessingMode,
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
extract_resource::ExtractResource,
mesh::{
allocator::{MeshAllocator, SlabId},
RenderMesh,
},
prelude::*,
render_asset::{
prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets,
},
render_graph::{NodeRunError, RenderGraphContext, RenderGraphExt, ViewNode, ViewNodeRunner},
render_phase::{
AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType,
CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, InputUniformIndex, PhaseItem,
PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult,
SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases,
},
render_resource::*,
renderer::RenderContext,
sync_world::{MainEntity, MainEntityHashMap},
view::{
ExtractedView, RenderVisibleEntities, RetainedViewEntity, ViewDepthTexture, ViewTarget,
},
Extract, Render, RenderApp, RenderDebugFlags, RenderStartup, RenderSystems,
};
use bevy_shader::Shader;
use core::{hash::Hash, ops::Range};
use tracing::error;
/// A [`Plugin`] that draws wireframes for 2D meshes.
///
/// Wireframes currently do not work when using webgl or webgpu.
/// Supported rendering backends:
/// - DX12
/// - Vulkan
/// - Metal
///
/// This is a native only feature.
#[derive(Debug, Default)]
pub struct Wireframe2dPlugin {
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
// `Wireframe2dPlugin::default()` produces a plugin with empty debug flags.
impl Wireframe2dPlugin {
    /// Creates a new [`Wireframe2dPlugin`] with the given debug flags.
    pub fn new(debug_flags: RenderDebugFlags) -> Self {
        Self { debug_flags }
    }
}
impl Plugin for Wireframe2dPlugin {
    /// Registers the wireframe assets, resources, main-world systems, and —
    /// when a render app exists — the render-world phase, node and systems.
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "wireframe2d.wgsl");
        app.add_plugins((
            BinnedRenderPhasePlugin::<Wireframe2dPhaseItem, Mesh2dPipeline>::new(self.debug_flags),
            RenderAssetPlugin::<RenderWireframeMaterial>::default(),
        ))
        .init_asset::<Wireframe2dMaterial>()
        .init_resource::<SpecializedMeshPipelines<Wireframe2dPipeline>>()
        .init_resource::<Wireframe2dConfig>()
        .init_resource::<WireframeEntitiesNeedingSpecialization>()
        .add_systems(Startup, setup_global_wireframe_material)
        .add_systems(
            Update,
            (
                global_color_changed.run_if(resource_changed::<Wireframe2dConfig>),
                wireframe_color_changed,
                // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global
                // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed.
                (apply_wireframe_material, apply_global_wireframe_material).chain(),
            ),
        )
        .add_systems(
            PostUpdate,
            check_wireframe_entities_needing_specialization
                .after(AssetEventSystems)
                .run_if(resource_exists::<Wireframe2dConfig>),
        );
        // The render app may be absent (e.g. headless); skip render setup then.
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<WireframeEntitySpecializationTicks>()
            .init_resource::<SpecializedWireframePipelineCache>()
            .init_resource::<DrawFunctions<Wireframe2dPhaseItem>>()
            .add_render_command::<Wireframe2dPhaseItem, DrawWireframe2d>()
            .init_resource::<RenderWireframeInstances>()
            .init_resource::<SpecializedMeshPipelines<Wireframe2dPipeline>>()
            // Wireframes render in a dedicated node placed between the end of
            // the main 2D pass and post-processing.
            .add_render_graph_node::<ViewNodeRunner<Wireframe2dNode>>(Core2d, Node2d::Wireframe)
            .add_render_graph_edges(
                Core2d,
                (
                    Node2d::EndMainPass,
                    Node2d::Wireframe,
                    Node2d::PostProcessing,
                ),
            )
            // The wireframe pipeline clones the base 2D mesh pipeline, so its
            // init must run after `init_mesh_2d_pipeline`.
            .add_systems(
                RenderStartup,
                init_wireframe_2d_pipeline.after(init_mesh_2d_pipeline),
            )
            .add_systems(
                ExtractSchedule,
                (
                    extract_wireframe_2d_camera,
                    extract_wireframe_entities_needing_specialization,
                    extract_wireframe_materials,
                ),
            )
            .add_systems(
                Render,
                (
                    specialize_wireframes
                        .in_set(RenderSystems::PrepareMeshes)
                        .after(prepare_assets::<RenderWireframeMaterial>)
                        .after(prepare_assets::<RenderMesh>),
                    queue_wireframes
                        .in_set(RenderSystems::QueueMeshes)
                        .after(prepare_assets::<RenderWireframeMaterial>),
                ),
            );
    }
}
/// Enables wireframe rendering for any entity it is attached to.
/// It will ignore the [`Wireframe2dConfig`] global setting.
///
/// This requires the [`Wireframe2dPlugin`] to be enabled.
#[derive(Component, Debug, Clone, Default, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq)]
pub struct Wireframe2d;
/// A phase item in the dedicated 2D wireframe render phase: one (potentially
/// batched) wireframe draw within a single view.
pub struct Wireframe2dPhaseItem {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: Wireframe2dBatchSetKey,
    /// The key, which determines which can be batched.
    pub bin_key: Wireframe2dBinKey,
    /// An entity from which data will be fetched, including the mesh if
    /// applicable.
    pub representative_entity: (Entity, MainEntity),
    /// The ranges of instances.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
}
// Straightforward accessor plumbing required by the phase-item machinery;
// all data lives in the struct fields and the batch set key.
impl PhaseItem for Wireframe2dPhaseItem {
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl CachedRenderPipelinePhaseItem for Wireframe2dPhaseItem {
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
impl BinnedPhaseItem for Wireframe2dPhaseItem {
    type BinKey = Wireframe2dBinKey;
    type BatchSetKey = Wireframe2dBatchSetKey;
    // Plain constructor: stores the keys and ranges handed over by the binner.
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            batch_set_key,
            bin_key,
            representative_entity,
            batch_range,
            extra_index,
        }
    }
}
/// Data that must be identical for wireframe phase items to share a *batch
/// set* (see [`Wireframe2dPhaseItem::batch_set_key`]).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Wireframe2dBatchSetKey {
    /// The identifier of the render pipeline.
    pub pipeline: CachedRenderPipelineId,
    /// The wireframe material asset ID.
    pub asset_id: UntypedAssetId,
    /// The function used to draw.
    pub draw_function: DrawFunctionId,
    /// The ID of the slab of GPU memory that contains vertex data.
    ///
    /// For non-mesh items, you can fill this with 0 if your items can be
    /// multi-drawn, or with a unique value if they can't.
    pub vertex_slab: SlabId,
    /// The ID of the slab of GPU memory that contains index data, if present.
    ///
    /// For non-mesh items, you can safely fill this with `None`.
    pub index_slab: Option<SlabId>,
}
impl PhaseItemBatchSetKey for Wireframe2dBatchSetKey {
    // An item is "indexed" exactly when it has an index-buffer slab.
    fn indexed(&self) -> bool {
        self.index_slab.is_some()
    }
}
/// Data that must be identical in order to *batch* phase items together.
///
/// Note that a *batch set* (if multi-draw is in use) contains multiple batches.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Wireframe2dBinKey {
    /// The wireframe mesh asset ID.
    pub asset_id: UntypedAssetId,
}
/// [`RenderCommand`] that uploads the item's wireframe color as a
/// fragment-stage push constant (16 bytes: linear RGBA as `[f32; 4]`).
pub struct SetWireframe2dPushConstants;
impl<P: PhaseItem> RenderCommand<P> for SetWireframe2dPushConstants {
    type Param = (
        SRes<RenderWireframeInstances>,
        SRes<RenderAssets<RenderWireframeMaterial>>,
    );
    type ViewQuery = ();
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        _view: (),
        _item_query: Option<()>,
        (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // Which wireframe material asset does this entity use?
        let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else {
            return RenderCommandResult::Failure("No wireframe material found for entity");
        };
        // The asset id can be known while the prepared render asset is still
        // pending; report that as a distinct failure so logs can tell the
        // two cases apart.
        let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else {
            return RenderCommandResult::Failure("Wireframe material not yet prepared for entity");
        };
        pass.set_push_constants(
            ShaderStages::FRAGMENT,
            0,
            bytemuck::bytes_of(&wireframe_material.color),
        );
        RenderCommandResult::Success
    }
}
/// The render command sequence for drawing a 2D wireframe: set the pipeline,
/// bind the view (group 0) and mesh (group 1), upload the color push
/// constant, then draw the mesh.
pub type DrawWireframe2d = (
    SetItemPipeline,
    SetMesh2dViewBindGroup<0>,
    SetMesh2dBindGroup<1>,
    SetWireframe2dPushConstants,
    DrawMesh2d,
);
/// Pipeline data for 2D wireframe rendering: the base 2D mesh pipeline plus
/// the embedded wireframe shader.
#[derive(Resource, Clone)]
pub struct Wireframe2dPipeline {
    mesh_pipeline: Mesh2dPipeline,
    shader: Handle<Shader>,
}
/// Initializes the [`Wireframe2dPipeline`] resource at render-app startup.
///
/// Must run after `init_mesh_2d_pipeline` so the base [`Mesh2dPipeline`]
/// resource exists — the plugin schedules this with `.after(...)`.
pub fn init_wireframe_2d_pipeline(
    mut commands: Commands,
    mesh_2d_pipeline: Res<Mesh2dPipeline>,
    asset_server: Res<AssetServer>,
) {
    commands.insert_resource(Wireframe2dPipeline {
        mesh_pipeline: mesh_2d_pipeline.clone(),
        // The shader is compiled into the binary via `embedded_asset!`.
        shader: load_embedded_asset!(asset_server.as_ref(), "wireframe2d.wgsl"),
    });
}
impl SpecializedMeshPipeline for Wireframe2dPipeline {
    type Key = Mesh2dPipelineKey;
    /// Builds the wireframe pipeline by specializing the regular 2D mesh
    /// pipeline and overriding only what differs: fragment shader, line
    /// rasterization, push-constant range and a depth bias.
    fn specialize(
        &self,
        key: Self::Key,
        layout: &MeshVertexBufferLayoutRef,
    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
        let mut descriptor = self.mesh_pipeline.specialize(key, layout)?;
        descriptor.label = Some("wireframe_2d_pipeline".into());
        // The wireframe color arrives as a 16-byte (vec4<f32>) fragment push
        // constant; see `SetWireframe2dPushConstants`.
        descriptor.push_constant_ranges.push(PushConstantRange {
            stages: ShaderStages::FRAGMENT,
            range: 0..16,
        });
        // NOTE(review): assumes the base mesh pipeline always produces both a
        // fragment state and a depth-stencil state — confirm.
        let fragment = descriptor.fragment.as_mut().unwrap();
        fragment.shader = self.shader.clone();
        // Rasterize edges only.
        descriptor.primitive.polygon_mode = PolygonMode::Line;
        // Slope-scaled depth bias so the lines win the depth test against the
        // filled mesh they outline.
        descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0;
        Ok(descriptor)
    }
}
#[derive(Default)]
struct Wireframe2dNode;
impl ViewNode for Wireframe2dNode {
type ViewQuery = (
&'static ExtractedCamera,
&'static ExtractedView,
&'static ViewTarget,
&'static ViewDepthTexture,
);
fn run<'w>(
&self,
graph: &mut RenderGraphContext,
render_context: &mut RenderContext<'w>,
(camera, view, target, depth): QueryItem<'w, '_, Self::ViewQuery>,
world: &'w World,
) -> Result<(), NodeRunError> {
let Some(wireframe_phase) =
world.get_resource::<ViewBinnedRenderPhases<Wireframe2dPhaseItem>>()
else {
return Ok(());
};
let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
label: Some("wireframe_2d"),
color_attachments: &[Some(target.get_color_attachment())],
depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)),
timestamp_writes: None,
occlusion_query_set: None,
});
let pass_span = diagnostics.pass_span(&mut render_pass, "wireframe_2d");
if let Some(viewport) = camera.viewport.as_ref() {
render_pass.set_camera_viewport(viewport);
}
if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) {
error!("Error encountered while rendering the stencil phase {err:?}");
return Err(NodeRunError::DrawError(err));
}
pass_span.end(&mut render_pass);
Ok(())
}
}
/// Sets the color of the [`Wireframe2d`] of the entity it is attached to.
///
/// If this component is present but there's no [`Wireframe2d`] component,
/// it will still affect the color of the wireframe when [`Wireframe2dConfig::global`] is set to true.
///
/// This overrides the [`Wireframe2dConfig::default_color`].
#[derive(Component, Debug, Clone, Default, Reflect)]
#[reflect(Component, Default, Debug)]
pub struct Wireframe2dColor {
    /// The wireframe color.
    pub color: Color,
}
/// A wireframe color as a raw `[f32; 4]` array.
// NOTE(review): not referenced anywhere in this file's visible code;
// presumably the render-world form produced during extraction — confirm.
#[derive(Component, Debug, Clone, Default)]
pub struct ExtractedWireframeColor {
    /// RGBA color components.
    pub color: [f32; 4],
}
/// Disables wireframe rendering for any entity it is attached to.
/// It will ignore the [`Wireframe2dConfig`] global setting.
///
/// This requires the [`Wireframe2dPlugin`] to be enabled.
#[derive(Component, Debug, Clone, Default, Reflect, Eq, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq)]
pub struct NoWireframe2d;
/// Global settings for 2D wireframe rendering.
#[derive(Resource, Debug, Clone, Default, ExtractResource, Reflect)]
#[reflect(Resource, Debug, Default)]
pub struct Wireframe2dConfig {
    /// Whether to show wireframes for all meshes.
    /// Can be overridden for individual meshes by adding a [`Wireframe2d`] or [`NoWireframe2d`] component.
    pub global: bool,
    /// If [`Self::global`] is set, any [`Entity`] that does not have a [`Wireframe2d`] component attached to it will have
    /// wireframes using this color. Otherwise, this will be the fallback color for any entity that has a [`Wireframe2d`],
    /// but no [`Wireframe2dColor`].
    pub default_color: Color,
}
/// The asset backing a wireframe's appearance; currently just a color.
#[derive(Asset, Reflect, Clone, Debug, Default)]
#[reflect(Clone, Default)]
pub struct Wireframe2dMaterial {
    /// The wireframe color.
    pub color: Color,
}
/// GPU-ready wireframe material data: the color as linear RGBA floats,
/// uploaded as a push constant when drawing (see `SetWireframe2dPushConstants`).
pub struct RenderWireframeMaterial {
    /// Linear RGBA color components.
    pub color: [f32; 4],
}
/// Component holding the [`Wireframe2dMaterial`] handle used for a mesh.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)]
#[reflect(Component, Default, Clone, PartialEq)]
pub struct Mesh2dWireframe(pub Handle<Wireframe2dMaterial>);
// Lets asset-change tracking (`AssetChanged<Mesh2dWireframe>`) know which
// asset this component points at.
impl AsAssetId for Mesh2dWireframe {
    type Asset = Wireframe2dMaterial;
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.0.id()
    }
}
impl RenderAsset for RenderWireframeMaterial {
    type SourceAsset = Wireframe2dMaterial;
    type Param = ();
    /// Converts a main-world [`Wireframe2dMaterial`] into its GPU-side form:
    /// the color converted to linear space and flattened to `[f32; 4]`.
    /// Infallible — always returns `Ok`.
    fn prepare_asset(
        source_asset: Self::SourceAsset,
        _asset_id: AssetId<Self::SourceAsset>,
        _param: &mut SystemParamItem<Self::Param>,
        _previous_asset: Option<&Self>,
    ) -> Result<Self, PrepareAssetError<Self::SourceAsset>> {
        let color = source_asset.color.to_linear().to_f32_array();
        Ok(Self { color })
    }
}
/// Render-world map from main-world entity to its wireframe material asset id.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct RenderWireframeInstances(MainEntityHashMap<AssetId<Wireframe2dMaterial>>);
/// Entities whose wireframe pipelines must be (re)specialized this frame.
#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)]
pub struct WireframeEntitiesNeedingSpecialization {
    /// The entities collected this frame.
    #[deref]
    pub entities: Vec<Entity>,
}
/// Tick at which each entity last required wireframe respecialization.
#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)]
pub struct WireframeEntitySpecializationTicks {
    /// entity -> tick of its most recent specialization-triggering change.
    pub entities: MainEntityHashMap<Tick>,
}
/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct SpecializedWireframePipelineCache {
    // view entity -> view pipeline cache
    #[deref]
    map: HashMap<RetainedViewEntity, SpecializedWireframeViewPipelineCache>,
}
/// Stores the cached render pipeline ID for each entity in a single view, as
/// well as the last time it was changed.
#[derive(Deref, DerefMut, Default)]
pub struct SpecializedWireframeViewPipelineCache {
    // material entity -> (tick, pipeline_id)
    #[deref]
    map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>,
}
/// Holds the shared material used for meshes wireframed only via
/// [`Wireframe2dConfig::global`].
#[derive(Resource)]
struct GlobalWireframeMaterial {
    // This handle will be reused when the global config is enabled
    handle: Handle<Wireframe2dMaterial>,
}
/// Extracts each entity's wireframe material assignment into the render world
/// ([`RenderWireframeInstances`]), tracking visibility flips, material
/// changes, and component removals.
pub fn extract_wireframe_materials(
    mut material_instances: ResMut<RenderWireframeInstances>,
    changed_meshes_query: Extract<
        Query<
            (Entity, &ViewVisibility, &Mesh2dWireframe),
            Or<(Changed<ViewVisibility>, Changed<Mesh2dWireframe>)>,
        >,
    >,
    mut removed_visibilities_query: Extract<RemovedComponents<ViewVisibility>>,
    mut removed_materials_query: Extract<RemovedComponents<Mesh2dWireframe>>,
) {
    // Visible entities are (re)inserted; newly-hidden ones are dropped.
    for (entity, view_visibility, material) in &changed_meshes_query {
        if view_visibility.get() {
            material_instances.insert(entity.into(), material.id());
        } else {
            material_instances.remove(&MainEntity::from(entity));
        }
    }
    for entity in removed_visibilities_query
        .read()
        .chain(removed_materials_query.read())
    {
        // Only queue a mesh for removal if we didn't pick it up above.
        // It's possible that a necessary component was removed and re-added in
        // the same frame.
        if !changed_meshes_query.contains(entity) {
            material_instances.remove(&MainEntity::from(entity));
        }
    }
}
/// Startup system that allocates the single shared wireframe material used
/// for globally-wireframed meshes and stores its handle as a resource.
fn setup_global_wireframe_material(
    mut commands: Commands,
    mut materials: ResMut<Assets<Wireframe2dMaterial>>,
    config: Res<Wireframe2dConfig>,
) {
    // Create the handle used for the global material
    let handle = materials.add(Wireframe2dMaterial {
        color: config.default_color,
    });
    commands.insert_resource(GlobalWireframeMaterial { handle });
}
/// Updates the wireframe material of all entities without a [`Wireframe2dColor`] or without a [`Wireframe2d`] component
fn global_color_changed(
    config: Res<Wireframe2dConfig>,
    mut materials: ResMut<Assets<Wireframe2dMaterial>>,
    global_material: Res<GlobalWireframeMaterial>,
) {
    // Best-effort: if the global material asset is missing, do nothing.
    let Some(material) = materials.get_mut(&global_material.handle) else {
        return;
    };
    material.color = config.default_color;
}
/// Updates the wireframe material when the color in [`Wireframe2dColor`] changes
fn wireframe_color_changed(
    mut materials: ResMut<Assets<Wireframe2dMaterial>>,
    mut colors_changed: Query<
        (&mut Mesh2dWireframe, &Wireframe2dColor),
        (With<Wireframe2d>, Changed<Wireframe2dColor>),
    >,
) {
    for (mut wireframe, color) in colors_changed.iter_mut() {
        // Each custom-colored entity gets its own material asset; it does not
        // share the global material.
        let material = Wireframe2dMaterial {
            color: color.color,
        };
        wireframe.0 = materials.add(material);
    }
}
/// Applies the wireframe material to any mesh with a [`Wireframe2d`] component, and removes it
/// from any mesh with a [`NoWireframe2d`] component.
fn apply_wireframe_material(
    mut commands: Commands,
    mut materials: ResMut<Assets<Wireframe2dMaterial>>,
    wireframes: Query<
        (Entity, Option<&Wireframe2dColor>),
        (With<Wireframe2d>, Without<Mesh2dWireframe>),
    >,
    no_wireframes: Query<Entity, (With<NoWireframe2d>, With<Mesh2dWireframe>)>,
    mut removed_wireframes: RemovedComponents<Wireframe2d>,
    global_material: Res<GlobalWireframeMaterial>,
) {
    // Strip the material from meshes that opted out or lost Wireframe2d.
    for entity in removed_wireframes.read().chain(no_wireframes.iter()) {
        if let Ok(mut entity_commands) = commands.get_entity(entity) {
            entity_commands.remove::<Mesh2dWireframe>();
        }
    }
    // Attach a material to every newly wireframed mesh with one batched insert.
    let to_insert: Vec<_> = wireframes
        .iter()
        .map(|(entity, maybe_color)| {
            let material = get_wireframe_material(maybe_color, &mut materials, &global_material);
            (entity, Mesh2dWireframe(material))
        })
        .collect();
    commands.try_insert_batch(to_insert);
}
/// Filter matching 2D meshes with no explicit per-entity wireframe opt-in or
/// opt-out — i.e. the candidates governed by the global wireframe toggle.
type WireframeFilter = (With<Mesh2d>, Without<Wireframe2d>, Without<NoWireframe2d>);
/// Applies or removes a wireframe material on any mesh without a [`Wireframe2d`] or [`NoWireframe2d`] component.
fn apply_global_wireframe_material(
    mut commands: Commands,
    config: Res<Wireframe2dConfig>,
    meshes_without_material: Query<
        (Entity, Option<&Wireframe2dColor>),
        (WireframeFilter, Without<Mesh2dWireframe>),
    >,
    meshes_with_global_material: Query<Entity, (WireframeFilter, With<Mesh2dWireframe>)>,
    global_material: Res<GlobalWireframeMaterial>,
    mut materials: ResMut<Assets<Wireframe2dMaterial>>,
) {
    if !config.global {
        // Global mode disabled: strip the material from every mesh that only
        // had it because of the global toggle.
        for entity in meshes_with_global_material.iter() {
            commands.entity(entity).remove::<Mesh2dWireframe>();
        }
        return;
    }
    // Global mode enabled: hand a material to every eligible mesh that doesn't
    // have one yet. Only the material handle is inserted — not the Wireframe2d
    // marker — so globally-driven meshes stay distinguishable from
    // user-specified ones.
    let batch: Vec<_> = meshes_without_material
        .iter()
        .map(|(entity, maybe_color)| {
            let material = get_wireframe_material(maybe_color, &mut materials, &global_material);
            (entity, Mesh2dWireframe(material))
        })
        .collect();
    commands.try_insert_batch(batch);
}
/// Gets a handle to a wireframe material with a fallback on the default material
fn get_wireframe_material(
    maybe_color: Option<&Wireframe2dColor>,
    wireframe_materials: &mut Assets<Wireframe2dMaterial>,
    global_material: &GlobalWireframeMaterial,
) -> Handle<Wireframe2dMaterial> {
    match maybe_color {
        // A per-entity color gets its own material asset.
        Some(wireframe_color) => wireframe_materials.add(Wireframe2dMaterial {
            color: wireframe_color.color,
        }),
        // No color specified: reuse the shared global material, which is
        // already set to the configured default color.
        None => global_material.handle.clone(),
    }
}
/// Prepares a wireframe render phase for every active 2D camera and drops
/// phases belonging to views that no longer exist.
fn extract_wireframe_2d_camera(
    mut wireframe_2d_phases: ResMut<ViewBinnedRenderPhases<Wireframe2dPhaseItem>>,
    cameras: Extract<Query<(Entity, &Camera), With<Camera2d>>>,
    mut live_entities: Local<HashSet<RetainedViewEntity>>,
) {
    live_entities.clear();
    for (main_entity, camera) in cameras.iter() {
        if !camera.is_active {
            continue;
        }
        let retained_view = RetainedViewEntity::new(main_entity.into(), None, 0);
        wireframe_2d_phases.prepare_for_new_frame(retained_view, GpuPreprocessingMode::None);
        live_entities.insert(retained_view);
    }
    // Anything not marked live above belongs to a dead view; drop its phase.
    wireframe_2d_phases.retain(|view, _| live_entities.contains(view));
}
/// Records this run's change tick for every wireframe entity flagged as needing
/// specialization, and prunes per-view pipeline-cache entries for meshes that
/// were removed in the main world.
pub fn extract_wireframe_entities_needing_specialization(
    entities_needing_specialization: Extract<Res<WireframeEntitiesNeedingSpecialization>>,
    mut entity_specialization_ticks: ResMut<WireframeEntitySpecializationTicks>,
    views: Query<&ExtractedView>,
    mut specialized_wireframe_pipeline_cache: ResMut<SpecializedWireframePipelineCache>,
    mut removed_meshes_query: Extract<RemovedComponents<Mesh2d>>,
    ticks: SystemChangeTick,
) {
    for entity in entities_needing_specialization.iter() {
        // Update the entity's specialization tick with this run's tick
        entity_specialization_ticks.insert((*entity).into(), ticks.this_run());
    }
    // Meshes removed in the main world must also leave every view's pipeline
    // cache so stale pipeline ids are not reused next frame.
    for entity in removed_meshes_query.read() {
        for view in &views {
            if let Some(specialized_wireframe_pipeline_cache) =
                specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity)
            {
                specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity));
            }
        }
    }
}
/// Rebuilds, every frame, the list of entities whose wireframe pipeline needs
/// re-specialization: any entity whose mesh or wireframe material component
/// (or the underlying asset) changed.
pub fn check_wireframe_entities_needing_specialization(
    needs_specialization: Query<
        Entity,
        Or<(
            Changed<Mesh2d>,
            AssetChanged<Mesh2d>,
            Changed<Mesh2dWireframe>,
            AssetChanged<Mesh2dWireframe>,
        )>,
    >,
    mut entities_needing_specialization: ResMut<WireframeEntitiesNeedingSpecialization>,
) {
    // The list is recomputed from scratch each run; `extend` replaces the
    // manual push loop and reserves capacity from the iterator's size hint.
    entities_needing_specialization.clear();
    entities_needing_specialization.extend(needs_specialization.iter());
}
/// Specializes (or re-specializes) the wireframe render pipeline for every
/// visible wireframed mesh in every view, caching the resulting pipeline id
/// per (view, entity) together with the tick it was built at.
///
/// An entity is re-specialized only when its own specialization tick or its
/// view's tick is newer than the tick recorded at the last specialization.
pub fn specialize_wireframes(
    render_meshes: Res<RenderAssets<RenderMesh>>,
    render_mesh_instances: Res<RenderMesh2dInstances>,
    render_wireframe_instances: Res<RenderWireframeInstances>,
    wireframe_phases: Res<ViewBinnedRenderPhases<Wireframe2dPhaseItem>>,
    views: Query<(&ExtractedView, &RenderVisibleEntities)>,
    view_key_cache: Res<ViewKeyCache>,
    entity_specialization_ticks: Res<WireframeEntitySpecializationTicks>,
    view_specialization_ticks: Res<ViewSpecializationTicks>,
    mut specialized_material_pipeline_cache: ResMut<SpecializedWireframePipelineCache>,
    mut pipelines: ResMut<SpecializedMeshPipelines<Wireframe2dPipeline>>,
    pipeline: Res<Wireframe2dPipeline>,
    pipeline_cache: Res<PipelineCache>,
    ticks: SystemChangeTick,
) {
    // Record the retained IDs of all views so that we can expire old
    // pipeline IDs.
    let mut all_views: HashSet<RetainedViewEntity, FixedHasher> = HashSet::default();
    for (view, visible_entities) in &views {
        all_views.insert(view.retained_view_entity);
        if !wireframe_phases.contains_key(&view.retained_view_entity) {
            continue;
        }
        let Some(view_key) = view_key_cache.get(&view.retained_view_entity.main_entity) else {
            continue;
        };
        let view_tick = view_specialization_ticks
            .get(&view.retained_view_entity.main_entity)
            .unwrap();
        let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache
            .entry(view.retained_view_entity)
            .or_default();
        for (_, visible_entity) in visible_entities.iter::<Mesh2d>() {
            // Only meshes that actually carry a wireframe material matter here.
            if !render_wireframe_instances.contains_key(visible_entity) {
                continue;
            };
            let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else {
                continue;
            };
            let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap();
            let last_specialized_tick = view_specialized_material_pipeline_cache
                .get(visible_entity)
                .map(|(tick, _)| *tick);
            // Re-specialize if never specialized, or if the view or entity
            // changed more recently than the cached pipeline.
            let needs_specialization = last_specialized_tick.is_none_or(|tick| {
                view_tick.is_newer_than(tick, ticks.this_run())
                    || entity_tick.is_newer_than(tick, ticks.this_run())
            });
            if !needs_specialization {
                continue;
            }
            let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else {
                continue;
            };
            // The pipeline key combines the view's key with the mesh topology.
            let mut mesh_key = *view_key;
            mesh_key |= Mesh2dPipelineKey::from_primitive_topology(mesh.primitive_topology());
            let pipeline_id =
                pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout);
            let pipeline_id = match pipeline_id {
                Ok(id) => id,
                Err(err) => {
                    error!("{}", err);
                    continue;
                }
            };
            view_specialized_material_pipeline_cache
                .insert(*visible_entity, (ticks.this_run(), pipeline_id));
        }
    }
    // Delete specialized pipelines belonging to views that have expired.
    specialized_material_pipeline_cache
        .retain(|retained_view_entity, _| all_views.contains(retained_view_entity));
}
/// Queues one binned phase item per visible wireframed mesh in every view,
/// using the pipeline ids produced by `specialize_wireframes`. Entities whose
/// cached bin entry is still valid for the current tick are skipped.
fn queue_wireframes(
    custom_draw_functions: Res<DrawFunctions<Wireframe2dPhaseItem>>,
    render_mesh_instances: Res<RenderMesh2dInstances>,
    mesh_allocator: Res<MeshAllocator>,
    specialized_wireframe_pipeline_cache: Res<SpecializedWireframePipelineCache>,
    render_wireframe_instances: Res<RenderWireframeInstances>,
    mut wireframe_2d_phases: ResMut<ViewBinnedRenderPhases<Wireframe2dPhaseItem>>,
    mut views: Query<(&ExtractedView, &RenderVisibleEntities)>,
) {
    for (view, visible_entities) in &mut views {
        let Some(wireframe_phase) = wireframe_2d_phases.get_mut(&view.retained_view_entity) else {
            continue;
        };
        let draw_wireframe = custom_draw_functions.read().id::<DrawWireframe2d>();
        let Some(view_specialized_material_pipeline_cache) =
            specialized_wireframe_pipeline_cache.get(&view.retained_view_entity)
        else {
            continue;
        };
        for (render_entity, visible_entity) in visible_entities.iter::<Mesh2d>() {
            let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else {
                continue;
            };
            // Pipeline id and the tick it was specialized at; entities with no
            // cached pipeline (e.g. specialization failed) are skipped.
            let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache
                .get(visible_entity)
                .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id))
            else {
                continue;
            };
            // Skip the entity if it's cached in a bin and up to date.
            if wireframe_phase.validate_cached_entity(*visible_entity, current_change_tick) {
                continue;
            }
            let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else {
                continue;
            };
            let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id);
            // Bin by mesh asset; batch-set by pipeline + material + buffers so
            // compatible items can be drawn together.
            let bin_key = Wireframe2dBinKey {
                asset_id: mesh_instance.mesh_asset_id.untyped(),
            };
            let batch_set_key = Wireframe2dBatchSetKey {
                pipeline: pipeline_id,
                asset_id: wireframe_instance.untyped(),
                draw_function: draw_wireframe,
                vertex_slab: vertex_slab.unwrap_or_default(),
                index_slab,
            };
            wireframe_phase.add(
                batch_set_key,
                bin_key,
                (*render_entity, *visible_entity),
                InputUniformIndex::default(),
                if mesh_instance.automatic_batching {
                    BinnedRenderPhaseType::BatchableMesh
                } else {
                    BinnedRenderPhaseType::UnbatchableMesh
                },
                current_change_tick,
            );
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/render/mod.rs | crates/bevy_sprite_render/src/render/mod.rs | use core::ops::Range;
use crate::ComputedTextureSlices;
use bevy_asset::{load_embedded_asset, AssetEvent, AssetId, AssetServer, Assets, Handle};
use bevy_camera::visibility::ViewVisibility;
use bevy_color::{ColorToComponents, LinearRgba};
use bevy_core_pipeline::{
core_2d::{Transparent2d, CORE_2D_DEPTH_FORMAT},
tonemapping::{
get_lut_bind_group_layout_entries, get_lut_bindings, DebandDither, Tonemapping,
TonemappingLuts,
},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
prelude::*,
query::ROQueryItem,
system::{lifetimeless::*, SystemParamItem},
};
use bevy_image::{BevyDefault, Image, TextureAtlasLayout};
use bevy_math::{Affine3A, FloatOrd, Quat, Rect, Vec2, Vec4};
use bevy_mesh::VertexBufferLayout;
use bevy_platform::collections::HashMap;
use bevy_render::view::{RenderVisibleEntities, RetainedViewEntity};
use bevy_render::{
render_asset::RenderAssets,
render_phase::{
DrawFunctions, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult,
SetItemPipeline, TrackedRenderPass, ViewSortedRenderPhases,
},
render_resource::{
binding_types::{sampler, texture_2d, uniform_buffer},
*,
},
renderer::{RenderDevice, RenderQueue},
sync_world::RenderEntity,
texture::{FallbackImage, GpuImage},
view::{ExtractedView, Msaa, ViewTarget, ViewUniform, ViewUniformOffset, ViewUniforms},
Extract,
};
use bevy_shader::{Shader, ShaderDefVal};
use bevy_sprite::{Anchor, Sprite, SpriteScalingMode};
use bevy_transform::components::GlobalTransform;
use bevy_utils::default;
use bytemuck::{Pod, Zeroable};
use fixedbitset::FixedBitSet;
/// Render resource holding the bind group layouts and shader used for sprites.
#[derive(Resource)]
pub struct SpritePipeline {
    // Per-view bindings: view uniform + tonemapping LUT texture/sampler.
    view_layout: BindGroupLayoutDescriptor,
    // Per-material bindings: sprite texture + sampler.
    material_layout: BindGroupLayoutDescriptor,
    shader: Handle<Shader>,
}
/// Builds the sprite pipeline's bind group layouts, loads the embedded sprite
/// shader, and inserts the resulting [`SpritePipeline`] resource.
pub fn init_sprite_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
    // View bind group: view uniform plus the tonemapping LUT bindings
    // (restricted to the fragment stage).
    let lut_entries = get_lut_bind_group_layout_entries();
    let view_entries = BindGroupLayoutEntries::sequential(
        ShaderStages::VERTEX_FRAGMENT,
        (
            uniform_buffer::<ViewUniform>(true),
            lut_entries[0].visibility(ShaderStages::FRAGMENT),
            lut_entries[1].visibility(ShaderStages::FRAGMENT),
        ),
    );
    let view_layout = BindGroupLayoutDescriptor::new("sprite_view_layout", &view_entries);
    // Material bind group: the sprite's texture and its sampler.
    let material_entries = BindGroupLayoutEntries::sequential(
        ShaderStages::FRAGMENT,
        (
            texture_2d(TextureSampleType::Float { filterable: true }),
            sampler(SamplerBindingType::Filtering),
        ),
    );
    let material_layout = BindGroupLayoutDescriptor::new("sprite_material_layout", &material_entries);
    commands.insert_resource(SpritePipeline {
        view_layout,
        material_layout,
        shader: load_embedded_asset!(asset_server.as_ref(), "sprite.wgsl"),
    });
}
bitflags::bitflags! {
    /// Key describing one sprite pipeline variant: HDR target, in-shader
    /// tonemapping (+ method) and dithering, plus MSAA sample count packed
    /// into the high bits (see the associated consts in `impl SpritePipelineKey`).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    #[repr(transparent)]
    // NOTE: Apparently quadro drivers support up to 64x MSAA.
    // MSAA uses the highest 3 bits for the MSAA log2(sample count) to support up to 128x MSAA.
    pub struct SpritePipelineKey: u32 {
        const NONE = 0;
        const HDR = 1 << 0;
        const TONEMAP_IN_SHADER = 1 << 1;
        const DEBAND_DITHER = 1 << 2;
        const MSAA_RESERVED_BITS = Self::MSAA_MASK_BITS << Self::MSAA_SHIFT_BITS;
        const TONEMAP_METHOD_RESERVED_BITS = Self::TONEMAP_METHOD_MASK_BITS << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_NONE = 0 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD = 1 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD_LUMINANCE = 2 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_ACES_FITTED = 3 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_AGX = 4 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM = 5 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_TONY_MC_MAPFACE = 6 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_BLENDER_FILMIC = 7 << Self::TONEMAP_METHOD_SHIFT_BITS;
    }
}
impl SpritePipelineKey {
    // MSAA log2(sample count) occupies the top 3 bits of the key.
    const MSAA_MASK_BITS: u32 = 0b111;
    const MSAA_SHIFT_BITS: u32 = 32 - Self::MSAA_MASK_BITS.count_ones();
    // Tonemap-method bits sit directly below the MSAA bits.
    const TONEMAP_METHOD_MASK_BITS: u32 = 0b111;
    const TONEMAP_METHOD_SHIFT_BITS: u32 =
        Self::MSAA_SHIFT_BITS - Self::TONEMAP_METHOD_MASK_BITS.count_ones();
    /// Encodes `log2(msaa_samples)` into the key's MSAA bits.
    /// For a power-of-two sample count, `trailing_zeros` == log2(count).
    #[inline]
    pub const fn from_msaa_samples(msaa_samples: u32) -> Self {
        let msaa_bits =
            (msaa_samples.trailing_zeros() & Self::MSAA_MASK_BITS) << Self::MSAA_SHIFT_BITS;
        Self::from_bits_retain(msaa_bits)
    }
    /// Decodes the sample count previously packed by [`Self::from_msaa_samples`].
    #[inline]
    pub const fn msaa_samples(&self) -> u32 {
        1 << ((self.bits() >> Self::MSAA_SHIFT_BITS) & Self::MSAA_MASK_BITS)
    }
    /// Returns the `HDR` flag when `hdr` is true, otherwise an empty key.
    #[inline]
    pub const fn from_hdr(hdr: bool) -> Self {
        if hdr {
            SpritePipelineKey::HDR
        } else {
            SpritePipelineKey::NONE
        }
    }
}
impl SpecializedRenderPipeline for SpritePipeline {
    type Key = SpritePipelineKey;
    /// Builds the render pipeline descriptor for one key variant: selects
    /// shader defs for tonemapping/dithering, the color target format (HDR or
    /// default), the per-instance vertex layout, and the MSAA sample count.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        let mut shader_defs = Vec::new();
        if key.contains(SpritePipelineKey::TONEMAP_IN_SHADER) {
            shader_defs.push("TONEMAP_IN_SHADER".into());
            // LUT texture/sampler binding indices within the view bind group.
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
                1,
            ));
            shader_defs.push(ShaderDefVal::UInt(
                "TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
                2,
            ));
            // Translate the packed tonemap-method bits into a shader def.
            let method = key.intersection(SpritePipelineKey::TONEMAP_METHOD_RESERVED_BITS);
            if method == SpritePipelineKey::TONEMAP_METHOD_NONE {
                shader_defs.push("TONEMAP_METHOD_NONE".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_REINHARD {
                shader_defs.push("TONEMAP_METHOD_REINHARD".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE {
                shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_ACES_FITTED {
                shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_AGX {
                shader_defs.push("TONEMAP_METHOD_AGX".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM
            {
                shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_BLENDER_FILMIC {
                shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
            } else if method == SpritePipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE {
                shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
            }
            // Debanding is tied to tonemapping in the shader, cannot run without it.
            if key.contains(SpritePipelineKey::DEBAND_DITHER) {
                shader_defs.push("DEBAND_DITHER".into());
            }
        }
        let format = match key.contains(SpritePipelineKey::HDR) {
            true => ViewTarget::TEXTURE_FORMAT_HDR,
            false => TextureFormat::bevy_default(),
        };
        // One 80-byte instance per sprite: 3 vec4 transform rows, color, UVs.
        let instance_rate_vertex_buffer_layout = VertexBufferLayout {
            array_stride: 80,
            step_mode: VertexStepMode::Instance,
            attributes: vec![
                // @location(0) i_model_transpose_col0: vec4<f32>,
                VertexAttribute {
                    format: VertexFormat::Float32x4,
                    offset: 0,
                    shader_location: 0,
                },
                // @location(1) i_model_transpose_col1: vec4<f32>,
                VertexAttribute {
                    format: VertexFormat::Float32x4,
                    offset: 16,
                    shader_location: 1,
                },
                // @location(2) i_model_transpose_col2: vec4<f32>,
                VertexAttribute {
                    format: VertexFormat::Float32x4,
                    offset: 32,
                    shader_location: 2,
                },
                // @location(3) i_color: vec4<f32>,
                VertexAttribute {
                    format: VertexFormat::Float32x4,
                    offset: 48,
                    shader_location: 3,
                },
                // @location(4) i_uv_offset_scale: vec4<f32>,
                VertexAttribute {
                    format: VertexFormat::Float32x4,
                    offset: 64,
                    shader_location: 4,
                },
            ],
        };
        RenderPipelineDescriptor {
            vertex: VertexState {
                shader: self.shader.clone(),
                shader_defs: shader_defs.clone(),
                buffers: vec![instance_rate_vertex_buffer_layout],
                ..default()
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                targets: vec![Some(ColorTargetState {
                    format,
                    blend: Some(BlendState::ALPHA_BLENDING),
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            layout: vec![self.view_layout.clone(), self.material_layout.clone()],
            // Sprites are always alpha blended so they never need to write to depth.
            // They just need to read it in case an opaque mesh2d
            // that wrote to depth is present.
            depth_stencil: Some(DepthStencilState {
                format: CORE_2D_DEPTH_FORMAT,
                depth_write_enabled: false,
                depth_compare: CompareFunction::GreaterEqual,
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: key.msaa_samples(),
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            label: Some("sprite_pipeline".into()),
            ..default()
        }
    }
}
/// One slice of a multi-slice sprite (text glyph, nine-patch piece, …),
/// referenced by index from [`ExtractedSpriteKind::Slices`].
pub struct ExtractedSlice {
    // Offset of the slice quad relative to the sprite origin.
    pub offset: Vec2,
    // Region of the source texture this slice samples from.
    pub rect: Rect,
    // On-screen size of the slice quad.
    pub size: Vec2,
}
/// A sprite extracted from the main world into the render world for drawing.
pub struct ExtractedSprite {
    pub main_entity: Entity,
    pub render_entity: Entity,
    pub transform: GlobalTransform,
    pub color: LinearRgba,
    /// Asset ID of the [`Image`] of this sprite
    /// PERF: storing an `AssetId` instead of `Handle<Image>` enables some optimizations (`ExtractedSprite` becomes `Copy` and doesn't need to be dropped)
    pub image_handle_id: AssetId<Image>,
    pub flip_x: bool,
    pub flip_y: bool,
    pub kind: ExtractedSpriteKind,
}
/// Distinguishes a plain single-quad sprite from a sprite composed of slices.
pub enum ExtractedSpriteKind {
    /// A single sprite with custom sizing and scaling options
    Single {
        anchor: Vec2,
        rect: Option<Rect>,
        scaling_mode: Option<SpriteScalingMode>,
        custom_size: Option<Vec2>,
    },
    /// Indexes into the list of [`ExtractedSlice`]s stored in the [`ExtractedSlices`] resource
    /// Used for elements composed from multiple sprites such as text or nine-patched borders
    Slices { indices: Range<usize> },
}
/// Per-frame list of all sprites extracted into the render world.
#[derive(Resource, Default)]
pub struct ExtractedSprites {
    pub sprites: Vec<ExtractedSprite>,
}
/// Per-frame flat list of slices referenced by [`ExtractedSpriteKind::Slices`].
#[derive(Resource, Default)]
pub struct ExtractedSlices {
    pub slices: Vec<ExtractedSlice>,
}
/// Image asset events mirrored into the render world each frame, used to
/// invalidate cached image bind groups.
#[derive(Resource, Default)]
pub struct SpriteAssetEvents {
    pub images: Vec<AssetEvent<Image>>,
}
pub fn extract_sprite_events(
mut events: ResMut<SpriteAssetEvents>,
mut image_events: Extract<MessageReader<AssetEvent<Image>>>,
) {
let SpriteAssetEvents { ref mut images } = *events;
images.clear();
for event in image_events.read() {
images.push(*event);
}
}
/// Extracts every visible sprite from the main world into [`ExtractedSprites`]
/// (and its slices, if any, into [`ExtractedSlices`]).
pub fn extract_sprites(
    mut extracted_sprites: ResMut<ExtractedSprites>,
    mut extracted_slices: ResMut<ExtractedSlices>,
    texture_atlases: Extract<Res<Assets<TextureAtlasLayout>>>,
    sprite_query: Extract<
        Query<(
            Entity,
            RenderEntity,
            &ViewVisibility,
            &Sprite,
            &GlobalTransform,
            &Anchor,
            Option<&ComputedTextureSlices>,
        )>,
    >,
) {
    extracted_sprites.sprites.clear();
    extracted_slices.slices.clear();
    for (main_entity, render_entity, view_visibility, sprite, transform, anchor, slices) in
        sprite_query.iter()
    {
        if !view_visibility.get() {
            continue;
        }
        if let Some(slices) = slices {
            // Sliced sprite (e.g. nine-patch): append its slices and record
            // the index range they occupy.
            let start = extracted_slices.slices.len();
            extracted_slices
                .slices
                .extend(slices.extract_slices(sprite, anchor.as_vec()));
            let end = extracted_slices.slices.len();
            extracted_sprites.sprites.push(ExtractedSprite {
                main_entity,
                render_entity,
                color: sprite.color.into(),
                transform: *transform,
                flip_x: sprite.flip_x,
                flip_y: sprite.flip_y,
                image_handle_id: sprite.image.id(),
                kind: ExtractedSpriteKind::Slices {
                    indices: start..end,
                },
            });
        } else {
            // Combine the atlas cell rect (if any) with the sprite's own rect
            // (if any); a sprite rect is interpreted relative to the atlas cell.
            let atlas_rect = sprite
                .texture_atlas
                .as_ref()
                .and_then(|s| s.texture_rect(&texture_atlases).map(|r| r.as_rect()));
            let rect = match (atlas_rect, sprite.rect) {
                (None, None) => None,
                (None, Some(sprite_rect)) => Some(sprite_rect),
                (Some(atlas_rect), None) => Some(atlas_rect),
                (Some(atlas_rect), Some(mut sprite_rect)) => {
                    sprite_rect.min += atlas_rect.min;
                    sprite_rect.max += atlas_rect.min;
                    Some(sprite_rect)
                }
            };
            // PERF: we don't check in this function that the `Image` asset is ready, since it should be in most cases and hashing the handle is expensive
            extracted_sprites.sprites.push(ExtractedSprite {
                main_entity,
                render_entity,
                color: sprite.color.into(),
                transform: *transform,
                flip_x: sprite.flip_x,
                flip_y: sprite.flip_y,
                image_handle_id: sprite.image.id(),
                kind: ExtractedSpriteKind::Single {
                    anchor: anchor.as_vec(),
                    rect,
                    scaling_mode: sprite.image_mode.scale(),
                    // Pass the custom size
                    custom_size: sprite.custom_size,
                },
            });
        }
    }
}
/// GPU-side per-instance vertex data for one sprite quad; layout must match
/// the instance vertex buffer layout declared in `SpritePipeline::specialize`.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct SpriteInstance {
    // Affine 4x3 transposed to 3x4
    pub i_model_transpose: [Vec4; 3],
    pub i_color: [f32; 4],
    pub i_uv_offset_scale: [f32; 4],
}
impl SpriteInstance {
    /// Packs an affine transform, color, and UV offset/scale into the
    /// per-instance layout expected by the sprite shader.
    #[inline]
    fn from(transform: &Affine3A, color: &LinearRgba, uv_offset_scale: &Vec4) -> Self {
        // Transpose the 3x3 linear part so each row can carry one translation
        // component, yielding three vec4 columns.
        let linear = transform.matrix3.transpose();
        let t = transform.translation;
        Self {
            i_model_transpose: [
                linear.x_axis.extend(t.x),
                linear.y_axis.extend(t.y),
                linear.z_axis.extend(t.z),
            ],
            i_color: color.to_f32_array(),
            i_uv_offset_scale: uv_offset_scale.to_array(),
        }
    }
}
/// GPU buffers shared by all sprite draws: a static quad index buffer and the
/// per-frame instance buffer.
#[derive(Resource)]
pub struct SpriteMeta {
    sprite_index_buffer: RawBufferVec<u32>,
    sprite_instance_buffer: RawBufferVec<SpriteInstance>,
}
impl Default for SpriteMeta {
    fn default() -> Self {
        // The index buffer holds the static quad indices; the instance buffer
        // is refilled every frame.
        let sprite_index_buffer = RawBufferVec::new(BufferUsages::INDEX);
        let sprite_instance_buffer = RawBufferVec::new(BufferUsages::VERTEX);
        Self {
            sprite_index_buffer,
            sprite_instance_buffer,
        }
    }
}
/// Per-view bind group (view uniform + tonemapping LUT) attached to each
/// extracted view entity by `prepare_sprite_view_bind_groups`.
#[derive(Component)]
pub struct SpriteViewBindGroup {
    pub value: BindGroup,
}
/// Batches produced by `prepare_sprite_image_bind_groups`, keyed by
/// (retained view, phase-item entity).
#[derive(Resource, Deref, DerefMut, Default)]
pub struct SpriteBatches(HashMap<(RetainedViewEntity, Entity), SpriteBatch>);
/// One contiguous run of sprite instances sharing the same image.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct SpriteBatch {
    image_handle_id: AssetId<Image>,
    // Index range into the instance buffer covered by this batch.
    range: Range<u32>,
}
/// Cache of per-image bind groups (texture + sampler), invalidated by
/// [`SpriteAssetEvents`].
#[derive(Resource, Default)]
pub struct ImageBindGroups {
    values: HashMap<AssetId<Image>, BindGroup>,
}
/// Queues one `Transparent2d` phase item per extracted sprite visible in each
/// view, specializing the sprite pipeline per view (HDR/MSAA/tonemapping key).
pub fn queue_sprites(
    mut view_entities: Local<FixedBitSet>,
    draw_functions: Res<DrawFunctions<Transparent2d>>,
    sprite_pipeline: Res<SpritePipeline>,
    mut pipelines: ResMut<SpecializedRenderPipelines<SpritePipeline>>,
    pipeline_cache: Res<PipelineCache>,
    extracted_sprites: Res<ExtractedSprites>,
    mut transparent_render_phases: ResMut<ViewSortedRenderPhases<Transparent2d>>,
    mut views: Query<(
        &RenderVisibleEntities,
        &ExtractedView,
        &Msaa,
        Option<&Tonemapping>,
        Option<&DebandDither>,
    )>,
) {
    let draw_sprite_function = draw_functions.read().id::<DrawSprite>();
    for (visible_entities, view, msaa, tonemapping, dither) in &mut views {
        let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity)
        else {
            continue;
        };
        // Build the pipeline key for this view. In-shader tonemapping is only
        // used for non-HDR targets here.
        let msaa_key = SpritePipelineKey::from_msaa_samples(msaa.samples());
        let mut view_key = SpritePipelineKey::from_hdr(view.hdr) | msaa_key;
        if !view.hdr {
            if let Some(tonemapping) = tonemapping {
                view_key |= SpritePipelineKey::TONEMAP_IN_SHADER;
                view_key |= match tonemapping {
                    Tonemapping::None => SpritePipelineKey::TONEMAP_METHOD_NONE,
                    Tonemapping::Reinhard => SpritePipelineKey::TONEMAP_METHOD_REINHARD,
                    Tonemapping::ReinhardLuminance => {
                        SpritePipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE
                    }
                    Tonemapping::AcesFitted => SpritePipelineKey::TONEMAP_METHOD_ACES_FITTED,
                    Tonemapping::AgX => SpritePipelineKey::TONEMAP_METHOD_AGX,
                    Tonemapping::SomewhatBoringDisplayTransform => {
                        SpritePipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM
                    }
                    Tonemapping::TonyMcMapface => SpritePipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE,
                    Tonemapping::BlenderFilmic => SpritePipelineKey::TONEMAP_METHOD_BLENDER_FILMIC,
                };
            }
            if let Some(DebandDither::Enabled) = dither {
                view_key |= SpritePipelineKey::DEBAND_DITHER;
            }
        }
        let pipeline = pipelines.specialize(&pipeline_cache, &sprite_pipeline, view_key);
        // Bitset of entity indices visible in this view, for fast membership
        // tests against the extracted sprite list.
        view_entities.clear();
        view_entities.extend(
            visible_entities
                .iter::<Sprite>()
                .map(|(_, e)| e.index_u32() as usize),
        );
        transparent_phase
            .items
            .reserve(extracted_sprites.sprites.len());
        for (index, extracted_sprite) in extracted_sprites.sprites.iter().enumerate() {
            let view_index = extracted_sprite.main_entity.index_u32();
            if !view_entities.contains(view_index as usize) {
                continue;
            }
            // These items will be sorted by depth with other phase items
            let sort_key = FloatOrd(extracted_sprite.transform.translation().z);
            // Add the item to the render phase
            transparent_phase.add(Transparent2d {
                draw_function: draw_sprite_function,
                pipeline,
                entity: (
                    extracted_sprite.render_entity,
                    extracted_sprite.main_entity.into(),
                ),
                sort_key,
                // `batch_range` is calculated in `prepare_sprite_image_bind_groups`
                batch_range: 0..0,
                extra_index: PhaseItemExtraIndex::None,
                extracted_index: index,
                indexed: true,
            });
        }
    }
}
/// Creates the per-view bind group (view uniform + tonemapping LUT bindings)
/// and attaches it to each extracted view entity.
pub fn prepare_sprite_view_bind_groups(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    sprite_pipeline: Res<SpritePipeline>,
    view_uniforms: Res<ViewUniforms>,
    views: Query<(Entity, &Tonemapping), With<ExtractedView>>,
    tonemapping_luts: Res<TonemappingLuts>,
    images: Res<RenderAssets<GpuImage>>,
    fallback_image: Res<FallbackImage>,
) {
    // No view uniforms uploaded yet — nothing to bind this frame.
    let Some(view_binding) = view_uniforms.uniforms.binding() else {
        return;
    };
    for (entity, tonemapping) in &views {
        let lut_bindings =
            get_lut_bindings(&images, &tonemapping_luts, tonemapping, &fallback_image);
        // NOTE(review): the debug label reads "mesh2d_view_bind_group" — looks
        // copy-pasted from mesh2d; purely diagnostic, but worth confirming.
        let view_bind_group = render_device.create_bind_group(
            "mesh2d_view_bind_group",
            &pipeline_cache.get_bind_group_layout(&sprite_pipeline.view_layout),
            &BindGroupEntries::sequential((view_binding.clone(), lut_bindings.0, lut_bindings.1)),
        );
        commands.entity(entity).insert(SpriteViewBindGroup {
            value: view_bind_group,
        });
    }
}
/// Fills the per-instance vertex buffer from the queued phase items, grouping
/// consecutive sprites that share an image into [`SpriteBatch`]es, creating
/// image bind groups on demand, and uploading the instance/index buffers.
pub fn prepare_sprite_image_bind_groups(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    pipeline_cache: Res<PipelineCache>,
    mut sprite_meta: ResMut<SpriteMeta>,
    sprite_pipeline: Res<SpritePipeline>,
    mut image_bind_groups: ResMut<ImageBindGroups>,
    gpu_images: Res<RenderAssets<GpuImage>>,
    extracted_sprites: Res<ExtractedSprites>,
    extracted_slices: Res<ExtractedSlices>,
    mut phases: ResMut<ViewSortedRenderPhases<Transparent2d>>,
    events: Res<SpriteAssetEvents>,
    mut batches: ResMut<SpriteBatches>,
) {
    // If an image has changed, the GpuImage has (probably) changed
    for event in &events.images {
        match event {
            AssetEvent::Added { .. } |
            // Images don't have dependencies
            AssetEvent::LoadedWithDependencies { .. } => {}
            AssetEvent::Unused { id } | AssetEvent::Modified { id } | AssetEvent::Removed { id } => {
                image_bind_groups.values.remove(id);
            }
        };
    }
    batches.clear();
    // Clear the sprite instances
    sprite_meta.sprite_instance_buffer.clear();
    // Index buffer indices
    let mut index = 0;
    let image_bind_groups = &mut *image_bind_groups;
    for (retained_view, transparent_phase) in phases.iter_mut() {
        // Batch-building state: the current batch entry plus the image (and
        // its size) that sprites must match to join it.
        let mut current_batch = None;
        let mut batch_item_index = 0;
        let mut batch_image_size = Vec2::ZERO;
        let mut batch_image_handle = AssetId::invalid();
        // Iterate through the phase items and detect when successive sprites that can be batched.
        // Spawn an entity with a `SpriteBatch` component for each possible batch.
        // Compatible items share the same entity.
        for item_index in 0..transparent_phase.items.len() {
            let item = &transparent_phase.items[item_index];
            let Some(extracted_sprite) = extracted_sprites
                .sprites
                .get(item.extracted_index)
                .filter(|extracted_sprite| extracted_sprite.render_entity == item.entity())
            else {
                // If there is a phase item that is not a sprite, then we must start a new
                // batch to draw the other phase item(s) and to respect draw order. This can be
                // done by invalidating the batch_image_handle
                batch_image_handle = AssetId::invalid();
                continue;
            };
            if batch_image_handle != extracted_sprite.image_handle_id {
                // Image changed: look the GPU image up (skip the sprite if it
                // isn't loaded yet) and start a new batch anchored at this item.
                let Some(gpu_image) = gpu_images.get(extracted_sprite.image_handle_id) else {
                    continue;
                };
                batch_image_size = gpu_image.size_2d().as_vec2();
                batch_image_handle = extracted_sprite.image_handle_id;
                image_bind_groups
                    .values
                    .entry(batch_image_handle)
                    .or_insert_with(|| {
                        render_device.create_bind_group(
                            "sprite_material_bind_group",
                            &pipeline_cache.get_bind_group_layout(&sprite_pipeline.material_layout),
                            &BindGroupEntries::sequential((
                                &gpu_image.texture_view,
                                &gpu_image.sampler,
                            )),
                        )
                    });
                batch_item_index = item_index;
                current_batch = Some(batches.entry((*retained_view, item.entity())).insert(
                    SpriteBatch {
                        image_handle_id: batch_image_handle,
                        range: index..index,
                    },
                ));
            }
            match extracted_sprite.kind {
                ExtractedSpriteKind::Single {
                    anchor,
                    rect,
                    scaling_mode,
                    custom_size,
                } => {
                    // By default, the size of the quad is the size of the texture
                    let mut quad_size = batch_image_size;
                    let mut texture_size = batch_image_size;
                    // Calculate vertex data for this item
                    // If a rect is specified, adjust UVs and the size of the quad
                    let mut uv_offset_scale = if let Some(rect) = rect {
                        let rect_size = rect.size();
                        quad_size = rect_size;
                        // Update texture size to the rect size
                        // It will help scale properly only portion of the image
                        texture_size = rect_size;
                        Vec4::new(
                            rect.min.x / batch_image_size.x,
                            rect.max.y / batch_image_size.y,
                            rect_size.x / batch_image_size.x,
                            -rect_size.y / batch_image_size.y,
                        )
                    } else {
                        Vec4::new(0.0, 1.0, 1.0, -1.0)
                    };
                    // Flipping mirrors the UV window by shifting the offset and
                    // negating the scale on the corresponding axis.
                    if extracted_sprite.flip_x {
                        uv_offset_scale.x += uv_offset_scale.z;
                        uv_offset_scale.z *= -1.0;
                    }
                    if extracted_sprite.flip_y {
                        uv_offset_scale.y += uv_offset_scale.w;
                        uv_offset_scale.w *= -1.0;
                    }
                    // Override the size if a custom one is specified
                    quad_size = custom_size.unwrap_or(quad_size);
                    // Used for translation of the quad if `TextureScale::Fit...` is specified.
                    let mut quad_translation = Vec2::ZERO;
                    // Scales the texture based on the `texture_scale` field.
                    if let Some(scaling_mode) = scaling_mode {
                        apply_scaling(
                            scaling_mode,
                            texture_size,
                            &mut quad_size,
                            &mut quad_translation,
                            &mut uv_offset_scale,
                        );
                    }
                    let transform = extracted_sprite.transform.affine()
                        * Affine3A::from_scale_rotation_translation(
                            quad_size.extend(1.0),
                            Quat::IDENTITY,
                            ((quad_size + quad_translation) * (-anchor - Vec2::splat(0.5)))
                                .extend(0.0),
                        );
                    // Store the vertex data and add the item to the render phase
                    sprite_meta
                        .sprite_instance_buffer
                        .push(SpriteInstance::from(
                            &transform,
                            &extracted_sprite.color,
                            &uv_offset_scale,
                        ));
                    current_batch.as_mut().unwrap().get_mut().range.end += 1;
                    index += 1;
                }
                ExtractedSpriteKind::Slices { ref indices } => {
                    // One instance per slice; all slices share the sprite's
                    // image, color, flips, and transform.
                    for i in indices.clone() {
                        let slice = &extracted_slices.slices[i];
                        let rect = slice.rect;
                        let rect_size = rect.size();
                        // Calculate vertex data for this item
                        let mut uv_offset_scale: Vec4;
                        // If a rect is specified, adjust UVs and the size of the quad
                        uv_offset_scale = Vec4::new(
                            rect.min.x / batch_image_size.x,
                            rect.max.y / batch_image_size.y,
                            rect_size.x / batch_image_size.x,
                            -rect_size.y / batch_image_size.y,
                        );
                        if extracted_sprite.flip_x {
                            uv_offset_scale.x += uv_offset_scale.z;
                            uv_offset_scale.z *= -1.0;
                        }
                        if extracted_sprite.flip_y {
                            uv_offset_scale.y += uv_offset_scale.w;
                            uv_offset_scale.w *= -1.0;
                        }
                        let transform = extracted_sprite.transform.affine()
                            * Affine3A::from_scale_rotation_translation(
                                slice.size.extend(1.0),
                                Quat::IDENTITY,
                                (slice.size * -Vec2::splat(0.5) + slice.offset).extend(0.0),
                            );
                        // Store the vertex data and add the item to the render phase
                        sprite_meta
                            .sprite_instance_buffer
                            .push(SpriteInstance::from(
                                &transform,
                                &extracted_sprite.color,
                                &uv_offset_scale,
                            ));
                        current_batch.as_mut().unwrap().get_mut().range.end += 1;
                        index += 1;
                    }
                }
            }
            // Grow the batch-anchoring phase item's range to cover this item.
            transparent_phase.items[batch_item_index]
                .batch_range_mut()
                .end += 1;
        }
        // NOTE(review): the instance buffer is (re)written once per view here,
        // inside the phase loop — presumably intentional, but worth confirming.
        sprite_meta
            .sprite_instance_buffer
            .write_buffer(&render_device, &render_queue);
        if sprite_meta.sprite_index_buffer.len() != 6 {
            sprite_meta.sprite_index_buffer.clear();
            // NOTE: This code is creating 6 indices pointing to 4 vertices.
            // The vertices form the corners of a quad based on their two least significant bits.
            // 10 11
            //
            // 00 01
            // The sprite shader can then use the two least significant bits as the vertex index.
            // The rest of the properties to transform the vertex positions and UVs (which are
            // implicit) are baked into the instance transform, and UV offset and scale.
            // See bevy_sprite_render/src/render/sprite.wgsl for the details.
            sprite_meta.sprite_index_buffer.push(2);
            sprite_meta.sprite_index_buffer.push(0);
            sprite_meta.sprite_index_buffer.push(1);
            sprite_meta.sprite_index_buffer.push(1);
            sprite_meta.sprite_index_buffer.push(3);
            sprite_meta.sprite_index_buffer.push(2);
            sprite_meta
                .sprite_index_buffer
                .write_buffer(&render_device, &render_queue);
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/texture_slice/computed_slices.rs | crates/bevy_sprite_render/src/texture_slice/computed_slices.rs | use crate::{ExtractedSlice, TextureAtlasLayout};
use bevy_asset::{AssetEvent, Assets};
use bevy_ecs::prelude::*;
use bevy_image::Image;
use bevy_math::{Rect, Vec2};
use bevy_platform::collections::HashSet;
use bevy_sprite::{Sprite, SpriteImageMode, TextureSlice};
/// Component storing texture slices for tiled or sliced sprite entities
///
/// This component is automatically inserted and updated
#[derive(Debug, Clone, Component)]
pub struct ComputedTextureSlices(Vec<TextureSlice>);
impl ComputedTextureSlices {
/// Computes [`ExtractedSlice`] iterator from the sprite slices
///
/// # Arguments
///
/// * `sprite` - The sprite component
#[must_use]
pub(crate) fn extract_slices<'a>(
&'a self,
sprite: &'a Sprite,
anchor: Vec2,
) -> impl ExactSizeIterator<Item = ExtractedSlice> + 'a {
let mut flip = Vec2::ONE;
if sprite.flip_x {
flip.x *= -1.0;
}
if sprite.flip_y {
flip.y *= -1.0;
}
let anchor = anchor
* sprite
.custom_size
.unwrap_or(sprite.rect.unwrap_or_default().size());
self.0.iter().map(move |slice| ExtractedSlice {
offset: slice.offset * flip - anchor,
rect: slice.texture_rect,
size: slice.draw_size,
})
}
}
/// Generates sprite slices for a [`Sprite`] with [`SpriteImageMode::Sliced`] or [`SpriteImageMode::Sliced`]. The slices
/// will be computed according to the `image_handle` dimensions or the sprite rect.
///
/// Returns `None` if the image asset is not loaded
///
/// # Arguments
///
/// * `sprite` - The sprite component with the image handle and image mode
/// * `images` - The image assets, use to retrieve the image dimensions
/// * `atlas_layouts` - The atlas layout assets, used to retrieve the texture atlas section rect
#[must_use]
fn compute_sprite_slices(
sprite: &Sprite,
images: &Assets<Image>,
atlas_layouts: &Assets<TextureAtlasLayout>,
) -> Option<ComputedTextureSlices> {
let (image_size, texture_rect) = match &sprite.texture_atlas {
Some(a) => {
let layout = atlas_layouts.get(&a.layout)?;
(
layout.size.as_vec2(),
layout.textures.get(a.index)?.as_rect(),
)
}
None => {
let image = images.get(&sprite.image)?;
let size = Vec2::new(
image.texture_descriptor.size.width as f32,
image.texture_descriptor.size.height as f32,
);
let rect = sprite.rect.unwrap_or(Rect {
min: Vec2::ZERO,
max: size,
});
(size, rect)
}
};
let slices = match &sprite.image_mode {
SpriteImageMode::Sliced(slicer) => slicer.compute_slices(texture_rect, sprite.custom_size),
SpriteImageMode::Tiled {
tile_x,
tile_y,
stretch_value,
} => {
let slice = TextureSlice {
texture_rect,
draw_size: sprite.custom_size.unwrap_or(image_size),
offset: Vec2::ZERO,
};
slice.tiled(*stretch_value, (*tile_x, *tile_y))
}
SpriteImageMode::Auto => {
unreachable!("Slices should not be computed for SpriteImageMode::Stretch")
}
SpriteImageMode::Scale(_) => {
unreachable!("Slices should not be computed for SpriteImageMode::Scale")
}
};
Some(ComputedTextureSlices(slices))
}
/// System reacting to added or modified [`Image`] handles, and recompute sprite slices
/// on sprite entities with a matching [`SpriteImageMode`]
pub(crate) fn compute_slices_on_asset_event(
mut commands: Commands,
mut events: MessageReader<AssetEvent<Image>>,
images: Res<Assets<Image>>,
atlas_layouts: Res<Assets<TextureAtlasLayout>>,
sprites: Query<(Entity, &Sprite)>,
) {
// We store the asset ids of added/modified image assets
let added_handles: HashSet<_> = events
.read()
.filter_map(|e| match e {
AssetEvent::Added { id } | AssetEvent::Modified { id } => Some(*id),
_ => None,
})
.collect();
if added_handles.is_empty() {
return;
}
// We recompute the sprite slices for sprite entities with a matching asset handle id
for (entity, sprite) in &sprites {
if !sprite.image_mode.uses_slices() {
continue;
}
if !added_handles.contains(&sprite.image.id()) {
continue;
}
if let Some(slices) = compute_sprite_slices(sprite, &images, &atlas_layouts) {
commands.entity(entity).insert(slices);
}
}
}
/// System reacting to changes on the [`Sprite`] component to compute the sprite slices
pub(crate) fn compute_slices_on_sprite_change(
mut commands: Commands,
images: Res<Assets<Image>>,
atlas_layouts: Res<Assets<TextureAtlasLayout>>,
changed_sprites: Query<(Entity, &Sprite), Changed<Sprite>>,
) {
for (entity, sprite) in &changed_sprites {
if !sprite.image_mode.uses_slices() {
continue;
}
if let Some(slices) = compute_sprite_slices(sprite, &images, &atlas_layouts) {
commands.entity(entity).insert(slices);
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/texture_slice/mod.rs | crates/bevy_sprite_render/src/texture_slice/mod.rs | mod computed_slices;
pub(crate) use computed_slices::{
compute_slices_on_asset_event, compute_slices_on_sprite_change, ComputedTextureSlices,
};
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/tilemap_chunk/tilemap_chunk_material.rs | crates/bevy_sprite_render/src/tilemap_chunk/tilemap_chunk_material.rs | use crate::{AlphaMode2d, Material2d, Material2dPlugin, TileData};
use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, embedded_path, Asset, AssetPath, Handle, RenderAssetUsages};
use bevy_color::ColorToPacked;
use bevy_image::{Image, ImageSampler, ToExtents};
use bevy_math::UVec2;
use bevy_reflect::prelude::*;
use bevy_render::render_resource::*;
use bevy_shader::ShaderRef;
use bytemuck::{Pod, Zeroable};
/// Plugin that adds support for tilemap chunk materials.
pub struct TilemapChunkMaterialPlugin;
impl Plugin for TilemapChunkMaterialPlugin {
fn build(&self, app: &mut App) {
embedded_asset!(app, "tilemap_chunk_material.wgsl");
app.add_plugins(Material2dPlugin::<TilemapChunkMaterial>::default());
}
}
/// Material used for rendering tilemap chunks.
///
/// This material is used internally by the tilemap system to render chunks of tiles
/// efficiently using a single draw call per chunk.
#[derive(Asset, TypePath, AsBindGroup, Debug, Clone)]
pub struct TilemapChunkMaterial {
pub alpha_mode: AlphaMode2d,
#[texture(0, dimension = "2d_array")]
#[sampler(1)]
pub tileset: Handle<Image>,
#[texture(2, sample_type = "u_int")]
pub tile_data: Handle<Image>,
}
impl Material2d for TilemapChunkMaterial {
fn fragment_shader() -> ShaderRef {
ShaderRef::Path(
AssetPath::from_path_buf(embedded_path!("tilemap_chunk_material.wgsl"))
.with_source("embedded"),
)
}
fn alpha_mode(&self) -> AlphaMode2d {
self.alpha_mode
}
}
/// Packed per-tile data for use in the `Rgba16Uint` tile data texture in `TilemapChunkMaterial`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod, Zeroable)]
pub struct PackedTileData {
tileset_index: u16, // red channel
color: [u8; 4], // green and blue channels
flags: u16, // alpha channel
}
impl PackedTileData {
fn empty() -> Self {
Self {
tileset_index: u16::MAX,
color: [0, 0, 0, 0],
flags: 0,
}
}
}
impl From<TileData> for PackedTileData {
fn from(
TileData {
tileset_index,
color,
visible,
}: TileData,
) -> Self {
Self {
tileset_index,
color: color.to_srgba().to_u8_array(),
flags: visible as u16,
}
}
}
impl From<Option<TileData>> for PackedTileData {
fn from(maybe_tile_data: Option<TileData>) -> Self {
maybe_tile_data
.map(Into::into)
.unwrap_or(PackedTileData::empty())
}
}
pub fn make_chunk_tile_data_image(size: &UVec2, data: &[PackedTileData]) -> Image {
Image {
data: Some(bytemuck::cast_slice(data).to_vec()),
data_order: TextureDataOrder::default(),
texture_descriptor: TextureDescriptor {
size: size.to_extents(),
dimension: TextureDimension::D2,
format: TextureFormat::Rgba16Uint,
label: None,
mip_level_count: 1,
sample_count: 1,
usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST,
view_formats: &[],
},
sampler: ImageSampler::nearest(),
texture_view_descriptor: None,
asset_usage: RenderAssetUsages::RENDER_WORLD | RenderAssetUsages::MAIN_WORLD,
copy_on_resize: false,
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_sprite_render/src/tilemap_chunk/mod.rs | crates/bevy_sprite_render/src/tilemap_chunk/mod.rs | use crate::{AlphaMode2d, MeshMaterial2d};
use bevy_app::{App, Plugin, Update};
use bevy_asset::{Assets, Handle};
use bevy_color::Color;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
lifecycle::HookContext,
query::Changed,
reflect::{ReflectComponent, ReflectResource},
resource::Resource,
system::{Query, ResMut},
world::DeferredWorld,
};
use bevy_image::Image;
use bevy_math::{primitives::Rectangle, UVec2};
use bevy_mesh::{Mesh, Mesh2d};
use bevy_platform::collections::HashMap;
use bevy_reflect::{prelude::*, Reflect};
use bevy_transform::components::Transform;
use bevy_utils::default;
use tracing::warn;
mod tilemap_chunk_material;
pub use tilemap_chunk_material::*;
/// Plugin that handles the initialization and updating of tilemap chunks.
/// Adds systems for processing newly added tilemap chunks and updating their indices.
pub struct TilemapChunkPlugin;
impl Plugin for TilemapChunkPlugin {
fn build(&self, app: &mut App) {
app.init_resource::<TilemapChunkMeshCache>()
.add_systems(Update, update_tilemap_chunk_indices);
}
}
/// A resource storing the meshes for each tilemap chunk size.
#[derive(Resource, Default, Deref, DerefMut, Reflect)]
#[reflect(Resource, Default)]
pub struct TilemapChunkMeshCache(HashMap<UVec2, Handle<Mesh>>);
/// A component representing a chunk of a tilemap.
/// Each chunk is a rectangular section of tiles that is rendered as a single mesh.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Clone, Debug, Default)]
#[component(immutable, on_insert = on_insert_tilemap_chunk)]
pub struct TilemapChunk {
/// The size of the chunk in tiles.
pub chunk_size: UVec2,
/// The size to use for each tile, not to be confused with the size of a tile in the tileset image.
/// The size of the tile in the tileset image is determined by the tileset image's dimensions.
pub tile_display_size: UVec2,
/// Handle to the tileset image containing all tile textures.
pub tileset: Handle<Image>,
/// The alpha mode to use for the tilemap chunk.
pub alpha_mode: AlphaMode2d,
}
impl TilemapChunk {
pub fn calculate_tile_transform(&self, position: UVec2) -> Transform {
Transform::from_xyz(
// tile position
position.x as f32
// times display size for a tile
* self.tile_display_size.x as f32
// plus 1/2 the tile_display_size to correct the center
+ self.tile_display_size.x as f32 / 2.
// minus 1/2 the tilechunk size, in terms of the tile_display_size,
// to place the 0 at left of tilemapchunk
- self.tile_display_size.x as f32 * self.chunk_size.x as f32 / 2.,
// tile position
position.y as f32
// times display size for a tile
* self.tile_display_size.y as f32
// minus 1/2 the tile_display_size to correct the center
+ self.tile_display_size.y as f32 / 2.
// plus 1/2 the tilechunk size, in terms of the tile_display_size,
// to place the 0 at bottom of tilemapchunk
- self.tile_display_size.y as f32 * self.chunk_size.y as f32 / 2.,
0.,
)
}
}
/// Data for a single tile in the tilemap chunk.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Clone, Debug, Default)]
pub struct TileData {
/// The index of the tile in the corresponding tileset array texture.
pub tileset_index: u16,
/// The color tint of the tile. White leaves the sampled texture color unchanged.
pub color: Color,
/// The visibility of the tile.
pub visible: bool,
}
impl TileData {
/// Creates a new `TileData` with the given tileset index and default values.
pub fn from_tileset_index(tileset_index: u16) -> Self {
Self {
tileset_index,
..default()
}
}
}
impl Default for TileData {
fn default() -> Self {
Self {
tileset_index: 0,
color: Color::WHITE,
visible: true,
}
}
}
/// Component storing the data of tiles within a chunk.
/// Each index corresponds to a specific tile in the tileset. `None` indicates an empty tile.
#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect)]
#[reflect(Component, Clone, Debug)]
pub struct TilemapChunkTileData(pub Vec<Option<TileData>>);
fn on_insert_tilemap_chunk(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) {
let Some(tilemap_chunk) = world.get::<TilemapChunk>(entity) else {
warn!("TilemapChunk not found for tilemap chunk {}", entity);
return;
};
let chunk_size = tilemap_chunk.chunk_size;
let alpha_mode = tilemap_chunk.alpha_mode;
let tileset = tilemap_chunk.tileset.clone();
let Some(tile_data) = world.get::<TilemapChunkTileData>(entity) else {
warn!("TilemapChunkIndices not found for tilemap chunk {}", entity);
return;
};
let expected_tile_data_length = chunk_size.element_product() as usize;
if tile_data.len() != expected_tile_data_length {
warn!(
"Invalid tile data length for tilemap chunk {} of size {}. Expected {}, got {}",
entity,
chunk_size,
expected_tile_data_length,
tile_data.len(),
);
return;
}
let packed_tile_data: Vec<PackedTileData> =
tile_data.0.iter().map(|&tile| tile.into()).collect();
let tile_data_image = make_chunk_tile_data_image(&chunk_size, &packed_tile_data);
let tilemap_chunk_mesh_cache = world.resource::<TilemapChunkMeshCache>();
let mesh_size = chunk_size * tilemap_chunk.tile_display_size;
let mesh = if let Some(mesh) = tilemap_chunk_mesh_cache.get(&mesh_size) {
mesh.clone()
} else {
let mut meshes = world.resource_mut::<Assets<Mesh>>();
meshes.add(Rectangle::from_size(mesh_size.as_vec2()))
};
let mut images = world.resource_mut::<Assets<Image>>();
let tile_data = images.add(tile_data_image);
let mut materials = world.resource_mut::<Assets<TilemapChunkMaterial>>();
let material = materials.add(TilemapChunkMaterial {
tileset,
tile_data,
alpha_mode,
});
world
.commands()
.entity(entity)
.insert((Mesh2d(mesh), MeshMaterial2d(material)));
}
pub fn update_tilemap_chunk_indices(
query: Query<
(
Entity,
&TilemapChunk,
&TilemapChunkTileData,
&MeshMaterial2d<TilemapChunkMaterial>,
),
Changed<TilemapChunkTileData>,
>,
mut materials: ResMut<Assets<TilemapChunkMaterial>>,
mut images: ResMut<Assets<Image>>,
) {
for (chunk_entity, TilemapChunk { chunk_size, .. }, tile_data, material) in query {
let expected_tile_data_length = chunk_size.element_product() as usize;
if tile_data.len() != expected_tile_data_length {
warn!(
"Invalid TilemapChunkTileData length for tilemap chunk {} of size {}. Expected {}, got {}",
chunk_entity,
chunk_size,
tile_data.len(),
expected_tile_data_length
);
continue;
}
let packed_tile_data: Vec<PackedTileData> =
tile_data.0.iter().map(|&tile| tile.into()).collect();
// Getting the material mutably to trigger change detection
let Some(material) = materials.get_mut(material.id()) else {
warn!(
"TilemapChunkMaterial not found for tilemap chunk {}",
chunk_entity
);
continue;
};
let Some(tile_data_image) = images.get_mut(&material.tile_data) else {
warn!(
"TilemapChunkMaterial tile data image not found for tilemap chunk {}",
chunk_entity
);
continue;
};
let Some(data) = tile_data_image.data.as_mut() else {
warn!(
"TilemapChunkMaterial tile data image data not found for tilemap chunk {}",
chunk_entity
);
continue;
};
data.clear();
data.extend_from_slice(bytemuck::cast_slice(&packed_tile_data));
}
}
impl TilemapChunkTileData {
pub fn tile_data_from_tile_pos(
&self,
tilemap_size: UVec2,
position: UVec2,
) -> Option<&TileData> {
self.0
.get(tilemap_size.x as usize * position.y as usize + position.x as usize)
.and_then(|opt| opt.as_ref())
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/lib.rs | crates/bevy_mesh/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
extern crate alloc;
extern crate core;
mod components;
mod conversions;
mod index;
mod mesh;
#[cfg(feature = "bevy_mikktspace")]
mod mikktspace;
#[cfg(feature = "morph")]
pub mod morph;
pub mod primitives;
pub mod skinning;
mod vertex;
use bevy_app::{App, Plugin, PostUpdate};
use bevy_asset::{AssetApp, AssetEventSystems};
use bevy_ecs::schedule::{IntoScheduleConfigs, SystemSet};
use bitflags::bitflags;
pub use components::*;
pub use index::*;
pub use mesh::*;
#[cfg(feature = "bevy_mikktspace")]
pub use mikktspace::*;
pub use primitives::*;
pub use vertex::*;
pub use wgpu_types::VertexFormat;
/// The mesh prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[cfg(feature = "morph")]
pub use crate::morph::MorphWeights;
#[doc(hidden)]
pub use crate::{primitives::MeshBuilder, primitives::Meshable, Mesh, Mesh2d, Mesh3d};
}
bitflags! {
/// Our base mesh pipeline key bits start from the highest bit and go
/// downward. The PBR mesh pipeline key bits start from the lowest bit and
/// go upward. This allows the PBR bits in the downstream crate `bevy_pbr`
/// to coexist in the same field without any shifts.
#[derive(Clone, Debug)]
pub struct BaseMeshPipelineKey: u64 {
const MORPH_TARGETS = 1 << (u64::BITS - 1);
}
}
/// Adds [`Mesh`] as an asset.
#[derive(Default)]
pub struct MeshPlugin;
impl Plugin for MeshPlugin {
fn build(&self, app: &mut App) {
app.init_asset::<Mesh>()
.init_asset::<skinning::SkinnedMeshInverseBindposes>()
.register_asset_reflect::<Mesh>()
.add_systems(
PostUpdate,
mark_3d_meshes_as_changed_if_their_assets_changed.after(AssetEventSystems),
);
}
}
impl BaseMeshPipelineKey {
pub const PRIMITIVE_TOPOLOGY_MASK_BITS: u64 = 0b111;
pub const PRIMITIVE_TOPOLOGY_SHIFT_BITS: u64 =
(u64::BITS - 1 - Self::PRIMITIVE_TOPOLOGY_MASK_BITS.count_ones()) as u64;
pub fn from_primitive_topology(primitive_topology: PrimitiveTopology) -> Self {
let primitive_topology_bits = ((primitive_topology as u64)
& Self::PRIMITIVE_TOPOLOGY_MASK_BITS)
<< Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS;
Self::from_bits_retain(primitive_topology_bits)
}
pub fn primitive_topology(&self) -> PrimitiveTopology {
let primitive_topology_bits = (self.bits() >> Self::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
& Self::PRIMITIVE_TOPOLOGY_MASK_BITS;
match primitive_topology_bits {
x if x == PrimitiveTopology::PointList as u64 => PrimitiveTopology::PointList,
x if x == PrimitiveTopology::LineList as u64 => PrimitiveTopology::LineList,
x if x == PrimitiveTopology::LineStrip as u64 => PrimitiveTopology::LineStrip,
x if x == PrimitiveTopology::TriangleList as u64 => PrimitiveTopology::TriangleList,
x if x == PrimitiveTopology::TriangleStrip as u64 => PrimitiveTopology::TriangleStrip,
_ => PrimitiveTopology::default(),
}
}
}
/// `bevy_render::mesh::inherit_weights` runs in this `SystemSet`
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
pub struct InheritWeightSystems;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/index.rs | crates/bevy_mesh/src/index.rs | use bevy_reflect::Reflect;
use core::iter;
use core::iter::FusedIterator;
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
use thiserror::Error;
use wgpu_types::IndexFormat;
use crate::MeshAccessError;
/// A disjunction of four iterators. This is necessary to have a well-formed type for the output
/// of [`Mesh::triangles`](super::Mesh::triangles), which produces iterators of four different types depending on the
/// branch taken.
pub(crate) enum FourIterators<A, B, C, D> {
First(A),
Second(B),
Third(C),
Fourth(D),
}
impl<A, B, C, D, I> Iterator for FourIterators<A, B, C, D>
where
A: Iterator<Item = I>,
B: Iterator<Item = I>,
C: Iterator<Item = I>,
D: Iterator<Item = I>,
{
type Item = I;
fn next(&mut self) -> Option<Self::Item> {
match self {
FourIterators::First(iter) => iter.next(),
FourIterators::Second(iter) => iter.next(),
FourIterators::Third(iter) => iter.next(),
FourIterators::Fourth(iter) => iter.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self {
FourIterators::First(iter) => iter.size_hint(),
FourIterators::Second(iter) => iter.size_hint(),
FourIterators::Third(iter) => iter.size_hint(),
FourIterators::Fourth(iter) => iter.size_hint(),
}
}
}
/// An error that occurred while trying to invert the winding of a [`Mesh`](super::Mesh).
#[derive(Debug, Error)]
pub enum MeshWindingInvertError {
/// This error occurs when you try to invert the winding for a mesh with [`PrimitiveTopology::PointList`](super::PrimitiveTopology::PointList).
#[error("Mesh winding inversion does not work for primitive topology `PointList`")]
WrongTopology,
/// This error occurs when you try to invert the winding for a mesh with
/// * [`PrimitiveTopology::TriangleList`](super::PrimitiveTopology::TriangleList), but the indices are not in chunks of 3.
/// * [`PrimitiveTopology::LineList`](super::PrimitiveTopology::LineList), but the indices are not in chunks of 2.
#[error("Indices weren't in chunks according to topology")]
AbruptIndicesEnd,
#[error("Mesh access error: {0}")]
MeshAccessError(#[from] MeshAccessError),
}
/// An error that occurred while trying to extract a collection of triangles from a [`Mesh`](super::Mesh).
#[derive(Debug, Error)]
pub enum MeshTrianglesError {
#[error("Source mesh does not have primitive topology TriangleList or TriangleStrip")]
WrongTopology,
#[error("Source mesh position data is not Float32x3")]
PositionsFormat,
#[error("Face index data references vertices that do not exist")]
BadIndices,
#[error("mesh access error: {0}")]
MeshAccessError(#[from] MeshAccessError),
}
/// An array of indices into the [`VertexAttributeValues`](super::VertexAttributeValues) for a mesh.
///
/// It describes the order in which the vertex attributes should be joined into faces.
#[derive(Debug, Clone, Reflect, PartialEq)]
#[reflect(Clone)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum Indices {
U16(Vec<u16>),
U32(Vec<u32>),
}
impl Indices {
/// Returns an iterator over the indices.
pub fn iter(&self) -> impl Iterator<Item = usize> + '_ {
match self {
Indices::U16(vec) => IndicesIter::U16(vec.iter()),
Indices::U32(vec) => IndicesIter::U32(vec.iter()),
}
}
/// Returns the number of indices.
pub fn len(&self) -> usize {
match self {
Indices::U16(vec) => vec.len(),
Indices::U32(vec) => vec.len(),
}
}
/// Returns `true` if there are no indices.
pub fn is_empty(&self) -> bool {
match self {
Indices::U16(vec) => vec.is_empty(),
Indices::U32(vec) => vec.is_empty(),
}
}
/// Add an index. If the index is greater than `u16::MAX`,
/// the storage will be converted to `u32`.
pub fn push(&mut self, index: u32) {
self.extend([index]);
}
}
/// Extend the indices with indices from an iterator.
/// Semantically equivalent to calling [`push`](Indices::push) for each element in the iterator,
/// but more efficient.
impl Extend<u32> for Indices {
fn extend<T: IntoIterator<Item = u32>>(&mut self, iter: T) {
let mut iter = iter.into_iter();
match self {
Indices::U32(indices) => indices.extend(iter),
Indices::U16(indices) => {
indices.reserve(iter.size_hint().0);
while let Some(index) = iter.next() {
match u16::try_from(index) {
Ok(index) => indices.push(index),
Err(_) => {
let new_vec = indices
.iter()
.map(|&index| u32::from(index))
.chain(iter::once(index))
.chain(iter)
.collect::<Vec<u32>>();
*self = Indices::U32(new_vec);
break;
}
}
}
}
}
}
}
/// An Iterator for the [`Indices`].
enum IndicesIter<'a> {
U16(core::slice::Iter<'a, u16>),
U32(core::slice::Iter<'a, u32>),
}
impl Iterator for IndicesIter<'_> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
match self {
IndicesIter::U16(iter) => iter.next().map(|val| *val as usize),
IndicesIter::U32(iter) => iter.next().map(|val| *val as usize),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self {
IndicesIter::U16(iter) => iter.size_hint(),
IndicesIter::U32(iter) => iter.size_hint(),
}
}
}
impl<'a> ExactSizeIterator for IndicesIter<'a> {}
impl<'a> FusedIterator for IndicesIter<'a> {}
impl From<&Indices> for IndexFormat {
fn from(indices: &Indices) -> Self {
match indices {
Indices::U16(_) => IndexFormat::Uint16,
Indices::U32(_) => IndexFormat::Uint32,
}
}
}
#[cfg(test)]
mod tests {
use crate::Indices;
use wgpu_types::IndexFormat;
#[test]
fn test_indices_push() {
let mut indices = Indices::U16(Vec::new());
indices.push(10);
assert_eq!(IndexFormat::Uint16, IndexFormat::from(&indices));
assert_eq!(vec![10], indices.iter().collect::<Vec<_>>());
// Add a value that is too large for `u16` so the storage should be converted to `U32`.
indices.push(0x10000);
assert_eq!(IndexFormat::Uint32, IndexFormat::from(&indices));
assert_eq!(vec![10, 0x10000], indices.iter().collect::<Vec<_>>());
indices.push(20);
indices.push(0x20000);
assert_eq!(IndexFormat::Uint32, IndexFormat::from(&indices));
assert_eq!(
vec![10, 0x10000, 20, 0x20000],
indices.iter().collect::<Vec<_>>()
);
}
#[test]
fn test_indices_extend() {
let mut indices = Indices::U16(Vec::new());
indices.extend([10, 11]);
assert_eq!(IndexFormat::Uint16, IndexFormat::from(&indices));
assert_eq!(vec![10, 11], indices.iter().collect::<Vec<_>>());
// Add a value that is too large for `u16` so the storage should be converted to `U32`.
indices.extend([12, 0x10013, 0x10014]);
assert_eq!(IndexFormat::Uint32, IndexFormat::from(&indices));
assert_eq!(
vec![10, 11, 12, 0x10013, 0x10014],
indices.iter().collect::<Vec<_>>()
);
indices.extend([15, 0x10016]);
assert_eq!(IndexFormat::Uint32, IndexFormat::from(&indices));
assert_eq!(
vec![10, 11, 12, 0x10013, 0x10014, 15, 0x10016],
indices.iter().collect::<Vec<_>>()
);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/skinning.rs | crates/bevy_mesh/src/skinning.rs | use bevy_asset::{AsAssetId, Asset, AssetId, Handle};
use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent};
use bevy_math::Mat4;
use bevy_reflect::prelude::*;
use core::ops::Deref;
#[derive(Component, Debug, Default, Clone, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct SkinnedMesh {
pub inverse_bindposes: Handle<SkinnedMeshInverseBindposes>,
#[entities]
pub joints: Vec<Entity>,
}
impl AsAssetId for SkinnedMesh {
type Asset = SkinnedMeshInverseBindposes;
// We implement this so that `AssetChanged` will work to pick up any changes
// to `SkinnedMeshInverseBindposes`.
fn as_asset_id(&self) -> AssetId<Self::Asset> {
self.inverse_bindposes.id()
}
}
#[derive(Asset, TypePath, Debug)]
pub struct SkinnedMeshInverseBindposes(Box<[Mat4]>);
impl From<Vec<Mat4>> for SkinnedMeshInverseBindposes {
fn from(value: Vec<Mat4>) -> Self {
Self(value.into_boxed_slice())
}
}
impl Deref for SkinnedMeshInverseBindposes {
type Target = [Mat4];
fn deref(&self) -> &Self::Target {
&self.0
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/morph.rs | crates/bevy_mesh/src/morph.rs | use super::Mesh;
use bevy_asset::{Handle, RenderAssetUsages};
use bevy_ecs::prelude::*;
use bevy_image::Image;
use bevy_math::Vec3;
use bevy_reflect::prelude::*;
use bytemuck::{Pod, Zeroable};
use thiserror::Error;
use wgpu_types::{Extent3d, TextureDimension, TextureFormat};
const MAX_TEXTURE_WIDTH: u32 = 2048;
// NOTE: "component" refers to the element count of math objects,
// Vec3 has 3 components, Mat2 has 4 components.
const MAX_COMPONENTS: u32 = MAX_TEXTURE_WIDTH * MAX_TEXTURE_WIDTH;
/// Max target count available for [morph targets](MorphWeights).
pub const MAX_MORPH_WEIGHTS: usize = 256;
#[derive(Error, Clone, Debug)]
pub enum MorphBuildError {
#[error(
"Too many vertex×components in morph target, max is {MAX_COMPONENTS}, \
got {vertex_count}×{component_count} = {}",
*vertex_count * *component_count as usize
)]
TooManyAttributes {
vertex_count: usize,
component_count: u32,
},
#[error(
"Bevy only supports up to {} morph targets (individual poses), tried to \
create a model with {target_count} morph targets",
MAX_MORPH_WEIGHTS
)]
TooManyTargets { target_count: usize },
}
/// An image formatted for use with [`MorphWeights`] for rendering the morph target.
#[derive(Debug)]
pub struct MorphTargetImage(pub Image);
impl MorphTargetImage {
    /// Generate textures for each morph target.
    ///
    /// This accepts an "iterator of [`MorphAttributes`] iterators". Each item iterated in the top level
    /// iterator corresponds to "the attributes of a specific morph target".
    ///
    /// Each pixel of the texture is a component of morph target animated
    /// attributes. So a set of 9 pixels is this morph's displacement for
    /// position, normal and tangents of a single vertex (each taking 3 pixels).
    ///
    /// # Errors
    ///
    /// Returns [`MorphBuildError::TooManyTargets`] if more than [`MAX_MORPH_WEIGHTS`]
    /// targets are supplied, and [`MorphBuildError::TooManyAttributes`] if a single
    /// target's components cannot fit into one `MAX_TEXTURE_WIDTH`-bounded layer.
    pub fn new(
        targets: impl ExactSizeIterator<Item = impl Iterator<Item = MorphAttributes>>,
        vertex_count: usize,
        asset_usage: RenderAssetUsages,
    ) -> Result<Self, MorphBuildError> {
        let max = MAX_TEXTURE_WIDTH;
        let target_count = targets.len();
        if target_count > MAX_MORPH_WEIGHTS {
            return Err(MorphBuildError::TooManyTargets { target_count });
        }
        // Total `f32` components a single target contributes (9 per vertex).
        let component_count = (vertex_count * MorphAttributes::COMPONENT_COUNT) as u32;
        // Smallest texture rectangle able to hold all components.
        let Some((Rect(width, height), _padding)) = lowest_2d(component_count, max) else {
            return Err(MorphBuildError::TooManyAttributes {
                vertex_count,
                component_count,
            });
        };
        let data = targets
            .flat_map(|mut attributes| {
                // Every layer must be exactly `width * height` `f32` texels.
                let layer_byte_count = (width * height) as usize * size_of::<f32>();
                let mut buffer = Vec::with_capacity(layer_byte_count);
                for _ in 0..vertex_count {
                    let Some(to_add) = attributes.next() else {
                        break;
                    };
                    buffer.extend_from_slice(bytemuck::bytes_of(&to_add));
                }
                // Zero-pad the layer up to `width * height` texels. Using `resize`
                // (instead of appending a fixed `padding` amount) also yields a
                // correctly sized layer when `attributes` runs out before
                // `vertex_count` items and the loop above breaks early, which
                // previously produced an undersized layer.
                buffer.resize(layer_byte_count, 0);
                buffer
            })
            .collect();
        let extents = Extent3d {
            width,
            height,
            depth_or_array_layers: target_count as u32,
        };
        let image = Image::new(
            extents,
            TextureDimension::D3,
            data,
            TextureFormat::R32Float,
            asset_usage,
        );
        Ok(MorphTargetImage(image))
    }
}
/// Controls the [morph targets] for all child [`Mesh3d`](crate::Mesh3d) entities. In most cases, [`MorphWeights`] should be considered
/// the "source of truth" when writing morph targets for meshes. However you can choose to write child [`MeshMorphWeights`]
/// if your situation requires more granularity. Just note that if you set [`MorphWeights`], it will overwrite child
/// [`MeshMorphWeights`] values.
///
/// This exists because Bevy's [`Mesh`] corresponds to a _single_ surface / material, whereas morph targets
/// as defined in the GLTF spec exist on "multi-primitive meshes" (where each primitive is its own surface with its own material).
/// Therefore in Bevy [`MorphWeights`] on a parent entity are the "canonical weights" from a GLTF perspective, which are then
/// synchronized to child [`Mesh3d`](crate::Mesh3d) / [`MeshMorphWeights`] (which correspond to "primitives" / "surfaces" from a GLTF perspective).
///
/// Add this to the parent of one or more [`Entities`](`Entity`) with a [`Mesh3d`](crate::Mesh3d) with a [`MeshMorphWeights`].
///
/// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation
#[derive(Reflect, Default, Debug, Clone, Component)]
#[reflect(Debug, Component, Default, Clone)]
pub struct MorphWeights {
    /// One weight per morph target, capped at [`MAX_MORPH_WEIGHTS`] by [`MorphWeights::new`].
    weights: Vec<f32>,
    /// The first mesh primitive assigned to these weights
    first_mesh: Option<Handle<Mesh>>,
}
impl MorphWeights {
    /// Creates a new [`MorphWeights`], validating the number of weights.
    ///
    /// # Errors
    ///
    /// Returns [`MorphBuildError::TooManyTargets`] if `weights` holds more than
    /// [`MAX_MORPH_WEIGHTS`] entries.
    pub fn new(
        weights: Vec<f32>,
        first_mesh: Option<Handle<Mesh>>,
    ) -> Result<Self, MorphBuildError> {
        if weights.len() > MAX_MORPH_WEIGHTS {
            let target_count = weights.len();
            return Err(MorphBuildError::TooManyTargets { target_count });
        }
        Ok(MorphWeights {
            weights,
            first_mesh,
        })
    }
    /// The first child [`Mesh3d`](crate::Mesh3d) primitive controlled by these weights.
    /// This can be used to look up metadata information such as [`Mesh::morph_target_names`].
    pub fn first_mesh(&self) -> Option<&Handle<Mesh>> {
        self.first_mesh.as_ref()
    }
    /// The morph target weights, one per target.
    pub fn weights(&self) -> &[f32] {
        &self.weights
    }
    /// Mutable access to the morph target weights.
    pub fn weights_mut(&mut self) -> &mut [f32] {
        &mut self.weights
    }
}
/// Control a specific [`Mesh`] instance's [morph targets]. These control the weights of
/// specific "mesh primitives" in scene formats like GLTF. They can be set manually, but
/// in most cases they should be "automatically" synced by setting the [`MorphWeights`] component
/// on a parent entity.
///
/// See [`MorphWeights`] for more details on Bevy's morph target implementation.
///
/// Add this to an [`Entity`] with a [`Mesh3d`](crate::Mesh3d) with a [`MorphAttributes`] set
/// to control individual weights of each morph target.
///
/// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation
#[derive(Reflect, Default, Debug, Clone, Component)]
#[reflect(Debug, Component, Default, Clone)]
pub struct MeshMorphWeights {
    /// One weight per morph target, capped at [`MAX_MORPH_WEIGHTS`] by [`MeshMorphWeights::new`].
    weights: Vec<f32>,
}
impl MeshMorphWeights {
    /// Creates a new [`MeshMorphWeights`], validating the number of weights.
    ///
    /// # Errors
    ///
    /// Returns [`MorphBuildError::TooManyTargets`] if `weights` holds more than
    /// [`MAX_MORPH_WEIGHTS`] entries.
    pub fn new(weights: Vec<f32>) -> Result<Self, MorphBuildError> {
        let target_count = weights.len();
        if target_count > MAX_MORPH_WEIGHTS {
            return Err(MorphBuildError::TooManyTargets { target_count });
        }
        Ok(Self { weights })
    }
    /// The morph target weights, one per target.
    pub fn weights(&self) -> &[f32] {
        self.weights.as_slice()
    }
    /// Mutable access to the morph target weights.
    pub fn weights_mut(&mut self) -> &mut [f32] {
        self.weights.as_mut_slice()
    }
    /// Removes all weights.
    pub fn clear_weights(&mut self) {
        self.weights.clear();
    }
    /// Appends the given weights to the existing ones.
    pub fn extend_weights(&mut self, weights: &[f32]) {
        self.weights.extend_from_slice(weights);
    }
}
/// Attributes **differences** used for morph targets.
///
/// See [`MorphTargetImage`] for more information.
// `Pod`/`Zeroable` + `repr(C)` allow the struct to be reinterpreted as raw
// bytes via `bytemuck` when building the morph target texture.
#[derive(Copy, Clone, PartialEq, Pod, Zeroable, Default)]
#[repr(C)]
pub struct MorphAttributes {
    /// The vertex position difference between base mesh and this target.
    pub position: Vec3,
    /// The vertex normal difference between base mesh and this target.
    pub normal: Vec3,
    /// The vertex tangent difference between base mesh and this target.
    ///
    /// Note that tangents are a `Vec4`, but only the `xyz` components are
    /// animated, as the `w` component is the sign and cannot be animated.
    pub tangent: Vec3,
}
impl From<[Vec3; 3]> for MorphAttributes {
fn from([position, normal, tangent]: [Vec3; 3]) -> Self {
MorphAttributes {
position,
normal,
tangent,
}
}
}
impl MorphAttributes {
    /// How many components `MorphAttributes` has.
    ///
    /// Each `Vec3` has 3 components, we have 3 `Vec3`, for a total of 9.
    pub const COMPONENT_COUNT: usize = 9;
    /// Creates a new [`MorphAttributes`] from position, normal and tangent deltas.
    pub fn new(position: Vec3, normal: Vec3, tangent: Vec3) -> Self {
        MorphAttributes {
            position,
            normal,
            tangent,
        }
    }
}
/// A `width × height` rectangle of texture cells.
struct Rect(u32, u32);
/// Finds the smallest rectangle, with neither edge exceeding `max_edge`, that
/// contains at least `min_includes` cells. The returned `u32` is the number of
/// spare cells the rectangle has beyond `min_includes`.
///
/// For example, for `min_includes = 27` and `max_edge = 9`, a `3 × 9` rectangle
/// holds exactly 27 cells with no spares.
///
/// Returns `None` if `max_edge` is too small to build a rectangle
/// containing `min_includes` cells.
fn lowest_2d(min_includes: u32, max_edge: u32) -> Option<(Rect, u32)> {
    let mut best: Option<(Rect, u32)> = None;
    for width in 1..=max_edge {
        // Smallest height that reaches `min_includes` cells at this width.
        let height = min_includes.div_ceil(width);
        if height > max_edge {
            continue;
        }
        let Some(spare) = (width * height).checked_sub(min_includes) else {
            continue;
        };
        // Keep the earliest candidate with the fewest spare cells, matching
        // `Iterator::min_by_key` first-minimum semantics.
        if best.as_ref().map_or(true, |(_, s)| spare < *s) {
            best = Some((Rect(width, height), spare));
        }
    }
    best
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/mesh.rs | crates/bevy_mesh/src/mesh.rs | use bevy_transform::components::Transform;
pub use wgpu_types::PrimitiveTopology;
use super::{
triangle_area_normal, triangle_normal, FourIterators, Indices, MeshAttributeData,
MeshTrianglesError, MeshVertexAttribute, MeshVertexAttributeId, MeshVertexBufferLayout,
MeshVertexBufferLayoutRef, MeshVertexBufferLayouts, MeshWindingInvertError,
VertexAttributeValues, VertexBufferLayout,
};
#[cfg(feature = "serialize")]
use crate::SerializedMeshAttributeData;
use alloc::collections::BTreeMap;
#[cfg(feature = "morph")]
use bevy_asset::Handle;
use bevy_asset::{Asset, RenderAssetUsages};
#[cfg(feature = "morph")]
use bevy_image::Image;
use bevy_math::{bounding::Aabb3d, primitives::Triangle3d, *};
#[cfg(feature = "serialize")]
use bevy_platform::collections::HashMap;
use bevy_reflect::Reflect;
use bytemuck::cast_slice;
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tracing::warn;
use wgpu_types::{VertexAttribute, VertexFormat, VertexStepMode};
// NOTE(review): these appear to be sub-asset/buffer identifiers used when the
// mesh data is extracted for rendering — confirm against the render asset code.
pub const INDEX_BUFFER_ASSET_INDEX: u64 = 0;
pub const VERTEX_ATTRIBUTE_BUFFER_ID: u64 = 10;
/// Error from accessing mesh vertex attributes or indices
#[derive(Error, Debug, Clone)]
pub enum MeshAccessError {
    /// The data is no longer available in the main world because it was moved
    /// out during render-world extraction.
    #[error("The mesh vertex/index data has been extracted to the RenderWorld (via `Mesh::asset_usage`)")]
    ExtractedToRenderWorld,
    /// The requested attribute or index data is not set on this mesh.
    #[error("The requested mesh data wasn't found in this mesh")]
    NotFound,
}
// Panic message used by the infallible accessors (e.g. `Mesh::attribute`) when
// the underlying data has already been extracted to the RenderWorld.
const MESH_EXTRACTED_ERROR: &str = "Mesh has been extracted to RenderWorld. To access vertex attributes, the mesh `asset_usage` must include `MAIN_WORLD`";
// storage for extractable data with access methods which return errors if the
// contents have already been extracted
#[derive(Debug, Clone, PartialEq, Reflect, Default)]
enum MeshExtractableData<T> {
    // data is present and accessible in the main world
    Data(T),
    // no data was ever set
    #[default]
    NoData,
    // data was moved to the render world and is no longer accessible here
    ExtractedToRenderWorld,
}
impl<T> MeshExtractableData<T> {
// get a reference to internal data. returns error if data has been extracted, or if no
// data exists
fn as_ref(&self) -> Result<&T, MeshAccessError> {
match self {
MeshExtractableData::Data(data) => Ok(data),
MeshExtractableData::NoData => Err(MeshAccessError::NotFound),
MeshExtractableData::ExtractedToRenderWorld => {
Err(MeshAccessError::ExtractedToRenderWorld)
}
}
}
// get an optional reference to internal data. returns error if data has been extracted
fn as_ref_option(&self) -> Result<Option<&T>, MeshAccessError> {
match self {
MeshExtractableData::Data(data) => Ok(Some(data)),
MeshExtractableData::NoData => Ok(None),
MeshExtractableData::ExtractedToRenderWorld => {
Err(MeshAccessError::ExtractedToRenderWorld)
}
}
}
// get a mutable reference to internal data. returns error if data has been extracted,
// or if no data exists
fn as_mut(&mut self) -> Result<&mut T, MeshAccessError> {
match self {
MeshExtractableData::Data(data) => Ok(data),
MeshExtractableData::NoData => Err(MeshAccessError::NotFound),
MeshExtractableData::ExtractedToRenderWorld => {
Err(MeshAccessError::ExtractedToRenderWorld)
}
}
}
// get an optional mutable reference to internal data. returns error if data has been extracted
fn as_mut_option(&mut self) -> Result<Option<&mut T>, MeshAccessError> {
match self {
MeshExtractableData::Data(data) => Ok(Some(data)),
MeshExtractableData::NoData => Ok(None),
MeshExtractableData::ExtractedToRenderWorld => {
Err(MeshAccessError::ExtractedToRenderWorld)
}
}
}
// extract data and replace self with `ExtractedToRenderWorld`. returns error if
// data has been extracted
fn extract(&mut self) -> Result<MeshExtractableData<T>, MeshAccessError> {
match core::mem::replace(self, MeshExtractableData::ExtractedToRenderWorld) {
MeshExtractableData::ExtractedToRenderWorld => {
Err(MeshAccessError::ExtractedToRenderWorld)
}
not_extracted => Ok(not_extracted),
}
}
// replace internal data. returns the existing data, or an error if data has been extracted
fn replace(
&mut self,
data: impl Into<MeshExtractableData<T>>,
) -> Result<Option<T>, MeshAccessError> {
match core::mem::replace(self, data.into()) {
MeshExtractableData::ExtractedToRenderWorld => {
*self = MeshExtractableData::ExtractedToRenderWorld;
Err(MeshAccessError::ExtractedToRenderWorld)
}
MeshExtractableData::Data(t) => Ok(Some(t)),
MeshExtractableData::NoData => Ok(None),
}
}
}
impl<T> From<Option<T>> for MeshExtractableData<T> {
    // `Some(v)` becomes `Data(v)`, `None` becomes `NoData`.
    fn from(value: Option<T>) -> Self {
        value.map_or(MeshExtractableData::NoData, MeshExtractableData::Data)
    }
}
/// A 3D object made out of vertices representing triangles, lines, or points,
/// with "attribute" values for each vertex.
///
/// Meshes can be automatically generated by a bevy `AssetLoader` (generally by loading a `Gltf` file),
/// or by converting a [primitive](bevy_math::primitives) using [`into`](Into).
/// It is also possible to create one manually. They can be edited after creation.
///
/// Meshes can be rendered with a [`Mesh2d`](crate::Mesh2d) and `MeshMaterial2d`
/// or [`Mesh3d`](crate::Mesh3d) and `MeshMaterial3d` for 2D and 3D respectively.
///
/// A [`Mesh`] in Bevy is equivalent to a "primitive" in the glTF format, for a
/// glTF Mesh representation, see `GltfMesh`.
///
/// ## Manual creation
///
/// The following function will construct a flat mesh, to be rendered with a
/// `StandardMaterial` or `ColorMaterial`:
///
/// ```
/// # use bevy_mesh::{Mesh, Indices, PrimitiveTopology};
/// # use bevy_asset::RenderAssetUsages;
/// fn create_simple_parallelogram() -> Mesh {
/// // Create a new mesh using a triangle list topology, where each set of 3 vertices composes a triangle.
/// Mesh::new(PrimitiveTopology::TriangleList, RenderAssetUsages::default())
/// // Add 4 vertices, each with its own position attribute (coordinate in
/// // 3D space), for each of the corners of the parallelogram.
/// .with_inserted_attribute(
/// Mesh::ATTRIBUTE_POSITION,
/// vec![[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [2.0, 2.0, 0.0], [1.0, 0.0, 0.0]]
/// )
/// // Assign a UV coordinate to each vertex.
/// .with_inserted_attribute(
/// Mesh::ATTRIBUTE_UV_0,
/// vec![[0.0, 1.0], [0.5, 0.0], [1.0, 0.0], [0.5, 1.0]]
/// )
/// // Assign normals (everything points outwards)
/// .with_inserted_attribute(
/// Mesh::ATTRIBUTE_NORMAL,
/// vec![[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]
/// )
/// // After defining all the vertices and their attributes, build each triangle using the
/// // indices of the vertices that make it up in a counter-clockwise order.
/// .with_inserted_indices(Indices::U32(vec![
/// // First triangle
/// 0, 3, 1,
/// // Second triangle
/// 1, 3, 2
/// ]))
/// }
/// ```
///
/// You can see what it looks like [here](https://github.com/bevyengine/bevy/blob/main/assets/docs/Mesh.png),
/// used in a [`Mesh3d`](crate::Mesh3d) with a square bevy logo texture, with added axis, points,
/// lines and text for clarity.
///
/// ## Other examples
///
/// For further visualization, explanation, and examples, see the built-in Bevy examples,
/// and the [implementation of the built-in shapes](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mesh/src/primitives).
/// In particular, [generate_custom_mesh](https://github.com/bevyengine/bevy/blob/main/examples/3d/generate_custom_mesh.rs)
/// teaches you to access and modify the attributes of a [`Mesh`] after creating it.
///
/// ## Common points of confusion
///
/// - UV maps in Bevy start at the top-left, see [`ATTRIBUTE_UV_0`](Mesh::ATTRIBUTE_UV_0),
/// other APIs can have other conventions, `OpenGL` starts at bottom-left.
/// - It is possible and sometimes useful for multiple vertices to have the same
/// [position attribute](Mesh::ATTRIBUTE_POSITION) value,
/// it's a common technique in 3D modeling for complex UV mapping or other calculations.
/// - Bevy performs frustum culling based on the `Aabb` of meshes, which is calculated
/// and added automatically for new meshes only. If a mesh is modified, the entity's `Aabb`
/// needs to be updated manually or deleted so that it is re-calculated.
///
/// ## Use with `StandardMaterial`
///
/// To render correctly with `StandardMaterial`, a mesh needs to have properly defined:
/// - [`UVs`](Mesh::ATTRIBUTE_UV_0): Bevy needs to know how to map a texture onto the mesh
/// (also true for `ColorMaterial`).
/// - [`Normals`](Mesh::ATTRIBUTE_NORMAL): Bevy needs to know how light interacts with your mesh.
/// [0.0, 0.0, 1.0] is very common for simple flat meshes on the XY plane,
/// because simple meshes are smooth and they don't require complex light calculations.
/// - Vertex winding order: by default, `StandardMaterial.cull_mode` is `Some(Face::Back)`,
/// which means that Bevy would *only* render the "front" of each triangle, which
/// is the side of the triangle from where the vertices appear in a *counter-clockwise* order.
///
/// ## Remote Inspection
///
/// To transmit a [`Mesh`] between two running Bevy apps, e.g. through BRP, use [`SerializedMesh`].
/// This type is only meant for short-term transmission between same versions and should not be stored anywhere.
#[derive(Asset, Debug, Clone, Reflect, PartialEq)]
#[reflect(Clone)]
pub struct Mesh {
    #[reflect(ignore, clone)]
    primitive_topology: PrimitiveTopology,
    /// `std::collections::BTreeMap` with all defined vertex attributes (Positions, Normals, ...)
    /// for this mesh. Attribute ids to attribute values.
    /// Uses a [`BTreeMap`] because, unlike `HashMap`, it has a defined iteration order,
    /// which allows easy stable `VertexBuffers` (i.e. same buffer order)
    #[reflect(ignore, clone)]
    attributes: MeshExtractableData<BTreeMap<MeshVertexAttributeId, MeshAttributeData>>,
    /// The vertex indices, if any; see [`Mesh::insert_indices`].
    indices: MeshExtractableData<Indices>,
    /// Handle to the morph target image, if this mesh has morph targets.
    #[cfg(feature = "morph")]
    morph_targets: MeshExtractableData<Handle<Image>>,
    /// Names of the morph targets, if any.
    #[cfg(feature = "morph")]
    morph_target_names: MeshExtractableData<Vec<String>>,
    /// Controls whether the mesh data stays available in the main world after
    /// render-world extraction; see [`MeshAccessError::ExtractedToRenderWorld`].
    pub asset_usage: RenderAssetUsages,
    /// Whether or not to build a BLAS for use with `bevy_solari` raytracing.
    ///
    /// Note that this is _not_ whether the mesh is _compatible_ with `bevy_solari` raytracing.
    /// This field just controls whether or not a BLAS gets built for this mesh, assuming that
    /// the mesh is compatible.
    ///
    /// The use case for this field is using lower-resolution proxy meshes for raytracing (to save on BLAS memory usage),
    /// while using higher-resolution meshes for raster. You can set this field to true for the lower-resolution proxy mesh,
    /// and to false for the high-resolution raster mesh.
    ///
    /// Alternatively, you can use the same mesh for both raster and raytracing, with this field set to true.
    ///
    /// Does nothing if not used with `bevy_solari`, or if the mesh is not compatible
    /// with `bevy_solari` (see `bevy_solari`'s docs).
    pub enable_raytracing: bool,
    /// Precomputed min and max extents of the mesh position data. Used mainly for constructing `Aabb`s for frustum culling.
    /// This data will be set if/when a mesh is extracted to the GPU
    pub final_aabb: Option<Aabb3d>,
}
impl Mesh {
/// Where the vertex is located in space. Use in conjunction with [`Mesh::insert_attribute`]
/// or [`Mesh::with_inserted_attribute`].
///
/// The format of this attribute is [`VertexFormat::Float32x3`].
pub const ATTRIBUTE_POSITION: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Position", 0, VertexFormat::Float32x3);
/// The direction the vertex normal is facing in.
/// Use in conjunction with [`Mesh::insert_attribute`] or [`Mesh::with_inserted_attribute`].
///
/// The format of this attribute is [`VertexFormat::Float32x3`].
pub const ATTRIBUTE_NORMAL: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Normal", 1, VertexFormat::Float32x3);
/// Texture coordinates for the vertex. Use in conjunction with [`Mesh::insert_attribute`]
/// or [`Mesh::with_inserted_attribute`].
///
/// Generally `[0.,0.]` is mapped to the top left of the texture, and `[1.,1.]` to the bottom-right.
///
/// By default values outside will be clamped per pixel not for the vertex,
/// "stretching" the borders of the texture.
/// This behavior can be useful in some cases, usually when the borders have only
/// one color, for example a logo, and you want to "extend" those borders.
///
/// For different mapping outside of `0..=1` range,
/// see [`ImageAddressMode`](bevy_image::ImageAddressMode).
///
/// The format of this attribute is [`VertexFormat::Float32x2`].
pub const ATTRIBUTE_UV_0: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Uv", 2, VertexFormat::Float32x2);
/// Alternate texture coordinates for the vertex. Use in conjunction with
/// [`Mesh::insert_attribute`] or [`Mesh::with_inserted_attribute`].
///
/// Typically, these are used for lightmaps, textures that provide
/// precomputed illumination.
///
/// The format of this attribute is [`VertexFormat::Float32x2`].
pub const ATTRIBUTE_UV_1: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Uv_1", 3, VertexFormat::Float32x2);
/// The direction of the vertex tangent. Used for normal mapping.
/// Usually generated with [`generate_tangents`](Mesh::generate_tangents) or
/// [`with_generated_tangents`](Mesh::with_generated_tangents).
///
/// The format of this attribute is [`VertexFormat::Float32x4`].
pub const ATTRIBUTE_TANGENT: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Tangent", 4, VertexFormat::Float32x4);
/// Per vertex coloring. Use in conjunction with [`Mesh::insert_attribute`]
/// or [`Mesh::with_inserted_attribute`].
///
/// The format of this attribute is [`VertexFormat::Float32x4`].
pub const ATTRIBUTE_COLOR: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_Color", 5, VertexFormat::Float32x4);
/// Per vertex joint transform matrix weight. Use in conjunction with [`Mesh::insert_attribute`]
/// or [`Mesh::with_inserted_attribute`].
///
/// The format of this attribute is [`VertexFormat::Float32x4`].
pub const ATTRIBUTE_JOINT_WEIGHT: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_JointWeight", 6, VertexFormat::Float32x4);
/// Per vertex joint transform matrix index. Use in conjunction with [`Mesh::insert_attribute`]
/// or [`Mesh::with_inserted_attribute`].
///
/// The format of this attribute is [`VertexFormat::Uint16x4`].
pub const ATTRIBUTE_JOINT_INDEX: MeshVertexAttribute =
    MeshVertexAttribute::new("Vertex_JointIndex", 7, VertexFormat::Uint16x4);
/// The first index that can be used for custom vertex attributes.
/// Only the attributes with an index below this are used by Bevy.
///
/// Use indices at or above this value when defining custom [`MeshVertexAttribute`]s.
pub const FIRST_AVAILABLE_CUSTOM_ATTRIBUTE: u64 = 8;
/// Construct a new mesh. You need to provide a [`PrimitiveTopology`] so that the
/// renderer knows how to treat the vertex data. Most of the time this will be
/// [`PrimitiveTopology::TriangleList`].
///
/// The new mesh starts with no attributes or indices; `enable_raytracing`
/// defaults to `true` and `final_aabb` to `None`.
pub fn new(primitive_topology: PrimitiveTopology, asset_usage: RenderAssetUsages) -> Self {
    Mesh {
        primitive_topology,
        attributes: MeshExtractableData::Data(Default::default()),
        indices: MeshExtractableData::NoData,
        #[cfg(feature = "morph")]
        morph_targets: MeshExtractableData::NoData,
        #[cfg(feature = "morph")]
        morph_target_names: MeshExtractableData::NoData,
        asset_usage,
        enable_raytracing: true,
        final_aabb: None,
    }
}
/// Returns the topology of the mesh.
pub fn primitive_topology(&self) -> PrimitiveTopology {
    self.primitive_topology
}
/// Sets the data for a vertex attribute (position, normal, etc.). The name will
/// often be one of the associated constants such as [`Mesh::ATTRIBUTE_POSITION`].
///
/// Any existing data for the attribute is overwritten.
///
/// `Aabb` of entities with modified mesh are not updated automatically.
///
/// # Panics
/// Panics when the format of the values does not match the attribute's format.
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_insert_attribute`]
#[inline]
pub fn insert_attribute(
    &mut self,
    attribute: MeshVertexAttribute,
    values: impl Into<VertexAttributeValues>,
) {
    self.try_insert_attribute(attribute, values)
        .expect(MESH_EXTRACTED_ERROR);
}
/// Sets the data for a vertex attribute (position, normal, etc.). The name will
/// often be one of the associated constants such as [`Mesh::ATTRIBUTE_POSITION`].
///
/// Any existing data for the attribute is overwritten.
///
/// `Aabb` of entities with modified mesh are not updated automatically.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
///
/// # Panics
/// Panics when the format of the values does not match the attribute's format.
#[inline]
pub fn try_insert_attribute(
    &mut self,
    attribute: MeshVertexAttribute,
    values: impl Into<VertexAttributeValues>,
) -> Result<(), MeshAccessError> {
    let values = values.into();
    let values_format = VertexFormat::from(&values);
    if values_format != attribute.format {
        panic!(
            "Failed to insert attribute. Invalid attribute format for {}. Given format is {values_format:?} but expected {:?}",
            attribute.name, attribute.format
        );
    }
    self.attributes
        .as_mut()?
        .insert(attribute.id, MeshAttributeData { attribute, values });
    Ok(())
}
/// Consumes the mesh and returns a mesh with data set for a vertex attribute (position, normal, etc.).
/// The name will often be one of the associated constants such as [`Mesh::ATTRIBUTE_POSITION`].
///
/// (Alternatively, you can use [`Mesh::insert_attribute`] to mutate an existing mesh in-place)
///
/// `Aabb` of entities with modified mesh are not updated automatically.
///
/// # Panics
/// Panics when the format of the values does not match the attribute's format.
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_with_inserted_attribute`]
#[must_use]
#[inline]
pub fn with_inserted_attribute(
    mut self,
    attribute: MeshVertexAttribute,
    values: impl Into<VertexAttributeValues>,
) -> Self {
    self.insert_attribute(attribute, values);
    self
}
/// Consumes the mesh and returns a mesh with data set for a vertex attribute (position, normal, etc.).
/// The name will often be one of the associated constants such as [`Mesh::ATTRIBUTE_POSITION`].
///
/// (Alternatively, you can use [`Mesh::try_insert_attribute`] to mutate an existing mesh in-place)
///
/// `Aabb` of entities with modified mesh are not updated automatically.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_with_inserted_attribute(
    mut self,
    attribute: MeshVertexAttribute,
    values: impl Into<VertexAttributeValues>,
) -> Result<Self, MeshAccessError> {
    self.try_insert_attribute(attribute, values)?;
    Ok(self)
}
/// Removes the data for a vertex attribute
///
/// Returns the removed values, or `None` if the attribute was not set.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_remove_attribute`]
pub fn remove_attribute(
    &mut self,
    attribute: impl Into<MeshVertexAttributeId>,
) -> Option<VertexAttributeValues> {
    self.attributes
        .as_mut()
        .expect(MESH_EXTRACTED_ERROR)
        .remove(&attribute.into())
        .map(|data| data.values)
}
/// Removes the data for a vertex attribute
/// Returns an error if the mesh data has been extracted to `RenderWorld` or
/// if the attribute does not exist.
pub fn try_remove_attribute(
    &mut self,
    attribute: impl Into<MeshVertexAttributeId>,
) -> Result<VertexAttributeValues, MeshAccessError> {
    Ok(self
        .attributes
        .as_mut()?
        .remove(&attribute.into())
        .ok_or(MeshAccessError::NotFound)?
        .values)
}
/// Consumes the mesh and returns a mesh without the data for a vertex attribute
///
/// (Alternatively, you can use [`Mesh::remove_attribute`] to mutate an existing mesh in-place)
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_with_removed_attribute`]
#[must_use]
pub fn with_removed_attribute(mut self, attribute: impl Into<MeshVertexAttributeId>) -> Self {
    self.remove_attribute(attribute);
    self
}
/// Consumes the mesh and returns a mesh without the data for a vertex attribute
///
/// (Alternatively, you can use [`Mesh::try_remove_attribute`] to mutate an existing mesh in-place)
///
/// Returns an error if the mesh data has been extracted to `RenderWorld` or
/// if the attribute does not exist.
pub fn try_with_removed_attribute(
    mut self,
    attribute: impl Into<MeshVertexAttributeId>,
) -> Result<Self, MeshAccessError> {
    self.try_remove_attribute(attribute)?;
    Ok(self)
}
/// Returns a bool indicating if the attribute is present in this mesh's vertex data.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_contains_attribute`]
#[inline]
pub fn contains_attribute(&self, id: impl Into<MeshVertexAttributeId>) -> bool {
    // Delegate to the fallible variant; extraction is the only error case.
    self.try_contains_attribute(id).expect(MESH_EXTRACTED_ERROR)
}
/// Returns a bool indicating if the attribute is present in this mesh's vertex data.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_contains_attribute(
    &self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<bool, MeshAccessError> {
    let attributes = self.attributes.as_ref()?;
    Ok(attributes.contains_key(&id.into()))
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`].
///
/// Returns `None` if the attribute is not set.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_attribute`] or [`Mesh::try_attribute_option`]
#[inline]
pub fn attribute(
    &self,
    id: impl Into<MeshVertexAttributeId>,
) -> Option<&VertexAttributeValues> {
    self.try_attribute_option(id).expect(MESH_EXTRACTED_ERROR)
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`].
///
/// Returns an error if the mesh data has been extracted to `RenderWorld` or
/// if the attribute does not exist.
#[inline]
pub fn try_attribute(
    &self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<&VertexAttributeValues, MeshAccessError> {
    self.try_attribute_option(id)?
        .ok_or(MeshAccessError::NotFound)
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`].
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_attribute_option(
    &self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<Option<&VertexAttributeValues>, MeshAccessError> {
    Ok(self
        .attributes
        .as_ref()?
        .get(&id.into())
        .map(|data| &data.values))
}
/// Retrieves the full data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`].
#[inline]
pub(crate) fn try_attribute_data(
    &self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<Option<&MeshAttributeData>, MeshAccessError> {
    Ok(self.attributes.as_ref()?.get(&id.into()))
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`] mutably.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_attribute_mut`]
#[inline]
pub fn attribute_mut(
    &mut self,
    id: impl Into<MeshVertexAttributeId>,
) -> Option<&mut VertexAttributeValues> {
    self.try_attribute_mut_option(id)
        .expect(MESH_EXTRACTED_ERROR)
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`] mutably.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld` or
/// if the attribute does not exist.
#[inline]
pub fn try_attribute_mut(
    &mut self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<&mut VertexAttributeValues, MeshAccessError> {
    self.try_attribute_mut_option(id)?
        .ok_or(MeshAccessError::NotFound)
}
/// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`] mutably.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_attribute_mut_option(
    &mut self,
    id: impl Into<MeshVertexAttributeId>,
) -> Result<Option<&mut VertexAttributeValues>, MeshAccessError> {
    Ok(self
        .attributes
        .as_mut()?
        .get_mut(&id.into())
        .map(|data| &mut data.values))
}
/// Returns an iterator that yields references to the data of each vertex attribute.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_attributes`]
pub fn attributes(
    &self,
) -> impl Iterator<Item = (&MeshVertexAttribute, &VertexAttributeValues)> {
    self.try_attributes().expect(MESH_EXTRACTED_ERROR)
}
/// Returns an iterator that yields references to the data of each vertex attribute.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
pub fn try_attributes(
    &self,
) -> Result<impl Iterator<Item = (&MeshVertexAttribute, &VertexAttributeValues)>, MeshAccessError>
{
    Ok(self
        .attributes
        .as_ref()?
        .values()
        .map(|data| (&data.attribute, &data.values)))
}
/// Returns an iterator that yields mutable references to the data of each vertex attribute.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_attributes_mut`]
pub fn attributes_mut(
    &mut self,
) -> impl Iterator<Item = (&MeshVertexAttribute, &mut VertexAttributeValues)> {
    self.try_attributes_mut().expect(MESH_EXTRACTED_ERROR)
}
/// Returns an iterator that yields mutable references to the data of each vertex attribute.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
pub fn try_attributes_mut(
    &mut self,
) -> Result<
    impl Iterator<Item = (&MeshVertexAttribute, &mut VertexAttributeValues)>,
    MeshAccessError,
> {
    Ok(self
        .attributes
        .as_mut()?
        .values_mut()
        .map(|data| (&data.attribute, &mut data.values)))
}
/// Sets the vertex indices of the mesh. They describe how triangles are constructed out of the
/// vertex attributes and are therefore only useful for the [`PrimitiveTopology`] variants
/// that use triangles.
///
/// # Panics
/// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
/// this as an error use [`Mesh::try_insert_indices`]
#[inline]
pub fn insert_indices(&mut self, indices: Indices) {
self.indices
.replace(Some(indices))
.expect(MESH_EXTRACTED_ERROR);
}
/// Sets the vertex indices of the mesh. They describe how triangles are constructed out of the
/// vertex attributes and are therefore only useful for the [`PrimitiveTopology`] variants
/// that use triangles.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_insert_indices(&mut self, indices: Indices) -> Result<(), MeshAccessError> {
self.indices.replace(Some(indices))?;
Ok(())
}
    /// Consumes the mesh and returns a mesh with the given vertex indices. They describe how triangles
    /// are constructed out of the vertex attributes and are therefore only useful for the
    /// [`PrimitiveTopology`] variants that use triangles.
    ///
    /// (Alternatively, you can use [`Mesh::insert_indices`] to mutate an existing mesh in-place)
    ///
    /// # Panics
    /// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
    /// this as an error use [`Mesh::try_with_inserted_indices`]
    #[must_use]
    #[inline]
    pub fn with_inserted_indices(mut self, indices: Indices) -> Self {
        // Builder-style wrapper around the panicking `insert_indices`.
        self.insert_indices(indices);
        self
    }
/// Consumes the mesh and returns a mesh with the given vertex indices. They describe how triangles
/// are constructed out of the vertex attributes and are therefore only useful for the
/// [`PrimitiveTopology`] variants that use triangles.
///
/// (Alternatively, you can use [`Mesh::try_insert_indices`] to mutate an existing mesh in-place)
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_with_inserted_indices(mut self, indices: Indices) -> Result<Self, MeshAccessError> {
self.try_insert_indices(indices)?;
Ok(self)
}
    /// Retrieves the vertex `indices` of the mesh, returns None if not found.
    ///
    /// # Panics
    /// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
    /// this as an error use [`Mesh::try_indices`]
    #[inline]
    pub fn indices(&self) -> Option<&Indices> {
        // Panics only when extracted; a mesh that simply has no indices yields `None`.
        self.indices.as_ref_option().expect(MESH_EXTRACTED_ERROR)
    }
    /// Retrieves the vertex `indices` of the mesh.
    ///
    /// Returns an error if the mesh data has been extracted to `RenderWorld` or
    /// if the indices do not exist.
    #[inline]
    pub fn try_indices(&self) -> Result<&Indices, MeshAccessError> {
        self.indices.as_ref()
    }
    /// Retrieves the vertex `indices` of the mesh, returns `Ok(None)` if not found.
    ///
    /// Returns an error if the mesh data has been extracted to `RenderWorld`.
    #[inline]
    pub fn try_indices_option(&self) -> Result<Option<&Indices>, MeshAccessError> {
        self.indices.as_ref_option()
    }
    /// Retrieves the vertex `indices` of the mesh mutably, returns None if not found.
    ///
    /// # Panics
    /// Panics when the mesh data has already been extracted to `RenderWorld`. To handle
    /// this as an error use [`Mesh::try_indices_mut_option`]
    #[inline]
    pub fn indices_mut(&mut self) -> Option<&mut Indices> {
        self.try_indices_mut_option().expect(MESH_EXTRACTED_ERROR)
    }
    /// Retrieves the vertex `indices` of the mesh mutably.
    ///
    /// Returns an error if the mesh data has been extracted to `RenderWorld`.
    // NOTE(review): like `try_indices`, this presumably also errors when no
    // indices are set — confirm against the `MeshAccessError` variants.
    #[inline]
    pub fn try_indices_mut(&mut self) -> Result<&mut Indices, MeshAccessError> {
        self.indices.as_mut()
    }
/// Retrieves the vertex `indices` of the mesh mutably.
///
/// Returns an error if the mesh data has been extracted to `RenderWorld`.
#[inline]
pub fn try_indices_mut_option(&mut self) -> Result<Option<&mut Indices>, MeshAccessError> {
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/conversions.rs | crates/bevy_mesh/src/conversions.rs | //! These implementations allow you to
//! convert `std::vec::Vec<T>` to `VertexAttributeValues::T` and back.
//!
//! # Examples
//!
//! ```
//! use bevy_mesh::VertexAttributeValues;
//!
//! // creating std::vec::Vec
//! let buffer = vec![[0_u32; 4]; 10];
//!
//! // converting std::vec::Vec to bevy_mesh::VertexAttributeValues
//! let values = VertexAttributeValues::from(buffer.clone());
//!
//! // converting bevy_mesh::VertexAttributeValues to std::vec::Vec with two ways
//! let result_into: Vec<[u32; 4]> = values.clone().try_into().unwrap();
//! let result_from: Vec<[u32; 4]> = Vec::try_from(values.clone()).unwrap();
//!
//! // getting an error when trying to convert incorrectly
//! let error: Result<Vec<u32>, _> = values.try_into();
//!
//! assert_eq!(buffer, result_into);
//! assert_eq!(buffer, result_from);
//! assert!(error.is_err());
//! ```
use super::VertexAttributeValues;
use bevy_math::{IVec2, IVec3, IVec4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4};
use thiserror::Error;
/// Error returned when a [`VertexAttributeValues`] buffer cannot be converted
/// into a `Vec` of the requested element type.
///
/// The original buffer is preserved in `from`, so no data is lost on failure.
#[derive(Debug, Clone, Error)]
#[error("cannot convert VertexAttributeValues::{variant} to {into}")]
pub struct FromVertexAttributeError {
    /// The values that failed to convert, kept intact.
    from: VertexAttributeValues,
    /// Name of the variant of `from`, used in the error message.
    variant: &'static str,
    /// `core::any::type_name` of the requested target type.
    into: &'static str,
}
impl FromVertexAttributeError {
    /// Builds an error describing a failed conversion of `from` into `T`.
    fn new<T: 'static>(from: VertexAttributeValues) -> Self {
        Self {
            variant: from.enum_variant_name(),
            into: core::any::type_name::<T>(),
            from,
        }
    }
}
/// Implements `From<Vec<$from>>` for [`VertexAttributeValues`], moving the
/// buffer directly into the `$variant` variant.
macro_rules! impl_from {
    ($from:ty, $variant:tt) => {
        impl From<Vec<$from>> for VertexAttributeValues {
            fn from(vec: Vec<$from>) -> Self {
                VertexAttributeValues::$variant(vec)
            }
        }
    };
}
/// Like `impl_from!`, but converts each element first (e.g. `Vec2` ->
/// `[f32; 2]`) before storing the buffer in the `$variant` variant.
macro_rules! impl_from_into {
    ($from:ty, $variant:tt) => {
        impl From<Vec<$from>> for VertexAttributeValues {
            fn from(vec: Vec<$from>) -> Self {
                let vec: Vec<_> = vec.into_iter().map(|t| t.into()).collect();
                VertexAttributeValues::$variant(vec)
            }
        }
    };
}
// Conversions from raw arrays and from the equivalent `bevy_math` vector
// types; each vector type maps onto the same variant as its array form.
impl_from!(f32, Float32);
impl_from!([f32; 2], Float32x2);
impl_from_into!(Vec2, Float32x2);
impl_from!([f32; 3], Float32x3);
impl_from_into!(Vec3, Float32x3);
impl_from_into!(Vec3A, Float32x3);
impl_from!([f32; 4], Float32x4);
impl_from_into!(Vec4, Float32x4);
impl_from!(i32, Sint32);
impl_from!([i32; 2], Sint32x2);
impl_from_into!(IVec2, Sint32x2);
impl_from!([i32; 3], Sint32x3);
impl_from_into!(IVec3, Sint32x3);
impl_from!([i32; 4], Sint32x4);
impl_from_into!(IVec4, Sint32x4);
impl_from!(u32, Uint32);
impl_from!([u32; 2], Uint32x2);
impl_from_into!(UVec2, Uint32x2);
impl_from!([u32; 3], Uint32x3);
impl_from_into!(UVec3, Uint32x3);
impl_from!([u32; 4], Uint32x4);
impl_from_into!(UVec4, Uint32x4);
/// Implements `TryFrom<VertexAttributeValues>` for `Vec<$into>`, succeeding
/// for any of the listed `$variant`s and otherwise returning a
/// [`FromVertexAttributeError`] that preserves the input.
macro_rules! impl_try_from {
    ($into:ty, $($variant:tt), +) => {
        impl TryFrom<VertexAttributeValues> for Vec<$into> {
            type Error = FromVertexAttributeError;
            fn try_from(value: VertexAttributeValues) -> Result<Self, Self::Error> {
                match value {
                    $(VertexAttributeValues::$variant(value)) |+ => Ok(value),
                    _ => Err(FromVertexAttributeError::new::<Self>(value)),
                }
            }
        }
    };
}
/// Like `impl_try_from!`, but converts each element on the way out
/// (e.g. `[f32; 2]` -> `Vec2`).
macro_rules! impl_try_from_into {
    ($into:ty, $($variant:tt), +) => {
        impl TryFrom<VertexAttributeValues> for Vec<$into> {
            type Error = FromVertexAttributeError;
            fn try_from(value: VertexAttributeValues) -> Result<Self, Self::Error> {
                match value {
                    $(VertexAttributeValues::$variant(value)) |+ => {
                        Ok(value.into_iter().map(|t| t.into()).collect())
                    }
                    _ => Err(FromVertexAttributeError::new::<Self>(value)),
                }
            }
        }
    };
}
impl_try_from!(f32, Float32);
impl_try_from!([f32; 2], Float32x2);
impl_try_from_into!(Vec2, Float32x2);
impl_try_from!([f32; 3], Float32x3);
impl_try_from_into!(Vec3, Float32x3);
impl_try_from_into!(Vec3A, Float32x3);
impl_try_from!([f32; 4], Float32x4);
impl_try_from_into!(Vec4, Float32x4);
impl_try_from!(i32, Sint32);
impl_try_from!([i32; 2], Sint32x2);
impl_try_from_into!(IVec2, Sint32x2);
impl_try_from!([i32; 3], Sint32x3);
impl_try_from_into!(IVec3, Sint32x3);
impl_try_from!([i32; 4], Sint32x4);
impl_try_from_into!(IVec4, Sint32x4);
impl_try_from!(u32, Uint32);
impl_try_from!([u32; 2], Uint32x2);
impl_try_from_into!(UVec2, Uint32x2);
impl_try_from!([u32; 3], Uint32x3);
impl_try_from_into!(UVec3, Uint32x3);
impl_try_from!([u32; 4], Uint32x4);
impl_try_from_into!(UVec4, Uint32x4);
// The 8- and 16-bit formats share one Rust storage type per signedness/width,
// so both the integer and normalized variants convert to the same `Vec` type.
impl_try_from!([i8; 2], Sint8x2, Snorm8x2);
impl_try_from!([i8; 4], Sint8x4, Snorm8x4);
impl_try_from!([u8; 2], Uint8x2, Unorm8x2);
impl_try_from!([u8; 4], Uint8x4, Unorm8x4);
impl_try_from!([i16; 2], Sint16x2, Snorm16x2);
impl_try_from!([i16; 4], Sint16x4, Snorm16x4);
impl_try_from!([u16; 2], Uint16x2, Unorm16x2);
impl_try_from!([u16; 4], Uint16x4, Unorm16x4);
#[cfg(test)]
mod tests {
    use bevy_math::{IVec2, IVec3, IVec4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4};

    use super::VertexAttributeValues;

    /// Generates a round-trip conversion test:
    /// * build a `Vec` of `$element` repeated 10 times,
    /// * convert it into [`VertexAttributeValues`] (optionally asserting the
    ///   produced `$variant` for the `bevy_math` vector types),
    /// * convert back to `Vec<$ok>` via both `TryInto` and `TryFrom`,
    /// * and check that converting to the mismatched `Vec<$err>` fails.
    ///
    /// This replaces 21 previously copy-pasted test functions with identical
    /// bodies; the generated tests keep the original names and inputs.
    macro_rules! round_trip {
        ($name:ident, $element:expr, $ok:ty, $err:ty $(, $variant:ident)?) => {
            #[test]
            fn $name() {
                let buffer = vec![$element; 10];
                let values = VertexAttributeValues::from(buffer.clone());
                $(assert!(matches!(values, VertexAttributeValues::$variant(_)));)?
                let result_into: Vec<$ok> = values.clone().try_into().unwrap();
                let result_from: Vec<$ok> = Vec::try_from(values.clone()).unwrap();
                let error: Result<Vec<$err>, _> = values.try_into();
                assert_eq!(buffer, result_into);
                assert_eq!(buffer, result_from);
                assert!(error.is_err());
            }
        };
    }

    round_trip!(f32, 0.0_f32, f32, u32);
    round_trip!(i32, 0_i32, i32, u32);
    round_trip!(u32, 0_u32, u32, f32);
    round_trip!(f32_2, [0.0_f32; 2], [f32; 2], u32);
    round_trip!(vec2, Vec2::ZERO, Vec2, u32, Float32x2);
    round_trip!(i32_2, [0_i32; 2], [i32; 2], u32);
    round_trip!(ivec2, IVec2::ZERO, IVec2, u32, Sint32x2);
    round_trip!(u32_2, [0_u32; 2], [u32; 2], u32);
    round_trip!(uvec2, UVec2::ZERO, UVec2, u32, Uint32x2);
    round_trip!(f32_3, [0.0_f32; 3], [f32; 3], u32);
    round_trip!(vec3, Vec3::ZERO, Vec3, u32, Float32x3);
    round_trip!(vec3a, Vec3A::ZERO, Vec3A, u32, Float32x3);
    round_trip!(i32_3, [0_i32; 3], [i32; 3], u32);
    round_trip!(ivec3, IVec3::ZERO, IVec3, u32, Sint32x3);
    round_trip!(u32_3, [0_u32; 3], [u32; 3], u32);
    round_trip!(uvec3, UVec3::ZERO, UVec3, u32, Uint32x3);
    round_trip!(f32_4, [0.0_f32; 4], [f32; 4], u32);
    round_trip!(vec4, Vec4::ZERO, Vec4, u32, Float32x4);
    round_trip!(i32_4, [0_i32; 4], [i32; 4], u32);
    round_trip!(ivec4, IVec4::ZERO, IVec4, u32, Sint32x4);
    round_trip!(u32_4, [0_u32; 4], [u32; 4], u32);
    round_trip!(uvec4, UVec4::ZERO, UVec4, u32, Uint32x4);

    /// The `Display` message must name both the source variant and the target
    /// type, and `Debug` must expose the preserved source data.
    #[test]
    fn correct_message() {
        let buffer = vec![[0_u32; 4]; 3];
        let values = VertexAttributeValues::from(buffer);
        let error_result: Result<Vec<u32>, _> = values.try_into();
        let Err(error) = error_result else {
            unreachable!()
        };
        assert_eq!(
            error.to_string(),
            "cannot convert VertexAttributeValues::Uint32x4 to alloc::vec::Vec<u32>"
        );
        assert_eq!(format!("{error:?}"),
            "FromVertexAttributeError { from: Uint32x4([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]), variant: \"Uint32x4\", into: \"alloc::vec::Vec<u32>\" }");
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/components.rs | crates/bevy_mesh/src/components.rs | use crate::mesh::Mesh;
use bevy_asset::{AsAssetId, AssetEvent, AssetId, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
change_detection::DetectChangesMut, component::Component, message::MessageReader,
reflect::ReflectComponent, system::Query,
};
use bevy_platform::{collections::HashSet, hash::FixedHasher};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::components::Transform;
use derive_more::derive::From;
/// A component for 2D meshes. Requires a [`MeshMaterial2d`] to be rendered, commonly using a [`ColorMaterial`].
///
/// [`MeshMaterial2d`]: <https://docs.rs/bevy/latest/bevy/prelude/struct.MeshMaterial2d.html>
/// [`ColorMaterial`]: <https://docs.rs/bevy/latest/bevy/prelude/struct.ColorMaterial.html>
///
/// # Example
///
/// ```ignore
/// # use bevy_sprite::{ColorMaterial, MeshMaterial2d};
/// # use bevy_ecs::prelude::*;
/// # use bevy_mesh::{Mesh, Mesh2d};
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::Assets;
/// # use bevy_math::primitives::Circle;
/// #
/// // Spawn an entity with a mesh using `ColorMaterial`.
/// fn setup(
///     mut commands: Commands,
///     mut meshes: ResMut<Assets<Mesh>>,
///     mut materials: ResMut<Assets<ColorMaterial>>,
/// ) {
///     commands.spawn((
///         Mesh2d(meshes.add(Circle::new(50.0))),
///         MeshMaterial2d(materials.add(ColorMaterial::from_color(RED))),
///     ));
/// }
/// ```
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)]
#[reflect(Component, Default, Clone, PartialEq)]
#[require(Transform)]
pub struct Mesh2d(pub Handle<Mesh>);
// Allow both owned and borrowed `Mesh2d` to be turned into the underlying asset id.
impl From<Mesh2d> for AssetId<Mesh> {
    fn from(mesh: Mesh2d) -> Self {
        mesh.id()
    }
}
impl From<&Mesh2d> for AssetId<Mesh> {
    fn from(mesh: &Mesh2d) -> Self {
        mesh.id()
    }
}
// Lets generic asset-tracking machinery obtain the `Mesh` asset id from this component.
impl AsAssetId for Mesh2d {
    type Asset = Mesh;
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.id()
    }
}
/// A component for 3D meshes. Requires a [`MeshMaterial3d`] to be rendered, commonly using a [`StandardMaterial`].
///
/// [`MeshMaterial3d`]: <https://docs.rs/bevy/latest/bevy/pbr/struct.MeshMaterial3d.html>
/// [`StandardMaterial`]: <https://docs.rs/bevy/latest/bevy/pbr/struct.StandardMaterial.html>
///
/// # Example
///
/// ```ignore
/// # use bevy_pbr::{Material, MeshMaterial3d, StandardMaterial};
/// # use bevy_ecs::prelude::*;
/// # use bevy_mesh::{Mesh, Mesh3d};
/// # use bevy_color::palettes::basic::RED;
/// # use bevy_asset::Assets;
/// # use bevy_math::primitives::Capsule3d;
/// #
/// // Spawn an entity with a mesh using `StandardMaterial`.
/// fn setup(
///     mut commands: Commands,
///     mut meshes: ResMut<Assets<Mesh>>,
///     mut materials: ResMut<Assets<StandardMaterial>>,
/// ) {
///     commands.spawn((
///         Mesh3d(meshes.add(Capsule3d::default())),
///         MeshMaterial3d(materials.add(StandardMaterial {
///             base_color: RED.into(),
///             ..Default::default()
///         })),
///     ));
/// }
/// ```
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)]
#[reflect(Component, Default, Clone, PartialEq)]
#[require(Transform)]
pub struct Mesh3d(pub Handle<Mesh>);
// Allow both owned and borrowed `Mesh3d` to be turned into the underlying asset id.
impl From<Mesh3d> for AssetId<Mesh> {
    fn from(mesh: Mesh3d) -> Self {
        mesh.id()
    }
}
impl From<&Mesh3d> for AssetId<Mesh> {
    fn from(mesh: &Mesh3d) -> Self {
        mesh.id()
    }
}
// Lets generic asset-tracking machinery obtain the `Mesh` asset id from this component.
impl AsAssetId for Mesh3d {
    type Asset = Mesh;
    fn as_asset_id(&self) -> AssetId<Self::Asset> {
        self.id()
    }
}
/// A system that marks a [`Mesh3d`] as changed if the associated [`Mesh`] asset
/// has changed.
///
/// This is needed because the systems that extract meshes, such as
/// `extract_meshes_for_gpu_building`, write some metadata about the mesh (like
/// the location within each slab) into the GPU structures that they build that
/// needs to be kept up to date if the contents of the mesh change.
pub fn mark_3d_meshes_as_changed_if_their_assets_changed(
    mut meshes_3d: Query<&mut Mesh3d>,
    mut mesh_asset_events: MessageReader<AssetEvent<Mesh>>,
) {
    // Collect the ids of every mesh asset modified since this system last ran.
    let changed_meshes: HashSet<AssetId<Mesh>, FixedHasher> = mesh_asset_events
        .read()
        .filter_map(|event| match event {
            AssetEvent::Modified { id } => Some(*id),
            _ => None,
        })
        .collect();

    // Nothing was modified: skip iterating the query entirely.
    if changed_meshes.is_empty() {
        return;
    }

    // Flag every component whose handle points at a modified asset.
    for mut mesh_3d in &mut meshes_3d {
        if changed_meshes.contains(&mesh_3d.0.id()) {
            mesh_3d.set_changed();
        }
    }
}
/// A component that stores an arbitrary index used to identify the mesh instance when rendering.
// NOTE(review): the tag's interpretation is renderer-defined; this crate only stores the value.
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)]
#[reflect(Component, Default, Clone, PartialEq)]
pub struct MeshTag(pub u32);
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/mikktspace.rs | crates/bevy_mesh/src/mikktspace.rs | use crate::MeshAccessError;
use super::{Indices, Mesh, VertexAttributeValues};
use thiserror::Error;
use wgpu_types::{PrimitiveTopology, VertexFormat};
/// Adapter that exposes mesh buffers through the interface `bevy_mikktspace`
/// expects, collecting the generated tangents as it goes.
struct MikktspaceGeometryHelper<'a> {
    /// Optional index buffer; `None` means the mesh is treated as unindexed.
    indices: Option<&'a Indices>,
    positions: &'a Vec<[f32; 3]>,
    normals: &'a Vec<[f32; 3]>,
    uvs: &'a Vec<[f32; 2]>,
    /// Output buffer, one tangent per vertex, written by `set_tangent`.
    tangents: Vec<[f32; 4]>,
}
impl MikktspaceGeometryHelper<'_> {
    /// Resolves the vertex index for corner `vert` (0..3) of triangle `face`,
    /// reading through the index buffer when one is present.
    fn index(&self, face: usize, vert: usize) -> usize {
        let index_index = face * 3 + vert;
        match self.indices {
            Some(Indices::U16(indices)) => indices[index_index] as usize,
            Some(Indices::U32(indices)) => indices[index_index] as usize,
            None => index_index,
        }
    }
}
impl bevy_mikktspace::Geometry for MikktspaceGeometryHelper<'_> {
    // Triangle count: indexed meshes count index triples, unindexed meshes
    // count vertex triples.
    fn num_faces(&self) -> usize {
        self.indices
            .map(Indices::len)
            .unwrap_or_else(|| self.positions.len())
            / 3
    }
    // Only triangle lists are supported, so every face has three vertices.
    fn num_vertices_of_face(&self, _: usize) -> usize {
        3
    }
    fn position(&self, face: usize, vert: usize) -> [f32; 3] {
        self.positions[self.index(face, vert)]
    }
    fn normal(&self, face: usize, vert: usize) -> [f32; 3] {
        self.normals[self.index(face, vert)]
    }
    fn tex_coord(&self, face: usize, vert: usize) -> [f32; 2] {
        self.uvs[self.index(face, vert)]
    }
    // Records the generated tangent for one face corner; a missing tangent
    // space falls back to the default encoding.
    fn set_tangent(
        &mut self,
        tangent_space: Option<bevy_mikktspace::TangentSpace>,
        face: usize,
        vert: usize,
    ) {
        let idx = self.index(face, vert);
        self.tangents[idx] = tangent_space.unwrap_or_default().tangent_encoded();
    }
}
/// Failed to generate tangents for the mesh.
#[derive(Error, Debug)]
pub enum GenerateTangentsError {
    /// Tangent generation only supports `PrimitiveTopology::TriangleList`.
    #[error("cannot generate tangents for {0:?}")]
    UnsupportedTopology(PrimitiveTopology),
    #[error("missing indices")]
    MissingIndices,
    /// A required vertex attribute (position, normal, or UV) was absent.
    #[error("missing vertex attributes '{0}'")]
    MissingVertexAttribute(&'static str),
    /// A required vertex attribute was present but stored in the wrong format.
    #[error("the '{0}' vertex attribute should have {1:?} format")]
    InvalidVertexAttributeFormat(&'static str, VertexFormat),
    #[error("mesh not suitable for tangent generation")]
    MikktspaceError(#[from] bevy_mikktspace::GenerateTangentSpaceError),
    /// Wrapped error from a fallible mesh accessor (e.g. data already extracted).
    #[error("Mesh access error: {0}")]
    MeshAccessError(#[from] MeshAccessError),
}
/// Generates per-vertex tangents for a triangle-list mesh using the
/// mikktspace algorithm, returning one `[x, y, z, w]` tangent per vertex.
///
/// Requires `Float32x3` positions and normals and `Float32x2` UVs; returns a
/// [`GenerateTangentsError`] when the topology, the attributes, or mesh
/// access make generation impossible.
pub(crate) fn generate_tangents_for_mesh(
    mesh: &Mesh,
) -> Result<Vec<[f32; 4]>, GenerateTangentsError> {
    // Mikktspace operates on triangles only.
    match mesh.primitive_topology() {
        PrimitiveTopology::TriangleList => {}
        other => return Err(GenerateTangentsError::UnsupportedTopology(other)),
    };
    // Each required attribute must exist and be stored in the expected format.
    let positions = mesh.try_attribute_option(Mesh::ATTRIBUTE_POSITION)?.ok_or(
        GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_POSITION.name),
    )?;
    let VertexAttributeValues::Float32x3(positions) = positions else {
        return Err(GenerateTangentsError::InvalidVertexAttributeFormat(
            Mesh::ATTRIBUTE_POSITION.name,
            VertexFormat::Float32x3,
        ));
    };
    let normals = mesh.try_attribute_option(Mesh::ATTRIBUTE_NORMAL)?.ok_or(
        GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_NORMAL.name),
    )?;
    let VertexAttributeValues::Float32x3(normals) = normals else {
        return Err(GenerateTangentsError::InvalidVertexAttributeFormat(
            Mesh::ATTRIBUTE_NORMAL.name,
            VertexFormat::Float32x3,
        ));
    };
    let uvs = mesh.try_attribute_option(Mesh::ATTRIBUTE_UV_0)?.ok_or(
        GenerateTangentsError::MissingVertexAttribute(Mesh::ATTRIBUTE_UV_0.name),
    )?;
    let VertexAttributeValues::Float32x2(uvs) = uvs else {
        return Err(GenerateTangentsError::InvalidVertexAttributeFormat(
            Mesh::ATTRIBUTE_UV_0.name,
            VertexFormat::Float32x2,
        ));
    };
    let len = positions.len();
    // Output buffer, one tangent per vertex, filled in by `set_tangent`.
    let tangents = vec![[0., 0., 0., 0.]; len];
    let mut mikktspace_mesh = MikktspaceGeometryHelper {
        indices: mesh.try_indices_option()?,
        positions,
        normals,
        uvs,
        tangents,
    };
    bevy_mikktspace::generate_tangents(&mut mikktspace_mesh)?;
    // mikktspace seems to assume left-handedness so we can flip the sign to correct for this
    for tangent in &mut mikktspace_mesh.tangents {
        tangent[3] = -tangent[3];
    }
    Ok(mikktspace_mesh.tangents)
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/vertex.rs | crates/bevy_mesh/src/vertex.rs | use alloc::sync::Arc;
use bevy_derive::EnumVariantMeta;
use bevy_ecs::resource::Resource;
use bevy_math::Vec3;
#[cfg(feature = "serialize")]
use bevy_platform::collections::HashMap;
use bevy_platform::collections::HashSet;
use bytemuck::cast_slice;
use core::hash::{Hash, Hasher};
#[cfg(feature = "serialize")]
use serde::{Deserialize, Serialize};
use thiserror::Error;
use wgpu_types::{BufferAddress, VertexAttribute, VertexFormat, VertexStepMode};
/// Describes a single vertex attribute: a human-readable name, a unique id
/// used for identity and sort order, and the GPU [`VertexFormat`] of its data.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct MeshVertexAttribute {
    /// The friendly name of the vertex attribute
    pub name: &'static str,
    /// The _unique_ id of the vertex attribute. This will also determine sort ordering
    /// when generating vertex buffers. Built-in / standard attributes will use "close to zero"
    /// indices. When in doubt, use a random / very large u64 to avoid conflicts.
    pub id: MeshVertexAttributeId,
    /// The format of the vertex attribute.
    pub format: VertexFormat,
}
/// Serializable form of [`MeshVertexAttribute`], storing an owned name
/// instead of a `&'static str`.
#[cfg(feature = "serialize")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct SerializedMeshVertexAttribute {
    pub(crate) name: String,
    pub(crate) id: MeshVertexAttributeId,
    pub(crate) format: VertexFormat,
}
#[cfg(feature = "serialize")]
impl SerializedMeshVertexAttribute {
    /// Converts a runtime attribute into its serializable form.
    pub(crate) fn from_mesh_vertex_attribute(attribute: MeshVertexAttribute) -> Self {
        Self {
            name: attribute.name.to_string(),
            id: attribute.id,
            format: attribute.format,
        }
    }
    /// Looks the attribute up by name in `possible_attributes` and returns it
    /// only when the stored id matches; otherwise returns `None`.
    pub(crate) fn try_into_mesh_vertex_attribute(
        self,
        possible_attributes: &HashMap<Box<str>, MeshVertexAttribute>,
    ) -> Option<MeshVertexAttribute> {
        let attr = possible_attributes.get(self.name.as_str())?;
        if attr.id == self.id {
            Some(*attr)
        } else {
            None
        }
    }
}
impl MeshVertexAttribute {
    /// Creates a new attribute; `id` must be unique across all attributes
    /// (see the field docs on [`MeshVertexAttribute::id`]).
    pub const fn new(name: &'static str, id: u64, format: VertexFormat) -> Self {
        Self {
            name,
            id: MeshVertexAttributeId(id),
            format,
        }
    }
    /// Pairs this attribute's id and name with a shader location, for use when
    /// building vertex buffer layouts.
    pub const fn at_shader_location(&self, shader_location: u32) -> VertexAttributeDescriptor {
        VertexAttributeDescriptor::new(shader_location, self.id, self.name)
    }
}
/// Opaque unique identifier of a [`MeshVertexAttribute`]; ordering follows the
/// underlying `u64`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct MeshVertexAttributeId(u64);
impl From<MeshVertexAttribute> for MeshVertexAttributeId {
    fn from(attribute: MeshVertexAttribute) -> Self {
        attribute.id
    }
}
/// The vertex buffer layout of a mesh together with the ids of the attributes
/// it contains, kept in the same order as `layout.attributes`.
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub struct MeshVertexBufferLayout {
    pub(crate) attribute_ids: Vec<MeshVertexAttributeId>,
    pub(crate) layout: VertexBufferLayout,
}
impl MeshVertexBufferLayout {
    /// Creates a layout. `attribute_ids` must parallel `layout.attributes`
    /// (same length and order) for [`Self::get_layout`] to resolve correctly.
    pub fn new(attribute_ids: Vec<MeshVertexAttributeId>, layout: VertexBufferLayout) -> Self {
        Self {
            attribute_ids,
            layout,
        }
    }
    /// Returns `true` if the layout contains the given attribute.
    #[inline]
    pub fn contains(&self, attribute_id: impl Into<MeshVertexAttributeId>) -> bool {
        self.attribute_ids.contains(&attribute_id.into())
    }
    /// The ids of the attributes, parallel to `layout().attributes`.
    #[inline]
    pub fn attribute_ids(&self) -> &[MeshVertexAttributeId] {
        &self.attribute_ids
    }
    /// The underlying GPU vertex buffer layout.
    #[inline]
    pub fn layout(&self) -> &VertexBufferLayout {
        &self.layout
    }
    /// Builds a [`VertexBufferLayout`] that binds each requested attribute to
    /// its descriptor's shader location, failing with the first requested
    /// attribute that is not present in this layout.
    pub fn get_layout(
        &self,
        attribute_descriptors: &[VertexAttributeDescriptor],
    ) -> Result<VertexBufferLayout, MissingVertexAttributeError> {
        let mut attributes = Vec::with_capacity(attribute_descriptors.len());
        for attribute_descriptor in attribute_descriptors {
            // `attribute_ids` parallels `layout.attributes`, so the position of
            // the id is also the position of the stored attribute.
            if let Some(index) = self
                .attribute_ids
                .iter()
                .position(|id| *id == attribute_descriptor.id)
            {
                let layout_attribute = &self.layout.attributes[index];
                attributes.push(VertexAttribute {
                    format: layout_attribute.format,
                    offset: layout_attribute.offset,
                    shader_location: attribute_descriptor.shader_location,
                });
            } else {
                return Err(MissingVertexAttributeError {
                    id: attribute_descriptor.id,
                    name: attribute_descriptor.name,
                    pipeline_type: None,
                });
            }
        }
        Ok(VertexBufferLayout {
            array_stride: self.layout.array_stride,
            step_mode: self.layout.step_mode,
            attributes,
        })
    }
}
/// Error returned by [`MeshVertexBufferLayout::get_layout`] when a requested
/// attribute is not present in the mesh's layout.
#[derive(Error, Debug)]
#[error("Mesh is missing requested attribute: {name} ({id:?}, pipeline type: {pipeline_type:?})")]
pub struct MissingVertexAttributeError {
    /// Set to `None` on construction; public so callers can attach the name of
    /// the pipeline that required the attribute before reporting the error.
    pub pipeline_type: Option<&'static str>,
    id: MeshVertexAttributeId,
    name: &'static str,
}
/// Requests that a named attribute be bound at a specific shader location;
/// typically created via [`MeshVertexAttribute::at_shader_location`].
pub struct VertexAttributeDescriptor {
    pub shader_location: u32,
    pub id: MeshVertexAttributeId,
    name: &'static str,
}
impl VertexAttributeDescriptor {
    /// Creates a descriptor binding attribute `id`/`name` to `shader_location`.
    pub const fn new(shader_location: u32, id: MeshVertexAttributeId, name: &'static str) -> Self {
        Self {
            shader_location,
            id,
            name,
        }
    }
}
/// A vertex attribute paired with its per-vertex values.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct MeshAttributeData {
    pub(crate) attribute: MeshVertexAttribute,
    pub(crate) values: VertexAttributeValues,
}
/// Serializable form of [`MeshAttributeData`].
#[cfg(feature = "serialize")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct SerializedMeshAttributeData {
    pub(crate) attribute: SerializedMeshVertexAttribute,
    pub(crate) values: VertexAttributeValues,
}
#[cfg(feature = "serialize")]
impl SerializedMeshAttributeData {
    /// Converts runtime attribute data into its serializable form.
    pub(crate) fn from_mesh_attribute_data(data: MeshAttributeData) -> Self {
        Self {
            attribute: SerializedMeshVertexAttribute::from_mesh_vertex_attribute(data.attribute),
            values: data.values,
        }
    }
    /// Returns `None` when the serialized attribute cannot be matched (by name
    /// and id) against one of `possible_attributes`.
    pub(crate) fn try_into_mesh_attribute_data(
        self,
        possible_attributes: &HashMap<Box<str>, MeshVertexAttribute>,
    ) -> Option<MeshAttributeData> {
        let attribute = self
            .attribute
            .try_into_mesh_vertex_attribute(possible_attributes)?;
        Some(MeshAttributeData {
            attribute,
            values: self.values,
        })
    }
}
/// Compute a vector whose direction is the normal of the triangle formed by
/// points a, b, c, and whose magnitude is double the area of the triangle. This
/// is useful for computing smooth normals where the contributing normals are
/// proportionate to the areas of the triangles as [discussed
/// here](https://iquilezles.org/articles/normals/).
///
/// Question: Why double the area? Because the area of a triangle _A_ is
/// determined by this equation:
///
/// _A = |(b - a) x (c - a)| / 2_
///
/// By computing _2 A_ we avoid a division operation, and when calculating the
/// sum of these vectors which are then normalized, a constant multiple has
/// no effect.
#[inline]
pub fn triangle_area_normal(a: [f32; 3], b: [f32; 3], c: [f32; 3]) -> [f32; 3] {
    let (a, b, c) = (Vec3::from(a), Vec3::from(b), Vec3::from(c));
    (b - a).cross(c - a).into()
}
/// Compute the unit normal of a face made of three points: a, b, and c.
///
/// Yields the zero vector when the triangle is degenerate (collinear points),
/// since the edge cross product then has zero length.
#[inline]
pub fn triangle_normal(a: [f32; 3], b: [f32; 3], c: [f32; 3]) -> [f32; 3] {
    let origin = Vec3::from(a);
    let edge_ab = Vec3::from(b) - origin;
    let edge_ac = Vec3::from(c) - origin;
    edge_ab.cross(edge_ac).normalize_or_zero().into()
}
/// Contains an array where each entry describes a property of a single vertex.
/// Matches the [`VertexFormats`](VertexFormat).
#[derive(Clone, Debug, EnumVariantMeta, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum VertexAttributeValues {
    // 32-bit scalar and vector formats.
    Float32(Vec<f32>),
    Sint32(Vec<i32>),
    Uint32(Vec<u32>),
    Float32x2(Vec<[f32; 2]>),
    Sint32x2(Vec<[i32; 2]>),
    Uint32x2(Vec<[u32; 2]>),
    Float32x3(Vec<[f32; 3]>),
    Sint32x3(Vec<[i32; 3]>),
    Uint32x3(Vec<[u32; 3]>),
    Float32x4(Vec<[f32; 4]>),
    Sint32x4(Vec<[i32; 4]>),
    Uint32x4(Vec<[u32; 4]>),
    // 16-bit packed formats; `Snorm`/`Unorm` variants correspond to the
    // normalized-integer [`VertexFormat`]s of the same name.
    Sint16x2(Vec<[i16; 2]>),
    Snorm16x2(Vec<[i16; 2]>),
    Uint16x2(Vec<[u16; 2]>),
    Unorm16x2(Vec<[u16; 2]>),
    Sint16x4(Vec<[i16; 4]>),
    Snorm16x4(Vec<[i16; 4]>),
    Uint16x4(Vec<[u16; 4]>),
    Unorm16x4(Vec<[u16; 4]>),
    // 8-bit packed formats.
    Sint8x2(Vec<[i8; 2]>),
    Snorm8x2(Vec<[i8; 2]>),
    Uint8x2(Vec<[u8; 2]>),
    Unorm8x2(Vec<[u8; 2]>),
    Sint8x4(Vec<[i8; 4]>),
    Snorm8x4(Vec<[i8; 4]>),
    Uint8x4(Vec<[u8; 4]>),
    Unorm8x4(Vec<[u8; 4]>),
}
impl VertexAttributeValues {
    /// Returns the number of vertices in this [`VertexAttributeValues`]. For a single
    /// mesh, all of the [`VertexAttributeValues`] must have the same length.
    #[expect(
        clippy::match_same_arms,
        reason = "Although the `values` binding on some match arms may have matching types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever."
    )]
    pub fn len(&self) -> usize {
        match self {
            VertexAttributeValues::Float32(values) => values.len(),
            VertexAttributeValues::Sint32(values) => values.len(),
            VertexAttributeValues::Uint32(values) => values.len(),
            VertexAttributeValues::Float32x2(values) => values.len(),
            VertexAttributeValues::Sint32x2(values) => values.len(),
            VertexAttributeValues::Uint32x2(values) => values.len(),
            VertexAttributeValues::Float32x3(values) => values.len(),
            VertexAttributeValues::Sint32x3(values) => values.len(),
            VertexAttributeValues::Uint32x3(values) => values.len(),
            VertexAttributeValues::Float32x4(values) => values.len(),
            VertexAttributeValues::Sint32x4(values) => values.len(),
            VertexAttributeValues::Uint32x4(values) => values.len(),
            VertexAttributeValues::Sint16x2(values) => values.len(),
            VertexAttributeValues::Snorm16x2(values) => values.len(),
            VertexAttributeValues::Uint16x2(values) => values.len(),
            VertexAttributeValues::Unorm16x2(values) => values.len(),
            VertexAttributeValues::Sint16x4(values) => values.len(),
            VertexAttributeValues::Snorm16x4(values) => values.len(),
            VertexAttributeValues::Uint16x4(values) => values.len(),
            VertexAttributeValues::Unorm16x4(values) => values.len(),
            VertexAttributeValues::Sint8x2(values) => values.len(),
            VertexAttributeValues::Snorm8x2(values) => values.len(),
            VertexAttributeValues::Uint8x2(values) => values.len(),
            VertexAttributeValues::Unorm8x2(values) => values.len(),
            VertexAttributeValues::Sint8x4(values) => values.len(),
            VertexAttributeValues::Snorm8x4(values) => values.len(),
            VertexAttributeValues::Uint8x4(values) => values.len(),
            VertexAttributeValues::Unorm8x4(values) => values.len(),
        }
    }
    /// Returns `true` if there are no vertices in this [`VertexAttributeValues`].
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns the values as float triples if possible.
    ///
    /// Returns `None` for every variant other than `Float32x3`; no conversion is attempted.
    pub fn as_float3(&self) -> Option<&[[f32; 3]]> {
        match self {
            VertexAttributeValues::Float32x3(values) => Some(values),
            _ => None,
        }
    }
    // TODO: add vertex format as parameter here and perform type conversions
    /// Flattens the [`VertexAttributeValues`] into a sequence of bytes. This is
    /// useful for serialization and sending to the GPU.
    ///
    /// Zero-copy: each arm reinterprets the backing `Vec`'s memory via
    /// `cast_slice` (imported elsewhere in this file; presumably
    /// `bytemuck::cast_slice` — confirm). Byte order is the host's native layout.
    #[expect(
        clippy::match_same_arms,
        reason = "Although the `values` binding on some match arms may have matching types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever."
    )]
    pub fn get_bytes(&self) -> &[u8] {
        match self {
            VertexAttributeValues::Float32(values) => cast_slice(values),
            VertexAttributeValues::Sint32(values) => cast_slice(values),
            VertexAttributeValues::Uint32(values) => cast_slice(values),
            VertexAttributeValues::Float32x2(values) => cast_slice(values),
            VertexAttributeValues::Sint32x2(values) => cast_slice(values),
            VertexAttributeValues::Uint32x2(values) => cast_slice(values),
            VertexAttributeValues::Float32x3(values) => cast_slice(values),
            VertexAttributeValues::Sint32x3(values) => cast_slice(values),
            VertexAttributeValues::Uint32x3(values) => cast_slice(values),
            VertexAttributeValues::Float32x4(values) => cast_slice(values),
            VertexAttributeValues::Sint32x4(values) => cast_slice(values),
            VertexAttributeValues::Uint32x4(values) => cast_slice(values),
            VertexAttributeValues::Sint16x2(values) => cast_slice(values),
            VertexAttributeValues::Snorm16x2(values) => cast_slice(values),
            VertexAttributeValues::Uint16x2(values) => cast_slice(values),
            VertexAttributeValues::Unorm16x2(values) => cast_slice(values),
            VertexAttributeValues::Sint16x4(values) => cast_slice(values),
            VertexAttributeValues::Snorm16x4(values) => cast_slice(values),
            VertexAttributeValues::Uint16x4(values) => cast_slice(values),
            VertexAttributeValues::Unorm16x4(values) => cast_slice(values),
            VertexAttributeValues::Sint8x2(values) => cast_slice(values),
            VertexAttributeValues::Snorm8x2(values) => cast_slice(values),
            VertexAttributeValues::Uint8x2(values) => cast_slice(values),
            VertexAttributeValues::Unorm8x2(values) => cast_slice(values),
            VertexAttributeValues::Sint8x4(values) => cast_slice(values),
            VertexAttributeValues::Snorm8x4(values) => cast_slice(values),
            VertexAttributeValues::Uint8x4(values) => cast_slice(values),
            VertexAttributeValues::Unorm8x4(values) => cast_slice(values),
        }
    }
}
// Maps each storage variant to the wgpu vertex format of the same name; this
// list must stay in one-to-one sync with the `VertexAttributeValues` variants.
impl From<&VertexAttributeValues> for VertexFormat {
    fn from(values: &VertexAttributeValues) -> Self {
        match values {
            VertexAttributeValues::Float32(_) => VertexFormat::Float32,
            VertexAttributeValues::Sint32(_) => VertexFormat::Sint32,
            VertexAttributeValues::Uint32(_) => VertexFormat::Uint32,
            VertexAttributeValues::Float32x2(_) => VertexFormat::Float32x2,
            VertexAttributeValues::Sint32x2(_) => VertexFormat::Sint32x2,
            VertexAttributeValues::Uint32x2(_) => VertexFormat::Uint32x2,
            VertexAttributeValues::Float32x3(_) => VertexFormat::Float32x3,
            VertexAttributeValues::Sint32x3(_) => VertexFormat::Sint32x3,
            VertexAttributeValues::Uint32x3(_) => VertexFormat::Uint32x3,
            VertexAttributeValues::Float32x4(_) => VertexFormat::Float32x4,
            VertexAttributeValues::Sint32x4(_) => VertexFormat::Sint32x4,
            VertexAttributeValues::Uint32x4(_) => VertexFormat::Uint32x4,
            VertexAttributeValues::Sint16x2(_) => VertexFormat::Sint16x2,
            VertexAttributeValues::Snorm16x2(_) => VertexFormat::Snorm16x2,
            VertexAttributeValues::Uint16x2(_) => VertexFormat::Uint16x2,
            VertexAttributeValues::Unorm16x2(_) => VertexFormat::Unorm16x2,
            VertexAttributeValues::Sint16x4(_) => VertexFormat::Sint16x4,
            VertexAttributeValues::Snorm16x4(_) => VertexFormat::Snorm16x4,
            VertexAttributeValues::Uint16x4(_) => VertexFormat::Uint16x4,
            VertexAttributeValues::Unorm16x4(_) => VertexFormat::Unorm16x4,
            VertexAttributeValues::Sint8x2(_) => VertexFormat::Sint8x2,
            VertexAttributeValues::Snorm8x2(_) => VertexFormat::Snorm8x2,
            VertexAttributeValues::Uint8x2(_) => VertexFormat::Uint8x2,
            VertexAttributeValues::Unorm8x2(_) => VertexFormat::Unorm8x2,
            VertexAttributeValues::Sint8x4(_) => VertexFormat::Sint8x4,
            VertexAttributeValues::Snorm8x4(_) => VertexFormat::Snorm8x4,
            VertexAttributeValues::Uint8x4(_) => VertexFormat::Uint8x4,
            VertexAttributeValues::Unorm8x4(_) => VertexFormat::Unorm8x4,
        }
    }
}
/// Describes how the vertex buffer is interpreted.
#[derive(Default, Clone, Debug, Hash, Eq, PartialEq)]
pub struct VertexBufferLayout {
    /// The stride, in bytes, between elements of this buffer.
    pub array_stride: BufferAddress,
    /// How often this vertex buffer is "stepped" forward.
    pub step_mode: VertexStepMode,
    /// The list of attributes which comprise a single vertex.
    pub attributes: Vec<VertexAttribute>,
}
impl VertexBufferLayout {
    /// Creates a new densely packed [`VertexBufferLayout`] from an iterator of vertex formats.
    /// Iteration order determines the `shader_location` and `offset` of the [`VertexAttributes`](VertexAttribute).
    /// The first iterated item will have a `shader_location` and `offset` of zero.
    /// The `array_stride` is the sum of the size of the iterated [`VertexFormats`](VertexFormat) (in bytes).
    pub fn from_vertex_formats<T: IntoIterator<Item = VertexFormat>>(
        step_mode: VertexStepMode,
        vertex_formats: T,
    ) -> Self {
        // Running byte offset; after the loop it doubles as the total stride.
        let mut running_offset = 0;
        let mut attributes = Vec::new();
        for (location, format) in vertex_formats.into_iter().enumerate() {
            attributes.push(VertexAttribute {
                format,
                offset: running_offset,
                shader_location: location as u32,
            });
            running_offset += format.size();
        }
        Self {
            array_stride: running_offset,
            step_mode,
            attributes,
        }
    }
    /// Returns a [`VertexBufferLayout`] with the shader location of every attribute offset by
    /// `location`.
    pub fn offset_locations_by(mut self, location: u32) -> Self {
        for attribute in &mut self.attributes {
            attribute.shader_location += location;
        }
        self
    }
}
/// Describes the layout of the mesh vertices in GPU memory.
///
/// At most one copy of a mesh vertex buffer layout ever exists in GPU memory at
/// once. Therefore, comparing these for equality requires only a single pointer
/// comparison, and this type's [`PartialEq`] and [`Hash`] implementations take
/// advantage of this. To that end, this type doesn't implement
/// [`bevy_derive::Deref`] or [`bevy_derive::DerefMut`] in order to reduce the
/// possibility of accidental deep comparisons, which would be needlessly
/// expensive.
#[derive(Clone, Debug)]
pub struct MeshVertexBufferLayoutRef(pub Arc<MeshVertexBufferLayout>);
/// Stores the single copy of each mesh vertex buffer layout.
#[derive(Clone, Default, Resource)]
pub struct MeshVertexBufferLayouts(HashSet<Arc<MeshVertexBufferLayout>>);
impl MeshVertexBufferLayouts {
    /// Inserts a new mesh vertex buffer layout in the store and returns a
    /// reference to it, reusing the existing reference if this mesh vertex
    /// buffer layout was already in the store.
    pub fn insert(&mut self, layout: MeshVertexBufferLayout) -> MeshVertexBufferLayoutRef {
        // Because the special `PartialEq` and `Hash` implementations that
        // compare by pointer are on `MeshVertexBufferLayoutRef`, not on
        // `Arc<MeshVertexBufferLayout>`, this compares the mesh vertex buffer
        // structurally, not by pointer.
        // The closure (and its clone/allocation) only runs when the layout was
        // not already interned; otherwise the stored `Arc` is reused.
        MeshVertexBufferLayoutRef(
            self.0
                .get_or_insert_with(&layout, |layout| Arc::new(layout.clone()))
                .clone(),
        )
    }
}
impl PartialEq for MeshVertexBufferLayoutRef {
    // O(1) pointer equality; sound because interning via
    // `MeshVertexBufferLayouts::insert` yields one `Arc` per distinct layout.
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.0, &other.0)
    }
}
impl Eq for MeshVertexBufferLayoutRef {}
impl Hash for MeshVertexBufferLayoutRef {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the address of the underlying data, so two layouts that share the same
        // `MeshVertexBufferLayout` will have the same hash.
        (Arc::as_ptr(&self.0) as usize).hash(state);
    }
}
use core::f32::consts::FRAC_PI_2;
use core::mem;
use crate::{primitives::dim3::triangle3d, Indices, Mesh, PerimeterSegment, VertexAttributeValues};
use bevy_asset::RenderAssetUsages;
use super::{Extrudable, MeshBuilder, Meshable};
use bevy_math::prelude::Polyline2d;
use bevy_math::{
ops,
primitives::{
Annulus, Capsule2d, Circle, CircularSector, CircularSegment, ConvexPolygon, Ellipse,
Primitive2d, Rectangle, RegularPolygon, Rhombus, Ring, Segment2d, Triangle2d, Triangle3d,
WindingOrder,
},
FloatExt, Vec2, Vec3,
};
use bevy_reflect::prelude::*;
use wgpu_types::PrimitiveTopology;
/// A builder used for creating a [`Mesh`] with a [`Circle`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct CircleMeshBuilder {
    /// The [`Circle`] shape.
    pub circle: Circle,
    /// The number of vertices used for the circle mesh.
    /// The default is `32`.
    #[doc(alias = "vertices")]
    pub resolution: u32,
}
impl Default for CircleMeshBuilder {
    fn default() -> Self {
        CircleMeshBuilder {
            circle: Circle::default(),
            resolution: 32,
        }
    }
}
impl CircleMeshBuilder {
    /// Creates a new [`CircleMeshBuilder`] from a given radius and vertex count.
    #[inline]
    pub const fn new(radius: f32, resolution: u32) -> Self {
        Self {
            circle: Circle { radius },
            resolution,
        }
    }
    /// Sets the number of vertices used for the circle mesh.
    #[inline]
    #[doc(alias = "vertices")]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }
}
impl MeshBuilder for CircleMeshBuilder {
    fn build(&self) -> Mesh {
        // A circle is an ellipse whose half-axes both equal the radius; reuse
        // the ellipse mesh generation.
        let radius = self.circle.radius;
        Ellipse::new(radius, radius)
            .mesh()
            .resolution(self.resolution)
            .build()
    }
}
impl Extrudable for CircleMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // Walk every rim vertex, then close the loop by returning to vertex 0.
        let mut indices: Vec<u32> = (0..self.resolution).collect();
        indices.push(0);
        vec![PerimeterSegment::Smooth {
            first_normal: Vec2::Y,
            last_normal: Vec2::Y,
            indices,
        }]
    }
}
impl Meshable for Circle {
    type Output = CircleMeshBuilder;
    fn mesh(&self) -> Self::Output {
        let mut builder = CircleMeshBuilder::default();
        builder.circle = *self;
        builder
    }
}
impl From<Circle> for Mesh {
    fn from(circle: Circle) -> Self {
        circle.mesh().build()
    }
}
/// Specifies how to generate UV-mappings for the [`CircularSector`] and [`CircularSegment`] shapes.
///
/// Currently the only variant is `Mask`, which is good for showing a portion of a texture that includes
/// the entire circle, particularly when the same texture will be displayed with different fractions of a
/// complete circle.
///
/// It's expected that more will be added in the future, such as a variant that causes the texture to be
/// scaled to fit the bounding box of the shape, which would be good for packed textures only including the
/// portion of the circle that is needed to display.
#[derive(Copy, Clone, Debug, PartialEq, Reflect)]
#[reflect(Default, Debug, Clone)]
#[non_exhaustive]
pub enum CircularMeshUvMode {
    /// Treats the shape as a mask over a circle of equal size and radius,
    /// with the center of the circle at the center of the texture.
    Mask {
        /// Angle by which to rotate the shape when generating the UV map.
        angle: f32,
    },
}
impl Default for CircularMeshUvMode {
    /// Defaults to [`CircularMeshUvMode::Mask`] with no rotation.
    fn default() -> Self {
        CircularMeshUvMode::Mask { angle: 0.0 }
    }
}
/// A builder used for creating a [`Mesh`] with a [`CircularSector`] shape.
///
/// The resulting mesh will have a UV-map such that the center of the circle is
/// at the center of the texture.
#[derive(Clone, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct CircularSectorMeshBuilder {
    /// The sector shape.
    pub sector: CircularSector,
    /// The number of vertices used for the arc portion of the sector mesh.
    /// The default is `32`.
    #[doc(alias = "vertices")]
    pub resolution: u32,
    /// The UV mapping mode. See [`CircularMeshUvMode`] for the available options.
    pub uv_mode: CircularMeshUvMode,
}
impl Default for CircularSectorMeshBuilder {
    fn default() -> Self {
        Self {
            sector: CircularSector::default(),
            resolution: 32,
            uv_mode: CircularMeshUvMode::default(),
        }
    }
}
impl CircularSectorMeshBuilder {
    /// Creates a new [`CircularSectorMeshBuilder`] from a given sector,
    /// keeping the default resolution and UV mode.
    #[inline]
    pub fn new(sector: CircularSector) -> Self {
        Self {
            sector,
            ..Self::default()
        }
    }
    /// Sets the number of vertices used for the sector mesh.
    #[inline]
    #[doc(alias = "vertices")]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }
    /// Sets the uv mode used for the sector mesh.
    #[inline]
    pub const fn uv_mode(mut self, uv_mode: CircularMeshUvMode) -> Self {
        self.uv_mode = uv_mode;
        self
    }
}
impl MeshBuilder for CircularSectorMeshBuilder {
    fn build(&self) -> Mesh {
        // One center vertex plus `resolution` arc vertices, fanned into
        // `resolution - 1` triangles from the center.
        let resolution = self.resolution as usize;
        let mut indices = Vec::with_capacity((resolution - 1) * 3);
        let mut positions = Vec::with_capacity(resolution + 1);
        // Flat 2D mesh: every vertex faces +Z.
        let normals = vec![[0.0, 0.0, 1.0]; resolution + 1];
        let mut uvs = Vec::with_capacity(resolution + 1);
        let CircularMeshUvMode::Mask { angle: uv_angle } = self.uv_mode;
        // Push the center of the circle.
        positions.push([0.0; 3]);
        uvs.push([0.5; 2]);
        // The sector is symmetric about +Y: sweep from pi/2 - half_angle to
        // pi/2 + half_angle.
        let first_angle = FRAC_PI_2 - self.sector.half_angle();
        let last_angle = FRAC_PI_2 + self.sector.half_angle();
        // NOTE(review): assumes `resolution >= 2` — `resolution == 1` divides
        // by zero below (NaN positions) and `resolution == 0` underflows in
        // debug builds; confirm callers guarantee this.
        let last_i = (self.resolution - 1) as f32;
        for i in 0..self.resolution {
            let angle = f32::lerp(first_angle, last_angle, i as f32 / last_i);
            // Compute the vertex
            let vertex = self.sector.radius() * Vec2::from_angle(angle);
            // Compute the UV coordinate by taking the modified angle's unit vector, negating the Y axis, and rescaling and centering it at (0.5, 0.5).
            // We accomplish the Y axis flip by negating the angle.
            let uv =
                Vec2::from_angle(-(angle + uv_angle)).mul_add(Vec2::splat(0.5), Vec2::splat(0.5));
            positions.push([vertex.x, vertex.y, 0.0]);
            uvs.push([uv.x, uv.y]);
        }
        for i in 1..self.resolution {
            // Index 0 is the center.
            indices.extend_from_slice(&[0, i, i + 1]);
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        .with_inserted_indices(Indices::U32(indices))
    }
}
impl Extrudable for CircularSectorMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // The perimeter is two straight edges through the center (flat) plus
        // the arc itself (smooth).
        let (sin, cos) = ops::sin_cos(self.sector.arc.half_angle);
        // Outward normals at the arc's two endpoints; the sector is symmetric
        // about +Y, so the radial direction at pi/2 -/+ half_angle is
        // (+/-sin, cos).
        let first_normal = Vec2::new(sin, cos);
        let last_normal = Vec2::new(-sin, cos);
        vec![
            PerimeterSegment::Flat {
                // Vertex 0 is the center; 1 and `resolution` are the first and
                // last arc vertices (see `build`).
                indices: vec![self.resolution, 0, 1],
            },
            PerimeterSegment::Smooth {
                first_normal,
                last_normal,
                indices: (1..=self.resolution).collect(),
            },
        ]
    }
}
impl Meshable for CircularSector {
    type Output = CircularSectorMeshBuilder;
    fn mesh(&self) -> Self::Output {
        CircularSectorMeshBuilder {
            sector: *self,
            ..Default::default()
        }
    }
}
impl From<CircularSector> for Mesh {
    /// Converts this sector into a [`Mesh`] using a default [`CircularSectorMeshBuilder`].
    ///
    /// See the documentation of [`CircularSectorMeshBuilder`] for more details.
    fn from(sector: CircularSector) -> Self {
        sector.mesh().build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`CircularSegment`] shape.
///
/// The resulting mesh will have a UV-map such that the center of the circle is
/// at the center of the texture.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct CircularSegmentMeshBuilder {
    /// The segment shape.
    pub segment: CircularSegment,
    /// The number of vertices used for the arc portion of the segment mesh.
    /// The default is `32`.
    #[doc(alias = "vertices")]
    pub resolution: u32,
    /// The UV mapping mode. See [`CircularMeshUvMode`] for the available options.
    pub uv_mode: CircularMeshUvMode,
}
impl Default for CircularSegmentMeshBuilder {
    fn default() -> Self {
        Self {
            segment: CircularSegment::default(),
            resolution: 32,
            uv_mode: CircularMeshUvMode::default(),
        }
    }
}
impl CircularSegmentMeshBuilder {
    /// Creates a new [`CircularSegmentMeshBuilder`] from a given segment,
    /// keeping the default resolution and UV mode.
    #[inline]
    pub fn new(segment: CircularSegment) -> Self {
        Self {
            segment,
            ..Self::default()
        }
    }
    /// Sets the number of vertices used for the segment mesh.
    #[inline]
    #[doc(alias = "vertices")]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }
    /// Sets the uv mode used for the segment mesh.
    #[inline]
    pub const fn uv_mode(mut self, uv_mode: CircularMeshUvMode) -> Self {
        self.uv_mode = uv_mode;
        self
    }
}
impl MeshBuilder for CircularSegmentMeshBuilder {
    fn build(&self) -> Mesh {
        // One chord-midpoint vertex plus `resolution` arc vertices, fanned into
        // `resolution - 1` triangles from the midpoint.
        let resolution = self.resolution as usize;
        let mut indices = Vec::with_capacity((resolution - 1) * 3);
        let mut positions = Vec::with_capacity(resolution + 1);
        // Flat 2D mesh: every vertex faces +Z.
        let normals = vec![[0.0, 0.0, 1.0]; resolution + 1];
        let mut uvs = Vec::with_capacity(resolution + 1);
        let CircularMeshUvMode::Mask { angle: uv_angle } = self.uv_mode;
        // Push the center of the chord.
        let midpoint_vertex = self.segment.chord_midpoint();
        positions.push([midpoint_vertex.x, midpoint_vertex.y, 0.0]);
        // Compute the UV coordinate of the midpoint vertex.
        // This is similar to the computation inside the loop for the arc vertices,
        // but the vertex angle is PI/2, and we must scale by the ratio of the apothem to the radius
        // to correctly position the vertex.
        let midpoint_uv = Vec2::from_angle(-uv_angle - FRAC_PI_2).mul_add(
            Vec2::splat(0.5 * (self.segment.apothem() / self.segment.radius())),
            Vec2::splat(0.5),
        );
        uvs.push([midpoint_uv.x, midpoint_uv.y]);
        // The segment is symmetric about +Y: sweep from pi/2 - half_angle to
        // pi/2 + half_angle.
        let first_angle = FRAC_PI_2 - self.segment.half_angle();
        let last_angle = FRAC_PI_2 + self.segment.half_angle();
        // NOTE(review): assumes `resolution >= 2` — `resolution == 1` divides
        // by zero below (NaN positions) and `resolution == 0` underflows in
        // debug builds; confirm callers guarantee this.
        let last_i = (self.resolution - 1) as f32;
        for i in 0..self.resolution {
            let angle = f32::lerp(first_angle, last_angle, i as f32 / last_i);
            // Compute the vertex
            let vertex = self.segment.radius() * Vec2::from_angle(angle);
            // Compute the UV coordinate by taking the modified angle's unit vector, negating the Y axis, and rescaling and centering it at (0.5, 0.5).
            // We accomplish the Y axis flip by negating the angle.
            let uv =
                Vec2::from_angle(-(angle + uv_angle)).mul_add(Vec2::splat(0.5), Vec2::splat(0.5));
            positions.push([vertex.x, vertex.y, 0.0]);
            uvs.push([uv.x, uv.y]);
        }
        for i in 1..self.resolution {
            // Index 0 is the midpoint of the chord.
            indices.extend_from_slice(&[0, i, i + 1]);
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        .with_inserted_indices(Indices::U32(indices))
    }
}
impl Extrudable for CircularSegmentMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // The perimeter is the straight chord (flat) plus the arc (smooth).
        let (sin, cos) = ops::sin_cos(self.segment.arc.half_angle);
        // Outward normals at the arc's two endpoints; the segment is symmetric
        // about +Y, so the radial direction at pi/2 -/+ half_angle is
        // (+/-sin, cos).
        let first_normal = Vec2::new(sin, cos);
        let last_normal = Vec2::new(-sin, cos);
        vec![
            PerimeterSegment::Flat {
                // Vertex 0 is the chord midpoint; 1 and `resolution` are the
                // first and last arc vertices (see `build`).
                indices: vec![self.resolution, 0, 1],
            },
            PerimeterSegment::Smooth {
                first_normal,
                last_normal,
                indices: (1..=self.resolution).collect(),
            },
        ]
    }
}
impl Meshable for CircularSegment {
    type Output = CircularSegmentMeshBuilder;
    fn mesh(&self) -> Self::Output {
        CircularSegmentMeshBuilder {
            segment: *self,
            ..Default::default()
        }
    }
}
impl From<CircularSegment> for Mesh {
    /// Converts this segment into a [`Mesh`] using a default [`CircularSegmentMeshBuilder`].
    ///
    /// See the documentation of [`CircularSegmentMeshBuilder`] for more details.
    fn from(segment: CircularSegment) -> Self {
        segment.mesh().build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`ConvexPolygon`] shape.
///
/// You must verify that the `vertices` are not concave when constructing this type. You can
/// guarantee this by creating a [`ConvexPolygon`] first, then calling [`ConvexPolygon::mesh()`].
#[derive(Clone, Debug, Reflect)]
#[reflect(Debug, Clone)]
pub struct ConvexPolygonMeshBuilder {
    pub vertices: Vec<Vec2>,
}
impl Meshable for ConvexPolygon {
    type Output = ConvexPolygonMeshBuilder;
    fn mesh(&self) -> Self::Output {
        ConvexPolygonMeshBuilder {
            vertices: self.vertices().to_vec(),
        }
    }
}
impl MeshBuilder for ConvexPolygonMeshBuilder {
    fn build(&self) -> Mesh {
        let vertex_count = self.vertices.len();
        // Lift each 2D vertex into the z = 0 plane.
        let positions: Vec<[f32; 3]> = self
            .vertices
            .iter()
            .map(|vertex| [vertex.x, vertex.y, 0.0])
            .collect();
        // Fan triangulation anchored at vertex 0; valid because the polygon is convex.
        let mut indices = Vec::with_capacity((vertex_count - 2) * 3);
        for i in 2..vertex_count as u32 {
            indices.extend_from_slice(&[0, i - 1, i]);
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_indices(Indices::U32(indices))
    }
}
impl Extrudable for ConvexPolygonMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // Walk the outline in order and close the loop back at vertex 0.
        let mut indices: Vec<u32> = (0..self.vertices.len() as u32).collect();
        indices.push(0);
        vec![PerimeterSegment::Flat { indices }]
    }
}
impl From<ConvexPolygon> for Mesh {
    fn from(polygon: ConvexPolygon) -> Self {
        polygon.mesh().build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`RegularPolygon`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct RegularPolygonMeshBuilder {
circumradius: f32,
sides: u32,
}
impl Default for RegularPolygonMeshBuilder {
/// Returns the default [`RegularPolygonMeshBuilder`] with six sides (a hexagon) and a circumradius of `0.5`.
fn default() -> Self {
Self {
circumradius: 0.5,
sides: 6,
}
}
}
impl RegularPolygonMeshBuilder {
/// Creates a new [`RegularPolygonMeshBuilder`] from the radius of a circumcircle and a number
/// of sides.
///
/// # Panics
///
/// Panics in debug mode if `circumradius` is negative, or if `sides` is less than 3.
pub const fn new(circumradius: f32, sides: u32) -> Self {
debug_assert!(
circumradius.is_sign_positive(),
"polygon has a negative radius"
);
debug_assert!(sides > 2, "polygon has less than 3 sides");
Self {
circumradius,
sides,
}
}
}
impl Meshable for RegularPolygon {
type Output = RegularPolygonMeshBuilder;
fn mesh(&self) -> Self::Output {
Self::Output {
circumradius: self.circumcircle.radius,
sides: self.sides,
}
}
}
impl MeshBuilder for RegularPolygonMeshBuilder {
fn build(&self) -> Mesh {
// The ellipse mesh is just a regular polygon with two radii
Ellipse::new(self.circumradius, self.circumradius)
.mesh()
.resolution(self.sides)
.build()
}
}
impl Extrudable for RegularPolygonMeshBuilder {
fn perimeter(&self) -> Vec<PerimeterSegment> {
vec![PerimeterSegment::Flat {
indices: (0..self.sides).chain([0]).collect(),
}]
}
}
impl From<RegularPolygon> for Mesh {
fn from(polygon: RegularPolygon) -> Self {
polygon.mesh().build()
}
}
/// A builder used for creating a [`Mesh`] with an [`Ellipse`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct EllipseMeshBuilder {
    /// The [`Ellipse`] shape.
    pub ellipse: Ellipse,
    /// The number of vertices used for the ellipse mesh.
    /// The default is `32`.
    #[doc(alias = "vertices")]
    pub resolution: u32,
}
impl Default for EllipseMeshBuilder {
    fn default() -> Self {
        EllipseMeshBuilder {
            ellipse: Ellipse::default(),
            resolution: 32,
        }
    }
}
impl EllipseMeshBuilder {
    /// Creates a new [`EllipseMeshBuilder`] from a given half width and half height and a vertex count.
    #[inline]
    pub const fn new(half_width: f32, half_height: f32, resolution: u32) -> Self {
        Self {
            ellipse: Ellipse::new(half_width, half_height),
            resolution,
        }
    }
    /// Sets the number of vertices used for the ellipse mesh.
    #[inline]
    #[doc(alias = "vertices")]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }
}
impl MeshBuilder for EllipseMeshBuilder {
    fn build(&self) -> Mesh {
        let resolution = self.resolution as usize;
        // Flat 2D mesh: every vertex faces +Z.
        let normals = vec![[0.0, 0.0, 1.0]; resolution];
        let mut positions = Vec::with_capacity(resolution);
        let mut uvs = Vec::with_capacity(resolution);
        // Start at pi/2 so there is a vertex at the top (sin is 1.0, cos is
        // 0.0), then step through one full turn.
        let angle_step = core::f32::consts::TAU / self.resolution as f32;
        for i in 0..self.resolution {
            let theta = FRAC_PI_2 + i as f32 * angle_step;
            let (sin, cos) = ops::sin_cos(theta);
            positions.push([
                cos * self.ellipse.half_size.x,
                sin * self.ellipse.half_size.y,
                0.0,
            ]);
            // Map the unit circle onto [0, 1]^2 with the V axis flipped.
            uvs.push([0.5 * (cos + 1.0), 1.0 - 0.5 * (sin + 1.0)]);
        }
        // Fan triangulation anchored at vertex 0.
        let mut indices = Vec::with_capacity((resolution - 2) * 3);
        for i in 1..(self.resolution - 1) {
            indices.extend_from_slice(&[0, i, i + 1]);
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        .with_inserted_indices(Indices::U32(indices))
    }
}
impl Extrudable for EllipseMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // Walk every rim vertex, then close the loop by returning to vertex 0.
        let mut indices: Vec<u32> = (0..self.resolution).collect();
        indices.push(0);
        vec![PerimeterSegment::Smooth {
            first_normal: Vec2::Y,
            last_normal: Vec2::Y,
            indices,
        }]
    }
}
impl Meshable for Ellipse {
    type Output = EllipseMeshBuilder;
    fn mesh(&self) -> Self::Output {
        let mut builder = EllipseMeshBuilder::default();
        builder.ellipse = *self;
        builder
    }
}
impl From<Ellipse> for Mesh {
    fn from(ellipse: Ellipse) -> Self {
        ellipse.mesh().build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`Segment2d`].
pub struct Segment2dMeshBuilder {
    /// The [`Segment2d`] shape.
    pub segment: Segment2d,
}
impl Segment2dMeshBuilder {
    /// Creates a new [`Segment2dMeshBuilder`] from a given segment.
    #[inline]
    pub const fn new(line: Segment2d) -> Self {
        Self { segment: line }
    }
}
impl MeshBuilder for Segment2dMeshBuilder {
    fn build(&self) -> Mesh {
        // Lift both endpoints into the z = 0 plane and join them with one line.
        let positions: Vec<_> = self
            .segment
            .vertices
            .iter()
            .map(|vertex| vertex.extend(0.0))
            .collect();
        Mesh::new(PrimitiveTopology::LineList, RenderAssetUsages::default())
            .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
            .with_inserted_indices(Indices::U32(vec![0, 1]))
    }
}
impl Meshable for Segment2d {
    type Output = Segment2dMeshBuilder;
    fn mesh(&self) -> Self::Output {
        Segment2dMeshBuilder::new(*self)
    }
}
impl From<Segment2d> for Mesh {
    /// Converts this segment into a [`Mesh`] using a default [`Segment2dMeshBuilder`].
    fn from(segment: Segment2d) -> Self {
        segment.mesh().build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`Polyline2d`] shape.
#[derive(Clone, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Polyline2dMeshBuilder {
    // The polyline whose vertices become the line-list mesh.
    polyline: Polyline2d,
}
impl MeshBuilder for Polyline2dMeshBuilder {
    /// Builds a [`PrimitiveTopology::LineList`] mesh with one line segment per
    /// adjacent pair of polyline vertices.
    fn build(&self) -> Mesh {
        // Lift each 2D vertex into the z = 0 plane.
        let positions: Vec<_> = self
            .polyline
            .vertices
            .iter()
            .map(|v| v.extend(0.0))
            .collect();
        // `saturating_sub` guards against integer underflow (`0u32 - 1`) when
        // the polyline has no vertices: previously that panicked in debug
        // builds and produced a wrapped-around, near-u32::MAX range in release
        // builds. An empty polyline now yields an empty (but valid) mesh.
        let indices = Indices::U32(
            (0..(self.polyline.vertices.len() as u32).saturating_sub(1))
                .flat_map(|i| [i, i + 1])
                .collect(),
        );
        Mesh::new(PrimitiveTopology::LineList, RenderAssetUsages::default())
            .with_inserted_indices(indices)
            .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
    }
}
impl Meshable for Polyline2d {
    type Output = Polyline2dMeshBuilder;
    fn mesh(&self) -> Self::Output {
        Polyline2dMeshBuilder {
            polyline: self.clone(),
        }
    }
}
impl From<Polyline2d> for Mesh {
    /// Converts this polyline into a [`Mesh`] using a default [`Polyline2dMeshBuilder`].
    fn from(polyline: Polyline2d) -> Self {
        polyline.mesh().build()
    }
}
/// A builder for creating a [`Mesh`] with an [`Annulus`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct AnnulusMeshBuilder {
    /// The [`Annulus`] shape.
    pub annulus: Annulus,
    /// The number of vertices used in constructing each concentric circle of the annulus mesh.
    /// The default is `32`.
    pub resolution: u32,
}
impl Default for AnnulusMeshBuilder {
    fn default() -> Self {
        Self {
            annulus: Annulus::default(),
            resolution: 32,
        }
    }
}
impl AnnulusMeshBuilder {
    /// Create an [`AnnulusMeshBuilder`] with the given inner radius, outer radius, and angular vertex count.
    #[inline]
    pub fn new(inner_radius: f32, outer_radius: f32, resolution: u32) -> Self {
        Self {
            annulus: Annulus::new(inner_radius, outer_radius),
            resolution,
        }
    }
    /// Sets the number of vertices used in constructing the concentric circles of the annulus mesh.
    // Made `const` for consistency with the other 2D builders' `resolution`
    // setters (e.g. `CircleMeshBuilder::resolution`); backward-compatible since
    // a `const fn` can still be called at runtime.
    #[inline]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }
}
impl MeshBuilder for AnnulusMeshBuilder {
    /// Builds the annulus as a closed strip of quads between two concentric
    /// circles in the `Z = 0` plane.
    fn build(&self) -> Mesh {
        let inner_radius = self.annulus.inner_circle.radius;
        let outer_radius = self.annulus.outer_circle.radius;
        let num_vertices = (self.resolution as usize + 1) * 2;
        let mut indices = Vec::with_capacity(self.resolution as usize * 6);
        let mut positions = Vec::with_capacity(num_vertices);
        let mut uvs = Vec::with_capacity(num_vertices);
        // The mesh lies in the Z = 0 plane, so every normal is +Z.
        let normals = vec![[0.0, 0.0, 1.0]; num_vertices];
        // We have one more set of vertices than might be naïvely expected;
        // the vertices at `start_angle` are duplicated for the purposes of UV
        // mapping. Here, each iteration places a pair of vertices at a fixed
        // angle from the center of the annulus.
        let start_angle = FRAC_PI_2;
        let step = core::f32::consts::TAU / self.resolution as f32;
        // NOTE(review): a `resolution` of 0 would divide by zero above and
        // panic on the `%` below — callers are expected to pass a positive value.
        for i in 0..=self.resolution {
            // `i % resolution` folds the final iteration back onto the start
            // angle so the ring closes exactly, while still emitting a distinct
            // vertex pair (with distinct UVs) for the seam.
            let theta = start_angle + (i % self.resolution) as f32 * step;
            let (sin, cos) = ops::sin_cos(theta);
            let inner_pos = [cos * inner_radius, sin * inner_radius, 0.];
            let outer_pos = [cos * outer_radius, sin * outer_radius, 0.];
            positions.push(inner_pos);
            positions.push(outer_pos);
            // The first UV direction is radial and the second is angular;
            // i.e., a single UV rectangle is stretched around the annulus, with
            // its top and bottom meeting as the circle closes. Lines of constant
            // U map to circles, and lines of constant V map to radial line segments.
            let inner_uv = [0., i as f32 / self.resolution as f32];
            let outer_uv = [1., i as f32 / self.resolution as f32];
            uvs.push(inner_uv);
            uvs.push(outer_uv);
        }
        // Adjacent pairs of vertices form two triangles with each other; here,
        // we are just making sure that they both have the right orientation,
        // which is the CCW order of
        // `inner_vertex` -> `outer_vertex` -> `next_outer` -> `next_inner`
        for i in 0..self.resolution {
            let inner_vertex = 2 * i;
            let outer_vertex = 2 * i + 1;
            let next_inner = inner_vertex + 2;
            let next_outer = outer_vertex + 2;
            indices.extend_from_slice(&[inner_vertex, outer_vertex, next_outer]);
            indices.extend_from_slice(&[next_outer, next_inner, inner_vertex]);
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        .with_inserted_indices(Indices::U32(indices))
    }
}
impl Extrudable for AnnulusMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        let vert_count = 2 * self.resolution;
        // In `AnnulusMeshBuilder::build`, even indices are inner-circle
        // vertices and odd indices are outer-circle vertices. Each loop is
        // closed by chaining its first index again; the inner loop is reversed
        // so the outside of the extrusion lies to the right of the walk.
        vec![
            PerimeterSegment::Smooth {
                first_normal: Vec2::NEG_Y,
                last_normal: Vec2::NEG_Y,
                indices: (0..vert_count).step_by(2).chain([0]).rev().collect(), // Inner hole
            },
            PerimeterSegment::Smooth {
                first_normal: Vec2::Y,
                last_normal: Vec2::Y,
                indices: (1..vert_count).step_by(2).chain([1]).collect(), // Outer perimeter
            },
        ]
    }
}
impl Meshable for Annulus {
type Output = AnnulusMeshBuilder;
fn mesh(&self) -> Self::Output {
AnnulusMeshBuilder {
annulus: *self,
..Default::default()
}
}
}
impl From<Annulus> for Mesh {
    /// Converts this annulus into a [`Mesh`] using a default [`AnnulusMeshBuilder`].
    fn from(annulus: Annulus) -> Self {
        let builder = annulus.mesh();
        builder.build()
    }
}
/// A builder for creating a [`Mesh`] with an [`Rhombus`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct RhombusMeshBuilder {
    // Half the horizontal (x) and vertical (y) diagonal lengths of the rhombus.
    half_diagonals: Vec2,
}
impl Default for RhombusMeshBuilder {
    /// Returns the default [`RhombusMeshBuilder`] with a half-horizontal and half-vertical diagonal of `0.5`.
    fn default() -> Self {
        let half_diagonals = Vec2::splat(0.5);
        Self { half_diagonals }
    }
}
impl RhombusMeshBuilder {
    /// Creates a new [`RhombusMeshBuilder`] from a horizontal and vertical diagonal size.
    ///
    /// # Panics
    ///
    /// Panics in debug mode if `horizontal_diagonal` or `vertical_diagonal` is negative.
    pub const fn new(horizontal_diagonal: f32, vertical_diagonal: f32) -> Self {
        debug_assert!(
            horizontal_diagonal >= 0.0,
            "rhombus has a negative horizontal size",
        );
        debug_assert!(
            vertical_diagonal >= 0.0,
            "rhombus has a negative vertical size"
        );
        // Store half-lengths; the mesh is centered on the origin.
        let half_diagonals = Vec2::new(horizontal_diagonal / 2.0, vertical_diagonal / 2.0);
        Self { half_diagonals }
    }
}
impl MeshBuilder for RhombusMeshBuilder {
fn build(&self) -> Mesh {
let [hhd, vhd] = [self.half_diagonals.x, self.half_diagonals.y];
let positions = vec![
[hhd, 0.0, 0.0],
[0.0, vhd, 0.0],
[-hhd, 0.0, 0.0],
[0.0, -vhd, 0.0],
];
let normals = vec![[0.0, 0.0, 1.0]; 4];
let uvs = vec![[1.0, 0.5], [0.5, 0.0], [0.0, 0.5], [0.5, 1.0]];
let indices = Indices::U32(vec![2, 0, 1, 2, 3, 0]);
Mesh::new(
PrimitiveTopology::TriangleList,
RenderAssetUsages::default(),
)
.with_inserted_indices(indices)
.with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
.with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
.with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
}
}
impl Extrudable for RhombusMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // Walk the four corners in order and close the loop back at vertex 0.
        let indices = vec![0, 1, 2, 3, 0];
        vec![PerimeterSegment::Flat { indices }]
    }
}
impl Meshable for Rhombus {
    type Output = RhombusMeshBuilder;

    /// Creates a [`RhombusMeshBuilder`] sharing this rhombus' half-diagonals.
    fn mesh(&self) -> Self::Output {
        let half_diagonals = self.half_diagonals;
        Self::Output { half_diagonals }
    }
}
impl From<Rhombus> for Mesh {
    /// Converts this rhombus into a [`Mesh`] using a default [`RhombusMeshBuilder`].
    fn from(rhombus: Rhombus) -> Self {
        let builder = rhombus.mesh();
        builder.build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`Triangle2d`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Triangle2dMeshBuilder {
    // The triangle whose three vertices form the mesh.
    triangle: Triangle2d,
}
impl Triangle2dMeshBuilder {
    /// Creates a new [`Triangle2dMeshBuilder`] from the points `a`, `b`, and `c`.
    pub const fn new(a: Vec2, b: Vec2, c: Vec2) -> Self {
        let triangle = Triangle2d::new(a, b, c);
        Self { triangle }
    }
}
impl Meshable for Triangle2d {
    type Output = Triangle2dMeshBuilder;

    /// Creates a [`Triangle2dMeshBuilder`] from a copy of this triangle.
    fn mesh(&self) -> Self::Output {
        let triangle = *self;
        Self::Output { triangle }
    }
}
impl MeshBuilder for Triangle2dMeshBuilder {
    /// Builds a single-triangle mesh in the `Z = 0` plane.
    ///
    /// Clockwise triangles have their index order flipped so the emitted
    /// face winding is counterclockwise.
    fn build(&self) -> Mesh {
        // Lift the 2D vertices into 3D.
        let vertices_3d = self.triangle.vertices.map(|v| v.extend(0.));
        let positions = Vec::from(vertices_3d);
        let normals = vec![[0.0, 0.0, 1.0]; 3];
        // Reuse the 3D triangle UV mapping for the lifted vertices.
        let lifted = Triangle3d::new(vertices_3d[0], vertices_3d[1], vertices_3d[2]);
        let uvs = Vec::from(triangle3d::uv_coords(&lifted));
        let indices = match self.triangle.winding_order() {
            WindingOrder::CounterClockwise => Indices::U32(vec![0, 1, 2]),
            _ => Indices::U32(vec![2, 1, 0]),
        };
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(indices)
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Extrudable for Triangle2dMeshBuilder {
    fn perimeter(&self) -> Vec<PerimeterSegment> {
        // Walk the perimeter so the outside is to the right; clockwise
        // triangles are traversed in reverse to keep that invariant.
        let indices = match self.triangle.winding_order() {
            WindingOrder::CounterClockwise => vec![0, 1, 2, 0],
            _ => vec![2, 1, 0, 2],
        };
        vec![PerimeterSegment::Flat { indices }]
    }
}
impl From<Triangle2d> for Mesh {
    /// Converts this triangle into a [`Mesh`] using a default [`Triangle2dMeshBuilder`].
    fn from(triangle: Triangle2d) -> Self {
        let builder = triangle.mesh();
        builder.build()
    }
}
/// A builder used for creating a [`Mesh`] with a [`Rectangle`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct RectangleMeshBuilder {
    // Half the width (x) and height (y) of the rectangle.
    half_size: Vec2,
}
impl Default for RectangleMeshBuilder {
    /// Returns the default [`RectangleMeshBuilder`] with a half-width and half-height of `0.5`.
    fn default() -> Self {
        let half_size = Vec2::splat(0.5);
        Self { half_size }
    }
}
impl RectangleMeshBuilder {
/// Creates a new [`RectangleMeshBuilder`] from a full width and height.
///
/// # Panics
///
/// Panics in debug mode if `width` or `height` is negative.
pub const fn new(width: f32, height: f32) -> Self {
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/extrusion.rs | crates/bevy_mesh/src/primitives/extrusion.rs | use bevy_math::{
primitives::{Annulus, Capsule2d, Circle, Ellipse, Extrusion, Primitive2d},
Vec2, Vec3,
};
use super::{MeshBuilder, Meshable};
use crate::{Indices, Mesh, PrimitiveTopology, VertexAttributeValues};
/// A type representing a segment of the perimeter of an extrudable mesh.
///
/// Returned by [`Extrudable::perimeter`] to describe how each part of the
/// mantel of the extrusion is shaded.
pub enum PerimeterSegment {
    /// This segment of the perimeter will be shaded smooth.
    ///
    /// This has the effect of rendering the segment's faces with softened edges, so it is appropriate for curved shapes.
    ///
    /// The normals for the vertices that are part of this segment will be calculated based on the positions of their neighbors.
    /// Each normal is interpolated between the normals of the two line segments connecting it with its neighbors.
    /// Closer vertices have a stronger effect on the normal than more distant ones.
    ///
    /// Since the vertices corresponding to the first and last indices do not have two neighboring vertices, their normals must be provided manually.
    Smooth {
        /// The normal of the first vertex.
        first_normal: Vec2,
        /// The normal of the last vertex.
        last_normal: Vec2,
        /// A list of indices representing this segment of the perimeter of the mesh.
        ///
        /// The `indices` refer to the indices of the vertices generated by the `MeshBuilder` of the underlying 2D primitive.
        /// For example, a triangle has 3 vertices with indices 0, 1 and 2.
        ///
        /// The indices must be ordered such that the *outside* of the mesh is to the right
        /// when walking along the vertices of the mesh in the order provided by the indices.
        ///
        /// For geometry to be rendered, you must provide at least two indices.
        indices: Vec<u32>,
    },
    /// This segment of the perimeter will be shaded flat.
    ///
    /// This has the effect of rendering the segment's faces with hard edges.
    Flat {
        /// A list of indices representing this segment of the perimeter of the mesh.
        ///
        /// The `indices` refer to the indices of the vertices generated by the `MeshBuilder` of the underlying 2D primitive.
        /// For example, a triangle has 3 vertices with indices 0, 1 and 2.
        ///
        /// The indices must be ordered such that the *outside* of the mesh is to the right
        /// when walking along the vertices of the mesh in the order provided by indices.
        ///
        /// For geometry to be rendered, you must provide at least two indices.
        indices: Vec<u32>,
    },
}
impl PerimeterSegment {
    /// Returns the amount of vertices each 'layer' of the extrusion should include for this perimeter segment.
    ///
    /// A layer is the set of vertices sharing a common Z value or depth.
    fn vertices_per_layer(&self) -> u32 {
        match self {
            PerimeterSegment::Smooth { indices, .. } => indices.len() as u32,
            // A flat segment duplicates vertices so each face can carry its own
            // hard-edged normal: two vertices per edge. `saturating_sub` keeps
            // an empty (invalid) index list from underflowing.
            PerimeterSegment::Flat { indices } => 2 * (indices.len() as u32).saturating_sub(1),
        }
    }

    /// Returns the amount of indices each 'segment' of the extrusion should include for this perimeter segment.
    ///
    /// A segment is the set of faces on the mantel of the extrusion between two layers of vertices.
    fn indices_per_segment(&self) -> usize {
        match self {
            PerimeterSegment::Smooth { indices, .. } | PerimeterSegment::Flat { indices } => {
                // Each edge between consecutive perimeter vertices produces one
                // quad (two triangles = 6 indices) per extrusion segment.
                // `saturating_sub` guards against an empty index list.
                6 * indices.len().saturating_sub(1)
            }
        }
    }
}
/// A trait required for implementing `Meshable` for `Extrusion<T>`.
///
/// ## Warning
///
/// By implementing this trait you guarantee that the `primitive_topology` of the mesh returned by
/// this builder is [`PrimitiveTopology::TriangleList`]
/// and that your mesh has a [`Mesh::ATTRIBUTE_POSITION`] attribute.
pub trait Extrudable: MeshBuilder {
    /// A list of the indices each representing a part of the perimeter of the mesh.
    ///
    /// These segments determine how the mantel of the extrusion is generated and shaded.
    fn perimeter(&self) -> Vec<PerimeterSegment>;
}
impl<P> Meshable for Extrusion<P>
where
    P: Primitive2d + Meshable,
    P::Output: Extrudable,
{
    type Output = ExtrusionBuilder<P>;

    /// Creates an [`ExtrusionBuilder`] for this extrusion with a single depth segment.
    fn mesh(&self) -> Self::Output {
        let base_builder = self.base_shape.mesh();
        ExtrusionBuilder {
            base_builder,
            half_depth: self.half_depth,
            segments: 1,
        }
    }
}
/// A builder used for creating a [`Mesh`] with an [`Extrusion`] shape.
pub struct ExtrusionBuilder<P>
where
    P: Primitive2d + Meshable,
    P::Output: Extrudable,
{
    /// The builder for the base 2D shape, used for the front and back faces.
    pub base_builder: P::Output,
    /// Half of the total depth of the extrusion along the Z axis.
    pub half_depth: f32,
    /// The number of segments along the depth of the extrusion.
    /// Must be greater than `0` for the mantel geometry to be generated.
    pub segments: usize,
}
impl<P> ExtrusionBuilder<P>
where
    P: Primitive2d + Meshable,
    P::Output: Extrudable,
{
    /// Create a new `ExtrusionBuilder<P>` from a given `base_shape` and the full `depth` of the extrusion.
    pub fn new(base_shape: &P, depth: f32) -> Self {
        let base_builder = base_shape.mesh();
        Self {
            base_builder,
            half_depth: depth / 2.,
            segments: 1,
        }
    }

    /// Sets the number of segments along the depth of the extrusion.
    /// Must be greater than `0` for the geometry of the mantel to be generated.
    pub fn segments(self, segments: usize) -> Self {
        Self { segments, ..self }
    }

    /// Apply a function to the inner builder
    pub fn with_inner(self, func: impl Fn(P::Output) -> P::Output) -> Self {
        let Self {
            base_builder,
            half_depth,
            segments,
        } = self;
        Self {
            base_builder: func(base_builder),
            half_depth,
            segments,
        }
    }
}
impl ExtrusionBuilder<Circle> {
    /// Sets the number of vertices used for the circle mesh at each end of the extrusion.
    pub fn resolution(self, resolution: u32) -> Self {
        let mut builder = self;
        builder.base_builder.resolution = resolution;
        builder
    }
}
impl ExtrusionBuilder<Ellipse> {
    /// Sets the number of vertices used for the ellipse mesh at each end of the extrusion.
    pub fn resolution(self, resolution: u32) -> Self {
        let mut builder = self;
        builder.base_builder.resolution = resolution;
        builder
    }
}
impl ExtrusionBuilder<Annulus> {
    /// Sets the number of vertices used in constructing the concentric circles of the annulus mesh at each end of the extrusion.
    pub fn resolution(self, resolution: u32) -> Self {
        let mut builder = self;
        builder.base_builder.resolution = resolution;
        builder
    }
}
impl ExtrusionBuilder<Capsule2d> {
    /// Sets the number of vertices used for each hemicircle at the ends of the extrusion.
    pub fn resolution(self, resolution: u32) -> Self {
        let mut builder = self;
        builder.base_builder.resolution = resolution;
        builder
    }
}
impl<P> MeshBuilder for ExtrusionBuilder<P>
where
    P: Primitive2d + Meshable,
    P::Output: Extrudable,
{
    /// Builds the extrusion mesh: a front face, a mirrored back face, and
    /// (for non-zero depth) a mantel connecting the two along the perimeter.
    ///
    /// # Panics
    ///
    /// Panics if the base builder's mesh is not a `TriangleList` or lacks
    /// vertex positions (see the [`Extrudable`] contract).
    fn build(&self) -> Mesh {
        // Create and move the base mesh to the front
        let mut front_face =
            self.base_builder
                .build()
                .translated_by(Vec3::new(0., 0., self.half_depth));
        // Move the uvs of the front face to be between (0., 0.) and (0.5, 0.5)
        if let Some(VertexAttributeValues::Float32x2(uvs)) =
            front_face.attribute_mut(Mesh::ATTRIBUTE_UV_0)
        {
            for uv in uvs {
                *uv = uv.map(|coord| coord * 0.5);
            }
        }
        let back_face = {
            let topology = front_face.primitive_topology();
            // Flip the normals, etc. and move mesh to the back
            let mut back_face = front_face.clone().scaled_by(Vec3::new(1., 1., -1.));
            // Move the uvs of the back face to be between (0.5, 0.) and (1., 0.5)
            if let Some(VertexAttributeValues::Float32x2(uvs)) =
                back_face.attribute_mut(Mesh::ATTRIBUTE_UV_0)
            {
                for uv in uvs {
                    *uv = [uv[0] + 0.5, uv[1]];
                }
            }
            // By swapping the first and second indices of each triangle we invert the winding order thus making the mesh visible from the other side
            if let Some(indices) = back_face.indices_mut() {
                match topology {
                    PrimitiveTopology::TriangleList => match indices {
                        Indices::U16(indices) => {
                            indices.chunks_exact_mut(3).for_each(|arr| arr.swap(1, 0));
                        }
                        Indices::U32(indices) => {
                            indices.chunks_exact_mut(3).for_each(|arr| arr.swap(1, 0));
                        }
                    },
                    _ => {
                        panic!("Meshes used with Extrusions must have a primitive topology of `PrimitiveTopology::TriangleList`");
                    }
                };
            }
            back_face
        };
        // An extrusion of depth 0 does not need a mantel
        if self.half_depth == 0. {
            front_face.merge(&back_face).unwrap();
            return front_face;
        }
        let mantel = {
            let Some(VertexAttributeValues::Float32x3(cap_verts)) =
                front_face.attribute(Mesh::ATTRIBUTE_POSITION)
            else {
                panic!("The base mesh did not have vertex positions");
            };
            debug_assert!(self.segments > 0);
            let layers = self.segments + 1;
            let layer_depth_delta = self.half_depth * 2.0 / self.segments as f32;
            let perimeter = self.base_builder.perimeter();
            // Pre-compute exact capacities from the perimeter description.
            let (vert_count, index_count) =
                perimeter
                    .iter()
                    .fold((0, 0), |(verts, indices), perimeter| {
                        (
                            verts + layers * perimeter.vertices_per_layer() as usize,
                            indices + self.segments * perimeter.indices_per_segment(),
                        )
                    });
            let mut positions = Vec::with_capacity(vert_count);
            let mut normals = Vec::with_capacity(vert_count);
            let mut indices = Vec::with_capacity(index_count);
            let mut uvs = Vec::with_capacity(vert_count);
            // Compute the amount of horizontal space allocated to each segment of the perimeter.
            let uv_segment_delta = 1. / perimeter.len() as f32;
            for (i, segment) in perimeter.into_iter().enumerate() {
                // The start of the x range of the area of the current perimeter-segment.
                let uv_start = i as f32 * uv_segment_delta;
                match segment {
                    PerimeterSegment::Flat {
                        indices: segment_indices,
                    } => {
                        let uv_delta = uv_segment_delta / (segment_indices.len() - 1) as f32;
                        for i in 0..(segment_indices.len() - 1) {
                            let uv_x = uv_start + uv_delta * i as f32;
                            // Get the positions for the current and the next index.
                            let a = cap_verts[segment_indices[i] as usize];
                            let b = cap_verts[segment_indices[i + 1] as usize];
                            // Get the index of the next vertex added to the mantel.
                            let index = positions.len() as u32;
                            // Push the positions of the two indices and their equivalent points on each layer.
                            for i in 0..layers {
                                let i = i as f32;
                                let z = a[2] - layer_depth_delta * i;
                                positions.push([a[0], a[1], z]);
                                positions.push([b[0], b[1], z]);
                                // UVs for the mantel are between (0, 0.5) and (1, 1).
                                let uv_y = 0.5 + 0.5 * i / self.segments as f32;
                                uvs.push([uv_x, uv_y]);
                                uvs.push([uv_x + uv_delta, uv_y]);
                            }
                            // The normal is calculated to be the normal of the line segment connecting a and b.
                            let n = Vec3::from_array([b[1] - a[1], a[0] - b[0], 0.])
                                .normalize_or_zero()
                                .to_array();
                            normals.extend_from_slice(&vec![n; 2 * layers]);
                            // Add the indices for the vertices created above to the mesh.
                            for i in 0..self.segments as u32 {
                                let base_index = index + 2 * i;
                                indices.extend_from_slice(&[
                                    base_index,
                                    base_index + 2,
                                    base_index + 1,
                                    base_index + 1,
                                    base_index + 2,
                                    base_index + 3,
                                ]);
                            }
                        }
                    }
                    PerimeterSegment::Smooth {
                        first_normal,
                        last_normal,
                        indices: segment_indices,
                    } => {
                        let uv_delta = uv_segment_delta / (segment_indices.len() - 1) as f32;
                        // Since the indices for this segment will be added after its vertices have been added,
                        // we need to store the index of the first vertex that is part of this segment.
                        let base_index = positions.len() as u32;
                        // If there is a first vertex, we need to add it and its counterparts on each layer.
                        // The normal is provided by `segment.first_normal`.
                        if let Some(i) = segment_indices.first() {
                            let p = cap_verts[*i as usize];
                            for i in 0..layers {
                                let i = i as f32;
                                let z = p[2] - layer_depth_delta * i;
                                positions.push([p[0], p[1], z]);
                                let uv_y = 0.5 + 0.5 * i / self.segments as f32;
                                uvs.push([uv_start, uv_y]);
                            }
                            normals.extend_from_slice(&vec![
                                first_normal.extend(0.).to_array();
                                layers
                            ]);
                        }
                        // For all points inbetween the first and last vertices, we can automatically compute the normals.
                        for i in 1..(segment_indices.len() - 1) {
                            let uv_x = uv_start + uv_delta * i as f32;
                            // Get the positions for the last, current and the next index.
                            let a = cap_verts[segment_indices[i - 1] as usize];
                            let b = cap_verts[segment_indices[i] as usize];
                            let c = cap_verts[segment_indices[i + 1] as usize];
                            // Add the current vertex and its counterparts on each layer.
                            for i in 0..layers {
                                let i = i as f32;
                                let z = b[2] - layer_depth_delta * i;
                                positions.push([b[0], b[1], z]);
                                let uv_y = 0.5 + 0.5 * i / self.segments as f32;
                                uvs.push([uv_x, uv_y]);
                            }
                            // The normal for the current vertices can be calculated based on the two neighboring vertices.
                            // The normal is interpolated between the normals of the two line segments connecting the current vertex with its neighbors.
                            // Closer vertices have a stronger effect on the normal than more distant ones.
                            let n = {
                                let ab = Vec2::from_slice(&b) - Vec2::from_slice(&a);
                                let bc = Vec2::from_slice(&c) - Vec2::from_slice(&b);
                                let n = ab.normalize_or_zero() + bc.normalize_or_zero();
                                Vec2::new(n.y, -n.x)
                                    .normalize_or_zero()
                                    .extend(0.)
                                    .to_array()
                            };
                            normals.extend_from_slice(&vec![n; layers]);
                        }
                        // If there is a last vertex, we need to add it and its counterparts on each layer.
                        // The normal is provided by `segment.last_normal`.
                        if let Some(i) = segment_indices.last() {
                            let p = cap_verts[*i as usize];
                            for i in 0..layers {
                                let i = i as f32;
                                let z = p[2] - layer_depth_delta * i;
                                positions.push([p[0], p[1], z]);
                                let uv_y = 0.5 + 0.5 * i / self.segments as f32;
                                uvs.push([uv_start + uv_segment_delta, uv_y]);
                            }
                            normals.extend_from_slice(&vec![
                                last_normal.extend(0.).to_array();
                                layers
                            ]);
                        }
                        let columns = segment_indices.len() as u32;
                        let segments = self.segments as u32;
                        // Shadows the outer `usize` layer count with a `u32` copy for index math.
                        let layers = segments + 1;
                        for s in 0..segments {
                            for column in 0..(columns - 1) {
                                let index = base_index + s + column * layers;
                                indices.extend_from_slice(&[
                                    index,
                                    index + 1,
                                    index + layers,
                                    index + layers,
                                    index + 1,
                                    index + layers + 1,
                                ]);
                            }
                        }
                    }
                }
            }
            Mesh::new(PrimitiveTopology::TriangleList, front_face.asset_usage)
                .with_inserted_indices(Indices::U32(indices))
                .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
                .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
                .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        };
        front_face.merge(&back_face).unwrap();
        front_face.merge(&mantel).unwrap();
        front_face
    }
}
impl<P> From<Extrusion<P>> for Mesh
where
    P: Primitive2d + Meshable,
    P::Output: Extrudable,
{
    /// Converts this extrusion into a [`Mesh`] using a default builder.
    fn from(value: Extrusion<P>) -> Self {
        let builder = value.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/mod.rs | crates/bevy_mesh/src/primitives/mod.rs | //! Mesh generation for [primitive shapes](bevy_math::primitives).
//!
//! Primitives that support meshing implement the [`Meshable`] trait.
//! Calling [`mesh`](Meshable::mesh) will return either a [`Mesh`] or a builder
//! that can be used to specify shape-specific configuration for creating the [`Mesh`].
//!
//! ```
//! # use bevy_asset::Assets;
//! # use bevy_ecs::prelude::ResMut;
//! # use bevy_math::prelude::Circle;
//! # use bevy_mesh::*;
//! #
//! # fn setup(mut meshes: ResMut<Assets<Mesh>>) {
//! // Create circle mesh with default configuration
//! let circle = meshes.add(Circle { radius: 25.0 });
//!
//! // Specify number of vertices
//! let circle = meshes.add(Circle { radius: 25.0 }.mesh().resolution(64));
//! # }
//! ```
mod dim2;
pub use dim2::*;
mod dim3;
pub use dim3::*;
mod extrusion;
pub use extrusion::*;
use super::Mesh;
/// A trait for shapes that can be turned into a [`Mesh`].
pub trait Meshable {
    /// The output of [`Self::mesh`]. This will be a [`MeshBuilder`] used for creating a [`Mesh`].
    type Output: MeshBuilder;
    /// Creates a [`MeshBuilder`] for a shape, which can be configured and then
    /// built into a [`Mesh`].
    fn mesh(&self) -> Self::Output;
}
/// A trait used to build [`Mesh`]es from a configuration
pub trait MeshBuilder {
    /// Builds a [`Mesh`] based on the configuration in `self`.
    fn build(&self) -> Mesh;
}
impl<T> From<T> for Mesh
where
    T: MeshBuilder,
{
    /// Builds the [`Mesh`] described by the given builder.
    fn from(builder: T) -> Self {
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/torus.rs | crates/bevy_mesh/src/primitives/dim3/torus.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::Torus, Vec3};
use bevy_reflect::prelude::*;
use core::ops::RangeInclusive;
/// A builder used for creating a [`Mesh`] with a [`Torus`] shape.
#[derive(Clone, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct TorusMeshBuilder {
    /// The [`Torus`] shape.
    pub torus: Torus,
    /// The number of vertices used for each circular segment
    /// in the ring or tube of the torus.
    ///
    /// The default is `24`.
    pub minor_resolution: usize,
    /// The number of segments used for the main ring of the torus.
    ///
    /// A resolution of `4` would make the torus appear rectangular,
    /// while a resolution of `32` resembles a circular ring.
    ///
    /// The default is `32`.
    pub major_resolution: usize,
    /// Optional angle range in radians, defaults to a full circle (0.0..=2 * PI)
    pub angle_range: RangeInclusive<f32>,
}
impl Default for TorusMeshBuilder {
    /// Returns a builder for a default [`Torus`] spanning the full circle.
    fn default() -> Self {
        let full_circle = 0.0..=2.0 * core::f32::consts::PI;
        Self {
            torus: Torus::default(),
            minor_resolution: 24,
            major_resolution: 32,
            angle_range: full_circle,
        }
    }
}
impl TorusMeshBuilder {
    /// Creates a new [`TorusMeshBuilder`] from an inner and outer radius.
    ///
    /// The inner radius is the radius of the hole, and the outer radius
    /// is the radius of the entire object.
    #[inline]
    pub fn new(inner_radius: f32, outer_radius: f32) -> Self {
        let torus = Torus::new(inner_radius, outer_radius);
        Self {
            torus,
            ..Default::default()
        }
    }

    /// Sets the number of vertices used for each circular segment
    /// in the ring or tube of the torus.
    #[inline]
    pub const fn minor_resolution(mut self, resolution: usize) -> Self {
        self.minor_resolution = resolution;
        self
    }

    /// Sets the number of segments used for the main ring of the torus.
    ///
    /// A resolution of `4` would make the torus appear rectangular,
    /// while a resolution of `32` resembles a circular ring.
    #[inline]
    pub const fn major_resolution(mut self, resolution: usize) -> Self {
        self.major_resolution = resolution;
        self
    }

    /// Sets a custom angle range in radians instead of a full circle
    #[inline]
    pub const fn angle_range(mut self, range: RangeInclusive<f32>) -> Self {
        self.angle_range = range;
        self
    }
}
impl MeshBuilder for TorusMeshBuilder {
    /// Builds the torus as a grid of quads wrapped around the major ring.
    // code adapted from http://apparat-engine.blogspot.com/2013/04/procedural-meshes-torus.html
    fn build(&self) -> Mesh {
        let vertex_count = (self.major_resolution + 1) * (self.minor_resolution + 1);
        let mut positions: Vec<[f32; 3]> = Vec::with_capacity(vertex_count);
        let mut normals: Vec<[f32; 3]> = Vec::with_capacity(vertex_count);
        let mut uvs: Vec<[f32; 2]> = Vec::with_capacity(vertex_count);

        let start_angle = *self.angle_range.start();
        let end_angle = *self.angle_range.end();
        // Angular step along the main ring and around the tube, respectively.
        let major_step = (end_angle - start_angle) / self.major_resolution as f32;
        let minor_step = 2.0 * core::f32::consts::PI / self.minor_resolution as f32;

        for segment in 0..=self.major_resolution {
            let theta = start_angle + major_step * segment as f32;
            let (sin_theta, cos_theta) = ops::sin_cos(theta);
            for side in 0..=self.minor_resolution {
                let phi = minor_step * side as f32;
                let (sin_phi, cos_phi) = ops::sin_cos(phi);

                // Point on the tube surface at (theta, phi).
                let ring_radius = self.torus.major_radius + self.torus.minor_radius * cos_phi;
                let position = Vec3::new(
                    cos_theta * ring_radius,
                    self.torus.minor_radius * sin_phi,
                    sin_theta * ring_radius,
                );
                // The normal points from the center of the tube cross-section
                // towards the surface point.
                let tube_center = Vec3::new(
                    self.torus.major_radius * cos_theta,
                    0.,
                    self.torus.major_radius * sin_theta,
                );
                positions.push(position.into());
                normals.push((position - tube_center).normalize().into());
                uvs.push([
                    segment as f32 / self.major_resolution as f32,
                    side as f32 / self.minor_resolution as f32,
                ]);
            }
        }

        // Two triangles (six indices) per quad of the ring/tube grid.
        let quad_count = self.major_resolution * self.minor_resolution;
        let mut indices: Vec<u32> = Vec::with_capacity(quad_count * 6);
        let row_stride = self.minor_resolution + 1;
        for segment in 0..self.major_resolution {
            for side in 0..self.minor_resolution {
                let lt = (side + segment * row_stride) as u32;
                let rt = (side + 1 + segment * row_stride) as u32;
                let lb = (side + (segment + 1) * row_stride) as u32;
                let rb = (side + 1 + (segment + 1) * row_stride) as u32;
                indices.extend_from_slice(&[lt, rt, lb, rt, rb, lb]);
            }
        }

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Torus {
type Output = TorusMeshBuilder;
fn mesh(&self) -> Self::Output {
TorusMeshBuilder {
torus: *self,
..Default::default()
}
}
}
impl From<Torus> for Mesh {
    /// Converts this torus into a [`Mesh`] using a default [`TorusMeshBuilder`].
    fn from(torus: Torus) -> Self {
        let builder = torus.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs | crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::ConicalFrustum, Vec3};
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`ConicalFrustum`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct ConicalFrustumMeshBuilder {
    /// The [`ConicalFrustum`] shape.
    pub frustum: ConicalFrustum,
    /// The number of vertices used for the top and bottom of the conical frustum.
    ///
    /// The default is `32`.
    pub resolution: u32,
    /// The number of horizontal lines subdividing the lateral surface of the conical frustum.
    ///
    /// The default is `1`.
    pub segments: u32,
}
impl Default for ConicalFrustumMeshBuilder {
    /// Returns a builder for a default [`ConicalFrustum`] with a resolution of `32`
    /// and a single lateral segment.
    fn default() -> Self {
        let frustum = ConicalFrustum::default();
        Self {
            frustum,
            resolution: 32,
            segments: 1,
        }
    }
}
impl ConicalFrustumMeshBuilder {
    /// Creates a new [`ConicalFrustumMeshBuilder`] from the given top and bottom radii, a height,
    /// and a resolution used for the top and bottom.
    #[inline]
    pub const fn new(radius_top: f32, radius_bottom: f32, height: f32, resolution: u32) -> Self {
        let frustum = ConicalFrustum {
            radius_top,
            radius_bottom,
            height,
        };
        Self {
            frustum,
            resolution,
            segments: 1,
        }
    }

    /// Sets the number of vertices used for the top and bottom of the conical frustum.
    #[inline]
    pub const fn resolution(self, resolution: u32) -> Self {
        Self { resolution, ..self }
    }

    /// Sets the number of horizontal lines subdividing the lateral surface of the conical frustum.
    #[inline]
    pub const fn segments(self, segments: u32) -> Self {
        Self { segments, ..self }
    }
}
impl MeshBuilder for ConicalFrustumMeshBuilder {
    /// Builds the frustum from stacked vertex rings plus top and bottom cap fans.
    fn build(&self) -> Mesh {
        debug_assert!(self.resolution > 2);
        debug_assert!(self.segments > 0);
        let ConicalFrustum {
            radius_top,
            radius_bottom,
            height,
        } = self.frustum;
        let half_height = height / 2.0;
        let num_rings = self.segments + 1;
        let num_vertices = (self.resolution * 2 + num_rings * (self.resolution + 1)) as usize;
        let num_faces = self.resolution * (num_rings - 2);
        // NOTE(review): these counts are capacity hints only; correctness does
        // not depend on them being exact since the Vecs may reallocate.
        let num_indices = ((2 * num_faces + 2 * (self.resolution - 1) * 2) * 3) as usize;
        let mut positions = Vec::with_capacity(num_vertices);
        let mut normals = Vec::with_capacity(num_vertices);
        let mut uvs = Vec::with_capacity(num_vertices);
        let mut indices = Vec::with_capacity(num_indices);
        let step_theta = core::f32::consts::TAU / self.resolution as f32;
        let step_y = height / self.segments as f32;
        let step_radius = (radius_top - radius_bottom) / self.segments as f32;
        // Rings
        for ring in 0..num_rings {
            let y = -half_height + ring as f32 * step_y;
            let radius = radius_bottom + ring as f32 * step_radius;
            for segment in 0..=self.resolution {
                let theta = segment as f32 * step_theta;
                let (sin, cos) = ops::sin_cos(theta);
                positions.push([radius * cos, y, radius * sin]);
                // The lateral normal's Y component is tilted by the slope of
                // the frustum side.
                normals.push(
                    Vec3::new(cos, (radius_bottom - radius_top) / height, sin)
                        .normalize()
                        .to_array(),
                );
                uvs.push([
                    segment as f32 / self.resolution as f32,
                    ring as f32 / self.segments as f32,
                ]);
            }
        }
        // Lateral surface
        for i in 0..self.segments {
            let ring = i * (self.resolution + 1);
            let next_ring = (i + 1) * (self.resolution + 1);
            for j in 0..self.resolution {
                // Two triangles per quad between adjacent rings.
                indices.extend_from_slice(&[
                    ring + j,
                    next_ring + j,
                    ring + j + 1,
                    next_ring + j,
                    next_ring + j + 1,
                    ring + j + 1,
                ]);
            }
        }
        // Caps
        let mut build_cap = |top: bool, radius: f32| {
            let offset = positions.len() as u32;
            // `winding` swaps two fan indices so both caps face outward.
            let (y, normal_y, winding) = if top {
                (half_height, 1.0, (1, 0))
            } else {
                (-half_height, -1.0, (0, 1))
            };
            for i in 0..self.resolution {
                let theta = i as f32 * step_theta;
                let (sin, cos) = ops::sin_cos(theta);
                positions.push([cos * radius, y, sin * radius]);
                normals.push([0.0, normal_y, 0.0]);
                uvs.push([0.5 * (cos + 1.0), 1.0 - 0.5 * (sin + 1.0)]);
            }
            // Triangle fan anchored at the first cap vertex.
            for i in 1..(self.resolution - 1) {
                indices.extend_from_slice(&[
                    offset,
                    offset + i + winding.0,
                    offset + i + winding.1,
                ]);
            }
        };
        build_cap(true, radius_top);
        build_cap(false, radius_bottom);
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for ConicalFrustum {
type Output = ConicalFrustumMeshBuilder;
fn mesh(&self) -> Self::Output {
ConicalFrustumMeshBuilder {
frustum: *self,
..Default::default()
}
}
}
impl From<ConicalFrustum> for Mesh {
    /// Meshes the frustum with the default builder settings.
    fn from(frustum: ConicalFrustum) -> Self {
        let builder = frustum.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/cone.rs | crates/bevy_mesh/src/primitives/dim3/cone.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::Cone, Vec3};
use bevy_reflect::prelude::*;
/// Anchoring options for [`ConeMeshBuilder`]
///
/// The anchor determines which point of the cone sits at the mesh origin.
#[derive(Debug, Copy, Clone, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub enum ConeAnchor {
    #[default]
    /// Midpoint between the tip of the cone and the center of its base.
    MidPoint,
    /// The tip of the cone.
    Tip,
    /// The center of the base circle.
    Base,
}
/// A builder used for creating a [`Mesh`] with a [`Cone`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct ConeMeshBuilder {
    /// The [`Cone`] shape.
    pub cone: Cone,
    /// The number of vertices used for the base of the cone.
    ///
    /// The default is `32`.
    pub resolution: u32,
    /// The anchor point for the cone mesh, defaults to the midpoint between
    /// the tip of the cone and the center of its base.
    pub anchor: ConeAnchor,
}
impl Default for ConeMeshBuilder {
fn default() -> Self {
Self {
cone: Cone::default(),
resolution: 32,
anchor: ConeAnchor::default(),
}
}
}
impl ConeMeshBuilder {
    /// Creates a new [`ConeMeshBuilder`] from a given radius, height,
    /// and number of vertices used for the base of the cone.
    #[inline]
    pub const fn new(radius: f32, height: f32, resolution: u32) -> Self {
        Self {
            cone: Cone { radius, height },
            resolution,
            anchor: ConeAnchor::MidPoint,
        }
    }

    /// Sets the number of vertices used for the base of the cone.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }

    /// Sets a custom anchor point for the mesh.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn anchor(mut self, anchor: ConeAnchor) -> Self {
        self.anchor = anchor;
        self
    }
}
impl MeshBuilder for ConeMeshBuilder {
    /// Builds a triangle-list [`Mesh`] for the cone: a fan of triangles from
    /// the tip for the lateral surface, plus a flat circular base.
    fn build(&self) -> Mesh {
        let half_height = self.cone.height / 2.0;

        // `resolution` vertices for the base, `resolution` vertices for the bottom of the lateral surface,
        // and one vertex for the tip.
        let num_vertices = self.resolution as usize * 2 + 1;
        // `resolution` triangles for the sides, `resolution - 2` for the base fan.
        let num_indices = self.resolution as usize * 6 - 6;

        let mut positions = Vec::with_capacity(num_vertices);
        let mut normals = Vec::with_capacity(num_vertices);
        let mut uvs = Vec::with_capacity(num_vertices);
        let mut indices = Vec::with_capacity(num_indices);

        // Tip
        positions.push([0.0, half_height, 0.0]);

        // The tip doesn't have a singular normal that works correctly.
        // We use an invalid normal here so that it becomes NaN in the fragment shader
        // and doesn't affect the overall shading. This might seem hacky, but it's one of
        // the only ways to get perfectly smooth cones without creases or other shading artifacts.
        //
        // Note that this requires that normals are not normalized in the vertex shader,
        // as that would make the entire triangle invalid and make the cone appear as black.
        normals.push([0.0, 0.0, 0.0]);

        // The UVs of the cone are in polar coordinates, so it's like projecting a circle texture from above.
        // The center of the texture is at the center of the lateral surface, at the tip of the cone.
        uvs.push([0.5, 0.5]);

        // Now we build the lateral surface, the side of the cone.
        // The vertex normals will be perpendicular to the surface.
        //
        // Here we get the slope of a normal and use it for computing
        // the multiplicative inverse of the length of a vector in the direction
        // of the normal. This allows us to normalize vertex normals efficiently.
        let normal_slope = self.cone.radius / self.cone.height;
        // Equivalent to Vec2::new(1.0, slope).length().recip()
        let normalization_factor = (1.0 + normal_slope * normal_slope).sqrt().recip();

        // How much the angle changes at each step
        let step_theta = core::f32::consts::TAU / self.resolution as f32;

        // Add vertices for the bottom of the lateral surface.
        for segment in 0..self.resolution {
            let theta = segment as f32 * step_theta;
            let (sin, cos) = ops::sin_cos(theta);

            // The vertex normal perpendicular to the side
            let normal = Vec3::new(cos, normal_slope, sin) * normalization_factor;

            positions.push([self.cone.radius * cos, -half_height, self.cone.radius * sin]);
            normals.push(normal.to_array());
            uvs.push([0.5 + cos * 0.5, 0.5 + sin * 0.5]);
        }

        // Add indices for the lateral surface. Each triangle is formed by the tip
        // and two vertices at the base.
        for j in 1..self.resolution {
            indices.extend_from_slice(&[0, j + 1, j]);
        }

        // Close the surface with a triangle between the tip, first base vertex, and last base vertex.
        indices.extend_from_slice(&[0, 1, self.resolution]);

        // Now we build the actual base of the cone.
        let index_offset = positions.len() as u32;

        // Add base vertices. These duplicate the lateral-surface ring so the
        // base can have its own flat, downward-facing normals.
        for i in 0..self.resolution {
            let theta = i as f32 * step_theta;
            let (sin, cos) = ops::sin_cos(theta);

            positions.push([cos * self.cone.radius, -half_height, sin * self.cone.radius]);
            normals.push([0.0, -1.0, 0.0]);
            uvs.push([0.5 * (cos + 1.0), 1.0 - 0.5 * (sin + 1.0)]);
        }

        // Add base indices as a triangle fan around the first base vertex.
        for i in 1..(self.resolution - 1) {
            indices.extend_from_slice(&[index_offset, index_offset + i, index_offset + i + 1]);
        }

        // Offset the vertex positions Y axis to match the anchor
        match self.anchor {
            ConeAnchor::Tip => positions.iter_mut().for_each(|p| p[1] -= half_height),
            ConeAnchor::Base => positions.iter_mut().for_each(|p| p[1] += half_height),
            ConeAnchor::MidPoint => (),
        };

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Cone {
type Output = ConeMeshBuilder;
fn mesh(&self) -> Self::Output {
ConeMeshBuilder {
cone: *self,
..Default::default()
}
}
}
impl From<Cone> for Mesh {
    /// Meshes the cone with the default builder settings.
    fn from(cone: Cone) -> Self {
        let builder = cone.mesh();
        builder.build()
    }
}
#[cfg(test)]
mod tests {
    use crate::{Mesh, MeshBuilder, Meshable, VertexAttributeValues};
    use bevy_math::{primitives::Cone, Vec2};

    /// Rounds floats to handle floating point error in tests.
    fn round_floats<const N: usize>(points: &mut [[f32; N]]) {
        for point in points.iter_mut() {
            for coord in point.iter_mut() {
                let round = (*coord * 100.0).round() / 100.0;
                // Only snap values that are within epsilon of their
                // two-decimal rounding; genuinely different values are kept.
                if (*coord - round).abs() < 0.00001 {
                    *coord = round;
                }
            }
        }
    }

    #[test]
    fn cone_mesh() {
        // A resolution of 4 keeps the expected vertex lists small and exact.
        let mut mesh = Cone {
            radius: 0.5,
            height: 1.0,
        }
        .mesh()
        .resolution(4)
        .build();

        let Some(VertexAttributeValues::Float32x3(mut positions)) =
            mesh.remove_attribute(Mesh::ATTRIBUTE_POSITION)
        else {
            panic!("Expected positions f32x3");
        };
        let Some(VertexAttributeValues::Float32x3(mut normals)) =
            mesh.remove_attribute(Mesh::ATTRIBUTE_NORMAL)
        else {
            panic!("Expected normals f32x3");
        };

        round_floats(&mut positions);
        round_floats(&mut normals);

        // Vertex positions
        assert_eq!(
            [
                // Tip
                [0.0, 0.5, 0.0],
                // Lateral surface
                [0.5, -0.5, 0.0],
                [0.0, -0.5, 0.5],
                [-0.5, -0.5, 0.0],
                [0.0, -0.5, -0.5],
                // Base
                [0.5, -0.5, 0.0],
                [0.0, -0.5, 0.5],
                [-0.5, -0.5, 0.0],
                [0.0, -0.5, -0.5],
            ],
            &positions[..]
        );

        // Vertex normals
        let [x, y] = Vec2::new(0.5, -1.0).perp().normalize().to_array();
        assert_eq!(
            &[
                // Tip (deliberately the zero vector; see the comment in `build`)
                [0.0, 0.0, 0.0],
                // Lateral surface
                [x, y, 0.0],
                [0.0, y, x],
                [-x, y, 0.0],
                [0.0, y, -x],
                // Base
                [0.0, -1.0, 0.0],
                [0.0, -1.0, 0.0],
                [0.0, -1.0, 0.0],
                [0.0, -1.0, 0.0],
            ],
            &normals[..]
        );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/segment3d.rs | crates/bevy_mesh/src/primitives/dim3/segment3d.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::primitives::Segment3d;
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Segment3d`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Segment3dMeshBuilder {
    // The line segment to mesh; produces a two-vertex line-list mesh.
    segment: Segment3d,
}
impl MeshBuilder for Segment3dMeshBuilder {
    /// Builds a line-list [`Mesh`] consisting of the segment's two endpoints.
    fn build(&self) -> Mesh {
        let positions = Vec::from(self.segment.vertices);

        Mesh::new(PrimitiveTopology::LineList, RenderAssetUsages::default())
            .with_inserted_indices(Indices::U32(vec![0, 1]))
            .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
    }
}
impl Meshable for Segment3d {
type Output = Segment3dMeshBuilder;
fn mesh(&self) -> Self::Output {
Segment3dMeshBuilder { segment: *self }
}
}
impl From<Segment3d> for Mesh {
    /// Meshes the segment as a single line.
    fn from(segment: Segment3d) -> Self {
        let builder = segment.mesh();
        builder.build()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Meshable;
    use bevy_math::Vec3;

    #[test]
    fn segment3d_mesh_builder() {
        let segment = Segment3d::new(Vec3::ZERO, Vec3::X);
        let mesh = segment.mesh().build();
        // Two endpoints, one line: 2 positions and 2 indices.
        assert_eq!(mesh.attribute(Mesh::ATTRIBUTE_POSITION).unwrap().len(), 2);
        assert_eq!(mesh.indices().unwrap().len(), 2);
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/cuboid.rs | crates/bevy_mesh/src/primitives/dim3/cuboid.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{primitives::Cuboid, Vec3};
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Cuboid`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct CuboidMeshBuilder {
    // Half the cuboid's extent along each axis (full size is `2 * half_size`).
    half_size: Vec3,
}
impl Default for CuboidMeshBuilder {
/// Returns the default [`CuboidMeshBuilder`] with a width, height, and depth of `1.0`.
fn default() -> Self {
Self {
half_size: Vec3::splat(0.5),
}
}
}
impl MeshBuilder for CuboidMeshBuilder {
    /// Builds a triangle-list [`Mesh`] with 24 vertices (4 per face) so each
    /// face gets its own flat normal and a full `[0, 1]` UV square.
    fn build(&self) -> Mesh {
        let min = -self.half_size;
        let max = self.half_size;

        // Suppose Y-up right hand, and camera look from +Z to -Z
        // Each tuple is (position, normal, uv); faces wind counter-clockwise
        // when viewed from outside the cuboid.
        let vertices = &[
            // Front
            ([min.x, min.y, max.z], [0.0, 0.0, 1.0], [0.0, 0.0]),
            ([max.x, min.y, max.z], [0.0, 0.0, 1.0], [1.0, 0.0]),
            ([max.x, max.y, max.z], [0.0, 0.0, 1.0], [1.0, 1.0]),
            ([min.x, max.y, max.z], [0.0, 0.0, 1.0], [0.0, 1.0]),
            // Back
            ([min.x, max.y, min.z], [0.0, 0.0, -1.0], [1.0, 0.0]),
            ([max.x, max.y, min.z], [0.0, 0.0, -1.0], [0.0, 0.0]),
            ([max.x, min.y, min.z], [0.0, 0.0, -1.0], [0.0, 1.0]),
            ([min.x, min.y, min.z], [0.0, 0.0, -1.0], [1.0, 1.0]),
            // Right
            ([max.x, min.y, min.z], [1.0, 0.0, 0.0], [0.0, 0.0]),
            ([max.x, max.y, min.z], [1.0, 0.0, 0.0], [1.0, 0.0]),
            ([max.x, max.y, max.z], [1.0, 0.0, 0.0], [1.0, 1.0]),
            ([max.x, min.y, max.z], [1.0, 0.0, 0.0], [0.0, 1.0]),
            // Left
            ([min.x, min.y, max.z], [-1.0, 0.0, 0.0], [1.0, 0.0]),
            ([min.x, max.y, max.z], [-1.0, 0.0, 0.0], [0.0, 0.0]),
            ([min.x, max.y, min.z], [-1.0, 0.0, 0.0], [0.0, 1.0]),
            ([min.x, min.y, min.z], [-1.0, 0.0, 0.0], [1.0, 1.0]),
            // Top
            ([max.x, max.y, min.z], [0.0, 1.0, 0.0], [1.0, 0.0]),
            ([min.x, max.y, min.z], [0.0, 1.0, 0.0], [0.0, 0.0]),
            ([min.x, max.y, max.z], [0.0, 1.0, 0.0], [0.0, 1.0]),
            ([max.x, max.y, max.z], [0.0, 1.0, 0.0], [1.0, 1.0]),
            // Bottom
            ([max.x, min.y, max.z], [0.0, -1.0, 0.0], [0.0, 0.0]),
            ([min.x, min.y, max.z], [0.0, -1.0, 0.0], [1.0, 0.0]),
            ([min.x, min.y, min.z], [0.0, -1.0, 0.0], [1.0, 1.0]),
            ([max.x, min.y, min.z], [0.0, -1.0, 0.0], [0.0, 1.0]),
        ];

        // Split the interleaved table into the separate attribute buffers.
        let positions: Vec<_> = vertices.iter().map(|(p, _, _)| *p).collect();
        let normals: Vec<_> = vertices.iter().map(|(_, n, _)| *n).collect();
        let uvs: Vec<_> = vertices.iter().map(|(_, _, uv)| *uv).collect();

        // Two triangles per face.
        let indices = Indices::U32(vec![
            0, 1, 2, 2, 3, 0, // front
            4, 5, 6, 6, 7, 4, // back
            8, 9, 10, 10, 11, 8, // right
            12, 13, 14, 14, 15, 12, // left
            16, 17, 18, 18, 19, 16, // top
            20, 21, 22, 22, 23, 20, // bottom
        ]);

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
        .with_inserted_indices(indices)
    }
}
impl Meshable for Cuboid {
type Output = CuboidMeshBuilder;
fn mesh(&self) -> Self::Output {
CuboidMeshBuilder {
half_size: self.half_size,
}
}
}
impl From<Cuboid> for Mesh {
    /// Meshes the cuboid with per-face normals and UVs.
    fn from(cuboid: Cuboid) -> Self {
        let builder = cuboid.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs | crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs | use super::triangle3d;
use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::primitives::{Tetrahedron, Triangle3d};
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Tetrahedron`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct TetrahedronMeshBuilder {
    // The tetrahedron to mesh; each of its four faces becomes an
    // independent triangle with its own flat normal.
    tetrahedron: Tetrahedron,
}
impl MeshBuilder for TetrahedronMeshBuilder {
    /// Builds a triangle-list [`Mesh`] from the tetrahedron's four faces.
    ///
    /// Faces are emitted with outward-facing winding regardless of the
    /// tetrahedron's orientation, and no vertices are shared between faces.
    fn build(&self) -> Mesh {
        let mut faces: Vec<_> = self.tetrahedron.faces().into();

        // A negatively-oriented tetrahedron has inward-facing triangles;
        // reverse each one so the winding points outward again.
        if self.tetrahedron.signed_volume().is_sign_negative() {
            for face in &mut faces {
                face.reverse();
            }
        }

        let mut positions = Vec::new();
        let mut normals = Vec::new();
        let mut uvs = Vec::new();

        // Mesh each face like a standalone `Triangle3d`, appending its
        // data to the flat attribute buffers.
        for face in faces {
            positions.extend(face.vertices);
            normals.extend([triangle3d::normal_vec(&face); 3]);
            uvs.extend(triangle3d::uv_coords(&face));
        }

        // Four faces, three unique vertices each, nothing shared.
        let indices = Indices::U32((0..12).collect());

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(indices)
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Tetrahedron {
type Output = TetrahedronMeshBuilder;
fn mesh(&self) -> Self::Output {
TetrahedronMeshBuilder { tetrahedron: *self }
}
}
impl From<Tetrahedron> for Mesh {
    /// Meshes the tetrahedron with flat per-face normals.
    fn from(tetrahedron: Tetrahedron) -> Self {
        let builder = tetrahedron.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/plane.rs | crates/bevy_mesh/src/primitives/dim3/plane.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{primitives::Plane3d, Dir3, Quat, Vec2, Vec3};
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Plane3d`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct PlaneMeshBuilder {
    /// The [`Plane3d`] shape.
    pub plane: Plane3d,
    /// The number of subdivisions in the mesh.
    ///
    /// 0 - is the original plane geometry, the 4 points in the XZ plane.
    ///
    /// 1 - is split by 1 line in the middle of the plane on both the X axis and the Z axis, resulting in a plane with 4 quads / 8 triangles.
    ///
    /// 2 - is a plane split by 2 lines on both the X and Z axes, subdividing the plane into 3 equal sections along each axis, resulting in a plane with 9 quads / 18 triangles.
    ///
    /// and so on...
    pub subdivisions: u32,
}
impl PlaneMeshBuilder {
    /// Creates a new [`PlaneMeshBuilder`] from a given normal and size.
    #[inline]
    pub fn new(normal: Dir3, size: Vec2) -> Self {
        Self::from_size(size).normal(normal)
    }

    /// Creates a new [`PlaneMeshBuilder`] from the given size, with the normal pointing upwards.
    #[inline]
    pub fn from_size(size: Vec2) -> Self {
        let mut builder = Self::default();
        builder.plane.half_size = size / 2.0;
        builder
    }

    /// Creates a new [`PlaneMeshBuilder`] from the given length, with the normal pointing
    /// upwards, producing a square plane.
    #[inline]
    pub fn from_length(length: f32) -> Self {
        Self::from_size(Vec2::splat(length))
    }

    /// Sets the normal of the plane, aka the direction the plane is facing.
    #[inline]
    #[doc(alias = "facing")]
    pub fn normal(mut self, normal: Dir3) -> Self {
        self.plane.normal = normal;
        self
    }

    /// Sets the size of the plane mesh from a full width and height.
    #[inline]
    pub fn size(mut self, width: f32, height: f32) -> Self {
        let half_size = Vec2::new(width, height) / 2.0;
        self.plane.half_size = half_size;
        self
    }

    /// Sets the subdivisions of the plane mesh.
    ///
    /// 0 - is the original plane geometry, the 4 points in the XZ plane.
    ///
    /// 1 - is split by 1 line in the middle of the plane on both the X axis and the Z axis,
    /// resulting in a plane with 4 quads / 8 triangles.
    ///
    /// 2 - is a plane split by 2 lines on both the X and Z axes, subdividing the plane into 3
    /// equal sections along each axis, resulting in a plane with 9 quads / 18 triangles.
    #[inline]
    pub fn subdivisions(mut self, subdivisions: u32) -> Self {
        self.subdivisions = subdivisions;
        self
    }
}
impl MeshBuilder for PlaneMeshBuilder {
    /// Builds a triangle-list [`Mesh`] for the plane: a subdivided grid of
    /// quads, rotated from the canonical Y-up XZ grid to face `plane.normal`.
    fn build(&self) -> Mesh {
        // `subdivisions` splits each axis into `subdivisions + 1` sections,
        // which takes `subdivisions + 2` vertices per axis.
        let z_vertex_count = self.subdivisions + 2;
        let x_vertex_count = self.subdivisions + 2;
        let num_vertices = (z_vertex_count * x_vertex_count) as usize;
        let num_indices = ((z_vertex_count - 1) * (x_vertex_count - 1) * 6) as usize;

        let mut positions: Vec<Vec3> = Vec::with_capacity(num_vertices);
        let mut normals: Vec<[f32; 3]> = Vec::with_capacity(num_vertices);
        let mut uvs: Vec<[f32; 2]> = Vec::with_capacity(num_vertices);
        let mut indices: Vec<u32> = Vec::with_capacity(num_indices);

        // Rotation taking the canonical +Y normal onto the requested one.
        let rotation = Quat::from_rotation_arc(Vec3::Y, *self.plane.normal);
        let size = self.plane.half_size * 2.0;

        for z in 0..z_vertex_count {
            for x in 0..x_vertex_count {
                // Normalized grid coordinates in [0, 1], reused directly as UVs.
                let tx = x as f32 / (x_vertex_count - 1) as f32;
                let tz = z as f32 / (z_vertex_count - 1) as f32;
                let pos = rotation * Vec3::new((-0.5 + tx) * size.x, 0.0, (-0.5 + tz) * size.y);
                positions.push(pos);
                normals.push(self.plane.normal.to_array());
                uvs.push([tx, tz]);
            }
        }

        // Two triangles per grid cell.
        for z in 0..z_vertex_count - 1 {
            for x in 0..x_vertex_count - 1 {
                let quad = z * x_vertex_count + x;
                indices.push(quad + x_vertex_count + 1);
                indices.push(quad + 1);
                indices.push(quad + x_vertex_count);
                indices.push(quad);
                indices.push(quad + x_vertex_count);
                indices.push(quad + 1);
            }
        }

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Plane3d {
type Output = PlaneMeshBuilder;
fn mesh(&self) -> Self::Output {
PlaneMeshBuilder {
plane: *self,
subdivisions: 0,
}
}
}
impl From<Plane3d> for Mesh {
    /// Meshes the plane as a single unsubdivided quad.
    fn from(plane: Plane3d) -> Self {
        let builder = plane.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/cylinder.rs | crates/bevy_mesh/src/primitives/dim3/cylinder.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::Cylinder};
use bevy_reflect::prelude::*;
/// Anchoring options for [`CylinderMeshBuilder`]
///
/// The anchor determines which point of the cylinder sits at the mesh origin.
#[derive(Debug, Copy, Clone, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub enum CylinderAnchor {
    #[default]
    /// Midpoint between the top and bottom caps of the cylinder
    MidPoint,
    /// The center of the top circle cap
    Top,
    /// The center of the bottom circle cap
    Bottom,
}
/// A builder used for creating a [`Mesh`] with a [`Cylinder`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct CylinderMeshBuilder {
    /// The [`Cylinder`] shape.
    pub cylinder: Cylinder,
    /// The number of vertices used for the top and bottom of the cylinder.
    /// Must be greater than `2` for geometry to be generated.
    ///
    /// The default is `32`.
    pub resolution: u32,
    /// The number of segments along the height of the cylinder.
    /// Must be greater than `0` for geometry to be generated.
    ///
    /// The default is `1`.
    pub segments: u32,
    /// If set to `true`, the cylinder caps (flat circle faces) are built,
    /// otherwise the mesh will be a shallow tube
    pub caps: bool,
    /// The anchor point for the cylinder mesh, defaults to the midpoint between
    /// the top and bottom caps
    pub anchor: CylinderAnchor,
}
impl Default for CylinderMeshBuilder {
fn default() -> Self {
Self {
cylinder: Cylinder::default(),
resolution: 32,
segments: 1,
caps: true,
anchor: CylinderAnchor::default(),
}
}
}
impl CylinderMeshBuilder {
    /// Creates a new [`CylinderMeshBuilder`] from the given radius, a height,
    /// and a resolution used for the top and bottom.
    #[inline]
    pub fn new(radius: f32, height: f32, resolution: u32) -> Self {
        Self {
            cylinder: Cylinder::new(radius, height),
            resolution,
            ..Default::default()
        }
    }

    /// Sets the number of vertices used for the top and bottom of the cylinder.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn resolution(mut self, resolution: u32) -> Self {
        self.resolution = resolution;
        self
    }

    /// Sets the number of segments along the height of the cylinder.
    /// Must be greater than `0` for geometry to be generated.
    #[inline]
    pub const fn segments(mut self, segments: u32) -> Self {
        self.segments = segments;
        self
    }

    /// Ignore the cylinder caps, making the mesh a shallow tube instead
    #[inline]
    pub const fn without_caps(mut self) -> Self {
        self.caps = false;
        self
    }

    /// Sets a custom anchor point for the mesh
    #[inline]
    pub const fn anchor(mut self, anchor: CylinderAnchor) -> Self {
        self.anchor = anchor;
        self
    }
}
impl MeshBuilder for CylinderMeshBuilder {
    /// Builds a triangle-list [`Mesh`] for the cylinder: stacked vertex rings
    /// for the barrel, plus (optionally) two flat circular caps.
    fn build(&self) -> Mesh {
        let resolution = self.resolution;
        let segments = self.segments;

        // Fewer than 3 cap vertices or 0 height segments cannot form a surface.
        debug_assert!(resolution > 2);
        debug_assert!(segments > 0);

        let num_rings = segments + 1;
        // Each ring duplicates its first vertex (hence `resolution + 1`) so the
        // texture seam can carry distinct UVs; the caps add `resolution` each.
        let num_vertices = resolution * 2 + num_rings * (resolution + 1);
        let num_faces = resolution * (num_rings - 2);
        let num_indices = (2 * num_faces + 2 * (resolution - 1) * 2) * 3;

        let mut positions = Vec::with_capacity(num_vertices as usize);
        let mut normals = Vec::with_capacity(num_vertices as usize);
        let mut uvs = Vec::with_capacity(num_vertices as usize);
        let mut indices = Vec::with_capacity(num_indices as usize);

        let step_theta = core::f32::consts::TAU / resolution as f32;
        let step_y = 2.0 * self.cylinder.half_height / segments as f32;

        // rings: built bottom-up
        for ring in 0..num_rings {
            let y = -self.cylinder.half_height + ring as f32 * step_y;

            for segment in 0..=resolution {
                let theta = segment as f32 * step_theta;
                let (sin, cos) = ops::sin_cos(theta);

                positions.push([self.cylinder.radius * cos, y, self.cylinder.radius * sin]);
                // Barrel normals are horizontal and radial.
                normals.push([cos, 0., sin]);
                uvs.push([
                    segment as f32 / resolution as f32,
                    ring as f32 / segments as f32,
                ]);
            }
        }

        // barrel skin: two triangles per quad between adjacent rings
        for i in 0..segments {
            let ring = i * (resolution + 1);
            let next_ring = (i + 1) * (resolution + 1);

            for j in 0..resolution {
                indices.extend_from_slice(&[
                    ring + j,
                    next_ring + j,
                    ring + j + 1,
                    next_ring + j,
                    next_ring + j + 1,
                    ring + j + 1,
                ]);
            }
        }

        // caps: a triangle fan around the first cap vertex. `winding` swaps two
        // indices so the top cap faces up and the bottom cap faces down.
        if self.caps {
            let mut build_cap = |top: bool| {
                let offset = positions.len() as u32;
                let (y, normal_y, winding) = if top {
                    (self.cylinder.half_height, 1., (1, 0))
                } else {
                    (-self.cylinder.half_height, -1., (0, 1))
                };

                for i in 0..self.resolution {
                    let theta = i as f32 * step_theta;
                    let (sin, cos) = ops::sin_cos(theta);

                    positions.push([cos * self.cylinder.radius, y, sin * self.cylinder.radius]);
                    normals.push([0.0, normal_y, 0.0]);
                    uvs.push([0.5 * (cos + 1.0), 1.0 - 0.5 * (sin + 1.0)]);
                }

                for i in 1..(self.resolution - 1) {
                    indices.extend_from_slice(&[
                        offset,
                        offset + i + winding.0,
                        offset + i + winding.1,
                    ]);
                }
            };

            build_cap(true);
            build_cap(false);
        }

        // Offset the vertex positions Y axis to match the anchor
        match self.anchor {
            CylinderAnchor::Top => positions
                .iter_mut()
                .for_each(|p| p[1] -= self.cylinder.half_height),
            CylinderAnchor::Bottom => positions
                .iter_mut()
                .for_each(|p| p[1] += self.cylinder.half_height),
            CylinderAnchor::MidPoint => (),
        };

        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Cylinder {
type Output = CylinderMeshBuilder;
fn mesh(&self) -> Self::Output {
CylinderMeshBuilder {
cylinder: *self,
..Default::default()
}
}
}
impl From<Cylinder> for Mesh {
    /// Meshes the cylinder with the default builder settings.
    fn from(cylinder: Cylinder) -> Self {
        let builder = cylinder.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/mod.rs | crates/bevy_mesh/src/primitives/dim3/mod.rs | mod capsule;
mod cone;
mod conical_frustum;
mod cuboid;
mod cylinder;
mod plane;
mod polyline3d;
mod segment3d;
mod sphere;
mod tetrahedron;
mod torus;
// Used by `tetrahedron` for per-face normals/UVs, hence crate-visible.
pub(crate) mod triangle3d;

// Re-exports of the mesh builders for each 3D primitive.
// NOTE(review): `polyline3d` and `segment3d` are not re-exported here, so
// their builders are only reachable through the `Meshable` impls — confirm
// this is intentional.
pub use capsule::*;
pub use cone::*;
pub use conical_frustum::*;
pub use cuboid::*;
pub use cylinder::*;
pub use plane::*;
pub use sphere::*;
pub use tetrahedron::*;
pub use torus::*;
pub use triangle3d::*;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/polyline3d.rs | crates/bevy_mesh/src/primitives/dim3/polyline3d.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::primitives::Polyline3d;
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Polyline3d`] shape.
#[derive(Clone, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Polyline3dMeshBuilder {
    // The polyline to mesh; consecutive vertices are joined by lines.
    polyline: Polyline3d,
}
impl MeshBuilder for Polyline3dMeshBuilder {
    /// Builds a line-list [`Mesh`] connecting the polyline's vertices in order.
    ///
    /// Each consecutive pair of vertices becomes one line, so the indices are
    /// `[0, 1, 1, 2, ...]`. A polyline with fewer than two vertices produces a
    /// mesh with no indices instead of panicking.
    fn build(&self) -> Mesh {
        let positions: Vec<_> = self.polyline.vertices.clone();

        // `saturating_sub` guards the `len - 1` line count against underflow
        // (a panic in debug builds, a wrap to ~u32::MAX lines in release)
        // when the polyline has no vertices.
        let line_count = (self.polyline.vertices.len() as u32).saturating_sub(1);
        let indices = Indices::U32((0..line_count).flat_map(|i| [i, i + 1]).collect());

        Mesh::new(PrimitiveTopology::LineList, RenderAssetUsages::default())
            .with_inserted_indices(indices)
            .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
    }
}
impl Meshable for Polyline3d {
type Output = Polyline3dMeshBuilder;
fn mesh(&self) -> Self::Output {
Polyline3dMeshBuilder {
polyline: self.clone(),
}
}
}
impl From<Polyline3d> for Mesh {
    /// Meshes the polyline as a chain of line segments.
    fn from(polyline: Polyline3d) -> Self {
        let builder = polyline.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/capsule.rs | crates/bevy_mesh/src/primitives/dim3/capsule.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::Capsule3d, Vec2, Vec3};
use bevy_reflect::prelude::*;
/// Manner in which UV coordinates are distributed vertically.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub enum CapsuleUvProfile {
    /// UV space is distributed by how much of the capsule consists of the hemispheres.
    #[default]
    Aspect,
    /// Hemispheres get UV space according to the ratio of latitudes to rings.
    Uniform,
    /// Upper third of the texture goes to the northern hemisphere, middle third to the cylinder
    /// and lower third to the southern one.
    Fixed,
}
/// A builder used for creating a [`Mesh`] with a [`Capsule3d`] shape.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Capsule3dMeshBuilder {
    /// The [`Capsule3d`] shape.
    pub capsule: Capsule3d,
    /// The number of horizontal lines subdividing the cylindrical part of the capsule.
    /// The default is `0`.
    pub rings: u32,
    /// The number of vertical lines subdividing the hemispheres of the capsule.
    /// The default is `32`.
    pub longitudes: u32,
    /// The number of horizontal lines subdividing the hemispheres of the capsule.
    /// The default is `16`.
    pub latitudes: u32,
    /// The manner in which UV coordinates are distributed vertically.
    /// The default is [`CapsuleUvProfile::Aspect`].
    pub uv_profile: CapsuleUvProfile,
}
impl Default for Capsule3dMeshBuilder {
fn default() -> Self {
Self {
capsule: Capsule3d::default(),
rings: 0,
longitudes: 32,
latitudes: 16,
uv_profile: CapsuleUvProfile::default(),
}
}
}
impl Capsule3dMeshBuilder {
    /// Creates a new [`Capsule3dMeshBuilder`] from a given radius, height, longitudes, and latitudes.
    ///
    /// Note that `height` is the distance between the centers of the hemispheres.
    /// `radius` will be added to both ends to get the real height of the mesh.
    #[inline]
    pub fn new(radius: f32, height: f32, longitudes: u32, latitudes: u32) -> Self {
        Self {
            capsule: Capsule3d::new(radius, height),
            longitudes,
            latitudes,
            ..Default::default()
        }
    }

    /// Sets the number of horizontal lines subdividing the cylindrical part of the capsule.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn rings(mut self, rings: u32) -> Self {
        self.rings = rings;
        self
    }

    /// Sets the number of vertical lines subdividing the hemispheres of the capsule.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn longitudes(mut self, longitudes: u32) -> Self {
        self.longitudes = longitudes;
        self
    }

    /// Sets the number of horizontal lines subdividing the hemispheres of the capsule.
    /// Returns `self` so calls can be chained builder-style.
    #[inline]
    pub const fn latitudes(mut self, latitudes: u32) -> Self {
        self.latitudes = latitudes;
        self
    }

    /// Sets the manner in which UV coordinates are distributed vertically.
    #[inline]
    pub const fn uv_profile(mut self, uv_profile: CapsuleUvProfile) -> Self {
        self.uv_profile = uv_profile;
        self
    }
}
impl MeshBuilder for Capsule3dMeshBuilder {
    /// Builds the capsule as two hemisphere caps, optional cylinder rings in the
    /// middle, and a triangle-list index buffer stitching them together.
    fn build(&self) -> Mesh {
        // code adapted from https://behreajj.medium.com/making-a-capsule-mesh-via-script-in-five-3d-environments-c2214abf02db
        let Capsule3dMeshBuilder {
            capsule,
            rings,
            longitudes,
            latitudes,
            uv_profile,
        } = *self;
        let Capsule3d {
            radius,
            half_length,
        } = capsule;
        // With zero rings there is no cylinder section to tessellate.
        let calc_middle = rings > 0;
        // NOTE(review): the subtractions below assume `latitudes >= 4` (and even);
        // smaller values underflow `half_latsn1`/`half_latsn2` — confirm invariant.
        let half_lats = latitudes / 2;
        let half_latsn1 = half_lats - 1;
        let half_latsn2 = half_lats - 2;
        let ringsp1 = rings + 1;
        // Longitudes + 1: the seam column is duplicated so UVs can wrap.
        let lonsp1 = longitudes + 1;
        // Y coordinate of the very top (and, negated, the very bottom) of the capsule.
        let summit = half_length + radius;
        // Vertex index offsets.
        let vert_offset_north_hemi = longitudes;
        let vert_offset_north_equator = vert_offset_north_hemi + lonsp1 * half_latsn1;
        let vert_offset_cylinder = vert_offset_north_equator + lonsp1;
        let vert_offset_south_equator = if calc_middle {
            vert_offset_cylinder + lonsp1 * rings
        } else {
            vert_offset_cylinder
        };
        let vert_offset_south_hemi = vert_offset_south_equator + lonsp1;
        let vert_offset_south_polar = vert_offset_south_hemi + lonsp1 * half_latsn2;
        let vert_offset_south_cap = vert_offset_south_polar + lonsp1;
        // Initialize arrays.
        let vert_len = (vert_offset_south_cap + longitudes) as usize;
        // Positions, texture coordinates, and normals, filled in by index below.
        let mut vs: Vec<Vec3> = vec![Vec3::ZERO; vert_len];
        let mut vts: Vec<Vec2> = vec![Vec2::ZERO; vert_len];
        let mut vns: Vec<Vec3> = vec![Vec3::ZERO; vert_len];
        // Conversion factors from integer steps to angles / texture coordinates.
        let to_theta = 2.0 * core::f32::consts::PI / longitudes as f32;
        let to_phi = core::f32::consts::PI / latitudes as f32;
        let to_tex_horizontal = 1.0 / longitudes as f32;
        let to_tex_vertical = 1.0 / half_lats as f32;
        // Fraction of the V axis occupied by each hemisphere, per the UV profile.
        let vt_aspect_ratio = match uv_profile {
            CapsuleUvProfile::Aspect => radius / (2.0 * half_length + radius + radius),
            CapsuleUvProfile::Uniform => half_lats as f32 / (ringsp1 + latitudes) as f32,
            CapsuleUvProfile::Fixed => 1.0 / 3.0,
        };
        let vt_aspect_north = 1.0 - vt_aspect_ratio;
        let vt_aspect_south = vt_aspect_ratio;
        // Per-longitude unit directions and radius-scaled directions, cached once.
        let mut theta_cartesian: Vec<Vec2> = vec![Vec2::ZERO; longitudes as usize];
        let mut rho_theta_cartesian: Vec<Vec2> = vec![Vec2::ZERO; longitudes as usize];
        // Cached U coordinate per (duplicated-seam) longitude column.
        let mut s_texture_cache: Vec<f32> = vec![0.0; lonsp1 as usize];
        for j in 0..longitudes as usize {
            let jf = j as f32;
            // Polar-cap vertices sit half a step over in U to center the fan triangles.
            let s_texture_polar = 1.0 - ((jf + 0.5) * to_tex_horizontal);
            let theta = jf * to_theta;
            theta_cartesian[j] = Vec2::from_angle(theta);
            rho_theta_cartesian[j] = radius * theta_cartesian[j];
            // North.
            vs[j] = Vec3::new(0.0, summit, 0.0);
            vts[j] = Vec2::new(s_texture_polar, 1.0);
            vns[j] = Vec3::Y;
            // South.
            let idx = vert_offset_south_cap as usize + j;
            vs[idx] = Vec3::new(0.0, -summit, 0.0);
            vts[idx] = Vec2::new(s_texture_polar, 0.0);
            vns[idx] = Vec3::new(0.0, -1.0, 0.0);
        }
        // Equatorial vertices.
        for (j, s_texture_cache_j) in s_texture_cache.iter_mut().enumerate().take(lonsp1 as usize) {
            let s_texture = 1.0 - j as f32 * to_tex_horizontal;
            *s_texture_cache_j = s_texture;
            // Wrap to first element upon reaching last.
            let j_mod = j % longitudes as usize;
            let tc = theta_cartesian[j_mod];
            let rtc = rho_theta_cartesian[j_mod];
            // North equator.
            let idxn = vert_offset_north_equator as usize + j;
            vs[idxn] = Vec3::new(rtc.x, half_length, -rtc.y);
            vts[idxn] = Vec2::new(s_texture, vt_aspect_north);
            vns[idxn] = Vec3::new(tc.x, 0.0, -tc.y);
            // South equator.
            let idxs = vert_offset_south_equator as usize + j;
            vs[idxs] = Vec3::new(rtc.x, -half_length, -rtc.y);
            vts[idxs] = Vec2::new(s_texture, vt_aspect_south);
            vns[idxs] = Vec3::new(tc.x, 0.0, -tc.y);
        }
        // Hemisphere vertices.
        for i in 0..half_latsn1 {
            let ip1f = i as f32 + 1.0;
            let phi = ip1f * to_phi;
            // For coordinates.
            let (sin_phi_south, cos_phi_south) = ops::sin_cos(phi);
            // Symmetrical hemispheres mean cosine and sine only needs
            // to be calculated once.
            let cos_phi_north = sin_phi_south;
            let sin_phi_north = -cos_phi_south;
            let rho_cos_phi_north = radius * cos_phi_north;
            let rho_sin_phi_north = radius * sin_phi_north;
            let z_offset_north = half_length - rho_sin_phi_north;
            let rho_cos_phi_south = radius * cos_phi_south;
            let rho_sin_phi_south = radius * sin_phi_south;
            // (sic: "sout" — long-standing local-name typo, behavior unaffected)
            let z_offset_sout = -half_length - rho_sin_phi_south;
            // For texture coordinates.
            let t_tex_fac = ip1f * to_tex_vertical;
            let cmpl_tex_fac = 1.0 - t_tex_fac;
            let t_tex_north = cmpl_tex_fac + vt_aspect_north * t_tex_fac;
            let t_tex_south = cmpl_tex_fac * vt_aspect_south;
            let i_lonsp1 = i * lonsp1;
            let vert_curr_lat_north = vert_offset_north_hemi + i_lonsp1;
            let vert_curr_lat_south = vert_offset_south_hemi + i_lonsp1;
            for (j, s_texture) in s_texture_cache.iter().enumerate().take(lonsp1 as usize) {
                let j_mod = j % longitudes as usize;
                let tc = theta_cartesian[j_mod];
                // North hemisphere.
                let idxn = vert_curr_lat_north as usize + j;
                vs[idxn] = Vec3::new(
                    rho_cos_phi_north * tc.x,
                    z_offset_north,
                    -rho_cos_phi_north * tc.y,
                );
                vts[idxn] = Vec2::new(*s_texture, t_tex_north);
                vns[idxn] = Vec3::new(cos_phi_north * tc.x, -sin_phi_north, -cos_phi_north * tc.y);
                // South hemisphere.
                let idxs = vert_curr_lat_south as usize + j;
                vs[idxs] = Vec3::new(
                    rho_cos_phi_south * tc.x,
                    z_offset_sout,
                    -rho_cos_phi_south * tc.y,
                );
                vts[idxs] = Vec2::new(*s_texture, t_tex_south);
                vns[idxs] = Vec3::new(cos_phi_south * tc.x, -sin_phi_south, -cos_phi_south * tc.y);
            }
        }
        // Cylinder vertices.
        if calc_middle {
            // Exclude both origin and destination edges
            // (North and South equators) from the interpolation.
            let to_fac = 1.0 / ringsp1 as f32;
            let mut idx_cyl_lat = vert_offset_cylinder as usize;
            for h in 1..ringsp1 {
                // Linear interpolation between the two equators, in both Y and V.
                let fac = h as f32 * to_fac;
                let cmpl_fac = 1.0 - fac;
                let t_texture = cmpl_fac * vt_aspect_north + fac * vt_aspect_south;
                let z = half_length - 2.0 * half_length * fac;
                for (j, s_texture) in s_texture_cache.iter().enumerate().take(lonsp1 as usize) {
                    let j_mod = j % longitudes as usize;
                    let tc = theta_cartesian[j_mod];
                    let rtc = rho_theta_cartesian[j_mod];
                    vs[idx_cyl_lat] = Vec3::new(rtc.x, z, -rtc.y);
                    vts[idx_cyl_lat] = Vec2::new(*s_texture, t_texture);
                    vns[idx_cyl_lat] = Vec3::new(tc.x, 0.0, -tc.y);
                    idx_cyl_lat += 1;
                }
            }
        }
        // Triangle indices.
        // Stride is 3 for polar triangles;
        // stride is 6 for two triangles forming a quad.
        let lons3 = longitudes * 3;
        let lons6 = longitudes * 6;
        let hemi_lons = half_latsn1 * lons6;
        let tri_offset_north_hemi = lons3;
        let tri_offset_cylinder = tri_offset_north_hemi + hemi_lons;
        let tri_offset_south_hemi = tri_offset_cylinder + ringsp1 * lons6;
        let tri_offset_south_cap = tri_offset_south_hemi + hemi_lons;
        let fs_len = tri_offset_south_cap + lons3;
        let mut tris: Vec<u32> = vec![0; fs_len as usize];
        // Polar caps.
        // Fan triangles around the dedicated per-longitude pole vertices.
        let mut i = 0;
        let mut k = 0;
        let mut m = tri_offset_south_cap as usize;
        while i < longitudes {
            // North.
            tris[k] = i;
            tris[k + 1] = vert_offset_north_hemi + i;
            tris[k + 2] = vert_offset_north_hemi + i + 1;
            // South.
            tris[m] = vert_offset_south_cap + i;
            tris[m + 1] = vert_offset_south_polar + i + 1;
            tris[m + 2] = vert_offset_south_polar + i;
            i += 1;
            k += 3;
            m += 3;
        }
        // Hemispheres.
        // Each quad between adjacent latitude rows becomes two triangles.
        let mut i = 0;
        let mut k = tri_offset_north_hemi as usize;
        let mut m = tri_offset_south_hemi as usize;
        while i < half_latsn1 {
            let i_lonsp1 = i * lonsp1;
            let vert_curr_lat_north = vert_offset_north_hemi + i_lonsp1;
            let vert_next_lat_north = vert_curr_lat_north + lonsp1;
            let vert_curr_lat_south = vert_offset_south_equator + i_lonsp1;
            let vert_next_lat_south = vert_curr_lat_south + lonsp1;
            let mut j = 0;
            while j < longitudes {
                // North.
                let north00 = vert_curr_lat_north + j;
                let north01 = vert_next_lat_north + j;
                let north11 = vert_next_lat_north + j + 1;
                let north10 = vert_curr_lat_north + j + 1;
                tris[k] = north00;
                tris[k + 1] = north11;
                tris[k + 2] = north10;
                tris[k + 3] = north00;
                tris[k + 4] = north01;
                tris[k + 5] = north11;
                // South.
                let south00 = vert_curr_lat_south + j;
                let south01 = vert_next_lat_south + j;
                let south11 = vert_next_lat_south + j + 1;
                let south10 = vert_curr_lat_south + j + 1;
                tris[m] = south00;
                tris[m + 1] = south11;
                tris[m + 2] = south10;
                tris[m + 3] = south00;
                tris[m + 4] = south01;
                tris[m + 5] = south11;
                j += 1;
                k += 6;
                m += 6;
            }
            i += 1;
        }
        // Cylinder.
        // Quads between consecutive rings (equators included), two triangles each.
        let mut i = 0;
        let mut k = tri_offset_cylinder as usize;
        while i < ringsp1 {
            let vert_curr_lat = vert_offset_north_equator + i * lonsp1;
            let vert_next_lat = vert_curr_lat + lonsp1;
            let mut j = 0;
            while j < longitudes {
                let cy00 = vert_curr_lat + j;
                let cy01 = vert_next_lat + j;
                let cy11 = vert_next_lat + j + 1;
                let cy10 = vert_curr_lat + j + 1;
                tris[k] = cy00;
                tris[k + 1] = cy11;
                tris[k + 2] = cy10;
                tris[k + 3] = cy00;
                tris[k + 4] = cy01;
                tris[k + 5] = cy11;
                j += 1;
                k += 6;
            }
            i += 1;
        }
        let vs: Vec<[f32; 3]> = vs.into_iter().map(Into::into).collect();
        let vns: Vec<[f32; 3]> = vns.into_iter().map(Into::into).collect();
        let vts: Vec<[f32; 2]> = vts.into_iter().map(Into::into).collect();
        // Sanity-check that the offset bookkeeping filled exactly what was allocated.
        assert_eq!(vs.len(), vert_len);
        assert_eq!(tris.len(), fs_len as usize);
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, vs)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, vns)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, vts)
        .with_inserted_indices(Indices::U32(tris))
    }
}
impl Meshable for Capsule3d {
type Output = Capsule3dMeshBuilder;
fn mesh(&self) -> Self::Output {
Capsule3dMeshBuilder {
capsule: *self,
..Default::default()
}
}
}
impl From<Capsule3d> for Mesh {
    /// Builds a capsule mesh using the default tessellation settings.
    fn from(capsule: Capsule3d) -> Self {
        let builder = capsule.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/sphere.rs | crates/bevy_mesh/src/primitives/dim3/sphere.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{ops, primitives::Sphere};
use bevy_reflect::prelude::*;
use core::f32::consts::PI;
use hexasphere::shapes::IcoSphere;
use thiserror::Error;
/// An error when creating an icosphere [`Mesh`] from a [`SphereMeshBuilder`].
#[derive(Clone, Copy, Debug, Error)]
pub enum IcosphereError {
    /// The icosphere has too many vertices.
    // An icosphere with `s` subdivisions has `10 * (s + 1)^2 + 2` vertices, so
    // 79 subdivisions (64,002 vertices) is the largest count below the 65,535 cap.
    #[error("Cannot create an icosphere of {subdivisions} subdivisions due to there being too many vertices being generated: {number_of_resulting_points}. (Limited to 65535 vertices or 79 subdivisions)")]
    TooManyVertices {
        /// The number of subdivisions used. 79 is the largest allowed value for a mesh to be generated.
        subdivisions: u32,
        /// The number of vertices generated. 65535 is the largest allowed value for a mesh to be generated.
        number_of_resulting_points: u32,
    },
}
/// A type of sphere mesh.
#[derive(Clone, Copy, Debug, Reflect)]
#[reflect(Default, Debug, Clone)]
pub enum SphereKind {
    /// An icosphere, a spherical mesh that consists of similar sized triangles.
    Ico {
        /// The number of subdivisions applied.
        /// The number of faces quadruples with each subdivision.
        // Must be below 80; see [`IcosphereError::TooManyVertices`].
        subdivisions: u32,
    },
    /// A UV sphere, a spherical mesh that consists of quadrilaterals
    /// apart from triangles at the top and bottom.
    Uv {
        /// The number of longitudinal sectors, aka the horizontal resolution.
        #[doc(alias = "horizontal_resolution")]
        sectors: u32,
        /// The number of latitudinal stacks, aka the vertical resolution.
        #[doc(alias = "vertical_resolution")]
        stacks: u32,
    },
}
impl Default for SphereKind {
fn default() -> Self {
Self::Ico { subdivisions: 5 }
}
}
/// A builder used for creating a [`Mesh`] with a [`Sphere`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct SphereMeshBuilder {
    /// The [`Sphere`] shape.
    pub sphere: Sphere,
    /// The type of sphere mesh that will be built.
    pub kind: SphereKind,
}
impl SphereMeshBuilder {
    /// Creates a new [`SphereMeshBuilder`] from a radius and [`SphereKind`].
    #[inline]
    pub const fn new(radius: f32, kind: SphereKind) -> Self {
        Self {
            sphere: Sphere { radius },
            kind,
        }
    }

    /// Sets the [`SphereKind`] that will be used for building the mesh.
    #[inline]
    pub const fn kind(mut self, kind: SphereKind) -> Self {
        self.kind = kind;
        self
    }

    /// Creates an icosphere mesh with the given number of subdivisions.
    ///
    /// The number of faces quadruples with each subdivision.
    /// If there are `80` or more subdivisions, the vertex count will be too large,
    /// and an [`IcosphereError`] is returned.
    ///
    /// A good default is `5` subdivisions.
    ///
    /// # Errors
    ///
    /// Returns [`IcosphereError::TooManyVertices`] when `subdivisions >= 80`.
    pub fn ico(&self, subdivisions: u32) -> Result<Mesh, IcosphereError> {
        if subdivisions >= 80 {
            /*
            Number of triangles:
            N = 20
            Number of edges:
            E = 30
            Number of vertices:
            V = 12
            Number of points within a triangle (triangular numbers):
            inner(s) = (s^2 + s) / 2
            Number of points on an edge:
            edges(s) = s
            Add up all vertices on the surface:
            vertices(s) = edges(s) * E + inner(s - 1) * N + V
            Expand and simplify. Notice that the triangular number formula has roots at -1, and 0, so translating it one to the right fixes it.
            subdivisions(s) = 30s + 20((s^2 - 2s + 1 + s - 1) / 2) + 12
            subdivisions(s) = 30s + 10s^2 - 10s + 12
            subdivisions(s) = 10(s^2 + 2s) + 12
            Factor an (s + 1) term to simplify in terms of calculation
            subdivisions(s) = 10(s + 1)^2 + 12 - 10
            resulting_vertices(s) = 10(s + 1)^2 + 2
            */
            let temp = subdivisions + 1;
            let number_of_resulting_points = temp * temp * 10 + 2;
            return Err(IcosphereError::TooManyVertices {
                subdivisions,
                number_of_resulting_points,
            });
        }
        // UV-map each raw unit-sphere point from its spherical coordinates.
        let generated = IcoSphere::new(subdivisions as usize, |point| {
            let inclination = ops::acos(point.y);
            let azimuth = ops::atan2(point.z, point.x);
            let norm_inclination = inclination / PI;
            let norm_azimuth = 0.5 - (azimuth / core::f32::consts::TAU);
            [norm_azimuth, norm_inclination]
        });
        let raw_points = generated.raw_points();
        // Scale the unit-sphere points by the radius; the unscaled unit points
        // double as the vertex normals.
        let points = raw_points
            .iter()
            .map(|&p| (p * self.sphere.radius).into())
            .collect::<Vec<[f32; 3]>>();
        let normals = raw_points
            .iter()
            .copied()
            .map(Into::into)
            .collect::<Vec<[f32; 3]>>();
        let uvs = generated.raw_data().to_owned();
        let mut indices = Vec::with_capacity(generated.indices_per_main_triangle() * 20);
        // The icosphere is generated as 20 subdivided main triangles.
        for i in 0..20 {
            generated.get_indices(i, &mut indices);
        }
        let indices = Indices::U32(indices);
        Ok(Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(indices)
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, points)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs))
    }

    /// Creates a UV sphere [`Mesh`] with the given number of
    /// longitudinal sectors and latitudinal stacks, aka horizontal and vertical resolution.
    ///
    /// A good default is `32` sectors and `18` stacks.
    pub fn uv(&self, sectors: u32, stacks: u32) -> Mesh {
        // Largely inspired from http://www.songho.ca/opengl/gl_sphere.html
        let sectors_f32 = sectors as f32;
        let stacks_f32 = stacks as f32;
        let length_inv = 1. / self.sphere.radius;
        let sector_step = 2. * PI / sectors_f32;
        let stack_step = PI / stacks_f32;
        // The vertex grid has (stacks + 1) rows of (sectors + 1) vertices — the seam
        // column and pole rows are duplicated so UVs can differ. (This previously
        // reserved only `stacks * sectors`, forcing reallocations while pushing.)
        let n_vertices = ((stacks + 1) * (sectors + 1)) as usize;
        let mut vertices: Vec<[f32; 3]> = Vec::with_capacity(n_vertices);
        let mut normals: Vec<[f32; 3]> = Vec::with_capacity(n_vertices);
        let mut uvs: Vec<[f32; 2]> = Vec::with_capacity(n_vertices);
        // Every band contributes `sectors` quads (2 triangles), except the two polar
        // bands which contribute 1 triangle per sector each.
        let n_indices = (sectors * stacks.saturating_sub(1) * 6) as usize;
        let mut indices: Vec<u32> = Vec::with_capacity(n_indices);
        for i in 0..stacks + 1 {
            let stack_angle = PI / 2. - (i as f32) * stack_step;
            let xy = self.sphere.radius * ops::cos(stack_angle);
            let z = self.sphere.radius * ops::sin(stack_angle);
            for j in 0..sectors + 1 {
                let sector_angle = (j as f32) * sector_step;
                let x = xy * ops::cos(sector_angle);
                let y = xy * ops::sin(sector_angle);
                vertices.push([x, y, z]);
                normals.push([x * length_inv, y * length_inv, z * length_inv]);
                uvs.push([(j as f32) / sectors_f32, (i as f32) / stacks_f32]);
            }
        }
        // indices
        //  k1--k1+1
        //  |  / |
        //  | /  |
        //  k2--k2+1
        for i in 0..stacks {
            let mut k1 = i * (sectors + 1);
            let mut k2 = k1 + sectors + 1;
            for _j in 0..sectors {
                // Skip the degenerate triangle at the north pole row.
                if i != 0 {
                    indices.push(k1);
                    indices.push(k2);
                    indices.push(k1 + 1);
                }
                // Skip the degenerate triangle at the south pole row.
                if i != stacks - 1 {
                    indices.push(k1 + 1);
                    indices.push(k2);
                    indices.push(k2 + 1);
                }
                k1 += 1;
                k2 += 1;
            }
        }
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(Indices::U32(indices))
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, vertices)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl MeshBuilder for SphereMeshBuilder {
    /// Builds a [`Mesh`] according to the configuration in `self`.
    ///
    /// # Panics
    ///
    /// Panics if the sphere is a [`SphereKind::Ico`] with a subdivision count
    /// that is greater than or equal to `80` because there will be too many vertices.
    fn build(&self) -> Mesh {
        match self.kind {
            SphereKind::Uv { sectors, stacks } => self.uv(sectors, stacks),
            // `ico` only fails for >= 80 subdivisions; see the panic note above.
            SphereKind::Ico { subdivisions } => self.ico(subdivisions).unwrap(),
        }
    }
}
impl Meshable for Sphere {
type Output = SphereMeshBuilder;
fn mesh(&self) -> Self::Output {
SphereMeshBuilder {
sphere: *self,
..Default::default()
}
}
}
impl From<Sphere> for Mesh {
    /// Builds a sphere mesh using the default [`SphereKind`].
    fn from(sphere: Sphere) -> Self {
        let builder = sphere.mesh();
        builder.build()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs | crates/bevy_mesh/src/primitives/dim3/triangle3d.rs | use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology};
use bevy_asset::RenderAssetUsages;
use bevy_math::{primitives::Triangle3d, Vec3};
use bevy_reflect::prelude::*;
/// A builder used for creating a [`Mesh`] with a [`Triangle3d`] shape.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct Triangle3dMeshBuilder {
    /// The [`Triangle3d`] shape to mesh.
    triangle: Triangle3d,
}
impl MeshBuilder for Triangle3dMeshBuilder {
    /// Builds a single-triangle mesh with flat shading.
    fn build(&self) -> Mesh {
        let tri = &self.triangle;
        // Every vertex shares the face normal (zero if the triangle is degenerate).
        let face_normal: Vec3 = normal_vec(tri);
        let normals = vec![face_normal; 3];
        let positions: Vec<_> = tri.vertices.into();
        let uvs: Vec<_> = uv_coords(tri).into();
        let indices = Indices::U32(vec![0, 1, 2]);
        Mesh::new(
            PrimitiveTopology::TriangleList,
            RenderAssetUsages::default(),
        )
        .with_inserted_indices(indices)
        .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions)
        .with_inserted_attribute(Mesh::ATTRIBUTE_NORMAL, normals)
        .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs)
    }
}
impl Meshable for Triangle3d {
    type Output = Triangle3dMeshBuilder;

    /// Creates a mesh builder for this triangle.
    fn mesh(&self) -> Self::Output {
        Self::Output { triangle: *self }
    }
}
/// The normal of a [`Triangle3d`], with zeroing so that a [`Vec3`] is always obtained for meshing.
#[inline]
pub(crate) fn normal_vec(triangle: &Triangle3d) -> Vec3 {
    match triangle.normal() {
        Some(normal) => normal.into(),
        // Degenerate triangles have no well-defined normal; fall back to zero.
        None => Vec3::ZERO,
    }
}
/// Unskewed uv-coordinates for a [`Triangle3d`].
///
/// The edge `a -> b` defines the u-axis; `c` is orthogonally projected onto it.
/// The triangle is fitted into the smallest enclosing axis-aligned uv-rectangle
/// so the mapping is never skewed.
pub(crate) fn uv_coords(triangle: &Triangle3d) -> [[f32; 2]; 3] {
    let [a, b, c] = triangle.vertices;
    let main_length = a.distance(b);
    // A degenerate base edge has no direction; use a fixed fallback mapping.
    let Some(x) = (b - a).try_normalize() else {
        return [[0., 0.], [1., 0.], [0., 1.]];
    };
    // Orthogonal projection of `a -> c` onto the (unit) u-axis direction; its
    // signed length, rescaled by `main_length`, is the u-coordinate of `c`.
    let y_proj = (c - a).project_onto_normalized(x);
    let offset = y_proj.dot(x) / main_length;
    if offset < 0. {
        // Obtuse triangle leaning to the left: the rectangle extends to the
        // left of `a`, so `a` shifts right of u = 0.
        let total_length = 1. - offset;
        [[offset.abs() / total_length, 0.], [1., 0.], [0., 1.]]
    } else if offset > 1. {
        // Obtuse triangle leaning to the right: the rectangle extends to the
        // right of `b`, so `b` shifts left of u = 1.
        [[0., 0.], [1. / offset, 0.], [1., 1.]]
    } else {
        // Acute (or right) triangle: no extension needed.
        [[0., 0.], [1., 0.], [offset, 1.]]
    }
}
impl From<Triangle3d> for Mesh {
fn from(triangle: Triangle3d) -> Self {
triangle.mesh().build()
}
}
#[cfg(test)]
mod tests {
    use super::uv_coords;
    use bevy_math::primitives::Triangle3d;

    // Exercises the three branches of `uv_coords`: leaning left, leaning
    // right, and the acute/right-angle case.
    #[test]
    fn uv_test() {
        use bevy_math::vec3;
        // Obtuse, leaning left (c.x < a.x): `a` shifts right of u = 0.
        let mut triangle = Triangle3d::new(vec3(0., 0., 0.), vec3(2., 0., 0.), vec3(-1., 1., 0.));
        let [a_uv, b_uv, c_uv] = uv_coords(&triangle);
        assert_eq!(a_uv, [1. / 3., 0.]);
        assert_eq!(b_uv, [1., 0.]);
        assert_eq!(c_uv, [0., 1.]);
        // Obtuse, leaning right (c.x > b.x): `b` shifts left of u = 1.
        triangle.vertices[2] = vec3(3., 1., 0.);
        let [a_uv, b_uv, c_uv] = uv_coords(&triangle);
        assert_eq!(a_uv, [0., 0.]);
        assert_eq!(b_uv, [2. / 3., 0.]);
        assert_eq!(c_uv, [1., 1.]);
        // Boundary case (offset == 1): the acute branch, no shifting.
        triangle.vertices[2] = vec3(2., 1., 0.);
        let [a_uv, b_uv, c_uv] = uv_coords(&triangle);
        assert_eq!(a_uv, [0., 0.]);
        assert_eq!(b_uv, [1., 0.]);
        assert_eq!(c_uv, [1., 1.]);
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/cursor.rs | crates/bevy_feathers/src/cursor.rs | //! Provides a way to automatically set the mouse cursor based on hovered entity.
use bevy_app::{App, Plugin, PreUpdate};
use bevy_derive::Deref;
use bevy_ecs::{
component::Component,
entity::Entity,
hierarchy::ChildOf,
query::{With, Without},
reflect::{ReflectComponent, ReflectResource},
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res},
};
use bevy_picking::{hover::HoverMap, pointer::PointerId, PickingSystems};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
#[cfg(feature = "custom_cursor")]
use bevy_window::CustomCursor;
use bevy_window::{CursorIcon, SystemCursorIcon, Window};
/// A resource that specifies the cursor icon to be used when the mouse is not hovering over
/// any other entity. This is used to set the default cursor icon for the window.
// Read by `update_cursor` as the lowest-priority cursor choice.
#[derive(Deref, Resource, Debug, Clone, Default, Reflect)]
#[reflect(Resource, Debug, Default)]
pub struct DefaultCursor(pub EntityCursor);
/// A component that specifies the cursor shape to be used when the pointer hovers over an entity.
/// This is copied to the windows's [`CursorIcon`] component.
///
/// This is effectively the same type as [`CustomCursor`] but with different methods, and used
/// in different places.
#[derive(Component, Debug, Clone, Reflect, PartialEq, Eq)]
#[reflect(Component, Debug, Default, PartialEq, Clone)]
pub enum EntityCursor {
    #[cfg(feature = "custom_cursor")]
    /// Custom cursor image.
    Custom(CustomCursor),
    /// System provided cursor icon.
    System(SystemCursorIcon),
}
/// A resource used to override any [`EntityCursor`] cursor changes.
///
/// This is meant for cases like loading where you don't want the cursor to imply you
/// can interact with something.
// NOTE(review): unlike `DefaultCursor` this has no `#[reflect(Resource)]`
// attribute — confirm whether that omission is intentional.
#[derive(Deref, Resource, Debug, Clone, Default, Reflect)]
pub struct OverrideCursor(pub Option<EntityCursor>);
impl EntityCursor {
    /// Convert the [`EntityCursor`] to a [`CursorIcon`] so that it can be inserted into a
    /// window.
    pub fn to_cursor_icon(&self) -> CursorIcon {
        match self {
            #[cfg(feature = "custom_cursor")]
            EntityCursor::Custom(custom_cursor) => CursorIcon::Custom(custom_cursor.clone()),
            EntityCursor::System(icon) => CursorIcon::from(*icon),
        }
    }

    /// Compare the [`EntityCursor`] to a [`CursorIcon`] so that we can see whether or not
    /// the window cursor needs to be changed.
    // Used by `update_cursor` to skip redundant `CursorIcon` inserts.
    pub fn eq_cursor_icon(&self, cursor_icon: &CursorIcon) -> bool {
        // If feature custom_cursor is not enabled in bevy_feathers, we can't know if it is or not
        // in bevy_window. So we use the wrapper function `as_system` to let bevy_window check its own feature.
        // Otherwise it is not possible to have a match that both covers all cases and doesn't have unreachable
        // branches under all feature combinations.
        match (self, cursor_icon, cursor_icon.as_system()) {
            #[cfg(feature = "custom_cursor")]
            (EntityCursor::Custom(custom), CursorIcon::Custom(other), _) => custom == other,
            (EntityCursor::System(system), _, Some(cursor_icon)) => *system == *cursor_icon,
            // Mixed custom/system pairs never compare equal.
            _ => false,
        }
    }
}
impl Default for EntityCursor {
fn default() -> Self {
EntityCursor::System(Default::default())
}
}
/// System which updates the window cursor icon whenever the mouse hovers over an entity with
/// an [`EntityCursor`] component (or one of its ancestors has one). If no entity is hovered,
/// the cursor icon is set to the cursor in the [`DefaultCursor`] resource, unless
/// [`OverrideCursor`] holds a value, which takes priority over everything else.
pub(crate) fn update_cursor(
    mut commands: Commands,
    hover_map: Option<Res<HoverMap>>,
    parent_query: Query<&ChildOf>,
    cursor_query: Query<&EntityCursor, Without<Window>>,
    q_windows: Query<(Entity, Option<&CursorIcon>), With<Window>>,
    r_default_cursor: Res<DefaultCursor>,
    r_override_cursor: Res<OverrideCursor>,
) {
    // Priority: override resource > hovered entity (or nearest ancestor with an
    // `EntityCursor`) > default cursor.
    let cursor = r_override_cursor.0.as_ref().unwrap_or_else(|| {
        hover_map
            .and_then(|hover_map| match hover_map.get(&PointerId::Mouse) {
                Some(hover_set) => hover_set.keys().find_map(|entity| {
                    // Check the hovered entity itself first, then walk up the
                    // hierarchy for an inherited cursor.
                    cursor_query.get(*entity).ok().or_else(|| {
                        parent_query
                            .iter_ancestors(*entity)
                            .find_map(|e| cursor_query.get(e).ok())
                    })
                }),
                None => None,
            })
            .unwrap_or(&r_default_cursor)
    });
    for (entity, prev_cursor) in q_windows.iter() {
        // Skip windows whose `CursorIcon` already matches to avoid redundant inserts.
        if let Some(prev_cursor) = prev_cursor
            && cursor.eq_cursor_icon(prev_cursor)
        {
            continue;
        }
        commands.entity(entity).insert(cursor.to_cursor_icon());
    }
}
/// Plugin that supports automatically changing the cursor based on the hovered entity.
// The work is done by `update_cursor`, scheduled in `PreUpdate` after picking.
pub struct CursorIconPlugin;
impl Plugin for CursorIconPlugin {
    /// Initializes the cursor resources and schedules [`update_cursor`].
    fn build(&self, app: &mut App) {
        // `init_resource` only inserts when the resource is absent, so any
        // user-provided values are preserved. Initialize both resources
        // unconditionally: previously `OverrideCursor` was only initialized when
        // `DefaultCursor` was missing, so an app that pre-inserted `DefaultCursor`
        // would run `update_cursor` without its required `OverrideCursor` resource.
        app.init_resource::<DefaultCursor>();
        app.init_resource::<OverrideCursor>();
        app.add_systems(PreUpdate, update_cursor.in_set(PickingSystems::Last));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/lib.rs | crates/bevy_feathers/src/lib.rs | //! `bevy_feathers` is a collection of styled and themed widgets for building editors and
//! inspectors.
//!
//! The aesthetic choices made here are designed with a future Bevy Editor in mind,
//! but this crate is deliberately exposed to the public to allow the broader ecosystem to easily create
//! tooling for themselves and others that fits cohesively together.
//!
//! While it may be tempting to use this crate for your game's UI, it's deliberately not intended for that.
//! We've opted for a clean, functional style, and prioritized consistency over customization.
//! That said, if you like what you see, it can be a helpful learning tool.
//! Consider copying this code into your own project,
//! and refining the styles and abstractions provided to meet your needs.
//!
//! ## Warning: Experimental!
//! All that said, this crate is still experimental and unfinished!
//! It will change in breaking ways, and there will be both bugs and limitations.
//!
//! Please report issues, submit fixes and propose changes.
//! Thanks for stress-testing; let's build something better together.
use bevy_app::{
HierarchyPropagatePlugin, Plugin, PluginGroup, PluginGroupBuilder, PostUpdate, PropagateSet,
};
use bevy_asset::embedded_asset;
use bevy_ecs::{query::With, schedule::IntoScheduleConfigs};
use bevy_input_focus::{tab_navigation::TabNavigationPlugin, InputDispatchPlugin};
use bevy_text::{TextColor, TextFont};
use bevy_ui::UiSystems;
use bevy_ui_render::UiMaterialPlugin;
use bevy_ui_widgets::UiWidgetsPlugins;
use crate::{
alpha_pattern::{AlphaPatternMaterial, AlphaPatternResource},
controls::ControlsPlugin,
cursor::{CursorIconPlugin, DefaultCursor, EntityCursor},
theme::{ThemedText, UiTheme},
};
mod alpha_pattern;
pub mod constants;
pub mod controls;
pub mod cursor;
pub mod dark_theme;
pub mod font_styles;
pub mod handle_or_path;
pub mod palette;
pub mod rounded_corners;
pub mod theme;
pub mod tokens;
/// Plugin which installs observers and systems for feathers themes, cursors, and all controls.
// Usually added via the [`FeathersPlugins`] group, which also registers its dependencies.
pub struct FeathersPlugin;
impl Plugin for FeathersPlugin {
    /// Registers embedded assets, sub-plugins, theme observers, and propagation systems.
    fn build(&self, app: &mut bevy_app::App) {
        app.init_resource::<UiTheme>();
        // Embedded font
        embedded_asset!(app, "assets/fonts/FiraSans-Bold.ttf");
        embedded_asset!(app, "assets/fonts/FiraSans-BoldItalic.ttf");
        embedded_asset!(app, "assets/fonts/FiraSans-Regular.ttf");
        embedded_asset!(app, "assets/fonts/FiraSans-Italic.ttf");
        embedded_asset!(app, "assets/fonts/FiraMono-Medium.ttf");
        // Embedded shader
        embedded_asset!(app, "assets/shaders/alpha_pattern.wgsl");
        embedded_asset!(app, "assets/shaders/color_plane.wgsl");
        // Text colors and fonts are propagated down the hierarchy, but only to
        // entities that opted in with the `ThemedText` marker.
        app.add_plugins((
            ControlsPlugin,
            CursorIconPlugin,
            HierarchyPropagatePlugin::<TextColor, With<ThemedText>>::new(PostUpdate),
            HierarchyPropagatePlugin::<TextFont, With<ThemedText>>::new(PostUpdate),
            UiMaterialPlugin::<AlphaPatternMaterial>::default(),
        ));
        // This needs to run in UiSystems::Propagate so the fonts are up-to-date for `measure_text_system`
        // and `detect_text_needs_rerender` in UiSystems::Content
        app.configure_sets(
            PostUpdate,
            PropagateSet::<TextFont>::default().in_set(UiSystems::Propagate),
        );
        app.insert_resource(DefaultCursor(EntityCursor::System(
            bevy_window::SystemCursorIcon::Default,
        )));
        app.add_systems(PostUpdate, theme::update_theme)
            .add_observer(theme::on_changed_background)
            .add_observer(theme::on_changed_border)
            .add_observer(theme::on_changed_font_color)
            .add_observer(font_styles::on_changed_font);
        app.init_resource::<AlphaPatternResource>();
    }
}
/// A plugin group that adds all dependencies for Feathers
// Includes widgets, input dispatch, tab navigation, and `FeathersPlugin` itself.
pub struct FeathersPlugins;
impl PluginGroup for FeathersPlugins {
    /// Builds the group; `FeathersPlugin` is deliberately added last, after its
    /// widget and input dependencies.
    fn build(self) -> PluginGroupBuilder {
        PluginGroupBuilder::start::<Self>()
            .add_group(UiWidgetsPlugins)
            .add(InputDispatchPlugin)
            .add(TabNavigationPlugin)
            .add(FeathersPlugin)
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/theme.rs | crates/bevy_feathers/src/theme.rs | //! A framework for theming.
use bevy_app::{Propagate, PropagateOver};
use bevy_color::{palettes, Color};
use bevy_ecs::{
change_detection::DetectChanges,
component::Component,
lifecycle::Insert,
observer::On,
query::Changed,
reflect::{ReflectComponent, ReflectResource},
resource::Resource,
system::{Commands, Query, Res},
};
use bevy_log::warn_once;
use bevy_platform::collections::HashMap;
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_text::TextColor;
use bevy_ui::{BackgroundColor, BorderColor};
use smol_str::SmolStr;
/// A design token for the theme. This serves as the lookup key for the theme properties.
// Thin newtype over `SmolStr`; hashed/compared by its string content.
#[derive(Clone, PartialEq, Eq, Hash, Reflect)]
pub struct ThemeToken(SmolStr);
impl ThemeToken {
    /// Construct a new [`ThemeToken`] from a [`SmolStr`].
    pub const fn new(text: SmolStr) -> Self {
        Self(text)
    }

    /// Construct a new [`ThemeToken`] from a static string.
    // Usable in `const` contexts, e.g. for the tokens in the `tokens` module.
    pub const fn new_static(text: &'static str) -> Self {
        Self(SmolStr::new_static(text))
    }
}
impl core::fmt::Display for ThemeToken {
    /// Displays the raw token text, delegating to the inner [`SmolStr`] so any
    /// formatting flags (width, fill, …) are respected.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        core::fmt::Display::fmt(&self.0, f)
    }
}
impl core::fmt::Debug for ThemeToken {
    // Manual impl so tokens always print compactly as `ThemeToken("…")`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "ThemeToken({:?})", self.0)
    }
}
/// A collection of properties that make up a theme.
#[derive(Default, Clone, Reflect, Debug)]
#[reflect(Default, Debug)]
pub struct ThemeProps {
    /// Map of design tokens to colors.
    pub color: HashMap<ThemeToken, Color>,
    // Other style property types to be added later.
}
/// The currently selected user interface theme. Overwriting this resource changes the theme.
// `update_theme` re-applies all themed colors whenever this resource changes.
#[derive(Resource, Default, Reflect, Debug)]
#[reflect(Resource, Default, Debug)]
pub struct UiTheme(pub ThemeProps);
impl UiTheme {
    /// Lookup a color by design token. If the theme does not have an entry for that token,
    /// logs a warning and returns an error color.
    pub fn color(&self, token: &ThemeToken) -> Color {
        self.0.color.get(token).copied().unwrap_or_else(|| {
            warn_once!("Theme color {} not found.", token);
            // Return a bright obnoxious color to make the error obvious.
            palettes::basic::FUCHSIA.into()
        })
    }

    /// Associate a design token with a given color.
    pub fn set_color(&mut self, token: &str, color: Color) {
        let key = ThemeToken::new(SmolStr::new(token));
        self.0.color.insert(key, color);
    }
}
/// Component which causes the background color of an entity to be set based on a theme color.
// Immutable: updates happen by re-inserting, which fires `on_changed_background`.
#[derive(Component, Clone)]
#[require(BackgroundColor)]
#[component(immutable)]
#[derive(Reflect)]
#[reflect(Component, Clone)]
pub struct ThemeBackgroundColor(pub ThemeToken);
/// Component which causes the border color of an entity to be set based on a theme color.
/// Only supports setting all borders to the same color.
// Immutable: updates happen by re-inserting, which fires `on_changed_border`.
#[derive(Component, Clone)]
#[require(BorderColor)]
#[component(immutable)]
#[derive(Reflect)]
#[reflect(Component, Clone)]
pub struct ThemeBorderColor(pub ThemeToken);
/// Component which causes the inherited text color of an entity to be set based on a theme color.
#[derive(Component, Clone)]
#[component(immutable)]
#[derive(Reflect)]
#[reflect(Component, Clone)]
#[require(ThemedText, PropagateOver::<TextColor>::default())]
pub struct ThemeFontColor(pub ThemeToken);
/// A marker component that is used to indicate that the text entity wants to opt-in to using
/// inherited text styles.
#[derive(Component, Reflect, Default)]
#[reflect(Component)]
pub struct ThemedText;
/// System that re-applies every themed background and border color whenever the
/// [`UiTheme`] resource changes.
pub(crate) fn update_theme(
    mut q_background: Query<(&mut BackgroundColor, &ThemeBackgroundColor)>,
    mut q_border: Query<(&mut BorderColor, &ThemeBorderColor)>,
    theme: Res<UiTheme>,
) {
    // Nothing to do unless the theme itself was mutated this frame.
    if !theme.is_changed() {
        return;
    }
    // Refresh all background colors.
    for (mut bg, token) in &mut q_background {
        bg.0 = theme.color(&token.0);
    }
    // Refresh all border colors.
    for (mut border, token) in &mut q_border {
        border.set_all(theme.color(&token.0));
    }
}
/// Observer that re-resolves the background color when a [`ThemeBackgroundColor`]
/// component is inserted (the component is immutable, so changes arrive as re-inserts).
pub(crate) fn on_changed_background(
    insert: On<Insert, ThemeBackgroundColor>,
    mut q_background: Query<
        (&mut BackgroundColor, &ThemeBackgroundColor),
        Changed<ThemeBackgroundColor>,
    >,
    theme: Res<UiTheme>,
) {
    let Ok((mut bg, theme_bg)) = q_background.get_mut(insert.entity) else {
        return;
    };
    bg.0 = theme.color(&theme_bg.0);
}
/// Observer that re-resolves the border color when a [`ThemeBorderColor`] component
/// is inserted.
pub(crate) fn on_changed_border(
    insert: On<Insert, ThemeBorderColor>,
    mut q_border: Query<(&mut BorderColor, &ThemeBorderColor), Changed<ThemeBorderColor>>,
    theme: Res<UiTheme>,
) {
    // Update border colors where the design token has changed.
    if let Ok((mut border, theme_border)) = q_border.get_mut(insert.entity) {
        border.set_all(theme.color(&theme_border.0));
    }
}
/// An observer which looks for changes to the [`ThemeFontColor`] component on an entity, and
/// propagates downward the text color to all participating text entities.
pub(crate) fn on_changed_font_color(
    insert: On<Insert, ThemeFontColor>,
    font_color: Query<&ThemeFontColor>,
    theme: Res<UiTheme>,
    mut commands: Commands,
) {
    let Ok(token) = font_color.get(insert.entity) else {
        return;
    };
    // Resolve the token against the current theme and push the resulting color
    // down the hierarchy via `Propagate`.
    let color = theme.color(&token.0);
    commands
        .entity(insert.entity)
        .insert(Propagate(TextColor(color)));
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/palette.rs | crates/bevy_feathers/src/palette.rs | //! The Feathers standard color palette.
use bevy_color::Color;
/// <div style="background-color: #000000; width: 10px; padding: 10px; border: 1px solid;"></div>
pub const BLACK: Color = Color::oklcha(0.0, 0.0, 0.0, 1.0);
/// <div style="background-color: #1F1F24; width: 10px; padding: 10px; border: 1px solid;"></div> - window background
pub const GRAY_0: Color = Color::oklcha(0.2414, 0.0095, 285.67, 1.0);
/// <div style="background-color: #2A2A2E; width: 10px; padding: 10px; border: 1px solid;"></div> - pane background
pub const GRAY_1: Color = Color::oklcha(0.2866, 0.0072, 285.93, 1.0);
/// <div style="background-color: #36373B; width: 10px; padding: 10px; border: 1px solid;"></div> - item background
pub const GRAY_2: Color = Color::oklcha(0.3373, 0.0071, 274.77, 1.0);
/// <div style="background-color: #46474D; width: 10px; padding: 10px; border: 1px solid;"></div> - item background (active)
pub const GRAY_3: Color = Color::oklcha(0.3992, 0.0101, 278.38, 1.0);
/// <div style="background-color: #414142; width: 10px; padding: 10px; border: 1px solid;"></div> - border
pub const WARM_GRAY_1: Color = Color::oklcha(0.3757, 0.0017, 286.32, 1.0);
/// <div style="background-color: #B1B1B2; width: 10px; padding: 10px; border: 1px solid;"></div> - bright label text
pub const LIGHT_GRAY_1: Color = Color::oklcha(0.7607, 0.0014, 286.37, 1.0);
/// <div style="background-color: #838385; width: 10px; padding: 10px; border: 1px solid;"></div> - dim label text
pub const LIGHT_GRAY_2: Color = Color::oklcha(0.6106, 0.003, 286.31, 1.0);
/// <div style="background-color: #FFFFFF; width: 10px; padding: 10px; border: 1px solid;"></div> - button label text
pub const WHITE: Color = Color::oklcha(1.0, 0.000000059604645, 90.0, 1.0);
/// <div style="background-color: #206EC9; width: 10px; padding: 10px; border: 1px solid;"></div> - call-to-action and selection color
pub const ACCENT: Color = Color::oklcha(0.542, 0.1594, 255.4, 1.0);
/// <div style="background-color: #AB4051; width: 10px; padding: 10px; border: 1px solid;"></div> - for X-axis inputs and drag handles
pub const X_AXIS: Color = Color::oklcha(0.5232, 0.1404, 13.84, 1.0);
/// <div style="background-color: #5D8D0A; width: 10px; padding: 10px; border: 1px solid;"></div> - for Y-axis inputs and drag handles
pub const Y_AXIS: Color = Color::oklcha(0.5866, 0.1543, 129.84, 1.0);
/// <div style="background-color: #2160A3; width: 10px; padding: 10px; border: 1px solid;"></div> - for Z-axis inputs and drag handles
pub const Z_AXIS: Color = Color::oklcha(0.4847, 0.1249, 253.08, 1.0);
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/alpha_pattern.rs | crates/bevy_feathers/src/alpha_pattern.rs | use bevy_app::Plugin;
use bevy_asset::{Asset, Assets, Handle};
use bevy_ecs::{
component::Component,
lifecycle::Add,
observer::On,
reflect::ReflectComponent,
resource::Resource,
system::{Query, Res},
world::FromWorld,
};
use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath};
use bevy_render::render_resource::AsBindGroup;
use bevy_shader::ShaderRef;
use bevy_ui_render::ui_material::{MaterialNode, UiMaterial};
#[derive(AsBindGroup, Asset, TypePath, Default, Debug, Clone)]
pub(crate) struct AlphaPatternMaterial {}
impl UiMaterial for AlphaPatternMaterial {
fn fragment_shader() -> ShaderRef {
"embedded://bevy_feathers/assets/shaders/alpha_pattern.wgsl".into()
}
}
#[derive(Resource)]
pub(crate) struct AlphaPatternResource(pub(crate) Handle<AlphaPatternMaterial>);
impl FromWorld for AlphaPatternResource {
    /// Creates the single shared alpha-pattern material and stores its handle.
    ///
    /// # Panics
    /// Panics if the [`Assets<AlphaPatternMaterial>`] resource has not been
    /// initialized (i.e. the corresponding material plugin was not added).
    fn from_world(world: &mut bevy_ecs::world::World) -> Self {
        // `resource_mut` panics with a descriptive message if missing, replacing the
        // previous `get_resource_mut().unwrap()`.
        let mut ui_materials = world.resource_mut::<Assets<AlphaPatternMaterial>>();
        Self(ui_materials.add(AlphaPatternMaterial::default()))
    }
}
/// Marker that tells us we want to fill in the [`MaterialNode`] with the alpha material.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Default)]
pub(crate) struct AlphaPattern;
/// Observer to fill in the material handle (since we don't have access to the materials asset
/// in the template)
fn on_add_alpha_pattern(
    add: On<Add, AlphaPattern>,
    mut q_material_node: Query<&mut MaterialNode<AlphaPatternMaterial>>,
    r_material: Res<AlphaPatternResource>,
) {
    let Ok(mut material) = q_material_node.get_mut(add.entity) else {
        return;
    };
    // Point the node at the shared alpha-pattern material.
    material.0 = r_material.0.clone();
}
/// Plugin which registers the observer that applies the shared alpha-pattern
/// material to entities marked with [`AlphaPattern`].
pub struct AlphaPatternPlugin;
impl Plugin for AlphaPatternPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        app.add_observer(on_add_alpha_pattern);
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/handle_or_path.rs | crates/bevy_feathers/src/handle_or_path.rs | //! Provides a way to specify assets either by handle or by path.
use bevy_asset::{Asset, Handle};
use bevy_reflect::Reflect;
/// Enum that represents a reference to an asset as either a [`Handle`] or a [`String`] path.
///
/// This is useful for when you want to specify an asset, but don't always have convenient
/// access to an asset server reference.
#[derive(Clone, Debug, Reflect)]
pub enum HandleOrPath<T: Asset> {
/// Specify the asset reference as a handle.
Handle(Handle<T>),
/// Specify the asset reference as a [`String`].
Path(String),
}
impl<T: Asset> Default for HandleOrPath<T> {
    /// Defaults to an empty asset path.
    fn default() -> Self {
        Self::Path(String::new())
    }
}
// Necessary because we don't want to require T: PartialEq
impl<T: Asset> PartialEq for HandleOrPath<T> {
    // Two values are equal only when they are the same variant with equal payloads;
    // a handle never compares equal to a path.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::Handle(lhs), Self::Handle(rhs)) => lhs == rhs,
            (Self::Path(lhs), Self::Path(rhs)) => lhs == rhs,
            _ => false,
        }
    }
}
impl<T: Asset> From<Handle<T>> for HandleOrPath<T> {
fn from(h: Handle<T>) -> Self {
HandleOrPath::Handle(h)
}
}
impl<T: Asset> From<&str> for HandleOrPath<T> {
    // Copies the borrowed path into an owned `Path` variant.
    fn from(p: &str) -> Self {
        HandleOrPath::Path(p.to_owned())
    }
}
impl<T: Asset> From<String> for HandleOrPath<T> {
    // `p` is already owned, so move it directly; the previous `p.clone()`
    // performed a needless allocation and copy.
    fn from(p: String) -> Self {
        HandleOrPath::Path(p)
    }
}
impl<T: Asset> From<&String> for HandleOrPath<T> {
    // Clones the borrowed string into an owned `Path` variant.
    fn from(p: &String) -> Self {
        HandleOrPath::Path(p.clone())
    }
}
impl<T: Asset + Clone> From<&HandleOrPath<T>> for HandleOrPath<T> {
    // Deep-copies whichever variant is referenced.
    fn from(p: &HandleOrPath<T>) -> Self {
        p.clone()
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/tokens.rs | crates/bevy_feathers/src/tokens.rs | //! Design tokens used by Feathers themes.
//!
//! The term "design token" is commonly used in UX design to mean the smallest unit of a theme,
//! similar in concept to a CSS variable. Each token represents an assignment of a color or
//! value to a specific visual aspect of a widget, such as background or border.
use crate::theme::ThemeToken;
/// Window background
pub const WINDOW_BG: ThemeToken = ThemeToken::new_static("feathers.window.bg");
/// Focus ring
pub const FOCUS_RING: ThemeToken = ThemeToken::new_static("feathers.focus");
/// Regular text
pub const TEXT_MAIN: ThemeToken = ThemeToken::new_static("feathers.text.main");
/// Dim text
pub const TEXT_DIM: ThemeToken = ThemeToken::new_static("feathers.text.dim");
// Normal buttons
/// Regular button background
pub const BUTTON_BG: ThemeToken = ThemeToken::new_static("feathers.button.bg");
/// Regular button background (hovered)
pub const BUTTON_BG_HOVER: ThemeToken = ThemeToken::new_static("feathers.button.bg.hover");
/// Regular button background (disabled)
pub const BUTTON_BG_DISABLED: ThemeToken = ThemeToken::new_static("feathers.button.bg.disabled");
/// Regular button background (pressed)
pub const BUTTON_BG_PRESSED: ThemeToken = ThemeToken::new_static("feathers.button.bg.pressed");
/// Regular button text
pub const BUTTON_TEXT: ThemeToken = ThemeToken::new_static("feathers.button.txt");
/// Regular button text (disabled)
pub const BUTTON_TEXT_DISABLED: ThemeToken = ThemeToken::new_static("feathers.button.txt.disabled");
// Primary ("default") buttons
/// Primary button background
pub const BUTTON_PRIMARY_BG: ThemeToken = ThemeToken::new_static("feathers.button.primary.bg");
/// Primary button background (hovered)
pub const BUTTON_PRIMARY_BG_HOVER: ThemeToken =
ThemeToken::new_static("feathers.button.primary.bg.hover");
/// Primary button background (disabled)
pub const BUTTON_PRIMARY_BG_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.button.primary.bg.disabled");
/// Primary button background (pressed)
pub const BUTTON_PRIMARY_BG_PRESSED: ThemeToken =
ThemeToken::new_static("feathers.button.primary.bg.pressed");
/// Primary button text
pub const BUTTON_PRIMARY_TEXT: ThemeToken = ThemeToken::new_static("feathers.button.primary.txt");
/// Primary button text (disabled)
pub const BUTTON_PRIMARY_TEXT_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.button.primary.txt.disabled");
// Slider
/// Background for slider
pub const SLIDER_BG: ThemeToken = ThemeToken::new_static("feathers.slider.bg");
/// Background for slider moving bar
pub const SLIDER_BAR: ThemeToken = ThemeToken::new_static("feathers.slider.bar");
/// Background for slider moving bar (disabled)
pub const SLIDER_BAR_DISABLED: ThemeToken = ThemeToken::new_static("feathers.slider.bar.disabled");
/// Background for slider text
pub const SLIDER_TEXT: ThemeToken = ThemeToken::new_static("feathers.slider.text");
/// Background for slider text (disabled)
pub const SLIDER_TEXT_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.slider.text.disabled");
// Checkbox
/// Checkbox background around the checkmark
pub const CHECKBOX_BG: ThemeToken = ThemeToken::new_static("feathers.checkbox.bg");
/// Checkbox border around the checkmark (disabled)
pub const CHECKBOX_BG_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.checkbox.bg.disabled");
/// Checkbox background around the checkmark
pub const CHECKBOX_BG_CHECKED: ThemeToken = ThemeToken::new_static("feathers.checkbox.bg.checked");
/// Checkbox border around the checkmark (disabled)
pub const CHECKBOX_BG_CHECKED_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.checkbox.bg.checked.disabled");
/// Checkbox border around the checkmark
pub const CHECKBOX_BORDER: ThemeToken = ThemeToken::new_static("feathers.checkbox.border");
/// Checkbox border around the checkmark (hovered)
pub const CHECKBOX_BORDER_HOVER: ThemeToken =
ThemeToken::new_static("feathers.checkbox.border.hover");
/// Checkbox border around the checkmark (disabled)
pub const CHECKBOX_BORDER_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.checkbox.border.disabled");
/// Checkbox check mark
pub const CHECKBOX_MARK: ThemeToken = ThemeToken::new_static("feathers.checkbox.mark");
/// Checkbox check mark (disabled)
pub const CHECKBOX_MARK_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.checkbox.mark.disabled");
/// Checkbox label text
pub const CHECKBOX_TEXT: ThemeToken = ThemeToken::new_static("feathers.checkbox.text");
/// Checkbox label text (disabled)
pub const CHECKBOX_TEXT_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.checkbox.text.disabled");
// Radio button
/// Radio border around the checkmark
pub const RADIO_BORDER: ThemeToken = ThemeToken::new_static("feathers.radio.border");
/// Radio border around the checkmark (hovered)
pub const RADIO_BORDER_HOVER: ThemeToken = ThemeToken::new_static("feathers.radio.border.hover");
/// Radio border around the checkmark (disabled)
pub const RADIO_BORDER_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.radio.border.disabled");
/// Radio check mark
pub const RADIO_MARK: ThemeToken = ThemeToken::new_static("feathers.radio.mark");
/// Radio check mark (disabled)
pub const RADIO_MARK_DISABLED: ThemeToken = ThemeToken::new_static("feathers.radio.mark.disabled");
/// Radio label text
pub const RADIO_TEXT: ThemeToken = ThemeToken::new_static("feathers.radio.text");
/// Radio label text (disabled)
pub const RADIO_TEXT_DISABLED: ThemeToken = ThemeToken::new_static("feathers.radio.text.disabled");
// Toggle Switch
/// Switch background around the checkmark
pub const SWITCH_BG: ThemeToken = ThemeToken::new_static("feathers.switch.bg");
/// Switch border around the checkmark (disabled)
pub const SWITCH_BG_DISABLED: ThemeToken = ThemeToken::new_static("feathers.switch.bg.disabled");
/// Switch background around the checkmark
pub const SWITCH_BG_CHECKED: ThemeToken = ThemeToken::new_static("feathers.switch.bg.checked");
/// Switch border around the checkmark (disabled)
pub const SWITCH_BG_CHECKED_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.switch.bg.checked.disabled");
/// Switch border around the checkmark
pub const SWITCH_BORDER: ThemeToken = ThemeToken::new_static("feathers.switch.border");
/// Switch border around the checkmark (hovered)
pub const SWITCH_BORDER_HOVER: ThemeToken = ThemeToken::new_static("feathers.switch.border.hover");
/// Switch border around the checkmark (disabled)
pub const SWITCH_BORDER_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.switch.border.disabled");
/// Switch slide
pub const SWITCH_SLIDE: ThemeToken = ThemeToken::new_static("feathers.switch.slide");
/// Switch slide (disabled)
pub const SWITCH_SLIDE_DISABLED: ThemeToken =
ThemeToken::new_static("feathers.switch.slide.disabled");
// Color Plane
/// Color plane frame background
pub const COLOR_PLANE_BG: ThemeToken = ThemeToken::new_static("feathers.colorplane.bg");
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/dark_theme.rs | crates/bevy_feathers/src/dark_theme.rs | //! The standard `bevy_feathers` dark theme.
use crate::{palette, tokens};
use bevy_color::{Alpha, Luminance};
use bevy_platform::collections::HashMap;
use crate::theme::ThemeProps;
/// Create a [`ThemeProps`] object and populate it with the colors for the default dark theme.
pub fn create_dark_theme() -> ThemeProps {
ThemeProps {
color: HashMap::from([
(tokens::WINDOW_BG, palette::GRAY_0),
// Button
(tokens::BUTTON_BG, palette::GRAY_3),
(tokens::BUTTON_BG_HOVER, palette::GRAY_3.lighter(0.05)),
(tokens::BUTTON_BG_PRESSED, palette::GRAY_3.lighter(0.1)),
(tokens::BUTTON_BG_DISABLED, palette::GRAY_2),
(tokens::BUTTON_PRIMARY_BG, palette::ACCENT),
(
tokens::BUTTON_PRIMARY_BG_HOVER,
palette::ACCENT.lighter(0.05),
),
(
tokens::BUTTON_PRIMARY_BG_PRESSED,
palette::ACCENT.lighter(0.1),
),
(tokens::BUTTON_PRIMARY_BG_DISABLED, palette::GRAY_2),
(tokens::BUTTON_TEXT, palette::WHITE),
(tokens::BUTTON_TEXT_DISABLED, palette::WHITE.with_alpha(0.5)),
(tokens::BUTTON_PRIMARY_TEXT, palette::WHITE),
(
tokens::BUTTON_PRIMARY_TEXT_DISABLED,
palette::WHITE.with_alpha(0.5),
),
// Slider
(tokens::SLIDER_BG, palette::GRAY_1),
(tokens::SLIDER_BAR, palette::ACCENT),
(tokens::SLIDER_BAR_DISABLED, palette::GRAY_2),
(tokens::SLIDER_TEXT, palette::WHITE),
(tokens::SLIDER_TEXT_DISABLED, palette::WHITE.with_alpha(0.5)),
// Checkbox
(tokens::CHECKBOX_BG, palette::GRAY_3),
(tokens::CHECKBOX_BG_CHECKED, palette::ACCENT),
(
tokens::CHECKBOX_BG_DISABLED,
palette::GRAY_1.with_alpha(0.5),
),
(
tokens::CHECKBOX_BG_CHECKED_DISABLED,
palette::GRAY_3.with_alpha(0.5),
),
(tokens::CHECKBOX_BORDER, palette::GRAY_3),
(tokens::CHECKBOX_BORDER_HOVER, palette::GRAY_3.lighter(0.1)),
(
tokens::CHECKBOX_BORDER_DISABLED,
palette::GRAY_3.with_alpha(0.5),
),
(tokens::CHECKBOX_MARK, palette::WHITE),
(tokens::CHECKBOX_MARK_DISABLED, palette::LIGHT_GRAY_2),
(tokens::CHECKBOX_TEXT, palette::LIGHT_GRAY_1),
(
tokens::CHECKBOX_TEXT_DISABLED,
palette::LIGHT_GRAY_1.with_alpha(0.5),
),
// Radio
(tokens::RADIO_BORDER, palette::GRAY_3),
(tokens::RADIO_BORDER_HOVER, palette::GRAY_3.lighter(0.1)),
(
tokens::RADIO_BORDER_DISABLED,
palette::GRAY_3.with_alpha(0.5),
),
(tokens::RADIO_MARK, palette::ACCENT),
(tokens::RADIO_MARK_DISABLED, palette::ACCENT.with_alpha(0.5)),
(tokens::RADIO_TEXT, palette::LIGHT_GRAY_1),
(
tokens::RADIO_TEXT_DISABLED,
palette::LIGHT_GRAY_1.with_alpha(0.5),
),
// Toggle Switch
(tokens::SWITCH_BG, palette::GRAY_3),
(tokens::SWITCH_BG_CHECKED, palette::ACCENT),
(tokens::SWITCH_BG_DISABLED, palette::GRAY_1.with_alpha(0.5)),
(
tokens::SWITCH_BG_CHECKED_DISABLED,
palette::GRAY_3.with_alpha(0.5),
),
(tokens::SWITCH_BORDER, palette::GRAY_3),
(tokens::SWITCH_BORDER_HOVER, palette::GRAY_3.lighter(0.1)),
(
tokens::SWITCH_BORDER_DISABLED,
palette::GRAY_3.with_alpha(0.5),
),
(tokens::SWITCH_SLIDE, palette::LIGHT_GRAY_2),
(
tokens::SWITCH_SLIDE_DISABLED,
palette::LIGHT_GRAY_2.with_alpha(0.3),
),
(tokens::COLOR_PLANE_BG, palette::GRAY_1),
]),
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/rounded_corners.rs | crates/bevy_feathers/src/rounded_corners.rs | //! Mechanism for specifying which corners of a widget are rounded, used for segmented buttons
//! and control groups.
use bevy_ui::{BorderRadius, Val};
/// Allows specifying which corners are rounded and which are sharp. All rounded corners
/// have the same radius. Not all combinations are supported, only the ones that make
/// sense for a segmented buttons.
///
/// A typical use case would be a segmented button consisting of 3 individual buttons in a
/// row. In that case, you would have the leftmost button have rounded corners on the left,
/// the right-most button have rounded corners on the right, and the center button have
/// only sharp corners.
#[derive(Debug, Clone, Copy, Default, PartialEq)]
pub enum RoundedCorners {
    /// No corners are rounded.
    None,
    /// All corners are rounded.
    #[default]
    All,
    /// Top-left corner is rounded.
    TopLeft,
    /// Top-right corner is rounded.
    TopRight,
    /// Bottom-right corner is rounded.
    BottomRight,
    /// Bottom-left corner is rounded.
    BottomLeft,
    /// Top corners are rounded.
    Top,
    /// Right corners are rounded.
    Right,
    /// Bottom corners are rounded.
    Bottom,
    /// Left corners are rounded.
    Left,
}
impl RoundedCorners {
    /// Convert the `RoundedCorners` to a `BorderRadius` for use in a `Node`.
    pub fn to_border_radius(&self, radius: f32) -> BorderRadius {
        let rounded = Val::Px(radius);
        let sharp = Val::ZERO;
        // For each variant, select (top_left, top_right, bottom_right, bottom_left).
        let (top_left, top_right, bottom_right, bottom_left) = match self {
            RoundedCorners::None => (sharp, sharp, sharp, sharp),
            RoundedCorners::All => (rounded, rounded, rounded, rounded),
            RoundedCorners::TopLeft => (rounded, sharp, sharp, sharp),
            RoundedCorners::TopRight => (sharp, rounded, sharp, sharp),
            RoundedCorners::BottomRight => (sharp, sharp, rounded, sharp),
            RoundedCorners::BottomLeft => (sharp, sharp, sharp, rounded),
            RoundedCorners::Top => (rounded, rounded, sharp, sharp),
            RoundedCorners::Right => (sharp, rounded, rounded, sharp),
            RoundedCorners::Bottom => (sharp, sharp, rounded, rounded),
            RoundedCorners::Left => (rounded, sharp, sharp, rounded),
        };
        BorderRadius {
            top_left,
            top_right,
            bottom_right,
            bottom_left,
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/constants.rs | crates/bevy_feathers/src/constants.rs | //! Various non-themable constants for the Feathers look and feel.
/// Font asset paths
pub mod fonts {
/// Default regular font path
pub const REGULAR: &str = "embedded://bevy_feathers/assets/fonts/FiraSans-Regular.ttf";
/// Regular italic font path
pub const ITALIC: &str = "embedded://bevy_feathers/assets/fonts/FiraSans-Italic.ttf";
/// Bold font path
pub const BOLD: &str = "embedded://bevy_feathers/assets/fonts/FiraSans-Bold.ttf";
/// Bold italic font path
pub const BOLD_ITALIC: &str = "embedded://bevy_feathers/assets/fonts/FiraSans-BoldItalic.ttf";
/// Monospace font path
pub const MONO: &str = "embedded://bevy_feathers/assets/fonts/FiraMono-Medium.ttf";
}
/// Size constants
pub mod size {
use bevy_ui::Val;
/// Common row size for buttons, sliders, spinners, etc.
pub const ROW_HEIGHT: Val = Val::Px(24.0);
/// Width and height of a checkbox
pub const CHECKBOX_SIZE: Val = Val::Px(18.0);
/// Width and height of a radio button
pub const RADIO_SIZE: Val = Val::Px(18.0);
/// Width of a toggle switch
pub const TOGGLE_WIDTH: Val = Val::Px(32.0);
/// Height of a toggle switch
pub const TOGGLE_HEIGHT: Val = Val::Px(18.0);
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/font_styles.rs | crates/bevy_feathers/src/font_styles.rs | //! A framework for inheritable font styles.
use bevy_app::{Propagate, PropagateOver};
use bevy_asset::{AssetServer, Handle};
use bevy_ecs::{
component::Component,
lifecycle::Insert,
observer::On,
reflect::ReflectComponent,
system::{Commands, Query, Res},
};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_text::{Font, TextFont};
use crate::{handle_or_path::HandleOrPath, theme::ThemedText};
/// A component which, when inserted on an entity, will load the given font and propagate it
/// downward to any child text entity that has the [`ThemedText`] marker.
#[derive(Component, Default, Clone, Debug, Reflect)]
#[reflect(Component, Default)]
#[require(ThemedText, PropagateOver::<TextFont>::default())]
pub struct InheritableFont {
/// The font handle or path.
pub font: HandleOrPath<Font>,
/// The desired font size.
pub font_size: f32,
}
impl InheritableFont {
/// Create a new `InheritableFont` from a handle.
pub fn from_handle(handle: Handle<Font>) -> Self {
Self {
font: HandleOrPath::Handle(handle),
font_size: 16.0,
}
}
/// Create a new `InheritableFont` from a path.
pub fn from_path(path: &str) -> Self {
Self {
font: HandleOrPath::Path(path.to_string()),
font_size: 16.0,
}
}
}
/// An observer which looks for changes to the [`InheritableFont`] component on an entity, and
/// propagates downward the font to all participating text entities.
pub(crate) fn on_changed_font(
    insert: On<Insert, InheritableFont>,
    font_style: Query<&InheritableFont>,
    assets: Res<AssetServer>,
    mut commands: Commands,
) {
    let Ok(style) = font_style.get(insert.entity) else {
        return;
    };
    // Both arms of the original match produced `Some`, so the `Option` wrapper was
    // dead code; resolve straight to a handle, loading from the path if necessary.
    let font = match &style.font {
        HandleOrPath::Handle(h) => h.clone(),
        HandleOrPath::Path(p) => assets.load::<Font>(p),
    };
    commands.entity(insert.entity).insert(Propagate(TextFont {
        font,
        font_size: style.font_size,
        ..Default::default()
    }));
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/button.rs | crates/bevy_feathers/src/controls/button.rs | use bevy_app::{Plugin, PreUpdate};
use bevy_ecs::{
bundle::Bundle,
component::Component,
entity::Entity,
hierarchy::{ChildOf, Children},
lifecycle::RemovedComponents,
query::{Added, Changed, Has, Or},
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
spawn::{SpawnRelated, SpawnableList},
system::{Commands, Query},
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_picking::{hover::Hovered, PickingSystems};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{AlignItems, InteractionDisabled, JustifyContent, Node, Pressed, UiRect, Val};
use bevy_ui_widgets::Button;
use crate::{
constants::{fonts, size},
cursor::EntityCursor,
font_styles::InheritableFont,
handle_or_path::HandleOrPath,
rounded_corners::RoundedCorners,
theme::{ThemeBackgroundColor, ThemeFontColor},
tokens,
};
/// Color variants for buttons. This also functions as a component used by the dynamic styling
/// system to identify which entities are buttons.
#[derive(Component, Default, Clone, Reflect, Debug, PartialEq, Eq)]
#[reflect(Component, Clone, Default)]
pub enum ButtonVariant {
/// The standard button appearance
#[default]
Normal,
/// A button with a more prominent color, this is used for "call to action" buttons,
/// default buttons for dialog boxes, and so on.
Primary,
}
/// Parameters for the button template, passed to [`button`] function.
#[derive(Default)]
pub struct ButtonProps {
/// Color variant for the button.
pub variant: ButtonVariant,
/// Rounded corners options
pub corners: RoundedCorners,
}
/// Template function to spawn a button.
///
/// # Arguments
/// * `props` - construction properties for the button.
/// * `overrides` - a bundle of components that are merged in with the normal button components.
/// * `children` - a [`SpawnableList`] of child elements, such as a label or icon for the button.
///
/// # Emitted events
/// * [`bevy_ui_widgets::Activate`] when any of the following happens:
/// * the pointer is released while hovering over the button.
/// * the ENTER or SPACE key is pressed while the button has keyboard focus.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn button<C: SpawnableList<ChildOf> + Send + Sync + 'static, B: Bundle>(
    props: ButtonProps,
    overrides: B,
    children: C,
) -> impl Bundle {
    (
        Node {
            height: size::ROW_HEIGHT,
            justify_content: JustifyContent::Center,
            align_items: AlignItems::Center,
            padding: UiRect::axes(Val::Px(8.0), Val::Px(0.)),
            flex_grow: 1.0,
            border_radius: props.corners.to_border_radius(4.0),
            ..Default::default()
        },
        Button,
        props.variant,
        Hovered::default(),
        // Pointer cursor by default; `set_button_styles` swaps it to NotAllowed when disabled.
        EntityCursor::System(bevy_window::SystemCursorIcon::Pointer),
        // Participates in keyboard tab navigation.
        TabIndex(0),
        ThemeBackgroundColor(tokens::BUTTON_BG),
        ThemeFontColor(tokens::BUTTON_TEXT),
        InheritableFont {
            font: HandleOrPath::Path(fonts::REGULAR.to_owned()),
            font_size: 14.0,
        },
        // `overrides` comes after the defaults so caller-supplied components win.
        overrides,
        Children::spawn(children),
    )
}
/// Re-styles every button whose interaction-relevant state changed this frame.
fn update_button_styles(
    q_buttons: Query<
        (
            Entity,
            &ButtonVariant,
            Has<InteractionDisabled>,
            Has<Pressed>,
            &Hovered,
            &ThemeBackgroundColor,
            &ThemeFontColor,
        ),
        Or<(
            Changed<Hovered>,
            Changed<ButtonVariant>,
            Added<Pressed>,
            Added<InteractionDisabled>,
        )>,
    >,
    mut commands: Commands,
) {
    for (entity, variant, disabled, pressed, hovered, bg, font) in &q_buttons {
        set_button_styles(
            entity, variant, disabled, pressed, hovered.0, bg, font, &mut commands,
        );
    }
}
/// Re-styles buttons that just had `InteractionDisabled` or `Pressed` removed
/// (component removals are not visible to `Changed`/`Added` filters).
fn update_button_styles_remove(
    q_buttons: Query<(
        Entity,
        &ButtonVariant,
        Has<InteractionDisabled>,
        Has<Pressed>,
        &Hovered,
        &ThemeBackgroundColor,
        &ThemeFontColor,
    )>,
    mut removed_disabled: RemovedComponents<InteractionDisabled>,
    mut removed_pressed: RemovedComponents<Pressed>,
    mut commands: Commands,
) {
    for ent in removed_disabled.read().chain(removed_pressed.read()) {
        // The entity may have been despawned or may not be a button; skip it then.
        let Ok((entity, variant, disabled, pressed, hovered, bg, font)) = q_buttons.get(ent)
        else {
            continue;
        };
        set_button_styles(
            entity, variant, disabled, pressed, hovered.0, bg, font, &mut commands,
        );
    }
}
fn set_button_styles(
button_ent: Entity,
variant: &ButtonVariant,
disabled: bool,
pressed: bool,
hovered: bool,
bg_color: &ThemeBackgroundColor,
font_color: &ThemeFontColor,
commands: &mut Commands,
) {
let bg_token = match (variant, disabled, pressed, hovered) {
(ButtonVariant::Normal, true, _, _) => tokens::BUTTON_BG_DISABLED,
(ButtonVariant::Normal, false, true, _) => tokens::BUTTON_BG_PRESSED,
(ButtonVariant::Normal, false, false, true) => tokens::BUTTON_BG_HOVER,
(ButtonVariant::Normal, false, false, false) => tokens::BUTTON_BG,
(ButtonVariant::Primary, true, _, _) => tokens::BUTTON_PRIMARY_BG_DISABLED,
(ButtonVariant::Primary, false, true, _) => tokens::BUTTON_PRIMARY_BG_PRESSED,
(ButtonVariant::Primary, false, false, true) => tokens::BUTTON_PRIMARY_BG_HOVER,
(ButtonVariant::Primary, false, false, false) => tokens::BUTTON_PRIMARY_BG,
};
let font_color_token = match (variant, disabled) {
(ButtonVariant::Normal, true) => tokens::BUTTON_TEXT_DISABLED,
(ButtonVariant::Normal, false) => tokens::BUTTON_TEXT,
(ButtonVariant::Primary, true) => tokens::BUTTON_PRIMARY_TEXT_DISABLED,
(ButtonVariant::Primary, false) => tokens::BUTTON_PRIMARY_TEXT,
};
let cursor_shape = match disabled {
true => bevy_window::SystemCursorIcon::NotAllowed,
false => bevy_window::SystemCursorIcon::Pointer,
};
// Change background color
if bg_color.0 != bg_token {
commands
.entity(button_ent)
.insert(ThemeBackgroundColor(bg_token));
}
// Change font color
if font_color.0 != font_color_token {
commands
.entity(button_ent)
.insert(ThemeFontColor(font_color_token));
}
// Change cursor shape
commands
.entity(button_ent)
.insert(EntityCursor::System(cursor_shape));
}
/// Plugin which registers the systems for updating the button styles.
pub struct ButtonPlugin;
impl Plugin for ButtonPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // Scheduled in `PickingSystems::Last` so the styles run after picking updates
        // within `PreUpdate`.
        app.add_systems(
            PreUpdate,
            (update_button_styles, update_button_styles_remove).in_set(PickingSystems::Last),
        );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/virtual_keyboard.rs | crates/bevy_feathers/src/controls/virtual_keyboard.rs | use bevy_ecs::prelude::*;
use bevy_input_focus::tab_navigation::TabGroup;
use bevy_ui::Node;
use bevy_ui::Val;
use bevy_ui::{widget::Text, FlexDirection};
use bevy_ui_widgets::{observe, Activate};
use crate::controls::{button, ButtonProps};
/// Fired whenever a virtual key is pressed.
#[derive(EntityEvent)]
pub struct VirtualKeyPressed<T> {
/// The virtual keyboard entity
pub entity: Entity,
/// The pressed virtual key
pub key: T,
}
/// Function to spawn a virtual keyboard
///
/// # Arguments
/// * `keys` - iterator over keyboard rows, each row a `Vec` of key labels.
///
/// # Emitted events
/// * [`crate::controls::VirtualKeyPressed<T>`] when a virtual key on the keyboard is un-pressed.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn virtual_keyboard<T>(
    keys: impl Iterator<Item = Vec<T>> + Send + Sync + 'static,
) -> impl Bundle
where
    T: AsRef<str> + Clone + Send + Sync + 'static,
{
    (
        // Root: a column of key rows, forming one tab-navigation group.
        Node {
            flex_direction: FlexDirection::Column,
            row_gap: Val::Px(4.),
            ..Default::default()
        },
        TabGroup::new(0),
        Children::spawn(SpawnIter(keys.map(move |row| {
            (
                // One horizontal row of key buttons.
                Node {
                    flex_direction: FlexDirection::Row,
                    column_gap: Val::Px(4.),
                    ..Default::default()
                },
                Children::spawn(SpawnIter(row.into_iter().map(move |key| {
                    (
                        // Each key is a standard feathers button labeled with
                        // the key text.
                        button(ButtonProps::default(), (), Spawn(Text::new(key.as_ref()))),
                        observe(
                            move |activate: On<Activate>,
                                  mut commands: Commands,
                                  query: Query<&ChildOf>|
                                  -> Result {
                                // Walk two levels up the hierarchy
                                // (button -> row -> keyboard root) so the
                                // event targets the keyboard entity itself.
                                let virtual_keyboard =
                                    query.get(query.get(activate.entity)?.parent())?.parent();
                                commands.trigger(VirtualKeyPressed {
                                    entity: virtual_keyboard,
                                    key: key.clone(),
                                });
                                Ok(())
                            },
                        ),
                    )
                }))),
            )
        }))),
    )
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/toggle_switch.rs | crates/bevy_feathers/src/controls/toggle_switch.rs | use accesskit::Role;
use bevy_a11y::AccessibilityNode;
use bevy_app::{Plugin, PreUpdate};
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::Children,
lifecycle::RemovedComponents,
query::{Added, Changed, Has, Or, With},
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
spawn::SpawnRelated,
system::{Commands, Query},
world::Mut,
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_picking::{hover::Hovered, PickingSystems};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{BorderRadius, Checked, InteractionDisabled, Node, PositionType, UiRect, Val};
use bevy_ui_widgets::Checkbox;
use crate::{
constants::size,
cursor::EntityCursor,
theme::{ThemeBackgroundColor, ThemeBorderColor},
tokens,
};
/// Marker for the toggle switch outline
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct ToggleSwitchOutline;
/// Marker for the toggle switch slide
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct ToggleSwitchSlide;
/// Template function to spawn a toggle switch.
///
/// # Arguments
/// * `overrides` - a bundle of components that are merged in with the normal toggle switch components.
///
/// # Emitted events
/// * [`bevy_ui_widgets::ValueChange<bool>`] with the new value when the toggle switch changes state.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the bundle
pub fn toggle_switch<B: Bundle>(overrides: B) -> impl Bundle {
    (
        // Outer node: the switch outline / track.
        Node {
            width: size::TOGGLE_WIDTH,
            height: size::TOGGLE_HEIGHT,
            border: UiRect::all(Val::Px(2.0)),
            border_radius: BorderRadius::all(Val::Px(5.0)),
            ..Default::default()
        },
        Checkbox,
        ToggleSwitchOutline,
        ThemeBackgroundColor(tokens::SWITCH_BG),
        ThemeBorderColor(tokens::SWITCH_BORDER),
        // Expose the control to assistive technologies as a switch.
        AccessibilityNode(accesskit::Node::new(Role::Switch)),
        Hovered::default(),
        EntityCursor::System(bevy_window::SystemCursorIcon::Pointer),
        TabIndex(0),
        // Caller-supplied components take precedence over the defaults above.
        overrides,
        children![(
            // Inner node: the slide covering half the track. Its `left`
            // offset is set to 0% or 50% by the style systems depending on
            // the checked state.
            Node {
                position_type: PositionType::Absolute,
                left: Val::Percent(0.),
                top: Val::Px(0.),
                bottom: Val::Px(0.),
                width: Val::Percent(50.),
                border_radius: BorderRadius::all(Val::Px(3.0)),
                ..Default::default()
            },
            ToggleSwitchSlide,
            ThemeBackgroundColor(tokens::SWITCH_SLIDE),
        )],
    )
}
/// System: restyle toggle switches whose hover, checked or disabled state
/// changed this frame.
fn update_switch_styles(
    q_switches: Query<
        (
            Entity,
            Has<InteractionDisabled>,
            Has<Checked>,
            &Hovered,
            &ThemeBackgroundColor,
            &ThemeBorderColor,
        ),
        (
            With<ToggleSwitchOutline>,
            // Only switches whose interaction state changed are visited.
            Or<(Changed<Hovered>, Added<Checked>, Added<InteractionDisabled>)>,
        ),
    >,
    q_children: Query<&Children>,
    mut q_slide: Query<(&mut Node, &ThemeBackgroundColor), With<ToggleSwitchSlide>>,
    mut commands: Commands,
) {
    for (switch_ent, disabled, checked, hovered, outline_bg, outline_border) in q_switches.iter() {
        // Locate the slide element among the switch's descendants; skip the
        // switch if none is found.
        let Some(slide_ent) = q_children
            .iter_descendants(switch_ent)
            .find(|en| q_slide.contains(*en))
        else {
            continue;
        };
        // Safety: since we just checked the query, should always work.
        let (ref mut slide_style, slide_color) = q_slide.get_mut(slide_ent).unwrap();
        set_switch_styles(
            switch_ent,
            slide_ent,
            disabled,
            checked,
            hovered.0,
            outline_bg,
            outline_border,
            slide_style,
            slide_color,
            &mut commands,
        );
    }
}
/// System: restyle switches when `InteractionDisabled` or `Checked` is
/// removed — removals are not observable through the `Added`/`Changed`
/// filters used by `update_switch_styles`.
fn update_switch_styles_remove(
    q_switches: Query<
        (
            Entity,
            Has<InteractionDisabled>,
            Has<Checked>,
            &Hovered,
            &ThemeBackgroundColor,
            &ThemeBorderColor,
        ),
        With<ToggleSwitchOutline>,
    >,
    q_children: Query<&Children>,
    mut q_slide: Query<(&mut Node, &ThemeBackgroundColor), With<ToggleSwitchSlide>>,
    mut removed_disabled: RemovedComponents<InteractionDisabled>,
    mut removed_checked: RemovedComponents<Checked>,
    mut commands: Commands,
) {
    removed_disabled
        .read()
        .chain(removed_checked.read())
        .for_each(|ent| {
            // The removal may concern an entity that is not a switch (or was
            // despawned); such misses are silently ignored.
            if let Ok((switch_ent, disabled, checked, hovered, outline_bg, outline_border)) =
                q_switches.get(ent)
            {
                // `return` inside the closure acts as a `continue`.
                let Some(slide_ent) = q_children
                    .iter_descendants(switch_ent)
                    .find(|en| q_slide.contains(*en))
                else {
                    return;
                };
                // Safety: since we just checked the query, should always work.
                let (ref mut slide_style, slide_color) = q_slide.get_mut(slide_ent).unwrap();
                set_switch_styles(
                    switch_ent,
                    slide_ent,
                    disabled,
                    checked,
                    hovered.0,
                    outline_bg,
                    outline_border,
                    slide_style,
                    slide_color,
                    &mut commands,
                );
            }
        });
}
/// Apply the computed theme tokens, slide position and cursor shape for a
/// toggle switch, writing only the pieces that actually changed.
fn set_switch_styles(
    switch_ent: Entity,
    slide_ent: Entity,
    disabled: bool,
    checked: bool,
    hovered: bool,
    outline_bg: &ThemeBackgroundColor,
    outline_border: &ThemeBorderColor,
    slide_style: &mut Mut<Node>,
    slide_color: &ThemeBackgroundColor,
    commands: &mut Commands,
) {
    // Border token: disabled takes precedence over hover.
    let outline_border_token = if disabled {
        tokens::SWITCH_BORDER_DISABLED
    } else if hovered {
        tokens::SWITCH_BORDER_HOVER
    } else {
        tokens::SWITCH_BORDER
    };
    // Background token: combination of disabled and checked state.
    let outline_bg_token = if disabled {
        if checked {
            tokens::SWITCH_BG_CHECKED_DISABLED
        } else {
            tokens::SWITCH_BG_DISABLED
        }
    } else if checked {
        tokens::SWITCH_BG_CHECKED
    } else {
        tokens::SWITCH_BG
    };
    let slide_token = if disabled {
        tokens::SWITCH_SLIDE_DISABLED
    } else {
        tokens::SWITCH_SLIDE
    };
    // The slide occupies the right half of the track when checked, the left
    // half otherwise.
    let slide_pos = if checked {
        Val::Percent(50.)
    } else {
        Val::Percent(0.)
    };
    let cursor_shape = if disabled {
        bevy_window::SystemCursorIcon::NotAllowed
    } else {
        bevy_window::SystemCursorIcon::Pointer
    };
    // Outline entity: write theme components only when their token changed.
    if outline_bg.0 != outline_bg_token {
        commands
            .entity(switch_ent)
            .insert(ThemeBackgroundColor(outline_bg_token));
    }
    if outline_border.0 != outline_border_token {
        commands
            .entity(switch_ent)
            .insert(ThemeBorderColor(outline_border_token));
    }
    // Slide entity: color and horizontal position.
    if slide_color.0 != slide_token {
        commands
            .entity(slide_ent)
            .insert(ThemeBackgroundColor(slide_token));
    }
    if slide_style.left != slide_pos {
        slide_style.left = slide_pos;
    }
    // Cursor shape is refreshed unconditionally.
    commands
        .entity(switch_ent)
        .insert(EntityCursor::System(cursor_shape));
}
/// Plugin which registers the systems for updating the toggle switch styles.
pub struct ToggleSwitchPlugin;
impl Plugin for ToggleSwitchPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // Style updates run after picking so they see this frame's hover state.
        app.add_systems(
            PreUpdate,
            (update_switch_styles, update_switch_styles_remove).in_set(PickingSystems::Last),
        );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/color_swatch.rs | crates/bevy_feathers/src/controls/color_swatch.rs | use bevy_app::{Plugin, PostUpdate};
use bevy_asset::Handle;
use bevy_color::{Alpha, Color};
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
hierarchy::Children,
query::Changed,
reflect::ReflectComponent,
spawn::SpawnRelated,
system::{Commands, Query},
};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{BackgroundColor, BorderRadius, Node, PositionType, Val};
use bevy_ui_render::ui_material::MaterialNode;
use crate::{
alpha_pattern::{AlphaPattern, AlphaPatternMaterial},
constants::size,
palette,
};
/// Marker identifying a color swatch.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
pub struct ColorSwatch;
/// Component that contains the value of the color swatch. This is copied to the child element
/// background.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
pub struct ColorSwatchValue(pub Color);
/// Marker identifying the color swatch foreground, the piece that actually displays the color
/// in front of the alpha pattern. This exists so that users can reach in and change the color
/// dynamically.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
pub struct ColorSwatchFg;
/// Template function to spawn a color swatch.
///
/// # Arguments
/// * `overrides` - a bundle of components that are merged in with the normal swatch components.
pub fn color_swatch<B: Bundle>(overrides: B) -> impl Bundle {
    (
        Node {
            height: size::ROW_HEIGHT,
            min_width: size::ROW_HEIGHT,
            border_radius: BorderRadius::all(Val::Px(5.0)),
            ..Default::default()
        },
        ColorSwatch,
        ColorSwatchValue::default(),
        // Checkerboard material behind the color so transparency is visible.
        AlphaPattern,
        MaterialNode::<AlphaPatternMaterial>(Handle::default()),
        overrides,
        children![(
            // Foreground child that actually displays the color; it is kept
            // in sync with `ColorSwatchValue` by `update_swatch_color`.
            Node {
                position_type: PositionType::Absolute,
                left: Val::Px(0.),
                top: Val::Px(0.),
                bottom: Val::Px(0.),
                right: Val::Px(0.),
                border_radius: BorderRadius::all(Val::Px(5.0)),
                ..Default::default()
            },
            ColorSwatchFg,
            BackgroundColor(palette::ACCENT.with_alpha(0.5)),
        )],
    )
}
/// Copy a changed [`ColorSwatchValue`] onto the swatch's first child as a
/// [`BackgroundColor`].
fn update_swatch_color(
    q_swatch: Query<(&ColorSwatchValue, &Children), Changed<ColorSwatchValue>>,
    mut commands: Commands,
) {
    for (value, children) in &q_swatch {
        // The foreground element is spawned as the swatch's first child.
        let Some(&fg_ent) = children.first() else {
            continue;
        };
        commands.entity(fg_ent).insert(BackgroundColor(value.0));
    }
}
/// Plugin which registers the observers for updating the swatch color.
pub struct ColorSwatchPlugin;
impl Plugin for ColorSwatchPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // PostUpdate so swatches reflect any value written during Update.
        app.add_systems(PostUpdate, update_swatch_color);
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/color_slider.rs | crates/bevy_feathers/src/controls/color_slider.rs | use core::f32::consts::PI;
use bevy_app::{Plugin, PreUpdate};
use bevy_asset::Handle;
use bevy_color::{Alpha, Color, Hsla};
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::Children,
query::{Changed, Or, With},
schedule::IntoScheduleConfigs,
spawn::SpawnRelated,
system::Query,
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_log::warn_once;
use bevy_picking::PickingSystems;
use bevy_ui::{
AlignItems, BackgroundColor, BackgroundGradient, BorderColor, BorderRadius, ColorStop, Display,
FlexDirection, Gradient, InterpolationColorSpace, LinearGradient, Node, Outline, PositionType,
UiRect, UiTransform, Val, Val2, ZIndex,
};
use bevy_ui_render::ui_material::MaterialNode;
use bevy_ui_widgets::{Slider, SliderRange, SliderThumb, SliderValue, TrackClick};
use crate::{
alpha_pattern::{AlphaPattern, AlphaPatternMaterial},
cursor::EntityCursor,
palette,
rounded_corners::RoundedCorners,
};
/// Total height of the color slider widget, in logical pixels.
const SLIDER_HEIGHT: f32 = 16.0;
/// Padding between the outer node and the inner track.
const TRACK_PADDING: f32 = 3.0;
/// Track corner radius, derived so the track corners stay fully rounded
/// inside the padded outer node.
const TRACK_RADIUS: f32 = SLIDER_HEIGHT * 0.5 - TRACK_PADDING;
/// Diameter of the draggable thumb.
const THUMB_SIZE: f32 = SLIDER_HEIGHT - 2.0;
/// Indicates which color channel we want to edit.
#[derive(Component, Default, Clone)]
pub enum ColorChannel {
/// Editing the RGB red channel (0..=1)
#[default]
Red,
/// Editing the RGB green channel (0..=1)
Green,
/// Editing the RGB blue channel (0..=1)
Blue,
/// Editing the hue channel (0..=360)
HslHue,
/// Editing the chroma / saturation channel (0..=1)
HslSaturation,
/// Editing the luminance channel (0..=1)
HslLightness,
/// Editing the alpha channel (0..=1)
Alpha,
}
impl ColorChannel {
    /// Return the range of this color channel.
    pub fn range(&self) -> SliderRange {
        match self {
            ColorChannel::Red
            | ColorChannel::Green
            | ColorChannel::Blue
            | ColorChannel::Alpha
            | ColorChannel::HslSaturation
            | ColorChannel::HslLightness => SliderRange::new(0., 1.),
            // Hue is an angle in degrees, unlike the unit-interval channels.
            ColorChannel::HslHue => SliderRange::new(0., 360.),
        }
    }
    /// Return the color endpoints and midpoint of the gradient. This is determined by both the
    /// channel being edited and the base color.
    pub fn gradient_ends(&self, base_color: Color) -> (Color, Color, Color) {
        match self {
            // RGB channels: sweep the edited channel from 0 to 1 while the
            // other two channels keep the base color's values.
            ColorChannel::Red => {
                let base_rgb = base_color.to_srgba();
                (
                    Color::srgb(0.0, base_rgb.green, base_rgb.blue),
                    Color::srgb(0.5, base_rgb.green, base_rgb.blue),
                    Color::srgb(1.0, base_rgb.green, base_rgb.blue),
                )
            }
            ColorChannel::Green => {
                let base_rgb = base_color.to_srgba();
                (
                    Color::srgb(base_rgb.red, 0.0, base_rgb.blue),
                    Color::srgb(base_rgb.red, 0.5, base_rgb.blue),
                    Color::srgb(base_rgb.red, 1.0, base_rgb.blue),
                )
            }
            ColorChannel::Blue => {
                let base_rgb = base_color.to_srgba();
                (
                    Color::srgb(base_rgb.red, base_rgb.green, 0.0),
                    Color::srgb(base_rgb.red, base_rgb.green, 0.5),
                    Color::srgb(base_rgb.red, base_rgb.green, 1.0),
                )
            }
            // NOTE(review): hue endpoints are nudged off exactly 0 and 360 —
            // presumably to keep the gradient from interpolating across the
            // hue wraparound; confirm against the gradient renderer.
            ColorChannel::HslHue => (
                Color::hsl(0.0 + 0.0001, 1.0, 0.5),
                Color::hsl(180.0, 1.0, 0.5),
                Color::hsl(360.0 - 0.0001, 1.0, 0.5),
            ),
            ColorChannel::HslSaturation => {
                let base_hsla: Hsla = base_color.into();
                (
                    Color::hsl(base_hsla.hue, 0.0, base_hsla.lightness),
                    Color::hsl(base_hsla.hue, 0.5, base_hsla.lightness),
                    Color::hsl(base_hsla.hue, 1.0, base_hsla.lightness),
                )
            }
            ColorChannel::HslLightness => {
                let base_hsla: Hsla = base_color.into();
                (
                    Color::hsl(base_hsla.hue, base_hsla.saturation, 0.0),
                    Color::hsl(base_hsla.hue, base_hsla.saturation, 0.5),
                    Color::hsl(base_hsla.hue, base_hsla.saturation, 1.0),
                )
            }
            // Alpha: sweep transparency over the base color itself.
            ColorChannel::Alpha => (
                base_color.with_alpha(0.),
                base_color.with_alpha(0.5),
                base_color.with_alpha(1.),
            ),
        }
    }
}
/// Used to store the color channels that we are not editing: the components of the color
/// that are constant for this slider.
#[derive(Component, Default, Clone)]
pub struct SliderBaseColor(pub Color);
/// Color slider template properties, passed to [`color_slider`] function.
pub struct ColorSliderProps {
/// Slider current value
pub value: f32,
/// Which color component we're editing
pub channel: ColorChannel,
}
impl Default for ColorSliderProps {
fn default() -> Self {
Self {
value: 0.0,
channel: ColorChannel::Alpha,
}
}
}
/// A color slider widget.
#[derive(Component, Default, Clone)]
#[require(Slider, SliderBaseColor(Color::WHITE))]
pub struct ColorSlider {
/// Which channel is being edited by this slider.
pub channel: ColorChannel,
}
/// Marker for the track
#[derive(Component, Default, Clone)]
struct ColorSliderTrack;
/// Marker for the thumb
#[derive(Component, Default, Clone)]
struct ColorSliderThumb;
/// Spawn a new slider widget.
///
/// # Arguments
///
/// * `props` - construction properties for the slider.
/// * `overrides` - a bundle of components that are merged in with the normal slider components.
///
/// # Emitted events
///
/// * [`bevy_ui_widgets::ValueChange<f32>`] when the slider value is changed.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn color_slider<B: Bundle>(props: ColorSliderProps, overrides: B) -> impl Bundle {
(
Node {
display: Display::Flex,
flex_direction: FlexDirection::Row,
height: Val::Px(SLIDER_HEIGHT),
align_items: AlignItems::Stretch,
flex_grow: 1.0,
..Default::default()
},
Slider {
track_click: TrackClick::Snap,
},
ColorSlider {
channel: props.channel.clone(),
},
SliderValue(props.value),
props.channel.range(),
EntityCursor::System(bevy_window::SystemCursorIcon::Pointer),
TabIndex(0),
overrides,
children![
// track
(
Node {
position_type: PositionType::Absolute,
left: Val::Px(0.),
right: Val::Px(0.),
top: Val::Px(TRACK_PADDING),
bottom: Val::Px(TRACK_PADDING),
border_radius: RoundedCorners::All.to_border_radius(TRACK_RADIUS),
..Default::default()
},
ColorSliderTrack,
AlphaPattern,
MaterialNode::<AlphaPatternMaterial>(Handle::default()),
children![
// Left endcap
(
Node {
width: Val::Px(THUMB_SIZE * 0.5),
border_radius: RoundedCorners::Left.to_border_radius(TRACK_RADIUS),
..Default::default()
},
BackgroundColor(palette::X_AXIS),
),
// Track with gradient
(
Node {
flex_grow: 1.0,
..Default::default()
},
BackgroundGradient(vec![Gradient::Linear(LinearGradient {
angle: PI * 0.5,
stops: vec![
ColorStop::new(Color::NONE, Val::Percent(0.)),
ColorStop::new(Color::NONE, Val::Percent(50.)),
ColorStop::new(Color::NONE, Val::Percent(100.)),
],
color_space: InterpolationColorSpace::Srgba,
})]),
ZIndex(1),
children![(
Node {
position_type: PositionType::Absolute,
left: Val::Percent(0.),
top: Val::Percent(50.),
width: Val::Px(THUMB_SIZE),
height: Val::Px(THUMB_SIZE),
border: UiRect::all(Val::Px(2.0)),
border_radius: BorderRadius::MAX,
..Default::default()
},
SliderThumb,
ColorSliderThumb,
BorderColor::all(palette::WHITE),
Outline {
width: Val::Px(1.),
offset: Val::Px(0.),
color: palette::BLACK
},
UiTransform::from_translation(Val2::new(
Val::Percent(-50.0),
Val::Percent(-50.0),
))
)]
),
// Right endcap
(
Node {
width: Val::Px(THUMB_SIZE * 0.5),
border_radius: RoundedCorners::Right.to_border_radius(TRACK_RADIUS),
..Default::default()
},
BackgroundColor(palette::Z_AXIS),
),
]
),
],
)
}
/// Move the color slider's thumb to match the current value within its range.
fn update_slider_pos(
    mut q_sliders: Query<
        (Entity, &SliderValue, &SliderRange),
        (
            With<ColorSlider>,
            Or<(Changed<SliderValue>, Changed<SliderRange>)>,
        ),
    >,
    q_children: Query<&Children>,
    mut q_slider_thumb: Query<&mut Node, With<ColorSliderThumb>>,
) {
    for (slider_ent, value, range) in q_sliders.iter_mut() {
        // Walk all descendants; any that match the thumb query get repositioned.
        q_children.iter_descendants(slider_ent).for_each(|child| {
            let Ok(mut thumb_node) = q_slider_thumb.get_mut(child) else {
                return;
            };
            thumb_node.left = Val::Percent(range.thumb_position(value.0) * 100.0);
        });
    }
}
fn update_track_color(
mut q_sliders: Query<(Entity, &ColorSlider, &SliderBaseColor), Changed<SliderBaseColor>>,
q_children: Query<&Children>,
q_track: Query<(), With<ColorSliderTrack>>,
mut q_background: Query<&mut BackgroundColor>,
mut q_gradient: Query<&mut BackgroundGradient>,
) {
for (slider_ent, slider, SliderBaseColor(base_color)) in q_sliders.iter_mut() {
let (start, middle, end) = slider.channel.gradient_ends(*base_color);
if let Some(track_ent) = q_children
.iter_descendants(slider_ent)
.find(|ent| q_track.contains(*ent))
{
let Ok(track_children) = q_children.get(track_ent) else {
continue;
};
if let Ok(mut cap_bg) = q_background.get_mut(track_children[0]) {
cap_bg.0 = start;
}
if let Ok(mut gradient) = q_gradient.get_mut(track_children[1])
&& let [Gradient::Linear(linear_gradient)] = &mut gradient.0[..]
{
linear_gradient.stops[0].color = start;
linear_gradient.stops[1].color = middle;
linear_gradient.stops[2].color = end;
linear_gradient.color_space = match slider.channel {
ColorChannel::Red | ColorChannel::Green | ColorChannel::Blue => {
InterpolationColorSpace::Srgba
}
ColorChannel::HslHue
| ColorChannel::HslLightness
| ColorChannel::HslSaturation => InterpolationColorSpace::Hsla,
ColorChannel::Alpha => match base_color {
Color::Srgba(_) => InterpolationColorSpace::Srgba,
Color::LinearRgba(_) => InterpolationColorSpace::LinearRgba,
Color::Oklaba(_) => InterpolationColorSpace::Oklaba,
Color::Oklcha(_) => InterpolationColorSpace::OklchaLong,
Color::Hsla(_) | Color::Hsva(_) => InterpolationColorSpace::Hsla,
_ => {
warn_once!("Unsupported color space for ColorSlider: {:?}", base_color);
InterpolationColorSpace::Srgba
}
},
};
}
if let Ok(mut cap_bg) = q_background.get_mut(track_children[2]) {
cap_bg.0 = end;
}
}
}
}
/// Plugin which registers the systems for updating the slider styles.
pub struct ColorSliderPlugin;
impl Plugin for ColorSliderPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // Run after picking so thumb position and gradients reflect this
        // frame's slider state.
        app.add_systems(
            PreUpdate,
            (update_slider_pos, update_track_color).in_set(PickingSystems::Last),
        );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/slider.rs | crates/bevy_feathers/src/controls/slider.rs | use core::f32::consts::PI;
use bevy_app::{Plugin, PreUpdate};
use bevy_color::Color;
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::Children,
lifecycle::RemovedComponents,
query::{Added, Changed, Has, Or, Spawned, With},
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
spawn::SpawnRelated,
system::{Commands, Query, Res},
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_picking::PickingSystems;
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{
widget::Text, AlignItems, BackgroundGradient, ColorStop, Display, FlexDirection, Gradient,
InteractionDisabled, InterpolationColorSpace, JustifyContent, LinearGradient, Node,
PositionType, UiRect, Val,
};
use bevy_ui_widgets::{Slider, SliderPrecision, SliderRange, SliderValue, TrackClick};
use crate::{
constants::{fonts, size},
cursor::EntityCursor,
font_styles::InheritableFont,
handle_or_path::HandleOrPath,
rounded_corners::RoundedCorners,
theme::{ThemeFontColor, ThemedText, UiTheme},
tokens,
};
/// Slider template properties, passed to [`slider`] function.
pub struct SliderProps {
/// Slider current value
pub value: f32,
/// Slider minimum value
pub min: f32,
/// Slider maximum value
pub max: f32,
}
impl Default for SliderProps {
fn default() -> Self {
Self {
value: 0.0,
min: 0.0,
max: 1.0,
}
}
}
/// Marker for feathers-styled sliders; requires the core [`Slider`] widget.
// Consistency fix: the derives were previously split across two `#[derive]`
// attributes; merged into one to match every other component in this crate.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
#[require(Slider)]
struct SliderStyle;
/// Marker for the text
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct SliderValueText;
/// Spawn a new slider widget.
///
/// # Arguments
///
/// * `props` - construction properties for the slider.
/// * `overrides` - a bundle of components that are merged in with the normal slider components.
///
/// # Emitted events
///
/// * [`bevy_ui_widgets::ValueChange<f32>`] when the slider value is changed.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn slider<B: Bundle>(props: SliderProps, overrides: B) -> impl Bundle {
(
Node {
height: size::ROW_HEIGHT,
justify_content: JustifyContent::Center,
align_items: AlignItems::Center,
padding: UiRect::axes(Val::Px(8.0), Val::Px(0.)),
flex_grow: 1.0,
border_radius: RoundedCorners::All.to_border_radius(6.0),
..Default::default()
},
Slider {
track_click: TrackClick::Drag,
},
SliderStyle,
SliderValue(props.value),
SliderRange::new(props.min, props.max),
EntityCursor::System(bevy_window::SystemCursorIcon::EwResize),
TabIndex(0),
// Use a gradient to draw the moving bar
BackgroundGradient(vec![Gradient::Linear(LinearGradient {
angle: PI * 0.5,
stops: vec![
ColorStop::new(Color::NONE, Val::Percent(0.)),
ColorStop::new(Color::NONE, Val::Percent(50.)),
ColorStop::new(Color::NONE, Val::Percent(50.)),
ColorStop::new(Color::NONE, Val::Percent(100.)),
],
color_space: InterpolationColorSpace::Srgba,
})]),
overrides,
children![(
// Text container
Node {
display: Display::Flex,
position_type: PositionType::Absolute,
flex_direction: FlexDirection::Row,
align_items: AlignItems::Center,
justify_content: JustifyContent::Center,
..Default::default()
},
ThemeFontColor(tokens::SLIDER_TEXT),
InheritableFont {
font: HandleOrPath::Path(fonts::MONO.to_owned()),
font_size: 12.0,
},
children![(Text::new("10.0"), ThemedText, SliderValueText,)],
)],
)
}
/// System: apply slider styling to newly spawned sliders and to sliders that
/// just became disabled.
fn update_slider_styles(
    mut q_sliders: Query<
        (Entity, Has<InteractionDisabled>, &mut BackgroundGradient),
        (With<SliderStyle>, Or<(Spawned, Added<InteractionDisabled>)>),
    >,
    theme: Res<UiTheme>,
    mut commands: Commands,
) {
    for (slider_ent, disabled, mut gradient) in q_sliders.iter_mut() {
        set_slider_styles(
            slider_ent,
            &theme,
            disabled,
            gradient.as_mut(),
            &mut commands,
        );
    }
}
/// System: re-apply slider styling when `InteractionDisabled` is removed —
/// removals are not observable through the `Spawned`/`Added` filters used by
/// `update_slider_styles`.
fn update_slider_styles_remove(
    mut q_sliders: Query<(Entity, Has<InteractionDisabled>, &mut BackgroundGradient)>,
    mut removed_disabled: RemovedComponents<InteractionDisabled>,
    theme: Res<UiTheme>,
    mut commands: Commands,
) {
    removed_disabled.read().for_each(|ent| {
        // The entity may have been despawned or may not carry the queried
        // components; such misses are silently ignored.
        if let Ok((slider_ent, disabled, mut gradient)) = q_sliders.get_mut(ent) {
            set_slider_styles(
                slider_ent,
                &theme,
                disabled,
                gradient.as_mut(),
                &mut commands,
            );
        }
    });
}
/// Write a slider's bar gradient colors and cursor shape based on its
/// disabled state. The moving bar is a four-stop gradient: the first pair of
/// stops is the bar color, the second pair the track background.
fn set_slider_styles(
    slider_ent: Entity,
    theme: &Res<'_, UiTheme>,
    disabled: bool,
    gradient: &mut BackgroundGradient,
    commands: &mut Commands,
) {
    // Token and cursor are both determined by the disabled flag.
    let (bar_token, cursor_shape) = if disabled {
        (
            tokens::SLIDER_BAR_DISABLED,
            bevy_window::SystemCursorIcon::NotAllowed,
        )
    } else {
        (
            tokens::SLIDER_BAR,
            bevy_window::SystemCursorIcon::EwResize,
        )
    };
    let bar_color = theme.color(&bar_token);
    let bg_color = theme.color(&tokens::SLIDER_BG);
    if let [Gradient::Linear(linear_gradient)] = &mut gradient.0[..] {
        // Stops 0-1 form the filled bar, stops 2-3 the empty background.
        linear_gradient.stops[0].color = bar_color;
        linear_gradient.stops[1].color = bar_color;
        linear_gradient.stops[2].color = bg_color;
        linear_gradient.stops[3].color = bg_color;
    }
    // Refresh the cursor shape.
    commands
        .entity(slider_ent)
        .insert(EntityCursor::System(cursor_shape));
}
/// System: update the slider's visual fill and numeric label when its value,
/// range or children change.
///
/// The fill is drawn by moving the two middle stops of the four-stop
/// background gradient to the value's percentage position. The label is
/// formatted to `SliderPrecision` decimals, unless the raw value already has
/// more decimals than the precision allows (then the raw value is shown).
fn update_slider_pos(
    mut q_sliders: Query<
        (
            Entity,
            &SliderValue,
            &SliderRange,
            &SliderPrecision,
            &mut BackgroundGradient,
        ),
        (
            With<SliderStyle>,
            Or<(
                Changed<SliderValue>,
                Changed<SliderRange>,
                Changed<Children>,
            )>,
        ),
    >,
    q_children: Query<&Children>,
    mut q_slider_text: Query<&mut Text, With<SliderValueText>>,
) {
    for (slider_ent, value, range, precision, mut gradient) in q_sliders.iter_mut() {
        if let [Gradient::Linear(linear_gradient)] = &mut gradient.0[..] {
            // Clamp so out-of-range values don't push the stops off the track.
            let percent_value = (range.thumb_position(value.0) * 100.0).clamp(0.0, 100.0);
            linear_gradient.stops[1].point = Val::Percent(percent_value);
            linear_gradient.stops[2].point = Val::Percent(percent_value);
        }
        // Find slider text child entity and update its text with the formatted value
        q_children.iter_descendants(slider_ent).for_each(|child| {
            if let Ok(mut text) = q_slider_text.get_mut(child) {
                // `to_string` instead of `format!("{}", ..)` (clippy: useless_format).
                let label = value.0.to_string();
                let decimals_len = label
                    .split_once('.')
                    .map(|(_, decimals)| decimals.len() as i32)
                    .unwrap_or(precision.0);
                // Don't format with precision if the value has more decimals than the precision
                text.0 = if precision.0 >= 0 && decimals_len <= precision.0 {
                    format!("{:.precision$}", value.0, precision = precision.0 as usize)
                } else {
                    label
                };
            }
        });
    }
}
/// Plugin which registers the systems for updating the slider styles.
pub struct SliderPlugin;
impl Plugin for SliderPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // All three systems run after picking so they observe this frame's
        // interaction state.
        app.add_systems(
            PreUpdate,
            (
                update_slider_styles,
                update_slider_styles_remove,
                update_slider_pos,
            )
                .in_set(PickingSystems::Last),
        );
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/color_plane.rs | crates/bevy_feathers/src/controls/color_plane.rs | use bevy_app::{Plugin, PostUpdate};
use bevy_asset::{Asset, Assets};
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::{ChildOf, Children},
observer::On,
query::{Changed, Has, Or, With},
reflect::ReflectComponent,
spawn::SpawnRelated,
system::{Commands, Query, Res, ResMut},
};
use bevy_math::{Vec2, Vec3};
use bevy_picking::{
events::{Cancel, Drag, DragEnd, DragStart, Pointer, Press},
Pickable,
};
use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath};
use bevy_render::render_resource::AsBindGroup;
use bevy_shader::{ShaderDefVal, ShaderRef};
use bevy_ui::{
px, AlignSelf, BorderColor, BorderRadius, ComputedNode, ComputedUiRenderTargetInfo, Display,
InteractionDisabled, Node, Outline, PositionType, UiGlobalTransform, UiRect, UiScale,
UiTransform, Val, Val2,
};
use bevy_ui_render::{prelude::UiMaterial, ui_material::MaterialNode, UiMaterialPlugin};
use bevy_ui_widgets::ValueChange;
use crate::{cursor::EntityCursor, palette, theme::ThemeBackgroundColor, tokens};
/// Marker identifying a color plane widget.
///
/// The variant selects which view of the color pane is shown.
#[derive(Component, Default, Debug, Clone, Reflect, Copy, PartialEq, Eq, Hash)]
#[reflect(Component, Clone, Default)]
#[require(ColorPlaneDragState)]
pub enum ColorPlane {
/// Show red on the horizontal axis and green on the vertical.
RedGreen,
/// Show red on the horizontal axis and blue on the vertical.
RedBlue,
/// Show green on the horizontal axis and blue on the vertical.
GreenBlue,
/// Show hue on the horizontal axis and saturation on the vertical.
HueSaturation,
/// Show hue on the horizontal axis and lightness on the vertical.
#[default]
HueLightness,
}
/// Component that contains the two components of the selected color, as well as the "z" value.
/// The x and y values determine the placement of the thumb element, while the z value controls
/// the background gradient.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
pub struct ColorPlaneValue(pub Vec3);
/// Marker identifying the inner element of the color plane.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct ColorPlaneInner;
/// Marker identifying the thumb element of the color plane.
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct ColorPlaneThumb;
/// Component used to manage the state of a slider during dragging.
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
struct ColorPlaneDragState(bool);
#[repr(C)]
#[derive(Eq, PartialEq, Hash, Copy, Clone)]
struct ColorPlaneMaterialKey {
plane: ColorPlane,
}
#[derive(AsBindGroup, Asset, TypePath, Default, Debug, Clone)]
#[bind_group_data(ColorPlaneMaterialKey)]
struct ColorPlaneMaterial {
plane: ColorPlane,
#[uniform(0)]
fixed_channel: f32,
}
impl From<&ColorPlaneMaterial> for ColorPlaneMaterialKey {
fn from(material: &ColorPlaneMaterial) -> Self {
Self {
plane: material.plane,
}
}
}
impl UiMaterial for ColorPlaneMaterial {
    /// Fragment shader embedded in the feathers crate.
    fn fragment_shader() -> ShaderRef {
        "embedded://bevy_feathers/assets/shaders/color_plane.wgsl".into()
    }
    /// Select the shader permutation for the plane variant by setting exactly
    /// one `PLANE_*` shader def on the fragment stage.
    fn specialize(
        descriptor: &mut bevy_render::render_resource::RenderPipelineDescriptor,
        key: bevy_ui_render::prelude::UiMaterialKey<Self>,
    ) {
        let plane_def = match key.bind_group_data.plane {
            ColorPlane::RedGreen => "PLANE_RG",
            ColorPlane::RedBlue => "PLANE_RB",
            ColorPlane::GreenBlue => "PLANE_GB",
            ColorPlane::HueSaturation => "PLANE_HS",
            ColorPlane::HueLightness => "PLANE_HL",
        };
        // `expect` documents the invariant instead of a bare `unwrap`:
        // UI material pipelines always have a fragment stage.
        descriptor
            .fragment
            .as_mut()
            .expect("UI material pipeline should always have a fragment stage")
            .shader_defs = vec![ShaderDefVal::Bool(plane_def.into(), true)];
    }
}
/// Template function to spawn a "color plane", which is a 2d picker that allows selecting two
/// components of a color space.
///
/// The control emits a [`ValueChange<Vec2>`] representing the current x and y values, ranging
/// from 0 to 1. The control accepts a [`Vec3`] input value, where the third component ('z')
/// is used to provide the fixed constant channel for the background gradient.
///
/// The control does not do any color space conversions internally, other than the shader code
/// for displaying gradients. Avoiding excess conversions helps avoid gimble-lock problems when
/// implementing a color picker for cylindrical color spaces such as HSL.
///
/// # Arguments
/// * `overrides` - a bundle of components that are merged in with the normal swatch components.
pub fn color_plane<B: Bundle>(plane: ColorPlane, overrides: B) -> impl Bundle {
(
Node {
display: Display::Flex,
min_height: px(100.0),
align_self: AlignSelf::Stretch,
padding: UiRect::all(px(4)),
border_radius: BorderRadius::all(px(5)),
..Default::default()
},
plane,
ColorPlaneValue::default(),
ThemeBackgroundColor(tokens::COLOR_PLANE_BG),
EntityCursor::System(bevy_window::SystemCursorIcon::Crosshair),
overrides,
children![(
Node {
align_self: AlignSelf::Stretch,
flex_grow: 1.0,
..Default::default()
},
ColorPlaneInner,
children![(
Node {
position_type: PositionType::Absolute,
left: Val::Percent(0.),
top: Val::Percent(0.),
width: px(10),
height: px(10),
border: UiRect::all(Val::Px(1.0)),
border_radius: BorderRadius::MAX,
..Default::default()
},
ColorPlaneThumb,
BorderColor::all(palette::WHITE),
Outline {
width: Val::Px(1.),
offset: Val::Px(0.),
color: palette::BLACK
},
Pickable::IGNORE,
UiTransform::from_translation(Val2::new(Val::Percent(-50.0), Val::Percent(-50.0),))
)],
),],
)
}
fn update_plane_color(
q_color_plane: Query<
(Entity, &ColorPlane, &ColorPlaneValue),
Or<(Changed<ColorPlane>, Changed<ColorPlaneValue>)>,
>,
q_children: Query<&Children>,
q_material_node: Query<&MaterialNode<ColorPlaneMaterial>>,
mut q_node: Query<&mut Node>,
mut r_materials: ResMut<Assets<ColorPlaneMaterial>>,
mut commands: Commands,
) {
for (plane_ent, plane, plane_value) in q_color_plane.iter() {
// Find the inner entity
let Ok(children) = q_children.get(plane_ent) else {
continue;
};
let Some(inner_ent) = children.first() else {
continue;
};
if let Ok(material_node) = q_material_node.get(*inner_ent) {
// Node component exists, update it
if let Some(material) = r_materials.get_mut(material_node.id()) {
// Update properties
material.plane = *plane;
material.fixed_channel = plane_value.0.z;
}
} else {
// Insert new node component
let material = r_materials.add(ColorPlaneMaterial {
plane: *plane,
fixed_channel: plane_value.0.z,
});
commands.entity(*inner_ent).insert(MaterialNode(material));
}
// Find the thumb.
let Ok(children_inner) = q_children.get(*inner_ent) else {
continue;
};
let Some(thumb_ent) = children_inner.first() else {
continue;
};
let Ok(mut thumb_node) = q_node.get_mut(*thumb_ent) else {
continue;
};
thumb_node.left = Val::Percent(plane_value.0.x * 100.0);
thumb_node.top = Val::Percent(plane_value.0.y * 100.0);
}
}
fn on_pointer_press(
mut press: On<Pointer<Press>>,
q_color_planes: Query<Has<InteractionDisabled>, With<ColorPlane>>,
q_color_plane_inner: Query<
(
&ComputedNode,
&ComputedUiRenderTargetInfo,
&UiGlobalTransform,
&ChildOf,
),
With<ColorPlaneInner>,
>,
ui_scale: Res<UiScale>,
mut commands: Commands,
) {
if let Ok((node, node_target, transform, parent)) = q_color_plane_inner.get(press.entity)
&& let Ok(disabled) = q_color_planes.get(parent.0)
{
press.propagate(false);
if !disabled {
let local_pos = transform.try_inverse().unwrap().transform_point2(
press.pointer_location.position * node_target.scale_factor() / ui_scale.0,
);
let pos = local_pos / node.size() + Vec2::splat(0.5);
let new_value = pos.clamp(Vec2::ZERO, Vec2::ONE);
commands.trigger(ValueChange {
source: parent.0,
value: new_value,
});
}
}
}
fn on_drag_start(
mut drag_start: On<Pointer<DragStart>>,
mut q_color_planes: Query<
(&mut ColorPlaneDragState, Has<InteractionDisabled>),
With<ColorPlane>,
>,
q_color_plane_inner: Query<&ChildOf, With<ColorPlaneInner>>,
) {
if let Ok(parent) = q_color_plane_inner.get(drag_start.entity)
&& let Ok((mut state, disabled)) = q_color_planes.get_mut(parent.0)
{
drag_start.propagate(false);
if !disabled {
state.0 = true;
}
}
}
fn on_drag(
mut drag: On<Pointer<Drag>>,
q_color_planes: Query<(&ColorPlaneDragState, Has<InteractionDisabled>), With<ColorPlane>>,
q_color_plane_inner: Query<
(
&ComputedNode,
&ComputedUiRenderTargetInfo,
&UiGlobalTransform,
&ChildOf,
),
With<ColorPlaneInner>,
>,
ui_scale: Res<UiScale>,
mut commands: Commands,
) {
if let Ok((node, node_target, transform, parent)) = q_color_plane_inner.get(drag.entity)
&& let Ok((state, disabled)) = q_color_planes.get(parent.0)
{
drag.propagate(false);
if state.0 && !disabled {
let local_pos = transform.try_inverse().unwrap().transform_point2(
drag.pointer_location.position * node_target.scale_factor() / ui_scale.0,
);
let pos = local_pos / node.size() + Vec2::splat(0.5);
let new_value = pos.clamp(Vec2::ZERO, Vec2::ONE);
commands.trigger(ValueChange {
source: parent.0,
value: new_value,
});
}
}
}
fn on_drag_end(
mut drag_end: On<Pointer<DragEnd>>,
mut q_color_planes: Query<&mut ColorPlaneDragState, With<ColorPlane>>,
q_color_plane_inner: Query<&ChildOf, With<ColorPlaneInner>>,
) {
if let Ok(parent) = q_color_plane_inner.get(drag_end.entity)
&& let Ok(mut state) = q_color_planes.get_mut(parent.0)
{
drag_end.propagate(false);
state.0 = false;
}
}
fn on_drag_cancel(
drag_cancel: On<Pointer<Cancel>>,
mut q_color_planes: Query<&mut ColorPlaneDragState, With<ColorPlane>>,
q_color_plane_inner: Query<&ChildOf, With<ColorPlaneInner>>,
) {
if let Ok(parent) = q_color_plane_inner.get(drag_cancel.entity)
&& let Ok(mut state) = q_color_planes.get_mut(parent.0)
{
state.0 = false;
}
}
/// Plugin which registers the observers for updating the swatch color.
pub struct ColorPlanePlugin;
impl Plugin for ColorPlanePlugin {
fn build(&self, app: &mut bevy_app::App) {
app.add_plugins(UiMaterialPlugin::<ColorPlaneMaterial>::default());
app.add_systems(PostUpdate, update_plane_color);
app.add_observer(on_pointer_press)
.add_observer(on_drag_start)
.add_observer(on_drag)
.add_observer(on_drag_end)
.add_observer(on_drag_cancel);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/mod.rs | crates/bevy_feathers/src/controls/mod.rs | //! Meta-module containing all feathers controls (widgets that are interactive).
use bevy_app::Plugin;
mod button;
mod checkbox;
mod color_plane;
mod color_slider;
mod color_swatch;
mod radio;
mod slider;
mod toggle_switch;
mod virtual_keyboard;
pub use button::{button, ButtonPlugin, ButtonProps, ButtonVariant};
pub use checkbox::{checkbox, CheckboxPlugin};
pub use color_plane::{color_plane, ColorPlane, ColorPlaneValue};
pub use color_slider::{
color_slider, ColorChannel, ColorSlider, ColorSliderPlugin, ColorSliderProps, SliderBaseColor,
};
pub use color_swatch::{color_swatch, ColorSwatch, ColorSwatchFg, ColorSwatchValue};
pub use radio::{radio, RadioPlugin};
pub use slider::{slider, SliderPlugin, SliderProps};
pub use toggle_switch::{toggle_switch, ToggleSwitchPlugin};
pub use virtual_keyboard::{virtual_keyboard, VirtualKeyPressed};
use crate::{
alpha_pattern::AlphaPatternPlugin,
controls::{color_plane::ColorPlanePlugin, color_swatch::ColorSwatchPlugin},
};
/// Plugin which registers all `bevy_feathers` controls.
pub struct ControlsPlugin;
impl Plugin for ControlsPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.add_plugins((
AlphaPatternPlugin,
ButtonPlugin,
CheckboxPlugin,
ColorPlanePlugin,
ColorSliderPlugin,
ColorSwatchPlugin,
RadioPlugin,
SliderPlugin,
ToggleSwitchPlugin,
));
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/radio.rs | crates/bevy_feathers/src/controls/radio.rs | use bevy_app::{Plugin, PreUpdate};
use bevy_camera::visibility::Visibility;
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::{ChildOf, Children},
lifecycle::RemovedComponents,
query::{Added, Changed, Has, Or, With},
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
spawn::{Spawn, SpawnRelated, SpawnableList},
system::{Commands, Query},
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_picking::{hover::Hovered, PickingSystems};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{
AlignItems, BorderRadius, Checked, Display, FlexDirection, InteractionDisabled, JustifyContent,
Node, UiRect, Val,
};
use bevy_ui_widgets::RadioButton;
use crate::{
constants::{fonts, size},
cursor::EntityCursor,
font_styles::InheritableFont,
handle_or_path::HandleOrPath,
theme::{ThemeBackgroundColor, ThemeBorderColor, ThemeFontColor},
tokens,
};
/// Marker for the radio outline
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct RadioOutline;
/// Marker for the radio check mark
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct RadioMark;
/// Template function to spawn a radio.
///
/// # Arguments
/// * `props` - construction properties for the radio.
/// * `overrides` - a bundle of components that are merged in with the normal radio components.
/// * `label` - the label of the radio.
///
/// # Emitted events
/// * [`bevy_ui_widgets::ValueChange<bool>`] with the value true when it becomes checked.
/// * [`bevy_ui_widgets::ValueChange<Entity>`] with the selected entity's id when a new radio button is selected.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn radio<C: SpawnableList<ChildOf> + Send + Sync + 'static, B: Bundle>(
overrides: B,
label: C,
) -> impl Bundle {
(
Node {
display: Display::Flex,
flex_direction: FlexDirection::Row,
justify_content: JustifyContent::Start,
align_items: AlignItems::Center,
column_gap: Val::Px(4.0),
..Default::default()
},
RadioButton,
Hovered::default(),
EntityCursor::System(bevy_window::SystemCursorIcon::Pointer),
TabIndex(0),
ThemeFontColor(tokens::RADIO_TEXT),
InheritableFont {
font: HandleOrPath::Path(fonts::REGULAR.to_owned()),
font_size: 14.0,
},
overrides,
Children::spawn((
Spawn((
Node {
display: Display::Flex,
align_items: AlignItems::Center,
justify_content: JustifyContent::Center,
width: size::RADIO_SIZE,
height: size::RADIO_SIZE,
border: UiRect::all(Val::Px(2.0)),
border_radius: BorderRadius::MAX,
..Default::default()
},
RadioOutline,
ThemeBorderColor(tokens::RADIO_BORDER),
children![(
// Cheesy checkmark: rotated node with L-shaped border.
Node {
width: Val::Px(8.),
height: Val::Px(8.),
border_radius: BorderRadius::MAX,
..Default::default()
},
RadioMark,
ThemeBackgroundColor(tokens::RADIO_MARK),
)],
)),
label,
)),
)
}
fn update_radio_styles(
q_radioes: Query<
(
Entity,
Has<InteractionDisabled>,
Has<Checked>,
&Hovered,
&ThemeFontColor,
),
(
With<RadioButton>,
Or<(Changed<Hovered>, Added<Checked>, Added<InteractionDisabled>)>,
),
>,
q_children: Query<&Children>,
mut q_outline: Query<&ThemeBorderColor, With<RadioOutline>>,
mut q_mark: Query<&ThemeBackgroundColor, With<RadioMark>>,
mut commands: Commands,
) {
for (radio_ent, disabled, checked, hovered, font_color) in q_radioes.iter() {
let Some(outline_ent) = q_children
.iter_descendants(radio_ent)
.find(|en| q_outline.contains(*en))
else {
continue;
};
let Some(mark_ent) = q_children
.iter_descendants(radio_ent)
.find(|en| q_mark.contains(*en))
else {
continue;
};
let outline_border = q_outline.get_mut(outline_ent).unwrap();
let mark_color = q_mark.get_mut(mark_ent).unwrap();
set_radio_styles(
radio_ent,
outline_ent,
mark_ent,
disabled,
checked,
hovered.0,
outline_border,
mark_color,
font_color,
&mut commands,
);
}
}
fn update_radio_styles_remove(
q_radioes: Query<
(
Entity,
Has<InteractionDisabled>,
Has<Checked>,
&Hovered,
&ThemeFontColor,
),
With<RadioButton>,
>,
q_children: Query<&Children>,
mut q_outline: Query<&ThemeBorderColor, With<RadioOutline>>,
mut q_mark: Query<&ThemeBackgroundColor, With<RadioMark>>,
mut removed_disabled: RemovedComponents<InteractionDisabled>,
mut removed_checked: RemovedComponents<Checked>,
mut commands: Commands,
) {
removed_disabled
.read()
.chain(removed_checked.read())
.for_each(|ent| {
if let Ok((radio_ent, disabled, checked, hovered, font_color)) = q_radioes.get(ent) {
let Some(outline_ent) = q_children
.iter_descendants(radio_ent)
.find(|en| q_outline.contains(*en))
else {
return;
};
let Some(mark_ent) = q_children
.iter_descendants(radio_ent)
.find(|en| q_mark.contains(*en))
else {
return;
};
let outline_border = q_outline.get_mut(outline_ent).unwrap();
let mark_color = q_mark.get_mut(mark_ent).unwrap();
set_radio_styles(
radio_ent,
outline_ent,
mark_ent,
disabled,
checked,
hovered.0,
outline_border,
mark_color,
font_color,
&mut commands,
);
}
});
}
fn set_radio_styles(
radio_ent: Entity,
outline_ent: Entity,
mark_ent: Entity,
disabled: bool,
checked: bool,
hovered: bool,
outline_border: &ThemeBorderColor,
mark_color: &ThemeBackgroundColor,
font_color: &ThemeFontColor,
commands: &mut Commands,
) {
let outline_border_token = match (disabled, hovered) {
(true, _) => tokens::RADIO_BORDER_DISABLED,
(false, true) => tokens::RADIO_BORDER_HOVER,
_ => tokens::RADIO_BORDER,
};
let mark_token = match disabled {
true => tokens::RADIO_MARK_DISABLED,
false => tokens::RADIO_MARK,
};
let font_color_token = match disabled {
true => tokens::RADIO_TEXT_DISABLED,
false => tokens::RADIO_TEXT,
};
let cursor_shape = match disabled {
true => bevy_window::SystemCursorIcon::NotAllowed,
false => bevy_window::SystemCursorIcon::Pointer,
};
// Change outline border
if outline_border.0 != outline_border_token {
commands
.entity(outline_ent)
.insert(ThemeBorderColor(outline_border_token));
}
// Change mark color
if mark_color.0 != mark_token {
commands
.entity(mark_ent)
.insert(ThemeBorderColor(mark_token));
}
// Change mark visibility
commands.entity(mark_ent).insert(match checked {
true => Visibility::Inherited,
false => Visibility::Hidden,
});
// Change font color
if font_color.0 != font_color_token {
commands
.entity(radio_ent)
.insert(ThemeFontColor(font_color_token));
}
// Change cursor shape
commands
.entity(radio_ent)
.insert(EntityCursor::System(cursor_shape));
}
/// Plugin which registers the systems for updating the radio styles.
pub struct RadioPlugin;
impl Plugin for RadioPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.add_systems(
PreUpdate,
(update_radio_styles, update_radio_styles_remove).in_set(PickingSystems::Last),
);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_feathers/src/controls/checkbox.rs | crates/bevy_feathers/src/controls/checkbox.rs | use bevy_app::{Plugin, PreUpdate};
use bevy_camera::visibility::Visibility;
use bevy_ecs::{
bundle::Bundle,
children,
component::Component,
entity::Entity,
hierarchy::{ChildOf, Children},
lifecycle::RemovedComponents,
query::{Added, Changed, Has, Or, With},
reflect::ReflectComponent,
schedule::IntoScheduleConfigs,
spawn::{Spawn, SpawnRelated, SpawnableList},
system::{Commands, Query},
};
use bevy_input_focus::tab_navigation::TabIndex;
use bevy_math::Rot2;
use bevy_picking::{hover::Hovered, PickingSystems};
use bevy_reflect::{prelude::ReflectDefault, Reflect};
use bevy_ui::{
AlignItems, BorderRadius, Checked, Display, FlexDirection, InteractionDisabled, JustifyContent,
Node, PositionType, UiRect, UiTransform, Val,
};
use bevy_ui_widgets::Checkbox;
use crate::{
constants::{fonts, size},
cursor::EntityCursor,
font_styles::InheritableFont,
handle_or_path::HandleOrPath,
theme::{ThemeBackgroundColor, ThemeBorderColor, ThemeFontColor},
tokens,
};
/// Marker for the checkbox frame (contains both checkbox and label)
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct CheckboxFrame;
/// Marker for the checkbox outline
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct CheckboxOutline;
/// Marker for the checkbox check mark
#[derive(Component, Default, Clone, Reflect)]
#[reflect(Component, Clone, Default)]
struct CheckboxMark;
/// Template function to spawn a checkbox.
///
/// # Arguments
/// * `props` - construction properties for the checkbox.
/// * `overrides` - a bundle of components that are merged in with the normal checkbox components.
/// * `label` - the label of the checkbox.
///
/// # Emitted events
/// * [`bevy_ui_widgets::ValueChange<bool>`] with the new value when the checkbox changes state.
///
/// These events can be disabled by adding an [`bevy_ui::InteractionDisabled`] component to the entity
pub fn checkbox<C: SpawnableList<ChildOf> + Send + Sync + 'static, B: Bundle>(
overrides: B,
label: C,
) -> impl Bundle {
(
Node {
display: Display::Flex,
flex_direction: FlexDirection::Row,
justify_content: JustifyContent::Start,
align_items: AlignItems::Center,
column_gap: Val::Px(4.0),
..Default::default()
},
Checkbox,
CheckboxFrame,
Hovered::default(),
EntityCursor::System(bevy_window::SystemCursorIcon::Pointer),
TabIndex(0),
ThemeFontColor(tokens::CHECKBOX_TEXT),
InheritableFont {
font: HandleOrPath::Path(fonts::REGULAR.to_owned()),
font_size: 14.0,
},
overrides,
Children::spawn((
Spawn((
Node {
width: size::CHECKBOX_SIZE,
height: size::CHECKBOX_SIZE,
border: UiRect::all(Val::Px(2.0)),
border_radius: BorderRadius::all(Val::Px(4.0)),
..Default::default()
},
CheckboxOutline,
ThemeBackgroundColor(tokens::CHECKBOX_BG),
ThemeBorderColor(tokens::CHECKBOX_BORDER),
children![(
// Cheesy checkmark: rotated node with L-shaped border.
Node {
position_type: PositionType::Absolute,
left: Val::Px(4.0),
top: Val::Px(0.0),
width: Val::Px(6.),
height: Val::Px(11.),
border: UiRect {
bottom: Val::Px(2.0),
right: Val::Px(2.0),
..Default::default()
},
..Default::default()
},
UiTransform::from_rotation(Rot2::FRAC_PI_4),
CheckboxMark,
ThemeBorderColor(tokens::CHECKBOX_MARK),
)],
)),
label,
)),
)
}
fn update_checkbox_styles(
q_checkboxes: Query<
(
Entity,
Has<InteractionDisabled>,
Has<Checked>,
&Hovered,
&ThemeFontColor,
),
(
With<CheckboxFrame>,
Or<(Changed<Hovered>, Added<Checked>, Added<InteractionDisabled>)>,
),
>,
q_children: Query<&Children>,
mut q_outline: Query<(&ThemeBackgroundColor, &ThemeBorderColor), With<CheckboxOutline>>,
mut q_mark: Query<&ThemeBorderColor, With<CheckboxMark>>,
mut commands: Commands,
) {
for (checkbox_ent, disabled, checked, hovered, font_color) in q_checkboxes.iter() {
let Some(outline_ent) = q_children
.iter_descendants(checkbox_ent)
.find(|en| q_outline.contains(*en))
else {
continue;
};
let Some(mark_ent) = q_children
.iter_descendants(checkbox_ent)
.find(|en| q_mark.contains(*en))
else {
continue;
};
let (outline_bg, outline_border) = q_outline.get_mut(outline_ent).unwrap();
let mark_color = q_mark.get_mut(mark_ent).unwrap();
set_checkbox_styles(
checkbox_ent,
outline_ent,
mark_ent,
disabled,
checked,
hovered.0,
outline_bg,
outline_border,
mark_color,
font_color,
&mut commands,
);
}
}
fn update_checkbox_styles_remove(
q_checkboxes: Query<
(
Entity,
Has<InteractionDisabled>,
Has<Checked>,
&Hovered,
&ThemeFontColor,
),
With<CheckboxFrame>,
>,
q_children: Query<&Children>,
mut q_outline: Query<(&ThemeBackgroundColor, &ThemeBorderColor), With<CheckboxOutline>>,
mut q_mark: Query<&ThemeBorderColor, With<CheckboxMark>>,
mut removed_disabled: RemovedComponents<InteractionDisabled>,
mut removed_checked: RemovedComponents<Checked>,
mut commands: Commands,
) {
removed_disabled
.read()
.chain(removed_checked.read())
.for_each(|ent| {
if let Ok((checkbox_ent, disabled, checked, hovered, font_color)) =
q_checkboxes.get(ent)
{
let Some(outline_ent) = q_children
.iter_descendants(checkbox_ent)
.find(|en| q_outline.contains(*en))
else {
return;
};
let Some(mark_ent) = q_children
.iter_descendants(checkbox_ent)
.find(|en| q_mark.contains(*en))
else {
return;
};
let (outline_bg, outline_border) = q_outline.get_mut(outline_ent).unwrap();
let mark_color = q_mark.get_mut(mark_ent).unwrap();
set_checkbox_styles(
checkbox_ent,
outline_ent,
mark_ent,
disabled,
checked,
hovered.0,
outline_bg,
outline_border,
mark_color,
font_color,
&mut commands,
);
}
});
}
fn set_checkbox_styles(
checkbox_ent: Entity,
outline_ent: Entity,
mark_ent: Entity,
disabled: bool,
checked: bool,
hovered: bool,
outline_bg: &ThemeBackgroundColor,
outline_border: &ThemeBorderColor,
mark_color: &ThemeBorderColor,
font_color: &ThemeFontColor,
commands: &mut Commands,
) {
let outline_border_token = match (disabled, hovered) {
(true, _) => tokens::CHECKBOX_BORDER_DISABLED,
(false, true) => tokens::CHECKBOX_BORDER_HOVER,
_ => tokens::CHECKBOX_BORDER,
};
let outline_bg_token = match (disabled, checked) {
(true, true) => tokens::CHECKBOX_BG_CHECKED_DISABLED,
(true, false) => tokens::CHECKBOX_BG_DISABLED,
(false, true) => tokens::CHECKBOX_BG_CHECKED,
(false, false) => tokens::CHECKBOX_BG,
};
let mark_token = match disabled {
true => tokens::CHECKBOX_MARK_DISABLED,
false => tokens::CHECKBOX_MARK,
};
let font_color_token = match disabled {
true => tokens::CHECKBOX_TEXT_DISABLED,
false => tokens::CHECKBOX_TEXT,
};
let cursor_shape = match disabled {
true => bevy_window::SystemCursorIcon::NotAllowed,
false => bevy_window::SystemCursorIcon::Pointer,
};
// Change outline background
if outline_bg.0 != outline_bg_token {
commands
.entity(outline_ent)
.insert(ThemeBackgroundColor(outline_bg_token));
}
// Change outline border
if outline_border.0 != outline_border_token {
commands
.entity(outline_ent)
.insert(ThemeBorderColor(outline_border_token));
}
// Change mark color
if mark_color.0 != mark_token {
commands
.entity(mark_ent)
.insert(ThemeBorderColor(mark_token));
}
// Change mark visibility
commands.entity(mark_ent).insert(match checked {
true => Visibility::Inherited,
false => Visibility::Hidden,
});
// Change font color
if font_color.0 != font_color_token {
commands
.entity(checkbox_ent)
.insert(ThemeFontColor(font_color_token));
}
// Change cursor shape
commands
.entity(checkbox_ent)
.insert(EntityCursor::System(cursor_shape));
}
/// Plugin which registers the systems for updating the checkbox styles.
pub struct CheckboxPlugin;
impl Plugin for CheckboxPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.add_systems(
PreUpdate,
(update_checkbox_styles, update_checkbox_styles_remove).in_set(PickingSystems::Last),
);
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/lib.rs | crates/bevy_camera/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
mod camera;
mod clear_color;
mod components;
pub mod primitives;
mod projection;
pub mod visibility;
use bevy_ecs::schedule::SystemSet;
pub use camera::*;
pub use clear_color::*;
pub use components::*;
pub use projection::*;
use bevy_app::{App, Plugin};
#[derive(Default)]
pub struct CameraPlugin;
impl Plugin for CameraPlugin {
fn build(&self, app: &mut App) {
app.init_resource::<ClearColor>().add_plugins((
CameraProjectionPlugin,
visibility::VisibilityPlugin,
visibility::VisibilityRangePlugin,
));
}
}
/// The camera prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::{
visibility::{InheritedVisibility, ViewVisibility, Visibility},
Camera, Camera2d, Camera3d, ClearColor, ClearColorConfig, MsaaWriteback,
OrthographicProjection, PerspectiveProjection, Projection,
};
}
/// Label for `camera_system<T>`, shared across all `T`.
#[derive(SystemSet, Clone, Eq, PartialEq, Hash, Debug)]
pub struct CameraUpdateSystems;
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/primitives.rs | crates/bevy_camera/src/primitives.rs | use core::borrow::Borrow;
use bevy_ecs::{component::Component, entity::EntityHashMap, reflect::ReflectComponent};
use bevy_math::{
bounding::{Aabb3d, BoundingVolume},
Affine3A, Mat3A, Mat4, Vec3, Vec3A, Vec4, Vec4Swizzles,
};
use bevy_mesh::{Mesh, VertexAttributeValues};
use bevy_reflect::prelude::*;
pub trait MeshAabb {
/// Compute the Axis-Aligned Bounding Box of the mesh vertices in model space
///
/// Returns `None` if `self` doesn't have [`Mesh::ATTRIBUTE_POSITION`] of
/// type [`VertexAttributeValues::Float32x3`], or if `self` doesn't have any vertices.
fn compute_aabb(&self) -> Option<Aabb>;
}
impl MeshAabb for Mesh {
fn compute_aabb(&self) -> Option<Aabb> {
if let Some(aabb) = self.final_aabb {
// use precomputed extents
return Some(aabb.into());
}
let Ok(VertexAttributeValues::Float32x3(values)) =
self.try_attribute(Mesh::ATTRIBUTE_POSITION)
else {
return None;
};
Aabb::enclosing(values.iter().map(|p| Vec3::from_slice(p)))
}
}
/// An axis-aligned bounding box, defined by:
/// - a center,
/// - the distances from the center to each faces along the axis,
/// the faces are orthogonal to the axis.
///
/// It is typically used as a component on an entity to represent the local space
/// occupied by this entity, with faces orthogonal to its local axis.
///
/// This component is notably used during "frustum culling", a process to determine
/// if an entity should be rendered by a [`Camera`] if its bounding box intersects
/// with the camera's [`Frustum`].
///
/// It will be added automatically by the systems in [`CalculateBounds`] to entities that:
/// - could be subject to frustum culling, for example with a [`Mesh3d`]
/// or `Sprite` component,
/// - don't have the [`NoFrustumCulling`] component.
///
/// It won't be updated automatically if the space occupied by the entity changes,
/// for example if the vertex positions of a [`Mesh3d`] are updated.
///
/// [`Camera`]: crate::Camera
/// [`NoFrustumCulling`]: crate::visibility::NoFrustumCulling
/// [`CalculateBounds`]: crate::visibility::VisibilitySystems::CalculateBounds
/// [`Mesh3d`]: bevy_mesh::Mesh
#[derive(Component, Clone, Copy, Debug, Default, Reflect, PartialEq)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
pub struct Aabb {
pub center: Vec3A,
pub half_extents: Vec3A,
}
impl Aabb {
#[inline]
pub fn from_min_max(minimum: Vec3, maximum: Vec3) -> Self {
let minimum = Vec3A::from(minimum);
let maximum = Vec3A::from(maximum);
let center = 0.5 * (maximum + minimum);
let half_extents = 0.5 * (maximum - minimum);
Self {
center,
half_extents,
}
}
/// Returns a bounding box enclosing the specified set of points.
///
/// Returns `None` if the iterator is empty.
///
/// # Examples
///
/// ```
/// # use bevy_math::{Vec3, Vec3A};
/// # use bevy_camera::primitives::Aabb;
/// let bb = Aabb::enclosing([Vec3::X, Vec3::Z * 2.0, Vec3::Y * -0.5]).unwrap();
/// assert_eq!(bb.min(), Vec3A::new(0.0, -0.5, 0.0));
/// assert_eq!(bb.max(), Vec3A::new(1.0, 0.0, 2.0));
/// ```
pub fn enclosing<T: Borrow<Vec3>>(iter: impl IntoIterator<Item = T>) -> Option<Self> {
let mut iter = iter.into_iter().map(|p| *p.borrow());
let mut min = iter.next()?;
let mut max = min;
for v in iter {
min = Vec3::min(min, v);
max = Vec3::max(max, v);
}
Some(Self::from_min_max(min, max))
}
/// Calculate the relative radius of the AABB with respect to a plane
#[inline]
pub fn relative_radius(&self, p_normal: &Vec3A, world_from_local: &Mat3A) -> f32 {
// NOTE: dot products on Vec3A use SIMD and even with the overhead of conversion are net faster than Vec3
let half_extents = self.half_extents;
Vec3A::new(
p_normal.dot(world_from_local.x_axis),
p_normal.dot(world_from_local.y_axis),
p_normal.dot(world_from_local.z_axis),
)
.abs()
.dot(half_extents)
}
#[inline]
pub fn min(&self) -> Vec3A {
self.center - self.half_extents
}
#[inline]
pub fn max(&self) -> Vec3A {
self.center + self.half_extents
}
/// Check if the AABB is at the front side of the bisecting plane.
/// Referenced from: [AABB Plane intersection](https://gdbooks.gitbooks.io/3dcollisions/content/Chapter2/static_aabb_plane.html)
#[inline]
pub fn is_in_half_space(&self, half_space: &HalfSpace, world_from_local: &Affine3A) -> bool {
// transform the half-extents into world space.
let half_extents_world = world_from_local.matrix3.abs() * self.half_extents.abs();
// collapse the half-extents onto the plane normal.
let p_normal = half_space.normal();
let r = half_extents_world.dot(p_normal.abs());
let aabb_center_world = world_from_local.transform_point3a(self.center);
let signed_distance = p_normal.dot(aabb_center_world) + half_space.d();
signed_distance > r
}
/// Optimized version of [`Self::is_in_half_space`] when the AABB is already in world space.
/// Use this when `world_from_local` would be the identity transform.
#[inline]
pub fn is_in_half_space_identity(&self, half_space: &HalfSpace) -> bool {
let p_normal = half_space.normal();
let r = self.half_extents.abs().dot(p_normal.abs());
let signed_distance = p_normal.dot(self.center) + half_space.d();
signed_distance > r
}
}
impl From<Aabb3d> for Aabb {
fn from(aabb: Aabb3d) -> Self {
Self {
center: aabb.center(),
half_extents: aabb.half_size(),
}
}
}
impl From<Aabb> for Aabb3d {
fn from(aabb: Aabb) -> Self {
Self {
min: aabb.min(),
max: aabb.max(),
}
}
}
impl From<Sphere> for Aabb {
#[inline]
fn from(sphere: Sphere) -> Self {
Self {
center: sphere.center,
half_extents: Vec3A::splat(sphere.radius),
}
}
}
#[derive(Clone, Debug, Default)]
pub struct Sphere {
pub center: Vec3A,
pub radius: f32,
}
impl Sphere {
#[inline]
pub fn intersects_obb(&self, aabb: &Aabb, world_from_local: &Affine3A) -> bool {
let aabb_center_world = world_from_local.transform_point3a(aabb.center);
let v = aabb_center_world - self.center;
let d = v.length();
let relative_radius = aabb.relative_radius(&(v / d), &world_from_local.matrix3);
d < self.radius + relative_radius
}
}
/// A region of 3D space, specifically an open set whose border is a bisecting 2D plane.
///
/// This bisecting plane partitions 3D space into two infinite regions,
/// the half-space is one of those regions and excludes the bisecting plane.
///
/// Each instance of this type is characterized by:
/// - the bisecting plane's unit normal, normalized and pointing "inside" the half-space,
/// - the signed distance along the normal from the bisecting plane to the origin of 3D space.
///
/// The distance can also be seen as:
/// - the distance along the inverse of the normal from the origin of 3D space to the bisecting plane,
/// - the opposite of the distance along the normal from the origin of 3D space to the bisecting plane.
///
/// Any point `p` is considered to be within the `HalfSpace` when the length of the projection
/// of p on the normal is greater or equal than the opposite of the distance,
/// meaning: if the equation `normal.dot(p) + distance > 0.` is satisfied.
///
/// For example, the half-space containing all the points with a z-coordinate lesser
/// or equal than `8.0` would be defined by: `HalfSpace::new(Vec3::NEG_Z.extend(-8.0))`.
/// It includes all the points from the bisecting plane towards `NEG_Z`, and the distance
/// from the plane to the origin is `-8.0` along `NEG_Z`.
///
/// It is used to define a [`Frustum`], but is also a useful mathematical primitive for rendering tasks such as light computation.
#[derive(Clone, Copy, Debug, Default)]
pub struct HalfSpace {
normal_d: Vec4,
}
impl HalfSpace {
/// Constructs a `HalfSpace` from a 4D vector whose first 3 components
/// represent the bisecting plane's unit normal, and the last component is
/// the signed distance along the normal from the plane to the origin.
/// The constructor ensures the normal vector is normalized and the distance is appropriately scaled.
#[inline]
pub fn new(normal_d: Vec4) -> Self {
Self {
normal_d: normal_d * normal_d.xyz().length_recip(),
}
}
/// Returns the unit normal vector of the bisecting plane that characterizes the `HalfSpace`.
#[inline]
pub fn normal(&self) -> Vec3A {
Vec3A::from_vec4(self.normal_d)
}
/// Returns the signed distance from the bisecting plane to the origin along
/// the plane's unit normal vector.
#[inline]
pub fn d(&self) -> f32 {
self.normal_d.w
}
/// Returns the bisecting plane's unit normal vector and the signed distance
/// from the plane to the origin.
#[inline]
pub fn normal_d(&self) -> Vec4 {
self.normal_d
}
}
/// A region of 3D space defined by the intersection of 6 [`HalfSpace`]s.
///
/// Frustums are typically an apex-truncated square pyramid (a pyramid without the top) or a cuboid.
///
/// Half spaces are ordered left, right, top, bottom, near, far. The normal vectors
/// of the half-spaces point towards the interior of the frustum.
///
/// A frustum component is used on an entity with a [`Camera`] component to
/// determine which entities will be considered for rendering by this camera.
/// All entities with an [`Aabb`] component that are not contained by (or crossing
/// the boundary of) the frustum will not be rendered, and not be used in rendering computations.
///
/// This process is called frustum culling, and entities can opt out of it using
/// the [`NoFrustumCulling`] component.
///
/// The frustum component is typically added automatically for cameras, either [`Camera2d`] or [`Camera3d`].
/// It is usually updated automatically by [`update_frusta`] from the
/// [`CameraProjection`] component and [`GlobalTransform`] of the camera entity.
///
/// [`Camera`]: crate::Camera
/// [`NoFrustumCulling`]: crate::visibility::NoFrustumCulling
/// [`update_frusta`]: crate::visibility::update_frusta
/// [`CameraProjection`]: crate::CameraProjection
/// [`GlobalTransform`]: bevy_transform::components::GlobalTransform
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Clone, Copy, Debug, Default, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct Frustum {
    /// The six bounding half-spaces, in the order left, right, top, bottom,
    /// near, far (the near plane sits at [`Frustum::NEAR_PLANE_IDX`]).
    #[reflect(ignore, clone)]
    pub half_spaces: [HalfSpace; 6],
}
impl Frustum {
    /// Index of the near plane within [`Frustum::half_spaces`].
    pub const NEAR_PLANE_IDX: usize = 4;
    /// Index of the far plane within [`Frustum::half_spaces`].
    const FAR_PLANE_IDX: usize = 5;
    /// Placeholder for a frustum with no active far plane. Passing this through
    /// [`HalfSpace::new`] scales a zero-length normal, producing NaN components;
    /// since NaN comparisons are always false, the plane tests below never
    /// reject anything for this half-space.
    const INACTIVE_HALF_SPACE: Vec4 = Vec4::new(0.0, 0.0, 0.0, f32::INFINITY);
    /// Returns a frustum derived from `clip_from_world`.
    #[inline]
    pub fn from_clip_from_world(clip_from_world: &Mat4) -> Self {
        let mut frustum = Frustum::from_clip_from_world_no_far(clip_from_world);
        // Row 2 of the matrix encodes the far plane (the clip-space z = 0
        // boundary with Bevy's reversed-depth convention).
        frustum.half_spaces[Self::FAR_PLANE_IDX] = HalfSpace::new(clip_from_world.row(2));
        frustum
    }
    /// Returns a frustum derived from `clip_from_world`,
    /// but with a custom far plane.
    #[inline]
    pub fn from_clip_from_world_custom_far(
        clip_from_world: &Mat4,
        view_translation: &Vec3,
        view_backward: &Vec3,
        far: f32,
    ) -> Self {
        let mut frustum = Frustum::from_clip_from_world_no_far(clip_from_world);
        // Build the far plane by hand: it passes through `far_center` (a point
        // `far` units in front of the camera) and faces back toward the camera.
        let far_center = *view_translation - far * *view_backward;
        frustum.half_spaces[Self::FAR_PLANE_IDX] =
            HalfSpace::new(view_backward.extend(-view_backward.dot(far_center)));
        frustum
    }
    // NOTE: This approach of extracting the frustum half-space from the view
    // projection matrix is from Foundations of Game Engine Development 2
    // Rendering by Lengyel.
    /// Returns a frustum derived from `clip_from_world`,
    /// without a far plane (the far slot is filled with [`Self::INACTIVE_HALF_SPACE`]).
    fn from_clip_from_world_no_far(clip_from_world: &Mat4) -> Self {
        let row0 = clip_from_world.row(0);
        let row1 = clip_from_world.row(1);
        let row2 = clip_from_world.row(2);
        let row3 = clip_from_world.row(3);
        Self {
            half_spaces: [
                // Left, right, top, bottom, near — in the order documented on
                // [`Frustum::half_spaces`].
                HalfSpace::new(row3 + row0),
                HalfSpace::new(row3 - row0),
                HalfSpace::new(row3 + row1),
                HalfSpace::new(row3 - row1),
                HalfSpace::new(row3 + row2),
                HalfSpace::new(Self::INACTIVE_HALF_SPACE),
            ],
        }
    }
    /// Checks if a sphere intersects the frustum.
    ///
    /// When `intersect_far` is `false`, the far plane is skipped (useful for
    /// projections with no meaningful far plane).
    #[inline]
    pub fn intersects_sphere(&self, sphere: &Sphere, intersect_far: bool) -> bool {
        let sphere_center = sphere.center.extend(1.0);
        // This relies on the near plane preceding the far plane in the array.
        let max = if intersect_far {
            Self::FAR_PLANE_IDX
        } else {
            Self::NEAR_PLANE_IDX
        };
        for half_space in &self.half_spaces[..=max] {
            // Fully outside this half-space if the center's signed distance to
            // the plane is at most -radius.
            if half_space.normal_d().dot(sphere_center) + sphere.radius <= 0.0 {
                return false;
            }
        }
        true
    }
    /// Checks if an Oriented Bounding Box (obb) intersects the frustum.
    ///
    /// The OBB is given as a local-space [`Aabb`] plus its `world_from_local`
    /// transform. `intersect_near` / `intersect_far` toggle testing against the
    /// near / far planes respectively.
    #[inline]
    pub fn intersects_obb(
        &self,
        aabb: &Aabb,
        world_from_local: &Affine3A,
        intersect_near: bool,
        intersect_far: bool,
    ) -> bool {
        let aabb_center_world = world_from_local.transform_point3a(aabb.center).extend(1.0);
        for (idx, half_space) in self.half_spaces.into_iter().enumerate() {
            if (idx == Self::NEAR_PLANE_IDX && !intersect_near)
                || (idx == Self::FAR_PLANE_IDX && !intersect_far)
            {
                continue;
            }
            let p_normal = half_space.normal();
            // Project the (transformed) box onto the plane normal to get its
            // effective half-extent along that axis.
            let relative_radius = aabb.relative_radius(&p_normal, &world_from_local.matrix3);
            if half_space.normal_d().dot(aabb_center_world) + relative_radius <= 0.0 {
                return false;
            }
        }
        true
    }
    /// Optimized version of [`Frustum::intersects_obb`]
    /// where the transform is [`Affine3A::IDENTITY`] and both `intersect_near` and `intersect_far` are `true`.
    #[inline]
    pub fn intersects_obb_identity(&self, aabb: &Aabb) -> bool {
        let aabb_center_world = aabb.center.extend(1.0);
        for half_space in self.half_spaces.iter() {
            let p_normal = half_space.normal();
            // With no rotation, the projected half-extent is just |extents|·|n|.
            let relative_radius = aabb.half_extents.abs().dot(p_normal.abs());
            if half_space.normal_d().dot(aabb_center_world) + relative_radius <= 0.0 {
                return false;
            }
        }
        true
    }
    /// Check if the frustum contains the entire Axis-Aligned Bounding Box (AABB).
    /// Referenced from: [Frustum Culling](https://learnopengl.com/Guest-Articles/2021/Scene/Frustum-Culling)
    #[inline]
    pub fn contains_aabb(&self, aabb: &Aabb, world_from_local: &Affine3A) -> bool {
        // Containment requires the box to be fully inside every half-space.
        for half_space in &self.half_spaces {
            if !aabb.is_in_half_space(half_space, world_from_local) {
                return false;
            }
        }
        true
    }
    /// Optimized version of [`Self::contains_aabb`] when the AABB is already in world space.
    /// Use this when `world_from_local` would be [`Affine3A::IDENTITY`].
    #[inline]
    pub fn contains_aabb_identity(&self, aabb: &Aabb) -> bool {
        for half_space in &self.half_spaces {
            if !aabb.is_in_half_space_identity(half_space) {
                return false;
            }
        }
        true
    }
}
/// One face of a cubemap, described by the camera orientation used to render it.
pub struct CubeMapFace {
    /// The world-space direction the face's camera looks toward.
    pub target: Vec3,
    /// The up vector used when rendering this face.
    pub up: Vec3,
}
/// Cubemap faces are [+X, -X, +Y, -Y, +Z, -Z], per <https://www.w3.org/TR/webgpu/#texture-view-creation>
///
/// Note: Cubemap coordinates are left-handed y-up, unlike the rest of Bevy.
/// See <https://registry.khronos.org/vulkan/specs/1.2/html/chap16.html#_cube_map_face_selection>
///
/// For each cubemap face, we take care to specify the appropriate target/up axis such that the rendered
/// texture using Bevy's right-handed y-up coordinate space matches the expected cubemap face in
/// left-handed y-up cubemap coordinates.
pub const CUBE_MAP_FACES: [CubeMapFace; 6] = [
    // +X
    CubeMapFace {
        target: Vec3::X,
        up: Vec3::Y,
    },
    // -X
    CubeMapFace {
        target: Vec3::NEG_X,
        up: Vec3::Y,
    },
    // +Y
    CubeMapFace {
        target: Vec3::Y,
        up: Vec3::Z,
    },
    // -Y
    CubeMapFace {
        target: Vec3::NEG_Y,
        up: Vec3::NEG_Z,
    },
    // +Z (with left-handed conventions, pointing forwards)
    CubeMapFace {
        target: Vec3::NEG_Z,
        up: Vec3::Y,
    },
    // -Z (with left-handed conventions, pointing backwards)
    CubeMapFace {
        target: Vec3::Z,
        up: Vec3::Y,
    },
];
/// Returns the conventional short name (`"+x"` .. `"-z"`) for a cubemap face
/// index in the [`CUBE_MAP_FACES`] order, or `"invalid"` for any index
/// outside `0..=5`.
pub fn face_index_to_name(face_index: usize) -> &'static str {
    // Same order as CUBE_MAP_FACES: [+X, -X, +Y, -Y, +Z, -Z].
    const FACE_NAMES: [&str; 6] = ["+x", "-x", "+y", "-y", "+z", "-z"];
    FACE_NAMES.get(face_index).copied().unwrap_or("invalid")
}
/// Six frusta, one per cubemap face, in the [`CUBE_MAP_FACES`] order.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct CubemapFrusta {
    /// One frustum per cubemap face.
    #[reflect(ignore, clone)]
    pub frusta: [Frustum; 6],
}
impl CubemapFrusta {
    /// Iterates over the six per-face frusta.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &Frustum> {
        self.frusta.iter()
    }
    /// Iterates mutably over the six per-face frusta.
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Frustum> {
        self.frusta.iter_mut()
    }
}
/// Cubemap layout defines the order of images in a packed cubemap image.
///
/// Discriminants are explicit (0..=3) so each layout has a stable numeric id
/// — NOTE(review): confirm whether any serialization relies on these values.
#[derive(Default, Reflect, Debug, Clone, Copy)]
pub enum CubemapLayout {
    /// layout in a vertical cross format
    /// ```text
    ///    +y
    /// -x -z +x
    ///    -y
    ///    +z
    /// ```
    #[default]
    CrossVertical = 0,
    /// layout in a horizontal cross format
    /// ```text
    ///    +y
    /// -x -z +x +z
    ///    -y
    /// ```
    CrossHorizontal = 1,
    /// layout in a vertical sequence
    /// ```text
    /// +x
    /// -x
    /// +y
    /// -y
    /// -z
    /// +z
    /// ```
    SequenceVertical = 2,
    /// layout in a horizontal sequence
    /// ```text
    /// +x -x +y -y -z +z
    /// ```
    SequenceHorizontal = 3,
}
/// Per-entity cascade frusta.
///
/// Maps an entity to one [`Frustum`] per shadow cascade — presumably keyed by
/// the view entity the cascades were computed for; confirm against the light
/// systems that populate this map.
#[derive(Component, Debug, Default, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
pub struct CascadesFrusta {
    /// One `Vec<Frustum>` (one frustum per cascade) per entity.
    #[reflect(ignore, clone)]
    pub frusta: EntityHashMap<Vec<Frustum>>,
}
#[cfg(test)]
mod tests {
    use core::f32::consts::PI;
    use bevy_math::{ops, Quat};
    use bevy_transform::components::GlobalTransform;
    use crate::{CameraProjection, PerspectiveProjection};
    use super::*;

    // A big, offset frustum
    fn big_frustum() -> Frustum {
        Frustum {
            half_spaces: [
                HalfSpace::new(Vec4::new(-0.9701, -0.2425, -0.0000, 7.7611)),
                HalfSpace::new(Vec4::new(-0.0000, 1.0000, -0.0000, 4.0000)),
                HalfSpace::new(Vec4::new(-0.0000, -0.2425, -0.9701, 2.9104)),
                HalfSpace::new(Vec4::new(-0.0000, -1.0000, -0.0000, 4.0000)),
                HalfSpace::new(Vec4::new(-0.0000, -0.2425, 0.9701, 2.9104)),
                HalfSpace::new(Vec4::new(0.9701, -0.2425, -0.0000, -1.9403)),
            ],
        }
    }

    #[test]
    fn intersects_sphere_big_frustum_outside() {
        // Sphere outside frustum
        let frustum = big_frustum();
        let sphere = Sphere {
            center: Vec3A::new(0.9167, 0.0000, 0.0000),
            radius: 0.7500,
        };
        assert!(!frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_big_frustum_intersect() {
        // Sphere intersects frustum boundary
        let frustum = big_frustum();
        let sphere = Sphere {
            center: Vec3A::new(7.9288, 0.0000, 2.9728),
            radius: 2.0000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    // A frustum (small, symmetric, centered near the origin)
    fn frustum() -> Frustum {
        Frustum {
            half_spaces: [
                HalfSpace::new(Vec4::new(-0.9701, -0.2425, -0.0000, 0.7276)),
                HalfSpace::new(Vec4::new(-0.0000, 1.0000, -0.0000, 1.0000)),
                HalfSpace::new(Vec4::new(-0.0000, -0.2425, -0.9701, 0.7276)),
                HalfSpace::new(Vec4::new(-0.0000, -1.0000, -0.0000, 1.0000)),
                HalfSpace::new(Vec4::new(-0.0000, -0.2425, 0.9701, 0.7276)),
                HalfSpace::new(Vec4::new(0.9701, -0.2425, -0.0000, 0.7276)),
            ],
        }
    }

    #[test]
    fn intersects_sphere_frustum_surrounding() {
        // Sphere surrounds frustum
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(0.0000, 0.0000, 0.0000),
            radius: 3.0000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_frustum_contained() {
        // Sphere is contained in frustum
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(0.0000, 0.0000, 0.0000),
            radius: 0.7000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_frustum_intersects_plane() {
        // Sphere intersects a plane
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(0.0000, 0.0000, 0.9695),
            radius: 0.7000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_frustum_intersects_2_planes() {
        // Sphere intersects 2 planes
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(1.2037, 0.0000, 0.9695),
            radius: 0.7000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_frustum_intersects_3_planes() {
        // Sphere intersects 3 planes
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(1.2037, -1.0988, 0.9695),
            radius: 0.7000,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_frustum_dodges_1_plane() {
        // Sphere avoids intersecting the frustum by 1 plane
        let frustum = frustum();
        let sphere = Sphere {
            center: Vec3A::new(-1.7020, 0.0000, 0.0000),
            radius: 0.7000,
        };
        assert!(!frustum.intersects_sphere(&sphere, true));
    }

    // A long frustum.
    fn long_frustum() -> Frustum {
        Frustum {
            half_spaces: [
                HalfSpace::new(Vec4::new(-0.9998, -0.0222, -0.0000, -1.9543)),
                HalfSpace::new(Vec4::new(-0.0000, 1.0000, -0.0000, 45.1249)),
                HalfSpace::new(Vec4::new(-0.0000, -0.0168, -0.9999, 2.2718)),
                HalfSpace::new(Vec4::new(-0.0000, -1.0000, -0.0000, 45.1249)),
                HalfSpace::new(Vec4::new(-0.0000, -0.0168, 0.9999, 2.2718)),
                HalfSpace::new(Vec4::new(0.9998, -0.0222, -0.0000, 7.9528)),
            ],
        }
    }

    #[test]
    fn intersects_sphere_long_frustum_outside() {
        // Sphere outside frustum
        let frustum = long_frustum();
        let sphere = Sphere {
            center: Vec3A::new(-4.4889, 46.9021, 0.0000),
            radius: 0.7500,
        };
        assert!(!frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn intersects_sphere_long_frustum_intersect() {
        // Sphere intersects frustum boundary
        let frustum = long_frustum();
        let sphere = Sphere {
            center: Vec3A::new(-4.9957, 0.0000, -0.7396),
            radius: 4.4094,
        };
        assert!(frustum.intersects_sphere(&sphere, true));
    }

    #[test]
    fn aabb_enclosing() {
        // Empty input has no enclosing AABB.
        assert_eq!(Aabb::enclosing([] as [Vec3; 0]), None);
        assert_eq!(
            Aabb::enclosing(vec![Vec3::ONE]).unwrap(),
            Aabb::from_min_max(Vec3::ONE, Vec3::ONE)
        );
        assert_eq!(
            Aabb::enclosing(&[Vec3::Y, Vec3::X, Vec3::Z][..]).unwrap(),
            Aabb::from_min_max(Vec3::ZERO, Vec3::ONE)
        );
        assert_eq!(
            Aabb::enclosing([
                Vec3::NEG_X,
                Vec3::X * 2.0,
                Vec3::NEG_Y * 5.0,
                Vec3::Z,
                Vec3::ZERO
            ])
            .unwrap(),
            Aabb::from_min_max(Vec3::new(-1.0, -5.0, 0.0), Vec3::new(2.0, 0.0, 1.0))
        );
    }

    // A frustum with an offset for testing the [`Frustum::contains_aabb`] algorithm.
    fn contains_aabb_test_frustum() -> Frustum {
        let proj = PerspectiveProjection {
            fov: 90.0_f32.to_radians(),
            aspect_ratio: 1.0,
            near: 1.0,
            far: 100.0,
            ..PerspectiveProjection::default()
        };
        proj.compute_frustum(&GlobalTransform::from_translation(Vec3::new(2.0, 2.0, 0.0)))
    }

    // A frustum sized so a rotated test box exactly spans near to far.
    fn contains_aabb_test_frustum_with_rotation() -> Frustum {
        let half_extent_world = (((49.5 * 49.5) * 0.5) as f32).sqrt() + 0.5f32.sqrt();
        let near = 50.5 - half_extent_world;
        let far = near + 2.0 * half_extent_world;
        let fov = 2.0 * ops::atan(half_extent_world / near);
        let proj = PerspectiveProjection {
            aspect_ratio: 1.0,
            near,
            far,
            fov,
            ..PerspectiveProjection::default()
        };
        proj.compute_frustum(&GlobalTransform::IDENTITY)
    }

    #[test]
    fn aabb_inside_frustum() {
        let frustum = contains_aabb_test_frustum();
        let aabb = Aabb {
            center: Vec3A::ZERO,
            half_extents: Vec3A::new(0.99, 0.99, 49.49),
        };
        let model = Affine3A::from_translation(Vec3::new(2.0, 2.0, -50.5));
        assert!(frustum.contains_aabb(&aabb, &model));
    }

    #[test]
    fn aabb_intersect_frustum() {
        let frustum = contains_aabb_test_frustum();
        let aabb = Aabb {
            center: Vec3A::ZERO,
            half_extents: Vec3A::new(0.99, 0.99, 49.6),
        };
        let model = Affine3A::from_translation(Vec3::new(2.0, 2.0, -50.5));
        assert!(!frustum.contains_aabb(&aabb, &model));
    }

    #[test]
    fn aabb_outside_frustum() {
        let frustum = contains_aabb_test_frustum();
        let aabb = Aabb {
            center: Vec3A::ZERO,
            half_extents: Vec3A::new(0.99, 0.99, 0.99),
        };
        let model = Affine3A::from_translation(Vec3::new(0.0, 0.0, 49.6));
        assert!(!frustum.contains_aabb(&aabb, &model));
    }

    #[test]
    fn aabb_inside_frustum_rotation() {
        let frustum = contains_aabb_test_frustum_with_rotation();
        let aabb = Aabb {
            center: Vec3A::new(0.0, 0.0, 0.0),
            half_extents: Vec3A::new(0.99, 0.99, 49.49),
        };
        let model = Affine3A::from_rotation_translation(
            Quat::from_rotation_x(PI / 4.0),
            Vec3::new(0.0, 0.0, -50.5),
        );
        assert!(frustum.contains_aabb(&aabb, &model));
    }

    #[test]
    fn aabb_intersect_frustum_rotation() {
        let frustum = contains_aabb_test_frustum_with_rotation();
        let aabb = Aabb {
            center: Vec3A::new(0.0, 0.0, 0.0),
            half_extents: Vec3A::new(0.99, 0.99, 49.6),
        };
        let model = Affine3A::from_rotation_translation(
            Quat::from_rotation_x(PI / 4.0),
            Vec3::new(0.0, 0.0, -50.5),
        );
        assert!(!frustum.contains_aabb(&aabb, &model));
    }

    // The identity-transform fast path must agree with the general path.
    #[test]
    fn test_identity_optimized_equivalence() {
        let cases = vec![
            (
                Aabb {
                    center: Vec3A::ZERO,
                    half_extents: Vec3A::splat(1.0),
                },
                HalfSpace::new(Vec4::new(1.0, 0.0, 0.0, -0.5)),
            ),
            (
                Aabb {
                    center: Vec3A::new(2.0, -1.0, 0.5),
                    half_extents: Vec3A::new(1.0, 2.0, 0.5),
                },
                HalfSpace::new(Vec4::new(1.0, 1.0, 1.0, -1.0).normalize()),
            ),
            (
                // Degenerate (zero-extent) box.
                Aabb {
                    center: Vec3A::new(1.0, 1.0, 1.0),
                    half_extents: Vec3A::ZERO,
                },
                HalfSpace::new(Vec4::new(0.0, 0.0, 1.0, -2.0)),
            ),
        ];
        for (aabb, half_space) in cases {
            let general = aabb.is_in_half_space(&half_space, &Affine3A::IDENTITY);
            let identity = aabb.is_in_half_space_identity(&half_space);
            assert_eq!(general, identity,);
        }
    }

    // The identity OBB fast path must agree with the general path.
    #[test]
    fn intersects_obb_identity_matches_standard_true_true() {
        let frusta = [frustum(), long_frustum(), big_frustum()];
        let aabbs = [
            Aabb {
                center: Vec3A::ZERO,
                half_extents: Vec3A::new(0.5, 0.5, 0.5),
            },
            Aabb {
                center: Vec3A::new(1.0, 0.0, 0.5),
                half_extents: Vec3A::new(0.9, 0.9, 0.9),
            },
            Aabb {
                center: Vec3A::new(100.0, 100.0, 100.0),
                half_extents: Vec3A::new(1.0, 1.0, 1.0),
            },
        ];
        for fr in &frusta {
            for aabb in &aabbs {
                let standard = fr.intersects_obb(aabb, &Affine3A::IDENTITY, true, true);
                let optimized = fr.intersects_obb_identity(aabb);
                assert_eq!(standard, optimized);
            }
        }
    }
}
use core::fmt::Debug;
use core::ops::{Deref, DerefMut};
use crate::{primitives::Frustum, visibility::VisibilitySystems};
use bevy_app::{App, Plugin, PostUpdate};
use bevy_ecs::prelude::*;
use bevy_math::{ops, vec4, AspectRatio, Mat4, Rect, Vec2, Vec3A, Vec4};
use bevy_reflect::{std_traits::ReflectDefault, Reflect, ReflectDeserialize, ReflectSerialize};
use bevy_transform::{components::GlobalTransform, TransformSystems};
use derive_more::derive::From;
use serde::{Deserialize, Serialize};
/// Adds [`Camera`](crate::camera::Camera) driver systems for a given projection type.
///
/// If you are using `bevy_pbr`, then you need to add `PbrProjectionPlugin` along with this.
#[derive(Default)]
pub struct CameraProjectionPlugin;

impl Plugin for CameraProjectionPlugin {
    fn build(&self, app: &mut App) {
        app.add_systems(
            PostUpdate,
            // Frusta are rebuilt after transform propagation so they reflect
            // this frame's final camera transforms.
            crate::visibility::update_frusta
                .in_set(VisibilitySystems::UpdateFrusta)
                .after(TransformSystems::Propagate),
        );
    }
}
/// Describes a type that can generate a projection matrix, allowing it to be added to a
/// [`Camera`]'s [`Projection`] component.
///
/// Once implemented, the projection can be added to a camera using [`Projection::custom`].
///
/// The projection will be automatically updated as the render area is resized. This is useful when,
/// for example, a projection type has a field like `fov` that should change when the window width
/// is changed but not when the height changes.
///
/// This trait is implemented by bevy's built-in projections [`PerspectiveProjection`] and
/// [`OrthographicProjection`].
///
/// [`Camera`]: crate::camera::Camera
pub trait CameraProjection {
    /// Generate the projection matrix.
    fn get_clip_from_view(&self) -> Mat4;
    /// Generate the projection matrix for a [`SubCameraView`](super::SubCameraView).
    fn get_clip_from_view_for_sub(&self, sub_view: &super::SubCameraView) -> Mat4;
    /// When the area this camera renders to changes dimensions, this method will be automatically
    /// called. Use this to update any projection properties that depend on the aspect ratio or
    /// dimensions of the render area.
    fn update(&mut self, width: f32, height: f32);
    /// The far plane distance of the projection.
    fn far(&self) -> f32;
    /// The eight corners of the camera frustum, as defined by this projection.
    ///
    /// The corners should be provided in the following order: first the bottom right, top right,
    /// top left, bottom left for the near plane, then similar for the far plane.
    // TODO: This seems somewhat redundant with `compute_frustum`, and similarly should be possible
    // to compute with a default impl.
    fn get_frustum_corners(&self, z_near: f32, z_far: f32) -> [Vec3A; 8];
    /// Compute camera frustum for camera with given projection and transform.
    ///
    /// This code is called by [`update_frusta`](crate::visibility::update_frusta) system
    /// for each camera to update its frustum.
    fn compute_frustum(&self, camera_transform: &GlobalTransform) -> Frustum {
        // clip_from_world = clip_from_view * view_from_world (inverse camera transform).
        let clip_from_world = self.get_clip_from_view() * camera_transform.affine().inverse();
        Frustum::from_clip_from_world_custom_far(
            &clip_from_world,
            &camera_transform.translation(),
            &camera_transform.back(),
            self.far(),
        )
    }
}
mod sealed {
    use super::CameraProjection;

    /// A wrapper trait to make it possible to implement Clone for boxed [`CameraProjection`](`super::CameraProjection`)
    /// trait objects, without breaking object safety rules by making it `Sized`. Additional bounds
    /// are included for downcasting, and fulfilling the trait bounds on `Projection`.
    pub trait DynCameraProjection:
        CameraProjection + core::fmt::Debug + Send + Sync + downcast_rs::Downcast
    {
        /// Clones the underlying concrete projection into a fresh boxed trait object.
        fn clone_box(&self) -> Box<dyn DynCameraProjection>;
    }

    downcast_rs::impl_downcast!(DynCameraProjection);

    // Blanket impl: any clonable `CameraProjection` automatically gets `clone_box`,
    // so users never have to implement this sealed trait themselves.
    impl<T> DynCameraProjection for T
    where
        T: 'static + CameraProjection + core::fmt::Debug + Send + Sync + Clone,
    {
        fn clone_box(&self) -> Box<dyn DynCameraProjection> {
            Box::new(self.clone())
        }
    }
}
/// Holds a dynamic [`CameraProjection`] trait object. Use [`Projection::custom()`] to construct a
/// custom projection.
///
/// The contained dynamic object can be downcast into a static type using [`CustomProjection::get`].
#[derive(Debug, Reflect)]
#[reflect(Default, Clone)]
pub struct CustomProjection {
    // Boxed trait object; excluded from reflection because trait objects
    // cannot be reflected.
    #[reflect(ignore)]
    dyn_projection: Box<dyn sealed::DynCameraProjection>,
}

impl Default for CustomProjection {
    fn default() -> Self {
        Self {
            // Fall back to the standard perspective projection.
            dyn_projection: Box::new(PerspectiveProjection::default()),
        }
    }
}

// Manual impl: `Box<dyn Trait>` cannot derive `Clone`, so clone through the
// sealed `clone_box` hook.
impl Clone for CustomProjection {
    fn clone(&self) -> Self {
        Self {
            dyn_projection: self.dyn_projection.clone_box(),
        }
    }
}
impl CustomProjection {
    /// Returns a reference to the [`CameraProjection`] `P`.
    ///
    /// Returns `None` if this dynamic object is not a projection of type `P`.
    ///
    /// ```
    /// # use bevy_camera::{Projection, PerspectiveProjection};
    /// // For simplicity's sake, use perspective as a custom projection:
    /// let projection = Projection::custom(PerspectiveProjection::default());
    /// let Projection::Custom(custom) = projection else { return };
    ///
    /// // At this point the projection type is erased.
    /// // We can use `get()` if we know what kind of projection we have.
    /// let perspective = custom.get::<PerspectiveProjection>().unwrap();
    ///
    /// assert_eq!(perspective.fov, PerspectiveProjection::default().fov);
    /// ```
    pub fn get<P>(&self) -> Option<&P>
    where
        P: CameraProjection + Debug + Send + Sync + Clone + 'static,
    {
        // Delegates to downcast_rs; returns None on a type mismatch.
        self.dyn_projection.downcast_ref()
    }
    /// Returns a mutable reference to the [`CameraProjection`] `P`.
    ///
    /// Returns `None` if this dynamic object is not a projection of type `P`.
    ///
    /// ```
    /// # use bevy_camera::{Projection, PerspectiveProjection};
    /// // For simplicity's sake, use perspective as a custom projection:
    /// let mut projection = Projection::custom(PerspectiveProjection::default());
    /// let Projection::Custom(mut custom) = projection else { return };
    ///
    /// // At this point the projection type is erased.
    /// // We can use `get_mut()` if we know what kind of projection we have.
    /// let perspective = custom.get_mut::<PerspectiveProjection>().unwrap();
    ///
    /// assert_eq!(perspective.fov, PerspectiveProjection::default().fov);
    /// perspective.fov = 1.0;
    /// ```
    pub fn get_mut<P>(&mut self) -> Option<&mut P>
    where
        P: CameraProjection + Debug + Send + Sync + Clone + 'static,
    {
        self.dyn_projection.downcast_mut()
    }
}
// Expose the boxed projection directly as `dyn CameraProjection`, so a
// `CustomProjection` can be used wherever the trait is expected.
impl Deref for CustomProjection {
    type Target = dyn CameraProjection;
    fn deref(&self) -> &Self::Target {
        self.dyn_projection.as_ref()
    }
}

impl DerefMut for CustomProjection {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.dyn_projection.as_mut()
    }
}
/// Component that defines how to compute a [`Camera`]'s projection matrix.
///
/// Common projections, like perspective and orthographic, are provided out of the box to handle the
/// majority of use cases. Custom projections can be added using the [`CameraProjection`] trait and
/// the [`Projection::custom`] constructor.
///
/// ## What's a projection?
///
/// A camera projection essentially describes how 3d points from the point of view of a camera are
/// projected onto a 2d screen. This is where properties like a camera's field of view are defined.
/// More specifically, a projection is a 4x4 matrix that transforms points from view space (the
/// point of view of the camera) into clip space. Clip space is almost, but not quite, equivalent to
/// the rectangle that is rendered to your screen, with a depth axis. Any points that land outside
/// the bounds of this cuboid are "clipped" and not rendered.
///
/// You can also think of the projection as the thing that describes the shape of a camera's
/// frustum: the volume in 3d space that is visible to a camera.
///
/// [`Camera`]: crate::camera::Camera
#[derive(Component, Debug, Clone, Reflect, From)]
#[reflect(Component, Default, Debug, Clone)]
pub enum Projection {
    /// A perspective projection; see [`PerspectiveProjection`].
    Perspective(PerspectiveProjection),
    /// An orthographic projection; see [`OrthographicProjection`].
    Orthographic(OrthographicProjection),
    /// A user-provided projection; see [`Projection::custom`].
    Custom(CustomProjection),
}
impl Projection {
    /// Construct a new custom camera projection from a type that implements [`CameraProjection`].
    pub fn custom<P>(projection: P) -> Self
    where
        // Implementation note: pushing these trait bounds all the way out to this function makes
        // errors nice for users. If a trait is missing, they will get a helpful error telling them
        // that, say, the `Debug` implementation is missing. Wrapping these traits behind a super
        // trait or some other indirection will make the errors harder to understand.
        //
        // For example, we don't use the `DynCameraProjection` trait bound, because it is not the
        // trait the user should be implementing - they only need to worry about implementing
        // `CameraProjection`.
        P: CameraProjection + Debug + Send + Sync + Clone + 'static,
    {
        Projection::Custom(CustomProjection {
            dyn_projection: Box::new(projection),
        })
    }
    /// Check if the projection is perspective.
    /// For [`CustomProjection`], this checks if the projection matrix's w-axis's w is 0.0.
    pub fn is_perspective(&self) -> bool {
        match self {
            Projection::Perspective(_) => true,
            Projection::Orthographic(_) => false,
            // Perspective matrices put 0 in the bottom-right cell, orthographic
            // ones put 1 — NOTE(review): heuristic only for exotic custom matrices.
            Projection::Custom(projection) => projection.get_clip_from_view().w_axis.w == 0.0,
        }
    }
}
// Dispatch to the active variant as a `dyn CameraProjection`.
impl Deref for Projection {
    type Target = dyn CameraProjection;
    fn deref(&self) -> &Self::Target {
        match self {
            Projection::Perspective(projection) => projection,
            Projection::Orthographic(projection) => projection,
            Projection::Custom(projection) => projection.deref(),
        }
    }
}

impl DerefMut for Projection {
    fn deref_mut(&mut self) -> &mut Self::Target {
        match self {
            Projection::Perspective(projection) => projection,
            Projection::Orthographic(projection) => projection,
            Projection::Custom(projection) => projection.deref_mut(),
        }
    }
}

// Cameras default to a perspective projection.
impl Default for Projection {
    fn default() -> Self {
        Projection::Perspective(Default::default())
    }
}
/// A 3D camera projection in which distant objects appear smaller than close objects.
#[derive(Debug, Clone, Reflect)]
#[reflect(Default, Debug, Clone)]
pub struct PerspectiveProjection {
    /// The vertical field of view (FOV) in radians.
    ///
    /// Defaults to a value of π/4 radians or 45 degrees.
    pub fov: f32,
    /// The aspect ratio (width divided by height) of the viewing frustum.
    ///
    /// Bevy's `camera_system` automatically updates this value when the aspect ratio
    /// of the associated window changes.
    ///
    /// Defaults to a value of `1.0`.
    pub aspect_ratio: f32,
    /// The distance from the camera in world units of the viewing frustum's near plane.
    ///
    /// Objects closer to the camera than this value will not be visible.
    ///
    /// Defaults to a value of `0.1`.
    pub near: f32,
    /// The distance from the camera in world units of the viewing frustum's far plane.
    ///
    /// Objects farther from the camera than this value will not be visible.
    ///
    /// Defaults to a value of `1000.0`.
    pub far: f32,
    /// The orientation of a custom clipping plane, as well as its distance from
    /// the camera.
    ///
    /// If you supply a plane here, anything in front of the plane will be
    /// clipped out. This is useful for portals and mirrors, in order to clip
    /// any geometry that would pass through the plane of the portal or mirror.
    ///
    /// The X, Y, and Z components of the vector describe its normal, in view
    /// space. This normal vector must have length 1, and it should point away
    /// from the camera. (That is, only geometry on the side of the plane that
    /// the normal points toward will be rendered.) The W component of the
    /// vector must be the *negative shortest signed distance* from the camera
    /// to the plane, again in view space. This final component can also be
    /// computed as -(N · Q), where N is the normal of the plane and Q is any
    /// point on it.
    ///
    /// By default, this is (0, 0, -1, -[`Self::near`]), which describes a near
    /// plane located [`Self::near`] meters away pointing directly away from the
    /// camera. (`Default` sets it to `(0, 0, -1, -0.1)`, matching the default
    /// `near` of `0.1`.)
    ///
    /// See the `calculate_mirror_camera_transform_and_projection` function in
    /// the `mirror` example for an exhaustive example of usage.
    pub near_clip_plane: Vec4,
}
impl CameraProjection for PerspectiveProjection {
    /// Builds an infinite, reversed-depth right-handed perspective matrix,
    /// then applies the optional oblique near-clip-plane adjustment.
    fn get_clip_from_view(&self) -> Mat4 {
        let mut matrix =
            Mat4::perspective_infinite_reverse_rh(self.fov, self.aspect_ratio, self.near);
        self.adjust_perspective_matrix_for_clip_plane(&mut matrix);
        matrix
    }
    /// Builds an off-axis (asymmetric) projection covering only the requested
    /// sub-rectangle of the full view.
    fn get_clip_from_view_for_sub(&self, sub_view: &super::SubCameraView) -> Mat4 {
        let full_width = sub_view.full_size.x as f32;
        let full_height = sub_view.full_size.y as f32;
        let sub_width = sub_view.size.x as f32;
        let sub_height = sub_view.size.y as f32;
        let offset_x = sub_view.offset.x;
        // Y-axis increases from top to bottom
        let offset_y = full_height - (sub_view.offset.y + sub_height);
        let full_aspect = full_width / full_height;
        // Original frustum parameters (extents of the full view's near plane)
        let top = self.near * ops::tan(0.5 * self.fov);
        let bottom = -top;
        let right = top * full_aspect;
        let left = -right;
        // Calculate scaling factors
        let width = right - left;
        let height = top - bottom;
        // Calculate the new frustum parameters (the sub-rectangle mapped onto
        // the full near-plane extents)
        let left_prime = left + (width * offset_x) / full_width;
        let right_prime = left + (width * (offset_x + sub_width)) / full_width;
        let bottom_prime = bottom + (height * offset_y) / full_height;
        let top_prime = bottom + (height * (offset_y + sub_height)) / full_height;
        // Compute the new projection matrix
        let x = (2.0 * self.near) / (right_prime - left_prime);
        let y = (2.0 * self.near) / (top_prime - bottom_prime);
        let a = (right_prime + left_prime) / (right_prime - left_prime);
        let b = (top_prime + bottom_prime) / (top_prime - bottom_prime);
        // Column-major asymmetric frustum, infinite reversed-depth (same depth
        // convention as `get_clip_from_view`).
        let mut matrix = Mat4::from_cols(
            Vec4::new(x, 0.0, 0.0, 0.0),
            Vec4::new(0.0, y, 0.0, 0.0),
            Vec4::new(a, b, 0.0, -1.0),
            Vec4::new(0.0, 0.0, self.near, 0.0),
        );
        self.adjust_perspective_matrix_for_clip_plane(&mut matrix);
        matrix
    }
    /// Recomputes the aspect ratio from the render area dimensions.
    ///
    /// # Panics
    /// Panics if `width` or `height` is not a positive, non-zero value.
    fn update(&mut self, width: f32, height: f32) {
        self.aspect_ratio = AspectRatio::try_new(width, height)
            .expect("Failed to update PerspectiveProjection: width and height must be positive, non-zero values")
            .ratio();
    }
    fn far(&self) -> f32 {
        self.far
    }
    fn get_frustum_corners(&self, z_near: f32, z_far: f32) -> [Vec3A; 8] {
        // Half-heights of the frustum slice at the near and far depths.
        let tan_half_fov = ops::tan(self.fov / 2.);
        let a = z_near.abs() * tan_half_fov;
        let b = z_far.abs() * tan_half_fov;
        let aspect_ratio = self.aspect_ratio;
        // NOTE: These vertices are in the specific order required by [`calculate_cascade`].
        [
            Vec3A::new(a * aspect_ratio, -a, z_near), // bottom right
            Vec3A::new(a * aspect_ratio, a, z_near),  // top right
            Vec3A::new(-a * aspect_ratio, a, z_near), // top left
            Vec3A::new(-a * aspect_ratio, -a, z_near), // bottom left
            Vec3A::new(b * aspect_ratio, -b, z_far),  // bottom right
            Vec3A::new(b * aspect_ratio, b, z_far),   // top right
            Vec3A::new(-b * aspect_ratio, b, z_far),  // top left
            Vec3A::new(-b * aspect_ratio, -b, z_far), // bottom left
        ]
    }
}
impl Default for PerspectiveProjection {
    fn default() -> Self {
        PerspectiveProjection {
            // 45° vertical field of view.
            fov: core::f32::consts::PI / 4.0,
            near: 0.1,
            far: 1000.0,
            aspect_ratio: 1.0,
            // Matches the default `near` (plane z = -0.1 in view space), so no
            // oblique-clip adjustment is applied by default.
            near_clip_plane: vec4(0.0, 0.0, -1.0, -0.1),
        }
    }
}
impl PerspectiveProjection {
    /// Adjusts the perspective matrix for an oblique clip plane if necessary.
    ///
    /// This changes the near and (infinite) far planes so that they correctly
    /// clip everything in front of the [`Self::near_clip_plane`]. See [Lengyel
    /// 2005] for an exhaustive treatment of the way this works. Custom near
    /// clip planes are typically used for portals and mirrors; see
    /// `examples/3d/mirror.rs` for an example of usage.
    ///
    /// [Lengyel 2005]: https://terathon.com/lengyel/Lengyel-Oblique.pdf
    fn adjust_perspective_matrix_for_clip_plane(&self, matrix: &mut Mat4) {
        // If we don't have an oblique clip plane, save ourselves the trouble.
        // (The default clip plane is z = -near in view space, which matches the
        // matrix's built-in near plane exactly.)
        if self.near_clip_plane == vec4(0.0, 0.0, -1.0, -self.near) {
            return;
        }
        // To understand this, refer to [Lengyel 2005]. The notation follows the
        // paper. The formulas are different because the paper uses a standard
        // OpenGL convention (near -1, far 1), while we use a reversed Vulkan
        // convention (near 1, far 0).
        //
        // [Lengyel 2005]: https://terathon.com/lengyel/Lengyel-Oblique.pdf
        let c = self.near_clip_plane;
        // First, calculate the position of Q′, the corner in clip space lying
        // opposite from the near clip plane. This is identical to equation (7).
        // Note that this is a point at infinity in view space, because we use
        // an infinite far plane, but in clip space it's finite.
        let q_prime = vec4(c.x.signum(), c.y.signum(), 0.0, 1.0);
        // Now convert that point to view space. This *will* be a point at
        // infinity, but that's OK because we're in homogeneous coordinates.
        let q = matrix.inverse() * q_prime;
        // Here we're computing the scaling factor to apply to the near plane so
        // that the far plane will intersect Q. This one differs from the paper.
        // Using the notation Mᵢ to mean the *i*th row of the matrix M, start by
        // observing that the near plane (z = 1) is described by M₄ - M₃ and the
        // far plane (z = 0) is described by simply M₃. So:
        //
        // * Equation (4) becomes C = M₄ - M₃.
        // * Equation (5) becomes M′₃ = M₄ - C.
        // * Equation (6) becomes F = M′₃ = M₄ - C.
        // * Equation (8) becomes F = M₄ - aC.
        // * Equation (9) becomes F · Q = 0 ⇒ (M₄ - aC) · Q = 0.
        //
        // And, solving the modified equation (9), we get:
        //
        //            M₄ · Q
        //       a = ⎯⎯⎯⎯⎯⎯
        //            C · Q
        //
        // Because M₄ = (0, 0, -1, 0) (just as it is in the paper), this reduces to:
        //
        //             -Qz
        //       a = ⎯⎯⎯⎯⎯
        //            C · Q
        //
        // Which is what we calculate here.
        let a = -q.z / c.dot(q);
        // Finally, we have the revised equation (10), which is M′₃ = M₄ - aC.
        // Similarly to the above, this simplifies to M′₃ = (0, 0, -1, 0) - aC.
        let m3_prime = Vec4::NEG_Z - c * a;
        // We have the replacement third row; write it in. (Mat4 is stored
        // column-major, so the row's elements live at `.z` of each column.)
        matrix.x_axis.z = m3_prime.x;
        matrix.y_axis.z = m3_prime.y;
        matrix.z_axis.z = m3_prime.z;
        matrix.w_axis.z = m3_prime.w;
    }
}
/// Scaling mode for [`OrthographicProjection`].
///
/// The effect of these scaling modes are combined with the [`OrthographicProjection::scale`] property.
///
/// For example, if the scaling mode is `ScalingMode::Fixed { width: 100.0, height: 300.0 }` and the scale is `2.0`,
/// the projection will be 200 world units wide and 600 world units tall.
///
/// # Examples
///
/// Configure the orthographic projection to two world units per window height:
///
/// ```
/// # use bevy_camera::{OrthographicProjection, Projection, ScalingMode};
/// let projection = Projection::Orthographic(OrthographicProjection {
///    scaling_mode: ScalingMode::FixedVertical { viewport_height: 2.0 },
///    ..OrthographicProjection::default_2d()
/// });
/// ```
#[derive(Default, Debug, Clone, Copy, Reflect, Serialize, Deserialize)]
#[reflect(Serialize, Deserialize, Default, Clone)]
pub enum ScalingMode {
    /// Match the viewport size.
    ///
    /// With a scale of 1, lengths in world units will map 1:1 with the number of pixels used to render it.
    /// For example, if we have a 64x64 sprite with a [`Transform::scale`](bevy_transform::prelude::Transform) of 1.0,
    /// no custom size and no inherited scale, the sprite will be 64 world units wide and 64 world units tall.
    /// When rendered with [`OrthographicProjection::scaling_mode`] set to `WindowSize` when the window scale factor is 1
    /// the sprite will be rendered at 64 pixels wide and 64 pixels tall.
    ///
    /// Changing any of these properties will multiplicatively affect the final size.
    #[default]
    WindowSize,
    /// Manually specify the projection's size, ignoring window resizing. The image will stretch.
    ///
    /// Arguments describe the area of the world that is shown (in world units).
    Fixed { width: f32, height: f32 },
    /// Keeping the aspect ratio while the axes can't be smaller than given minimum.
    ///
    /// Arguments are in world units.
    AutoMin { min_width: f32, min_height: f32 },
    /// Keeping the aspect ratio while the axes can't be bigger than given maximum.
    ///
    /// Arguments are in world units.
    AutoMax { max_width: f32, max_height: f32 },
    /// Keep the projection's height constant; width will be adjusted to match aspect ratio.
    ///
    /// The argument is the desired height of the projection in world units.
    FixedVertical { viewport_height: f32 },
    /// Keep the projection's width constant; height will be adjusted to match aspect ratio.
    ///
    /// The argument is the desired width of the projection in world units.
    FixedHorizontal { viewport_width: f32 },
}
/// Project a 3D space onto a 2D surface using parallel lines, i.e., unlike [`PerspectiveProjection`],
/// the size of objects remains the same regardless of their distance to the camera.
///
/// The volume contained in the projection is called the *view frustum*. Since the viewport is rectangular
/// and projection lines are parallel, the view frustum takes the shape of a cuboid.
///
/// Note that the scale of the projection and the apparent size of objects are inversely proportional.
/// As the size of the projection increases, the size of objects decreases.
///
/// # Examples
///
/// Configure the orthographic projection to one world unit per 100 window pixels:
///
/// ```
/// # use bevy_camera::{OrthographicProjection, Projection, ScalingMode};
/// let projection = Projection::Orthographic(OrthographicProjection {
///     scaling_mode: ScalingMode::WindowSize,
///     scale: 0.01,
///     ..OrthographicProjection::default_2d()
/// });
/// ```
#[derive(Debug, Clone, Reflect)]
#[reflect(Debug, FromWorld, Clone)]
pub struct OrthographicProjection {
    /// The distance of the near clipping plane in world units.
    ///
    /// Objects closer than this will not be rendered.
    ///
    /// Defaults to `0.0`
    pub near: f32,
    /// The distance of the far clipping plane in world units.
    ///
    /// Objects further than this will not be rendered.
    ///
    /// Defaults to `1000.0`
    pub far: f32,
    /// Specifies the origin of the viewport as a normalized position from 0 to 1, where (0, 0) is the bottom left
    /// and (1, 1) is the top right. This determines where the camera's position sits inside the viewport.
    ///
    /// When the projection scales due to viewport resizing, the position of the camera, and thereby `viewport_origin`,
    /// remains at the same relative point.
    ///
    /// Consequently, this is the pivot point when scaling. With a bottom left pivot, the projection will expand
    /// upwards and to the right. With a top right pivot, the projection will expand downwards and to the left.
    /// Values in between will cause the projection to scale proportionally on each axis.
    ///
    /// Defaults to `(0.5, 0.5)`, which makes scaling affect opposite sides equally, keeping the center
    /// point of the viewport centered.
    pub viewport_origin: Vec2,
    /// How the projection will scale to the viewport.
    ///
    /// Defaults to [`ScalingMode::WindowSize`],
    /// and works in concert with [`OrthographicProjection::scale`] to determine the final effect.
    ///
    /// For simplicity, zooming should be done by changing [`OrthographicProjection::scale`],
    /// rather than changing the parameters of the scaling mode.
    pub scaling_mode: ScalingMode,
    /// Scales the projection.
    ///
    /// As scale increases, the apparent size of objects decreases, and vice versa.
    ///
    /// Note: scaling can be set by [`scaling_mode`](Self::scaling_mode) as well.
    /// This parameter scales on top of that.
    ///
    /// This property is particularly useful in implementing zoom functionality.
    ///
    /// Defaults to `1.0`, which under standard settings corresponds to a 1:1 mapping of world units to rendered pixels.
    /// See [`ScalingMode::WindowSize`] for more information.
    pub scale: f32,
    /// The area that the projection covers relative to `viewport_origin`.
    ///
    /// Bevy's `camera_system` automatically
    /// updates this value when the viewport is resized depending on `OrthographicProjection`'s other fields.
    /// In this case, `area` should not be manually modified.
    ///
    /// It may be necessary to set this manually for shadow projections and such.
    pub area: Rect,
}
impl CameraProjection for OrthographicProjection {
fn get_clip_from_view(&self) -> Mat4 {
Mat4::orthographic_rh(
self.area.min.x,
self.area.max.x,
self.area.min.y,
self.area.max.y,
// NOTE: near and far are swapped to invert the depth range from [0,1] to [1,0]
// This is for interoperability with pipelines using infinite reverse perspective projections.
self.far,
self.near,
)
}
fn get_clip_from_view_for_sub(&self, sub_view: &super::SubCameraView) -> Mat4 {
let full_width = sub_view.full_size.x as f32;
let full_height = sub_view.full_size.y as f32;
let offset_x = sub_view.offset.x;
let offset_y = sub_view.offset.y;
let sub_width = sub_view.size.x as f32;
let sub_height = sub_view.size.y as f32;
let full_aspect = full_width / full_height;
// Base the vertical size on self.area and adjust the horizontal size
let top = self.area.max.y;
let bottom = self.area.min.y;
let ortho_height = top - bottom;
let ortho_width = ortho_height * full_aspect;
// Center the orthographic area horizontally
let center_x = (self.area.max.x + self.area.min.x) / 2.0;
let left = center_x - ortho_width / 2.0;
let right = center_x + ortho_width / 2.0;
// Calculate scaling factors
let scale_w = (right - left) / full_width;
let scale_h = (top - bottom) / full_height;
// Calculate the new orthographic bounds
let left_prime = left + scale_w * offset_x;
let right_prime = left_prime + scale_w * sub_width;
let top_prime = top - scale_h * offset_y;
let bottom_prime = top_prime - scale_h * sub_height;
Mat4::orthographic_rh(
left_prime,
right_prime,
bottom_prime,
top_prime,
// NOTE: near and far are swapped to invert the depth range from [0,1] to [1,0]
// This is for interoperability with pipelines using infinite reverse perspective projections.
self.far,
self.near,
)
}
fn update(&mut self, width: f32, height: f32) {
let (projection_width, projection_height) = match self.scaling_mode {
ScalingMode::WindowSize => (width, height),
ScalingMode::AutoMin {
min_width,
min_height,
} => {
// Compare Pixels of current width and minimal height and Pixels of minimal width with current height.
// Then use bigger (min_height when true) as what it refers to (height when true) and calculate rest so it can't get under minimum.
if width * min_height > min_width * height {
(width * min_height / height, min_height)
} else {
(min_width, height * min_width / width)
}
}
ScalingMode::AutoMax {
max_width,
max_height,
} => {
// Compare Pixels of current width and maximal height and Pixels of maximal width with current height.
// Then use smaller (max_height when true) as what it refers to (height when true) and calculate rest so it can't get over maximum.
if width * max_height < max_width * height {
(width * max_height / height, max_height)
} else {
(max_width, height * max_width / width)
}
}
ScalingMode::FixedVertical { viewport_height } => {
(width * viewport_height / height, viewport_height)
}
ScalingMode::FixedHorizontal { viewport_width } => {
(viewport_width, height * viewport_width / width)
}
ScalingMode::Fixed { width, height } => (width, height),
};
let origin_x = projection_width * self.viewport_origin.x;
let origin_y = projection_height * self.viewport_origin.y;
self.area = Rect::new(
self.scale * -origin_x,
self.scale * -origin_y,
self.scale * (projection_width - origin_x),
self.scale * (projection_height - origin_y),
);
}
fn far(&self) -> f32 {
self.far
}
fn get_frustum_corners(&self, z_near: f32, z_far: f32) -> [Vec3A; 8] {
let area = self.area;
// NOTE: These vertices are in the specific order required by [`calculate_cascade`].
[
Vec3A::new(area.max.x, area.min.y, z_near), // bottom right
Vec3A::new(area.max.x, area.max.y, z_near), // top right
Vec3A::new(area.min.x, area.max.y, z_near), // top left
Vec3A::new(area.min.x, area.min.y, z_near), // bottom left
Vec3A::new(area.max.x, area.min.y, z_far), // bottom right
Vec3A::new(area.max.x, area.max.y, z_far), // top right
Vec3A::new(area.min.x, area.max.y, z_far), // top left
Vec3A::new(area.min.x, area.min.y, z_far), // bottom left
]
}
}
impl FromWorld for OrthographicProjection {
    /// Reflection/deserialization fallback: the 3D defaults.
    fn from_world(_world: &mut World) -> Self {
        Self::default_3d()
    }
}
impl OrthographicProjection {
    /// Returns the default orthographic projection for a 2D context.
    ///
    /// The near plane is set to a negative value so that the camera can still
    /// render the scene when using positive z coordinates to order foreground elements.
    pub fn default_2d() -> Self {
        let mut projection = Self::default_3d();
        // Pull the near plane behind the camera so positive-z content still renders.
        projection.near = -1000.0;
        projection
    }
    /// Returns the default orthographic projection for a 3D context.
    ///
    /// The near plane is set to 0.0 so that the camera doesn't render
    /// objects that are behind it.
    pub fn default_3d() -> Self {
        OrthographicProjection {
            near: 0.0,
            far: 1000.0,
            viewport_origin: Vec2::new(0.5, 0.5),
            scaling_mode: ScalingMode::WindowSize,
            scale: 1.0,
            area: Rect::new(-1.0, -1.0, 1.0, 1.0),
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/camera.rs | crates/bevy_camera/src/camera.rs | use crate::primitives::Frustum;
use super::{
visibility::{Visibility, VisibleEntities},
ClearColorConfig, MsaaWriteback,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};
/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
///
/// All coordinates are in physical pixels of the render target.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0, 0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}
impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: UVec2::ZERO,
            // A 1x1 viewport; a zero-sized viewport would be degenerate.
            physical_size: UVec2::ONE,
            // Full depth range.
            depth: 0.0..1.0,
        }
    }
}
impl Viewport {
    /// Clamps a single axis of the viewport rectangle so it fits inside `limit`.
    ///
    /// Helper for [`Self::clamp_to_size`]; `position`/`extent` are one axis of
    /// the viewport's origin and size respectively.
    fn clamp_axis(position: &mut u32, extent: &mut u32, limit: u32) {
        if *extent + *position > limit {
            if *position < limit {
                // Origin is inside: just trim the overhanging part.
                *extent = limit - *position;
            } else if limit > 0 {
                // Origin is outside: move it just barely inside, 1 pixel wide.
                *position = limit - 1;
                *extent = 1;
            } else {
                // Degenerate target: collapse this axis entirely.
                *position = 0;
                *extent = 0;
            }
        }
    }
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
    pub fn clamp_to_size(&mut self, size: UVec2) {
        Self::clamp_axis(
            &mut self.physical_position.x,
            &mut self.physical_size.x,
            size.x,
        );
        Self::clamp_axis(
            &mut self.physical_position.y,
            &mut self.physical_size.y,
            size.y,
        );
    }
    /// Combines an optional viewport with an optional [`MainPassResolutionOverride`].
    ///
    /// When an override is present, returns the viewport (or a default one)
    /// with its `physical_size` replaced by the override; otherwise returns a
    /// clone of `viewport` unchanged.
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        match main_pass_resolution_override {
            Some(override_size) => {
                let mut vp = viewport.cloned().unwrap_or_default();
                vp.physical_size = **override_size;
                Some(vp)
            }
            None => viewport.cloned(),
        }
    }
}
/// Overrides the resolution that a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);
/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the cameras viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would for
/// example have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view.
    pub full_size: UVec2,
    /// Offset of the sub camera within the full view (same units as `full_size`).
    pub offset: Vec2,
    /// Size of the sub camera.
    pub size: UVec2,
}
impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            // By default the sub view spans the entire (1x1) image.
            full_size: UVec2::ONE,
            offset: Vec2::ZERO,
            size: UVec2::ONE,
        }
    }
}
/// Information about the current [`RenderTarget`].
#[derive(Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}
impl Default for RenderTargetInfo {
fn default() -> Self {
Self {
physical_size: Default::default(),
scale_factor: 1.,
}
}
}
/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    /// The camera's clip-from-view (projection) matrix.
    pub clip_from_view: Mat4,
    /// Information about the current render target, if resolved.
    pub target_info: Option<RenderTargetInfo>,
    /// Size of the `Viewport` (the previously observed value; presumably used
    /// to detect changes — confirm against `camera_system`).
    pub old_viewport_size: Option<UVec2>,
    /// The previously observed [`SubCameraView`] (presumably for change
    /// detection — confirm against `camera_system`).
    pub old_sub_camera_view: Option<SubCameraView>,
}
/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// Higher `ev100` values produce a smaller [`Exposure::exposure`] factor,
/// i.e. less light reaches the final image.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}
impl Exposure {
    /// EV100 for direct sunlight.
    pub const EV100_SUNLIGHT: f32 = 15.0;
    /// EV100 for an overcast sky.
    pub const EV100_OVERCAST: f32 = 12.0;
    /// EV100 for typical indoor lighting.
    pub const EV100_INDOOR: f32 = 7.0;
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };
    /// Builds an [`Exposure`] from physical camera parameters.
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        let ev100 = physical_camera_parameters.ev100();
        Self { ev100 }
    }
    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}
impl Default for Exposure {
fn default() -> Self {
Self::BLENDER
}
}
/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
///
/// See [`PhysicalCameraParameters::ev100`] for the EV100 computation.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}
impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
    ///
    /// EV100 = log2(N² · 100 / (t · S)) where N is the f-number, t the shutter
    /// speed in seconds, and S the ISO sensitivity.
    pub fn ev100(&self) -> f32 {
        let aperture = self.aperture_f_stops;
        ops::log2(aperture * aperture * 100.0 / (self.shutter_speed_s * self.sensitivity_iso))
    }
}
impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            // f/1.0 aperture.
            aperture_f_stops: 1.0,
            // 1/125 s shutter speed.
            shutter_speed_s: 1.0 / 125.0,
            // ISO 100.
            sensitivity_iso: 100.0,
            // 18.66 mm (Super 35), expressed in meters.
            sensor_height: 0.01866,
        }
    }
}
/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// Returned by the conversion methods on [`Camera`]; see
/// [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}
/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    RenderTarget
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    // TODO: reflect `computed` (currently `#[reflect(ignore)]`) when #6042 lands.
    /// The [`CameraOutputMode`] for this camera.
    pub output_mode: CameraOutputMode,
    /// Controls when MSAA writeback occurs for this camera.
    /// See [`MsaaWriteback`] for available options.
    pub msaa_writeback: MsaaWriteback,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// Whether to switch culling mode so that materials that request backface
    /// culling cull front faces, and vice versa.
    ///
    /// This is typically used for cameras that mirror the world that they
    /// render across a plane, because doing that flips the winding of each
    /// polygon.
    ///
    /// This setting doesn't affect materials that disable backface culling.
    pub invert_culling: bool,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}
impl Default for Camera {
fn default() -> Self {
Self {
is_active: true,
order: 0,
viewport: None,
computed: Default::default(),
output_mode: Default::default(),
msaa_writeback: MsaaWriteback::default(),
clear_color: Default::default(),
invert_culling: false,
sub_camera_view: None,
}
}
}
impl Camera {
/// Converts a physical size in this `Camera` to a logical size.
#[inline]
pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
let scale = self.computed.target_info.as_ref()?.scale_factor;
Some(physical_size.as_vec2() / scale)
}
/// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
/// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
/// the full physical rect of the current [`RenderTarget`].
#[inline]
pub fn physical_viewport_rect(&self) -> Option<URect> {
let min = self
.viewport
.as_ref()
.map(|v| v.physical_position)
.unwrap_or(UVec2::ZERO);
let max = min + self.physical_viewport_size()?;
Some(URect { min, max })
}
/// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
/// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
/// full logical rect of the current [`RenderTarget`].
#[inline]
pub fn logical_viewport_rect(&self) -> Option<Rect> {
let URect { min, max } = self.physical_viewport_rect()?;
Some(Rect {
min: self.to_logical(min)?,
max: self.to_logical(max)?,
})
}
/// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
/// will be the size of that custom viewport. Otherwise it will default to the full logical size
/// of the current [`RenderTarget`].
/// For logic that requires the full logical size of the
/// [`RenderTarget`], prefer [`Camera::logical_target_size`].
///
/// Returns `None` if either:
/// - the function is called just after the `Camera` is created, before `camera_system` is executed,
/// - the [`RenderTarget`] isn't correctly set:
/// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
/// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
/// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
/// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
#[inline]
pub fn logical_viewport_size(&self) -> Option<Vec2> {
self.viewport
.as_ref()
.and_then(|v| self.to_logical(v.physical_size))
.or_else(|| self.logical_target_size())
}
/// The physical size of this camera's viewport (in physical pixels).
/// If the `viewport` field is set to [`Some`], this
/// will be the size of that custom viewport. Otherwise it will default to the full physical size of
/// the current [`RenderTarget`].
/// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
#[inline]
pub fn physical_viewport_size(&self) -> Option<UVec2> {
self.viewport
.as_ref()
.map(|v| v.physical_size)
.or_else(|| self.physical_target_size())
}
/// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
/// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
#[inline]
pub fn logical_target_size(&self) -> Option<Vec2> {
self.computed
.target_info
.as_ref()
.and_then(|t| self.to_logical(t.physical_size))
}
/// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
/// ignoring custom `viewport` configuration.
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
/// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
#[inline]
pub fn physical_target_size(&self) -> Option<UVec2> {
self.computed.target_info.as_ref().map(|t| t.physical_size)
}
#[inline]
pub fn target_scaling_factor(&self) -> Option<f32> {
self.computed
.target_info
.as_ref()
.map(|t: &RenderTargetInfo| t.scale_factor)
}
/// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
#[inline]
pub fn clip_from_view(&self) -> Mat4 {
self.computed.clip_from_view
}
/// Core conversion logic to compute viewport coordinates
///
/// This function is shared by `world_to_viewport` and `world_to_viewport_with_depth`
/// to avoid code duplication.
///
/// Returns a tuple `(viewport_position, depth)`.
fn world_to_viewport_core(
&self,
camera_transform: &GlobalTransform,
world_position: Vec3,
) -> Result<(Vec2, f32), ViewportConversionError> {
let target_rect = self
.logical_viewport_rect()
.ok_or(ViewportConversionError::NoViewportSize)?;
let mut ndc_space_coords = self
.world_to_ndc(camera_transform, world_position)
.ok_or(ViewportConversionError::InvalidData)?;
// NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
if ndc_space_coords.z < 0.0 {
return Err(ViewportConversionError::PastFarPlane);
}
if ndc_space_coords.z > 1.0 {
return Err(ViewportConversionError::PastNearPlane);
}
let depth = ndc_space_coords.z;
// Flip the Y co-ordinate origin from the bottom to the top.
ndc_space_coords.y = -ndc_space_coords.y;
// Once in NDC space, we can discard the z element and map x/y to the viewport rect
let viewport_position =
(ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
Ok((viewport_position, depth))
}
/// Given a position in world space, use the camera to compute the viewport-space coordinates.
///
/// To get the coordinates in Normalized Device Coordinates, you should use
/// [`world_to_ndc`](Self::world_to_ndc).
///
/// # Panics
///
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
/// (see [`world_to_ndc`][Self::world_to_ndc]).
#[doc(alias = "world_to_screen")]
pub fn world_to_viewport(
&self,
camera_transform: &GlobalTransform,
world_position: Vec3,
) -> Result<Vec2, ViewportConversionError> {
Ok(self
.world_to_viewport_core(camera_transform, world_position)?
.0)
}
/// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
///
/// To get the coordinates in Normalized Device Coordinates, you should use
/// [`world_to_ndc`](Self::world_to_ndc).
///
/// # Panics
///
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
/// (see [`world_to_ndc`][Self::world_to_ndc]).
#[doc(alias = "world_to_screen_with_depth")]
pub fn world_to_viewport_with_depth(
&self,
camera_transform: &GlobalTransform,
world_position: Vec3,
) -> Result<Vec3, ViewportConversionError> {
let result = self.world_to_viewport_core(camera_transform, world_position)?;
// Stretching ndc depth to value via near plane and negating result to be in positive room again.
let depth = -self.depth_ndc_to_view_z(result.1);
Ok(result.0.extend(depth))
}
/// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
///
/// The resulting ray starts on the near plane of the camera.
///
/// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
///
/// To get the world space coordinates with Normalized Device Coordinates, you should use
/// [`ndc_to_world`](Self::ndc_to_world).
///
/// # Example
/// ```no_run
/// # use bevy_window::Window;
/// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
/// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
/// # use bevy_camera::Camera;
/// # use bevy_app::{App, PostUpdate};
/// #
/// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
/// let (camera, camera_transform) = *camera_query;
///
/// if let Some(cursor_position) = window.cursor_position()
/// // Calculate a ray pointing from the camera into the world based on the cursor's position.
/// && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
/// {
/// println!("{ray:?}");
/// }
/// }
///
/// # let mut app = App::new();
/// // Run the system after transform propagation so the camera's global transform is up-to-date.
/// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
/// ```
///
/// # Panics
///
/// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
/// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world).
pub fn viewport_to_world(
&self,
camera_transform: &GlobalTransform,
viewport_position: Vec2,
) -> Result<Ray3d, ViewportConversionError> {
let ndc_xy = self.viewport_to_ndc(viewport_position)?;
let ndc_point_near = ndc_xy.extend(1.0).into();
// Using EPSILON because an ndc with Z = 0 returns NaNs.
let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
let view_from_clip = self.computed.clip_from_view.inverse();
let world_from_view = camera_transform.affine();
// We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
// (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
// Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
let view_point_near = view_from_clip.project_point3a(ndc_point_near);
let view_point_far = view_from_clip.project_point3a(ndc_point_far);
let view_dir = view_point_far - view_point_near;
let origin = world_from_view.transform_point3a(view_point_near).into();
let direction = world_from_view.transform_vector3a(view_dir).into();
// The fallible direction constructor ensures that direction isn't NaN.
Dir3::new(direction)
.map_err(|_| ViewportConversionError::InvalidData)
.map(|direction| Ray3d { origin, direction })
}
/// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
///
/// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
///
/// To get the world space coordinates with Normalized Device Coordinates, you should use
/// [`ndc_to_world`](Self::ndc_to_world).
///
/// # Example
/// ```no_run
/// # use bevy_window::Window;
/// # use bevy_ecs::prelude::*;
/// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
/// # use bevy_camera::Camera;
/// # use bevy_app::{App, PostUpdate};
/// #
/// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
/// let (camera, camera_transform) = *camera_query;
///
/// if let Some(cursor_position) = window.cursor_position()
/// // Calculate a world position based on the cursor's position.
/// && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
/// {
/// println!("World position: {world_pos:.2}");
/// }
/// }
///
/// # let mut app = App::new();
/// // Run the system after transform propagation so the camera's global transform is up-to-date.
/// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
/// ```
///
/// # Panics
///
/// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
/// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world).
pub fn viewport_to_world_2d(
&self,
camera_transform: &GlobalTransform,
viewport_position: Vec2,
) -> Result<Vec2, ViewportConversionError> {
let ndc = self.viewport_to_ndc(viewport_position)?;
let world_near_plane = self
.ndc_to_world(camera_transform, ndc.extend(1.))
.ok_or(ViewportConversionError::InvalidData)?;
Ok(world_near_plane.truncate())
}
/// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
///
/// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
/// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
/// To get the coordinates in the render target's viewport dimensions, you should use
/// [`world_to_viewport`](Self::world_to_viewport).
///
/// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
/// [`Projection`](super::projection::Projection) contain `NAN`.
///
/// # Panics
///
/// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
&self,
camera_transform: &GlobalTransform,
world_point: V,
) -> Option<V> {
let view_from_world = camera_transform.affine().inverse();
let view_point = view_from_world.transform_point3a(world_point.into());
let ndc_point = self.computed.clip_from_view.project_point3a(view_point);
(!ndc_point.is_nan()).then_some(ndc_point.into())
}
/// Given a position in Normalized Device Coordinates,
/// use the camera's viewport to compute the world space position.
///
/// The input is expected to be in NDC: `x` and `y` in the range `[-1.0, 1.0]`, and `z` in `[0.0, 1.0]`
/// (with `z = 0.0` at the far plane and `z = 1.0` at the near plane).
/// The returned value is a position in world space (your game's world units) and is not limited to `[-1.0, 1.0]`.
/// To convert from a viewport position to world space, you should use
/// [`viewport_to_world`](Self::viewport_to_world).
///
/// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
/// [`Projection`](super::projection::Projection) contain `NAN`.
///
/// # Panics
///
/// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
&self,
camera_transform: &GlobalTransform,
ndc_point: V,
) -> Option<V> {
// We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
// (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
let view_point = self
.computed
.clip_from_view
.inverse()
.project_point3a(ndc_point.into());
let world_point = camera_transform.affine().transform_point3a(view_point);
(!world_point.is_nan()).then_some(world_point.into())
}
/// Converts the depth in Normalized Device Coordinates
/// to linear view z for perspective projections.
///
/// Note: Depth values in front of the camera will be negative as -z is forward
pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
let near = self.clip_from_view().w_axis.z; // [3][2]
-near / ndc_depth
}
/// Converts the depth in Normalized Device Coordinates
/// to linear view z for orthographic projections.
///
/// Note: Depth values in front of the camera will be negative as -z is forward
pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
-(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
// [3][2] [2][2]
}
/// Converts a position in viewport coordinates to NDC.
pub fn viewport_to_ndc(
&self,
viewport_position: Vec2,
) -> Result<Vec2, ViewportConversionError> {
let target_rect = self
.logical_viewport_rect()
.ok_or(ViewportConversionError::NoViewportSize)?;
let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
let mut ndc = rect_relative * 2. - Vec2::ONE;
// Flip the Y co-ordinate from the top to the bottom to enter NDC.
ndc.y = -ndc.y;
Ok(ndc)
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/components.rs | crates/bevy_camera/src/components.rs | use crate::{primitives::Frustum, Camera, CameraProjection, OrthographicProjection, Projection};
use bevy_ecs::prelude::*;
use bevy_reflect::{std_traits::ReflectDefault, Reflect, ReflectDeserialize, ReflectSerialize};
use bevy_transform::prelude::{GlobalTransform, Transform};
use serde::{Deserialize, Serialize};
use wgpu_types::{LoadOp, TextureUsages};
/// A 2D camera component. Enables the 2D render graph for a [`Camera`].
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
#[require(
Camera,
Projection::Orthographic(OrthographicProjection::default_2d()),
Frustum = OrthographicProjection::default_2d().compute_frustum(&GlobalTransform::from(Transform::default())),
)]
pub struct Camera2d;
/// A 3D camera component. Enables the main 3D render graph for a [`Camera`].
///
/// The camera coordinate space is right-handed X-right, Y-up, Z-back.
/// This means "forward" is -Z.
#[derive(Component, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
#[require(Camera, Projection)]
pub struct Camera3d {
/// The depth clear operation to perform for the main 3d pass.
pub depth_load_op: Camera3dDepthLoadOp,
/// The texture usages for the depth texture created for the main 3d pass.
pub depth_texture_usages: Camera3dDepthTextureUsage,
/// How many individual steps should be performed in the `Transmissive3d` pass.
///
/// Roughly corresponds to how many “layers of transparency” are rendered for screen space
/// specular transmissive objects. Each step requires making one additional
/// texture copy, so it's recommended to keep this number to a reasonably low value. Defaults to `1`.
///
/// ### Notes
///
/// - No copies will be performed if there are no transmissive materials currently being rendered,
/// regardless of this setting.
/// - Setting this to `0` disables the screen-space refraction effect entirely, and falls
/// back to refracting only the environment map light's texture.
/// - If set to more than `0`, any opaque [`clear_color`](Camera::clear_color) will obscure the environment
/// map light's texture, preventing it from being visible “through” transmissive materials. If you'd like
/// to still have the environment map show up in your refractions, you can set the clear color's alpha to `0.0`.
/// Keep in mind that depending on the platform and your window settings, this may cause the window to become
/// transparent.
pub screen_space_specular_transmission_steps: usize,
/// The quality of the screen space specular transmission blur effect, applied to whatever's “behind” transmissive
/// objects when their `roughness` is greater than `0.0`.
///
/// Higher qualities are more GPU-intensive.
///
/// **Note:** You can get better-looking results at any quality level by enabling TAA. See: `TemporalAntiAliasPlugin`
pub screen_space_specular_transmission_quality: ScreenSpaceTransmissionQuality,
}
impl Default for Camera3d {
fn default() -> Self {
Self {
depth_load_op: Default::default(),
depth_texture_usages: TextureUsages::RENDER_ATTACHMENT.into(),
screen_space_specular_transmission_steps: 1,
screen_space_specular_transmission_quality: Default::default(),
}
}
}
#[derive(Clone, Copy, Reflect, Serialize, Deserialize)]
#[reflect(Serialize, Deserialize, Clone)]
pub struct Camera3dDepthTextureUsage(pub u32);
impl From<TextureUsages> for Camera3dDepthTextureUsage {
fn from(value: TextureUsages) -> Self {
Self(value.bits())
}
}
impl From<Camera3dDepthTextureUsage> for TextureUsages {
fn from(value: Camera3dDepthTextureUsage) -> Self {
Self::from_bits_truncate(value.0)
}
}
/// The depth clear operation to perform for the main 3d pass.
#[derive(Reflect, Serialize, Deserialize, Clone, Debug)]
#[reflect(Serialize, Deserialize, Clone, Default)]
pub enum Camera3dDepthLoadOp {
/// Clear with a specified value.
/// Note that 0.0 is the far plane due to bevy's use of reverse-z projections.
Clear(f32),
/// Load from memory.
Load,
}
impl Default for Camera3dDepthLoadOp {
fn default() -> Self {
Camera3dDepthLoadOp::Clear(0.0)
}
}
impl From<Camera3dDepthLoadOp> for LoadOp<f32> {
fn from(config: Camera3dDepthLoadOp) -> Self {
match config {
Camera3dDepthLoadOp::Clear(x) => LoadOp::Clear(x),
Camera3dDepthLoadOp::Load => LoadOp::Load,
}
}
}
/// The quality of the screen space transmission blur effect, applied to whatever's “behind” transmissive
/// objects when their `roughness` is greater than `0.0`.
///
/// Higher qualities are more GPU-intensive.
///
/// **Note:** You can get better-looking results at any quality level by enabling TAA. See: `TemporalAntiAliasPlugin`
#[derive(Resource, Default, Clone, Copy, Reflect, PartialEq, PartialOrd, Debug)]
#[reflect(Resource, Default, Clone, Debug, PartialEq)]
pub enum ScreenSpaceTransmissionQuality {
/// Best performance at the cost of quality. Suitable for lower end GPUs. (e.g. Mobile)
///
/// `num_taps` = 4
Low,
/// A balanced option between quality and performance.
///
/// `num_taps` = 8
#[default]
Medium,
/// Better quality. Suitable for high end GPUs. (e.g. Desktop)
///
/// `num_taps` = 16
High,
/// The highest quality, suitable for non-realtime rendering. (e.g. Pre-rendered cinematics and photo mode)
///
/// `num_taps` = 32
Ultra,
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/clear_color.rs | crates/bevy_camera/src/clear_color.rs | use bevy_color::Color;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::prelude::*;
use bevy_reflect::prelude::*;
use derive_more::derive::From;
use serde::{Deserialize, Serialize};
/// For a camera, specifies the color used to clear the viewport
/// [before rendering](crate::camera::Camera::clear_color)
/// or when [writing to the final render target texture](crate::camera::Camera::output_mode).
#[derive(Reflect, Serialize, Deserialize, Copy, Clone, Debug, Default, From)]
#[reflect(Serialize, Deserialize, Default, Clone)]
pub enum ClearColorConfig {
/// The clear color is taken from the world's [`ClearColor`] resource.
#[default]
Default,
/// The given clear color is used, overriding the [`ClearColor`] resource defined in the world.
Custom(Color),
/// No clear color is used: the camera will simply draw on top of anything already in the viewport.
///
/// This can be useful when multiple cameras are rendering to the same viewport.
None,
}
/// Controls when MSAA writeback occurs for a camera.
///
/// MSAA writeback copies the previous camera's output into the MSAA sampled texture before
/// rendering, allowing multiple cameras to layer their results when MSAA is enabled.
#[derive(Reflect, Serialize, Deserialize, Copy, Clone, Debug, Default, PartialEq, Eq)]
#[reflect(Serialize, Deserialize, Default, Clone)]
pub enum MsaaWriteback {
/// Never perform MSAA writeback for this camera.
Off,
/// Perform MSAA writeback only when this camera is not the first one rendering to the target.
/// This is the default behavior - the first camera has nothing to write back.
#[default]
Auto,
/// Always perform MSAA writeback, even if this is the first camera rendering to the target.
/// This is useful when content has been written directly to the main texture (e.g., via
/// `write_texture`) and needs to be preserved through the MSAA render pass.
Always,
}
/// A [`Resource`] that stores the default color that cameras use to clear the screen between frames.
///
/// This color appears as the "background" color for simple apps,
/// when there are portions of the screen with nothing rendered.
///
/// Individual cameras may use [`Camera.clear_color`] to specify a different
/// clear color or opt out of clearing their viewport.
///
/// [`Camera.clear_color`]: crate::camera::Camera::clear_color
#[derive(Resource, Clone, Debug, Deref, DerefMut, Reflect)]
#[reflect(Resource, Default, Debug, Clone)]
pub struct ClearColor(pub Color);
/// Match the dark gray bevy website code block color by default.
impl Default for ClearColor {
fn default() -> Self {
Self(Color::srgb_u8(43, 44, 47))
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/visibility/range.rs | crates/bevy_camera/src/visibility/range.rs | //! Specific distances from the camera in which entities are visible, also known
//! as *hierarchical levels of detail* or *HLOD*s.
use core::{
hash::{Hash, Hasher},
ops::Range,
};
use bevy_app::{App, Plugin, PostUpdate};
use bevy_ecs::{
component::Component,
entity::{Entity, EntityHashMap},
query::With,
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{Local, Query, ResMut},
};
use bevy_math::FloatOrd;
use bevy_reflect::Reflect;
use bevy_transform::components::GlobalTransform;
use bevy_utils::Parallel;
use super::{check_visibility, VisibilitySystems};
use crate::{camera::Camera, primitives::Aabb};
/// A plugin that enables [`VisibilityRange`]s, which allow entities to be
/// hidden or shown based on distance to the camera.
pub struct VisibilityRangePlugin;
impl Plugin for VisibilityRangePlugin {
fn build(&self, app: &mut App) {
app.init_resource::<VisibleEntityRanges>().add_systems(
PostUpdate,
check_visibility_ranges
.in_set(VisibilitySystems::CheckVisibility)
.before(check_visibility),
);
}
}
/// Specifies the range of distances that this entity must be from the camera in
/// order to be rendered.
///
/// This is also known as *hierarchical level of detail* or *HLOD*.
///
/// Use this component when you want to render a high-polygon mesh when the
/// camera is close and a lower-polygon mesh when the camera is far away. This
/// is a common technique for improving performance, because fine details are
/// hard to see in a mesh at a distance. To avoid an artifact known as *popping*
/// between levels, each level has a *margin*, within which the object
/// transitions gradually from invisible to visible using a dithering effect.
///
/// You can also use this feature to replace multiple meshes with a single mesh
/// when the camera is distant. This is the reason for the term "*hierarchical*
/// level of detail". Reducing the number of meshes can be useful for reducing
/// drawcall count. Note that you must place the [`VisibilityRange`] component
/// on each entity you want to be part of a LOD group, as [`VisibilityRange`]
/// isn't automatically propagated down to children.
///
/// A typical use of this feature might look like this:
///
/// | Entity | `start_margin` | `end_margin` |
/// |-------------------------|----------------|--------------|
/// | Root | N/A | N/A |
/// | ├─ High-poly mesh | [0, 0) | [20, 25) |
/// | ├─ Low-poly mesh | [20, 25) | [70, 75) |
/// | └─ Billboard *imposter* | [70, 75) | [150, 160) |
///
/// With this setup, the user will see a high-poly mesh when the camera is
/// closer than 20 units. As the camera zooms out, between 20 units to 25 units,
/// the high-poly mesh will gradually fade to a low-poly mesh. When the camera
/// is 70 to 75 units away, the low-poly mesh will fade to a single textured
/// quad. And between 150 and 160 units, the object fades away entirely. Note
/// that the `end_margin` of a higher LOD is always identical to the
/// `start_margin` of the next lower LOD; this is important for the crossfade
/// effect to function properly.
#[derive(Component, Clone, PartialEq, Default, Reflect)]
#[reflect(Component, PartialEq, Hash, Clone)]
pub struct VisibilityRange {
/// The range of distances, in world units, between which this entity will
/// smoothly fade into view as the camera zooms out.
///
/// If the start and end of this range are identical, the transition will be
/// abrupt, with no crossfading.
///
/// `start_margin.end` must be less than or equal to `end_margin.start`.
pub start_margin: Range<f32>,
/// The range of distances, in world units, between which this entity will
/// smoothly fade out of view as the camera zooms out.
///
/// If the start and end of this range are identical, the transition will be
/// abrupt, with no crossfading.
///
/// `end_margin.start` must be greater than or equal to `start_margin.end`.
pub end_margin: Range<f32>,
/// If set to true, Bevy will use the center of the axis-aligned bounding
/// box ([`Aabb`]) as the position of the mesh for the purposes of
/// visibility range computation.
///
/// Otherwise, if this field is set to false, Bevy will use the origin of
/// the mesh as the mesh's position.
///
/// Usually you will want to leave this set to false, because different LODs
/// may have different AABBs, and smooth crossfades between LOD levels
/// require that all LODs of a mesh be at *precisely* the same position. If
/// you aren't using crossfading, however, and your meshes aren't centered
/// around their origins, then this flag may be useful.
pub use_aabb: bool,
}
impl Eq for VisibilityRange {}
impl Hash for VisibilityRange {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
FloatOrd(self.start_margin.start).hash(state);
FloatOrd(self.start_margin.end).hash(state);
FloatOrd(self.end_margin.start).hash(state);
FloatOrd(self.end_margin.end).hash(state);
}
}
impl VisibilityRange {
/// Creates a new *abrupt* visibility range, with no crossfade.
///
/// There will be no crossfade; the object will immediately vanish if the
/// camera is closer than `start` units or farther than `end` units from the
/// model.
///
/// The `start` value must be less than or equal to the `end` value.
#[inline]
pub fn abrupt(start: f32, end: f32) -> Self {
Self {
start_margin: start..start,
end_margin: end..end,
use_aabb: false,
}
}
/// Returns true if both the start and end transitions for this range are
/// abrupt: that is, there is no crossfading.
#[inline]
pub fn is_abrupt(&self) -> bool {
self.start_margin.start == self.start_margin.end
&& self.end_margin.start == self.end_margin.end
}
/// Returns true if the object will be visible at all, given a camera
/// `camera_distance` units away.
///
/// Any amount of visibility, even with the heaviest dithering applied, is
/// considered visible according to this check.
#[inline]
pub fn is_visible_at_all(&self, camera_distance: f32) -> bool {
camera_distance >= self.start_margin.start && camera_distance < self.end_margin.end
}
/// Returns true if the object is completely invisible, given a camera
/// `camera_distance` units away.
///
/// This is equivalent to `!VisibilityRange::is_visible_at_all()`.
#[inline]
pub fn is_culled(&self, camera_distance: f32) -> bool {
!self.is_visible_at_all(camera_distance)
}
}
/// Stores which entities are in within the [`VisibilityRange`]s of views.
///
/// This doesn't store the results of frustum or occlusion culling; use
/// [`ViewVisibility`](`super::ViewVisibility`) for that. Thus entities in this list may not
/// actually be visible.
///
/// For efficiency, these tables only store entities that have
/// [`VisibilityRange`] components. Entities without such a component won't be
/// in these tables at all.
///
/// The table is indexed by entity and stores a 32-bit bitmask with one bit for
/// each camera, where a 0 bit corresponds to "out of range" and a 1 bit
/// corresponds to "in range". Hence it's limited to storing information for 32
/// views.
#[derive(Resource, Default)]
pub struct VisibleEntityRanges {
/// Stores which bit index each view corresponds to.
views: EntityHashMap<u8>,
/// Stores a bitmask in which each view has a single bit.
///
/// A 0 bit for a view corresponds to "out of range"; a 1 bit corresponds to
/// "in range".
entities: EntityHashMap<u32>,
}
impl VisibleEntityRanges {
/// Clears out the [`VisibleEntityRanges`] in preparation for a new frame.
fn clear(&mut self) {
self.views.clear();
self.entities.clear();
}
/// Returns true if the entity is in range of the given camera.
///
/// This only checks [`VisibilityRange`]s and doesn't perform any frustum or
/// occlusion culling. Thus the entity might not *actually* be visible.
///
/// The entity is assumed to have a [`VisibilityRange`] component. If the
/// entity doesn't have that component, this method will return false.
#[inline]
pub fn entity_is_in_range_of_view(&self, entity: Entity, view: Entity) -> bool {
let Some(visibility_bitmask) = self.entities.get(&entity) else {
return false;
};
let Some(view_index) = self.views.get(&view) else {
return false;
};
(visibility_bitmask & (1 << view_index)) != 0
}
/// Returns true if the entity is in range of any view.
///
/// This only checks [`VisibilityRange`]s and doesn't perform any frustum or
/// occlusion culling. Thus the entity might not *actually* be visible.
///
/// The entity is assumed to have a [`VisibilityRange`] component. If the
/// entity doesn't have that component, this method will return false.
#[inline]
pub fn entity_is_in_range_of_any_view(&self, entity: Entity) -> bool {
self.entities.contains_key(&entity)
}
}
/// Checks all entities against all views in order to determine which entities
/// with [`VisibilityRange`]s are potentially visible.
///
/// This only checks distance from the camera and doesn't frustum or occlusion
/// cull.
pub fn check_visibility_ranges(
mut visible_entity_ranges: ResMut<VisibleEntityRanges>,
view_query: Query<(Entity, &GlobalTransform), With<Camera>>,
mut par_local: Local<Parallel<Vec<(Entity, u32)>>>,
entity_query: Query<(Entity, &GlobalTransform, Option<&Aabb>, &VisibilityRange)>,
) {
visible_entity_ranges.clear();
// Early out if the visibility range feature isn't in use.
if entity_query.is_empty() {
return;
}
// Assign an index to each view.
let mut views = vec![];
for (view, view_transform) in view_query.iter().take(32) {
let view_index = views.len() as u8;
visible_entity_ranges.views.insert(view, view_index);
views.push((view, view_transform.translation_vec3a()));
}
// Check each entity/view pair. Only consider entities with
// [`VisibilityRange`] components.
entity_query.par_iter().for_each(
|(entity, entity_transform, maybe_model_aabb, visibility_range)| {
let mut visibility = 0;
for (view_index, &(_, view_position)) in views.iter().enumerate() {
// If instructed to use the AABB and the model has one, use its
// center as the model position. Otherwise, use the model's
// translation.
let model_position = match (visibility_range.use_aabb, maybe_model_aabb) {
(true, Some(model_aabb)) => entity_transform
.affine()
.transform_point3a(model_aabb.center),
_ => entity_transform.translation_vec3a(),
};
if visibility_range.is_visible_at_all((view_position - model_position).length()) {
visibility |= 1 << view_index;
}
}
// Invisible entities have no entry at all in the hash map. This speeds
// up checks slightly in this common case.
if visibility != 0 {
par_local.borrow_local_mut().push((entity, visibility));
}
},
);
visible_entity_ranges.entities.extend(par_local.drain());
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/visibility/render_layers.rs | crates/bevy_camera/src/visibility/render_layers.rs | use bevy_ecs::prelude::{Component, ReflectComponent};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use smallvec::SmallVec;
pub const DEFAULT_LAYERS: &RenderLayers = &RenderLayers::layer(0);
/// An identifier for a rendering layer.
pub type Layer = usize;
/// Defines which rendering layers an entity belongs to.
///
/// A camera renders an entity only when their render layers intersect.
///
/// The [`Default`] instance of `RenderLayers` contains layer `0`, the first layer. Entities
/// without this component also belong to layer `0`.
///
/// An empty `RenderLayers` makes the entity invisible.
#[derive(Component, Clone, Reflect, PartialEq, Eq, PartialOrd, Ord)]
#[reflect(Component, Default, PartialEq, Debug, Clone)]
pub struct RenderLayers(SmallVec<[u64; INLINE_BLOCKS]>);
/// The number of memory blocks stored inline
const INLINE_BLOCKS: usize = 1;
impl Default for &RenderLayers {
fn default() -> Self {
DEFAULT_LAYERS
}
}
impl core::fmt::Debug for RenderLayers {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_tuple("RenderLayers")
.field(&self.iter().collect::<Vec<_>>())
.finish()
}
}
impl FromIterator<Layer> for RenderLayers {
fn from_iter<T: IntoIterator<Item = Layer>>(i: T) -> Self {
i.into_iter().fold(Self::none(), RenderLayers::with)
}
}
impl Default for RenderLayers {
/// By default, this structure includes layer `0`, which represents the first layer.
///
/// This is distinct from [`RenderLayers::none`], which doesn't belong to any layers.
fn default() -> Self {
const { Self::layer(0) }
}
}
impl RenderLayers {
    /// Create a new `RenderLayers` belonging to the given layer.
    ///
    /// This `const` constructor is limited to `size_of::<usize>()` layers.
    /// If you need to support an arbitrary number of layers, use [`with`](RenderLayers::with)
    /// or [`from_layers`](RenderLayers::from_layers).
    pub const fn layer(n: Layer) -> Self {
        let (buffer_index, bit) = Self::layer_info(n);
        // Layers past the inline capacity would require a heap allocation,
        // which is not possible in a `const` context, so fail loudly instead.
        assert!(
            buffer_index < INLINE_BLOCKS,
            "layer is out of bounds for const construction"
        );
        let mut buffer = [0; INLINE_BLOCKS];
        buffer[buffer_index] = bit;
        RenderLayers(SmallVec::from_const(buffer))
    }
    /// Create a new `RenderLayers` that belongs to no layers.
    ///
    /// This is distinct from [`RenderLayers::default`], which belongs to the first layer.
    pub const fn none() -> Self {
        RenderLayers(SmallVec::from_const([0; INLINE_BLOCKS]))
    }
    /// Create a `RenderLayers` from a list of layers.
    pub fn from_layers(layers: &[Layer]) -> Self {
        layers.iter().copied().collect()
    }
    /// Add the given layer.
    ///
    /// This may be called multiple times to allow an entity to belong
    /// to multiple rendering layers.
    #[must_use]
    pub fn with(mut self, layer: Layer) -> Self {
        let (buffer_index, bit) = Self::layer_info(layer);
        // Grow the block buffer (with zeroed blocks) if the layer lives past the current end.
        self.extend_buffer(buffer_index + 1);
        self.0[buffer_index] |= bit;
        self
    }
    /// Removes the given rendering layer.
    #[must_use]
    pub fn without(mut self, layer: Layer) -> Self {
        let (buffer_index, bit) = Self::layer_info(layer);
        if buffer_index < self.0.len() {
            self.0[buffer_index] &= !bit;
            // Drop trailing zero memory blocks.
            // NOTE: This is not just an optimization, it is necessary for the derived PartialEq impl to be correct.
            if buffer_index == self.0.len() - 1 {
                self = self.shrink();
            }
        }
        self
    }
    /// Get an iterator of the layers.
    pub fn iter(&self) -> impl Iterator<Item = Layer> + '_ {
        // Pair each 64-bit block with its index, then decode the set bits of every block.
        self.0.iter().copied().zip(0..).flat_map(Self::iter_layers)
    }
    /// Determine if a `RenderLayers` intersects another.
    ///
    /// `RenderLayers`s intersect if they share any common layers.
    ///
    /// A `RenderLayers` with no layers will not match any other
    /// `RenderLayers`, even another with no layers.
    pub fn intersects(&self, other: &RenderLayers) -> bool {
        // Check for the common case where the view layer and entity layer
        // both point towards our default layer.
        if self.0.as_ptr() == other.0.as_ptr() {
            return true;
        }
        // Any overlapping bit in any shared block means the sets intersect; blocks
        // past the shorter buffer are implicitly zero and cannot contribute.
        for (self_layer, other_layer) in self.0.iter().zip(other.0.iter()) {
            if (*self_layer & *other_layer) != 0 {
                return true;
            }
        }
        false
    }
    /// Get the bitmask representation of the contained layers.
    pub fn bits(&self) -> &[u64] {
        self.0.as_slice()
    }
    /// Maps a layer index to its position: `(block index, single-bit mask within that block)`.
    const fn layer_info(layer: usize) -> (usize, u64) {
        let buffer_index = layer / 64;
        let bit_index = layer % 64;
        let bit = 1u64 << bit_index;
        (buffer_index, bit)
    }
    /// Grows the block buffer with zeroed blocks until it holds at least `other_len` blocks.
    /// Never shrinks.
    fn extend_buffer(&mut self, other_len: usize) {
        let new_size = core::cmp::max(self.0.len(), other_len);
        // `reserve_exact` avoids over-allocating beyond the blocks actually needed.
        self.0.reserve_exact(new_size - self.0.len());
        self.0.resize(new_size, 0u64);
    }
    /// Decodes the set bits of one `(block, block_index)` pair into absolute layer indices.
    fn iter_layers(buffer_and_offset: (u64, usize)) -> impl Iterator<Item = Layer> + 'static {
        let (mut buffer, mut layer) = buffer_and_offset;
        layer *= 64;
        core::iter::from_fn(move || {
            if buffer == 0 {
                return None;
            }
            let next = buffer.trailing_zeros() + 1;
            // `checked_shr` handles `next == 64` (bit 63 set): shifting a u64 by 64
            // would overflow, so treat it as "buffer exhausted" instead.
            buffer = buffer.checked_shr(next).unwrap_or(0);
            layer += next as usize;
            Some(layer - 1)
        })
    }
    /// Returns the set of [layers](Layer) shared by two instances of [`RenderLayers`].
    ///
    /// This corresponds to the `self & other` operation.
    pub fn intersection(&self, other: &Self) -> Self {
        // AND can zero out blocks, so trailing zeros must be stripped.
        self.combine_blocks(other, |a, b| a & b).shrink()
    }
    /// Returns all [layers](Layer) included in either instance of [`RenderLayers`].
    ///
    /// This corresponds to the `self | other` operation.
    pub fn union(&self, other: &Self) -> Self {
        self.combine_blocks(other, |a, b| a | b) // doesn't need to be shrunk, if the inputs are nonzero then the result will be too
    }
    /// Returns all [layers](Layer) included in exactly one of the instances of [`RenderLayers`].
    ///
    /// This corresponds to the "exclusive or" (XOR) operation: `self ^ other`.
    pub fn symmetric_difference(&self, other: &Self) -> Self {
        // XOR can zero out blocks, so trailing zeros must be stripped.
        self.combine_blocks(other, |a, b| a ^ b).shrink()
    }
    /// Deallocates any trailing-zero memory blocks from this instance
    fn shrink(mut self) -> Self {
        let mut any_dropped = false;
        while self.0.len() > INLINE_BLOCKS && self.0.last() == Some(&0) {
            self.0.pop();
            any_dropped = true;
        }
        // Once everything fits inline again, let the SmallVec release its heap allocation.
        if any_dropped && self.0.len() <= INLINE_BLOCKS {
            self.0.shrink_to_fit();
        }
        self
    }
    /// Creates a new instance of [`RenderLayers`] by applying a function to the memory blocks
    /// of self and another instance.
    ///
    /// If the function `f` might return `0` for non-zero inputs, you should call [`Self::shrink`]
    /// on the output to ensure that there are no trailing zero memory blocks that would break
    /// this type's equality comparison.
    fn combine_blocks(&self, other: &Self, mut f: impl FnMut(u64, u64) -> u64) -> Self {
        let mut a = self.0.iter();
        let mut b = other.0.iter();
        // Walk both block lists in lockstep, treating the shorter one as zero-padded,
        // and stop once both are exhausted.
        let mask = core::iter::from_fn(|| {
            let a = a.next().copied();
            let b = b.next().copied();
            if a.is_none() && b.is_none() {
                return None;
            }
            Some(f(a.unwrap_or_default(), b.unwrap_or_default()))
        });
        Self(mask.collect())
    }
}
// `&` computes the set intersection of two layer masks.
impl core::ops::BitAnd for RenderLayers {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        self.intersection(&rhs)
    }
}
// `|` computes the set union of two layer masks.
impl core::ops::BitOr for RenderLayers {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        self.union(&rhs)
    }
}
// `^` computes the symmetric difference (XOR) of two layer masks.
impl core::ops::BitXor for RenderLayers {
    type Output = Self;
    fn bitxor(self, rhs: Self) -> Self::Output {
        self.symmetric_difference(&rhs)
    }
}
#[cfg(test)]
mod rendering_mask_tests {
    use super::{Layer, RenderLayers};
    use smallvec::SmallVec;
    /// Exercises construction, bit layout, intersection, and iteration round-trips.
    #[test]
    fn rendering_mask_sanity() {
        let layer_0 = RenderLayers::layer(0);
        assert_eq!(layer_0.0.len(), 1, "layer 0 is one buffer");
        assert_eq!(layer_0.0[0], 1, "layer 0 is mask 1");
        let layer_1 = RenderLayers::layer(1);
        assert_eq!(layer_1.0.len(), 1, "layer 1 is one buffer");
        assert_eq!(layer_1.0[0], 2, "layer 1 is mask 2");
        let layer_0_1 = RenderLayers::layer(0).with(1);
        assert_eq!(layer_0_1.0.len(), 1, "layer 0 + 1 is one buffer");
        assert_eq!(layer_0_1.0[0], 3, "layer 0 + 1 is mask 3");
        let layer_0_1_without_0 = layer_0_1.without(0);
        assert_eq!(
            layer_0_1_without_0.0.len(),
            1,
            "layer 0 + 1 - 0 is one buffer"
        );
        assert_eq!(layer_0_1_without_0.0[0], 2, "layer 0 + 1 - 0 is mask 2");
        // Layer 2345 lives in block 2345 / 64 = 36, so 37 blocks are required.
        let layer_0_2345 = RenderLayers::layer(0).with(2345);
        assert_eq!(layer_0_2345.0.len(), 37, "layer 0 + 2345 is 37 buffers");
        assert_eq!(layer_0_2345.0[0], 1, "layer 0 + 2345 is mask 1");
        assert_eq!(
            layer_0_2345.0[36], 2199023255552,
            "layer 0 + 2345 is mask 2199023255552"
        );
        assert!(
            layer_0_2345.intersects(&layer_0),
            "layer 0 + 2345 intersects 0"
        );
        assert!(
            RenderLayers::layer(1).intersects(&RenderLayers::layer(1)),
            "layers match like layers"
        );
        assert!(
            RenderLayers::layer(0).intersects(&RenderLayers(SmallVec::from_const([1]))),
            "a layer of 0 means the mask is just 1 bit"
        );
        assert!(
            RenderLayers::layer(0)
                .with(3)
                .intersects(&RenderLayers::layer(3)),
            "a mask will match another mask containing any similar layers"
        );
        assert!(
            RenderLayers::default().intersects(&RenderLayers::default()),
            "default masks match each other"
        );
        assert!(
            !RenderLayers::layer(0).intersects(&RenderLayers::layer(1)),
            "masks with differing layers do not match"
        );
        assert!(
            !RenderLayers::none().intersects(&RenderLayers::none()),
            "empty masks don't match"
        );
        assert_eq!(
            RenderLayers::from_layers(&[0, 2, 16, 30])
                .iter()
                .collect::<Vec<_>>(),
            vec![0, 2, 16, 30],
            "from_layers and get_layers should roundtrip"
        );
        assert_eq!(
            format!("{:?}", RenderLayers::from_layers(&[0, 1, 2, 3])).as_str(),
            "RenderLayers([0, 1, 2, 3])",
            "Debug instance shows layers"
        );
        assert_eq!(
            RenderLayers::from_layers(&[0, 1, 2]),
            <RenderLayers as FromIterator<Layer>>::from_iter(vec![0, 1, 2]),
            "from_layers and from_iter are equivalent"
        );
        // Layers spanning multiple blocks must still round-trip through `iter`.
        let tricky_layers = vec![0, 5, 17, 55, 999, 1025, 1026];
        let layers = RenderLayers::from_layers(&tricky_layers);
        let out = layers.iter().collect::<Vec<_>>();
        assert_eq!(tricky_layers, out, "tricky layers roundtrip");
    }
    /// A mask with all 64 inline layers set.
    const MANY: RenderLayers = RenderLayers(SmallVec::from_const([u64::MAX]));
    /// Checks the `&`, `|`, and `^` operator implementations.
    #[test]
    fn render_layer_ops() {
        let a = RenderLayers::from_layers(&[2, 4, 6]);
        let b = RenderLayers::from_layers(&[1, 2, 3, 4, 5]);
        assert_eq!(
            a.clone() | b.clone(),
            RenderLayers::from_layers(&[1, 2, 3, 4, 5, 6])
        );
        assert_eq!(a.clone() & b.clone(), RenderLayers::from_layers(&[2, 4]));
        assert_eq!(a ^ b, RenderLayers::from_layers(&[1, 3, 5, 6]));
        assert_eq!(RenderLayers::none() & MANY, RenderLayers::none());
        assert_eq!(RenderLayers::none() | MANY, MANY);
        assert_eq!(RenderLayers::none() ^ MANY, MANY);
    }
    /// Removing the last high layer must drop the now-empty trailing block.
    #[test]
    fn render_layer_shrink() {
        // Since it has layers greater than 64, the instance should take up two memory blocks
        let layers = RenderLayers::from_layers(&[1, 77]);
        assert!(layers.0.len() == 2);
        // When excluding that layer, it should drop the extra memory block
        let layers = layers.without(77);
        assert!(layers.0.len() == 1);
    }
    /// Iterating a mask containing bit 63 must not overflow the shift in `iter_layers`.
    #[test]
    fn render_layer_iter_no_overflow() {
        let layers = RenderLayers::from_layers(&[63]);
        layers.iter().count();
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_camera/src/visibility/mod.rs | crates/bevy_camera/src/visibility/mod.rs | mod range;
mod render_layers;
use core::any::TypeId;
use bevy_ecs::entity::EntityHashMap;
use bevy_ecs::lifecycle::HookContext;
use bevy_ecs::world::DeferredWorld;
use derive_more::derive::{Deref, DerefMut};
pub use range::*;
pub use render_layers::*;
use bevy_app::{Plugin, PostUpdate};
use bevy_asset::prelude::AssetChanged;
use bevy_asset::{AssetEventSystems, Assets};
use bevy_ecs::{hierarchy::validate_parent_has_component, prelude::*};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_transform::{components::GlobalTransform, TransformSystems};
use bevy_utils::{Parallel, TypeIdMap};
use smallvec::SmallVec;
use crate::{
camera::Camera,
primitives::{Aabb, Frustum, MeshAabb, Sphere},
Projection,
};
use bevy_mesh::{mark_3d_meshes_as_changed_if_their_assets_changed, Mesh, Mesh2d, Mesh3d};
/// Marker component for a view (camera) entity that opts it out of CPU frustum
/// culling: [`check_visibility`] skips both the sphere and OBB frustum tests
/// for all entities when the view has this component.
#[derive(Component, Default)]
pub struct NoCpuCulling;
/// User indication of whether an entity is visible. Propagates down the entity hierarchy.
///
/// If an entity is hidden in this way, all [`Children`] (and all of their children and so on) who
/// are set to [`Inherited`](Self::Inherited) will also be hidden.
///
/// This is done by the `visibility_propagate_system` which uses the entity hierarchy and
/// `Visibility` to set the values of each entity's [`InheritedVisibility`] component.
///
/// Inserting `Visibility` automatically inserts the required [`InheritedVisibility`] and
/// [`ViewVisibility`] components as well.
#[derive(Component, Clone, Copy, Reflect, Debug, PartialEq, Eq, Default)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
#[require(InheritedVisibility, ViewVisibility)]
pub enum Visibility {
    /// An entity with `Visibility::Inherited` will inherit the Visibility of its [`ChildOf`] target.
    ///
    /// A root-level entity that is set to `Inherited` will be visible.
    #[default]
    Inherited,
    /// An entity with `Visibility::Hidden` will be unconditionally hidden.
    Hidden,
    /// An entity with `Visibility::Visible` will be unconditionally visible.
    ///
    /// Note that an entity with `Visibility::Visible` will be visible regardless of whether the
    /// [`ChildOf`] target entity is hidden.
    Visible,
}
impl Visibility {
    /// Toggles between `Visibility::Inherited` and `Visibility::Visible`.
    /// If the value is `Visibility::Hidden`, it remains unaffected.
    #[inline]
    pub fn toggle_inherited_visible(&mut self) {
        match *self {
            Visibility::Inherited => *self = Visibility::Visible,
            Visibility::Visible => *self = Visibility::Inherited,
            // `Hidden` is deliberately left untouched.
            Visibility::Hidden => {}
        }
    }
    /// Toggles between `Visibility::Inherited` and `Visibility::Hidden`.
    /// If the value is `Visibility::Visible`, it remains unaffected.
    #[inline]
    pub fn toggle_inherited_hidden(&mut self) {
        match *self {
            Visibility::Inherited => *self = Visibility::Hidden,
            Visibility::Hidden => *self = Visibility::Inherited,
            // `Visible` is deliberately left untouched.
            Visibility::Visible => {}
        }
    }
    /// Toggles between `Visibility::Visible` and `Visibility::Hidden`.
    /// If the value is `Visibility::Inherited`, it remains unaffected.
    #[inline]
    pub fn toggle_visible_hidden(&mut self) {
        match *self {
            Visibility::Visible => *self = Visibility::Hidden,
            Visibility::Hidden => *self = Visibility::Visible,
            // `Inherited` is deliberately left untouched.
            Visibility::Inherited => {}
        }
    }
}
// Allows `&Visibility == Visibility` comparisons without dereferencing at the call site.
impl PartialEq<Visibility> for &Visibility {
    #[inline]
    fn eq(&self, other: &Visibility) -> bool {
        // Use the base Visibility == Visibility implementation.
        <Visibility as PartialEq<Visibility>>::eq(*self, other)
    }
}
// Allows `Visibility == &Visibility` comparisons without dereferencing at the call site.
impl PartialEq<&Visibility> for Visibility {
    #[inline]
    fn eq(&self, other: &&Visibility) -> bool {
        // Use the base Visibility == Visibility implementation.
        <Visibility as PartialEq<Visibility>>::eq(self, *other)
    }
}
/// Whether or not an entity is visible in the hierarchy.
/// This will not be accurate until [`VisibilityPropagate`] runs in the [`PostUpdate`] schedule.
///
/// If this is false, then [`ViewVisibility`] should also be false.
///
/// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate
#[derive(Component, Deref, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
// NOTE(review): the `on_insert` hook validates that the parent entity also carries this
// component — confirm the exact failure mode against `validate_parent_has_component`.
#[component(on_insert = validate_parent_has_component::<Self>)]
pub struct InheritedVisibility(bool);
impl InheritedVisibility {
    /// An entity that is invisible in the hierarchy.
    pub const HIDDEN: Self = Self(false);
    /// An entity that is visible in the hierarchy.
    pub const VISIBLE: Self = Self(true);
    /// Returns `true` if the entity is visible in the hierarchy.
    /// Otherwise, returns `false`.
    #[inline]
    pub fn get(self) -> bool {
        self.0
    }
}
/// A bucket into which we group entities for the purposes of visibility.
///
/// Bevy's various rendering subsystems (3D, 2D, etc.) want to be able to
/// quickly winnow the set of entities to only those that the subsystem is
/// tasked with rendering, to avoid spending time examining irrelevant entities.
/// At the same time, Bevy wants the [`check_visibility`] system to determine
/// all entities' visibilities at the same time, regardless of what rendering
/// subsystem is responsible for drawing them. Additionally, your application
/// may want to add more types of renderable objects that Bevy determines
/// visibility for just as it does for Bevy's built-in objects.
///
/// The solution to this problem is *visibility classes*. A visibility class is
/// a type, typically the type of a component, that represents the subsystem
/// that renders it: for example, `Mesh3d`, `Mesh2d`, and `Sprite`. The
/// [`VisibilityClass`] component stores the visibility class or classes that
/// the entity belongs to. (Generally, an object will belong to only one
/// visibility class, but in rare cases it may belong to multiple.)
///
/// When adding a new renderable component, you'll typically want to write an
/// add-component hook that adds the type ID of that component to the
/// [`VisibilityClass`] array. See `custom_phase_item` for an example.
///
/// `VisibilityClass` is automatically added by a hook on the `Mesh3d` and
/// `Mesh2d` components. To avoid duplicating the `VisibilityClass` and
/// causing issues when cloning, we use `#[component(clone_behavior=Ignore)]`.
//
// Note: This can't be a `ComponentId` because the visibility classes are copied
// into the render world, and component IDs are per-world.
#[derive(Clone, Component, Default, Reflect, Deref, DerefMut)]
#[reflect(Component, Default, Clone)]
#[component(clone_behavior=Ignore)]
// Inline capacity of 1: the common case is membership in exactly one class.
pub struct VisibilityClass(pub SmallVec<[TypeId; 1]>);
/// Algorithmically computed indication of whether an entity is visible and should be extracted for
/// rendering.
///
/// Each frame, this will be reset to `false` during [`VisibilityPropagate`] systems in
/// [`PostUpdate`]. Later in the frame, systems in [`CheckVisibility`] will mark any visible
/// entities using [`SetViewVisibility::set_visible`]. Thanks to the internal previous-frame
/// tracking bit, change detection only triggers when an entity actually transitions between
/// visible and hidden, not on every frame.
///
/// If you wish to add a custom visibility system that sets this value, be sure to add it to the
/// [`CheckVisibility`] set.
///
/// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate
/// [`CheckVisibility`]: VisibilitySystems::CheckVisibility
#[derive(Component, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)]
#[reflect(Component, Default, Debug, PartialEq, Clone)]
pub struct ViewVisibility(
    /// Bit packed booleans to track current and previous view visibility state.
    /// Bit 0 is the current frame's visibility; bit 1 is the previous frame's.
    ///
    /// Previous visibility is used as a scratch space to ensure that [`ViewVisibility`] is only
    /// mutated (triggering change detection) when necessary.
    ///
    /// This is needed because an entity might be seen by many views (cameras, lights that cast
    /// shadows, etc.), so it is easy to know if an entity is visible to something, but hard to know
    /// if it is *globally* non-visible to any view. To solve this, we track the visibility from the
    /// previous frame. Then, during the [`VisibilitySystems::CheckVisibility`] system set, systems
    /// call [`SetViewVisibility::set_visible`] to mark entities as visible.
    ///
    /// Finally, we can look for entities that were previously visible but are no longer visible
    /// and set their current state to hidden, ensuring that we have only triggered change detection
    /// when necessary.
    u8,
);
impl ViewVisibility {
    /// An entity that cannot be seen from any views.
    pub const HIDDEN: Self = Self(0);
    /// Returns `true` if the entity is visible in any view.
    /// Otherwise, returns `false`.
    #[inline]
    pub fn get(self) -> bool {
        // Bit 0 holds the current frame's visibility.
        self.0 & 1 != 0
    }
    /// Returns `true` if this entity was visible in the previous frame but is now hidden.
    #[inline]
    fn was_visible_now_hidden(self) -> bool {
        // The first bit is false (current), and the second bit is true (previous).
        self.0 == 0b10
    }
    /// Advances the state at the start of a frame: bit 0 (current) is *moved* into bit 1
    /// (previous) and then cleared, so visibility systems can re-mark the entity this
    /// frame via [`SetViewVisibility::set_visible`].
    #[inline]
    fn update(&mut self) {
        // BUGFIX: the previous expression `(self.0 & !2) | ((self.0 & 1) << 1)` copied the
        // current bit into the previous bit but *kept* the current bit set. Once an entity
        // had been visible, its state became `0b11` after every reset and could never reach
        // `0b10` ("was visible, now hidden"), so `mark_newly_hidden_entities_invisible`
        // could never hide it again. Moving (rather than copying) the bit restores the
        // intended state machine: 0b01 -> 0b10, 0b11 -> 0b10, 0b10/0b00 -> 0b00.
        self.0 = (self.0 & 1) << 1;
    }
}
/// Extension trait for marking an entity as visible through a mutable [`ViewVisibility`].
pub trait SetViewVisibility {
    /// Sets the visibility to `true` if not already visible, triggering change detection only when
    /// needed. This should not be considered reversible for a given frame, as this component tracks
    /// if the entity is visible in _any_ view.
    ///
    /// You should only manually set this if you are defining a custom visibility system,
    /// in which case the system should be placed in the [`CheckVisibility`] set.
    /// For normal user-defined entity visibility, see [`Visibility`].
    ///
    /// [`CheckVisibility`]: VisibilitySystems::CheckVisibility
    fn set_visible(&mut self);
}
impl<'a> SetViewVisibility for Mut<'a, ViewVisibility> {
    #[inline]
    fn set_visible(&mut self) {
        // `as_ref()` reads without triggering change detection; the mutable write
        // below only happens — and only marks the component changed — when the
        // entity was not already visible this frame.
        if !self.as_ref().get() {
            // Set the first bit (current vis) to true
            self.0 |= 1;
        }
    }
}
/// Use this component to opt-out of built-in frustum culling for entities, see
/// [`Frustum`].
///
/// It can be used for example:
/// - when a [`Mesh`] is updated but its [`Aabb`] is not, which might happen with animations,
/// - when using some light effects, like wanting a [`Mesh`] out of the [`Frustum`]
///   to appear in the reflection of a [`Mesh`] within.
///
/// [`check_visibility`] skips both the sphere and OBB frustum tests for entities
/// with this component, and [`calculate_bounds`] will not compute an [`Aabb`] for them.
#[derive(Debug, Component, Default, Reflect)]
#[reflect(Component, Default, Debug)]
pub struct NoFrustumCulling;
/// Collection of entities visible from the current view.
///
/// This component contains all entities which are visible from the currently
/// rendered view. The collection is updated automatically by the [`VisibilitySystems::CheckVisibility`]
/// system set. Renderers can use the equivalent `RenderVisibleEntities` to optimize rendering of
/// a particular view, to prevent drawing items not visible from that view.
///
/// This component is intended to be attached to the same entity as the [`Camera`] and
/// the [`Frustum`] defining the view.
#[derive(Clone, Component, Default, Debug, Reflect)]
#[reflect(Component, Default, Debug, Clone)]
pub struct VisibleEntities {
    /// Visible entities, grouped by visibility class (see [`VisibilityClass`]).
    #[reflect(ignore, clone)]
    pub entities: TypeIdMap<Vec<Entity>>,
}
impl VisibleEntities {
    /// Returns the visible entities recorded for `type_id`, or an empty slice
    /// when that visibility class has no entry.
    pub fn get(&self, type_id: TypeId) -> &[Entity] {
        self.entities
            .get(&type_id)
            .map_or(&[], |entities| entities.as_slice())
    }
    /// Returns the mutable entity list for `type_id`, inserting an empty one on first access.
    pub fn get_mut(&mut self, type_id: TypeId) -> &mut Vec<Entity> {
        self.entities.entry(type_id).or_insert_with(Vec::new)
    }
    /// Iterates over the visible entities of the given visibility class.
    pub fn iter(&self, type_id: TypeId) -> impl DoubleEndedIterator<Item = &Entity> {
        self.get(type_id).iter()
    }
    /// Number of visible entities recorded for the given visibility class.
    pub fn len(&self, type_id: TypeId) -> usize {
        self.get(type_id).len()
    }
    /// Whether no visible entities are recorded for the given visibility class.
    pub fn is_empty(&self, type_id: TypeId) -> bool {
        self.get(type_id).is_empty()
    }
    /// Clears the list for one visibility class, keeping its allocation.
    pub fn clear(&mut self, type_id: TypeId) {
        self.get_mut(type_id).clear();
    }
    /// Clears every class's list while retaining allocations for reuse.
    pub fn clear_all(&mut self) {
        // Don't just nuke the hash table; we want to reuse allocations.
        self.entities.values_mut().for_each(Vec::clear);
    }
    /// Records `entity` as visible for the given visibility class.
    pub fn push(&mut self, entity: Entity, type_id: TypeId) {
        self.get_mut(type_id).push(entity);
    }
}
/// Collection of mesh entities visible for 3D lighting.
///
/// This component contains all mesh entities visible from the current light view.
/// The collection is updated automatically by `bevy_pbr::SimulationLightSystems`.
#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)]
#[reflect(Component, Debug, Default, Clone)]
pub struct VisibleMeshEntities {
    #[reflect(ignore, clone)]
    pub entities: Vec<Entity>,
}
/// Visible mesh entities, stored as one collection per cubemap face.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Debug, Default, Clone)]
pub struct CubemapVisibleEntities {
    // One entry per cubemap face.
    #[reflect(ignore, clone)]
    data: [VisibleMeshEntities; 6],
}
impl CubemapVisibleEntities {
    /// Returns the visible entities for cubemap face `i`. Panics if `i >= 6`.
    pub fn get(&self, i: usize) -> &VisibleMeshEntities {
        &self.data[i]
    }
    /// Mutable access to the visible entities for cubemap face `i`. Panics if `i >= 6`.
    pub fn get_mut(&mut self, i: usize) -> &mut VisibleMeshEntities {
        &mut self.data[i]
    }
    /// Iterates over all six faces' visible entities.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &VisibleMeshEntities> {
        self.data.iter()
    }
    /// Mutably iterates over all six faces' visible entities.
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut VisibleMeshEntities> {
        self.data.iter_mut()
    }
}
/// Visible mesh entities for each cascade frustum, keyed by view entity.
#[derive(Component, Clone, Debug, Default, Reflect)]
#[reflect(Component, Default, Clone)]
pub struct CascadesVisibleEntities {
    /// Map of view entity to the visible entities for each cascade frustum.
    #[reflect(ignore, clone)]
    pub entities: EntityHashMap<Vec<VisibleMeshEntities>>,
}
/// System sets for the visibility pipeline, scheduled in [`PostUpdate`] by [`VisibilityPlugin`].
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)]
pub enum VisibilitySystems {
    /// Label for the [`calculate_bounds`], `calculate_bounds_2d` and `calculate_bounds_text2d` systems,
    /// calculating and inserting an [`Aabb`] to relevant entities.
    CalculateBounds,
    /// Label for [`update_frusta`] in [`CameraProjectionPlugin`](crate::CameraProjectionPlugin).
    UpdateFrusta,
    /// Label for the system propagating the [`InheritedVisibility`] in a
    /// [`ChildOf`] / [`Children`] hierarchy.
    VisibilityPropagate,
    /// Label for the [`check_visibility`] system updating [`ViewVisibility`]
    /// of each entity and the [`VisibleEntities`] of each view.
    ///
    /// System order ambiguities between systems in this set are ignored:
    /// the order of systems within this set is irrelevant, as [`check_visibility`]
    /// assumes that its operations are irreversible during the frame.
    CheckVisibility,
    /// Label for the `mark_newly_hidden_entities_invisible` system, which sets
    /// [`ViewVisibility`] to [`ViewVisibility::HIDDEN`] for entities that no
    /// view has marked as visible.
    MarkNewlyHiddenEntitiesInvisible,
}
/// Plugin wiring up the visibility pipeline in [`PostUpdate`]: AABB calculation,
/// visibility propagation, per-view visibility checks, and newly-hidden marking.
pub struct VisibilityPlugin;
impl Plugin for VisibilityPlugin {
    fn build(&self, app: &mut bevy_app::App) {
        use VisibilitySystems::*;
        // Meshes always participate in hierarchy visibility and visibility classification.
        app.register_required_components::<Mesh3d, Visibility>()
            .register_required_components::<Mesh3d, VisibilityClass>()
            .register_required_components::<Mesh2d, Visibility>()
            .register_required_components::<Mesh2d, VisibilityClass>()
            // Frusta and propagated visibility must be up to date (and transforms
            // final) before visibility is checked.
            .configure_sets(
                PostUpdate,
                (UpdateFrusta, VisibilityPropagate)
                    .before(CheckVisibility)
                    .after(TransformSystems::Propagate),
            )
            // Newly-hidden marking must observe every view's `set_visible` calls.
            .configure_sets(
                PostUpdate,
                MarkNewlyHiddenEntitiesInvisible.after(CheckVisibility),
            )
            .configure_sets(
                PostUpdate,
                (CalculateBounds)
                    .before(CheckVisibility)
                    .after(TransformSystems::Propagate)
                    .after(AssetEventSystems)
                    // Bounds-calculation systems are order-independent among themselves,
                    // hence the set is marked ambiguous with itself.
                    .ambiguous_with(CalculateBounds)
                    .ambiguous_with(mark_3d_meshes_as_changed_if_their_assets_changed),
            )
            .add_systems(
                PostUpdate,
                (
                    calculate_bounds.in_set(CalculateBounds),
                    (visibility_propagate_system, reset_view_visibility)
                        .in_set(VisibilityPropagate),
                    check_visibility.in_set(CheckVisibility),
                    mark_newly_hidden_entities_invisible.in_set(MarkNewlyHiddenEntitiesInvisible),
                ),
            );
        // Adding a mesh component records its type in the entity's `VisibilityClass`.
        app.world_mut()
            .register_component_hooks::<Mesh3d>()
            .on_add(add_visibility_class::<Mesh3d>);
        app.world_mut()
            .register_component_hooks::<Mesh2d>()
            .on_add(add_visibility_class::<Mesh2d>);
    }
}
/// Add this component to an entity to prevent its `AABB` from being automatically recomputed.
///
/// This is useful if entities are already spawned with a correct `Aabb` component, or you have
/// many entities and want to avoid the cost of table scans searching for entities that need to have
/// their AABB recomputed.
#[derive(Component, Clone, Debug, Default, Reflect)]
pub struct NoAutoAabb;
/// Computes and adds an [`Aabb`] component to entities with a
/// [`Mesh3d`] component and without a [`NoFrustumCulling`] component.
///
/// This system is used in system set [`VisibilitySystems::CalculateBounds`].
pub fn calculate_bounds(
    mut commands: Commands,
    meshes: Res<Assets<Mesh>>,
    // Entities that do not yet have an Aabb at all.
    new_aabb: Query<
        (Entity, &Mesh3d),
        (
            Without<Aabb>,
            Without<NoFrustumCulling>,
            Without<NoAutoAabb>,
        ),
    >,
    // Entities whose mesh handle or mesh asset changed, requiring a refreshed Aabb.
    mut update_aabb: Query<
        (&Mesh3d, &mut Aabb),
        (
            Or<(AssetChanged<Mesh3d>, Changed<Mesh3d>)>,
            Without<NoFrustumCulling>,
            Without<NoAutoAabb>,
        ),
    >,
) {
    // New Aabbs require a structural insert, so go through `Commands` serially.
    // A mesh asset that isn't loaded yet simply gets no Aabb this frame.
    for (entity, mesh_handle) in &new_aabb {
        if let Some(mesh) = meshes.get(mesh_handle)
            && let Some(aabb) = mesh.compute_aabb()
        {
            commands.entity(entity).try_insert(aabb);
        }
    }
    // Existing Aabbs can be overwritten in place, in parallel.
    update_aabb
        .par_iter_mut()
        .for_each(|(mesh_handle, mut old_aabb)| {
            if let Some(aabb) = meshes.get(mesh_handle).and_then(MeshAabb::compute_aabb) {
                *old_aabb = aabb;
            }
        });
}
/// Updates [`Frustum`].
///
/// This system is used in [`CameraProjectionPlugin`](crate::CameraProjectionPlugin).
pub fn update_frusta(
    mut views: Query<
        (&GlobalTransform, &Projection, &mut Frustum),
        Or<(Changed<GlobalTransform>, Changed<Projection>)>,
    >,
) {
    // Recompute the frustum for every view whose transform or projection changed.
    views
        .iter_mut()
        .for_each(|(transform, projection, mut frustum)| {
            *frustum = projection.compute_frustum(transform);
        });
}
/// Recomputes [`InheritedVisibility`] for entities whose [`Visibility`] or [`ChildOf`]
/// changed this frame, recursing into descendants via [`propagate_recursive`].
fn visibility_propagate_system(
    changed: Query<
        (Entity, &Visibility, Option<&ChildOf>, Option<&Children>),
        (
            With<InheritedVisibility>,
            Or<(Changed<Visibility>, Changed<ChildOf>)>,
        ),
    >,
    mut visibility_query: Query<(&Visibility, &mut InheritedVisibility)>,
    children_query: Query<&Children, (With<Visibility>, With<InheritedVisibility>)>,
) {
    for (entity, visibility, child_of, children) in &changed {
        let is_visible = match visibility {
            Visibility::Visible => true,
            Visibility::Hidden => false,
            // fall back to true if no parent is found or parent lacks components
            Visibility::Inherited => child_of
                .and_then(|c| visibility_query.get(c.parent()).ok())
                .is_none_or(|(_, x)| x.get()),
        };
        let (_, mut inherited_visibility) = visibility_query
            .get_mut(entity)
            .expect("With<InheritedVisibility> ensures this query will return a value");
        // Only update the visibility if it has changed.
        // This will also prevent the visibility from propagating multiple times in the same frame
        // if this entity's visibility has been updated recursively by its parent.
        if inherited_visibility.get() != is_visible {
            inherited_visibility.0 = is_visible;
            // Recursively update the visibility of each child.
            for &child in children.into_iter().flatten() {
                let _ =
                    propagate_recursive(is_visible, child, &mut visibility_query, &children_query);
            }
        }
    }
}
/// Applies `parent_is_visible` to `entity` and, when the computed value actually changes,
/// to its descendants; branches whose value is already correct are not descended into.
fn propagate_recursive(
    parent_is_visible: bool,
    entity: Entity,
    visibility_query: &mut Query<(&Visibility, &mut InheritedVisibility)>,
    children_query: &Query<&Children, (With<Visibility>, With<InheritedVisibility>)>,
    // BLOCKED: https://github.com/rust-lang/rust/issues/31436
    // We use a result here to use the `?` operator. Ideally we'd use a try block instead
) -> Result<(), ()> {
    // Get the visibility components for the current entity.
    // If the entity does not have the required components, just return early.
    let (visibility, mut inherited_visibility) = visibility_query.get_mut(entity).map_err(drop)?;
    let is_visible = match visibility {
        Visibility::Visible => true,
        Visibility::Hidden => false,
        Visibility::Inherited => parent_is_visible,
    };
    // Only update the visibility if it has changed.
    if inherited_visibility.get() != is_visible {
        inherited_visibility.0 = is_visible;
        // Recursively update the visibility of each child.
        for &child in children_query.get(entity).ok().into_iter().flatten() {
            let _ = propagate_recursive(is_visible, child, visibility_query, children_query);
        }
    }
    Ok(())
}
/// Tracks entities that were visible last frame, used to granularly update [`ViewVisibility`] this
/// frame without spurious change detection.
///
/// For every entity this calls [`ViewVisibility::update`], moving the current visibility bit
/// into the previous-frame bit; `bypass_change_detection` ensures this per-frame bookkeeping
/// never marks the component as changed by itself.
fn reset_view_visibility(mut query: Query<&mut ViewVisibility>) {
    query.par_iter_mut().for_each(|mut view_visibility| {
        view_visibility.bypass_change_detection().update();
    });
}
/// System updating the visibility of entities each frame.
///
/// The system is part of the [`VisibilitySystems::CheckVisibility`] set. Each
/// frame, it updates the [`ViewVisibility`] of all entities, and for each view
/// also compute the [`VisibleEntities`] for that view.
///
/// To ensure that an entity is checked for visibility, make sure that it has a
/// [`VisibilityClass`] component and that that component is nonempty.
pub fn check_visibility(
    // Per-thread scratch queues, keyed by visibility class, reused across frames.
    mut thread_queues: Local<Parallel<TypeIdMap<Vec<Entity>>>>,
    mut view_query: Query<(
        Entity,
        &mut VisibleEntities,
        &Frustum,
        Option<&RenderLayers>,
        &Camera,
        Has<NoCpuCulling>,
    )>,
    mut visible_aabb_query: Query<(
        Entity,
        &InheritedVisibility,
        &mut ViewVisibility,
        Option<&VisibilityClass>,
        Option<&RenderLayers>,
        Option<&Aabb>,
        &GlobalTransform,
        Has<NoFrustumCulling>,
        Has<VisibilityRange>,
    )>,
    visible_entity_ranges: Option<Res<VisibleEntityRanges>>,
) {
    let visible_entity_ranges = visible_entity_ranges.as_deref();
    // Every entity is tested against every active view.
    for (view, mut visible_entities, frustum, maybe_view_mask, camera, no_cpu_culling) in
        &mut view_query
    {
        if !camera.is_active {
            continue;
        }
        // A view without a mask uses the default layers (layer 0).
        let view_mask = maybe_view_mask.unwrap_or_default();
        visible_aabb_query.par_iter_mut().for_each_init(
            || thread_queues.borrow_local_mut(),
            |queue, query_item| {
                let (
                    entity,
                    inherited_visibility,
                    mut view_visibility,
                    visibility_class,
                    maybe_entity_mask,
                    maybe_model_aabb,
                    transform,
                    no_frustum_culling,
                    has_visibility_range,
                ) = query_item;
                // Skip computing visibility for entities that are configured to be hidden.
                // ViewVisibility has already been reset in `reset_view_visibility`.
                if !inherited_visibility.get() {
                    return;
                }
                // Entities without a mask use the default layers (layer 0).
                let entity_mask = maybe_entity_mask.unwrap_or_default();
                if !view_mask.intersects(entity_mask) {
                    return;
                }
                // If outside of the visibility range, cull.
                if has_visibility_range
                    && visible_entity_ranges.is_some_and(|visible_entity_ranges| {
                        !visible_entity_ranges.entity_is_in_range_of_view(entity, view)
                    })
                {
                    return;
                }
                // If we have an aabb, do frustum culling
                if !no_frustum_culling
                    && !no_cpu_culling
                    && let Some(model_aabb) = maybe_model_aabb
                {
                    let world_from_local = transform.affine();
                    let model_sphere = Sphere {
                        center: world_from_local.transform_point3a(model_aabb.center),
                        radius: transform.radius_vec3a(model_aabb.half_extents),
                    };
                    // Do quick sphere-based frustum culling
                    if !frustum.intersects_sphere(&model_sphere, false) {
                        return;
                    }
                    // Do aabb-based frustum culling
                    if !frustum.intersects_obb(model_aabb, &world_from_local, true, false) {
                        return;
                    }
                }
                // Passed all culling tests for this view: mark visible (idempotent,
                // only triggers change detection the first time per frame).
                view_visibility.set_visible();
                // The visibility class may be None here because AABB gizmos can be enabled via
                // config without a renderable component being added to the entity. This workaround
                // allows view visibility to be set for entities without a renderable component, but
                // still need to render gizmos.
                if let Some(visibility_class) = visibility_class {
                    // Add the entity to the queue for all visibility classes the entity is in.
                    for visibility_class_id in visibility_class.iter() {
                        queue.entry(*visibility_class_id).or_default().push(entity);
                    }
                }
            },
        );
        // Rebuild this view's lists from scratch (allocations are retained).
        visible_entities.clear_all();
        // Drain all the thread queues into the `visible_entities` list.
        for class_queues in thread_queues.iter_mut() {
            for (class, entities) in class_queues {
                visible_entities.get_mut(*class).append(entities);
            }
        }
    }
}
/// Final stage of the visibility pipeline: every entity that was visible last
/// frame but has not been marked visible this frame gets flipped to
/// [`ViewVisibility::HIDDEN`].
///
/// Doing this in a dedicated pass (instead of unconditionally writing the value
/// every frame) ensures change detection on [`ViewVisibility`] only fires when
/// the visibility actually changed.
fn mark_newly_hidden_entities_invisible(mut view_visibilities: Query<&mut ViewVisibility>) {
    view_visibilities.par_iter_mut().for_each(|mut visibility| {
        // Inspect through `as_ref` first so unchanged entities don't trip
        // `DerefMut`-based change detection.
        let newly_hidden = visibility.as_ref().was_visible_now_hidden();
        if newly_hidden {
            *visibility = ViewVisibility::HIDDEN;
        }
    });
}
/// A generic `on_add` component hook that appends the appropriate
/// [`VisibilityClass`] entry for a renderable component.
///
/// Handy when creating custom renderable components. Attach it to your
/// component like this:
///
/// ```ignore
/// #[derive(Component)]
/// #[component(on_add = add_visibility_class::<MyComponent>)]
/// struct MyComponent {
///     ...
/// }
/// ```
pub fn add_visibility_class<C>(
    mut world: DeferredWorld<'_>,
    HookContext { entity, .. }: HookContext,
) where
    C: 'static,
{
    // Entities without a `VisibilityClass` are left untouched.
    let Some(mut visibility_class) = world.get_mut::<VisibilityClass>(entity) else {
        return;
    };
    visibility_class.push(TypeId::of::<C>());
}
#[cfg(test)]
mod test {
use super::*;
use bevy_app::prelude::*;
#[test]
fn visibility_propagation() {
    let mut app = App::new();
    app.add_systems(Update, visibility_propagate_system);

    // Tree 1: hidden root with one inheriting and one locally-hidden child.
    let root1 = app.world_mut().spawn(Visibility::Hidden).id();
    let root1_child1 = app.world_mut().spawn(Visibility::default()).id();
    let root1_child2 = app.world_mut().spawn(Visibility::Hidden).id();
    let root1_child1_grandchild1 = app.world_mut().spawn(Visibility::default()).id();
    let root1_child2_grandchild1 = app.world_mut().spawn(Visibility::default()).id();

    // Tree 2: visible root with the same shape.
    let root2 = app.world_mut().spawn(Visibility::default()).id();
    let root2_child1 = app.world_mut().spawn(Visibility::default()).id();
    let root2_child2 = app.world_mut().spawn(Visibility::Hidden).id();
    let root2_child1_grandchild1 = app.world_mut().spawn(Visibility::default()).id();
    let root2_child2_grandchild1 = app.world_mut().spawn(Visibility::default()).id();

    // Wire up both hierarchies.
    for (parent, children) in [
        (root1, &[root1_child1, root1_child2][..]),
        (root1_child1, &[root1_child1_grandchild1][..]),
        (root1_child2, &[root1_child2_grandchild1][..]),
        (root2, &[root2_child1, root2_child2][..]),
        (root2_child1, &[root2_child1_grandchild1][..]),
        (root2_child2, &[root2_child2_grandchild1][..]),
    ] {
        app.world_mut().entity_mut(parent).add_children(children);
    }

    app.update();

    let is_visible = |entity: Entity| {
        app.world()
            .entity(entity)
            .get::<InheritedVisibility>()
            .unwrap()
            .get()
    };

    // Everything under a hidden root is hidden.
    for entity in [
        root1,
        root1_child1,
        root1_child2,
        root1_child1_grandchild1,
        root1_child2_grandchild1,
    ] {
        assert!(
            !is_visible(entity),
            "invisibility propagates down tree from root"
        );
    }

    // A visible root propagates visibility through inheriting descendants...
    for entity in [root2, root2_child1, root2_child1_grandchild1] {
        assert!(
            is_visible(entity),
            "visibility propagates down tree from root"
        );
    }
    // ...while a locally-hidden child stays hidden and hides its own subtree.
    assert!(
        !is_visible(root2_child2),
        "visibility propagates down tree from root, but local invisibility is preserved"
    );
    assert!(
        !is_visible(root2_child2_grandchild1),
        "child's invisibility propagates down to grandchild"
    );
}
#[test]
fn test_visibility_propagation_on_parent_change() {
// Setup the world and schedule
let mut app = App::new();
app.add_systems(Update, visibility_propagate_system);
// Create entities with visibility and hierarchy
let parent1 = app.world_mut().spawn((Visibility::Hidden,)).id();
let parent2 = app.world_mut().spawn((Visibility::Visible,)).id();
let child1 = app.world_mut().spawn((Visibility::Inherited,)).id();
let child2 = app.world_mut().spawn((Visibility::Inherited,)).id();
// Build hierarchy
app.world_mut()
.entity_mut(parent1)
.add_children(&[child1, child2]);
// Run the system initially to set up visibility
app.update();
// Change parent visibility to Hidden
app.world_mut()
.entity_mut(parent2)
.insert(Visibility::Visible);
// Simulate a change in the parent component
app.world_mut().entity_mut(child2).insert(ChildOf(parent2)); // example of changing parent
// Run the system again to propagate changes
app.update();
let is_visible = |e: Entity| {
app.world()
.entity(e)
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/lib.rs | crates/bevy_core_pipeline/src/lib.rs | #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")]
#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://bevy.org/assets/icon.png",
html_favicon_url = "https://bevy.org/assets/icon.png"
)]
pub mod blit;
pub mod core_2d;
pub mod core_3d;
pub mod deferred;
pub mod fullscreen_material;
pub mod mip_generation;
pub mod oit;
pub mod prepass;
pub mod tonemapping;
pub mod upscaling;
pub use fullscreen_vertex_shader::FullscreenShader;
pub use skybox::Skybox;
mod fullscreen_vertex_shader;
mod skybox;
use crate::{
blit::BlitPlugin, core_2d::Core2dPlugin, core_3d::Core3dPlugin,
deferred::copy_lighting_id::CopyDeferredLightingIdPlugin, mip_generation::MipGenerationPlugin,
tonemapping::TonemappingPlugin, upscaling::UpscalingPlugin,
};
use bevy_app::{App, Plugin};
use bevy_asset::embedded_asset;
use bevy_render::RenderApp;
use oit::OrderIndependentTransparencyPlugin;
/// Registers the core render-pipeline plugins (2D/3D passes, deferred lighting-id
/// copy, blit, tonemapping, upscaling, OIT, mip generation) and embeds the shared
/// fullscreen vertex shader.
#[derive(Default)]
pub struct CorePipelinePlugin;

impl Plugin for CorePipelinePlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "fullscreen_vertex_shader/fullscreen.wgsl");

        // Registration order matches the original two-call sequence.
        app.add_plugins((
            Core2dPlugin,
            Core3dPlugin,
            CopyDeferredLightingIdPlugin,
            BlitPlugin,
            TonemappingPlugin,
            UpscalingPlugin,
            OrderIndependentTransparencyPlugin,
            MipGenerationPlugin,
        ));

        // The render sub-app may be absent (e.g. headless); skip render-world
        // setup in that case.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.init_resource::<FullscreenShader>();
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/fullscreen_material.rs | crates/bevy_core_pipeline/src/fullscreen_material.rs | //! This is mostly a pluginified version of the `custom_post_processing` example
//!
//! The plugin will create a new node that runs a fullscreen triangle.
//!
//! Users need to use the [`FullscreenMaterial`] trait to define the parameters like the graph label or the graph ordering.
use core::any::type_name;
use core::marker::PhantomData;
use crate::{core_2d::graph::Core2d, core_3d::graph::Core3d, FullscreenShader};
use bevy_app::{App, Plugin};
use bevy_asset::AssetServer;
use bevy_camera::{Camera2d, Camera3d};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{Added, Has, QueryItem},
resource::Resource,
system::{Commands, Res},
world::{FromWorld, World},
};
use bevy_image::BevyDefault;
use bevy_render::{
extract_component::{
ComponentUniforms, DynamicUniformIndex, ExtractComponent, ExtractComponentPlugin,
UniformComponentPlugin,
},
render_graph::{
InternedRenderLabel, InternedRenderSubGraph, NodeRunError, RenderGraph, RenderGraphContext,
RenderGraphError, RenderGraphExt, RenderLabel, ViewNode, ViewNodeRunner,
},
render_resource::{
binding_types::{sampler, texture_2d, uniform_buffer},
encase::internal::WriteInto,
BindGroupEntries, BindGroupLayoutDescriptor, BindGroupLayoutEntries,
CachedRenderPipelineId, ColorTargetState, ColorWrites, FragmentState, Operations,
PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, RenderPipelineDescriptor,
Sampler, SamplerBindingType, SamplerDescriptor, ShaderStages, ShaderType, TextureFormat,
TextureSampleType,
},
renderer::{RenderContext, RenderDevice},
view::ViewTarget,
ExtractSchedule, MainWorld, RenderApp, RenderStartup,
};
use bevy_shader::ShaderRef;
use bevy_utils::default;
use tracing::warn;
/// Registers everything a [`FullscreenMaterial`] needs: extraction and uniform
/// upload of the material component, pipeline initialization, and a render-graph
/// node that draws the fullscreen pass.
#[derive(Default)]
pub struct FullscreenMaterialPlugin<T: FullscreenMaterial> {
    _marker: PhantomData<T>,
}

impl<T: FullscreenMaterial> Plugin for FullscreenMaterialPlugin<T> {
    fn build(&self, app: &mut App) {
        // Mirror the material component into the render world and upload it as
        // a GPU uniform each frame.
        app.add_plugins((
            ExtractComponentPlugin::<T>::default(),
            UniformComponentPlugin::<T>::default(),
        ));
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app.add_systems(RenderStartup, init_pipeline::<T>);
        if let Some(sub_graph) = T::sub_graph() {
            // The material named a sub-graph explicitly: insert the node and
            // wire up its edges right away.
            render_app.add_render_graph_node::<ViewNodeRunner<FullscreenMaterialNode<T>>>(
                sub_graph,
                T::node_label(),
            );
            // We can't use add_render_graph_edges because it doesn't accept a Vec<RenderLabel>
            if let Some(mut render_graph) = render_app.world_mut().get_resource_mut::<RenderGraph>()
                && let Some(graph) = render_graph.get_sub_graph_mut(sub_graph)
            {
                // Turn each consecutive pair of labels into a graph edge.
                for window in T::node_edges().windows(2) {
                    let [a, b] = window else {
                        break;
                    };
                    let Err(err) = graph.try_add_node_edge(*a, *b) else {
                        continue;
                    };
                    match err {
                        // Already existing edges are very easy to produce with this api
                        // and shouldn't cause a panic
                        RenderGraphError::EdgeAlreadyExists(_) => {}
                        _ => panic!("{err:?}"),
                    }
                }
            } else {
                warn!("Failed to add edges for FullscreenMaterial");
            };
        } else {
            // If there was no sub_graph specified we try to determine the graph based on the camera
            // it gets added to.
            render_app.add_systems(ExtractSchedule, extract_on_add::<T>);
        }
    }
}
/// Extraction-time fallback used when [`FullscreenMaterial::sub_graph`] returns `None`:
/// for every newly-added material, pick the `Core3d` or `Core2d` sub-graph based on
/// which camera component the entity carries, then insert the node and its edges.
fn extract_on_add<T: FullscreenMaterial>(world: &mut World) {
    world.resource_scope::<MainWorld, ()>(|world, mut main_world| {
        // Extract the material from the main world
        let mut query =
            main_world.query_filtered::<(Entity, Has<Camera3d>, Has<Camera2d>), Added<T>>();
        // Create the node and add it to the render graph
        world.resource_scope::<RenderGraph, ()>(|world, mut render_graph| {
            for (_entity, is_3d, is_2d) in query.iter(&main_world) {
                // Prefer 3d; fall back to 2d; otherwise the component is on a
                // non-camera entity and we skip it.
                let graph = if is_3d && let Some(graph) = render_graph.get_sub_graph_mut(Core3d) {
                    graph
                } else if is_2d && let Some(graph) = render_graph.get_sub_graph_mut(Core2d) {
                    graph
                } else {
                    warn!("FullscreenMaterial was added to an entity that isn't a camera");
                    continue;
                };
                let node = ViewNodeRunner::<FullscreenMaterialNode<T>>::from_world(world);
                graph.add_node(T::node_label(), node);
                // Turn each consecutive pair of labels into a graph edge.
                for window in T::node_edges().windows(2) {
                    let [a, b] = window else {
                        break;
                    };
                    let Err(err) = graph.try_add_node_edge(*a, *b) else {
                        continue;
                    };
                    match err {
                        // Already existing edges are very easy to produce with this api
                        // and shouldn't cause a panic
                        RenderGraphError::EdgeAlreadyExists(_) => {}
                        _ => panic!("{err:?}"),
                    }
                }
            }
        });
    });
}
/// A trait to define a material that will render to the entire screen using a fullscreen triangle
pub trait FullscreenMaterial:
    Component + ExtractComponent + Clone + Copy + ShaderType + WriteInto + Default
{
    /// The shader that will run on the entire screen using a fullscreen triangle
    fn fragment_shader() -> ShaderRef;

    /// The list of `node_edges`. In 3d, for a post processing effect, it would look like this:
    ///
    /// ```compile_fail
    /// # use bevy_core_pipeline::core_3d::graph::Node3d;
    /// # use bevy_render::render_graph::RenderLabel;
    /// vec![
    ///     Node3d::Tonemapping.intern(),
    ///     // Self::sub_graph().intern(), // <--- your own label here
    ///     Node3d::EndMainPassPostProcessing.intern(),
    /// ]
    /// ```
    ///
    /// This tells the render graph to run your fullscreen effect after the tonemapping pass but
    /// before the end of post processing. For 2d, it would be the same but using Node2d. You can
    /// specify any edges you want but make sure to include your own label.
    fn node_edges() -> Vec<InternedRenderLabel>;

    /// The [`bevy_render::render_graph::RenderSubGraph`] the effect will run in
    ///
    /// For 2d this is generally [`crate::core_2d::graph::Core2d`] and for 3d it's
    /// [`crate::core_3d::graph::Core3d`]. Returning `None` (the default) defers the
    /// choice to extraction time, based on the camera the material is added to.
    fn sub_graph() -> Option<InternedRenderSubGraph> {
        None
    }

    /// The label used to represent the render node that will run the pass
    fn node_label() -> impl RenderLabel {
        // Default: one label per material type, keyed on the type name.
        FullscreenMaterialLabel(type_name::<Self>())
    }
}
/// Default [`RenderLabel`] for a [`FullscreenMaterial`]'s graph node, keyed on the
/// material's type name so each material type gets a distinct label.
#[derive(Debug, Hash, PartialEq, Eq, Clone)]
struct FullscreenMaterialLabel(&'static str);

// The previous `where Self: 'static + Send + Sync + Clone + Eq + Debug + Hash`
// clause was macro-expansion residue: every bound is already guaranteed by the
// derives above, so the clause (and the UFCS `Clone::clone` call) is dropped.
impl RenderLabel for FullscreenMaterialLabel {
    fn dyn_clone(&self) -> Box<dyn RenderLabel> {
        Box::new(self.clone())
    }
}
/// Render-world resource holding everything needed to draw a fullscreen
/// material pass.
#[derive(Resource)]
struct FullscreenMaterialPipeline {
    /// Bind group layout: screen texture, sampler, and the material uniform.
    layout: BindGroupLayoutDescriptor,
    /// Sampler used to read the source screen texture.
    sampler: Sampler,
    /// Pipeline targeting the default (non-HDR) view-target format.
    pipeline_id: CachedRenderPipelineId,
    /// Pipeline variant targeting the HDR view-target format.
    pipeline_id_hdr: CachedRenderPipelineId,
}
/// `RenderStartup` system: builds the bind group layout, sampler, and both
/// render pipelines (default format and HDR variant) for the material `T`,
/// and stores them in [`FullscreenMaterialPipeline`].
///
/// Panics via `unimplemented!` if `T::fragment_shader()` returns
/// `ShaderRef::Default`, which has no meaningful fallback here.
fn init_pipeline<T: FullscreenMaterial>(
    mut commands: Commands,
    render_device: Res<RenderDevice>,
    asset_server: Res<AssetServer>,
    fullscreen_shader: Res<FullscreenShader>,
    pipeline_cache: Res<PipelineCache>,
) {
    let layout = BindGroupLayoutDescriptor::new(
        "post_process_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                // The screen texture
                texture_2d(TextureSampleType::Float { filterable: true }),
                // The sampler that will be used to sample the screen texture
                sampler(SamplerBindingType::Filtering),
                // We use a uniform buffer so users can pass some data to the effect
                // Eventually we should just use a separate bind group for user data
                uniform_buffer::<T>(true),
            ),
        ),
    );
    let sampler = render_device.create_sampler(&SamplerDescriptor::default());
    let shader = match T::fragment_shader() {
        ShaderRef::Default => {
            // TODO not sure what an actual fallback should be. An empty shader or output a solid
            // color to indicate a missing shader?
            unimplemented!(
                "FullscreenMaterial::fragment_shader() must not return ShaderRef::Default"
            )
        }
        ShaderRef::Handle(handle) => handle,
        ShaderRef::Path(path) => asset_server.load(path),
    };
    // Setup a fullscreen triangle for the vertex state.
    let vertex_state = fullscreen_shader.to_vertex_state();
    let mut desc = RenderPipelineDescriptor {
        label: Some("post_process_pipeline".into()),
        layout: vec![layout.clone()],
        vertex: vertex_state,
        fragment: Some(FragmentState {
            shader,
            targets: vec![Some(ColorTargetState {
                format: TextureFormat::bevy_default(),
                blend: None,
                write_mask: ColorWrites::ALL,
            })],
            ..default()
        }),
        ..default()
    };
    let pipeline_id = pipeline_cache.queue_render_pipeline(desc.clone());
    // Queue a second pipeline that is identical except the target uses the HDR
    // texture format, so both kinds of view target can be drawn to.
    desc.fragment.as_mut().unwrap().targets[0]
        .as_mut()
        .unwrap()
        .format = ViewTarget::TEXTURE_FORMAT_HDR;
    let pipeline_id_hdr = pipeline_cache.queue_render_pipeline(desc);
    commands.insert_resource(FullscreenMaterialPipeline {
        layout,
        sampler,
        pipeline_id,
        pipeline_id_hdr,
    });
}
/// Render-graph node that draws one fullscreen triangle with the material's
/// fragment shader, reading the current view texture and writing the result
/// back to the view target.
#[derive(Default)]
struct FullscreenMaterialNode<T: FullscreenMaterial> {
    _marker: PhantomData<T>,
}

impl<T: FullscreenMaterial> ViewNode for FullscreenMaterialNode<T> {
    // TODO we should expose the depth buffer and the gbuffer if using deferred
    type ViewQuery = (&'static ViewTarget, &'static DynamicUniformIndex<T>);

    fn run<'w>(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (view_target, settings_index): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let fullscreen_pipeline = world.resource::<FullscreenMaterialPipeline>();
        let pipeline_cache = world.resource::<PipelineCache>();
        // Pick the pipeline variant matching the view target's format.
        let pipeline_id = if view_target.is_hdr() {
            fullscreen_pipeline.pipeline_id_hdr
        } else {
            fullscreen_pipeline.pipeline_id
        };
        // The pipeline may still be compiling; skip the pass until it's ready.
        let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline_id) else {
            return Ok(());
        };
        let data_uniforms = world.resource::<ComponentUniforms<T>>();
        // No uniform data uploaded yet — nothing to bind, so skip this frame.
        let Some(settings_binding) = data_uniforms.uniforms().binding() else {
            return Ok(());
        };
        // Read from the post-process source texture, render into the destination.
        let post_process = view_target.post_process_write();
        let bind_group = render_context.render_device().create_bind_group(
            "post_process_bind_group",
            &pipeline_cache.get_bind_group_layout(&fullscreen_pipeline.layout),
            &BindGroupEntries::sequential((
                post_process.source,
                &fullscreen_pipeline.sampler,
                settings_binding.clone(),
            )),
        );
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("post_process_pass"),
            color_attachments: &[Some(RenderPassColorAttachment {
                view: post_process.destination,
                depth_slice: None,
                resolve_target: None,
                ops: Operations::default(),
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        render_pass.set_render_pipeline(pipeline);
        render_pass.set_bind_group(0, &bind_group, &[settings_index.index()]);
        // A single triangle covering the whole screen.
        render_pass.draw(0..3, 0..1);
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/prepass/node.rs | crates/bevy_core_pipeline/src/prepass/node.rs | use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
experimental::occlusion_culling::OcclusionCulling,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::{TrackedRenderPass, ViewBinnedRenderPhases},
render_resource::{CommandEncoderDescriptor, PipelineCache, RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, NoIndirectDrawing, ViewDepthTexture, ViewUniformOffset},
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
use crate::skybox::prepass::{RenderSkyboxPrepassPipeline, SkyboxPrepassBindGroup};
use super::{
AlphaMask3dPrepass, DeferredPrepass, Opaque3dPrepass, PreviousViewUniformOffset,
ViewPrepassTextures,
};
/// The prepass phase that draws the meshes which were visible last frame.
///
/// When occlusion culling isn't in use, this phase simply draws every mesh.
///
/// As with the other prepass nodes, it's placed before the main pass in the
/// render graph.
#[derive(Default)]
pub struct EarlyPrepassNode;

impl ViewNode for EarlyPrepassNode {
    type ViewQuery = <LatePrepassNode as ViewNode>::ViewQuery;

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        query: QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // All the real work is shared with the late prepass; only the label differs.
        run_prepass(graph, render_context, query, world, "early prepass")
    }
}
/// The prepass phase that runs after occlusion culling against the meshes that
/// were visible last frame.
///
/// When occlusion culling isn't in use, this node does nothing.
///
/// As with the other prepass nodes, it's placed before the main pass in the
/// render graph.
#[derive(Default)]
pub struct LatePrepassNode;

impl ViewNode for LatePrepassNode {
    type ViewQuery = (
        (
            &'static ExtractedCamera,
            &'static ExtractedView,
            &'static ViewDepthTexture,
            &'static ViewPrepassTextures,
            &'static ViewUniformOffset,
        ),
        (
            Option<&'static DeferredPrepass>,
            Option<&'static RenderSkyboxPrepassPipeline>,
            Option<&'static SkyboxPrepassBindGroup>,
            Option<&'static PreviousViewUniformOffset>,
            Option<&'static MainPassResolutionOverride>,
        ),
        (
            Has<OcclusionCulling>,
            Has<NoIndirectDrawing>,
            Has<DeferredPrepass>,
        ),
    );

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        query: QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // A late prepass is only meaningful with occlusion culling plus
        // indirect drawing; in any other configuration this node is a no-op.
        let (has_occlusion_culling, has_no_indirect_drawing, _) = query.2;
        if has_occlusion_culling && !has_no_indirect_drawing {
            run_prepass(graph, render_context, query, world, "late prepass")
        } else {
            Ok(())
        }
    }
}
/// Runs a prepass that draws all meshes to the depth buffer, and possibly
/// normal and motion vector buffers as well.
///
/// If occlusion culling isn't in use, and a prepass is enabled, then there's
/// only one prepass. If occlusion culling is in use, then any prepass is split
/// into two: an *early* prepass and a *late* prepass. The early prepass draws
/// what was visible last frame, and the late prepass performs occlusion culling
/// against a conservative hierarchical Z buffer before drawing unoccluded
/// meshes.
fn run_prepass<'w>(
    graph: &mut RenderGraphContext,
    render_context: &mut RenderContext<'w>,
    (
        (camera, extracted_view, view_depth_texture, view_prepass_textures, view_uniform_offset),
        (
            deferred_prepass,
            skybox_prepass_pipeline,
            skybox_prepass_bind_group,
            view_prev_uniform_offset,
            resolution_override,
        ),
        (_, _, has_deferred),
    ): QueryItem<'w, '_, <LatePrepassNode as ViewNode>::ViewQuery>,
    world: &'w World,
    label: &'static str,
) -> Result<(), NodeRunError> {
    // If we're using deferred rendering, there will be a deferred prepass
    // instead of this one. Just bail out so we don't have to bother looking at
    // the empty bins.
    if has_deferred {
        return Ok(());
    }
    // Both phase resources and this view's phases must exist; otherwise there
    // is nothing to draw for this view.
    let (Some(opaque_prepass_phases), Some(alpha_mask_prepass_phases)) = (
        world.get_resource::<ViewBinnedRenderPhases<Opaque3dPrepass>>(),
        world.get_resource::<ViewBinnedRenderPhases<AlphaMask3dPrepass>>(),
    ) else {
        return Ok(());
    };
    let (Some(opaque_prepass_phase), Some(alpha_mask_prepass_phase)) = (
        opaque_prepass_phases.get(&extracted_view.retained_view_entity),
        alpha_mask_prepass_phases.get(&extracted_view.retained_view_entity),
    ) else {
        return Ok(());
    };
    let diagnostics = render_context.diagnostic_recorder();
    // Optional normal / motion-vector attachments; deferred slots stay empty.
    let mut color_attachments = vec![
        view_prepass_textures
            .normal
            .as_ref()
            .map(|normals_texture| normals_texture.get_attachment()),
        view_prepass_textures
            .motion_vectors
            .as_ref()
            .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()),
        // Use None in place of deferred attachments
        None,
        None,
    ];
    // If all color attachments are none: clear the color attachment list so that no fragment shader is required
    if color_attachments.iter().all(Option::is_none) {
        color_attachments.clear();
    }
    let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store));
    let view_entity = graph.view_entity();
    // Encode the pass on a worker task so command-buffer generation can run in
    // parallel with other nodes.
    render_context.add_command_buffer_generation_task(move |render_device| {
        #[cfg(feature = "trace")]
        let _prepass_span = info_span!("prepass").entered();
        // Command encoder setup
        let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("prepass_command_encoder"),
        });
        // Render pass setup
        let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor {
            label: Some(label),
            color_attachments: &color_attachments,
            depth_stencil_attachment,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let mut render_pass = TrackedRenderPass::new(&render_device, render_pass);
        let pass_span = diagnostics.pass_span(&mut render_pass, label);
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_pass.set_camera_viewport(&viewport);
        }
        // Opaque draws
        if !opaque_prepass_phase.is_empty() {
            #[cfg(feature = "trace")]
            let _opaque_prepass_span = info_span!("opaque_prepass").entered();
            if let Err(err) = opaque_prepass_phase.render(&mut render_pass, world, view_entity) {
                error!("Error encountered while rendering the opaque prepass phase {err:?}");
            }
        }
        // Alpha masked draws
        if !alpha_mask_prepass_phase.is_empty() {
            #[cfg(feature = "trace")]
            let _alpha_mask_prepass_span = info_span!("alpha_mask_prepass").entered();
            if let Err(err) = alpha_mask_prepass_phase.render(&mut render_pass, world, view_entity)
            {
                error!("Error encountered while rendering the alpha mask prepass phase {err:?}");
            }
        }
        // Skybox draw using a fullscreen triangle
        if let (
            Some(skybox_prepass_pipeline),
            Some(skybox_prepass_bind_group),
            Some(view_prev_uniform_offset),
        ) = (
            skybox_prepass_pipeline,
            skybox_prepass_bind_group,
            view_prev_uniform_offset,
        ) {
            let pipeline_cache = world.resource::<PipelineCache>();
            if let Some(pipeline) = pipeline_cache.get_render_pipeline(skybox_prepass_pipeline.0) {
                render_pass.set_render_pipeline(pipeline);
                render_pass.set_bind_group(
                    0,
                    &skybox_prepass_bind_group.0,
                    &[view_uniform_offset.offset, view_prev_uniform_offset.offset],
                );
                render_pass.draw(0..3, 0..1);
            }
        }
        pass_span.end(&mut render_pass);
        drop(render_pass);
        // After rendering to the view depth texture, copy it to the prepass
        // depth texture — unless the deferred prepass is going to do it instead.
        if deferred_prepass.is_none()
            && let Some(prepass_depth_texture) = &view_prepass_textures.depth
        {
            command_encoder.copy_texture_to_texture(
                view_depth_texture.texture.as_image_copy(),
                prepass_depth_texture.texture.texture.as_image_copy(),
                view_prepass_textures.size,
            );
        }
        command_encoder.finish()
    });
    Ok(())
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/prepass/mod.rs | crates/bevy_core_pipeline/src/prepass/mod.rs | //! Run a prepass before the main pass to generate depth, normals, and/or motion vectors textures, sometimes called a thin g-buffer.
//! These textures are useful for various screen-space effects and reducing overdraw in the main pass.
//!
//! The prepass only runs for opaque meshes or meshes with an alpha mask. Transparent meshes are ignored.
//!
//! To enable the prepass, you need to add a prepass component to a [`bevy_camera::Camera3d`].
//!
//! [`DepthPrepass`]
//! [`NormalPrepass`]
//! [`MotionVectorPrepass`]
//!
//! The textures are automatically added to the default mesh view bindings. You can also get the raw textures
//! by querying the [`ViewPrepassTextures`] component on any camera with a prepass component.
//!
//! The depth prepass will always run and generate the depth buffer as a side effect, but it won't copy it
//! to a separate texture unless the [`DepthPrepass`] is activated. This means that if any prepass component is present
//! it will always create a depth buffer that will be used by the main pass.
//!
//! When using the default mesh view bindings you should be able to use `prepass_depth()`,
//! `prepass_normal()`, and `prepass_motion_vector()` to load the related textures.
//! These functions are defined in `bevy_pbr::prepass_utils`. See the `shader_prepass` example that shows how to use them.
//!
//! The prepass runs for each `Material`. You can control if the prepass should run per-material by setting the `prepass_enabled`
//! flag on the `MaterialPlugin`.
//!
//! Currently only works for 3D.
pub mod node;
use core::ops::Range;
use crate::deferred::{DEFERRED_LIGHTING_PASS_ID_FORMAT, DEFERRED_PREPASS_FORMAT};
use bevy_asset::UntypedAssetId;
use bevy_ecs::prelude::*;
use bevy_math::Mat4;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::mesh::allocator::SlabId;
use bevy_render::render_phase::PhaseItemBatchSetKey;
use bevy_render::sync_world::MainEntity;
use bevy_render::{
render_phase::{
BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, PhaseItem,
PhaseItemExtraIndex,
},
render_resource::{
CachedRenderPipelineId, ColorTargetState, ColorWrites, DynamicUniformBuffer, Extent3d,
ShaderType, TextureFormat, TextureView,
},
texture::ColorAttachment,
};
/// Texture format of the normal prepass target (see [`NormalPrepass`]).
pub const NORMAL_PREPASS_FORMAT: TextureFormat = TextureFormat::Rgb10a2Unorm;
/// Texture format of the motion-vector prepass target (see [`MotionVectorPrepass`]).
pub const MOTION_VECTOR_PREPASS_FORMAT: TextureFormat = TextureFormat::Rg16Float;

/// If added to a [`bevy_camera::Camera3d`] then depth values will be copied to a separate texture available to the main pass.
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
pub struct DepthPrepass;

/// If added to a [`bevy_camera::Camera3d`] then vertex world normals will be copied to a separate texture available to the main pass.
/// Normals will have normal map textures already applied.
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
pub struct NormalPrepass;

/// If added to a [`bevy_camera::Camera3d`] then screen space motion vectors will be copied to a separate texture available to the main pass.
///
/// Motion vectors are stored in the range -1,1, with +x right and +y down.
/// A value of (1.0,1.0) indicates a pixel moved from the top left corner to the bottom right corner of the screen.
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
pub struct MotionVectorPrepass;

/// If added to a [`bevy_camera::Camera3d`] then deferred materials will be rendered to the deferred gbuffer texture and will be available to subsequent passes.
/// Note the default deferred lighting plugin also requires `DepthPrepass` to work correctly.
#[derive(Component, Default, Reflect)]
#[reflect(Component, Default)]
pub struct DeferredPrepass;

/// Allows querying the previous frame's [`DepthPrepass`].
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
#[require(DepthPrepass)]
pub struct DepthPrepassDoubleBuffer;

/// Allows querying the previous frame's [`DeferredPrepass`].
#[derive(Component, Default, Reflect, Clone)]
#[reflect(Component, Default, Clone)]
#[require(DeferredPrepass)]
pub struct DeferredPrepassDoubleBuffer;

/// View matrices from the previous frame.
///
/// Useful for temporal rendering techniques that need access to last frame's camera data.
#[derive(Component, ShaderType, Clone)]
pub struct PreviousViewData {
    pub view_from_world: Mat4,
    pub clip_from_world: Mat4,
    pub clip_from_view: Mat4,
    pub world_from_clip: Mat4,
    pub view_from_clip: Mat4,
}

/// Dynamic uniform buffer holding a [`PreviousViewData`] entry per view.
#[derive(Resource, Default)]
pub struct PreviousViewUniforms {
    pub uniforms: DynamicUniformBuffer<PreviousViewData>,
}

/// Per-view dynamic offset used when binding the previous-view uniform buffer.
#[derive(Component)]
pub struct PreviousViewUniformOffset {
    pub offset: u32,
}

/// Textures that are written to by the prepass.
///
/// This component will only be present if any of the relevant prepass components are also present.
#[derive(Component)]
pub struct ViewPrepassTextures {
    /// The depth texture generated by the prepass.
    /// Exists only if [`DepthPrepass`] is added to the [`ViewTarget`](bevy_render::view::ViewTarget)
    pub depth: Option<ColorAttachment>,
    /// The normals texture generated by the prepass.
    /// Exists only if [`NormalPrepass`] is added to the [`ViewTarget`](bevy_render::view::ViewTarget)
    pub normal: Option<ColorAttachment>,
    /// The motion vectors texture generated by the prepass.
    /// Exists only if [`MotionVectorPrepass`] is added to the `ViewTarget`
    pub motion_vectors: Option<ColorAttachment>,
    /// The deferred gbuffer generated by the deferred pass.
    /// Exists only if [`DeferredPrepass`] is added to the `ViewTarget`
    pub deferred: Option<ColorAttachment>,
    /// A texture that specifies the deferred lighting pass id for a material.
    /// Exists only if [`DeferredPrepass`] is added to the `ViewTarget`
    pub deferred_lighting_pass_id: Option<ColorAttachment>,
    /// The size of the textures.
    pub size: Extent3d,
}
impl ViewPrepassTextures {
    /// The default view of the prepass depth texture, if depth output is enabled.
    pub fn depth_view(&self) -> Option<&TextureView> {
        Some(&self.depth.as_ref()?.texture.default_view)
    }

    /// The depth texture view from the previous frame, if a previous-frame
    /// texture exists for this attachment.
    pub fn previous_depth_view(&self) -> Option<&TextureView> {
        let depth = self.depth.as_ref()?;
        Some(&depth.previous_frame_texture.as_ref()?.default_view)
    }

    /// The default view of the normals texture, if normal output is enabled.
    pub fn normal_view(&self) -> Option<&TextureView> {
        Some(&self.normal.as_ref()?.texture.default_view)
    }

    /// The default view of the motion-vectors texture, if enabled.
    pub fn motion_vectors_view(&self) -> Option<&TextureView> {
        Some(&self.motion_vectors.as_ref()?.texture.default_view)
    }

    /// The default view of the deferred gbuffer texture, if enabled.
    pub fn deferred_view(&self) -> Option<&TextureView> {
        Some(&self.deferred.as_ref()?.texture.default_view)
    }

    /// The deferred gbuffer view from the previous frame, if a previous-frame
    /// texture exists for this attachment.
    pub fn previous_deferred_view(&self) -> Option<&TextureView> {
        let deferred = self.deferred.as_ref()?;
        Some(&deferred.previous_frame_texture.as_ref()?.default_view)
    }
}
/// Opaque phase of the 3D prepass.
///
/// Sorted by pipeline, then by mesh to improve batching.
///
/// Used to render all 3D meshes with materials that have no transparency.
pub struct Opaque3dPrepass {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: OpaqueNoLightmap3dBatchSetKey,
    /// Information that separates items into bins.
    pub bin_key: OpaqueNoLightmap3dBinKey,
    /// An entity from which Bevy fetches data common to all instances in this
    /// batch, such as the mesh.
    pub representative_entity: (Entity, MainEntity),
    /// The range of items in the batch; maintained by the batching machinery
    /// through [`PhaseItem::batch_range_mut`].
    pub batch_range: Range<u32>,
    /// Extra per-item index data, exposed through [`PhaseItem::extra_index`].
    pub extra_index: PhaseItemExtraIndex,
}
/// Information that must be identical in order to place opaque meshes in the
/// same *batch set* in the prepass and deferred pass.
///
/// A batch set is a set of batches that can be multi-drawn together, if
/// multi-draw is in use.
///
/// The derived `Ord` is what sorts phase items: pipeline first, then draw
/// function, then material/slab identifiers, which groups draws for batching.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct OpaqueNoLightmap3dBatchSetKey {
    /// The ID of the GPU pipeline.
    pub pipeline: CachedRenderPipelineId,
    /// The function used to draw the mesh.
    pub draw_function: DrawFunctionId,
    /// The ID of a bind group specific to the material.
    ///
    /// In the case of PBR, this is the `MaterialBindGroupIndex`.
    pub material_bind_group_index: Option<u32>,
    /// The ID of the slab of GPU memory that contains vertex data.
    ///
    /// For non-mesh items, you can fill this with 0 if your items can be
    /// multi-drawn, or with a unique value if they can't.
    pub vertex_slab: SlabId,
    /// The ID of the slab of GPU memory that contains index data, if present.
    ///
    /// For non-mesh items, you can safely fill this with `None`.
    pub index_slab: Option<SlabId>,
}
impl PhaseItemBatchSetKey for OpaqueNoLightmap3dBatchSetKey {
    /// Reports whether this batch set draws indexed geometry: true exactly
    /// when an index-buffer slab is present.
    fn indexed(&self) -> bool {
        self.index_slab.is_some()
    }
}
// TODO: Try interning these.
/// The data used to bin each opaque 3D object in the prepass and deferred pass.
///
/// Items within one batch set are further separated into bins by this key,
/// currently just the mesh asset identity.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct OpaqueNoLightmap3dBinKey {
    /// The ID of the asset.
    pub asset_id: UntypedAssetId,
}
impl PhaseItem for Opaque3dPrepass {
    /// The render-world entity that represents this batch.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }

    /// The main-world entity that represents this batch.
    // `#[inline]` added for consistency with the other `PhaseItem`
    // accessor impls in this file (e.g. `AlphaMask3dDeferred`).
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }

    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }

    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }

    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }

    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }

    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for Opaque3dPrepass {
    type BatchSetKey = OpaqueNoLightmap3dBatchSetKey;
    type BinKey = OpaqueNoLightmap3dBinKey;

    /// Assembles a prepass phase item from its binning keys and the batching
    /// metadata supplied by the renderer.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            extra_index,
            batch_range,
            representative_entity,
            bin_key,
            batch_set_key,
        }
    }
}
impl CachedRenderPipelinePhaseItem for Opaque3dPrepass {
    /// The cached pipeline lives on the batch set key, since the pipeline is
    /// part of what determines batch-set membership.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
/// Alpha mask phase of the 3D prepass.
///
/// Sorted by pipeline, then by mesh to improve batching.
///
/// Used to render all meshes with a material with an alpha mask.
pub struct AlphaMask3dPrepass {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: OpaqueNoLightmap3dBatchSetKey,
    /// Information that separates items into bins.
    pub bin_key: OpaqueNoLightmap3dBinKey,
    /// An entity from which Bevy fetches data common to all instances in this
    /// batch, such as the mesh.
    pub representative_entity: (Entity, MainEntity),
    /// The range of items in the batch; maintained by the batching machinery
    /// through [`PhaseItem::batch_range_mut`].
    pub batch_range: Range<u32>,
    /// Extra per-item index data, exposed through [`PhaseItem::extra_index`].
    pub extra_index: PhaseItemExtraIndex,
}
impl PhaseItem for AlphaMask3dPrepass {
    /// The render-world entity that represents this batch.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }

    /// The main-world entity that represents this batch.
    // `#[inline]` added for consistency with the other `PhaseItem`
    // accessor impls in this file (e.g. `AlphaMask3dDeferred`).
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }

    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }

    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }

    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }

    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }

    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for AlphaMask3dPrepass {
    type BatchSetKey = OpaqueNoLightmap3dBatchSetKey;
    type BinKey = OpaqueNoLightmap3dBinKey;

    /// Assembles an alpha-mask prepass phase item from its binning keys and
    /// the batching metadata supplied by the renderer.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            extra_index,
            batch_range,
            representative_entity,
            bin_key,
            batch_set_key,
        }
    }
}
impl CachedRenderPipelinePhaseItem for AlphaMask3dPrepass {
    /// The cached pipeline lives on the batch set key, since the pipeline is
    /// part of what determines batch-set membership.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
/// Builds the list of color targets for the prepass pipelines.
///
/// The slots always appear in the same order — normals, motion vectors,
/// deferred G-buffer, deferred lighting pass id — with `None` in any slot
/// whose corresponding prepass is disabled.
pub fn prepass_target_descriptors(
    normal_prepass: bool,
    motion_vector_prepass: bool,
    deferred_prepass: bool,
) -> Vec<Option<ColorTargetState>> {
    // Every prepass target shares the same blend/write configuration; only
    // the texture format differs between slots.
    let target = |format| ColorTargetState {
        format,
        blend: None,
        write_mask: ColorWrites::ALL,
    };
    vec![
        normal_prepass.then(|| target(NORMAL_PREPASS_FORMAT)),
        motion_vector_prepass.then(|| target(MOTION_VECTOR_PREPASS_FORMAT)),
        deferred_prepass.then(|| target(DEFERRED_PREPASS_FORMAT)),
        deferred_prepass.then(|| target(DEFERRED_LIGHTING_PASS_ID_FORMAT)),
    ]
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/upscaling/node.rs | crates/bevy_core_pipeline/src/upscaling/node.rs | use crate::{blit::BlitPipeline, upscaling::ViewUpscalingPipeline};
use bevy_camera::{CameraOutputMode, ClearColor, ClearColorConfig};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_resource::{BindGroup, PipelineCache, RenderPassDescriptor, TextureViewId},
renderer::RenderContext,
view::ViewTarget,
};
use std::sync::Mutex;
/// Render graph node that blits the view's main texture to the final output
/// texture, applying the view's upscaling pipeline.
#[derive(Default)]
pub struct UpscalingNode {
    // Caches the blit bind group keyed by the source texture view's id, so it
    // is only re-created when the main texture view changes. A `Mutex` is used
    // because `ViewNode::run` only receives `&self`.
    cached_texture_bind_group: Mutex<Option<(TextureViewId, BindGroup)>>,
}
impl ViewNode for UpscalingNode {
    type ViewQuery = (
        &'static ViewTarget,
        &'static ViewUpscalingPipeline,
        Option<&'static ExtractedCamera>,
    );

    /// Records a fullscreen-triangle render pass that copies the view's main
    /// texture to the output texture using the cached upscaling pipeline.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (target, upscaling_target, camera): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let pipeline_cache = world.resource::<PipelineCache>();
        let blit_pipeline = world.resource::<BlitPipeline>();
        let clear_color_global = world.resource::<ClearColor>();
        let diagnostics = render_context.diagnostic_recorder();
        // Resolve the per-camera clear color config; cameras in `Skip` output
        // mode draw nothing here at all.
        let clear_color = if let Some(camera) = camera {
            match camera.output_mode {
                CameraOutputMode::Write { clear_color, .. } => clear_color,
                CameraOutputMode::Skip => return Ok(()),
            }
        } else {
            ClearColorConfig::Default
        };
        // Turn the config into a concrete optional color (global default,
        // per-camera override, or no clear at all).
        let clear_color = match clear_color {
            ClearColorConfig::Default => Some(clear_color_global.0),
            ClearColorConfig::Custom(color) => Some(color),
            ClearColorConfig::None => None,
        };
        let converted_clear_color = clear_color.map(Into::into);
        // texture to be upscaled to the output texture
        let main_texture_view = target.main_texture_view();
        // Reuse the cached bind group when the source texture view hasn't
        // changed; otherwise build and cache a fresh one.
        let mut cached_bind_group = self.cached_texture_bind_group.lock().unwrap();
        let bind_group = match &mut *cached_bind_group {
            Some((id, bind_group)) if main_texture_view.id() == *id => bind_group,
            cached_bind_group => {
                let bind_group = blit_pipeline.create_bind_group(
                    render_context.render_device(),
                    main_texture_view,
                    pipeline_cache,
                );
                let (_, bind_group) =
                    cached_bind_group.insert((main_texture_view.id(), bind_group));
                bind_group
            }
        };
        // The pipeline may still be compiling; skip this frame if so.
        let Some(pipeline) = pipeline_cache.get_render_pipeline(upscaling_target.0) else {
            return Ok(());
        };
        let pass_descriptor = RenderPassDescriptor {
            label: Some("upscaling"),
            color_attachments: &[Some(
                target.out_texture_color_attachment(converted_clear_color),
            )],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        };
        let mut render_pass = render_context
            .command_encoder()
            .begin_render_pass(&pass_descriptor);
        let pass_span = diagnostics.pass_span(&mut render_pass, "upscaling");
        // Restrict drawing to the camera's viewport, if one is configured.
        if let Some(camera) = camera
            && let Some(viewport) = &camera.viewport
        {
            let size = viewport.physical_size;
            let position = viewport.physical_position;
            render_pass.set_scissor_rect(position.x, position.y, size.x, size.y);
        }
        render_pass.set_pipeline(pipeline);
        render_pass.set_bind_group(0, bind_group, &[]);
        // Fullscreen triangle: three vertices, one instance.
        render_pass.draw(0..3, 0..1);
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/upscaling/mod.rs | crates/bevy_core_pipeline/src/upscaling/mod.rs | use crate::blit::{BlitPipeline, BlitPipelineKey};
use bevy_app::prelude::*;
use bevy_camera::CameraOutputMode;
use bevy_ecs::prelude::*;
use bevy_platform::collections::HashSet;
use bevy_render::{
camera::ExtractedCamera, render_resource::*, view::ViewTarget, Render, RenderApp, RenderSystems,
};
mod node;
pub use node::UpscalingNode;
/// Plugin that schedules per-view upscaling pipeline preparation in the
/// render app.
pub struct UpscalingPlugin;

impl Plugin for UpscalingPlugin {
    fn build(&self, app: &mut App) {
        // The render sub-app may be absent (e.g. headless); do nothing then.
        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app.add_systems(
                Render,
                // This system should probably technically be run *after* all of the other systems
                // that might modify `PipelineCache` via interior mutability, but for now,
                // we've chosen to simply ignore the ambiguities out of a desire for a better refactor
                // and aversion to extensive and intrusive system ordering.
                // See https://github.com/bevyengine/bevy/issues/14770 for more context.
                prepare_view_upscaling_pipelines
                    .in_set(RenderSystems::Prepare)
                    .ambiguous_with_all(),
            );
        }
    }
}
/// Component holding the cached blit pipeline id used to upscale this view;
/// inserted by [`prepare_view_upscaling_pipelines`].
#[derive(Component)]
pub struct ViewUpscalingPipeline(CachedRenderPipelineId);
/// Specializes and caches a blit (upscaling) pipeline for every view target
/// and inserts a [`ViewUpscalingPipeline`] component on each view entity.
fn prepare_view_upscaling_pipelines(
    mut commands: Commands,
    mut pipeline_cache: ResMut<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<BlitPipeline>>,
    blit_pipeline: Res<BlitPipeline>,
    view_targets: Query<(Entity, &ViewTarget, Option<&ExtractedCamera>)>,
) {
    // Output textures that some camera has already claimed this frame. A
    // second camera writing to the same texture must alpha-blend on top so it
    // doesn't overwrite the first camera's output.
    let mut output_textures = <HashSet<_>>::default();
    for (entity, view_target, camera) in view_targets.iter() {
        let out_texture_id = view_target.out_texture().id();
        let blend_state = if let Some(extracted_camera) = camera {
            match extracted_camera.output_mode {
                CameraOutputMode::Skip => None,
                CameraOutputMode::Write { blend_state, .. } => {
                    // `HashSet::insert` returns `false` when the value was
                    // already present, so one call both records the texture
                    // and tells us whether another camera got there first.
                    let already_seen = !output_textures.insert(out_texture_id);
                    match blend_state {
                        // If we've already seen this output for a camera and it doesn't have an output blend
                        // mode configured, default to alpha blend so that we don't accidentally overwrite
                        // the output texture
                        None if already_seen => Some(BlendState::ALPHA_BLENDING),
                        None => None,
                        _ => blend_state,
                    }
                }
            }
        } else {
            output_textures.insert(out_texture_id);
            None
        };
        let key = BlitPipelineKey {
            texture_format: view_target.out_texture_view_format(),
            blend_state,
            samples: 1,
        };
        let pipeline = pipelines.specialize(&pipeline_cache, &blit_pipeline, key);
        // Ensure the pipeline is loaded before continuing the frame to prevent frames without any GPU work submitted
        pipeline_cache.block_on_render_pipeline(pipeline);
        commands
            .entity(entity)
            .insert(ViewUpscalingPipeline(pipeline));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/deferred/node.rs | crates/bevy_core_pipeline/src/deferred/node.rs | use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::experimental::occlusion_culling::OcclusionCulling;
use bevy_render::render_graph::ViewNode;
use bevy_render::view::{ExtractedView, NoIndirectDrawing};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext},
render_phase::{TrackedRenderPass, ViewBinnedRenderPhases},
render_resource::{CommandEncoderDescriptor, RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::ViewDepthTexture,
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
use crate::prepass::ViewPrepassTextures;
use super::{AlphaMask3dDeferred, Opaque3dDeferred};
/// The phase of the deferred prepass that draws meshes that were visible last
/// frame.
///
/// If occlusion culling isn't in use, this prepass simply draws all meshes.
///
/// Like all prepass nodes, this is inserted before the main pass in the render
/// graph.
///
/// This is a stateless marker node; all work happens in [`run_deferred_prepass`].
#[derive(Default)]
pub struct EarlyDeferredGBufferPrepassNode;
impl ViewNode for EarlyDeferredGBufferPrepassNode {
    // Shares the late node's query so both nodes can delegate to the same
    // `run_deferred_prepass` helper.
    type ViewQuery = <LateDeferredGBufferPrepassNode as ViewNode>::ViewQuery;

    /// Delegates to [`run_deferred_prepass`] with `is_late = false`.
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        view_query: QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        run_deferred_prepass(
            graph,
            render_context,
            view_query,
            false,
            world,
            "early deferred prepass",
        )
    }
}
/// The phase of the prepass that runs after occlusion culling against the
/// meshes that were visible last frame.
///
/// If occlusion culling isn't in use, this is a no-op.
///
/// Like all prepass nodes, this is inserted before the main pass in the render
/// graph.
///
/// This is a stateless marker node; all work happens in [`run_deferred_prepass`].
#[derive(Default)]
pub struct LateDeferredGBufferPrepassNode;
impl ViewNode for LateDeferredGBufferPrepassNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ExtractedView,
        &'static ViewDepthTexture,
        &'static ViewPrepassTextures,
        Option<&'static MainPassResolutionOverride>,
        Has<OcclusionCulling>,
        Has<NoIndirectDrawing>,
    );

    /// Delegates to [`run_deferred_prepass`] with `is_late = true`, but only
    /// when GPU occlusion culling is active for this view.
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        view_query: QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // The late pass is only meaningful with occlusion culling and
        // indirect drawing enabled; otherwise the early pass drew everything.
        let (.., occlusion_culling, no_indirect_drawing) = view_query;
        if !occlusion_culling || no_indirect_drawing {
            return Ok(());
        }
        run_deferred_prepass(
            graph,
            render_context,
            view_query,
            true,
            world,
            "late deferred prepass",
        )
    }
}
/// Runs the deferred prepass that draws all meshes to the depth buffer and
/// G-buffers.
///
/// If occlusion culling isn't in use, and a prepass is enabled, then there's
/// only one prepass. If occlusion culling is in use, then any prepass is split
/// into two: an *early* prepass and a *late* prepass. The early prepass draws
/// what was visible last frame, and the late prepass performs occlusion culling
/// against a conservative hierarchical Z buffer before drawing unoccluded
/// meshes.
fn run_deferred_prepass<'w>(
    graph: &mut RenderGraphContext,
    render_context: &mut RenderContext<'w>,
    (camera, extracted_view, view_depth_texture, view_prepass_textures, resolution_override, _, _): QueryItem<
        'w,
        '_,
        <LateDeferredGBufferPrepassNode as ViewNode>::ViewQuery,
    >,
    is_late: bool,
    world: &'w World,
    label: &'static str,
) -> Result<(), NodeRunError> {
    // Nothing to do if the binned deferred phases don't exist at all, or have
    // no entry for this specific view.
    let (Some(opaque_deferred_phases), Some(alpha_mask_deferred_phases)) = (
        world.get_resource::<ViewBinnedRenderPhases<Opaque3dDeferred>>(),
        world.get_resource::<ViewBinnedRenderPhases<AlphaMask3dDeferred>>(),
    ) else {
        return Ok(());
    };
    let (Some(opaque_deferred_phase), Some(alpha_mask_deferred_phase)) = (
        opaque_deferred_phases.get(&extracted_view.retained_view_entity),
        alpha_mask_deferred_phases.get(&extracted_view.retained_view_entity),
    ) else {
        return Ok(());
    };
    let diagnostic = render_context.diagnostic_recorder();
    // Color attachment order is fixed: normals, motion vectors, deferred
    // G-buffer, lighting pass id. Missing prepass outputs become `None` slots.
    let mut color_attachments = vec![];
    color_attachments.push(
        view_prepass_textures
            .normal
            .as_ref()
            .map(|normals_texture| normals_texture.get_attachment()),
    );
    color_attachments.push(
        view_prepass_textures
            .motion_vectors
            .as_ref()
            .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()),
    );
    // If we clear the deferred texture with LoadOp::Clear(Default::default()) we get these errors:
    // Chrome: GL_INVALID_OPERATION: No defined conversion between clear value and attachment format.
    // Firefox: WebGL warning: clearBufferu?[fi]v: This attachment is of type FLOAT, but this function is of type UINT.
    // Appears to be unsupported: https://registry.khronos.org/webgl/specs/latest/2.0/#3.7.9
    // For webgl2 we fallback to manually clearing
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    if !is_late {
        if let Some(deferred_texture) = &view_prepass_textures.deferred {
            render_context.command_encoder().clear_texture(
                &deferred_texture.texture.texture,
                &bevy_render::render_resource::ImageSubresourceRange::default(),
            );
        }
    }
    color_attachments.push(
        view_prepass_textures
            .deferred
            .as_ref()
            .map(|deferred_texture| {
                if is_late {
                    deferred_texture.get_attachment()
                } else {
                    // On webgl2 the texture was manually cleared above, so the
                    // attachment must load (not clear) to preserve that.
                    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
                    {
                        bevy_render::render_resource::RenderPassColorAttachment {
                            view: &deferred_texture.texture.default_view,
                            resolve_target: None,
                            ops: bevy_render::render_resource::Operations {
                                load: bevy_render::render_resource::LoadOp::Load,
                                store: StoreOp::Store,
                            },
                            depth_slice: None,
                        }
                    }
                    #[cfg(any(
                        not(feature = "webgl"),
                        not(target_arch = "wasm32"),
                        feature = "webgpu"
                    ))]
                    deferred_texture.get_attachment()
                }
            }),
    );
    color_attachments.push(
        view_prepass_textures
            .deferred_lighting_pass_id
            .as_ref()
            .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()),
    );
    // If all color attachments are none: clear the color attachment list so that no fragment shader is required
    if color_attachments.iter().all(Option::is_none) {
        color_attachments.clear();
    }
    let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store));
    let view_entity = graph.view_entity();
    // Encode the actual pass on a worker so command recording can parallelize.
    render_context.add_command_buffer_generation_task(move |render_device| {
        #[cfg(feature = "trace")]
        let _deferred_span = info_span!("deferred_prepass").entered();
        // Command encoder setup
        let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("deferred_prepass_command_encoder"),
        });
        // Render pass setup
        let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor {
            label: Some(label),
            color_attachments: &color_attachments,
            depth_stencil_attachment,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let mut render_pass = TrackedRenderPass::new(&render_device, render_pass);
        let pass_span = diagnostic.pass_span(&mut render_pass, label);
        if let Some(viewport) =
            Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
        {
            render_pass.set_camera_viewport(&viewport);
        }
        // Opaque draws
        if !opaque_deferred_phase.multidrawable_meshes.is_empty()
            || !opaque_deferred_phase.batchable_meshes.is_empty()
            || !opaque_deferred_phase.unbatchable_meshes.is_empty()
        {
            #[cfg(feature = "trace")]
            let _opaque_prepass_span = info_span!("opaque_deferred_prepass").entered();
            if let Err(err) = opaque_deferred_phase.render(&mut render_pass, world, view_entity) {
                error!("Error encountered while rendering the opaque deferred phase {err:?}");
            }
        }
        // Alpha masked draws
        if !alpha_mask_deferred_phase.is_empty() {
            #[cfg(feature = "trace")]
            let _alpha_mask_deferred_span = info_span!("alpha_mask_deferred_prepass").entered();
            if let Err(err) = alpha_mask_deferred_phase.render(&mut render_pass, world, view_entity)
            {
                error!("Error encountered while rendering the alpha mask deferred phase {err:?}");
            }
        }
        pass_span.end(&mut render_pass);
        drop(render_pass);
        // After rendering to the view depth texture, copy it to the prepass depth texture
        if let Some(prepass_depth_texture) = &view_prepass_textures.depth {
            command_encoder.copy_texture_to_texture(
                view_depth_texture.texture.as_image_copy(),
                prepass_depth_texture.texture.texture.as_image_copy(),
                view_prepass_textures.size,
            );
        }
        command_encoder.finish()
    });
    Ok(())
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/deferred/mod.rs | crates/bevy_core_pipeline/src/deferred/mod.rs | pub mod copy_lighting_id;
pub mod node;
use core::ops::Range;
use crate::prepass::{OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey};
use bevy_ecs::prelude::*;
use bevy_render::sync_world::MainEntity;
use bevy_render::{
render_phase::{
BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, PhaseItem,
PhaseItemExtraIndex,
},
render_resource::{CachedRenderPipelineId, TextureFormat},
};
/// Texture format of the packed deferred G-buffer written by the deferred pass.
pub const DEFERRED_PREPASS_FORMAT: TextureFormat = TextureFormat::Rgba32Uint;
/// Texture format of the per-pixel deferred lighting pass id.
pub const DEFERRED_LIGHTING_PASS_ID_FORMAT: TextureFormat = TextureFormat::R8Uint;
/// Depth format used when the lighting pass id is copied into a depth texture
/// (see `copy_lighting_id`).
pub const DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT: TextureFormat = TextureFormat::Depth16Unorm;
/// Opaque phase of the 3D Deferred pass.
///
/// Sorted by pipeline, then by mesh to improve batching.
///
/// Used to render all 3D meshes with materials that have no transparency.
#[derive(PartialEq, Eq, Hash)]
pub struct Opaque3dDeferred {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: OpaqueNoLightmap3dBatchSetKey,
    /// Information that separates items into bins.
    pub bin_key: OpaqueNoLightmap3dBinKey,
    /// An entity from which Bevy fetches data common to all instances in this
    /// batch, such as the mesh.
    pub representative_entity: (Entity, MainEntity),
    /// The range of items in the batch; maintained by the batching machinery
    /// through [`PhaseItem::batch_range_mut`].
    pub batch_range: Range<u32>,
    /// Extra per-item index data, exposed through [`PhaseItem::extra_index`].
    pub extra_index: PhaseItemExtraIndex,
}
impl PhaseItem for Opaque3dDeferred {
    /// The render-world entity that represents this batch.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }

    /// The main-world entity that represents this batch.
    // `#[inline]` added for consistency with the `AlphaMask3dDeferred`
    // `PhaseItem` impl below.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }

    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }

    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }

    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }

    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }

    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for Opaque3dDeferred {
    type BatchSetKey = OpaqueNoLightmap3dBatchSetKey;
    type BinKey = OpaqueNoLightmap3dBinKey;

    /// Assembles a deferred phase item from its binning keys and the batching
    /// metadata supplied by the renderer.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            extra_index,
            batch_range,
            representative_entity,
            bin_key,
            batch_set_key,
        }
    }
}
impl CachedRenderPipelinePhaseItem for Opaque3dDeferred {
    /// The cached pipeline lives on the batch set key, since the pipeline is
    /// part of what determines batch-set membership.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
/// Alpha mask phase of the 3D Deferred pass.
///
/// Sorted by pipeline, then by mesh to improve batching.
///
/// Used to render all meshes with a material with an alpha mask.
// NOTE(review): unlike `Opaque3dDeferred`, this type has no
// `#[derive(PartialEq, Eq, Hash)]` — confirm whether that asymmetry is
// intentional.
pub struct AlphaMask3dDeferred {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: OpaqueNoLightmap3dBatchSetKey,
    /// Information that separates items into bins.
    pub bin_key: OpaqueNoLightmap3dBinKey,
    /// An entity from which Bevy fetches data common to all instances in this
    /// batch, such as the mesh.
    pub representative_entity: (Entity, MainEntity),
    /// The range of items in the batch; maintained by the batching machinery
    /// through [`PhaseItem::batch_range_mut`].
    pub batch_range: Range<u32>,
    /// Extra per-item index data, exposed through [`PhaseItem::extra_index`].
    pub extra_index: PhaseItemExtraIndex,
}
impl PhaseItem for AlphaMask3dDeferred {
    /// The render-world entity that represents this batch.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }

    /// The main-world entity that represents this batch.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }

    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }

    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }

    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }

    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }

    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for AlphaMask3dDeferred {
    type BatchSetKey = OpaqueNoLightmap3dBatchSetKey;
    type BinKey = OpaqueNoLightmap3dBinKey;

    /// Assembles a phase item from its binning keys and the batching metadata
    /// supplied by the renderer.
    // `#[inline]` added for consistency with the `Opaque3dDeferred`
    // `BinnedPhaseItem::new` impl above.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            batch_set_key,
            bin_key,
            representative_entity,
            batch_range,
            extra_index,
        }
    }
}
impl CachedRenderPipelinePhaseItem for AlphaMask3dDeferred {
    /// The cached pipeline lives on the batch set key, since the pipeline is
    /// part of what determines batch-set membership.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs | crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs | use crate::{
prepass::{DeferredPrepass, ViewPrepassTextures},
FullscreenShader,
};
use bevy_app::prelude::*;
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer};
use bevy_ecs::prelude::*;
use bevy_image::ToExtents;
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_resource::{binding_types::texture_2d, *},
renderer::RenderDevice,
texture::{CachedTexture, TextureCache},
view::ViewTarget,
Render, RenderApp, RenderStartup, RenderSystems,
};
use super::DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT;
use bevy_ecs::query::QueryItem;
use bevy_render::{
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
renderer::RenderContext,
};
use bevy_utils::default;
/// Plugin that registers the shader, pipeline initialization, and texture
/// preparation for copying the deferred lighting pass id into a depth texture.
pub struct CopyDeferredLightingIdPlugin;

impl Plugin for CopyDeferredLightingIdPlugin {
    fn build(&self, app: &mut App) {
        // Register the shader in the main app so it is available by the time
        // the render app initializes the pipeline.
        embedded_asset!(app, "copy_deferred_lighting_id.wgsl");
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .add_systems(RenderStartup, init_copy_deferred_lighting_id_pipeline)
            .add_systems(
                Render,
                (prepare_deferred_lighting_id_textures.in_set(RenderSystems::PrepareResources),),
            );
    }
}
/// Render graph node that copies the deferred lighting pass id texture into a
/// dedicated depth texture (see the [`ViewNode`] impl below).
#[derive(Default)]
pub struct CopyDeferredLightingIdNode;

impl CopyDeferredLightingIdNode {
    /// Name of this node in the render graph.
    pub const NAME: &'static str = "copy_deferred_lighting_id";
}
impl ViewNode for CopyDeferredLightingIdNode {
    type ViewQuery = (
        &'static ViewTarget,
        &'static ViewPrepassTextures,
        &'static DeferredLightingIdDepthTexture,
    );

    /// Records a depth-only fullscreen pass that writes the deferred lighting
    /// pass id into the view's lighting-id depth texture.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (_view_target, view_prepass_textures, deferred_lighting_id_depth_texture): QueryItem<
            Self::ViewQuery,
        >,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let copy_deferred_lighting_id_pipeline = world.resource::<CopyDeferredLightingIdPipeline>();
        let pipeline_cache = world.resource::<PipelineCache>();
        // The pipeline may still be compiling; skip this frame if so.
        let Some(pipeline) =
            pipeline_cache.get_render_pipeline(copy_deferred_lighting_id_pipeline.pipeline_id)
        else {
            return Ok(());
        };
        // Nothing to copy if the deferred prepass didn't produce a lighting
        // pass id texture for this view.
        let Some(deferred_lighting_pass_id_texture) =
            &view_prepass_textures.deferred_lighting_pass_id
        else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        let bind_group = render_context.render_device().create_bind_group(
            "copy_deferred_lighting_id_bind_group",
            &pipeline_cache.get_bind_group_layout(&copy_deferred_lighting_id_pipeline.layout),
            &BindGroupEntries::single(&deferred_lighting_pass_id_texture.texture.default_view),
        );
        // Depth-only pass: no color attachments; the depth buffer is cleared
        // and then written by the fragment shader.
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("copy_deferred_lighting_id"),
            color_attachments: &[],
            depth_stencil_attachment: Some(RenderPassDepthStencilAttachment {
                view: &deferred_lighting_id_depth_texture.texture.default_view,
                depth_ops: Some(Operations {
                    load: LoadOp::Clear(0.0),
                    store: StoreOp::Store,
                }),
                stencil_ops: None,
            }),
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        let pass_span = diagnostics.pass_span(&mut render_pass, "copy_deferred_lighting_id");
        render_pass.set_render_pipeline(pipeline);
        render_pass.set_bind_group(0, &bind_group, &[]);
        // Fullscreen triangle: three vertices, one instance.
        render_pass.draw(0..3, 0..1);
        pass_span.end(&mut render_pass);
        Ok(())
    }
}
/// Resource holding the bind group layout and cached pipeline id used by
/// [`CopyDeferredLightingIdNode`].
#[derive(Resource)]
struct CopyDeferredLightingIdPipeline {
    // Layout with a single uint 2D texture binding (the lighting pass id).
    layout: BindGroupLayoutDescriptor,
    // Id of the queued render pipeline in the pipeline cache.
    pipeline_id: CachedRenderPipelineId,
}
/// Startup system that builds the bind group layout and queues the render
/// pipeline used to copy the deferred lighting pass id into a depth texture,
/// then stores both in the [`CopyDeferredLightingIdPipeline`] resource.
pub fn init_copy_deferred_lighting_id_pipeline(
    mut commands: Commands,
    fullscreen_shader: Res<FullscreenShader>,
    asset_server: Res<AssetServer>,
    pipeline_cache: Res<PipelineCache>,
) {
    // Single binding: the R8Uint lighting pass id texture sampled as uint.
    let layout = BindGroupLayoutDescriptor::new(
        "copy_deferred_lighting_id_bind_group_layout",
        &BindGroupLayoutEntries::single(
            ShaderStages::FRAGMENT,
            texture_2d(TextureSampleType::Uint),
        ),
    );
    let vertex_state = fullscreen_shader.to_vertex_state();
    let shader = load_embedded_asset!(asset_server.as_ref(), "copy_deferred_lighting_id.wgsl");
    let pipeline_id = pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor {
        label: Some("copy_deferred_lighting_id_pipeline".into()),
        layout: vec![layout.clone()],
        vertex: vertex_state,
        fragment: Some(FragmentState {
            shader,
            ..default()
        }),
        // `CompareFunction::Always` because the pass unconditionally writes
        // the pass id as depth; no depth test is wanted.
        depth_stencil: Some(DepthStencilState {
            format: DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT,
            depth_write_enabled: true,
            depth_compare: CompareFunction::Always,
            stencil: StencilState::default(),
            bias: DepthBiasState::default(),
        }),
        ..default()
    });
    commands.insert_resource(CopyDeferredLightingIdPipeline {
        layout,
        pipeline_id,
    });
}
/// Per-view depth texture that receives the deferred lighting pass id;
/// created by [`prepare_deferred_lighting_id_textures`].
#[derive(Component)]
pub struct DeferredLightingIdDepthTexture {
    /// The cached depth texture (format [`DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT`]).
    pub texture: CachedTexture,
}
/// For every camera with a deferred prepass, allocates (or reuses from the
/// cache) the depth texture that will hold the deferred lighting pass id, and
/// attaches it to the view entity as a [`DeferredLightingIdDepthTexture`].
fn prepare_deferred_lighting_id_textures(
    mut commands: Commands,
    mut texture_cache: ResMut<TextureCache>,
    render_device: Res<RenderDevice>,
    views: Query<(Entity, &ExtractedCamera), With<DeferredPrepass>>,
) {
    for (view_entity, camera) in &views {
        // Cameras without a known target size get no texture this frame.
        let Some(target_size) = camera.physical_target_size else {
            continue;
        };
        let descriptor = TextureDescriptor {
            label: Some("deferred_lighting_id_depth_texture_a"),
            size: target_size.to_extents(),
            mip_level_count: 1,
            sample_count: 1,
            dimension: TextureDimension::D2,
            format: DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT,
            usage: TextureUsages::RENDER_ATTACHMENT | TextureUsages::COPY_SRC,
            view_formats: &[],
        };
        let texture = texture_cache.get(&render_device, descriptor);
        commands
            .entity(view_entity)
            .insert(DeferredLightingIdDepthTexture { texture });
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/tonemapping/node.rs | crates/bevy_core_pipeline/src/tonemapping/node.rs | use std::sync::Mutex;
use crate::tonemapping::{TonemappingLuts, TonemappingPipeline, ViewTonemappingPipeline};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::{
diagnostic::RecordDiagnostics,
render_asset::RenderAssets,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_resource::{
BindGroup, BindGroupEntries, BufferId, LoadOp, Operations, PipelineCache,
RenderPassColorAttachment, RenderPassDescriptor, StoreOp, TextureViewId,
},
renderer::RenderContext,
texture::{FallbackImage, GpuImage},
view::{ViewTarget, ViewUniformOffset, ViewUniforms},
};
use super::{get_lut_bindings, Tonemapping};
#[derive(Default)]
pub struct TonemappingNode {
cached_bind_group: Mutex<Option<(BufferId, TextureViewId, TextureViewId, BindGroup)>>,
last_tonemapping: Mutex<Option<Tonemapping>>,
}
impl ViewNode for TonemappingNode {
type ViewQuery = (
&'static ViewUniformOffset,
&'static ViewTarget,
&'static ViewTonemappingPipeline,
&'static Tonemapping,
);
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut RenderContext,
(view_uniform_offset, target, view_tonemapping_pipeline, tonemapping): QueryItem<
Self::ViewQuery,
>,
world: &World,
) -> Result<(), NodeRunError> {
let pipeline_cache = world.resource::<PipelineCache>();
let tonemapping_pipeline = world.resource::<TonemappingPipeline>();
let gpu_images = world.get_resource::<RenderAssets<GpuImage>>().unwrap();
let fallback_image = world.resource::<FallbackImage>();
let view_uniforms_resource = world.resource::<ViewUniforms>();
let view_uniforms = &view_uniforms_resource.uniforms;
let view_uniforms_id = view_uniforms.buffer().unwrap().id();
if *tonemapping == Tonemapping::None {
return Ok(());
}
if !target.is_hdr() {
return Ok(());
}
let Some(pipeline) = pipeline_cache.get_render_pipeline(view_tonemapping_pipeline.0) else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
let post_process = target.post_process_write();
let source = post_process.source;
let destination = post_process.destination;
let mut last_tonemapping = self.last_tonemapping.lock().unwrap();
let tonemapping_changed = if let Some(last_tonemapping) = &*last_tonemapping {
tonemapping != last_tonemapping
} else {
true
};
if tonemapping_changed {
*last_tonemapping = Some(*tonemapping);
}
let mut cached_bind_group = self.cached_bind_group.lock().unwrap();
let bind_group = match &mut *cached_bind_group {
Some((buffer_id, texture_id, lut_id, bind_group))
if view_uniforms_id == *buffer_id
&& source.id() == *texture_id
&& *lut_id != fallback_image.d3.texture_view.id()
&& !tonemapping_changed =>
{
bind_group
}
cached_bind_group => {
let tonemapping_luts = world.resource::<TonemappingLuts>();
let lut_bindings =
get_lut_bindings(gpu_images, tonemapping_luts, tonemapping, fallback_image);
let bind_group = render_context.render_device().create_bind_group(
None,
&pipeline_cache.get_bind_group_layout(&tonemapping_pipeline.texture_bind_group),
&BindGroupEntries::sequential((
view_uniforms,
source,
&tonemapping_pipeline.sampler,
lut_bindings.0,
lut_bindings.1,
)),
);
let (_, _, _, bind_group) = cached_bind_group.insert((
view_uniforms_id,
source.id(),
lut_bindings.0.id(),
bind_group,
));
bind_group
}
};
let pass_descriptor = RenderPassDescriptor {
label: Some("tonemapping"),
color_attachments: &[Some(RenderPassColorAttachment {
view: destination,
depth_slice: None,
resolve_target: None,
ops: Operations {
load: LoadOp::Clear(Default::default()), // TODO shouldn't need to be cleared
store: StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
};
let mut render_pass = render_context
.command_encoder()
.begin_render_pass(&pass_descriptor);
let pass_span = diagnostics.pass_span(&mut render_pass, "tonemapping");
render_pass.set_pipeline(pipeline);
render_pass.set_bind_group(0, bind_group, &[view_uniform_offset.offset]);
render_pass.draw(0..3, 0..1);
pass_span.end(&mut render_pass);
Ok(())
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/tonemapping/mod.rs | crates/bevy_core_pipeline/src/tonemapping/mod.rs | use bevy_app::prelude::*;
use bevy_asset::{
embedded_asset, load_embedded_asset, AssetServer, Assets, Handle, RenderAssetUsages,
};
use bevy_camera::Camera;
use bevy_ecs::prelude::*;
use bevy_image::{CompressedImageFormats, Image, ImageSampler, ImageType};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
extract_component::{ExtractComponent, ExtractComponentPlugin},
extract_resource::{ExtractResource, ExtractResourcePlugin},
render_asset::RenderAssets,
render_resource::{
binding_types::{sampler, texture_2d, texture_3d, uniform_buffer},
*,
},
renderer::RenderDevice,
texture::{FallbackImage, GpuImage},
view::{ExtractedView, ViewTarget, ViewUniform},
Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::{load_shader_library, Shader, ShaderDefVal};
use bitflags::bitflags;
#[cfg(not(feature = "tonemapping_luts"))]
use tracing::error;
mod node;
use bevy_utils::default;
pub use node::TonemappingNode;
use crate::FullscreenShader;
/// 3D LUT (look up table) textures used for tonemapping
#[derive(Resource, Clone, ExtractResource)]
pub struct TonemappingLuts {
pub blender_filmic: Handle<Image>,
pub agx: Handle<Image>,
pub tony_mc_mapface: Handle<Image>,
}
pub struct TonemappingPlugin;
impl Plugin for TonemappingPlugin {
fn build(&self, app: &mut App) {
load_shader_library!(app, "tonemapping_shared.wgsl");
load_shader_library!(app, "lut_bindings.wgsl");
embedded_asset!(app, "tonemapping.wgsl");
if !app.world().is_resource_added::<TonemappingLuts>() {
let mut images = app.world_mut().resource_mut::<Assets<Image>>();
#[cfg(feature = "tonemapping_luts")]
let tonemapping_luts = {
TonemappingLuts {
blender_filmic: images.add(setup_tonemapping_lut_image(
include_bytes!("luts/Blender_-11_12.ktx2"),
ImageType::Extension("ktx2"),
)),
agx: images.add(setup_tonemapping_lut_image(
include_bytes!("luts/AgX-default_contrast.ktx2"),
ImageType::Extension("ktx2"),
)),
tony_mc_mapface: images.add(setup_tonemapping_lut_image(
include_bytes!("luts/tony_mc_mapface.ktx2"),
ImageType::Extension("ktx2"),
)),
}
};
#[cfg(not(feature = "tonemapping_luts"))]
let tonemapping_luts = {
let placeholder = images.add(lut_placeholder());
TonemappingLuts {
blender_filmic: placeholder.clone(),
agx: placeholder.clone(),
tony_mc_mapface: placeholder,
}
};
app.insert_resource(tonemapping_luts);
}
app.add_plugins(ExtractResourcePlugin::<TonemappingLuts>::default());
app.add_plugins((
ExtractComponentPlugin::<Tonemapping>::default(),
ExtractComponentPlugin::<DebandDither>::default(),
));
let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
render_app
.init_resource::<SpecializedRenderPipelines<TonemappingPipeline>>()
.add_systems(RenderStartup, init_tonemapping_pipeline)
.add_systems(
Render,
prepare_view_tonemapping_pipelines.in_set(RenderSystems::Prepare),
);
}
}
#[derive(Resource)]
pub struct TonemappingPipeline {
texture_bind_group: BindGroupLayoutDescriptor,
sampler: Sampler,
fullscreen_shader: FullscreenShader,
fragment_shader: Handle<Shader>,
}
/// Optionally enables a tonemapping shader that attempts to map linear input stimulus into a perceptually uniform image for a given [`Camera`] entity.
#[derive(
Component, Debug, Hash, Clone, Copy, Reflect, Default, ExtractComponent, PartialEq, Eq,
)]
#[extract_component_filter(With<Camera>)]
#[reflect(Component, Debug, Hash, Default, PartialEq)]
pub enum Tonemapping {
/// Bypass tonemapping.
None,
/// Suffers from lots hue shifting, brights don't desaturate naturally.
/// Bright primaries and secondaries don't desaturate at all.
Reinhard,
/// Suffers from hue shifting. Brights don't desaturate much at all across the spectrum.
ReinhardLuminance,
/// Same base implementation that Godot 4.0 uses for Tonemap ACES.
/// <https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl>
/// Not neutral, has a very specific aesthetic, intentional and dramatic hue shifting.
/// Bright greens and reds turn orange. Bright blues turn magenta.
/// Significantly increased contrast. Brights desaturate across the spectrum.
AcesFitted,
/// By Troy Sobotka
/// <https://github.com/sobotka/AgX>
/// Very neutral. Image is somewhat desaturated when compared to other tonemappers.
/// Little to no hue shifting. Subtle [Abney shifting](https://en.wikipedia.org/wiki/Abney_effect).
/// NOTE: Requires the `tonemapping_luts` cargo feature.
AgX,
/// By Tomasz Stachowiak
/// Has little hue shifting in the darks and mids, but lots in the brights. Brights desaturate across the spectrum.
/// Is sort of between Reinhard and `ReinhardLuminance`. Conceptually similar to reinhard-jodie.
/// Designed as a compromise if you want e.g. decent skin tones in low light, but can't afford to re-do your
/// VFX to look good without hue shifting.
SomewhatBoringDisplayTransform,
/// Current Bevy default.
/// By Tomasz Stachowiak
/// <https://github.com/h3r2tic/tony-mc-mapface>
/// Very neutral. Subtle but intentional hue shifting. Brights desaturate across the spectrum.
/// Comment from author:
/// Tony is a display transform intended for real-time applications such as games.
/// It is intentionally boring, does not increase contrast or saturation, and stays close to the
/// input stimulus where compression isn't necessary.
/// Brightness-equivalent luminance of the input stimulus is compressed. The non-linearity resembles Reinhard.
/// Color hues are preserved during compression, except for a deliberate [Bezold–Brücke shift](https://en.wikipedia.org/wiki/Bezold%E2%80%93Br%C3%BCcke_shift).
/// To avoid posterization, selective desaturation is employed, with care to avoid the [Abney effect](https://en.wikipedia.org/wiki/Abney_effect).
/// NOTE: Requires the `tonemapping_luts` cargo feature.
#[default]
TonyMcMapface,
/// Default Filmic Display Transform from blender.
/// Somewhat neutral. Suffers from hue shifting. Brights desaturate across the spectrum.
/// NOTE: Requires the `tonemapping_luts` cargo feature.
BlenderFilmic,
}
impl Tonemapping {
pub fn is_enabled(&self) -> bool {
*self != Tonemapping::None
}
}
bitflags! {
/// Various flags describing what tonemapping needs to do.
///
/// This allows the shader to skip unneeded steps.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TonemappingPipelineKeyFlags: u8 {
/// The hue needs to be changed.
const HUE_ROTATE = 0x01;
/// The white balance needs to be adjusted.
const WHITE_BALANCE = 0x02;
/// Saturation/contrast/gamma/gain/lift for one or more sections
/// (shadows, midtones, highlights) need to be adjusted.
const SECTIONAL_COLOR_GRADING = 0x04;
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TonemappingPipelineKey {
deband_dither: DebandDither,
tonemapping: Tonemapping,
flags: TonemappingPipelineKeyFlags,
}
impl SpecializedRenderPipeline for TonemappingPipeline {
type Key = TonemappingPipelineKey;
fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
let mut shader_defs = Vec::new();
shader_defs.push(ShaderDefVal::UInt(
"TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
3,
));
shader_defs.push(ShaderDefVal::UInt(
"TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
4,
));
if let DebandDither::Enabled = key.deband_dither {
shader_defs.push("DEBAND_DITHER".into());
}
// Define shader flags depending on the color grading options in use.
if key.flags.contains(TonemappingPipelineKeyFlags::HUE_ROTATE) {
shader_defs.push("HUE_ROTATE".into());
}
if key
.flags
.contains(TonemappingPipelineKeyFlags::WHITE_BALANCE)
{
shader_defs.push("WHITE_BALANCE".into());
}
if key
.flags
.contains(TonemappingPipelineKeyFlags::SECTIONAL_COLOR_GRADING)
{
shader_defs.push("SECTIONAL_COLOR_GRADING".into());
}
match key.tonemapping {
Tonemapping::None => shader_defs.push("TONEMAP_METHOD_NONE".into()),
Tonemapping::Reinhard => shader_defs.push("TONEMAP_METHOD_REINHARD".into()),
Tonemapping::ReinhardLuminance => {
shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
}
Tonemapping::AcesFitted => shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into()),
Tonemapping::AgX => {
#[cfg(not(feature = "tonemapping_luts"))]
error!(
"AgX tonemapping requires the `tonemapping_luts` feature.
Either enable the `tonemapping_luts` feature for bevy in `Cargo.toml` (recommended),
or use a different `Tonemapping` method for your `Camera2d`/`Camera3d`."
);
shader_defs.push("TONEMAP_METHOD_AGX".into());
}
Tonemapping::SomewhatBoringDisplayTransform => {
shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
}
Tonemapping::TonyMcMapface => {
#[cfg(not(feature = "tonemapping_luts"))]
error!(
"TonyMcMapFace tonemapping requires the `tonemapping_luts` feature.
Either enable the `tonemapping_luts` feature for bevy in `Cargo.toml` (recommended),
or use a different `Tonemapping` method for your `Camera2d`/`Camera3d`."
);
shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
}
Tonemapping::BlenderFilmic => {
#[cfg(not(feature = "tonemapping_luts"))]
error!(
"BlenderFilmic tonemapping requires the `tonemapping_luts` feature.
Either enable the `tonemapping_luts` feature for bevy in `Cargo.toml` (recommended),
or use a different `Tonemapping` method for your `Camera2d`/`Camera3d`."
);
shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
}
}
RenderPipelineDescriptor {
label: Some("tonemapping pipeline".into()),
layout: vec![self.texture_bind_group.clone()],
vertex: self.fullscreen_shader.to_vertex_state(),
fragment: Some(FragmentState {
shader: self.fragment_shader.clone(),
shader_defs,
targets: vec![Some(ColorTargetState {
format: ViewTarget::TEXTURE_FORMAT_HDR,
blend: None,
write_mask: ColorWrites::ALL,
})],
..default()
}),
..default()
}
}
}
pub fn init_tonemapping_pipeline(
mut commands: Commands,
render_device: Res<RenderDevice>,
fullscreen_shader: Res<FullscreenShader>,
asset_server: Res<AssetServer>,
) {
let mut entries = DynamicBindGroupLayoutEntries::new_with_indices(
ShaderStages::FRAGMENT,
(
(0, uniform_buffer::<ViewUniform>(true)),
(
1,
texture_2d(TextureSampleType::Float { filterable: false }),
),
(2, sampler(SamplerBindingType::NonFiltering)),
),
);
let lut_layout_entries = get_lut_bind_group_layout_entries();
entries = entries.extend_with_indices(((3, lut_layout_entries[0]), (4, lut_layout_entries[1])));
let tonemap_texture_bind_group =
BindGroupLayoutDescriptor::new("tonemapping_hdr_texture_bind_group_layout", &entries);
let sampler = render_device.create_sampler(&SamplerDescriptor::default());
commands.insert_resource(TonemappingPipeline {
texture_bind_group: tonemap_texture_bind_group,
sampler,
fullscreen_shader: fullscreen_shader.clone(),
fragment_shader: load_embedded_asset!(asset_server.as_ref(), "tonemapping.wgsl"),
});
}
#[derive(Component)]
pub struct ViewTonemappingPipeline(CachedRenderPipelineId);
pub fn prepare_view_tonemapping_pipelines(
mut commands: Commands,
pipeline_cache: Res<PipelineCache>,
mut pipelines: ResMut<SpecializedRenderPipelines<TonemappingPipeline>>,
upscaling_pipeline: Res<TonemappingPipeline>,
view_targets: Query<
(
Entity,
&ExtractedView,
Option<&Tonemapping>,
Option<&DebandDither>,
),
With<ViewTarget>,
>,
) {
for (entity, view, tonemapping, dither) in view_targets.iter() {
// As an optimization, we omit parts of the shader that are unneeded.
let mut flags = TonemappingPipelineKeyFlags::empty();
flags.set(
TonemappingPipelineKeyFlags::HUE_ROTATE,
view.color_grading.global.hue != 0.0,
);
flags.set(
TonemappingPipelineKeyFlags::WHITE_BALANCE,
view.color_grading.global.temperature != 0.0 || view.color_grading.global.tint != 0.0,
);
flags.set(
TonemappingPipelineKeyFlags::SECTIONAL_COLOR_GRADING,
view.color_grading
.all_sections()
.any(|section| *section != default()),
);
let key = TonemappingPipelineKey {
deband_dither: *dither.unwrap_or(&DebandDither::Disabled),
tonemapping: *tonemapping.unwrap_or(&Tonemapping::None),
flags,
};
let pipeline = pipelines.specialize(&pipeline_cache, &upscaling_pipeline, key);
commands
.entity(entity)
.insert(ViewTonemappingPipeline(pipeline));
}
}
/// Enables a debanding shader that applies dithering to mitigate color banding in the final image for a given [`Camera`] entity.
#[derive(
Component, Debug, Hash, Clone, Copy, Reflect, Default, ExtractComponent, PartialEq, Eq,
)]
#[extract_component_filter(With<Camera>)]
#[reflect(Component, Debug, Hash, Default, PartialEq)]
pub enum DebandDither {
#[default]
Disabled,
Enabled,
}
pub fn get_lut_bindings<'a>(
images: &'a RenderAssets<GpuImage>,
tonemapping_luts: &'a TonemappingLuts,
tonemapping: &Tonemapping,
fallback_image: &'a FallbackImage,
) -> (&'a TextureView, &'a Sampler) {
let image = match tonemapping {
// AgX lut texture used when tonemapping doesn't need a texture since it's very small (32x32x32)
Tonemapping::None
| Tonemapping::Reinhard
| Tonemapping::ReinhardLuminance
| Tonemapping::AcesFitted
| Tonemapping::AgX
| Tonemapping::SomewhatBoringDisplayTransform => &tonemapping_luts.agx,
Tonemapping::TonyMcMapface => &tonemapping_luts.tony_mc_mapface,
Tonemapping::BlenderFilmic => &tonemapping_luts.blender_filmic,
};
let lut_image = images.get(image).unwrap_or(&fallback_image.d3);
(&lut_image.texture_view, &lut_image.sampler)
}
pub fn get_lut_bind_group_layout_entries() -> [BindGroupLayoutEntryBuilder; 2] {
[
texture_3d(TextureSampleType::Float { filterable: true }),
sampler(SamplerBindingType::Filtering),
]
}
#[expect(clippy::allow_attributes, reason = "`dead_code` is not always linted.")]
#[allow(
dead_code,
reason = "There is unused code when the `tonemapping_luts` feature is disabled."
)]
fn setup_tonemapping_lut_image(bytes: &[u8], image_type: ImageType) -> Image {
let image_sampler = ImageSampler::Descriptor(bevy_image::ImageSamplerDescriptor {
label: Some("Tonemapping LUT sampler".to_string()),
address_mode_u: bevy_image::ImageAddressMode::ClampToEdge,
address_mode_v: bevy_image::ImageAddressMode::ClampToEdge,
address_mode_w: bevy_image::ImageAddressMode::ClampToEdge,
mag_filter: bevy_image::ImageFilterMode::Linear,
min_filter: bevy_image::ImageFilterMode::Linear,
mipmap_filter: bevy_image::ImageFilterMode::Linear,
..default()
});
Image::from_buffer(
bytes,
image_type,
CompressedImageFormats::NONE,
false,
image_sampler,
RenderAssetUsages::RENDER_WORLD,
)
.unwrap()
}
pub fn lut_placeholder() -> Image {
let format = TextureFormat::Rgba8Unorm;
let data = vec![255, 0, 255, 255];
Image {
data: Some(data),
data_order: TextureDataOrder::default(),
texture_descriptor: TextureDescriptor {
size: Extent3d::default(),
format,
dimension: TextureDimension::D3,
label: None,
mip_level_count: 1,
sample_count: 1,
usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST,
view_formats: &[],
},
sampler: ImageSampler::Default,
texture_view_descriptor: None,
asset_usage: RenderAssetUsages::RENDER_WORLD,
copy_on_resize: false,
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/oit/mod.rs | crates/bevy_core_pipeline/src/oit/mod.rs | //! Order Independent Transparency (OIT) for 3d rendering. See [`OrderIndependentTransparencyPlugin`] for more details.
use bevy_app::prelude::*;
use bevy_camera::{Camera3d, RenderTarget};
use bevy_ecs::{component::*, lifecycle::ComponentHook, prelude::*};
use bevy_math::UVec2;
use bevy_platform::collections::HashSet;
use bevy_platform::time::Instant;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
camera::ExtractedCamera,
extract_component::{ExtractComponent, ExtractComponentPlugin},
render_graph::{RenderGraphExt, ViewNodeRunner},
render_resource::{BufferUsages, BufferVec, DynamicUniformBuffer, ShaderType, TextureUsages},
renderer::{RenderDevice, RenderQueue},
view::Msaa,
Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::load_shader_library;
use bevy_window::PrimaryWindow;
use resolve::{
node::{OitResolveNode, OitResolvePass},
OitResolvePlugin,
};
use tracing::{trace, warn};
use crate::core_3d::graph::{Core3d, Node3d};
/// Module that defines the necessary systems to resolve the OIT buffer and render it to the screen.
pub mod resolve;
/// Used to identify which camera will use OIT to render transparent meshes
/// and to configure OIT.
// TODO consider supporting multiple OIT techniques like WBOIT, Moment Based OIT,
// depth peeling, stochastic transparency, ray tracing etc.
// This should probably be done by adding an enum to this component.
// We use the same struct to pass on the settings to the drawing shader.
#[derive(Clone, Copy, ExtractComponent, Reflect, ShaderType)]
#[reflect(Clone, Default)]
pub struct OrderIndependentTransparencySettings {
/// Controls how many layers will be used to compute the blending.
/// The more layers you use the more memory it will use but it will also give better results.
/// 8 is generally recommended, going above 32 is probably not worth it in the vast majority of cases
pub layer_count: i32,
/// Threshold for which fragments will be added to the blending layers.
/// This can be tweaked to optimize quality / layers count. Higher values will
/// allow lower number of layers and a better performance, compromising quality.
pub alpha_threshold: f32,
}
impl Default for OrderIndependentTransparencySettings {
fn default() -> Self {
Self {
layer_count: 8,
alpha_threshold: 0.0,
}
}
}
// OrderIndependentTransparencySettings is also a Component. We explicitly implement the trait so
// we can hook on_add to issue a warning in case `layer_count` is seemingly too high.
impl Component for OrderIndependentTransparencySettings {
const STORAGE_TYPE: StorageType = StorageType::SparseSet;
type Mutability = Mutable;
fn on_add() -> Option<ComponentHook> {
Some(|world, context| {
if let Some(value) = world.get::<OrderIndependentTransparencySettings>(context.entity)
&& value.layer_count > 32
{
warn!("{}OrderIndependentTransparencySettings layer_count set to {} might be too high.",
context.caller.map(|location|format!("{location}: ")).unwrap_or_default(),
value.layer_count
);
}
})
}
}
/// A plugin that adds support for Order Independent Transparency (OIT).
/// This can correctly render some scenes that would otherwise have artifacts due to alpha blending, but uses more memory.
///
/// To enable OIT for a camera you need to add the [`OrderIndependentTransparencySettings`] component to it.
///
/// If you want to use OIT for your custom material you need to call `oit_draw(position, color)` in your fragment shader.
/// You also need to make sure that your fragment shader doesn't output any colors.
///
/// # Implementation details
/// This implementation uses 2 passes.
///
/// The first pass writes the depth and color of all the fragments to a big buffer.
/// The buffer contains N layers for each pixel, where N can be set with [`OrderIndependentTransparencySettings::layer_count`].
/// This pass is essentially a forward pass.
///
/// The second pass is a single fullscreen triangle pass that sorts all the fragments then blends them together
/// and outputs the result to the screen.
pub struct OrderIndependentTransparencyPlugin;
impl Plugin for OrderIndependentTransparencyPlugin {
fn build(&self, app: &mut App) {
load_shader_library!(app, "oit_draw.wgsl");
app.add_plugins((
ExtractComponentPlugin::<OrderIndependentTransparencySettings>::default(),
OitResolvePlugin,
))
.add_systems(Update, check_msaa)
.add_systems(Last, configure_depth_texture_usages);
let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
return;
};
render_app
.add_systems(RenderStartup, init_oit_buffers)
.add_systems(
Render,
prepare_oit_buffers.in_set(RenderSystems::PrepareResources),
);
render_app
.add_render_graph_node::<ViewNodeRunner<OitResolveNode>>(Core3d, OitResolvePass)
.add_render_graph_edges(
Core3d,
(
Node3d::MainTransparentPass,
OitResolvePass,
Node3d::EndMainPass,
),
);
}
}
// WARN This should only happen for cameras with the [`OrderIndependentTransparencySettings`] component
// but when multiple cameras are present on the same window
// bevy reuses the same depth texture so we need to set this on all cameras with the same render target.
fn configure_depth_texture_usages(
p: Query<Entity, With<PrimaryWindow>>,
cameras: Query<(&RenderTarget, Has<OrderIndependentTransparencySettings>)>,
mut new_cameras: Query<(&mut Camera3d, &RenderTarget), Added<Camera3d>>,
) {
if new_cameras.is_empty() {
return;
}
// Find all the render target that potentially uses OIT
let primary_window = p.single().ok();
let mut render_target_has_oit = <HashSet<_>>::default();
for (render_target, has_oit) in &cameras {
if has_oit {
render_target_has_oit.insert(render_target.normalize(primary_window));
}
}
// Update the depth texture usage for cameras with a render target that has OIT
for (mut camera_3d, render_target) in &mut new_cameras {
if render_target_has_oit.contains(&render_target.normalize(primary_window)) {
let mut usages = TextureUsages::from(camera_3d.depth_texture_usages);
usages |= TextureUsages::RENDER_ATTACHMENT | TextureUsages::TEXTURE_BINDING;
camera_3d.depth_texture_usages = usages.into();
}
}
}
fn check_msaa(cameras: Query<&Msaa, With<OrderIndependentTransparencySettings>>) {
for msaa in &cameras {
if msaa.samples() > 1 {
panic!("MSAA is not supported when using OrderIndependentTransparency");
}
}
}
/// Holds the buffers that contain the data of all OIT layers.
/// We use one big buffer for the entire app. Each camera will reuse it so it will
/// always be the size of the biggest OIT enabled camera.
#[derive(Resource)]
pub struct OitBuffers {
/// The OIT layers containing depth and color for each fragments.
/// This is essentially used as a 3d array where xy is the screen coordinate and z is
/// the list of fragments rendered with OIT.
pub layers: BufferVec<UVec2>,
/// Buffer containing the index of the last layer that was written for each fragment.
pub layer_ids: BufferVec<i32>,
pub settings: DynamicUniformBuffer<OrderIndependentTransparencySettings>,
}
pub fn init_oit_buffers(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
) {
// initialize buffers with something so there's a valid binding
let mut layers = BufferVec::new(BufferUsages::COPY_DST | BufferUsages::STORAGE);
layers.set_label(Some("oit_layers"));
layers.reserve(1, &render_device);
layers.write_buffer(&render_device, &render_queue);
let mut layer_ids = BufferVec::new(BufferUsages::COPY_DST | BufferUsages::STORAGE);
layer_ids.set_label(Some("oit_layer_ids"));
layer_ids.reserve(1, &render_device);
layer_ids.write_buffer(&render_device, &render_queue);
let mut settings = DynamicUniformBuffer::default();
settings.set_label(Some("oit_settings"));
commands.insert_resource(OitBuffers {
layers,
layer_ids,
settings,
});
}
#[derive(Component)]
pub struct OrderIndependentTransparencySettingsOffset {
pub offset: u32,
}
/// This creates or resizes the oit buffers for each camera.
/// It will always create one big buffer that's as big as the biggest buffer needed.
/// Cameras with smaller viewports or less layers will simply use the big buffer and ignore the rest.
pub fn prepare_oit_buffers(
mut commands: Commands,
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
cameras: Query<
(&ExtractedCamera, &OrderIndependentTransparencySettings),
(
Changed<ExtractedCamera>,
Changed<OrderIndependentTransparencySettings>,
),
>,
camera_oit_uniforms: Query<(Entity, &OrderIndependentTransparencySettings)>,
mut buffers: ResMut<OitBuffers>,
) {
// Get the max buffer size for any OIT enabled camera
let mut max_layer_ids_size = usize::MIN;
let mut max_layers_size = usize::MIN;
for (camera, settings) in &cameras {
let Some(size) = camera.physical_target_size else {
continue;
};
let layer_count = settings.layer_count as usize;
let size = (size.x * size.y) as usize;
max_layer_ids_size = max_layer_ids_size.max(size);
max_layers_size = max_layers_size.max(size * layer_count);
}
// Create or update the layers buffer based on the max size
if buffers.layers.capacity() < max_layers_size {
let start = Instant::now();
buffers.layers.reserve(max_layers_size, &render_device);
let remaining = max_layers_size - buffers.layers.capacity();
for _ in 0..remaining {
buffers.layers.push(UVec2::ZERO);
}
buffers.layers.write_buffer(&render_device, &render_queue);
trace!(
"OIT layers buffer updated in {:.01}ms with total size {} MiB",
start.elapsed().as_millis(),
buffers.layers.capacity() * size_of::<UVec2>() / 1024 / 1024,
);
}
// Create or update the layer_ids buffer based on the max size
if buffers.layer_ids.capacity() < max_layer_ids_size {
let start = Instant::now();
buffers
.layer_ids
.reserve(max_layer_ids_size, &render_device);
let remaining = max_layer_ids_size - buffers.layer_ids.capacity();
for _ in 0..remaining {
buffers.layer_ids.push(0);
}
buffers
.layer_ids
.write_buffer(&render_device, &render_queue);
trace!(
"OIT layer ids buffer updated in {:.01}ms with total size {} MiB",
start.elapsed().as_millis(),
buffers.layer_ids.capacity() * size_of::<UVec2>() / 1024 / 1024,
);
}
if let Some(mut writer) = buffers.settings.get_writer(
camera_oit_uniforms.iter().len(),
&render_device,
&render_queue,
) {
for (entity, settings) in &camera_oit_uniforms {
let offset = writer.write(settings);
commands
.entity(entity)
.insert(OrderIndependentTransparencySettingsOffset { offset });
}
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/oit/resolve/node.rs | crates/bevy_core_pipeline/src/oit/resolve/node.rs | use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, RenderLabel, ViewNode},
render_resource::{BindGroupEntries, PipelineCache, RenderPassDescriptor},
renderer::RenderContext,
view::{ViewDepthTexture, ViewTarget, ViewUniformOffset},
};
use super::{OitResolveBindGroup, OitResolvePipeline, OitResolvePipelineId};
/// Render label for the OIT resolve pass.
#[derive(RenderLabel, Debug, Clone, Hash, PartialEq, Eq)]
pub struct OitResolvePass;
/// The node that executes the OIT resolve pass.
#[derive(Default)]
pub struct OitResolveNode;
impl ViewNode for OitResolveNode {
type ViewQuery = (
&'static ExtractedCamera,
&'static ViewTarget,
&'static ViewUniformOffset,
&'static OitResolvePipelineId,
&'static ViewDepthTexture,
Option<&'static MainPassResolutionOverride>,
);
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut RenderContext,
(camera, view_target, view_uniform, oit_resolve_pipeline_id, depth, resolution_override): QueryItem<
Self::ViewQuery,
>,
world: &World,
) -> Result<(), NodeRunError> {
let Some(resolve_pipeline) = world.get_resource::<OitResolvePipeline>() else {
return Ok(());
};
// resolve oit
// sorts the layers and renders the final blended color to the screen
{
let pipeline_cache = world.resource::<PipelineCache>();
let bind_group = world.resource::<OitResolveBindGroup>();
let Some(pipeline) = pipeline_cache.get_render_pipeline(oit_resolve_pipeline_id.0)
else {
return Ok(());
};
let diagnostics = render_context.diagnostic_recorder();
let depth_bind_group = render_context.render_device().create_bind_group(
"oit_resolve_depth_bind_group",
&pipeline_cache
.get_bind_group_layout(&resolve_pipeline.oit_depth_bind_group_layout),
&BindGroupEntries::single(depth.view()),
);
let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
label: Some("oit_resolve"),
color_attachments: &[Some(view_target.get_color_attachment())],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
let pass_span = diagnostics.pass_span(&mut render_pass, "oit_resolve");
if let Some(viewport) =
Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
{
render_pass.set_camera_viewport(&viewport);
}
render_pass.set_render_pipeline(pipeline);
render_pass.set_bind_group(0, bind_group, &[view_uniform.offset]);
render_pass.set_bind_group(1, &depth_bind_group, &[]);
render_pass.draw(0..3, 0..1);
pass_span.end(&mut render_pass);
}
Ok(())
}
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/oit/resolve/mod.rs | crates/bevy_core_pipeline/src/oit/resolve/mod.rs | use super::OitBuffers;
use crate::{oit::OrderIndependentTransparencySettings, FullscreenShader};
use bevy_app::Plugin;
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer};
use bevy_derive::Deref;
use bevy_ecs::{
entity::{EntityHashMap, EntityHashSet},
prelude::*,
};
use bevy_image::BevyDefault as _;
use bevy_render::{
render_resource::{
binding_types::{storage_buffer_sized, texture_depth_2d, uniform_buffer},
BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindGroupLayoutEntries,
BlendComponent, BlendState, CachedRenderPipelineId, ColorTargetState, ColorWrites,
DownlevelFlags, FragmentState, PipelineCache, RenderPipelineDescriptor, ShaderStages,
TextureFormat,
},
renderer::{RenderAdapter, RenderDevice},
view::{ExtractedView, ViewTarget, ViewUniform, ViewUniforms},
Render, RenderApp, RenderSystems,
};
use bevy_shader::ShaderDefVal;
use bevy_utils::default;
use tracing::warn;
/// Contains the render node used to run the resolve pass.
pub mod node;
/// Minimum required value of `wgpu::Limits::max_storage_buffers_per_shader_stage`.
pub const OIT_REQUIRED_STORAGE_BUFFERS: u32 = 2;
/// Plugin needed to resolve the Order Independent Transparency (OIT) buffer to the screen.
pub struct OitResolvePlugin;
impl Plugin for OitResolvePlugin {
    fn build(&self, app: &mut bevy_app::App) {
        // Register the resolve shader so it can later be fetched with `load_embedded_asset!`.
        embedded_asset!(app, "oit_resolve.wgsl");
    }
    // Runs in `finish` because it reads the `RenderAdapter`/`RenderDevice`
    // resources out of the render sub-app's world.
    fn finish(&self, app: &mut bevy_app::App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // Bail out (with a warning, `warn = true`) if the hardware can't run OIT;
        // in that case neither the systems nor the pipeline resource are installed.
        if !is_oit_supported(
            render_app.world().resource::<RenderAdapter>(),
            render_app.world().resource::<RenderDevice>(),
            true,
        ) {
            return;
        }
        render_app
            .add_systems(
                Render,
                (
                    queue_oit_resolve_pipeline.in_set(RenderSystems::Queue),
                    prepare_oit_resolve_bind_group.in_set(RenderSystems::PrepareBindGroups),
                ),
            )
            .insert_resource(OitResolvePipeline::new());
    }
}
/// Returns whether the adapter/device pair meets the requirements for OIT:
/// fragment-writable storage buffers plus at least
/// [`OIT_REQUIRED_STORAGE_BUFFERS`] storage buffers per shader stage.
/// When `warn` is true, the first unmet requirement is logged.
pub fn is_oit_supported(adapter: &RenderAdapter, device: &RenderDevice, warn: bool) -> bool {
    let fragment_writable_storage = adapter
        .get_downlevel_capabilities()
        .flags
        .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE);
    if !fragment_writable_storage {
        if warn {
            warn!("OrderIndependentTransparencyPlugin not loaded. GPU lacks support: DownlevelFlags::FRAGMENT_WRITABLE_STORAGE.");
        }
        return false;
    }
    let max_storage_buffers_per_shader_stage = device.limits().max_storage_buffers_per_shader_stage;
    if max_storage_buffers_per_shader_stage >= OIT_REQUIRED_STORAGE_BUFFERS {
        return true;
    }
    if warn {
        warn!(
            max_storage_buffers_per_shader_stage,
            OIT_REQUIRED_STORAGE_BUFFERS,
            "OrderIndependentTransparencyPlugin not loaded. RenderDevice lacks support: max_storage_buffers_per_shader_stage < OIT_REQUIRED_STORAGE_BUFFERS."
        );
    }
    false
}
/// Bind group for the OIT resolve pass.
#[derive(Resource, Deref)]
pub struct OitResolveBindGroup(pub BindGroup);
/// Bind group layouts used for the OIT resolve pass.
#[derive(Resource)]
pub struct OitResolvePipeline {
/// View bind group layout.
pub view_bind_group_layout: BindGroupLayoutDescriptor,
/// Depth bind group layout.
pub oit_depth_bind_group_layout: BindGroupLayoutDescriptor,
}
impl OitResolvePipeline {
    /// Builds the two bind group layouts used by the resolve pass: the view
    /// layout (view uniform + the two OIT storage buffers) and the depth layout.
    fn new() -> Self {
        Self {
            view_bind_group_layout: BindGroupLayoutDescriptor::new(
                "oit_resolve_bind_group_layout",
                &BindGroupLayoutEntries::sequential(
                    ShaderStages::FRAGMENT,
                    (
                        uniform_buffer::<ViewUniform>(true),
                        // layers
                        storage_buffer_sized(false, None),
                        // layer ids
                        storage_buffer_sized(false, None),
                    ),
                ),
            ),
            oit_depth_bind_group_layout: BindGroupLayoutDescriptor::new(
                "oit_depth_bind_group_layout",
                &BindGroupLayoutEntries::single(ShaderStages::FRAGMENT, texture_depth_2d()),
            ),
        }
    }
}
/// Component storing the cached resolve-pipeline id for a view.
#[derive(Component, Deref, Clone, Copy)]
pub struct OitResolvePipelineId(pub CachedRenderPipelineId);
/// This key is used to cache the pipeline id and to specialize the render pipeline descriptor.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct OitResolvePipelineKey {
    /// Whether the view renders to the HDR target format.
    hdr: bool,
    /// OIT layer count; becomes the `LAYER_COUNT` shader def.
    layer_count: i32,
}
/// Queues (and caches per-view) the specialized OIT resolve pipeline for every
/// view with [`OrderIndependentTransparencySettings`], inserting an
/// [`OitResolvePipelineId`] component on the view entity.
pub fn queue_oit_resolve_pipeline(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    resolve_pipeline: Res<OitResolvePipeline>,
    views: Query<
        (
            Entity,
            &ExtractedView,
            &OrderIndependentTransparencySettings,
        ),
        With<OrderIndependentTransparencySettings>,
    >,
    fullscreen_shader: Res<FullscreenShader>,
    asset_server: Res<AssetServer>,
    // Store the key with the id to make the clean up logic easier.
    // This also means it will always replace the entry if the key changes so nothing to clean up.
    mut cached_pipeline_id: Local<EntityHashMap<(OitResolvePipelineKey, CachedRenderPipelineId)>>,
) {
    let mut current_view_entities = EntityHashSet::default();
    for (e, view, oit_settings) in &views {
        current_view_entities.insert(e);
        let key = OitResolvePipelineKey {
            hdr: view.hdr,
            layer_count: oit_settings.layer_count,
        };
        // Fast path: the key is unchanged, so reuse the cached pipeline id.
        if let Some((cached_key, id)) = cached_pipeline_id.get(&e)
            && *cached_key == key
        {
            commands.entity(e).insert(OitResolvePipelineId(*id));
            continue;
        }
        let desc = specialize_oit_resolve_pipeline(
            key,
            &resolve_pipeline,
            &fullscreen_shader,
            &asset_server,
        );
        let pipeline_id = pipeline_cache.queue_render_pipeline(desc);
        commands.entity(e).insert(OitResolvePipelineId(pipeline_id));
        cached_pipeline_id.insert(e, (key, pipeline_id));
    }
    // Clear cache for views that don't exist anymore.
    // `retain` drops stale entries in place, avoiding the per-frame Vec that the
    // previous collect-then-remove approach allocated.
    cached_pipeline_id.retain(|e, _| current_view_entities.contains(e));
}
/// Builds the resolve-pass render pipeline descriptor, specialized on the
/// view's output format (HDR or default) and the configured layer count.
fn specialize_oit_resolve_pipeline(
    key: OitResolvePipelineKey,
    resolve_pipeline: &OitResolvePipeline,
    fullscreen_shader: &FullscreenShader,
    asset_server: &AssetServer,
) -> RenderPipelineDescriptor {
    let format = match key.hdr {
        true => ViewTarget::TEXTURE_FORMAT_HDR,
        false => TextureFormat::bevy_default(),
    };
    // The layer count is baked into the shader as the `LAYER_COUNT` def.
    let shader_defs = vec![ShaderDefVal::UInt(
        "LAYER_COUNT".into(),
        key.layer_count as u32,
    )];
    let target = ColorTargetState {
        format,
        blend: Some(BlendState {
            color: BlendComponent::OVER,
            alpha: BlendComponent::OVER,
        }),
        write_mask: ColorWrites::ALL,
    };
    RenderPipelineDescriptor {
        label: Some("oit_resolve_pipeline".into()),
        layout: vec![
            resolve_pipeline.view_bind_group_layout.clone(),
            resolve_pipeline.oit_depth_bind_group_layout.clone(),
        ],
        fragment: Some(FragmentState {
            shader: load_embedded_asset!(asset_server, "oit_resolve.wgsl"),
            shader_defs,
            targets: vec![Some(target)],
            ..default()
        }),
        vertex: fullscreen_shader.to_vertex_state(),
        ..default()
    }
}
/// Creates the [`OitResolveBindGroup`] resource binding the view uniforms and
/// the OIT layer/layer-id buffers for the resolve shader. If any of the three
/// bindings is unavailable, the resource is simply not (re)inserted this frame.
pub fn prepare_oit_resolve_bind_group(
    mut commands: Commands,
    resolve_pipeline: Res<OitResolvePipeline>,
    render_device: Res<RenderDevice>,
    view_uniforms: Res<ViewUniforms>,
    pipeline_cache: Res<PipelineCache>,
    buffers: Res<OitBuffers>,
) {
    if let (Some(view_binding), Some(layers_binding), Some(layer_ids_binding)) = (
        view_uniforms.uniforms.binding(),
        buffers.layers.binding(),
        buffers.layer_ids.binding(),
    ) {
        let bind_group = render_device.create_bind_group(
            "oit_resolve_bind_group",
            &pipeline_cache.get_bind_group_layout(&resolve_pipeline.view_bind_group_layout),
            // `view_binding` is moved into the entries; the previous `.clone()`
            // was redundant since the value was not used again.
            &BindGroupEntries::sequential((view_binding, layers_binding, layer_ids_binding)),
        );
        commands.insert_resource(OitResolveBindGroup(bind_group));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/skybox/prepass.rs | crates/bevy_core_pipeline/src/skybox/prepass.rs | //! Adds motion vector support to skyboxes. See [`SkyboxPrepassPipeline`] for details.
use bevy_asset::{load_embedded_asset, AssetServer, Handle};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{Has, With},
resource::Resource,
system::{Commands, Query, Res, ResMut},
};
use bevy_render::{
render_resource::{
binding_types::uniform_buffer, BindGroup, BindGroupEntries, BindGroupLayoutDescriptor,
BindGroupLayoutEntries, CachedRenderPipelineId, CompareFunction, DepthStencilState,
FragmentState, MultisampleState, PipelineCache, RenderPipelineDescriptor, ShaderStages,
SpecializedRenderPipeline, SpecializedRenderPipelines,
},
renderer::RenderDevice,
view::{Msaa, ViewUniform, ViewUniforms},
};
use bevy_shader::Shader;
use bevy_utils::prelude::default;
use crate::{
core_3d::CORE_3D_DEPTH_FORMAT,
prepass::{
prepass_target_descriptors, MotionVectorPrepass, NormalPrepass, PreviousViewData,
PreviousViewUniforms,
},
FullscreenShader, Skybox,
};
/// This pipeline writes motion vectors to the prepass for all [`Skybox`]es.
///
/// This allows features like motion blur and TAA to work correctly on the skybox. Without this, for
/// example, motion blur would not be applied to the skybox when the camera is rotated and motion
/// blur is enabled.
#[derive(Resource)]
pub struct SkyboxPrepassPipeline {
bind_group_layout: BindGroupLayoutDescriptor,
fullscreen_shader: FullscreenShader,
fragment_shader: Handle<Shader>,
}
/// Used to specialize the [`SkyboxPrepassPipeline`].
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct SkyboxPrepassPipelineKey {
samples: u32,
normal_prepass: bool,
}
/// Stores the ID for a camera's specialized pipeline, so it can be retrieved from the
/// [`PipelineCache`].
#[derive(Component)]
pub struct RenderSkyboxPrepassPipeline(pub CachedRenderPipelineId);
/// Stores the [`SkyboxPrepassPipeline`] bind group for a camera. This is later used by the prepass
/// render graph node to add this binding to the prepass's render pass.
#[derive(Component)]
pub struct SkyboxPrepassBindGroup(pub BindGroup);
/// Creates the [`SkyboxPrepassPipeline`] resource: the view/previous-view
/// uniform bind group layout, the fullscreen vertex shader, and the embedded
/// prepass fragment shader.
pub fn init_skybox_prepass_pipeline(
    mut commands: Commands,
    fullscreen_shader: Res<FullscreenShader>,
    asset_server: Res<AssetServer>,
) {
    let bind_group_layout = BindGroupLayoutDescriptor::new(
        "skybox_prepass_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                uniform_buffer::<ViewUniform>(true),
                uniform_buffer::<PreviousViewData>(true),
            ),
        ),
    );
    let fragment_shader = load_embedded_asset!(asset_server.as_ref(), "skybox_prepass.wgsl");
    commands.insert_resource(SkyboxPrepassPipeline {
        bind_group_layout,
        fullscreen_shader: fullscreen_shader.clone(),
        fragment_shader,
    });
}
impl SpecializedRenderPipeline for SkyboxPrepassPipeline {
    type Key = SkyboxPrepassPipelineKey;
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        RenderPipelineDescriptor {
            label: Some("skybox_prepass_pipeline".into()),
            layout: vec![self.bind_group_layout.clone()],
            // Fullscreen-triangle vertex stage; all real work is in the fragment shader.
            vertex: self.fullscreen_shader.to_vertex_state(),
            depth_stencil: Some(DepthStencilState {
                format: CORE_3D_DEPTH_FORMAT,
                // Depth is tested only; the prepass depth buffer is not written here.
                depth_write_enabled: false,
                depth_compare: CompareFunction::GreaterEqual,
                stencil: default(),
                bias: default(),
            }),
            multisample: MultisampleState {
                count: key.samples,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            fragment: Some(FragmentState {
                shader: self.fragment_shader.clone(),
                // NOTE(review): the bool args appear to toggle motion-vector/deferred
                // targets — confirm against `prepass_target_descriptors`.
                targets: prepass_target_descriptors(key.normal_prepass, true, false),
                ..default()
            }),
            ..default()
        }
    }
}
/// Specialize and cache the [`SkyboxPrepassPipeline`] for each camera that has
/// both a [`Skybox`] and a [`MotionVectorPrepass`].
pub fn prepare_skybox_prepass_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<SkyboxPrepassPipeline>>,
    pipeline: Res<SkyboxPrepassPipeline>,
    views: Query<(Entity, Has<NormalPrepass>, &Msaa), (With<Skybox>, With<MotionVectorPrepass>)>,
) {
    for (entity, normal_prepass, msaa) in &views {
        let pipeline_id = pipelines.specialize(
            &pipeline_cache,
            &pipeline,
            SkyboxPrepassPipelineKey {
                samples: msaa.samples(),
                normal_prepass,
            },
        );
        commands
            .entity(entity)
            .insert(RenderSkyboxPrepassPipeline(pipeline_id));
    }
}
/// Builds the [`SkyboxPrepassBindGroup`] for every camera with a [`Skybox`]
/// and a [`MotionVectorPrepass`], binding the current and previous view
/// uniforms so the prepass shader can compute camera motion between frames.
/// Views are skipped while either uniform buffer has no binding yet.
pub fn prepare_skybox_prepass_bind_groups(
    mut commands: Commands,
    pipeline: Res<SkyboxPrepassPipeline>,
    view_uniforms: Res<ViewUniforms>,
    prev_view_uniforms: Res<PreviousViewUniforms>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    views: Query<Entity, (With<Skybox>, With<MotionVectorPrepass>)>,
) {
    for entity in &views {
        if let (Some(view_binding), Some(prev_view_binding)) = (
            view_uniforms.uniforms.binding(),
            prev_view_uniforms.uniforms.binding(),
        ) {
            let bind_group = render_device.create_bind_group(
                "skybox_prepass_bind_group",
                &pipeline_cache.get_bind_group_layout(&pipeline.bind_group_layout),
                &BindGroupEntries::sequential((view_binding, prev_view_binding)),
            );
            commands
                .entity(entity)
                .insert(SkyboxPrepassBindGroup(bind_group));
        }
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/skybox/mod.rs | crates/bevy_core_pipeline/src/skybox/mod.rs | use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, AssetServer, Handle};
use bevy_camera::Exposure;
use bevy_ecs::{
prelude::{Component, Entity},
query::{QueryItem, With},
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs,
system::{Commands, Query, Res, ResMut},
};
use bevy_image::{BevyDefault, Image};
use bevy_math::{Mat4, Quat};
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::{
extract_component::{
ComponentUniforms, DynamicUniformIndex, ExtractComponent, ExtractComponentPlugin,
UniformComponentPlugin,
},
render_asset::RenderAssets,
render_resource::{
binding_types::{sampler, texture_cube, uniform_buffer},
*,
},
renderer::RenderDevice,
texture::GpuImage,
view::{ExtractedView, Msaa, ViewTarget, ViewUniform, ViewUniforms},
Render, RenderApp, RenderStartup, RenderSystems,
};
use bevy_shader::Shader;
use bevy_transform::components::Transform;
use bevy_utils::default;
use prepass::SkyboxPrepassPipeline;
use crate::{
core_3d::CORE_3D_DEPTH_FORMAT, prepass::PreviousViewUniforms,
skybox::prepass::init_skybox_prepass_pipeline,
};
pub mod prepass;
pub struct SkyboxPlugin;
impl Plugin for SkyboxPlugin {
    fn build(&self, app: &mut App) {
        // Register both skybox shaders as embedded assets.
        embedded_asset!(app, "skybox.wgsl");
        embedded_asset!(app, "skybox_prepass.wgsl");
        // Extract the `Skybox` component and upload `SkyboxUniforms` each frame.
        app.add_plugins((
            ExtractComponentPlugin::<Skybox>::default(),
            UniformComponentPlugin::<SkyboxUniforms>::default(),
        ));
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<SpecializedRenderPipelines<SkyboxPipeline>>()
            .init_resource::<SpecializedRenderPipelines<SkyboxPrepassPipeline>>()
            .init_resource::<PreviousViewUniforms>()
            // Pipeline resources are created once at render-app startup...
            .add_systems(
                RenderStartup,
                (init_skybox_pipeline, init_skybox_prepass_pipeline),
            )
            // ...then specialized and bound per frame.
            .add_systems(
                Render,
                (
                    prepare_skybox_pipelines.in_set(RenderSystems::Prepare),
                    prepass::prepare_skybox_prepass_pipelines.in_set(RenderSystems::Prepare),
                    prepare_skybox_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                    prepass::prepare_skybox_prepass_bind_groups
                        .in_set(RenderSystems::PrepareBindGroups),
                ),
            );
    }
}
/// Adds a skybox to a 3D camera, based on a cubemap texture.
///
/// Note that this component does not (currently) affect the scene's lighting.
/// To do so, use `EnvironmentMapLight` alongside this component.
///
/// See also <https://en.wikipedia.org/wiki/Skybox_(video_games)>.
#[derive(Component, Clone, Reflect)]
#[reflect(Component, Default, Clone)]
pub struct Skybox {
pub image: Handle<Image>,
/// Scale factor applied to the skybox image.
/// After applying this multiplier to the image samples, the resulting values should
/// be in units of [cd/m^2](https://en.wikipedia.org/wiki/Candela_per_square_metre).
pub brightness: f32,
/// View space rotation applied to the skybox cubemap.
/// This is useful for users who require a different axis, such as the Z-axis, to serve
/// as the vertical axis.
pub rotation: Quat,
}
impl Default for Skybox {
    /// Default skybox: no image, zero brightness, and no rotation.
    fn default() -> Self {
        Self {
            image: Default::default(),
            brightness: 0.0,
            rotation: Quat::IDENTITY,
        }
    }
}
impl ExtractComponent for Skybox {
    type QueryData = (&'static Self, Option<&'static Exposure>);
    type QueryFilter = ();
    // Extraction emits the skybox component itself plus its GPU uniform data.
    type Out = (Self, SkyboxUniforms);
    fn extract_component(
        (skybox, exposure): QueryItem<'_, '_, Self::QueryData>,
    ) -> Option<Self::Out> {
        // Cameras without an `Exposure` component use the default exposure value.
        let exposure = exposure
            .map(Exposure::exposure)
            .unwrap_or_else(|| Exposure::default().exposure());
        Some((
            skybox.clone(),
            SkyboxUniforms {
                // Pre-multiply brightness by exposure so the shader receives one factor.
                brightness: skybox.brightness * exposure,
                // NOTE(review): the inverse suggests this matrix maps view directions
                // into cubemap space — confirm against skybox.wgsl.
                transform: Transform::from_rotation(skybox.rotation.inverse()).to_matrix(),
                #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
                _wasm_padding_8b: 0,
                #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
                _wasm_padding_12b: 0,
                #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
                _wasm_padding_16b: 0,
            },
        ))
    }
}
// TODO: Replace with a push constant once WebGPU gets support for that
/// Per-view GPU uniform data built from [`Skybox`] during extraction.
#[derive(Component, ShaderType, Clone)]
pub struct SkyboxUniforms {
    // `Skybox::brightness` pre-multiplied by the camera exposure.
    brightness: f32,
    // Matrix built from the inverse of `Skybox::rotation`.
    transform: Mat4,
    // WebGL2-only padding fields (presumably for uniform buffer layout rules —
    // TODO confirm the exact alignment requirement).
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _wasm_padding_8b: u32,
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _wasm_padding_12b: u32,
    #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
    _wasm_padding_16b: u32,
}
/// Skybox render pipeline resource: bind group layout plus the skybox shader handle.
#[derive(Resource)]
struct SkyboxPipeline {
    bind_group_layout: BindGroupLayoutDescriptor,
    shader: Handle<Shader>,
}
impl SkyboxPipeline {
    /// Wraps the skybox shader handle together with the bind group layout:
    /// cubemap texture, filtering sampler, view uniform, and skybox uniforms.
    fn new(shader: Handle<Shader>) -> Self {
        let bind_group_layout = BindGroupLayoutDescriptor::new(
            "skybox_bind_group_layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::FRAGMENT,
                (
                    texture_cube(TextureSampleType::Float { filterable: true }),
                    sampler(SamplerBindingType::Filtering),
                    uniform_buffer::<ViewUniform>(true)
                        .visibility(ShaderStages::VERTEX_FRAGMENT),
                    uniform_buffer::<SkyboxUniforms>(true),
                ),
            ),
        );
        Self {
            bind_group_layout,
            shader,
        }
    }
}
/// Creates the [`SkyboxPipeline`] resource from the embedded skybox shader.
fn init_skybox_pipeline(mut commands: Commands, asset_server: Res<AssetServer>) {
    commands.insert_resource(SkyboxPipeline::new(load_embedded_asset!(
        asset_server.as_ref(),
        "skybox.wgsl"
    )));
}
/// Cache/specialization key for [`SkyboxPipeline`].
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
struct SkyboxPipelineKey {
    /// Whether the view renders to the HDR target format.
    hdr: bool,
    /// MSAA sample count.
    samples: u32,
    /// Depth attachment format the pipeline is built against.
    depth_format: TextureFormat,
}
impl SpecializedRenderPipeline for SkyboxPipeline {
    type Key = SkyboxPipelineKey;
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        RenderPipelineDescriptor {
            label: Some("skybox_pipeline".into()),
            layout: vec![self.bind_group_layout.clone()],
            // The same shader asset supplies both the vertex and fragment stages.
            vertex: VertexState {
                shader: self.shader.clone(),
                ..default()
            },
            depth_stencil: Some(DepthStencilState {
                format: key.depth_format,
                // Depth is tested only; the skybox never writes depth.
                depth_write_enabled: false,
                depth_compare: CompareFunction::GreaterEqual,
                // Stencil unused: both faces ignore it and the masks are zero.
                stencil: StencilState {
                    front: StencilFaceState::IGNORE,
                    back: StencilFaceState::IGNORE,
                    read_mask: 0,
                    write_mask: 0,
                },
                // No depth bias.
                bias: DepthBiasState {
                    constant: 0,
                    slope_scale: 0.0,
                    clamp: 0.0,
                },
            }),
            multisample: MultisampleState {
                count: key.samples,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                targets: vec![Some(ColorTargetState {
                    // HDR views render to the HDR intermediate format.
                    format: if key.hdr {
                        ViewTarget::TEXTURE_FORMAT_HDR
                    } else {
                        TextureFormat::bevy_default()
                    },
                    // BlendState::REPLACE is not needed here, and None will be potentially much faster in some cases.
                    blend: None,
                    write_mask: ColorWrites::ALL,
                })],
                ..default()
            }),
            ..default()
        }
    }
}
/// Component storing a view's cached skybox render pipeline id.
#[derive(Component)]
pub struct SkyboxPipelineId(pub CachedRenderPipelineId);
/// Specialize and cache the [`SkyboxPipeline`] for each view with a [`Skybox`],
/// inserting the resulting [`SkyboxPipelineId`] on the view entity.
fn prepare_skybox_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<SkyboxPipeline>>,
    pipeline: Res<SkyboxPipeline>,
    views: Query<(Entity, &ExtractedView, &Msaa), With<Skybox>>,
) {
    for (entity, view, msaa) in &views {
        let key = SkyboxPipelineKey {
            hdr: view.hdr,
            samples: msaa.samples(),
            depth_format: CORE_3D_DEPTH_FORMAT,
        };
        let pipeline_id = pipelines.specialize(&pipeline_cache, &pipeline, key);
        commands
            .entity(entity)
            .insert(SkyboxPipelineId(pipeline_id));
    }
}
/// A view's skybox bind group paired with its dynamic uniform offset.
#[derive(Component)]
pub struct SkyboxBindGroup(pub (BindGroup, u32));
/// Builds the [`SkyboxBindGroup`] for each view with a [`Skybox`], binding the
/// cubemap texture/sampler, the view uniforms, and the skybox uniforms.
/// Views whose image or uniform bindings aren't available yet are skipped.
fn prepare_skybox_bind_groups(
    mut commands: Commands,
    pipeline: Res<SkyboxPipeline>,
    view_uniforms: Res<ViewUniforms>,
    skybox_uniforms: Res<ComponentUniforms<SkyboxUniforms>>,
    images: Res<RenderAssets<GpuImage>>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    views: Query<(Entity, &Skybox, &DynamicUniformIndex<SkyboxUniforms>)>,
) {
    for (entity, skybox, skybox_uniform_index) in &views {
        let (Some(gpu_image), Some(view_binding), Some(skybox_binding)) = (
            images.get(&skybox.image),
            view_uniforms.uniforms.binding(),
            skybox_uniforms.binding(),
        ) else {
            continue;
        };
        let bind_group = render_device.create_bind_group(
            "skybox_bind_group",
            &pipeline_cache.get_bind_group_layout(&pipeline.bind_group_layout),
            &BindGroupEntries::sequential((
                &gpu_image.texture_view,
                &gpu_image.sampler,
                view_binding,
                skybox_binding,
            )),
        );
        commands
            .entity(entity)
            .insert(SkyboxBindGroup((bind_group, skybox_uniform_index.index())));
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs | crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs | use crate::core_3d::Transparent3d;
use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::ViewSortedRenderPhases,
render_resource::{RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, ViewDepthTexture, ViewTarget},
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
/// A [`bevy_render::render_graph::Node`] that runs the [`Transparent3d`]
/// [`ViewSortedRenderPhases`].
#[derive(Default)]
pub struct MainTransparentPass3dNode;
impl ViewNode for MainTransparentPass3dNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ExtractedView,
        &'static ViewTarget,
        &'static ViewDepthTexture,
        Option<&'static MainPassResolutionOverride>,
    );
    // Renders this view's sorted `Transparent3d` phase, if it has any items.
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (camera, view, target, depth, resolution_override): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let view_entity = graph.view_entity();
        // No transparent phases registered at all: nothing to do.
        let Some(transparent_phases) =
            world.get_resource::<ViewSortedRenderPhases<Transparent3d>>()
        else {
            return Ok(());
        };
        // No phase recorded for this particular view: nothing to do.
        let Some(transparent_phase) = transparent_phases.get(&view.retained_view_entity) else {
            return Ok(());
        };
        if !transparent_phase.items.is_empty() {
            // Run the transparent pass, sorted back-to-front
            // NOTE: Scoped to drop the mutable borrow of render_context
            #[cfg(feature = "trace")]
            let _main_transparent_pass_3d_span = info_span!("main_transparent_pass_3d").entered();
            let diagnostics = render_context.diagnostic_recorder();
            let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
                label: Some("main_transparent_pass_3d"),
                color_attachments: &[Some(target.get_color_attachment())],
                // NOTE: For the transparent pass we load the depth buffer. There should be no
                // need to write to it, but store is set to `true` as a workaround for issue #3776,
                // https://github.com/bevyengine/bevy/issues/3776
                // so that wgpu does not clear the depth buffer.
                // As the opaque and alpha mask passes run first, opaque meshes can occlude
                // transparent ones.
                depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)),
                timestamp_writes: None,
                occlusion_query_set: None,
            });
            let pass_span = diagnostics.pass_span(&mut render_pass, "main_transparent_pass_3d");
            // Apply the camera viewport, honoring any resolution override.
            if let Some(viewport) =
                Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
            {
                render_pass.set_camera_viewport(&viewport);
            }
            // Render errors are logged instead of propagated so the graph keeps running.
            if let Err(err) = transparent_phase.render(&mut render_pass, world, view_entity) {
                error!("Error encountered while rendering the transparent phase {err:?}");
            }
            pass_span.end(&mut render_pass);
        }
        // WebGL2 quirk: if ending with a render pass with a custom viewport, the viewport isn't
        // reset for the next render pass so add an empty render pass without a custom viewport
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        if camera.viewport.is_some() {
            #[cfg(feature = "trace")]
            let _reset_viewport_pass_3d = info_span!("reset_viewport_pass_3d").entered();
            let pass_descriptor = RenderPassDescriptor {
                label: Some("reset_viewport_pass_3d"),
                color_attachments: &[Some(target.get_color_attachment())],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            };
            render_context
                .command_encoder()
                .begin_render_pass(&pass_descriptor);
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_3d/mod.rs | crates/bevy_core_pipeline/src/core_3d/mod.rs | mod main_opaque_pass_3d_node;
mod main_transmissive_pass_3d_node;
mod main_transparent_pass_3d_node;
/// Labels identifying the 3D core render sub-graph and its nodes.
pub mod graph {
    use bevy_render::render_graph::{RenderLabel, RenderSubGraph};
    /// The render sub-graph label for the 3D core pipeline.
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderSubGraph)]
    pub struct Core3d;
    /// Named inputs of the [`Core3d`] sub-graph.
    pub mod input {
        /// Slot name for the view entity fed into the graph.
        pub const VIEW_ENTITY: &str = "view_entity";
    }
    /// Labels for the nodes of the [`Core3d`] sub-graph, listed roughly in
    /// the order they run: prepasses, main passes, then post-processing.
    #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
    pub enum Node3d {
        MsaaWriteback,
        EarlyPrepass,
        EarlyDownsampleDepth,
        LatePrepass,
        EarlyDeferredPrepass,
        LateDeferredPrepass,
        CopyDeferredLightingId,
        EndPrepasses,
        StartMainPass,
        MainOpaquePass,
        MainTransmissivePass,
        MainTransparentPass,
        EndMainPass,
        Wireframe,
        StartMainPassPostProcessing,
        LateDownsampleDepth,
        MotionBlur,
        Taa,
        DlssSuperResolution,
        DlssRayReconstruction,
        Bloom,
        AutoExposure,
        DepthOfField,
        PostProcessing,
        Tonemapping,
        Fxaa,
        Smaa,
        Upscaling,
        ContrastAdaptiveSharpening,
        EndMainPassPostProcessing,
    }
}
// PERF: vulkan docs recommend using 24 bit depth for better performance
pub const CORE_3D_DEPTH_FORMAT: TextureFormat = TextureFormat::Depth32Float;
/// True if multisampled depth textures are supported on this platform.
///
/// In theory, Naga supports depth textures on WebGL 2. In practice, it doesn't,
/// because of a silly bug whereby Naga assumes that all depth textures are
/// `sampler2DShadow` and will cheerfully generate invalid GLSL that tries to
/// perform non-percentage-closer-filtering with such a sampler. Therefore we
/// disable depth of field and screen space reflections entirely on WebGL 2.
#[cfg(not(any(feature = "webgpu", not(target_arch = "wasm32"))))]
pub const DEPTH_TEXTURE_SAMPLING_SUPPORTED: bool = false;
/// True if multisampled depth textures are supported on this platform.
///
/// In theory, Naga supports depth textures on WebGL 2. In practice, it doesn't,
/// because of a silly bug whereby Naga assumes that all depth textures are
/// `sampler2DShadow` and will cheerfully generate invalid GLSL that tries to
/// perform non-percentage-closer-filtering with such a sampler. Therefore we
/// disable depth of field and screen space reflections entirely on WebGL 2.
#[cfg(any(feature = "webgpu", not(target_arch = "wasm32")))]
pub const DEPTH_TEXTURE_SAMPLING_SUPPORTED: bool = true;
use core::ops::Range;
use bevy_camera::{Camera, Camera3d, Camera3dDepthLoadOp};
use bevy_diagnostic::FrameCount;
use bevy_render::{
batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport},
camera::CameraRenderGraph,
experimental::occlusion_culling::OcclusionCulling,
mesh::allocator::SlabId,
render_phase::PhaseItemBatchSetKey,
texture::CachedTexture,
view::{prepare_view_targets, NoIndirectDrawing, RetainedViewEntity},
};
pub use main_opaque_pass_3d_node::*;
pub use main_transparent_pass_3d_node::*;
use bevy_app::{App, Plugin, PostUpdate};
use bevy_asset::UntypedAssetId;
use bevy_color::LinearRgba;
use bevy_ecs::prelude::*;
use bevy_image::{BevyDefault, ToExtents};
use bevy_math::FloatOrd;
use bevy_platform::collections::{HashMap, HashSet};
use bevy_render::{
camera::ExtractedCamera,
extract_component::ExtractComponentPlugin,
prelude::Msaa,
render_graph::{EmptyNode, RenderGraphExt, ViewNodeRunner},
render_phase::{
sort_phase_system, BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId,
DrawFunctions, PhaseItem, PhaseItemExtraIndex, SortedPhaseItem, ViewBinnedRenderPhases,
ViewSortedRenderPhases,
},
render_resource::{
CachedRenderPipelineId, FilterMode, Sampler, SamplerDescriptor, Texture, TextureDescriptor,
TextureDimension, TextureFormat, TextureUsages, TextureView,
},
renderer::RenderDevice,
sync_world::{MainEntity, RenderEntity},
texture::{ColorAttachment, TextureCache},
view::{ExtractedView, ViewDepthTexture, ViewTarget},
Extract, ExtractSchedule, Render, RenderApp, RenderSystems,
};
use nonmax::NonMaxU32;
use tracing::warn;
use crate::{
core_3d::main_transmissive_pass_3d_node::MainTransmissivePass3dNode,
deferred::{
copy_lighting_id::CopyDeferredLightingIdNode,
node::{EarlyDeferredGBufferPrepassNode, LateDeferredGBufferPrepassNode},
AlphaMask3dDeferred, Opaque3dDeferred, DEFERRED_LIGHTING_PASS_ID_FORMAT,
DEFERRED_PREPASS_FORMAT,
},
prepass::{
node::{EarlyPrepassNode, LatePrepassNode},
AlphaMask3dPrepass, DeferredPrepass, DeferredPrepassDoubleBuffer, DepthPrepass,
DepthPrepassDoubleBuffer, MotionVectorPrepass, NormalPrepass, Opaque3dPrepass,
OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey, ViewPrepassTextures,
MOTION_VECTOR_PREPASS_FORMAT, NORMAL_PREPASS_FORMAT,
},
skybox::SkyboxPlugin,
tonemapping::{DebandDither, Tonemapping, TonemappingNode},
upscaling::UpscalingNode,
};
use self::graph::{Core3d, Node3d};
/// Sets up core 3D rendering: phase resources, extraction/preparation
/// systems, and the `Core3d` render sub-graph with its node ordering.
pub struct Core3dPlugin;
impl Plugin for Core3dPlugin {
    fn build(&self, app: &mut App) {
        // Main-world setup: required components for `Camera3d`, skybox
        // support, `Camera3d` extraction, and an MSAA check each frame.
        app.register_required_components_with::<Camera3d, DebandDither>(|| DebandDither::Enabled)
            .register_required_components_with::<Camera3d, CameraRenderGraph>(|| {
                CameraRenderGraph::new(Core3d)
            })
            .register_required_components::<Camera3d, Tonemapping>()
            .add_plugins((SkyboxPlugin, ExtractComponentPlugin::<Camera3d>::default()))
            .add_systems(PostUpdate, check_msaa);
        // Everything below lives in the render world; skip it when the render
        // app is absent (e.g. headless configurations).
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        // Draw functions and per-view phase containers for every core-3D
        // phase, plus the systems that extract, sort, and prepare them.
        render_app
            .init_resource::<DrawFunctions<Opaque3d>>()
            .init_resource::<DrawFunctions<AlphaMask3d>>()
            .init_resource::<DrawFunctions<Transmissive3d>>()
            .init_resource::<DrawFunctions<Transparent3d>>()
            .init_resource::<DrawFunctions<Opaque3dPrepass>>()
            .init_resource::<DrawFunctions<AlphaMask3dPrepass>>()
            .init_resource::<DrawFunctions<Opaque3dDeferred>>()
            .init_resource::<DrawFunctions<AlphaMask3dDeferred>>()
            .init_resource::<ViewBinnedRenderPhases<Opaque3d>>()
            .init_resource::<ViewBinnedRenderPhases<AlphaMask3d>>()
            .init_resource::<ViewBinnedRenderPhases<Opaque3dPrepass>>()
            .init_resource::<ViewBinnedRenderPhases<AlphaMask3dPrepass>>()
            .init_resource::<ViewBinnedRenderPhases<Opaque3dDeferred>>()
            .init_resource::<ViewBinnedRenderPhases<AlphaMask3dDeferred>>()
            .init_resource::<ViewSortedRenderPhases<Transmissive3d>>()
            .init_resource::<ViewSortedRenderPhases<Transparent3d>>()
            .add_systems(ExtractSchedule, extract_core_3d_camera_phases)
            .add_systems(ExtractSchedule, extract_camera_prepass_phase)
            .add_systems(
                Render,
                (
                    sort_phase_system::<Transmissive3d>.in_set(RenderSystems::PhaseSort),
                    sort_phase_system::<Transparent3d>.in_set(RenderSystems::PhaseSort),
                    configure_occlusion_culling_view_targets
                        .after(prepare_view_targets)
                        .in_set(RenderSystems::ManageViews),
                    prepare_core_3d_depth_textures.in_set(RenderSystems::PrepareResources),
                    prepare_core_3d_transmission_textures.in_set(RenderSystems::PrepareResources),
                    prepare_prepass_textures.in_set(RenderSystems::PrepareResources),
                ),
            );
        // Build the `Core3d` graph: register each node, then declare the
        // linear edge chain below that fixes their execution order.
        render_app
            .add_render_sub_graph(Core3d)
            .add_render_graph_node::<ViewNodeRunner<EarlyPrepassNode>>(Core3d, Node3d::EarlyPrepass)
            .add_render_graph_node::<ViewNodeRunner<LatePrepassNode>>(Core3d, Node3d::LatePrepass)
            .add_render_graph_node::<ViewNodeRunner<EarlyDeferredGBufferPrepassNode>>(
                Core3d,
                Node3d::EarlyDeferredPrepass,
            )
            .add_render_graph_node::<ViewNodeRunner<LateDeferredGBufferPrepassNode>>(
                Core3d,
                Node3d::LateDeferredPrepass,
            )
            .add_render_graph_node::<ViewNodeRunner<CopyDeferredLightingIdNode>>(
                Core3d,
                Node3d::CopyDeferredLightingId,
            )
            .add_render_graph_node::<EmptyNode>(Core3d, Node3d::EndPrepasses)
            .add_render_graph_node::<EmptyNode>(Core3d, Node3d::StartMainPass)
            .add_render_graph_node::<ViewNodeRunner<MainOpaquePass3dNode>>(
                Core3d,
                Node3d::MainOpaquePass,
            )
            .add_render_graph_node::<ViewNodeRunner<MainTransmissivePass3dNode>>(
                Core3d,
                Node3d::MainTransmissivePass,
            )
            .add_render_graph_node::<ViewNodeRunner<MainTransparentPass3dNode>>(
                Core3d,
                Node3d::MainTransparentPass,
            )
            .add_render_graph_node::<EmptyNode>(Core3d, Node3d::EndMainPass)
            .add_render_graph_node::<EmptyNode>(Core3d, Node3d::StartMainPassPostProcessing)
            .add_render_graph_node::<ViewNodeRunner<TonemappingNode>>(Core3d, Node3d::Tonemapping)
            .add_render_graph_node::<EmptyNode>(Core3d, Node3d::EndMainPassPostProcessing)
            .add_render_graph_node::<ViewNodeRunner<UpscalingNode>>(Core3d, Node3d::Upscaling)
            .add_render_graph_edges(
                Core3d,
                // Execution order: prepasses -> deferred copy -> main passes
                // (opaque, transmissive, transparent) -> tonemapping -> upscale.
                (
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    Node3d::CopyDeferredLightingId,
                    Node3d::EndPrepasses,
                    Node3d::StartMainPass,
                    Node3d::MainOpaquePass,
                    Node3d::MainTransmissivePass,
                    Node3d::MainTransparentPass,
                    Node3d::EndMainPass,
                    Node3d::StartMainPassPostProcessing,
                    Node3d::Tonemapping,
                    Node3d::EndMainPassPostProcessing,
                    Node3d::Upscaling,
                ),
            );
    }
}
/// Opaque 3D [`BinnedPhaseItem`]s.
pub struct Opaque3d {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: Opaque3dBatchSetKey,
    /// The key, which determines which can be batched.
    pub bin_key: Opaque3dBinKey,
    /// An entity from which data will be fetched, including the mesh if
    /// applicable.
    ///
    /// The tuple pairs the render-world [`Entity`] (`.0`) with its
    /// corresponding [`MainEntity`] (`.1`); see the `PhaseItem` accessors.
    pub representative_entity: (Entity, MainEntity),
    /// The ranges of instances.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
}
/// Information that must be identical in order to place opaque meshes in the
/// same *batch set*.
///
/// A batch set is a set of batches that can be multi-drawn together, if
/// multi-draw is in use.
// Derives comparison, ordering, and hashing so items can be grouped and
// ordered by this key.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Opaque3dBatchSetKey {
    /// The identifier of the render pipeline.
    pub pipeline: CachedRenderPipelineId,
    /// The function used to draw.
    pub draw_function: DrawFunctionId,
    /// The ID of a bind group specific to the material instance.
    ///
    /// In the case of PBR, this is the `MaterialBindGroupIndex`.
    pub material_bind_group_index: Option<u32>,
    /// The ID of the slab of GPU memory that contains vertex data.
    ///
    /// For non-mesh items, you can fill this with 0 if your items can be
    /// multi-drawn, or with a unique value if they can't.
    pub vertex_slab: SlabId,
    /// The ID of the slab of GPU memory that contains index data, if present.
    ///
    /// For non-mesh items, you can safely fill this with `None`.
    pub index_slab: Option<SlabId>,
    /// Index of the slab that the lightmap resides in, if a lightmap is
    /// present.
    pub lightmap_slab: Option<NonMaxU32>,
}
impl PhaseItemBatchSetKey for Opaque3dBatchSetKey {
    /// Whether this batch set's geometry uses an index buffer, i.e. whether
    /// an index slab is present.
    fn indexed(&self) -> bool {
        self.index_slab.is_some()
    }
}
/// Data that must be identical in order to *batch* phase items together.
///
/// Note that a *batch set* (if multi-draw is in use) contains multiple batches.
// Derives comparison, ordering, and hashing so items can be binned by this key.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Opaque3dBinKey {
    /// The asset that this phase item is associated with.
    ///
    /// Normally, this is the ID of the mesh, but for non-mesh items it might be
    /// the ID of another type of asset.
    pub asset_id: UntypedAssetId,
}
impl PhaseItem for Opaque3d {
    /// The render-world entity this item draws from.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }
    /// The corresponding main-world entity.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }
    /// The draw function, shared by the whole batch set.
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    // `#[inline]` added for consistency with the other `PhaseItem`
    // implementations in this file (e.g. `AlphaMask3d`), which mark these
    // trivial accessors inline.
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for Opaque3d {
    type BatchSetKey = Opaque3dBatchSetKey;
    type BinKey = Opaque3dBinKey;
    /// Constructs an [`Opaque3d`] phase item from its keys and per-item data.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Opaque3d {
            batch_set_key,
            bin_key,
            representative_entity,
            batch_range,
            extra_index,
        }
    }
}
impl CachedRenderPipelinePhaseItem for Opaque3d {
    /// The pipeline ID lives on the batch set key: items sharing a batch set
    /// key share a pipeline by construction.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
/// Alpha-masked 3D [`BinnedPhaseItem`]s.
pub struct AlphaMask3d {
    /// Determines which objects can be placed into a *batch set*.
    ///
    /// Objects in a single batch set can potentially be multi-drawn together,
    /// if it's enabled and the current platform supports it.
    pub batch_set_key: OpaqueNoLightmap3dBatchSetKey,
    /// The key, which determines which can be batched.
    pub bin_key: OpaqueNoLightmap3dBinKey,
    /// An entity from which data will be fetched, including the mesh if
    /// applicable; pairs the render-world [`Entity`] with its [`MainEntity`].
    pub representative_entity: (Entity, MainEntity),
    /// The range of instances to draw.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
}
impl PhaseItem for AlphaMask3d {
    /// The render-world entity this item draws from.
    #[inline]
    fn entity(&self) -> Entity {
        self.representative_entity.0
    }
    /// The corresponding main-world entity.
    // `#[inline]` added for consistency: every other accessor in this impl
    // (and in the sibling `PhaseItem` impls in this file) is marked inline.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.representative_entity.1
    }
    /// The draw function, shared by the whole batch set.
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.batch_set_key.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl BinnedPhaseItem for AlphaMask3d {
    type BinKey = OpaqueNoLightmap3dBinKey;
    type BatchSetKey = OpaqueNoLightmap3dBatchSetKey;
    /// Constructs an [`AlphaMask3d`] phase item from its keys and per-item data.
    #[inline]
    fn new(
        batch_set_key: Self::BatchSetKey,
        bin_key: Self::BinKey,
        representative_entity: (Entity, MainEntity),
        batch_range: Range<u32>,
        extra_index: PhaseItemExtraIndex,
    ) -> Self {
        Self {
            batch_set_key,
            bin_key,
            representative_entity,
            batch_range,
            extra_index,
        }
    }
}
impl CachedRenderPipelinePhaseItem for AlphaMask3d {
    /// The pipeline ID lives on the batch set key: items sharing a batch set
    /// key share a pipeline by construction.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.batch_set_key.pipeline
    }
}
/// A sorted 3D phase item for meshes with screen-space specular transmission,
/// rendered after the opaque pass (see the `Core3d` graph edges).
pub struct Transmissive3d {
    /// The sort key. Per the [`SortedPhaseItem`] impl below, values increase
    /// towards the camera, so an ascending sort yields back-to-front order.
    pub distance: f32,
    /// The ID of the cached render pipeline used to draw this item.
    pub pipeline: CachedRenderPipelineId,
    /// The render-world [`Entity`] paired with its [`MainEntity`].
    pub entity: (Entity, MainEntity),
    /// The function used to draw this item.
    pub draw_function: DrawFunctionId,
    /// The range of instances to draw.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
    /// Whether the mesh in question is indexed (uses an index buffer in
    /// addition to its vertex buffer).
    pub indexed: bool,
}
impl PhaseItem for Transmissive3d {
    /// For now, automatic batching is disabled for transmissive items because their rendering is
    /// split into multiple steps depending on [`Camera3d::screen_space_specular_transmission_steps`],
    /// which the batching system doesn't currently know about.
    ///
    /// Having batching enabled would cause the same item to be drawn multiple times across different
    /// steps, whenever the batching range crossed a step boundary.
    ///
    /// Eventually, we could add support for this by having the batching system break up the batch ranges
    /// using the same logic as the transmissive pass, but for now it's simpler to just disable batching.
    const AUTOMATIC_BATCHING: bool = false;
    /// The render-world entity this item draws from.
    #[inline]
    fn entity(&self) -> Entity {
        self.entity.0
    }
    /// The corresponding main-world entity.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.entity.1
    }
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl SortedPhaseItem for Transmissive3d {
    // NOTE: Values increase towards the camera. Back-to-front ordering for transmissive means we need an ascending sort.
    type SortKey = FloatOrd;
    #[inline]
    fn sort_key(&self) -> Self::SortKey {
        FloatOrd(self.distance)
    }
    /// Sorts by raw `f32` distance using `radsort` (a radix sort) instead of
    /// the default comparison-based sort over `FloatOrd`.
    #[inline]
    fn sort(items: &mut [Self]) {
        radsort::sort_by_key(items, |item| item.distance);
    }
    #[inline]
    fn indexed(&self) -> bool {
        self.indexed
    }
}
impl CachedRenderPipelinePhaseItem for Transmissive3d {
    /// The cached pipeline ID is stored directly on the item.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.pipeline
    }
}
/// A sorted 3D phase item for transparent meshes, rendered back-to-front
/// after the transmissive pass (see the `Core3d` graph edges).
pub struct Transparent3d {
    /// The sort key. Per the [`SortedPhaseItem`] impl below, values increase
    /// towards the camera, so an ascending sort yields back-to-front order.
    pub distance: f32,
    /// The ID of the cached render pipeline used to draw this item.
    pub pipeline: CachedRenderPipelineId,
    /// The render-world [`Entity`] paired with its [`MainEntity`].
    pub entity: (Entity, MainEntity),
    /// The function used to draw this item.
    pub draw_function: DrawFunctionId,
    /// The range of instances to draw.
    pub batch_range: Range<u32>,
    /// An extra index, which is either a dynamic offset or an index in the
    /// indirect parameters list.
    pub extra_index: PhaseItemExtraIndex,
    /// Whether the mesh in question is indexed (uses an index buffer in
    /// addition to its vertex buffer).
    pub indexed: bool,
}
impl PhaseItem for Transparent3d {
    /// The render-world entity this item draws from.
    #[inline]
    fn entity(&self) -> Entity {
        self.entity.0
    }
    /// The corresponding main-world entity.
    // `#[inline]` added for consistency: every other accessor in this impl
    // (and in the sibling `PhaseItem` impls in this file) is marked inline.
    #[inline]
    fn main_entity(&self) -> MainEntity {
        self.entity.1
    }
    #[inline]
    fn draw_function(&self) -> DrawFunctionId {
        self.draw_function
    }
    #[inline]
    fn batch_range(&self) -> &Range<u32> {
        &self.batch_range
    }
    #[inline]
    fn batch_range_mut(&mut self) -> &mut Range<u32> {
        &mut self.batch_range
    }
    #[inline]
    fn extra_index(&self) -> PhaseItemExtraIndex {
        self.extra_index.clone()
    }
    #[inline]
    fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range<u32>, &mut PhaseItemExtraIndex) {
        (&mut self.batch_range, &mut self.extra_index)
    }
}
impl SortedPhaseItem for Transparent3d {
    // NOTE: Values increase towards the camera. Back-to-front ordering for transparent means we need an ascending sort.
    type SortKey = FloatOrd;
    #[inline]
    fn sort_key(&self) -> Self::SortKey {
        FloatOrd(self.distance)
    }
    /// Sorts by raw `f32` distance using `radsort` (a radix sort) instead of
    /// the default comparison-based sort over `FloatOrd`.
    #[inline]
    fn sort(items: &mut [Self]) {
        radsort::sort_by_key(items, |item| item.distance);
    }
    #[inline]
    fn indexed(&self) -> bool {
        self.indexed
    }
}
impl CachedRenderPipelinePhaseItem for Transparent3d {
    /// The cached pipeline ID is stored directly on the item.
    #[inline]
    fn cached_pipeline(&self) -> CachedRenderPipelineId {
        self.pipeline
    }
}
/// Creates or clears the four core-3D render phases (opaque, alpha-mask,
/// transmissive, transparent) for every active [`Camera3d`], then drops
/// phases belonging to views that no longer exist or are inactive.
pub fn extract_core_3d_camera_phases(
    mut opaque_3d_phases: ResMut<ViewBinnedRenderPhases<Opaque3d>>,
    mut alpha_mask_3d_phases: ResMut<ViewBinnedRenderPhases<AlphaMask3d>>,
    mut transmissive_3d_phases: ResMut<ViewSortedRenderPhases<Transmissive3d>>,
    mut transparent_3d_phases: ResMut<ViewSortedRenderPhases<Transparent3d>>,
    cameras_3d: Extract<Query<(Entity, &Camera, Has<NoIndirectDrawing>), With<Camera3d>>>,
    mut live_entities: Local<HashSet<RetainedViewEntity>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    // `live_entities` is a reused scratch set recording the views seen this
    // frame; anything not in it is pruned at the end.
    live_entities.clear();
    for (main_entity, camera, no_indirect_drawing) in &cameras_3d {
        if !camera.is_active {
            continue;
        }
        // If GPU culling is in use, use it (and indirect mode); otherwise, just
        // preprocess the meshes.
        let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing {
            GpuPreprocessingMode::Culling
        } else {
            GpuPreprocessingMode::PreprocessingOnly
        });
        // This is the main 3D camera, so use the first subview index (0).
        let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0);
        opaque_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
        alpha_mask_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
        transmissive_3d_phases.insert_or_clear(retained_view_entity);
        transparent_3d_phases.insert_or_clear(retained_view_entity);
        live_entities.insert(retained_view_entity);
    }
    // Prune phases for views that disappeared or became inactive.
    opaque_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    alpha_mask_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    transmissive_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    transparent_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity));
}
/// Extracts the prepass and deferred render phases for every active
/// [`Camera3d`], and mirrors the prepass marker components from the
/// main-world camera onto its render-world entity.
pub fn extract_camera_prepass_phase(
    mut commands: Commands,
    mut opaque_3d_prepass_phases: ResMut<ViewBinnedRenderPhases<Opaque3dPrepass>>,
    mut alpha_mask_3d_prepass_phases: ResMut<ViewBinnedRenderPhases<AlphaMask3dPrepass>>,
    mut opaque_3d_deferred_phases: ResMut<ViewBinnedRenderPhases<Opaque3dDeferred>>,
    mut alpha_mask_3d_deferred_phases: ResMut<ViewBinnedRenderPhases<AlphaMask3dDeferred>>,
    cameras_3d: Extract<
        Query<
            (
                Entity,
                RenderEntity,
                &Camera,
                Has<NoIndirectDrawing>,
                Has<DepthPrepass>,
                Has<NormalPrepass>,
                Has<MotionVectorPrepass>,
                Has<DeferredPrepass>,
                Has<DepthPrepassDoubleBuffer>,
                Has<DeferredPrepassDoubleBuffer>,
            ),
            With<Camera3d>,
        >,
    >,
    mut live_entities: Local<HashSet<RetainedViewEntity>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    // Reused scratch set of views seen this frame; anything else is pruned.
    live_entities.clear();
    for (
        main_entity,
        entity,
        camera,
        no_indirect_drawing,
        depth_prepass,
        normal_prepass,
        motion_vector_prepass,
        deferred_prepass,
        depth_prepass_double_buffer,
        deferred_prepass_double_buffer,
    ) in cameras_3d.iter()
    {
        if !camera.is_active {
            continue;
        }
        // If GPU culling is in use, use it (and indirect mode); otherwise, just
        // preprocess the meshes.
        let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing {
            GpuPreprocessingMode::Culling
        } else {
            GpuPreprocessingMode::PreprocessingOnly
        });
        // This is the main 3D camera, so we use the first subview index (0).
        let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0);
        // Prepass phases exist only while at least one prepass flavor is on;
        // otherwise remove any stale phase for this view.
        if depth_prepass || normal_prepass || motion_vector_prepass {
            opaque_3d_prepass_phases
                .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
            alpha_mask_3d_prepass_phases
                .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
        } else {
            opaque_3d_prepass_phases.remove(&retained_view_entity);
            alpha_mask_3d_prepass_phases.remove(&retained_view_entity);
        }
        // Likewise for the deferred G-buffer phases.
        if deferred_prepass {
            opaque_3d_deferred_phases
                .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
            alpha_mask_3d_deferred_phases
                .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode);
        } else {
            opaque_3d_deferred_phases.remove(&retained_view_entity);
            alpha_mask_3d_deferred_phases.remove(&retained_view_entity);
        }
        live_entities.insert(retained_view_entity);
        // Add or remove prepasses as appropriate.
        let mut camera_commands = commands
            .get_entity(entity)
            .expect("Camera entity wasn't synced.");
        if depth_prepass {
            camera_commands.insert(DepthPrepass);
        } else {
            camera_commands.remove::<DepthPrepass>();
        }
        if normal_prepass {
            camera_commands.insert(NormalPrepass);
        } else {
            camera_commands.remove::<NormalPrepass>();
        }
        if motion_vector_prepass {
            camera_commands.insert(MotionVectorPrepass);
        } else {
            camera_commands.remove::<MotionVectorPrepass>();
        }
        if deferred_prepass {
            camera_commands.insert(DeferredPrepass);
        } else {
            camera_commands.remove::<DeferredPrepass>();
        }
        if depth_prepass_double_buffer {
            camera_commands.insert(DepthPrepassDoubleBuffer);
        } else {
            camera_commands.remove::<DepthPrepassDoubleBuffer>();
        }
        if deferred_prepass_double_buffer {
            camera_commands.insert(DeferredPrepassDoubleBuffer);
        } else {
            camera_commands.remove::<DeferredPrepassDoubleBuffer>();
        }
    }
    // Prune phases for views that disappeared or became inactive.
    opaque_3d_prepass_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    alpha_mask_3d_prepass_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    opaque_3d_deferred_phases.retain(|view_entity, _| live_entities.contains(view_entity));
    alpha_mask_3d_deferred_phases.retain(|view_entity, _| live_entities.contains(view_entity));
}
/// Allocates (or fetches from the texture cache) a depth texture for each 3D
/// view and attaches a [`ViewDepthTexture`] to the view entity.
///
/// Runs in two passes: first accumulate the required [`TextureUsages`] per
/// render target (views that share a target share a depth texture), then
/// create one texture per `(target, msaa)` pair.
pub fn prepare_core_3d_depth_textures(
    mut commands: Commands,
    mut texture_cache: ResMut<TextureCache>,
    render_device: Res<RenderDevice>,
    opaque_3d_phases: Res<ViewBinnedRenderPhases<Opaque3d>>,
    alpha_mask_3d_phases: Res<ViewBinnedRenderPhases<AlphaMask3d>>,
    transmissive_3d_phases: Res<ViewSortedRenderPhases<Transmissive3d>>,
    transparent_3d_phases: Res<ViewSortedRenderPhases<Transparent3d>>,
    views_3d: Query<(
        Entity,
        &ExtractedCamera,
        &ExtractedView,
        Option<&DepthPrepass>,
        &Camera3d,
        &Msaa,
    )>,
) {
    // Pass 1: OR together the texture usages every view needs for its target.
    let mut render_target_usage = <HashMap<_, _>>::default();
    for (_, camera, extracted_view, depth_prepass, camera_3d, _msaa) in &views_3d {
        // Only consider views that have all four core-3D phases this frame.
        if !opaque_3d_phases.contains_key(&extracted_view.retained_view_entity)
            || !alpha_mask_3d_phases.contains_key(&extracted_view.retained_view_entity)
            || !transmissive_3d_phases.contains_key(&extracted_view.retained_view_entity)
            || !transparent_3d_phases.contains_key(&extracted_view.retained_view_entity)
        {
            continue;
        };
        // Default usage required to write to the depth texture
        let mut usage: TextureUsages = camera_3d.depth_texture_usages.into();
        if depth_prepass.is_some() {
            // Required to read the output of the prepass
            usage |= TextureUsages::COPY_SRC;
        }
        render_target_usage
            .entry(camera.target.clone())
            .and_modify(|u| *u |= usage)
            .or_insert_with(|| usage);
    }
    // Pass 2: create (or reuse) one depth texture per (target, msaa) pair and
    // attach it to every view entity that renders to that pair.
    let mut textures = <HashMap<_, _>>::default();
    for (entity, camera, _, _, camera_3d, msaa) in &views_3d {
        let Some(physical_target_size) = camera.physical_target_size else {
            continue;
        };
        let cached_texture = textures
            .entry((camera.target.clone(), msaa))
            .or_insert_with(|| {
                let usage = *render_target_usage
                    .get(&camera.target.clone())
                    .expect("The depth texture usage should already exist for this target");
                let descriptor = TextureDescriptor {
                    label: Some("view_depth_texture"),
                    // The size of the depth texture
                    size: physical_target_size.to_extents(),
                    mip_level_count: 1,
                    sample_count: msaa.samples(),
                    dimension: TextureDimension::D2,
                    format: CORE_3D_DEPTH_FORMAT,
                    usage,
                    view_formats: &[],
                };
                texture_cache.get(&render_device, descriptor)
            })
            .clone();
        // The clear value is only set when the camera asks for a clear; `Load`
        // keeps the previous contents.
        commands.entity(entity).insert(ViewDepthTexture::new(
            cached_texture,
            match camera_3d.depth_load_op {
                Camera3dDepthLoadOp::Clear(v) => Some(v),
                Camera3dDepthLoadOp::Load => None,
            },
        ));
    }
}
/// The texture, view, and sampler used as the transmissive color input for a
/// view's main transmissive pass (the main pass output is copied into it;
/// see `MainTransmissivePass3dNode`).
#[derive(Component)]
pub struct ViewTransmissionTexture {
    // The underlying GPU texture the main texture is copied into.
    pub texture: Texture,
    // A view of `texture` for binding.
    pub view: TextureView,
    // Sampler used when reading the transmission texture.
    pub sampler: Sampler,
}
pub fn prepare_core_3d_transmission_textures(
mut commands: Commands,
mut texture_cache: ResMut<TextureCache>,
render_device: Res<RenderDevice>,
opaque_3d_phases: Res<ViewBinnedRenderPhases<Opaque3d>>,
alpha_mask_3d_phases: Res<ViewBinnedRenderPhases<AlphaMask3d>>,
transmissive_3d_phases: Res<ViewSortedRenderPhases<Transmissive3d>>,
transparent_3d_phases: Res<ViewSortedRenderPhases<Transparent3d>>,
views_3d: Query<(Entity, &ExtractedCamera, &Camera3d, &ExtractedView)>,
) {
let mut textures = <HashMap<_, _>>::default();
for (entity, camera, camera_3d, view) in &views_3d {
if !opaque_3d_phases.contains_key(&view.retained_view_entity)
|| !alpha_mask_3d_phases.contains_key(&view.retained_view_entity)
|| !transparent_3d_phases.contains_key(&view.retained_view_entity)
{
continue;
};
let Some(transmissive_3d_phase) = transmissive_3d_phases.get(&view.retained_view_entity)
else {
continue;
};
let Some(physical_target_size) = camera.physical_target_size else {
continue;
};
// Don't prepare a transmission texture if the number of steps is set to 0
if camera_3d.screen_space_specular_transmission_steps == 0 {
continue;
}
// Don't prepare a transmission texture if there are no transmissive items to render
if transmissive_3d_phase.items.is_empty() {
continue;
}
let cached_texture = textures
.entry(camera.target.clone())
.or_insert_with(|| {
let usage = TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST;
let format = if view.hdr {
ViewTarget::TEXTURE_FORMAT_HDR
} else {
TextureFormat::bevy_default()
};
let descriptor = TextureDescriptor {
label: Some("view_transmission_texture"),
// The size of the transmission texture
size: physical_target_size.to_extents(),
mip_level_count: 1,
sample_count: 1, // No need for MSAA, as we'll only copy the main texture here
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | true |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs | crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs | use super::ViewTransmissionTexture;
use crate::core_3d::Transmissive3d;
use bevy_camera::{Camera3d, MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::*, query::QueryItem};
use bevy_image::ToExtents;
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::ViewSortedRenderPhases,
render_resource::{RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, ViewDepthTexture, ViewTarget},
};
use core::ops::Range;
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
/// A [`bevy_render::render_graph::Node`] that runs the [`Transmissive3d`]
/// [`ViewSortedRenderPhases`].
#[derive(Default)]
pub struct MainTransmissivePass3dNode;
impl ViewNode for MainTransmissivePass3dNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ExtractedView,
        &'static Camera3d,
        &'static ViewTarget,
        Option<&'static ViewTransmissionTexture>,
        &'static ViewDepthTexture,
        Option<&'static MainPassResolutionOverride>,
    );
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (camera, view, camera_3d, target, transmission, depth, resolution_override): QueryItem<
            Self::ViewQuery,
        >,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let view_entity = graph.view_entity();
        // Nothing to do when the phase resource or this view's phase is absent.
        let Some(transmissive_phases) =
            world.get_resource::<ViewSortedRenderPhases<Transmissive3d>>()
        else {
            return Ok(());
        };
        let Some(transmissive_phase) = transmissive_phases.get(&view.retained_view_entity) else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        // NOTE(review): `unwrap` assumes the camera always has a physical
        // target size by the time this node runs — confirm upstream invariant.
        let physical_target_size = camera.physical_target_size.unwrap();
        let render_pass_descriptor = RenderPassDescriptor {
            label: Some("main_transmissive_pass_3d"),
            color_attachments: &[Some(target.get_color_attachment())],
            // Depth is stored so later passes (e.g. transparent) can test it.
            depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)),
            timestamp_writes: None,
            occlusion_query_set: None,
        };
        // Run the transmissive pass, sorted back-to-front
        // NOTE: Scoped to drop the mutable borrow of render_context
        #[cfg(feature = "trace")]
        let _main_transmissive_pass_3d_span = info_span!("main_transmissive_pass_3d").entered();
        if !transmissive_phase.items.is_empty() {
            let screen_space_specular_transmission_steps =
                camera_3d.screen_space_specular_transmission_steps;
            if screen_space_specular_transmission_steps > 0 {
                let transmission =
                    transmission.expect("`ViewTransmissionTexture` should exist at this point");
                // `transmissive_phase.items` are depth sorted, so we split them into N = `screen_space_specular_transmission_steps`
                // ranges, rendering them back-to-front in multiple steps, allowing multiple levels of transparency.
                //
                // Note: For the sake of simplicity, we currently split items evenly among steps. In the future, we
                // might want to use a more sophisticated heuristic (e.g. based on view bounds, or with an exponential
                // falloff so that nearby objects have more levels of transparency available to them)
                for range in split_range(
                    0..transmissive_phase.items.len(),
                    screen_space_specular_transmission_steps,
                ) {
                    // Copy the main texture to the transmission texture, allowing to use the color output of the
                    // previous step (or of the `Opaque3d` phase, for the first step) as a transmissive color input
                    render_context.command_encoder().copy_texture_to_texture(
                        target.main_texture().as_image_copy(),
                        transmission.texture.as_image_copy(),
                        physical_target_size.to_extents(),
                    );
                    let mut render_pass =
                        render_context.begin_tracked_render_pass(render_pass_descriptor.clone());
                    let pass_span =
                        diagnostics.pass_span(&mut render_pass, "main_transmissive_pass_3d");
                    // NOTE(review): this branch ignores `resolution_override`,
                    // unlike the zero-step branch below — confirm intentional.
                    if let Some(viewport) = camera.viewport.as_ref() {
                        render_pass.set_camera_viewport(viewport);
                    }
                    // render items in range
                    if let Err(err) =
                        transmissive_phase.render_range(&mut render_pass, world, view_entity, range)
                    {
                        error!("Error encountered while rendering the transmissive phase {err:?}");
                    }
                    pass_span.end(&mut render_pass);
                }
            } else {
                // Zero steps: render the whole phase in one pass, with no
                // intermediate copies of the main texture.
                let mut render_pass =
                    render_context.begin_tracked_render_pass(render_pass_descriptor);
                let pass_span =
                    diagnostics.pass_span(&mut render_pass, "main_transmissive_pass_3d");
                if let Some(viewport) = Viewport::from_viewport_and_override(
                    camera.viewport.as_ref(),
                    resolution_override,
                ) {
                    render_pass.set_camera_viewport(&viewport);
                }
                if let Err(err) = transmissive_phase.render(&mut render_pass, world, view_entity) {
                    error!("Error encountered while rendering the transmissive phase {err:?}");
                }
                pass_span.end(&mut render_pass);
            }
        }
        Ok(())
    }
}
/// Splits a [`Range`] into at most `max_num_splits` sub-ranges without overlaps
///
/// Properly takes into account remainders of inexact divisions (by adding extra
/// elements to the initial sub-ranges as needed)
fn split_range(range: Range<usize>, max_num_splits: usize) -> impl Iterator<Item = Range<usize>> {
    let total = range.end - range.start;
    assert!(total > 0, "to be split, a range must not be empty");
    assert!(max_num_splits > 0, "max_num_splits must be at least 1");
    // Never produce more pieces than there are elements.
    let pieces = usize::min(max_num_splits, total);
    // Every piece gets `base` elements; the first `leftover` pieces get one more.
    let base = total / pieces;
    let mut leftover = total % pieces;
    let mut cursor = range.start;
    (0..pieces).map(move |_| {
        let mut size = base;
        if leftover > 0 {
            leftover -= 1;
            size += 1;
        }
        // Clamp defensively to the original end (cannot overshoot in practice).
        let next = usize::min(cursor + size, range.end);
        let piece = cursor..next;
        cursor = next;
        piece
    })
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
bevyengine/bevy | https://github.com/bevyengine/bevy/blob/51a6fedb06a022ab5d39e099413caa882e1b022d/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs | crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs | use crate::{
core_3d::Opaque3d,
skybox::{SkyboxBindGroup, SkyboxPipelineId},
};
use bevy_camera::{MainPassResolutionOverride, Viewport};
use bevy_ecs::{prelude::World, query::QueryItem};
use bevy_render::{
camera::ExtractedCamera,
diagnostic::RecordDiagnostics,
render_graph::{NodeRunError, RenderGraphContext, ViewNode},
render_phase::{TrackedRenderPass, ViewBinnedRenderPhases},
render_resource::{CommandEncoderDescriptor, PipelineCache, RenderPassDescriptor, StoreOp},
renderer::RenderContext,
view::{ExtractedView, ViewDepthTexture, ViewTarget, ViewUniformOffset},
};
use tracing::error;
#[cfg(feature = "trace")]
use tracing::info_span;
use super::AlphaMask3d;
/// A [`bevy_render::render_graph::Node`] that runs the [`Opaque3d`] and [`AlphaMask3d`]
/// [`ViewBinnedRenderPhases`]s.
#[derive(Default)]
pub struct MainOpaquePass3dNode;
impl ViewNode for MainOpaquePass3dNode {
    type ViewQuery = (
        &'static ExtractedCamera,
        &'static ExtractedView,
        &'static ViewTarget,
        &'static ViewDepthTexture,
        Option<&'static SkyboxPipelineId>,
        Option<&'static SkyboxBindGroup>,
        &'static ViewUniformOffset,
        Option<&'static MainPassResolutionOverride>,
    );
    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        (
            camera,
            extracted_view,
            target,
            depth,
            skybox_pipeline,
            skybox_bind_group,
            view_uniform_offset,
            resolution_override,
        ): QueryItem<'w, '_, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Bail out quietly when the phase resources or this view's phases are
        // missing; there is nothing to draw.
        let (Some(opaque_phases), Some(alpha_mask_phases)) = (
            world.get_resource::<ViewBinnedRenderPhases<Opaque3d>>(),
            world.get_resource::<ViewBinnedRenderPhases<AlphaMask3d>>(),
        ) else {
            return Ok(());
        };
        let (Some(opaque_phase), Some(alpha_mask_phase)) = (
            opaque_phases.get(&extracted_view.retained_view_entity),
            alpha_mask_phases.get(&extracted_view.retained_view_entity),
        ) else {
            return Ok(());
        };
        let diagnostics = render_context.diagnostic_recorder();
        // Depth is stored so later passes can keep testing against it.
        let color_attachments = [Some(target.get_color_attachment())];
        let depth_stencil_attachment = Some(depth.get_attachment(StoreOp::Store));
        let view_entity = graph.view_entity();
        // Defer the actual encoding to a command-buffer generation task; the
        // closure captures everything it needs by move.
        render_context.add_command_buffer_generation_task(move |render_device| {
            #[cfg(feature = "trace")]
            let _main_opaque_pass_3d_span = info_span!("main_opaque_pass_3d").entered();
            // Command encoder setup
            let mut command_encoder =
                render_device.create_command_encoder(&CommandEncoderDescriptor {
                    label: Some("main_opaque_pass_3d_command_encoder"),
                });
            // Render pass setup
            let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor {
                label: Some("main_opaque_pass_3d"),
                color_attachments: &color_attachments,
                depth_stencil_attachment,
                timestamp_writes: None,
                occlusion_query_set: None,
            });
            let mut render_pass = TrackedRenderPass::new(&render_device, render_pass);
            let pass_span = diagnostics.pass_span(&mut render_pass, "main_opaque_pass_3d");
            if let Some(viewport) =
                Viewport::from_viewport_and_override(camera.viewport.as_ref(), resolution_override)
            {
                render_pass.set_camera_viewport(&viewport);
            }
            // Opaque draws
            if !opaque_phase.is_empty() {
                #[cfg(feature = "trace")]
                let _opaque_main_pass_3d_span = info_span!("opaque_main_pass_3d").entered();
                if let Err(err) = opaque_phase.render(&mut render_pass, world, view_entity) {
                    error!("Error encountered while rendering the opaque phase {err:?}");
                }
            }
            // Alpha draws
            if !alpha_mask_phase.is_empty() {
                #[cfg(feature = "trace")]
                let _alpha_mask_main_pass_3d_span = info_span!("alpha_mask_main_pass_3d").entered();
                if let Err(err) = alpha_mask_phase.render(&mut render_pass, world, view_entity) {
                    error!("Error encountered while rendering the alpha mask phase {err:?}");
                }
            }
            // Skybox draw using a fullscreen triangle
            if let (Some(skybox_pipeline), Some(SkyboxBindGroup(skybox_bind_group))) =
                (skybox_pipeline, skybox_bind_group)
            {
                let pipeline_cache = world.resource::<PipelineCache>();
                // Skip the skybox while its pipeline is still compiling.
                if let Some(pipeline) = pipeline_cache.get_render_pipeline(skybox_pipeline.0) {
                    render_pass.set_render_pipeline(pipeline);
                    render_pass.set_bind_group(
                        0,
                        &skybox_bind_group.0,
                        &[view_uniform_offset.offset, skybox_bind_group.1],
                    );
                    // Fullscreen triangle: 3 vertices, 1 instance.
                    render_pass.draw(0..3, 0..1);
                }
            }
            pass_span.end(&mut render_pass);
            // The pass must be dropped before the encoder can be finished.
            drop(render_pass);
            command_encoder.finish()
        });
        Ok(())
    }
}
| rust | Apache-2.0 | 51a6fedb06a022ab5d39e099413caa882e1b022d | 2026-01-04T15:31:59.438636Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.