repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/brush/src/brush_cache.rs
node-graph/nodes/brush/src/brush_cache.rs
use crate::brush_stroke::BrushStroke; use crate::brush_stroke::BrushStyle; use core_types::table::TableRow; use dyn_any::DynAny; use raster_types::CPU; use raster_types::Raster; use std::collections::HashMap; use std::hash::Hash; use std::hash::Hasher; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; // TODO: This is a temporary hack, be sure to not reuse this when the brush system is replaced/rewritten. static NEXT_BRUSH_CACHE_IMPL_ID: AtomicU64 = AtomicU64::new(0); #[derive(Clone, Debug, DynAny, serde::Serialize, serde::Deserialize)] struct BrushCacheImpl { #[serde(default = "new_unique_id")] unique_id: u64, // The full previous input that was cached. #[serde(default)] prev_input: Vec<BrushStroke>, // The strokes that have been fully processed and blended into the background. #[serde(default, deserialize_with = "raster_types::image::migrate_image_frame_row")] background: TableRow<Raster<CPU>>, #[serde(default, deserialize_with = "raster_types::image::migrate_image_frame_row")] blended_image: TableRow<Raster<CPU>>, #[serde(default, deserialize_with = "raster_types::image::migrate_image_frame_row")] last_stroke_texture: TableRow<Raster<CPU>>, // A cache for brush textures. #[serde(skip)] brush_texture_cache: HashMap<BrushStyle, Raster<CPU>>, } impl BrushCacheImpl { fn compute_brush_plan(&mut self, mut background: TableRow<Raster<CPU>>, input: &[BrushStroke]) -> BrushPlan { // Do background invalidation. if background != self.background { self.background = background.clone(); return BrushPlan { strokes: input.to_vec(), background, ..Default::default() }; } // Do blended_image invalidation. let blended_strokes = &self.prev_input[..self.prev_input.len().saturating_sub(1)]; let num_blended_strokes = blended_strokes.len(); if input.get(..num_blended_strokes) != Some(blended_strokes) { return BrushPlan { strokes: input.to_vec(), background, ..Default::default() }; } // Take our previous blended image (and invalidate the cache). 
// Since we're about to replace our cache anyway, this saves a clone. background = std::mem::take(&mut self.blended_image); // Check if the first non-blended stroke is an extension of the last one. let mut first_stroke_texture = TableRow { element: Raster::<CPU>::default(), transform: glam::DAffine2::ZERO, ..Default::default() }; let mut first_stroke_point_skip = 0; let strokes = input[num_blended_strokes..].to_vec(); if !strokes.is_empty() && self.prev_input.len() > num_blended_strokes { let last_stroke = &self.prev_input[num_blended_strokes]; let same_style = strokes[0].style == last_stroke.style; let prev_points = last_stroke.compute_blit_points(); let new_points = strokes[0].compute_blit_points(); let is_point_prefix = new_points.get(..prev_points.len()) == Some(&prev_points); if same_style && is_point_prefix { first_stroke_texture = std::mem::take(&mut self.last_stroke_texture); first_stroke_point_skip = prev_points.len(); } } self.prev_input = Vec::new(); BrushPlan { strokes, background, first_stroke_texture, first_stroke_point_skip, } } pub fn cache_results(&mut self, input: Vec<BrushStroke>, blended_image: TableRow<Raster<CPU>>, last_stroke_texture: TableRow<Raster<CPU>>) { self.prev_input = input; self.blended_image = blended_image; self.last_stroke_texture = last_stroke_texture; } } impl Default for BrushCacheImpl { fn default() -> Self { Self { unique_id: new_unique_id(), prev_input: Vec::new(), background: Default::default(), blended_image: Default::default(), last_stroke_texture: Default::default(), brush_texture_cache: HashMap::new(), } } } impl PartialEq for BrushCacheImpl { fn eq(&self, other: &Self) -> bool { self.unique_id == other.unique_id } } impl Hash for BrushCacheImpl { fn hash<H: Hasher>(&self, state: &mut H) { self.unique_id.hash(state); } } fn new_unique_id() -> u64 { NEXT_BRUSH_CACHE_IMPL_ID.fetch_add(1, Ordering::SeqCst) } #[derive(Clone, Debug, Default)] pub struct BrushPlan { pub strokes: Vec<BrushStroke>, pub background: 
TableRow<Raster<CPU>>, pub first_stroke_texture: TableRow<Raster<CPU>>, pub first_stroke_point_skip: usize, } #[derive(Debug, Default, DynAny, serde::Serialize, serde::Deserialize)] pub struct BrushCache(Arc<Mutex<BrushCacheImpl>>); // A bit of a cursed implementation to work around the current node system. // The original object is a 'prototype' that when cloned gives you a independent // new object. Any further clones however are all the same underlying cache object. impl Clone for BrushCache { fn clone(&self) -> Self { Self(Arc::new(Mutex::new(self.0.lock().unwrap().clone()))) } } impl PartialEq for BrushCache { fn eq(&self, other: &Self) -> bool { if Arc::ptr_eq(&self.0, &other.0) { return true; } let s = self.0.lock().unwrap(); let o = other.0.lock().unwrap(); *s == *o } } impl Hash for BrushCache { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.0.lock().unwrap().hash(state); } } impl BrushCache { pub fn compute_brush_plan(&self, background: TableRow<Raster<CPU>>, input: &[BrushStroke]) -> BrushPlan { let mut inner = self.0.lock().unwrap(); inner.compute_brush_plan(background, input) } pub fn cache_results(&self, input: Vec<BrushStroke>, blended_image: TableRow<Raster<CPU>>, last_stroke_texture: TableRow<Raster<CPU>>) { let mut inner = self.0.lock().unwrap(); inner.cache_results(input, blended_image, last_stroke_texture) } pub fn get_cached_brush(&self, style: &BrushStyle) -> Option<Raster<CPU>> { let inner = self.0.lock().unwrap(); inner.brush_texture_cache.get(style).cloned() } pub fn store_brush(&self, style: BrushStyle, brush: Raster<CPU>) { let mut inner = self.0.lock().unwrap(); inner.brush_texture_cache.insert(style, brush); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/brush/src/lib.rs
node-graph/nodes/brush/src/lib.rs
pub mod brush; pub mod brush_cache; pub mod brush_stroke;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/brush/src/brush.rs
node-graph/nodes/brush/src/brush.rs
use crate::brush_cache::BrushCache; use crate::brush_stroke::{BrushStroke, BrushStyle}; use core_types::blending::BlendMode; use core_types::bounds::{BoundingBox, RenderBoundingBox}; use core_types::color::{Alpha, Color, Pixel, Sample}; use core_types::generic::FnNode; use core_types::math::bbox::{AxisAlignedBbox, Bbox}; use core_types::registry::FutureWrapperNode; use core_types::table::{Table, TableRow}; use core_types::transform::Transform; use core_types::value::ClonedNode; use core_types::{Ctx, Node}; use glam::{DAffine2, DVec2}; use raster_nodes::blending_nodes::blend_colors; use raster_nodes::std_nodes::{empty_image, extend_image_to_bounds}; use raster_types::BitmapMut; use raster_types::Image; use raster_types::{CPU, Raster}; #[derive(Clone, Copy, Debug, PartialEq)] pub struct BrushStampGenerator<P: Pixel + Alpha> { color: P, feather_exponent: f32, transform: DAffine2, } impl<P: Pixel + Alpha> Transform for BrushStampGenerator<P> { fn transform(&self) -> DAffine2 { self.transform } } impl<P: Pixel + Alpha> Sample for BrushStampGenerator<P> { type Pixel = P; #[inline] fn sample(&self, position: DVec2, area: DVec2) -> Option<P> { let position = self.transform.inverse().transform_point2(position); let area = self.transform.inverse().transform_vector2(area); let aa_blur_radius = area.length() as f32 * 2.; let center = DVec2::splat(0.5); let distance = (position + area / 2. - center).length() as f32 * 2.; let edge_opacity = 1. - (1. - aa_blur_radius).powf(self.feather_exponent); let result = if distance < 1. - aa_blur_radius { 1. - distance.powf(self.feather_exponent) } else if distance < 1. { // TODO: Replace this with a proper analytical AA implementation edge_opacity * ((1. 
- distance) / aa_blur_radius) } else { return None; }; use core_types::color::Channel; Some(self.color.multiplied_alpha(P::AlphaChannel::from_linear(result))) } } #[node_macro::node(skip_impl)] fn brush_stamp_generator(#[unit(" px")] diameter: f64, color: Color, hardness: f64, flow: f64) -> BrushStampGenerator<Color> { // Diameter let radius = diameter / 2.; // Hardness let hardness = hardness / 100.; let feather_exponent = 1. / (1. - hardness) as f32; // Flow let flow = flow / 100.; // Color let color = color.apply_opacity(flow as f32); let transform = DAffine2::from_scale_angle_translation(DVec2::splat(diameter), 0., -DVec2::splat(radius)); BrushStampGenerator { color, feather_exponent, transform } } #[node_macro::node(skip_impl)] fn blit<BlendFn>(mut target: Table<Raster<CPU>>, texture: Raster<CPU>, positions: Vec<DVec2>, blend_mode: BlendFn) -> Table<Raster<CPU>> where BlendFn: for<'any_input> Node<'any_input, (Color, Color), Output = Color>, { if positions.is_empty() { return target; } for table_row in target.iter_mut() { let target_width = table_row.element.width; let target_height = table_row.element.height; let target_size = DVec2::new(target_width as f64, target_height as f64); let texture_size = DVec2::new(texture.width as f64, texture.height as f64); let document_to_target = DAffine2::from_translation(-texture_size / 2.) * DAffine2::from_scale(target_size) * table_row.transform.inverse(); for position in &positions { let start = document_to_target.transform_point2(*position).round(); let stop = start + texture_size; // Half-open integer ranges [start, stop). let clamp_start = start.clamp(DVec2::ZERO, target_size).as_uvec2(); let clamp_stop = stop.clamp(DVec2::ZERO, target_size).as_uvec2(); let blit_area_offset = (clamp_start.as_dvec2() - start).as_uvec2().min(texture_size.as_uvec2()); let blit_area_dimensions = (clamp_stop - clamp_start).min(texture_size.as_uvec2() - blit_area_offset); // Tight blitting loop. 
Eagerly assert bounds to hopefully eliminate bounds check inside loop. let texture_index = |x: u32, y: u32| -> usize { (y as usize * texture.width as usize) + (x as usize) }; let target_index = |x: u32, y: u32| -> usize { (y as usize * target_width as usize) + (x as usize) }; let max_y = (blit_area_offset.y + blit_area_dimensions.y).saturating_sub(1); let max_x = (blit_area_offset.x + blit_area_dimensions.x).saturating_sub(1); assert!(texture_index(max_x, max_y) < texture.data.len()); assert!(target_index(max_x, max_y) < table_row.element.data.len()); for y in blit_area_offset.y..blit_area_offset.y + blit_area_dimensions.y { for x in blit_area_offset.x..blit_area_offset.x + blit_area_dimensions.x { let src_pixel = texture.data[texture_index(x, y)]; let dst_pixel = &mut table_row.element.data_mut().data[target_index(x + clamp_start.x, y + clamp_start.y)]; *dst_pixel = blend_mode.eval((src_pixel, *dst_pixel)); } } } } target } pub async fn create_brush_texture(brush_style: &BrushStyle) -> Raster<CPU> { let stamp = brush_stamp_generator(brush_style.diameter, brush_style.color, brush_style.hardness, brush_style.flow); let transform = DAffine2::from_scale_angle_translation(DVec2::splat(brush_style.diameter), 0., -DVec2::splat(brush_style.diameter / 2.)); let blank_texture = empty_image((), transform, Table::new_from_element(Color::TRANSPARENT)).into_iter().next().unwrap_or_default(); let image = blend_stamp_closure(stamp, blank_texture, |a, b| blend_colors(a, b, BlendMode::Normal, 1.)); image.element } pub fn blend_with_mode(background: TableRow<Raster<CPU>>, foreground: TableRow<Raster<CPU>>, blend_mode: BlendMode, opacity: f64) -> TableRow<Raster<CPU>> { let opacity = opacity as f32 / 100.; match std::hint::black_box(blend_mode) { // Normal group BlendMode::Normal => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Normal, opacity)), // Darken group BlendMode::Darken => blend_image_closure(foreground, background, |a, b| blend_colors(a, 
b, BlendMode::Darken, opacity)), BlendMode::Multiply => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Multiply, opacity)), BlendMode::ColorBurn => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::ColorBurn, opacity)), BlendMode::LinearBurn => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::LinearBurn, opacity)), BlendMode::DarkerColor => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::DarkerColor, opacity)), // Lighten group BlendMode::Lighten => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Lighten, opacity)), BlendMode::Screen => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Screen, opacity)), BlendMode::ColorDodge => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::ColorDodge, opacity)), BlendMode::LinearDodge => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::LinearDodge, opacity)), BlendMode::LighterColor => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::LighterColor, opacity)), // Contrast group BlendMode::Overlay => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Overlay, opacity)), BlendMode::SoftLight => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::SoftLight, opacity)), BlendMode::HardLight => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::HardLight, opacity)), BlendMode::VividLight => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::VividLight, opacity)), BlendMode::LinearLight => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::LinearLight, opacity)), BlendMode::PinLight => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::PinLight, opacity)), BlendMode::HardMix 
=> blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::HardMix, opacity)), // Inversion group BlendMode::Difference => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Difference, opacity)), BlendMode::Exclusion => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Exclusion, opacity)), BlendMode::Subtract => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Subtract, opacity)), BlendMode::Divide => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Divide, opacity)), // Component group BlendMode::Hue => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Hue, opacity)), BlendMode::Saturation => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Saturation, opacity)), BlendMode::Color => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Color, opacity)), BlendMode::Luminosity => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Luminosity, opacity)), // Other utility blend modes (hidden from the normal list) BlendMode::Erase => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Erase, opacity)), BlendMode::Restore => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::Restore, opacity)), BlendMode::MultiplyAlpha => blend_image_closure(foreground, background, |a, b| blend_colors(a, b, BlendMode::MultiplyAlpha, opacity)), } } /// Generates the brush strokes painted with the Brush tool as a raster image. /// If an input image is supplied, strokes are drawn on top of it, expanding bounds as needed. #[node_macro::node(category("Raster"))] async fn brush( _: impl Ctx, /// Optional raster content that may be drawn onto. 
mut image: Table<Raster<CPU>>, /// The list of brush stroke paths drawn by the Brush tool, with each including both its coordinates and styles. strokes: Vec<BrushStroke>, /// Internal cache data used to accelerate rendering of the brush content. cache: BrushCache, ) -> Table<Raster<CPU>> { if image.is_empty() { image.push(TableRow::default()); } // TODO: Find a way to handle more than one row let table_row = image.iter().next().expect("Expected the one row we just pushed").into_cloned(); let bounds = Table::new_from_row(table_row.clone()).bounding_box(DAffine2::IDENTITY, false); let [start, end] = if let RenderBoundingBox::Rectangle(rect) = bounds { rect } else { [DVec2::ZERO, DVec2::ZERO] }; let image_bbox = AxisAlignedBbox { start, end }; let stroke_bbox = strokes.iter().map(|s| s.bounding_box()).reduce(|a, b| a.union(&b)).unwrap_or(AxisAlignedBbox::ZERO); let bbox = if image_bbox.size().length() < 0.1 { stroke_bbox } else { stroke_bbox.union(&image_bbox) }; let background_bounds = bbox.to_transform(); let mut draw_strokes: Vec<_> = strokes.iter().filter(|&s| !matches!(s.style.blend_mode, BlendMode::Erase | BlendMode::Restore)).cloned().collect(); let mut brush_plan = cache.compute_brush_plan(table_row, &draw_strokes); // TODO: Find a way to handle more than one row let Some(mut actual_image) = extend_image_to_bounds((), Table::new_from_row(brush_plan.background), background_bounds).into_iter().next() else { return Table::new(); }; let final_stroke_idx = brush_plan.strokes.len().saturating_sub(1); for (idx, stroke) in brush_plan.strokes.into_iter().enumerate() { // Create brush texture. // TODO: apply rotation from layer to stamp for non-rotationally-symmetric brushes. 
let mut brush_texture = cache.get_cached_brush(&stroke.style); if brush_texture.is_none() { let tex = create_brush_texture(&stroke.style).await; cache.store_brush(stroke.style.clone(), tex.clone()); brush_texture = Some(tex); } let brush_texture = brush_texture.unwrap(); // Compute transformation from stroke texture space into layer space, and create the stroke texture. let skip = if idx == 0 { brush_plan.first_stroke_point_skip } else { 0 }; let positions: Vec<_> = stroke.compute_blit_points().into_iter().skip(skip).collect(); let stroke_texture = if idx == 0 && positions.is_empty() { core::mem::take(&mut brush_plan.first_stroke_texture) } else { let mut bbox = stroke.bounding_box(); bbox.start = bbox.start.floor(); bbox.end = bbox.end.floor(); let stroke_size = bbox.size() + DVec2::splat(stroke.style.diameter); // For numerical stability we want to place the first blit point at a stable, integer offset in layer space. let snap_offset = positions[0].floor() - positions[0]; let stroke_origin_in_layer = bbox.start - snap_offset - DVec2::splat(stroke.style.diameter / 2.); let stroke_to_layer = DAffine2::from_translation(stroke_origin_in_layer) * DAffine2::from_scale(stroke_size); let normal_blend = FnNode::new(|(a, b)| blend_colors(a, b, BlendMode::Normal, 1.)); let blit_node = BlitNode::new( FutureWrapperNode::new(ClonedNode::new(brush_texture)), FutureWrapperNode::new(ClonedNode::new(positions)), FutureWrapperNode::new(ClonedNode::new(normal_blend)), ); let blit_target = if idx == 0 { let target = core::mem::take(&mut brush_plan.first_stroke_texture); extend_image_to_bounds((), Table::new_from_row(target), stroke_to_layer) } else { empty_image((), stroke_to_layer, Table::new_from_element(Color::TRANSPARENT)) // EmptyImageNode::new(CopiedNode::new(stroke_to_layer), CopiedNode::new(Color::TRANSPARENT)).eval(()) }; let table = blit_node.eval(blit_target).await; assert_eq!(table.len(), 1); table.into_iter().next().unwrap_or_default() }; // Cache image before doing 
final blend, and store final stroke texture. if idx == final_stroke_idx { cache.cache_results(core::mem::take(&mut draw_strokes), actual_image.clone(), stroke_texture.clone()); } // TODO: Is this the correct way to do opacity in blending? actual_image = blend_with_mode(actual_image, stroke_texture, stroke.style.blend_mode, (stroke.style.color.a() * 100.) as f64); } let has_erase_or_restore_strokes = strokes.iter().any(|s| matches!(s.style.blend_mode, BlendMode::Erase | BlendMode::Restore)); if has_erase_or_restore_strokes { let opaque_image = Image::new(bbox.size().x as u32, bbox.size().y as u32, Color::WHITE); let mut erase_restore_mask = TableRow { element: Raster::new_cpu(opaque_image), transform: background_bounds, ..Default::default() }; for stroke in strokes { let mut brush_texture = cache.get_cached_brush(&stroke.style); if brush_texture.is_none() { let tex = create_brush_texture(&stroke.style).await; cache.store_brush(stroke.style.clone(), tex.clone()); brush_texture = Some(tex); } let brush_texture = brush_texture.unwrap(); let positions: Vec<_> = stroke.compute_blit_points().into_iter().collect(); // For mask composition: Erase subtracts alpha, Restore adds alpha, and Draw acts like Restore to allow repainting erased areas. 
let mask_blend_mode = match stroke.style.blend_mode { BlendMode::Erase => BlendMode::Erase, BlendMode::Restore => BlendMode::Restore, _ => BlendMode::Restore, }; let blend_params = FnNode::new(move |(a, b)| blend_colors(a, b, mask_blend_mode, 1.)); let blit_node = BlitNode::new( FutureWrapperNode::new(ClonedNode::new(brush_texture)), FutureWrapperNode::new(ClonedNode::new(positions)), FutureWrapperNode::new(ClonedNode::new(blend_params)), ); erase_restore_mask = blit_node.eval(Table::new_from_row(erase_restore_mask)).await.into_iter().next().unwrap_or_default(); } let blend_params = FnNode::new(|(a, b)| blend_colors(a, b, BlendMode::MultiplyAlpha, 1.)); actual_image = blend_image_closure(erase_restore_mask, actual_image, |a, b| blend_params.eval((a, b))); } let first_row = image.iter_mut().next().unwrap(); *first_row.element = actual_image.element; *first_row.transform = actual_image.transform; *first_row.alpha_blending = actual_image.alpha_blending; *first_row.source_node_id = actual_image.source_node_id; image } pub fn blend_image_closure(foreground: TableRow<Raster<CPU>>, mut background: TableRow<Raster<CPU>>, map_fn: impl Fn(Color, Color) -> Color) -> TableRow<Raster<CPU>> { let foreground_size = DVec2::new(foreground.element.width as f64, foreground.element.height as f64); let background_size = DVec2::new(background.element.width as f64, background.element.height as f64); // Transforms a point from the background image to the foreground image let background_to_foreground = DAffine2::from_scale(foreground_size) * foreground.transform.inverse() * background.transform * DAffine2::from_scale(1. 
/ background_size); // Footprint of the foreground image (0, 0)..(1, 1) in the background image space let background_aabb = Bbox::unit().affine_transform(background.transform.inverse() * foreground.transform).to_axis_aligned_bbox(); // Clamp the foreground image to the background image let start = (background_aabb.start * background_size).max(DVec2::ZERO).as_uvec2(); let end = (background_aabb.end * background_size).min(background_size).as_uvec2(); for y in start.y..end.y { for x in start.x..end.x { let background_point = DVec2::new(x as f64, y as f64); let foreground_point = background_to_foreground.transform_point2(background_point); let source_pixel = foreground.element.sample(foreground_point); let Some(destination_pixel) = background.element.data_mut().get_pixel_mut(x, y) else { continue }; *destination_pixel = map_fn(source_pixel, *destination_pixel); } } background } pub fn blend_stamp_closure(foreground: BrushStampGenerator<Color>, mut background: TableRow<Raster<CPU>>, map_fn: impl Fn(Color, Color) -> Color) -> TableRow<Raster<CPU>> { let background_size = DVec2::new(background.element.width as f64, background.element.height as f64); // Transforms a point from the background image to the foreground image let background_to_foreground = background.transform * DAffine2::from_scale(1. 
/ background_size); // Footprint of the foreground image (0, 0)..(1, 1) in the background image space let background_aabb = Bbox::unit().affine_transform(background.transform.inverse() * foreground.transform).to_axis_aligned_bbox(); // Clamp the foreground image to the background image let start = (background_aabb.start * background_size).max(DVec2::ZERO).as_uvec2(); let end = (background_aabb.end * background_size).min(background_size).as_uvec2(); let area = background_to_foreground.transform_point2(DVec2::new(1., 1.)) - background_to_foreground.transform_point2(DVec2::ZERO); for y in start.y..end.y { for x in start.x..end.x { let background_point = DVec2::new(x as f64, y as f64); let foreground_point = background_to_foreground.transform_point2(background_point); let Some(source_pixel) = foreground.sample(foreground_point, area) else { continue }; let Some(destination_pixel) = background.element.data_mut().get_pixel_mut(x, y) else { continue }; *destination_pixel = map_fn(source_pixel, *destination_pixel); } } background } #[cfg(test)] mod test { use super::*; use core_types::transform::Transform; use glam::DAffine2; #[test] fn test_brush_texture() { let size = 20.; let image = brush_stamp_generator(size, Color::BLACK, 100., 100.); assert_eq!(image.transform(), DAffine2::from_scale_angle_translation(DVec2::splat(size.ceil()), 0., -DVec2::splat(size / 2.))); // center pixel should be BLACK assert_eq!(image.sample(DVec2::splat(0.), DVec2::ONE), Some(Color::BLACK)); } #[tokio::test] async fn test_brush_output_size() { let image = brush( (), Table::new_from_element(Raster::new_cpu(Image::<Color>::default())), vec![BrushStroke { trace: vec![crate::brush_stroke::BrushInputSample { position: DVec2::ZERO }], style: BrushStyle { color: Color::BLACK, diameter: 20., hardness: 20., flow: 20., spacing: 20., blend_mode: BlendMode::Normal, }, }], BrushCache::default(), ) .await; assert_eq!(image.iter().next().unwrap().element.width, 20); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/gstd/src/lib.rs
node-graph/nodes/gstd/src/lib.rs
pub mod any; pub mod render_node; pub mod text; #[cfg(feature = "wasm")] pub mod wasm_application_io; pub use blending_nodes; pub use brush_nodes as brush; pub use core_types::*; pub use graphene_application_io as application_io; pub use graphene_core; pub use graphic_nodes; pub use math_nodes; pub use path_bool_nodes as path_bool; pub use raster_nodes; pub use text_nodes; pub use transform_nodes; pub use vector_nodes; pub use vector_types; /// Backward compatibility re-exports pub mod vector { pub use graphic_types::Vector; pub use vector_types::vector::{VectorModification, VectorModificationType, misc, style}; pub use vector_types::*; // Re-export commonly used types and submodules pub use vector_types::vector::algorithms; pub use vector_types::vector::click_target; pub use vector_types::vector::misc::HandleId; pub use vector_types::vector::{PointId, RegionId, SegmentId, StrokeId}; pub use vector_types::vector::{deserialize_hashmap, serialize_hashmap}; // Re-export HandleExt trait and NoHashBuilder pub use vector_types::vector::HandleExt; pub use vector_types::vector::NoHashBuilder; // Re-export vector node modules and functions pub use vector_nodes::*; } pub mod graphic { pub use graphic_nodes::graphic::*; pub use graphic_types::Artboard; pub use graphic_types::graphic::*; } pub mod artboard { pub use graphic_nodes::artboard::*; pub use graphic_types::artboard::*; } pub mod subpath { pub use vector_types::subpath::*; } pub mod gradient { pub use vector_types::GradientStops; } pub mod transform { pub use core_types::transform::*; pub use vector_types::ReferencePoint; } pub mod math { pub use core_types::math::quad; pub mod math_ext { pub use vector_types::{QuadExt, RectExt}; } } pub mod logic { pub use graphene_core::logic::*; } pub use graphene_core::debug; // Re-export graphene_core modules for backward compatibility pub mod ops { pub use core_types::ops::*; pub use graphene_core::ops::*; } pub mod extract_xy { pub use graphene_core::extract_xy::*; } pub mod 
animation { pub use graphene_core::animation::*; } // Re-export at top level for convenience pub use graphic_types::{Artboard, Graphic, Vector}; /// stop gap solutions until all paths have been replaced with their absolute ones pub mod renderer { pub use core_types::math::quad::Quad; pub use core_types::math::rect::Rect; pub use rendering::*; } pub mod raster { pub use graphic_types::raster_types::*; pub use raster_nodes::adjustments::*; pub use raster_nodes::*; } pub mod raster_types { pub use graphic_types::raster_types::*; } pub mod memo { pub use core_types::memo::*; pub use graphene_core::memo::*; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/gstd/src/text.rs
node-graph/nodes/gstd/src/text.rs
use core_types::{Ctx, table::Table}; use graph_craft::wasm_application_io::WasmEditorApi; use graphic_types::Vector; pub use text_nodes::*; #[node_macro::node(category(""))] fn text<'i: 'n>( _: impl Ctx, editor: &'i WasmEditorApi, text: String, font: Font, #[unit(" px")] #[default(24.)] font_size: f64, #[unit("x")] #[default(1.2)] line_height_ratio: f64, #[unit(" px")] #[default(0.)] character_spacing: f64, #[unit(" px")] max_width: Option<f64>, #[unit(" px")] max_height: Option<f64>, /// Faux italic. #[unit("°")] #[default(0.)] tilt: f64, align: TextAlign, /// Splits each text glyph into its own row in the table of vector geometry. #[default(false)] per_glyph_instances: bool, ) -> Table<Vector> { let typesetting = TypesettingConfig { font_size, line_height_ratio, character_spacing, max_width, max_height, tilt, align, }; to_path(&text, &font, &editor.font_cache, typesetting, per_glyph_instances) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/gstd/src/any.rs
node-graph/nodes/gstd/src/any.rs
use core_types::NodeIO; use core_types::WasmNotSend; pub use core_types::registry::{DowncastBothNode, DynAnyNode, FutureWrapperNode, PanicNode}; pub use core_types::{Node, generic, ops}; use dyn_any::StaticType; pub use graph_craft::proto::{Any, NodeContainer, TypeErasedBox, TypeErasedNode}; use graph_craft::proto::{FutureAny, SharedNodeContainer}; pub trait IntoTypeErasedNode<'n> { fn into_type_erased(self) -> TypeErasedBox<'n>; } impl<'n, N: 'n> IntoTypeErasedNode<'n> for N where N: for<'i> NodeIO<'i, Any<'i>, Output = FutureAny<'i>> + Sync + WasmNotSend, { fn into_type_erased(self) -> TypeErasedBox<'n> { Box::new(self) } } pub fn input_node<O: StaticType>(n: SharedNodeContainer) -> DowncastBothNode<(), O> { downcast_node(n) } pub fn downcast_node<I: StaticType, O: StaticType>(n: SharedNodeContainer) -> DowncastBothNode<I, O> { DowncastBothNode::new(n) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/gstd/src/wasm_application_io.rs
node-graph/nodes/gstd/src/wasm_application_io.rs
#[cfg(target_family = "wasm")] use base64::Engine; #[cfg(target_family = "wasm")] use core_types::WasmNotSend; #[cfg(target_family = "wasm")] use core_types::math::bbox::Bbox; use core_types::table::Table; #[cfg(target_family = "wasm")] use core_types::transform::Footprint; use core_types::{Color, Ctx}; pub use graph_craft::document::value::RenderOutputType; pub use graph_craft::wasm_application_io::*; use graphene_application_io::ApplicationIo; #[cfg(target_family = "wasm")] use graphic_types::Graphic; #[cfg(target_family = "wasm")] use graphic_types::Vector; use graphic_types::raster_types::Image; use graphic_types::raster_types::{CPU, Raster}; #[cfg(target_family = "wasm")] use graphic_types::vector_types::gradient::GradientStops; #[cfg(target_family = "wasm")] use rendering::{Render, RenderParams, RenderSvgSegmentList, SvgRender}; use std::sync::Arc; #[cfg(target_family = "wasm")] use wasm_bindgen::JsCast; #[cfg(target_family = "wasm")] use web_sys::{CanvasRenderingContext2d, HtmlCanvasElement}; #[cfg(feature = "wgpu")] #[node_macro::node(category("Debug: GPU"))] async fn create_surface<'a: 'n>(_: impl Ctx, editor: &'a WasmEditorApi) -> Arc<WasmSurfaceHandle> { Arc::new(editor.application_io.as_ref().unwrap().create_window()) } #[node_macro::node(category("Web Request"))] async fn get_request(_: impl Ctx, _primary: (), #[name("URL")] url: String, discard_result: bool) -> String { #[cfg(target_family = "wasm")] { if discard_result { wasm_bindgen_futures::spawn_local(async move { let _ = reqwest::get(url).await; }); return String::new(); } } #[cfg(not(target_family = "wasm"))] { #[cfg(feature = "tokio")] if discard_result { tokio::spawn(async move { let _ = reqwest::get(url).await; }); return String::new(); } #[cfg(not(feature = "tokio"))] if discard_result { return String::new(); } } let Ok(response) = reqwest::get(url).await else { return String::new() }; response.text().await.ok().unwrap_or_default() } #[node_macro::node(category("Web Request"))] async fn 
post_request(_: impl Ctx, _primary: (), #[name("URL")] url: String, body: Vec<u8>, discard_result: bool) -> String { #[cfg(target_family = "wasm")] { if discard_result { wasm_bindgen_futures::spawn_local(async move { let _ = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await; }); return String::new(); } } #[cfg(not(target_family = "wasm"))] { #[cfg(feature = "tokio")] if discard_result { let url = url.clone(); let body = body.clone(); tokio::spawn(async move { let _ = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await; }); return String::new(); } #[cfg(not(feature = "tokio"))] if discard_result { return String::new(); } } let Ok(response) = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await else { return String::new(); }; response.text().await.ok().unwrap_or_default() } #[node_macro::node(category("Web Request"), name("String to Bytes"))] fn string_to_bytes(_: impl Ctx, string: String) -> Vec<u8> { string.into_bytes() } #[node_macro::node(category("Web Request"), name("Image to Bytes"))] fn image_to_bytes(_: impl Ctx, image: Table<Raster<CPU>>) -> Vec<u8> { let Some(image) = image.iter().next() else { return vec![] }; image.element.data.iter().flat_map(|color| color.to_rgb8_srgb().into_iter()).collect::<Vec<u8>>() } #[node_macro::node(category("Web Request"))] async fn load_resource<'a: 'n>(_: impl Ctx, _primary: (), #[scope("editor-api")] editor: &'a WasmEditorApi, #[name("URL")] url: String) -> Arc<[u8]> { let Some(api) = editor.application_io.as_ref() else { return Arc::from(include_bytes!("../../../graph-craft/src/null.png").to_vec()); }; let Ok(data) = api.load_resource(url) else { return Arc::from(include_bytes!("../../../graph-craft/src/null.png").to_vec()); }; let Ok(data) = data.await else { return Arc::from(include_bytes!("../../../graph-craft/src/null.png").to_vec()); }; data } 
#[node_macro::node(category("Web Request"))] fn decode_image(_: impl Ctx, data: Arc<[u8]>) -> Table<Raster<CPU>> { let Some(image) = image::load_from_memory(data.as_ref()).ok() else { return Table::new(); }; let image = image.to_rgba32f(); let image = Image { data: image .chunks(4) .map(|pixel| Color::from_unassociated_alpha(pixel[0], pixel[1], pixel[2], pixel[3]).to_linear_srgb()) .collect(), width: image.width(), height: image.height(), ..Default::default() }; Table::new_from_element(Raster::new_cpu(image)) } #[cfg(target_family = "wasm")] #[node_macro::node(category(""))] async fn rasterize<T: WasmNotSend + 'n>( _: impl Ctx, #[implementations( Table<Vector>, Table<Raster<CPU>>, Table<Graphic>, Table<Color>, Table<GradientStops>, )] mut data: Table<T>, footprint: Footprint, surface_handle: Arc<graphene_application_io::SurfaceHandle<HtmlCanvasElement>>, ) -> Table<Raster<CPU>> where Table<T>: Render, { use core_types::table::TableRow; if footprint.transform.matrix2.determinant() == 0. { log::trace!("Invalid footprint received for rasterization"); return Table::new(); } let mut render = SvgRender::new(); let aabb = Bbox::from_transform(footprint.transform).to_axis_aligned_bbox(); let size = aabb.size(); let resolution = footprint.resolution; let render_params = RenderParams { footprint, for_export: true, ..Default::default() }; for row in data.iter_mut() { *row.transform = glam::DAffine2::from_translation(-aabb.start) * *row.transform; } data.render_svg(&mut render, &render_params); render.format_svg(glam::DVec2::ZERO, size); let svg_string = render.svg.to_svg_string(); let canvas = &surface_handle.surface; canvas.set_width(resolution.x); canvas.set_height(resolution.y); let context = canvas.get_context("2d").unwrap().unwrap().dyn_into::<CanvasRenderingContext2d>().unwrap(); let preamble = "data:image/svg+xml;base64,"; let mut base64_string = String::with_capacity(preamble.len() + svg_string.len() * 4); base64_string.push_str(preamble); 
base64::engine::general_purpose::STANDARD.encode_string(svg_string, &mut base64_string); let image_data = web_sys::HtmlImageElement::new().unwrap(); image_data.set_src(base64_string.as_str()); wasm_bindgen_futures::JsFuture::from(image_data.decode()).await.unwrap(); context .draw_image_with_html_image_element_and_dw_and_dh(&image_data, 0., 0., resolution.x as f64, resolution.y as f64) .unwrap(); let rasterized = context.get_image_data(0., 0., resolution.x as f64, resolution.y as f64).unwrap(); let image = Image::from_image_data(&rasterized.data().0, resolution.x as u32, resolution.y as u32); Table::new_from_row(TableRow { element: Raster::new_cpu(image), transform: footprint.transform, ..Default::default() }) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/gstd/src/render_node.rs
node-graph/nodes/gstd/src/render_node.rs
use core_types::table::Table; use core_types::transform::Footprint; use core_types::{CloneVarArgs, ExtractAll, ExtractVarArgs}; use core_types::{Color, Context, Ctx, ExtractFootprint, OwnedContextImpl, WasmNotSend}; use graph_craft::document::value::RenderOutput; pub use graph_craft::document::value::RenderOutputType; pub use graph_craft::wasm_application_io::*; use graphene_application_io::{ApplicationIo, ExportFormat, ImageTexture, RenderConfig}; use graphic_types::Artboard; use graphic_types::Graphic; use graphic_types::Vector; use graphic_types::raster_types::Image; use graphic_types::raster_types::{CPU, Raster}; use rendering::{Render, RenderOutputType as RenderOutputTypeRequest, RenderParams, RenderSvgSegmentList, SvgRender, format_transform_matrix}; use rendering::{RenderMetadata, SvgSegment}; use std::collections::HashMap; use std::sync::Arc; use vector_types::GradientStops; use wgpu_executor::RenderContext; /// List of (canvas id, image data) pairs for embedding images as canvases in the final SVG string. 
type ImageData = HashMap<Image<Color>, u64>; #[derive(Clone, dyn_any::DynAny)] pub enum RenderIntermediateType { Vello(Arc<(vello::Scene, RenderContext)>), Svg(Arc<(String, ImageData, String)>), } #[derive(Clone, dyn_any::DynAny)] pub struct RenderIntermediate { ty: RenderIntermediateType, metadata: RenderMetadata, contains_artboard: bool, } #[node_macro::node(category(""))] async fn render_intermediate<'a: 'n, T: 'static + Render + WasmNotSend + Send + Sync>( ctx: impl Ctx + ExtractVarArgs + ExtractAll + CloneVarArgs, #[implementations( Context -> Table<Artboard>, Context -> Table<Graphic>, Context -> Table<Vector>, Context -> Table<Raster<CPU>>, Context -> Table<Color>, Context -> Table<GradientStops>, )] data: impl Node<Context<'static>, Output = T>, ) -> RenderIntermediate { let render_params = ctx .vararg(0) .expect("Did not find var args") .downcast_ref::<RenderParams>() .expect("Downcasting render params yielded invalid type"); let ctx = OwnedContextImpl::from(ctx.clone()).into_context(); let data = data.eval(ctx).await; let footprint = Footprint::default(); let mut metadata = RenderMetadata::default(); data.collect_metadata(&mut metadata, footprint, None); let contains_artboard = data.contains_artboard(); match &render_params.render_output_type { RenderOutputTypeRequest::Vello => { let mut scene = vello::Scene::new(); let mut context = wgpu_executor::RenderContext::default(); data.render_to_vello(&mut scene, Default::default(), &mut context, render_params); RenderIntermediate { ty: RenderIntermediateType::Vello(Arc::new((scene, context))), metadata, contains_artboard, } } RenderOutputTypeRequest::Svg => { let mut render = SvgRender::new(); data.render_svg(&mut render, render_params); RenderIntermediate { ty: RenderIntermediateType::Svg(Arc::new((render.svg.to_svg_string(), render.image_data, render.svg_defs.clone()))), metadata, contains_artboard, } } } } #[node_macro::node(category(""))] async fn create_context<'a: 'n>( // Context injections are defined in 
the wrap_network_in_scope function render_config: RenderConfig, data: impl Node<Context<'static>, Output = RenderOutput>, ) -> RenderOutput { let footprint = render_config.viewport; let render_output_type = match render_config.export_format { ExportFormat::Svg => RenderOutputTypeRequest::Svg, ExportFormat::Raster => RenderOutputTypeRequest::Vello, }; let render_params = RenderParams { render_mode: render_config.render_mode, hide_artboards: render_config.hide_artboards, for_export: render_config.for_export, render_output_type, footprint: Footprint::default(), scale: render_config.scale, ..Default::default() }; let ctx = OwnedContextImpl::default() .with_footprint(footprint) .with_real_time(render_config.time.time) .with_animation_time(render_config.time.animation_time.as_secs_f64()) .with_pointer(render_config.pointer) .with_vararg(Box::new(render_params)) .into_context(); data.eval(ctx).await } #[node_macro::node(category(""))] async fn render<'a: 'n>(ctx: impl Ctx + ExtractFootprint + ExtractVarArgs, editor_api: &'a WasmEditorApi, data: RenderIntermediate) -> RenderOutput { let footprint = ctx.footprint(); let render_params = ctx .vararg(0) .expect("Did not find var args") .downcast_ref::<RenderParams>() .expect("Downcasting render params yielded invalid type"); let mut render_params = render_params.clone(); render_params.footprint = *footprint; let render_params = &render_params; let scale = render_params.scale; let physical_resolution = render_params.footprint.resolution; let logical_resolution = render_params.footprint.resolution.as_dvec2() / scale; let RenderIntermediate { ty, mut metadata, contains_artboard } = data; metadata.apply_transform(footprint.transform); let data = match (render_params.render_output_type, &ty) { (RenderOutputTypeRequest::Svg, RenderIntermediateType::Svg(svg_data)) => { let mut rendering = SvgRender::new(); if !contains_artboard && !render_params.hide_artboards { rendering.leaf_tag("rect", |attributes| { attributes.push("x", "0"); 
attributes.push("y", "0"); attributes.push("width", logical_resolution.x.to_string()); attributes.push("height", logical_resolution.y.to_string()); let matrix = format_transform_matrix(footprint.transform.inverse()); if !matrix.is_empty() { attributes.push("transform", matrix); } attributes.push("fill", "white"); }); } rendering.svg.push(SvgSegment::from(svg_data.0.clone())); rendering.image_data = svg_data.1.clone(); rendering.svg_defs = svg_data.2.clone(); rendering.wrap_with_transform(footprint.transform, Some(logical_resolution)); RenderOutputType::Svg { svg: rendering.svg.to_svg_string(), image_data: rendering.image_data.into_iter().map(|(image, id)| (id, image)).collect(), } } (RenderOutputTypeRequest::Vello, RenderIntermediateType::Vello(vello_data)) => { let Some(exec) = editor_api.application_io.as_ref().unwrap().gpu_executor() else { unreachable!("Attempted to render with Vello when no GPU executor is available"); }; let (child, context) = Arc::as_ref(vello_data); let scale_transform = glam::DAffine2::from_scale(glam::DVec2::splat(scale)); let footprint_transform = scale_transform * footprint.transform; let footprint_transform_vello = vello::kurbo::Affine::new(footprint_transform.to_cols_array()); let mut scene = vello::Scene::new(); scene.append(child, Some(footprint_transform_vello)); // We now replace all transforms which are supposed to be infinite with a transform which covers the entire viewport // See <https://xi.zulipchat.com/#narrow/channel/197075-vello/topic/Full.20screen.20color.2Fgradients/near/538435044> for more detail let scaled_infinite_transform = vello::kurbo::Affine::scale_non_uniform(physical_resolution.x as f64, physical_resolution.y as f64); let encoding = scene.encoding_mut(); for transform in encoding.transforms.iter_mut() { if transform.matrix[0] == f32::INFINITY { *transform = vello_encoding::Transform::from_kurbo(&scaled_infinite_transform); } } let mut background = Color::from_rgb8_srgb(0x22, 0x22, 0x22); if !contains_artboard 
&& !render_params.hide_artboards { background = Color::WHITE; } let texture = exec .render_vello_scene_to_texture(&scene, physical_resolution, context, background) .await .expect("Failed to render Vello scene"); RenderOutputType::Texture(ImageTexture { texture }) } _ => unreachable!("Render node did not receive its requested data type"), }; RenderOutput { data, metadata } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/text/src/lib.rs
node-graph/nodes/text/src/lib.rs
mod font_cache; mod path_builder; mod text_context; mod to_path; use dyn_any::DynAny; pub use font_cache::*; pub use text_context::TextContext; pub use to_path::*; // Re-export for convenience pub use core_types as gcore; pub use vector_types; // Import specta so derive macros can find it use core_types::specta; /// Alignment of lines of type within a text block. #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, core_types::specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum TextAlign { #[default] Left, Center, Right, #[label("Justify")] JustifyLeft, // TODO: JustifyCenter, JustifyRight, JustifyAll } impl From<TextAlign> for parley::Alignment { fn from(val: TextAlign) -> Self { match val { TextAlign::Left => parley::Alignment::Left, TextAlign::Center => parley::Alignment::Center, TextAlign::Right => parley::Alignment::Right, TextAlign::JustifyLeft => parley::Alignment::Justify, } } } #[derive(PartialEq, Clone, Copy, Debug, serde::Serialize, serde::Deserialize)] pub struct TypesettingConfig { pub font_size: f64, pub line_height_ratio: f64, pub character_spacing: f64, pub max_width: Option<f64>, pub max_height: Option<f64>, pub tilt: f64, pub align: TextAlign, } impl Default for TypesettingConfig { fn default() -> Self { Self { font_size: 24., line_height_ratio: 1.2, character_spacing: 0., max_width: None, max_height: None, tilt: 0., align: TextAlign::default(), } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/text/src/path_builder.rs
node-graph/nodes/text/src/path_builder.rs
use core_types::table::{Table, TableRow}; use glam::{DAffine2, DVec2}; use parley::GlyphRun; use skrifa::GlyphId; use skrifa::instance::{LocationRef, NormalizedCoord, Size}; use skrifa::outline::{DrawSettings, OutlinePen}; use skrifa::raw::FontRef as ReadFontsRef; use skrifa::{MetadataProvider, OutlineGlyph}; use vector_types::subpath::{ManipulatorGroup, Subpath}; use vector_types::vector::{PointId, Vector}; pub struct PathBuilder<Upstream> { current_subpath: Subpath<PointId>, origin: DVec2, glyph_subpaths: Vec<Subpath<PointId>>, pub vector_table: Table<Vector<Upstream>>, scale: f64, id: PointId, } impl<Upstream: Default + 'static> PathBuilder<Upstream> { pub fn new(per_glyph_instances: bool, scale: f64) -> Self { Self { current_subpath: Subpath::new(Vec::new(), false), glyph_subpaths: Vec::new(), vector_table: if per_glyph_instances { Table::new() } else { Table::new_from_element(Vector::default()) }, scale, id: PointId::ZERO, origin: DVec2::default(), } } fn point(&self, x: f32, y: f32) -> DVec2 { DVec2::new(self.origin.x + x as f64, self.origin.y - y as f64) * self.scale } #[allow(clippy::too_many_arguments)] fn draw_glyph(&mut self, glyph: &OutlineGlyph<'_>, size: f32, normalized_coords: &[NormalizedCoord], glyph_offset: DVec2, style_skew: Option<DAffine2>, skew: DAffine2, per_glyph_instances: bool) { let location_ref = LocationRef::new(normalized_coords); let settings = DrawSettings::unhinted(Size::new(size), location_ref); glyph.draw(settings, self).unwrap(); // Apply transforms in correct order: style-based skew first, then user-requested skew // This ensures font synthesis (italic) is applied before user transformations for glyph_subpath in &mut self.glyph_subpaths { if let Some(style_skew) = style_skew { glyph_subpath.apply_transform(style_skew); } glyph_subpath.apply_transform(skew); } if per_glyph_instances { self.vector_table.push(TableRow { element: Vector::from_subpaths(core::mem::take(&mut self.glyph_subpaths), false), transform: 
DAffine2::from_translation(glyph_offset), ..Default::default() }); } else { for subpath in self.glyph_subpaths.drain(..) { // Unwrapping here is ok because `self.vector_table` is initialized with a single `Vector` table element self.vector_table.get_mut(0).unwrap().element.append_subpath(subpath, false); } } } pub fn render_glyph_run(&mut self, glyph_run: &GlyphRun<'_, ()>, tilt: f64, per_glyph_instances: bool) { let mut run_x = glyph_run.offset(); let run_y = glyph_run.baseline(); let run = glyph_run.run(); // User-requested tilt applied around baseline to avoid vertical displacement // Translation ensures rotation point is at the baseline, not origin let skew = if per_glyph_instances { DAffine2::from_cols_array(&[1., 0., -tilt.to_radians().tan(), 1., 0., 0.]) } else { DAffine2::from_translation(DVec2::new(0., run_y as f64)) * DAffine2::from_cols_array(&[1., 0., -tilt.to_radians().tan(), 1., 0., 0.]) * DAffine2::from_translation(DVec2::new(0., -run_y as f64)) }; let synthesis = run.synthesis(); // Font synthesis (e.g., synthetic italic) applied separately from user transforms // This preserves the distinction between font styling and user transformations let style_skew = synthesis.skew().map(|angle| { if per_glyph_instances { DAffine2::from_cols_array(&[1., 0., -angle.to_radians().tan() as f64, 1., 0., 0.]) } else { DAffine2::from_translation(DVec2::new(0., run_y as f64)) * DAffine2::from_cols_array(&[1., 0., -angle.to_radians().tan() as f64, 1., 0., 0.]) * DAffine2::from_translation(DVec2::new(0., -run_y as f64)) } }); let font = run.font(); let font_size = run.font_size(); let normalized_coords = run.normalized_coords().iter().map(|coord| NormalizedCoord::from_bits(*coord)).collect::<Vec<_>>(); // TODO: This can be cached for better performance let font_collection_ref = font.data.as_ref(); let font_ref = ReadFontsRef::from_index(font_collection_ref, font.index).unwrap(); let outlines = font_ref.outline_glyphs(); for glyph in glyph_run.glyphs() { let glyph_offset 
= DVec2::new((run_x + glyph.x) as f64, (run_y - glyph.y) as f64); run_x += glyph.advance; let glyph_id = GlyphId::from(glyph.id); if let Some(glyph_outline) = outlines.get(glyph_id) { if !per_glyph_instances { self.origin = glyph_offset; } self.draw_glyph(&glyph_outline, font_size, &normalized_coords, glyph_offset, style_skew, skew, per_glyph_instances); } } } pub fn finalize(mut self) -> Table<Vector<Upstream>> { if self.vector_table.is_empty() { self.vector_table = Table::new_from_element(Vector::default()); } self.vector_table } } impl<Upstream: Default + 'static> OutlinePen for PathBuilder<Upstream> { fn move_to(&mut self, x: f32, y: f32) { if !self.current_subpath.is_empty() { self.glyph_subpaths.push(std::mem::replace(&mut self.current_subpath, Subpath::new(Vec::new(), false))); } self.current_subpath.push_manipulator_group(ManipulatorGroup::new_anchor_with_id(self.point(x, y), self.id.next_id())); } fn line_to(&mut self, x: f32, y: f32) { self.current_subpath.push_manipulator_group(ManipulatorGroup::new_anchor_with_id(self.point(x, y), self.id.next_id())); } fn quad_to(&mut self, x1: f32, y1: f32, x2: f32, y2: f32) { let [handle, anchor] = [self.point(x1, y1), self.point(x2, y2)]; self.current_subpath.last_manipulator_group_mut().unwrap().out_handle = Some(handle); self.current_subpath.push_manipulator_group(ManipulatorGroup::new_with_id(anchor, None, None, self.id.next_id())); } fn curve_to(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) { let [handle1, handle2, anchor] = [self.point(x1, y1), self.point(x2, y2), self.point(x3, y3)]; self.current_subpath.last_manipulator_group_mut().unwrap().out_handle = Some(handle1); self.current_subpath .push_manipulator_group(ManipulatorGroup::new_with_id(anchor, Some(handle2), None, self.id.next_id())); } fn close(&mut self) { self.current_subpath.set_closed(true); self.glyph_subpaths.push(std::mem::replace(&mut self.current_subpath, Subpath::new(Vec::new(), false))); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/text/src/text_context.rs
node-graph/nodes/text/src/text_context.rs
use super::{Font, FontCache, TypesettingConfig}; use core::cell::RefCell; use core_types::table::Table; use glam::DVec2; use parley::fontique::{Blob, FamilyId, FontInfo}; use parley::{AlignmentOptions, FontContext, Layout, LayoutContext, LineHeight, PositionedLayoutItem, StyleProperty}; use std::collections::HashMap; use vector_types::Vector; use super::path_builder::PathBuilder; thread_local! { static THREAD_TEXT: RefCell<TextContext> = RefCell::new(TextContext::default()); } /// Unified thread-local text processing context that combines font and layout management /// for efficient text rendering operations. #[derive(Default)] pub struct TextContext { font_context: FontContext, layout_context: LayoutContext<()>, /// Cached font metadata for performance optimization font_info_cache: HashMap<Font, (FamilyId, FontInfo)>, } impl TextContext { /// Access the thread-local TextContext instance for text processing operations pub fn with_thread_local<F, R>(f: F) -> R where F: FnOnce(&mut TextContext) -> R, { THREAD_TEXT.with_borrow_mut(f) } /// Resolve a font and return its data as a Blob if available fn resolve_font_data<'a>(&self, font: &'a Font, font_cache: &'a FontCache) -> Option<(Blob<u8>, &'a Font)> { font_cache.get_blob(font) } /// Get or cache font information for a given font fn get_font_info(&mut self, font: &Font, font_data: &Blob<u8>) -> Option<(String, FontInfo)> { // Check if we already have the font info cached if let Some((family_id, font_info)) = self.font_info_cache.get(font) && let Some(family_name) = self.font_context.collection.family_name(*family_id) { return Some((family_name.to_string(), font_info.clone())); } // Register the font and cache the info let families = self.font_context.collection.register_fonts(font_data.clone(), None); families.first().and_then(|(family_id, fonts_info)| { fonts_info.first().and_then(|font_info| { self.font_context.collection.family_name(*family_id).map(|family_name| { // Cache the font info for future use 
self.font_info_cache.insert(font.clone(), (*family_id, font_info.clone())); (family_name.to_string(), font_info.clone()) }) }) }) } /// Create a text layout using the specified font and typesetting configuration fn layout_text(&mut self, text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig) -> Option<Layout<()>> { // Note that the actual_font may not be the desired font if that font is not yet loaded. // It is important not to cache the default font under the name of another font. let (font_data, actual_font) = self.resolve_font_data(font, font_cache)?; let (font_family, font_info) = self.get_font_info(actual_font, &font_data)?; const DISPLAY_SCALE: f32 = 1.; let mut builder = self.layout_context.ranged_builder(&mut self.font_context, text, DISPLAY_SCALE, false); builder.push_default(StyleProperty::FontSize(typesetting.font_size as f32)); builder.push_default(StyleProperty::LetterSpacing(typesetting.character_spacing as f32)); builder.push_default(StyleProperty::FontStack(parley::FontStack::Single(parley::FontFamily::Named(std::borrow::Cow::Owned(font_family))))); builder.push_default(StyleProperty::FontWeight(font_info.weight())); builder.push_default(StyleProperty::FontStyle(font_info.style())); builder.push_default(StyleProperty::FontWidth(font_info.width())); builder.push_default(LineHeight::FontSizeRelative(typesetting.line_height_ratio as f32)); let mut layout: Layout<()> = builder.build(text); layout.break_all_lines(typesetting.max_width.map(|mw| mw as f32)); layout.align(typesetting.max_width.map(|max_w| max_w as f32), typesetting.align.into(), AlignmentOptions::default()); Some(layout) } /// Convert text to vector paths using the specified font and typesetting configuration pub fn to_path<Upstream: Default + 'static>(&mut self, text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig, per_glyph_instances: bool) -> Table<Vector<Upstream>> { let Some(layout) = self.layout_text(text, font, font_cache, 
typesetting) else { return Table::new_from_element(Vector::default()); }; let mut path_builder = PathBuilder::new(per_glyph_instances, layout.scale() as f64); for line in layout.lines() { for item in line.items() { if let PositionedLayoutItem::GlyphRun(glyph_run) = item { path_builder.render_glyph_run(&glyph_run, typesetting.tilt, per_glyph_instances); } } } path_builder.finalize() } /// Calculate the bounding box of text using the specified font and typesetting configuration pub fn bounding_box(&mut self, text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig, for_clipping_test: bool) -> DVec2 { if !for_clipping_test && let (Some(max_height), Some(max_width)) = (typesetting.max_height, typesetting.max_width) { return DVec2::new(max_width, max_height); } let Some(layout) = self.layout_text(text, font, font_cache, typesetting) else { return DVec2::ZERO; }; DVec2::new(layout.full_width() as f64, layout.height() as f64) } /// Check if text lines are being clipped due to height constraints pub fn lines_clipping(&mut self, text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig) -> bool { let Some(max_height) = typesetting.max_height else { return false }; let bounds = self.bounding_box(text, font, font_cache, typesetting, true); max_height < bounds.y } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/text/src/font_cache.rs
node-graph/nodes/text/src/font_cache.rs
use dyn_any::DynAny; use parley::fontique::Blob; use std::collections::HashMap; use std::sync::Arc; // Import specta so derive macros can find it use core_types::specta; /// A font type (storing font family and font style and an optional preview URL) #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Eq, DynAny, core_types::specta::Type)] pub struct Font { #[serde(rename = "fontFamily")] pub font_family: String, #[serde(rename = "fontStyle", deserialize_with = "migrate_font_style")] pub font_style: String, #[serde(skip)] pub font_style_to_restore: Option<String>, } impl std::hash::Hash for Font { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.font_family.hash(state); self.font_style.hash(state); // Don't consider `font_style_to_restore` in the HashMaps } } impl PartialEq for Font { fn eq(&self, other: &Self) -> bool { // Don't consider `font_style_to_restore` in the HashMaps self.font_family == other.font_family && self.font_style == other.font_style } } impl Font { pub fn new(font_family: String, font_style: String) -> Self { Self { font_family, font_style, font_style_to_restore: None, } } pub fn named_weight(weight: u32) -> &'static str { // From https://developer.mozilla.org/en-US/docs/Web/CSS/font-weight#common_weight_name_mapping match weight { 100 => "Thin", 200 => "Extra Light", 300 => "Light", 400 => "Regular", 500 => "Medium", 600 => "Semi Bold", 700 => "Bold", 800 => "Extra Bold", 900 => "Black", 950 => "Extra Black", _ => "Regular", } } } impl Default for Font { fn default() -> Self { Self::new(core_types::consts::DEFAULT_FONT_FAMILY.into(), core_types::consts::DEFAULT_FONT_STYLE.into()) } } /// A cache of all loaded font data and preview urls along with the default font (send from `init_app` in `editor_api.rs`) #[derive(Clone, serde::Serialize, serde::Deserialize, Default, DynAny)] pub struct FontCache { /// Actual font file data used for rendering a font font_file_data: HashMap<Font, Vec<u8>>, } impl std::fmt::Debug for FontCache 
{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FontCache").field("font_file_data", &self.font_file_data.keys().collect::<Vec<_>>()).finish() } } impl std::hash::Hash for FontCache { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.font_file_data.len().hash(state); self.font_file_data.keys().for_each(|font| font.hash(state)); } } impl PartialEq for FontCache { fn eq(&self, other: &Self) -> bool { if self.font_file_data.len() != other.font_file_data.len() { return false; } self.font_file_data.keys().all(|font| other.font_file_data.contains_key(font)) } } impl FontCache { /// Returns the font family name if the font is cached, otherwise returns the fallback font family name if that is cached pub fn resolve_font<'a>(&'a self, font: &'a Font) -> Option<&'a Font> { if self.font_file_data.contains_key(font) { Some(font) } else { self.font_file_data .keys() .find(|font| font.font_family == core_types::consts::DEFAULT_FONT_FAMILY && font.font_style == core_types::consts::DEFAULT_FONT_STYLE) } } /// Try to get the bytes for a font pub fn get<'a>(&'a self, font: &'a Font) -> Option<(&'a Vec<u8>, &'a Font)> { self.resolve_font(font).and_then(|font| self.font_file_data.get(font).map(|data| (data, font))) } /// Get font data as a Blob for use with parley/skrifa pub fn get_blob<'a>(&'a self, font: &'a Font) -> Option<(Blob<u8>, &'a Font)> { self.get(font).map(|(data, font)| (Blob::new(Arc::new(data.clone())), font)) } /// Check if the font is already loaded pub fn loaded_font(&self, font: &Font) -> bool { self.font_file_data.contains_key(font) } /// Insert a new font into the cache pub fn insert(&mut self, font: Font, data: Vec<u8>) { self.font_file_data.insert(font.clone(), data); } } // TODO: Eventually remove this migration document upgrade code fn migrate_font_style<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<String, D::Error> { use serde::Deserialize; String::deserialize(deserializer).map(|name| if name == 
"Normal (400)" { "Regular (400)".to_string() } else { name }) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/text/src/to_path.rs
node-graph/nodes/text/src/to_path.rs
use super::text_context::TextContext; use super::{Font, FontCache, TypesettingConfig}; use core_types::table::Table; use glam::DVec2; use parley::fontique::Blob; use std::sync::Arc; use vector_types::Vector; pub fn to_path<Upstream: Default + 'static>(text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig, per_glyph_instances: bool) -> Table<Vector<Upstream>> { TextContext::with_thread_local(|ctx| ctx.to_path(text, font, font_cache, typesetting, per_glyph_instances)) } pub fn bounding_box(text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig, for_clipping_test: bool) -> DVec2 { TextContext::with_thread_local(|ctx| ctx.bounding_box(text, font, font_cache, typesetting, for_clipping_test)) } pub fn load_font(data: &[u8]) -> Blob<u8> { Blob::new(Arc::new(data.to_vec())) } pub fn lines_clipping(text: &str, font: &Font, font_cache: &FontCache, typesetting: TypesettingConfig) -> bool { TextContext::with_thread_local(|ctx| ctx.lines_clipping(text, font, font_cache, typesetting)) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/nodes/path-bool/src/lib.rs
node-graph/nodes/path-bool/src/lib.rs
use core_types::table::{Table, TableRow, TableRowRef}; use core_types::{Color, Ctx}; use dyn_any::DynAny; use glam::{DAffine2, DVec2}; use graphic_types::vector_types::subpath::{ManipulatorGroup, PathSegPoints, Subpath, pathseg_points}; use graphic_types::vector_types::vector::PointId; use graphic_types::vector_types::vector::algorithms::merge_by_distance::MergeByDistanceExt; use graphic_types::vector_types::vector::style::Fill; use graphic_types::{Graphic, Vector}; pub use path_bool as path_bool_lib; use path_bool::{FillRule, PathBooleanOperation}; use std::ops::Mul; // Import specta so derive macros can find it use core_types::specta; // TODO: Fix boolean ops to work by removing .transform() and .one_instnace_*() calls, // TODO: since before we used a Vec of single-row tables and now we use a single table // TODO: with multiple rows while still assuming a single row for the boolean operations. #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum BooleanOperation { #[default] #[icon("BooleanUnion")] Union, #[icon("BooleanSubtractFront")] SubtractFront, #[icon("BooleanSubtractBack")] SubtractBack, #[icon("BooleanIntersect")] Intersect, #[icon("BooleanDifference")] Difference, } /// Combines the geometric forms of one or more closed paths into a new vector path that results from cutting or joining the paths by the chosen method. #[node_macro::node(category(""))] async fn boolean_operation<I: graphic_types::IntoGraphicTable + 'n + Send + Clone>( _: impl Ctx, /// The table of vector paths to perform the boolean operation on. Nested tables are automatically flattened. #[implementations(Table<Graphic>, Table<Vector>)] content: I, /// Which boolean operation to perform on the paths. /// /// Union combines all paths while cutting out overlapping areas (even the interiors of a single path). 
/// Subtraction cuts overlapping areas out from the last (Subtract Front) or first (Subtract Back) path. /// Intersection cuts away all but the overlapping areas shared by every path. /// Difference cuts away the overlapping areas shared by every path, leaving only the non-overlapping areas. operation: BooleanOperation, ) -> Table<Vector> { let content = content.into_graphic_table(); // The first index is the bottom of the stack let mut result_vector_table = boolean_operation_on_vector_table(flatten_vector(&content).iter(), operation); // Replace the transformation matrix with a mutation of the vector points themselves if let Some(result_vector) = result_vector_table.iter_mut().next() { let transform = *result_vector.transform; *result_vector.transform = DAffine2::IDENTITY; Vector::transform(result_vector.element, transform); result_vector.element.style.set_stroke_transform(DAffine2::IDENTITY); result_vector.element.upstream_data = Some(content.clone()); // Clean up the boolean operation result by merging duplicated points result_vector.element.merge_by_distance_spatial(*result_vector.transform, 0.0001); } result_vector_table } fn boolean_operation_on_vector_table<'a>(vector: impl DoubleEndedIterator<Item = TableRowRef<'a, Vector>> + Clone, boolean_operation: BooleanOperation) -> Table<Vector> { match boolean_operation { BooleanOperation::Union => union(vector), BooleanOperation::SubtractFront => subtract(vector), BooleanOperation::SubtractBack => subtract(vector.rev()), BooleanOperation::Intersect => intersect(vector), BooleanOperation::Difference => difference(vector), } } fn union<'a>(vector: impl DoubleEndedIterator<Item = TableRowRef<'a, Vector>>) -> Table<Vector> { // Reverse the vector table rows so that the result style is the style of the first vector row let mut vector_reversed = vector.rev(); let mut result_vector_table = Table::new_from_row(vector_reversed.next().map(|x| x.into_cloned()).unwrap_or_default()); let mut first_row = 
result_vector_table.iter_mut().next().expect("Expected the one row we just pushed"); // Loop over all vector table rows and union it with the result let default = TableRow::default(); let mut second_vector = Some(vector_reversed.next().unwrap_or(default.as_ref())); while let Some(lower_vector) = second_vector { let transform_of_lower_into_space_of_upper = first_row.transform.inverse() * *lower_vector.transform; let result = &mut first_row.element; let upper_path_string = to_path(result, DAffine2::IDENTITY); let lower_path_string = to_path(lower_vector.element, transform_of_lower_into_space_of_upper); #[allow(unused_unsafe)] let boolean_operation_string = unsafe { boolean_union(upper_path_string, lower_path_string) }; let boolean_operation_result = from_path(&boolean_operation_string); result.colinear_manipulators = boolean_operation_result.colinear_manipulators; result.point_domain = boolean_operation_result.point_domain; result.segment_domain = boolean_operation_result.segment_domain; result.region_domain = boolean_operation_result.region_domain; second_vector = vector_reversed.next(); } result_vector_table } fn subtract<'a>(vector: impl Iterator<Item = TableRowRef<'a, Vector>>) -> Table<Vector> { let mut vector = vector.into_iter(); let mut result_vector_table = Table::new_from_row(vector.next().map(|x| x.into_cloned()).unwrap_or_default()); let mut first_row = result_vector_table.iter_mut().next().expect("Expected the one row we just pushed"); let first_row_transform = if first_row.transform.matrix2.determinant() != 0. 
{ first_row.transform.inverse() } else { DAffine2::IDENTITY }; let mut next_vector = vector.next(); while let Some(lower_vector) = next_vector { let transform_of_lower_into_space_of_upper = first_row_transform * *lower_vector.transform; let result = &mut first_row.element; let upper_path_string = to_path(result, DAffine2::IDENTITY); let lower_path_string = to_path(lower_vector.element, transform_of_lower_into_space_of_upper); #[allow(unused_unsafe)] let boolean_operation_string = unsafe { boolean_subtract(upper_path_string, lower_path_string) }; let boolean_operation_result = from_path(&boolean_operation_string); result.colinear_manipulators = boolean_operation_result.colinear_manipulators; result.point_domain = boolean_operation_result.point_domain; result.segment_domain = boolean_operation_result.segment_domain; result.region_domain = boolean_operation_result.region_domain; next_vector = vector.next(); } result_vector_table } fn intersect<'a>(vector: impl DoubleEndedIterator<Item = TableRowRef<'a, Vector>>) -> Table<Vector> { let mut vector = vector.rev(); let mut result_vector_table = Table::new_from_row(vector.next().map(|x| x.into_cloned()).unwrap_or_default()); let mut first_row = result_vector_table.iter_mut().next().expect("Expected the one row we just pushed"); let default = TableRow::default(); let mut second_vector = Some(vector.next().unwrap_or(default.as_ref())); // For each vector table row, set the result to the intersection of that path and the current result while let Some(lower_vector) = second_vector { let transform_of_lower_into_space_of_upper = first_row.transform.inverse() * *lower_vector.transform; let result = &mut first_row.element; let upper_path_string = to_path(result, DAffine2::IDENTITY); let lower_path_string = to_path(lower_vector.element, transform_of_lower_into_space_of_upper); #[allow(unused_unsafe)] let boolean_operation_string = unsafe { boolean_intersect(upper_path_string, lower_path_string) }; let boolean_operation_result = 
from_path(&boolean_operation_string); result.colinear_manipulators = boolean_operation_result.colinear_manipulators; result.point_domain = boolean_operation_result.point_domain; result.segment_domain = boolean_operation_result.segment_domain; result.region_domain = boolean_operation_result.region_domain; second_vector = vector.next(); } result_vector_table } fn difference<'a>(vector: impl DoubleEndedIterator<Item = TableRowRef<'a, Vector>> + Clone) -> Table<Vector> { let mut vector_iter = vector.clone().rev(); let mut any_intersection = TableRow::default(); let default = TableRow::default(); let mut second_vector = Some(vector_iter.next().unwrap_or(default.as_ref())); // Find where all vector table row paths intersect at least once while let Some(lower_vector) = second_vector { let filtered_vector = vector.clone().filter(|v| *v != lower_vector).collect::<Vec<_>>().into_iter(); let unioned = boolean_operation_on_vector_table(filtered_vector, BooleanOperation::Union); let first_row = unioned.iter().next().expect("Expected at least one row after the boolean union"); let transform_of_lower_into_space_of_upper = first_row.transform.inverse() * *lower_vector.transform; let upper_path_string = to_path(first_row.element, DAffine2::IDENTITY); let lower_path_string = to_path(lower_vector.element, transform_of_lower_into_space_of_upper); #[allow(unused_unsafe)] let boolean_intersection_string = unsafe { boolean_intersect(upper_path_string, lower_path_string) }; let mut element = from_path(&boolean_intersection_string); element.style = first_row.element.style.clone(); let boolean_intersection_result = TableRow { element, transform: *first_row.transform, alpha_blending: *first_row.alpha_blending, source_node_id: *first_row.source_node_id, }; let transform_of_lower_into_space_of_upper = boolean_intersection_result.transform.inverse() * any_intersection.transform; let upper_path_string = to_path(&boolean_intersection_result.element, DAffine2::IDENTITY); let lower_path_string = 
to_path(&any_intersection.element, transform_of_lower_into_space_of_upper); #[allow(unused_unsafe)] let union_result = from_path(&unsafe { boolean_union(upper_path_string, lower_path_string) }); any_intersection.element = union_result; any_intersection.transform = boolean_intersection_result.transform; any_intersection.element.style = boolean_intersection_result.element.style.clone(); any_intersection.alpha_blending = boolean_intersection_result.alpha_blending; second_vector = vector_iter.next(); } // Subtract the area where they intersect at least once from the union of all vector paths let union = boolean_operation_on_vector_table(vector, BooleanOperation::Union); boolean_operation_on_vector_table(union.iter().chain(std::iter::once(any_intersection.as_ref())), BooleanOperation::SubtractFront) } fn flatten_vector(graphic_table: &Table<Graphic>) -> Table<Vector> { graphic_table .iter() .flat_map(|element| { match element.element.clone() { Graphic::Vector(vector) => { // Apply the parent graphic's transform to each element of the vector table vector .into_iter() .map(|mut sub_vector| { sub_vector.transform = *element.transform * sub_vector.transform; sub_vector }) .collect::<Vec<_>>() } Graphic::RasterCPU(image) => { let make_row = |transform| { // Convert the image frame into a rectangular subpath with the image's transform let mut subpath = Subpath::new_rect(DVec2::ZERO, DVec2::ONE); subpath.apply_transform(transform); // Create a vector table row from the rectangular subpath, with a default black fill let mut element = Vector::from_subpath(subpath); element.style.set_fill(Fill::Solid(Color::BLACK)); TableRow { element, ..Default::default() } }; // Apply the parent graphic's transform to each raster element image.iter().map(|row| make_row(*element.transform * *row.transform)).collect::<Vec<_>>() } Graphic::RasterGPU(image) => { let make_row = |transform| { // Convert the image frame into a rectangular subpath with the image's transform let mut subpath = 
Subpath::new_rect(DVec2::ZERO, DVec2::ONE); subpath.apply_transform(transform); // Create a vector table row from the rectangular subpath, with a default black fill let mut element = Vector::from_subpath(subpath); element.style.set_fill(Fill::Solid(Color::BLACK)); TableRow { element, ..Default::default() } }; // Apply the parent graphic's transform to each raster element image.iter().map(|row| make_row(*element.transform * *row.transform)).collect::<Vec<_>>() } Graphic::Graphic(mut graphic) => { // Apply the parent graphic's transform to each element of inner table for sub_element in graphic.iter_mut() { *sub_element.transform = *element.transform * *sub_element.transform; } // Recursively flatten the inner table into the output vector table let unioned = boolean_operation_on_vector_table(flatten_vector(&graphic).iter(), BooleanOperation::Union); unioned.into_iter().collect::<Vec<_>>() } Graphic::Color(color) => color .into_iter() .map(|row| { let mut element = Vector::default(); element.style.set_fill(Fill::Solid(row.element)); element.style.set_stroke_transform(DAffine2::IDENTITY); TableRow { element, transform: row.transform, alpha_blending: row.alpha_blending, source_node_id: row.source_node_id, } }) .collect::<Vec<_>>(), Graphic::Gradient(gradient) => gradient .into_iter() .map(|row| { let mut element = Vector::default(); element.style.set_fill(Fill::Gradient(graphic_types::vector_types::gradient::Gradient { stops: row.element, ..Default::default() })); element.style.set_stroke_transform(DAffine2::IDENTITY); TableRow { element, transform: row.transform, alpha_blending: row.alpha_blending, source_node_id: row.source_node_id, } }) .collect::<Vec<_>>(), } }) .collect() } fn to_path(vector: &Vector, transform: DAffine2) -> Vec<path_bool::PathSegment> { let mut path = Vec::new(); for subpath in vector.stroke_bezier_paths() { to_path_segments(&mut path, &subpath, transform); } path } fn to_path_segments(path: &mut Vec<path_bool::PathSegment>, subpath: 
&Subpath<PointId>, transform: DAffine2) { use path_bool::PathSegment; let mut global_start = None; let mut global_end = DVec2::ZERO; for bezier in subpath.iter() { const EPS: f64 = 1e-8; let transform_point = |pos: DVec2| transform.transform_point2(pos).mul(EPS.recip()).round().mul(EPS); let PathSegPoints { p0, p1, p2, p3 } = pathseg_points(bezier); let p0 = transform_point(p0); let p1 = p1.map(transform_point); let p2 = p2.map(transform_point); let p3 = transform_point(p3); if global_start.is_none() { global_start = Some(p0); } global_end = p3; let segment = match (p1, p2) { (None, None) => PathSegment::Line(p0, p3), (None, Some(p2)) | (Some(p2), None) => PathSegment::Quadratic(p0, p2, p3), (Some(p1), Some(p2)) => PathSegment::Cubic(p0, p1, p2, p3), }; path.push(segment); } if let Some(start) = global_start { path.push(PathSegment::Line(global_end, start)); } } fn from_path(path_data: &[Path]) -> Vector { const EPSILON: f64 = 1e-5; fn is_close(a: DVec2, b: DVec2) -> bool { (a - b).length_squared() < EPSILON * EPSILON } let mut all_subpaths = Vec::new(); for path in path_data.iter().filter(|path| !path.is_empty()) { let cubics: Vec<[DVec2; 4]> = path.iter().map(|segment| segment.to_cubic()).collect(); let mut manipulators_list = Vec::new(); let mut current_start = None; for (index, cubic) in cubics.iter().enumerate() { let [start, handle1, handle2, end] = *cubic; if current_start.is_none() || !is_close(start, current_start.unwrap()) { // Start a new subpath if !manipulators_list.is_empty() { all_subpaths.push(Subpath::new(std::mem::take(&mut manipulators_list), true)); } // Use the correct in-handle (None) and out-handle for the start point manipulators_list.push(ManipulatorGroup::new(start, None, Some(handle1))); } else { // Update the out-handle of the previous point if let Some(last) = manipulators_list.last_mut() { last.out_handle = Some(handle1); } } // Add the end point with the correct in-handle and out-handle (None) 
manipulators_list.push(ManipulatorGroup::new(end, Some(handle2), None)); current_start = Some(end); // Check if this is the last segment if index == cubics.len() - 1 { all_subpaths.push(Subpath::new(manipulators_list, true)); manipulators_list = Vec::new(); // Reset manipulators for the next path } } } Vector::from_subpaths(all_subpaths, false) } type Path = Vec<path_bool::PathSegment>; fn boolean_union(a: Path, b: Path) -> Vec<Path> { path_bool(a, b, PathBooleanOperation::Union) } fn path_bool(a: Path, b: Path, op: PathBooleanOperation) -> Vec<Path> { match path_bool::path_boolean(&a, FillRule::NonZero, &b, FillRule::NonZero, op) { Ok(results) => results, Err(e) => { let a_path = path_bool::path_to_path_data(&a, 0.001); let b_path = path_bool::path_to_path_data(&b, 0.001); log::error!("Boolean error {e:?} encountered while processing {a_path}\n {op:?}\n {b_path}"); Vec::new() } } } fn boolean_subtract(a: Path, b: Path) -> Vec<Path> { path_bool(a, b, PathBooleanOperation::Difference) } pub fn boolean_intersect(a: Path, b: Path) -> Vec<Path> { path_bool(a, b, PathBooleanOperation::Intersection) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/lib.rs
node-graph/node-macro/src/lib.rs
use crate::crate_ident::CrateIdent; use proc_macro::TokenStream; use proc_macro_error2::proc_macro_error; use syn::GenericParam; mod buffer_struct; mod codegen; mod crate_ident; mod derive_choice_type; mod parsing; mod shader_nodes; mod validation; /// Used to create a node definition. #[proc_macro_error] #[proc_macro_attribute] pub fn node(attr: TokenStream, item: TokenStream) -> TokenStream { // Performs the `node_impl` macro's functionality of attaching an `impl Node for TheGivenStruct` block to the node struct parsing::new_node_fn(attr.into(), item.into()).unwrap_or_else(|err| err.to_compile_error()).into() } /// Generate meta-information for an enum. /// /// `#[widget(F)]` on a type indicates the type of widget to use to display/edit the type, currently `Radio` and `Dropdown` are supported. /// /// `#[label("Foo")]` on a variant overrides the default UI label (which is otherwise the name converted to title case). All labels are collected into a [`core::fmt::Display`] impl. /// /// `#[icon("tag"))]` sets the icon to use when a variant is shown in a menu or radio button. /// /// Doc comments on a variant become tooltip description text. #[proc_macro_derive(ChoiceType, attributes(widget, menu_separator, label, icon))] pub fn derive_choice_type(input_item: TokenStream) -> TokenStream { derive_choice_type::derive_choice_type_impl(input_item.into()).unwrap_or_else(|err| err.to_compile_error()).into() } /// Derive a struct to implement `ShaderStruct`, see that for docs. #[proc_macro_derive(BufferStruct)] pub fn derive_buffer_struct(input_item: TokenStream) -> TokenStream { let crate_ident = CrateIdent::default(); TokenStream::from(buffer_struct::derive_buffer_struct(&crate_ident, input_item).unwrap_or_else(|err| err.to_compile_error())) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/validation.rs
node-graph/node-macro/src/validation.rs
use crate::parsing::{Implementation, NodeParsedField, ParsedField, ParsedFieldType, ParsedNodeFn, RegularParsedField}; use proc_macro_error2::emit_error; use quote::quote; use syn::spanned::Spanned; use syn::{GenericParam, Type}; pub fn validate_node_fn(parsed: &ParsedNodeFn) -> syn::Result<()> { let validators: &[fn(&ParsedNodeFn)] = &[ // Add more validators here as needed validate_implementations_for_generics, validate_primary_input_expose, validate_min_max, ]; for validator in validators { validator(parsed); } Ok(()) } fn validate_min_max(parsed: &ParsedNodeFn) { for field in &parsed.fields { if let ParsedField { ty: ParsedFieldType::Regular(RegularParsedField { number_hard_max, number_hard_min, number_soft_max, number_soft_min, .. }), pat_ident, .. } = field { if let (Some(soft_min), Some(hard_min)) = (number_soft_min, number_hard_min) { let soft_min_value: f64 = soft_min.base10_parse().unwrap_or_default(); let hard_min_value: f64 = hard_min.base10_parse().unwrap_or_default(); if soft_min_value == hard_min_value { emit_error!( pat_ident.span(), "Unnecessary #[soft_min] attribute on `{}`, as #[hard_min] has the same value.", pat_ident.ident; help = "You can safely remove the #[soft_min] attribute from this field."; note = "#[soft_min] is redundant when it equals #[hard_min].", ); } else if soft_min_value < hard_min_value { emit_error!( pat_ident.span(), "The #[soft_min] attribute on `{}` is incorrectly greater than #[hard_min].", pat_ident.ident; help = "You probably meant to reverse the two attribute values."; note = "Allowing the possible slider range to preceed #[hard_min] doesn't make sense.", ); } } if let (Some(soft_max), Some(hard_max)) = (number_soft_max, number_hard_max) { let soft_max_value: f64 = soft_max.base10_parse().unwrap_or_default(); let hard_max_value: f64 = hard_max.base10_parse().unwrap_or_default(); if soft_max_value == hard_max_value { emit_error!( pat_ident.span(), "Unnecessary #[soft_max] attribute on `{}`, as #[hard_max] has the same 
value.", pat_ident.ident; help = "You can safely remove the #[soft_max] attribute from this field."; note = "#[soft_max] is redundant when it equals #[hard_max].", ); } else if soft_max_value < hard_max_value { emit_error!( pat_ident.span(), "The #[soft_max] attribute on `{}` is incorrectly greater than #[hard_max].", pat_ident.ident; help = "You probably meant to reverse the two attribute values."; note = "Allowing the possible slider range to exceed #[hard_max] doesn't make sense.", ); } } } } } fn validate_primary_input_expose(parsed: &ParsedNodeFn) { if let Some(ParsedField { ty: ParsedFieldType::Regular(RegularParsedField { exposed: true, .. }), pat_ident, .. }) = parsed.fields.first() { emit_error!( pat_ident.span(), "Unnecessary #[expose] attribute on primary input `{}`. Primary inputs are always exposed.", pat_ident.ident; help = "You can safely remove the #[expose] attribute from this field."; note = "The function's second argument, `{}`, is the node's primary input and it's always exposed by default", pat_ident.ident ); } } fn validate_implementations_for_generics(parsed: &ParsedNodeFn) { let has_skip_impl = parsed.attributes.skip_impl; if !has_skip_impl && !parsed.fn_generics.is_empty() { for field in &parsed.fields { let pat_ident = &field.pat_ident; match &field.ty { ParsedFieldType::Regular(RegularParsedField { ty, implementations, .. }) => { if contains_generic_param(ty, &parsed.fn_generics) && implementations.is_empty() { emit_error!( ty.span(), "Generic type `{}` in field `{}` requires an #[implementations(...)] attribute", quote!(#ty), pat_ident.ident; help = "Add #[implementations(ConcreteType1, ConcreteType2)] to field '{}'", pat_ident.ident; help = "Or use #[skip_impl] if you want to manually implement the node" ); } } ParsedFieldType::Node(NodeParsedField { input_type, output_type, implementations, .. 
}) => { if (contains_generic_param(input_type, &parsed.fn_generics) || contains_generic_param(output_type, &parsed.fn_generics)) && implementations.is_empty() { emit_error!( pat_ident.span(), "Generic types in Node field `{}` require an #[implementations(...)] attribute", pat_ident.ident; help = "Add #[implementations(InputType1 -> OutputType1, InputType2 -> OutputType2)] to field '{}'", pat_ident.ident; help = "Or use #[skip_impl] if you want to manually implement the node" ); } // Additional check for Node implementations for impl_ in implementations { validate_node_implementation(impl_, input_type, output_type, &parsed.fn_generics); } } } } } } fn validate_node_implementation(impl_: &Implementation, input_type: &Type, output_type: &Type, fn_generics: &[GenericParam]) { if contains_generic_param(&impl_.input, fn_generics) || contains_generic_param(&impl_.output, fn_generics) { emit_error!( impl_.input.span(), "Implementation types `{}` and `{}` must be concrete, not generic", quote!(#input_type), quote!(#output_type); help = "Replace generic types with concrete types in the implementation" ); } } fn contains_generic_param(ty: &Type, fn_generics: &[GenericParam]) -> bool { struct GenericParamChecker<'a> { fn_generics: &'a [GenericParam], found: bool, } impl<'a> syn::visit::Visit<'a> for GenericParamChecker<'a> { fn visit_ident(&mut self, ident: &'a syn::Ident) { if self .fn_generics .iter() .any(|param| if let GenericParam::Type(type_param) = param { type_param.ident == *ident } else { false }) { self.found = true; } } } let mut checker = GenericParamChecker { fn_generics, found: false }; syn::visit::visit_type(&mut checker, ty); checker.found }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/buffer_struct.rs
node-graph/node-macro/src/buffer_struct.rs
use crate::crate_ident::CrateIdent; use proc_macro2::{Ident, Span, TokenStream}; use quote::{ToTokens, format_ident, quote}; use std::collections::HashSet; use syn::punctuated::Punctuated; use syn::visit_mut::VisitMut; use syn::{Fields, GenericParam, Generics, Item, ItemEnum, ItemStruct, Meta, MetaList, Path, PathSegment, Result, Token, TypeParam, TypeParamBound, visit_mut}; pub fn derive_buffer_struct(crate_ident: &CrateIdent, content: proc_macro::TokenStream) -> Result<TokenStream> { let item = syn::parse::<Item>(content)?; match &item { Item::Enum(item) => derive_buffer_struct_enum(crate_ident, item), Item::Struct(item) => derive_buffer_struct_struct(crate_ident, item), _ => Err(syn::Error::new_spanned(&item, "Expected a struct or an enum")), } } pub fn derive_buffer_struct_enum(crate_ident: &CrateIdent, item: &ItemEnum) -> Result<TokenStream> { let gcore_shaders = crate_ident.gcore_shaders()?; let mod_buffer_struct = quote!(#gcore_shaders::shaders::buffer_struct); let reexport = quote!(#gcore_shaders::shaders::__private); if !item.generics.params.is_empty() { return Err(syn::Error::new_spanned(&item.generics, "enum must not have any generics")); } let enum_requirements_error = || { syn::Error::new( Span::call_site(), "deriving `BufferStruct` on an enum requires `#[repr(u32)]` and `#[derive(num_enum::FromPrimitive, num_enum::IntoPrimitive)]`", ) }; let repr_path = Path::from(format_ident!("repr")); let repr = item .attrs .iter() .filter_map(|a| match &a.meta { Meta::List(MetaList { path, tokens, .. }) if *path == repr_path => Some(tokens), _ => None, }) .next() .ok_or_else(enum_requirements_error)?; let ident = &item.ident; Ok(quote! 
{ unsafe impl #mod_buffer_struct::BufferStruct for #ident { type Buffer = #repr; fn write(from: Self) -> Self::Buffer { <#repr as From<Self>>::from(from) } fn read(from: Self::Buffer) -> Self { <Self as #reexport::num_enum::FromPrimitive>::from_primitive(from) } } }) } /// see [`BufferStruct`] docs /// /// This is also largely copied from my (@firestar99) project and adjusted /// /// [`BufferStruct`]: `no_std_types::shaders::buffer_struct::BufferStruct` pub fn derive_buffer_struct_struct(crate_ident: &CrateIdent, item: &ItemStruct) -> Result<TokenStream> { let gcore_shaders = crate_ident.gcore_shaders()?; let mod_buffer_struct = quote!(#gcore_shaders::shaders::buffer_struct); let reexport = quote!(#gcore_shaders::shaders::__private); let generics = item .generics .params .iter() .filter_map(|g| match g { GenericParam::Lifetime(_) => None, GenericParam::Type(t) => Some(t.ident.clone()), GenericParam::Const(c) => Some(c.ident.clone()), }) .collect(); let mut members_buffer = Punctuated::<TokenStream, Token![,]>::new(); let mut write = Punctuated::<TokenStream, Token![,]>::new(); let mut read = Punctuated::<TokenStream, Token![,]>::new(); let mut gen_name_gen = GenericNameGen::new(); let mut gen_ref_tys = Vec::new(); let (members_buffer, write, read) = match &item.fields { Fields::Named(named) => { for f in &named.named { let name = f.ident.as_ref().unwrap(); let mut ty = f.ty.clone(); let mut visitor = GenericsVisitor::new(&item.ident, &generics); visit_mut::visit_type_mut(&mut visitor, &mut ty); if visitor.found_generics { gen_ref_tys.push(f.ty.clone()); let gen_ident = gen_name_gen.next(); members_buffer.push(quote!(#name: #gen_ident)); } else { members_buffer.push(quote! { #name: <#ty as #mod_buffer_struct::BufferStruct>::Buffer }); } write.push(quote! { #name: <#ty as #mod_buffer_struct::BufferStruct>::write(from.#name) }); read.push(quote! 
{ #name: <#ty as #mod_buffer_struct::BufferStruct>::read(from.#name) }); } (quote!({#members_buffer}), quote!(Self::Buffer {#write}), quote!(Self {#read})) } Fields::Unnamed(unnamed) => { for (i, f) in unnamed.unnamed.iter().enumerate() { let mut ty = f.ty.clone(); let mut visitor = GenericsVisitor::new(&item.ident, &generics); visit_mut::visit_type_mut(&mut visitor, &mut ty); if visitor.found_generics { gen_ref_tys.push(f.ty.clone()); members_buffer.push(gen_name_gen.next().into_token_stream()); } else { members_buffer.push(quote! { <#ty as #mod_buffer_struct::BufferStruct>::Buffer }); } let index = syn::Index::from(i); write.push(quote! { <#ty as #mod_buffer_struct::BufferStruct>::write(from.#index) }); read.push(quote! { <#ty as #mod_buffer_struct::BufferStruct>::read(from.#index) }); } (quote!((#members_buffer);), quote!(Self::Buffer(#write)), quote!(Self(#read))) } Fields::Unit => (quote!(;), quote!(let _ = from; Self::Buffer {}), quote!(let _ = from; Self::Shader {})), }; let generics_decl = &item.generics; let generics_ref = decl_to_ref(item.generics.params.iter()); let generics_where = gen_ref_tys .iter() .map(|ty| quote!(#ty: #mod_buffer_struct::BufferStruct)) .collect::<Punctuated<TokenStream, Token![,]>>() .into_token_stream(); let generics_decl_any = gen_name_gen.decl(quote! { #reexport::bytemuck::Pod + Send + Sync }); let generics_ref_buffer = gen_ref_tys .iter() .map(|ty| quote!(<#ty as #mod_buffer_struct::BufferStruct>::Buffer)) .collect::<Punctuated<TokenStream, Token![,]>>() .into_token_stream(); let vis = &item.vis; let ident = &item.ident; let buffer_ident = format_ident!("{}Buffer", ident); Ok(quote! 
{ #[repr(C)] #[derive(Copy, Clone, #reexport::bytemuck::Zeroable, #reexport::bytemuck::Pod)] #vis struct #buffer_ident #generics_decl_any #members_buffer unsafe impl #generics_decl #mod_buffer_struct::BufferStruct for #ident #generics_ref where #ident #generics_ref: Copy, #generics_where { type Buffer = #buffer_ident <#generics_ref_buffer>; fn write(from: Self) -> Self::Buffer { #write } fn read(from: Self::Buffer) -> Self { #read } } }) } struct GenericsVisitor<'a> { self_ident: &'a Ident, generics: &'a HashSet<Ident>, found_generics: bool, } impl<'a> GenericsVisitor<'a> { pub fn new(self_ident: &'a Ident, generics: &'a HashSet<Ident>) -> Self { Self { self_ident, generics, found_generics: false, } } } impl VisitMut for GenericsVisitor<'_> { fn visit_ident_mut(&mut self, i: &mut Ident) { if self.generics.contains(i) { self.found_generics = true; } visit_mut::visit_ident_mut(self, i); } fn visit_path_segment_mut(&mut self, i: &mut PathSegment) { if i.ident == "Self" { i.ident = self.self_ident.clone(); } visit_mut::visit_path_segment_mut(self, i); } } struct GenericNameGen(u32); impl GenericNameGen { pub fn new() -> Self { Self(0) } pub fn next(&mut self) -> Ident { let i = self.0; self.0 += 1; format_ident!("T{}", i) } pub fn decl(self, ty: TokenStream) -> Generics { let params: Punctuated<GenericParam, Token![,]> = (0..self.0) .map(|i| { GenericParam::Type(TypeParam { attrs: Vec::new(), ident: format_ident!("T{}", i), colon_token: Some(Default::default()), bounds: Punctuated::from_iter([TypeParamBound::Verbatim(ty.clone())]), eq_token: None, default: None, }) }) .collect(); if !params.is_empty() { Generics { lt_token: Some(Default::default()), params, gt_token: Some(Default::default()), where_clause: None, } } else { Generics::default() } } } fn decl_to_ref<'a>(generics: impl Iterator<Item = &'a GenericParam>) -> TokenStream { let out = generics .map(|generic| match generic { GenericParam::Lifetime(l) => l.lifetime.to_token_stream(), GenericParam::Type(t) => 
t.ident.to_token_stream(), GenericParam::Const(c) => c.ident.to_token_stream(), }) .collect::<Punctuated<TokenStream, Token![,]>>(); if out.is_empty() { TokenStream::new() } else { quote!(<#out>) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/codegen.rs
node-graph/node-macro/src/codegen.rs
use crate::parsing::*; use convert_case::{Case, Casing}; use proc_macro2::TokenStream as TokenStream2; use quote::{ToTokens, format_ident, quote, quote_spanned}; use std::sync::atomic::AtomicU64; use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::token::Comma; use syn::{Error, Ident, PatIdent, Token, WhereClause, WherePredicate, parse_quote}; static NODE_ID: AtomicU64 = AtomicU64::new(0); pub(crate) fn generate_node_code(crate_ident: &CrateIdent, parsed: &ParsedNodeFn) -> syn::Result<TokenStream2> { let ParsedNodeFn { vis, attributes, fn_name, struct_name, mod_name, fn_generics, where_clause, input, output_type, is_async, fields, body, description, .. } = parsed; let core_types = crate_ident.gcore()?; let category = &attributes.category.as_ref().map(|value| quote!(Some(#value))).unwrap_or(quote!(None)); let mod_name = format_ident!("_{}_mod", mod_name); let display_name = match &attributes.display_name.as_ref() { Some(lit) => lit.value(), None => struct_name.to_string().to_case(Case::Title), }; let struct_name = format_ident!("{}Node", struct_name); let struct_generics: Vec<Ident> = fields.iter().enumerate().map(|(i, _)| format_ident!("Node{}", i)).collect(); let input_ident = &input.pat_ident; let context_features = &input.context_features; let field_idents: Vec<_> = fields.iter().map(|f| &f.pat_ident).collect(); let field_names: Vec<_> = field_idents.iter().map(|pat_ident| &pat_ident.ident).collect(); let input_names: Vec<_> = fields .iter() .map(|f| &f.name) .zip(field_names.iter()) .map(|zipped| match zipped { (Some(name), _) => name.value(), (_, name) => name.to_string().to_case(Case::Title), }) .collect(); let input_descriptions: Vec<_> = fields.iter().map(|f| &f.description).collect(); let struct_fields = field_names.iter().zip(struct_generics.iter()).map(|(name, r#gen)| { quote! 
{ pub(super) #name: #r#gen } }); let mut future_idents = Vec::new(); let field_types: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { ty, .. }) => ty.clone(), ParsedFieldType::Node(NodeParsedField { output_type, input_type, .. }) => match parsed.is_async { true => parse_quote!(&'n impl #core_types::Node<'n, #input_type, Output = impl core::future::Future<Output=#output_type>>), false => parse_quote!(&'n impl #core_types::Node<'n, #input_type, Output = #output_type>), }, }) .collect(); let widget_override: Vec<_> = fields .iter() .map(|field| match &field.widget_override { ParsedWidgetOverride::None => quote!(RegistryWidgetOverride::None), ParsedWidgetOverride::Hidden => quote!(RegistryWidgetOverride::Hidden), ParsedWidgetOverride::String(lit_str) => quote!(RegistryWidgetOverride::String(#lit_str)), ParsedWidgetOverride::Custom(lit_str) => quote!(RegistryWidgetOverride::Custom(#lit_str)), }) .collect(); let value_sources: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { value_source, .. }) => match value_source { ParsedValueSource::Default(data) => { // Check if the data is a string literal by parsing the token stream let data_str = data.to_string(); if data_str.starts_with('"') && data_str.ends_with('"') && data_str.len() >= 2 { quote!(RegistryValueSource::Default(#data)) } else { quote!(RegistryValueSource::Default(stringify!(#data))) } } ParsedValueSource::Scope(data) => quote!(RegistryValueSource::Scope(#data)), _ => quote!(RegistryValueSource::None), }, _ => quote!(RegistryValueSource::None), }) .collect(); let default_types: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { implementations, .. 
}) => match implementations.first() { Some(ty) => quote!(Some(concrete!(#ty))), _ => quote!(None), }, _ => quote!(None), }) .collect(); let number_min_values: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { number_soft_min, number_hard_min, .. }) => match (number_soft_min, number_hard_min) { (Some(soft_min), _) => quote!(Some(#soft_min)), (None, Some(hard_min)) => quote!(Some(#hard_min)), (None, None) => quote!(None), }, _ => quote!(None), }) .collect(); let number_max_values: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { number_soft_max, number_hard_max, .. }) => match (number_soft_max, number_hard_max) { (Some(soft_max), _) => quote!(Some(#soft_max)), (None, Some(hard_max)) => quote!(Some(#hard_max)), (None, None) => quote!(None), }, _ => quote!(None), }) .collect(); let number_mode_range_values: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { number_mode_range: Some(number_mode_range), .. }) => quote!(Some(#number_mode_range)), _ => quote!(None), }) .collect(); let number_display_decimal_places: Vec<_> = fields .iter() .map(|field| field.number_display_decimal_places.as_ref().map_or(quote!(None), |i| quote!(Some(#i)))) .collect(); let number_step: Vec<_> = fields.iter().map(|field| field.number_step.as_ref().map_or(quote!(None), |i| quote!(Some(#i)))).collect(); let unit_suffix: Vec<_> = fields.iter().map(|field| field.unit.as_ref().map_or(quote!(None), |i| quote!(Some(#i)))).collect(); let exposed: Vec<_> = fields .iter() .map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { exposed, .. }) => quote!(#exposed), _ => quote!(true), }) .collect(); let eval_args = fields.iter().map(|field| { let name = &field.pat_ident.ident; match &field.ty { ParsedFieldType::Regular { .. } => { quote! { let #name = self.#name.eval(__input.clone()).await; } } ParsedFieldType::Node { .. } => { quote! 
{ let #name = &self.#name; } } } }); let min_max_args = fields.iter().map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { number_hard_min, number_hard_max, .. }) => { let name = &field.pat_ident.ident; let mut tokens = quote!(); if let Some(min) = number_hard_min { tokens.extend(quote_spanned! {min.span()=> let #name = #core_types::misc::Clampable::clamp_hard_min(#name, #min); }); } if let Some(max) = number_hard_max { tokens.extend(quote_spanned! {max.span()=> let #name = #core_types::misc::Clampable::clamp_hard_max(#name, #max); }); } tokens } ParsedFieldType::Node { .. } => quote!(), }); let all_implementation_types = fields.iter().flat_map(|field| match &field.ty { ParsedFieldType::Regular(RegularParsedField { implementations, .. }) => implementations.iter().cloned().collect::<Vec<_>>(), ParsedFieldType::Node(NodeParsedField { implementations, .. }) => implementations .iter() .flat_map(|implementation| [implementation.input.clone(), implementation.output.clone()]) .collect(), }); let all_implementation_types = all_implementation_types.chain(input.implementations.iter().cloned()); let input_type = &parsed.input.ty; let mut clauses = Vec::new(); let mut clampable_clauses = Vec::new(); for (field, name) in fields.iter().zip(struct_generics.iter()) { clauses.push(match (&field.ty, *is_async) { ( ParsedFieldType::Regular(RegularParsedField { ty, number_hard_min, number_hard_max, .. 
}), _, ) => { let all_lifetime_ty = substitute_lifetimes(ty.clone(), "all"); let id = future_idents.len(); let fut_ident = format_ident!("F{}", id); future_idents.push(fut_ident.clone()); // Add Clampable bound if this field uses hard_min or hard_max if number_hard_min.is_some() || number_hard_max.is_some() { // The bound applies to the Output type of the future, which is #ty clampable_clauses.push(quote!(#ty: #core_types::misc::Clampable)); } quote!( #fut_ident: core::future::Future<Output = #ty> + #core_types::WasmNotSend + 'n, for<'all> #all_lifetime_ty: #core_types::WasmNotSend, #name: #core_types::Node<'n, #input_type, Output = #fut_ident> + #core_types::WasmNotSync ) } (ParsedFieldType::Node(NodeParsedField { input_type, output_type, .. }), true) => { let id = future_idents.len(); let fut_ident = format_ident!("F{}", id); future_idents.push(fut_ident.clone()); quote!( #fut_ident: core::future::Future<Output = #output_type> + #core_types::WasmNotSend + 'n, #name: #core_types::Node<'n, #input_type, Output = #fut_ident > + #core_types::WasmNotSync ) } (ParsedFieldType::Node { .. }, false) => unreachable!("Found node which takes an impl Node<> input but is not async"), }); } let where_clause = where_clause.clone().unwrap_or(WhereClause { where_token: Token![where](output_type.span()), predicates: Default::default(), }); let mut struct_where_clause = where_clause.clone(); let extra_where: Punctuated<WherePredicate, Comma> = parse_quote!( #(#clauses,)* #(#clampable_clauses,)* #output_type: 'n, ); struct_where_clause.predicates.extend(extra_where); let new_args = struct_generics.iter().zip(field_names.iter()).map(|(r#gen, name)| { quote! { #name: #r#gen } }); let async_keyword = is_async.then(|| quote!(async)); let await_keyword = is_async.then(|| quote!(.await)); let eval_impl = quote! 
{ type Output = #core_types::registry::DynFuture<'n, #output_type>; #[inline] fn eval(&'n self, __input: #input_type) -> Self::Output { Box::pin(async move { use #core_types::misc::Clampable; #(#eval_args)* #(#min_max_args)* self::#fn_name(__input #(, #field_names)*) #await_keyword }) } }; let identifier = format_ident!("{}_proto_ident", fn_name); let identifier_path = match parsed.attributes.path.as_ref() { Some(path) => { let path = path.to_token_stream().to_string().replace(' ', ""); quote!(#path) } None => quote!(std::module_path!()), }; let register_node_impl = generate_register_node_impl(parsed, &field_names, &struct_name, &identifier)?; let import_name = format_ident!("_IMPORT_STUB_{}", mod_name.to_string().to_case(Case::UpperSnake)); let properties = &attributes.properties_string.as_ref().map(|value| quote!(Some(#value))).unwrap_or(quote!(None)); let cfg = crate::shader_nodes::modify_cfg(attributes); let node_input_accessor = generate_node_input_references(parsed, fn_generics, &field_idents, core_types, &identifier, &cfg); let ShaderTokens { shader_entry_point, gpu_node } = attributes.shader_node.as_ref().map(|n| n.codegen(crate_ident, parsed)).unwrap_or(Ok(ShaderTokens::default()))?; Ok(quote! 
{ /// Underlying implementation for [#struct_name] #[inline] #[allow(clippy::too_many_arguments)] #vis #async_keyword fn #fn_name <'n, #(#fn_generics,)*> (#input_ident: #input_type #(, #field_idents: #field_types)*) -> #output_type #where_clause #body #cfg #[automatically_derived] impl<'n, #(#fn_generics,)* #(#struct_generics,)* #(#future_idents,)*> #core_types::Node<'n, #input_type> for #mod_name::#struct_name<#(#struct_generics,)*> #struct_where_clause { #eval_impl } #cfg const fn #identifier() -> #core_types::ProtoNodeIdentifier { #core_types::ProtoNodeIdentifier::new(std::concat!(#identifier_path, "::", std::stringify!(#struct_name))) } #cfg #[doc(inline)] pub use #mod_name::#struct_name; #[doc(hidden)] #node_input_accessor #cfg #[doc(hidden)] #[allow(clippy::module_inception)] mod #mod_name { use super::*; use #core_types as gcore; use gcore::{Node, NodeIOTypes, concrete, fn_type, fn_type_fut, future, ProtoNodeIdentifier, WasmNotSync, NodeIO, ContextFeature}; use gcore::value::ClonedNode; use gcore::ops::TypeNode; use gcore::registry::{NodeMetadata, FieldMetadata, NODE_REGISTRY, NODE_METADATA, DynAnyNode, DowncastBothNode, DynFuture, TypeErasedBox, PanicNode, RegistryValueSource, RegistryWidgetOverride}; use gcore::ctor::ctor; // Use the types specified in the implementation static #import_name: core::marker::PhantomData<(#(#all_implementation_types,)*)> = core::marker::PhantomData; #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct #struct_name<#(#struct_generics,)*> { #(#struct_fields,)* } #[automatically_derived] impl<'n, #(#struct_generics,)*> #struct_name<#(#struct_generics,)*> { #[allow(clippy::too_many_arguments)] pub fn new(#(#new_args,)*) -> Self { Self { #(#field_names,)* } } } #register_node_impl #[cfg_attr(not(target_family = "wasm"), ctor)] fn register_metadata() { let metadata = NodeMetadata { display_name: #display_name, category: #category, description: #description, properties: #properties, context_features: 
vec![#(ContextFeature::#context_features,)*], fields: vec![ #( FieldMetadata { name: #input_names, widget_override: #widget_override, description: #input_descriptions, exposed: #exposed, value_source: #value_sources, default_type: #default_types, number_min: #number_min_values, number_max: #number_max_values, number_mode_range: #number_mode_range_values, number_display_decimal_places: #number_display_decimal_places, number_step: #number_step, unit: #unit_suffix, }, )* ], }; NODE_METADATA.lock().unwrap().insert(#identifier(), metadata); } } #shader_entry_point #gpu_node }) } /// Generates strongly typed utilites to access inputs fn generate_node_input_references( parsed: &ParsedNodeFn, fn_generics: &[crate::GenericParam], field_idents: &[&PatIdent], core_types: &TokenStream2, identifier: &Ident, cfg: &TokenStream2, ) -> TokenStream2 { let inputs_module_name = format_ident!("{}", parsed.struct_name.to_string().to_case(Case::Snake)); let mut generated_input_accessor = Vec::new(); if !parsed.attributes.skip_impl { let (mut modified, mut generic_collector) = FilterUsedGenerics::new(fn_generics); for (input_index, (parsed_input, input_ident)) in parsed.fields.iter().zip(field_idents).enumerate() { let mut ty = match &parsed_input.ty { ParsedFieldType::Regular(RegularParsedField { ty, .. }) => ty, ParsedFieldType::Node(NodeParsedField { output_type, .. }) => output_type, } .clone(); // We only want the necessary generics. let used = generic_collector.filter_unnecessary_generics(&mut modified, &mut ty); // TODO: figure out a better name that doesn't conflict with so many types let struct_name = format_ident!("{}Input", input_ident.ident.to_string().to_case(Case::Pascal)); let (fn_generic_params, phantom_data_declerations) = generate_phantom_data(used.iter()); // Only create structs with phantom data where necessary. generated_input_accessor.push(if phantom_data_declerations.is_empty() { quote! { pub struct #struct_name; } } else { quote! 
{ pub struct #struct_name <#(#used),*>{ #(#phantom_data_declerations,)* } } }); generated_input_accessor.push(quote! { impl <#(#used),*> #core_types::NodeInputDecleration for #struct_name <#(#fn_generic_params),*> { const INDEX: usize = #input_index; fn identifier() -> #core_types::ProtoNodeIdentifier { #inputs_module_name::IDENTIFIER.clone() } type Result = #ty; } }) } } quote! { #cfg pub mod #inputs_module_name { use super::*; /// The `ProtoNodeIdentifier` of this node without any generics attached to it pub const IDENTIFIER: #core_types::ProtoNodeIdentifier = #identifier(); #(#generated_input_accessor)* } } } /// It is necessary to generate PhantomData for each fn generic to avoid compiler errors. fn generate_phantom_data<'a>(fn_generics: impl Iterator<Item = &'a crate::GenericParam>) -> (Vec<TokenStream2>, Vec<TokenStream2>) { let mut phantom_data_declerations = Vec::new(); let mut fn_generic_params = Vec::new(); for fn_generic_param in fn_generics { let field_name = format_ident!("phantom_{}", phantom_data_declerations.len()); match fn_generic_param { crate::GenericParam::Lifetime(lifetime_param) => { let lifetime = &lifetime_param.lifetime; fn_generic_params.push(quote! {#lifetime}); phantom_data_declerations.push(quote! {#field_name: core::marker::PhantomData<&#lifetime ()>}) } crate::GenericParam::Type(type_param) => { let generic_name = &type_param.ident; fn_generic_params.push(quote! {#generic_name}); phantom_data_declerations.push(quote! 
{#field_name: core::marker::PhantomData<#generic_name>}); } _ => {} } } (fn_generic_params, phantom_data_declerations) } fn generate_register_node_impl(parsed: &ParsedNodeFn, field_names: &[&Ident], struct_name: &Ident, identifier: &Ident) -> Result<TokenStream2, Error> { if parsed.attributes.skip_impl { return Ok(quote!()); } let mut constructors = Vec::new(); let unit = parse_quote!(gcore::Context); let parameter_types: Vec<_> = parsed .fields .iter() .map(|field| { match &field.ty { ParsedFieldType::Regular(RegularParsedField { implementations, ty, .. }) => { if !implementations.is_empty() { implementations.iter().map(|ty| (&unit, ty)).collect() } else { vec![(&unit, ty)] } } ParsedFieldType::Node(NodeParsedField { implementations, input_type, output_type, .. }) => { if !implementations.is_empty() { implementations.iter().map(|impl_| (&impl_.input, &impl_.output)).collect() } else { vec![(input_type, output_type)] } } } .into_iter() .map(|(input, out)| (substitute_lifetimes(input.clone(), "_"), substitute_lifetimes(out.clone(), "_"))) .collect::<Vec<_>>() }) .collect(); let max_implementations = parameter_types.iter().map(|x| x.len()).chain([parsed.input.implementations.len().max(1)]).max(); for i in 0..max_implementations.unwrap_or(0) { let mut temp_constructors = Vec::new(); let mut temp_node_io = Vec::new(); let mut panic_node_types = Vec::new(); for (j, types) in parameter_types.iter().enumerate() { let field_name = field_names[j]; let (input_type, output_type) = &types[i.min(types.len() - 1)]; let node = matches!(parsed.fields[j].ty, ParsedFieldType::Node { .. 
}); let downcast_node = quote!( let #field_name: DowncastBothNode<#input_type, #output_type> = DowncastBothNode::new(args[#j].clone()); ); if node && !parsed.is_async { return Err(Error::new_spanned(&parsed.fn_name, "Node needs to be async if you want to use lambda parameters")); } temp_constructors.push(downcast_node); temp_node_io.push(quote!(fn_type_fut!(#input_type, #output_type, alias: #output_type))); panic_node_types.push(quote!(#input_type, DynFuture<'static, #output_type>)); } let input_type = match parsed.input.implementations.is_empty() { true => parsed.input.ty.clone(), false => parsed.input.implementations[i.min(parsed.input.implementations.len() - 1)].clone(), }; constructors.push(quote!( ( |args| { Box::pin(async move { #(#temp_constructors;)* let node = #struct_name::new(#(#field_names,)*); // try polling futures let any: DynAnyNode<#input_type, _, _> = DynAnyNode::new(node); Box::new(any) as TypeErasedBox<'_> }) }, { let node = #struct_name::new(#(PanicNode::<#panic_node_types>::new(),)*); let params = vec![#(#temp_node_io,)*]; let mut node_io = NodeIO::<'_, #input_type>::to_async_node_io(&node, params); node_io } ) )); } let registry_name = format_ident!("__node_registry_{}_{}", NODE_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst), struct_name); Ok(quote! 
{ #[cfg_attr(not(target_family = "wasm"), ctor)] fn register_node() { let mut registry = NODE_REGISTRY.lock().unwrap(); registry.insert( #identifier(), vec![ #(#constructors,)* ] ); } #[cfg(target_family = "wasm")] #[unsafe(no_mangle)] extern "C" fn #registry_name() { register_node(); register_metadata(); } }) } use crate::crate_ident::CrateIdent; use crate::shader_nodes::{ShaderCodegen, ShaderTokens}; use syn::visit_mut::VisitMut; use syn::{GenericArgument, Lifetime, Type}; struct LifetimeReplacer(&'static str); impl VisitMut for LifetimeReplacer { fn visit_lifetime_mut(&mut self, lifetime: &mut Lifetime) { lifetime.ident = Ident::new(self.0, lifetime.ident.span()); } fn visit_type_mut(&mut self, ty: &mut Type) { match ty { Type::Reference(type_reference) => { if let Some(lifetime) = &mut type_reference.lifetime { self.visit_lifetime_mut(lifetime); } self.visit_type_mut(&mut type_reference.elem); } _ => syn::visit_mut::visit_type_mut(self, ty), } } fn visit_generic_argument_mut(&mut self, arg: &mut GenericArgument) { if let GenericArgument::Lifetime(lifetime) = arg { self.visit_lifetime_mut(lifetime); } else { syn::visit_mut::visit_generic_argument_mut(self, arg); } } } #[must_use] fn substitute_lifetimes(mut ty: Type, lifetime: &'static str) -> Type { LifetimeReplacer(lifetime).visit_type_mut(&mut ty); ty } /// Get only the necessary generics. 
struct FilterUsedGenerics { all: Vec<crate::GenericParam>, used: Vec<bool>, } impl VisitMut for FilterUsedGenerics { fn visit_lifetime_mut(&mut self, used_lifetime: &mut Lifetime) { for (generic, used) in self.all.iter().zip(self.used.iter_mut()) { let crate::GenericParam::Lifetime(lifetime_param) = generic else { continue }; if used_lifetime == &lifetime_param.lifetime { *used = true; } } } fn visit_path_mut(&mut self, path: &mut syn::Path) { for (index, (generic, used)) in self.all.iter().zip(self.used.iter_mut()).enumerate() { let crate::GenericParam::Type(type_param) = generic else { continue }; if path.leading_colon.is_none() && !path.segments.is_empty() && path.segments[0].arguments.is_none() && path.segments[0].ident == type_param.ident { *used = true; // Sometimes the generics conflict with the type name so we rename the generics. path.segments[0].ident = format_ident!("G{index}"); } } for mut el in Punctuated::pairs_mut(&mut path.segments) { self.visit_path_segment_mut(el.value_mut()); } } } impl FilterUsedGenerics { fn new(fn_generics: &[crate::GenericParam]) -> (Vec<crate::GenericParam>, Self) { let mut all_possible_generics = fn_generics.to_vec(); // The 'n lifetime may also be needed; we must add it in all_possible_generics.insert(0, syn::GenericParam::Lifetime(syn::LifetimeParam::new(Lifetime::new("'n", proc_macro2::Span::call_site())))); let modified = all_possible_generics .iter() .cloned() .enumerate() .map(|(index, mut generic)| { let crate::GenericParam::Type(type_param) = &mut generic else { return generic }; // Sometimes the generics conflict with the type name so we rename the generics. 
type_param.ident = format_ident!("G{index}"); generic }) .collect::<Vec<_>>(); let generic_collector = Self { used: vec![false; all_possible_generics.len()], all: all_possible_generics, }; (modified, generic_collector) } fn used<'a>(&'a self, modified: &'a [crate::GenericParam]) -> impl Iterator<Item = &'a crate::GenericParam> { modified.iter().zip(&self.used).filter(|(_, used)| **used).map(move |(value, _)| value) } fn filter_unnecessary_generics(&mut self, modified: &mut Vec<syn::GenericParam>, ty: &mut Type) -> Vec<syn::GenericParam> { self.used.fill(false); // Find out which generics are necessary to support the node input self.visit_type_mut(ty); // Sometimes generics may reference other generics. This is a non-optimal way of dealing with that. for _ in 0..=self.all.len() { for (index, item) in modified.iter_mut().enumerate() { if self.used[index] { self.visit_generic_param_mut(item); } } } self.used(&*modified).cloned().collect() } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/parsing.rs
node-graph/node-macro/src/parsing.rs
use convert_case::{Case, Casing}; use indoc::{formatdoc, indoc}; use proc_macro2::TokenStream as TokenStream2; use quote::{ToTokens, format_ident}; use syn::parse::{Parse, ParseStream, Parser}; use syn::punctuated::Punctuated; use syn::spanned::Spanned; use syn::token::{Comma, RArrow}; use syn::{ AttrStyle, Attribute, Error, Expr, ExprTuple, FnArg, GenericParam, Ident, ItemFn, Lit, LitFloat, LitInt, LitStr, Meta, Pat, PatIdent, PatType, Path, ReturnType, TraitBound, Type, TypeImplTrait, TypeParam, TypeParamBound, Visibility, WhereClause, parse_quote, }; use crate::codegen::generate_node_code; use crate::crate_ident::CrateIdent; use crate::shader_nodes::ShaderNodeType; #[derive(Clone, Debug)] pub(crate) struct Implementation { pub(crate) input: Type, pub(crate) _arrow: RArrow, pub(crate) output: Type, } #[derive(Debug)] pub(crate) struct ParsedNodeFn { pub(crate) vis: Visibility, pub(crate) attributes: NodeFnAttributes, pub(crate) fn_name: Ident, pub(crate) struct_name: Ident, pub(crate) mod_name: Ident, pub(crate) fn_generics: Vec<GenericParam>, pub(crate) where_clause: Option<WhereClause>, pub(crate) input: Input, pub(crate) output_type: Type, pub(crate) is_async: bool, pub(crate) fields: Vec<ParsedField>, pub(crate) body: TokenStream2, pub(crate) description: String, } #[derive(Debug, Default, Clone)] pub(crate) struct NodeFnAttributes { pub(crate) category: Option<LitStr>, pub(crate) display_name: Option<LitStr>, pub(crate) path: Option<Path>, pub(crate) skip_impl: bool, pub(crate) properties_string: Option<LitStr>, /// whether to `#[cfg]` gate the node implementation, defaults to None pub(crate) cfg: Option<TokenStream2>, /// if this node should get a gpu implementation, defaults to None pub(crate) shader_node: Option<ShaderNodeType>, // Add more attributes as needed } #[derive(Clone, Debug, Default)] pub enum ParsedValueSource { #[default] None, Default(TokenStream2), Scope(LitStr), } // #[widget(ParsedWidgetOverride::Hidden)] // 
#[widget(ParsedWidgetOverride::String = "Some string")] // #[widget(ParsedWidgetOverride::Custom = "Custom string")] #[derive(Clone, Debug, Default)] pub enum ParsedWidgetOverride { #[default] None, Hidden, String(LitStr), Custom(LitStr), } impl Parse for ParsedWidgetOverride { fn parse(input: ParseStream) -> syn::Result<Self> { // Parse the full path (e.g., ParsedWidgetOverride::Hidden) let path: Path = input.parse()?; // Ensure the path starts with `ParsedWidgetOverride` if path.segments.len() == 2 && path.segments[0].ident == "ParsedWidgetOverride" { let variant = &path.segments[1].ident; match variant.to_string().as_str() { "Hidden" => Ok(ParsedWidgetOverride::Hidden), "String" => { input.parse::<syn::Token![=]>()?; let lit: LitStr = input.parse()?; Ok(ParsedWidgetOverride::String(lit)) } "Custom" => { input.parse::<syn::Token![=]>()?; let lit: LitStr = input.parse()?; Ok(ParsedWidgetOverride::Custom(lit)) } _ => Err(Error::new(variant.span(), "Unknown ParsedWidgetOverride variant")), } } else { Err(Error::new(input.span(), "Expected ParsedWidgetOverride::<variant>")) } } } #[derive(Clone, Debug)] pub struct ParsedField { pub pat_ident: PatIdent, pub name: Option<LitStr>, pub description: String, pub widget_override: ParsedWidgetOverride, pub ty: ParsedFieldType, pub number_display_decimal_places: Option<LitInt>, pub number_step: Option<LitFloat>, pub unit: Option<LitStr>, } #[derive(Clone, Debug)] pub enum ParsedFieldType { Regular(RegularParsedField), Node(NodeParsedField), } /// a param of any kind, either a concrete type or a generic type with a set of possible types specified via /// `#[implementation(type)]` #[derive(Clone, Debug)] pub struct RegularParsedField { pub ty: Type, pub exposed: bool, pub value_source: ParsedValueSource, pub number_soft_min: Option<LitFloat>, pub number_soft_max: Option<LitFloat>, pub number_hard_min: Option<LitFloat>, pub number_hard_max: Option<LitFloat>, pub number_mode_range: Option<ExprTuple>, pub implementations: 
Punctuated<Type, Comma>, pub gpu_image: bool, } /// a param of `impl Node` with `#[implementation(in -> out)]` #[derive(Clone, Debug)] pub struct NodeParsedField { pub input_type: Type, pub output_type: Type, pub implementations: Punctuated<Implementation, Comma>, } #[derive(Clone, Debug)] pub(crate) struct Input { pub(crate) pat_ident: PatIdent, pub(crate) ty: Type, pub(crate) implementations: Punctuated<Type, Comma>, pub(crate) context_features: Vec<Ident>, } impl Parse for Implementation { fn parse(input: ParseStream) -> syn::Result<Self> { let input_type: Type = input.parse().map_err(|e| { Error::new( input.span(), formatdoc!( "Failed to parse input type for #[implementation(...)]. Expected a valid Rust type. Error: {}", e, ), ) })?; let arrow: RArrow = input.parse().map_err(|_| { Error::new( input.span(), indoc!( "Expected `->` arrow after input type in #[implementations(...)] on a field of type `impl Node`. The correct syntax is `InputType -> OutputType`." ), ) })?; let output_type: Type = input.parse().map_err(|e| { Error::new( input.span(), formatdoc!( "Failed to parse output type for #[implementation(...)]. Expected a valid Rust type after `->`. 
Error: {}", e ), ) })?; Ok(Implementation { input: input_type, _arrow: arrow, output: output_type, }) } } impl Parse for NodeFnAttributes { fn parse(input: ParseStream) -> syn::Result<Self> { let mut category = None; let mut display_name = None; let mut path = None; let mut skip_impl = false; let mut properties_string = None; let mut cfg = None; let mut shader_node = None; let content = input; // let content; // syn::parenthesized!(content in input); let nested = content.call(Punctuated::<Meta, Comma>::parse_terminated)?; for meta in nested { let name = meta.path().get_ident().ok_or_else(|| Error::new_spanned(meta.path(), "Node macro expects a known Ident, not a path"))?; match name.to_string().as_str() { "category" => { let meta = meta.require_list()?; if category.is_some() { return Err(Error::new_spanned(meta, "Multiple 'category' attributes are not allowed")); } let lit: LitStr = meta .parse_args() .map_err(|_| Error::new_spanned(meta, "Expected a string literal for 'category', e.g., category(\"Value\")"))?; category = Some(lit); } "name" => { let meta = meta.require_list()?; if display_name.is_some() { return Err(Error::new_spanned(meta, "Multiple 'name' attributes are not allowed")); } let parsed_name: LitStr = meta.parse_args().map_err(|_| Error::new_spanned(meta, "Expected a string for 'name', e.g., name(\"Memoize\")"))?; display_name = Some(parsed_name); } "path" => { let meta = meta.require_list()?; if path.is_some() { return Err(Error::new_spanned(meta, "Multiple 'path' attributes are not allowed")); } let parsed_path: Path = meta .parse_args() .map_err(|_| Error::new_spanned(meta, "Expected a valid path for 'path', e.g., path(crate::MemoizeNode)"))?; path = Some(parsed_path); } "skip_impl" => { let path = meta.require_path_only()?; if skip_impl { return Err(Error::new_spanned(path, "Multiple 'skip_impl' attributes are not allowed")); } skip_impl = true; } "properties" => { let meta = meta.require_list()?; if properties_string.is_some() { return 
Err(Error::new_spanned(path, "Multiple 'properties_string' attributes are not allowed")); } let parsed_properties_string: LitStr = meta .parse_args() .map_err(|_| Error::new_spanned(meta, "Expected a string for 'properties', e.g., name(\"channel_mixer_properties\")"))?; properties_string = Some(parsed_properties_string); } "cfg" => { if cfg.is_some() { return Err(Error::new_spanned(path, "Multiple 'feature' attributes are not allowed")); } let meta = meta.require_list()?; cfg = Some(meta.tokens.clone()); } "shader_node" => { if shader_node.is_some() { return Err(Error::new_spanned(path, "Multiple 'feature' attributes are not allowed")); } let meta = meta.require_list()?; shader_node = Some(syn::parse2(meta.tokens.to_token_stream())?); } _ => { return Err(Error::new_spanned( meta, indoc!( r#" Unsupported attribute in `node`. Supported attributes are 'category', 'path' 'name', 'skip_impl', 'cfg' and 'properties'. Example usage: #[node_macro::node(category("Value"), name("Test Node"))] "# ), )); } } } Ok(NodeFnAttributes { category, display_name, path, skip_impl, properties_string, cfg, shader_node, }) } } fn parse_node_fn(attr: TokenStream2, item: TokenStream2) -> syn::Result<ParsedNodeFn> { let attributes = syn::parse2::<NodeFnAttributes>(attr.clone()).map_err(|e| Error::new(e.span(), format!("Failed to parse node_fn attributes: {e}")))?; let input_fn = syn::parse2::<ItemFn>(item.clone()).map_err(|e| Error::new(e.span(), format!("Failed to parse function: {e}. 
Make sure it's a valid Rust function.")))?; let vis = input_fn.vis; let fn_name = input_fn.sig.ident.clone(); let struct_name = format_ident!("{}", fn_name.to_string().to_case(Case::Pascal)); let mod_name = fn_name.clone(); let fn_generics = input_fn.sig.generics.params.into_iter().collect(); let is_async = input_fn.sig.asyncness.is_some(); let (input, fields) = parse_inputs(&input_fn.sig.inputs)?; let output_type = parse_output(&input_fn.sig.output)?; let where_clause = input_fn.sig.generics.where_clause; let body = input_fn.block.to_token_stream(); let description = input_fn .attrs .iter() .filter_map(|a| { if a.style != AttrStyle::Outer { return None; } let Meta::NameValue(name_val) = &a.meta else { return None }; if name_val.path.get_ident().map(|x| x.to_string()) != Some("doc".into()) { return None; } let Expr::Lit(expr_lit) = &name_val.value else { return None }; let Lit::Str(ref text) = expr_lit.lit else { return None }; Some(text.value().trim().to_string()) }) .fold(String::new(), |acc, b| acc + &b + "\n"); Ok(ParsedNodeFn { vis, attributes, fn_name, struct_name, mod_name, fn_generics, input, output_type, is_async, fields, where_clause, body, description, }) } fn parse_inputs(inputs: &Punctuated<FnArg, Comma>) -> syn::Result<(Input, Vec<ParsedField>)> { let mut fields = Vec::new(); let mut input = None; for (index, arg) in inputs.iter().enumerate() { if let FnArg::Typed(PatType { pat, ty, attrs, .. 
}) = arg { // Call argument if index == 0 { if extract_attribute(attrs, "default").is_some() { return Err(Error::new_spanned(&attrs[0], "Call argument cannot be given a default value".to_string())); } if extract_attribute(attrs, "expose").is_some() { return Err(Error::new_spanned(&attrs[0], "Call argument cannot be exposed".to_string())); } let pat_ident = match (**pat).clone() { Pat::Ident(pat_ident) => pat_ident, Pat::Wild(wild) => PatIdent { attrs: wild.attrs, by_ref: None, mutability: None, ident: wild.underscore_token.into(), subpat: None, }, _ => continue, }; let implementations = extract_attribute(attrs, "implementations") .map(|attr| parse_implementations(attr, &pat_ident.ident)) .transpose()? .unwrap_or_default(); let context_features = parse_context_feature_idents(ty); input = Some(Input { pat_ident, ty: (**ty).clone(), implementations, context_features, }); } else if let Pat::Ident(pat_ident) = &**pat { let field = parse_field(pat_ident.clone(), (**ty).clone(), attrs).map_err(|e| Error::new_spanned(pat_ident, format!("Failed to parse argument '{}': {}", pat_ident.ident, e)))?; fields.push(field); } else { return Err(Error::new_spanned(pat, "Expected a simple identifier for the field name")); } } else { return Err(Error::new_spanned(arg, "Expected a typed argument (e.g., `x: i32`)")); } } let input = input.ok_or_else(|| Error::new_spanned(inputs, "Expected at least one input argument. The first argument should be the node input type."))?; Ok((input, fields)) } /// Parse context feature identifiers from the trait bounds of a context parameter. fn parse_context_feature_idents(ty: &Type) -> Vec<Ident> { let mut features = Vec::new(); // Check if this is an impl trait (impl Ctx + ...) if let Type::ImplTrait(TypeImplTrait { bounds, .. }) = ty { for bound in bounds { if let TypeParamBound::Trait(TraitBound { path, .. 
}) = bound { // Extract the last segment of the trait path if let Some(segment) = path.segments.last() { match segment.ident.to_string().as_str() { "ExtractFootprint" | "ExtractRealTime" | "ExtractAnimationTime" | "ExtractPointer" | "ExtractIndex" | "ExtractVarArgs" | "InjectFootprint" | "InjectRealTime" | "InjectAnimationTime" | "InjectPointer" | "InjectIndex" | "InjectVarArgs" => { features.push(segment.ident.clone()); } // Skip Modify* traits as they don't affect usage tracking // Also ignore other traits like Ctx, ExtractAll, etc. _ => {} } } } } } features } fn parse_implementations(attr: &Attribute, name: &Ident) -> syn::Result<Punctuated<Type, Comma>> { let content: TokenStream2 = attr.parse_args()?; let parser = Punctuated::<Type, Comma>::parse_terminated; parser.parse2(content.clone()).map_err(|e| { let span = e.span(); // Get the span of the error Error::new(span, format!("Failed to parse implementations for argument '{name}': {e}")) }) } fn parse_node_implementations<T: Parse>(attr: &Attribute, name: &Ident) -> syn::Result<Punctuated<T, Comma>> { let content: TokenStream2 = attr.parse_args()?; let parser = Punctuated::<T, Comma>::parse_terminated; parser.parse2(content.clone()).map_err(|e| { Error::new( e.span(), formatdoc!( "Invalid #[implementations(...)] for argument `{}`. Expected a comma-separated list of `InputType -> OutputType` pairs. 
Example: #[implementations(i32 -> f64, String -> Vec<u8>)] Error: {}", name, e ), ) }) } fn parse_field(pat_ident: PatIdent, ty: Type, attrs: &[Attribute]) -> syn::Result<ParsedField> { let ident = &pat_ident.ident; let default_value = extract_attribute(attrs, "default") .map(|attr| attr.parse_args().map_err(|e| Error::new_spanned(attr, format!("Invalid `default` value for argument '{ident}': {e}")))) .transpose()?; let scope = extract_attribute(attrs, "scope") .map(|attr| attr.parse_args().map_err(|e| Error::new_spanned(attr, format!("Invalid `scope` value for argument '{ident}': {e}")))) .transpose()?; let name = extract_attribute(attrs, "name") .map(|attr| attr.parse_args().map_err(|e| Error::new_spanned(attr, format!("Invalid `name` value for argument '{ident}': {e}")))) .transpose()?; let widget_override = extract_attribute(attrs, "widget") .map(|attr| { attr.parse_args() .map_err(|e| Error::new_spanned(attr, format!("Invalid `widget override` value for argument '{ident}': {e}"))) }) .transpose()? 
.unwrap_or_default(); let exposed = extract_attribute(attrs, "expose").is_some(); let value_source = match (default_value, scope) { (Some(_), Some(_)) => return Err(Error::new_spanned(&pat_ident, "Cannot have both `default` and `scope` attributes")), (Some(default_value), _) => ParsedValueSource::Default(default_value), (_, Some(scope)) => ParsedValueSource::Scope(scope), _ => ParsedValueSource::None, }; let number_soft_min = extract_attribute(attrs, "soft_min") .map(|attr| { attr.parse_args() .map_err(|e| Error::new_spanned(attr, format!("Invalid numerical `soft_min` value for argument '{ident}': {e}"))) }) .transpose()?; let number_soft_max = extract_attribute(attrs, "soft_max") .map(|attr| { attr.parse_args() .map_err(|e| Error::new_spanned(attr, format!("Invalid numerical `soft_max` value for argument '{ident}': {e}"))) }) .transpose()?; let number_hard_min = extract_attribute(attrs, "hard_min") .map(|attr| { attr.parse_args() .map_err(|e| Error::new_spanned(attr, format!("Invalid numerical `hard_min` value for argument '{ident}': {e}"))) }) .transpose()?; let number_hard_max = extract_attribute(attrs, "hard_max") .map(|attr| { attr.parse_args() .map_err(|e| Error::new_spanned(attr, format!("Invalid numerical `hard_max` value for argument '{ident}': {e}"))) }) .transpose()?; let number_mode_range = extract_attribute(attrs, "range") .map(|attr| { attr.parse_args::<ExprTuple>().map_err(|e| { Error::new_spanned( attr, format!("Invalid `range` tuple of min and max range slider values for argument '{ident}': {e}\nUSAGE EXAMPLE: #[range((0., 100.))]"), ) }) }) .transpose()?; if let Some(range) = &number_mode_range && range.elems.len() != 2 { return Err(Error::new_spanned(range, "Expected a tuple of two values for `range` for the min and max, respectively")); } let unit = extract_attribute(attrs, "unit") .map(|attr| attr.parse_args::<LitStr>().map_err(|_e| Error::new_spanned(attr, "Expected a unit type as string".to_string()))) .transpose()?; let 
number_display_decimal_places = extract_attribute(attrs, "display_decimal_places") .map(|attr| { attr.parse_args::<LitInt>().map_err(|e| { Error::new_spanned( attr, format!("Invalid `integer` for number of decimals for argument '{ident}': {e}\nUSAGE EXAMPLE: #[display_decimal_places(2)]"), ) }) }) .transpose()? .map(|f| { if let Err(e) = f.base10_parse::<u32>() { Err(Error::new_spanned(f, format!("Expected a `u32` for `display_decimal_places` for '{ident}': {e}"))) } else { Ok(f) } }) .transpose()?; let number_step = extract_attribute(attrs, "step") .map(|attr| { attr.parse_args::<LitFloat>() .map_err(|e| Error::new_spanned(attr, format!("Invalid `step` for argument '{ident}': {e}\nUSAGE EXAMPLE: #[step(2.)]"))) }) .transpose()?; let gpu_image = extract_attribute(attrs, "gpu_image").is_some(); let (is_node, node_input_type, node_output_type) = parse_node_type(&ty); let description = attrs .iter() .filter_map(|a| { if a.style != AttrStyle::Outer { return None; } let Meta::NameValue(name_val) = &a.meta else { return None }; if name_val.path.get_ident().map(|x| x.to_string()) != Some("doc".into()) { return None; } let Expr::Lit(expr_lit) = &name_val.value else { return None }; let Lit::Str(ref text) = expr_lit.lit else { return None }; Some(text.value().trim().to_string()) }) .fold(String::new(), |acc, b| acc + &b + "\n"); if is_node { let (input_type, output_type) = node_input_type .zip(node_output_type) .ok_or_else(|| Error::new_spanned(&ty, "Invalid Node type. Expected `impl Node<Input, Output = OutputType>`"))?; if !matches!(&value_source, ParsedValueSource::None) { return Err(Error::new_spanned(&ty, "No default values for `impl Node` allowed")); } let implementations = extract_attribute(attrs, "implementations") .map(|attr| parse_node_implementations(attr, ident)) .transpose()? 
.unwrap_or_default(); Ok(ParsedField { pat_ident, ty: ParsedFieldType::Node(NodeParsedField { input_type, output_type, implementations, }), name, description, widget_override, number_display_decimal_places, number_step, unit, }) } else { let implementations = extract_attribute(attrs, "implementations") .map(|attr| parse_implementations(attr, ident)) .transpose()? .unwrap_or_default(); Ok(ParsedField { pat_ident, ty: ParsedFieldType::Regular(RegularParsedField { exposed, number_soft_min, number_soft_max, number_hard_min, number_hard_max, number_mode_range, ty, value_source, implementations, gpu_image, }), name, description, widget_override, number_display_decimal_places, number_step, unit, }) } } fn parse_node_type(ty: &Type) -> (bool, Option<Type>, Option<Type>) { if let Type::ImplTrait(impl_trait) = ty { for bound in &impl_trait.bounds { if let syn::TypeParamBound::Trait(trait_bound) = bound && trait_bound.path.segments.last().is_some_and(|seg| seg.ident == "Node") && let syn::PathArguments::AngleBracketed(args) = &trait_bound.path.segments.last().unwrap().arguments { let input_type = args.args.iter().find_map(|arg| if let syn::GenericArgument::Type(ty) = arg { Some(ty.clone()) } else { None }); let output_type = args.args.iter().find_map(|arg| { if let syn::GenericArgument::AssocType(assoc_type) = arg { if assoc_type.ident == "Output" { Some(assoc_type.ty.clone()) } else { None } } else { None } }); return (true, input_type, output_type); } } } (false, None, None) } fn parse_output(output: &ReturnType) -> syn::Result<Type> { match output { ReturnType::Default => Ok(syn::parse_quote!(())), ReturnType::Type(_, ty) => Ok((**ty).clone()), } } fn extract_attribute<'a>(attrs: &'a [Attribute], name: &str) -> Option<&'a Attribute> { attrs.iter().find(|attr| attr.path().is_ident(name)) } // Modify the new_node_fn function to use the code generation pub fn new_node_fn(attr: TokenStream2, item: TokenStream2) -> syn::Result<TokenStream2> { let crate_ident = 
CrateIdent::default(); let mut parsed_node = parse_node_fn(attr, item.clone()).map_err(|e| Error::new(e.span(), format!("Failed to parse node function: {e}")))?; parsed_node.replace_impl_trait_in_input(); crate::validation::validate_node_fn(&parsed_node).map_err(|e| Error::new(e.span(), format!("Validation Error: {e}")))?; generate_node_code(&crate_ident, &parsed_node).map_err(|e| Error::new(e.span(), format!("Failed to generate node code: {e}"))) } impl ParsedNodeFn { pub fn replace_impl_trait_in_input(&mut self) { if let Type::ImplTrait(impl_trait) = self.input.ty.clone() { let ident = Ident::new("_Input", impl_trait.span()); let mut bounds = impl_trait.bounds; bounds.push(parse_quote!('n)); self.fn_generics.push(GenericParam::Type(TypeParam { attrs: Default::default(), ident: ident.clone(), colon_token: Some(Default::default()), bounds, eq_token: None, default: None, })); self.input.ty = parse_quote!(#ident); if self.input.implementations.is_empty() { self.input.implementations.push(parse_quote!(gcore::Context)); } } if self.input.pat_ident.ident == "_" { self.input.pat_ident.ident = Ident::new("__ctx", self.input.pat_ident.ident.span()); } } } #[cfg(test)] mod tests { use super::*; use proc_macro2::Span; use quote::{quote, quote_spanned}; use syn::parse_quote; fn pat_ident(name: &str) -> PatIdent { PatIdent { attrs: Vec::new(), by_ref: None, mutability: None, ident: Ident::new(name, Span::call_site()), subpat: None, } } fn assert_parsed_node_fn(parsed: &ParsedNodeFn, expected: &ParsedNodeFn) { assert_eq!(parsed.fn_name, expected.fn_name); assert_eq!(parsed.struct_name, expected.struct_name); assert_eq!(parsed.mod_name, expected.mod_name); assert_eq!(parsed.is_async, expected.is_async); assert_eq!(format!("{:?}", parsed.input), format!("{:?}", expected.input)); assert_eq!(format!("{:?}", parsed.output_type), format!("{:?}", expected.output_type)); assert_eq!(parsed.attributes.category, expected.attributes.category); assert_eq!(parsed.attributes.display_name, 
expected.attributes.display_name); assert_eq!(parsed.attributes.path, expected.attributes.path); assert_eq!(parsed.attributes.skip_impl, expected.attributes.skip_impl); assert_eq!(parsed.fields.len(), expected.fields.len()); assert_eq!(parsed.description, expected.description); for (parsed_field, expected_field) in parsed.fields.iter().zip(expected.fields.iter()) { match (parsed_field, expected_field) { ( ParsedField { pat_ident: p_name, ty: ParsedFieldType::Regular(RegularParsedField { ty: p_ty, exposed: p_exp, value_source: p_default, .. }), .. }, ParsedField { pat_ident: e_name, ty: ParsedFieldType::Regular(RegularParsedField { ty: e_ty, exposed: e_exp, value_source: e_default, .. }), .. }, ) => { assert_eq!(p_name, e_name); assert_eq!(p_exp, e_exp); match (p_default, e_default) { (ParsedValueSource::None, ParsedValueSource::None) => {} (ParsedValueSource::Default(p), ParsedValueSource::Default(e)) => { assert_eq!(p.to_token_stream().to_string(), e.to_token_stream().to_string()); } (ParsedValueSource::Scope(p), ParsedValueSource::Scope(e)) => { assert_eq!(p.value(), e.value()); } _ => panic!("Mismatched default values"), } assert_eq!(format!("{p_ty:?}"), format!("{:?}", e_ty)); } ( ParsedField { pat_ident: p_name, ty: ParsedFieldType::Node(NodeParsedField { input_type: p_input, output_type: p_output, .. }), .. }, ParsedField { pat_ident: e_name, ty: ParsedFieldType::Node(NodeParsedField { input_type: e_input, output_type: e_output, .. }), .. 
}, ) => { assert_eq!(p_name, e_name); assert_eq!(format!("{p_input:?}"), format!("{:?}", e_input)); assert_eq!(format!("{p_output:?}"), format!("{:?}", e_output)); } _ => panic!("Mismatched field types"), } } } #[test] fn test_basic_node() { let attr = quote!(category("Math: Arithmetic"), path(core_types::TestNode), skip_impl); let input = quote!( /// Multi /// Line fn add(a: f64, b: f64) -> f64 { a + b } ); let parsed = parse_node_fn(attr, input).unwrap(); let expected = ParsedNodeFn { vis: Visibility::Inherited, attributes: NodeFnAttributes { category: Some(parse_quote!("Math: Arithmetic")), display_name: None, path: Some(parse_quote!(core_types::TestNode)), skip_impl: true, properties_string: None, cfg: None, shader_node: None, }, fn_name: Ident::new("add", Span::call_site()), struct_name: Ident::new("Add", Span::call_site()), mod_name: Ident::new("add", Span::call_site()), fn_generics: vec![], where_clause: None, input: Input { pat_ident: pat_ident("a"), ty: parse_quote!(f64), implementations: Punctuated::new(), context_features: vec![], }, output_type: parse_quote!(f64), is_async: false, fields: vec![ParsedField { pat_ident: pat_ident("b"), name: None, description: String::new(), widget_override: ParsedWidgetOverride::None, ty: ParsedFieldType::Regular(RegularParsedField { ty: parse_quote!(f64), exposed: false, value_source: ParsedValueSource::None, number_soft_min: None, number_soft_max: None, number_hard_min: None, number_hard_max: None, number_mode_range: None, implementations: Punctuated::new(), gpu_image: false, }), number_display_decimal_places: None, number_step: None, unit: None, }], body: TokenStream2::new(), description: String::from("Multi\nLine\n"), }; assert_parsed_node_fn(&parsed, &expected); } #[test] fn test_node_with_impl_node() { let attr = quote!(category("General")); let input = quote!( /** Hello World */ fn transform<T: 'static>(footprint: Footprint, transform_target: impl Node<Footprint, Output = T>, translate: DVec2) -> T { // 
Implementation details... } ); let parsed = parse_node_fn(attr, input).unwrap(); let expected = ParsedNodeFn { vis: Visibility::Inherited, attributes: NodeFnAttributes { category: Some(parse_quote!("General")), display_name: None, path: None, skip_impl: false, properties_string: None, cfg: None, shader_node: None, }, fn_name: Ident::new("transform", Span::call_site()), struct_name: Ident::new("Transform", Span::call_site()), mod_name: Ident::new("transform", Span::call_site()), fn_generics: vec![parse_quote!(T: 'static)], where_clause: None, input: Input { pat_ident: pat_ident("footprint"), ty: parse_quote!(Footprint), implementations: Punctuated::new(), context_features: vec![], }, output_type: parse_quote!(T), is_async: false, fields: vec![ ParsedField { pat_ident: pat_ident("transform_target"), name: None, description: String::new(), widget_override: ParsedWidgetOverride::None, ty: ParsedFieldType::Node(NodeParsedField { input_type: parse_quote!(Footprint), output_type: parse_quote!(T), implementations: Punctuated::new(), }), number_display_decimal_places: None, number_step: None, unit: None, }, ParsedField { pat_ident: pat_ident("translate"), name: None, description: String::new(), widget_override: ParsedWidgetOverride::None, ty: ParsedFieldType::Regular(RegularParsedField { ty: parse_quote!(DVec2), exposed: false, value_source: ParsedValueSource::None, number_soft_min: None, number_soft_max: None, number_hard_min: None, number_hard_max: None, number_mode_range: None, implementations: Punctuated::new(), gpu_image: false, }), number_display_decimal_places: None, number_step: None, unit: None, }, ], body: TokenStream2::new(), description: String::from("Hello\n\t\t\t\tWorld\n"), }; assert_parsed_node_fn(&parsed, &expected); } #[test] fn test_node_with_default_values() { let attr = quote!(category("Vector: Shape")); let input = quote!( /// Test fn circle(_: impl Ctx + ExtractFootprint, #[default(50.)] radius: f64) -> Vector { // Implementation details... 
} ); let parsed = parse_node_fn(attr, input).unwrap(); let expected = ParsedNodeFn { vis: Visibility::Inherited, attributes: NodeFnAttributes { category: Some(parse_quote!("Vector: Shape")), display_name: None, path: None, skip_impl: false, properties_string: None, cfg: None, shader_node: None, }, fn_name: Ident::new("circle", Span::call_site()), struct_name: Ident::new("Circle", Span::call_site()), mod_name: Ident::new("circle", Span::call_site()), fn_generics: vec![], where_clause: None, input: Input { pat_ident: pat_ident("_"), ty: parse_quote!(impl Ctx + ExtractFootprint), implementations: Punctuated::new(), context_features: vec![format_ident!("ExtractFootprint")], }, output_type: parse_quote!(Vector), is_async: false, fields: vec![ParsedField { pat_ident: pat_ident("radius"), name: None, description: String::new(), widget_override: ParsedWidgetOverride::None, ty: ParsedFieldType::Regular(RegularParsedField { ty: parse_quote!(f64), exposed: false, value_source: ParsedValueSource::Default(quote!(50.)), number_soft_min: None, number_soft_max: None, number_hard_min: None, number_hard_max: None, number_mode_range: None, implementations: Punctuated::new(), gpu_image: false, }), number_display_decimal_places: None, number_step: None, unit: None, }], body: TokenStream2::new(), description: "Test\n".into(), }; assert_parsed_node_fn(&parsed, &expected); } #[test] fn test_node_with_implementations() { let attr = quote!(category("Raster: Adjustment")); let input = quote!( fn levels<P: Pixel>(image: Table<Raster<P>>, #[implementations(f32, f64)] shadows: f64) -> Table<Raster<P>> { // Implementation details... 
} ); let parsed = parse_node_fn(attr, input).unwrap(); let expected = ParsedNodeFn { vis: Visibility::Inherited, attributes: NodeFnAttributes { category: Some(parse_quote!("Raster: Adjustment")), display_name: None, path: None, skip_impl: false, properties_string: None, cfg: None, shader_node: None, }, fn_name: Ident::new("levels", Span::call_site()), struct_name: Ident::new("Levels", Span::call_site()), mod_name: Ident::new("levels", Span::call_site()), fn_generics: vec![parse_quote!(P: Pixel)], where_clause: None, input: Input { pat_ident: pat_ident("image"), ty: parse_quote!(Table<Raster<P>>),
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
true
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/derive_choice_type.rs
node-graph/node-macro/src/derive_choice_type.rs
use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; use syn::parse::Parse; use syn::{Attribute, DeriveInput, Expr, LitStr, Meta}; pub fn derive_choice_type_impl(input_item: TokenStream) -> syn::Result<TokenStream> { let input = syn::parse2::<DeriveInput>(input_item).unwrap(); match input.data { syn::Data::Enum(data_enum) => derive_enum(&input.attrs, input.ident, data_enum), _ => Err(syn::Error::new(input.ident.span(), "Only enums are supported at the moment")), } } struct Type { basic_item: BasicItem, widget_hint: WidgetHint, } enum WidgetHint { Radio, Dropdown, } impl Parse for WidgetHint { fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> { let tokens: Ident = input.parse()?; if tokens == "Radio" { Ok(Self::Radio) } else if tokens == "Dropdown" { Ok(Self::Dropdown) } else { Err(syn::Error::new_spanned(tokens, "Widget must be either Radio or Dropdown")) } } } #[derive(Default)] struct BasicItem { label: String, description: Option<String>, icon: Option<String>, } impl BasicItem { fn read_attribute(&mut self, attribute: &Attribute) -> syn::Result<()> { if attribute.path().is_ident("label") { let token: LitStr = attribute.parse_args()?; self.label = token.value(); } if attribute.path().is_ident("icon") { let token: LitStr = attribute.parse_args()?; self.icon = Some(token.value()); } if attribute.path().is_ident("doc") && let Meta::NameValue(meta_name_value) = &attribute.meta && let Expr::Lit(el) = &meta_name_value.value && let syn::Lit::Str(token) = &el.lit { self.description = Some(token.value()); } Ok(()) } } struct Variant { name: Ident, basic_item: BasicItem, } fn derive_enum(enum_attributes: &[Attribute], name: Ident, input: syn::DataEnum) -> syn::Result<TokenStream> { let mut enum_info = Type { basic_item: BasicItem::default(), widget_hint: WidgetHint::Dropdown, }; for attribute in enum_attributes { enum_info.basic_item.read_attribute(attribute)?; if attribute.path().is_ident("widget") { enum_info.widget_hint = attribute.parse_args()?; } 
} let mut variants = vec![Vec::new()]; for variant in &input.variants { let mut basic_item = BasicItem::default(); for attribute in &variant.attrs { if attribute.path().is_ident("menu_separator") { attribute.meta.require_path_only()?; variants.push(Vec::new()); } basic_item.read_attribute(attribute)?; } if basic_item.label.is_empty() { basic_item.label = ident_to_label(&variant.ident); } variants.last_mut().unwrap().push(Variant { name: variant.ident.clone(), basic_item, }) } let display_arm: Vec<_> = variants .iter() .flat_map(|variants| variants.iter()) .map(|variant| { let variant_name = &variant.name; let variant_label = &variant.basic_item.label; quote! { #name::#variant_name => write!(f, #variant_label), } }) .collect(); let crate_name = { let crate_name = proc_macro_crate::crate_name("no-std-types").or_else(|_e| proc_macro_crate::crate_name("core-types")).map_err(|e| { syn::Error::new( Span::call_site(), format!("Failed to find location of 'no-std-types' or 'core-types'. Make sure it is imported as a dependency: {e}"), ) })?; match crate_name { proc_macro_crate::FoundCrate::Itself => quote!(crate), proc_macro_crate::FoundCrate::Name(name) => { let identifier = Ident::new(&name, Span::call_site()); quote! { #identifier } } } }; let enum_description = match &enum_info.basic_item.description { Some(s) => { let s = s.trim(); quote! { Some(#s) } } None => quote! { None }, }; let group: Vec<_> = variants .iter() .map(|variants| { let items = variants .iter() .map(|variant| { let vname = &variant.name; let vname_str = variant.name.to_string(); let label = &variant.basic_item.label; let description = match &variant.basic_item.description { Some(s) => { let s = s.trim(); quote! { Some(#s) } } None => quote! { None }, }; let icon = match &variant.basic_item.icon { Some(s) => quote! { Some(#s) }, None => quote! { None }, }; quote! 
{ ( #name::#vname, #crate_name::choice_type::VariantMetadata { name: #vname_str, label: #label, description: #description, icon: #icon, } ), } }) .collect::<Vec<_>>(); quote! { &[ #(#items)* ], } }) .collect(); let widget_hint = match enum_info.widget_hint { WidgetHint::Radio => quote! { RadioButtons }, WidgetHint::Dropdown => quote! { Dropdown }, }; Ok(quote! { impl #crate_name::AsU32 for #name { fn as_u32(&self) -> u32 { *self as u32 } } impl #crate_name::choice_type::ChoiceTypeStatic for #name { const WIDGET_HINT: #crate_name::choice_type::ChoiceWidgetHint = #crate_name::choice_type::ChoiceWidgetHint::#widget_hint; const DESCRIPTION: Option<&'static str> = #enum_description; fn list() -> &'static [&'static [(Self, #crate_name::choice_type::VariantMetadata)]] { &[ #(#group)* ] } } impl core::fmt::Display for #name { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { #( #display_arm )* } } } }) } fn ident_to_label(id: &Ident) -> String { use convert_case::{Case, Casing}; id.to_string().from_case(Case::Pascal).to_case(Case::Title) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/crate_ident.rs
node-graph/node-macro/src/crate_ident.rs
use proc_macro_crate::{FoundCrate, crate_name}; use proc_macro2::{Span, TokenStream}; use quote::{format_ident, quote}; pub struct CrateIdent { gcore: syn::Result<TokenStream>, gcore_shaders: syn::Result<TokenStream>, raster_types: syn::Result<TokenStream>, wgpu_executor: syn::Result<TokenStream>, } impl CrateIdent { pub fn gcore(&self) -> syn::Result<&TokenStream> { self.gcore.as_ref().map_err(Clone::clone) } pub fn gcore_shaders(&self) -> syn::Result<&TokenStream> { self.gcore_shaders.as_ref().map_err(Clone::clone) } pub fn raster_types(&self) -> syn::Result<&TokenStream> { self.raster_types.as_ref().map_err(Clone::clone) } pub fn wgpu_executor(&self) -> syn::Result<&TokenStream> { self.wgpu_executor.as_ref().map_err(Clone::clone) } } impl Default for CrateIdent { fn default() -> Self { let find_crate = |orig_name| match crate_name(orig_name) { Ok(FoundCrate::Itself) => Ok(quote!(crate)), Ok(FoundCrate::Name(name)) => { let name = format_ident!("{}", name); Ok(quote!(::#name)) } Err(e) => Err(syn::Error::new(Span::call_site(), format!("Could not find dependency on `{orig_name}`:\n{e}"))), }; let gcore = find_crate("core-types"); let gcore_shaders = find_crate("no-std-types").or_else(|eshaders| gcore.clone().map_err(|ecore| syn::Error::new(Span::call_site(), format!("{ecore}\n\nFallback: {eshaders}")))); let raster_types = find_crate("raster-types"); let wgpu_executor = find_crate("wgpu-executor"); Self { gcore, gcore_shaders, raster_types, wgpu_executor, } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/shader_nodes/per_pixel_adjust.rs
node-graph/node-macro/src/shader_nodes/per_pixel_adjust.rs
use crate::crate_ident::CrateIdent; use crate::parsing::{Input, NodeFnAttributes, ParsedField, ParsedFieldType, ParsedNodeFn, ParsedValueSource, RegularParsedField}; use crate::shader_nodes::{SHADER_NODES_FEATURE_GATE, ShaderCodegen, ShaderNodeType, ShaderTokens}; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::{ToTokens, format_ident, quote}; use std::borrow::Cow; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::{LitStr, PatIdent, Type, parse_quote}; #[derive(Debug, Clone)] pub struct PerPixelAdjust {} impl Parse for PerPixelAdjust { fn parse(_input: ParseStream) -> syn::Result<Self> { Ok(Self {}) } } impl ShaderCodegen for PerPixelAdjust { fn codegen(&self, crate_ident: &CrateIdent, parsed: &ParsedNodeFn) -> syn::Result<ShaderTokens> { let fn_name = &parsed.fn_name; let mut params; let has_uniform; { // categorize params params = parsed .fields .iter() .map(|f| { let ident = &f.pat_ident; match &f.ty { ParsedFieldType::Node { .. } => Err(syn::Error::new_spanned(ident, "PerPixelAdjust shader nodes cannot accept other nodes as generics")), ParsedFieldType::Regular(RegularParsedField { gpu_image: false, ty, .. }) => Ok(Param { ident: Cow::Borrowed(&ident.ident), ty: ty.to_token_stream(), param_type: ParamType::Uniform, }), ParsedFieldType::Regular(RegularParsedField { gpu_image: true, .. 
}) => { let param = Param { ident: Cow::Owned(format_ident!("image_{}", &ident.ident)), ty: quote!(Image2d), param_type: ParamType::Image { binding: 0 }, }; Ok(param) } } }) .collect::<syn::Result<Vec<_>>>()?; has_uniform = params.iter().any(|p| matches!(p.param_type, ParamType::Uniform)); // assign image bindings // if an arg_buffer exists, bindings for images start at 1 to leave 0 for arg buffer let mut binding_cnt = if has_uniform { 1 } else { 0 }; for p in params.iter_mut() { match &mut p.param_type { ParamType::Image { binding } => { *binding = binding_cnt; binding_cnt += 1; } ParamType::Uniform => {} } } } let entry_point_mod = format_ident!("{}_gpu_entry_point", fn_name); let entry_point_name_ident = format_ident!("ENTRY_POINT_NAME"); let entry_point_name = quote!(#entry_point_mod::#entry_point_name_ident); let uniform_struct_ident = format_ident!("Uniform"); let uniform_struct = quote!(#entry_point_mod::#uniform_struct_ident); let shader_node_mod = format_ident!("{}_shader_node", fn_name); let codegen = PerPixelAdjustCodegen { crate_ident, parsed, params, has_uniform, entry_point_mod, entry_point_name_ident, entry_point_name, uniform_struct_ident, uniform_struct, shader_node_mod, }; Ok(ShaderTokens { shader_entry_point: codegen.codegen_shader_entry_point()?, gpu_node: codegen.codegen_gpu_node()?, }) } } pub struct PerPixelAdjustCodegen<'a> { crate_ident: &'a CrateIdent, parsed: &'a ParsedNodeFn, params: Vec<Param<'a>>, has_uniform: bool, entry_point_mod: Ident, entry_point_name_ident: Ident, entry_point_name: TokenStream, uniform_struct_ident: Ident, uniform_struct: TokenStream, shader_node_mod: Ident, } impl PerPixelAdjustCodegen<'_> { fn codegen_shader_entry_point(&self) -> syn::Result<TokenStream> { let fn_name = &self.parsed.fn_name; let gcore_shaders = self.crate_ident.gcore_shaders()?; let reexport = quote!(#gcore_shaders::shaders::__private); let uniform_members = self .params .iter() .filter_map(|Param { ident, ty, param_type }| match param_type { 
ParamType::Image { .. } => None, ParamType::Uniform => Some(quote! {#ident: #ty}), }) .collect::<Vec<_>>(); let uniform_struct_ident = &self.uniform_struct_ident; let uniform_struct = parse_quote! { #[repr(C)] #[derive(Copy, Clone)] pub struct #uniform_struct_ident { #(pub #uniform_members),* } }; let uniform_struct_shader_struct_derive = crate::buffer_struct::derive_buffer_struct_struct(self.crate_ident, &uniform_struct)?; let image_params = self .params .iter() .filter_map(|Param { ident, ty, param_type }| match param_type { ParamType::Image { binding } => Some(quote! {#[spirv(descriptor_set = 0, binding = #binding)] #ident: &#ty}), ParamType::Uniform => None, }) .collect::<Vec<_>>(); let call_args = self .params .iter() .map(|Param { ident, param_type, .. }| match param_type { ParamType::Image { .. } => quote!(Color::from_vec4(#ident.fetch_with(texel_coord, lod(0)))), ParamType::Uniform => quote!(uniform.#ident), }) .collect::<Vec<_>>(); let context = quote!(()); let entry_point_mod = &self.entry_point_mod; let entry_point_name = &self.entry_point_name_ident; Ok(quote! 
{ pub mod #entry_point_mod { use super::*; use #gcore_shaders::color::Color; use #reexport::glam::{Vec4, Vec4Swizzles}; use #reexport::spirv_std::spirv; use #reexport::spirv_std::image::{Image2d, ImageWithMethods}; use #reexport::spirv_std::image::sample_with::lod; pub const #entry_point_name: &str = core::concat!(core::module_path!(), "::entry_point"); #uniform_struct #uniform_struct_shader_struct_derive #[spirv(fragment)] pub fn entry_point( #[spirv(frag_coord)] frag_coord: Vec4, color_out: &mut Vec4, #[spirv(descriptor_set = 0, binding = 0, storage_buffer)] uniform: &UniformBuffer, #(#image_params),* ) { let uniform = <Uniform as #gcore_shaders::shaders::buffer_struct::BufferStruct>::read(*uniform); let texel_coord = frag_coord.xy().as_uvec2(); let color: Color = #fn_name(#context, #(#call_args),*); *color_out = color.to_vec4(); } } }) } fn codegen_gpu_node(&self) -> syn::Result<TokenStream> { let gcore = self.crate_ident.gcore()?; let raster_types = self.crate_ident.raster_types()?; let wgpu_executor = self.crate_ident.wgpu_executor()?; // adapt fields for gpu node let raster_gpu: Type = parse_quote!(#gcore::table::Table<#raster_types::Raster<#raster_types::GPU>>); let mut fields = self .parsed .fields .iter() .map(|f| match &f.ty { ParsedFieldType::Regular(reg @ RegularParsedField { gpu_image: true, .. }) => Ok(ParsedField { pat_ident: PatIdent { mutability: None, by_ref: None, ..f.pat_ident.clone() }, ty: ParsedFieldType::Regular(RegularParsedField { ty: raster_gpu.clone(), implementations: Punctuated::default(), ..reg.clone() }), ..f.clone() }), ParsedFieldType::Regular(RegularParsedField { gpu_image: false, .. }) => Ok(ParsedField { pat_ident: PatIdent { mutability: None, by_ref: None, ..f.pat_ident.clone() }, ..f.clone() }), ParsedFieldType::Node { .. 
} => Err(syn::Error::new_spanned(&f.pat_ident, "PerPixelAdjust shader nodes cannot accept other nodes as generics")), }) .collect::<syn::Result<Vec<_>>>()?; // insert wgpu_executor field let executor = format_ident!("__wgpu_executor"); fields.push(ParsedField { pat_ident: PatIdent { attrs: vec![], by_ref: None, mutability: None, ident: parse_quote!(#executor), subpat: None, }, name: None, description: "".to_string(), widget_override: Default::default(), ty: ParsedFieldType::Regular(RegularParsedField { ty: parse_quote!(&'a WgpuExecutor), exposed: true, value_source: ParsedValueSource::Scope(LitStr::new("wgpu-executor", Span::call_site())), number_soft_min: None, number_soft_max: None, number_hard_min: None, number_hard_max: None, number_mode_range: None, implementations: Default::default(), gpu_image: false, }), number_display_decimal_places: None, number_step: None, unit: None, }); // find exactly one gpu_image field, runtime doesn't support more than 1 atm let gpu_image_field = { let mut iter = fields.iter().filter(|f| matches!(f.ty, ParsedFieldType::Regular(RegularParsedField { gpu_image: true, .. }))); match (iter.next(), iter.next()) { (Some(v), None) => Ok(v), (Some(_), Some(more)) => Err(syn::Error::new_spanned(&more.pat_ident, "No more than one parameter must be annotated with `#[gpu_image]`")), (None, _) => Err(syn::Error::new_spanned(&self.parsed.fn_name, "At least one parameter must be annotated with `#[gpu_image]`")), }? }; let gpu_image = &gpu_image_field.pat_ident.ident; // uniform buffer struct construction let has_uniform = self.has_uniform; let uniform_buffer = if has_uniform { let uniform_struct = &self.uniform_struct; let uniform_members = self .params .iter() .filter_map(|p| match p.param_type { ParamType::Image { .. 
} => None, ParamType::Uniform => Some(p.ident.as_ref()), }) .collect::<Vec<_>>(); quote!(Some(&super::#uniform_struct { #(#uniform_members),* })) } else { // explicit generics placed here cause it's easier than explicitly writing `run_per_pixel_adjust::<()>` quote!(Option::<&()>::None) }; // node function body let entry_point_name = &self.entry_point_name; let body = quote! { { #executor.shader_runtime.run_per_pixel_adjust(&::wgpu_executor::shader_runtime::per_pixel_adjust_runtime::Shaders { wgsl_shader: crate::WGSL_SHADER, fragment_shader_name: super::#entry_point_name, has_uniform: #has_uniform, }, #gpu_image, #uniform_buffer).await } }; // call node codegen let display_name = self.parsed.attributes.display_name.clone(); let display_name = display_name.unwrap_or_else(|| LitStr::new(&self.shader_node_mod.to_string().strip_suffix("_shader_node").unwrap().to_case(Case::Title), Span::call_site())); let display_name = LitStr::new(&format!("{} GPU", display_name.value()), display_name.span()); let mut parsed_node_fn = ParsedNodeFn { vis: self.parsed.vis.clone(), attributes: NodeFnAttributes { display_name: Some(display_name), shader_node: Some(ShaderNodeType::ShaderNode), ..self.parsed.attributes.clone() }, fn_name: self.shader_node_mod.clone(), struct_name: format_ident!("{}", self.shader_node_mod.to_string().to_case(Case::Pascal)), mod_name: self.shader_node_mod.clone(), fn_generics: vec![parse_quote!('a: 'n)], where_clause: None, input: Input { pat_ident: self.parsed.input.pat_ident.clone(), ty: parse_quote!(impl #gcore::context::Ctx), implementations: Default::default(), context_features: self.parsed.input.context_features.clone(), }, output_type: raster_gpu, is_async: true, fields, body, description: self.parsed.description.clone(), }; parsed_node_fn.replace_impl_trait_in_input(); let gpu_node_impl = crate::codegen::generate_node_code(self.crate_ident, &parsed_node_fn)?; // wrap node in `mod #gpu_node_mod` let shader_node_mod = &self.shader_node_mod; Ok(quote! 
{ #[cfg(feature = #SHADER_NODES_FEATURE_GATE)] mod #shader_node_mod { use super::*; use #wgpu_executor::WgpuExecutor; #gpu_node_impl } }) } } struct Param<'a> { ident: Cow<'a, Ident>, ty: TokenStream, param_type: ParamType, } enum ParamType { Image { binding: u32 }, Uniform, }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/node-macro/src/shader_nodes/mod.rs
node-graph/node-macro/src/shader_nodes/mod.rs
use crate::crate_ident::CrateIdent; use crate::parsing::{NodeFnAttributes, ParsedNodeFn}; use crate::shader_nodes::per_pixel_adjust::PerPixelAdjust; use proc_macro2::{Ident, TokenStream}; use quote::quote; use strum::VariantNames; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::{Error, Token}; pub mod per_pixel_adjust; pub const STD_FEATURE_GATE: &str = "std"; pub const SHADER_NODES_FEATURE_GATE: &str = "shader-nodes"; pub fn modify_cfg(attributes: &NodeFnAttributes) -> TokenStream { let feature_gate = match &attributes.shader_node { // shader node cfg is done on the mod Some(ShaderNodeType::ShaderNode) => quote!(), Some(_) => quote!(feature = #STD_FEATURE_GATE), None => quote!(), }; let cfgs: Punctuated<_, Token![,]> = match &attributes.cfg { None => [&feature_gate].into_iter().collect(), Some(cfg) => [cfg, &feature_gate].into_iter().collect(), }; quote!(#[cfg(all(#cfgs))]) } #[derive(Debug, Clone, VariantNames)] pub(crate) enum ShaderNodeType { /// Marker for this node being in a gpu node crate, but not having a gpu implementation. This is distinct from not /// declaring `shader_node` at all, as it will wrap the CPU node with a `#[cfg(feature = "std")]` feature gate. None, /// Marker for this node being a generated gpu node implementation, that should not emit anything to prevent /// recursively generating more gpu nodes. But it still counts as a gpu node and will get the /// `#[cfg(feature = "std")]` feature gate around it's impl. 
ShaderNode, PerPixelAdjust(PerPixelAdjust), } impl Parse for ShaderNodeType { fn parse(input: ParseStream) -> syn::Result<Self> { let ident: Ident = input.parse()?; Ok(match ident.to_string().as_str() { "None" => ShaderNodeType::None, "PerPixelAdjust" => ShaderNodeType::PerPixelAdjust(PerPixelAdjust::parse(input)?), _ => return Err(Error::new_spanned(&ident, format!("attr 'shader_node' must be one of {:?}", Self::VARIANTS))), }) } } pub trait ShaderCodegen { fn codegen(&self, crate_ident: &CrateIdent, parsed: &ParsedNodeFn) -> syn::Result<ShaderTokens>; } impl ShaderCodegen for ShaderNodeType { fn codegen(&self, crate_ident: &CrateIdent, parsed: &ParsedNodeFn) -> syn::Result<ShaderTokens> { match self { ShaderNodeType::None | ShaderNodeType::ShaderNode => (), _ => { if parsed.is_async { return Err(Error::new_spanned(&parsed.fn_name, "Shader nodes must not be async")); } } } match self { ShaderNodeType::None | ShaderNodeType::ShaderNode => Ok(ShaderTokens::default()), ShaderNodeType::PerPixelAdjust(x) => x.codegen(crate_ident, parsed), } } } #[derive(Clone, Default)] pub struct ShaderTokens { pub shader_entry_point: TokenStream, pub gpu_node: TokenStream, }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graphene-cli/src/main.rs
node-graph/graphene-cli/src/main.rs
mod export; use clap::{Args, Parser, Subcommand}; use fern::colors::{Color, ColoredLevelConfig}; use futures::executor::block_on; use graph_craft::document::*; use graph_craft::graphene_compiler::Compiler; use graph_craft::proto::ProtoNetwork; use graph_craft::util::load_network; use graph_craft::wasm_application_io::EditorPreferences; use graphene_std::application_io::{ApplicationIo, NodeGraphUpdateMessage, NodeGraphUpdateSender}; use graphene_std::text::FontCache; use graphene_std::wasm_application_io::{WasmApplicationIo, WasmEditorApi}; use interpreted_executor::dynamic_executor::DynamicExecutor; use interpreted_executor::util::wrap_network_in_scope; use std::error::Error; use std::path::PathBuf; use std::sync::Arc; struct UpdateLogger {} impl NodeGraphUpdateSender for UpdateLogger { fn send(&self, message: NodeGraphUpdateMessage) { println!("{message:?}"); } } #[derive(Debug, Parser)] #[clap(name = "graphene-cli", version)] pub struct App { #[clap(flatten)] global_opts: GlobalOpts, #[clap(subcommand)] command: Command, } #[derive(Debug, Subcommand)] enum Command { /// Help message for compile. Compile { /// Print proto network #[clap(long, short = 'p')] print_proto: bool, /// Path to the .graphite document document: PathBuf, }, /// Export a .graphite document to a file (SVG, PNG, or JPG). 
Export { /// Path to the .graphite document document: PathBuf, /// Output file path (extension determines format: .svg, .png, .jpg) #[clap(long, short = 'o')] output: PathBuf, /// Optional input image resource #[clap(long)] image: Option<PathBuf>, /// Scale factor for export (default: 1.0) #[clap(long, default_value = "1.0")] scale: f64, /// Output width in pixels #[clap(long)] width: Option<u32>, /// Output height in pixels #[clap(long)] height: Option<u32>, /// Transparent background for PNG exports #[clap(long)] transparent: bool, }, ListNodeIdentifiers, } #[derive(Debug, Args)] struct GlobalOpts { /// Verbosity level (can be specified multiple times) #[clap(long, short, global = true, action = clap::ArgAction::Count)] verbose: u8, } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let app = App::parse(); let log_level = app.global_opts.verbose; init_logging(log_level); let document_path = match app.command { Command::Compile { ref document, .. } => document, Command::Export { ref document, .. } => document, Command::ListNodeIdentifiers => { let mut ids: Vec<_> = graphene_std::registry::NODE_METADATA.lock().unwrap().keys().cloned().collect(); ids.sort_by_key(|x| x.name.clone()); for id in ids { println!("{}", id.name) } return Ok(()); } }; let document_string = std::fs::read_to_string(document_path).expect("Failed to read document"); log::info!("creating gpu context",); let mut application_io = block_on(WasmApplicationIo::new_offscreen()); if let Command::Export { image: Some(ref image_path), .. 
} = app.command { application_io.resources.insert("null".to_string(), Arc::from(std::fs::read(image_path).expect("Failed to read image"))); } // Convert application_io to Arc first let application_io_arc = Arc::new(application_io); // Clone the application_io Arc before borrowing to extract executor let application_io_for_api = application_io_arc.clone(); // Get reference to wgpu executor and clone device handle let wgpu_executor_ref = application_io_arc.gpu_executor().unwrap(); let device = wgpu_executor_ref.context.device.clone(); let preferences = EditorPreferences { use_vello: true }; let editor_api = Arc::new(WasmEditorApi { font_cache: FontCache::default(), application_io: Some(application_io_for_api), node_graph_message_sender: Box::new(UpdateLogger {}), editor_preferences: Box::new(preferences), }); let proto_graph = compile_graph(document_string, editor_api)?; match app.command { Command::Compile { print_proto, .. } => { if print_proto { println!("{proto_graph}"); } } Command::Export { output, scale, width, height, transparent, .. 
} => { // Spawn thread to poll GPU device std::thread::spawn(move || { loop { std::thread::sleep(std::time::Duration::from_nanos(10)); device.poll(wgpu::PollType::Poll).unwrap(); } }); // Detect output file type let file_type = export::detect_file_type(&output)?; // Create executor let executor = create_executor(proto_graph)?; // Perform export export::export_document(&executor, wgpu_executor_ref, output, file_type, scale, width, height, transparent).await?; } _ => unreachable!("All other commands should be handled before this match statement is run"), } Ok(()) } fn init_logging(log_level: u8) { let default_level = match log_level { 0 => log::LevelFilter::Error, 1 => log::LevelFilter::Info, 2 => log::LevelFilter::Debug, _ => log::LevelFilter::Trace, }; let colors = ColoredLevelConfig::new().debug(Color::Magenta).info(Color::Green).error(Color::Red); fern::Dispatch::new() .chain(std::io::stdout()) .level_for("wgpu", log::LevelFilter::Error) .level_for("naga", log::LevelFilter::Error) .level_for("wgpu_hal", log::LevelFilter::Error) .level_for("wgpu_core", log::LevelFilter::Error) .level(default_level) .format(move |out, message, record| { out.finish(format_args!( "[{}]{}{} {}", // This will color the log level only, not the whole line. Just a touch. colors.color(record.level()), chrono::Utc::now().format("[%Y-%m-%d %H:%M:%S]"), record.module_path().unwrap_or(""), message )) }) .apply() .unwrap(); } // Migrations are done in the editor which is unfortunately not available here. // TODO: remove this and share migrations between the editor and the CLI. 
fn fix_nodes(network: &mut NodeNetwork) { for node in network.nodes.values_mut() { match &mut node.implementation { // Recursively fix DocumentNodeImplementation::Network(network) => fix_nodes(network), // This replicates the migration from the editor linked: // https://github.com/GraphiteEditor/Graphite/blob/d68f91ccca69e90e6d2df78d544d36cd1aaf348e/editor/src/messages/portfolio/portfolio_message_handler.rs#L535 // Since the CLI doesn't have the document node definitions, a less robust method of just patching the inputs is used. DocumentNodeImplementation::ProtoNode(proto_node_identifier) if (proto_node_identifier.name.starts_with("graphene_core::ConstructLayerNode") || proto_node_identifier.name.starts_with("graphene_core::AddArtboardNode")) && node.inputs.len() < 3 => { node.inputs.push(NodeInput::Reflection(DocumentNodeMetadata::DocumentNodePath)); } _ => {} } } } fn compile_graph(document_string: String, editor_api: Arc<WasmEditorApi>) -> Result<ProtoNetwork, Box<dyn Error>> { let mut network = load_network(&document_string); fix_nodes(&mut network); let substitutions = preprocessor::generate_node_substitutions(); preprocessor::expand_network(&mut network, &substitutions); let wrapped_network = wrap_network_in_scope(network.clone(), editor_api); let compiler = Compiler {}; compiler.compile_single(wrapped_network).map_err(|x| x.into()) } fn create_executor(proto_network: ProtoNetwork) -> Result<DynamicExecutor, Box<dyn Error>> { let executor = block_on(DynamicExecutor::new(proto_network)).map_err(|errors| errors.iter().map(|e| format!("{e:?}")).reduce(|acc, e| format!("{acc}\n{e}")).unwrap_or_default())?; Ok(executor) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graphene-cli/src/export.rs
node-graph/graphene-cli/src/export.rs
use graph_craft::document::value::{RenderOutputType, TaggedValue, UVec2}; use graph_craft::graphene_compiler::Executor; use graphene_std::application_io::{ExportFormat, RenderConfig}; use graphene_std::core_types::ops::Convert; use graphene_std::core_types::transform::Footprint; use graphene_std::raster_types::{CPU, GPU, Raster}; use interpreted_executor::dynamic_executor::DynamicExecutor; use std::error::Error; use std::io::Cursor; use std::path::{Path, PathBuf}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum FileType { Svg, Png, Jpg, } pub fn detect_file_type(path: &Path) -> Result<FileType, String> { match path.extension().and_then(|s| s.to_str()) { Some("svg") => Ok(FileType::Svg), Some("png") => Ok(FileType::Png), Some("jpg" | "jpeg") => Ok(FileType::Jpg), _ => Err(format!("Unsupported file extension. Supported formats: .svg, .png, .jpg")), } } pub async fn export_document( executor: &DynamicExecutor, wgpu_executor: &wgpu_executor::WgpuExecutor, output_path: PathBuf, file_type: FileType, scale: f64, width: Option<u32>, height: Option<u32>, transparent: bool, ) -> Result<(), Box<dyn Error>> { // Determine export format based on file type let export_format = match file_type { FileType::Svg => ExportFormat::Svg, _ => ExportFormat::Raster, }; // Create render config with export settings let mut render_config = RenderConfig::default(); render_config.export_format = export_format; render_config.for_export = true; render_config.scale = scale; // Set viewport dimensions if specified if let (Some(w), Some(h)) = (width, height) { render_config.viewport.resolution = UVec2::new(w, h); } // Execute the graph let result = executor.execute(render_config).await?; // Handle the result based on output type match result { TaggedValue::RenderOutput(output) => match output.data { RenderOutputType::Svg { svg, .. 
} => { // Write SVG directly to file std::fs::write(&output_path, svg)?; log::info!("Exported SVG to: {}", output_path.display()); } RenderOutputType::Texture(image_texture) => { // Convert GPU texture to CPU buffer let gpu_raster = Raster::<GPU>::new_gpu(image_texture.texture); let cpu_raster: Raster<CPU> = gpu_raster.convert(Footprint::BOUNDLESS, wgpu_executor).await; let (data, width, height) = cpu_raster.to_flat_u8(); // Encode and write raster image write_raster_image(output_path, file_type, data, width, height, transparent)?; } RenderOutputType::Buffer { data, width, height } => { // Encode and write raster image when buffer is already provided write_raster_image(output_path, file_type, data, width, height, transparent)?; } other => { return Err(format!("Unexpected render output type: {:?}. Expected Texture, Buffer for raster export or Svg for SVG export.", other).into()); } }, other => return Err(format!("Expected RenderOutput, got: {:?}", other).into()), } Ok(()) } fn write_raster_image(output_path: PathBuf, file_type: FileType, data: Vec<u8>, width: u32, height: u32, transparent: bool) -> Result<(), Box<dyn Error>> { use image::{ImageFormat, RgbaImage}; let image = RgbaImage::from_raw(width, height, data).ok_or("Failed to create image from buffer")?; let mut cursor = Cursor::new(Vec::new()); match file_type { FileType::Png => { if transparent { image.write_to(&mut cursor, ImageFormat::Png)?; } else { let image: image::RgbImage = image::DynamicImage::ImageRgba8(image).to_rgb8(); image.write_to(&mut cursor, ImageFormat::Png)?; } log::info!("Exported PNG to: {}", output_path.display()); } FileType::Jpg => { let image: image::RgbImage = image::DynamicImage::ImageRgba8(image).to_rgb8(); image.write_to(&mut cursor, ImageFormat::Jpeg)?; log::info!("Exported JPG to: {}", output_path.display()); } FileType::Svg => unreachable!("SVG should have been handled in export_document"), } std::fs::write(&output_path, cursor.into_inner())?; Ok(()) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/preprocessor/src/lib.rs
node-graph/preprocessor/src/lib.rs
#[macro_use] extern crate log; use graph_craft::document::value::*; use graph_craft::document::*; use graph_craft::proto::RegistryValueSource; use graph_craft::{ProtoNodeIdentifier, concrete}; use graphene_std::registry::*; use graphene_std::*; use std::collections::{HashMap, HashSet}; pub fn expand_network(network: &mut NodeNetwork, substitutions: &HashMap<ProtoNodeIdentifier, DocumentNode>) { if network.generated { return; } for node in network.nodes.values_mut() { match &mut node.implementation { DocumentNodeImplementation::Network(node_network) => expand_network(node_network, substitutions), DocumentNodeImplementation::ProtoNode(proto_node_identifier) => { if let Some(new_node) = substitutions.get(proto_node_identifier) { node.implementation = new_node.implementation.clone(); } } DocumentNodeImplementation::Extract => (), } } } pub fn generate_node_substitutions() -> HashMap<ProtoNodeIdentifier, DocumentNode> { let mut custom = HashMap::new(); // We pre initialize the node registry here to avoid a deadlock let into_node_registry = &*interpreted_executor::node_registry::NODE_REGISTRY; let node_registry = core_types::registry::NODE_REGISTRY.lock().unwrap(); for (id, metadata) in core_types::registry::NODE_METADATA.lock().unwrap().iter() { let id = id.clone(); let NodeMetadata { fields, .. 
} = metadata; let Some(implementations) = &node_registry.get(&id) else { continue }; let valid_inputs: HashSet<_> = implementations.iter().map(|(_, node_io)| node_io.call_argument.clone()).collect(); let first_node_io = implementations.first().map(|(_, node_io)| node_io).unwrap_or(const { &NodeIOTypes::empty() }); let mut node_io_types = vec![HashSet::new(); fields.len()]; for (_, node_io) in implementations.iter() { for (i, ty) in node_io.inputs.iter().enumerate() { node_io_types[i].insert(ty.clone()); } } let mut input_type = &first_node_io.call_argument; if valid_inputs.len() > 1 { input_type = &const { generic!(D) }; } let inputs: Vec<_> = node_inputs(fields, first_node_io); let input_count = inputs.len(); let network_inputs = (0..input_count).map(|i| NodeInput::node(NodeId(i as u64), 0)).collect(); let identity_node = ops::identity::IDENTIFIER; let mut generated_nodes = 0; let mut nodes: HashMap<_, _, _> = node_io_types .iter() .enumerate() .map(|(i, inputs)| { ( NodeId(i as u64), match inputs.len() { 1 => { let input = inputs.iter().next().unwrap(); let input_ty = input.nested_type(); let mut inputs = vec![NodeInput::import(input.clone(), i)]; let into_node_identifier = ProtoNodeIdentifier { name: format!("graphene_core::ops::IntoNode<{}>", input_ty.clone()).into(), }; let convert_node_identifier = ProtoNodeIdentifier { name: format!("graphene_core::ops::ConvertNode<{}>", input_ty.clone()).into(), }; let proto_node = if into_node_registry.keys().any(|ident: &ProtoNodeIdentifier| ident.name.as_ref() == into_node_identifier.name.as_ref()) { generated_nodes += 1; into_node_identifier } else if into_node_registry.keys().any(|ident| ident.name.as_ref() == convert_node_identifier.name.as_ref()) { generated_nodes += 1; inputs.push(NodeInput::value(TaggedValue::None, false)); convert_node_identifier } else { identity_node.clone() }; let mut original_location = OriginalLocation::default(); original_location.auto_convert_index = Some(i); DocumentNode { inputs, 
implementation: DocumentNodeImplementation::ProtoNode(proto_node), visible: true, original_location, ..Default::default() } } _ => DocumentNode { inputs: vec![NodeInput::import(generic!(X), i)], implementation: DocumentNodeImplementation::ProtoNode(identity_node.clone()), visible: false, ..Default::default() }, }, ) }) .collect(); if generated_nodes == 0 { continue; } let document_node = DocumentNode { inputs: network_inputs, call_argument: input_type.clone(), implementation: DocumentNodeImplementation::ProtoNode(id.clone()), visible: true, skip_deduplication: false, context_features: ContextDependencies::from(metadata.context_features.as_slice()), ..Default::default() }; nodes.insert(NodeId(input_count as u64), document_node); let node = DocumentNode { inputs, call_argument: input_type.clone(), implementation: DocumentNodeImplementation::Network(NodeNetwork { exports: vec![NodeInput::Node { node_id: NodeId(input_count as u64), output_index: 0, }], nodes, scope_injections: Default::default(), generated: true, }), visible: true, skip_deduplication: false, ..Default::default() }; custom.insert(id.clone(), node); } custom } pub fn node_inputs(fields: &[registry::FieldMetadata], first_node_io: &NodeIOTypes) -> Vec<NodeInput> { fields .iter() .zip(first_node_io.inputs.iter()) .enumerate() .map(|(index, (field, node_io_ty))| { let ty = field.default_type.as_ref().unwrap_or(node_io_ty); let exposed = if index == 0 { *ty != fn_type_fut!(Context, ()) } else { field.exposed }; match field.value_source { RegistryValueSource::None => {} RegistryValueSource::Default(data) => { if let Some(custom_default) = TaggedValue::from_primitive_string(data, ty) { return NodeInput::value(custom_default, exposed); } else { // It is incredibly useful to get a warning when the default type cannot be parsed rather than defaulting to `()`. 
warn!("Failed to parse default value for type {ty:?} with data {data}"); } } RegistryValueSource::Scope(data) => return NodeInput::scope(Cow::Borrowed(data)), }; if let Some(type_default) = TaggedValue::from_type(ty) { return NodeInput::value(type_default, exposed); } NodeInput::value(TaggedValue::None, true) }) .collect() }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/lib.rs
node-graph/graph-craft/src/lib.rs
#[macro_use] extern crate log; #[macro_use] extern crate core_types; pub use core_types::{ProtoNodeIdentifier, Type, TypeDescriptor, concrete, generic}; pub mod document; pub mod graphene_compiler; pub mod proto; #[cfg(feature = "loading")] pub mod util; pub mod wasm_application_io;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/document.rs
node-graph/graph-craft/src/document.rs
pub mod value; use crate::document::value::TaggedValue; use crate::proto::{ConstructionArgs, ProtoNetwork, ProtoNode}; use core_types::memo::MemoHashGuard; pub use core_types::uuid::NodeId; pub use core_types::uuid::generate_uuid; use core_types::{Context, ContextDependencies, Cow, MemoHash, ProtoNodeIdentifier, Type}; use dyn_any::DynAny; use glam::IVec2; use log::Metadata; use rustc_hash::{FxBuildHasher, FxHashMap}; use std::collections::HashMap; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; /// Hash two IDs together, returning a new ID that is always consistent for two input IDs in a specific order. /// This is used during [`NodeNetwork::flatten`] in order to ensure consistent yet non-conflicting IDs for inner networks. fn merge_ids(a: NodeId, b: NodeId) -> NodeId { let mut hasher = DefaultHasher::new(); a.hash(&mut hasher); b.hash(&mut hasher); NodeId(hasher.finish()) } /// Utility function for providing a default boolean value to serde. #[inline(always)] fn return_true() -> bool { true } /// An instance of a [`DocumentNodeDefinition`] that has been instantiated in a [`NodeNetwork`]. /// Currently, when an instance is made, it lives all on its own without any lasting connection to the definition. /// But we will want to change it in the future so it merely references its definition. #[derive(Clone, Debug, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] pub struct DocumentNode { /// The inputs to a node, which are either: /// - From other nodes within this graph [`NodeInput::Node`], /// - A constant value [`NodeInput::Value`], /// - A [`NodeInput::Import`] which specifies that this input comes from outside the graph, which is resolved in the graph flattening step in the case of nested networks. /// /// In the root network, it is resolved when evaluating the borrow tree. 
/// Ensure the click target in the encapsulating network is updated when the inputs cause the node shape to change (currently only when exposing/hiding an input) /// by using network.update_click_target(node_id). #[cfg_attr(target_family = "wasm", serde(alias = "outputs"))] pub inputs: Vec<NodeInput>, /// Type of the argument which this node can be evaluated with. #[serde(default, alias = "manual_composition", deserialize_with = "migrate_call_argument")] pub call_argument: Type, // A nested document network or a proto-node identifier. pub implementation: DocumentNodeImplementation, /// Represents the eye icon for hiding/showing the node in the graph UI. When hidden, a node gets replaced with an identity node during the graph flattening step. #[serde(default = "return_true")] pub visible: bool, /// When two different proto nodes hash to the same value (e.g. two value nodes each containing `2_u32` or two multiply nodes that have the same node IDs as input), the duplicates are removed. /// See [`ProtoNetwork::generate_stable_node_ids`] for details. /// However sometimes this is not desirable, for example in the case of a [`graphene_core::memo::MonitorNode`] that needs to be accessed outside of the graph. #[serde(default)] pub skip_deduplication: bool, /// List of Extract and Inject annotations for the Context. #[serde(default)] pub context_features: ContextDependencies, /// The path to this node and its inputs and outputs as of when [`NodeNetwork::generate_node_paths`] was called. #[serde(skip)] pub original_location: OriginalLocation, } /// Represents the original location of a node input/output when [`NodeNetwork::generate_node_paths`] was called, allowing the types and errors to be derived. #[derive(Clone, Debug, PartialEq, Eq, Hash, DynAny, serde::Serialize, serde::Deserialize)] pub struct Source { pub node: Vec<NodeId>, pub index: usize, } /// The path to this node and its inputs and outputs as of when [`NodeNetwork::generate_node_paths`] was called. 
#[derive(Clone, Debug, PartialEq, Eq, DynAny, Default, serde::Serialize, serde::Deserialize)] #[non_exhaustive] pub struct OriginalLocation { /// The original location to the document node - e.g. [grandparent_id, parent_id, node_id]. pub path: Option<Vec<NodeId>>, /// Each document input source maps to one proto node input (however one proto node input may come from several sources) pub inputs_source: HashMap<Source, usize>, /// List of nodes which depend on this node pub dependants: Vec<Vec<NodeId>>, /// A list of flags indicating whether the input is exposed in the UI pub inputs_exposed: Vec<bool>, /// For automatically inserted Convert and Into nodes, if there is an error, display it on the node it is connect to. pub auto_convert_index: Option<usize>, } impl Default for DocumentNode { fn default() -> Self { Self { inputs: Default::default(), call_argument: concrete!(Context), implementation: Default::default(), visible: true, skip_deduplication: Default::default(), original_location: OriginalLocation::default(), context_features: Default::default(), } } } impl Hash for OriginalLocation { fn hash<H: Hasher>(&self, state: &mut H) { self.path.hash(state); self.inputs_source.iter().for_each(|val| val.hash(state)); self.inputs_exposed.hash(state); } } impl OriginalLocation { pub fn inputs(&self, index: usize) -> impl Iterator<Item = Source> + '_ { [(index >= 1).then(|| Source { node: self.path.clone().unwrap_or_default(), index: self.inputs_exposed.iter().take(index - 1).filter(|&&exposed| exposed).count(), })] .into_iter() .flatten() .chain(self.inputs_source.iter().filter(move |x| *x.1 == index).map(|(source, _)| source.clone())) } } impl DocumentNode { /// Locate the input that is a [`NodeInput::Import`] at index `offset` and replace it with a [`NodeInput::Node`]. 
pub fn populate_first_network_input(&mut self, node_id: NodeId, output_index: usize, offset: usize, source: impl Iterator<Item = Source>, skip: usize) { let (index, _) = self .inputs .iter() .enumerate() .nth(offset) .unwrap_or_else(|| panic!("no network input found for {self:#?} and offset: {offset}")); self.inputs[index] = NodeInput::Node { node_id, output_index }; let input_source = &mut self.original_location.inputs_source; for source in source { input_source.insert(source, (index + 1).saturating_sub(skip)); } } fn resolve_proto_node(self) -> ProtoNode { let DocumentNodeImplementation::ProtoNode(identifier) = self.implementation else { unreachable!("tried to resolve not flattened node on resolved node {self:?}"); }; assert!(!self.inputs.iter().any(|input| matches!(input, NodeInput::Import { .. })), "received non-resolved input"); let mut construction_args = ConstructionArgs::Nodes(vec![]); // If we have one input of the type inline, set it as the construction args if let &[NodeInput::Inline(ref inline)] = self.inputs.as_slice() { construction_args = ConstructionArgs::Inline(inline.clone()); } // If we have one input of the type inline, set it as the construction args if let &[NodeInput::Value { ref tagged_value, .. }] = self.inputs.as_slice() { construction_args = ConstructionArgs::Value(tagged_value.clone()); } if let ConstructionArgs::Nodes(nodes) = &mut construction_args { nodes.extend(self.inputs.iter().map(|input| match input { NodeInput::Node { node_id, .. } => *node_id, _ => unreachable!(), })); } ProtoNode { identifier, call_argument: self.call_argument, construction_args, original_location: self.original_location, skip_deduplication: self.skip_deduplication, context_features: self.context_features, } } } /// Represents the possible inputs to a node. #[derive(Debug, Clone, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] pub enum NodeInput { /// A reference to another node in the same network from which this node can receive its input. 
Node { node_id: NodeId, output_index: usize }, /// A hardcoded value that can't change after the graph is compiled. Gets converted into a value node during graph compilation. Value { tagged_value: MemoHash<TaggedValue>, exposed: bool }, // TODO: Remove import_type and get type from parent node input /// Input that is provided by the import from the parent network to this document node network. #[serde(alias = "Network")] Import { import_type: Type, import_index: usize }, /// Input that is extracted from the parent scopes the node resides in. The string argument is the key. Scope(Cow<'static, str>), /// Input that is extracted from the parent scopes the node resides in. The string argument is the key. Reflection(DocumentNodeMetadata), /// A Rust source code string. Allows us to insert literal Rust code. Only used for GPU compilation. /// We can use this whenever we spin up Rustc. Sort of like inline assembly, but because our language is Rust, it acts as inline Rust. Inline(InlineRust), } #[derive(Debug, Clone, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] pub struct InlineRust { pub expr: String, pub ty: Type, } impl InlineRust { pub fn new(expr: String, ty: Type) -> Self { Self { expr, ty } } } #[derive(Debug, Clone, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] pub enum DocumentNodeMetadata { DocumentNodePath, } impl DocumentNodeMetadata { pub fn ty(&self) -> Type { match self { DocumentNodeMetadata::DocumentNodePath => concrete!(Vec<NodeId>), } } } impl NodeInput { pub const fn node(node_id: NodeId, output_index: usize) -> Self { Self::Node { node_id, output_index } } pub fn value(tagged_value: TaggedValue, exposed: bool) -> Self { let tagged_value = tagged_value.into(); Self::Value { tagged_value, exposed } } pub const fn import(import_type: Type, import_index: usize) -> Self { Self::Import { import_type, import_index } } pub fn scope(key: impl Into<Cow<'static, str>>) -> Self { Self::Scope(key.into()) } fn map_ids(&mut self, 
f: impl Fn(NodeId) -> NodeId) { if let &mut NodeInput::Node { node_id, output_index } = self { *self = NodeInput::Node { node_id: f(node_id), output_index } } } pub fn is_exposed(&self) -> bool { match self { NodeInput::Node { .. } => true, NodeInput::Value { exposed, .. } => *exposed, NodeInput::Import { .. } => true, NodeInput::Inline(_) => false, NodeInput::Scope(_) => false, NodeInput::Reflection(_) => false, } } pub fn ty(&self) -> Type { match self { NodeInput::Node { .. } => unreachable!("ty() called on NodeInput::Node"), NodeInput::Value { tagged_value, .. } => tagged_value.ty(), NodeInput::Import { import_type, .. } => import_type.clone(), NodeInput::Inline(_) => panic!("ty() called on NodeInput::Inline"), NodeInput::Scope(_) => panic!("ty() called on NodeInput::Scope"), NodeInput::Reflection(_) => concrete!(Metadata), } } pub fn as_value(&self) -> Option<&TaggedValue> { if let NodeInput::Value { tagged_value, .. } = self { Some(tagged_value) } else { None } } pub fn as_value_mut(&mut self) -> Option<MemoHashGuard<'_, TaggedValue>> { if let NodeInput::Value { tagged_value, .. } = self { Some(tagged_value.inner_mut()) } else { None } } pub fn as_non_exposed_value(&self) -> Option<&TaggedValue> { if let NodeInput::Value { tagged_value, exposed: false } = self { Some(tagged_value) } else { None } } pub fn as_node(&self) -> Option<NodeId> { if let NodeInput::Node { node_id, .. } = self { Some(*node_id) } else { None } } } // TODO: Eventually remove this document upgrade code #[derive(Clone, Debug, DynAny, serde::Serialize, serde::Deserialize)] /// Represents the implementation of a node, which can be a nested [`NodeNetwork`], a proto [`ProtoNodeIdentifier`], or `Extract`. 
pub enum OldDocumentNodeImplementation { Network(OldNodeNetwork), #[serde(alias = "Unresolved")] ProtoNode(ProtoNodeIdentifier), Extract, } #[derive(Clone, Debug, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] /// Represents the implementation of a node, which can be a nested [`NodeNetwork`], a proto [`ProtoNodeIdentifier`], or `Extract`. pub enum DocumentNodeImplementation { /// This describes a (document) node built out of a subgraph of other (document) nodes. /// /// A nested [`NodeNetwork`] that is flattened by the [`NodeNetwork::flatten`] function. Network(NodeNetwork), /// This describes a (document) node implemented as a proto node. /// /// A proto node identifier which can be found in `node_registry.rs`. #[serde(alias = "Unresolved")] // TODO: Eventually remove this alias document upgrade code ProtoNode(ProtoNodeIdentifier), /// The Extract variant is a tag which tells the compilation process to do something special: it invokes language-level functionality built for use by the ExtractNode to enable metaprogramming. /// When the ExtractNode is compiled, it gets replaced by a value node containing a representation of the source code for the function/lambda of the document node that's fed into the ExtractNode /// (but only that one document node, not upstream nodes). /// /// This is explained in more detail here: <https://www.youtube.com/watch?v=72KJa3jQClo> /// /// Currently we use it for GPU execution, where a node has to get "extracted" to its source code representation and stored as a value that can be given to the GpuCompiler node at runtime /// (to become a compute shader). Future use could involve the addition of an InjectNode to convert the source code form back into an executable node, enabling metaprogramming in the node graph. /// We would use an assortment of nodes that operate on Graphene source code (just data, no different from any other data flowing through the graph) to make graph transformations. 
/// /// We use this for dealing with macros in a syntactic way of modifying the node graph from within the graph itself. Just like we often deal with lambdas to represent a whole group of /// operations/code/logic, this allows us to basically deal with a lambda at a meta/source-code level, because we need to pass the GPU SPIR-V compiler the source code for a lambda, /// not the executable logic of a lambda. /// /// This is analogous to how Rust macros operate at the level of source code, not executable code. When we speak of source code, that represents Graphene's source code in the form of a /// DocumentNode network, not the text form of Rust's source code. (Analogous to the token stream/AST of a Rust macro.) /// /// `DocumentNode`s with a `DocumentNodeImplementation::Extract` are converted into a `ClonedNode` that returns the `DocumentNode` specified by the single `NodeInput::Node`. The referenced node /// (specified by the single `NodeInput::Node`) is removed from the network, and any `NodeInput::Node`s used by the referenced node are replaced with a generically typed network input. 
Extract, } impl Default for DocumentNodeImplementation { fn default() -> Self { Self::ProtoNode(graphene_core::ops::identity::IDENTIFIER) } } impl DocumentNodeImplementation { pub fn get_network(&self) -> Option<&NodeNetwork> { match self { DocumentNodeImplementation::Network(n) => Some(n), _ => None, } } pub fn get_network_mut(&mut self) -> Option<&mut NodeNetwork> { match self { DocumentNodeImplementation::Network(n) => Some(n), _ => None, } } pub fn get_proto_node(&self) -> Option<&ProtoNodeIdentifier> { match self { DocumentNodeImplementation::ProtoNode(p) => Some(p), _ => None, } } pub fn output_count(&self) -> usize { match self { DocumentNodeImplementation::Network(network) => network.exports.len(), _ => 1, } } } // TODO: Eventually remove this document upgrade code #[derive(Debug, serde::Deserialize)] #[serde(untagged)] pub enum NodeExportVersions { OldNodeInput(NodeOutput), NodeInput(NodeInput), } // TODO: Eventually remove this document upgrade code #[derive(Debug, serde::Deserialize)] pub struct NodeOutput { pub node_id: NodeId, pub node_output_index: usize, } // TODO: Eventually remove this document upgrade code fn deserialize_exports<'de, D>(deserializer: D) -> Result<Vec<NodeInput>, D::Error> where D: serde::Deserializer<'de>, { use serde::Deserialize; let node_input_versions = Vec::<NodeExportVersions>::deserialize(deserializer)?; // Convert Vec<NodeOutput> to Vec<NodeInput> let inputs = node_input_versions .into_iter() .map(|node_input_version| { let node_output = match node_input_version { NodeExportVersions::OldNodeInput(node_output) => node_output, NodeExportVersions::NodeInput(node_input) => return node_input, }; NodeInput::node(node_output.node_id, node_output.node_output_index) }) .collect(); Ok(inputs) } /// An instance of a [`DocumentNodeDefinition`] that has been instantiated in a [`NodeNetwork`]. /// Currently, when an instance is made, it lives all on its own without any lasting connection to the definition. 
/// But we will want to change it in the future so it merely references its definition. #[derive(Clone, Debug, DynAny, serde::Serialize, serde::Deserialize)] pub struct OldDocumentNode { /// A name chosen by the user for this instance of the node. Empty indicates no given name, in which case the node definition's name is displayed to the user in italics. /// Ensure the click target in the encapsulating network is updated when this is modified by using network.update_click_target(node_id). #[serde(default)] pub alias: String, // TODO: Replace this name with a reference to the [`DocumentNodeDefinition`] node definition to use the name from there instead. /// The name of the node definition, as originally set by [`DocumentNodeDefinition`], used to display in the UI and to display the appropriate properties. #[serde(deserialize_with = "migrate_layer_to_merge")] pub name: String, /// The inputs to a node, which are either: /// - From other nodes within this graph [`NodeInput::Node`], /// - A constant value [`NodeInput::Value`], /// - A [`NodeInput::Import`] which specifies that this input is from outside the graph, which is resolved in the graph flattening step in the case of nested networks. /// /// In the root network, it is resolved when evaluating the borrow tree. /// Ensure the click target in the encapsulating network is updated when the inputs cause the node shape to change (currently only when exposing/hiding an input) by using network.update_click_target(node_id). #[cfg_attr(target_family = "wasm", serde(alias = "outputs"))] pub inputs: Vec<NodeInput>, pub manual_composition: Option<Type>, // TODO: Remove once this references its definition instead (see above TODO). /// Indicates to the UI if a primary output should be drawn for this node. /// True for most nodes, but the Split Channels node is an example of a node that has multiple secondary outputs but no primary output. 
#[serde(default = "return_true")] pub has_primary_output: bool, // A nested document network or a proto-node identifier. pub implementation: OldDocumentNodeImplementation, /// User chosen state for displaying this as a left-to-right node or bottom-to-top layer. Ensure the click target in the encapsulating network is updated when the node changes to a layer by using network.update_click_target(node_id). #[serde(default)] pub is_layer: bool, /// Represents the eye icon for hiding/showing the node in the graph UI. When hidden, a node gets replaced with an identity node during the graph flattening step. #[serde(default = "return_true")] pub visible: bool, /// Represents the lock icon for locking/unlocking the node in the graph UI. When locked, a node cannot be moved in the graph UI. #[serde(default)] pub locked: bool, /// Metadata about the node including its position in the graph UI. Ensure the click target in the encapsulating network is updated when the node moves by using network.update_click_target(node_id). pub metadata: OldDocumentNodeMetadata, /// When two different proto nodes hash to the same value (e.g. two value nodes each containing `2_u32` or two multiply nodes that have the same node IDs as input), the duplicates are removed. /// See [`ProtoNetwork::generate_stable_node_ids`] for details. /// However sometimes this is not desirable, for example in the case of a [`graphene_core::memo::MonitorNode`] that needs to be accessed outside of the graph. #[serde(default)] pub skip_deduplication: bool, /// The path to this node and its inputs and outputs as of when [`NodeNetwork::generate_node_paths`] was called. 
#[serde(skip)] pub original_location: OriginalLocation, } // TODO: Eventually remove this document upgrade code #[derive(Clone, Debug, PartialEq, Default, specta::Type, Hash, DynAny, serde::Serialize, serde::Deserialize)] /// Metadata about the node including its position in the graph UI pub struct OldDocumentNodeMetadata { pub position: IVec2, } // TODO: Eventually remove this document upgrade code #[derive(Clone, Copy, Debug, PartialEq, Hash, serde::Serialize, serde::Deserialize)] /// Root Node is the "default" export for a node network. Used by document metadata, displaying UI-only "Export" node, and for restoring the default preview node. pub struct OldRootNode { pub id: NodeId, pub output_index: usize, } // TODO: Eventually remove this document upgrade code #[derive(PartialEq, Debug, Clone, Hash, Default, serde::Serialize, serde::Deserialize)] pub enum OldPreviewing { /// If there is a node to restore the connection to the export for, then it is stored in the option. /// Otherwise, nothing gets restored and the primary export is disconnected. Yes { root_node_to_restore: Option<OldRootNode> }, #[default] No, } // TODO: Eventually remove this document upgrade code #[derive(Clone, Debug, DynAny, serde::Serialize, serde::Deserialize)] /// A network (subgraph) of nodes containing each [`DocumentNode`] and its ID, as well as list mapping each export to its connected node, or a value if disconnected pub struct OldNodeNetwork { /// The list of data outputs that are exported from this network to the parent network. /// Each export is a reference to a node within this network, paired with its output index, that is the source of the network's exported data. #[serde(alias = "outputs", deserialize_with = "deserialize_exports")] // TODO: Eventually remove this alias document upgrade code pub exports: Vec<NodeInput>, /// The list of all nodes in this network. 
//cfg_attr(feature = "serde", #[serde(serialize_with = "core_types::vector::serialize_hashmap", deserialize_with = "core_types::vector::deserialize_hashmap"))] pub nodes: HashMap<NodeId, OldDocumentNode>, /// Indicates whether the network is currently rendered with a particular node that is previewed, and if so, which connection should be restored when the preview ends. #[serde(default)] pub previewing: OldPreviewing, /// Temporary fields to store metadata for "Import"/"Export" UI-only nodes, eventually will be replaced with lines leading to edges #[serde(default = "default_import_metadata")] pub imports_metadata: (NodeId, IVec2), #[serde(default = "default_export_metadata")] pub exports_metadata: (NodeId, IVec2), /// A network may expose nodes as constants which can by used by other nodes using a `NodeInput::Scope(key)`. #[serde(default)] //cfg_attr(feature = "serde", #[serde(serialize_with = "core_types::vector::serialize_hashmap", deserialize_with = "core_types::vector::deserialize_hashmap"))] pub scope_injections: HashMap<String, (NodeId, Type)>, } // TODO: Eventually remove this document upgrade code fn migrate_layer_to_merge<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<String, D::Error> { let mut s: String = serde::Deserialize::deserialize(deserializer)?; if s == "Layer" { s = "Merge".to_string(); } Ok(s) } // TODO: Eventually remove this document upgrade code fn default_import_metadata() -> (NodeId, IVec2) { (NodeId::new(), IVec2::new(-25, -4)) } // TODO: Eventually remove this document upgrade code fn default_export_metadata() -> (NodeId, IVec2) { (NodeId::new(), IVec2::new(8, -4)) } #[derive(Clone, Default, Debug, DynAny, serde::Serialize, serde::Deserialize)] /// A network (subgraph) of nodes containing each [`DocumentNode`] and its ID, as well as list mapping each export to its connected node, or a value if disconnected pub struct NodeNetwork { /// The list of data outputs that are exported from this network to the parent network. 
/// Each export is a reference to a node within this network, paired with its output index, that is the source of the network's exported data. // TODO: Eventually remove this alias document upgrade code #[cfg_attr(target_family = "wasm", serde(alias = "outputs", deserialize_with = "deserialize_exports"))] pub exports: Vec<NodeInput>, // TODO: Instead of storing import types in each NodeInput::Import connection, the types are stored here. This is similar to how types need to be defined for parameters when creating a function in Rust. // pub import_types: Vec<Type>, /// The list of all nodes in this network. #[serde( serialize_with = "graphic_types::vector_types::vector::serialize_hashmap", deserialize_with = "graphic_types::vector_types::vector::deserialize_hashmap" )] pub nodes: FxHashMap<NodeId, DocumentNode>, /// A network may expose nodes as constants which can by used by other nodes using a `NodeInput::Scope(key)`. #[serde(default)] #[serde( serialize_with = "graphic_types::vector_types::vector::serialize_hashmap", deserialize_with = "graphic_types::vector_types::vector::deserialize_hashmap" )] pub scope_injections: FxHashMap<String, (NodeId, Type)>, #[serde(skip)] pub generated: bool, } impl Hash for NodeNetwork { fn hash<H: Hasher>(&self, state: &mut H) { self.exports.hash(state); let mut nodes: Vec<_> = self.nodes.iter().collect(); nodes.sort_by_key(|(id, _)| *id); for (id, node) in nodes { id.hash(state); node.hash(state); } } } impl PartialEq for NodeNetwork { fn eq(&self, other: &Self) -> bool { self.exports == other.exports } } /// Graph modification functions impl NodeNetwork { pub fn current_hash(&self) -> u64 { use std::hash::BuildHasher; FxBuildHasher.hash_one(self) } pub fn value_network(node: DocumentNode) -> Self { Self { exports: vec![NodeInput::node(NodeId(0), 0)], nodes: [(NodeId(0), node)].into_iter().collect(), ..Default::default() } } /// Get the nested network given by the path of node ids pub fn nested_network(&self, nested_path: 
&[NodeId]) -> Option<&Self> { let mut network = Some(self); for segment in nested_path { network = network.and_then(|network| network.nodes.get(segment)).and_then(|node| node.implementation.get_network()); } network } /// Get the mutable nested network given by the path of node ids pub fn nested_network_mut(&mut self, nested_path: &[NodeId]) -> Option<&mut Self> { let mut network = Some(self); for segment in nested_path { network = network.and_then(|network| network.nodes.get_mut(segment)).and_then(|node| node.implementation.get_network_mut()); } network } /// Is the node being used directly as an output? pub fn outputs_contain(&self, node_id_to_check: NodeId) -> bool { self.exports .iter() .any(|output| if let NodeInput::Node { node_id, .. } = output { *node_id == node_id_to_check } else { false }) } /// Check there are no cycles in the graph (this should never happen). pub fn is_acyclic(&self) -> bool { let mut dependencies: HashMap<NodeId, Vec<NodeId>> = HashMap::new(); for (node_id, node) in &self.nodes { dependencies.insert( *node_id, node.inputs .iter() .filter_map(|input| if let NodeInput::Node { node_id, .. } = input { Some(*node_id) } else { None }) .collect(), ); } while !dependencies.is_empty() { let Some((&disconnected, _)) = dependencies.iter().find(|(_, l)| l.is_empty()) else { error!("Dependencies {dependencies:?}"); return false; }; dependencies.remove(&disconnected); for connections in dependencies.values_mut() { connections.retain(|&id| id != disconnected); } } true } } /// Functions for compiling the network impl NodeNetwork { /// Replace all references in the graph of a node ID with a new node ID defined by the function `f`. pub fn map_ids(&mut self, f: impl Fn(NodeId) -> NodeId + Copy) { self.exports.iter_mut().for_each(|output| { if let NodeInput::Node { node_id, .. 
} = output { *node_id = f(*node_id) } }); self.scope_injections.values_mut().for_each(|(id, _ty)| *id = f(*id)); let nodes = std::mem::take(&mut self.nodes); self.nodes = nodes .into_iter() .map(|(id, mut node)| { node.inputs.iter_mut().for_each(|input| input.map_ids(f)); node.original_location.dependants.iter_mut().for_each(|deps| deps.iter_mut().for_each(|id| *id = f(*id))); (f(id), node) }) .collect(); } /// Populate the [`DocumentNode::path`], which stores the location of the document node to allow for matching the resulting proto nodes to the document node for the purposes of typing and finding monitor nodes. pub fn generate_node_paths(&mut self, prefix: &[NodeId]) { for (node_id, node) in &mut self.nodes { let mut new_path = prefix.to_vec(); if !self.generated { new_path.push(*node_id); } if let DocumentNodeImplementation::Network(network) = &mut node.implementation { network.generate_node_paths(new_path.as_slice()); } if node.original_location.path.is_some() { log::warn!("Attempting to overwrite node path"); } else { node.original_location.path = Some(new_path); node.original_location.inputs_exposed = node.inputs.iter().map(|input| input.is_exposed()).collect(); node.original_location.dependants = (0..node.implementation.output_count()).map(|_| Vec::new()).collect(); } } } pub fn populate_dependants(&mut self) { let mut dep_changes = Vec::new(); for (node_id, node) in &mut self.nodes { let len = node.original_location.dependants.len(); node.original_location.dependants.extend(vec![vec![]; (node.implementation.output_count()).max(len) - len]); for input in &node.inputs { if let NodeInput::Node { node_id: dep_id, output_index, .. 
} = input { dep_changes.push((*dep_id, *output_index, *node_id)); } } } // println!("{:#?}", self.nodes.get(&NodeId(1))); for (dep_id, output_index, node_id) in dep_changes { let node = self.nodes.get_mut(&dep_id).expect("Encountered invalid node id"); let len = node.original_location.dependants.len(); // One must be added to the index to find the length because indexing in rust starts from 0. node.original_location.dependants.extend(vec![vec![]; (output_index + 1).max(len) - len]); // println!("{node_id} {output_index} {}", node.implementation.output_count()); node.original_location.dependants[output_index].push(node_id); } } /// Replace all references in any node of `old_input` with `new_input` fn replace_node_inputs(&mut self, node_id: NodeId, old_input: (NodeId, usize), new_input: (NodeId, usize)) { let Some(node) = self.nodes.get_mut(&node_id) else { return }; node.inputs.iter_mut().for_each(|input| { if let NodeInput::Node { node_id: input_id, output_index, .. } = input && (*input_id, *output_index) == old_input { (*input_id, *output_index) = new_input; } }); } /// Replace all references in any node of `old_output` with `new_output` fn replace_network_outputs(&mut self, old_output: NodeInput, new_output: NodeInput) { for output in self.exports.iter_mut() { if *output == old_output { *output = new_output.clone(); } } } /// Removes unused nodes from the graph. Returns a list of booleans which represent if each of the inputs have been retained. pub fn remove_dead_nodes(&mut self, number_of_inputs: usize) -> Vec<bool> { // Take all the nodes out of the nodes list let mut old_nodes = std::mem::take(&mut self.nodes); let mut stack = self .exports .iter() .filter_map(|output| if let NodeInput::Node { node_id, .. 
} = output { Some(*node_id) } else { None }) .collect::<Vec<_>>(); while let Some(node_id) = stack.pop() { let Some((node_id, mut document_node)) = old_nodes.remove_entry(&node_id) else { continue; }; // Remove dead nodes from child networks if let DocumentNodeImplementation::Network(network) = &mut document_node.implementation { // Remove inputs to the parent node if they have been removed from the child let mut retain_inputs = network.remove_dead_nodes(document_node.inputs.len()).into_iter(); document_node.inputs.retain(|_| retain_inputs.next().unwrap_or(true)) } // Visit all nodes that this node references stack.extend( document_node .inputs .iter()
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
true
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/util.rs
node-graph/graph-craft/src/util.rs
use crate::document::NodeNetwork; use crate::graphene_compiler::Compiler; use crate::proto::ProtoNetwork; pub fn load_network(document_string: &str) -> NodeNetwork { let document: serde_json::Value = serde_json::from_str(document_string).expect("Failed to parse document"); let document = (document["network_interface"]["network"].clone()).to_string(); serde_json::from_str::<NodeNetwork>(&document).expect("Failed to parse document") } pub fn compile(network: NodeNetwork) -> ProtoNetwork { let compiler = Compiler {}; compiler.compile_single(network).unwrap() } pub fn load_from_name(name: &str) -> NodeNetwork { let content = std::fs::read(format!("../../demo-artwork/{name}.graphite")).expect("failed to read file"); let content = std::str::from_utf8(&content).unwrap(); load_network(content) } pub static DEMO_ART: [&str; 7] = [ "changing-seasons", "painted-dreams", "red-dress", "valley-of-spires", "isometric-fountain", "procedural-string-lights", "parametric-dunescape", ];
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/wasm_application_io.rs
node-graph/graph-craft/src/wasm_application_io.rs
use dyn_any::StaticType; use graphene_application_io::{ApplicationError, ApplicationIo, ResourceFuture, SurfaceHandle, SurfaceId}; #[cfg(target_family = "wasm")] use js_sys::{Object, Reflect}; use std::collections::HashMap; use std::hash::Hash; use std::sync::Arc; #[cfg(target_family = "wasm")] use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; #[cfg(feature = "tokio")] use tokio::io::AsyncReadExt; #[cfg(target_family = "wasm")] use wasm_bindgen::JsCast; #[cfg(target_family = "wasm")] use wasm_bindgen::JsValue; #[cfg(target_family = "wasm")] use web_sys::HtmlCanvasElement; #[cfg(target_family = "wasm")] use web_sys::window; #[cfg(feature = "wgpu")] use wgpu_executor::WgpuExecutor; #[derive(Debug)] struct WindowWrapper { #[cfg(target_family = "wasm")] window: SurfaceHandle<HtmlCanvasElement>, #[cfg(not(target_family = "wasm"))] window: SurfaceHandle<Arc<dyn winit::window::Window>>, } #[cfg(target_family = "wasm")] impl Drop for WindowWrapper { fn drop(&mut self) { let window = window().expect("should have a window in this context"); let window = Object::from(window); let image_canvases_key = JsValue::from_str("imageCanvases"); let wrapper = || { if let Ok(canvases) = Reflect::get(&window, &image_canvases_key) { // Convert key and value to JsValue let js_key = JsValue::from_str(self.window.window_id.to_string().as_str()); // Use Reflect API to set property Reflect::delete_property(&canvases.into(), &js_key)?; } Ok::<_, JsValue>(()) }; wrapper().expect("should be able to set canvas in global scope") } } #[cfg(target_family = "wasm")] unsafe impl Sync for WindowWrapper {} #[cfg(target_family = "wasm")] unsafe impl Send for WindowWrapper {} #[derive(Debug, Default)] pub struct WasmApplicationIo { #[cfg(target_family = "wasm")] ids: AtomicU64, #[cfg(feature = "wgpu")] pub(crate) gpu_executor: Option<WgpuExecutor>, windows: Vec<WindowWrapper>, pub resources: HashMap<String, Arc<[u8]>>, } static WGPU_AVAILABLE: std::sync::atomic::AtomicI8 = 
std::sync::atomic::AtomicI8::new(-1); pub fn wgpu_available() -> Option<bool> { match WGPU_AVAILABLE.load(Ordering::SeqCst) { -1 => None, 0 => Some(false), _ => Some(true), } } impl WasmApplicationIo { pub async fn new() -> Self { #[cfg(all(feature = "wgpu", target_family = "wasm"))] let executor = if let Some(gpu) = web_sys::window().map(|w| w.navigator().gpu()) { let request_adapter = || { let request_adapter = js_sys::Reflect::get(&gpu, &wasm_bindgen::JsValue::from_str("requestAdapter")).ok()?; let function = request_adapter.dyn_ref::<js_sys::Function>()?; Some(function.call0(&gpu).ok()) }; let result = request_adapter(); match result { None => None, Some(_) => WgpuExecutor::new().await, } } else { None }; #[cfg(all(feature = "wgpu", not(target_family = "wasm")))] let executor = WgpuExecutor::new().await; #[cfg(not(feature = "wgpu"))] let wgpu_available = false; #[cfg(feature = "wgpu")] let wgpu_available = executor.is_some(); WGPU_AVAILABLE.store(wgpu_available as i8, Ordering::SeqCst); let mut io = Self { #[cfg(target_family = "wasm")] ids: AtomicU64::new(0), #[cfg(feature = "wgpu")] gpu_executor: executor, windows: Vec::new(), resources: HashMap::new(), }; let window = io.create_window(); io.windows.push(WindowWrapper { window }); io.resources.insert("null".to_string(), Arc::from(include_bytes!("null.png").to_vec())); io } pub async fn new_offscreen() -> Self { #[cfg(feature = "wgpu")] let executor = WgpuExecutor::new().await; #[cfg(not(feature = "wgpu"))] let wgpu_available = false; #[cfg(feature = "wgpu")] let wgpu_available = executor.is_some(); WGPU_AVAILABLE.store(wgpu_available as i8, Ordering::SeqCst); let mut io = Self { #[cfg(target_family = "wasm")] ids: AtomicU64::new(0), #[cfg(feature = "wgpu")] gpu_executor: executor, windows: Vec::new(), resources: HashMap::new(), }; io.resources.insert("null".to_string(), Arc::from(include_bytes!("null.png").to_vec())); io } #[cfg(all(not(target_family = "wasm"), feature = "wgpu"))] pub fn 
new_with_context(context: wgpu_executor::WgpuContext) -> Self { #[cfg(feature = "wgpu")] let executor = WgpuExecutor::with_context(context); #[cfg(not(feature = "wgpu"))] let wgpu_available = false; #[cfg(feature = "wgpu")] let wgpu_available = executor.is_some(); WGPU_AVAILABLE.store(wgpu_available as i8, Ordering::SeqCst); let mut io = Self { gpu_executor: executor, windows: Vec::new(), resources: HashMap::new(), }; io.resources.insert("null".to_string(), Arc::from(include_bytes!("null.png").to_vec())); io } } unsafe impl StaticType for WasmApplicationIo { type Static = WasmApplicationIo; } impl<'a> From<&'a WasmEditorApi> for &'a WasmApplicationIo { fn from(editor_api: &'a WasmEditorApi) -> Self { editor_api.application_io.as_ref().unwrap() } } #[cfg(feature = "wgpu")] impl<'a> From<&'a WasmApplicationIo> for &'a WgpuExecutor { fn from(app_io: &'a WasmApplicationIo) -> Self { app_io.gpu_executor.as_ref().unwrap() } } pub type WasmEditorApi = graphene_application_io::EditorApi<WasmApplicationIo>; impl ApplicationIo for WasmApplicationIo { #[cfg(target_family = "wasm")] type Surface = HtmlCanvasElement; #[cfg(not(target_family = "wasm"))] type Surface = Arc<dyn winit::window::Window>; #[cfg(feature = "wgpu")] type Executor = WgpuExecutor; #[cfg(not(feature = "wgpu"))] type Executor = (); #[cfg(target_family = "wasm")] fn create_window(&self) -> SurfaceHandle<Self::Surface> { let wrapper = || { let document = window().expect("should have a window in this context").document().expect("window should have a document"); let canvas: HtmlCanvasElement = document.create_element("canvas")?.dyn_into::<HtmlCanvasElement>()?; let id = self.ids.fetch_add(1, Ordering::SeqCst); // store the canvas in the global scope so it doesn't get garbage collected let window = window().expect("should have a window in this context"); let window = Object::from(window); let image_canvases_key = JsValue::from_str("imageCanvases"); let mut canvases = Reflect::get(&window, &image_canvases_key); if 
canvases.is_err() { Reflect::set(&JsValue::from(web_sys::window().unwrap()), &image_canvases_key, &Object::new()).unwrap(); canvases = Reflect::get(&window, &image_canvases_key); } // Convert key and value to JsValue let js_key = JsValue::from_str(id.to_string().as_str()); let js_value = JsValue::from(canvas.clone()); let canvases = Object::from(canvases.unwrap()); // Use Reflect API to set property Reflect::set(&canvases, &js_key, &js_value)?; Ok::<_, JsValue>(SurfaceHandle { window_id: SurfaceId(id), surface: canvas, }) }; wrapper().expect("should be able to set canvas in global scope") } #[cfg(not(target_family = "wasm"))] fn create_window(&self) -> SurfaceHandle<Self::Surface> { todo!("winit api changed, calling create_window on EventLoop is deprecated"); // log::trace!("Spawning window"); // #[cfg(all(not(test), target_os = "linux", feature = "wayland"))] // use winit::platform::wayland::EventLoopBuilderExtWayland; // #[cfg(all(not(test), target_os = "linux", feature = "wayland"))] // let event_loop = winit::event_loop::EventLoopBuilder::new().with_any_thread(true).build().unwrap(); // #[cfg(not(all(not(test), target_os = "linux", feature = "wayland")))] // let event_loop = winit::event_loop::EventLoop::new().unwrap(); // let window = event_loop // .create_window( // winit::window::WindowAttributes::default() // .with_title("Graphite") // .with_inner_size(winit::dpi::PhysicalSize::new(800, 600)), // ) // .unwrap(); // SurfaceHandle { // window_id: SurfaceId(window.id().into()), // surface: Arc::new(window), // } } #[cfg(target_family = "wasm")] fn destroy_window(&self, surface_id: SurfaceId) { let window = window().expect("should have a window in this context"); let window = Object::from(window); let image_canvases_key = JsValue::from_str("imageCanvases"); let wrapper = || { if let Ok(canvases) = Reflect::get(&window, &image_canvases_key) { // Convert key and value to JsValue let js_key = JsValue::from_str(surface_id.0.to_string().as_str()); // Use Reflect API 
to set property Reflect::delete_property(&canvases.into(), &js_key)?; } Ok::<_, JsValue>(()) }; wrapper().expect("should be able to set canvas in global scope") } #[cfg(not(target_family = "wasm"))] fn destroy_window(&self, _surface_id: SurfaceId) {} #[cfg(feature = "wgpu")] fn gpu_executor(&self) -> Option<&Self::Executor> { self.gpu_executor.as_ref() } fn load_resource(&self, url: impl AsRef<str>) -> Result<ResourceFuture, ApplicationError> { let url = url::Url::parse(url.as_ref()).map_err(|_| ApplicationError::InvalidUrl)?; log::trace!("Loading resource: {url:?}"); match url.scheme() { #[cfg(feature = "tokio")] "file" => { let path = url.to_file_path().map_err(|_| ApplicationError::NotFound)?; let path = path.to_str().ok_or(ApplicationError::NotFound)?; let path = path.to_owned(); Ok(Box::pin(async move { let file = tokio::fs::File::open(path).await.map_err(|_| ApplicationError::NotFound)?; let mut reader = tokio::io::BufReader::new(file); let mut data = Vec::new(); reader.read_to_end(&mut data).await.map_err(|_| ApplicationError::NotFound)?; Ok(Arc::from(data)) }) as ResourceFuture) } "http" | "https" => { let url = url.to_string(); Ok(Box::pin(async move { let client = reqwest::Client::new(); let response = client.get(url).send().await.map_err(|_| ApplicationError::NotFound)?; let data = response.bytes().await.map_err(|_| ApplicationError::NotFound)?; Ok(Arc::from(data.to_vec())) }) as ResourceFuture) } "graphite" => { let path = url.path(); let path = path.to_owned(); log::trace!("Loading local resource: {path}"); let data = self.resources.get(&path).ok_or(ApplicationError::NotFound)?.clone(); Ok(Box::pin(async move { Ok(data.clone()) }) as ResourceFuture) } _ => Err(ApplicationError::NotFound), } } fn window(&self) -> Option<SurfaceHandle<Self::Surface>> { self.windows.first().map(|wrapper| wrapper.window.clone()) } } #[cfg(feature = "wgpu")] pub type WasmSurfaceHandle = SurfaceHandle<wgpu_executor::Window>; #[cfg(feature = "wgpu")] pub type 
WasmSurfaceHandleFrame = graphene_application_io::SurfaceHandleFrame<wgpu_executor::Window>; #[derive(Clone, Debug, PartialEq, Hash, specta::Type, serde::Serialize, serde::Deserialize)] pub struct EditorPreferences { pub use_vello: bool, } impl graphene_application_io::GetEditorPreferences for EditorPreferences { fn use_vello(&self) -> bool { self.use_vello } } impl Default for EditorPreferences { fn default() -> Self { Self { #[cfg(target_family = "wasm")] use_vello: false, #[cfg(not(target_family = "wasm"))] use_vello: true, } } } unsafe impl StaticType for EditorPreferences { type Static = EditorPreferences; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/graphene_compiler.rs
node-graph/graph-craft/src/graphene_compiler.rs
use crate::document::NodeNetwork; use crate::proto::{LocalFuture, ProtoNetwork}; use std::error::Error; pub struct Compiler {} impl Compiler { pub fn compile(&self, mut network: NodeNetwork) -> impl Iterator<Item = Result<ProtoNetwork, String>> { let node_ids = network.nodes.keys().copied().collect::<Vec<_>>(); network.populate_dependants(); for id in node_ids { network.flatten(id); } network.resolve_scope_inputs(); network.remove_redundant_id_nodes(); // network.remove_dead_nodes(0); let proto_networks = network.into_proto_networks(); proto_networks.map(move |mut proto_network| { proto_network.insert_context_nullification_nodes()?; proto_network.generate_stable_node_ids(); Ok(proto_network) }) } pub fn compile_single(&self, network: NodeNetwork) -> Result<ProtoNetwork, String> { assert_eq!(network.exports.len(), 1, "Graph with multiple outputs not yet handled"); let Some(proto_network) = self.compile(network).next() else { return Err("Failed to convert graph into proto graph".to_string()); }; proto_network } } pub trait Executor<I, O> { fn execute(&self, input: I) -> LocalFuture<'_, Result<O, Box<dyn Error>>>; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/proto.rs
node-graph/graph-craft/src/proto.rs
use crate::document::value::TaggedValue; use crate::document::{InlineRust, value}; use crate::document::{NodeId, OriginalLocation}; pub use core_types::registry::*; use core_types::*; use rustc_hash::FxHashMap; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; use std::hash::Hash; #[derive(Debug, Default, PartialEq, Clone, Hash, Eq, serde::Serialize, serde::Deserialize)] /// A list of [`ProtoNode`]s, which is an intermediate step between the [`crate::document::NodeNetwork`] and the `BorrowTree` containing a single flattened network. pub struct ProtoNetwork { // TODO: remove this since it seems to be unused? // Should a proto Network even allow inputs? Don't think so pub inputs: Vec<NodeId>, /// The node ID that provides the output. This node is then responsible for calling the rest of the graph. pub output: NodeId, /// A list of nodes stored in a Vec to allow for sorting. pub nodes: Vec<(NodeId, ProtoNode)>, } impl core::fmt::Display for ProtoNetwork { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.write_str("Proto Network with nodes: ")?; fn write_node(f: &mut core::fmt::Formatter<'_>, network: &ProtoNetwork, id: NodeId, indent: usize) -> core::fmt::Result { f.write_str(&"\t".repeat(indent))?; let Some((_, node)) = network.nodes.iter().find(|(node_id, _)| *node_id == id) else { return f.write_str("{{Unknown Node}}"); }; f.write_str("Node: ")?; f.write_str(&node.identifier.name)?; f.write_str("\n")?; f.write_str(&"\t".repeat(indent))?; f.write_str("{\n")?; f.write_str(&"\t".repeat(indent + 1))?; f.write_str("Input: ")?; f.write_fmt(format_args!("Call Argument (type = {:?})", node.call_argument))?; f.write_str("\n")?; match &node.construction_args { ConstructionArgs::Value(value) => { f.write_str(&"\t".repeat(indent + 1))?; f.write_fmt(format_args!("Value construction argument: {value:?}"))? 
} ConstructionArgs::Nodes(nodes) => { for id in nodes { write_node(f, network, *id, indent + 1)?; } } ConstructionArgs::Inline(inline) => { f.write_str(&"\t".repeat(indent + 1))?; f.write_fmt(format_args!("Inline construction argument: {inline:?}"))? } } f.write_str(&"\t".repeat(indent))?; f.write_str("}\n")?; Ok(()) } let id = self.output; write_node(f, self, id, 0) } } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] /// Defines the arguments used to construct the boxed node struct. This is used to call the constructor function in the `node_registry.rs` file - which is hidden behind a wall of macros. pub enum ConstructionArgs { /// A value of a type that is known, allowing serialization (serde::Deserialize is not object safe) Value(MemoHash<value::TaggedValue>), /// A list of nodes used as inputs to the constructor function in `node_registry.rs`. /// The bool indicates whether to treat the node as lambda node. // TODO: use a struct for clearer naming. Nodes(Vec<NodeId>), /// Used for GPU computation to work around the limitations of rust-gpu. 
Inline(InlineRust), } impl Eq for ConstructionArgs {} impl PartialEq for ConstructionArgs { fn eq(&self, other: &Self) -> bool { match (&self, &other) { (Self::Nodes(n1), Self::Nodes(n2)) => n1 == n2, (Self::Value(v1), Self::Value(v2)) => v1 == v2, _ => { use std::hash::Hasher; let hash = |input: &Self| { let mut hasher = rustc_hash::FxHasher::default(); input.hash(&mut hasher); hasher.finish() }; hash(self) == hash(other) } } } } impl Hash for ConstructionArgs { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { core::mem::discriminant(self).hash(state); match self { Self::Nodes(nodes) => { for node in nodes { node.hash(state); } } Self::Value(value) => value.hash(state), Self::Inline(inline) => inline.hash(state), } } } impl ConstructionArgs { pub fn new_function_args(&self) -> Vec<String> { match self { ConstructionArgs::Nodes(nodes) => nodes.iter().map(|n| format!("n{:0x}", n.0)).collect(), ConstructionArgs::Value(value) => vec![value.to_primitive_string()], ConstructionArgs::Inline(inline) => vec![inline.expr.clone()], } } } #[derive(Debug, Clone, PartialEq, Hash, Eq, serde::Serialize, serde::Deserialize)] /// A proto node is an intermediate step between the `DocumentNode` and the boxed struct that actually runs the node (found in the [`BorrowTree`]). /// At different stages in the compilation process, this struct will be transformed into a reduced (more restricted) form acting as a subset of its original form, but that restricted form is still valid in the earlier stage in the compilation process before it was transformed. 
pub struct ProtoNode { pub construction_args: ConstructionArgs, pub call_argument: Type, pub identifier: ProtoNodeIdentifier, pub original_location: OriginalLocation, pub skip_deduplication: bool, pub(crate) context_features: ContextDependencies, } impl Default for ProtoNode { fn default() -> Self { Self { identifier: graphene_core::ops::identity::IDENTIFIER, construction_args: ConstructionArgs::Value(value::TaggedValue::U32(0).into()), call_argument: concrete!(()), original_location: OriginalLocation::default(), skip_deduplication: false, context_features: Default::default(), } } } impl ProtoNode { /// A stable node ID is a hash of a node that should stay constant. This is used in order to remove duplicates from the graph. /// In the case of `skip_deduplication`, the `document_node_path` is also hashed in order to avoid duplicate monitor nodes from being removed (which would make it impossible to load thumbnails). pub fn stable_node_id(&self) -> Option<NodeId> { use std::hash::Hasher; let mut hasher = rustc_hash::FxHasher::default(); self.identifier.name.hash(&mut hasher); self.construction_args.hash(&mut hasher); if self.skip_deduplication { self.original_location.path.hash(&mut hasher); } std::mem::discriminant(&self.call_argument).hash(&mut hasher); self.call_argument.hash(&mut hasher); Some(NodeId(hasher.finish())) } /// Construct a new [`ProtoNode`] with the specified construction args and a `ClonedNode` implementation. 
pub fn value(value: ConstructionArgs, path: Vec<NodeId>) -> Self { let inputs_exposed = match &value { ConstructionArgs::Nodes(nodes) => nodes.len() + 1, _ => 2, }; Self { identifier: ProtoNodeIdentifier::new("core_types::value::ClonedNode"), construction_args: value, call_argument: concrete!(Context), original_location: OriginalLocation { path: Some(path), inputs_exposed: vec![false; inputs_exposed], ..Default::default() }, skip_deduplication: false, context_features: Default::default(), } } /// Converts all references to other node IDs into new IDs by running the specified function on them. /// This can be used when changing the IDs of the nodes, for example in the case of generating stable IDs. pub fn map_ids(&mut self, f: impl Fn(NodeId) -> NodeId) { if let ConstructionArgs::Nodes(ids) = &mut self.construction_args { ids.iter_mut().for_each(|id| *id = f(*id)); } } pub fn unwrap_construction_nodes(&self) -> Vec<NodeId> { match &self.construction_args { ConstructionArgs::Nodes(nodes) => nodes.clone(), _ => panic!("tried to unwrap nodes from non node construction args \n node: {self:#?}"), } } } #[derive(Clone, Copy, PartialEq)] enum NodeState { Unvisited, Visiting, Visited, } impl ProtoNetwork { fn check_ref(&self, ref_id: &NodeId, id: &NodeId) { debug_assert!( self.nodes.iter().any(|(check_id, _)| check_id == ref_id), "Node with ID {id} has a reference which uses the node with ID {ref_id} which doesn't exist in network {self:#?}" ); } #[cfg(debug_assertions)] pub fn example() -> (Self, NodeId, ProtoNode) { let node_id = NodeId(1); let proto_node = ProtoNode::default(); let proto_network = ProtoNetwork { inputs: vec![node_id], output: node_id, nodes: vec![(node_id, proto_node.clone())], }; (proto_network, node_id, proto_node) } /// Construct a hashmap containing a list of the nodes that depend on this proto network. 
pub fn collect_outwards_edges(&self) -> HashMap<NodeId, Vec<NodeId>> { let mut edges: HashMap<NodeId, Vec<NodeId>> = HashMap::new(); for (id, node) in &self.nodes { if let ConstructionArgs::Nodes(ref_nodes) = &node.construction_args { for ref_id in ref_nodes { self.check_ref(ref_id, id); edges.entry(*ref_id).or_default().push(*id) } } } edges } /// Convert all node IDs to be stable (based on the hash generated by [`ProtoNode::stable_node_id`]). /// This function requires that the graph be topologically sorted. pub fn generate_stable_node_ids(&mut self) { debug_assert!(self.is_topologically_sorted()); let outwards_edges = self.collect_outwards_edges(); for index in 0..self.nodes.len() { let Some(sni) = self.nodes[index].1.stable_node_id() else { panic!("failed to generate stable node id for node {:#?}", self.nodes[index].1); }; self.replace_node_id(&outwards_edges, NodeId(index as u64), sni); self.nodes[index].0 = sni; } } // TODO: Remove /// Create a hashmap with the list of nodes this proto network depends on/uses as inputs. 
pub fn collect_inwards_edges(&self) -> HashMap<NodeId, Vec<NodeId>> { let mut edges: HashMap<NodeId, Vec<NodeId>> = HashMap::new(); for (id, node) in &self.nodes { if let ConstructionArgs::Nodes(ref_nodes) = &node.construction_args { for ref_id in ref_nodes { self.check_ref(ref_id, id); edges.entry(*id).or_default().push(*ref_id) } } } edges } fn collect_inwards_edges_with_mapping(&self) -> (Vec<Vec<usize>>, FxHashMap<NodeId, usize>) { let id_map: FxHashMap<_, _> = self.nodes.iter().enumerate().map(|(idx, (id, _))| (*id, idx)).collect(); // Collect inwards edges using dense indices let mut inwards_edges = vec![Vec::new(); self.nodes.len()]; for (node_id, node) in &self.nodes { let node_index = id_map[node_id]; if let ConstructionArgs::Nodes(ref_nodes) = &node.construction_args { for ref_id in ref_nodes { self.check_ref(ref_id, &NodeId(node_index as u64)); inwards_edges[node_index].push(id_map[ref_id]); } } } (inwards_edges, id_map) } /// Inserts context nullification nodes to optimize caching. /// This analysis is performed after topological sorting to ensure proper dependency tracking. 
pub fn insert_context_nullification_nodes(&mut self) -> Result<(), String> { // Perform topological sort once self.reorder_ids()?; self.find_context_dependencies(self.output); // Perform topological sort a second time to integrate the new nodes self.reorder_ids()?; Ok(()) } fn insert_context_nullification_node(&mut self, node_id: NodeId, context_deps: ContextFeatures) -> NodeId { let (_, node) = &self.nodes[node_id.0 as usize]; let mut path = node.original_location.path.clone(); // Add a path extension with a placeholder value which should not conflict with existing paths if let Some(p) = path.as_mut() { p.push(NodeId(10)) } let memo_node_id = NodeId(self.nodes.len() as u64); self.nodes.push(( memo_node_id, ProtoNode { construction_args: ConstructionArgs::Nodes(vec![node_id]), call_argument: concrete!(Context), identifier: graphene_core::memo::memo::IDENTIFIER, original_location: OriginalLocation { path: path.clone(), ..Default::default() }, ..Default::default() }, )); let nullification_value_node_id = NodeId(self.nodes.len() as u64); self.nodes.push(( nullification_value_node_id, ProtoNode { construction_args: ConstructionArgs::Value(MemoHash::new(TaggedValue::ContextFeatures(context_deps))), call_argument: concrete!(Context), identifier: ProtoNodeIdentifier::new("core_types::value::ClonedNode"), original_location: OriginalLocation { path: path.clone(), ..Default::default() }, ..Default::default() }, )); let nullification_node_id = NodeId(self.nodes.len() as u64); self.nodes.push(( nullification_node_id, ProtoNode { construction_args: ConstructionArgs::Nodes(vec![memo_node_id, nullification_value_node_id]), call_argument: concrete!(Context), identifier: graphene_core::context_modification::context_modification::IDENTIFIER, original_location: OriginalLocation { path: path.clone(), ..Default::default() }, ..Default::default() }, )); nullification_node_id } fn find_context_dependencies(&mut self, id: NodeId) -> (ContextFeatures, Option<NodeId>) { let mut 
branch_dependencies = Vec::new(); let mut combined_deps = ContextFeatures::default(); let node_index = id.0 as usize; let context_features = self.nodes[node_index].1.context_features; let mut inputs = match &self.nodes[node_index].1.construction_args { // We pretend like we have already placed context modification nodes after ourselves because value nodes don't need to be cached ConstructionArgs::Value(_) => return (context_features.extract, Some(id)), ConstructionArgs::Nodes(items) => items.clone(), ConstructionArgs::Inline(_) => return (context_features.extract, Some(id)), }; // Compute the dependencies for each branch and combine all of them for &node in &inputs { let branch = self.find_context_dependencies(node); branch_dependencies.push(branch); combined_deps |= branch.0; } let mut new_deps = combined_deps; // Remove requirements which this node provides new_deps &= !context_features.inject; // Add requirements we have new_deps |= context_features.extract; // If we either introduce new dependencies, we can cache all children which don't yet need that dependency let we_introduce_new_deps = !combined_deps.contains(new_deps); // For diverging branches, we can add a cache node for all branches which don't reqire all dependencies for (child_node, (deps, new_id)) in inputs.iter_mut().zip(branch_dependencies.into_iter()) { if let Some(new_id) = new_id { *child_node = new_id; } else if we_introduce_new_deps || deps != combined_deps { *child_node = self.insert_context_nullification_node(*child_node, deps); } } self.nodes[node_index].1.construction_args = ConstructionArgs::Nodes(inputs); // Which dependencies do we supply (and don't need ourselves)? let net_injections = context_features.inject.difference(context_features.extract); // Which dependencies still need to be met after this node? let remaining_deps_from_children = combined_deps.difference(net_injections); // Do we satisfy any existing dependencies? 
let we_supply_existing_deps = !combined_deps.difference(remaining_deps_from_children).is_empty(); let mut new_id = None; if we_supply_existing_deps { // Our set of context dependencies has shrunk so we can add a cache node after the current node new_id = Some(self.insert_context_nullification_node(id, new_deps)); } (new_deps, new_id) } /// Update all of the references to a node ID in the graph with a new ID named `compose_node_id`. fn replace_node_id(&mut self, outwards_edges: &HashMap<NodeId, Vec<NodeId>>, node_id: NodeId, replacement_node_id: NodeId) { // Update references in other nodes to use the new node if let Some(referring_nodes) = outwards_edges.get(&node_id) { for &referring_node_id in referring_nodes { let (_, referring_node) = &mut self.nodes[referring_node_id.0 as usize]; referring_node.map_ids(|id| if id == node_id { replacement_node_id } else { id }) } } if self.output == node_id { self.output = replacement_node_id; } self.inputs.iter_mut().for_each(|id| { if *id == node_id { *id = replacement_node_id; } }); } // Based on https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search // This approach excludes nodes that are not connected pub fn topological_sort(&self) -> Result<(Vec<NodeId>, FxHashMap<NodeId, usize>), String> { let (inwards_edges, id_map) = self.collect_inwards_edges_with_mapping(); let mut sorted = Vec::with_capacity(self.nodes.len()); let mut stack = vec![id_map[&self.output]]; let mut state = vec![NodeState::Unvisited; self.nodes.len()]; while let Some(&node_index) = stack.last() { match state[node_index] { NodeState::Unvisited => { state[node_index] = NodeState::Visiting; for &dep_index in inwards_edges[node_index].iter().rev() { match state[dep_index] { NodeState::Visiting => { return Err(format!("Cycle detected involving node {}", self.nodes[dep_index].0)); } NodeState::Unvisited => { stack.push(dep_index); } NodeState::Visited => {} } } } NodeState::Visiting => { stack.pop(); state[node_index] = NodeState::Visited; 
sorted.push(NodeId(node_index as u64)); } NodeState::Visited => { stack.pop(); } } } Ok((sorted, id_map)) } fn is_topologically_sorted(&self) -> bool { let mut visited = HashSet::new(); let inwards_edges = self.collect_inwards_edges(); for (id, _) in &self.nodes { for &dependency in inwards_edges.get(id).unwrap_or(&Vec::new()) { if !visited.contains(&dependency) { dbg!(id, dependency); dbg!(&visited); dbg!(&self.nodes); return false; } } visited.insert(*id); } true } /// Sort the nodes vec so it is in a topological order. This ensures that no node takes an input from a node that is found later in the list. fn reorder_ids(&mut self) -> Result<(), String> { let (order, _id_map) = self.topological_sort()?; // // Map of node ids to their current index in the nodes vector // let current_positions: FxHashMap<_, _> = self.nodes.iter().enumerate().map(|(pos, (id, _))| (*id, pos)).collect(); // // Map of node ids to their new index based on topological order let new_positions: FxHashMap<_, _> = order.iter().enumerate().map(|(pos, id)| (self.nodes[id.0 as usize].0, pos)).collect(); // assert_eq!(id_map, current_positions); // Create a new nodes vector based on the topological order let mut new_nodes = Vec::with_capacity(order.len()); for (index, &id) in order.iter().enumerate() { let mut node = std::mem::take(&mut self.nodes[id.0 as usize].1); // Update node references to reflect the new order node.map_ids(|id| NodeId(*new_positions.get(&id).expect("node not found in lookup table") as u64)); new_nodes.push((NodeId(index as u64), node)); } // Update node references to reflect the new order // new_nodes.iter_mut().for_each(|(_, node)| { // node.map_ids(|id| *new_positions.get(&id).expect("node not found in lookup table"), false); // }); // Update the nodes vector and other references self.nodes = new_nodes; self.inputs = self.inputs.iter().filter_map(|id| new_positions.get(id).map(|x| NodeId(*x as u64))).collect(); self.output = NodeId(*new_positions.get(&self.output).unwrap() 
as u64); assert_eq!(order.len(), self.nodes.len()); Ok(()) } } #[derive(Clone, PartialEq, serde::Serialize, serde::Deserialize)] pub enum GraphErrorType { NodeNotFound(NodeId), UnexpectedGenerics { index: usize, inputs: Vec<Type>, }, NoImplementations, NoConstructor, /// The `inputs` represents a formatted list of input indices corresponding to their types. /// Each element in `error_inputs` represents a valid `NodeIOTypes` implementation. /// The inner Vec stores the inputs which need to be changed and what type each needs to be changed to. InvalidImplementations { inputs: String, error_inputs: Vec<Vec<(usize, (Type, Type))>>, }, MultipleImplementations { inputs: String, valid: Vec<NodeIOTypes>, }, } impl Debug for GraphErrorType { // TODO: format with the document graph context so the input index is the same as in the graph UI. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { GraphErrorType::NodeNotFound(id) => write!(f, "Input node {id} is not present in the typing context"), GraphErrorType::UnexpectedGenerics { index, inputs } => write!(f, "Generic inputs should not exist but found at {index}: {inputs:?}"), GraphErrorType::NoImplementations => write!(f, "No implementations found"), GraphErrorType::NoConstructor => write!(f, "No construct found for node"), GraphErrorType::InvalidImplementations { inputs, error_inputs } => { let format_error = |(index, (found, expected)): &(usize, (Type, Type))| { let index = index + 1; format!( "\ • Input {index}:\n\ …found: {found}\n\ …expected: {expected}\ " ) }; let format_error_list = |errors: &Vec<(usize, (Type, Type))>| errors.iter().map(format_error).collect::<Vec<_>>().join("\n"); let mut errors = error_inputs.iter().map(format_error_list).collect::<Vec<_>>(); errors.sort(); let errors = errors.join("\n"); let incompatibility = if errors.chars().filter(|&c| c == '•').count() == 1 { "This input type is incompatible:" } else { "These input types are incompatible:" }; write!( f, "\ 
{incompatibility}\n\ {errors}\n\ \n\ The node is currently receiving all of the following input types:\n\ {inputs}\n\ This is not a supported arrangement of types for the node.\ " ) } GraphErrorType::MultipleImplementations { inputs, valid } => write!(f, "Multiple implementations found ({inputs}):\n{valid:#?}"), } } } #[derive(Clone, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GraphError { pub node_path: Vec<NodeId>, pub identifier: Cow<'static, str>, pub error: GraphErrorType, } impl GraphError { pub fn new(node: &ProtoNode, text: impl Into<GraphErrorType>) -> Self { Self { node_path: node.original_location.path.clone().unwrap_or_default(), identifier: node.identifier.name.clone(), error: text.into(), } } } impl Debug for GraphError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("NodeGraphError") .field("path", &self.node_path.iter().map(|id| id.0).collect::<Vec<_>>()) .field("identifier", &self.identifier.to_string()) .field("error", &self.error) .finish() } } pub type GraphErrors = Vec<GraphError>; /// The `TypingContext` is used to store the types of the nodes indexed by their stable node id. #[derive(Default, Clone, dyn_any::DynAny)] pub struct TypingContext { lookup: Cow<'static, HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>>>, inferred: HashMap<NodeId, NodeIOTypes>, constructor: HashMap<NodeId, NodeConstructor>, } impl TypingContext { /// Creates a new `TypingContext` with the given lookup table. pub fn new(lookup: &'static HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>>) -> Self { Self { lookup: Cow::Borrowed(lookup), ..Default::default() } } /// Updates the `TypingContext` with a given proto network. This will infer the types of the nodes /// and store them in the `inferred` field. The proto network has to be topologically sorted /// and contain fully resolved stable node ids. 
pub fn update(&mut self, network: &ProtoNetwork) -> Result<(), GraphErrors> { for (id, node) in network.nodes.iter() { self.infer(*id, node)?; } Ok(()) } pub fn remove_inference(&mut self, node_id: NodeId) -> Option<NodeIOTypes> { self.constructor.remove(&node_id); self.inferred.remove(&node_id) } /// Returns the node constructor for a given node id. pub fn constructor(&self, node_id: NodeId) -> Option<NodeConstructor> { self.constructor.get(&node_id).copied() } /// Returns the type of a given node id if it exists pub fn type_of(&self, node_id: NodeId) -> Option<&NodeIOTypes> { self.inferred.get(&node_id) } /// Returns the inferred types for a given node id. pub fn infer(&mut self, node_id: NodeId, node: &ProtoNode) -> Result<NodeIOTypes, GraphErrors> { // Return the inferred type if it is already known if let Some(inferred) = self.inferred.get(&node_id) { return Ok(inferred.clone()); } let inputs = match node.construction_args { // If the node has a value input we can infer the return type from it ConstructionArgs::Value(ref v) => { // TODO: This should return a reference to the value let types = NodeIOTypes::new(concrete!(Context), Type::Future(Box::new(v.ty())), vec![]); self.inferred.insert(node_id, types.clone()); return Ok(types); } // If the node has nodes as inputs we can infer the types from the node outputs ConstructionArgs::Nodes(ref nodes) => nodes .iter() .map(|id| { self.inferred .get(id) .ok_or_else(|| vec![GraphError::new(node, GraphErrorType::NodeNotFound(*id))]) .map(|node| node.ty()) }) .collect::<Result<Vec<Type>, GraphErrors>>()?, ConstructionArgs::Inline(ref inline) => vec![inline.ty.clone()], }; // Get the node input type from the proto node declaration let call_argument = &node.call_argument; let impls = self.lookup.get(&node.identifier).ok_or_else(|| vec![GraphError::new(node, GraphErrorType::NoImplementations)])?; if let Some(index) = inputs.iter().position(|p| { matches!(p, Type::Fn(_, b) if matches!(b.as_ref(), Type::Generic(_))) }) { 
return Err(vec![GraphError::new(node, GraphErrorType::UnexpectedGenerics { index, inputs })]); } /// Checks if a proposed input to a particular (primary or secondary) input connector is valid for its type signature. /// `from` indicates the value given to a input, `to` indicates the input's allowed type as specified by its type signature. fn valid_type(from: &Type, to: &Type) -> bool { match (from, to) { // Direct comparison of two concrete types. (Type::Concrete(type1), Type::Concrete(type2)) => type1 == type2, // Check inner type for futures (Type::Future(type1), Type::Future(type2)) => valid_type(type1, type2), // Direct comparison of two function types. // Note: in the presence of subtyping, functions are considered on a "greater than or equal to" basis of its function type's generality. // That means we compare their types with a contravariant relationship, which means that a more general type signature may be substituted for a more specific type signature. // For example, we allow `T -> V` to be substituted with `T' -> V` or `() -> V` where T' and () are more specific than T. // This allows us to supply anything to a function that is satisfied with `()`. // In other words, we are implementing these two relations, where the >= operator means that the left side is more general than the right side: // - `T >= T' ⇒ (T' -> V) >= (T -> V)` (functions are contravariant in their input types) // - `V >= V' ⇒ (T -> V) >= (T -> V')` (functions are covariant in their output types) // While these two relations aren't a truth about the universe, they are a design decision that we are employing in our language design that is also common in other languages. // For example, Rust implements these same relations as it describes here: <https://doc.rust-lang.org/nomicon/subtyping.html> // Graphite doesn't have subtyping currently, but it used to have it, and may do so again, so we make sure to compare types in this way to make things easier. 
// More details explained here: <https://github.com/GraphiteEditor/Graphite/issues/1741> (Type::Fn(in1, out1), Type::Fn(in2, out2)) => valid_type(out2, out1) && valid_type(in1, in2), // If either the proposed input or the allowed input are generic, we allow the substitution (meaning this is a valid subtype). // TODO: Add proper generic counting which is not based on the name (Type::Generic(_), _) | (_, Type::Generic(_)) => true, // Reject unknown type relationships. _ => false, } } // List of all implementations that match the input types let valid_output_types = impls .keys() .filter(|node_io| valid_type(&node_io.call_argument, call_argument) && inputs.iter().zip(node_io.inputs.iter()).all(|(p1, p2)| valid_type(p1, p2))) .collect::<Vec<_>>(); // Attempt to substitute generic types with concrete types and save the list of results let substitution_results = valid_output_types .iter() .map(|node_io| { let generics_lookup: Result<HashMap<_, _>, _> = collect_generics(node_io) .iter() .map(|generic| check_generic(node_io, call_argument, &inputs, generic).map(|x| (generic.to_string(), x))) .collect(); generics_lookup.map(|generics_lookup| { let orig_node_io = (*node_io).clone(); let mut new_node_io = orig_node_io.clone(); replace_generics(&mut new_node_io, &generics_lookup); (new_node_io, orig_node_io) }) }) .collect::<Vec<_>>(); // Collect all substitutions that are valid let valid_impls = substitution_results.iter().filter_map(|result| result.as_ref().ok()).collect::<Vec<_>>(); match valid_impls.as_slice() { [] => { let convert_node_index_offset = node.original_location.auto_convert_index.unwrap_or(0); let mut best_errors = usize::MAX; let mut error_inputs = Vec::new(); for node_io in impls.keys() { // For errors on Convert nodes, offset the input index so it correctly corresponds to the node it is connected to. 
let current_errors = [call_argument] .into_iter() .chain(&inputs) .cloned() .zip([&node_io.call_argument].into_iter().chain(&node_io.inputs).cloned()) .enumerate() .filter(|(_, (p1, p2))| !valid_type(p1, p2)) .map(|(index, expected)| (index - 1 + convert_node_index_offset, expected)) .collect::<Vec<_>>(); if current_errors.len() < best_errors { best_errors = current_errors.len(); error_inputs.clear(); } if current_errors.len() <= best_errors { error_inputs.push(current_errors); } } let inputs = [call_argument] .into_iter() .chain(&inputs) .enumerate() .filter_map(|(i, t)| if i == 0 { None } else { Some(format!("• Input {}: {t}", i + convert_node_index_offset)) }) .collect::<Vec<_>>() .join("\n"); Err(vec![GraphError::new(node, GraphErrorType::InvalidImplementations { inputs, error_inputs })]) } [(node_io, org_nio)] => { let node_io = node_io.clone(); // Save the inferred type self.inferred.insert(node_id, node_io.clone()); self.constructor.insert(node_id, impls[org_nio]); Ok(node_io) } // If two types are available and one of them accepts () an input, always choose that one [first, second] => { if first.0.call_argument != second.0.call_argument { for (node_io, orig_nio) in [first, second] { if node_io.call_argument != concrete!(()) { continue; } // Save the inferred type self.inferred.insert(node_id, node_io.clone()); self.constructor.insert(node_id, impls[orig_nio]); return Ok(node_io.clone()); } } let inputs = [call_argument].into_iter().chain(&inputs).map(|t| t.to_string()).collect::<Vec<_>>().join(", "); let valid = valid_output_types.into_iter().cloned().collect(); Err(vec![GraphError::new(node, GraphErrorType::MultipleImplementations { inputs, valid })]) } _ => { let inputs = [call_argument].into_iter().chain(&inputs).map(|t| t.to_string()).collect::<Vec<_>>().join(", "); let valid = valid_output_types.into_iter().cloned().collect(); Err(vec![GraphError::new(node, GraphErrorType::MultipleImplementations { inputs, valid })]) } } } } /// Returns a list of all 
generic types used in the node fn collect_generics(types: &NodeIOTypes) -> Vec<Cow<'static, str>> { let inputs = [&types.call_argument].into_iter().chain(types.inputs.iter().map(|x| x.nested_type())); let mut generics = inputs .filter_map(|t| match t { Type::Generic(out) => Some(out.clone()), _ => None, }) .collect::<Vec<_>>(); if let Type::Generic(out) = &types.return_value { generics.push(out.clone()); } generics.dedup(); generics } /// Checks if a generic type can be substituted with a concrete type and returns the concrete type fn check_generic(types: &NodeIOTypes, input: &Type, parameters: &[Type], generic: &str) -> Result<Type, String> { let inputs = [(Some(&types.call_argument), Some(input))] .into_iter() .chain(types.inputs.iter().map(|x| x.fn_input()).zip(parameters.iter().map(|x| x.fn_input()))) .chain(types.inputs.iter().map(|x| x.fn_output()).zip(parameters.iter().map(|x| x.fn_output()))); let concrete_inputs = inputs.filter(|(ni, _)| matches!(ni, Some(Type::Generic(input)) if generic == input));
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
true
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/src/document/value.rs
node-graph/graph-craft/src/document/value.rs
use super::DocumentNode; use crate::proto::{Any as DAny, FutureAny}; use crate::wasm_application_io::WasmEditorApi; use brush_nodes::brush_cache::BrushCache; use brush_nodes::brush_stroke::BrushStroke; use core_types::table::Table; use core_types::uuid::NodeId; use core_types::{Color, ContextFeatures, MemoHash, Node, Type}; use dyn_any::DynAny; pub use dyn_any::StaticType; use glam::{Affine2, Vec2}; pub use glam::{DAffine2, DVec2, IVec2, UVec2}; use graphene_application_io::{ImageTexture, SurfaceFrame}; use graphic_types::Artboard; use graphic_types::Graphic; use graphic_types::Vector; use graphic_types::raster_types::Image; use graphic_types::raster_types::{CPU, Raster}; use graphic_types::vector_types::vector; use graphic_types::vector_types::vector::ReferencePoint; use graphic_types::vector_types::vector::style::Fill; use graphic_types::vector_types::vector::style::GradientStops; use rendering::RenderMetadata; use std::fmt::Display; use std::hash::Hash; use std::marker::PhantomData; use std::str::FromStr; pub use std::sync::Arc; pub struct TaggedValueTypeError; /// Macro to generate the tagged value enum. macro_rules! tagged_value { ($ ($( #[$meta:meta] )* $identifier:ident ($ty:ty) ),* $(,)?) 
=> { /// A type that is known, allowing serialization (serde::Deserialize is not object safe) #[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[allow(clippy::large_enum_variant)] // TODO(TrueDoctor): Properly solve this disparity between the size of the largest and next largest variants pub enum TaggedValue { None, $( $(#[$meta] ) *$identifier( $ty ), )* RenderOutput(RenderOutput), SurfaceFrame(SurfaceFrame), #[serde(skip)] EditorApi(Arc<WasmEditorApi>) } // We must manually implement hashing because some values are floats and so do not reproducibly hash (see FakeHash below) #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for TaggedValue { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { core::mem::discriminant(self).hash(state); match self { Self::None => {} $( Self::$identifier(x) => {x.hash(state)}),* Self::RenderOutput(x) => x.hash(state), Self::SurfaceFrame(x) => x.hash(state), Self::EditorApi(x) => x.hash(state), } } } impl<'a> TaggedValue { /// Converts to a Box<dyn DynAny> pub fn to_dynany(self) -> DAny<'a> { match self { Self::None => Box::new(()), $( Self::$identifier(x) => Box::new(x), )* Self::RenderOutput(x) => Box::new(x), Self::SurfaceFrame(x) => Box::new(x), Self::EditorApi(x) => Box::new(x), } } /// Converts to a Arc<dyn Any + Send + Sync + 'static> pub fn to_any(self) -> Arc<dyn std::any::Any + Send + Sync + 'static> { match self { Self::None => Arc::new(()), $( Self::$identifier(x) => Arc::new(x), )* Self::RenderOutput(x) => Arc::new(x), Self::SurfaceFrame(x) => Arc::new(x), Self::EditorApi(x) => Arc::new(x), } } /// Creates a core_types::Type::Concrete(TypeDescriptor { .. 
}) with the type of the value inside the tagged value pub fn ty(&self) -> Type { match self { Self::None => concrete!(()), $( Self::$identifier(_) => concrete!($ty), )* Self::RenderOutput(_) => concrete!(RenderOutput), Self::SurfaceFrame(_) => concrete!(SurfaceFrame), Self::EditorApi(_) => concrete!(&WasmEditorApi) } } /// Attempts to downcast the dynamic type to a tagged value pub fn try_from_any(input: Box<dyn DynAny<'a> + 'a>) -> Result<Self, String> { use dyn_any::downcast; use std::any::TypeId; match DynAny::type_id(input.as_ref()) { x if x == TypeId::of::<()>() => Ok(TaggedValue::None), $( x if x == TypeId::of::<$ty>() => Ok(TaggedValue::$identifier(*downcast(input).unwrap())), )* x if x == TypeId::of::<RenderOutput>() => Ok(TaggedValue::RenderOutput(*downcast(input).unwrap())), x if x == TypeId::of::<SurfaceFrame>() => Ok(TaggedValue::SurfaceFrame(*downcast(input).unwrap())), _ => Err(format!("Cannot convert {:?} to TaggedValue", DynAny::type_name(input.as_ref()))), } } /// Attempts to downcast the dynamic type to a tagged value pub fn try_from_std_any_ref(input: &dyn std::any::Any) -> Result<Self, String> { use std::any::TypeId; match input.type_id() { x if x == TypeId::of::<()>() => Ok(TaggedValue::None), $( x if x == TypeId::of::<$ty>() => Ok(TaggedValue::$identifier(<$ty as Clone>::clone(input.downcast_ref().unwrap()))), )* x if x == TypeId::of::<RenderOutput>() => Ok(TaggedValue::RenderOutput(RenderOutput::clone(input.downcast_ref().unwrap()))), x if x == TypeId::of::<SurfaceFrame>() => Ok(TaggedValue::SurfaceFrame(SurfaceFrame::clone(input.downcast_ref().unwrap()))), _ => Err(format!("Cannot convert {:?} to TaggedValue",std::any::type_name_of_val(input))), } } pub fn from_type(input: &Type) -> Option<Self> { match input { Type::Generic(_) => None, Type::Concrete(concrete_type) => { let internal_id = concrete_type.id?; use std::any::TypeId; // TODO: Add default implementations for types such as TaggedValue::Subpaths, and use the defaults here and in 
document_node_types // Tries using the default for the tagged value type. If it not implemented, then uses the default used in document_node_types. If it is not used there, then TaggedValue::None is returned. Some(match internal_id { x if x == TypeId::of::<()>() => TaggedValue::None, $( x if x == TypeId::of::<$ty>() => TaggedValue::$identifier(Default::default()), )* _ => return None, }) } Type::Fn(_, output) => TaggedValue::from_type(output), Type::Future(output) => { TaggedValue::from_type(output) } } } pub fn from_type_or_none(input: &Type) -> Self { Self::from_type(input).unwrap_or(TaggedValue::None) } } $( impl From<$ty> for TaggedValue { fn from(value: $ty) -> Self { Self::$identifier(value) } } )* $( impl<'a> TryFrom<&'a TaggedValue> for &'a $ty { type Error = TaggedValueTypeError; fn try_from(value: &'a TaggedValue) -> Result<Self, Self::Error> { match value{ TaggedValue::$identifier(value) => Ok(value), _ => Err(TaggedValueTypeError), } } } )* }; } tagged_value! { // =============== // PRIMITIVE TYPES // =============== F32(f32), F64(f64), U32(u32), U64(u64), Bool(bool), String(String), OptionalF64(Option<f64>), ColorNotInTable(Color), OptionalColorNotInTable(Option<Color>), // ======================== // LISTS OF PRIMITIVE TYPES // ======================== #[serde(alias = "VecF32")] // TODO: Eventually remove this alias document upgrade code VecF64(Vec<f64>), VecDVec2(Vec<DVec2>), F64Array4([f64; 4]), VecString(Vec<String>), NodePath(Vec<NodeId>), // =========== // TABLE TYPES // =========== GraphicUnused(Graphic), // TODO: This is unused but removing it causes `cargo test` to infinitely recurse its type solving; figure out why and then remove this #[cfg_attr(target_family = "wasm", serde(deserialize_with = "graphic_types::migrations::migrate_vector"))] // TODO: Eventually remove this migration document upgrade code #[serde(alias = "VectorData")] Vector(Table<Vector>), #[cfg_attr(target_family = "wasm", serde(deserialize_with = 
"graphic_types::raster_types::image::migrate_image_frame"))] // TODO: Eventually remove this migration document upgrade code #[serde(alias = "ImageFrame", alias = "RasterData", alias = "Image")] Raster(Table<Raster<CPU>>), #[cfg_attr(target_family = "wasm", serde(deserialize_with = "graphic_types::graphic::migrate_graphic"))] // TODO: Eventually remove this migration document upgrade code #[serde(alias = "GraphicGroup", alias = "Group")] Graphic(Table<Graphic>), #[cfg_attr(target_family = "wasm", serde(deserialize_with = "graphic_types::artboard::migrate_artboard"))] // TODO: Eventually remove this migration document upgrade code #[serde(alias = "ArtboardGroup")] Artboard(Table<Artboard>), #[cfg_attr(target_family = "wasm", serde(deserialize_with = "core_types::misc::migrate_color"))] // TODO: Eventually remove this migration document upgrade code #[serde(alias = "ColorTable", alias = "OptionalColor")] Color(Table<Color>), GradientTable(Table<GradientStops>), // ============ // STRUCT TYPES // ============ FVec2(Vec2), FAffine2(Affine2), #[serde(alias = "IVec2", alias = "UVec2")] DVec2(DVec2), DAffine2(DAffine2), Stroke(graphic_types::vector_types::vector::style::Stroke), Gradient(graphic_types::vector_types::vector::style::Gradient), #[serde(alias = "GradientPositions")] // TODO: Eventually remove this alias document upgrade code GradientStops(GradientStops), Font(text_nodes::Font), BrushStrokes(Vec<BrushStroke>), BrushCache(BrushCache), DocumentNode(DocumentNode), ContextFeatures(ContextFeatures), Curve(raster_nodes::curve::Curve), Footprint(core_types::transform::Footprint), VectorModification(Box<vector::VectorModification>), // ========== // ENUM TYPES // ========== Fill(vector::style::Fill), BlendMode(core_types::blending::BlendMode), LuminanceCalculation(raster_nodes::adjustments::LuminanceCalculation), XY(graphene_core::extract_xy::XY), RedGreenBlue(raster_nodes::adjustments::RedGreenBlue), RedGreenBlueAlpha(raster_nodes::adjustments::RedGreenBlueAlpha), 
RealTimeMode(graphene_core::animation::RealTimeMode), NoiseType(raster_nodes::adjustments::NoiseType), FractalType(raster_nodes::adjustments::FractalType), CellularDistanceFunction(raster_nodes::adjustments::CellularDistanceFunction), CellularReturnType(raster_nodes::adjustments::CellularReturnType), DomainWarpType(raster_nodes::adjustments::DomainWarpType), RelativeAbsolute(raster_nodes::adjustments::RelativeAbsolute), SelectiveColorChoice(raster_nodes::adjustments::SelectiveColorChoice), GridType(vector::misc::GridType), ArcType(vector::misc::ArcType), MergeByDistanceAlgorithm(vector::misc::MergeByDistanceAlgorithm), ExtrudeJoiningAlgorithm(vector::misc::ExtrudeJoiningAlgorithm), PointSpacingType(vector::misc::PointSpacingType), SpiralType(vector::misc::SpiralType), #[serde(alias = "LineCap")] StrokeCap(vector::style::StrokeCap), #[serde(alias = "LineJoin")] StrokeJoin(vector::style::StrokeJoin), StrokeAlign(vector::style::StrokeAlign), PaintOrder(vector::style::PaintOrder), FillType(vector::style::FillType), GradientType(vector::style::GradientType), ReferencePoint(vector::ReferencePoint), CentroidType(vector::misc::CentroidType), BooleanOperation(path_bool_nodes::BooleanOperation), TextAlign(text_nodes::TextAlign), } impl TaggedValue { pub fn to_primitive_string(&self) -> String { match self { TaggedValue::None => "()".to_string(), TaggedValue::String(x) => format!("\"{x}\""), TaggedValue::U32(x) => x.to_string() + "_u32", TaggedValue::U64(x) => x.to_string() + "_u64", TaggedValue::F32(x) => x.to_string() + "_f32", TaggedValue::F64(x) => x.to_string() + "_f64", TaggedValue::Bool(x) => x.to_string(), TaggedValue::BlendMode(x) => "BlendMode::".to_string() + &x.to_string(), _ => panic!("Cannot convert to primitive string"), } } pub fn from_primitive_string(string: &str, ty: &Type) -> Option<Self> { fn to_dvec2(input: &str) -> Option<DVec2> { let mut split = input.split(','); let x = split.next()?.trim().parse().ok()?; let y = split.next()?.trim().parse().ok()?; 
Some(DVec2::new(x, y)) } fn to_color(input: &str) -> Option<Color> { // String syntax (e.g. "000000ff") if input.starts_with('"') && input.ends_with('"') { let color = input.trim().trim_matches('"').trim().trim_start_matches('#'); match color.len() { 6 => return Color::from_rgb_str(color), 8 => return Color::from_rgba_str(color), _ => { log::error!("Invalid default value color string: {input}"); return None; } } } // Color constant syntax (e.g. Color::BLACK) let mut choices = input.split("::"); let (first, second) = (choices.next()?.trim(), choices.next()?.trim()); if first == "Color" { return Some(match second { "BLACK" => Color::BLACK, "WHITE" => Color::WHITE, "RED" => Color::RED, "GREEN" => Color::GREEN, "BLUE" => Color::BLUE, "YELLOW" => Color::YELLOW, "CYAN" => Color::CYAN, "MAGENTA" => Color::MAGENTA, "TRANSPARENT" => Color::TRANSPARENT, _ => { log::error!("Invalid default value color constant: {input}"); return None; } }); } log::error!("Invalid default value color: {input}"); None } fn to_reference_point(input: &str) -> Option<ReferencePoint> { let mut choices = input.split("::"); let (first, second) = (choices.next()?.trim(), choices.next()?.trim()); if first == "ReferencePoint" { return Some(match second { "None" => ReferencePoint::None, "TopLeft" => ReferencePoint::TopLeft, "TopCenter" => ReferencePoint::TopCenter, "TopRight" => ReferencePoint::TopRight, "CenterLeft" => ReferencePoint::CenterLeft, "Center" => ReferencePoint::Center, "CenterRight" => ReferencePoint::CenterRight, "BottomLeft" => ReferencePoint::BottomLeft, "BottomCenter" => ReferencePoint::BottomCenter, "BottomRight" => ReferencePoint::BottomRight, _ => { log::error!("Invalid ReferencePoint default type variant: {input}"); return None; } }); } log::error!("Invalid ReferencePoint default type: {input}"); None } match ty { Type::Generic(_) => None, Type::Concrete(concrete_type) => { let internal_id = concrete_type.id?; use std::any::TypeId; // TODO: Add default implementations for types such 
as TaggedValue::Subpaths, and use the defaults here and in document_node_types // Tries using the default for the tagged value type. If it not implemented, then uses the default used in document_node_types. If it is not used there, then TaggedValue::None is returned. let ty = match internal_id { x if x == TypeId::of::<()>() => TaggedValue::None, x if x == TypeId::of::<String>() => TaggedValue::String(string.into()), x if x == TypeId::of::<f64>() => FromStr::from_str(string).map(TaggedValue::F64).ok()?, x if x == TypeId::of::<f32>() => FromStr::from_str(string).map(TaggedValue::F32).ok()?, x if x == TypeId::of::<u64>() => FromStr::from_str(string).map(TaggedValue::U64).ok()?, x if x == TypeId::of::<u32>() => FromStr::from_str(string).map(TaggedValue::U32).ok()?, x if x == TypeId::of::<DVec2>() => to_dvec2(string).map(TaggedValue::DVec2)?, x if x == TypeId::of::<bool>() => FromStr::from_str(string).map(TaggedValue::Bool).ok()?, x if x == TypeId::of::<Table<Color>>() => to_color(string).map(|color| TaggedValue::Color(Table::new_from_element(color)))?, x if x == TypeId::of::<Color>() => to_color(string).map(TaggedValue::ColorNotInTable)?, x if x == TypeId::of::<Option<Color>>() => TaggedValue::ColorNotInTable(to_color(string)?), x if x == TypeId::of::<Fill>() => to_color(string).map(|color| TaggedValue::Fill(Fill::solid(color)))?, x if x == TypeId::of::<ReferencePoint>() => to_reference_point(string).map(TaggedValue::ReferencePoint)?, _ => return None, }; Some(ty) } Type::Fn(_, output) => TaggedValue::from_primitive_string(string, output), Type::Future(fut) => TaggedValue::from_primitive_string(string, fut), } } pub fn to_u32(&self) -> u32 { match self { TaggedValue::U32(x) => *x, _ => panic!("Passed value is not of type u32"), } } } impl Display for TaggedValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { TaggedValue::String(x) => f.write_str(x), TaggedValue::U32(x) => f.write_fmt(format_args!("{x}")), TaggedValue::U64(x) => 
f.write_fmt(format_args!("{x}")), TaggedValue::F32(x) => f.write_fmt(format_args!("{x}")), TaggedValue::F64(x) => f.write_fmt(format_args!("{x}")), TaggedValue::Bool(x) => f.write_fmt(format_args!("{x}")), _ => panic!("Cannot convert to string"), } } } pub struct UpcastNode { value: MemoHash<TaggedValue>, } impl<'input> Node<'input, DAny<'input>> for UpcastNode { type Output = FutureAny<'input>; fn eval(&'input self, _: DAny<'input>) -> Self::Output { let memo_clone = MemoHash::clone(&self.value); Box::pin(async move { memo_clone.into_inner().as_ref().clone().to_dynany() }) } } impl UpcastNode { pub fn new(value: MemoHash<TaggedValue>) -> Self { Self { value } } } #[derive(Default, Debug, Clone, Copy)] pub struct UpcastAsRefNode<T: AsRef<U> + Sync + Send, U: Sync + Send>(pub T, PhantomData<U>); impl<'i, T: 'i + AsRef<U> + Sync + Send, U: 'i + StaticType + Sync + Send> Node<'i, DAny<'i>> for UpcastAsRefNode<T, U> { type Output = FutureAny<'i>; #[inline(always)] fn eval(&'i self, _: DAny<'i>) -> Self::Output { Box::pin(async move { Box::new(self.0.as_ref()) as DAny<'i> }) } } impl<T: AsRef<U> + Sync + Send, U: Sync + Send> UpcastAsRefNode<T, U> { pub const fn new(value: T) -> UpcastAsRefNode<T, U> { UpcastAsRefNode(value, PhantomData) } } #[derive(Debug, Clone, PartialEq, dyn_any::DynAny, serde::Serialize, serde::Deserialize)] pub struct RenderOutput { pub data: RenderOutputType, pub metadata: RenderMetadata, } #[derive(Debug, Clone, Hash, PartialEq, dyn_any::DynAny, serde::Serialize, serde::Deserialize)] pub enum RenderOutputType { CanvasFrame(SurfaceFrame), #[serde(skip)] Texture(ImageTexture), #[serde(skip)] Buffer { data: Vec<u8>, width: u32, height: u32, }, Svg { svg: String, image_data: Vec<(u64, Image<Color>)>, }, } impl Hash for RenderOutput { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.data.hash(state) } } /// We hash the floats and so-forth despite it not being reproducible because all inputs to the node graph must be hashed otherwise the 
graph execution breaks (so sorry about this hack) trait FakeHash { fn hash<H: core::hash::Hasher>(&self, state: &mut H); } mod fake_hash { use super::*; impl FakeHash for f64 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_bits().hash(state) } } impl FakeHash for f32 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_bits().hash(state) } } impl FakeHash for DVec2 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_array().iter().for_each(|x| x.to_bits().hash(state)) } } impl FakeHash for Vec2 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_array().iter().for_each(|x| x.to_bits().hash(state)) } } impl FakeHash for DAffine2 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_cols_array().iter().for_each(|x| x.to_bits().hash(state)) } } impl FakeHash for Affine2 { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.to_cols_array().iter().for_each(|x| x.to_bits().hash(state)) } } impl<T: FakeHash> FakeHash for Option<T> { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { if let Some(x) = self { 1.hash(state); x.hash(state); } else { 0.hash(state); } } } impl<T: FakeHash> FakeHash for Vec<T> { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.len().hash(state); self.iter().for_each(|x| x.hash(state)) } } impl<T: FakeHash, const N: usize> FakeHash for [T; N] { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.iter().for_each(|x| x.hash(state)) } } impl FakeHash for (f64, Color) { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.0.to_bits().hash(state); self.1.hash(state) } } } #[test] fn can_construct_color() { assert_eq!(TaggedValue::from_type(&concrete!(Color)).unwrap(), TaggedValue::ColorNotInTable(Color::default())); }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/benches/compile_demo_art_criterion.rs
node-graph/graph-craft/benches/compile_demo_art_criterion.rs
use criterion::{Criterion, black_box, criterion_group, criterion_main}; use graph_craft::util::DEMO_ART; fn compile_to_proto(c: &mut Criterion) { use graph_craft::util::{compile, load_from_name}; let mut c = c.benchmark_group("Compile Network cold"); for name in DEMO_ART { let network = load_from_name(name); c.bench_function(name, |b| b.iter_batched(|| network.clone(), |network| compile(black_box(network)), criterion::BatchSize::SmallInput)); } } criterion_group!(benches, compile_to_proto); criterion_main!(benches);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/graph-craft/benches/compile_demo_art_iai.rs
node-graph/graph-craft/benches/compile_demo_art_iai.rs
use graph_craft::document::NodeNetwork; use graph_craft::util::*; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; #[library_benchmark] #[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = load_from_name)] pub fn compile_to_proto(_input: NodeNetwork) { std::hint::black_box(compile(_input)); } library_benchmark_group!(name = compile_group; benchmarks = compile_to_proto); main!(library_benchmark_groups = compile_group);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/src/lib.rs
node-graph/interpreted-executor/src/lib.rs
pub mod dynamic_executor; pub mod node_registry; pub mod util; #[cfg(test)] mod tests { use core_types::*; use futures::executor::block_on; use graphene_core::ops::identity; #[test] fn double_number() { use graph_craft::document::*; use graph_craft::*; let network = NodeNetwork { exports: vec![NodeInput::node(NodeId(1), 0)], nodes: [ // Simple identity node taking a number as input from outside the graph ( NodeId(0), DocumentNode { inputs: vec![], call_argument: concrete!(u32), implementation: DocumentNodeImplementation::ProtoNode(identity::IDENTIFIER), ..Default::default() }, ), // An add node adding the result of the id node to its self ( NodeId(1), DocumentNode { inputs: vec![NodeInput::node(NodeId(0), 0), NodeInput::node(NodeId(0), 0)], implementation: DocumentNodeImplementation::ProtoNode(ProtoNodeIdentifier::new("core_types::ops::AddNode")), ..Default::default() }, ), ] .into_iter() .collect(), ..Default::default() }; use crate::dynamic_executor::DynamicExecutor; use graph_craft::graphene_compiler::Compiler; let compiler = Compiler {}; let protograph = compiler.compile_single(network).expect("Graph should be generated"); let _exec = block_on(DynamicExecutor::new(protograph)).map(|_e| panic!("The network should not type check ")).unwrap_err(); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/src/dynamic_executor.rs
node-graph/interpreted-executor/src/dynamic_executor.rs
use crate::node_registry; use dyn_any::StaticType; use graph_craft::Type; use graph_craft::document::NodeId; use graph_craft::document::value::{TaggedValue, UpcastAsRefNode, UpcastNode}; use graph_craft::graphene_compiler::Executor; use graph_craft::proto::{ConstructionArgs, GraphError, LocalFuture, NodeContainer, ProtoNetwork, ProtoNode, SharedNodeContainer, TypeErasedBox, TypingContext}; use graph_craft::proto::{GraphErrorType, GraphErrors}; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::sync::Arc; /// An executor of a node graph that does not require an online compilation server, and instead uses `Box<dyn ...>`. #[derive(Clone)] pub struct DynamicExecutor { output: NodeId, /// Stores all of the dynamic node structs. tree: BorrowTree, /// Stores the types of the proto nodes. typing_context: TypingContext, // This allows us to keep the nodes around for one more frame which is used for introspection orphaned_nodes: HashSet<NodeId>, } impl Default for DynamicExecutor { fn default() -> Self { Self { output: Default::default(), tree: Default::default(), typing_context: TypingContext::new(&node_registry::NODE_REGISTRY), orphaned_nodes: HashSet::new(), } } } #[derive(PartialEq, Clone, Debug, Default, serde::Serialize, serde::Deserialize)] pub struct NodeTypes { pub inputs: Vec<Type>, pub output: Type, } type Path = Box<[NodeId]>; #[derive(PartialEq, Clone, Debug, Default, serde::Serialize, serde::Deserialize)] pub struct ResolvedDocumentNodeTypesDelta { pub add: Vec<(Path, NodeTypes)>, pub remove: Vec<Path>, } impl DynamicExecutor { pub async fn new(proto_network: ProtoNetwork) -> Result<Self, GraphErrors> { let mut typing_context = TypingContext::new(&node_registry::NODE_REGISTRY); typing_context.update(&proto_network)?; let output = proto_network.output; let tree = BorrowTree::new(proto_network, &typing_context).await?; Ok(Self { tree, output, typing_context, orphaned_nodes: HashSet::new(), }) } /// Updates the existing [`BorrowTree`] to 
reflect the new [`ProtoNetwork`], reusing nodes where possible. #[cfg_attr(debug_assertions, inline(never))] pub async fn update(&mut self, proto_network: ProtoNetwork) -> Result<ResolvedDocumentNodeTypesDelta, (ResolvedDocumentNodeTypesDelta, GraphErrors)> { self.output = proto_network.output; self.typing_context.update(&proto_network).map_err(|e| { // If there is an error then get types that have been resolved before the error let add = proto_network .nodes .iter() .filter_map(|(id, node)| node.original_location.path.as_ref().map(|path| (path.clone().into_boxed_slice(), self.typing_context.infer(*id, node)))) .take_while(|(_, r)| r.is_ok()) .map(|(path, r)| { let r = r.unwrap(); ( path, NodeTypes { inputs: r.inputs, output: r.return_value, }, ) }) .collect::<Vec<_>>(); (ResolvedDocumentNodeTypesDelta { add, remove: Vec::new() }, e) })?; let (add, orphaned) = self .tree .update(proto_network, &self.typing_context) .await .map_err(|e| (ResolvedDocumentNodeTypesDelta::default(), e))?; let old_to_remove = core::mem::replace(&mut self.orphaned_nodes, orphaned); let mut remove = Vec::with_capacity(old_to_remove.len() - self.orphaned_nodes.len().min(old_to_remove.len())); for node_id in old_to_remove { if self.orphaned_nodes.contains(&node_id) { let path = self.tree.free_node(node_id); self.typing_context.remove_inference(node_id); if let Some(path) = path { remove.push(path); } } } let add = self.document_node_types(add.into_iter()).collect(); Ok(ResolvedDocumentNodeTypesDelta { add, remove }) } /// Calls the `Node::serialize` for that specific node, returning for example the cached value for a monitor node. The node path must match the document node path. 
pub fn introspect(&self, node_path: &[NodeId]) -> Result<Arc<dyn std::any::Any + Send + Sync + 'static>, IntrospectError> { self.tree.introspect(node_path) } pub fn input_type(&self) -> Option<Type> { self.typing_context.type_of(self.output).map(|node_io| node_io.call_argument.clone()) } pub fn tree(&self) -> &BorrowTree { &self.tree } pub fn output(&self) -> NodeId { self.output } pub fn output_type(&self) -> Option<Type> { self.typing_context.type_of(self.output).map(|node_io| node_io.return_value.clone()) } pub fn document_node_types<'a>(&'a self, nodes: impl Iterator<Item = Path> + 'a) -> impl Iterator<Item = (Path, NodeTypes)> + 'a { nodes.flat_map(|id| self.tree.source_map().get(&id).map(|(_, b)| (id, b.clone()))) // TODO: https://github.com/GraphiteEditor/Graphite/issues/1767 // TODO: Non exposed inputs are not added to the inputs_source_map, so they are not included in the resolved_document_node_types. The type is still available in the typing_context. This only affects the UI-only "Import" node. 
} } impl<I> Executor<I, TaggedValue> for &DynamicExecutor where I: StaticType + 'static + Send + Sync + std::panic::UnwindSafe, { fn execute(&self, input: I) -> LocalFuture<'_, Result<TaggedValue, Box<dyn Error>>> { Box::pin(async move { use futures::FutureExt; let result = self.tree.eval_tagged_value(self.output, input); let wrapped_result = std::panic::AssertUnwindSafe(result).catch_unwind().await; match wrapped_result { Ok(result) => result.map_err(|e| e.into()), Err(e) => { Box::leak(e); Err("Node graph execution panicked".into()) } } }) } } pub struct InputMapping {} #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum IntrospectError { PathNotFound(Vec<NodeId>), ProtoNodeNotFound(NodeId), NoData, RuntimeNotReady, } impl std::fmt::Display for IntrospectError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { IntrospectError::PathNotFound(path) => write!(f, "Path not found: {path:?}"), IntrospectError::ProtoNodeNotFound(id) => write!(f, "ProtoNode not found: {id:?}"), IntrospectError::NoData => write!(f, "No data found for this node"), IntrospectError::RuntimeNotReady => write!(f, "Node runtime is not ready"), } } } /// A store of dynamically typed nodes and their associated source map. /// /// [`BorrowTree`] maintains two main data structures: /// 1. A map of [`NodeId`]s to their corresponding nodes and paths. /// 2. A source map that links document paths to node IDs and their types. /// /// This structure is central to managing the graph of nodes in the interpreter, /// allowing for efficient access and manipulation of nodes based on their IDs or paths. /// /// # Fields /// /// * `nodes`: A [`HashMap`] of [`NodeId`]s to tuples of [`SharedNodeContainer`] and [`Path`]. /// This stores the actual node instances and their associated paths. /// /// * `source_map`: A [`HashMap`] from [`Path`] to tuples of [`NodeId`] and [`NodeTypes`]. /// This maps document paths to node IDs and their associated type information. 
/// /// A store of the dynamically typed nodes and also the source map. #[derive(Default, Clone)] pub struct BorrowTree { /// A hashmap of node IDs and dynamically typed nodes. nodes: HashMap<NodeId, (SharedNodeContainer, Path)>, /// A hashmap from the document path to the proto node ID. source_map: HashMap<Path, (NodeId, NodeTypes)>, } impl BorrowTree { pub async fn new(proto_network: ProtoNetwork, typing_context: &TypingContext) -> Result<BorrowTree, GraphErrors> { let mut nodes = BorrowTree::default(); for (id, node) in proto_network.nodes { nodes.push_node(id, node, typing_context).await? } Ok(nodes) } /// Pushes new nodes into the tree and return orphaned nodes pub async fn update(&mut self, proto_network: ProtoNetwork, typing_context: &TypingContext) -> Result<(Vec<Path>, HashSet<NodeId>), GraphErrors> { let mut old_nodes: HashSet<_> = self.nodes.keys().copied().collect(); let mut new_nodes: Vec<_> = Vec::new(); // TODO: Problem: When an identity node is connected directly to an export the first input to identity node is not added to the proto network, while the second input is. This means the primary input does not have a type. for (id, node) in proto_network.nodes { if !self.nodes.contains_key(&id) { new_nodes.push(node.original_location.path.clone().unwrap_or_default().into()); self.push_node(id, node, typing_context).await?; } else if self.update_source_map(id, typing_context, &node) { new_nodes.push(node.original_location.path.clone().unwrap_or_default().into()); } old_nodes.remove(&id); } Ok((new_nodes, old_nodes)) } fn node_deps(&self, nodes: &[NodeId]) -> Vec<SharedNodeContainer> { nodes.iter().map(|node| self.nodes.get(node).unwrap().0.clone()).collect() } fn store_node(&mut self, node: SharedNodeContainer, id: NodeId, path: Path) { self.nodes.insert(id, (node, path)); } /// Calls the `Node::serialize` for that specific node, returning for example the cached value for a monitor node. The node path must match the document node path. 
pub fn introspect(&self, node_path: &[NodeId]) -> Result<Arc<dyn std::any::Any + Send + Sync + 'static>, IntrospectError> { let (id, _) = self.source_map.get(node_path).ok_or_else(|| IntrospectError::PathNotFound(node_path.to_vec()))?; let (node, _path) = self.nodes.get(id).ok_or(IntrospectError::ProtoNodeNotFound(*id))?; node.serialize().ok_or(IntrospectError::NoData) } pub fn get(&self, id: NodeId) -> Option<SharedNodeContainer> { self.nodes.get(&id).map(|(node, _)| node.clone()) } /// Evaluate the output node of the [`BorrowTree`]. pub async fn eval<'i, I, O>(&'i self, id: NodeId, input: I) -> Option<O> where I: StaticType + 'i + Send + Sync, O: StaticType + 'i, { let (node, _path) = self.nodes.get(&id).cloned()?; let output = node.eval(Box::new(input)); dyn_any::downcast::<O>(output.await).ok().map(|o| *o) } /// Evaluate the output node of the [`BorrowTree`] and cast it to a tagged value. /// This ensures that no borrowed data can escape the node graph. pub async fn eval_tagged_value<I>(&self, id: NodeId, input: I) -> Result<TaggedValue, String> where I: StaticType + 'static + Send + Sync, { let (node, _path) = self.nodes.get(&id).cloned().ok_or("Output node not found in executor")?; let output = node.eval(Box::new(input)); TaggedValue::try_from_any(output.await) } /// Removes a node from the [`BorrowTree`] and returns its associated path. /// /// This method removes the specified node from both the `nodes` HashMap and, /// if applicable, the `source_map` HashMap. /// /// # Arguments /// /// * `self` - Mutable reference to the [`BorrowTree`]. /// * `id` - The `NodeId` of the node to be removed. /// /// # Returns /// /// [`Option<Path>`] - The path associated with the removed node, or `None` if the node wasn't found. 
/// /// # Example /// /// ```rust /// use std::collections::HashMap; /// use graph_craft::document::*; /// use graph_craft::proto::*; /// use interpreted_executor::dynamic_executor::BorrowTree; /// use interpreted_executor::node_registry; /// /// /// async fn example() -> Result<(), GraphErrors> { /// let (proto_network, node_id, proto_node) = ProtoNetwork::example(); /// let typing_context = TypingContext::new(&node_registry::NODE_REGISTRY); /// let mut borrow_tree = BorrowTree::new(proto_network, &typing_context).await?; /// /// // Assert that the node exists in the BorrowTree /// assert!(borrow_tree.get(node_id).is_some(), "Node should exist before removal"); /// /// // Remove the node /// let removed_path = borrow_tree.free_node(node_id); /// /// // Assert that the node was successfully removed /// assert!(removed_path.is_some(), "Node removal should return a path"); /// assert!(borrow_tree.get(node_id).is_none(), "Node should not exist after removal"); /// /// // Try to remove the same node again /// let second_removal = borrow_tree.free_node(node_id); /// /// assert_eq!(second_removal, None, "Second removal should return None"); /// /// println!("All assertions passed. free_node function works as expected."); /// /// Ok(()) /// } /// ``` /// /// # Notes /// /// - Removes the node from `nodes` HashMap. /// - If the node is the primary node for its path in the `source_map`, it's also removed from there. /// - Returns `None` if the node is not found in the `nodes` HashMap. pub fn free_node(&mut self, id: NodeId) -> Option<Path> { let (_, path) = self.nodes.remove(&id)?; if self.source_map.get(&path)?.0 == id { self.source_map.remove(&path); return Some(path); } None } /// Updates the source map for a given node in the [`BorrowTree`]. /// /// This method updates or inserts an entry in the `source_map` HashMap for the specified node, /// using type information from the provided [`TypingContext`] and [`ProtoNode`]. 
/// /// # Arguments /// /// * `self` - Mutable reference to the [`BorrowTree`]. /// * `id` - The `NodeId` of the node to update in the source map. /// * `typing_context` - A reference to the [`TypingContext`] containing type information. /// * `proto_node` - A reference to the [`ProtoNode`] containing original location information. /// /// # Returns /// /// `bool` - `true` if a new entry was inserted, `false` if an existing entry was updated. /// /// # Notes /// /// - Updates or inserts an entry in the `source_map` HashMap. /// - Uses the `ProtoNode`'s original location path as the key for the source map. /// - Collects input types from both the main input and parameters. /// - Returns `false` and logs a warning if the node's type information is not found in the typing context. fn update_source_map(&mut self, id: NodeId, typing_context: &TypingContext, proto_node: &ProtoNode) -> bool { let Some(node_io) = typing_context.type_of(id) else { log::warn!("did not find type"); return false; }; let inputs = [&node_io.call_argument].into_iter().chain(&node_io.inputs).cloned().collect(); let node_path = &proto_node.original_location.path.as_ref().unwrap_or(const { &vec![] }); let entry = self.source_map.entry(node_path.to_vec().into()).or_default(); let update = ( id, NodeTypes { inputs, output: node_io.return_value.clone(), }, ); let modified = *entry != update; *entry = update; modified } /// Inserts a new node into the [`BorrowTree`], calling the constructor function from `node_registry.rs`. /// /// This method creates a new node container based on the provided `ProtoNode`, updates the source map, /// and stores the node container in the `BorrowTree`. /// /// /// # Notes /// /// - Updates the source map using [`update_source_map`](BorrowTree::update_source_map) before inserting the node. /// - Handles different types of construction arguments: /// - `Value`: Creates a node from a `TaggedValue`, with special handling for `EditorApi` values. 
/// - `Inline`: Currently unimplemented. Only used for `rust-gpu` support. /// - `Nodes`: Constructs a node using other nodes as dependencies. /// - Uses the constructor function from the `typing_context` for `Nodes` construction arguments. /// - Returns an error if no constructor is found for the given node ID. async fn push_node(&mut self, id: NodeId, proto_node: ProtoNode, typing_context: &TypingContext) -> Result<(), GraphErrors> { self.update_source_map(id, typing_context, &proto_node); let path = proto_node.original_location.path.clone().unwrap_or_default(); match &proto_node.construction_args { ConstructionArgs::Value(value) => { let node = if let TaggedValue::EditorApi(api) = &**value { let editor_api = UpcastAsRefNode::new(api.clone()); let node = Box::new(editor_api) as TypeErasedBox<'_>; NodeContainer::new(node) } else { let upcasted = UpcastNode::new(value.to_owned()); let node = Box::new(upcasted) as TypeErasedBox<'_>; NodeContainer::new(node) }; self.store_node(node, id, path.into()); } ConstructionArgs::Inline(_) => unimplemented!("Inline nodes are not supported yet"), ConstructionArgs::Nodes(ids) => { let ids = ids.to_vec(); let construction_nodes = self.node_deps(&ids); let constructor = typing_context.constructor(id).ok_or_else(|| vec![GraphError::new(&proto_node, GraphErrorType::NoConstructor)])?; let node = constructor(construction_nodes).await; let node = NodeContainer::new(node); self.store_node(node, id, path.into()); } }; Ok(()) } /// Returns the source map of the borrow tree pub fn source_map(&self) -> &HashMap<Path, (NodeId, NodeTypes)> { &self.source_map } } #[cfg(test)] mod test { use super::*; use graph_craft::document::value::TaggedValue; #[test] fn push_node_sync() { let mut tree = BorrowTree::default(); let val_1_protonode = ProtoNode::value(ConstructionArgs::Value(TaggedValue::U32(2u32).into()), vec![]); let context = TypingContext::default(); let future = tree.push_node(NodeId(0), val_1_protonode, &context); 
futures::executor::block_on(future).unwrap(); let _node = tree.get(NodeId(0)).unwrap(); let result = futures::executor::block_on(tree.eval(NodeId(0), ())); assert_eq!(result, Some(2u32)); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/src/util.rs
node-graph/interpreted-executor/src/util.rs
use graph_craft::ProtoNodeIdentifier; use graph_craft::concrete; use graph_craft::document::value::TaggedValue; use graph_craft::document::{DocumentNode, DocumentNodeImplementation, NodeInput, NodeNetwork}; use graph_craft::generic; use graph_craft::wasm_application_io::WasmEditorApi; use graphene_std::Context; use graphene_std::ContextFeatures; use graphene_std::uuid::NodeId; use std::sync::Arc; use wgpu_executor::WgpuExecutor; pub fn wrap_network_in_scope(mut network: NodeNetwork, editor_api: Arc<WasmEditorApi>) -> NodeNetwork { network.generate_node_paths(&[]); let inner_network = DocumentNode { implementation: DocumentNodeImplementation::Network(network), inputs: vec![], ..Default::default() }; // TODO: Replace with "Output" definition? // let render_node = resolve_document_node_type("Output") // .expect("Output node type not found") // .node_template_input_override(vec![Some(NodeInput::node(NodeId(1), 0)), Some(NodeInput::node(NodeId(0), 1))]) // .document_node; let render_node = DocumentNode { inputs: vec![NodeInput::node(NodeId(0), 0)], implementation: DocumentNodeImplementation::Network(NodeNetwork { exports: vec![NodeInput::node(NodeId(2), 0)], nodes: [ DocumentNode { call_argument: concrete!(Context), inputs: vec![NodeInput::import(core_types::Type::Fn(Box::new(concrete!(Context)), Box::new(generic!(T))), 0)], implementation: DocumentNodeImplementation::ProtoNode(graphene_std::render_node::render_intermediate::IDENTIFIER), context_features: graphene_std::ContextDependencies { extract: ContextFeatures::VARARGS, inject: ContextFeatures::empty(), }, ..Default::default() }, // Keep this in sync with the protonode in valid_input_types DocumentNode { call_argument: concrete!(Context), inputs: vec![NodeInput::scope("editor-api"), NodeInput::node(NodeId(0), 0)], implementation: DocumentNodeImplementation::ProtoNode(graphene_std::render_node::render::IDENTIFIER), context_features: graphene_std::ContextDependencies { extract: ContextFeatures::FOOTPRINT | 
ContextFeatures::VARARGS, inject: ContextFeatures::empty(), }, ..Default::default() }, DocumentNode { call_argument: concrete!(graphene_std::application_io::RenderConfig), inputs: vec![NodeInput::node(NodeId(1), 0)], implementation: DocumentNodeImplementation::ProtoNode(graphene_std::render_node::create_context::IDENTIFIER), context_features: graphene_std::ContextDependencies { extract: ContextFeatures::empty(), inject: ContextFeatures::REAL_TIME | ContextFeatures::ANIMATION_TIME | ContextFeatures::POINTER | ContextFeatures::FOOTPRINT | ContextFeatures::VARARGS, }, ..Default::default() }, ] .into_iter() .enumerate() .map(|(id, node)| (NodeId(id as u64), node)) .collect(), ..Default::default() }), ..Default::default() }; // wrap the inner network in a scope let mut nodes = vec![ inner_network, render_node, DocumentNode { implementation: DocumentNodeImplementation::ProtoNode(graphene_std::ops::identity::IDENTIFIER), inputs: vec![NodeInput::value(TaggedValue::EditorApi(editor_api), false)], ..Default::default() }, ]; let mut scope_injections = vec![("editor-api".to_string(), (NodeId(2), concrete!(&WasmEditorApi)))]; if cfg!(feature = "gpu") { nodes.push(DocumentNode { implementation: DocumentNodeImplementation::ProtoNode(ProtoNodeIdentifier::from("graphene_core::ops::IntoNode<&WgpuExecutor>")), inputs: vec![NodeInput::node(NodeId(2), 0)], ..Default::default() }); scope_injections.push(("wgpu-executor".to_string(), (NodeId(3), concrete!(&WgpuExecutor)))); } NodeNetwork { exports: vec![NodeInput::node(NodeId(1), 0)], nodes: nodes.into_iter().enumerate().map(|(id, node)| (NodeId(id as u64), node)).collect(), scope_injections: scope_injections.into_iter().collect(), // TODO(TrueDoctor): check if it makes sense to set `generated` to `true` generated: false, } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/src/node_registry.rs
node-graph/interpreted-executor/src/node_registry.rs
use dyn_any::StaticType; use glam::{DAffine2, DVec2, IVec2}; use graph_craft::document::DocumentNode; use graph_craft::document::value::RenderOutput; use graph_craft::proto::{NodeConstructor, TypeErasedBox}; use graphene_std::Artboard; use graphene_std::Context; use graphene_std::Graphic; use graphene_std::any::DynAnyNode; use graphene_std::application_io::{ImageTexture, SurfaceFrame}; use graphene_std::brush::brush_cache::BrushCache; use graphene_std::brush::brush_stroke::BrushStroke; use graphene_std::concrete; use graphene_std::gradient::GradientStops; #[cfg(feature = "gpu")] use graphene_std::raster::GPU; use graphene_std::raster::color::Color; use graphene_std::raster::*; use graphene_std::raster::{CPU, Raster}; use graphene_std::render_node::RenderIntermediate; use graphene_std::table::Table; use graphene_std::transform::Footprint; use graphene_std::uuid::NodeId; use graphene_std::vector::Vector; use graphene_std::wasm_application_io::WasmEditorApi; #[cfg(feature = "gpu")] use graphene_std::wasm_application_io::WasmSurfaceHandle; use graphene_std::{Cow, ProtoNodeIdentifier}; use graphene_std::{NodeIO, NodeIOTypes}; use graphene_std::{fn_type_fut, future}; use node_registry_macros::{async_node, convert_node, into_node}; use once_cell::sync::Lazy; use std::collections::HashMap; #[cfg(feature = "gpu")] use std::sync::Arc; #[cfg(feature = "gpu")] use wgpu_executor::WgpuExecutor; use wgpu_executor::{WgpuSurface, WindowHandle}; // TODO: turn into hashmap fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>> { let mut node_types: Vec<(ProtoNodeIdentifier, NodeConstructor, NodeIOTypes)> = vec![ // ========== // INTO NODES // ========== into_node!(from: Table<Graphic>, to: Table<Graphic>), into_node!(from: Table<Vector>, to: Table<Vector>), into_node!(from: Table<Raster<CPU>>, to: Table<Raster<CPU>>), #[cfg(feature = "gpu")] into_node!(from: Table<Raster<GPU>>, to: Table<Raster<GPU>>), convert_node!(from: Table<Vector>, to: 
Table<Graphic>), convert_node!(from: Table<Raster<CPU>>, to: Table<Graphic>), #[cfg(feature = "gpu")] convert_node!(from: Table<Raster<GPU>>, to: Table<Graphic>), // into_node!(from: Table<Raster<CPU>>, to: Table<Raster<SRGBA8>>), #[cfg(feature = "gpu")] into_node!(from: &WasmEditorApi, to: &WgpuExecutor), convert_node!(from: DVec2, to: DVec2), convert_node!(from: String, to: String), convert_node!(from: bool, to: String), convert_node!(from: DVec2, to: String), convert_node!(from: IVec2, to: String), convert_node!(from: DAffine2, to: String), #[cfg(feature = "gpu")] convert_node!(from: Table<Raster<CPU>>, to: Table<Raster<CPU>>, converter: &WgpuExecutor), #[cfg(feature = "gpu")] convert_node!(from: Table<Raster<CPU>>, to: Table<Raster<GPU>>, converter: &WgpuExecutor), #[cfg(feature = "gpu")] convert_node!(from: Table<Raster<GPU>>, to: Table<Raster<GPU>>, converter: &WgpuExecutor), #[cfg(feature = "gpu")] convert_node!(from: Table<Raster<GPU>>, to: Table<Raster<CPU>>, converter: &WgpuExecutor), // ============= // MONITOR NODES // ============= async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => ()]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Artboard>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Graphic>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Vector>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Raster<CPU>>]), #[cfg(feature = "gpu")] async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Raster<GPU>>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<Color>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Table<GradientStops>]), 
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => String]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => IVec2]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => DVec2]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => DAffine2]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => bool]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => f64]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => u32]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => u64]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<f64>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => BlendMode]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => ImageTexture]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::transform::ReferencePoint]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::path_bool::BooleanOperation]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Fill]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeCap]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeJoin]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::PaintOrder]), 
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeAlign]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Stroke]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Gradient]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => GradientStops]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<graphene_std::uuid::NodeId>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Box<graphene_std::vector::VectorModification>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::CentroidType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::PointSpacingType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Option<f64>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<DVec2>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<String>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => [f64; 4]]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<NodeId>]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Graphic]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::text::Font]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => Vec<BrushStroke>]), async_node!(graphene_core::memo::MonitorNode<_, 
_, _>, input: Context, fn_params: [Context => BrushCache]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => DocumentNode]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::curve::Curve]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::transform::Footprint]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::blending::BlendMode]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::LuminanceCalculation]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::extract_xy::XY]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::RedGreenBlue]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::RedGreenBlueAlpha]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::animation::RealTimeMode]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::NoiseType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::FractalType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::CellularDistanceFunction]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::CellularReturnType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => 
graphene_std::raster::adjustments::DomainWarpType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::RelativeAbsolute]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::raster::adjustments::SelectiveColorChoice]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::GridType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::ArcType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::MergeByDistanceAlgorithm]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::ExtrudeJoiningAlgorithm]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::PointSpacingType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::FillType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::style::GradientType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::transform::ReferencePoint]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::CentroidType]), async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => graphene_std::text::TextAlign]), // Context nullification #[cfg(feature = "gpu")] async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => &WasmEditorApi, Context => graphene_std::ContextFeatures]), #[cfg(feature = "gpu")] 
async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => Arc<WasmSurfaceHandle>, Context => graphene_std::ContextFeatures]), async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => RenderIntermediate, Context => graphene_std::ContextFeatures]), async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => WgpuSurface, Context => graphene_std::ContextFeatures]), async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => Option<WgpuSurface>, Context => graphene_std::ContextFeatures]), async_node!(graphene_core::context_modification::ContextModificationNode<_, _>, input: Context, fn_params: [Context => WindowHandle, Context => graphene_std::ContextFeatures]), // ========== // MEMO NODES // ========== async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => ()]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => bool]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Artboard>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Graphic>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Vector>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Raster<CPU>>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Color>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Image<Color>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<GradientStops>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => GradientStops]), 
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<DVec2>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<NodeId>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<f64>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<f32>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<String>]), #[cfg(feature = "gpu")] async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Arc<WasmSurfaceHandle>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => WindowHandle]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Option<WgpuSurface>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => SurfaceFrame]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => f64]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => f32]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => u32]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => u64]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => DVec2]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => String]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => DAffine2]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Footprint]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => RenderOutput]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => &WasmEditorApi]), #[cfg(feature = "gpu")] 
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => WgpuSurface]), #[cfg(feature = "gpu")] async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Table<Raster<GPU>>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Option<f64>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Color]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Option<Color>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => [f64; 4]]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Graphic]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => glam::f32::Vec2]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => glam::f32::Affine2]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Stroke]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Gradient]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::text::Font]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Vec<BrushStroke>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => BrushCache]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => DocumentNode]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::ContextFeatures]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::curve::Curve]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => 
graphene_std::transform::Footprint]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => Box<graphene_std::vector::VectorModification>]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::Fill]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::blending::BlendMode]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::LuminanceCalculation]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::extract_xy::XY]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::RedGreenBlue]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::RedGreenBlueAlpha]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::animation::RealTimeMode]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::NoiseType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::FractalType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::CellularDistanceFunction]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::CellularReturnType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::DomainWarpType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::RelativeAbsolute]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::raster::SelectiveColorChoice]), 
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::GridType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::ArcType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::MergeByDistanceAlgorithm]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::ExtrudeJoiningAlgorithm]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::PointSpacingType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeCap]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeJoin]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::StrokeAlign]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::PaintOrder]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::FillType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::style::GradientType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::transform::ReferencePoint]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::vector::misc::CentroidType]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => path_bool_nodes::BooleanOperation]), async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => graphene_std::text::TextAlign]), 
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => RenderIntermediate]), ]; // ============= // CONVERT NODES // ============= node_types.extend( [ convert_node!(from: f32, to: numbers), convert_node!(from: f64, to: numbers), convert_node!(from: i8, to: numbers), convert_node!(from: u8, to: numbers), convert_node!(from: u16, to: numbers), convert_node!(from: i16, to: numbers), convert_node!(from: i32, to: numbers), convert_node!(from: u32, to: numbers), convert_node!(from: i64, to: numbers), convert_node!(from: u64, to: numbers), convert_node!(from: i128, to: numbers), convert_node!(from: u128, to: numbers), convert_node!(from: isize, to: numbers), convert_node!(from: usize, to: numbers), convert_node!(from: numbers, to: DVec2), convert_node!(from: numbers, to: String), ] .into_iter() .flatten(), ); let mut map: HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>> = HashMap::new(); for (id, entry) in graphene_std::registry::NODE_REGISTRY.lock().unwrap().iter() { for (constructor, types) in entry.iter() { map.entry(id.clone()).or_default().insert(types.clone(), *constructor); } } for (id, c, types) in node_types.into_iter() { // TODO: this is a hack to remove the newline from the node new_name // This occurs for the ChannelMixerNode presumably because of the long name. // This might be caused by the stringify! macro let mut new_name = id.name.replace('\n', " "); // Remove struct generics for all nodes except for the IntoNode and ConvertNode if !(new_name.contains("IntoNode") || new_name.contains("ConvertNode")) && let Some((path, _generics)) = new_name.split_once("<") { new_name = path.to_string(); } let nid = ProtoNodeIdentifier { name: Cow::Owned(new_name) }; map.entry(nid).or_default().insert(types.clone(), c); } map } pub static NODE_REGISTRY: Lazy<HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>>> = Lazy::new(|| node_registry()); mod node_registry_macros { macro_rules! 
async_node { // TODO: we currently need to annotate the type here because the compiler would otherwise (correctly) // TODO: assign a Pin<Box<dyn Future<Output=T>>> type to the node, which is not what we want for now. // // This `params` variant of the macro wraps the normal `fn_params` variant and is used as a shorthand for writing `T` instead of `() => T` ($path:ty, input: $input:ty, params: [$($type:ty),*]) => { async_node!($path, input: $input, fn_params: [ $(() => $type),*]) }; ($path:ty, input: $input:ty, fn_params: [$($arg:ty => $type:ty),*]) => { ( ProtoNodeIdentifier::new(stringify!($path)), |mut args| { Box::pin(async move { args.reverse(); let node = <$path>::new($(graphene_std::any::downcast_node::<$arg, $type>(args.pop().expect("Not enough arguments provided to construct node"))),*); let any: DynAnyNode<$input, _, _> = graphene_std::any::DynAnyNode::new(node); Box::new(any) as TypeErasedBox }) }, { let node = <$path>::new($( graphene_std::any::PanicNode::<$arg, core::pin::Pin<Box<dyn core::future::Future<Output = $type> + Send>>>::new() ),*); let params = vec![$(fn_type_fut!($arg, $type)),*]; let mut node_io = NodeIO::<'_, $input>::to_async_node_io(&node, params); node_io.call_argument = concrete!(<$input as StaticType>::Static); node_io }, ) }; } macro_rules! 
into_node { (from: $from:ty, to: $to:ty) => { ( ProtoNodeIdentifier::new(concat!["graphene_core::ops::IntoNode<", stringify!($to), ">"]), |mut args| { Box::pin(async move { let node = graphene_std::ops::IntoNode::new( graphene_std::any::downcast_node::<Context, $from>(args.pop().unwrap()), graphene_std::any::FutureWrapperNode::new(graphene_std::value::ClonedNode::new(std::marker::PhantomData::<$to>)), ); let any: DynAnyNode<Context, $to, _> = graphene_std::any::DynAnyNode::new(node); Box::new(any) as TypeErasedBox }) }, { let node = graphene_std::ops::IntoNode::new( graphene_std::any::PanicNode::<Context, core::pin::Pin<Box<dyn core::future::Future<Output = $from> + Send>>>::new(), graphene_std::any::FutureWrapperNode::new(graphene_std::value::ClonedNode::new(std::marker::PhantomData::<$to>)), ); let params = vec![fn_type_fut!(Context, $from)]; let node_io = NodeIO::<'_, Context>::to_async_node_io(&node, params); node_io }, ) }; } macro_rules! convert_node { (from: $from:ty, to: numbers) => {{ let x: Vec<(ProtoNodeIdentifier, NodeConstructor, NodeIOTypes)> = vec![ convert_node!(from: $from, to: f32), convert_node!(from: $from, to: f64), convert_node!(from: $from, to: i8), convert_node!(from: $from, to: u8), convert_node!(from: $from, to: u16), convert_node!(from: $from, to: i16), convert_node!(from: $from, to: i32), convert_node!(from: $from, to: u32), convert_node!(from: $from, to: i64), convert_node!(from: $from, to: u64), convert_node!(from: $from, to: i128), convert_node!(from: $from, to: u128), convert_node!(from: $from, to: isize), convert_node!(from: $from, to: usize), ]; x }}; (from: numbers, to: $to:ty) => {{ let x: Vec<(ProtoNodeIdentifier, NodeConstructor, NodeIOTypes)> = vec![ convert_node!(from: f32, to: $to), convert_node!(from: f64, to: $to), convert_node!(from: i8, to: $to), convert_node!(from: u8, to: $to), convert_node!(from: u16, to: $to), convert_node!(from: i16, to: $to), convert_node!(from: i32, to: $to), convert_node!(from: u32, to: $to), 
convert_node!(from: i64, to: $to), convert_node!(from: u64, to: $to), convert_node!(from: i128, to: $to), convert_node!(from: u128, to: $to), convert_node!(from: isize, to: $to), convert_node!(from: usize, to: $to), ]; x }}; (from: $from:ty, to: $to:ty) => { convert_node!(from: $from, to: $to, converter: ()) }; (from: $from:ty, to: $to:ty, converter: $convert:ty) => { ( ProtoNodeIdentifier::new(concat!["graphene_core::ops::ConvertNode<", stringify!($to), ">"]), |mut args| { Box::pin(async move { let mut args = args.drain(..); let node = graphene_std::ops::ConvertNode::new( graphene_std::any::downcast_node::<Context, $from>(args.next().expect("Convert node did not get first argument")), graphene_std::any::downcast_node::<Context, $convert>(args.next().expect("Convert node did not get converter argument")), graphene_std::any::FutureWrapperNode::new(graphene_std::value::ClonedNode::new(std::marker::PhantomData::<$to>)) ); let any: DynAnyNode<Context, $to, _> = graphene_std::any::DynAnyNode::new(node); Box::new(any) as TypeErasedBox }) }, { let node = graphene_std::ops::ConvertNode::new( graphene_std::any::PanicNode::<Context, core::pin::Pin<Box<dyn core::future::Future<Output = $from> + Send>>>::new(), graphene_std::any::PanicNode::<Context, core::pin::Pin<Box<dyn core::future::Future<Output = $convert> + Send>>>::new(), graphene_std::any::FutureWrapperNode::new(graphene_std::value::ClonedNode::new(std::marker::PhantomData::<$to>)) ); let params = vec![fn_type_fut!(Context, $from), fn_type_fut!(Context, $convert)]; let node_io = NodeIO::<'_, Context>::to_async_node_io(&node, params); node_io }, ) }; } pub(crate) use async_node; pub(crate) use convert_node; pub(crate) use into_node; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/update_executor.rs
node-graph/interpreted-executor/benches/update_executor.rs
mod benchmark_util; use benchmark_util::{bench_for_each_demo, setup_network}; use criterion::{Criterion, criterion_group, criterion_main}; use graph_craft::proto::ProtoNetwork; use interpreted_executor::dynamic_executor::DynamicExecutor; fn update_executor(c: &mut Criterion) { let mut group = c.benchmark_group("Update Executor"); bench_for_each_demo(&mut group, |name, g| { g.bench_function(name, |b| { b.iter_batched( || { let (_, proto_network) = setup_network(name); let empty = ProtoNetwork::default(); let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap(); (executor, proto_network) }, |(mut executor, network)| futures::executor::block_on(executor.update(std::hint::black_box(network))), criterion::BatchSize::SmallInput, ) }); }); group.finish(); } criterion_group!(benches, update_executor); criterion_main!(benches);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/run_demo_art_criterion.rs
node-graph/interpreted-executor/benches/run_demo_art_criterion.rs
use criterion::measurement::Measurement; use criterion::{BenchmarkGroup, Criterion, criterion_group, criterion_main}; use graph_craft::graphene_compiler::Executor; use graph_craft::proto::ProtoNetwork; use graph_craft::util::{DEMO_ART, compile, load_from_name}; use graphene_std::transform::Footprint; use interpreted_executor::dynamic_executor::DynamicExecutor; fn update_executor<M: Measurement>(name: &str, c: &mut BenchmarkGroup<M>) { let network = load_from_name(name); let proto_network = compile(network); let empty = ProtoNetwork::default(); let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap(); c.bench_function(name, |b| { b.iter_batched( || (executor.clone(), proto_network.clone()), |(mut executor, network)| futures::executor::block_on(executor.update(std::hint::black_box(network))), criterion::BatchSize::SmallInput, ) }); } fn update_executor_demo(c: &mut Criterion) { let mut g = c.benchmark_group("Update Executor"); for name in DEMO_ART { update_executor(name, &mut g); } } fn run_once<M: Measurement>(name: &str, c: &mut BenchmarkGroup<M>) { let network = load_from_name(name); let proto_network = compile(network); let executor = futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap(); let footprint = Footprint::default(); c.bench_function(name, |b| b.iter(|| futures::executor::block_on((&executor).execute(footprint)))); } fn run_once_demo(c: &mut Criterion) { let mut g = c.benchmark_group("Run Once no render"); for name in DEMO_ART { run_once(name, &mut g); } } criterion_group!(benches, update_executor_demo, run_once_demo); criterion_main!(benches);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/run_once_iai.rs
node-graph/interpreted-executor/benches/run_once_iai.rs
mod benchmark_util; use benchmark_util::setup_network; use graphene_std::application_io; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; use interpreted_executor::dynamic_executor::DynamicExecutor; use std::hint::black_box; fn setup_run_once(name: &str) -> DynamicExecutor { let (executor, _) = setup_network(name); executor } #[library_benchmark] #[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_once)] pub fn run_once(executor: DynamicExecutor) { let context = application_io::RenderConfig::default(); black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap()); } library_benchmark_group!(name = run_once_group; benchmarks = run_once); main!(library_benchmark_groups = run_once_group);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/run_cached_iai.rs
node-graph/interpreted-executor/benches/run_cached_iai.rs
mod benchmark_util; use benchmark_util::setup_network; use graphene_std::application_io::RenderConfig; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; use interpreted_executor::dynamic_executor::DynamicExecutor; use std::hint::black_box; fn setup_run_cached(name: &str) -> DynamicExecutor { let (executor, _) = setup_network(name); // Warm up the cache by running once let context = RenderConfig::default(); let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context)); executor } #[library_benchmark] #[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_cached)] pub fn run_cached(executor: DynamicExecutor) { let context = RenderConfig::default(); black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap()); } library_benchmark_group!(name = run_cached_group; benchmarks = run_cached); main!(library_benchmark_groups = run_cached_group);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/benchmark_util.rs
node-graph/interpreted-executor/benches/benchmark_util.rs
use criterion::BenchmarkGroup; use criterion::measurement::Measurement; use futures::executor::block_on; use graph_craft::proto::ProtoNetwork; use graph_craft::util::{DEMO_ART, compile, load_from_name}; use graphene_std::application_io::EditorApi; use interpreted_executor::dynamic_executor::DynamicExecutor; use interpreted_executor::util::wrap_network_in_scope; pub fn setup_network(name: &str) -> (DynamicExecutor, ProtoNetwork) { let mut network = load_from_name(name); let editor_api = std::sync::Arc::new(EditorApi::default()); println!("generating substitutions"); let substitutions = preprocessor::generate_node_substitutions(); println!("expanding network"); preprocessor::expand_network(&mut network, &substitutions); let network = wrap_network_in_scope(network, editor_api); let proto_network = compile(network); let executor = block_on(DynamicExecutor::new(proto_network.clone())).unwrap(); (executor, proto_network) } pub fn bench_for_each_demo<M: Measurement, F>(group: &mut BenchmarkGroup<M>, f: F) where F: Fn(&str, &mut BenchmarkGroup<M>), { for name in DEMO_ART { f(name, group); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/run_once.rs
node-graph/interpreted-executor/benches/run_once.rs
mod benchmark_util; use benchmark_util::{bench_for_each_demo, setup_network}; use criterion::{Criterion, criterion_group, criterion_main}; use graphene_std::application_io::RenderConfig; fn run_once(c: &mut Criterion) { let mut group = c.benchmark_group("Run Once"); let context = RenderConfig::default(); bench_for_each_demo(&mut group, |name, g| { g.bench_function(name, |b| { b.iter_batched( || setup_network(name), |(executor, _)| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), std::hint::black_box(context))).unwrap(), criterion::BatchSize::SmallInput, ) }); }); group.finish(); } criterion_group!(benches, run_once); criterion_main!(benches);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/run_cached.rs
node-graph/interpreted-executor/benches/run_cached.rs
mod benchmark_util; use benchmark_util::{bench_for_each_demo, setup_network}; use criterion::{Criterion, criterion_group, criterion_main}; use graphene_std::application_io::RenderConfig; fn subsequent_evaluations(c: &mut Criterion) { let mut group = c.benchmark_group("Subsequent Evaluations"); let context = RenderConfig::default(); bench_for_each_demo(&mut group, |name, g| { let (executor, _) = setup_network(name); g.bench_function(name, |b| { b.iter(|| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), std::hint::black_box(context))).unwrap()) }); }); group.finish(); } criterion_group!(benches, subsequent_evaluations); criterion_main!(benches);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/interpreted-executor/benches/update_executor_iai.rs
node-graph/interpreted-executor/benches/update_executor_iai.rs
mod benchmark_util; use benchmark_util::setup_network; use graph_craft::proto::ProtoNetwork; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; use interpreted_executor::dynamic_executor::DynamicExecutor; use std::hint::black_box; fn setup_update_executor(name: &str) -> (DynamicExecutor, ProtoNetwork) { let (_, proto_network) = setup_network(name); let empty = ProtoNetwork::default(); let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap(); (executor, proto_network) } #[library_benchmark] #[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_update_executor)] pub fn update_executor(setup: (DynamicExecutor, ProtoNetwork)) { let (mut executor, network) = setup; let _ = black_box(futures::executor::block_on(executor.update(black_box(network)))); } library_benchmark_group!(name = update_group; benchmarks = update_executor); main!(library_benchmark_groups = update_group);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/graphic-types/src/lib.rs
node-graph/libraries/graphic-types/src/lib.rs
pub mod artboard; pub mod graphic; // Re-export all transitive dependencies so downstream crates only need to depend on graphic-types pub use core_types; pub use raster_types; pub use vector_types; // Re-export commonly used types at the crate root pub use artboard::Artboard; pub use graphic::{Graphic, IntoGraphicTable, Vector}; pub mod migrations { use core_types::{ AlphaBlending, table::{Table, TableRow}, }; use dyn_any::DynAny; use glam::DAffine2; use vector_types::vector::{PathStyle, PointDomain, RegionDomain, SegmentDomain, misc::HandleId}; use crate::{Graphic, Vector}; // TODO: Eventually remove this migration document upgrade code pub fn migrate_vector<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<Table<Vector>, D::Error> { use serde::Deserialize; #[derive(Clone, Debug, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct OldVectorData { pub transform: DAffine2, pub alpha_blending: AlphaBlending, pub style: PathStyle, pub colinear_manipulators: Vec<[HandleId; 2]>, pub point_domain: PointDomain, pub segment_domain: SegmentDomain, pub region_domain: RegionDomain, pub upstream_graphic_group: Option<Table<Graphic>>, } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct OldTable<T> { #[serde(alias = "instances", alias = "instance")] element: Vec<T>, transform: Vec<DAffine2>, alpha_blending: Vec<AlphaBlending>, } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct OlderTable<T> { id: Vec<u64>, #[serde(alias = "instances", alias = "instance")] element: Vec<T>, } #[derive(serde::Serialize, serde::Deserialize)] #[serde(untagged)] #[allow(clippy::large_enum_variant)] enum VectorFormat { Vector(Vector), OldVectorData(OldVectorData), OldVectorTable(OldTable<Vector>), OlderVectorTable(OlderTable<Vector>), VectorTable(Table<Vector>), } Ok(match VectorFormat::deserialize(deserializer)? 
{ VectorFormat::Vector(vector) => Table::new_from_element(vector), VectorFormat::OldVectorData(old) => { let mut vector_table = Table::new_from_element(Vector { style: old.style, colinear_manipulators: old.colinear_manipulators, point_domain: old.point_domain, segment_domain: old.segment_domain, region_domain: old.region_domain, upstream_data: old.upstream_graphic_group, }); *vector_table.iter_mut().next().unwrap().transform = old.transform; *vector_table.iter_mut().next().unwrap().alpha_blending = old.alpha_blending; vector_table } VectorFormat::OlderVectorTable(older_table) => older_table.element.into_iter().map(|element| TableRow { element, ..Default::default() }).collect(), VectorFormat::OldVectorTable(old_table) => old_table .element .into_iter() .zip(old_table.transform.into_iter().zip(old_table.alpha_blending)) .map(|(element, (transform, alpha_blending))| TableRow { element, transform, alpha_blending, source_node_id: None, }) .collect(), VectorFormat::VectorTable(vector_table) => vector_table, }) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/graphic-types/src/artboard.rs
node-graph/libraries/graphic-types/src/artboard.rs
use crate::graphic::Graphic; use core_types::Color; use core_types::blending::AlphaBlending; use core_types::bounds::{BoundingBox, RenderBoundingBox}; use core_types::math::quad::Quad; use core_types::render_complexity::RenderComplexity; use core_types::table::{Table, TableRow}; use core_types::transform::Transform; use core_types::uuid::NodeId; use dyn_any::DynAny; use glam::{DAffine2, DVec2, IVec2}; use std::hash::Hash; /// Some [`ArtboardData`] with some optional clipping bounds that can be exported. #[derive(Clone, Debug, Hash, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct Artboard { pub content: Table<Graphic>, pub label: String, pub location: IVec2, pub dimensions: IVec2, pub background: Color, pub clip: bool, } impl Default for Artboard { fn default() -> Self { Self::new(IVec2::ZERO, IVec2::new(1920, 1080)) } } impl Artboard { pub fn new(location: IVec2, dimensions: IVec2) -> Self { Self { content: Table::new(), label: "Artboard".to_string(), location: location.min(location + dimensions), dimensions: dimensions.abs(), background: Color::WHITE, clip: false, } } } impl BoundingBox for Artboard { fn bounding_box(&self, transform: DAffine2, include_stroke: bool) -> RenderBoundingBox { let artboard_bounds = || (transform * Quad::from_box([self.location.as_dvec2(), self.location.as_dvec2() + self.dimensions.as_dvec2()])).bounding_box(); if self.clip { return RenderBoundingBox::Rectangle(artboard_bounds()); } match self.content.bounding_box(transform, include_stroke) { RenderBoundingBox::Rectangle(content_bounds) => RenderBoundingBox::Rectangle(Quad::combine_bounds(content_bounds, artboard_bounds())), other => other, } } } impl RenderComplexity for Artboard { fn render_complexity(&self) -> usize { self.content.render_complexity() } } // Implementations for Artboard impl Transform for Artboard { fn transform(&self) -> DAffine2 { DAffine2::from_translation(self.location.as_dvec2()) } fn local_pivot(&self, pivot: DVec2) -> DVec2 { 
self.location.as_dvec2() + self.dimensions.as_dvec2() * pivot } } // TODO: Eventually remove this migration document upgrade code pub fn migrate_artboard<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<Table<Artboard>, D::Error> { use serde::Deserialize; #[derive(Clone, Default, Debug, Hash, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct ArtboardGroup { pub artboards: Vec<(Artboard, Option<NodeId>)>, } #[derive(serde::Serialize, serde::Deserialize)] #[serde(untagged)] enum ArtboardFormat { ArtboardGroup(ArtboardGroup), OldArtboardTable(OldTable<Artboard>), ArtboardTable(Table<Artboard>), } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct OldTable<T> { #[serde(alias = "instances", alias = "instance")] element: Vec<T>, transform: Vec<DAffine2>, alpha_blending: Vec<AlphaBlending>, } Ok(match ArtboardFormat::deserialize(deserializer)? { ArtboardFormat::ArtboardGroup(artboard_group) => { let mut table = Table::new(); for (artboard, source_node_id) in artboard_group.artboards { table.push(TableRow { element: artboard, transform: DAffine2::IDENTITY, alpha_blending: AlphaBlending::default(), source_node_id, }); } table } ArtboardFormat::OldArtboardTable(old_table) => old_table .element .into_iter() .zip(old_table.transform.into_iter().zip(old_table.alpha_blending)) .map(|(element, (transform, alpha_blending))| TableRow { element, transform, alpha_blending, source_node_id: None, }) .collect(), ArtboardFormat::ArtboardTable(artboard_table) => artboard_table, }) } // Node definitions moved to graphic-nodes crate
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/graphic-types/src/graphic.rs
node-graph/libraries/graphic-types/src/graphic.rs
use core_types::Color; use core_types::blending::AlphaBlending; use core_types::bounds::{BoundingBox, RenderBoundingBox}; use core_types::ops::TableConvert; use core_types::render_complexity::RenderComplexity; use core_types::table::{Table, TableRow}; use core_types::uuid::NodeId; use dyn_any::DynAny; use glam::DAffine2; use raster_types::{CPU, GPU, Raster}; use std::hash::Hash; use vector_types::GradientStops; // use vector_types::Vector; pub type Vector = vector_types::Vector<Option<Table<Graphic>>>; /// The possible forms of graphical content that can be rendered by the Render node into either an image or SVG syntax. #[derive(Clone, Debug, Hash, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub enum Graphic { Graphic(Table<Graphic>), Vector(Table<Vector>), RasterCPU(Table<Raster<CPU>>), RasterGPU(Table<Raster<GPU>>), Color(Table<Color>), Gradient(Table<GradientStops>), } impl Default for Graphic { fn default() -> Self { Self::Graphic(Table::new()) } } // Graphic impl From<Table<Graphic>> for Graphic { fn from(graphic: Table<Graphic>) -> Self { Graphic::Graphic(graphic) } } // Vector impl From<Vector> for Graphic { fn from(vector: Vector) -> Self { Graphic::Vector(Table::new_from_element(vector)) } } impl From<Table<Vector>> for Graphic { fn from(vector: Table<Vector>) -> Self { Graphic::Vector(vector) } } // Note: Table<Vector> -> Table<Graphic> conversion handled by blanket impl in gcore // Raster<CPU> impl From<Raster<CPU>> for Graphic { fn from(raster: Raster<CPU>) -> Self { Graphic::RasterCPU(Table::new_from_element(raster)) } } impl From<Table<Raster<CPU>>> for Graphic { fn from(raster: Table<Raster<CPU>>) -> Self { Graphic::RasterCPU(raster) } } // Note: Table conversions handled by blanket impl in gcore // Raster<GPU> impl From<Raster<GPU>> for Graphic { fn from(raster: Raster<GPU>) -> Self { Graphic::RasterGPU(Table::new_from_element(raster)) } } impl From<Table<Raster<GPU>>> for Graphic { fn from(raster: Table<Raster<GPU>>) -> Self { 
Graphic::RasterGPU(raster) } } // Note: Table conversions handled by blanket impl in gcore // Color impl From<Color> for Graphic { fn from(color: Color) -> Self { Graphic::Color(Table::new_from_element(color)) } } impl From<Table<Color>> for Graphic { fn from(color: Table<Color>) -> Self { Graphic::Color(color) } } // Note: Table conversions handled by blanket impl in gcore // Option<Color> impl From<Option<Color>> for Graphic { fn from(color: Option<Color>) -> Self { if let Some(color) = color { Graphic::Color(Table::new_from_element(color)) } else { Graphic::default() } } } // Note: Table conversions handled by blanket impl in gcore // Note: Table<Color> -> Option<Color> is in gcore (Color is defined there) // GradientStops impl From<GradientStops> for Graphic { fn from(gradient: GradientStops) -> Self { Graphic::Gradient(Table::new_from_element(gradient)) } } impl From<Table<GradientStops>> for Graphic { fn from(gradient: Table<GradientStops>) -> Self { Graphic::Gradient(gradient) } } // Local trait to convert types to Table<Graphic> (avoids orphan rule issues) pub trait IntoGraphicTable { fn into_graphic_table(self) -> Table<Graphic>; /// Deeply flattens any vector content within a graphic table, discarding non-vector content, and returning a table of only vector elements. fn into_flattened_vector_table(self) -> Table<Vector> where Self: std::marker::Sized, { let content = self.into_graphic_table(); // TODO: Avoid mutable reference, instead return a new Table<Graphic>? 
fn flatten_table(output_vector_table: &mut Table<Vector>, current_graphic_table: Table<Graphic>) { for current_graphic_row in current_graphic_table.iter() { let current_graphic = current_graphic_row.element.clone(); let source_node_id = *current_graphic_row.source_node_id; match current_graphic { // If we're allowed to recurse, flatten any tables we encounter Graphic::Graphic(mut current_graphic_table) => { // Apply the parent graphic's transform to all child elements for graphic in current_graphic_table.iter_mut() { *graphic.transform = *current_graphic_row.transform * *graphic.transform; } flatten_table(output_vector_table, current_graphic_table); } // Push any leaf Vector elements we encounter Graphic::Vector(vector_table) => { for current_vector_row in vector_table.iter() { output_vector_table.push(TableRow { element: current_vector_row.element.clone(), transform: *current_graphic_row.transform * *current_vector_row.transform, alpha_blending: AlphaBlending { blend_mode: current_vector_row.alpha_blending.blend_mode, opacity: current_graphic_row.alpha_blending.opacity * current_vector_row.alpha_blending.opacity, fill: current_vector_row.alpha_blending.fill, clip: current_vector_row.alpha_blending.clip, }, source_node_id, }); } } _ => {} } } } let mut output = Table::new(); flatten_table(&mut output, content); output } } impl IntoGraphicTable for Table<Graphic> { fn into_graphic_table(self) -> Table<Graphic> { self } } impl IntoGraphicTable for Table<Vector> { fn into_graphic_table(self) -> Table<Graphic> { Table::new_from_element(Graphic::Vector(self)) } } impl IntoGraphicTable for Table<Raster<CPU>> { fn into_graphic_table(self) -> Table<Graphic> { Table::new_from_element(Graphic::RasterCPU(self)) } } impl IntoGraphicTable for Table<Raster<GPU>> { fn into_graphic_table(self) -> Table<Graphic> { Table::new_from_element(Graphic::RasterGPU(self)) } } impl IntoGraphicTable for Table<Color> { fn into_graphic_table(self) -> Table<Graphic> { 
Table::new_from_element(Graphic::Color(self)) } } impl IntoGraphicTable for Table<GradientStops> { fn into_graphic_table(self) -> Table<Graphic> { Table::new_from_element(Graphic::Gradient(self)) } } impl IntoGraphicTable for DAffine2 { fn into_graphic_table(self) -> Table<Graphic> { Table::new_from_element(Graphic::default()) } } // DAffine2 impl From<DAffine2> for Graphic { fn from(_: DAffine2) -> Self { Graphic::default() } } // Note: Table conversions handled by blanket impl in gcore impl Graphic { pub fn as_graphic(&self) -> Option<&Table<Graphic>> { match self { Graphic::Graphic(graphic) => Some(graphic), _ => None, } } pub fn as_graphic_mut(&mut self) -> Option<&mut Table<Graphic>> { match self { Graphic::Graphic(graphic) => Some(graphic), _ => None, } } pub fn as_vector(&self) -> Option<&Table<Vector>> { match self { Graphic::Vector(vector) => Some(vector), _ => None, } } pub fn as_vector_mut(&mut self) -> Option<&mut Table<Vector>> { match self { Graphic::Vector(vector) => Some(vector), _ => None, } } pub fn as_raster(&self) -> Option<&Table<Raster<CPU>>> { match self { Graphic::RasterCPU(raster) => Some(raster), _ => None, } } pub fn as_raster_mut(&mut self) -> Option<&mut Table<Raster<CPU>>> { match self { Graphic::RasterCPU(raster) => Some(raster), _ => None, } } pub fn had_clip_enabled(&self) -> bool { match self { Graphic::Vector(vector) => vector.iter().all(|row| row.alpha_blending.clip), Graphic::Graphic(graphic) => graphic.iter().all(|row| row.alpha_blending.clip), Graphic::RasterCPU(raster) => raster.iter().all(|row| row.alpha_blending.clip), Graphic::RasterGPU(raster) => raster.iter().all(|row| row.alpha_blending.clip), Graphic::Color(color) => color.iter().all(|row| row.alpha_blending.clip), Graphic::Gradient(gradient) => gradient.iter().all(|row| row.alpha_blending.clip), } } pub fn can_reduce_to_clip_path(&self) -> bool { match self { Graphic::Vector(vector) => vector.iter().all(|row| { let style = &row.element.style; let alpha_blending = 
&row.alpha_blending; (alpha_blending.opacity > 1. - f32::EPSILON) && style.fill().is_opaque() && style.stroke().is_none_or(|stroke| !stroke.has_renderable_stroke()) }), _ => false, } } } impl BoundingBox for Graphic { fn bounding_box(&self, transform: DAffine2, include_stroke: bool) -> RenderBoundingBox { match self { Graphic::Vector(vector) => vector.bounding_box(transform, include_stroke), Graphic::RasterCPU(raster) => raster.bounding_box(transform, include_stroke), Graphic::RasterGPU(raster) => raster.bounding_box(transform, include_stroke), Graphic::Graphic(graphic) => graphic.bounding_box(transform, include_stroke), Graphic::Color(color) => color.bounding_box(transform, include_stroke), Graphic::Gradient(gradient) => gradient.bounding_box(transform, include_stroke), } } } impl TableConvert<Graphic> for Vector { fn convert_row(self) -> Graphic { Graphic::Vector(Table::new_from_element(self)) } } impl TableConvert<Graphic> for Raster<CPU> { fn convert_row(self) -> Graphic { Graphic::RasterCPU(Table::new_from_element(self)) } } impl TableConvert<Graphic> for Raster<GPU> { fn convert_row(self) -> Graphic { Graphic::RasterGPU(Table::new_from_element(self)) } } impl RenderComplexity for Graphic { fn render_complexity(&self) -> usize { match self { Self::Graphic(table) => table.render_complexity(), Self::Vector(table) => table.render_complexity(), Self::RasterCPU(table) => table.render_complexity(), Self::RasterGPU(table) => table.render_complexity(), Self::Color(table) => table.render_complexity(), Self::Gradient(table) => table.render_complexity(), } } } // Node definitions moved to graphic-nodes crate pub trait AtIndex { type Output; fn at_index(&self, index: usize) -> Option<Self::Output>; fn at_index_from_end(&self, index: usize) -> Option<Self::Output>; } impl<T: Clone> AtIndex for Vec<T> { type Output = T; fn at_index(&self, index: usize) -> Option<Self::Output> { self.get(index).cloned() } fn at_index_from_end(&self, index: usize) -> Option<Self::Output> { if 
index == 0 || index > self.len() { None } else { self.get(self.len() - index).cloned() } } } impl<T: Clone> AtIndex for Table<T> { type Output = Table<T>; fn at_index(&self, index: usize) -> Option<Self::Output> { let mut result_table = Self::default(); if let Some(row) = self.iter().nth(index) { result_table.push(row.into_cloned()); Some(result_table) } else { None } } fn at_index_from_end(&self, index: usize) -> Option<Self::Output> { let mut result_table = Self::default(); if index == 0 || index > self.len() { None } else if let Some(row) = self.iter().nth(self.len() - index) { result_table.push(row.into_cloned()); Some(result_table) } else { None } } } // TODO: Eventually remove this migration document upgrade code pub fn migrate_graphic<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<Table<Graphic>, D::Error> { use serde::Deserialize; #[derive(Clone, Debug, PartialEq, DynAny, Default, serde::Serialize, serde::Deserialize)] pub struct OldGraphicGroup { elements: Vec<(Graphic, Option<NodeId>)>, transform: DAffine2, alpha_blending: AlphaBlending, } #[derive(Clone, Debug, PartialEq, DynAny, Default, serde::Serialize, serde::Deserialize)] pub struct GraphicGroup { elements: Vec<(Graphic, Option<NodeId>)>, } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct OlderTable<T> { id: Vec<u64>, #[serde(alias = "instances", alias = "instance")] element: Vec<T>, } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct OldTable<T> { id: Vec<u64>, #[serde(alias = "instances", alias = "instance")] element: Vec<T>, transform: Vec<DAffine2>, alpha_blending: Vec<AlphaBlending>, } #[derive(serde::Serialize, serde::Deserialize)] #[serde(untagged)] enum GraphicFormat { OldGraphicGroup(OldGraphicGroup), OlderTableOldGraphicGroup(OlderTable<OldGraphicGroup>), OldTableOldGraphicGroup(OldTable<OldGraphicGroup>), OldTableGraphicGroup(OldTable<GraphicGroup>), Table(serde_json::Value), } Ok(match GraphicFormat::deserialize(deserializer)? 
{ GraphicFormat::OldGraphicGroup(old) => { let mut graphic_table = Table::new(); for (graphic, source_node_id) in old.elements { graphic_table.push(TableRow { element: graphic, transform: old.transform, alpha_blending: old.alpha_blending, source_node_id, }); } graphic_table } GraphicFormat::OlderTableOldGraphicGroup(old) => old .element .into_iter() .flat_map(|element| { element.elements.into_iter().map(move |(graphic, source_node_id)| TableRow { element: graphic, transform: element.transform, alpha_blending: element.alpha_blending, source_node_id, }) }) .collect(), GraphicFormat::OldTableOldGraphicGroup(old) => old .element .into_iter() .flat_map(|element| { element.elements.into_iter().map(move |(graphic, source_node_id)| TableRow { element: graphic, transform: element.transform, alpha_blending: element.alpha_blending, source_node_id, }) }) .collect(), GraphicFormat::OldTableGraphicGroup(old) => old .element .into_iter() .flat_map(|element| { element.elements.into_iter().map(move |(graphic, source_node_id)| TableRow { element: graphic, transform: Default::default(), alpha_blending: Default::default(), source_node_id, }) }) .collect(), GraphicFormat::Table(value) => { // Try to deserialize as either table format if let Ok(old_table) = serde_json::from_value::<Table<GraphicGroup>>(value.clone()) { let mut graphic_table = Table::new(); for row in old_table.iter() { for (graphic, source_node_id) in &row.element.elements { graphic_table.push(TableRow { element: graphic.clone(), transform: *row.transform, alpha_blending: *row.alpha_blending, source_node_id: *source_node_id, }); } } graphic_table } else if let Ok(new_table) = serde_json::from_value::<Table<Graphic>>(value) { new_table } else { return Err(serde::de::Error::custom("Failed to deserialize Table<Graphic>")); } } }) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/rendering/src/to_peniko.rs
node-graph/libraries/rendering/src/to_peniko.rs
use core_types::BlendMode; use vello::peniko; pub trait BlendModeExt { fn to_peniko(&self) -> peniko::Mix; } impl BlendModeExt for BlendMode { fn to_peniko(&self) -> peniko::Mix { match self { // Normal group BlendMode::Normal => peniko::Mix::Normal, // Darken group BlendMode::Darken => peniko::Mix::Darken, BlendMode::Multiply => peniko::Mix::Multiply, BlendMode::ColorBurn => peniko::Mix::ColorBurn, // Lighten group BlendMode::Lighten => peniko::Mix::Lighten, BlendMode::Screen => peniko::Mix::Screen, BlendMode::ColorDodge => peniko::Mix::ColorDodge, // Contrast group BlendMode::Overlay => peniko::Mix::Overlay, BlendMode::SoftLight => peniko::Mix::SoftLight, BlendMode::HardLight => peniko::Mix::HardLight, // Inversion group BlendMode::Difference => peniko::Mix::Difference, BlendMode::Exclusion => peniko::Mix::Exclusion, // Component group BlendMode::Hue => peniko::Mix::Hue, BlendMode::Saturation => peniko::Mix::Saturation, BlendMode::Color => peniko::Mix::Color, BlendMode::Luminosity => peniko::Mix::Luminosity, _ => todo!(), } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/rendering/src/lib.rs
node-graph/libraries/rendering/src/lib.rs
pub mod convert_usvg_path; pub mod render_ext; mod renderer; pub mod to_peniko; pub use renderer::*;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/rendering/src/renderer.rs
node-graph/libraries/rendering/src/renderer.rs
use crate::render_ext::RenderExt; use crate::to_peniko::BlendModeExt; use core_types::blending::BlendMode; use core_types::bounds::BoundingBox; use core_types::bounds::RenderBoundingBox; use core_types::color::Color; use core_types::math::quad::Quad; use core_types::render_complexity::RenderComplexity; use core_types::table::{Table, TableRow}; use core_types::transform::{Footprint, Transform}; use core_types::uuid::{NodeId, generate_uuid}; use dyn_any::DynAny; use glam::{DAffine2, DVec2}; use graphic_types::Vector; use graphic_types::raster_types::BitmapMut; use graphic_types::raster_types::Image; use graphic_types::raster_types::{CPU, GPU, Raster}; use graphic_types::vector_types::gradient::GradientStops; use graphic_types::vector_types::gradient::GradientType; use graphic_types::vector_types::subpath::Subpath; use graphic_types::vector_types::vector::click_target::{ClickTarget, FreePoint}; use graphic_types::vector_types::vector::style::{Fill, PaintOrder, RenderMode, Stroke, StrokeAlign}; use graphic_types::{Artboard, Graphic}; use kurbo::Affine; use kurbo::Shape; use num_traits::Zero; use std::collections::{HashMap, HashSet}; use std::fmt::Write; use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::sync::{Arc, LazyLock}; use vello::*; #[derive(Clone, Copy, Debug, PartialEq, serde::Serialize, serde::Deserialize)] enum MaskType { Clip, Mask, } impl MaskType { fn to_attribute(self) -> String { match self { Self::Mask => "mask".to_string(), Self::Clip => "clip-path".to_string(), } } fn write_to_defs(self, svg_defs: &mut String, uuid: u64, svg_string: String) { let id = format!("mask-{uuid}"); match self { Self::Clip => write!(svg_defs, r##"<clipPath id="{id}">{svg_string}</clipPath>"##).unwrap(), Self::Mask => write!(svg_defs, r##"<mask id="{id}" mask-type="alpha">{svg_string}</mask>"##).unwrap(), } } } /// Mutable state used whilst rendering to an SVG pub struct SvgRender { pub svg: Vec<SvgSegment>, pub svg_defs: String, pub transform: DAffine2, pub 
image_data: HashMap<Image<Color>, u64>, indent: usize, } impl SvgRender { pub fn new() -> Self { Self { svg: Vec::default(), svg_defs: String::new(), transform: DAffine2::IDENTITY, image_data: HashMap::new(), indent: 0, } } pub fn indent(&mut self) { self.svg.push("\n".into()); self.svg.push("\t".repeat(self.indent).into()); } /// Add an outer `<svg>...</svg>` tag with a `viewBox` and the `<defs />` pub fn format_svg(&mut self, bounds_min: DVec2, bounds_max: DVec2) { let (x, y) = bounds_min.into(); let (size_x, size_y) = (bounds_max - bounds_min).into(); let defs = &self.svg_defs; let svg_header = format!(r#"<svg xmlns="http://www.w3.org/2000/svg" viewBox="{x} {y} {size_x} {size_y}"><defs>{defs}</defs>"#,); self.svg.insert(0, svg_header.into()); self.svg.push("</svg>".into()); } /// Wraps the SVG with `<svg><g transform="...">...</g></svg>`, which allows for rotation pub fn wrap_with_transform(&mut self, transform: DAffine2, size: Option<DVec2>) { let defs = &self.svg_defs; let view_box = size .map(|size| format!("viewBox=\"0 0 {} {}\" width=\"{}\" height=\"{}\"", size.x, size.y, size.x, size.y)) .unwrap_or_default(); let matrix = format_transform_matrix(transform); let transform = if matrix.is_empty() { String::new() } else { format!(r#" transform="{matrix}""#) }; let svg_header = format!(r#"<svg xmlns="http://www.w3.org/2000/svg" {view_box}><defs>{defs}</defs><g{transform}>"#); self.svg.insert(0, svg_header.into()); self.svg.push("</g></svg>".into()); } pub fn leaf_tag(&mut self, name: impl Into<SvgSegment>, attributes: impl FnOnce(&mut SvgRenderAttrs)) { self.indent(); self.svg.push("<".into()); self.svg.push(name.into()); attributes(&mut SvgRenderAttrs(self)); self.svg.push("/>".into()); } pub fn leaf_node(&mut self, content: impl Into<SvgSegment>) { self.indent(); self.svg.push(content.into()); } pub fn parent_tag(&mut self, name: impl Into<SvgSegment>, attributes: impl FnOnce(&mut SvgRenderAttrs), inner: impl FnOnce(&mut Self)) { let name = name.into(); 
self.indent(); self.svg.push("<".into()); self.svg.push(name.clone()); // Wraps `self` in a newtype (1-tuple) which is then mutated by the `attributes` closure attributes(&mut SvgRenderAttrs(self)); self.svg.push(">".into()); let length = self.svg.len(); self.indent += 1; inner(self); self.indent -= 1; if self.svg.len() != length { self.indent(); self.svg.push("</".into()); self.svg.push(name); self.svg.push(">".into()); } else { self.svg.pop(); self.svg.push("/>".into()); } } } impl Default for SvgRender { fn default() -> Self { Self::new() } } #[derive(Clone, Debug, Default)] pub struct RenderContext { pub resource_overrides: Vec<(peniko::ImageBrush, wgpu::Texture)>, } #[derive(Default, Clone, Copy, Hash)] pub enum RenderOutputType { #[default] Svg, Vello, } /// Static state used whilst rendering #[derive(Default, Clone)] pub struct RenderParams { pub render_mode: RenderMode, pub footprint: Footprint, /// Ratio of physical pixels to logical pixels. `scale := physical_pixels / logical_pixels` /// Ignored when rendering to SVG. pub scale: f64, pub render_output_type: RenderOutputType, pub thumbnail: bool, /// Don't render the rectangle for an artboard to allow exporting with a transparent background. pub hide_artboards: bool, /// Are we exporting pub for_export: bool, /// Are we generating a mask in this render pass? Used to see if fill should be multiplied with alpha. pub for_mask: bool, /// Are we generating a mask for alignment? 
Used to prevent unnecessary transforms in masks pub alignment_parent_transform: Option<DAffine2>, pub aligned_strokes: bool, pub override_paint_order: bool, } impl Hash for RenderParams { fn hash<H: Hasher>(&self, state: &mut H) { self.render_mode.hash(state); self.footprint.hash(state); self.render_output_type.hash(state); self.thumbnail.hash(state); self.hide_artboards.hash(state); self.for_export.hash(state); self.for_mask.hash(state); if let Some(x) = self.alignment_parent_transform { x.to_cols_array().iter().for_each(|x| x.to_bits().hash(state)) } self.aligned_strokes.hash(state); self.override_paint_order.hash(state); } } impl RenderParams { pub fn for_clipper(&self) -> Self { Self { for_mask: true, ..*self } } pub fn for_alignment(&self, transform: DAffine2) -> Self { let alignment_parent_transform = Some(transform); Self { alignment_parent_transform, ..*self } } pub fn to_canvas(&self) -> bool { !self.for_export && !self.thumbnail && !self.for_mask } } pub fn format_transform_matrix(transform: DAffine2) -> String { if transform == DAffine2::IDENTITY { return String::new(); } transform.to_cols_array().iter().enumerate().fold("matrix(".to_string(), |val, (i, num)| { let num = if num.abs() < 1_000_000_000. { (num * 1_000_000_000.).round() / 1_000_000_000. } else { *num }; let num = if num.is_zero() { "0".to_string() } else { num.to_string() }; let comma = if i == 5 { "" } else { "," }; val + &(num + comma) }) + ")" } fn max_scale(transform: DAffine2) -> f64 { let sx = transform.x_axis.length_squared(); let sy = transform.y_axis.length_squared(); (sx + sy).sqrt() } pub fn to_transform(transform: DAffine2) -> usvg::Transform { let cols = transform.to_cols_array(); usvg::Transform::from_row(cols[0] as f32, cols[1] as f32, cols[2] as f32, cols[3] as f32, cols[4] as f32, cols[5] as f32) } // TODO: Click targets can be removed from the render output, since the vector data is available in the vector modify data from Monitor nodes. 
// This will require that the transform for child layers into that layer space be calculated, or it could be returned from the RenderOutput instead of click targets. #[derive(Debug, Default, Clone, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct RenderMetadata { pub upstream_footprints: HashMap<NodeId, Footprint>, pub local_transforms: HashMap<NodeId, DAffine2>, pub first_element_source_id: HashMap<NodeId, Option<NodeId>>, pub click_targets: HashMap<NodeId, Vec<ClickTarget>>, pub clip_targets: HashSet<NodeId>, } impl RenderMetadata { pub fn apply_transform(&mut self, transform: DAffine2) { for value in self.upstream_footprints.values_mut() { value.transform = transform * value.transform; } } } // TODO: Rename to "Graphical" pub trait Render: BoundingBox + RenderComplexity { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams); fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, _render_params: &RenderParams); /// The upstream click targets for each layer are collected during the render so that they do not have to be calculated for each click detection. fn add_upstream_click_targets(&self, _click_targets: &mut Vec<ClickTarget>) {} // TODO: Store all click targets in a vec which contains the AABB, click target, and path // fn add_click_targets(&self, click_targets: &mut Vec<([DVec2; 2], ClickTarget, Vec<NodeId>)>, current_path: Option<NodeId>) {} /// Recursively iterate over data in the render (including nested layer stacks upstream of a vector node, in the case of a boolean operation) to collect the footprints, click targets, and vector modify. 
fn collect_metadata(&self, _metadata: &mut RenderMetadata, _footprint: Footprint, _element_id: Option<NodeId>) {} fn contains_artboard(&self) -> bool { false } fn new_ids_from_hash(&mut self, _reference: Option<NodeId>) {} } impl Render for Graphic { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) { match self { Graphic::Graphic(table) => table.render_svg(render, render_params), Graphic::Vector(table) => table.render_svg(render, render_params), Graphic::RasterCPU(table) => table.render_svg(render, render_params), Graphic::RasterGPU(_) => (), Graphic::Color(table) => table.render_svg(render, render_params), Graphic::Gradient(table) => table.render_svg(render, render_params), } } fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, render_params: &RenderParams) { match self { Graphic::Graphic(table) => table.render_to_vello(scene, transform, context, render_params), Graphic::Vector(table) => table.render_to_vello(scene, transform, context, render_params), Graphic::RasterCPU(table) => table.render_to_vello(scene, transform, context, render_params), Graphic::RasterGPU(table) => table.render_to_vello(scene, transform, context, render_params), Graphic::Color(table) => table.render_to_vello(scene, transform, context, render_params), Graphic::Gradient(table) => table.render_to_vello(scene, transform, context, render_params), } } fn collect_metadata(&self, metadata: &mut RenderMetadata, footprint: Footprint, element_id: Option<NodeId>) { if let Some(element_id) = element_id { match self { Graphic::Graphic(_) => { metadata.upstream_footprints.insert(element_id, footprint); } Graphic::Vector(table) => { metadata.upstream_footprints.insert(element_id, footprint); // TODO: Find a way to handle more than the first row if let Some(row) = table.iter().next() { metadata.first_element_source_id.insert(element_id, *row.source_node_id); metadata.local_transforms.insert(element_id, *row.transform); } } 
Graphic::RasterCPU(table) => { metadata.upstream_footprints.insert(element_id, footprint); // TODO: Find a way to handle more than the first row if let Some(row) = table.iter().next() { metadata.local_transforms.insert(element_id, *row.transform); } } Graphic::RasterGPU(table) => { metadata.upstream_footprints.insert(element_id, footprint); // TODO: Find a way to handle more than the first row if let Some(row) = table.iter().next() { metadata.local_transforms.insert(element_id, *row.transform); } } Graphic::Color(table) => { metadata.upstream_footprints.insert(element_id, footprint); // TODO: Find a way to handle more than the first row if let Some(row) = table.iter().next() { metadata.local_transforms.insert(element_id, *row.transform); } } Graphic::Gradient(table) => { metadata.upstream_footprints.insert(element_id, footprint); // TODO: Find a way to handle more than the first row if let Some(row) = table.iter().next() { metadata.local_transforms.insert(element_id, *row.transform); } } } } match self { Graphic::Graphic(table) => table.collect_metadata(metadata, footprint, element_id), Graphic::Vector(table) => table.collect_metadata(metadata, footprint, element_id), Graphic::RasterCPU(table) => table.collect_metadata(metadata, footprint, element_id), Graphic::RasterGPU(table) => table.collect_metadata(metadata, footprint, element_id), Graphic::Color(table) => table.collect_metadata(metadata, footprint, element_id), Graphic::Gradient(table) => table.collect_metadata(metadata, footprint, element_id), } } fn add_upstream_click_targets(&self, click_targets: &mut Vec<ClickTarget>) { match self { Graphic::Graphic(table) => table.add_upstream_click_targets(click_targets), Graphic::Vector(table) => table.add_upstream_click_targets(click_targets), Graphic::RasterCPU(table) => table.add_upstream_click_targets(click_targets), Graphic::RasterGPU(table) => table.add_upstream_click_targets(click_targets), Graphic::Color(table) => 
table.add_upstream_click_targets(click_targets), Graphic::Gradient(table) => table.add_upstream_click_targets(click_targets), } } fn contains_artboard(&self) -> bool { match self { Graphic::Graphic(table) => table.contains_artboard(), Graphic::Vector(table) => table.contains_artboard(), Graphic::RasterCPU(table) => table.contains_artboard(), Graphic::RasterGPU(table) => table.contains_artboard(), Graphic::Color(table) => table.contains_artboard(), Graphic::Gradient(table) => table.contains_artboard(), } } fn new_ids_from_hash(&mut self, reference: Option<NodeId>) { match self { Graphic::Graphic(table) => table.new_ids_from_hash(reference), Graphic::Vector(table) => table.new_ids_from_hash(reference), Graphic::RasterCPU(_) => (), Graphic::RasterGPU(_) => (), Graphic::Color(_) => (), Graphic::Gradient(_) => (), } } } impl Render for Artboard { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) { // Rectangle for the artboard if !render_params.hide_artboards { // Background render.leaf_tag("rect", |attributes| { attributes.push("fill", format!("#{}", self.background.to_rgb_hex_srgb_from_gamma())); if self.background.a() < 1. 
{ attributes.push("fill-opacity", ((self.background.a() * 1000.).round() / 1000.).to_string()); } attributes.push("x", self.location.x.min(self.location.x + self.dimensions.x).to_string()); attributes.push("y", self.location.y.min(self.location.y + self.dimensions.y).to_string()); attributes.push("width", self.dimensions.x.abs().to_string()); attributes.push("height", self.dimensions.y.abs().to_string()); }); } // Artwork render.parent_tag( // SVG group tag "g", // Group tag attributes |attributes| { let matrix = format_transform_matrix(self.transform()); if !matrix.is_empty() { attributes.push("transform", matrix); } if self.clip { let id = format!("artboard-{}", generate_uuid()); let selector = format!("url(#{id})"); write!( &mut attributes.0.svg_defs, r##"<clipPath id="{id}"><rect x="0" y="0" width="{}" height="{}"/></clipPath>"##, self.dimensions.x, self.dimensions.y, ) .unwrap(); attributes.push("clip-path", selector); } }, // Artwork content |render| { self.content.render_svg(render, render_params); }, ); } fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, render_params: &RenderParams) { use vello::peniko; // Render background let color = peniko::Color::new([self.background.r(), self.background.g(), self.background.b(), self.background.a()]); let [a, b] = [self.location.as_dvec2(), self.location.as_dvec2() + self.dimensions.as_dvec2()]; let rect = kurbo::Rect::new(a.x.min(b.x), a.y.min(b.y), a.x.max(b.x), a.y.max(b.y)); scene.push_layer(peniko::Mix::Normal, 1., kurbo::Affine::new(transform.to_cols_array()), &rect); scene.fill(peniko::Fill::NonZero, kurbo::Affine::new(transform.to_cols_array()), color, None, &rect); scene.pop_layer(); if self.clip { scene.push_clip_layer(kurbo::Affine::new(transform.to_cols_array()), &rect); } // Since the content's transform is right multiplied in when rendering the content, we just need to right multiply by the artboard offset here. 
let child_transform = transform * DAffine2::from_translation(self.location.as_dvec2()); self.content.render_to_vello(scene, child_transform, context, render_params); if self.clip { scene.pop_layer(); } } fn collect_metadata(&self, metadata: &mut RenderMetadata, mut footprint: Footprint, element_id: Option<NodeId>) { if let Some(element_id) = element_id { let subpath = Subpath::new_rect(DVec2::ZERO, self.dimensions.as_dvec2()); metadata.click_targets.insert(element_id, vec![ClickTarget::new_with_subpath(subpath, 0.)]); metadata.upstream_footprints.insert(element_id, footprint); metadata.local_transforms.insert(element_id, DAffine2::from_translation(self.location.as_dvec2())); if self.clip { metadata.clip_targets.insert(element_id); } } footprint.transform *= self.transform(); self.content.collect_metadata(metadata, footprint, None); } fn add_upstream_click_targets(&self, click_targets: &mut Vec<ClickTarget>) { let subpath_rectangle = Subpath::new_rect(DVec2::ZERO, self.dimensions.as_dvec2()); click_targets.push(ClickTarget::new_with_subpath(subpath_rectangle, 0.)); } fn contains_artboard(&self) -> bool { true } } impl Render for Table<Artboard> { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) { for artboard in self.iter() { artboard.element.render_svg(render, render_params); } } fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, render_params: &RenderParams) { for row in self.iter() { row.element.render_to_vello(scene, transform, context, render_params); } } fn collect_metadata(&self, metadata: &mut RenderMetadata, footprint: Footprint, _element_id: Option<NodeId>) { for row in self.iter() { row.element.collect_metadata(metadata, footprint, *row.source_node_id); } } fn add_upstream_click_targets(&self, click_targets: &mut Vec<ClickTarget>) { for row in self.iter() { row.element.add_upstream_click_targets(click_targets); } } fn contains_artboard(&self) -> bool { self.iter().count() > 0 } } impl 
Render for Table<Graphic> { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) { let mut iter = self.iter().peekable(); let mut mask_state = None; while let Some(row) = iter.next() { render.parent_tag( "g", |attributes| { let matrix = format_transform_matrix(*row.transform); if !matrix.is_empty() { attributes.push("transform", matrix); } let opacity = row.alpha_blending.opacity(render_params.for_mask); if opacity < 1. { attributes.push("opacity", opacity.to_string()); } if row.alpha_blending.blend_mode != BlendMode::default() { attributes.push("style", row.alpha_blending.blend_mode.render()); } let next_clips = iter.peek().is_some_and(|next_row| next_row.element.had_clip_enabled()); if next_clips && mask_state.is_none() { let uuid = generate_uuid(); let mask_type = if row.element.can_reduce_to_clip_path() { MaskType::Clip } else { MaskType::Mask }; mask_state = Some((uuid, mask_type)); let mut svg = SvgRender::new(); row.element.render_svg(&mut svg, &render_params.for_clipper()); write!(&mut attributes.0.svg_defs, r##"{}"##, svg.svg_defs).unwrap(); mask_type.write_to_defs(&mut attributes.0.svg_defs, uuid, svg.svg.to_svg_string()); } else if let Some((uuid, mask_type)) = mask_state { if !next_clips { mask_state = None; } let id = format!("mask-{uuid}"); let selector = format!("url(#{id})"); attributes.push(mask_type.to_attribute(), selector); } }, |render| { row.element.render_svg(render, render_params); }, ); } } fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, render_params: &RenderParams) { let mut iter = self.iter().peekable(); let mut mask_element_and_transform = None; while let Some(row) = iter.next() { let transform = transform * *row.transform; let alpha_blending = *row.alpha_blending; let mut layer = false; let blend_mode = match render_params.render_mode { RenderMode::Outline => peniko::Mix::Normal, _ => alpha_blending.blend_mode.to_peniko(), }; let mut bounds = RenderBoundingBox::None; 
let opacity = row.alpha_blending.opacity(render_params.for_mask); if opacity < 1. || (render_params.render_mode != RenderMode::Outline && alpha_blending.blend_mode != BlendMode::default()) { bounds = row.element.bounding_box(transform, true); if let RenderBoundingBox::Rectangle(bounds) = bounds { scene.push_layer( peniko::BlendMode::new(blend_mode, peniko::Compose::SrcOver), opacity, kurbo::Affine::IDENTITY, &kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y), ); layer = true; } } let next_clips = iter.peek().is_some_and(|next_row| next_row.element.had_clip_enabled()); if next_clips && mask_element_and_transform.is_none() { mask_element_and_transform = Some((row.element, transform)); row.element.render_to_vello(scene, transform, context, render_params); } else if let Some((mask_element, transform_mask)) = mask_element_and_transform { if !next_clips { mask_element_and_transform = None; } if !layer { bounds = row.element.bounding_box(transform, true); } if let RenderBoundingBox::Rectangle(bounds) = bounds { let rect = kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y); scene.push_layer(peniko::Mix::Normal, 1., kurbo::Affine::IDENTITY, &rect); mask_element.render_to_vello(scene, transform_mask, context, &render_params.for_clipper()); scene.push_layer(peniko::BlendMode::new(peniko::Mix::Normal, peniko::Compose::SrcIn), 1., kurbo::Affine::IDENTITY, &rect); } row.element.render_to_vello(scene, transform, context, render_params); if matches!(bounds, RenderBoundingBox::Rectangle(_)) { scene.pop_layer(); scene.pop_layer(); } } else { row.element.render_to_vello(scene, transform, context, render_params); } if layer { scene.pop_layer(); } } } fn collect_metadata(&self, metadata: &mut RenderMetadata, footprint: Footprint, element_id: Option<NodeId>) { for row in self.iter() { if let Some(element_id) = row.source_node_id { let mut footprint = footprint; footprint.transform *= *row.transform; row.element.collect_metadata(metadata, footprint, 
Some(*element_id)); } } if let Some(element_id) = element_id { let mut all_upstream_click_targets = Vec::new(); for row in self.iter() { let mut new_click_targets = Vec::new(); row.element.add_upstream_click_targets(&mut new_click_targets); for click_target in new_click_targets.iter_mut() { click_target.apply_transform(*row.transform) } all_upstream_click_targets.extend(new_click_targets); } metadata.click_targets.insert(element_id, all_upstream_click_targets); } } fn add_upstream_click_targets(&self, click_targets: &mut Vec<ClickTarget>) { for row in self.iter() { let mut new_click_targets = Vec::new(); row.element.add_upstream_click_targets(&mut new_click_targets); for click_target in new_click_targets.iter_mut() { click_target.apply_transform(*row.transform) } click_targets.extend(new_click_targets); } } fn contains_artboard(&self) -> bool { self.iter().any(|row| row.element.contains_artboard()) } fn new_ids_from_hash(&mut self, _reference: Option<NodeId>) { for row in self.iter_mut() { row.element.new_ids_from_hash(*row.source_node_id); } } } impl Render for Table<Vector> { fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) { for row in self.iter() { let multiplied_transform = *row.transform; let vector = &row.element; // Only consider strokes with non-zero weight, since default strokes with zero weight would prevent assigning the correct stroke transform let has_real_stroke = vector.style.stroke().filter(|stroke| stroke.weight() > 0.); let set_stroke_transform = has_real_stroke.map(|stroke| stroke.transform).filter(|transform| transform.matrix2.determinant() != 0.); let applied_stroke_transform = set_stroke_transform.unwrap_or(*row.transform); let applied_stroke_transform = render_params.alignment_parent_transform.unwrap_or(applied_stroke_transform); let element_transform = set_stroke_transform.map(|stroke_transform| multiplied_transform * stroke_transform.inverse()); let element_transform = 
element_transform.unwrap_or(DAffine2::IDENTITY); let layer_bounds = vector.bounding_box().unwrap_or_default(); let transformed_bounds = vector.bounding_box_with_transform(applied_stroke_transform).unwrap_or_default(); let bounds_matrix = DAffine2::from_scale_angle_translation(layer_bounds[1] - layer_bounds[0], 0., layer_bounds[0]); let transformed_bounds_matrix = element_transform * DAffine2::from_scale_angle_translation(transformed_bounds[1] - transformed_bounds[0], 0., transformed_bounds[0]); let mut path = String::new(); for mut bezpath in row.element.stroke_bezpath_iter() { bezpath.apply_affine(Affine::new(applied_stroke_transform.to_cols_array())); path.push_str(bezpath.to_svg().as_str()); } let mask_type = if vector.style.stroke().map(|x| x.align) == Some(StrokeAlign::Inside) { MaskType::Clip } else { MaskType::Mask }; let path_is_closed = vector.stroke_bezier_paths().all(|path| path.closed()); let can_draw_aligned_stroke = path_is_closed && vector.style.stroke().is_some_and(|stroke| stroke.has_renderable_stroke() && stroke.align.is_not_centered()); let can_use_paint_order = !(row.element.style.fill().is_none() || !row.element.style.fill().is_opaque() || mask_type == MaskType::Clip); let needs_separate_alignment_fill = can_draw_aligned_stroke && !can_use_paint_order; let wants_stroke_below = vector.style.stroke().map(|s| s.paint_order) == Some(PaintOrder::StrokeBelow); if needs_separate_alignment_fill && !wants_stroke_below { render.leaf_tag("path", |attributes| { attributes.push("d", path.clone()); let matrix = format_transform_matrix(element_transform); if !matrix.is_empty() { attributes.push("transform", matrix); } let mut style = row.element.style.clone(); style.clear_stroke(); let fill_and_stroke = style.render( &mut attributes.0.svg_defs, element_transform, applied_stroke_transform, bounds_matrix, transformed_bounds_matrix, render_params, ); attributes.push_val(fill_and_stroke); }); } let push_id = needs_separate_alignment_fill.then_some({ let id = 
format!("alignment-{}", generate_uuid()); let mut element = row.element.clone(); element.style.clear_stroke(); element.style.set_fill(Fill::solid(Color::BLACK)); let vector_row = Table::new_from_row(TableRow { element, alpha_blending: *row.alpha_blending, transform: *row.transform, source_node_id: None, }); (id, mask_type, vector_row) }); if vector.is_branching() { for mut face_path in vector.construct_faces().filter(|face| !(face.area() < 0.0)) { face_path.apply_affine(Affine::new(applied_stroke_transform.to_cols_array())); let face_d = face_path.to_svg(); render.leaf_tag("path", |attributes| { attributes.push("d", face_d.clone()); let matrix = format_transform_matrix(element_transform); if !matrix.is_empty() { attributes.push("transform", matrix); } let mut style = row.element.style.clone(); style.clear_stroke(); let fill_only = style.render( &mut attributes.0.svg_defs, element_transform, applied_stroke_transform, bounds_matrix, transformed_bounds_matrix, render_params, ); attributes.push_val(fill_only); }); } } render.leaf_tag("path", |attributes| { attributes.push("d", path.clone()); let matrix = format_transform_matrix(element_transform); if !matrix.is_empty() { attributes.push("transform", matrix); } let defs = &mut attributes.0.svg_defs; if let Some((ref id, mask_type, ref vector_row)) = push_id { let mut svg = SvgRender::new(); vector_row.render_svg(&mut svg, &render_params.for_alignment(applied_stroke_transform)); let stroke = row.element.style.stroke().unwrap(); let weight = stroke.effective_width() * max_scale(applied_stroke_transform); let quad = Quad::from_box(transformed_bounds).inflate(weight); let (x, y) = quad.top_left().into(); let (width, height) = (quad.bottom_right() - quad.top_left()).into(); write!(defs, r##"{}"##, svg.svg_defs).unwrap(); let rect = format!(r##"<rect x="{x}" y="{y}" width="{width}" height="{height}" fill="white" />"##); match mask_type { MaskType::Clip => write!(defs, r##"<clipPath id="{id}">{}</clipPath>"##, 
svg.svg.to_svg_string()).unwrap(), MaskType::Mask => write!( defs, r##"<mask id="{id}" maskUnits="userSpaceOnUse" maskContentUnits="userSpaceOnUse" x="{x}" y="{y}" width="{width}" height="{height}">{}{}</mask>"##, rect, svg.svg.to_svg_string() ) .unwrap(), } } let mut render_params = render_params.clone(); render_params.aligned_strokes = can_draw_aligned_stroke; render_params.override_paint_order = can_draw_aligned_stroke && can_use_paint_order; let mut style = row.element.style.clone(); if needs_separate_alignment_fill || vector.is_branching() { style.clear_fill(); } let fill_and_stroke = style.render(defs, element_transform, applied_stroke_transform, bounds_matrix, transformed_bounds_matrix, &render_params); if let Some((id, mask_type, _)) = push_id { let selector = format!("url(#{id})"); attributes.push(mask_type.to_attribute(), selector); } attributes.push_val(fill_and_stroke); let opacity = row.alpha_blending.opacity(render_params.for_mask); if opacity < 1. { attributes.push("opacity", opacity.to_string()); } if row.alpha_blending.blend_mode != BlendMode::default() { attributes.push("style", row.alpha_blending.blend_mode.render()); } }); // When splitting passes and stroke is below, draw the fill after the stroke. 
if needs_separate_alignment_fill && wants_stroke_below { render.leaf_tag("path", |attributes| { attributes.push("d", path); let matrix = format_transform_matrix(element_transform); if !matrix.is_empty() { attributes.push("transform", matrix); } let mut style = row.element.style.clone(); style.clear_stroke(); let fill_and_stroke = style.render( &mut attributes.0.svg_defs, element_transform, applied_stroke_transform, bounds_matrix, transformed_bounds_matrix, render_params, ); attributes.push_val(fill_and_stroke); }); } } } fn render_to_vello(&self, scene: &mut Scene, parent_transform: DAffine2, _context: &mut RenderContext, render_params: &RenderParams) { use core_types::consts::{LAYER_OUTLINE_STROKE_COLOR, LAYER_OUTLINE_STROKE_WEIGHT}; use graphic_types::vector_types::vector::style::{GradientType, StrokeCap, StrokeJoin}; use vello::kurbo::{Cap, Join}; use vello::peniko; for row in self.iter() { use graphic_types::vector_types::vector; let multiplied_transform = parent_transform * *row.transform; let has_real_stroke = row.element.style.stroke().filter(|stroke| stroke.weight() > 0.);
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
true
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/rendering/src/render_ext.rs
node-graph/libraries/rendering/src/render_ext.rs
use crate::renderer::{RenderParams, format_transform_matrix}; use core_types::consts::{LAYER_OUTLINE_STROKE_COLOR, LAYER_OUTLINE_STROKE_WEIGHT}; use core_types::uuid::generate_uuid; use glam::DAffine2; use graphic_types::vector_types::gradient::{Gradient, GradientType}; use graphic_types::vector_types::vector::style::{Fill, PaintOrder, PathStyle, RenderMode, Stroke, StrokeAlign, StrokeCap, StrokeJoin}; use std::fmt::Write; pub trait RenderExt { type Output; fn render(&self, svg_defs: &mut String, element_transform: DAffine2, stroke_transform: DAffine2, bounds: DAffine2, transformed_bounds: DAffine2, render_params: &RenderParams) -> Self::Output; } impl RenderExt for Gradient { type Output = u64; // /// Adds the gradient def through mutating the first argument, returning the gradient ID. fn render(&self, svg_defs: &mut String, element_transform: DAffine2, stroke_transform: DAffine2, bounds: DAffine2, transformed_bounds: DAffine2, _render_params: &RenderParams) -> Self::Output { let mut stop = String::new(); for (position, color) in self.stops.0.iter() { stop.push_str("<stop"); if *position != 0. { let _ = write!(stop, r#" offset="{}""#, (position * 1_000_000.).round() / 1_000_000.); } let _ = write!(stop, r##" stop-color="#{}""##, color.to_rgb_hex_srgb_from_gamma()); if color.a() < 1. { let _ = write!(stop, r#" stop-opacity="{}""#, (color.a() * 1000.).round() / 1000.); } stop.push_str(" />") } let transform_points = element_transform * stroke_transform * bounds; let start = transform_points.transform_point2(self.start); let end = transform_points.transform_point2(self.end); let gradient_transform = if transformed_bounds.matrix2.determinant() != 0. { transformed_bounds.inverse() } else { DAffine2::IDENTITY // Ignore if the transform cannot be inverted (the bounds are zero). See issue #1944. 
}; let gradient_transform = format_transform_matrix(gradient_transform); let gradient_transform = if gradient_transform.is_empty() { String::new() } else { format!(r#" gradientTransform="{gradient_transform}""#) }; let gradient_id = generate_uuid(); match self.gradient_type { GradientType::Linear => { let _ = write!( svg_defs, r#"<linearGradient id="{}" x1="{}" y1="{}" x2="{}" y2="{}"{gradient_transform}>{}</linearGradient>"#, gradient_id, start.x, start.y, end.x, end.y, stop ); } GradientType::Radial => { let radius = (f64::powi(start.x - end.x, 2) + f64::powi(start.y - end.y, 2)).sqrt(); let _ = write!( svg_defs, r#"<radialGradient id="{}" cx="{}" cy="{}" r="{}"{gradient_transform}>{}</radialGradient>"#, gradient_id, start.x, start.y, radius, stop ); } } gradient_id } } impl RenderExt for Fill { type Output = String; /// Renders the fill, adding necessary defs through mutating the first argument. fn render(&self, svg_defs: &mut String, element_transform: DAffine2, stroke_transform: DAffine2, bounds: DAffine2, transformed_bounds: DAffine2, render_params: &RenderParams) -> Self::Output { match self { Self::None => r#" fill="none""#.to_string(), Self::Solid(color) => { let mut result = format!(r##" fill="#{}""##, color.to_rgb_hex_srgb_from_gamma()); if color.a() < 1. { let _ = write!(result, r#" fill-opacity="{}""#, (color.a() * 1000.).round() / 1000.); } result } Self::Gradient(gradient) => { let gradient_id = gradient.render(svg_defs, element_transform, stroke_transform, bounds, transformed_bounds, render_params); format!(r##" fill="url('#{gradient_id}')""##) } } } } impl RenderExt for Stroke { type Output = String; /// Provide the SVG attributes for the stroke. 
fn render( &self, _svg_defs: &mut String, _element_transform: DAffine2, _stroke_transform: DAffine2, _bounds: DAffine2, _transformed_bounds: DAffine2, render_params: &RenderParams, ) -> Self::Output { // Don't render a stroke at all if it would be invisible let Some(color) = self.color else { return String::new() }; if !self.has_renderable_stroke() { return String::new(); } // Set to None if the value is the SVG default let weight = (self.weight != 1.).then_some(self.weight); let dash_array = (!self.dash_lengths.is_empty()).then_some(self.dash_lengths()); let dash_offset = (self.dash_offset != 0.).then_some(self.dash_offset); let stroke_cap = (self.cap != StrokeCap::Butt).then_some(self.cap); let stroke_join = (self.join != StrokeJoin::Miter).then_some(self.join); let stroke_join_miter_limit = (self.join_miter_limit != 4.).then_some(self.join_miter_limit); let stroke_align = (self.align != StrokeAlign::Center).then_some(self.align); let paint_order = (self.paint_order != PaintOrder::StrokeAbove || render_params.override_paint_order).then_some(PaintOrder::StrokeBelow); // Render the needed stroke attributes let mut attributes = format!(r##" stroke="#{}""##, color.to_rgb_hex_srgb_from_gamma()); if color.a() < 1. 
{ let _ = write!(&mut attributes, r#" stroke-opacity="{}""#, (color.a() * 1000.).round() / 1000.); } if let Some(mut weight) = weight { if stroke_align.is_some() && render_params.aligned_strokes { weight *= 2.; } let _ = write!(&mut attributes, r#" stroke-width="{weight}""#); } if let Some(dash_array) = dash_array { let _ = write!(&mut attributes, r#" stroke-dasharray="{dash_array}""#); } if let Some(dash_offset) = dash_offset { let _ = write!(&mut attributes, r#" stroke-dashoffset="{dash_offset}""#); } if let Some(stroke_cap) = stroke_cap { let _ = write!(&mut attributes, r#" stroke-linecap="{}""#, stroke_cap.svg_name()); } if let Some(stroke_join) = stroke_join { let _ = write!(&mut attributes, r#" stroke-linejoin="{}""#, stroke_join.svg_name()); } if let Some(stroke_join_miter_limit) = stroke_join_miter_limit { let _ = write!(&mut attributes, r#" stroke-miterlimit="{stroke_join_miter_limit}""#); } // Add vector-effect attribute to make strokes non-scaling if self.non_scaling { let _ = write!(&mut attributes, r#" vector-effect="non-scaling-stroke""#); } if paint_order.is_some() { let _ = write!(&mut attributes, r#" style="paint-order: stroke;" "#); } attributes } } impl RenderExt for PathStyle { type Output = String; /// Renders the shape's fill and stroke attributes as a string with them concatenated together. 
#[allow(clippy::too_many_arguments)] fn render(&self, svg_defs: &mut String, element_transform: DAffine2, stroke_transform: DAffine2, bounds: DAffine2, transformed_bounds: DAffine2, render_params: &RenderParams) -> String { let render_mode = render_params.render_mode; match render_mode { RenderMode::Outline => { let fill_attribute = Fill::None.render(svg_defs, element_transform, stroke_transform, bounds, transformed_bounds, render_params); let mut outline_stroke = Stroke::new(Some(LAYER_OUTLINE_STROKE_COLOR), LAYER_OUTLINE_STROKE_WEIGHT); // Outline strokes should be non-scaling by default outline_stroke.non_scaling = true; let stroke_attribute = outline_stroke.render(svg_defs, element_transform, stroke_transform, bounds, transformed_bounds, render_params); format!("{fill_attribute}{stroke_attribute}") } _ => { let fill_attribute = self.fill.render(svg_defs, element_transform, stroke_transform, bounds, transformed_bounds, render_params); let stroke_attribute = self .stroke .as_ref() .map(|stroke| stroke.render(svg_defs, element_transform, stroke_transform, bounds, transformed_bounds, render_params)) .unwrap_or_default(); format!("{fill_attribute}{stroke_attribute}") } } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/rendering/src/convert_usvg_path.rs
node-graph/libraries/rendering/src/convert_usvg_path.rs
use glam::DVec2; use vector_types::subpath::{ManipulatorGroup, Subpath}; use vector_types::vector::PointId; pub fn convert_usvg_path(path: &usvg::Path) -> Vec<Subpath<PointId>> { let mut subpaths = Vec::new(); let mut manipulators_list = Vec::new(); let mut points = path.data().points().iter(); let to_vec = |p: &usvg::tiny_skia_path::Point| DVec2::new(p.x as f64, p.y as f64); for verb in path.data().verbs() { match verb { usvg::tiny_skia_path::PathVerb::Move => { subpaths.push(Subpath::new(std::mem::take(&mut manipulators_list), false)); let Some(start) = points.next().map(to_vec) else { continue }; manipulators_list.push(ManipulatorGroup::new(start, Some(start), Some(start))); } usvg::tiny_skia_path::PathVerb::Line => { let Some(end) = points.next().map(to_vec) else { continue }; manipulators_list.push(ManipulatorGroup::new(end, Some(end), Some(end))); } usvg::tiny_skia_path::PathVerb::Quad => { let Some(handle) = points.next().map(to_vec) else { continue }; let Some(end) = points.next().map(to_vec) else { continue }; if let Some(last) = manipulators_list.last_mut() { last.out_handle = Some(last.anchor + (2. / 3.) * (handle - last.anchor)); } manipulators_list.push(ManipulatorGroup::new(end, Some(end + (2. / 3.) * (handle - end)), Some(end))); } usvg::tiny_skia_path::PathVerb::Cubic => { let Some(first_handle) = points.next().map(to_vec) else { continue }; let Some(second_handle) = points.next().map(to_vec) else { continue }; let Some(end) = points.next().map(to_vec) else { continue }; if let Some(last) = manipulators_list.last_mut() { last.out_handle = Some(first_handle); } manipulators_list.push(ManipulatorGroup::new(end, Some(second_handle), Some(end))); } usvg::tiny_skia_path::PathVerb::Close => { subpaths.push(Subpath::new(std::mem::take(&mut manipulators_list), true)); } } } subpaths.push(Subpath::new(manipulators_list, false)); subpaths }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/application-io/src/lib.rs
node-graph/libraries/application-io/src/lib.rs
use core_types::transform::Footprint; use dyn_any::{DynAny, StaticType, StaticTypeSized}; use glam::{DAffine2, DVec2, UVec2}; use std::fmt::Debug; use std::future::Future; use std::hash::{Hash, Hasher}; use std::pin::Pin; use std::ptr::addr_of; use std::sync::Arc; use std::time::Duration; use text_nodes::FontCache; use vector_types::vector::style::RenderMode; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct SurfaceId(pub u64); impl std::fmt::Display for SurfaceId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_fmt(format_args!("{}", self.0)) } } #[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)] pub struct SurfaceFrame { pub surface_id: SurfaceId, /// Logical resolution in CSS pixels (used for foreignObject dimensions) pub resolution: DVec2, pub transform: DAffine2, } impl Hash for SurfaceFrame { fn hash<H: Hasher>(&self, state: &mut H) { self.surface_id.hash(state); self.transform.to_cols_array().iter().for_each(|x| x.to_bits().hash(state)); } } unsafe impl StaticType for SurfaceFrame { type Static = SurfaceFrame; } pub trait Size { fn size(&self) -> UVec2; } #[cfg(target_family = "wasm")] impl Size for web_sys::HtmlCanvasElement { fn size(&self) -> UVec2 { UVec2::new(self.width(), self.height()) } } #[derive(Debug, Clone)] pub struct ImageTexture { #[cfg(feature = "wgpu")] pub texture: wgpu::Texture, #[cfg(not(feature = "wgpu"))] pub texture: (), } impl<'a> serde::Deserialize<'a> for ImageTexture { fn deserialize<D>(_: D) -> Result<Self, D::Error> where D: serde::Deserializer<'a>, { unimplemented!("attempted to serialize a texture") } } impl Hash for ImageTexture { #[cfg(feature = "wgpu")] fn hash<H: Hasher>(&self, state: &mut H) { self.texture.hash(state); } #[cfg(not(feature = "wgpu"))] fn hash<H: Hasher>(&self, _state: &mut H) {} } impl PartialEq for ImageTexture { fn eq(&self, other: &Self) -> bool { #[cfg(feature = "wgpu")] { self.texture == 
other.texture } #[cfg(not(feature = "wgpu"))] { self.texture == other.texture } } } unsafe impl StaticType for ImageTexture { type Static = ImageTexture; } #[cfg(feature = "wgpu")] impl Size for ImageTexture { fn size(&self) -> UVec2 { UVec2::new(self.texture.width(), self.texture.height()) } } impl<S: Size> From<SurfaceHandleFrame<S>> for SurfaceFrame { fn from(x: SurfaceHandleFrame<S>) -> Self { let size = x.surface_handle.surface.size(); Self { surface_id: x.surface_handle.window_id, transform: x.transform, resolution: size.into(), } } } #[derive(Clone, Debug, PartialEq, Eq)] pub struct SurfaceHandle<Surface> { pub window_id: SurfaceId, pub surface: Surface, } // #[cfg(target_family = "wasm")] // unsafe impl<T: dyn_any::WasmNotSend> Send for SurfaceHandle<T> {} // #[cfg(target_family = "wasm")] // unsafe impl<T: dyn_any::WasmNotSync> Sync for SurfaceHandle<T> {} impl<S: Size> Size for SurfaceHandle<S> { fn size(&self) -> UVec2 { self.surface.size() } } unsafe impl<T: 'static> StaticType for SurfaceHandle<T> { type Static = SurfaceHandle<T>; } #[derive(Clone, Debug, PartialEq)] pub struct SurfaceHandleFrame<Surface> { pub surface_handle: Arc<SurfaceHandle<Surface>>, pub transform: DAffine2, } unsafe impl<T: 'static> StaticType for SurfaceHandleFrame<T> { type Static = SurfaceHandleFrame<T>; } #[cfg(feature = "wasm")] pub type WasmSurfaceHandle = SurfaceHandle<web_sys::HtmlCanvasElement>; #[cfg(feature = "wasm")] pub type WasmSurfaceHandleFrame = SurfaceHandleFrame<web_sys::HtmlCanvasElement>; // TODO: think about how to automatically clean up memory /* impl<'a, Surface> Drop for SurfaceHandle<'a, Surface> { fn drop(&mut self) { self.application_io.destroy_surface(self.surface_id) } }*/ #[cfg(target_family = "wasm")] pub type ResourceFuture = Pin<Box<dyn Future<Output = Result<Arc<[u8]>, ApplicationError>>>>; #[cfg(not(target_family = "wasm"))] pub type ResourceFuture = Pin<Box<dyn Future<Output = Result<Arc<[u8]>, ApplicationError>> + Send>>; pub trait 
ApplicationIo { type Surface; type Executor; fn window(&self) -> Option<SurfaceHandle<Self::Surface>>; fn create_window(&self) -> SurfaceHandle<Self::Surface>; fn destroy_window(&self, surface_id: SurfaceId); fn gpu_executor(&self) -> Option<&Self::Executor> { None } fn load_resource(&self, url: impl AsRef<str>) -> Result<ResourceFuture, ApplicationError>; } impl<T: ApplicationIo> ApplicationIo for &T { type Surface = T::Surface; type Executor = T::Executor; fn window(&self) -> Option<SurfaceHandle<Self::Surface>> { (**self).window() } fn create_window(&self) -> SurfaceHandle<T::Surface> { (**self).create_window() } fn destroy_window(&self, surface_id: SurfaceId) { (**self).destroy_window(surface_id) } fn gpu_executor(&self) -> Option<&T::Executor> { (**self).gpu_executor() } fn load_resource<'a>(&self, url: impl AsRef<str>) -> Result<ResourceFuture, ApplicationError> { (**self).load_resource(url) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ApplicationError { NotFound, InvalidUrl, } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum NodeGraphUpdateMessage {} pub trait NodeGraphUpdateSender { fn send(&self, message: NodeGraphUpdateMessage); } impl<T: NodeGraphUpdateSender> NodeGraphUpdateSender for std::sync::Mutex<T> { fn send(&self, message: NodeGraphUpdateMessage) { self.lock().as_mut().unwrap().send(message) } } pub trait GetEditorPreferences { fn use_vello(&self) -> bool; } #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub enum ExportFormat { #[default] Svg, Raster, } #[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct TimingInformation { pub time: f64, pub animation_time: Duration, } #[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct RenderConfig { pub viewport: Footprint, pub scale: f64, pub export_format: ExportFormat, pub time: TimingInformation, pub 
pointer: DVec2, #[serde(alias = "view_mode")] pub render_mode: RenderMode, pub hide_artboards: bool, pub for_export: bool, } struct Logger; impl NodeGraphUpdateSender for Logger { fn send(&self, message: NodeGraphUpdateMessage) { log::warn!("dispatching message with fallback node graph update sender {message:?}"); } } struct DummyPreferences; impl GetEditorPreferences for DummyPreferences { fn use_vello(&self) -> bool { false } } pub struct EditorApi<Io> { /// Font data (for rendering text) made available to the graph through the [`WasmEditorApi`]. pub font_cache: FontCache, /// Gives access to APIs like a rendering surface (native window handle or HTML5 canvas) and WGPU (which becomes WebGPU on web). pub application_io: Option<Arc<Io>>, pub node_graph_message_sender: Box<dyn NodeGraphUpdateSender + Send + Sync>, /// Editor preferences made available to the graph through the [`WasmEditorApi`]. pub editor_preferences: Box<dyn GetEditorPreferences + Send + Sync>, } impl<Io> Eq for EditorApi<Io> {} impl<Io: Default> Default for EditorApi<Io> { fn default() -> Self { Self { font_cache: FontCache::default(), application_io: None, node_graph_message_sender: Box::new(Logger), editor_preferences: Box::new(DummyPreferences), } } } impl<Io> Hash for EditorApi<Io> { fn hash<H: Hasher>(&self, state: &mut H) { self.font_cache.hash(state); self.application_io.as_ref().map_or(0, |io| io.as_ref() as *const _ as usize).hash(state); (self.node_graph_message_sender.as_ref() as *const dyn NodeGraphUpdateSender).hash(state); (self.editor_preferences.as_ref() as *const dyn GetEditorPreferences).hash(state); } } impl<Io> PartialEq for EditorApi<Io> { fn eq(&self, other: &Self) -> bool { self.font_cache == other.font_cache && self.application_io.as_ref().map_or(0, |io| addr_of!(io) as usize) == other.application_io.as_ref().map_or(0, |io| addr_of!(io) as usize) && std::ptr::eq(self.node_graph_message_sender.as_ref() as *const _, other.node_graph_message_sender.as_ref() as *const _) && 
std::ptr::eq(self.editor_preferences.as_ref() as *const _, other.editor_preferences.as_ref() as *const _) } } impl<T> Debug for EditorApi<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EditorApi").field("font_cache", &self.font_cache).finish() } } unsafe impl<T: StaticTypeSized> StaticType for EditorApi<T> { type Static = EditorApi<T::Static>; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/wgpu-executor/src/texture_conversion.rs
node-graph/libraries/wgpu-executor/src/texture_conversion.rs
use crate::WgpuExecutor;
use core_types::Color;
use core_types::Ctx;
use core_types::color::SRGBA8;
use core_types::ops::Convert;
use core_types::table::{Table, TableRow};
use core_types::transform::Footprint;
use raster_types::Image;
use raster_types::{CPU, GPU, Raster};
use wgpu::util::{DeviceExt, TextureDataOrder};
use wgpu::{Extent3d, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages};

/// Uploads CPU image data to a GPU texture.
///
/// Creates a new WGPU texture with RGBA8UnormSrgb format and uploads the provided
/// image data. The texture is configured for binding, copying, and source operations.
fn upload_to_texture(device: &std::sync::Arc<wgpu::Device>, queue: &std::sync::Arc<wgpu::Queue>, image: &Raster<CPU>) -> wgpu::Texture {
	// Convert each pixel to packed sRGB 8-bit before upload to match the Rgba8UnormSrgb format below.
	let rgba8_data: Vec<SRGBA8> = image.data.iter().map(|x| (*x).into()).collect();
	device.create_texture_with_data(
		queue,
		&TextureDescriptor {
			label: Some("upload_texture node texture"),
			size: Extent3d {
				width: image.width,
				height: image.height,
				depth_or_array_layers: 1,
			},
			mip_level_count: 1,
			sample_count: 1,
			dimension: TextureDimension::D2,
			format: TextureFormat::Rgba8UnormSrgb,
			usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST | TextureUsages::COPY_SRC,
			view_formats: &[],
		},
		TextureDataOrder::LayerMajor,
		bytemuck::cast_slice(rgba8_data.as_slice()),
	)
}

/// Converts a Raster<GPU> texture to Raster<CPU> by downloading the underlying texture data.
///
/// Assumptions:
/// - 2D texture, mip level 0
/// - 4 bytes-per-pixel RGBA8
/// - Texture has COPY_SRC usage
struct RasterGpuToRasterCpuConverter {
	// Staging buffer that receives the texture copy; mapped for reading in `convert`.
	buffer: wgpu::Buffer,
	width: u32,
	height: u32,
	// Tightly-packed row size (width * 4 bytes).
	unpadded_bytes_per_row: u32,
	// Row size rounded up to COPY_BYTES_PER_ROW_ALIGNMENT, as required for texture-to-buffer copies.
	padded_bytes_per_row: u32,
}

impl RasterGpuToRasterCpuConverter {
	/// Records a texture-to-buffer copy into `encoder` and returns the converter that
	/// can later map the buffer. The caller must submit the encoder before awaiting `convert`.
	fn new(device: &std::sync::Arc<wgpu::Device>, encoder: &mut wgpu::CommandEncoder, data_gpu: Raster<GPU>) -> Self {
		let texture = data_gpu.data();
		let width = texture.width();
		let height = texture.height();
		let bytes_per_pixel = 4; // RGBA8
		let unpadded_bytes_per_row = width * bytes_per_pixel;
		// wgpu requires bytes_per_row of a texture copy to be aligned; pad each row up.
		let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
		let padded_bytes_per_row = unpadded_bytes_per_row.div_ceil(align) * align;
		let buffer_size = padded_bytes_per_row as u64 * height as u64;
		let buffer = device.create_buffer(&wgpu::BufferDescriptor {
			label: Some("texture_download_buffer"),
			size: buffer_size,
			usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
			mapped_at_creation: false,
		});
		encoder.copy_texture_to_buffer(
			wgpu::TexelCopyTextureInfo {
				texture,
				mip_level: 0,
				origin: wgpu::Origin3d::ZERO,
				aspect: wgpu::TextureAspect::All,
			},
			wgpu::TexelCopyBufferInfo {
				buffer: &buffer,
				layout: wgpu::TexelCopyBufferLayout {
					offset: 0,
					bytes_per_row: Some(padded_bytes_per_row),
					rows_per_image: Some(height),
				},
			},
			Extent3d {
				width,
				height,
				depth_or_array_layers: 1,
			},
		);
		Self {
			buffer,
			width,
			height,
			unpadded_bytes_per_row,
			padded_bytes_per_row,
		}
	}

	/// Maps the staging buffer and decodes it into a CPU raster.
	///
	/// Must be awaited only after the command encoder passed to `new` has been submitted,
	/// otherwise the map callback never fires.
	async fn convert(self) -> Result<Raster<CPU>, wgpu::BufferAsyncError> {
		let buffer_slice = self.buffer.slice(..);
		// Bridge wgpu's callback-based map_async into a future via a oneshot channel.
		let (sender, receiver) = futures::channel::oneshot::channel();
		buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
			let _ = sender.send(result);
		});
		receiver.await.expect("Failed to receive map result")?;
		let view = buffer_slice.get_mapped_range();
		let row_stride = self.padded_bytes_per_row as usize;
		let row_bytes = self.unpadded_bytes_per_row as usize;
		let mut cpu_data: Vec<Color> = Vec::with_capacity((self.width * self.height) as usize);
		// Walk row by row, skipping the alignment padding at the end of each row.
		for row in 0..self.height as usize {
			let start = row * row_stride;
			let row_slice = &view[start..start + row_bytes];
			for px in row_slice.chunks_exact(4) {
				cpu_data.push(Color::from_rgba8_srgb(px[0], px[1], px[2], px[3]));
			}
		}
		// The mapped range must be dropped before unmapping the buffer.
		drop(view);
		self.buffer.unmap();
		let cpu_image = Image {
			data: cpu_data,
			width: self.width,
			height: self.height,
			base64_string: None,
		};
		Ok(Raster::new_cpu(cpu_image))
	}
}

/// Passthrough conversion for GPU tables - no conversion needed
impl<'i> Convert<Table<Raster<GPU>>, &'i WgpuExecutor> for Table<Raster<GPU>> {
	async fn convert(self, _: Footprint, _converter: &'i WgpuExecutor) -> Table<Raster<GPU>> {
		self
	}
}

/// Converts CPU raster table to GPU by uploading each image to a texture
impl<'i> Convert<Table<Raster<GPU>>, &'i WgpuExecutor> for Table<Raster<CPU>> {
	async fn convert(self, _: Footprint, executor: &'i WgpuExecutor) -> Table<Raster<GPU>> {
		let device = &executor.context.device;
		let queue = &executor.context.queue;
		let table = self
			.iter()
			.map(|row| {
				let image = row.element;
				let texture = upload_to_texture(device, queue, image);
				TableRow {
					element: Raster::new_gpu(texture),
					transform: *row.transform,
					alpha_blending: *row.alpha_blending,
					source_node_id: *row.source_node_id,
				}
			})
			.collect();
		// Empty submit flushes the pending writes queued by create_texture_with_data.
		queue.submit([]);
		table
	}
}

/// Converts single CPU raster to GPU by uploading to texture
impl<'i> Convert<Raster<GPU>, &'i WgpuExecutor> for Raster<CPU> {
	async fn convert(self, _: Footprint, executor: &'i WgpuExecutor) -> Raster<GPU> {
		let device = &executor.context.device;
		let queue = &executor.context.queue;
		let texture = upload_to_texture(device, queue, &self);
		queue.submit([]);
		Raster::new_gpu(texture)
	}
}

/// Passthrough conversion for CPU tables - no conversion needed
impl<'i> Convert<Table<Raster<CPU>>, &'i WgpuExecutor> for Table<Raster<CPU>> {
	async fn convert(self, _: Footprint, _converter: &'i WgpuExecutor) -> Table<Raster<CPU>> {
		self
	}
}

/// Converts a GPU raster table to CPU by downloading all texture data in one batch:
/// every texture-to-buffer copy is encoded into a single command submission,
/// then all buffers are asynchronously mapped and the results processed.
impl<'i> Convert<Table<Raster<CPU>>, &'i WgpuExecutor> for Table<Raster<GPU>> {
	async fn convert(self, _: Footprint, executor: &'i WgpuExecutor) -> Table<Raster<CPU>> {
		let device = &executor.context.device;
		let queue = &executor.context.queue;
		let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
			label: Some("batch_texture_download_encoder"),
		});
		// Record one copy per row; keep per-row metadata aside so it can be re-attached after download.
		let mut converters = Vec::new();
		let mut rows_meta = Vec::new();
		for row in self {
			let gpu_raster = row.element;
			converters.push(RasterGpuToRasterCpuConverter::new(device, &mut encoder, gpu_raster));
			rows_meta.push(TableRow {
				element: (),
				transform: row.transform,
				alpha_blending: row.alpha_blending,
				source_node_id: row.source_node_id,
			});
		}
		// Submit all copies at once before mapping any buffer.
		queue.submit([encoder.finish()]);
		let mut map_futures = Vec::new();
		for converter in converters {
			map_futures.push(converter.convert());
		}
		let map_results = futures::future::try_join_all(map_futures)
			.await
			.map_err(|_| "Failed to receive map result")
			.expect("Buffer mapping communication failed");
		// Reassemble the table by zipping downloaded images back with their saved metadata.
		map_results
			.into_iter()
			.zip(rows_meta.into_iter())
			.map(|(element, row)| TableRow {
				element,
				transform: row.transform,
				alpha_blending: row.alpha_blending,
				source_node_id: row.source_node_id,
			})
			.collect()
	}
}

/// Converts single GPU raster to CPU by downloading texture data
impl<'i> Convert<Raster<CPU>, &'i WgpuExecutor> for Raster<GPU> {
	async fn convert(self, _: Footprint, executor: &'i WgpuExecutor) -> Raster<CPU> {
		let device = &executor.context.device;
		let queue = &executor.context.queue;
		let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
			label: Some("single_texture_download_encoder"),
		});
		let converter = RasterGpuToRasterCpuConverter::new(device, &mut encoder, self);
		queue.submit([encoder.finish()]);
		converter.convert().await.expect("Failed to download texture data")
	}
}

/// Uploads a raster texture from the CPU to the GPU. This is now deprecated; the Convert node should be used in the future.
///
/// Accepts either individual raster data or a table of raster elements and converts it to the GPU format using the WgpuExecutor's device and queue.
#[node_macro::node(category(""))]
pub async fn upload_texture<'a: 'n, T: Convert<Table<Raster<GPU>>, &'a WgpuExecutor>>(
	_: impl Ctx,
	#[implementations(Table<Raster<CPU>>, Table<Raster<GPU>>)] input: T,
	executor: &'a WgpuExecutor,
) -> Table<Raster<GPU>> {
	input.convert(Footprint::DEFAULT, executor).await
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/wgpu-executor/src/lib.rs
node-graph/libraries/wgpu-executor/src/lib.rs
mod context;
pub mod shader_runtime;
pub mod texture_conversion;

use crate::shader_runtime::ShaderRuntime;
use anyhow::Result;
use core_types::Color;
use dyn_any::StaticType;
use futures::lock::Mutex;
use glam::UVec2;
use graphene_application_io::{ApplicationIo, EditorApi, SurfaceHandle, SurfaceId};
pub use rendering::RenderContext;
use std::sync::Arc;
use vello::{AaConfig, AaSupport, RenderParams, Renderer, RendererOptions, Scene};
use wgpu::util::TextureBlitter;
use wgpu::{Origin3d, TextureAspect};

pub use context::Context as WgpuContext;
pub use context::ContextBuilder as WgpuContextBuilder;
pub use wgpu::Backends as WgpuBackends;
pub use wgpu::Features as WgpuFeatures;

/// GPU execution backend: owns the WGPU context, a Vello renderer for vector
/// scene rasterization, and the runtime used to dispatch node shaders.
#[derive(dyn_any::DynAny)]
pub struct WgpuExecutor {
	pub context: WgpuContext,
	// Behind a Mutex because rendering requires &mut Renderer while the executor is shared.
	vello_renderer: Mutex<Renderer>,
	pub shader_runtime: ShaderRuntime,
}

impl std::fmt::Debug for WgpuExecutor {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		f.debug_struct("WgpuExecutor").field("context", &self.context).finish()
	}
}

impl<'a, T: ApplicationIo<Executor = WgpuExecutor>> From<&'a EditorApi<T>> for &'a WgpuExecutor {
	fn from(editor_api: &'a EditorApi<T>) -> Self {
		// Panics if the editor has no application IO or no GPU executor configured.
		editor_api.application_io.as_ref().unwrap().gpu_executor().unwrap()
	}
}

pub type WgpuSurface = Arc<SurfaceHandle<Surface>>;
pub type WgpuWindow = Arc<SurfaceHandle<WindowHandle>>;

/// A presentation surface plus the helpers needed to draw to it.
pub struct Surface {
	pub inner: wgpu::Surface<'static>,
	// Cached intermediate render target, reused across frames of the same size.
	pub target_texture: Mutex<Option<TargetTexture>>,
	pub blitter: TextureBlitter,
}

/// A cached offscreen render target; `size` is used to decide whether it can be reused.
pub struct TargetTexture {
	texture: wgpu::Texture,
	view: wgpu::TextureView,
	size: UVec2,
}

#[cfg(target_family = "wasm")]
pub type Window = web_sys::HtmlCanvasElement;
#[cfg(not(target_family = "wasm"))]
pub type Window = Arc<dyn winit::window::Window>;

unsafe impl StaticType for Surface {
	type Static = Surface;
}

const VELLO_SURFACE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;

impl WgpuExecutor {
	/// Renders a Vello scene into a freshly created texture and returns it.
	pub async fn render_vello_scene_to_texture(&self, scene: &Scene, size: UVec2, context: &RenderContext, background: Color) -> Result<wgpu::Texture> {
		let mut output = None;
		self.render_vello_scene_to_target_texture(scene, size, context, background, &mut output).await?;
		Ok(output.unwrap().texture)
	}

	/// Renders a Vello scene into `output`, reusing the existing target texture when its size matches.
	async fn render_vello_scene_to_target_texture(&self, scene: &Scene, size: UVec2, context: &RenderContext, background: Color, output: &mut Option<TargetTexture>) -> Result<()> {
		// Guard against zero-sized targets, which are invalid in wgpu.
		let size = size.max(UVec2::ONE);
		let target_texture = if let Some(target_texture) = output
			&& target_texture.size == size
		{
			target_texture
		} else {
			let texture = self.context.device.create_texture(&wgpu::TextureDescriptor {
				label: None,
				size: wgpu::Extent3d {
					width: size.x,
					height: size.y,
					depth_or_array_layers: 1,
				},
				mip_level_count: 1,
				sample_count: 1,
				dimension: wgpu::TextureDimension::D2,
				usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_SRC,
				format: VELLO_SURFACE_FORMAT,
				view_formats: &[],
			});
			let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
			*output = Some(TargetTexture { texture, view, size });
			output.as_mut().unwrap()
		};
		let [r, g, b, a] = background.to_rgba8_srgb();
		let render_params = RenderParams {
			base_color: vello::peniko::Color::from_rgba8(r, g, b, a),
			width: size.x,
			height: size.y,
			antialiasing_method: AaConfig::Msaa16,
		};
		{
			let mut renderer = self.vello_renderer.lock().await;
			// Install texture overrides so image brushes in the scene resolve to live GPU textures.
			for (image_brush, texture) in context.resource_overrides.iter() {
				let texture_view = wgpu::TexelCopyTextureInfoBase {
					texture: texture.clone(),
					mip_level: 0,
					origin: Origin3d::ZERO,
					aspect: TextureAspect::All,
				};
				renderer.override_image(&image_brush.image, Some(texture_view));
			}
			renderer.render_to_texture(&self.context.device, &self.context.queue, scene, &target_texture.view, &render_params)?;
			// Clear the overrides again so they don't leak into the next render.
			for (image_brush, _) in context.resource_overrides.iter() {
				renderer.override_image(&image_brush.image, None);
			}
		}
		Ok(())
	}

	/// Creates a rendering surface from an HTML canvas (wasm targets).
	#[cfg(target_family = "wasm")]
	pub fn create_surface(&self, canvas: graphene_application_io::WasmSurfaceHandle) -> Result<SurfaceHandle<Surface>> {
		let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Canvas(canvas.surface))?;
		self.create_surface_inner(surface, canvas.window_id)
	}

	/// Creates a rendering surface from a native window (non-wasm targets).
	#[cfg(not(target_family = "wasm"))]
	pub fn create_surface(&self, window: SurfaceHandle<Window>) -> Result<SurfaceHandle<Surface>> {
		let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Window(Box::new(window.surface)))?;
		self.create_surface_inner(surface, window.window_id)
	}

	/// Wraps a raw wgpu surface with a blitter and empty target-texture cache.
	pub fn create_surface_inner(&self, surface: wgpu::Surface<'static>, window_id: SurfaceId) -> Result<SurfaceHandle<Surface>> {
		let blitter = TextureBlitter::new(&self.context.device, VELLO_SURFACE_FORMAT);
		Ok(SurfaceHandle {
			window_id,
			surface: Surface {
				inner: surface,
				target_texture: Mutex::new(None),
				blitter,
			},
		})
	}
}

impl WgpuExecutor {
	/// Builds an executor with a freshly requested WGPU context; None if no adapter/device is available.
	pub async fn new() -> Option<Self> {
		Self::with_context(WgpuContext::new().await?)
	}

	/// Builds an executor around an existing context; None if the Vello renderer fails to initialize.
	pub fn with_context(context: WgpuContext) -> Option<Self> {
		let vello_renderer = Renderer::new(
			&context.device,
			RendererOptions {
				pipeline_cache: None,
				use_cpu: false,
				antialiasing_support: AaSupport::all(),
				num_init_threads: std::num::NonZeroUsize::new(1),
			},
		)
		.map_err(|e| anyhow::anyhow!("Failed to create Vello renderer: {:?}", e))
		.ok()?;
		Some(Self {
			shader_runtime: ShaderRuntime::new(&context),
			context,
			vello_renderer: vello_renderer.into(),
		})
	}
}

pub type WindowHandle = Arc<SurfaceHandle<Window>>;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/wgpu-executor/src/context.rs
node-graph/libraries/wgpu-executor/src/context.rs
use std::sync::Arc;
use wgpu::{Adapter, Backends, Device, Features, Instance, Queue};

/// Shared handles to the core WGPU objects needed for GPU work.
#[derive(Debug, Clone)]
pub struct Context {
	pub device: Arc<Device>,
	pub queue: Arc<Queue>,
	pub instance: Arc<Instance>,
	pub adapter: Arc<Adapter>,
}

impl Context {
	/// Builds a context with default backends and features; None if no adapter/device is available.
	pub async fn new() -> Option<Self> {
		ContextBuilder::new().build().await
	}
}

/// Configures backend and feature selection before constructing a [`Context`].
#[derive(Default)]
pub struct ContextBuilder {
	backends: Backends,
	features: Features,
}

impl ContextBuilder {
	/// Starts with all backends enabled and no extra device features.
	pub fn new() -> Self {
		Self {
			backends: Backends::all(),
			features: Features::empty(),
		}
	}

	/// Restricts which wgpu backends are considered.
	pub fn with_backends(mut self, backends: Backends) -> Self {
		self.backends = backends;
		self
	}

	/// Requests extra device features.
	pub fn with_features(mut self, features: Features) -> Self {
		self.features = features;
		self
	}
}

#[cfg(not(target_family = "wasm"))]
impl ContextBuilder {
	/// Builds using the default adapter-selection policy.
	pub async fn build(self) -> Option<Context> {
		self.build_with_adapter_selection_inner(None::<fn(&[Adapter]) -> Option<usize>>).await
	}

	/// Builds, letting the caller pick an adapter from the enumerated list.
	pub async fn build_with_adapter_selection<S>(self, select: S) -> Option<Context>
	where
		S: Fn(&[Adapter]) -> Option<usize>,
	{
		self.build_with_adapter_selection_inner(Some(select)).await
	}

	/// Returns a displayable listing of all adapters visible for the configured backends.
	pub async fn available_adapters_fmt(&self) -> impl std::fmt::Display {
		let instance = self.build_instance();
		fmt::AvailableAdaptersFormatter(instance.enumerate_adapters(self.backends))
	}
}

#[cfg(target_family = "wasm")]
impl ContextBuilder {
	/// Builds via the browser's adapter request path (no enumeration on wasm).
	pub async fn build(self) -> Option<Context> {
		let instance = self.build_instance();
		let adapter = self.request_adapter(&instance).await?;
		let (device, queue) = self.request_device(&adapter).await?;
		Some(Context {
			device: Arc::new(device),
			queue: Arc::new(queue),
			adapter: Arc::new(adapter),
			instance: Arc::new(instance),
		})
	}
}

impl ContextBuilder {
	/// Creates the wgpu instance limited to the configured backends.
	fn build_instance(&self) -> Instance {
		let descriptor = wgpu::InstanceDescriptor {
			backends: self.backends,
			..Default::default()
		};
		Instance::new(&descriptor)
	}

	/// Asks wgpu for a high-performance adapter; None on failure.
	async fn request_adapter(&self, instance: &Instance) -> Option<Adapter> {
		let options = wgpu::RequestAdapterOptions {
			power_preference: wgpu::PowerPreference::HighPerformance,
			compatible_surface: None,
			force_fallback_adapter: false,
		};
		instance.request_adapter(&options).await.ok()
	}

	/// Requests a device and queue with the configured features and the adapter's limits.
	async fn request_device(&self, adapter: &Adapter) -> Option<(Device, Queue)> {
		let descriptor = wgpu::DeviceDescriptor {
			label: None,
			required_features: self.features,
			required_limits: adapter.limits(),
			memory_hints: Default::default(),
			trace: wgpu::Trace::Off,
			experimental_features: Default::default(),
		};
		adapter.request_device(&descriptor).await.ok()
	}
}

#[cfg(not(target_family = "wasm"))]
impl ContextBuilder {
	/// Builds with an optional custom adapter selector; on Windows the default policy prefers DX12.
	async fn build_with_adapter_selection_inner<S>(self, select: Option<S>) -> Option<Context>
	where
		S: Fn(&[Adapter]) -> Option<usize>,
	{
		let instance = self.build_instance();
		let preselected = match select {
			Some(choose) => self.select_adapter(&instance, choose),
			None if cfg!(target_os = "windows") => self.select_adapter(&instance, |adapters: &[Adapter]| adapters.iter().position(|a| a.get_info().backend == wgpu::Backend::Dx12)),
			None => None,
		};
		// Fall back to wgpu's own adapter request if no explicit choice was made.
		let adapter = match preselected {
			Some(adapter) => adapter,
			None => self.request_adapter(&instance).await?,
		};
		let (device, queue) = self.request_device(&adapter).await?;
		Some(Context {
			device: Arc::new(device),
			queue: Arc::new(queue),
			adapter: Arc::new(adapter),
			instance: Arc::new(instance),
		})
	}

	/// Enumerates adapters and removes the one picked by `select`, guarding against out-of-range indices.
	fn select_adapter<S>(&self, instance: &Instance, select: S) -> Option<Adapter>
	where
		S: Fn(&[Adapter]) -> Option<usize>,
	{
		let mut candidates = instance.enumerate_adapters(self.backends);
		let chosen = select(&candidates)?;
		(chosen < candidates.len()).then(|| candidates.remove(chosen))
	}
}

#[cfg(not(target_family = "wasm"))]
mod fmt {
	use super::*;

	/// Formats an adapter list as one line per adapter with backend, type, and driver details.
	pub(super) struct AvailableAdaptersFormatter(pub(super) Vec<Adapter>);

	impl std::fmt::Display for AvailableAdaptersFormatter {
		fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
			for (index, adapter) in self.0.iter().enumerate() {
				let info = adapter.get_info();
				writeln!(
					f,
					"[{}] {:?} {:?} (Name: {}, Driver: {}, Device: {})",
					index, info.backend, info.device_type, info.name, info.driver, info.device,
				)?;
			}
			Ok(())
		}
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/wgpu-executor/src/shader_runtime/per_pixel_adjust_runtime.rs
node-graph/libraries/wgpu-executor/src/shader_runtime/per_pixel_adjust_runtime.rs
use crate::WgpuContext;
use crate::shader_runtime::{FULLSCREEN_VERTEX_SHADER_NAME, ShaderRuntime};
use core_types::shaders::buffer_struct::BufferStruct;
use core_types::table::{Table, TableRow};
use futures::lock::Mutex;
use raster_types::{GPU, Raster};
use std::borrow::Cow;
use std::collections::HashMap;
use wgpu::util::{BufferInitDescriptor, DeviceExt};
use wgpu::{
	BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource, BindingType, Buffer, BufferBinding, BufferBindingType, BufferUsages, ColorTargetState,
	Face, FragmentState, FrontFace, LoadOp, Operations, PipelineLayoutDescriptor, PolygonMode, PrimitiveState, PrimitiveTopology, RenderPassColorAttachment, RenderPassDescriptor,
	RenderPipelineDescriptor, ShaderModuleDescriptor, ShaderSource, ShaderStages, StoreOp, TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, TextureViewDescriptor,
	TextureViewDimension, VertexState,
};

/// Caches one compiled graphics pipeline per fragment shader name, so repeated
/// per-pixel-adjust dispatches reuse the same pipeline.
pub struct PerPixelAdjustShaderRuntime {
	// TODO: PerPixelAdjustGraphicsPipeline already contains the key as `name`
	pipeline_cache: Mutex<HashMap<String, PerPixelAdjustGraphicsPipeline>>,
}

impl Default for PerPixelAdjustShaderRuntime {
	fn default() -> Self {
		Self::new()
	}
}

impl PerPixelAdjustShaderRuntime {
	/// Creates a runtime with an empty pipeline cache.
	pub fn new() -> Self {
		Self {
			pipeline_cache: Mutex::new(HashMap::new()),
		}
	}
}

impl ShaderRuntime {
	/// Runs a per-pixel-adjust fragment shader over every texture in `textures`.
	///
	/// Compiles (or fetches from cache) the pipeline for `shaders`, optionally uploads
	/// `args` as a storage buffer, and dispatches one fullscreen pass per texture.
	/// `args` must be Some exactly when the shader declares a uniform (asserted in `dispatch`).
	pub async fn run_per_pixel_adjust<T: BufferStruct>(&self, shaders: &Shaders<'_>, textures: Table<Raster<GPU>>, args: Option<&T>) -> Table<Raster<GPU>> {
		let mut cache = self.per_pixel_adjust.pipeline_cache.lock().await;
		let pipeline = cache
			.entry(shaders.fragment_shader_name.to_owned())
			.or_insert_with(|| PerPixelAdjustGraphicsPipeline::new(&self.context, shaders));
		let arg_buffer = args.map(|args| {
			let device = &self.context.device;
			device.create_buffer_init(&BufferInitDescriptor {
				label: Some(&format!("{} arg buffer", pipeline.name.as_str())),
				usage: BufferUsages::STORAGE,
				contents: bytemuck::bytes_of(&T::write(*args)),
			})
		});
		pipeline.dispatch(&self.context, textures, arg_buffer)
	}
}

/// Source and metadata for one per-pixel-adjust shader.
pub struct Shaders<'a> {
	pub wgsl_shader: &'a str,
	// Fully-qualified name (contains `::`); the bare entry-point name is derived in `new`.
	pub fragment_shader_name: &'a str,
	// Whether the fragment shader expects an argument buffer at binding 0.
	pub has_uniform: bool,
}

/// A compiled fullscreen-triangle render pipeline for one fragment shader.
pub struct PerPixelAdjustGraphicsPipeline {
	name: String,
	has_uniform: bool,
	pipeline: wgpu::RenderPipeline,
}

impl PerPixelAdjustGraphicsPipeline {
	/// Compiles the WGSL module and builds the render pipeline.
	///
	/// The bind group layout depends on `info.has_uniform`: with a uniform, binding 0 is a
	/// read-only storage buffer and binding 1 the input texture; without, binding 0 is the texture.
	pub fn new(context: &WgpuContext, info: &Shaders) -> Self {
		let device = &context.device;
		let name = info.fragment_shader_name.to_owned();
		// Strip the module path prefix up to and including the first `::`.
		let fragment_name = &name;
		let fragment_name = &fragment_name[(fragment_name.find("::").unwrap() + 2)..];
		// TODO workaround to naga removing `:`
		let fragment_name = fragment_name.replace(":", "");
		let shader_module = device.create_shader_module(ShaderModuleDescriptor {
			label: Some(&format!("PerPixelAdjust {name} wgsl shader")),
			source: ShaderSource::Wgsl(Cow::Borrowed(info.wgsl_shader)),
		});
		let entries: &[_] = if info.has_uniform {
			&[
				BindGroupLayoutEntry {
					binding: 0,
					visibility: ShaderStages::FRAGMENT,
					ty: BindingType::Buffer {
						ty: BufferBindingType::Storage { read_only: true },
						has_dynamic_offset: false,
						min_binding_size: None,
					},
					count: None,
				},
				BindGroupLayoutEntry {
					binding: 1,
					visibility: ShaderStages::FRAGMENT,
					ty: BindingType::Texture {
						sample_type: TextureSampleType::Float { filterable: false },
						view_dimension: TextureViewDimension::D2,
						multisampled: false,
					},
					count: None,
				},
			]
		} else {
			&[BindGroupLayoutEntry {
				binding: 0,
				visibility: ShaderStages::FRAGMENT,
				ty: BindingType::Texture {
					sample_type: TextureSampleType::Float { filterable: false },
					view_dimension: TextureViewDimension::D2,
					multisampled: false,
				},
				count: None,
			}]
		};
		let pipeline_layout = device.create_pipeline_layout(&PipelineLayoutDescriptor {
			label: Some(&format!("PerPixelAdjust {name} PipelineLayout")),
			bind_group_layouts: &[&device.create_bind_group_layout(&BindGroupLayoutDescriptor {
				label: Some(&format!("PerPixelAdjust {name} BindGroupLayout 0")),
				entries,
			})],
			push_constant_ranges: &[],
		});
		let pipeline = device.create_render_pipeline(&RenderPipelineDescriptor {
			label: Some(&format!("PerPixelAdjust {name} Pipeline")),
			layout: Some(&pipeline_layout),
			vertex: VertexState {
				module: &shader_module,
				entry_point: Some(FULLSCREEN_VERTEX_SHADER_NAME),
				compilation_options: Default::default(),
				buffers: &[],
			},
			primitive: PrimitiveState {
				topology: PrimitiveTopology::TriangleList,
				strip_index_format: None,
				front_face: FrontFace::Ccw,
				cull_mode: Some(Face::Back),
				unclipped_depth: false,
				polygon_mode: PolygonMode::Fill,
				conservative: false,
			},
			depth_stencil: None,
			multisample: Default::default(),
			fragment: Some(FragmentState {
				module: &shader_module,
				entry_point: Some(&fragment_name),
				compilation_options: Default::default(),
				targets: &[Some(ColorTargetState {
					format: TextureFormat::Rgba8UnormSrgb,
					blend: None,
					write_mask: Default::default(),
				})],
			}),
			multiview: None,
			cache: None,
		});
		Self {
			pipeline,
			name,
			has_uniform: info.has_uniform,
		}
	}

	/// Renders each input texture through the pipeline into a fresh output texture
	/// of the same size and format, submitting all passes in one command buffer.
	///
	/// Panics if `arg_buffer` presence does not match `has_uniform`.
	pub fn dispatch(&self, context: &WgpuContext, textures: Table<Raster<GPU>>, arg_buffer: Option<Buffer>) -> Table<Raster<GPU>> {
		assert_eq!(self.has_uniform, arg_buffer.is_some());
		let device = &context.device;
		let name = self.name.as_str();
		let mut cmd = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
			label: Some(&format!("{name} cmd encoder")),
		});
		let out = textures
			.iter()
			.map(|instance| {
				let tex_in = &instance.element.texture;
				let view_in = tex_in.create_view(&TextureViewDescriptor::default());
				let format = tex_in.format();
				// Bind group entries mirror the layout chosen in `new`.
				let entries: &[_] = if let Some(arg_buffer) = arg_buffer.as_ref() {
					&[
						BindGroupEntry {
							binding: 0,
							resource: BindingResource::Buffer(BufferBinding {
								buffer: arg_buffer,
								offset: 0,
								size: None,
							}),
						},
						BindGroupEntry {
							binding: 1,
							resource: BindingResource::TextureView(&view_in),
						},
					]
				} else {
					&[BindGroupEntry {
						binding: 0,
						resource: BindingResource::TextureView(&view_in),
					}]
				};
				let bind_group = device.create_bind_group(&BindGroupDescriptor {
					label: Some(&format!("{name} bind group")),
					// `get_bind_group_layout` allocates unnecessary memory, we could create it manually to not do that
					layout: &self.pipeline.get_bind_group_layout(0),
					entries,
				});
				let tex_out = device.create_texture(&TextureDescriptor {
					label: Some(&format!("{name} texture out")),
					size: tex_in.size(),
					mip_level_count: 1,
					sample_count: 1,
					dimension: TextureDimension::D2,
					format,
					usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::RENDER_ATTACHMENT,
					view_formats: &[format],
				});
				let view_out = tex_out.create_view(&TextureViewDescriptor::default());
				let mut rp = cmd.begin_render_pass(&RenderPassDescriptor {
					label: Some(&format!("{name} render pipeline")),
					color_attachments: &[Some(RenderPassColorAttachment {
						view: &view_out,
						resolve_target: None,
						ops: Operations {
							// should be dont_care but wgpu doesn't expose that
							load: LoadOp::Clear(wgpu::Color::BLACK),
							store: StoreOp::Store,
						},
						depth_slice: None,
					})],
					depth_stencil_attachment: None,
					timestamp_writes: None,
					occlusion_query_set: None,
				});
				rp.set_pipeline(&self.pipeline);
				rp.set_bind_group(0, Some(&bind_group), &[]);
				// Fullscreen triangle: three vertices, no vertex buffer.
				rp.draw(0..3, 0..1);
				TableRow {
					element: Raster::new(GPU { texture: tex_out }),
					transform: *instance.transform,
					alpha_blending: *instance.alpha_blending,
					source_node_id: *instance.source_node_id,
				}
			})
			.collect::<Table<_>>();
		context.queue.submit([cmd.finish()]);
		out
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/wgpu-executor/src/shader_runtime/mod.rs
node-graph/libraries/wgpu-executor/src/shader_runtime/mod.rs
use crate::WgpuContext;
use crate::shader_runtime::per_pixel_adjust_runtime::PerPixelAdjustShaderRuntime;

pub mod per_pixel_adjust_runtime;

/// Entry-point name of the shared fullscreen vertex shader.
/// NOTE(review): the doubled "fullscreen_vertex" looks like deliberate name mangling — confirm against the WGSL source.
pub const FULLSCREEN_VERTEX_SHADER_NAME: &str = "fullscreen_vertexfullscreen_vertex";

/// Owns a WGPU context plus the per-shader-kind sub-runtimes used to dispatch node shaders.
pub struct ShaderRuntime {
	context: WgpuContext,
	per_pixel_adjust: PerPixelAdjustShaderRuntime,
}

impl ShaderRuntime {
	/// Clones the given context and starts with a fresh per-pixel-adjust runtime.
	pub fn new(context: &WgpuContext) -> Self {
		let context = context.clone();
		let per_pixel_adjust = PerPixelAdjustShaderRuntime::new();
		Self { context, per_pixel_adjust }
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/lib.rs
node-graph/libraries/no-std-types/src/lib.rs
#![cfg_attr(not(feature = "std"), no_std)]

pub mod blending;
pub mod choice_type;
pub mod color;
pub mod context;
pub mod registry;
pub mod shaders;

pub use context::Ctx;
pub use glam;

/// Types that can be viewed as a `u32` value.
pub trait AsU32 {
	/// Returns this value as a `u32`.
	fn as_u32(&self) -> u32;
}

impl AsU32 for u32 {
	fn as_u32(&self) -> u32 {
		*self
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/registry.rs
node-graph/libraries/no-std-types/src/registry.rs
/// Semantic type aliases used by node registration to attach meaning (units, ranges)
/// to otherwise plain numeric parameters.
pub mod types {
	/// 0% - 100%
	pub type Percentage = f64;
	/// 0% - 100%
	pub type PercentageF32 = f32;
	/// -100% - 100%
	pub type SignedPercentage = f64;
	/// -100% - 100%
	pub type SignedPercentageF32 = f32;
	/// -180° - 180°
	pub type Angle = f64;
	/// -180° - 180°
	pub type AngleF32 = f32;
	/// Ends in the unit of x
	pub type Multiplier = f64;
	/// Non-negative integer with px unit
	pub type PixelLength = f64;
	/// Non-negative
	pub type Length = f64;
	/// 0 to 1
	pub type Fraction = f64;
	/// Non-negative number broken into whole and fractional parts
	pub type Progression = f64;
	/// Signed integer that's actually a float because we don't handle type conversions very well yet
	pub type SignedInteger = f64;
	/// Unsigned integer
	pub type IntegerCount = u32;
	/// Unsigned integer to be used for random seeds
	pub type SeedValue = u32;
	/// DVec2 with px unit
	pub type PixelSize = glam::DVec2;
	/// String with one or more than one line
	#[cfg(feature = "std")]
	pub type TextArea = String;
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/blending.rs
node-graph/libraries/no-std-types/src/blending.rs
use core::fmt::Display; use core::hash::{Hash, Hasher}; use node_macro::BufferStruct; use num_enum::{FromPrimitive, IntoPrimitive}; #[cfg(not(feature = "std"))] use num_traits::float::Float; #[derive(Debug, Clone, Copy, PartialEq, BufferStruct)] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "std", serde(default))] pub struct AlphaBlending { pub blend_mode: BlendMode, pub opacity: f32, pub fill: f32, pub clip: bool, } impl Default for AlphaBlending { fn default() -> Self { Self::new() } } impl Hash for AlphaBlending { fn hash<H: Hasher>(&self, state: &mut H) { self.opacity.to_bits().hash(state); self.fill.to_bits().hash(state); self.blend_mode.hash(state); self.clip.hash(state); } } impl Display for AlphaBlending { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let round = |x: f32| (x * 1e3).round() / 1e3; write!( f, "Blend Mode: {} — Opacity: {}% — Fill: {}% — Clip: {}", self.blend_mode, round(self.opacity * 100.), round(self.fill * 100.), if self.clip { "Yes" } else { "No" } ) } } impl AlphaBlending { pub const fn new() -> Self { Self { opacity: 1., fill: 1., blend_mode: BlendMode::Normal, clip: false, } } pub fn lerp(&self, other: &Self, t: f32) -> Self { let lerp = |a: f32, b: f32, t: f32| a + (b - a) * t; AlphaBlending { opacity: lerp(self.opacity, other.opacity, t), fill: lerp(self.fill, other.fill, t), blend_mode: if t < 0.5 { self.blend_mode } else { other.blend_mode }, clip: if t < 0.5 { self.clip } else { other.clip }, } } pub fn opacity(&self, mask: bool) -> f32 { self.opacity * if mask { 1. 
} else { self.fill } } } #[repr(i32)] #[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash, BufferStruct, FromPrimitive, IntoPrimitive)] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))] pub enum BlendMode { // Basic group #[default] Normal, // Darken group Darken, Multiply, ColorBurn, LinearBurn, DarkerColor, // Lighten group Lighten, Screen, ColorDodge, LinearDodge, LighterColor, // Contrast group Overlay, SoftLight, HardLight, VividLight, LinearLight, PinLight, HardMix, // Inversion group Difference, Exclusion, Subtract, Divide, // Component group Hue, Saturation, Color, Luminosity, // Other stuff Erase, Restore, MultiplyAlpha, } impl BlendMode { /// All standard blend modes ordered by group. pub fn list() -> [&'static [BlendMode]; 6] { use BlendMode::*; [ // Normal group &[Normal], // Darken group &[Darken, Multiply, ColorBurn, LinearBurn, DarkerColor], // Lighten group &[Lighten, Screen, ColorDodge, LinearDodge, LighterColor], // Contrast group &[Overlay, SoftLight, HardLight, VividLight, LinearLight, PinLight, HardMix], // Inversion group &[Difference, Exclusion, Subtract, Divide], // Component group &[Hue, Saturation, Color, Luminosity], ] } /// The subset of [`BlendMode::list()`] that is supported by SVG. 
pub fn list_svg_subset() -> [&'static [BlendMode]; 6] { use BlendMode::*; [ // Normal group &[Normal], // Darken group &[Darken, Multiply, ColorBurn], // Lighten group &[Lighten, Screen, ColorDodge], // Contrast group &[Overlay, SoftLight, HardLight], // Inversion group &[Difference, Exclusion], // Component group &[Hue, Saturation, Color, Luminosity], ] } pub fn index_in_list(&self) -> Option<usize> { Self::list().iter().flat_map(|x| x.iter()).position(|&blend_mode| blend_mode == *self) } pub fn index_in_list_svg_subset(&self) -> Option<usize> { Self::list_svg_subset().iter().flat_map(|x| x.iter()).position(|&blend_mode| blend_mode == *self) } /// Convert the enum to the CSS string for the blend mode. /// [Read more](https://developer.mozilla.org/en-US/docs/Web/CSS/blend-mode#values) pub fn to_svg_style_name(&self) -> Option<&'static str> { match self { // Normal group BlendMode::Normal => Some("normal"), // Darken group BlendMode::Darken => Some("darken"), BlendMode::Multiply => Some("multiply"), BlendMode::ColorBurn => Some("color-burn"), // Lighten group BlendMode::Lighten => Some("lighten"), BlendMode::Screen => Some("screen"), BlendMode::ColorDodge => Some("color-dodge"), // Contrast group BlendMode::Overlay => Some("overlay"), BlendMode::SoftLight => Some("soft-light"), BlendMode::HardLight => Some("hard-light"), // Inversion group BlendMode::Difference => Some("difference"), BlendMode::Exclusion => Some("exclusion"), // Component group BlendMode::Hue => Some("hue"), BlendMode::Saturation => Some("saturation"), BlendMode::Color => Some("color"), BlendMode::Luminosity => Some("luminosity"), _ => None, } } /// Renders the blend mode CSS style declaration. 
#[cfg(feature = "std")] pub fn render(&self) -> String { format!( r#" mix-blend-mode: {};"#, self.to_svg_style_name().unwrap_or_else(|| { log::warn!("Unsupported blend mode {self:?}"); "normal" }) ) } } impl Display for BlendMode { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { // Normal group BlendMode::Normal => write!(f, "Normal"), // Darken group BlendMode::Darken => write!(f, "Darken"), BlendMode::Multiply => write!(f, "Multiply"), BlendMode::ColorBurn => write!(f, "Color Burn"), BlendMode::LinearBurn => write!(f, "Linear Burn"), BlendMode::DarkerColor => write!(f, "Darker Color"), // Lighten group BlendMode::Lighten => write!(f, "Lighten"), BlendMode::Screen => write!(f, "Screen"), BlendMode::ColorDodge => write!(f, "Color Dodge"), BlendMode::LinearDodge => write!(f, "Linear Dodge"), BlendMode::LighterColor => write!(f, "Lighter Color"), // Contrast group BlendMode::Overlay => write!(f, "Overlay"), BlendMode::SoftLight => write!(f, "Soft Light"), BlendMode::HardLight => write!(f, "Hard Light"), BlendMode::VividLight => write!(f, "Vivid Light"), BlendMode::LinearLight => write!(f, "Linear Light"), BlendMode::PinLight => write!(f, "Pin Light"), BlendMode::HardMix => write!(f, "Hard Mix"), // Inversion group BlendMode::Difference => write!(f, "Difference"), BlendMode::Exclusion => write!(f, "Exclusion"), BlendMode::Subtract => write!(f, "Subtract"), BlendMode::Divide => write!(f, "Divide"), // Component group BlendMode::Hue => write!(f, "Hue"), BlendMode::Saturation => write!(f, "Saturation"), BlendMode::Color => write!(f, "Color"), BlendMode::Luminosity => write!(f, "Luminosity"), // Other utility blend modes (hidden from the normal list) BlendMode::Erase => write!(f, "Erase"), BlendMode::Restore => write!(f, "Restore"), BlendMode::MultiplyAlpha => write!(f, "Multiply Alpha"), } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/context.rs
node-graph/libraries/no-std-types/src/context.rs
pub trait Ctx: Clone + Send {} impl<T: Ctx> Ctx for Option<T> {} impl<T: Ctx + Sync> Ctx for &T {} impl Ctx for () {} pub trait ArcCtx: Send + Sync {} #[cfg(feature = "std")] impl<T: ArcCtx> Ctx for std::sync::Arc<T> {}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/choice_type.rs
node-graph/libraries/no-std-types/src/choice_type.rs
pub trait ChoiceTypeStatic: Sized + Copy + crate::AsU32 + Send + Sync { const WIDGET_HINT: ChoiceWidgetHint; const DESCRIPTION: Option<&'static str>; fn list() -> &'static [&'static [(Self, VariantMetadata)]]; } pub enum ChoiceWidgetHint { Dropdown, RadioButtons, } /// Translation struct between macro and definition. #[derive(Clone, Debug)] pub struct VariantMetadata { /// Name as declared in source code. pub name: &'static str, /// Name to be displayed in UI. pub label: &'static str, /// User-facing documentation text. pub description: Option<&'static str>, /// Name of icon to display in radio buttons and such. pub icon: Option<&'static str>, }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/shaders/mod.rs
node-graph/libraries/no-std-types/src/shaders/mod.rs
//! supporting infrastructure for shaders pub mod buffer_struct; pub mod __private { pub use bytemuck; pub use glam; pub use num_enum; pub use spirv_std; }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/shaders/buffer_struct/glam.rs
node-graph/libraries/no-std-types/src/shaders/buffer_struct/glam.rs
use crate::shaders::buffer_struct::BufferStruct; macro_rules! glam_array { ($t:ty, $a:ty) => { unsafe impl BufferStruct for $t { type Buffer = $a; #[inline] fn write(from: Self) -> Self::Buffer { <$t>::to_array(&from) } #[inline] fn read(from: Self::Buffer) -> Self { <$t>::from_array(from) } } }; } macro_rules! glam_cols_array { ($t:ty, $a:ty) => { unsafe impl BufferStruct for $t { type Buffer = $a; #[inline] fn write(from: Self) -> Self::Buffer { <$t>::to_cols_array(&from) } #[inline] fn read(from: Self::Buffer) -> Self { <$t>::from_cols_array(&from) } } }; } glam_array!(glam::Vec2, [f32; 2]); glam_array!(glam::Vec3, [f32; 3]); // glam_array!(Vec3A, [f32; 4]); glam_array!(glam::Vec4, [f32; 4]); glam_array!(glam::Quat, [f32; 4]); glam_cols_array!(glam::Mat2, [f32; 4]); glam_cols_array!(glam::Mat3, [f32; 9]); // glam_cols_array!(Mat3A, [f32; 4]); glam_cols_array!(glam::Mat4, [f32; 16]); glam_cols_array!(glam::Affine2, [f32; 6]); glam_cols_array!(glam::Affine3A, [f32; 12]); glam_array!(glam::DVec2, [f64; 2]); glam_array!(glam::DVec3, [f64; 3]); glam_array!(glam::DVec4, [f64; 4]); glam_array!(glam::DQuat, [f64; 4]); glam_cols_array!(glam::DMat2, [f64; 4]); glam_cols_array!(glam::DMat3, [f64; 9]); glam_cols_array!(glam::DMat4, [f64; 16]); glam_cols_array!(glam::DAffine2, [f64; 6]); glam_cols_array!(glam::DAffine3, [f64; 12]); glam_array!(glam::I16Vec2, [i16; 2]); glam_array!(glam::I16Vec3, [i16; 3]); glam_array!(glam::I16Vec4, [i16; 4]); glam_array!(glam::U16Vec2, [u16; 2]); glam_array!(glam::U16Vec3, [u16; 3]); glam_array!(glam::U16Vec4, [u16; 4]); glam_array!(glam::IVec2, [i32; 2]); glam_array!(glam::IVec3, [i32; 3]); glam_array!(glam::IVec4, [i32; 4]); glam_array!(glam::UVec2, [u32; 2]); glam_array!(glam::UVec3, [u32; 3]); glam_array!(glam::UVec4, [u32; 4]); glam_array!(glam::I64Vec2, [i64; 2]); glam_array!(glam::I64Vec3, [i64; 3]); glam_array!(glam::I64Vec4, [i64; 4]); glam_array!(glam::U64Vec2, [u64; 2]); glam_array!(glam::U64Vec3, [u64; 3]); 
glam_array!(glam::U64Vec4, [u64; 4]); unsafe impl BufferStruct for glam::Vec3A { type Buffer = [f32; 4]; #[inline] fn write(from: Self) -> Self::Buffer { glam::Vec4::to_array(&from.extend(0.)) } #[inline] fn read(from: Self::Buffer) -> Self { glam::Vec3A::from_vec4(glam::Vec4::from_array(from)) } } /// do NOT use slices, otherwise spirv will fail to compile unsafe impl BufferStruct for glam::Mat3A { type Buffer = [f32; 12]; #[inline] fn write(from: Self) -> Self::Buffer { let a = from.to_cols_array(); [a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], 0., 0., 0.] } #[inline] fn read(from: Self::Buffer) -> Self { let a = from; glam::Mat3A::from_cols_array(&[a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]]) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/shaders/buffer_struct/primitive.rs
node-graph/libraries/no-std-types/src/shaders/buffer_struct/primitive.rs
use crate::shaders::buffer_struct::{BufferStruct, BufferStructIdentity}; use bytemuck::Pod; use core::marker::PhantomData; use core::num::Wrapping; use spirv_std::arch::IndexUnchecked; macro_rules! identity { ($t:ty) => { impl BufferStructIdentity for $t {} }; } identity!(()); identity!(u8); identity!(u16); identity!(u32); identity!(u64); identity!(u128); identity!(usize); identity!(i8); identity!(i16); identity!(i32); identity!(i64); identity!(i128); identity!(isize); identity!(f32); identity!(f64); identity!(spirv_std::arch::SubgroupMask); identity!(spirv_std::memory::Semantics); identity!(spirv_std::ray_tracing::RayFlags); identity!(spirv_std::indirect_command::DrawIndirectCommand); identity!(spirv_std::indirect_command::DrawIndexedIndirectCommand); identity!(spirv_std::indirect_command::DispatchIndirectCommand); identity!(spirv_std::indirect_command::DrawMeshTasksIndirectCommandEXT); identity!(spirv_std::indirect_command::TraceRaysIndirectCommandKHR); // not pod // identity!(spirv_std::indirect_command::TraceRaysIndirectCommand2KHR); unsafe impl BufferStruct for bool { type Buffer = u32; #[inline] fn write(from: Self) -> Self::Buffer { from as u32 } #[inline] fn read(from: Self::Buffer) -> Self { from != 0 } } unsafe impl<T: BufferStruct> BufferStruct for Wrapping<T> where // unfortunately has to be Pod, even though AnyBitPattern would be sufficient, // due to bytemuck doing `impl<T: Pod> AnyBitPattern for T {}` // see https://github.com/Lokathor/bytemuck/issues/164 T::Buffer: Pod, { type Buffer = Wrapping<T::Buffer>; #[inline] fn write(from: Self) -> Self::Buffer { Wrapping(T::write(from.0)) } #[inline] fn read(from: Self::Buffer) -> Self { Wrapping(T::read(from.0)) } } unsafe impl<T: BufferStruct + 'static> BufferStruct for PhantomData<T> { type Buffer = PhantomData<T>; #[inline] fn write(_: Self) -> Self::Buffer { PhantomData {} } #[inline] fn read(_: Self::Buffer) -> Self { PhantomData {} } } /// Potential problem: you can't impl this for an array of 
BufferStruct, as it'll conflict with this impl due to the /// blanket impl on all BufferStructPlain types. unsafe impl<T: BufferStruct, const N: usize> BufferStruct for [T; N] where // rust-gpu does not like `[T; N].map()` nor `core::array::from_fn()` nor transmuting arrays with a const generic // length, so for now we need to require T: Default and T::Transfer: Default for all arrays. T: Default, // unfortunately has to be Pod, even though AnyBitPattern would be sufficient, // due to bytemuck doing `impl<T: Pod> AnyBitPattern for T {}` // see https://github.com/Lokathor/bytemuck/issues/164 T::Buffer: Pod + Default, { type Buffer = [T::Buffer; N]; #[inline] fn write(from: Self) -> Self::Buffer { unsafe { let mut ret = [T::Buffer::default(); N]; for i in 0..N { *ret.index_unchecked_mut(i) = T::write(*from.index_unchecked(i)); } ret } } #[inline] fn read(from: Self::Buffer) -> Self { unsafe { let mut ret = [T::default(); N]; for i in 0..N { *ret.index_unchecked_mut(i) = T::read(*from.index_unchecked(i)); } ret } } } #[cfg(test)] mod tests { use super::*; #[test] fn roundtrip_bool() { for x in [false, true] { assert_eq!(x, <bool as BufferStruct>::read(<bool as BufferStruct>::write(x))); } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/shaders/buffer_struct/mod.rs
node-graph/libraries/no-std-types/src/shaders/buffer_struct/mod.rs
//! I (@firestar99) copied this entire mod from one of my projects, as I haven't uploaded that lib to crates. Hopefully //! rust-gpu improves and this entire thing becomes unnecessary in the future. //! //! https://github.com/Firestar99/nanite-at-home/tree/008dac8df656959c71efeddd2d3ddabcb801771c/rust-gpu-bindless/crates/buffer-content use bytemuck::Pod; mod glam; mod primitive; /// A BufferStruct is a "parallel representation" of the original struct with some fundamental types remapped. This /// struct hierarchy represents how data is stored in GPU Buffers, where all types must be [`Pod`] to allow /// transmuting them to `&[u8]` with [`bytemuck`]. /// /// Notable type remappings (original: buffer): /// * bool: u32 of 0 or 1 /// * any repr(u32) enum: u32 with remapping via [`num_enum`] /// /// By adding `#[derive(ShaderStruct)]` to your struct (or enum), a parallel `{name}Buffer` struct is created with all /// the members of the original struct, but with their types using the associated remapped types as specified by this /// trait. /// /// # Origin /// I (@firestar99) copied this entire mod from my [Nanite-at-home] project, specifically the [buffer-content] crate /// and the [buffer_struct] proc macro. The variant here has quite some modifications, to both cleaned up some of the /// mistakes my implementation has and to customize it a bit for graphite. /// /// Hopefully rust-gpu improves to the point where this remapping becomes unnecessary. /// /// [Nanite-at-home]: https://github.com/Firestar99/nanite-at-home /// [buffer-content]: https://github.com/Firestar99/nanite-at-home/tree/008dac8df656959c71efeddd2d3ddabcb801771c/rust-gpu-bindless/crates/buffer-content /// [buffer_struct]: https://github.com/Firestar99/nanite-at-home/blob/008dac8df656959c71efeddd2d3ddabcb801771c/rust-gpu-bindless/crates/macros/src/buffer_struct.rs /// /// # Safety /// The associated type Transfer must be the same on all targets. 
Writing followed by reading back a value must result /// in the same value. pub unsafe trait BufferStruct: Copy + Send + Sync + 'static { type Buffer: Pod + Send + Sync; fn write(from: Self) -> Self::Buffer; fn read(from: Self::Buffer) -> Self; } /// Trait marking all [`BufferStruct`] whose read and write methods are identity. While [`BufferStruct`] only /// requires `t == read(write(t))`, this trait additionally requires `t == read(t) == write(t)`. As this removes the /// conversion requirement for writing to or reading from a buffer, one can acquire slices from buffers created of these /// types. /// /// Implementing this type is completely safe due to the [`Pod`] requirement. pub trait BufferStructIdentity: Pod + Send + Sync {} unsafe impl<T: BufferStructIdentity> BufferStruct for T { type Buffer = Self; fn write(from: Self) -> Self::Buffer { from } fn read(from: Self::Buffer) -> Self { from } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/color/discrete_srgb.rs
node-graph/libraries/no-std-types/src/color/discrete_srgb.rs
#![allow(clippy::neg_cmp_op_on_partial_ord)] //! Fast conversions between u8 sRGB and linear float. // Inspired by https://gist.github.com/rygorous/2203834, but with a slightly // modified method, custom derived constants and error correction for perfect // accuracy in accordance with the D3D11 spec: // https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#FLOATtoSRGB. /// CRITICAL_POINTS[i] is the last float value such that it maps to i after /// conversion to integer sRGB. So if x > CRITICAL_POINTS[i] you know you need /// to increment i. #[rustfmt::skip] const CRITICAL_POINTS: [f32; 256] = [ 0.00015176347, 0.00045529046, 0.0007588174, 0.0010623443, 0.0013658714, 0.0016693983, 0.0019729252, 0.0022764523, 0.0025799791, 0.0028835062, 0.0031883009, 0.003509259, 0.003848315, 0.004205748, 0.0045818323, 0.0049768374, 0.005391024, 0.00582465, 0.0062779686, 0.0067512267, 0.0072446675, 0.0077585294, 0.008293047, 0.008848451, 0.0094249705, 0.010022825, 0.010642236, 0.01128342, 0.011946591, 0.012631957, 0.013339729, 0.014070111, 0.0148233045, 0.015599505, 0.01639891, 0.017221717, 0.018068114, 0.018938294, 0.019832445, 0.020750746, 0.021693384, 0.022660539, 0.02365239, 0.024669115, 0.025710886, 0.026777886, 0.027870273, 0.028988222, 0.030131903, 0.03130148, 0.032497127, 0.033718992, 0.034967244, 0.03624204, 0.03754355, 0.03887192, 0.040227327, 0.041609894, 0.04301979, 0.044457167, 0.04592218, 0.04741497, 0.04893569, 0.050484486, 0.05206151, 0.053666897, 0.055300802, 0.056963358, 0.058654714, 0.060375024, 0.062124394, 0.06390298, 0.065710925, 0.06754836, 0.06941542, 0.07131224, 0.07323896, 0.07519571, 0.07718261, 0.07919981, 0.08124744, 0.08332562, 0.08543448, 0.08757417, 0.08974478, 0.091946445, 0.09417931, 0.09644348, 0.098739095, 0.10106628, 0.10342514, 0.105815805, 0.1082384, 0.110693045, 0.11317986, 0.11569896, 0.118250474, 0.12083454, 0.12345121, 0.12610064, 0.12878296, 0.13149826, 0.13424668, 0.1370283, 0.13984327, 0.14269169, 0.14557366, 
0.1484893, 0.15143873, 0.15442204, 0.15743938, 0.16049084, 0.1635765, 0.16669647, 0.16985092, 0.1730399, 0.17626354, 0.17952198, 0.18281525, 0.1861435, 0.18950681, 0.19290532, 0.19633913, 0.19980833, 0.20331302, 0.20685332, 0.21042931, 0.21404111, 0.21768881, 0.22137253, 0.22509235, 0.22884844, 0.23264077, 0.23646952, 0.24033478, 0.24423665, 0.24817522, 0.25215057, 0.25616285, 0.26021212, 0.26429847, 0.26842204, 0.27258286, 0.27678108, 0.2810168, 0.28529006, 0.289601, 0.2939497, 0.29833627, 0.30276078, 0.30722332, 0.311724, 0.31626293, 0.32084015, 0.32545578, 0.33010995, 0.3348027, 0.3395341, 0.34430432, 0.34911346, 0.3539615, 0.35884857, 0.3637748, 0.36874023, 0.373745, 0.37878913, 0.38387278, 0.388996, 0.39415887, 0.39936152, 0.404604, 0.4098864, 0.41520882, 0.42057133, 0.425974, 0.431417, 0.43690032, 0.4424241, 0.44798836, 0.45359328, 0.45923886, 0.46492523, 0.47065246, 0.47642064, 0.48222986, 0.48808017, 0.4939718, 0.49990457, 0.5058787, 0.5118943, 0.5179514, 0.5240501, 0.5301905, 0.5363727, 0.5425967, 0.54886264, 0.5551706, 0.56152064, 0.5679129, 0.5743473, 0.5808241, 0.5873433, 0.593905, 0.60050917, 0.60715604, 0.61384565, 0.62057805, 0.6273533, 0.63417155, 0.6410328, 0.6479372, 0.65488476, 0.66187555, 0.6689097, 0.6759874, 0.68310845, 0.6902731, 0.6974814, 0.7047334, 0.71202916, 0.7193688, 0.7267524, 0.73418003, 0.7416518, 0.7491677, 0.7567278, 0.76433223, 0.7719811, 0.7796744, 0.7874122, 0.7951947, 0.80302185, 0.8108938, 0.81881046, 0.82677215, 0.8347787, 0.8428304, 0.8509272, 0.85906917, 0.8672564, 0.875489, 0.8837671, 0.89209044, 0.9004596, 0.9088741, 0.91733456, 0.9258405, 0.9343926, 0.94299024, 0.95163417, 0.96032387, 0.96906, 0.977842, 0.9866705, 0.9955452, 1., ]; #[rustfmt::skip] const FLOAT_SRGB_LERP: [u32; 27] = [ 0x66f, 0x66f063b, 0xcaa0515, 0x11c00773, 0x193305dc, 0x1f1004f3, 0x24030481, 0x28850773, 0x2ff9065e, 0x365805a1, 0x3bfa0547, 0x414108f7, 0x4a3907d8, 0x52110709, 0x591b06aa, 0x5fc50b70, 0x6b350a18, 0x754e091c, 0x7e6b08aa, 0x87160ef1, 
0x96070d3e, 0xa3460bfc, 0xaf430b6c, 0xbaaf13bd, 0xce6d1187, 0xdff40fe3, 0xefd70f28, ]; #[inline] pub fn float_to_srgb_u8(mut f: f32) -> u8 { // Clamp f to [0, 1], with a negated condition to handle NaNs as 0. if !(f >= 0.) { f = 0.; } else if f > 1. { f = 1.; } // Shift away slightly from 0.0 to reduce exponent range. const C: f32 = 0.009842521f32; let u = (f + C).to_bits() - C.to_bits(); if u > (1. + C).to_bits() - C.to_bits() { // We clamped f to [0, 1], and the integer representations // of the positive finite non-NaN floats are monotonic. // This makes the later LUT lookup panicless. unsafe { core::hint::unreachable_unchecked() } } // Compute a piecewise linear interpolation that is always // the correct answer, or one less than it. let u16mask = (1 << 16) - 1; let lut_idx = u >> 21; let lerp_idx = (u >> 5) & u16mask; let bias_mult = FLOAT_SRGB_LERP[lut_idx as usize]; let bias = (bias_mult >> 16) << 16; let mult = bias_mult & u16mask; // I don't believe this wraps, but since we test in release mode, // better make sure debug mode behaves the same. let lerp = bias.wrapping_add(mult * lerp_idx) >> 24; // Adjust linear interpolation to the correct value. 
if f > CRITICAL_POINTS[lerp as usize] { lerp as u8 + 1 } else { lerp as u8 } } #[rustfmt::skip] const FROM_SRGB_U8: [f32; 256] = [ 0., 0.000303527, 0.000607054, 0.00091058103, 0.001214108, 0.001517635, 0.0018211621, 0.002124689, 0.002428216, 0.002731743, 0.00303527, 0.0033465356, 0.003676507, 0.004024717, 0.004391442, 0.0047769533, 0.005181517, 0.0056053917, 0.0060488326, 0.006512091, 0.00699541, 0.0074990317, 0.008023192, 0.008568125, 0.009134057, 0.009721218, 0.010329823, 0.010960094, 0.011612245, 0.012286487, 0.012983031, 0.013702081, 0.014443844, 0.015208514, 0.015996292, 0.016807375, 0.017641952, 0.018500218, 0.019382361, 0.020288562, 0.02121901, 0.022173883, 0.023153365, 0.02415763, 0.025186857, 0.026241222, 0.027320892, 0.028426038, 0.029556843, 0.03071345, 0.03189604, 0.033104774, 0.03433981, 0.035601325, 0.036889452, 0.038204376, 0.039546248, 0.04091521, 0.042311423, 0.043735042, 0.045186214, 0.046665095, 0.048171833, 0.049706575, 0.051269468, 0.052860655, 0.05448028, 0.056128494, 0.057805434, 0.05951124, 0.06124607, 0.06301003, 0.06480328, 0.06662595, 0.06847818, 0.07036011, 0.07227186, 0.07421358, 0.07618539, 0.07818743, 0.08021983, 0.082282715, 0.084376216, 0.086500466, 0.088655606, 0.09084173, 0.09305898, 0.095307484, 0.09758736, 0.09989874, 0.10224175, 0.10461649, 0.10702311, 0.10946172, 0.111932434, 0.11443538, 0.116970696, 0.11953845, 0.12213881, 0.12477186, 0.12743773, 0.13013652, 0.13286836, 0.13563336, 0.13843165, 0.14126332, 0.1441285, 0.1470273, 0.14995982, 0.15292618, 0.1559265, 0.15896086, 0.16202943, 0.16513224, 0.16826946, 0.17144115, 0.17464745, 0.17788847, 0.1811643, 0.18447503, 0.1878208, 0.19120172, 0.19461787, 0.19806935, 0.2015563, 0.20507877, 0.2086369, 0.21223079, 0.21586053, 0.21952623, 0.22322798, 0.22696589, 0.23074007, 0.23455065, 0.23839766, 0.2422812, 0.2462014, 0.25015837, 0.25415218, 0.2581829, 0.26225072, 0.26635566, 0.27049786, 0.27467737, 0.27889434, 0.2831488, 0.2874409, 0.2917707, 0.29613832, 0.30054384, 0.30498737, 
0.30946895, 0.31398875, 0.31854683, 0.32314324, 0.32777813, 0.33245158, 0.33716366, 0.34191445, 0.3467041, 0.3515327, 0.35640025, 0.36130688, 0.3662527, 0.37123778, 0.37626222, 0.3813261, 0.38642952, 0.39157256, 0.3967553, 0.40197787, 0.4072403, 0.4125427, 0.41788515, 0.42326775, 0.42869055, 0.4341537, 0.43965724, 0.44520125, 0.45078585, 0.45641106, 0.46207705, 0.46778384, 0.47353154, 0.47932023, 0.48514998, 0.4910209, 0.49693304, 0.5028866, 0.50888145, 0.5149178, 0.5209957, 0.52711535, 0.5332766, 0.5394797, 0.5457247, 0.5520116, 0.5583406, 0.5647117, 0.57112503, 0.57758063, 0.5840786, 0.590619, 0.597202, 0.60382754, 0.61049575, 0.61720675, 0.62396055, 0.63075733, 0.637597, 0.6444799, 0.6514058, 0.65837497, 0.66538745, 0.67244333, 0.6795426, 0.68668544, 0.69387203, 0.70110214, 0.70837605, 0.7156938, 0.72305536, 0.730461, 0.7379107, 0.7454045, 0.75294244, 0.76052475, 0.7681514, 0.77582246, 0.78353804, 0.79129815, 0.79910296, 0.8069525, 0.8148468, 0.822786, 0.8307701, 0.83879924, 0.84687346, 0.8549928, 0.8631574, 0.87136734, 0.8796226, 0.8879232, 0.89626956, 0.90466136, 0.913099, 0.92158204, 0.93011117, 0.9386859, 0.9473069, 0.9559735, 0.9646866, 0.9734455, 0.98225087, 0.9911022, 1., ]; #[inline] pub fn srgb_u8_to_float(c: u8) -> f32 { FROM_SRGB_U8[c as usize] } #[cfg(test)] mod tests { use super::*; // https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#FLOATtoSRGB fn float_to_srgb_ref(f: f32) -> f32 { if !(f > 0_f32) { 0_f32 } else if f <= 0.0031308f32 { 12.92_f32 * f } else if f < 1_f32 { 1.055f32 * f.powf(1_f32 / 2.4_f32) - 0.055f32 } else { 1_f32 } } fn float_to_srgb_u8_ref(f: f32) -> u8 { (float_to_srgb_ref(f) * 255_f32 + 0.5_f32) as u8 } // https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#SRGBtoFLOAT fn srgb_to_float_ref(f: f32) -> f32 { if f <= 0.04045f32 { f / 12.92f32 } else { ((f + 0.055f32) / 1.055f32).powf(2.4_f32) } } fn srgb_u8_to_float_ref(c: u8) -> f32 { srgb_to_float_ref(c as f32 * (1_f32 
/ 255_f32)) } #[test] fn test_float_to_srgb_u8() { for u in 0..=u8::MAX { assert!(srgb_u8_to_float(u) == srgb_u8_to_float_ref(u)); } } #[ignore = "expensive, test in release mode"] #[test] fn test_srgb_u8_to_float() { // Simply... check all float values. for u in 0..=u32::MAX { let f = f32::from_bits(u); assert!(float_to_srgb_u8(f) == float_to_srgb_u8_ref(f)); } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/color/mod.rs
node-graph/libraries/no-std-types/src/color/mod.rs
mod color_traits; mod color_types; mod discrete_srgb; pub use color_traits::*; pub use color_types::*; pub use discrete_srgb::*;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/color/color_traits.rs
node-graph/libraries/no-std-types/src/color/color_traits.rs
pub use crate::blending::*; use bytemuck::{Pod, Zeroable}; use core::fmt::Debug; use glam::DVec2; use num_derive::*; #[cfg(not(feature = "std"))] use num_traits::float::Float; pub trait Linear { fn from_f32(x: f32) -> Self; fn to_f32(self) -> f32; fn from_f64(x: f64) -> Self; fn to_f64(self) -> f64; fn lerp(self, other: Self, value: Self) -> Self where Self: Sized + Copy, Self: core::ops::Sub<Self, Output = Self>, Self: core::ops::Mul<Self, Output = Self>, Self: core::ops::Add<Self, Output = Self>, { self + (other - self) * value } } #[rustfmt::skip] impl Linear for f32 { #[inline(always)] fn from_f32(x: f32) -> Self { x } #[inline(always)] fn to_f32(self) -> f32 { self } #[inline(always)] fn from_f64(x: f64) -> Self { x as f32 } #[inline(always)] fn to_f64(self) -> f64 { self as f64 } } #[rustfmt::skip] impl Linear for f64 { #[inline(always)] fn from_f32(x: f32) -> Self { x as f64 } #[inline(always)] fn to_f32(self) -> f32 { self as f32 } #[inline(always)] fn from_f64(x: f64) -> Self { x } #[inline(always)] fn to_f64(self) -> f64 { self } } pub trait Channel: Copy + Debug { fn to_linear<Out: Linear>(self) -> Out; fn from_linear<In: Linear>(linear: In) -> Self; } pub trait LinearChannel: Channel { fn cast_linear_channel<Out: LinearChannel>(self) -> Out { Out::from_linear(self.to_linear::<f64>()) } } impl<T: Linear + Debug + Copy> Channel for T { #[inline(always)] fn to_linear<Out: Linear>(self) -> Out { Out::from_f64(self.to_f64()) } #[inline(always)] fn from_linear<In: Linear>(linear: In) -> Self { Self::from_f64(linear.to_f64()) } } impl<T: Linear + Debug + Copy> LinearChannel for T {} #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Num, NumCast, NumOps, One, Zero, ToPrimitive, FromPrimitive)] pub struct SRGBGammaFloat(f32); impl Channel for SRGBGammaFloat { #[inline(always)] fn to_linear<Out: Linear>(self) -> Out { let x = self.0; Out::from_f32(if x <= 0.04045 { x / 12.92 } else { ((x + 0.055) / 1.055).powf(2.4) }) } #[inline(always)] fn from_linear<In: 
Linear>(linear: In) -> Self { let x = linear.to_f32(); if x <= 0.0031308 { Self(x * 12.92) } else { Self(1.055 * x.powf(1. / 2.4) - 0.055) } } } pub trait RGBPrimaries { const RED: DVec2; const GREEN: DVec2; const BLUE: DVec2; const WHITE: DVec2; } pub trait Rec709Primaries {} impl<T: Rec709Primaries> RGBPrimaries for T { const RED: DVec2 = DVec2::new(0.64, 0.33); const GREEN: DVec2 = DVec2::new(0.3, 0.6); const BLUE: DVec2 = DVec2::new(0.15, 0.06); const WHITE: DVec2 = DVec2::new(0.3127, 0.329); } pub trait SRGB: Rec709Primaries {} // TODO: Come up with a better name for this trait pub trait Pixel: Clone + Pod + Zeroable + Default { #[cfg(feature = "std")] fn to_bytes(&self) -> Vec<u8> { bytemuck::bytes_of(self).to_vec() } // TODO: use u8 for Color fn from_bytes(bytes: &[u8]) -> Self { *bytemuck::try_from_bytes(bytes).expect("Failed to convert bytes to pixel") } fn byte_size() -> usize { size_of::<Self>() } } pub trait RGB: Pixel { type ColorChannel: Channel; fn red(&self) -> Self::ColorChannel; fn r(&self) -> Self::ColorChannel { self.red() } fn green(&self) -> Self::ColorChannel; fn g(&self) -> Self::ColorChannel { self.green() } fn blue(&self) -> Self::ColorChannel; fn b(&self) -> Self::ColorChannel { self.blue() } } pub trait RGBMut: RGB { fn set_red(&mut self, red: Self::ColorChannel); fn set_green(&mut self, green: Self::ColorChannel); fn set_blue(&mut self, blue: Self::ColorChannel); } pub trait AssociatedAlpha: RGB + Alpha { fn to_unassociated<Out: UnassociatedAlpha>(&self) -> Out; } pub trait UnassociatedAlpha: RGB + Alpha { fn to_associated<Out: AssociatedAlpha>(&self) -> Out; } pub trait Alpha { type AlphaChannel: LinearChannel; const TRANSPARENT: Self; fn alpha(&self) -> Self::AlphaChannel; fn a(&self) -> Self::AlphaChannel { self.alpha() } fn multiplied_alpha(&self, alpha: Self::AlphaChannel) -> Self; } pub trait AlphaMut: Alpha { fn set_alpha(&mut self, value: Self::AlphaChannel); } pub trait Depth { type DepthChannel: Channel; fn depth(&self) -> 
Self::DepthChannel; fn d(&self) -> Self::DepthChannel { self.depth() } } pub trait ExtraChannels<const NUM: usize> { type ChannelType: Channel; fn extra_channels(&self) -> [Self::ChannelType; NUM]; } pub trait Luminance { type LuminanceChannel: LinearChannel; fn luminance(&self) -> Self::LuminanceChannel; fn l(&self) -> Self::LuminanceChannel { self.luminance() } } pub trait LuminanceMut: Luminance { fn set_luminance(&mut self, luminance: Self::LuminanceChannel); } // TODO: We might rename this to Raster at some point pub trait Sample { type Pixel: Pixel; // TODO: Add an area parameter fn sample(&self, pos: DVec2, area: DVec2) -> Option<Self::Pixel>; } impl<T: Sample> Sample for &T { type Pixel = T::Pixel; #[inline(always)] fn sample(&self, pos: DVec2, area: DVec2) -> Option<Self::Pixel> { (**self).sample(pos, area) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/no-std-types/src/color/color_types.rs
node-graph/libraries/no-std-types/src/color/color_types.rs
use super::color_traits::{Alpha, AlphaMut, AssociatedAlpha, Luminance, LuminanceMut, Pixel, RGB, RGBMut, Rec709Primaries, SRGB}; use super::discrete_srgb::{float_to_srgb_u8, srgb_u8_to_float}; use bytemuck::{Pod, Zeroable}; use core::fmt::Debug; use core::hash::Hash; use glam::Vec4; use half::f16; use node_macro::BufferStruct; #[cfg(not(feature = "std"))] use num_traits::Euclid; #[cfg(not(feature = "std"))] use num_traits::float::Float; #[repr(C)] #[derive(Default, Clone, Copy, PartialEq, Pod, Zeroable)] #[cfg_attr(not(target_arch = "spirv"), derive(Debug))] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, serde::Serialize, serde::Deserialize))] pub struct RGBA16F { red: f16, green: f16, blue: f16, alpha: f16, } /// hack around half still masking out impl Debug for f16 on spirv #[cfg(target_arch = "spirv")] impl core::fmt::Debug for RGBA16F { fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { Ok(()) } } impl From<Color> for RGBA16F { #[inline(always)] fn from(c: Color) -> Self { Self { red: f16::from_f32(c.r()), green: f16::from_f32(c.g()), blue: f16::from_f32(c.b()), alpha: f16::from_f32(c.a()), } } } impl Luminance for RGBA16F { type LuminanceChannel = f32; #[inline(always)] fn luminance(&self) -> f32 { // TODO: verify this is correct for sRGB 0.2126 * self.red() + 0.7152 * self.green() + 0.0722 * self.blue() } } impl RGB for RGBA16F { type ColorChannel = f32; #[inline(always)] fn red(&self) -> f32 { self.red.to_f32() } #[inline(always)] fn green(&self) -> f32 { self.green.to_f32() } #[inline(always)] fn blue(&self) -> f32 { self.blue.to_f32() } } impl Rec709Primaries for RGBA16F {} impl Alpha for RGBA16F { type AlphaChannel = f32; #[inline(always)] fn alpha(&self) -> f32 { self.alpha.to_f32() / 255. 
} const TRANSPARENT: Self = RGBA16F { red: f16::from_f32_const(0.), green: f16::from_f32_const(0.), blue: f16::from_f32_const(0.), alpha: f16::from_f32_const(0.), }; fn multiplied_alpha(&self, alpha: Self::AlphaChannel) -> Self { let alpha = alpha * 255.; let mut result = *self; result.alpha = f16::from_f32(alpha * self.alpha()); result } } impl Pixel for RGBA16F {} #[repr(C)] #[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))] pub struct SRGBA8 { red: u8, green: u8, blue: u8, alpha: u8, } impl From<Color> for SRGBA8 { #[inline(always)] fn from(c: Color) -> Self { Self { red: float_to_srgb_u8(c.r()), green: float_to_srgb_u8(c.g()), blue: float_to_srgb_u8(c.b()), alpha: (c.a() * 255.) as u8, } } } impl From<SRGBA8> for Color { #[inline(always)] fn from(color: SRGBA8) -> Self { Self { red: srgb_u8_to_float(color.red), green: srgb_u8_to_float(color.green), blue: srgb_u8_to_float(color.blue), alpha: color.alpha as f32 / 255., } } } impl Luminance for SRGBA8 { type LuminanceChannel = f32; #[inline(always)] fn luminance(&self) -> f32 { // TODO: verify this is correct for sRGB 0.2126 * self.red() + 0.7152 * self.green() + 0.0722 * self.blue() } } impl RGB for SRGBA8 { type ColorChannel = f32; #[inline(always)] fn red(&self) -> f32 { self.red as f32 / 255. } #[inline(always)] fn green(&self) -> f32 { self.green as f32 / 255. } #[inline(always)] fn blue(&self) -> f32 { self.blue as f32 / 255. } } impl Rec709Primaries for SRGBA8 {} impl SRGB for SRGBA8 {} impl Alpha for SRGBA8 { type AlphaChannel = f32; #[inline(always)] fn alpha(&self) -> f32 { self.alpha as f32 / 255. 
} const TRANSPARENT: Self = SRGBA8 { red: 0, green: 0, blue: 0, alpha: 0 }; fn multiplied_alpha(&self, alpha: Self::AlphaChannel) -> Self { let alpha = alpha * 255.; let mut result = *self; result.alpha = (alpha * self.alpha()) as u8; result } } impl Pixel for SRGBA8 {} #[repr(C)] #[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))] pub struct Luma(pub f32); impl Luminance for Luma { type LuminanceChannel = f32; #[inline(always)] fn luminance(&self) -> f32 { self.0 } } impl LuminanceMut for Luma { fn set_luminance(&mut self, luminance: Self::LuminanceChannel) { self.0 = luminance } } impl RGB for Luma { type ColorChannel = f32; #[inline(always)] fn red(&self) -> f32 { self.0 } #[inline(always)] fn green(&self) -> f32 { self.0 } #[inline(always)] fn blue(&self) -> f32 { self.0 } } impl Pixel for Luma {} /// Structure that represents a color. /// Internally alpha is stored as `f32` that ranges from `0.0` (transparent) to `1.0` (opaque). /// The other components (RGB) are stored as `f32` that range from `0.0` up to `f32::MAX`, /// the values encode the brightness of each channel proportional to the light intensity in cd/m² (nits) in HDR, and `0.0` (black) to `1.0` (white) in SDR color. 
#[repr(C)] #[derive(Debug, Default, Clone, Copy, Pod, Zeroable, BufferStruct)] #[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))] pub struct Color { red: f32, green: f32, blue: f32, alpha: f32, } impl PartialEq for Color { fn eq(&self, other: &Self) -> bool { self.red == other.red && self.green == other.green && self.blue == other.blue && self.alpha == other.alpha } } impl Eq for Color {} #[allow(clippy::derived_hash_with_manual_eq)] impl Hash for Color { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.red.to_bits().hash(state); self.green.to_bits().hash(state); self.blue.to_bits().hash(state); self.alpha.to_bits().hash(state); } } impl RGB for Color { type ColorChannel = f32; #[inline(always)] fn red(&self) -> f32 { self.red } #[inline(always)] fn green(&self) -> f32 { self.green } #[inline(always)] fn blue(&self) -> f32 { self.blue } } impl RGBMut for Color { fn set_red(&mut self, red: Self::ColorChannel) { self.red = red; } fn set_green(&mut self, green: Self::ColorChannel) { self.green = green; } fn set_blue(&mut self, blue: Self::ColorChannel) { self.blue = blue; } } impl AlphaMut for Color { fn set_alpha(&mut self, value: Self::AlphaChannel) { self.alpha = value; } } impl Pixel for Color { #[cfg(feature = "std")] fn to_bytes(&self) -> Vec<u8> { self.to_rgba8_srgb().to_vec() } fn from_bytes(bytes: &[u8]) -> Self { Color::from_rgba8_srgb(bytes[0], bytes[1], bytes[2], bytes[3]) } fn byte_size() -> usize { 4 } } impl Alpha for Color { type AlphaChannel = f32; const TRANSPARENT: Self = Self::TRANSPARENT; #[inline(always)] fn alpha(&self) -> f32 { self.alpha } #[inline(always)] fn multiplied_alpha(&self, alpha: Self::AlphaChannel) -> Self { Self { red: self.red * alpha, green: self.green * alpha, blue: self.blue * alpha, alpha: self.alpha * alpha, } } } impl AssociatedAlpha for Color { fn to_unassociated<Out: super::UnassociatedAlpha>(&self) -> Out { todo!() } } impl Luminance for Color { type 
LuminanceChannel = f32; #[inline(always)] fn luminance(&self) -> f32 { 0.2126 * self.red + 0.7152 * self.green + 0.0722 * self.blue } } impl LuminanceMut for Color { fn set_luminance(&mut self, luminance: f32) { let current = self.luminance(); // When we have a black-ish color, we just set the color to a grey-scale value. This prohibits a divide-by-0. if current < f32::EPSILON { self.red = 0.2126 * luminance; self.green = 0.7152 * luminance; self.blue = 0.0722 * luminance; return; } let fac = luminance / current; // TODO: when we have for example the rgb color (0, 0, 1) and want to // TODO: do `.set_luminance(1)`, then the actual luminance is not 1 at // TODO: the end. With no clamp, the resulting color would be // TODO: (0, 0, 12.8504). The excess should be spread to the other // TODO: channels, but is currently just clamped away. self.red = (self.red * fac).clamp(0., 1.); self.green = (self.green * fac).clamp(0., 1.); self.blue = (self.blue * fac).clamp(0., 1.); } } impl Rec709Primaries for Color {} impl SRGB for Color {} impl Color { pub const BLACK: Color = Color::from_rgbf32_unchecked(0., 0., 0.); pub const WHITE: Color = Color::from_rgbf32_unchecked(1., 1., 1.); pub const RED: Color = Color::from_rgbf32_unchecked(1., 0., 0.); pub const GREEN: Color = Color::from_rgbf32_unchecked(0., 1., 0.); pub const BLUE: Color = Color::from_rgbf32_unchecked(0., 0., 1.); pub const YELLOW: Color = Color::from_rgbf32_unchecked(1., 1., 0.); pub const CYAN: Color = Color::from_rgbf32_unchecked(0., 1., 1.); pub const MAGENTA: Color = Color::from_rgbf32_unchecked(1., 0., 1.); pub const TRANSPARENT: Color = Self { red: 0., green: 0., blue: 0., alpha: 0., }; /// Returns `Some(Color)` if `red`, `green`, `blue` and `alpha` have a valid value. Negative numbers (including `-0.0`), NaN, and infinity are not valid values and return `None`. /// Alpha values greater than `1.0` are not valid. 
/// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.3, 0.14, 0.15, 0.92).unwrap(); /// assert!(color.components() == (0.3, 0.14, 0.15, 0.92)); /// /// let color = Color::from_rgbaf32(1., 1., 1., f32::NAN); /// assert!(color == None); /// ``` #[inline(always)] pub fn from_rgbaf32(red: f32, green: f32, blue: f32, alpha: f32) -> Option<Color> { if alpha > 1. || [red, green, blue, alpha].iter().any(|c| c.is_sign_negative() || !c.is_finite()) { return None; } let color = Color { red, green, blue, alpha }; Some(color) } /// Return an opaque `Color` from given `f32` RGB channels. #[inline(always)] pub const fn from_rgbf32_unchecked(red: f32, green: f32, blue: f32) -> Color { Color { red, green, blue, alpha: 1. } } /// Return an opaque `Color` from given `f32` RGB channels. #[inline(always)] pub const fn from_rgbaf32_unchecked(red: f32, green: f32, blue: f32, alpha: f32) -> Color { Color { red, green, blue, alpha } } /// Return an opaque `Color` from given `f32` RGB channels. #[inline(always)] pub fn from_unassociated_alpha(red: f32, green: f32, blue: f32, alpha: f32) -> Color { Color::from_rgbaf32_unchecked(red * alpha, green * alpha, blue * alpha, alpha) } /// Return an opaque SDR `Color` given RGB channels from `0` to `255`, premultiplied by alpha. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgb8_srgb(0x72, 0x67, 0x62); /// let color2 = Color::from_rgba8_srgb(0x72, 0x67, 0x62, 0xFF); /// assert_eq!(color, color2) /// ``` #[inline(always)] pub fn from_rgb8_srgb(red: u8, green: u8, blue: u8) -> Color { Color::from_rgba8_srgb(red, green, blue, 255) } // TODO: Should this be premult? /// Return an SDR `Color` given RGBA channels from `0` to `255`, premultiplied by alpha. 
/// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgba8_srgb(0x72, 0x67, 0x62, 0x61); /// ``` #[inline(always)] pub fn from_rgba8_srgb(red: u8, green: u8, blue: u8, alpha: u8) -> Color { let map_range = |int_color| int_color as f32 / 255.; let red = map_range(red); let green = map_range(green); let blue = map_range(blue); let alpha = map_range(alpha); Color { red, green, blue, alpha }.to_linear_srgb().map_rgb(|channel| channel * alpha) } /// Create a [Color] from a hue, saturation, lightness and alpha (all between 0 and 1) /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_hsla(0.5, 0.2, 0.3, 1.); /// ``` pub fn from_hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Color { let temp1 = if lightness < 0.5 { lightness * (saturation + 1.) } else { lightness + saturation - lightness * saturation }; let temp2 = 2. * lightness - temp1; #[cfg(feature = "std")] let rem = |x: f32| x.rem_euclid(1.); #[cfg(not(feature = "std"))] let rem = |x: f32| x.rem_euclid(&1.); let mut red = rem(hue + 1. / 3.); let mut green = rem(hue); let mut blue = rem(hue - 1. / 3.); fn map_channel(channel: &mut f32, temp2: f32, temp1: f32) { *channel = if *channel * 6. < 1. { temp2 + (temp1 - temp2) * 6. * *channel } else if *channel * 2. < 1. { temp1 } else if *channel * 3. < 2. { temp2 + (temp1 - temp2) * (2. / 3. - *channel) * 6. } else { temp2 } .clamp(0., 1.); } map_channel(&mut red, temp2, temp1); map_channel(&mut green, temp2, temp1); map_channel(&mut blue, temp2, temp1); Color { red, green, blue, alpha } } /// Return the `red` component. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// assert!(color.r() == 0.114); /// ``` #[inline(always)] pub fn r(&self) -> f32 { self.red } /// Return the `green` component. 
/// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// assert!(color.g() == 0.103); /// ``` #[inline(always)] pub fn g(&self) -> f32 { self.green } /// Return the `blue` component. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// assert!(color.b() == 0.98); /// ``` #[inline(always)] pub fn b(&self) -> f32 { self.blue } /// Return the `alpha` component without checking its expected `0.0` to `1.0` range. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// assert!(color.a() == 0.97); /// ``` #[inline(always)] pub fn a(&self) -> f32 { self.alpha } #[inline(always)] pub fn is_opaque(&self) -> bool { self.alpha > 1. - f32::EPSILON } #[inline(always)] pub fn average_rgb_channels(&self) -> f32 { (self.red + self.green + self.blue) / 3. } #[inline(always)] pub fn minimum_rgb_channels(&self) -> f32 { self.red.min(self.green).min(self.blue) } #[inline(always)] pub fn maximum_rgb_channels(&self) -> f32 { self.red.max(self.green).max(self.blue) } // From https://stackoverflow.com/a/56678483/775283 #[inline(always)] pub fn luminance_srgb(&self) -> f32 { 0.2126 * self.red + 0.7152 * self.green + 0.0722 * self.blue } // From https://en.wikipedia.org/wiki/Luma_(video)#Rec._601_luma_versus_Rec._709_luma_coefficients #[inline(always)] pub fn luminance_rec_601(&self) -> f32 { 0.299 * self.red + 0.587 * self.green + 0.114 * self.blue } // From https://en.wikipedia.org/wiki/Luma_(video)#Rec._601_luma_versus_Rec._709_luma_coefficients #[inline(always)] pub fn luminance_rec_601_rounded(&self) -> f32 { 0.3 * self.red + 0.59 * self.green + 0.11 * self.blue } // From https://stackoverflow.com/a/56678483/775283 #[inline(always)] pub fn luminance_perceptual(&self) -> f32 { let luminance = self.luminance_srgb(); if luminance <= 0.008856 { 
(luminance * 903.3) / 100. } else { (luminance.cbrt() * 116. - 16.) / 100. } } #[inline(always)] pub fn from_luminance(luminance: f32) -> Color { Color { red: luminance, green: luminance, blue: luminance, alpha: 1., } } #[inline(always)] pub fn with_luminance(&self, luminance: f32) -> Color { let delta = luminance - self.luminance_rec_601_rounded(); self.map_rgb(|c| (c + delta).clamp(0., 1.)) } #[inline(always)] pub fn saturation(&self) -> f32 { let max = (self.red).max(self.green).max(self.blue); let min = (self.red).min(self.green).min(self.blue); max - min } #[inline(always)] pub fn with_saturation(&self, saturation: f32) -> Color { let [hue, _, lightness, alpha] = self.to_hsla(); Color::from_hsla(hue, saturation, lightness, alpha) } pub fn with_alpha(&self, alpha: f32) -> Color { Color { red: self.red, green: self.green, blue: self.blue, alpha, } } pub fn with_red(&self, red: f32) -> Color { Color { red, green: self.green, blue: self.blue, alpha: self.alpha, } } pub fn with_green(&self, green: f32) -> Color { Color { red: self.red, green, blue: self.blue, alpha: self.alpha, } } pub fn with_blue(&self, blue: f32) -> Color { Color { red: self.red, green: self.green, blue, alpha: self.alpha, } } #[inline(always)] pub fn blend_normal(_c_b: f32, c_s: f32) -> f32 { c_s } #[inline(always)] pub fn blend_multiply(c_b: f32, c_s: f32) -> f32 { c_s * c_b } #[inline(always)] pub fn blend_darken(c_b: f32, c_s: f32) -> f32 { c_s.min(c_b) } #[inline(always)] pub fn blend_color_burn(c_b: f32, c_s: f32) -> f32 { if c_b == 1. { 1. } else if c_s == 0. { 0. } else { 1. - ((1. - c_b) / c_s).min(1.) } } #[inline(always)] pub fn blend_linear_burn(c_b: f32, c_s: f32) -> f32 { c_b + c_s - 1. } #[inline(always)] pub fn blend_darker_color(&self, other: Color) -> Color { if self.average_rgb_channels() <= other.average_rgb_channels() { *self } else { other } } #[inline(always)] pub fn blend_screen(c_b: f32, c_s: f32) -> f32 { 1. - (1. - c_s) * (1. 
- c_b) } #[inline(always)] pub fn blend_lighten(c_b: f32, c_s: f32) -> f32 { c_s.max(c_b) } #[inline(always)] pub fn blend_color_dodge(c_b: f32, c_s: f32) -> f32 { if c_s == 1. { 1. } else { (c_b / (1. - c_s)).min(1.) } } #[inline(always)] pub fn blend_linear_dodge(c_b: f32, c_s: f32) -> f32 { c_b + c_s } #[inline(always)] pub fn blend_lighter_color(&self, other: Color) -> Color { if self.average_rgb_channels() >= other.average_rgb_channels() { *self } else { other } } pub fn blend_softlight(c_b: f32, c_s: f32) -> f32 { if c_s <= 0.5 { c_b - (1. - 2. * c_s) * c_b * (1. - c_b) } else { let d = |x: f32| if x <= 0.25 { ((16. * x - 12.) * x + 4.) * x } else { x.sqrt() }; c_b + (2. * c_s - 1.) * (d(c_b) - c_b) } } pub fn blend_hardlight(c_b: f32, c_s: f32) -> f32 { if c_s <= 0.5 { Color::blend_multiply(2. * c_s, c_b) } else { Color::blend_screen(2. * c_s - 1., c_b) } } pub fn blend_vivid_light(c_b: f32, c_s: f32) -> f32 { if c_s <= 0.5 { Color::blend_color_burn(2. * c_s, c_b) } else { Color::blend_color_dodge(2. * c_s - 1., c_b) } } pub fn blend_linear_light(c_b: f32, c_s: f32) -> f32 { if c_s <= 0.5 { Color::blend_linear_burn(2. * c_s, c_b) } else { Color::blend_linear_dodge(2. * c_s - 1., c_b) } } pub fn blend_pin_light(c_b: f32, c_s: f32) -> f32 { if c_s <= 0.5 { Color::blend_darken(2. * c_s, c_b) } else { Color::blend_lighten(2. * c_s - 1., c_b) } } pub fn blend_hard_mix(c_b: f32, c_s: f32) -> f32 { if Color::blend_linear_light(c_b, c_s) < 0.5 { 0. } else { 1. } } pub fn blend_difference(c_b: f32, c_s: f32) -> f32 { (c_b - c_s).abs() } pub fn blend_exclusion(c_b: f32, c_s: f32) -> f32 { c_b + c_s - 2. * c_b * c_s } pub fn blend_subtract(c_b: f32, c_s: f32) -> f32 { c_b - c_s } pub fn blend_divide(c_b: f32, c_s: f32) -> f32 { if c_b == 0. { 1. 
} else { c_b / c_s } } pub fn blend_hue(&self, c_s: Color) -> Color { let sat_b = self.saturation(); let lum_b = self.luminance_rec_601(); c_s.with_saturation(sat_b).with_luminance(lum_b) } pub fn blend_saturation(&self, c_s: Color) -> Color { let sat_s = c_s.saturation(); let lum_b = self.luminance_rec_601(); self.with_saturation(sat_s).with_luminance(lum_b) } pub fn blend_color(&self, c_s: Color) -> Color { let lum_b = self.luminance_rec_601(); c_s.with_luminance(lum_b) } pub fn blend_luminosity(&self, c_s: Color) -> Color { let lum_s = c_s.luminance_rec_601(); self.with_luminance(lum_s) } /// Return the all components as a tuple, first component is red, followed by green, followed by blue, followed by alpha. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// assert_eq!(color.components(), (0.114, 0.103, 0.98, 0.97)); /// ``` #[inline(always)] pub fn components(&self) -> (f32, f32, f32, f32) { (self.red, self.green, self.blue, self.alpha) } /// Return an 8-character RGBA hex string (without a # prefix). Use this if the [`Color`] is in linear space. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgba8_srgb(0x52, 0x67, 0xFA, 0x61); // Premultiplied alpha /// assert_eq!("3240a261", color.to_rgba_hex_srgb()); // Equivalent hex incorporating premultiplied alpha /// ``` #[cfg(feature = "std")] pub fn to_rgba_hex_srgb(&self) -> String { let gamma = self.to_gamma_srgb(); format!( "{:02x?}{:02x?}{:02x?}{:02x?}", (gamma.r() * 255.) as u8, (gamma.g() * 255.) as u8, (gamma.b() * 255.) as u8, (gamma.a() * 255.) as u8, ) } /// Return a 6-character RGB hex string (without a # prefix). Use this if the [`Color`] is in linear space. 
/// ``` /// use core_types::color::Color; /// let color = Color::from_rgba8_srgb(0x52, 0x67, 0xFA, 0x61); // Premultiplied alpha /// assert_eq!("3240a2", color.to_rgb_hex_srgb()); // Equivalent hex incorporating premultiplied alpha /// ``` #[cfg(feature = "std")] pub fn to_rgb_hex_srgb(&self) -> String { self.to_gamma_srgb().to_rgb_hex_srgb_from_gamma() } /// Return a 6-character RGB hex string (without a # prefix). Use this if the [`Color`] is in gamma space. /// ``` /// use core_types::color::Color; /// let color = Color::from_rgba8_srgb(0x52, 0x67, 0xFA, 0x61); // Premultiplied alpha /// assert_eq!("3240a2", color.to_rgb_hex_srgb()); // Equivalent hex incorporating premultiplied alpha /// ``` #[cfg(feature = "std")] pub fn to_rgb_hex_srgb_from_gamma(&self) -> String { format!("{:02x?}{:02x?}{:02x?}", (self.r() * 255.) as u8, (self.g() * 255.) as u8, (self.b() * 255.) as u8) } /// Return the all components as a u8 slice, first component is red, followed by green, followed by blue, followed by alpha. Use this if the [`Color`] is in linear space. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// // TODO: Add test /// ``` #[inline(always)] pub fn to_rgba8_srgb(&self) -> [u8; 4] { let gamma = self.to_gamma_srgb(); [(gamma.red * 255.) as u8, (gamma.green * 255.) as u8, (gamma.blue * 255.) as u8, (gamma.alpha * 255.) as u8] } /// Return the all RGB components as a u8 slice, first component is red, followed by green, followed by blue. Use this if the [`Color`] is in linear space. /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap(); /// // TODO: Add test /// ``` #[inline(always)] pub fn to_rgb8_srgb(&self) -> [u8; 3] { let gamma = self.to_gamma_srgb(); [(gamma.red * 255.) as u8, (gamma.green * 255.) as u8, (gamma.blue * 255.) 
as u8] } // https://www.niwa.nu/2013/05/math-behind-colorspace-conversions-rgb-hsl/ /// Convert a [Color] to a hue, saturation, lightness and alpha (all between 0 and 1) /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_hsla(0.5, 0.2, 0.3, 1.).to_hsla(); /// ``` pub fn to_hsla(&self) -> [f32; 4] { let min_channel = self.red.min(self.green).min(self.blue); let max_channel = self.red.max(self.green).max(self.blue); let lightness = (min_channel + max_channel) / 2.; let saturation = if min_channel == max_channel { 0. } else if lightness <= 0.5 { (max_channel - min_channel) / (max_channel + min_channel) } else { (max_channel - min_channel) / (2. - max_channel - min_channel) }; let hue = if self.red >= self.green && self.red >= self.blue { (self.green - self.blue) / (max_channel - min_channel) } else if self.green >= self.red && self.green >= self.blue { 2. + (self.blue - self.red) / (max_channel - min_channel) } else { 4. + (self.red - self.green) / (max_channel - min_channel) } / 6.; #[cfg(feature = "std")] let hue = hue.rem_euclid(1.); #[cfg(not(feature = "std"))] let hue = hue.rem_euclid(&1.); [hue, saturation, lightness, self.alpha] } // TODO: Readd formatting /// Creates a color from a 8-character RGBA hex string (without a # prefix). /// /// # Examples /// ``` /// use core_types::color::Color; /// let color = Color::from_rgba_str("7C67FA61").unwrap(); /// ``` pub fn from_rgba_str(color_str: &str) -> Option<Color> { if color_str.len() != 8 { return None; } let r = u8::from_str_radix(&color_str[0..2], 16).ok()?; let g = u8::from_str_radix(&color_str[2..4], 16).ok()?; let b = u8::from_str_radix(&color_str[4..6], 16).ok()?; let a = u8::from_str_radix(&color_str[6..8], 16).ok()?; Some(Color::from_rgba8_srgb(r, g, b, a)) } /// Creates a color from a 6-character RGB hex string (without a # prefix). 
/// /// ``` /// use core_types::color::Color; /// let color = Color::from_rgb_str("7C67FA").unwrap(); /// ``` pub fn from_rgb_str(color_str: &str) -> Option<Color> { if color_str.len() != 6 { return None; } let r = u8::from_str_radix(&color_str[0..2], 16).ok()?; let g = u8::from_str_radix(&color_str[2..4], 16).ok()?; let b = u8::from_str_radix(&color_str[4..6], 16).ok()?; Some(Color::from_rgb8_srgb(r, g, b)) } /// Linearly interpolates between two colors based on t. /// /// T must be between 0 and 1. #[inline(always)] pub fn lerp(&self, other: &Color, t: f32) -> Self { assert!((0. ..=1.).contains(&t)); Color::from_rgbaf32_unchecked( self.red + ((other.red - self.red) * t), self.green + ((other.green - self.green) * t), self.blue + ((other.blue - self.blue) * t), self.alpha + ((other.alpha - self.alpha) * t), ) } #[inline(always)] pub fn gamma(&self, gamma: f32) -> Color { let gamma = gamma.max(0.0001); // From https://www.dfstudios.co.uk/articles/programming/image-programming-algorithms/image-processing-algorithms-part-6-gamma-correction/ let inverse_gamma = 1. / gamma; self.map_rgb(|c: f32| c.powf(inverse_gamma)) } #[inline(always)] pub fn to_linear_srgb(&self) -> Self { Self { red: Self::srgb_to_linear(self.red), green: Self::srgb_to_linear(self.green), blue: Self::srgb_to_linear(self.blue), alpha: self.alpha, } } #[inline(always)] pub fn to_gamma_srgb(&self) -> Self { Self { red: Self::linear_to_srgb(self.red), green: Self::linear_to_srgb(self.green), blue: Self::linear_to_srgb(self.blue), alpha: self.alpha, } } #[inline(always)] pub fn srgb_to_linear(channel: f32) -> f32 { if channel <= 0.04045 { channel / 12.92 } else { ((channel + 0.055) / 1.055).powf(2.4) } } #[inline(always)] pub fn linear_to_srgb(channel: f32) -> f32 { if channel <= 0.0031308 { channel * 12.92 } else { 1.055 * channel.powf(1. 
/ 2.4) - 0.055 } } #[inline(always)] pub fn map_rgba<F: Fn(f32) -> f32>(&self, f: F) -> Self { Self::from_rgbaf32_unchecked(f(self.r()), f(self.g()), f(self.b()), f(self.a())) } #[inline(always)] pub fn map_rgb<F: Fn(f32) -> f32>(&self, f: F) -> Self { Self::from_rgbaf32_unchecked(f(self.r()), f(self.g()), f(self.b()), self.a()) } #[inline(always)] pub fn apply_opacity(&self, opacity: f32) -> Self { Self::from_rgbaf32_unchecked(self.r() * opacity, self.g() * opacity, self.b() * opacity, self.a() * opacity) } #[inline(always)] pub fn to_associated_alpha(&self, alpha: f32) -> Self { Self { red: self.red * alpha, green: self.green * alpha, blue: self.blue * alpha, alpha: self.alpha * alpha, } } #[inline(always)] pub fn to_unassociated_alpha(&self) -> Self { if self.alpha == 0. { return *self; } let unmultiply = 1. / self.alpha; Self { red: self.red * unmultiply, green: self.green * unmultiply, blue: self.blue * unmultiply, alpha: self.alpha, } } #[inline(always)] pub fn blend_rgb<F: Fn(f32, f32) -> f32>(&self, other: Color, f: F) -> Self { let background = self.to_unassociated_alpha(); Color { red: f(background.red, other.red).clamp(0., 1.), green: f(background.green, other.green).clamp(0., 1.), blue: f(background.blue, other.blue).clamp(0., 1.), alpha: other.alpha, } } #[inline(always)] pub fn alpha_blend(&self, other: Color) -> Self { let inv_alpha = 1. 
- other.alpha; Self { red: self.red * inv_alpha + other.red, green: self.green * inv_alpha + other.green, blue: self.blue * inv_alpha + other.blue, alpha: self.alpha * inv_alpha + other.alpha, } } #[inline(always)] pub fn alpha_add(&self, other: Color) -> Self { Self { alpha: (self.alpha + other.alpha).clamp(0., 1.), ..*self } } #[inline(always)] pub fn alpha_subtract(&self, other: Color) -> Self { Self { alpha: (self.alpha - other.alpha).clamp(0., 1.), ..*self } } #[inline(always)] pub fn alpha_multiply(&self, other: Color) -> Self { Self { alpha: (self.alpha * other.alpha).clamp(0., 1.), ..*self } } #[inline(always)] pub const fn from_vec4(vec: Vec4) -> Self { Self { red: vec.x, green: vec.y, blue: vec.z, alpha: vec.w, } } #[inline(always)] pub fn to_vec4(&self) -> Vec4 { Vec4::new(self.red, self.green, self.blue, self.alpha) } } #[cfg(test)] mod tests { use super::*; #[test] fn hsl_roundtrip() { for (red, green, blue) in [ (24, 98, 118), (69, 11, 89), (54, 82, 38), (47, 76, 50), (25, 15, 73), (62, 57, 33), (55, 2, 18), (12, 3, 82), (91, 16, 98), (91, 39, 82), (97, 53, 32), (76, 8, 91), (54, 87, 19), (56, 24, 88), (14, 82, 34), (61, 86, 31), (73, 60, 75), (95, 79, 88), (13, 34, 4), (82, 84, 84), (255, 255, 178), ] { let col = Color::from_rgb8_srgb(red, green, blue); let [hue, saturation, lightness, alpha] = col.to_hsla(); let result = Color::from_hsla(hue, saturation, lightness, alpha); assert!((col.r() - result.r()) < f32::EPSILON * 100.); assert!((col.g() - result.g()) < f32::EPSILON * 100.); assert!((col.b() - result.b()) < f32::EPSILON * 100.); assert!((col.a() - result.a()) < f32::EPSILON * 100.); } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/lib.rs
node-graph/libraries/vector-types/src/lib.rs
#[macro_use] extern crate log; pub mod gradient; pub mod math; pub mod subpath; pub mod vector; // Re-export commonly used types at the crate root pub use core_types as gcore; pub use gradient::{GradientStops, GradientType}; pub use math::{QuadExt, RectExt}; pub use subpath::Subpath; pub use vector::Vector; pub use vector::reference_point::ReferencePoint; // Re-export dependencies that users of this crate will need pub use dyn_any; pub use glam; pub use kurbo;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/gradient.rs
node-graph/libraries/vector-types/src/gradient.rs
use core_types::{Color, render_complexity::RenderComplexity}; use dyn_any::DynAny; use glam::{DAffine2, DVec2}; #[derive(Default, PartialEq, Eq, Clone, Copy, Debug, Hash, serde::Serialize, serde::Deserialize, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum GradientType { #[default] Linear, Radial, } // TODO: Someday we could switch this to a Box[T] to avoid over-allocation // TODO: Use linear not gamma colors /// A list of colors associated with positions (in the range 0 to 1) along a gradient. #[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize, DynAny, specta::Type)] pub struct GradientStops(pub Vec<(f64, Color)>); impl std::hash::Hash for GradientStops { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.0.len().hash(state); self.0.iter().for_each(|(position, color)| { position.to_bits().hash(state); color.hash(state); }); } } impl Default for GradientStops { fn default() -> Self { Self(vec![(0., Color::BLACK), (1., Color::WHITE)]) } } impl RenderComplexity for GradientStops { fn render_complexity(&self) -> usize { 1 } } impl IntoIterator for GradientStops { type Item = (f64, Color); type IntoIter = std::vec::IntoIter<(f64, Color)>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a> IntoIterator for &'a GradientStops { type Item = &'a (f64, Color); type IntoIter = std::slice::Iter<'a, (f64, Color)>; fn into_iter(self) -> Self::IntoIter { self.0.iter() } } impl std::ops::Index<usize> for GradientStops { type Output = (f64, Color); fn index(&self, index: usize) -> &Self::Output { &self.0[index] } } impl std::ops::Deref for GradientStops { type Target = Vec<(f64, Color)>; fn deref(&self) -> &Self::Target { &self.0 } } impl std::ops::DerefMut for GradientStops { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl GradientStops { pub fn new(stops: Vec<(f64, Color)>) -> Self { let mut stops = Self(stops); stops.sort(); stops } pub fn evaluate(&self, t: f64) -> Color { if 
self.0.is_empty() { return Color::BLACK; } if t <= self.0[0].0 { return self.0[0].1; } if t >= self.0[self.0.len() - 1].0 { return self.0[self.0.len() - 1].1; } for i in 0..self.0.len() - 1 { let (t1, c1) = self.0[i]; let (t2, c2) = self.0[i + 1]; if t >= t1 && t <= t2 { let normalized_t = (t - t1) / (t2 - t1); return c1.lerp(&c2, normalized_t as f32); } } Color::BLACK } pub fn sort(&mut self) { self.0.sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); } pub fn reversed(&self) -> Self { Self(self.0.iter().rev().map(|(position, color)| (1. - position, *color)).collect()) } pub fn map_colors<F: Fn(&Color) -> Color>(&self, f: F) -> Self { Self(self.0.iter().map(|(position, color)| (*position, f(color))).collect()) } } /// A gradient fill. /// /// Contains the start and end points, along with the colors at varying points along the length. #[repr(C)] #[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize, DynAny, specta::Type)] pub struct Gradient { pub stops: GradientStops, pub gradient_type: GradientType, pub start: DVec2, pub end: DVec2, } impl Default for Gradient { fn default() -> Self { Self { stops: GradientStops::default(), gradient_type: GradientType::Linear, start: DVec2::new(0., 0.5), end: DVec2::new(1., 0.5), } } } impl std::hash::Hash for Gradient { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.stops.0.len().hash(state); [].iter() .chain(self.start.to_array().iter()) .chain(self.end.to_array().iter()) .chain(self.stops.0.iter().map(|(position, _)| position)) .for_each(|x| x.to_bits().hash(state)); self.stops.0.iter().for_each(|(_, color)| color.hash(state)); self.gradient_type.hash(state); } } impl std::fmt::Display for Gradient { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let round = |x: f64| (x * 1e3).round() / 1e3; let stops = self .stops .0 .iter() .map(|(position, color)| format!("[{}%: #{}]", round(position * 100.), color.to_rgba_hex_srgb())) .collect::<Vec<_>>() .join(", "); write!(f, "{} 
Gradient: {stops}", self.gradient_type) } } impl Gradient { /// Constructs a new gradient with the colors at 0 and 1 specified. pub fn new(start: DVec2, start_color: Color, end: DVec2, end_color: Color, gradient_type: GradientType) -> Self { let stops = GradientStops::new(vec![(0., start_color.to_gamma_srgb()), (1., end_color.to_gamma_srgb())]); Self { start, end, stops, gradient_type } } pub fn lerp(&self, other: &Self, time: f64) -> Self { let start = self.start + (other.start - self.start) * time; let end = self.end + (other.end - self.end) * time; let stops = self .stops .0 .iter() .zip(other.stops.0.iter()) .map(|((a_pos, a_color), (b_pos, b_color))| { let position = a_pos + (b_pos - a_pos) * time; let color = a_color.lerp(b_color, time as f32); (position, color) }) .collect::<Vec<_>>(); let stops = GradientStops::new(stops); let gradient_type = if time < 0.5 { self.gradient_type } else { other.gradient_type }; Self { start, end, stops, gradient_type } } /// Insert a stop into the gradient, the index if successful pub fn insert_stop(&mut self, mouse: DVec2, transform: DAffine2) -> Option<usize> { // Transform the start and end positions to the same coordinate space as the mouse. let (start, end) = (transform.transform_point2(self.start), transform.transform_point2(self.end)); // Calculate the new position by finding the closest point on the line let new_position = ((end - start).angle_to(mouse - start)).cos() * start.distance(mouse) / start.distance(end); // Don't insert point past end of line if !(0. 
..=1.).contains(&new_position) { return None; } // Compute the color of the inserted stop let get_color = |index: usize, time: f64| match (self.stops.0[index].1, self.stops.0.get(index + 1).map(|(_, c)| *c)) { // Lerp between the nearest colors if applicable (a, Some(b)) => a.lerp( &b, ((time - self.stops.0[index].0) / self.stops.0.get(index + 1).map(|end| end.0 - self.stops.0[index].0).unwrap_or_default()) as f32, ), // Use the start or the end color if applicable (v, _) => v, }; // Compute the correct index to keep the positions in order let mut index = 0; while self.stops.0.len() > index && self.stops.0[index].0 <= new_position { index += 1; } let new_color = get_color(index - 1, new_position); // Insert the new stop self.stops.0.insert(index, (new_position, new_color)); Some(index) } } impl core_types::bounds::BoundingBox for GradientStops { fn bounding_box(&self, _transform: DAffine2, _include_stroke: bool) -> core_types::bounds::RenderBoundingBox { core_types::bounds::RenderBoundingBox::Infinite } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/consts.rs
node-graph/libraries/vector-types/src/subpath/consts.rs
// Implementation constants /// Constant used to determine if `f64`s are equivalent. pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-3;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/transform.rs
node-graph/libraries/vector-types/src/subpath/transform.rs
use super::structs::Identifier; use super::*; use glam::{DAffine2, DVec2}; /// Functionality that transforms Subpaths, such as split, reduce, offset, etc. impl<PointId: Identifier> Subpath<PointId> { /// Returns [ManipulatorGroup]s with a reversed winding order. fn reverse_manipulator_groups(manipulator_groups: &[ManipulatorGroup<PointId>]) -> Vec<ManipulatorGroup<PointId>> { manipulator_groups .iter() .rev() .map(|group| ManipulatorGroup { anchor: group.anchor, in_handle: group.out_handle, out_handle: group.in_handle, id: PointId::new(), }) .collect::<Vec<ManipulatorGroup<PointId>>>() } /// Returns a [Subpath] with a reversed winding order. /// Note that a reversed closed subpath will start on the same manipulator group and simply wind the other direction pub fn reverse(&self) -> Subpath<PointId> { let mut reversed = Subpath::reverse_manipulator_groups(self.manipulator_groups()); if self.closed { reversed.rotate_right(1); }; Subpath { manipulator_groups: reversed, closed: self.closed, } } /// Apply a transformation to all of the [ManipulatorGroup]s in the [Subpath]. pub fn apply_transform(&mut self, affine_transform: DAffine2) { for manipulator_group in &mut self.manipulator_groups { manipulator_group.apply_transform(affine_transform); } } /// Returns a subpath that results from rotating this subpath around the origin by the given angle (in radians). pub fn rotate(&self, angle: f64) -> Subpath<PointId> { let mut rotated_subpath = self.clone(); let affine_transform: DAffine2 = DAffine2::from_angle(angle); rotated_subpath.apply_transform(affine_transform); rotated_subpath } /// Returns a subpath that results from rotating this subpath around the provided point by the given angle (in radians). 
pub fn rotate_about_point(&self, angle: f64, pivot: DVec2) -> Subpath<PointId> { // Translate before and after the rotation to account for the pivot let translate: DAffine2 = DAffine2::from_translation(pivot); let rotate: DAffine2 = DAffine2::from_angle(angle); let translate_inverse = translate.inverse(); let mut rotated_subpath = self.clone(); rotated_subpath.apply_transform(translate * rotate * translate_inverse); rotated_subpath } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/manipulators.rs
node-graph/libraries/vector-types/src/subpath/manipulators.rs
// use super::consts::MAX_ABSOLUTE_DIFFERENCE; // use super::utils::{SubpathTValue}; use super::*; impl<PointId: super::structs::Identifier> Subpath<PointId> { /// Get whether the subpath is closed. pub fn closed(&self) -> bool { self.closed } /// Set whether the subpath is closed. pub fn set_closed(&mut self, new_closed: bool) { self.closed = new_closed; } /// Access a [ManipulatorGroup] from a PointId. pub fn manipulator_from_id(&self, id: PointId) -> Option<&ManipulatorGroup<PointId>> { self.manipulator_groups.iter().find(|manipulator_group| manipulator_group.id == id) } /// Access a mutable [ManipulatorGroup] from a PointId. pub fn manipulator_mut_from_id(&mut self, id: PointId) -> Option<&mut ManipulatorGroup<PointId>> { self.manipulator_groups.iter_mut().find(|manipulator_group| manipulator_group.id == id) } /// Access the index of a [ManipulatorGroup] from a PointId. pub fn manipulator_index_from_id(&self, id: PointId) -> Option<usize> { self.manipulator_groups.iter().position(|manipulator_group| manipulator_group.id == id) } /// Insert a manipulator group at an index. pub fn insert_manipulator_group(&mut self, index: usize, group: ManipulatorGroup<PointId>) { assert!(group.is_finite(), "Inserting non finite manipulator group"); self.manipulator_groups.insert(index, group) } /// Push a manipulator group to the end. pub fn push_manipulator_group(&mut self, group: ManipulatorGroup<PointId>) { assert!(group.is_finite(), "Pushing non finite manipulator group"); self.manipulator_groups.push(group) } /// Get a mutable reference to the last manipulator pub fn last_manipulator_group_mut(&mut self) -> Option<&mut ManipulatorGroup<PointId>> { self.manipulator_groups.last_mut() } /// Remove a manipulator group at an index. pub fn remove_manipulator_group(&mut self, index: usize) -> ManipulatorGroup<PointId> { self.manipulator_groups.remove(index) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/structs.rs
node-graph/libraries/vector-types/src/subpath/structs.rs
use crate::vector::algorithms::intersection::filtered_segment_intersections; use crate::vector::misc::{dvec2_to_point, handles_to_segment}; use glam::{DAffine2, DVec2}; use kurbo::{CubicBez, Line, PathSeg, QuadBez, Shape}; use std::fmt::{Debug, Formatter, Result}; use std::hash::Hash; /// An id type used for each [ManipulatorGroup]. pub trait Identifier: Sized + Clone + PartialEq + Hash + 'static { fn new() -> Self; } /// Structure used to represent a single anchor with up to two optional associated handles along a `Subpath` #[derive(Copy, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ManipulatorGroup<PointId: Identifier> { pub anchor: DVec2, pub in_handle: Option<DVec2>, pub out_handle: Option<DVec2>, pub id: PointId, } // TODO: Remove once we no longer need to hash floats in Graphite impl<PointId: Identifier> Hash for ManipulatorGroup<PointId> { fn hash<H: core::hash::Hasher>(&self, state: &mut H) { self.anchor.to_array().iter().for_each(|x| x.to_bits().hash(state)); self.in_handle.is_some().hash(state); if let Some(in_handle) = self.in_handle { in_handle.to_array().iter().for_each(|x| x.to_bits().hash(state)); } self.out_handle.is_some().hash(state); if let Some(out_handle) = self.out_handle { out_handle.to_array().iter().for_each(|x| x.to_bits().hash(state)); } self.id.hash(state); } } impl<PointId: Identifier> Debug for ManipulatorGroup<PointId> { fn fmt(&self, f: &mut Formatter<'_>) -> Result { f.debug_struct("ManipulatorGroup") .field("anchor", &self.anchor) .field("in_handle", &self.in_handle) .field("out_handle", &self.out_handle) .finish() } } impl<PointId: Identifier> ManipulatorGroup<PointId> { /// Construct a new manipulator group from an anchor, in handle and out handle pub fn new(anchor: DVec2, in_handle: Option<DVec2>, out_handle: Option<DVec2>) -> Self { let id = PointId::new(); Self { anchor, in_handle, out_handle, id } } /// Construct a new manipulator point with just an anchor position 
pub fn new_anchor(anchor: DVec2) -> Self { Self::new(anchor, Some(anchor), Some(anchor)) } pub fn new_anchor_linear(anchor: DVec2) -> Self { Self::new(anchor, None, None) } /// Construct a new manipulator group from an anchor, in handle, out handle and an id pub fn new_with_id(anchor: DVec2, in_handle: Option<DVec2>, out_handle: Option<DVec2>, id: PointId) -> Self { Self { anchor, in_handle, out_handle, id } } /// Construct a new manipulator point with just an anchor position and an id pub fn new_anchor_with_id(anchor: DVec2, id: PointId) -> Self { Self::new_with_id(anchor, Some(anchor), Some(anchor), id) } /// Create a bezier curve that starts at the current manipulator group and finishes in the `end_group` manipulator group. pub fn to_bezier(&self, end_group: &ManipulatorGroup<PointId>) -> PathSeg { let start = self.anchor; let end = end_group.anchor; let out_handle = self.out_handle; let in_handle = end_group.in_handle; match (out_handle, in_handle) { (Some(handle1), Some(handle2)) => PathSeg::Cubic(CubicBez::new(dvec2_to_point(start), dvec2_to_point(handle1), dvec2_to_point(handle2), dvec2_to_point(end))), (Some(handle), None) | (None, Some(handle)) => PathSeg::Quad(QuadBez::new(dvec2_to_point(start), dvec2_to_point(handle), dvec2_to_point(end))), (None, None) => PathSeg::Line(Line::new(dvec2_to_point(start), dvec2_to_point(end))), } } /// Apply a transformation to all of the [ManipulatorGroup] points pub fn apply_transform(&mut self, affine_transform: DAffine2) { self.anchor = affine_transform.transform_point2(self.anchor); self.in_handle = self.in_handle.map(|in_handle| affine_transform.transform_point2(in_handle)); self.out_handle = self.out_handle.map(|out_handle| affine_transform.transform_point2(out_handle)); } /// Are all handles at finite positions pub fn is_finite(&self) -> bool { self.anchor.is_finite() && self.in_handle.is_none_or(|handle| handle.is_finite()) && self.out_handle.is_none_or(|handle| handle.is_finite()) } /// Reverse directions of 
handles pub fn flip(mut self) -> Self { std::mem::swap(&mut self.in_handle, &mut self.out_handle); self } pub fn has_in_handle(&self) -> bool { self.in_handle.map(|handle| Self::has_handle(self.anchor, handle)).unwrap_or(false) } pub fn has_out_handle(&self) -> bool { self.out_handle.map(|handle| Self::has_handle(self.anchor, handle)).unwrap_or(false) } fn has_handle(anchor: DVec2, handle: DVec2) -> bool { !((handle.x - anchor.x).abs() < f64::EPSILON && (handle.y - anchor.y).abs() < f64::EPSILON) } } #[derive(Copy, Clone)] pub enum AppendType { IgnoreStart, SmoothJoin(f64), } #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub enum ArcType { Open, Closed, PieSlice, } /// Representation of the handle point(s) in a bezier segment. #[derive(Copy, Clone, PartialEq, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum BezierHandles { Linear, /// Handles for a quadratic curve. Quadratic { /// Point representing the location of the single handle. handle: DVec2, }, /// Handles for a cubic curve. Cubic { /// Point representing the location of the handle associated to the start point. handle_start: DVec2, /// Point representing the location of the handle associated to the end point. handle_end: DVec2, }, } impl std::hash::Hash for BezierHandles { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { std::mem::discriminant(self).hash(state); match self { BezierHandles::Linear => {} BezierHandles::Quadratic { handle } => handle.to_array().map(|v| v.to_bits()).hash(state), BezierHandles::Cubic { handle_start, handle_end } => [handle_start, handle_end].map(|handle| handle.to_array().map(|v| v.to_bits())).hash(state), } } } impl BezierHandles { pub fn is_cubic(&self) -> bool { matches!(self, Self::Cubic { .. 
}) } pub fn is_finite(&self) -> bool { match self { BezierHandles::Linear => true, BezierHandles::Quadratic { handle } => handle.is_finite(), BezierHandles::Cubic { handle_start, handle_end } => handle_start.is_finite() && handle_end.is_finite(), } } /// Get the coordinates of the bezier segment's first handle point. This represents the only handle in a quadratic segment. pub fn start(&self) -> Option<DVec2> { match *self { BezierHandles::Cubic { handle_start, .. } | BezierHandles::Quadratic { handle: handle_start } => Some(handle_start), _ => None, } } /// Get the coordinates of the second handle point. This will return `None` for a quadratic segment. pub fn end(&self) -> Option<DVec2> { match *self { BezierHandles::Cubic { handle_end, .. } => Some(handle_end), _ => None, } } pub fn move_start(&mut self, delta: DVec2) { if let BezierHandles::Cubic { handle_start, .. } | BezierHandles::Quadratic { handle: handle_start } = self { *handle_start += delta } } pub fn move_end(&mut self, delta: DVec2) { if let BezierHandles::Cubic { handle_end, .. } = self { *handle_end += delta } } /// Returns a Bezier curve that results from applying the transformation function to each handle point in the Bezier. #[must_use] pub fn apply_transformation(&self, transformation_function: impl Fn(DVec2) -> DVec2) -> Self { match *self { BezierHandles::Linear => Self::Linear, BezierHandles::Quadratic { handle } => { let handle = transformation_function(handle); Self::Quadratic { handle } } BezierHandles::Cubic { handle_start, handle_end } => { let handle_start = transformation_function(handle_start); let handle_end = transformation_function(handle_end); Self::Cubic { handle_start, handle_end } } } } #[must_use] pub fn reversed(self) -> Self { match self { BezierHandles::Cubic { handle_start, handle_end } => Self::Cubic { handle_start: handle_end, handle_end: handle_start, }, _ => self, } } } /// Representation of a bezier curve with 2D points. 
#[derive(Copy, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Bezier { /// Start point of the bezier curve. pub start: DVec2, /// End point of the bezier curve. pub end: DVec2, /// Handles of the bezier curve. pub handles: BezierHandles, } impl Debug for Bezier { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let mut debug_struct = f.debug_struct("Bezier"); let mut debug_struct_ref = debug_struct.field("start", &self.start); debug_struct_ref = match self.handles { BezierHandles::Linear => debug_struct_ref, BezierHandles::Quadratic { handle } => debug_struct_ref.field("handle", &handle), BezierHandles::Cubic { handle_start, handle_end } => debug_struct_ref.field("handle_start", &handle_start).field("handle_end", &handle_end), }; debug_struct_ref.field("end", &self.end).finish() } } /// Functionality for the getters and setters of the various points in a Bezier impl Bezier { /// Set the coordinates of the start point. pub fn set_start(&mut self, s: DVec2) { self.start = s; } /// Set the coordinates of the end point. pub fn set_end(&mut self, e: DVec2) { self.end = e; } /// Set the coordinates of the first handle point. This represents the only handle in a quadratic segment. If used on a linear segment, it will be changed to a quadratic. pub fn set_handle_start(&mut self, h1: DVec2) { match self.handles { BezierHandles::Linear => { self.handles = BezierHandles::Quadratic { handle: h1 }; } BezierHandles::Quadratic { ref mut handle } => { *handle = h1; } BezierHandles::Cubic { ref mut handle_start, .. } => { *handle_start = h1; } }; } /// Set the coordinates of the second handle point. This will convert both linear and quadratic segments into cubic ones. For a linear segment, the first handle will be set to the start point. 
pub fn set_handle_end(&mut self, h2: DVec2) { match self.handles { BezierHandles::Linear => { self.handles = BezierHandles::Cubic { handle_start: self.start, handle_end: h2, }; } BezierHandles::Quadratic { handle } => { self.handles = BezierHandles::Cubic { handle_start: handle, handle_end: h2 }; } BezierHandles::Cubic { ref mut handle_end, .. } => { *handle_end = h2; } }; } /// Get the coordinates of the bezier segment's start point. pub fn start(&self) -> DVec2 { self.start } /// Get the coordinates of the bezier segment's end point. pub fn end(&self) -> DVec2 { self.end } /// Get the coordinates of the bezier segment's first handle point. This represents the only handle in a quadratic segment. pub fn handle_start(&self) -> Option<DVec2> { self.handles.start() } /// Get the coordinates of the second handle point. This will return `None` for a quadratic segment. pub fn handle_end(&self) -> Option<DVec2> { self.handles.end() } /// Get an iterator over the coordinates of all points in a vector. /// - For a linear segment, the order of the points will be: `start`, `end`. /// - For a quadratic segment, the order of the points will be: `start`, `handle`, `end`. /// - For a cubic segment, the order of the points will be: `start`, `handle_start`, `handle_end`, `end`. pub fn get_points(&self) -> impl Iterator<Item = DVec2> + use<> { match self.handles { BezierHandles::Linear => [self.start, self.end, DVec2::ZERO, DVec2::ZERO].into_iter().take(2), BezierHandles::Quadratic { handle } => [self.start, handle, self.end, DVec2::ZERO].into_iter().take(3), BezierHandles::Cubic { handle_start, handle_end } => [self.start, handle_start, handle_end, self.end].into_iter().take(4), } } // TODO: Consider removing this function /// Create a linear bezier using the provided coordinates as the start and end points. 
pub fn from_linear_coordinates(x1: f64, y1: f64, x2: f64, y2: f64) -> Self { Bezier { start: DVec2::new(x1, y1), handles: BezierHandles::Linear, end: DVec2::new(x2, y2), } } /// Create a linear bezier using the provided DVec2s as the start and end points. pub fn from_linear_dvec2(p1: DVec2, p2: DVec2) -> Self { Bezier { start: p1, handles: BezierHandles::Linear, end: p2, } } // TODO: Consider removing this function /// Create a quadratic bezier using the provided coordinates as the start, handle, and end points. pub fn from_quadratic_coordinates(x1: f64, y1: f64, x2: f64, y2: f64, x3: f64, y3: f64) -> Self { Bezier { start: DVec2::new(x1, y1), handles: BezierHandles::Quadratic { handle: DVec2::new(x2, y2) }, end: DVec2::new(x3, y3), } } /// Create a quadratic bezier using the provided DVec2s as the start, handle, and end points. pub fn from_quadratic_dvec2(p1: DVec2, p2: DVec2, p3: DVec2) -> Self { Bezier { start: p1, handles: BezierHandles::Quadratic { handle: p2 }, end: p3, } } // TODO: Consider removing this function /// Create a cubic bezier using the provided coordinates as the start, handles, and end points. #[allow(clippy::too_many_arguments)] pub fn from_cubic_coordinates(x1: f64, y1: f64, x2: f64, y2: f64, x3: f64, y3: f64, x4: f64, y4: f64) -> Self { Bezier { start: DVec2::new(x1, y1), handles: BezierHandles::Cubic { handle_start: DVec2::new(x2, y2), handle_end: DVec2::new(x3, y3), }, end: DVec2::new(x4, y4), } } /// Create a cubic bezier using the provided DVec2s as the start, handles, and end points. pub fn from_cubic_dvec2(p1: DVec2, p2: DVec2, p3: DVec2, p4: DVec2) -> Self { Bezier { start: p1, handles: BezierHandles::Cubic { handle_start: p2, handle_end: p3 }, end: p4, } } /// Returns a Bezier curve that results from applying the transformation function to each point in the Bezier. 
pub fn apply_transformation(&self, transformation_function: impl Fn(DVec2) -> DVec2) -> Bezier { Self { start: transformation_function(self.start), end: transformation_function(self.end), handles: self.handles.apply_transformation(transformation_function), } } pub fn intersections(&self, other: &Bezier, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<f64> { let this = handles_to_segment(self.start, self.handles, self.end); let other = handles_to_segment(other.start, other.handles, other.end); filtered_segment_intersections(this, other, accuracy, minimum_separation) } pub fn winding(&self, point: DVec2) -> i32 { let this = handles_to_segment(self.start, self.handles, self.end); this.winding(dvec2_to_point(point)) } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/core.rs
node-graph/libraries/vector-types/src/subpath/core.rs
use super::consts::*;
use super::*;
use crate::vector::misc::{SpiralType, point_to_dvec2};
use glam::DVec2;
use kurbo::PathSeg;
use std::f64::consts::TAU;

// The control points of a `kurbo::PathSeg` in `DVec2` form. The optional `p1`/`p2`
// are the handles that a lower-degree segment (line or quadratic) does not have.
pub struct PathSegPoints {
	pub p0: DVec2,
	pub p1: Option<DVec2>,
	pub p2: Option<DVec2>,
	pub p3: DVec2,
}

impl PathSegPoints {
	pub fn new(p0: DVec2, p1: Option<DVec2>, p2: Option<DVec2>, p3: DVec2) -> Self {
		Self { p0, p1, p2, p3 }
	}
}

/// Extract the control points of a path segment as `DVec2`s.
/// Note the asymmetry: a quadratic's single handle is stored in `p2` (not `p1`),
/// so it becomes the in-handle of the end anchor in `Subpath::from_bezier`.
pub fn pathseg_points(segment: PathSeg) -> PathSegPoints {
	match segment {
		PathSeg::Line(line) => PathSegPoints::new(point_to_dvec2(line.p0), None, None, point_to_dvec2(line.p1)),
		PathSeg::Quad(quad) => PathSegPoints::new(point_to_dvec2(quad.p0), None, Some(point_to_dvec2(quad.p1)), point_to_dvec2(quad.p2)),
		PathSeg::Cubic(cube) => PathSegPoints::new(point_to_dvec2(cube.p0), Some(point_to_dvec2(cube.p1)), Some(point_to_dvec2(cube.p2)), point_to_dvec2(cube.p3)),
	}
}

/// Functionality relating to core `Subpath` operations, such as constructors and `iter`.
impl<PointId: Identifier> Subpath<PointId> {
	/// Create a new `Subpath` using a list of [ManipulatorGroup]s.
	/// A `Subpath` with less than 2 [ManipulatorGroup]s may not be closed.
	// NOTE(review): the assert below only rejects *empty* closed subpaths, which is weaker
	// than the doc sentence above — confirm which invariant is intended.
	#[track_caller]
	pub fn new(manipulator_groups: Vec<ManipulatorGroup<PointId>>, closed: bool) -> Self {
		assert!(!closed || !manipulator_groups.is_empty(), "A closed Subpath must contain more than 0 ManipulatorGroups.");
		Self { manipulator_groups, closed }
	}

	/// Create a `Subpath` consisting of 2 manipulator groups from a `Bezier`.
	pub fn from_bezier(segment: PathSeg) -> Self {
		// `p1` becomes the out-handle of the start anchor; `p2` the in-handle of the end anchor.
		let PathSegPoints { p0, p1, p2, p3 } = pathseg_points(segment);
		Subpath::new(vec![ManipulatorGroup::new(p0, None, p1), ManipulatorGroup::new(p3, p2, None)], false)
	}

	/// Creates a subpath from a slice of [Bezier]. When two consecutive Beziers do not share an end and start point, this function
	/// resolves the discrepancy by simply taking the start-point of the second Bezier as the anchor of the Manipulator Group.
pub fn from_beziers(beziers: &[PathSeg], closed: bool) -> Self {
	// The condition requires strictly more than one segment, so the panic message now says so
	// (it previously claimed "at least 1 Bezier", which contradicted the `> 1` check).
	assert!(!closed || beziers.len() > 1, "A closed Subpath must contain more than 1 Bezier.");
	if beziers.is_empty() {
		return Subpath::new(vec![], closed);
	}
	let beziers: Vec<_> = beziers.iter().map(|b| pathseg_points(*b)).collect();
	let first = beziers.first().unwrap();
	let mut manipulator_groups = vec![ManipulatorGroup {
		anchor: first.p0,
		in_handle: None,
		out_handle: first.p1,
		id: PointId::new(),
	}];
	// Each interior anchor joins two consecutive segments: it takes its in-handle from the
	// previous segment and its anchor position / out-handle from the next one.
	let mut inner_groups: Vec<ManipulatorGroup<PointId>> = beziers
		.windows(2)
		.map(|bezier_pair| ManipulatorGroup {
			anchor: bezier_pair[1].p0,
			in_handle: bezier_pair[0].p2,
			out_handle: bezier_pair[1].p1,
			id: PointId::new(),
		})
		.collect::<Vec<ManipulatorGroup<PointId>>>();
	manipulator_groups.append(&mut inner_groups);

	let last = beziers.last().unwrap();
	if !closed {
		manipulator_groups.push(ManipulatorGroup {
			anchor: last.p3,
			in_handle: last.p2,
			out_handle: None,
			id: PointId::new(),
		});
		return Subpath::new(manipulator_groups, false);
	}

	// For a closed subpath the final segment ends back at the first anchor, so its end
	// handle becomes the first group's in-handle instead of creating a new group.
	manipulator_groups[0].in_handle = last.p2;
	Subpath::new(manipulator_groups, true)
}

/// Returns true if the `Subpath` contains no [ManipulatorGroup].
pub fn is_empty(&self) -> bool {
	self.manipulator_groups.is_empty()
}

/// Returns the number of [ManipulatorGroup]s contained within the `Subpath`.
pub fn len(&self) -> usize {
	self.manipulator_groups.len()
}

/// Returns the number of segments contained within the `Subpath`.
/// An open subpath has one fewer segment than it has manipulator groups.
pub fn len_segments(&self) -> usize {
	let mut number_of_curves = self.len();
	if !self.closed && number_of_curves > 0 {
		number_of_curves -= 1
	}
	number_of_curves
}

/// Returns a copy of the bezier segment at the given segment index, if this segment exists.
pub fn get_segment(&self, segment_index: usize) -> Option<PathSeg> {
	if segment_index >= self.len_segments() {
		return None;
	}
	// The modulo handles the closing segment of a closed subpath, which wraps back to group 0.
	Some(self[segment_index].to_bezier(&self[(segment_index + 1) % self.len()]))
}

/// Returns an iterator of the [Bezier]s along the `Subpath`.
pub fn iter(&self) -> SubpathIter<'_, PointId> {
	SubpathIter {
		subpath: self,
		index: 0,
		is_always_closed: false,
	}
}

/// Returns an iterator of the [Bezier]s along the `Subpath` always considering it as a closed subpath.
pub fn iter_closed(&self) -> SubpathIter<'_, PointId> {
	SubpathIter {
		subpath: self,
		index: 0,
		is_always_closed: true,
	}
}

/// Returns a slice of the [ManipulatorGroup]s in the `Subpath`.
pub fn manipulator_groups(&self) -> &[ManipulatorGroup<PointId>] {
	&self.manipulator_groups
}

/// Returns a mutable reference to the [ManipulatorGroup]s in the `Subpath`.
pub fn manipulator_groups_mut(&mut self) -> &mut Vec<ManipulatorGroup<PointId>> {
	&mut self.manipulator_groups
}

/// Returns a vector of all the anchors (DVec2) for this `Subpath`.
pub fn anchors(&self) -> Vec<DVec2> {
	self.manipulator_groups().iter().map(|group| group.anchor).collect()
}

/// Returns if the Subpath is equivalent to a single point.
/// All anchors must coincide (within `MAX_ABSOLUTE_DIFFERENCE`); an empty subpath is not a point.
pub fn is_point(&self) -> bool {
	if self.is_empty() {
		return false;
	}
	let point = self.manipulator_groups[0].anchor;
	self.manipulator_groups
		.iter()
		.all(|manipulator_group| manipulator_group.anchor.abs_diff_eq(point, MAX_ABSOLUTE_DIFFERENCE))
}

/// Construct a [Subpath] from an iter of anchor positions.
pub fn from_anchors(anchor_positions: impl IntoIterator<Item = DVec2>, closed: bool) -> Self {
	Self::new(anchor_positions.into_iter().map(|anchor| ManipulatorGroup::new_anchor(anchor)).collect(), closed)
}

/// Like [Self::from_anchors], but creates each group via `ManipulatorGroup::new_anchor_linear`.
pub fn from_anchors_linear(anchor_positions: impl IntoIterator<Item = DVec2>, closed: bool) -> Self {
	Self::new(anchor_positions.into_iter().map(|anchor| ManipulatorGroup::new_anchor_linear(anchor)).collect(), closed)
}

/// Constructs a rectangle with `corner1` and `corner2` as the two corners.
pub fn new_rect(corner1: DVec2, corner2: DVec2) -> Self {
	Self::from_anchors_linear([corner1, DVec2::new(corner2.x, corner1.y), corner2, DVec2::new(corner1.x, corner2.y)], true)
}

/// Constructs a rounded rectangle with `corner1` and `corner2` as the two corners and `corner_radii` as the radii of the corners: `[top_left, top_right, bottom_right, bottom_left]`.
pub fn new_rounded_rect(corner1: DVec2, corner2: DVec2, corner_radii: [f64; 4]) -> Self {
	// With all radii numerically zero this degenerates to an ordinary rectangle.
	if corner_radii.iter().all(|radii| radii.abs() < f64::EPSILON * 100.) {
		return Self::new_rect(corner1, corner2);
	}

	use std::f64::consts::{FRAC_1_SQRT_2, PI};

	// Build the two manipulator groups of one rounded corner: the arc endpoints lie at ±45°
	// from the corner direction, pulled back toward the rounding circle's center.
	let new_arc = |center: DVec2, corner: DVec2, radius: f64| -> Vec<ManipulatorGroup<PointId>> {
		let point1 = center + DVec2::from_angle(-PI * 0.25).rotate(corner - center) * FRAC_1_SQRT_2;
		let point2 = center + DVec2::from_angle(PI * 0.25).rotate(corner - center) * FRAC_1_SQRT_2;
		if radius == 0. {
			return vec![ManipulatorGroup::new_anchor(point1), ManipulatorGroup::new_anchor(point2)];
		}

		// Based on https://pomax.github.io/bezierinfo/#circles_cubic
		const HANDLE_OFFSET_FACTOR: f64 = 0.551784777779014;
		let handle_offset = radius * HANDLE_OFFSET_FACTOR;

		vec![
			ManipulatorGroup::new(point1, None, Some(point1 + handle_offset * (corner - point1).normalize())),
			ManipulatorGroup::new(point2, Some(point2 + handle_offset * (corner - point2).normalize()), None),
		]
	};

	// One arc per corner, in order: top-left, top-right, bottom-right, bottom-left.
	Self::new(
		[
			new_arc(DVec2::new(corner1.x + corner_radii[0], corner1.y + corner_radii[0]), DVec2::new(corner1.x, corner1.y), corner_radii[0]),
			new_arc(DVec2::new(corner2.x - corner_radii[1], corner1.y + corner_radii[1]), DVec2::new(corner2.x, corner1.y), corner_radii[1]),
			new_arc(DVec2::new(corner2.x - corner_radii[2], corner2.y - corner_radii[2]), DVec2::new(corner2.x, corner2.y), corner_radii[2]),
			new_arc(DVec2::new(corner1.x + corner_radii[3], corner2.y - corner_radii[3]), DVec2::new(corner1.x, corner2.y), corner_radii[3]),
		]
		.concat(),
		true,
	)
}

/// Constructs an ellipse with `corner1` and `corner2` as the two corners of the bounding box.
pub fn new_ellipse(corner1: DVec2, corner2: DVec2) -> Self {
	let size = (corner1 - corner2).abs();
	let center = (corner1 + corner2) / 2.;
	let top = DVec2::new(center.x, corner1.y);
	let bottom = DVec2::new(center.x, corner2.y);
	let left = DVec2::new(corner1.x, center.y);
	let right = DVec2::new(corner2.x, center.y);

	// Based on https://pomax.github.io/bezierinfo/#circles_cubic
	const HANDLE_OFFSET_FACTOR: f64 = 0.551784777779014;
	let handle_offset = size * HANDLE_OFFSET_FACTOR * 0.5;

	// Four cubic segments, one per quadrant, with handles parallel to the axes.
	let manipulator_groups = vec![
		ManipulatorGroup::new(top, Some(top - handle_offset * DVec2::X), Some(top + handle_offset * DVec2::X)),
		ManipulatorGroup::new(right, Some(right - handle_offset * DVec2::Y), Some(right + handle_offset * DVec2::Y)),
		ManipulatorGroup::new(bottom, Some(bottom + handle_offset * DVec2::X), Some(bottom - handle_offset * DVec2::X)),
		ManipulatorGroup::new(left, Some(left + handle_offset * DVec2::Y), Some(left - handle_offset * DVec2::Y)),
	];
	Self::new(manipulator_groups, true)
}

/// Constructs an arc by a `radius`, `angle_start` and `angle_size`. Angles must be in radians. Slice option makes it look like pie or pacman.
pub fn new_arc(radius: f64, start_angle: f64, sweep_angle: f64, arc_type: ArcType) -> Self {
	// Prevents glitches from numerical imprecision that have been observed during animation playback after about a minute
	let start_angle = start_angle % (std::f64::consts::TAU * 2.);
	let sweep_angle = sweep_angle % (std::f64::consts::TAU * 2.);

	// Normalize the sweep to a magnitude within one full turn, remembering its sign and
	// the original start so they can be reapplied afterwards.
	let original_start_angle = start_angle;
	let sweep_angle_sign = sweep_angle.signum();

	let mut start_angle = 0.;
	let mut sweep_angle = sweep_angle.abs();

	if ((sweep_angle / std::f64::consts::TAU).floor() as u32).is_multiple_of(2) {
		sweep_angle %= std::f64::consts::TAU;
	} else {
		start_angle = sweep_angle % std::f64::consts::TAU;
		sweep_angle = std::f64::consts::TAU - start_angle;
	}

	sweep_angle *= sweep_angle_sign;
	start_angle *= sweep_angle_sign;
	start_angle += original_start_angle;

	let closed = arc_type == ArcType::Closed;
	let slice = arc_type == ArcType::PieSlice;

	let center = DVec2::new(0., 0.);
	// One cubic segment per (at most) 45° of sweep.
	let segments = (sweep_angle.abs() / (std::f64::consts::PI / 4.)).ceil().max(1.) as usize;
	let step = sweep_angle / segments as f64;
	// Handle length factor for approximating a circular arc of `step` radians with one cubic.
	let factor = 4. / 3. * (step / 2.).sin() / (1. + (step / 2.).cos());

	let mut manipulator_groups = Vec::with_capacity(segments);
	let mut prev_in_handle = None;
	let mut prev_end = DVec2::new(0., 0.);

	for i in 0..segments {
		let start_angle = start_angle + step * i as f64;
		let end_angle = start_angle + step;
		let start_vec = DVec2::from_angle(start_angle);
		let end_vec = DVec2::from_angle(end_angle);

		let start = center + radius * start_vec;
		let end = center + radius * end_vec;

		// Handles point along the perpendicular (tangent) directions at each arc endpoint.
		let handle_start = start + start_vec.perp() * radius * factor;
		let handle_end = end - end_vec.perp() * radius * factor;

		manipulator_groups.push(ManipulatorGroup::new(start, prev_in_handle, Some(handle_start)));
		prev_in_handle = Some(handle_end);
		prev_end = end;
	}
	manipulator_groups.push(ManipulatorGroup::new(prev_end, prev_in_handle, None));

	if slice {
		// A pie slice adds the center as an extra anchor so both arc ends connect to it.
		manipulator_groups.push(ManipulatorGroup::new(center, None, None));
	}

	Self::new(manipulator_groups, closed || slice)
}

/// Constructs a regular polygon (ngon). Based on `sides` and `radius`, which is the distance from the center to any vertex.
pub fn new_regular_polygon(center: DVec2, sides: u64, radius: f64) -> Self {
	let sides = sides.max(3);
	let angle_increment = std::f64::consts::TAU / (sides as f64);
	let anchor_positions = (0..sides).map(|i| {
		// First vertex at the top (offset by -90°).
		let angle = (i as f64) * angle_increment - std::f64::consts::FRAC_PI_2;
		// NOTE(review): the `+ DVec2::ONE * radius` offset combined with the final `* 0.5`
		// shifts and halves the polygon — presumably to fit a box anchored at `center`;
		// confirm against callers before relying on the exact placement.
		let center = center + DVec2::ONE * radius;
		DVec2::new(center.x + radius * f64::cos(angle), center.y + radius * f64::sin(angle)) * 0.5
	});
	Self::from_anchors(anchor_positions, true)
}

/// Constructs a star polygon (n-star). See [new_regular_polygon], but with interspersed vertices at an `inner_radius`.
pub fn new_star_polygon(center: DVec2, sides: u64, radius: f64, inner_radius: f64) -> Self {
	let sides = sides.max(2);
	let angle_increment = 0.5 * std::f64::consts::TAU / (sides as f64);
	let anchor_positions = (0..sides * 2).map(|i| {
		let angle = (i as f64) * angle_increment - std::f64::consts::FRAC_PI_2;
		let center = center + DVec2::ONE * radius;
		// Even vertices lie on the outer radius, odd vertices on the inner radius.
		let r = if i % 2 == 0 { radius } else { inner_radius };
		DVec2::new(center.x + r * f64::cos(angle), center.y + r * f64::sin(angle)) * 0.5
	});
	Self::from_anchors(anchor_positions, true)
}

/// Constructs a line from `p1` to `p2`
pub fn new_line(p1: DVec2, p2: DVec2) -> Self {
	Self::from_anchors([p1, p2], false)
}

/// Constructs an open spiral of the given type, fitting one cubic segment per `delta_theta`
/// step of the polar angle from `start_angle` to `start_angle + turns * TAU`.
pub fn new_spiral(a: f64, outer_radius: f64, turns: f64, start_angle: f64, delta_theta: f64, spiral_type: SpiralType) -> Self {
	let mut manipulator_groups = Vec::new();
	let mut prev_in_handle = None;
	let theta_end = turns * std::f64::consts::TAU + start_angle;
	let b = calculate_b(a, turns, outer_radius, spiral_type);

	let mut theta = start_angle;
	while theta < theta_end {
		let theta_next = f64::min(theta + delta_theta, theta_end);
		let p0 = spiral_point(theta, a, b, spiral_type);
		let p3 = spiral_point(theta_next, a, b, spiral_type);
		let t0 = spiral_tangent(theta, a, b, spiral_type);
		let t1 = spiral_tangent(theta_next, a, b, spiral_type);
		// Place the handles one third of the segment's arc length along the end tangents.
		let arc_len = spiral_arc_length(theta, theta_next, a, b, spiral_type);
		let d = arc_len / 3.;
		let p1 = p0 + d * t0;
		let p2 = p3 - d * t1;
		manipulator_groups.push(ManipulatorGroup::new(p0, prev_in_handle, Some(p1)));
		prev_in_handle = Some(p2);

		// If final segment, end with anchor at theta_end
		if (theta_next - theta_end).abs() < f64::EPSILON {
			manipulator_groups.push(ManipulatorGroup::new(p3, prev_in_handle, None));
			break;
		}
		theta = theta_next;
	}

	Self::new(manipulator_groups, false)
}
}

/// Computes the spiral coefficient `b` so the spiral reaches `outer_radius` after `turns` turns.
pub fn calculate_b(a: f64, turns: f64, outer_radius: f64, spiral_type: SpiralType) -> f64 {
	match spiral_type {
		SpiralType::Archimedean => {
			let total_theta = turns * TAU;
			(outer_radius - a) / total_theta
		}
		SpiralType::Logarithmic => {
			let total_theta = turns * TAU;
			((outer_radius.abs() / a).ln()) / total_theta
		}
	}
}

/// Returns a point on the given spiral type at angle `theta`.
pub fn spiral_point(theta: f64, a: f64, b: f64, spiral_type: SpiralType) -> DVec2 {
	match spiral_type {
		SpiralType::Archimedean => archimedean_spiral_point(theta, a, b),
		SpiralType::Logarithmic => log_spiral_point(theta, a, b),
	}
}

/// Returns the tangent direction at angle `theta` for the given spiral type.
pub fn spiral_tangent(theta: f64, a: f64, b: f64, spiral_type: SpiralType) -> DVec2 {
	match spiral_type {
		SpiralType::Archimedean => archimedean_spiral_tangent(theta, a, b),
		SpiralType::Logarithmic => log_spiral_tangent(theta, a, b),
	}
}

/// Computes arc length between two angles for the given spiral type.
pub fn spiral_arc_length(theta_start: f64, theta_end: f64, a: f64, b: f64, spiral_type: SpiralType) -> f64 {
	match spiral_type {
		SpiralType::Archimedean => archimedean_spiral_arc_length(theta_start, theta_end, a, b),
		SpiralType::Logarithmic => log_spiral_arc_length(theta_start, theta_end, a, b),
	}
}

/// Returns a point on a logarithmic spiral at angle `theta`.
/// The y-coordinate is negated — presumably so positive angles wind clockwise in
/// y-down screen coordinates; confirm against callers.
pub fn log_spiral_point(theta: f64, a: f64, b: f64) -> DVec2 {
	let r = a * (b * theta).exp(); // a * e^(bθ)
	DVec2::new(r * theta.cos(), -r * theta.sin())
}

/// Computes arc length along a logarithmic spiral between two angles,
/// using the closed form (a/b)·√(1+b²)·(e^(b·θ₁) − e^(b·θ₀)).
pub fn log_spiral_arc_length(theta_start: f64, theta_end: f64, a: f64, b: f64) -> f64 {
	let factor = (1. + b * b).sqrt();
	(a / b) * factor * ((b * theta_end).exp() - (b * theta_start).exp())
}

/// Returns the tangent direction of a logarithmic spiral at angle `theta`.
pub fn log_spiral_tangent(theta: f64, a: f64, b: f64) -> DVec2 {
	let r = a * (b * theta).exp();
	let dx = r * (b * theta.cos() - theta.sin());
	let dy = r * (b * theta.sin() + theta.cos());
	// Normalized derivative; falls back to the x-axis for a zero-length derivative.
	DVec2::new(dx, -dy).normalize_or(DVec2::X)
}

/// Returns a point on an Archimedean spiral at angle `theta`.
pub fn archimedean_spiral_point(theta: f64, a: f64, b: f64) -> DVec2 {
	let r = a + b * theta;
	DVec2::new(r * theta.cos(), -r * theta.sin())
}

/// Returns the tangent direction of an Archimedean spiral at angle `theta`.
pub fn archimedean_spiral_tangent(theta: f64, a: f64, b: f64) -> DVec2 {
	let r = a + b * theta;
	let dx = b * theta.cos() - r * theta.sin();
	let dy = b * theta.sin() + r * theta.cos();
	DVec2::new(dx, -dy).normalize_or(DVec2::X)
}

/// Computes arc length along an Archimedean spiral between two angles.
pub fn archimedean_spiral_arc_length(theta_start: f64, theta_end: f64, a: f64, b: f64) -> f64 {
	archimedean_spiral_arc_length_origin(theta_end, a, b) - archimedean_spiral_arc_length_origin(theta_start, a, b)
}

/// Computes arc length from origin to a point on Archimedean spiral at angle `theta`.
pub fn archimedean_spiral_arc_length_origin(theta: f64, a: f64, b: f64) -> f64 {
	let r = a + b * theta;
	let sqrt_term = (r * r + b * b).sqrt();
	// NOTE(review): divides by `b` — a zero coefficient (e.g. outer_radius == a) yields
	// NaN/inf; confirm callers exclude that case.
	(r * sqrt_term + b * b * ((r + sqrt_term).ln())) / (2. * b)
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/solvers.rs
node-graph/libraries/vector-types/src/subpath/solvers.rs
use crate::subpath::{Identifier, Subpath};
use crate::vector::algorithms::bezpath_algorithms::bezpath_is_inside_bezpath;
use crate::vector::misc::dvec2_to_point;
use glam::DVec2;
use kurbo::{Affine, BezPath, Shape};

impl<PointId: Identifier> Subpath<PointId> {
	/// Returns `true` if `point` lies inside this subpath, via `kurbo::Shape::contains`
	/// on the converted `BezPath`.
	pub fn contains_point(&self, point: DVec2) -> bool {
		self.to_bezpath().contains(dvec2_to_point(point))
	}

	/// Converts this subpath into a `kurbo::BezPath`, emitting a cubic, quadratic, or line
	/// element per segment depending on which of the two handles are present.
	pub fn to_bezpath(&self) -> BezPath {
		let mut bezpath = kurbo::BezPath::new();
		let mut out_handle;

		let Some(first) = self.manipulator_groups.first() else { return bezpath };
		bezpath.move_to(dvec2_to_point(first.anchor));
		out_handle = first.out_handle;

		for manipulator in self.manipulator_groups.iter().skip(1) {
			match (out_handle, manipulator.in_handle) {
				(Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(manipulator.anchor)),
				(None, None) => bezpath.line_to(dvec2_to_point(manipulator.anchor)),
				(None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)),
				(Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)),
			}
			out_handle = manipulator.out_handle;
		}

		if self.closed {
			// Emit the final segment back to the first anchor before closing the path.
			match (out_handle, first.in_handle) {
				(Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(first.anchor)),
				(None, None) => bezpath.line_to(dvec2_to_point(first.anchor)),
				(None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)),
				(Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)),
			}
			bezpath.close_path();
		}
		bezpath
	}

	/// Returns `true` if this subpath is completely inside the `other` subpath.
pub fn is_inside_subpath(&self, other: &Subpath<PointId>, accuracy: Option<f64>, minimum_separation: Option<f64>) -> bool {
	bezpath_is_inside_bezpath(&self.to_bezpath(), &other.to_bezpath(), accuracy, minimum_separation)
}

/// Return the min and max corners that represent the bounding box of the subpath. Return `None` if the subpath is empty.
pub fn bounding_box(&self) -> Option<[DVec2; 2]> {
	self.iter()
		.map(|bezier| bezier.bounding_box())
		.map(|bbox| [DVec2::new(bbox.min_x(), bbox.min_y()), DVec2::new(bbox.max_x(), bbox.max_y())])
		.reduce(|bbox1, bbox2| [bbox1[0].min(bbox2[0]), bbox1[1].max(bbox2[1])])
}

/// Return the min and max corners that represent the bounding box of the subpath, after a given affine transform.
pub fn bounding_box_with_transform(&self, transform: glam::DAffine2) -> Option<[DVec2; 2]> {
	self.iter()
		// Transform each segment first so the bound is tight under the transform.
		.map(|bezier| (Affine::new(transform.to_cols_array()) * bezier).bounding_box())
		.map(|bbox| [DVec2::new(bbox.min_x(), bbox.min_y()), DVec2::new(bbox.max_x(), bbox.max_y())])
		.reduce(|bbox1, bbox2| [bbox1[0].min(bbox2[0]), bbox1[1].max(bbox2[1])])
}

/// Return the min and max corners that represent the loose bounding box of the subpath (bounding box of all handles and anchors).
pub fn loose_bounding_box(&self) -> Option<[DVec2; 2]> {
	self.manipulator_groups
		.iter()
		.flat_map(|group| [group.in_handle, group.out_handle, Some(group.anchor)])
		.flatten()
		.map(|pos| [pos, pos])
		.reduce(|bbox1, bbox2| [bbox1[0].min(bbox2[0]), bbox1[1].max(bbox2[1])])
}

/// Return the min and max corners that represent the loose bounding box of the subpath, after a given affine transform.
pub fn loose_bounding_box_with_transform(&self, transform: glam::DAffine2) -> Option<[DVec2; 2]> {
	self.manipulator_groups
		.iter()
		.flat_map(|group| [group.in_handle, group.out_handle, Some(group.anchor)])
		.flatten()
		.map(|pos| transform.transform_point2(pos))
		.map(|pos| [pos, pos])
		.reduce(|bbox1, bbox2| [bbox1[0].min(bbox2[0]), bbox1[1].max(bbox2[1])])
}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/lookup.rs
node-graph/libraries/vector-types/src/subpath/lookup.rs
use super::consts::MAX_ABSOLUTE_DIFFERENCE;
use super::*;
use crate::vector::algorithms::bezpath_algorithms::pathseg_length_centroid_and_length;
use crate::vector::algorithms::intersection::{filtered_all_segment_intersections, pathseg_self_intersections};
use core_types::math::polynomial::pathseg_to_parametric_polynomial;
use glam::DVec2;

impl<PointId: Identifier> Subpath<PointId> {
	/// Returns a list of `t` values that correspond to all the self intersection points of the subpath always considering it as a closed subpath. The index and `t` value of both will be returned that corresponds to a point.
	/// The points will be sorted based on their index and `t` respectively.
	/// - `error` - For intersections with non-linear beziers, `error` defines the threshold for bounding boxes to be considered an intersection point.
	/// - `minimum_separation`: the minimum difference two adjacent `t`-values must have when comparing adjacent `t`-values in sorted order.
	///
	/// If the comparison condition is not satisfied, the function takes the larger `t`-value of the two
	///
	/// **NOTE**: if an intersection were to occur within an `error` distance away from an anchor point, the algorithm will filter that intersection out.
	pub fn all_self_intersections(&self, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(usize, f64)> {
		let mut intersections_vec = Vec::new();
		let err = accuracy.unwrap_or(MAX_ABSOLUTE_DIFFERENCE);
		let num_curves = self.len();
		// TODO: optimization opportunity - this for-loop currently compares all intersections with all curve-segments in the subpath collection
		self.iter_closed().enumerate().for_each(|(i, other)| {
			// Self-intersections within a single segment.
			intersections_vec.extend(pathseg_self_intersections(other, accuracy, minimum_separation).iter().flat_map(|value| [(i, value.0), (i, value.1)]));
			// Intersections between this segment and every later segment.
			self.iter_closed().enumerate().skip(i + 1).for_each(|(j, curve)| {
				intersections_vec.extend(
					filtered_all_segment_intersections(curve, other, accuracy, minimum_separation)
						.iter()
						// Drop intersections at the shared anchors of adjacent segments,
						// including the wrap-around pair (last segment, first segment).
						.filter(|&value| (j != i + 1 || value.0 > err || (1. - value.1) > err) && (j != num_curves - 1 || i != 0 || value.1 > err || (1. - value.0) > err))
						.flat_map(|value| [(j, value.0), (i, value.1)]),
				);
			});
		});
		intersections_vec.sort_by(|a, b| a.partial_cmp(b).unwrap());
		intersections_vec
	}

	/// Return the area centroid, together with the area, of the `Subpath` always considering it as a closed subpath. The area will always be a positive value.
	///
	/// The area centroid is the center of mass for the area of a solid shape's interior.
	/// An infinitely flat material forming the subpath's closed shape would balance at this point.
	///
	/// It will return `None` if no manipulator is present. If the area is less than `error`, it will return `Some((DVec2::NAN, 0.))`.
	///
	/// Because the calculation of area and centroid for self-intersecting path requires finding the intersections, the following parameters are used:
	/// - `error` - For intersections with non-linear beziers, `error` defines the threshold for bounding boxes to be considered an intersection point.
	/// - `minimum_separation` - the minimum difference two adjacent `t`-values must have when comparing adjacent `t`-values in sorted order.
	///
	/// If the comparison condition is not satisfied, the function takes the larger `t`-value of the two.
	///
	/// **NOTE**: if an intersection were to occur within an `error` distance away from an anchor point, the algorithm will filter that intersection out.
	pub fn area_centroid_and_area(&self, error: Option<f64>, minimum_separation: Option<f64>) -> Option<(DVec2, f64)> {
		let all_intersections = self.all_self_intersections(error, minimum_separation);
		// Each self-intersection flips the orientation of the region being traced, so
		// the contribution sign alternates across intersections.
		let mut current_sign: f64 = 1.;
		let (x_sum, y_sum, area) = self
			.iter_closed()
			.enumerate()
			.map(|(index, bezier)| {
				// Integrate the centroid and area moments of each segment symbolically,
				// via its parametric polynomial representation.
				let (f_x, f_y) = pathseg_to_parametric_polynomial(bezier);
				let (f_x, f_y) = (f_x.as_size::<10>().unwrap(), f_y.as_size::<10>().unwrap());
				let f_y_prime = f_y.derivative();
				let f_x_prime = f_x.derivative();

				let f_xy = &f_x * &f_y;

				let mut x_part = &f_xy * &f_x_prime;
				let mut y_part = &f_xy * &f_y_prime;
				let mut area_part = &f_x * &f_y_prime;
				x_part.antiderivative_mut();
				y_part.antiderivative_mut();
				area_part.antiderivative_mut();

				let mut curve_sum_x = -current_sign * x_part.eval(0.);
				let mut curve_sum_y = -current_sign * y_part.eval(0.);
				let mut curve_sum_area = -current_sign * area_part.eval(0.);

				// Split the integral at every self-intersection on this segment,
				// flipping the sign at each crossing.
				for (_, t) in all_intersections.iter().filter(|(i, _)| *i == index) {
					curve_sum_x += 2. * current_sign * x_part.eval(*t);
					curve_sum_y += 2. * current_sign * y_part.eval(*t);
					curve_sum_area += 2. * current_sign * area_part.eval(*t);
					current_sign *= -1.;
				}

				curve_sum_x += current_sign * x_part.eval(1.);
				curve_sum_y += current_sign * y_part.eval(1.);
				curve_sum_area += current_sign * area_part.eval(1.);
				(-curve_sum_x, curve_sum_y, curve_sum_area)
			})
			.reduce(|(x1, y1, area1), (x2, y2, area2)| (x1 + x2, y1 + y2, area1 + area2))?;

		// Degenerate (near-zero-area) shapes have no meaningful centroid.
		if area.abs() < error.unwrap_or(MAX_ABSOLUTE_DIFFERENCE) {
			return Some((DVec2::NAN, 0.));
		}

		Some((DVec2::new(x_sum / area, y_sum / area), area.abs()))
	}

	/// Return the approximation of the length centroid, together with the length, of the `Subpath`.
	///
	/// The length centroid is the center of mass for the arc length of the solid shape's perimeter.
	/// An infinitely thin wire forming the subpath's closed shape would balance at this point.
	///
	/// It will return `None` if no manipulator is present.
	/// - `accuracy` is used to approximate the curve.
	/// - `always_closed` is to consider the subpath as closed always.
	pub fn length_centroid_and_length(&self, accuracy: Option<f64>, always_closed: bool) -> Option<(DVec2, f64)> {
		// Weight each segment's centroid by its length, then divide the accumulated
		// weighted sum by the total length.
		if always_closed { self.iter_closed() } else { self.iter() }
			.map(|bezier| pathseg_length_centroid_and_length(bezier, accuracy))
			.map(|(centroid, length)| (centroid * length, length))
			.reduce(|(centroid_part1, length1), (centroid_part2, length2)| (centroid_part1 + centroid_part2, length1 + length2))
			.map(|(centroid_part, length)| (centroid_part / length, length))
			.map(|(centroid_part, length)| (DVec2::new(centroid_part.x, centroid_part.y), length))
	}
}

#[cfg(test)]
mod test_centroid {
	use crate::vector::PointId;

	use super::*;

	#[test]
	fn centroid_rect() {
		let rect = Subpath::<PointId>::new_rect(DVec2::new(100., 100.), DVec2::new(300., 200.));
		let (centre, area) = rect.area_centroid_and_area(Some(1e-3), Some(1e-3)).unwrap();
		assert_eq!(area, 200. * 100.);
		assert_eq!(centre, DVec2::new(200., 150.))
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/subpath/mod.rs
node-graph/libraries/vector-types/src/subpath/mod.rs
mod consts;
mod core;
mod lookup;
mod manipulators;
mod solvers;
mod structs;
mod transform;

pub use core::*;
use kurbo::PathSeg;
use std::fmt::{Debug, Formatter, Result};
use std::ops::{Index, IndexMut};
pub use structs::*;

/// Structure used to represent a path composed of [Bezier] curves.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Subpath<PointId: Identifier> {
	manipulator_groups: Vec<ManipulatorGroup<PointId>>,
	pub closed: bool,
}

/// Iteration structure for iterating across each curve of a `Subpath`, using an intermediate `Bezier` representation.
pub struct SubpathIter<'a, PointId: Identifier> {
	// Index of the next segment to yield.
	index: usize,
	subpath: &'a Subpath<PointId>,
	// When set, the subpath is treated as closed regardless of its `closed` flag.
	is_always_closed: bool,
}

impl<PointId: Identifier> Index<usize> for Subpath<PointId> {
	type Output = ManipulatorGroup<PointId>;

	fn index(&self, index: usize) -> &Self::Output {
		assert!(index < self.len(), "Index out of bounds in trait Index of SubPath.");
		&self.manipulator_groups[index]
	}
}

impl<PointId: Identifier> IndexMut<usize> for Subpath<PointId> {
	fn index_mut(&mut self, index: usize) -> &mut Self::Output {
		assert!(index < self.len(), "Index out of bounds in trait IndexMut of SubPath.");
		&mut self.manipulator_groups[index]
	}
}

impl<PointId: Identifier> Iterator for SubpathIter<'_, PointId> {
	type Item = PathSeg;

	// Returns the Bezier representation of each `Subpath` segment, defined between a pair of adjacent manipulator points.
	fn next(&mut self) -> Option<Self::Item> {
		if self.subpath.is_empty() {
			return None;
		}
		let closed = if self.is_always_closed { true } else { self.subpath.closed };
		// An open subpath has len() - 1 segments; a closed one gains an extra segment
		// wrapping from the last group back to the first (via the modulo below).
		let len = self.subpath.len() - 1 + if closed { 1 } else { 0 };
		if self.index >= len {
			return None;
		}
		let start_index = self.index;
		let end_index = (self.index + 1) % self.subpath.len();
		self.index += 1;

		Some(self.subpath[start_index].to_bezier(&self.subpath[end_index]))
	}
}

impl<PointId: Identifier> Debug for Subpath<PointId> {
	fn fmt(&self, f: &mut Formatter<'_>) -> Result {
		f.debug_struct("Subpath").field("closed", &self.closed).field("manipulator_groups", &self.manipulator_groups).finish()
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/math/mod.rs
node-graph/libraries/vector-types/src/math/mod.rs
use crate::subpath::Bezier;
use crate::vector::misc::dvec2_to_point;
use core_types::math::quad::Quad;
use core_types::math::rect::Rect;
use kurbo::{Line, PathSeg};

pub trait QuadExt {
	// (The doc comments on these two traits were previously swapped: this one said
	// "rect" and `RectExt` said "quad".)
	/// Get all the edges in the quad as linear bezier curves
	fn bezier_lines(&self) -> impl Iterator<Item = Bezier> + '_;
	/// Get all the edges in the quad as kurbo line path segments
	fn to_lines(&self) -> impl Iterator<Item = PathSeg>;
}

impl QuadExt for Quad {
	fn bezier_lines(&self) -> impl Iterator<Item = Bezier> + '_ {
		self.all_edges().into_iter().map(|[start, end]| Bezier::from_linear_dvec2(start, end))
	}

	fn to_lines(&self) -> impl Iterator<Item = PathSeg> {
		self.all_edges().into_iter().map(|[start, end]| PathSeg::Line(Line::new(dvec2_to_point(start), dvec2_to_point(end))))
	}
}

pub trait RectExt {
	/// Get all the edges in the rect as linear bezier curves
	fn bezier_lines(&self) -> impl Iterator<Item = Bezier> + '_;
}

impl RectExt for Rect {
	fn bezier_lines(&self) -> impl Iterator<Item = Bezier> + '_ {
		self.edges().into_iter().map(|[start, end]| Bezier::from_linear_dvec2(start, end))
	}
}
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/vector_types.rs
node-graph/libraries/vector-types/src/vector/vector_types.rs
use super::misc::dvec2_to_point; use super::style::{PathStyle, Stroke}; pub use super::vector_attributes::*; use crate::subpath::{BezierHandles, ManipulatorGroup, Subpath}; use crate::vector::click_target::{ClickTargetType, FreePoint}; use crate::vector::misc::{HandleId, ManipulatorPointId}; use crate::vector::vector_modification::VectorExt; use core::borrow::Borrow; use core_types::Color; use core_types::bounds::{BoundingBox, RenderBoundingBox}; use core_types::render_complexity::RenderComplexity; use core_types::transform::Transform; use dyn_any::StaticType; use glam::{DAffine2, DVec2}; use kurbo::{Affine, BezPath, Rect, Shape}; use std::collections::HashMap; /// Represents vector graphics data, composed of Bézier curves in a path or mesh arrangement. /// /// Generic over `Upstream` to avoid circular dependency with the Graphic type. /// - Use `Vector<()>` for basic vectors without upstream tracking /// - Use `Vector<Option<Table<Graphic>>>` in the graphic crate for vectors with upstream layers #[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct Vector<Upstream> { pub style: PathStyle, /// A list of all manipulator groups (referenced in `subpaths`) that have colinear handles (where they're locked at 180° angles from one another). /// This gets read in `graph_operation_message_handler.rs` by calling `inputs.as_mut_slice()` (search for the string `"Shape does not have both `subpath` and `colinear_manipulators` inputs"` to find it). pub colinear_manipulators: Vec<[HandleId; 2]>, pub point_domain: PointDomain, pub segment_domain: SegmentDomain, pub region_domain: RegionDomain, /// Used to store the upstream group/folder of nested layers during destructive Boolean Operations (and other nodes with a similar effect) so that click targets can be preserved for the child layers. 
/// Without this, the tools would be working with a collapsed version of the data which has no reference to the original child layers that were booleaned together, resulting in the inner layers not being editable. #[serde(alias = "upstream_group")] pub upstream_data: Upstream, } unsafe impl<Upstream: 'static> StaticType for Vector<Upstream> { type Static = Self; } impl<Upstream: Default + 'static> Default for Vector<Upstream> { fn default() -> Self { Self { style: PathStyle::new(Some(Stroke::new(Some(Color::BLACK), 0.)), super::style::Fill::None), colinear_manipulators: Vec::new(), point_domain: PointDomain::new(), segment_domain: SegmentDomain::new(), region_domain: RegionDomain::new(), upstream_data: Upstream::default(), } } } impl<Upstream> std::hash::Hash for Vector<Upstream> { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.point_domain.hash(state); self.segment_domain.hash(state); self.region_domain.hash(state); self.style.hash(state); self.colinear_manipulators.hash(state); // We don't hash the upstream_data intentionally } } impl<Upstream> Vector<Upstream> { /// Add a subpath to this vector path. pub fn append_subpath(&mut self, subpath: impl Borrow<Subpath<PointId>>, preserve_id: bool) { let subpath: &Subpath<PointId> = subpath.borrow(); let stroke_id = StrokeId::ZERO; let mut point_id = self.point_domain.next_id(); let handles = |a: &ManipulatorGroup<_>, b: &ManipulatorGroup<_>| match (a.out_handle, b.in_handle) { (None, None) => BezierHandles::Linear, (Some(handle), None) | (None, Some(handle)) => BezierHandles::Quadratic { handle }, (Some(handle_start), Some(handle_end)) => BezierHandles::Cubic { handle_start, handle_end }, }; let [mut first_seg, mut last_seg] = [None, None]; let mut segment_id = self.segment_domain.next_id(); let mut last_point = None; let mut first_point = None; // Construct a bezier segment from the two manipulators on the subpath. 
for pair in subpath.manipulator_groups().windows(2) { let start = last_point.unwrap_or_else(|| { let id = if preserve_id && !self.point_domain.ids().contains(&pair[0].id) { pair[0].id } else { point_id.next_id() }; self.point_domain.push(id, pair[0].anchor); self.point_domain.ids().len() - 1 }); first_point = Some(first_point.unwrap_or(start)); let end = if preserve_id && !self.point_domain.ids().contains(&pair[1].id) { pair[1].id } else { point_id.next_id() }; let end_index = self.point_domain.ids().len(); self.point_domain.push(end, pair[1].anchor); let id = segment_id.next_id(); first_seg = Some(first_seg.unwrap_or(id)); last_seg = Some(id); self.segment_domain.push(id, start, end_index, handles(&pair[0], &pair[1]), stroke_id); last_point = Some(end_index); } let fill_id = FillId::ZERO; if subpath.closed() { if let (Some(last), Some(first), Some(first_id), Some(last_id)) = (subpath.manipulator_groups().last(), subpath.manipulator_groups().first(), first_point, last_point) { let id = segment_id.next_id(); first_seg = Some(first_seg.unwrap_or(id)); last_seg = Some(id); self.segment_domain.push(id, last_id, first_id, handles(last, first), stroke_id); } if let [Some(first_seg), Some(last_seg)] = [first_seg, last_seg] { self.region_domain.push(self.region_domain.next_id(), first_seg..=last_seg, fill_id); } } } pub fn append_free_point(&mut self, point: &FreePoint, preserve_id: bool) { let mut point_id = self.point_domain.next_id(); // Use the current point ID if it's not already in the domain, otherwise generate a new one let id = if preserve_id && !self.point_domain.ids().contains(&point.id) { point.id } else { point_id.next_id() }; self.point_domain.push(id, point.position); } /// Construct some new vector path from a single subpath with an identity transform and black fill. 
pub fn from_subpath(subpath: impl Borrow<Subpath<PointId>>) -> Self where Upstream: Default + 'static, { Self::from_subpaths([subpath], false) } /// Construct some new vector path from a single [`BezPath`] with an identity transform and black fill. pub fn from_bezpath(bezpath: BezPath) -> Self where Upstream: Default + 'static, { let mut vector = Self::default(); vector.append_bezpath(bezpath); vector } /// Construct some new vector path from subpaths with an identity transform and black fill. pub fn from_subpaths(subpaths: impl IntoIterator<Item = impl Borrow<Subpath<PointId>>>, preserve_id: bool) -> Self where Upstream: Default + 'static, { let mut vector = Self::default(); for subpath in subpaths.into_iter() { vector.append_subpath(subpath, preserve_id); } vector } pub fn from_target_types(target_types: impl IntoIterator<Item = impl Borrow<ClickTargetType>>, preserve_id: bool) -> Self where Upstream: Default + 'static, { let mut vector = Self::default(); for target_type in target_types.into_iter() { match target_type.borrow() { ClickTargetType::Subpath(subpath) => vector.append_subpath(subpath, preserve_id), ClickTargetType::FreePoint(point) => vector.append_free_point(point, preserve_id), } } vector } /// Compute the bounding boxes of the bezpaths without any transform pub fn bounding_box_rect(&self) -> Option<Rect> { self.bounding_box_with_transform_rect(DAffine2::IDENTITY) } pub fn close_subpaths(&mut self) { let segments_to_add: Vec<_> = self .build_stroke_path_iter() .filter(|(_, closed)| !closed) .filter_map(|(manipulator_groups, _)| { let (first, last) = manipulator_groups.first().zip(manipulator_groups.last())?; let (start, end) = self.point_domain.resolve_id(first.id).zip(self.point_domain.resolve_id(last.id))?; Some((start, end)) }) .collect(); for (start, end) in segments_to_add { let segment_id = self.segment_domain.next_id().next_id(); self.segment_domain.push(segment_id, start, end, BezierHandles::Linear, StrokeId::ZERO); } } /// Compute the 
bounding boxes of the subpaths without any transform pub fn bounding_box(&self) -> Option<[DVec2; 2]> { self.bounding_box_with_transform_rect(DAffine2::IDENTITY) .map(|rect| [DVec2::new(rect.x0, rect.y0), DVec2::new(rect.x1, rect.y1)]) } /// Compute the bounding boxes of the subpaths with the specified transform pub fn bounding_box_with_transform(&self, transform: DAffine2) -> Option<[DVec2; 2]> { self.bounding_box_with_transform_rect(transform) .map(|rect| [DVec2::new(rect.x0, rect.y0), DVec2::new(rect.x1, rect.y1)]) } /// Compute the bounding boxes of the bezpaths with the specified transform pub fn bounding_box_with_transform_rect(&self, transform: DAffine2) -> Option<Rect> { let combine = |r1: Rect, r2: Rect| r1.union(r2); self.stroke_bezpath_iter() .map(|mut bezpath| { bezpath.apply_affine(Affine::new(transform.to_cols_array())); bezpath.bounding_box() }) .reduce(combine) } /// Calculate the corners of the bounding box but with a nonzero size. /// /// If the layer bounds are `0` in either axis then they are changed to be `1`. 
pub fn nonzero_bounding_box(&self) -> [DVec2; 2] { let [bounds_min, mut bounds_max] = self.bounding_box().unwrap_or_default(); let bounds_size = bounds_max - bounds_min; if bounds_size.x < 1e-10 { bounds_max.x = bounds_min.x + 1.; } if bounds_size.y < 1e-10 { bounds_max.y = bounds_min.y + 1.; } [bounds_min, bounds_max] } /// Compute the pivot of the layer in layerspace (the coordinates of the subpaths) pub fn layerspace_pivot(&self, normalized_pivot: DVec2) -> DVec2 { let [bounds_min, bounds_max] = self.nonzero_bounding_box(); let bounds_size = bounds_max - bounds_min; bounds_min + bounds_size * normalized_pivot } pub fn start_point(&self) -> impl Iterator<Item = PointId> + '_ { self.segment_domain.start_point().iter().map(|&index| self.point_domain.ids()[index]) } pub fn end_point(&self) -> impl Iterator<Item = PointId> + '_ { self.segment_domain.end_point().iter().map(|&index| self.point_domain.ids()[index]) } pub fn push(&mut self, id: SegmentId, start: PointId, end: PointId, handles: (Option<DVec2>, Option<DVec2>), stroke: StrokeId) { let [Some(start), Some(end)] = [start, end].map(|id| self.point_domain.resolve_id(id)) else { return; }; let handles = match handles { (None, None) => BezierHandles::Linear, (None, Some(handle)) | (Some(handle), None) => BezierHandles::Quadratic { handle }, (Some(handle_start), Some(handle_end)) => BezierHandles::Cubic { handle_start, handle_end }, }; self.segment_domain.push(id, start, end, handles, stroke) } pub fn handles_mut(&mut self) -> impl Iterator<Item = (SegmentId, &mut BezierHandles, PointId, PointId)> { self.segment_domain .handles_mut() .map(|(id, handles, start, end)| (id, handles, self.point_domain.ids()[start], self.point_domain.ids()[end])) } pub fn segment_start_from_id(&self, segment: SegmentId) -> Option<PointId> { self.segment_domain.segment_start_from_id(segment).map(|index| self.point_domain.ids()[index]) } pub fn segment_end_from_id(&self, segment: SegmentId) -> Option<PointId> { 
self.segment_domain.segment_end_from_id(segment).map(|index| self.point_domain.ids()[index]) } /// Returns an array for the start and end points of a segment. pub fn points_from_id(&self, segment: SegmentId) -> Option<[PointId; 2]> { self.segment_domain.points_from_id(segment).map(|val| val.map(|index| self.point_domain.ids()[index])) } /// Attempts to find another point in the segment that is not the one passed in. pub fn other_point(&self, segment: SegmentId, current: PointId) -> Option<PointId> { let index = self.point_domain.resolve_id(current); index.and_then(|index| self.segment_domain.other_point(segment, index)).map(|index| self.point_domain.ids()[index]) } /// Gets all points connected to the current one but not including the current one. pub fn connected_points(&self, current: PointId) -> impl Iterator<Item = PointId> + '_ { let index = [self.point_domain.resolve_id(current)].into_iter().flatten(); index.flat_map(|index| self.segment_domain.connected_points(index).map(|index| self.point_domain.ids()[index])) } /// Returns the number of linear segments connected to the given point. pub fn connected_linear_segments(&self, point_id: PointId) -> usize { self.segment_bezier_iter() .filter(|(_, bez, start, end)| (*start == point_id || *end == point_id) && matches!(bez.handles, BezierHandles::Linear)) .count() } /// Get an array slice of all segment IDs. pub fn segment_ids(&self) -> &[SegmentId] { self.segment_domain.ids() } /// Enumerate all segments that start at the point. pub fn start_connected(&self, point: PointId) -> impl Iterator<Item = SegmentId> + '_ { let index = [self.point_domain.resolve_id(point)].into_iter().flatten(); index.flat_map(|index| self.segment_domain.start_connected(index)) } /// Enumerate all segments that end at the point. 
pub fn end_connected(&self, point: PointId) -> impl Iterator<Item = SegmentId> + '_ { let index = [self.point_domain.resolve_id(point)].into_iter().flatten(); index.flat_map(|index| self.segment_domain.end_connected(index)) } /// Enumerate all segments that start or end at a point, converting them to [`HandleId`s]. Note that the handles may not exist e.g. for a linear segment. pub fn all_connected(&self, point: PointId) -> impl Iterator<Item = HandleId> + '_ { let index = [self.point_domain.resolve_id(point)].into_iter().flatten(); index.flat_map(|index| self.segment_domain.all_connected(index)) } /// Enumerate the number of segments connected to a point. If a segment starts and ends at a point then it is counted twice. pub fn connected_count(&self, point: PointId) -> usize { self.point_domain.resolve_id(point).map_or(0, |point| self.segment_domain.connected_count(point)) } /// Enumerate the number of segments connected to a point. If a segment starts and ends at a point then it is counted twice. pub fn any_connected(&self, point: PointId) -> bool { self.point_domain.resolve_id(point).is_some_and(|point| self.segment_domain.any_connected(point)) } pub fn check_point_inside_shape(&self, transform: DAffine2, point: DVec2) -> bool { let number = self .stroke_bezpath_iter() .map(|mut bezpath| { // TODO: apply transform to points instead of modifying the paths bezpath.apply_affine(Affine::new(transform.to_cols_array())); bezpath.close_path(); let bbox = bezpath.bounding_box(); (bezpath, bbox) }) .filter(|(_, bbox)| bbox.contains(dvec2_to_point(point))) .map(|(bezpath, _)| bezpath.winding(dvec2_to_point(point))) .sum::<i32>(); // Non-zero fill rule number != 0 } /// Iterator over all anchor points. pub fn anchor_points(&self) -> impl Iterator<Item = PointId> + '_ { self.point_domain.ids().iter().copied() } /// Anchor points at the ends of open subpaths. These are points with exactly one connection by a segment to another anchor. 
pub fn anchor_endpoints(&self) -> impl Iterator<Item = PointId> + '_ { self.anchor_points().enumerate().filter(|&(index, _)| self.segment_domain.connected_count(index) == 1).map(|(_, id)| id) } /// Computes if all the connected handles are colinear for an anchor, or if that handle is colinear for a handle. pub fn colinear(&self, point: ManipulatorPointId) -> bool { let has_handle = |target| self.colinear_manipulators.iter().flatten().any(|&handle| handle == target); match point { ManipulatorPointId::Anchor(id) => { self.start_connected(id).all(|segment| has_handle(HandleId::primary(segment))) && self.end_connected(id).all(|segment| has_handle(HandleId::end(segment))) } ManipulatorPointId::PrimaryHandle(segment) => has_handle(HandleId::primary(segment)), ManipulatorPointId::EndHandle(segment) => has_handle(HandleId::end(segment)), } } pub fn other_colinear_handle(&self, handle: HandleId) -> Option<HandleId> where Upstream: 'static, { let pair = self.colinear_manipulators.iter().find(|pair| pair.contains(&handle))?; let other = pair.iter().copied().find(|&val| val != handle)?; if handle.to_manipulator_point().get_anchor(self) == other.to_manipulator_point().get_anchor(self) { Some(other) } else { None } } pub fn adjacent_segment(&self, manipulator_id: &ManipulatorPointId) -> Option<(PointId, SegmentId)> { match manipulator_id { ManipulatorPointId::PrimaryHandle(segment_id) => { // For start handle, find segments ending at our start point let (start_point_id, _, _) = self.segment_points_from_id(*segment_id)?; let start_index = self.point_domain.resolve_id(start_point_id)?; self.segment_domain.end_connected(start_index).find(|&id| id != *segment_id).map(|id| (start_point_id, id)).or(self .segment_domain .start_connected(start_index) .find(|&id| id != *segment_id) .map(|id| (start_point_id, id))) } ManipulatorPointId::EndHandle(segment_id) => { // For end handle, find segments starting at our end point let (_, end_point_id, _) = 
self.segment_points_from_id(*segment_id)?; let end_index = self.point_domain.resolve_id(end_point_id)?; self.segment_domain.start_connected(end_index).find(|&id| id != *segment_id).map(|id| (end_point_id, id)).or(self .segment_domain .end_connected(end_index) .find(|&id| id != *segment_id) .map(|id| (end_point_id, id))) } ManipulatorPointId::Anchor(_) => None, } } pub fn concat(&mut self, additional: &Self, transform_of_additional: DAffine2, collision_hash_seed: u64) { let point_map = additional .point_domain .ids() .iter() .filter(|id| self.point_domain.ids().contains(id)) .map(|&old| (old, old.generate_from_hash(collision_hash_seed))) .collect::<HashMap<_, _>>(); let segment_map = additional .segment_domain .ids() .iter() .filter(|id| self.segment_domain.ids().contains(id)) .map(|&old| (old, old.generate_from_hash(collision_hash_seed))) .collect::<HashMap<_, _>>(); let region_map = additional .region_domain .ids() .iter() .filter(|id| self.region_domain.ids().contains(id)) .map(|&old| (old, old.generate_from_hash(collision_hash_seed))) .collect::<HashMap<_, _>>(); let id_map = IdMap { point_offset: self.point_domain.ids().len(), point_map, segment_map, region_map, }; self.point_domain.concat(&additional.point_domain, transform_of_additional, &id_map); self.segment_domain.concat(&additional.segment_domain, transform_of_additional, &id_map); self.region_domain.concat(&additional.region_domain, transform_of_additional, &id_map); // TODO: properly deal with fills such as gradients self.style = additional.style.clone(); self.colinear_manipulators.extend(additional.colinear_manipulators.iter().copied()); } } impl<Upstream> BoundingBox for Vector<Upstream> { fn bounding_box(&self, transform: DAffine2, include_stroke: bool) -> RenderBoundingBox { if !include_stroke { // Just use the path bounds without stroke return match self.bounding_box_with_transform(transform) { Some(bounds) => RenderBoundingBox::Rectangle(bounds), None => RenderBoundingBox::None, }; } // Include 
stroke by adding offset based on stroke width let stroke_width = self.style.stroke().map(|s| s.weight()).unwrap_or_default(); let miter_limit = self.style.stroke().map(|s| s.join_miter_limit).unwrap_or(1.); let scale = transform.decompose_scale(); // Use the full line width to account for different styles of stroke caps let offset = DVec2::splat(stroke_width * scale.x.max(scale.y) * miter_limit); match self.bounding_box_with_transform(transform) { Some([a, b]) => RenderBoundingBox::Rectangle([a - offset, b + offset]), None => RenderBoundingBox::None, } } } impl<Upstream> RenderComplexity for Vector<Upstream> { fn render_complexity(&self) -> usize { self.segment_domain.ids().len() } } // Note: BoundingBox for Table<Vector> is handled by blanket impl in gcore #[cfg(test)] mod tests { use kurbo::{CubicBez, PathSeg, Point}; use super::*; fn assert_subpath_eq(generated: &[Subpath<PointId>], expected: &[Subpath<PointId>]) { assert_eq!(generated.len(), expected.len()); for (generated, expected) in generated.iter().zip(expected) { assert_eq!(generated.manipulator_groups().len(), expected.manipulator_groups().len()); assert_eq!(generated.closed(), expected.closed()); for (generated, expected) in generated.manipulator_groups().iter().zip(expected.manipulator_groups()) { assert_eq!(generated.in_handle, expected.in_handle); assert_eq!(generated.out_handle, expected.out_handle); assert_eq!(generated.anchor, expected.anchor); } } } #[test] fn construct_closed_subpath() { let circle = Subpath::new_ellipse(DVec2::NEG_ONE, DVec2::ONE); let vector: Vector<()> = Vector::from_subpath(&circle); assert_eq!(vector.point_domain.ids().len(), 4); let bezier_paths = vector.segment_iter().map(|(_, bezier, _, _)| bezier).collect::<Vec<_>>(); assert_eq!(bezier_paths.len(), 4); assert!(bezier_paths.iter().all(|&bezier| circle.iter().any(|original_bezier| original_bezier == bezier))); let generated = vector.stroke_bezier_paths().collect::<Vec<_>>(); assert_subpath_eq(&generated, &[circle]); } 
#[test] fn construct_open_subpath() { let bezier = PathSeg::Cubic(CubicBez::new(Point::ZERO, Point::new(-1., -1.), Point::new(1., 1.), Point::new(1., 0.))); let subpath = Subpath::from_bezier(bezier); let vector: Vector<()> = Vector::from_subpath(&subpath); assert_eq!(vector.point_domain.ids().len(), 2); let bezier_paths = vector.segment_iter().map(|(_, bezier, _, _)| bezier).collect::<Vec<_>>(); assert_eq!(bezier_paths, vec![bezier]); let generated = vector.stroke_bezier_paths().collect::<Vec<_>>(); assert_subpath_eq(&generated, &[subpath]); } #[test] fn construct_many_subpath() { let curve = PathSeg::Cubic(CubicBez::new(Point::ZERO, Point::new(-1., -1.), Point::new(1., 1.), Point::new(1., 0.))); let curve = Subpath::from_bezier(curve); let circle = Subpath::new_ellipse(DVec2::NEG_ONE, DVec2::ONE); let vector: Vector<()> = Vector::from_subpaths([&curve, &circle], false); assert_eq!(vector.point_domain.ids().len(), 6); let bezier_paths = vector.segment_iter().map(|(_, bezier, _, _)| bezier).collect::<Vec<_>>(); assert_eq!(bezier_paths.len(), 5); assert!(bezier_paths.iter().all(|&bezier| circle.iter().chain(curve.iter()).any(|original_bezier| original_bezier == bezier))); let generated = vector.stroke_bezier_paths().collect::<Vec<_>>(); assert_subpath_eq(&generated, &[curve, circle]); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/misc.rs
node-graph/libraries/vector-types/src/vector/misc.rs
use super::PointId; use super::algorithms::offset_subpath::MAX_ABSOLUTE_DIFFERENCE; use crate::subpath::{BezierHandles, ManipulatorGroup}; use crate::vector::{SegmentId, Vector}; use dyn_any::DynAny; use glam::DVec2; use kurbo::{BezPath, CubicBez, Line, ParamCurve, ParamCurveDeriv, PathSeg, Point, QuadBez}; use std::ops::Sub; /// Represents different geometric interpretations of calculating the centroid (center of mass). #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum CentroidType { /// The center of mass for the area of a solid shape's interior, as if made out of an infinitely flat material. #[default] Area, /// The center of mass for the arc length of a curved shape's perimeter, as if made out of an infinitely thin wire. Length, } pub trait AsU64 { fn as_u64(&self) -> u64; } impl AsU64 for u32 { fn as_u64(&self) -> u64 { *self as u64 } } impl AsU64 for u64 { fn as_u64(&self) -> u64 { *self } } impl AsU64 for f64 { fn as_u64(&self) -> u64 { *self as u64 } } pub trait AsI64 { fn as_i64(&self) -> i64; } impl AsI64 for u32 { fn as_i64(&self) -> i64 { *self as i64 } } impl AsI64 for u64 { fn as_i64(&self) -> i64 { *self as i64 } } impl AsI64 for f64 { fn as_i64(&self) -> i64 { *self as i64 } } #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum GridType { #[default] Rectangular = 0, Isometric, } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum ArcType { #[default] Open = 0, Closed, PieSlice, } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum MergeByDistanceAlgorithm { 
#[default] Spatial, Topological, } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum ExtrudeJoiningAlgorithm { All, #[default] Extrema, None, } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum PointSpacingType { #[default] /// The desired spacing distance between points. Separation, /// The exact number of points to span the path. Quantity, } pub fn point_to_dvec2(point: Point) -> DVec2 { DVec2 { x: point.x, y: point.y } } pub fn dvec2_to_point(value: DVec2) -> Point { Point { x: value.x, y: value.y } } pub fn get_line_endpoints(line: Line) -> (DVec2, DVec2) { (point_to_dvec2(line.p0), point_to_dvec2(line.p1)) } pub fn segment_to_handles(segment: &PathSeg) -> BezierHandles { match *segment { PathSeg::Line(_) => BezierHandles::Linear, PathSeg::Quad(QuadBez { p0: _, p1, p2: _ }) => BezierHandles::Quadratic { handle: point_to_dvec2(p1) }, PathSeg::Cubic(CubicBez { p0: _, p1, p2, p3: _ }) => BezierHandles::Cubic { handle_start: point_to_dvec2(p1), handle_end: point_to_dvec2(p2), }, } } pub fn handles_to_segment(start: DVec2, handles: BezierHandles, end: DVec2) -> PathSeg { match handles { BezierHandles::Linear => { let p0 = dvec2_to_point(start); let p1 = dvec2_to_point(end); PathSeg::Line(Line::new(p0, p1)) } BezierHandles::Quadratic { handle } => { let p0 = dvec2_to_point(start); let p1 = dvec2_to_point(handle); let p2 = dvec2_to_point(end); PathSeg::Quad(QuadBez::new(p0, p1, p2)) } BezierHandles::Cubic { handle_start, handle_end } => { let p0 = dvec2_to_point(start); let p1 = dvec2_to_point(handle_start); let p2 = dvec2_to_point(handle_end); let p3 = dvec2_to_point(end); PathSeg::Cubic(CubicBez::new(p0, p1, p2, p3)) } } } pub fn bezpath_from_manipulator_groups(manipulator_groups: 
&[ManipulatorGroup<PointId>], closed: bool) -> BezPath { let mut bezpath = kurbo::BezPath::new(); let mut out_handle; let Some(first) = manipulator_groups.first() else { return bezpath }; bezpath.move_to(dvec2_to_point(first.anchor)); out_handle = first.out_handle; for manipulator in manipulator_groups.iter().skip(1) { match (out_handle, manipulator.in_handle) { (Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(manipulator.anchor)), (None, None) => bezpath.line_to(dvec2_to_point(manipulator.anchor)), (None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)), (Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)), } out_handle = manipulator.out_handle; } if closed { match (out_handle, first.in_handle) { (Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(first.anchor)), (None, None) => bezpath.line_to(dvec2_to_point(first.anchor)), (None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)), (Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)), } bezpath.close_path(); } bezpath } pub fn bezpath_to_manipulator_groups(bezpath: &BezPath) -> (Vec<ManipulatorGroup<PointId>>, bool) { let mut manipulator_groups = Vec::<ManipulatorGroup<PointId>>::new(); let mut is_closed = false; for element in bezpath.elements() { let manipulator_group = match *element { kurbo::PathEl::MoveTo(point) => ManipulatorGroup::new(point_to_dvec2(point), None, None), kurbo::PathEl::LineTo(point) => ManipulatorGroup::new(point_to_dvec2(point), None, None), kurbo::PathEl::QuadTo(point, point1) => ManipulatorGroup::new(point_to_dvec2(point1), Some(point_to_dvec2(point)), None), kurbo::PathEl::CurveTo(point, point1, point2) => { if let Some(last_manipulator_group) = 
manipulator_groups.last_mut() { last_manipulator_group.out_handle = Some(point_to_dvec2(point)); } ManipulatorGroup::new(point_to_dvec2(point2), Some(point_to_dvec2(point1)), None) } kurbo::PathEl::ClosePath => { if let Some(last_manipulators) = manipulator_groups.pop() && let Some(first_manipulators) = manipulator_groups.first_mut() { first_manipulators.out_handle = last_manipulators.in_handle; } is_closed = true; break; } }; manipulator_groups.push(manipulator_group); } (manipulator_groups, is_closed) } /// Returns true if the [`PathSeg`] is equivalent to a line. /// /// This is different from simply checking if the segment is [`PathSeg::Line`] or [`PathSeg::Quad`] or [`PathSeg::Cubic`]. Bezier curve can also be a line if the control points are colinear to the start and end points. Therefore if the handles exceed the start and end point, it will still be considered as a line. pub fn is_linear(segment: PathSeg) -> bool { let is_colinear = |a: Point, b: Point, c: Point| -> bool { ((b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)).abs() < MAX_ABSOLUTE_DIFFERENCE }; match segment { PathSeg::Line(_) => true, PathSeg::Quad(QuadBez { p0, p1, p2 }) => is_colinear(p0, p1, p2), PathSeg::Cubic(CubicBez { p0, p1, p2, p3 }) => is_colinear(p0, p1, p3) && is_colinear(p0, p2, p3), } } /// Get an vec of all the points in a path segment. pub fn pathseg_points_vec(segment: PathSeg) -> Vec<Point> { match segment { PathSeg::Line(line) => [line.p0, line.p1].to_vec(), PathSeg::Quad(quad_bez) => [quad_bez.p0, quad_bez.p1, quad_bez.p2].to_vec(), PathSeg::Cubic(cubic_bez) => [cubic_bez.p0, cubic_bez.p1, cubic_bez.p2, cubic_bez.p3].to_vec(), } } /// Returns true if the corresponding points of the two [`PathSeg`]s are within the provided absolute value difference from each other. 
pub fn pathseg_abs_diff_eq(seg1: PathSeg, seg2: PathSeg, max_abs_diff: f64) -> bool { let seg1 = if is_linear(seg1) { PathSeg::Line(Line::new(seg1.start(), seg1.end())) } else { seg1 }; let seg2 = if is_linear(seg2) { PathSeg::Line(Line::new(seg2.start(), seg2.end())) } else { seg2 }; let seg1_points = pathseg_points_vec(seg1); let seg2_points = pathseg_points_vec(seg2); let cmp = |a: f64, b: f64| a.sub(b).abs() < max_abs_diff; seg1_points.len() == seg2_points.len() && seg1_points.into_iter().zip(seg2_points).all(|(a, b)| cmp(a.x, b.x) && cmp(a.y, b.y)) } pub trait Tangent { fn tangent_at(&self, t: f64) -> DVec2; fn tangent_at_start(&self) -> DVec2 { self.tangent_at(0.0) } fn tangent_at_end(&self) -> DVec2 { self.tangent_at(1.0) } } trait ControlPoints { type Points: AsRef<[Point]>; fn control_points(&self) -> Self::Points; } impl ControlPoints for kurbo::Line { type Points = [Point; 2]; fn control_points(&self) -> Self::Points { [self.p0, self.p1] } } impl ControlPoints for kurbo::QuadBez { type Points = [Point; 3]; fn control_points(&self) -> Self::Points { [self.p0, self.p1, self.p2] } } impl ControlPoints for kurbo::CubicBez { type Points = [Point; 4]; fn control_points(&self) -> Self::Points { [self.p0, self.p1, self.p2, self.p3] } } impl<T: ControlPoints + ParamCurveDeriv> Tangent for T { fn tangent_at(&self, t: f64) -> DVec2 { point_to_dvec2(self.deriv().eval(t)) } fn tangent_at_start(&self) -> DVec2 { let pts = self.control_points(); let pts = pts.as_ref(); let mut iter = pts.iter(); iter.next() .and_then(|&start| iter.find(|&&p| p != start).map(|&p| DVec2 { x: p.x - start.x, y: p.y - start.y })) .unwrap_or_default() } fn tangent_at_end(&self) -> DVec2 { let pts = self.control_points(); let pts = pts.as_ref(); let mut iter = pts.iter().rev(); iter.next() .and_then(|&end| iter.find(|&&p| p != end).map(|&p| DVec2 { x: end.x - p.x, y: end.y - p.y })) .unwrap_or_default() } } impl Tangent for kurbo::PathSeg { fn tangent_at(&self, t: f64) -> DVec2 { match self { 
PathSeg::Line(line) => line.tangent_at(t), PathSeg::Quad(quad) => quad.tangent_at(t), PathSeg::Cubic(cubic) => cubic.tangent_at(t), } } fn tangent_at_start(&self) -> DVec2 { match self { PathSeg::Line(line) => line.tangent_at_start(), PathSeg::Quad(quad) => quad.tangent_at_start(), PathSeg::Cubic(cubic) => cubic.tangent_at_start(), } } fn tangent_at_end(&self) -> DVec2 { match self { PathSeg::Line(line) => line.tangent_at_end(), PathSeg::Quad(quad) => quad.tangent_at_end(), PathSeg::Cubic(cubic) => cubic.tangent_at_end(), } } } /// A selectable part of a curve, either an anchor (start or end of a bézier) or a handle (doesn't necessarily go through the bézier but influences curvature). #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, DynAny, serde::Serialize, serde::Deserialize)] pub enum ManipulatorPointId { /// A control anchor - the start or end point of a bézier. Anchor(PointId), /// The handle for a bézier - the first handle on a cubic and the only handle on a quadratic. PrimaryHandle(SegmentId), /// The end handle on a cubic bézier. EndHandle(SegmentId), } impl ManipulatorPointId { /// Attempt to retrieve the manipulator position in layer space (no transformation applied). #[must_use] #[track_caller] pub fn get_position<Upstream: 'static>(&self, vector: &Vector<Upstream>) -> Option<DVec2> { match self { ManipulatorPointId::Anchor(id) => vector.point_domain.position_from_id(*id), ManipulatorPointId::PrimaryHandle(id) => vector.segment_from_id(*id).and_then(|bezier| bezier.handle_start()), ManipulatorPointId::EndHandle(id) => vector.segment_from_id(*id).and_then(|bezier| bezier.handle_end()), } } pub fn get_anchor_position<Upstream: 'static>(&self, vector: &Vector<Upstream>) -> Option<DVec2> { match self { ManipulatorPointId::EndHandle(_) | ManipulatorPointId::PrimaryHandle(_) => self.get_anchor(vector).and_then(|id| vector.point_domain.position_from_id(id)), _ => self.get_position(vector), } } /// Attempt to get a pair of handles. 
For an anchor this is the first two handles connected. For a handle it is self and the first opposing handle. #[must_use] pub fn get_handle_pair<Upstream: 'static>(self, vector: &Vector<Upstream>) -> Option<[HandleId; 2]> { match self { ManipulatorPointId::Anchor(point) => vector.all_connected(point).take(2).collect::<Vec<_>>().try_into().ok(), ManipulatorPointId::PrimaryHandle(segment) => { let point = vector.segment_domain.segment_start_from_id(segment)?; let current = HandleId::primary(segment); let other = vector.segment_domain.all_connected(point).find(|&value| value != current); other.map(|other| [current, other]) } ManipulatorPointId::EndHandle(segment) => { let point = vector.segment_domain.segment_end_from_id(segment)?; let current = HandleId::end(segment); let other = vector.segment_domain.all_connected(point).find(|&value| value != current); other.map(|other| [current, other]) } } } /// Finds all the connected handles of a point. /// For an anchor it is all the connected handles. /// For a handle it is all the handles connected to its corresponding anchor other than the current handle. pub fn get_all_connected_handles<Upstream: 'static>(self, vector: &Vector<Upstream>) -> Option<Vec<HandleId>> { match self { ManipulatorPointId::Anchor(point) => { let connected = vector.all_connected(point).collect::<Vec<_>>(); Some(connected) } ManipulatorPointId::PrimaryHandle(segment) => { let point = vector.segment_domain.segment_start_from_id(segment)?; let current = HandleId::primary(segment); let connected = vector.segment_domain.all_connected(point).filter(|&value| value != current).collect::<Vec<_>>(); Some(connected) } ManipulatorPointId::EndHandle(segment) => { let point = vector.segment_domain.segment_end_from_id(segment)?; let current = HandleId::end(segment); let connected = vector.segment_domain.all_connected(point).filter(|&value| value != current).collect::<Vec<_>>(); Some(connected) } } } /// Attempt to find the closest anchor. 
If self is already an anchor then it is just self. If it is a start or end handle, then the start or end point is chosen. #[must_use] pub fn get_anchor<Upstream: 'static>(self, vector: &Vector<Upstream>) -> Option<PointId> { match self { ManipulatorPointId::Anchor(point) => Some(point), ManipulatorPointId::PrimaryHandle(segment) => vector.segment_start_from_id(segment), ManipulatorPointId::EndHandle(segment) => vector.segment_end_from_id(segment), } } /// Attempt to convert self to a [`HandleId`], returning none for an anchor. #[must_use] pub fn as_handle(self) -> Option<HandleId> { match self { ManipulatorPointId::PrimaryHandle(segment) => Some(HandleId::primary(segment)), ManipulatorPointId::EndHandle(segment) => Some(HandleId::end(segment)), ManipulatorPointId::Anchor(_) => None, } } /// Attempt to convert self to an anchor, returning None for a handle. #[must_use] pub fn as_anchor(self) -> Option<PointId> { match self { ManipulatorPointId::Anchor(point) => Some(point), _ => None, } } pub fn get_segment(self) -> Option<SegmentId> { match self { ManipulatorPointId::PrimaryHandle(segment) | ManipulatorPointId::EndHandle(segment) => Some(segment), _ => None, } } } /// The type of handle found on a bézier curve. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, DynAny, serde::Serialize, serde::Deserialize)] pub enum HandleType { /// The first handle on a cubic bézier or the only handle on a quadratic bézier. Primary, /// The second handle on a cubic bézier. End, } /// Represents a primary or end handle found in a particular segment. 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, DynAny, serde::Serialize, serde::Deserialize)] pub struct HandleId { pub ty: HandleType, pub segment: SegmentId, } impl std::fmt::Display for HandleId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.ty { // I haven't checked if "out" and "in" are reversed, or are accurate translations of the "primary" and "end" terms used in the `HandleType` enum, so this naming is an assumption. HandleType::Primary => write!(f, "{} out", self.segment.inner()), HandleType::End => write!(f, "{} in", self.segment.inner()), } } } impl HandleId { /// Construct a handle for the first handle on a cubic bézier or the only handle on a quadratic bézier. #[must_use] pub const fn primary(segment: SegmentId) -> Self { Self { ty: HandleType::Primary, segment } } /// Construct a handle for the end handle on a cubic bézier. #[must_use] pub const fn end(segment: SegmentId) -> Self { Self { ty: HandleType::End, segment } } /// Convert to [`ManipulatorPointId`]. #[must_use] pub fn to_manipulator_point(self) -> ManipulatorPointId { match self.ty { HandleType::Primary => ManipulatorPointId::PrimaryHandle(self.segment), HandleType::End => ManipulatorPointId::EndHandle(self.segment), } } /// Calculate the magnitude of the handle from the anchor. pub fn length<Upstream: 'static>(self, vector: &Vector<Upstream>) -> f64 { let Some(anchor_position) = self.to_manipulator_point().get_anchor_position(vector) else { // TODO: This was previously an unwrap which was encountered, so this is a temporary way to avoid a crash return 0.; }; let handle_position = self.to_manipulator_point().get_position(vector); handle_position.map(|pos| (pos - anchor_position).length()).unwrap_or(f64::MAX) } /// Convert an end handle to the primary handle and a primary handle to an end handle. Note that the new handle may not exist (e.g. for a quadratic bézier). 
#[must_use] pub fn opposite(self) -> Self { match self.ty { HandleType::Primary => Self::end(self.segment), HandleType::End => Self::primary(self.segment), } } } #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Dropdown)] pub enum SpiralType { #[default] Archimedean, Logarithmic, }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/vector_attributes.rs
node-graph/libraries/vector-types/src/vector/vector_attributes.rs
use crate::subpath::{Bezier, BezierHandles, Identifier, ManipulatorGroup, Subpath}; use crate::vector::misc::{HandleId, Tangent, dvec2_to_point}; use crate::vector::vector_types::Vector; use dyn_any::DynAny; use fixedbitset::FixedBitSet; use glam::{DAffine2, DVec2}; use kurbo::{CubicBez, Line, PathSeg, QuadBez}; use std::collections::HashMap; use std::hash::{Hash, Hasher}; use std::iter::zip; /// A simple macro for creating strongly typed ids (to avoid confusion when passing around ids). macro_rules! create_ids { ($($id:ident),*) => { $( #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, DynAny)] #[derive(serde::Serialize, serde::Deserialize)] /// A strongly typed ID pub struct $id(u64); impl $id { pub const ZERO: $id = $id(0); /// Generate a new random id pub fn generate() -> Self { Self(core_types::uuid::generate_uuid()) } pub fn generate_from_hash(self, node_id: u64) -> Self { let mut hasher = std::hash::DefaultHasher::new(); node_id.hash(&mut hasher); self.hash(&mut hasher); let hash_value = hasher.finish(); Self(hash_value) } /// Gets the inner raw value. pub fn inner(self) -> u64 { self.0 } /// Adds one to the current value and returns the old value. Note that the ids are not going to be unique unless you use the largest id. pub fn next_id(&mut self) -> Self { self.0 += 1; *self } } )* }; } create_ids! { PointId, SegmentId, RegionId, StrokeId, FillId } /// A no-op hasher that allows writing u64s (the id type). #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct NoHash(Option<u64>); impl Hasher for NoHash { fn finish(&self) -> u64 { self.0.unwrap() } fn write(&mut self, _bytes: &[u8]) { unimplemented!() } fn write_u64(&mut self, i: u64) { debug_assert!(self.0.is_none()); self.0 = Some(i) } } /// A hash builder that builds the [`NoHash`] hasher. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct NoHashBuilder; impl std::hash::BuildHasher for NoHashBuilder { type Hasher = NoHash; fn build_hasher(&self) -> Self::Hasher { NoHash::default() } } #[derive(Clone, Debug, Default, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] /// Stores data which is per-point. Each point is merely a position and can be used in a point cloud or to for a bézier path. In future this will be extendable at runtime with custom attributes. pub struct PointDomain { id: Vec<PointId>, #[serde(alias = "positions")] pub(crate) position: Vec<DVec2>, } impl Hash for PointDomain { fn hash<H: Hasher>(&self, state: &mut H) { self.id.hash(state); self.position.iter().for_each(|pos| pos.to_array().map(|v| v.to_bits()).hash(state)); } } impl PointDomain { pub const fn new() -> Self { Self { id: Vec::new(), position: Vec::new() } } pub fn clear(&mut self) { self.id.clear(); self.position.clear(); } pub fn retain(&mut self, segment_domain: &mut SegmentDomain, f: impl Fn(&PointId) -> bool) { let mut keep = self.id.iter().map(&f); self.position.retain(|_| keep.next().unwrap_or_default()); // TODO(TrueDoctor): Consider using a prefix sum to avoid this Vec allocation (https://github.com/GraphiteEditor/Graphite/pull/1949#discussion_r1741711562) let mut id_map = Vec::with_capacity(self.ids().len()); let mut new_index = 0; for id in self.ids() { if f(id) { id_map.push(new_index); new_index += 1; } else { // A placeholder for invalid IDs. This is checked after the segment domain is modified. 
id_map.push(usize::MAX); } } let update_index = |index: &mut usize| *index = id_map[*index]; segment_domain.start_point.iter_mut().for_each(update_index); segment_domain.end_point.iter_mut().for_each(update_index); self.id.retain(f); } pub fn push(&mut self, id: PointId, position: DVec2) { if self.id.contains(&id) { return; } self.id.push(id); self.position.push(position); } pub fn push_unchecked(&mut self, id: PointId, position: DVec2) { self.id.push(id); self.position.push(position); } pub fn positions(&self) -> &[DVec2] { &self.position } pub fn positions_mut(&mut self) -> impl Iterator<Item = (PointId, &mut DVec2)> { self.id.iter().copied().zip(self.position.iter_mut()) } pub fn set_position(&mut self, index: usize, position: DVec2) { self.position[index] = position; } pub fn ids(&self) -> &[PointId] { &self.id } pub fn next_id(&self) -> PointId { self.ids().iter().copied().max_by(|a, b| a.0.cmp(&b.0)).map(|mut id| id.next_id()).unwrap_or(PointId::ZERO) } #[track_caller] pub fn position_from_id(&self, id: PointId) -> Option<DVec2> { let pos = self.resolve_id(id).map(|index| self.position[index]); if pos.is_none() { warn!("Resolving pos of invalid id"); } pos } pub fn resolve_id(&self, id: PointId) -> Option<usize> { self.id.iter().position(|&check_id| check_id == id) } pub fn concat(&mut self, other: &Self, transform: DAffine2, id_map: &IdMap) { self.id.extend(other.id.iter().map(|id| *id_map.point_map.get(id).unwrap_or(id))); self.position.extend(other.position.iter().map(|&pos| transform.transform_point2(pos))); } pub fn map_ids(&mut self, id_map: &IdMap) { self.id.iter_mut().for_each(|id| *id = *id_map.point_map.get(id).unwrap_or(id)); } pub fn transform(&mut self, transform: DAffine2) { for pos in &mut self.position { *pos = transform.transform_point2(*pos); } } pub fn len(&self) -> usize { self.id.len() } pub fn is_empty(&self) -> bool { self.id.is_empty() } /// Iterate over point IDs and positions pub fn iter(&self) -> impl Iterator<Item = (PointId, 
DVec2)> + '_ { self.ids().iter().copied().zip(self.positions().iter().copied()) } } #[derive(Clone, Debug, Default, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] /// Stores data which is per-segment. A segment is a bézier curve between two end points with a stroke. In future this will be extendable at runtime with custom attributes. pub struct SegmentDomain { #[serde(alias = "ids")] id: Vec<SegmentId>, start_point: Vec<usize>, end_point: Vec<usize>, handles: Vec<BezierHandles>, stroke: Vec<StrokeId>, } impl SegmentDomain { pub const fn new() -> Self { Self { id: Vec::new(), start_point: Vec::new(), end_point: Vec::new(), handles: Vec::new(), stroke: Vec::new(), } } pub fn clear(&mut self) { self.id.clear(); self.start_point.clear(); self.end_point.clear(); self.handles.clear(); self.stroke.clear(); } pub fn retain(&mut self, f: impl Fn(&SegmentId) -> bool, points_length: usize) { let additional_delete_ids = self .id .iter() .zip(&self.start_point) .zip(&self.end_point) .filter(|((_, start), end)| **start >= points_length || **end >= points_length) .map(|x| *x.0.0) .collect::<Vec<_>>(); let can_delete = || { let f = &f; let mut delete_iter = additional_delete_ids.iter().peekable(); move |id| { if delete_iter.peek() == Some(&id) { delete_iter.next(); false } else { f(id) } } }; let mut keep = self.id.iter().map(can_delete()); self.start_point.retain(|_| keep.next().unwrap_or_default()); let mut keep = self.id.iter().map(can_delete()); self.end_point.retain(|_| keep.next().unwrap_or_default()); let mut keep = self.id.iter().map(can_delete()); self.handles.retain(|_| keep.next().unwrap_or_default()); let mut keep = self.id.iter().map(can_delete()); self.stroke.retain(|_| keep.next().unwrap_or_default()); let mut delete_iter = additional_delete_ids.iter().peekable(); self.id.retain(move |id| { if delete_iter.peek() == Some(&id) { delete_iter.next(); false } else { f(id) } }); } pub fn ids(&self) -> &[SegmentId] { &self.id } pub fn next_id(&self) -> 
SegmentId { self.ids().iter().copied().max_by(|a, b| a.0.cmp(&b.0)).map(|mut id| id.next_id()).unwrap_or(SegmentId::ZERO) } pub fn start_point(&self) -> &[usize] { &self.start_point } pub fn end_point(&self) -> &[usize] { &self.end_point } pub fn set_start_point(&mut self, segment_index: usize, new: usize) { self.start_point[segment_index] = new; } pub fn set_end_point(&mut self, segment_index: usize, new: usize) { self.end_point[segment_index] = new; } pub fn set_handles(&mut self, segment_index: usize, new: BezierHandles) { self.handles[segment_index] = new; } pub fn handles(&self) -> &[BezierHandles] { &self.handles } pub fn stroke(&self) -> &[StrokeId] { &self.stroke } pub fn push(&mut self, id: SegmentId, start: usize, end: usize, handles: BezierHandles, stroke: StrokeId) { #[cfg(debug_assertions)] if self.id.contains(&id) { warn!("Tried to push an existing point to a point domain"); } self.id.push(id); self.start_point.push(start); self.end_point.push(end); self.handles.push(handles); self.stroke.push(stroke); } pub(crate) fn start_point_mut(&mut self) -> impl Iterator<Item = (SegmentId, &mut usize)> { self.id.iter().copied().zip(self.start_point.iter_mut()) } pub(crate) fn end_point_mut(&mut self) -> impl Iterator<Item = (SegmentId, &mut usize)> { self.id.iter().copied().zip(self.end_point.iter_mut()) } pub(crate) fn handles_mut(&mut self) -> impl Iterator<Item = (SegmentId, &mut BezierHandles, usize, usize)> { let nested = self.id.iter().zip(&mut self.handles).zip(&self.start_point).zip(&self.end_point); nested.map(|(((&a, b), &c), &d)| (a, b, c, d)) } pub fn handles_and_points_mut(&mut self) -> impl Iterator<Item = (&mut BezierHandles, &mut usize, &mut usize)> { let nested = self.handles.iter_mut().zip(&mut self.start_point).zip(&mut self.end_point); nested.map(|((a, b), c)| (a, b, c)) } pub fn stroke_mut(&mut self) -> impl Iterator<Item = (SegmentId, &mut StrokeId)> { self.id.iter().copied().zip(self.stroke.iter_mut()) } pub(crate) fn 
segment_start_from_id(&self, segment: SegmentId) -> Option<usize> { self.id_to_index(segment).and_then(|index| self.start_point.get(index)).copied() } pub(crate) fn segment_end_from_id(&self, segment: SegmentId) -> Option<usize> { self.id_to_index(segment).and_then(|index| self.end_point.get(index)).copied() } /// Returns an array for the start and end points of a segment. pub(crate) fn points_from_id(&self, segment: SegmentId) -> Option<[usize; 2]> { self.segment_start_from_id(segment).and_then(|start| self.segment_end_from_id(segment).map(|end| [start, end])) } /// Attempts to find another point in the segment that is not the one passed in. pub(crate) fn other_point(&self, segment: SegmentId, current: usize) -> Option<usize> { self.points_from_id(segment).and_then(|points| points.into_iter().find(|&point| point != current)) } /// Gets all points connected to the current one but not including the current one. pub(crate) fn connected_points(&self, current: usize) -> impl Iterator<Item = usize> + '_ { self.start_point.iter().zip(&self.end_point).filter_map(move |(&a, &b)| match (a == current, b == current) { (true, false) => Some(b), (false, true) => Some(a), _ => None, }) } /// Get index from ID by linear search. Takes `O(n)` time. 
fn id_to_index(&self, id: SegmentId) -> Option<usize> { debug_assert_eq!(self.id.len(), self.handles.len()); debug_assert_eq!(self.id.len(), self.start_point.len()); debug_assert_eq!(self.id.len(), self.end_point.len()); self.id.iter().position(|&check_id| check_id == id) } fn resolve_range(&self, range: &std::ops::RangeInclusive<SegmentId>) -> Option<std::ops::RangeInclusive<usize>> { match (self.id_to_index(*range.start()), self.id_to_index(*range.end())) { (Some(start), Some(end)) if start.max(end) < self.handles.len().min(self.id.len()).min(self.start_point.len()).min(self.end_point.len()) => Some(start..=end), _ => { warn!("Resolving range with invalid id"); None } } } pub fn concat(&mut self, other: &Self, transform: DAffine2, id_map: &IdMap) { self.id.extend(other.id.iter().map(|id| *id_map.segment_map.get(id).unwrap_or(id))); self.start_point.extend(other.start_point.iter().map(|&index| id_map.point_offset + index)); self.end_point.extend(other.end_point.iter().map(|&index| id_map.point_offset + index)); self.handles.extend(other.handles.iter().map(|handles| handles.apply_transformation(|p| transform.transform_point2(p)))); self.stroke.extend(&other.stroke); } pub fn map_ids(&mut self, id_map: &IdMap) { self.id.iter_mut().for_each(|id| *id = *id_map.segment_map.get(id).unwrap_or(id)); } pub fn transform(&mut self, transform: DAffine2) { for handles in &mut self.handles { *handles = handles.apply_transformation(|p| transform.transform_point2(p)); } } /// Enumerate all segments that start at the point. pub(crate) fn start_connected(&self, point: usize) -> impl Iterator<Item = SegmentId> + '_ { self.start_point.iter().zip(&self.id).filter(move |&(&found_point, _)| found_point == point).map(|(_, &seg)| seg) } /// Enumerate all segments that end at the point. 
pub(crate) fn end_connected(&self, point: usize) -> impl Iterator<Item = SegmentId> + '_ { self.end_point.iter().zip(&self.id).filter(move |&(&found_point, _)| found_point == point).map(|(_, &seg)| seg) } /// Enumerate all segments that start or end at a point, converting them to [`HandleId`s]. Note that the handles may not exist e.g. for a linear segment. pub(crate) fn all_connected(&self, point: usize) -> impl Iterator<Item = HandleId> + '_ { self.start_connected(point).map(HandleId::primary).chain(self.end_connected(point).map(HandleId::end)) } /// Enumerate the number of segments connected to a point. If a segment starts and ends at a point then it is counted twice. pub(crate) fn connected_count(&self, point: usize) -> usize { self.all_connected(point).count() } /// Enumerate the number of segments connected to a point. If a segment starts and ends at a point then it is counted twice. pub(crate) fn any_connected(&self, point: usize) -> bool { self.all_connected(point).next().is_some() } /// Iterates over segments in the domain. /// /// Tuple is: (id, start point, end point, handles) pub fn iter(&self) -> impl Iterator<Item = (SegmentId, usize, usize, BezierHandles)> + '_ { let ids = self.id.iter().copied(); let start_point = self.start_point.iter().copied(); let end_point = self.end_point.iter().copied(); let handles = self.handles.iter().copied(); zip(ids, zip(start_point, zip(end_point, handles))).map(|(id, (start_point, (end_point, handles)))| (id, start_point, end_point, handles)) } /// Iterates over segments in the domain, mutably. 
/// /// Tuple is: (id, start point, end point, handles) pub(crate) fn iter_mut(&mut self) -> impl Iterator<Item = (&mut SegmentId, &mut usize, &mut usize, &mut BezierHandles)> + '_ { let ids = self.id.iter_mut(); let start_point = self.start_point.iter_mut(); let end_point = self.end_point.iter_mut(); let handles = self.handles.iter_mut(); zip(ids, zip(start_point, zip(end_point, handles))).map(|(id, (start_point, (end_point, handles)))| (id, start_point, end_point, handles)) } pub fn pair_handles_and_points_mut_by_index(&mut self, index1: usize, index2: usize) -> (&mut BezierHandles, &mut usize, &mut usize, &mut BezierHandles, &mut usize, &mut usize) { // Use split_at_mut to avoid multiple mutable borrows of the same slice let (handles_first, handles_second) = self.handles.split_at_mut(index2.max(index1)); let (start_first, start_second) = self.start_point.split_at_mut(index2.max(index1)); let (end_first, end_second) = self.end_point.split_at_mut(index2.max(index1)); let (h1, h2) = if index1 < index2 { (&mut handles_first[index1], &mut handles_second[0]) } else { (&mut handles_second[0], &mut handles_first[index2]) }; let (sp1, sp2) = if index1 < index2 { (&mut start_first[index1], &mut start_second[0]) } else { (&mut start_second[0], &mut start_first[index2]) }; let (ep1, ep2) = if index1 < index2 { (&mut end_first[index1], &mut end_second[0]) } else { (&mut end_second[0], &mut end_first[index2]) }; (h1, sp1, ep1, h2, sp2, ep2) } } #[derive(Clone, Debug, Default, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)] /// Stores data which is per-region. A region is an enclosed area composed of a range of segments from the /// [`SegmentDomain`] that can be given a fill. In future this will be extendable at runtime with custom attributes. 
pub struct RegionDomain { #[serde(alias = "ids")] id: Vec<RegionId>, segment_range: Vec<std::ops::RangeInclusive<SegmentId>>, fill: Vec<FillId>, } impl RegionDomain { pub const fn new() -> Self { Self { id: Vec::new(), segment_range: Vec::new(), fill: Vec::new(), } } pub fn clear(&mut self) { self.id.clear(); self.segment_range.clear(); self.fill.clear(); } pub fn retain(&mut self, f: impl Fn(&RegionId) -> bool) { let mut keep = self.id.iter().map(&f); self.segment_range.retain(|_| keep.next().unwrap_or_default()); let mut keep = self.id.iter().map(&f); self.fill.retain(|_| keep.next().unwrap_or_default()); self.id.retain(&f); } /// Like [`Self::retain`] but also gives the function access to the segment range. /// /// Note that this function requires an allocation that `retain` avoids. pub fn retain_with_region(&mut self, f: impl Fn(&RegionId, &std::ops::RangeInclusive<SegmentId>) -> bool) { let keep = self.id.iter().zip(self.segment_range.iter()).map(|(id, range)| f(id, range)).collect::<Vec<_>>(); let mut iter = keep.iter().copied(); self.segment_range.retain(|_| iter.next().unwrap()); let mut iter = keep.iter().copied(); self.fill.retain(|_| iter.next().unwrap()); let mut iter = keep.iter().copied(); self.id.retain(|_| iter.next().unwrap()); } pub fn push(&mut self, id: RegionId, segment_range: std::ops::RangeInclusive<SegmentId>, fill: FillId) { if self.id.contains(&id) { warn!("Duplicate region"); return; } self.id.push(id); self.segment_range.push(segment_range); self.fill.push(fill); } fn _resolve_id(&self, id: RegionId) -> Option<usize> { self.id.iter().position(|&check_id| check_id == id) } pub fn next_id(&self) -> RegionId { self.id.iter().copied().max_by(|a, b| a.0.cmp(&b.0)).map(|mut id| id.next_id()).unwrap_or(RegionId::ZERO) } pub fn segment_range_mut(&mut self) -> impl Iterator<Item = (RegionId, &mut std::ops::RangeInclusive<SegmentId>)> { self.id.iter().copied().zip(self.segment_range.iter_mut()) } pub fn fill_mut(&mut self) -> impl Iterator<Item = 
(RegionId, &mut FillId)> { self.id.iter().copied().zip(self.fill.iter_mut()) } pub fn ids(&self) -> &[RegionId] { &self.id } pub fn segment_range(&self) -> &[std::ops::RangeInclusive<SegmentId>] { &self.segment_range } pub fn fill(&self) -> &[FillId] { &self.fill } pub fn concat(&mut self, other: &Self, _transform: DAffine2, id_map: &IdMap) { self.id.extend(other.id.iter().map(|id| *id_map.region_map.get(id).unwrap_or(id))); self.segment_range.extend( other .segment_range .iter() .map(|range| *id_map.segment_map.get(range.start()).unwrap_or(range.start())..=*id_map.segment_map.get(range.end()).unwrap_or(range.end())), ); self.fill.extend(&other.fill); } pub fn map_ids(&mut self, id_map: &IdMap) { self.id.iter_mut().for_each(|id| *id = *id_map.region_map.get(id).unwrap_or(id)); self.segment_range .iter_mut() .for_each(|range| *range = *id_map.segment_map.get(range.start()).unwrap_or(range.start())..=*id_map.segment_map.get(range.end()).unwrap_or(range.end())); } /// Iterates over regions in the domain. 
/// /// Tuple is: (id, segment_range, fill) pub fn iter(&self) -> impl Iterator<Item = (RegionId, std::ops::RangeInclusive<SegmentId>, FillId)> + '_ { let ids = self.id.iter().copied(); let segment_range = self.segment_range.iter().cloned(); let fill = self.fill.iter().copied(); zip(ids, zip(segment_range, fill)).map(|(id, (segment_range, fill))| (id, segment_range, fill)) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct HalfEdge { pub id: SegmentId, pub start: usize, pub end: usize, pub reverse: bool, } impl HalfEdge { pub fn new(id: SegmentId, start: usize, end: usize, reverse: bool) -> Self { Self { id, start, end, reverse } } pub fn reversed(&self) -> Self { Self { id: self.id, start: self.start, end: self.end, reverse: !self.reverse, } } pub fn normalize_direction(&self) -> Self { if self.reverse { Self { id: self.id, start: self.end, end: self.start, reverse: false, } } else { *self } } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct FoundSubpath { pub edges: Vec<HalfEdge>, } impl FoundSubpath { pub fn new(segments: Vec<HalfEdge>) -> Self { Self { edges: segments } } pub fn endpoints(&self) -> Option<(&HalfEdge, &HalfEdge)> { match (self.edges.first(), self.edges.last()) { (Some(first), Some(last)) => Some((first, last)), _ => None, } } pub fn push(&mut self, segment: HalfEdge) { self.edges.push(segment); } pub fn insert(&mut self, index: usize, segment: HalfEdge) { self.edges.insert(index, segment); } pub fn extend(&mut self, segments: impl IntoIterator<Item = HalfEdge>) { self.edges.extend(segments); } pub fn splice<I>(&mut self, range: std::ops::Range<usize>, replace_with: I) where I: IntoIterator<Item = HalfEdge>, { self.edges.splice(range, replace_with); } pub fn is_closed(&self) -> bool { match (self.edges.first(), self.edges.last()) { (Some(first), Some(last)) => first.start == last.end, _ => false, } } pub fn from_segment(segment: HalfEdge) -> Self { Self { edges: vec![segment] } } pub fn contains(&self, segment_id: 
SegmentId) -> bool { self.edges.iter().any(|s| s.id == segment_id) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] struct FaceSide { segment_index: usize, reversed: bool, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct FaceSideSet { set: FixedBitSet, } impl FaceSideSet { fn new(size: usize) -> Self { Self { set: FixedBitSet::with_capacity(size * 2), } } fn index(&self, side: FaceSide) -> usize { (side.segment_index << 1) | (side.reversed as usize) } fn insert(&mut self, side: FaceSide) { self.set.insert(self.index(side)); } fn contains(&self, side: FaceSide) -> bool { self.set.contains(self.index(side)) } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct Faces { sides: Vec<FaceSide>, face_start: Vec<usize>, } #[derive(Debug, Clone, PartialEq, Hash)] pub struct FaceIterator<'a, Upstream> { vector: &'a Vector<Upstream>, faces: Faces, current_face: usize, } impl<Upstream> FaceIterator<'_, Upstream> { fn new<'a>(faces: Faces, vector: &'a Vector<Upstream>) -> FaceIterator<'a, Upstream> { FaceIterator { vector, faces, current_face: 0 } } fn get_point(&self, point: usize) -> kurbo::Point { dvec2_to_point(self.vector.point_domain.positions()[point]) } } impl<Upstream> Iterator for FaceIterator<'_, Upstream> { type Item = kurbo::BezPath; fn next(&mut self) -> Option<Self::Item> { let start_side = self.faces.face_start.get(self.current_face).copied()?; self.current_face += 1; let end_side = self.faces.face_start.get(self.current_face).copied().unwrap_or(self.faces.sides.len()); let mut path = kurbo::BezPath::new(); let segment_domain = &self.vector.segment_domain; let first_side = self.faces.sides.get(start_side)?; let start_point_index = if first_side.reversed { segment_domain.end_point[first_side.segment_index] } else { segment_domain.start_point[first_side.segment_index] }; path.move_to(self.get_point(start_point_index)); for side in &self.faces.sides[start_side..end_side] { let (handle, end_index) = match side.reversed { false => 
(segment_domain.handles[side.segment_index], segment_domain.end_point[side.segment_index]), true => (segment_domain.handles[side.segment_index].reversed(), segment_domain.start_point[side.segment_index]), }; let path_element = match handle { BezierHandles::Linear => kurbo::PathEl::LineTo(self.get_point(end_index)), BezierHandles::Quadratic { handle } => kurbo::PathEl::QuadTo(dvec2_to_point(handle), self.get_point(end_index)), BezierHandles::Cubic { handle_start, handle_end } => kurbo::PathEl::CurveTo(dvec2_to_point(handle_start), dvec2_to_point(handle_end), self.get_point(end_index)), }; path.push(path_element); } Some(path) } } impl Faces { pub fn new() -> Self { Self { sides: Vec::new(), face_start: Vec::new(), } } pub fn add_side(&mut self, side: FaceSide) { self.sides.push(side); } pub fn start_new_face(&mut self) { self.face_start.push(self.sides.len()); } pub fn backtrack(&mut self) { if let Some(last_start) = self.face_start.pop() { self.sides.truncate(last_start); } } } impl<Upstream> Vector<Upstream> { /// Construct a [`kurbo::PathSeg`] by resolving the points from their ids. fn path_segment_from_index(&self, start: usize, end: usize, handles: BezierHandles) -> PathSeg { let start = dvec2_to_point(self.point_domain.positions()[start]); let end = dvec2_to_point(self.point_domain.positions()[end]); match handles { BezierHandles::Linear => PathSeg::Line(Line::new(start, end)), BezierHandles::Quadratic { handle } => PathSeg::Quad(QuadBez::new(start, dvec2_to_point(handle), end)), BezierHandles::Cubic { handle_start, handle_end } => PathSeg::Cubic(CubicBez::new(start, dvec2_to_point(handle_start), dvec2_to_point(handle_end), end)), } } /// Construct a [`Bezier`] curve spanning from the resolved position of the start and end points with the specified handles. 
fn segment_to_bezier_with_index(&self, start: usize, end: usize, handles: BezierHandles) -> Bezier { let start = self.point_domain.positions()[start]; let end = self.point_domain.positions()[end]; Bezier { start, end, handles } } /// Tries to convert a segment with the specified id to a [`Bezier`], returning None if the id is invalid. pub fn segment_from_id(&self, id: SegmentId) -> Option<Bezier> { self.segment_points_from_id(id).map(|(_, _, bezier)| bezier) } /// Tries to convert a segment with the specified id to the start and end points and a [`Bezier`], returning None if the id is invalid. pub fn segment_points_from_id(&self, id: SegmentId) -> Option<(PointId, PointId, Bezier)> { Some(self.segment_points_from_index(self.segment_domain.id_to_index(id)?)) } /// Tries to convert a segment with the specified index to the start and end points and a [`Bezier`]. pub fn segment_points_from_index(&self, index: usize) -> (PointId, PointId, Bezier) { let start = self.segment_domain.start_point[index]; let end = self.segment_domain.end_point[index]; let start_id = self.point_domain.ids()[start]; let end_id = self.point_domain.ids()[end]; (start_id, end_id, self.segment_to_bezier_with_index(start, end, self.segment_domain.handles[index])) } /// Iterator over all of the [`Bezier`] following the order that they are stored in the segment domain, skipping invalid segments. pub fn segment_iter(&self) -> impl Iterator<Item = (SegmentId, PathSeg, PointId, PointId)> { let to_segment = |(((&handles, &id), &start), &end)| (id, self.path_segment_from_index(start, end, handles), self.point_domain.ids()[start], self.point_domain.ids()[end]); self.segment_domain .handles .iter() .zip(&self.segment_domain.id) .zip(self.segment_domain.start_point()) .zip(self.segment_domain.end_point()) .map(to_segment) } /// Iterator over all of the [`Bezier`] following the order that they are stored in the segment domain, skipping invalid segments. 
pub fn segment_bezier_iter(&self) -> impl Iterator<Item = (SegmentId, Bezier, PointId, PointId)> + '_ { let to_bezier = |(((&handles, &id), &start), &end)| (id, self.segment_to_bezier_with_index(start, end, handles), self.point_domain.ids()[start], self.point_domain.ids()[end]); self.segment_domain .handles .iter() .zip(&self.segment_domain.id) .zip(self.segment_domain.start_point()) .zip(self.segment_domain.end_point()) .map(to_bezier) } pub fn auto_join_paths(&self) -> Vec<FoundSubpath> { let segments = self.segment_domain.iter().map(|(id, start, end, _)| HalfEdge::new(id, start, end, false)); let mut paths: Vec<FoundSubpath> = Vec::new(); let mut current_path: Option<&mut FoundSubpath> = None; let mut previous: Option<(usize, usize)> = None; // First pass. Generates subpaths from continuous segments. for seg_ref in segments { let (start, end) = (seg_ref.start, seg_ref.end); if previous.is_some_and(|(_, prev_end)| start == prev_end) { if let Some(path) = current_path.as_mut() { path.push(seg_ref); } } else { paths.push(FoundSubpath::from_segment(seg_ref)); current_path = paths.last_mut(); } previous = Some((start, end)); } // Second pass. Try to join paths together. 
let mut joined_paths = Vec::new(); loop { let mut prev_index: Option<usize> = None; let original_len = paths.len(); for current in paths.into_iter() { // If there's no previous subpath, start a new one if prev_index.is_none() { joined_paths.push(current); prev_index = Some(joined_paths.len() - 1); continue; } let prev = &mut joined_paths[prev_index.unwrap()]; // Compare segment connections let (prev_first, prev_last) = prev.endpoints().unwrap(); let (cur_first, cur_last) = current.endpoints().unwrap(); // Join paths if the endpoints connect if prev_last.end == cur_first.start { prev.edges.extend(current.edges.into_iter().map(|s| s.normalize_direction())); } else if prev_first.start == cur_last.end { prev.edges.splice(0..0, current.edges.into_iter().rev().map(|s| s.normalize_direction())); } else if prev_last.end == cur_last.end { prev.edges.extend(current.edges.into_iter().rev().map(|s| s.reversed().normalize_direction())); } else if prev_first.start == cur_first.start { prev.edges.splice(0..0, current.edges.into_iter().map(|s| s.reversed().normalize_direction())); } else { // If not connected, start a new subpath joined_paths.push(current); prev_index = Some(joined_paths.len() - 1); } } // If no paths were joined in this pass, we're done if joined_paths.len() == original_len { return joined_paths; } // Repeat pass with newly joined paths paths = joined_paths; joined_paths = Vec::new(); } } /// Construct a [`Bezier`] curve from an iterator of segments with (handles, start point, end point) independently of discontinuities. 
pub fn subpath_from_segments_ignore_discontinuities(&self, segments: impl Iterator<Item = (BezierHandles, usize, usize)>) -> Option<Subpath<PointId>> { let mut first_point = None; let mut manipulators_list = Vec::new(); let mut last: Option<(usize, BezierHandles)> = None; for (handle, start, end) in segments { first_point = Some(first_point.unwrap_or(start)); manipulators_list.push(ManipulatorGroup { anchor: self.point_domain.positions()[start], in_handle: last.and_then(|(_, handle)| handle.end()), out_handle: handle.start(), id: self.point_domain.ids()[start], }); last = Some((end, handle)); } let closed = manipulators_list.len() > 1 && last.map(|(point, _)| point) == first_point; if let Some((end, last_handle)) = last { if closed { manipulators_list[0].in_handle = last_handle.end(); } else { manipulators_list.push(ManipulatorGroup { anchor: self.point_domain.positions()[end], in_handle: last_handle.end(), out_handle: None, id: self.point_domain.ids()[end], }); } } Some(Subpath::new(manipulators_list, closed)) } /// Construct a [`Bezier`] curve for each region, skipping invalid regions. pub fn region_manipulator_groups(&self) -> impl Iterator<Item = (RegionId, Vec<ManipulatorGroup<PointId>>)> + '_ { self.region_domain .id .iter() .zip(&self.region_domain.segment_range) .filter_map(|(&id, segment_range)| self.segment_domain.resolve_range(segment_range).map(|range| (id, range))) .filter_map(|(id, range)| { let segments_iter = self .segment_domain .handles .get(range.clone())? .iter() .zip(self.segment_domain.start_point.get(range.clone())?) .zip(self.segment_domain.end_point.get(range)?) .map(|((&handles, &start), &end)| (handles, start, end)); let mut manipulator_groups = Vec::new(); let mut in_handle = None; for segment in segments_iter { let (handles, start_point_index, _end_point_index) = segment;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
true
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/click_target.rs
node-graph/libraries/vector-types/src/vector/click_target.rs
use std::sync::{Arc, RwLock}; use super::algorithms::{bezpath_algorithms::bezpath_is_inside_bezpath, intersection::filtered_segment_intersections}; use super::misc::dvec2_to_point; use crate::math::QuadExt; use crate::subpath::Subpath; use crate::vector::PointId; use crate::vector::misc::point_to_dvec2; use core_types::math::quad::Quad; use core_types::transform::Transform; use glam::{DAffine2, DMat2, DVec2}; use kurbo::{Affine, BezPath, ParamCurve, PathSeg, Shape}; type BoundingBox = Option<[DVec2; 2]>; #[derive(Copy, Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub struct FreePoint { pub id: PointId, pub position: DVec2, } impl FreePoint { pub fn new(id: PointId, position: DVec2) -> Self { Self { id, position } } pub fn apply_transform(&mut self, transform: DAffine2) { self.position = transform.transform_point2(self.position); } } #[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] pub enum ClickTargetType { Subpath(Subpath<PointId>), FreePoint(FreePoint), } /// Fixed-size ring buffer cache for rotated bounding boxes. /// /// Stores up to 8 rotation angles and their corresponding bounding boxes to avoid /// recomputing expensive bezier curve bounds for repeated rotations. Uses 7-bit /// fingerprint hashing with MSB as presence flag for fast lookup. 
#[derive(Clone, Debug, Default)] struct BoundingBoxCache { /// Packed 7-bit fingerprints with MSB presence flags for cache lookup fingerprints: u64, /// (rotation_angle, cached_bounds) pairs elements: [(f64, BoundingBox); Self::CACHE_SIZE], /// Next position to write in ring buffer write_ptr: usize, } impl BoundingBoxCache { /// Cache size - must be ≤ 8 since fingerprints is u64 (8 bytes, 1 byte per element) const CACHE_SIZE: usize = 8; const FINGERPRINT_BITS: u32 = 7; const PRESENCE_FLAG: u8 = 1 << Self::FINGERPRINT_BITS; /// Generates a 7-bit fingerprint from rotation with MSB as presence flag fn rotation_fingerprint(rotation: f64) -> u8 { (rotation.to_bits() % (1 << Self::FINGERPRINT_BITS)) as u8 | Self::PRESENCE_FLAG } /// Attempts to find cached bounding box for the given rotation. /// Returns Some(bounds) if found, None if not cached. fn try_read(&self, rotation: f64, scale: DVec2, translation: DVec2, fingerprint: u8) -> Option<BoundingBox> { // Build bitmask of positions with matching fingerprints for vectorized comparison let mut mask: u8 = 0; for (i, fp) in (0..Self::CACHE_SIZE).zip(self.fingerprints.to_le_bytes()) { // Check MSB for presence and lower 7 bits for fingerprint match if fp == fingerprint { mask |= 1 << i; } } // Check each position with matching fingerprint for exact rotation match while mask != 0 { let pos = mask.trailing_zeros() as usize; if rotation == self.elements[pos].0 { // Found cached rotation - apply scale and translation to cached bounds let transform = DAffine2::from_scale_angle_translation(scale, 0., translation); let new_bounds = self.elements[pos].1.map(|[a, b]| [transform.transform_point2(a), transform.transform_point2(b)]); return Some(new_bounds); } mask &= !(1 << pos); } None } /// Computes and caches bounding box for the given rotation, then applies scale/translation. /// Returns the final transformed bounds. 
fn add_to_cache(&mut self, subpath: &Subpath<PointId>, rotation: f64, scale: DVec2, translation: DVec2, fingerprint: u8) -> BoundingBox { // Compute bounds for pure rotation (expensive operation we want to cache) let bounds = subpath.bounding_box_with_transform(DAffine2::from_angle(rotation)); if bounds.is_none() { return bounds; } // Store in ring buffer at current write position let write_ptr = self.write_ptr; self.elements[write_ptr] = (rotation, bounds); // Update fingerprint byte for this position let mut bytes = self.fingerprints.to_le_bytes(); bytes[write_ptr] = fingerprint; self.fingerprints = u64::from_le_bytes(bytes); // Advance write pointer (ring buffer behavior) self.write_ptr = (write_ptr + 1) % Self::CACHE_SIZE; // Apply scale and translation to cached rotated bounds let transform = DAffine2::from_scale_angle_translation(scale, 0., translation); bounds.map(|[a, b]| [transform.transform_point2(a), transform.transform_point2(b)]) } } /// Represents a clickable target for the layer #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct ClickTarget { target_type: ClickTargetType, stroke_width: f64, bounding_box: BoundingBox, #[serde(skip)] bounding_box_cache: Arc<RwLock<BoundingBoxCache>>, } impl PartialEq for ClickTarget { fn eq(&self, other: &Self) -> bool { self.target_type == other.target_type && self.stroke_width == other.stroke_width && self.bounding_box == other.bounding_box } } impl ClickTarget { pub fn new_with_subpath(subpath: Subpath<PointId>, stroke_width: f64) -> Self { let bounding_box = subpath.loose_bounding_box(); Self { target_type: ClickTargetType::Subpath(subpath), stroke_width, bounding_box, bounding_box_cache: Default::default(), } } pub fn new_with_free_point(point: FreePoint) -> Self { const MAX_LENGTH_FOR_NO_WIDTH_OR_HEIGHT: f64 = 1e-4 / 2.; let stroke_width = 10.; let bounding_box = Some([ point.position - DVec2::splat(MAX_LENGTH_FOR_NO_WIDTH_OR_HEIGHT), point.position + 
DVec2::splat(MAX_LENGTH_FOR_NO_WIDTH_OR_HEIGHT), ]); Self { target_type: ClickTargetType::FreePoint(point), stroke_width, bounding_box, bounding_box_cache: Default::default(), } } pub fn target_type(&self) -> &ClickTargetType { &self.target_type } pub fn bounding_box(&self) -> BoundingBox { self.bounding_box } pub fn bounding_box_center(&self) -> Option<DVec2> { self.bounding_box.map(|bbox| bbox[0] + (bbox[1] - bbox[0]) / 2.) } pub fn bounding_box_with_transform(&self, transform: DAffine2) -> BoundingBox { match self.target_type { ClickTargetType::Subpath(ref subpath) => { // Bypass cache for skewed transforms since rotation decomposition isn't valid if transform.has_skew() { return subpath.bounding_box_with_transform(transform); } // Decompose transform into rotation, scale, translation for caching strategy let rotation = transform.decompose_rotation(); let scale = transform.decompose_scale(); let translation = transform.translation; // Generate fingerprint for cache lookup let fingerprint = BoundingBoxCache::rotation_fingerprint(rotation); // Try to read from cache first let read_lock = self.bounding_box_cache.read().unwrap(); if let Some(value) = read_lock.try_read(rotation, scale, translation, fingerprint) { return value; } std::mem::drop(read_lock); // Cache miss - compute and store new entry let mut write_lock = self.bounding_box_cache.write().unwrap(); write_lock.add_to_cache(subpath, rotation, scale, translation, fingerprint) } // TODO: use point for calculation of bbox ClickTargetType::FreePoint(_) => self.bounding_box.map(|[a, b]| [transform.transform_point2(a), transform.transform_point2(b)]), } } pub fn apply_transform(&mut self, affine_transform: DAffine2) { match self.target_type { ClickTargetType::Subpath(ref mut subpath) => { subpath.apply_transform(affine_transform); } ClickTargetType::FreePoint(ref mut point) => { point.apply_transform(affine_transform); } } self.update_bbox(); } fn update_bbox(&mut self) { match self.target_type { 
ClickTargetType::Subpath(ref subpath) => { self.bounding_box = subpath.bounding_box(); } ClickTargetType::FreePoint(ref point) => { self.bounding_box = Some([point.position - DVec2::splat(self.stroke_width / 2.), point.position + DVec2::splat(self.stroke_width / 2.)]); } } } /// Does the click target intersect the path pub fn intersect_path<It: Iterator<Item = PathSeg>>(&self, mut bezier_iter: impl FnMut() -> It, layer_transform: DAffine2) -> bool { // Check if the matrix is not invertible let mut layer_transform = layer_transform; if layer_transform.matrix2.determinant().abs() <= f64::EPSILON { layer_transform.matrix2 += DMat2::IDENTITY * 1e-4; // TODO: Is this the cleanest way to handle this? } let inverse = layer_transform.inverse(); let mut bezier_iter = || bezier_iter().map(|bezier| Affine::new(inverse.to_cols_array()) * bezier); match self.target_type() { ClickTargetType::Subpath(subpath) => { // Check if outlines intersect let outline_intersects = |path_segment: PathSeg| bezier_iter().any(|line| !filtered_segment_intersections(path_segment, line, None, None).is_empty()); if subpath.iter().any(outline_intersects) { return true; } // Check if selection is entirely within the shape if subpath.closed() && bezier_iter().next().is_some_and(|bezier| subpath.contains_point(point_to_dvec2(bezier.start()))) { return true; } let mut selection = BezPath::from_path_segments(bezier_iter()); selection.close_path(); // Check if shape is entirely within selection bezpath_is_inside_bezpath(&subpath.to_bezpath(), &selection, None, None) } ClickTargetType::FreePoint(point) => bezier_iter().map(|bezier: PathSeg| bezier.winding(dvec2_to_point(point.position))).sum::<i32>() != 0, } } /// Does the click target intersect the point (accounting for stroke size) pub fn intersect_point(&self, point: DVec2, layer_transform: DAffine2) -> bool { let target_bounds = [point - DVec2::splat(self.stroke_width / 2.), point + DVec2::splat(self.stroke_width / 2.)]; let intersects = |a: [DVec2; 2], 
b: [DVec2; 2]| a[0].x <= b[1].x && a[1].x >= b[0].x && a[0].y <= b[1].y && a[1].y >= b[0].y; // This bounding box is not very accurate as it is the axis aligned version of the transformed bounding box. However it is fast. if !self .bounding_box .is_some_and(|loose| (loose[0] - loose[1]).abs().cmpgt(DVec2::splat(1e-4)).any() && intersects((layer_transform * Quad::from_box(loose)).bounding_box(), target_bounds)) { return false; } // Allows for selecting lines // TODO: actual intersection of stroke let inflated_quad = Quad::from_box(target_bounds); self.intersect_path(|| inflated_quad.to_lines(), layer_transform) } /// Does the click target intersect the point (not accounting for stroke size) pub fn intersect_point_no_stroke(&self, point: DVec2) -> bool { // Check if the point is within the bounding box if self .bounding_box .is_some_and(|bbox| bbox[0].x <= point.x && point.x <= bbox[1].x && bbox[0].y <= point.y && point.y <= bbox[1].y) { // Check if the point is within the shape match self.target_type() { ClickTargetType::Subpath(subpath) => subpath.closed() && subpath.contains_point(point), ClickTargetType::FreePoint(free_point) => free_point.position == point, } } else { false } } } #[cfg(test)] mod tests { use super::*; use crate::subpath::Subpath; use glam::DVec2; use std::f64::consts::PI; #[test] fn test_bounding_box_cache_fingerprint_generation() { // Test that fingerprints have MSB set and use only 7 bits for data let rotation1 = 0.0; let rotation2 = PI / 3.0; let rotation3 = PI / 2.0; let fp1 = BoundingBoxCache::rotation_fingerprint(rotation1); let fp2 = BoundingBoxCache::rotation_fingerprint(rotation2); let fp3 = BoundingBoxCache::rotation_fingerprint(rotation3); // All fingerprints should have MSB set (presence flag) assert_eq!(fp1 & BoundingBoxCache::PRESENCE_FLAG, BoundingBoxCache::PRESENCE_FLAG); assert_eq!(fp2 & BoundingBoxCache::PRESENCE_FLAG, BoundingBoxCache::PRESENCE_FLAG); assert_eq!(fp3 & BoundingBoxCache::PRESENCE_FLAG, 
BoundingBoxCache::PRESENCE_FLAG); // Lower 7 bits should contain the actual fingerprint data let data1 = fp1 & !BoundingBoxCache::PRESENCE_FLAG; let data2 = fp2 & !BoundingBoxCache::PRESENCE_FLAG; let data3 = fp3 & !BoundingBoxCache::PRESENCE_FLAG; // Data portions should be different (unless collision) assert!(data1 != data2 && data2 != data3 && data3 != data1); } #[test] fn test_bounding_box_cache_basic_operations() { let mut cache = BoundingBoxCache::default(); // Create a simple rectangle subpath for testing let subpath = Subpath::new_rect(DVec2::ZERO, DVec2::new(100.0, 50.0)); let rotation = PI / 4.0; let scale = DVec2::new(2.0, 2.0); let translation = DVec2::new(10.0, 20.0); let fingerprint = BoundingBoxCache::rotation_fingerprint(rotation); // Cache should be empty initially assert!(cache.try_read(rotation, scale, translation, fingerprint).is_none()); // Add to cache let result = cache.add_to_cache(&subpath, rotation, scale, translation, fingerprint); assert!(result.is_some()); // Should now be able to read from cache let cached = cache.try_read(rotation, scale, translation, fingerprint); assert!(cached.is_some()); assert_eq!(cached.unwrap(), result); } #[test] fn test_bounding_box_cache_ring_buffer_behavior() { let mut cache = BoundingBoxCache::default(); let subpath = Subpath::new_rect(DVec2::ZERO, DVec2::new(10.0, 10.0)); let scale = DVec2::ONE; let translation = DVec2::ZERO; // Fill cache beyond capacity to test ring buffer behavior let rotations: Vec<f64> = (0..10).map(|i| i as f64 * PI / 8.0).collect(); for rotation in &rotations { let fingerprint = BoundingBoxCache::rotation_fingerprint(*rotation); cache.add_to_cache(&subpath, *rotation, scale, translation, fingerprint); } // First two entries should be overwritten (cache size is 8) let first_fp = BoundingBoxCache::rotation_fingerprint(rotations[0]); let second_fp = BoundingBoxCache::rotation_fingerprint(rotations[1]); let last_fp = BoundingBoxCache::rotation_fingerprint(rotations[9]); 
assert!(cache.try_read(rotations[0], scale, translation, first_fp).is_none()); assert!(cache.try_read(rotations[1], scale, translation, second_fp).is_none()); assert!(cache.try_read(rotations[9], scale, translation, last_fp).is_some()); } #[test] fn test_click_target_bounding_box_caching() { // Create a click target with a simple rectangle let subpath = Subpath::new_rect(DVec2::ZERO, DVec2::new(100.0, 50.0)); let click_target = ClickTarget::new_with_subpath(subpath, 1.0); let rotation = PI / 6.0; let scale = DVec2::new(1.5, 1.5); let translation = DVec2::new(20.0, 30.0); let transform = DAffine2::from_scale_angle_translation(scale, rotation, translation); // Helper function to count present values in cache let count_present_values = || { let cache = click_target.bounding_box_cache.read().unwrap(); cache.fingerprints.to_le_bytes().iter().filter(|&&fp| fp & BoundingBoxCache::PRESENCE_FLAG != 0).count() }; // Initially cache should be empty assert_eq!(count_present_values(), 0); // First call should compute and cache let result1 = click_target.bounding_box_with_transform(transform); assert!(result1.is_some()); assert_eq!(count_present_values(), 1); // Second call with same transform should use cache, not add new entry let result2 = click_target.bounding_box_with_transform(transform); assert_eq!(result1, result2); assert_eq!(count_present_values(), 1); // Should still be 1, not 2 // Different scale/translation but same rotation should use cached rotation let transform2 = DAffine2::from_scale_angle_translation(DVec2::new(2.0, 2.0), rotation, DVec2::new(50.0, 60.0)); let result3 = click_target.bounding_box_with_transform(transform2); assert!(result3.is_some()); assert_ne!(result1, result3); // Different due to different scale/translation assert_eq!(count_present_values(), 1); // Should still be 1, reused same rotation } #[test] fn test_click_target_skew_bypass_cache() { let subpath = Subpath::new_rect(DVec2::ZERO, DVec2::new(100.0, 50.0)); let click_target = 
ClickTarget::new_with_subpath(subpath.clone(), 1.0); // Create a transform with skew (non-uniform scaling in different directions) let skew_transform = DAffine2::from_cols_array(&[2.0, 0.5, 0.0, 1.0, 10.0, 20.0]); assert!(skew_transform.has_skew()); // Should bypass cache and compute directly let result = click_target.bounding_box_with_transform(skew_transform); let expected = subpath.bounding_box_with_transform(skew_transform); assert_eq!(result, expected); } #[test] fn test_cache_fingerprint_collision_handling() { let mut cache = BoundingBoxCache::default(); let subpath = Subpath::new_rect(DVec2::ZERO, DVec2::new(10.0, 10.0)); let scale = DVec2::ONE; let translation = DVec2::ZERO; // Find two rotations that produce the same fingerprint (collision) let rotation1 = 0.0; let rotation2 = 0.25; let fp1 = BoundingBoxCache::rotation_fingerprint(rotation1); let fp2 = BoundingBoxCache::rotation_fingerprint(rotation2); // If we found a collision, test that exact rotation matching still works if fp1 == fp2 && rotation1 != rotation2 { // Add first rotation cache.add_to_cache(&subpath, rotation1, scale, translation, fp1); // Should find the exact rotation assert!(cache.try_read(rotation1, scale, translation, fp1).is_some()); // Should not find the colliding rotation (different exact value) assert!(cache.try_read(rotation2, scale, translation, fp2).is_none()); } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/mod.rs
node-graph/libraries/vector-types/src/vector/mod.rs
pub mod algorithms; pub mod click_target; pub mod misc; pub mod reference_point; pub mod style; mod vector_attributes; mod vector_modification; mod vector_types; pub use reference_point::*; pub use style::PathStyle; pub use vector_attributes::*; pub use vector_modification::*; pub use vector_types::*;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/style.rs
node-graph/libraries/vector-types/src/vector/style.rs
//! Contains stylistic options for SVG elements. pub use crate::gradient::*; use core_types::Color; use core_types::table::Table; use dyn_any::DynAny; use glam::DAffine2; /// Describes the fill of a layer. /// /// Can be None, a solid [Color], or a linear/radial [Gradient]. /// /// In the future we'll probably also add a pattern fill. This will probably be named "Paint" in the future. #[repr(C)] #[derive(Default, Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize, DynAny, Hash, specta::Type)] pub enum Fill { #[default] None, Solid(Color), Gradient(Gradient), } impl std::fmt::Display for Fill { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::None => write!(f, "None"), Self::Solid(color) => write!(f, "#{} (Alpha: {}%)", color.to_rgb_hex_srgb(), color.a() * 100.), Self::Gradient(gradient) => write!(f, "{gradient}"), } } } impl Fill { /// Construct a new [Fill::Solid] from a [Color]. pub fn solid(color: Color) -> Self { Self::Solid(color) } /// Construct a new [Fill::Solid] or [Fill::None] from an optional [Color]. pub fn solid_or_none(color: Option<Color>) -> Self { match color { Some(color) => Self::Solid(color), None => Self::None, } } /// Evaluate the color at some point on the fill. Doesn't currently work for Gradient. pub fn color(&self) -> Color { match self { Self::None => Color::BLACK, Self::Solid(color) => *color, // TODO: Should correctly sample the gradient the equation here: https://svgwg.org/svg2-draft/pservers.html#Gradients Self::Gradient(Gradient { stops, .. 
}) => stops.0[0].1, } } pub fn lerp(&self, other: &Self, time: f64) -> Self { let transparent = Self::solid(Color::TRANSPARENT); let a = if *self == Self::None { &transparent } else { self }; let b = if *other == Self::None { &transparent } else { other }; match (a, b) { (Self::Solid(a), Self::Solid(b)) => Self::Solid(a.lerp(b, time as f32)), (Self::Solid(a), Self::Gradient(b)) => { let mut solid_to_gradient = b.clone(); solid_to_gradient.stops.0.iter_mut().for_each(|(_, color)| *color = *a); let a = &solid_to_gradient; Self::Gradient(a.lerp(b, time)) } (Self::Gradient(a), Self::Solid(b)) => { let mut gradient_to_solid = a.clone(); gradient_to_solid.stops.0.iter_mut().for_each(|(_, color)| *color = *b); let b = &gradient_to_solid; Self::Gradient(a.lerp(b, time)) } (Self::Gradient(a), Self::Gradient(b)) => Self::Gradient(a.lerp(b, time)), _ => Self::None, } } /// Extract a gradient from the fill pub fn as_gradient(&self) -> Option<&Gradient> { match self { Self::Gradient(gradient) => Some(gradient), _ => None, } } /// Extract a solid color from the fill pub fn as_solid(&self) -> Option<Color> { match self { Self::Solid(color) => Some(*color), _ => None, } } /// Find if fill can be represented with only opaque colors pub fn is_opaque(&self) -> bool { match self { Fill::Solid(color) => color.is_opaque(), Fill::Gradient(gradient) => gradient.stops.iter().all(|(_, color)| color.is_opaque()), Fill::None => true, } } /// Returns if fill is none pub fn is_none(&self) -> bool { *self == Self::None } } impl From<Color> for Fill { fn from(color: Color) -> Fill { Fill::Solid(color) } } impl From<Option<Color>> for Fill { fn from(color: Option<Color>) -> Fill { Fill::solid_or_none(color) } } impl From<Table<Color>> for Fill { fn from(color: Table<Color>) -> Fill { Fill::solid_or_none(color.into()) } } impl From<Table<GradientStops>> for Fill { fn from(gradient: Table<GradientStops>) -> Fill { Fill::Gradient(Gradient { stops: gradient.iter().nth(0).map(|row| 
row.element.clone()).unwrap_or_default(), ..Default::default() }) } } impl From<Gradient> for Fill { fn from(gradient: Gradient) -> Fill { Fill::Gradient(gradient) } } /// Describes the fill of a layer, but unlike [`Fill`], this doesn't store a [`Gradient`] directly but just its [`GradientStops`]. /// /// Can be None, a solid [Color], or a linear/radial [Gradient]. /// /// In the future we'll probably also add a pattern fill. #[repr(C)] #[derive(Default, Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize, DynAny, Hash, specta::Type)] pub enum FillChoice { #[default] None, /// WARNING: Color is gamma, not linear! Solid(Color), /// WARNING: Color stops are gamma, not linear! Gradient(GradientStops), } impl FillChoice { pub fn as_solid(&self) -> Option<Color> { let Self::Solid(color) = self else { return None }; Some(*color) } pub fn as_gradient(&self) -> Option<&GradientStops> { let Self::Gradient(gradient) = self else { return None }; Some(gradient) } /// Convert this [`FillChoice`] to a [`Fill`] using the provided [`Gradient`] as a base for the positional information of the gradient. /// If a gradient isn't provided, default gradient positional information is used in cases where the [`FillChoice`] is a [`Gradient`]. 
pub fn to_fill(&self, existing_gradient: Option<&Gradient>) -> Fill { match self { Self::None => Fill::None, Self::Solid(color) => Fill::Solid(*color), Self::Gradient(stops) => { let mut fill = existing_gradient.cloned().unwrap_or_default(); fill.stops = stops.clone(); Fill::Gradient(fill) } } } } impl From<Fill> for FillChoice { fn from(fill: Fill) -> Self { match fill { Fill::None => FillChoice::None, Fill::Solid(color) => FillChoice::Solid(color), Fill::Gradient(gradient) => FillChoice::Gradient(gradient.stops), } } } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, serde::Serialize, serde::Deserialize, DynAny, Hash, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum FillType { #[default] Solid, Gradient, } /// The stroke (outline) style of an SVG element. #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum StrokeCap { #[default] Butt, Round, Square, } impl StrokeCap { pub fn svg_name(&self) -> &'static str { match self { StrokeCap::Butt => "butt", StrokeCap::Round => "round", StrokeCap::Square => "square", } } } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum StrokeJoin { #[default] Miter, Bevel, Round, } impl StrokeJoin { pub fn svg_name(&self) -> &'static str { match self { StrokeJoin::Bevel => "bevel", StrokeJoin::Miter => "miter", StrokeJoin::Round => "round", } } } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum StrokeAlign { #[default] Center, Inside, Outside, } impl StrokeAlign { pub fn is_not_centered(self) -> bool { self != Self::Center } } #[repr(C)] #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, 
Hash, DynAny, specta::Type, node_macro::ChoiceType)] #[widget(Radio)] pub enum PaintOrder { #[default] StrokeAbove, StrokeBelow, } impl PaintOrder { pub fn is_default(self) -> bool { self == Self::default() } } fn daffine2_identity() -> DAffine2 { DAffine2::IDENTITY } #[repr(C)] #[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize, DynAny, specta::Type)] #[serde(default)] pub struct Stroke { /// Stroke color pub color: Option<Color>, /// Line thickness pub weight: f64, pub dash_lengths: Vec<f64>, pub dash_offset: f64, #[serde(alias = "line_cap")] pub cap: StrokeCap, #[serde(alias = "line_join")] pub join: StrokeJoin, #[serde(alias = "line_join_miter_limit")] pub join_miter_limit: f64, #[serde(default)] pub align: StrokeAlign, #[serde(default = "daffine2_identity")] pub transform: DAffine2, #[serde(default)] pub non_scaling: bool, #[serde(default)] pub paint_order: PaintOrder, } impl std::hash::Hash for Stroke { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.color.hash(state); self.weight.to_bits().hash(state); { self.dash_lengths.len().hash(state); self.dash_lengths.iter().for_each(|length| length.to_bits().hash(state)); } self.dash_offset.to_bits().hash(state); self.cap.hash(state); self.join.hash(state); self.join_miter_limit.to_bits().hash(state); self.align.hash(state); self.transform.to_cols_array().iter().for_each(|x| x.to_bits().hash(state)); self.non_scaling.hash(state); self.paint_order.hash(state); } } impl Stroke { pub const fn new(color: Option<Color>, weight: f64) -> Self { Self { color, weight, dash_lengths: Vec::new(), dash_offset: 0., cap: StrokeCap::Butt, join: StrokeJoin::Miter, join_miter_limit: 4., align: StrokeAlign::Center, transform: DAffine2::IDENTITY, non_scaling: false, paint_order: PaintOrder::StrokeAbove, } } pub fn lerp(&self, other: &Self, time: f64) -> Self { Self { color: self.color.map(|color| color.lerp(&other.color.unwrap_or(color), time as f32)), weight: self.weight + (other.weight - self.weight) * 
time, dash_lengths: self.dash_lengths.iter().zip(other.dash_lengths.iter()).map(|(a, b)| a + (b - a) * time).collect(), dash_offset: self.dash_offset + (other.dash_offset - self.dash_offset) * time, cap: if time < 0.5 { self.cap } else { other.cap }, join: if time < 0.5 { self.join } else { other.join }, join_miter_limit: self.join_miter_limit + (other.join_miter_limit - self.join_miter_limit) * time, align: if time < 0.5 { self.align } else { other.align }, transform: DAffine2::from_mat2_translation( time * self.transform.matrix2 + (1. - time) * other.transform.matrix2, self.transform.translation * time + other.transform.translation * (1. - time), ), non_scaling: if time < 0.5 { self.non_scaling } else { other.non_scaling }, paint_order: if time < 0.5 { self.paint_order } else { other.paint_order }, } } /// Get the current stroke color. pub fn color(&self) -> Option<Color> { self.color } /// Get the current stroke weight. pub fn weight(&self) -> f64 { self.weight } /// Get the effective stroke weight. 
pub fn effective_width(&self) -> f64 { self.weight * match self.align { StrokeAlign::Center => 1., StrokeAlign::Inside => 0., StrokeAlign::Outside => 2., } } pub fn dash_lengths(&self) -> String { if self.dash_lengths.is_empty() { "none".to_string() } else { self.dash_lengths.iter().map(|v| v.to_string()).collect::<Vec<_>>().join(", ") } } pub fn dash_offset(&self) -> f64 { self.dash_offset } pub fn cap_index(&self) -> u32 { self.cap as u32 } pub fn join_index(&self) -> u32 { self.join as u32 } pub fn join_miter_limit(&self) -> f32 { self.join_miter_limit as f32 } pub fn with_color(mut self, color: &Option<Color>) -> Option<Self> { self.color = *color; Some(self) } pub fn with_weight(mut self, weight: f64) -> Self { self.weight = weight; self } pub fn with_dash_lengths(mut self, dash_lengths: &str) -> Option<Self> { dash_lengths .split(&[',', ' ']) .filter(|x| !x.is_empty()) .map(str::parse::<f64>) .collect::<Result<Vec<_>, _>>() .ok() .map(|lengths| { self.dash_lengths = lengths; self }) } pub fn with_dash_offset(mut self, dash_offset: f64) -> Self { self.dash_offset = dash_offset; self } pub fn with_stroke_cap(mut self, stroke_cap: StrokeCap) -> Self { self.cap = stroke_cap; self } pub fn with_stroke_join(mut self, stroke_join: StrokeJoin) -> Self { self.join = stroke_join; self } pub fn with_stroke_join_miter_limit(mut self, limit: f64) -> Self { self.join_miter_limit = limit; self } pub fn with_stroke_align(mut self, stroke_align: StrokeAlign) -> Self { self.align = stroke_align; self } pub fn with_non_scaling(mut self, non_scaling: bool) -> Self { self.non_scaling = non_scaling; self } pub fn has_renderable_stroke(&self) -> bool { self.weight > 0. && self.color.is_some_and(|color| color.a() != 0.) 
} } // Having an alpha of 1 to start with leads to a better experience with the properties panel impl Default for Stroke { fn default() -> Self { Self { weight: 0., color: Some(Color::from_rgba8_srgb(0, 0, 0, 255)), dash_lengths: Vec::new(), dash_offset: 0., cap: StrokeCap::Butt, join: StrokeJoin::Miter, join_miter_limit: 4., align: StrokeAlign::Center, transform: DAffine2::IDENTITY, non_scaling: false, paint_order: PaintOrder::default(), } } } #[repr(C)] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize, DynAny, specta::Type)] pub struct PathStyle { pub stroke: Option<Stroke>, pub fill: Fill, } impl std::hash::Hash for PathStyle { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.stroke.hash(state); self.fill.hash(state); } } impl std::fmt::Display for PathStyle { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let fill = &self.fill; let stroke = match &self.stroke { Some(stroke) => format!("#{} (Weight: {} px)", stroke.color.map_or("None".to_string(), |c| c.to_rgba_hex_srgb()), stroke.weight), None => "None".to_string(), }; write!(f, "Fill: {fill}\nStroke: {stroke}") } } impl PathStyle { pub const fn new(stroke: Option<Stroke>, fill: Fill) -> Self { Self { stroke, fill } } pub fn lerp(&self, other: &Self, time: f64) -> Self { Self { fill: self.fill.lerp(&other.fill, time), stroke: match (self.stroke.as_ref(), other.stroke.as_ref()) { (Some(a), Some(b)) => Some(a.lerp(b, time)), (Some(a), None) => { if time < 0.5 { Some(a.clone()) } else { None } } (None, Some(b)) => { if time < 0.5 { Some(b.clone()) } else { None } } (None, None) => None, }, } } /// Get the current path's [Fill]. 
/// /// # Example /// ``` /// # use vector_types::vector::style::{Fill, PathStyle}; /// # use core_types::Color; /// let fill = Fill::solid(Color::RED); /// let style = PathStyle::new(None, fill.clone()); /// /// assert_eq!(*style.fill(), fill); /// ``` pub fn fill(&self) -> &Fill { &self.fill } /// Get the current path's [Stroke]. /// /// # Example /// ``` /// # use vector_types::vector::style::{Fill, Stroke, PathStyle}; /// # use core_types::Color; /// let stroke = Stroke::new(Some(Color::GREEN), 42.); /// let style = PathStyle::new(Some(stroke.clone()), Fill::None); /// /// assert_eq!(style.stroke(), Some(stroke)); /// ``` pub fn stroke(&self) -> Option<Stroke> { self.stroke.clone() } /// Replace the path's [Fill] with a provided one. /// /// # Example /// ``` /// # use vector_types::vector::style::{Fill, PathStyle}; /// # use core_types::Color; /// let mut style = PathStyle::default(); /// /// assert_eq!(*style.fill(), Fill::None); /// /// let fill = Fill::solid(Color::RED); /// style.set_fill(fill.clone()); /// /// assert_eq!(*style.fill(), fill); /// ``` pub fn set_fill(&mut self, fill: Fill) { self.fill = fill; } pub fn set_stroke_transform(&mut self, transform: DAffine2) { if let Some(stroke) = &mut self.stroke { stroke.transform = transform; } } /// Replace the path's [Stroke] with a provided one. /// /// # Example /// ``` /// # use vector_types::vector::style::{Stroke, PathStyle}; /// # use core_types::Color; /// let mut style = PathStyle::default(); /// /// assert_eq!(style.stroke(), None); /// /// let stroke = Stroke::new(Some(Color::GREEN), 42.); /// style.set_stroke(stroke.clone()); /// /// assert_eq!(style.stroke(), Some(stroke)); /// ``` pub fn set_stroke(&mut self, stroke: Stroke) { self.stroke = Some(stroke); } /// Set the path's fill to None. 
/// /// # Example /// ``` /// # use vector_types::vector::style::{Fill, PathStyle}; /// # use core_types::Color; /// let mut style = PathStyle::new(None, Fill::Solid(Color::RED)); /// /// assert_ne!(*style.fill(), Fill::None); /// /// style.clear_fill(); /// /// assert_eq!(*style.fill(), Fill::None); /// ``` pub fn clear_fill(&mut self) { self.fill = Fill::None; } /// Set the path's stroke to None. /// /// # Example /// ``` /// # use vector_types::vector::style::{Fill, Stroke, PathStyle}; /// # use core_types::Color; /// let mut style = PathStyle::new(Some(Stroke::new(Some(Color::GREEN), 42.)), Fill::None); /// /// assert!(style.stroke().is_some()); /// /// style.clear_stroke(); /// /// assert!(!style.stroke().is_some()); /// ``` pub fn clear_stroke(&mut self) { self.stroke = None; } } /// Ways the user can choose to view the artwork in the viewport. #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type)] pub enum RenderMode { /// Render with normal coloration at the current viewport resolution #[default] Normal = 0, /// Render only the outlines of shapes at the current viewport resolution Outline, // /// Render with normal coloration at the document resolution, showing the pixels when the current viewport resolution is higher // PixelPreview, // /// Render a preview of how the object would be exported as an SVG. // SvgPreview, }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/reference_point.rs
node-graph/libraries/vector-types/src/vector/reference_point.rs
use core_types::math::bbox::AxisAlignedBbox; use glam::DVec2; #[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq, dyn_any::DynAny, serde::Serialize, serde::Deserialize, specta::Type)] pub enum ReferencePoint { #[default] None, TopLeft, TopCenter, TopRight, CenterLeft, Center, CenterRight, BottomLeft, BottomCenter, BottomRight, } impl ReferencePoint { pub fn point_in_bounding_box(&self, bounding_box: AxisAlignedBbox) -> Option<DVec2> { let size = bounding_box.size(); let offset = match self { ReferencePoint::None => return None, ReferencePoint::TopLeft => DVec2::ZERO, ReferencePoint::TopCenter => DVec2::new(size.x / 2., 0.), ReferencePoint::TopRight => DVec2::new(size.x, 0.), ReferencePoint::CenterLeft => DVec2::new(0., size.y / 2.), ReferencePoint::Center => DVec2::new(size.x / 2., size.y / 2.), ReferencePoint::CenterRight => DVec2::new(size.x, size.y / 2.), ReferencePoint::BottomLeft => DVec2::new(0., size.y), ReferencePoint::BottomCenter => DVec2::new(size.x / 2., size.y), ReferencePoint::BottomRight => DVec2::new(size.x, size.y), }; Some(bounding_box.start + offset) } } impl From<&str> for ReferencePoint { fn from(input: &str) -> Self { match input { "None" => ReferencePoint::None, "TopLeft" => ReferencePoint::TopLeft, "TopCenter" => ReferencePoint::TopCenter, "TopRight" => ReferencePoint::TopRight, "CenterLeft" => ReferencePoint::CenterLeft, "Center" => ReferencePoint::Center, "CenterRight" => ReferencePoint::CenterRight, "BottomLeft" => ReferencePoint::BottomLeft, "BottomCenter" => ReferencePoint::BottomCenter, "BottomRight" => ReferencePoint::BottomRight, _ => panic!("Failed parsing unrecognized ReferencePosition enum value '{input}'"), } } } impl From<ReferencePoint> for Option<DVec2> { fn from(input: ReferencePoint) -> Self { match input { ReferencePoint::None => None, ReferencePoint::TopLeft => Some(DVec2::new(0., 0.)), ReferencePoint::TopCenter => Some(DVec2::new(0.5, 0.)), ReferencePoint::TopRight => Some(DVec2::new(1., 0.)), 
ReferencePoint::CenterLeft => Some(DVec2::new(0., 0.5)), ReferencePoint::Center => Some(DVec2::new(0.5, 0.5)), ReferencePoint::CenterRight => Some(DVec2::new(1., 0.5)), ReferencePoint::BottomLeft => Some(DVec2::new(0., 1.)), ReferencePoint::BottomCenter => Some(DVec2::new(0.5, 1.)), ReferencePoint::BottomRight => Some(DVec2::new(1., 1.)), } } } impl From<DVec2> for ReferencePoint { fn from(input: DVec2) -> Self { const TOLERANCE: f64 = 1e-5_f64; if input.y.abs() < TOLERANCE { if input.x.abs() < TOLERANCE { return ReferencePoint::TopLeft; } else if (input.x - 0.5).abs() < TOLERANCE { return ReferencePoint::TopCenter; } else if (input.x - 1.).abs() < TOLERANCE { return ReferencePoint::TopRight; } } else if (input.y - 0.5).abs() < TOLERANCE { if input.x.abs() < TOLERANCE { return ReferencePoint::CenterLeft; } else if (input.x - 0.5).abs() < TOLERANCE { return ReferencePoint::Center; } else if (input.x - 1.).abs() < TOLERANCE { return ReferencePoint::CenterRight; } } else if (input.y - 1.).abs() < TOLERANCE { if input.x.abs() < TOLERANCE { return ReferencePoint::BottomLeft; } else if (input.x - 0.5).abs() < TOLERANCE { return ReferencePoint::BottomCenter; } else if (input.x - 1.).abs() < TOLERANCE { return ReferencePoint::BottomRight; } } ReferencePoint::None } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/vector_modification.rs
node-graph/libraries/vector-types/src/vector/vector_modification.rs
use super::*; use crate::subpath::BezierHandles; use crate::vector::misc::{HandleId, HandleType, point_to_dvec2}; use core_types::uuid::generate_uuid; use dyn_any::DynAny; use glam::DVec2; use kurbo::{BezPath, PathEl, Point}; use std::collections::{HashMap, HashSet}; use std::hash::BuildHasher; /// Represents a procedural change to the [`PointDomain`] in [`Vector`]. #[derive(Clone, Debug, Default, PartialEq, serde::Serialize, serde::Deserialize)] pub struct PointModification { add: Vec<PointId>, remove: HashSet<PointId>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] delta: HashMap<PointId, DVec2>, } impl Hash for PointModification { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { generate_uuid().hash(state) } } impl PointModification { /// Apply this modification to the specified [`PointDomain`]. pub fn apply(&self, point_domain: &mut PointDomain, segment_domain: &mut SegmentDomain) { point_domain.retain(segment_domain, |id| !self.remove.contains(id)); for (index, (id, position)) in point_domain.positions_mut().enumerate() { let Some(&delta) = self.delta.get(&id) else { continue }; if !delta.is_finite() { warn!("Invalid delta when applying a point modification"); continue; } *position += delta; for (_, handles, start, end) in segment_domain.handles_mut() { if start == index { handles.move_start(delta); } if end == index { handles.move_end(delta); } } } for &add_id in &self.add { let Some(&position) = self.delta.get(&add_id) else { continue }; if !position.is_finite() { warn!("Invalid position when applying a point modification"); continue; } point_domain.push(add_id, position); } } /// Create a new modification that will convert an empty [`Vector`] into the target [`Vector`]. 
pub fn create_from_vector<Upstream>(vector: &Vector<Upstream>) -> Self { Self { add: vector.point_domain.ids().to_vec(), remove: HashSet::new(), delta: vector.point_domain.ids().iter().copied().zip(vector.point_domain.positions().iter().cloned()).collect(), } } fn push(&mut self, id: PointId, position: DVec2) { self.add.push(id); self.delta.insert(id, position); } fn remove(&mut self, id: PointId) { self.remove.insert(id); self.add.retain(|&add| add != id); self.delta.remove(&id); } } /// Represents a procedural change to the [`SegmentDomain`] in [`Vector`]. #[derive(Clone, Debug, Default, PartialEq, serde::Serialize, serde::Deserialize)] pub struct SegmentModification { add: Vec<SegmentId>, remove: HashSet<SegmentId>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] start_point: HashMap<SegmentId, PointId>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] end_point: HashMap<SegmentId, PointId>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] handle_primary: HashMap<SegmentId, Option<DVec2>>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] handle_end: HashMap<SegmentId, Option<DVec2>>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] stroke: HashMap<SegmentId, StrokeId>, } impl SegmentModification { /// Apply this modification to the specified [`SegmentDomain`]. 
pub fn apply(&self, segment_domain: &mut SegmentDomain, point_domain: &PointDomain) { segment_domain.retain(|id| !self.remove.contains(id), point_domain.ids().len()); for (id, point) in segment_domain.start_point_mut() { let Some(&new) = self.start_point.get(&id) else { continue }; let Some(index) = point_domain.resolve_id(new) else { warn!("Invalid start ID when applying a segment modification"); continue; }; *point = index; } for (id, point) in segment_domain.end_point_mut() { let Some(&new) = self.end_point.get(&id) else { continue }; let Some(index) = point_domain.resolve_id(new) else { warn!("Invalid end ID when applying a segment modification"); continue; }; *point = index; } for (id, handles, start, end) in segment_domain.handles_mut() { let Some(&start) = point_domain.positions().get(start) else { continue }; let Some(&end) = point_domain.positions().get(end) else { continue }; // Compute the actual start and end position based on the offset from the anchor let start = self.handle_primary.get(&id).copied().map(|handle| handle.map(|handle| handle + start)); let end = self.handle_end.get(&id).copied().map(|handle| handle.map(|handle| handle + end)); if !start.unwrap_or_default().is_none_or(|start| start.is_finite()) || !end.unwrap_or_default().is_none_or(|end| end.is_finite()) { warn!("Invalid handles when applying a segment modification"); continue; } match (start, end) { // The new handles are fully specified by the modification (Some(Some(handle_start)), Some(Some(handle_end))) => *handles = BezierHandles::Cubic { handle_start, handle_end }, (Some(Some(handle)), Some(None)) | (Some(None), Some(Some(handle))) => *handles = BezierHandles::Quadratic { handle }, (Some(None), Some(None)) => *handles = BezierHandles::Linear, // Remove the end handle (None, Some(None)) => { if let BezierHandles::Cubic { handle_start, .. 
} = *handles { *handles = BezierHandles::Quadratic { handle: handle_start } } } // Change the end handle (None, Some(Some(handle_end))) => match *handles { BezierHandles::Linear => *handles = BezierHandles::Quadratic { handle: handle_end }, BezierHandles::Quadratic { handle: handle_start } => *handles = BezierHandles::Cubic { handle_start, handle_end }, BezierHandles::Cubic { handle_start, .. } => *handles = BezierHandles::Cubic { handle_start, handle_end }, }, // Remove the start handle (Some(None), None) => *handles = BezierHandles::Linear, // Change the start handle (Some(Some(handle_start)), None) => match *handles { BezierHandles::Linear => *handles = BezierHandles::Quadratic { handle: handle_start }, BezierHandles::Quadratic { .. } => *handles = BezierHandles::Quadratic { handle: handle_start }, BezierHandles::Cubic { handle_end, .. } => *handles = BezierHandles::Cubic { handle_start, handle_end }, }, // No change (None, None) => {} }; } for (id, stroke) in segment_domain.stroke_mut() { let Some(&new) = self.stroke.get(&id) else { continue }; *stroke = new; } for &add_id in &self.add { let Some(&start) = self.start_point.get(&add_id) else { continue }; let Some(&end) = self.end_point.get(&add_id) else { continue }; let Some(&handle_start) = self.handle_primary.get(&add_id) else { continue }; let Some(&handle_end) = self.handle_end.get(&add_id) else { continue }; let Some(&stroke) = self.stroke.get(&add_id) else { continue }; let Some(start_index) = point_domain.resolve_id(start) else { warn!("invalid start id: {start:#?}"); continue; }; let Some(end_index) = point_domain.resolve_id(end) else { warn!("invalid end id: {end:#?}"); continue; }; let start_position = point_domain.positions()[start_index]; let end_position = point_domain.positions()[end_index]; let handles = match (handle_start, handle_end) { (Some(handle_start), Some(handle_end)) => BezierHandles::Cubic { handle_start: handle_start + start_position, handle_end: handle_end + end_position, }, 
(Some(handle), None) | (None, Some(handle)) => BezierHandles::Quadratic { handle: handle + start_position }, (None, None) => BezierHandles::Linear, }; if !handles.is_finite() { warn!("invalid handles"); continue; } segment_domain.push(add_id, start_index, end_index, handles, stroke); } assert!( segment_domain.start_point().iter().all(|&index| index < point_domain.ids().len()), "index should be in range {segment_domain:#?}" ); assert!( segment_domain.end_point().iter().all(|&index| index < point_domain.ids().len()), "index should be in range {segment_domain:#?}" ); } /// Create a new modification that will convert an empty [`Vector`] into the target [`Vector`]. pub fn create_from_vector<Upstream>(vector: &Vector<Upstream>) -> Self { let point_id = |(&segment, &index)| (segment, vector.point_domain.ids()[index]); Self { add: vector.segment_domain.ids().to_vec(), remove: HashSet::new(), start_point: vector.segment_domain.ids().iter().zip(vector.segment_domain.start_point()).map(point_id).collect(), end_point: vector.segment_domain.ids().iter().zip(vector.segment_domain.end_point()).map(point_id).collect(), handle_primary: vector.segment_bezier_iter().map(|(id, b, _, _)| (id, b.handle_start().map(|handle| handle - b.start))).collect(), handle_end: vector.segment_bezier_iter().map(|(id, b, _, _)| (id, b.handle_end().map(|handle| handle - b.end))).collect(), stroke: vector.segment_domain.ids().iter().copied().zip(vector.segment_domain.stroke().iter().cloned()).collect(), } } fn push(&mut self, id: SegmentId, points: [PointId; 2], handles: [Option<DVec2>; 2], stroke: StrokeId) { self.remove.remove(&id); self.add.push(id); self.start_point.insert(id, points[0]); self.end_point.insert(id, points[1]); self.handle_primary.insert(id, handles[0]); self.handle_end.insert(id, handles[1]); self.stroke.insert(id, stroke); } fn remove(&mut self, id: SegmentId) { self.remove.insert(id); self.add.retain(|&add| add != id); self.start_point.remove(&id); self.end_point.remove(&id); 
self.handle_primary.remove(&id); self.handle_end.remove(&id); self.stroke.remove(&id); } } /// Represents a procedural change to the [`RegionDomain`] in [`Vector`]. #[derive(Clone, Debug, Default, PartialEq, serde::Serialize, serde::Deserialize)] pub struct RegionModification { add: Vec<RegionId>, remove: HashSet<RegionId>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] segment_range: HashMap<RegionId, std::ops::RangeInclusive<SegmentId>>, #[serde(serialize_with = "serialize_hashmap", deserialize_with = "deserialize_hashmap")] fill: HashMap<RegionId, FillId>, } impl RegionModification { /// Apply this modification to the specified [`RegionDomain`]. pub fn apply(&self, region_domain: &mut RegionDomain) { region_domain.retain(|id| !self.remove.contains(id)); for (id, segment_range) in region_domain.segment_range_mut() { let Some(new) = self.segment_range.get(&id) else { continue }; *segment_range = new.clone(); // Range inclusive is not copy } for (id, fill) in region_domain.fill_mut() { let Some(&new) = self.fill.get(&id) else { continue }; *fill = new; } for &add_id in &self.add { let Some(segment_range) = self.segment_range.get(&add_id) else { continue }; let Some(&fill) = self.fill.get(&add_id) else { continue }; region_domain.push(add_id, segment_range.clone(), fill); } } /// Create a new modification that will convert an empty [`Vector`] into the target [`Vector`]. pub fn create_from_vector<Upstream>(vector: &Vector<Upstream>) -> Self { Self { add: vector.region_domain.ids().to_vec(), remove: HashSet::new(), segment_range: vector.region_domain.ids().iter().copied().zip(vector.region_domain.segment_range().iter().cloned()).collect(), fill: vector.region_domain.ids().iter().copied().zip(vector.region_domain.fill().iter().cloned()).collect(), } } } /// Represents a procedural change to the [`Vector`]. 
#[derive(Clone, Debug, Default, PartialEq, DynAny, serde::Serialize, serde::Deserialize)] pub struct VectorModification { points: PointModification, segments: SegmentModification, regions: RegionModification, add_g1_continuous: HashSet<[HandleId; 2]>, remove_g1_continuous: HashSet<[HandleId; 2]>, } /// A modification type that can be added to a [`VectorModification`]. #[derive(PartialEq, Clone, Debug, serde::Serialize, serde::Deserialize)] pub enum VectorModificationType { InsertSegment { id: SegmentId, points: [PointId; 2], handles: [Option<DVec2>; 2] }, InsertPoint { id: PointId, position: DVec2 }, RemoveSegment { id: SegmentId }, RemovePoint { id: PointId }, SetG1Continuous { handles: [HandleId; 2], enabled: bool }, SetHandles { segment: SegmentId, handles: [Option<DVec2>; 2] }, SetPrimaryHandle { segment: SegmentId, relative_position: DVec2 }, SetEndHandle { segment: SegmentId, relative_position: DVec2 }, SetStartPoint { segment: SegmentId, id: PointId }, SetEndPoint { segment: SegmentId, id: PointId }, ApplyPointDelta { point: PointId, delta: DVec2 }, ApplyPrimaryDelta { segment: SegmentId, delta: DVec2 }, ApplyEndDelta { segment: SegmentId, delta: DVec2 }, } impl VectorModification { /// Apply this modification to the specified [`Vector`]. 
pub fn apply<Upstream>(&self, vector: &mut Vector<Upstream>) { self.points.apply(&mut vector.point_domain, &mut vector.segment_domain); self.segments.apply(&mut vector.segment_domain, &vector.point_domain); self.regions.apply(&mut vector.region_domain); let valid = |val: &[HandleId; 2]| vector.segment_domain.ids().contains(&val[0].segment) && vector.segment_domain.ids().contains(&val[1].segment); vector .colinear_manipulators .retain(|val| !self.remove_g1_continuous.contains(val) && !self.remove_g1_continuous.contains(&[val[1], val[0]]) && valid(val)); for handles in &self.add_g1_continuous { if !vector.colinear_manipulators.iter().any(|test| test == handles || test == &[handles[1], handles[0]]) && valid(handles) { vector.colinear_manipulators.push(*handles); } } } /// Add a [`VectorModificationType`] to this modification. pub fn modify(&mut self, vector_modification: &VectorModificationType) { match vector_modification { VectorModificationType::InsertSegment { id, points, handles } => self.segments.push(*id, *points, *handles, StrokeId::ZERO), VectorModificationType::InsertPoint { id, position } => self.points.push(*id, *position), VectorModificationType::RemoveSegment { id } => self.segments.remove(*id), VectorModificationType::RemovePoint { id } => self.points.remove(*id), VectorModificationType::SetG1Continuous { handles, enabled } => { if *enabled { if !self.add_g1_continuous.contains(&[handles[1], handles[0]]) { self.add_g1_continuous.insert(*handles); } self.remove_g1_continuous.remove(handles); self.remove_g1_continuous.remove(&[handles[1], handles[0]]); } else { if !self.remove_g1_continuous.contains(&[handles[1], handles[0]]) { self.remove_g1_continuous.insert(*handles); } self.add_g1_continuous.remove(handles); self.add_g1_continuous.remove(&[handles[1], handles[0]]); } } VectorModificationType::SetHandles { segment, handles } => { self.segments.handle_primary.insert(*segment, handles[0]); self.segments.handle_end.insert(*segment, handles[1]); } 
VectorModificationType::SetPrimaryHandle { segment, relative_position } => { self.segments.handle_primary.insert(*segment, Some(*relative_position)); } VectorModificationType::SetEndHandle { segment, relative_position } => { self.segments.handle_end.insert(*segment, Some(*relative_position)); } VectorModificationType::SetStartPoint { segment, id } => { self.segments.start_point.insert(*segment, *id); } VectorModificationType::SetEndPoint { segment, id } => { self.segments.end_point.insert(*segment, *id); } VectorModificationType::ApplyPointDelta { point, delta } => { *self.points.delta.entry(*point).or_default() += *delta; } VectorModificationType::ApplyPrimaryDelta { segment, delta } => { let position = self.segments.handle_primary.entry(*segment).or_default(); *position = Some(position.unwrap_or_default() + *delta); } VectorModificationType::ApplyEndDelta { segment, delta } => { let position = self.segments.handle_end.entry(*segment).or_default(); *position = Some(position.unwrap_or_default() + *delta); } } } /// Create a new modification that will convert an empty [`Vector`] into the target [`Vector`]. pub fn create_from_vector<Upstream>(vector: &Vector<Upstream>) -> Self { Self { points: PointModification::create_from_vector(vector), segments: SegmentModification::create_from_vector(vector), regions: RegionModification::create_from_vector(vector), add_g1_continuous: vector.colinear_manipulators.iter().copied().collect(), remove_g1_continuous: HashSet::new(), } } } impl Hash for VectorModification { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { generate_uuid().hash(state) } } // Do we want to enforce that all serialized/deserialized hashmaps are a vec of tuples? 
// TODO: Eventually remove this document upgrade code use serde::de::{SeqAccess, Visitor}; use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; use std::hash::Hash; pub fn serialize_hashmap<K, V, S, H>(hashmap: &HashMap<K, V, H>, serializer: S) -> Result<S::Ok, S::Error> where K: Serialize + Eq + Hash, V: Serialize, S: Serializer, H: BuildHasher, { let mut seq = serializer.serialize_seq(Some(hashmap.len()))?; for (key, value) in hashmap { seq.serialize_element(&(key, value))?; } seq.end() } pub fn deserialize_hashmap<'de, K, V, D, H>(deserializer: D) -> Result<HashMap<K, V, H>, D::Error> where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, D: Deserializer<'de>, H: BuildHasher + Default, { struct HashMapVisitor<K, V, H> { #[allow(clippy::type_complexity)] marker: std::marker::PhantomData<fn() -> HashMap<K, V, H>>, } impl<'de, K, V, H> Visitor<'de> for HashMapVisitor<K, V, H> where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, H: BuildHasher + Default, { type Value = HashMap<K, V, H>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a sequence of tuples") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de>, { let mut hashmap = HashMap::default(); while let Some((key, value)) = seq.next_element()? 
{ hashmap.insert(key, value); } Ok(hashmap) } } let visitor = HashMapVisitor { marker: std::marker::PhantomData }; deserializer.deserialize_seq(visitor) } pub struct AppendBezpath<'a, Upstream: 'static> { first_point: Option<Point>, last_point: Option<Point>, first_point_index: Option<usize>, last_point_index: Option<usize>, first_segment_id: Option<SegmentId>, last_segment_id: Option<SegmentId>, point_id: PointId, segment_id: SegmentId, vector: &'a mut Vector<Upstream>, } impl<'a, Upstream> AppendBezpath<'a, Upstream> { fn new(vector: &'a mut Vector<Upstream>) -> Self { Self { first_point: None, last_point: None, first_point_index: None, last_point_index: None, first_segment_id: None, last_segment_id: None, point_id: vector.point_domain.next_id(), segment_id: vector.segment_domain.next_id(), vector, } } fn append_segment_and_close_path(&mut self, point: Point, handle: BezierHandles) { let handle = if self.first_point.unwrap() != point { // If the first point is not the same as the last point of the path then we append the segment // with given handle and point and then close the path with linear handle. self.append_segment(point, handle); BezierHandles::Linear } else { // if the endpoints are the same then we close the path with given handle. handle }; // Create a new segment. let next_segment_id = self.segment_id.next_id(); self.vector .segment_domain .push(next_segment_id, self.last_point_index.unwrap(), self.first_point_index.unwrap(), handle, StrokeId::ZERO); // Create a new region. let next_region_id = self.vector.region_domain.next_id(); let first_segment_id = self.first_segment_id.unwrap_or(next_segment_id); let last_segment_id = next_segment_id; self.vector.region_domain.push(next_region_id, first_segment_id..=last_segment_id, FillId::ZERO); } fn append_segment(&mut self, end_point: Point, handle: BezierHandles) { // Append the point. 
let next_point_index = self.vector.point_domain.ids().len(); let next_point_id = self.point_id.next_id(); self.vector.point_domain.push(next_point_id, point_to_dvec2(end_point)); // Append the segment. let next_segment_id = self.segment_id.next_id(); self.vector .segment_domain .push(next_segment_id, self.last_point_index.unwrap(), next_point_index, handle, StrokeId::ZERO); // Update the states. self.last_point = Some(end_point); self.last_point_index = Some(next_point_index); self.first_segment_id = Some(self.first_segment_id.unwrap_or(next_segment_id)); self.last_segment_id = Some(next_segment_id); } fn append_first_point(&mut self, point: Point) { self.first_point = Some(point); self.last_point = Some(point); // Append the first point. let next_point_index = self.vector.point_domain.ids().len(); self.vector.point_domain.push(self.point_id.next_id(), point_to_dvec2(point)); // Update the state. self.first_point_index = Some(next_point_index); self.last_point_index = Some(next_point_index); } fn reset(&mut self) { self.first_point = None; self.last_point = None; self.first_point_index = None; self.last_point_index = None; self.first_segment_id = None; self.last_segment_id = None; } pub fn append_bezpath(vector: &'a mut Vector<Upstream>, bezpath: BezPath) { let mut this = Self::new(vector); let mut elements = bezpath.elements().iter().peekable(); while let Some(element) = elements.next() { let close_path = elements.peek().is_some_and(|elm| **elm == PathEl::ClosePath); match *element { PathEl::MoveTo(point) => this.append_first_point(point), PathEl::LineTo(point) => { let handle = BezierHandles::Linear; if close_path { this.append_segment_and_close_path(point, handle); } else { this.append_segment(point, handle); } } PathEl::QuadTo(point, point1) => { let handle = BezierHandles::Quadratic { handle: point_to_dvec2(point) }; if close_path { this.append_segment_and_close_path(point1, handle); } else { this.append_segment(point1, handle); } } PathEl::CurveTo(point, 
point1, point2) => { let handle = BezierHandles::Cubic { handle_start: point_to_dvec2(point), handle_end: point_to_dvec2(point1), }; if close_path { this.append_segment_and_close_path(point2, handle); } else { this.append_segment(point2, handle); } } PathEl::ClosePath => { // Already handled using `append_segment_and_close_path()` hence we reset state and continue. this.reset(); } } } } } pub trait VectorExt { fn append_bezpath(&mut self, bezpath: BezPath); } impl<Upstream: 'static> VectorExt for Vector<Upstream> { fn append_bezpath(&mut self, bezpath: BezPath) { AppendBezpath::append_bezpath(self, bezpath); } } pub trait HandleExt { /// Set the handle's position relative to the anchor which is the start anchor for the primary handle and end anchor for the end handle. #[must_use] fn set_relative_position(self, relative_position: DVec2) -> VectorModificationType; } impl HandleExt for HandleId { fn set_relative_position(self, relative_position: DVec2) -> VectorModificationType { let Self { ty, segment } = self; match ty { HandleType::Primary => VectorModificationType::SetPrimaryHandle { segment, relative_position }, HandleType::End => VectorModificationType::SetEndHandle { segment, relative_position }, } } } #[cfg(test)] mod tests { use kurbo::{PathSeg, QuadBez}; use super::*; use crate::subpath::{Bezier, Subpath}; #[test] fn modify_new() { let vector: Vector<()> = Vector::from_subpaths([Subpath::new_ellipse(DVec2::ZERO, DVec2::ONE), Subpath::new_rect(DVec2::NEG_ONE, DVec2::ZERO)], false); let modify = VectorModification::create_from_vector(&vector); let mut new = Vector::default(); modify.apply(&mut new); assert_eq!(vector, new); } #[test] fn modify_existing() { let subpaths = [ Subpath::new_ellipse(DVec2::ZERO, DVec2::ONE), Subpath::new_rect(DVec2::NEG_ONE, DVec2::ZERO), Subpath::from_beziers( &[ PathSeg::Quad(QuadBez::new(Point::new(0., 0.), Point::new(5., 10.), Point::new(10., 0.))), PathSeg::Quad(QuadBez::new(Point::new(10., 0.), Point::new(15., 10.), 
Point::new(20., 0.))), ], false, ), ]; let mut vector: Vector<()> = Vector::from_subpaths(subpaths, false); let mut modify_new = VectorModification::create_from_vector(&vector); let mut modify_original = VectorModification::default(); for modification in [&mut modify_new, &mut modify_original] { let point = vector.point_domain.ids()[0]; modification.modify(&VectorModificationType::ApplyPointDelta { point, delta: DVec2::X * 0.5 }); let point = vector.point_domain.ids()[9]; modification.modify(&VectorModificationType::ApplyPointDelta { point, delta: DVec2::X }); } let mut new = Vector::default(); modify_new.apply(&mut new); modify_original.apply(&mut vector); assert_eq!(vector, new); assert_eq!(vector.point_domain.positions()[0], DVec2::X); assert_eq!(vector.point_domain.positions()[9], DVec2::new(11., 0.)); assert_eq!( vector.segment_bezier_iter().nth(8).unwrap().1, Bezier::from_quadratic_dvec2(DVec2::new(0., 0.), DVec2::new(5., 10.), DVec2::new(11., 0.)) ); assert_eq!( vector.segment_bezier_iter().nth(9).unwrap().1, Bezier::from_quadratic_dvec2(DVec2::new(11., 0.), DVec2::new(16., 10.), DVec2::new(20., 0.)) ); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/offset_subpath.rs
node-graph/libraries/vector-types/src/vector/algorithms/offset_subpath.rs
use super::bezpath_algorithms::{clip_simple_bezpaths, miter_line_join, round_line_join}; use crate::vector::misc::point_to_dvec2; use kurbo::{BezPath, Join, ParamCurve, PathEl, PathSeg}; /// Value to control smoothness and mathematical accuracy to offset a cubic Bezier. const CUBIC_REGULARIZATION_ACCURACY: f64 = 0.5; /// Constant used to determine if `f64`s are equivalent. pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-7; /// Squared version to avoid sqrt in distance checks. const MAX_ABSOLUTE_DIFFERENCE_SQUARED: f64 = MAX_ABSOLUTE_DIFFERENCE * MAX_ABSOLUTE_DIFFERENCE; const MAX_FITTED_SEGMENTS: usize = 10000; /// Reduces the segments of the bezpath into simple subcurves, then offset each subcurve a set `distance` away. /// The intersections of segments of the subpath are joined using the method specified by the `join` argument. pub fn offset_bezpath(bezpath: &BezPath, distance: f64, join: Join, miter_limit: Option<f64>) -> BezPath { // An offset at a distance 0 from the curve is simply the same curve. // An offset of a single point is not defined. if distance == 0. || bezpath.get_seg(1).is_none() { return bezpath.clone(); } let mut bezpaths = bezpath .segments() .map(|bezier| bezier.to_cubic()) .filter_map(|cubic_bez| { // Skip degenerate curves where all control points are at the same location. // Offsetting a point is undefined and causes infinite recursion in fit_to_bezpath. 
let start = cubic_bez.p0; let is_degenerate = start.distance_squared(cubic_bez.p1) < MAX_ABSOLUTE_DIFFERENCE_SQUARED && start.distance_squared(cubic_bez.p2) < MAX_ABSOLUTE_DIFFERENCE_SQUARED && start.distance_squared(cubic_bez.p3) < MAX_ABSOLUTE_DIFFERENCE_SQUARED; if is_degenerate { return None; } let mut fitted = BezPath::new(); kurbo::offset::offset_cubic(cubic_bez, distance, CUBIC_REGULARIZATION_ACCURACY, &mut fitted); if fitted.segments().count() > MAX_FITTED_SEGMENTS { None } else { fitted.get_seg(1).is_some().then_some(fitted) } }) .collect::<Vec<BezPath>>(); // Clip or join consecutive Subpaths for i in 0..bezpaths.len() - 1 { let j = i + 1; let bezpath1 = &bezpaths[i]; let bezpath2 = &bezpaths[j]; let last_segment_end = point_to_dvec2(bezpath1.segments().last().unwrap().end()); let first_segment_start = point_to_dvec2(bezpath2.segments().next().unwrap().start()); // If the anchors are approximately equal, there is no need to clip / join the segments if last_segment_end.abs_diff_eq(first_segment_start, MAX_ABSOLUTE_DIFFERENCE) { continue; } // The angle is concave. The Subpath overlap and must be clipped let mut apply_join = true; if let Some((clipped_subpath1, clipped_subpath2)) = clip_simple_bezpaths(bezpath1, bezpath2) { bezpaths[i] = clipped_subpath1; bezpaths[j] = clipped_subpath2; apply_join = false; } // The angle is convex. 
The Subpath must be joined using the specified join type if apply_join { match join { Join::Bevel => { let element = PathEl::LineTo(bezpaths[j].segments().next().unwrap().start()); bezpaths[i].push(element); } Join::Miter => { let element = miter_line_join(&bezpaths[i], &bezpaths[j], miter_limit); if let Some(element) = element { bezpaths[i].push(element[0]); bezpaths[i].push(element[1]); } else { let element = PathEl::LineTo(bezpaths[j].segments().next().unwrap().start()); bezpaths[i].push(element); } } Join::Round => { let center = point_to_dvec2(bezpath.get_seg(i + 1).unwrap().end()); let elements = round_line_join(&bezpaths[i], &bezpaths[j], center); bezpaths[i].push(elements[0]); bezpaths[i].push(elements[1]); } } } } // Clip any overlap in the last segment let is_bezpath_closed = bezpath.elements().last().is_some_and(|element| *element == PathEl::ClosePath); if is_bezpath_closed { let mut apply_join = true; if let Some((clipped_subpath1, clipped_subpath2)) = clip_simple_bezpaths(&bezpaths[bezpaths.len() - 1], &bezpaths[0]) { // Merge the clipped subpaths let last_index = bezpaths.len() - 1; bezpaths[last_index] = clipped_subpath1; bezpaths[0] = clipped_subpath2; apply_join = false; } if apply_join { match join { Join::Bevel => { let last_subpath_index = bezpaths.len() - 1; let element = PathEl::LineTo(bezpaths[0].segments().next().unwrap().start()); bezpaths[last_subpath_index].push(element); } Join::Miter => { let last_subpath_index = bezpaths.len() - 1; let element = miter_line_join(&bezpaths[last_subpath_index], &bezpaths[0], miter_limit); if let Some(element) = element { bezpaths[last_subpath_index].push(element[0]); bezpaths[last_subpath_index].push(element[1]); } else { let element = PathEl::LineTo(bezpaths[0].segments().next().unwrap().start()); bezpaths[last_subpath_index].push(element); } } Join::Round => { let last_subpath_index = bezpaths.len() - 1; let center = point_to_dvec2(bezpath.get_seg(1).unwrap().start()); let elements = 
round_line_join(&bezpaths[last_subpath_index], &bezpaths[0], center); bezpaths[last_subpath_index].push(elements[0]); bezpaths[last_subpath_index].push(elements[1]); } } } } // Merge the bezpaths and its segments. Drop points which overlap with one another. let segments = bezpaths.iter().flat_map(|bezpath| bezpath.segments().collect::<Vec<PathSeg>>()).collect::<Vec<PathSeg>>(); let mut offset_bezpath = segments.iter().fold(BezPath::new(), |mut acc, segment| { if acc.elements().is_empty() { acc.move_to(segment.start()); } acc.push(segment.as_path_el()); acc }); if is_bezpath_closed { offset_bezpath.close_path(); } offset_bezpath }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/poisson_disk.rs
node-graph/libraries/vector-types/src/vector/algorithms/poisson_disk.rs
use glam::DVec2; use std::collections::HashMap; use std::f64; const DEEPEST_SUBDIVISION_LEVEL_BEFORE_DISCARDING: usize = 8; /// Fast (O(n) with respect to time and memory) algorithm for generating a maximal set of points using Poisson-disk sampling. /// Based on the paper: /// "Poisson Disk Point Sets by Hierarchical Dart Throwing" /// <https://scholarsarchive.byu.edu/facpub/237/> pub fn poisson_disk_sample( offset: DVec2, width: f64, height: f64, diameter: f64, point_in_shape_checker: impl Fn(DVec2) -> bool, line_intersect_shape_checker: impl Fn((f64, f64), (f64, f64)) -> bool, rng: impl FnMut() -> f64, ) -> Vec<DVec2> { let mut rng = rng; let diameter_squared = diameter.powi(2); // Initialize a place to store the generated points within a spatial acceleration structure let mut points_grid = AccelerationGrid::new(width, height, diameter); // Pick a grid size for the base-level domain that's as large as possible, while also: // - Dividing into an integer number of cells across the dartboard domain, to avoid wastefully throwing darts beyond the width and height of the dartboard domain // - Being fully covered by the radius around a dart thrown anywhere in its area, where the worst-case is a corner which has a distance of sqrt(2) to the opposite corner let greater_dimension = width.max(height); let base_level_grid_size = greater_dimension / (greater_dimension * f64::consts::SQRT_2 / (diameter / 2.)).ceil(); // Initialize the problem by including all base-level squares in the active list since they're all part of the yet-to-be-targetted dartboard domain let base_level = ActiveListLevel::new_filled(base_level_grid_size, offset, width, height, &point_in_shape_checker, &line_intersect_shape_checker); // In the future, if necessary, this could be turned into a fixed-length array with worst-case length `f64::MANTISSA_DIGITS` let mut active_list_levels = vec![base_level]; // Loop until all active squares have been processed, meaning all of the dartboard domain has been 
checked while active_list_levels.iter().any(|active_list| active_list.not_empty()) { // Randomly pick a square in the dartboard domain, with probability proportional to its area let (active_square_level, active_square_index_in_level) = target_active_square(&active_list_levels, &mut rng); // The level contains the list of all active squares at this target square's subdivision depth let level = &mut active_list_levels[active_square_level]; // Take the targetted active square out of the list and get its size let active_square = level.take_square(active_square_index_in_level); let active_square_size = level.square_size(); // Skip this target square if it's within range of any current points, since more nearby points could have been added after this square was included in the active list if !square_not_covered_by_poisson_points(active_square.top_left_corner(), active_square_size / 2., diameter_squared, &points_grid) { continue; } // Throw a dart by picking a random point within this target square let point = { let active_top_left_corner = active_square.top_left_corner(); let x = active_top_left_corner.x + rng() * active_square_size; let y = active_top_left_corner.y + rng() * active_square_size; (x, y).into() }; // If the dart hit a valid spot, save that point (we're now permanently done with this target square's region) if point_not_covered_by_poisson_points(point, diameter_squared, &points_grid) { // Silently reject the point if it lies outside the shape if active_square.fully_in_shape() || point_in_shape_checker(point + offset) { points_grid.insert(point); } } // Otherwise, subdivide this target square and add valid sub-squares back to the active list for later targetting else { // Discard any targetable domain smaller than this limited number of subdivision levels since it's too small to matter let next_level_deeper_level = active_square_level + 1; if next_level_deeper_level > DEEPEST_SUBDIVISION_LEVEL_BEFORE_DISCARDING { continue; } // If necessary for the following 
step, add another layer of depth to store squares at the next subdivision level if active_list_levels.len() <= next_level_deeper_level { active_list_levels.push(ActiveListLevel::new(active_square_size / 2.)) } // Get the list of active squares at the level of depth beneath this target square's level let next_level_deeper = &mut active_list_levels[next_level_deeper_level]; // Subdivide this target square into four sub-squares; running out of numerical precision will make this terminate at very small scales let subdivided_size = active_square_size / 2.; let active_top_left_corner = active_square.top_left_corner(); let subdivided = [ active_top_left_corner + DVec2::new(0., 0.), active_top_left_corner + DVec2::new(subdivided_size, 0.), active_top_left_corner + DVec2::new(0., subdivided_size), active_top_left_corner + DVec2::new(subdivided_size, subdivided_size), ]; // Add the sub-squares which aren't within the radius of a nearby point to the sub-level's active list let half_subdivided_size = subdivided_size / 2.; let new_sub_squares = subdivided.into_iter().filter_map(|sub_square| { // Any sub-squares within the radius of a nearby point are filtered out if !square_not_covered_by_poisson_points(sub_square, half_subdivided_size, diameter_squared, &points_grid) { return None; } // Fully inside the shape if active_square.fully_in_shape() { Some(ActiveSquare::new(sub_square, true)) } // Intersecting the shape's border else { // The sub-square is fully inside the shape if its top-left corner is inside and its edges don't intersect the shape border let point_with_offset = sub_square + offset; let square_edges_intersect_shape = { let min = point_with_offset; let max = min + DVec2::splat(subdivided_size); // Top edge line line_intersect_shape_checker((min.x, min.y), (max.x, min.y)) || // Right edge line line_intersect_shape_checker((max.x, min.y), (max.x, max.y)) || // Bottom edge line line_intersect_shape_checker((max.x, max.y), (min.x, max.y)) || // Left edge line 
line_intersect_shape_checker((min.x, max.y), (min.x, min.y)) }; let sub_square_fully_inside_shape = !square_edges_intersect_shape && point_in_shape_checker(point_with_offset) && point_in_shape_checker(point_with_offset + subdivided_size); Some(ActiveSquare::new(sub_square, sub_square_fully_inside_shape)) } }); next_level_deeper.add_squares(new_sub_squares); } } points_grid.final_points(offset) } /// Randomly pick a square in the dartboard domain, with probability proportional to its area. /// Returns a tuple with the subdivision level depth and the square index at that depth. fn target_active_square(active_list_levels: &[ActiveListLevel], rng: &mut impl FnMut() -> f64) -> (usize, usize) { let active_squares_total_area: f64 = active_list_levels.iter().map(|active_list| active_list.total_area()).sum(); let mut index_into_area = rng() * active_squares_total_area; for (level, active_list_level) in active_list_levels.iter().enumerate() { let subtracted = index_into_area - active_list_level.total_area(); if subtracted > 0. 
{ index_into_area = subtracted; continue; } let active_square_index_in_level = (index_into_area / active_list_levels[level].square_area()).floor() as usize; return (level, active_square_index_in_level); } panic!("index_into_area couldn't be be mapped to a square in any level of the active lists"); } fn point_not_covered_by_poisson_points(point: DVec2, diameter_squared: f64, points_grid: &AccelerationGrid) -> bool { points_grid.nearby_points(point).all(|nearby_point| { let x_separation = nearby_point.x - point.x; let y_separation = nearby_point.y - point.y; x_separation.powi(2) + y_separation.powi(2) > diameter_squared }) } fn square_not_covered_by_poisson_points(point: DVec2, half_square_size: f64, diameter_squared: f64, points_grid: &AccelerationGrid) -> bool { let square_center_x = point.x + half_square_size; let square_center_y = point.y + half_square_size; points_grid.nearby_points(point).all(|nearby_point| { let x_distance = (square_center_x - nearby_point.x).abs() + half_square_size; let y_distance = (square_center_y - nearby_point.y).abs() + half_square_size; x_distance.powi(2) + y_distance.powi(2) > diameter_squared }) } #[inline(always)] fn cartesian_product<A, B>(a: A, b: B) -> impl Iterator<Item = (A::Item, B::Item)> where A: Iterator + Clone, B: Iterator + Clone, A::Item: Clone, B::Item: Clone, { a.flat_map(move |i| b.clone().map(move |j| (i.clone(), j))) } /// A square (represented by its top left corner position and width/height of `square_size`) that is currently a candidate for targetting by the dart throwing process. /// The positive sign bit encodes if the square is contained entirely within the masking shape, or negative if it's outside or intersects the shape path. 
pub struct ActiveSquare(DVec2); impl ActiveSquare { pub fn new(top_left_corner: DVec2, fully_in_shape: bool) -> Self { Self(if fully_in_shape { top_left_corner } else { -top_left_corner }) } pub fn top_left_corner(&self) -> DVec2 { self.0.abs() } pub fn fully_in_shape(&self) -> bool { self.0.x.is_sign_positive() } } pub struct ActiveListLevel { /// List of all subdivided squares of the same size that are currently candidates for targetting by the dart throwing process active_squares: Vec<ActiveSquare>, /// Width and height of the squares in this level of subdivision square_size: f64, /// Current sum of the area in all active squares in this subdivision level total_area: f64, } impl ActiveListLevel { #[inline(always)] pub fn new(square_size: f64) -> Self { Self { active_squares: Vec::new(), square_size, total_area: 0., } } pub fn new_filled( square_size: f64, offset: DVec2, width: f64, height: f64, point_in_shape_checker: impl Fn(DVec2) -> bool, line_intersect_shape_checker: impl Fn((f64, f64), (f64, f64)) -> bool, ) -> Self { // These should divide evenly but rounding is to protect against small numerical imprecision errors let x_squares = (width / square_size).round() as usize; let y_squares = (height / square_size).round() as usize; // Hashes based on the grid cell coordinates and direction of the line: (x, y, is_vertical) let mut line_intersection_cache: HashMap<(usize, usize, bool), bool> = HashMap::new(); // Populate each square with its top-left corner coordinate let active_squares: Vec<_> = cartesian_product(0..x_squares, 0..y_squares) .filter_map(|(x, y)| { let corner = DVec2::new(x as f64 * square_size, y as f64 * square_size); let corner_with_offset = corner + offset; // Lazily check (and cache) if the square's edges intersect the shape, which is an expensive operation let mut square_edges_intersect_shape_value = None; let mut square_edges_intersect_shape = || { square_edges_intersect_shape_value.unwrap_or_else(|| { let square_edges_intersect_shape = { 
let min = corner_with_offset; let max = min + DVec2::splat(square_size); // Top edge line *line_intersection_cache.entry((x, y, false)).or_insert_with(|| line_intersect_shape_checker((min.x, min.y), (max.x, min.y))) || // Right edge line *line_intersection_cache.entry((x + 1, y, true)).or_insert_with(|| line_intersect_shape_checker((max.x, min.y), (max.x, max.y))) || // Bottom edge line *line_intersection_cache.entry((x, y + 1, false)).or_insert_with(|| line_intersect_shape_checker((max.x, max.y), (min.x, max.y))) || // Left edge line *line_intersection_cache.entry((x, y, true)).or_insert_with(|| line_intersect_shape_checker((min.x, max.y), (min.x, min.y))) }; square_edges_intersect_shape_value = Some(square_edges_intersect_shape); square_edges_intersect_shape }) }; // Check if this cell's top-left corner is inside the shape let point_in_shape = point_in_shape_checker(corner_with_offset); // Determine if the square is inside the shape let square_not_outside_shape = point_in_shape || square_edges_intersect_shape(); if square_not_outside_shape { // Check if this cell's bottom-right corner is inside the shape let opposite_corner_with_offset = DVec2::new((x + 1) as f64 * square_size, (y + 1) as f64 * square_size) + offset; let opposite_corner_in_shape = point_in_shape_checker(opposite_corner_with_offset); let square_in_shape = opposite_corner_in_shape && !square_edges_intersect_shape(); Some(ActiveSquare::new(corner, square_in_shape)) } else { None } }) .collect(); // Sum every square's area to get the total let total_area = square_size.powi(2) * active_squares.len() as f64; Self { active_squares, square_size, total_area, } } #[must_use] #[inline(always)] pub fn take_square(&mut self, active_square_index: usize) -> ActiveSquare { let targetted_square = self.active_squares.swap_remove(active_square_index); self.total_area = self.square_size.powi(2) * self.active_squares.len() as f64; targetted_square } #[inline(always)] pub fn add_squares(&mut self, new_squares: impl 
Iterator<Item = ActiveSquare>) { for new_square in new_squares { self.active_squares.push(new_square); } self.total_area = self.square_size.powi(2) * self.active_squares.len() as f64; } #[inline(always)] pub fn square_size(&self) -> f64 { self.square_size } #[inline(always)] pub fn square_area(&self) -> f64 { self.square_size.powi(2) } #[inline(always)] pub fn total_area(&self) -> f64 { self.total_area } #[inline(always)] pub fn not_empty(&self) -> bool { !self.active_squares.is_empty() } } #[derive(Clone, Default)] pub struct PointsList { // The worst-case number of points in a 3x3 grid is 16 (one at each intersection of the four gridlines per axis) storage_slots: [DVec2; 16], length: usize, } impl PointsList { #[inline(always)] pub fn push(&mut self, point: DVec2) { self.storage_slots[self.length] = point; self.length += 1; } #[inline(always)] pub fn list_cell_and_neighbors(&self) -> impl Iterator<Item = DVec2> { // The negative bit is used to store whether a point belongs to a neighboring cell self.storage_slots.into_iter().take(self.length).map(|point| (point.x.abs(), point.y.abs()).into()) } #[inline(always)] pub fn list_cell(&self) -> impl Iterator<Item = DVec2> { // The negative bit is used to store whether a point belongs to a neighboring cell self.storage_slots .into_iter() .take(self.length) .filter(|point| point.x.is_sign_positive() && point.y.is_sign_positive()) } } pub struct AccelerationGrid { size: f64, dimension_x: usize, dimension_y: usize, cells: Vec<PointsList>, } impl AccelerationGrid { #[inline(always)] pub fn new(width: f64, height: f64, size: f64) -> Self { let dimension_x = (width / size).ceil() as usize + 1; let dimension_y = (height / size).ceil() as usize + 1; Self { size, dimension_x, dimension_y, cells: vec![PointsList::default(); dimension_x * dimension_y], } } #[inline(always)] pub fn insert(&mut self, point: DVec2) { let x = (point.x / self.size).floor() as usize; let y = (point.y / self.size).floor() as usize; // Insert this point 
at this cell and the surrounding cells in a 3x3 patch for (x_offset, y_offset) in cartesian_product((-1)..=1, (-1)..=1) { // Avoid going negative let (x, y) = (x as isize + x_offset, y as isize + y_offset); if x < 0 || y < 0 { continue; } // Avoid going beyond the width or height let (x, y) = (x as usize, y as usize); if x > self.dimension_x - 1 || y > self.dimension_y - 1 { continue; } // Get the cell corresponding to the (x, y) index let cell = &mut self.cells[y * self.dimension_x + x]; // Store the given point in this grid cell, and use the negative bit to indicate if this belongs to a neighboring cell cell.push(if x_offset == 0 && y_offset == 0 { point } else { -point }); } } #[inline(always)] pub fn nearby_points(&self, point: DVec2) -> impl Iterator<Item = DVec2> { let x = (point.x / self.size).floor() as usize; let y = (point.y / self.size).floor() as usize; self.cells[y * self.dimension_x + x].list_cell_and_neighbors() } #[inline(always)] pub fn final_points(&self, offset: DVec2) -> Vec<DVec2> { self.cells.iter().flat_map(|cell| cell.list_cell()).map(|point| point + offset).collect() } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/contants.rs
node-graph/libraries/vector-types/src/vector/algorithms/contants.rs
/// Minimum allowable separation between adjacent `t` values when calculating curve intersections pub const MIN_SEPARATION_VALUE: f64 = 5. * 1e-3; /// Constant used to determine if `f64`s are equivalent. #[cfg(test)] pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-3;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/spline.rs
node-graph/libraries/vector-types/src/vector/algorithms/spline.rs
use glam::DVec2; /// Solve for the first handle of an open spline. (The opposite handle can be found by mirroring the result about the anchor.) pub fn solve_spline_first_handle_open(points: &[DVec2]) -> Vec<DVec2> { let len_points = points.len(); if len_points == 0 { return Vec::new(); } if len_points == 1 { return vec![points[0]]; } // Matrix coefficients a, b and c (see https://mathworld.wolfram.com/CubicSpline.html). // Because the `a` coefficients are all 1, they need not be stored. // This algorithm does a variation of the above algorithm. // Instead of using the traditional cubic (a + bt + ct^2 + dt^3), we use the bezier cubic. let mut b = vec![DVec2::new(4., 4.); len_points]; b[0] = DVec2::new(2., 2.); b[len_points - 1] = DVec2::new(2., 2.); let mut c = vec![DVec2::new(1., 1.); len_points]; // 'd' is the the second point in a cubic bezier, which is what we solve for let mut d = vec![DVec2::ZERO; len_points]; d[0] = DVec2::new(2. * points[1].x + points[0].x, 2. * points[1].y + points[0].y); d[len_points - 1] = DVec2::new(3. * points[len_points - 1].x, 3. * points[len_points - 1].y); for idx in 1..(len_points - 1) { d[idx] = DVec2::new(4. * points[idx].x + 2. * points[idx + 1].x, 4. * points[idx].y + 2. * points[idx + 1].y); } // Solve with Thomas algorithm (see https://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm) // Now we do row operations to eliminate `a` coefficients. c[0] /= -b[0]; d[0] /= -b[0]; #[allow(clippy::assign_op_pattern)] for i in 1..len_points { b[i] += c[i - 1]; // For some reason this `+=` version makes the borrow checker mad: // d[i] += d[i-1] d[i] = d[i] + d[i - 1]; c[i] /= -b[i]; d[i] /= -b[i]; } // At this point b[i] == -a[i + 1] and a[i] == 0. // Now we do row operations to eliminate 'c' coefficients and solve. d[len_points - 1] *= -1.; #[allow(clippy::assign_op_pattern)] for i in (0..len_points - 1).rev() { d[i] = d[i] - (c[i] * d[i + 1]); d[i] *= -1.; // d[i] /= b[i] } d } /// Solve for the first handle of a closed spline. 
(The opposite handle can be found by mirroring the result about the anchor.) /// If called with fewer than 3 points, this function will return an empty result. pub fn solve_spline_first_handle_closed(points: &[DVec2]) -> Vec<DVec2> { let len_points = points.len(); if len_points < 3 { return Vec::new(); } // Matrix coefficients `a`, `b` and `c` (see https://mathworld.wolfram.com/CubicSpline.html). // We don't really need to allocate them but it keeps the maths understandable. let a = vec![DVec2::ONE; len_points]; let b = vec![DVec2::splat(4.); len_points]; let c = vec![DVec2::ONE; len_points]; let mut cmod = vec![DVec2::ZERO; len_points]; let mut u = vec![DVec2::ZERO; len_points]; // `x` is initially the output of the matrix multiplication, but is converted to the second value. let mut x = vec![DVec2::ZERO; len_points]; for (i, point) in x.iter_mut().enumerate() { let previous_i = i.checked_sub(1).unwrap_or(len_points - 1); let next_i = (i + 1) % len_points; *point = 3. * (points[next_i] - points[previous_i]); } // Solve using https://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm#Variants (the variant using periodic boundary conditions). // This code below is based on the reference C language implementation provided in that section of the article. let alpha = a[0]; let beta = c[len_points - 1]; // Arbitrary, but chosen such that division by zero is avoided. let gamma = -b[0]; cmod[0] = alpha / (b[0] - gamma); u[0] = gamma / (b[0] - gamma); x[0] /= b[0] - gamma; // Handle from from `1` to `len_points - 2` (inclusive). for ix in 1..=(len_points - 2) { let m = 1.0 / (b[ix] - a[ix] * cmod[ix - 1]); cmod[ix] = c[ix] * m; u[ix] = (0.0 - a[ix] * u[ix - 1]) * m; x[ix] = (x[ix] - a[ix] * x[ix - 1]) * m; } // Handle `len_points - 1`. 
let m = 1.0 / (b[len_points - 1] - alpha * beta / gamma - beta * cmod[len_points - 2]); u[len_points - 1] = (alpha - a[len_points - 1] * u[len_points - 2]) * m; x[len_points - 1] = (x[len_points - 1] - a[len_points - 1] * x[len_points - 2]) * m; // Loop from `len_points - 2` to `0` (inclusive). for ix in (0..=(len_points - 2)).rev() { u[ix] = u[ix] - cmod[ix] * u[ix + 1]; x[ix] = x[ix] - cmod[ix] * x[ix + 1]; } let fact = (x[0] + x[len_points - 1] * beta / gamma) / (1.0 + u[0] + u[len_points - 1] * beta / gamma); for ix in 0..(len_points) { x[ix] -= fact * u[ix]; } let mut real = vec![DVec2::ZERO; len_points]; for i in 0..len_points { let previous = i.checked_sub(1).unwrap_or(len_points - 1); let next = (i + 1) % len_points; real[i] = x[previous] * a[next] + x[i] * b[i] + x[next] * c[i]; } // The matrix is now solved. // Since we have computed the derivative, work back to find the start handle. for i in 0..len_points { x[i] = (x[i] / 3.) + points[i]; } x } #[cfg(test)] mod tests { use super::*; #[test] fn closed_spline() { use crate::vector::misc::{dvec2_to_point, point_to_dvec2}; use kurbo::{BezPath, ParamCurve, ParamCurveDeriv}; // These points are just chosen arbitrary let points = [DVec2::new(0., 0.), DVec2::new(0., 0.), DVec2::new(6., 5.), DVec2::new(7., 9.), DVec2::new(2., 3.)]; // List of first handle or second point in a cubic bezier curve. let first_handles = solve_spline_first_handle_closed(&points); // Construct the Subpath let mut bezpath = BezPath::new(); bezpath.move_to(dvec2_to_point(points[0])); for i in 0..first_handles.len() { let next_i = i + 1; let next_i = if next_i == first_handles.len() { 0 } else { next_i }; // First handle or second point of a cubic Bezier curve. let p1 = dvec2_to_point(first_handles[i]); // Second handle or third point of a cubic Bezier curve. let p2 = dvec2_to_point(2. * points[next_i] - first_handles[next_i]); // Endpoint or fourth point of a cubic Bezier curve. 
let p3 = dvec2_to_point(points[next_i]); bezpath.curve_to(p1, p2, p3); } // For each pair of bézier curves, ensure that the second derivative is continuous for (bézier_a, bézier_b) in bezpath.segments().zip(bezpath.segments().skip(1).chain(bezpath.segments().take(1))) { let derivative2_end_a = point_to_dvec2(bézier_a.to_cubic().deriv().eval(1.)); let derivative2_start_b = point_to_dvec2(bézier_b.to_cubic().deriv().eval(0.)); assert!( derivative2_end_a.abs_diff_eq(derivative2_start_b, 1e-10), "second derivative at the end of a {derivative2_end_a} is equal to the second derivative at the start of b {derivative2_start_b}" ); } } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/util.rs
node-graph/libraries/vector-types/src/vector/algorithms/util.rs
use glam::DVec2; use kurbo::{ParamCurve, ParamCurveDeriv, PathSeg}; pub fn pathseg_tangent(segment: PathSeg, t: f64) -> DVec2 { // NOTE: .deriv() method gives inaccurate result when it is 1. let t = if t == 1. { 1. - f64::EPSILON } else { t }; let tangent = match segment { PathSeg::Line(line) => line.deriv().eval(t), PathSeg::Quad(quad_bez) => quad_bez.deriv().eval(t), PathSeg::Cubic(cubic_bez) => cubic_bez.deriv().eval(t), }; DVec2::new(tangent.x, tangent.y) } // Compare two f64s with some maximum absolute difference to account for floating point errors #[cfg(test)] pub fn compare_f64s(f1: f64, f2: f64) -> bool { (f1 - f2).abs() < super::contants::MAX_ABSOLUTE_DIFFERENCE } /// Compare points by allowing some maximum absolute difference to account for floating point errors #[cfg(test)] pub fn compare_points(p1: kurbo::Point, p2: kurbo::Point) -> bool { let (p1, p2) = (crate::vector::misc::point_to_dvec2(p1), crate::vector::misc::point_to_dvec2(p2)); p1.abs_diff_eq(p2, super::contants::MAX_ABSOLUTE_DIFFERENCE) } /// Compare vectors of points by allowing some maximum absolute difference to account for floating point errors #[cfg(test)] pub fn compare_vec_of_points(a: Vec<kurbo::Point>, b: Vec<kurbo::Point>, max_absolute_difference: f64) -> bool { a.len() == b.len() && a.into_iter() .zip(b) .map(|(p1, p2)| (crate::vector::misc::point_to_dvec2(p1), crate::vector::misc::point_to_dvec2(p2))) .all(|(p1, p2)| p1.abs_diff_eq(p2, max_absolute_difference)) } /// Compare the two values in a `DVec2` independently with a provided max absolute value difference. #[cfg(test)] pub fn dvec2_compare(a: kurbo::Point, b: kurbo::Point, max_abs_diff: f64) -> glam::BVec2 { glam::BVec2::new((a.x - b.x).abs() < max_abs_diff, (a.y - b.y).abs() < max_abs_diff) }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/mod.rs
node-graph/libraries/vector-types/src/vector/algorithms/mod.rs
pub mod bezpath_algorithms; mod contants; pub mod intersection; pub mod merge_by_distance; pub mod offset_subpath; pub mod poisson_disk; pub mod spline; pub mod util;
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false
GraphiteEditor/Graphite
https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/node-graph/libraries/vector-types/src/vector/algorithms/intersection.rs
node-graph/libraries/vector-types/src/vector/algorithms/intersection.rs
use super::contants::MIN_SEPARATION_VALUE; use kurbo::{BezPath, DEFAULT_ACCURACY, ParamCurve, PathSeg, Shape}; use lyon_geom::{CubicBezierSegment, Point}; /// Converts a kurbo cubic bezier to a lyon_geom CubicBezierSegment fn kurbo_cubic_to_lyon(cubic: kurbo::CubicBez) -> CubicBezierSegment<f64> { CubicBezierSegment { from: Point::new(cubic.p0.x, cubic.p0.y), ctrl1: Point::new(cubic.p1.x, cubic.p1.y), ctrl2: Point::new(cubic.p2.x, cubic.p2.y), to: Point::new(cubic.p3.x, cubic.p3.y), } } /// Fast cubic-cubic intersection using lyon_geom's analytical approach fn cubic_cubic_intersections_lyon(cubic1: kurbo::CubicBez, cubic2: kurbo::CubicBez) -> Vec<(f64, f64)> { let lyon_cubic1 = kurbo_cubic_to_lyon(cubic1); let lyon_cubic2 = kurbo_cubic_to_lyon(cubic2); lyon_cubic1.cubic_intersections_t(&lyon_cubic2).to_vec() } /// Calculates the intersection points the bezpath has with a given segment and returns a list of `(usize, f64)` tuples, /// where the `usize` represents the index of the segment in the bezpath, and the `f64` represents the `t`-value local to /// that segment where the intersection occurred. /// /// `minimum_separation` is the minimum difference that two adjacent `t`-values must have when comparing adjacent `t`-values in sorted order. pub fn bezpath_and_segment_intersections(bezpath: &BezPath, segment: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(usize, f64)> { bezpath .segments() .enumerate() .flat_map(|(index, this_segment)| { filtered_segment_intersections(this_segment, segment, accuracy, minimum_separation) .into_iter() .map(|t| (index, t)) .collect::<Vec<(usize, f64)>>() }) .collect() } /// Calculates the intersection points the bezpath has with another given bezpath and returns a list of parametric `t`-values. 
pub fn bezpath_intersections(bezpath1: &BezPath, bezpath2: &BezPath, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(usize, f64)> { let mut intersection_t_values: Vec<(usize, f64)> = bezpath2 .segments() .flat_map(|bezier| bezpath_and_segment_intersections(bezpath1, bezier, accuracy, minimum_separation)) .collect(); intersection_t_values.sort_by(|a, b| a.partial_cmp(b).unwrap()); intersection_t_values } /// Calculates the intersection points the segment has with another given segment and returns a list of parametric `t`-values with given accuracy. pub fn segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>) -> Vec<(f64, f64)> { let accuracy = accuracy.unwrap_or(DEFAULT_ACCURACY); match (segment1, segment2) { (PathSeg::Line(line), segment2) => segment2.intersect_line(line).iter().map(|i| (i.line_t, i.segment_t)).collect(), (segment1, PathSeg::Line(line)) => segment1.intersect_line(line).iter().map(|i| (i.segment_t, i.line_t)).collect(), // Fast path for cubic-cubic intersections using lyon_geom (PathSeg::Cubic(cubic1), PathSeg::Cubic(cubic2)) => cubic_cubic_intersections_lyon(cubic1, cubic2), (segment1, segment2) => { let mut intersections = Vec::new(); segment_intersections_inner(segment1, 0., 1., segment2, 0., 1., accuracy, &mut intersections); intersections } } } pub fn subsegment_intersections(segment1: PathSeg, min_t1: f64, max_t1: f64, segment2: PathSeg, min_t2: f64, max_t2: f64, accuracy: Option<f64>) -> Vec<(f64, f64)> { let accuracy = accuracy.unwrap_or(DEFAULT_ACCURACY); match (segment1, segment2) { (PathSeg::Line(line), segment2) => segment2.intersect_line(line).iter().map(|i| (i.line_t, i.segment_t)).collect(), (segment1, PathSeg::Line(line)) => segment1.intersect_line(line).iter().map(|i| (i.segment_t, i.line_t)).collect(), // Fast path for cubic-cubic intersections using lyon_geom with subsegment parameters (PathSeg::Cubic(cubic1), PathSeg::Cubic(cubic2)) => { let sub_cubic1 = 
cubic1.subsegment(min_t1..max_t1); let sub_cubic2 = cubic2.subsegment(min_t2..max_t2); cubic_cubic_intersections_lyon(sub_cubic1, sub_cubic2) .into_iter() // Convert subsegment t-values back to original segment t-values .map(|(t1, t2)| { let original_t1 = min_t1 + t1 * (max_t1 - min_t1); let original_t2 = min_t2 + t2 * (max_t2 - min_t2); (original_t1, original_t2) }) .collect() } (segment1, segment2) => { let mut intersections = Vec::new(); segment_intersections_inner(segment1, min_t1, max_t1, segment2, min_t2, max_t2, accuracy, &mut intersections); intersections } } } fn approx_bounding_box(path_seg: PathSeg) -> kurbo::Rect { use kurbo::Rect; match path_seg { PathSeg::Line(line) => kurbo::Rect::from_points(line.p0, line.p1), PathSeg::Quad(quad_bez) => { let r1 = Rect::from_points(quad_bez.p0, quad_bez.p1); let r2 = Rect::from_points(quad_bez.p1, quad_bez.p2); r1.union(r2) } PathSeg::Cubic(cubic_bez) => { let r1 = Rect::from_points(cubic_bez.p0, cubic_bez.p1); let r2 = Rect::from_points(cubic_bez.p2, cubic_bez.p3); r1.union(r2) } } } /// Implements [https://pomax.github.io/bezierinfo/#curveintersection] to find intersection between two Bezier segments /// by splitting the segment recursively until the size of the subsegment's bounding box is smaller than the accuracy. 
#[allow(clippy::too_many_arguments)] fn segment_intersections_inner(segment1: PathSeg, min_t1: f64, max_t1: f64, segment2: PathSeg, min_t2: f64, max_t2: f64, accuracy: f64, intersections: &mut Vec<(f64, f64)>) { let bbox1 = approx_bounding_box(segment1.subsegment(min_t1..max_t1)); let bbox2 = approx_bounding_box(segment2.subsegment(min_t2..max_t2)); if intersections.len() > 50 { return; } let mid_t1 = (min_t1 + max_t1) / 2.; let mid_t2 = (min_t2 + max_t2) / 2.; // Check if the bounding boxes overlap if bbox1.overlaps(bbox2) { // If bounding boxes overlap and they are small enough, we have found an intersection if bbox1.width().abs() < accuracy && bbox1.height().abs() < accuracy && bbox2.width().abs() < accuracy && bbox2.height().abs() < accuracy { // Use the middle `t` value, append the corresponding `t` value intersections.push((mid_t1, mid_t2)); return; } // Split curves in half let (seg11, seg12) = segment1.subdivide(); let (seg21, seg22) = segment2.subdivide(); // Repeat checking the intersection with the combinations of the two halves of each curve segment_intersections_inner(seg11, min_t1, mid_t1, seg21, min_t2, mid_t2, accuracy, intersections); segment_intersections_inner(seg11, min_t1, mid_t1, seg22, mid_t2, max_t2, accuracy, intersections); segment_intersections_inner(seg12, mid_t1, max_t1, seg21, min_t2, mid_t2, accuracy, intersections); segment_intersections_inner(seg12, mid_t1, max_t1, seg22, mid_t2, max_t2, accuracy, intersections); } } // TODO: Use an `impl Iterator` return type instead of a `Vec` /// Returns a list of filtered parametric `t` values that correspond to intersection points between the current bezier segment and the provided one /// such that the difference between adjacent `t` values in sorted order is greater than some minimum separation value. If the difference /// between 2 adjacent `t` values is less than the minimum difference, the filtering takes the larger `t` value and discards the smaller `t` value. 
/// The returned `t` values are with respect to the current bezier segment, not the provided parameter. /// If the provided segment is linear, then zero intersection points will be returned along colinear segments. /// /// `accuracy` defines, for intersections where the provided bezier segment is non-linear, the maximum size of the bounding boxes to be considered an intersection point. /// /// `minimum_separation` is the minimum difference between adjacent `t` values in sorted order. pub fn filtered_segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<f64> { let mut intersection_t_values = segment_intersections(segment1, segment2, accuracy); intersection_t_values.sort_by(|a, b| a.partial_cmp(b).unwrap()); intersection_t_values.iter().map(|x| x.0).fold(Vec::new(), |mut accumulator, t| { if !accumulator.is_empty() && (accumulator.last().unwrap() - t).abs() < minimum_separation.unwrap_or(MIN_SEPARATION_VALUE) { accumulator.pop(); } accumulator.push(t); accumulator }) } // TODO: Use an `impl Iterator` return type instead of a `Vec` /// Returns a list of pairs of filtered parametric `t` values that correspond to intersection points between the current bezier curve and the provided /// one such that the difference between adjacent `t` values in sorted order is greater than some minimum separation value. If the difference between /// two adjacent `t` values is less than the minimum difference, the filtering takes the larger `t` value and discards the smaller `t` value. /// The first value in pair is with respect to the current bezier and the second value in pair is with respect to the provided parameter. /// If the provided curve is linear, then zero intersection points will be returned along colinear segments. /// /// `error`, for intersections where the provided bezier is non-linear, defines the threshold for bounding boxes to be considered an intersection point. 
/// /// `minimum_separation` is the minimum difference between adjacent `t` values in sorted order pub fn filtered_all_segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(f64, f64)> { let mut intersection_t_values = segment_intersections(segment1, segment2, accuracy); intersection_t_values.sort_by(|a, b| (a.0 + a.1).partial_cmp(&(b.0 + b.1)).unwrap()); intersection_t_values.iter().fold(Vec::new(), |mut accumulator, t| { if !accumulator.is_empty() && (accumulator.last().unwrap().0 - t.0).abs() < minimum_separation.unwrap_or(MIN_SEPARATION_VALUE) && (accumulator.last().unwrap().1 - t.1).abs() < minimum_separation.unwrap_or(MIN_SEPARATION_VALUE) { accumulator.pop(); } accumulator.push(*t); accumulator }) } /// Helper function to compute intersections between lists of subcurves. /// This function uses the algorithm implemented in `intersections_between_subcurves`. fn intersections_between_vectors_of_path_segments(subcurves1: &[(f64, f64, PathSeg)], subcurves2: &[(f64, f64, PathSeg)], accuracy: Option<f64>) -> Vec<(f64, f64)> { let segment_pairs = subcurves1.iter().flat_map(move |(t11, t12, curve1)| { subcurves2 .iter() .filter_map(move |(t21, t22, curve2)| curve1.bounding_box().overlaps(curve2.bounding_box()).then_some((t11, t12, curve1, t21, t22, curve2))) }); segment_pairs .flat_map(|(&t11, &t12, &curve1, &t21, &t22, &curve2)| subsegment_intersections(curve1, t11, t12, curve2, t21, t22, accuracy)) .collect::<Vec<(f64, f64)>>() } fn pathseg_self_intersection(segment: PathSeg, accuracy: Option<f64>) -> Vec<(f64, f64)> { let cubic_bez = match segment { PathSeg::Line(_) | PathSeg::Quad(_) => return vec![], PathSeg::Cubic(cubic_bez) => cubic_bez, }; // Get 2 copies of the reduced curves let quads1 = cubic_bez.to_quads(DEFAULT_ACCURACY).map(|(t1, t2, quad_bez)| (t1, t2, PathSeg::Quad(quad_bez))).collect::<Vec<_>>(); let quads2 = quads1.clone(); let num_curves = quads1.len(); // Adjacent reduced curves 
cannot intersect if num_curves <= 2 { return vec![]; } // For each curve, look for intersections with every curve that is at least 2 indices away quads1 .iter() .take(num_curves - 2) .enumerate() .flat_map(|(index, &subsegment)| intersections_between_vectors_of_path_segments(&[subsegment], &quads2[index + 2..], accuracy)) .collect() } /// Returns a list of parametric `t` values that correspond to the self intersection points of the current bezier curve. For each intersection point, the returned `t` value is the smaller of the two that correspond to the point. /// If the difference between 2 adjacent `t` values is less than the minimum difference, the filtering takes the larger `t` value and discards the smaller `t` value. /// - `error` - For intersections with non-linear beziers, `error` defines the threshold for bounding boxes to be considered an intersection point. /// - `minimum_separation` - The minimum difference between adjacent `t` values in sorted order pub fn pathseg_self_intersections(segment: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(f64, f64)> { let mut intersection_t_values = pathseg_self_intersection(segment, accuracy); intersection_t_values.sort_by(|a, b| (a.0 + a.1).partial_cmp(&(b.0 + b.1)).unwrap()); intersection_t_values.iter().fold(Vec::new(), |mut accumulator, t| { if !accumulator.is_empty() && (accumulator.last().unwrap().0 - t.0).abs() < minimum_separation.unwrap_or(MIN_SEPARATION_VALUE) && (accumulator.last().unwrap().1 - t.1).abs() < minimum_separation.unwrap_or(MIN_SEPARATION_VALUE) { accumulator.pop(); } accumulator.push(*t); accumulator }) } #[cfg(test)] mod tests { use super::{bezpath_and_segment_intersections, filtered_segment_intersections}; use crate::vector::algorithms::{ contants::MAX_ABSOLUTE_DIFFERENCE, util::{compare_points, compare_vec_of_points, dvec2_compare}, }; use kurbo::{BezPath, CubicBez, Line, ParamCurve, PathEl, PathSeg, Point, QuadBez}; #[test] fn test_intersect_line_segment_quadratic() 
{ let p1 = Point::new(30., 50.); let p2 = Point::new(140., 30.); let p3 = Point::new(160., 170.); // Intersection at edge of curve let bezier = PathSeg::Quad(QuadBez::new(p1, p2, p3)); let line1 = PathSeg::Line(Line::new(Point::new(20., 50.), Point::new(40., 50.))); let intersections1 = filtered_segment_intersections(bezier, line1, None, None); assert!(intersections1.len() == 1); assert!(compare_points(bezier.eval(intersections1[0]), p1)); // Intersection in the middle of curve let line2 = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(30., 30.))); let intersections2 = filtered_segment_intersections(bezier, line2, None, None); assert!(compare_points(bezier.eval(intersections2[0]), Point::new(47.77355, 47.77354))); } #[test] fn test_intersect_curve_cubic_edge_case() { // M34 107 C40 40 120 120 102 29 let p1 = Point::new(34., 107.); let p2 = Point::new(40., 40.); let p3 = Point::new(120., 120.); let p4 = Point::new(102., 29.); let cubic_segment = PathSeg::Cubic(CubicBez::new(p1, p2, p3, p4)); let linear_segment = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.))); let intersections = filtered_segment_intersections(cubic_segment, linear_segment, None, None); assert_eq!(intersections.len(), 1); } #[test] fn test_intersect_curve() { let p0 = Point::new(30., 30.); let p1 = Point::new(60., 140.); let p2 = Point::new(150., 30.); let p3 = Point::new(160., 160.); let cubic_segment = PathSeg::Cubic(CubicBez::new(p0, p1, p2, p3)); let p0 = Point::new(175., 140.); let p1 = Point::new(20., 20.); let p2 = Point::new(120., 20.); let quadratic_segment = PathSeg::Quad(QuadBez::new(p0, p1, p2)); let intersections1 = filtered_segment_intersections(cubic_segment, quadratic_segment, None, None); let intersections2 = filtered_segment_intersections(quadratic_segment, cubic_segment, None, None); let intersections1_points: Vec<Point> = intersections1.iter().map(|&t| cubic_segment.eval(t)).collect(); let intersections2_points: Vec<Point> = 
intersections2.iter().map(|&t| quadratic_segment.eval(t)).rev().collect(); assert!(compare_vec_of_points(intersections1_points, intersections2_points, 2.)); } #[test] fn intersection_linear_multiple_subpath_curves_test_one() { // M 35 125 C 40 40 120 120 43 43 Q 175 90 145 150 Q 70 185 35 125 Z let cubic_start = Point::new(35., 125.); let cubic_handle_1 = Point::new(40., 40.); let cubic_handle_2 = Point::new(120., 120.); let cubic_end = Point::new(43., 43.); let quadratic_1_handle = Point::new(175., 90.); let quadratic_end = Point::new(145., 150.); let quadratic_2_handle = Point::new(70., 185.); let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end)); let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end)); let bezpath = BezPath::from_vec(vec![ PathEl::MoveTo(cubic_start), PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end), PathEl::QuadTo(quadratic_1_handle, quadratic_end), PathEl::QuadTo(quadratic_2_handle, cubic_start), PathEl::ClosePath, ]); let linear_segment = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.))); let cubic_intersections = filtered_segment_intersections(cubic_segment, linear_segment, None, None); let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, linear_segment, None, None); let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, linear_segment, None, None); assert!( dvec2_compare( cubic_segment.eval(cubic_intersections[0]), bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); assert!( dvec2_compare( quadratic_segment.eval(quadratic_1_intersections[0]), bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); assert!( dvec2_compare( quadratic_segment.eval(quadratic_1_intersections[1]), 
bezpath.segments().nth(bezpath_intersections[2].0).unwrap().eval(bezpath_intersections[2].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); } #[test] fn intersection_linear_multiple_subpath_curves_test_two() { // M34 107 C40 40 120 120 102 29 Q175 90 129 171 Q70 185 34 107 Z // M150 150 L 20 20 let cubic_start = Point::new(34., 107.); let cubic_handle_1 = Point::new(40., 40.); let cubic_handle_2 = Point::new(120., 120.); let cubic_end = Point::new(102., 29.); let quadratic_1_handle = Point::new(175., 90.); let quadratic_end = Point::new(129., 171.); let quadratic_2_handle = Point::new(70., 185.); let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end)); let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end)); let bezpath = BezPath::from_vec(vec![ PathEl::MoveTo(cubic_start), PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end), PathEl::QuadTo(quadratic_1_handle, quadratic_end), PathEl::QuadTo(quadratic_2_handle, cubic_start), PathEl::ClosePath, ]); let line = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.))); let cubic_intersections = filtered_segment_intersections(cubic_segment, line, None, None); let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, line, None, None); let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, line, None, None); assert!( dvec2_compare( cubic_segment.eval(cubic_intersections[0]), bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); assert!( dvec2_compare( quadratic_segment.eval(quadratic_1_intersections[0]), bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); } #[test] fn intersection_linear_multiple_subpath_curves_test_three() { // M35 125 C40 40 120 120 44 44 Q175 90 145 150 Q70 185 35 125 Z let cubic_start = Point::new(35., 125.); 
let cubic_handle_1 = Point::new(40., 40.); let cubic_handle_2 = Point::new(120., 120.); let cubic_end = Point::new(44., 44.); let quadratic_1_handle = Point::new(175., 90.); let quadratic_end = Point::new(145., 150.); let quadratic_2_handle = Point::new(70., 185.); let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end)); let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end)); let bezpath = BezPath::from_vec(vec![ PathEl::MoveTo(cubic_start), PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end), PathEl::QuadTo(quadratic_1_handle, quadratic_end), PathEl::QuadTo(quadratic_2_handle, cubic_start), PathEl::ClosePath, ]); let line = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.))); let cubic_intersections = filtered_segment_intersections(cubic_segment, line, None, None); let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, line, None, None); let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, line, None, None); assert!( dvec2_compare( cubic_segment.eval(cubic_intersections[0]), bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); assert!( dvec2_compare( quadratic_segment.eval(quadratic_1_intersections[0]), bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); assert!( dvec2_compare( quadratic_segment.eval(quadratic_1_intersections[1]), bezpath.segments().nth(bezpath_intersections[2].0).unwrap().eval(bezpath_intersections[2].1), MAX_ABSOLUTE_DIFFERENCE ) .all() ); } }
rust
Apache-2.0
42440c0d0bcf5735b05d8a9e5bd27187f74b1589
2026-01-04T15:38:29.103662Z
false