repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/xsd_rs/xop/src/include.rs | xsd_rs/xop/src/include.rs | use validate::Validate;
use yaserde_derive::{YaDeserialize, YaSerialize};
// pub type Include = Include;
/// XOP `Include` element (XML-binary Optimized Packaging): references an
/// out-of-band binary part by URI instead of embedding it in the document.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2004/08/xop/include"
)]
pub struct Include {
    // URI of the referenced binary content, carried as an XML attribute.
    #[yaserde(attribute, rename = "href")]
    pub href: String,
}
impl Validate for Include {}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/xsd_rs/soap_envelope/src/lib.rs | xsd_rs/soap_envelope/src/lib.rs | #![allow(clippy::derive_partial_eq_without_eq)]
use std::str::FromStr;
use validate::Validate;
use xsd_macro_utils::*;
use yaserde_derive::{YaDeserialize, YaSerialize};
/// SOAP 1.2 message envelope: an optional `Header` followed by a mandatory
/// `Body`.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Envelope {
    #[yaserde(prefix = "tns", rename = "Header")]
    pub header: Option<Header>,
    #[yaserde(prefix = "tns", rename = "Body")]
    pub body: Body,
}
impl Validate for Envelope {}
// pub type Header = Header;
// Elements replacing the wildcard MUST be namespace qualified, but can be in
// the targetNamespace
/// SOAP `Header`. The schema allows arbitrary qualified extension elements;
/// none are modeled here, so the struct is empty.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Header {}
impl Validate for Header {}
// pub type Body = Body;
/// SOAP `Body`. Payload content is not modeled on this type; the struct only
/// marks the element's presence in the envelope.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Body {}
impl Validate for Body {}
// pub type MustUnderstand = bool;
// pub type Relay = bool;
// pub type Role = String;
// pub type EncodingStyle = String;
// pub type Fault = Fault;
// Fault reporting structure
/// SOAP 1.2 fault reporting structure: mandatory `Code` and `Reason`,
/// optional `Node`, `Role`, and application-specific `Detail`.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Fault {
    #[yaserde(prefix = "tns", rename = "Code")]
    pub code: Faultcode,
    #[yaserde(prefix = "tns", rename = "Reason")]
    pub reason: Faultreason,
    #[yaserde(prefix = "tns", rename = "Node")]
    pub node: Option<String>,
    #[yaserde(prefix = "tns", rename = "Role")]
    pub role: Option<String>,
    #[yaserde(prefix = "tns", rename = "Detail")]
    pub detail: Option<Detail>,
}
impl Fault {
    /// True when the fault subcode's value mentions `NotAuthorized`, the
    /// marker ONVIF devices use for failed authentication. A fault without
    /// a subcode is never treated as an authorization failure.
    pub fn is_unauthorized(&self) -> bool {
        self.code
            .subcode
            .as_ref()
            .map_or(false, |subcode| subcode.value.contains("NotAuthorized"))
    }
}
impl Validate for Fault {}
/// Fault `Reason`: one or more human-readable `Text` entries, typically one
/// per language.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Faultreason {
    #[yaserde(prefix = "tns", rename = "Text")]
    pub text: Vec<Reasontext>,
}
impl Validate for Faultreason {}
/// A single human-readable fault explanation; the `xml:lang` attribute
/// carries its language tag.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Reasontext {
    // Fix: a comma was missing between `prefix = "xml"` and `rename = "lang"`,
    // which is not a valid attribute token sequence.
    #[yaserde(attribute, prefix = "xml", rename = "lang")]
    pub lang: String,
    // TODO: process the value of Reasontext too
}
impl Validate for Reasontext {}
/// Fault `Code`: a mandatory top-level `Value` plus an optional, more
/// specific `Subcode`.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Faultcode {
    #[yaserde(prefix = "tns", rename = "Value")]
    pub value: FaultcodeEnum,
    #[yaserde(prefix = "tns", rename = "Subcode")]
    pub subcode: Option<Subcode>,
}
impl Validate for Faultcode {}
// Newtype wrapper around the fault-code QName string; serialized as plain
// text via the Utils* derive helpers.
#[derive(Default, PartialEq, Debug, UtilsTupleIo, UtilsDefaultSerde)]
pub struct FaultcodeEnum(pub String);
impl Validate for FaultcodeEnum {}
/// Fault `Subcode`: refines the fault code. Nested subcodes exist in the
/// schema but are intentionally not modeled (see TODO below).
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Subcode {
    #[yaserde(prefix = "tns", rename = "Value")]
    pub value: String,
    // TODO: handle recursion
    // #[yaserde(prefix = "tns", rename = "Subcode")]
    // pub subcode: Vec<Subcode>,
}
impl Validate for Subcode {}
/// Fault `Detail`. Application-specific content is not modeled; the struct
/// only marks the element's presence.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct Detail {}
impl Validate for Detail {}
// pub type NotUnderstood = NotUnderstoodType;
/// `NotUnderstood` fault helper: names (via `qname`) a mandatory header
/// block the receiver could not process.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct NotUnderstoodType {
    #[yaserde(attribute, rename = "qname")]
    pub qname: String,
}
impl Validate for NotUnderstoodType {}
/// One envelope version supported by a node, identified by the qualified
/// name of its `Envelope` element.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct SupportedEnvType {
    #[yaserde(attribute, rename = "qname")]
    pub qname: String,
}
impl Validate for SupportedEnvType {}
// pub type Upgrade = UpgradeType;
/// `Upgrade` fault payload: lists the envelope versions the node supports,
/// in order of preference.
#[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)]
#[yaserde(
    prefix = "tns",
    namespace = "tns: http://www.w3.org/2003/05/soap-envelope"
)]
pub struct UpgradeType {
    #[yaserde(prefix = "tns", rename = "SupportedEnvelope")]
    pub supported_envelope: Vec<SupportedEnvType>,
}
impl Validate for UpgradeType {}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/transport/src/lib.rs | transport/src/lib.rs | use async_trait::async_trait;
use thiserror::Error;
use yaserde::{YaDeserialize, YaSerialize};
/// Errors produced while sending a request or interpreting its response.
/// Variants map one-to-one onto the failure stages: (de)serialization,
/// authorization, redirects, connectivity, timeouts, protocol violations,
/// and a catch-all `Other`.
#[derive(Debug, Error)]
pub enum Error {
    #[error("Serialization failed: {0}")]
    Serialization(String),
    #[error("Deserialization failed: {0}")]
    Deserialization(String),
    #[error("Authorization failed: {0}")]
    Authorization(String),
    #[error("Redirection error: {0}")]
    Redirection(String),
    #[error("Connection error: {0}")]
    Connection(String),
    #[error("Timeout occurred: {0}")]
    Timeout(String),
    #[error("Protocol error: {0}")]
    Protocol(String),
    #[error("Other: {0}")]
    Other(String),
}
impl From<Error> for String {
fn from(error: Error) -> String {
error.to_string()
}
}
/// Abstraction over the wire: sends a raw XML message string and yields the
/// raw response body. Implementations handle authentication, HTTP, etc.
#[async_trait]
pub trait Transport {
    async fn request(&self, message: &str) -> Result<String, Error>;
}
/// Serializes `request` with yaserde, sends it through `transport`, and
/// deserializes the response into `S`.
///
/// The XML declaration is stripped before sending — presumably because the
/// transport embeds the message inside a larger document (TODO confirm
/// against `Transport` implementors).
pub async fn request<T: Transport, R: YaSerialize, S: YaDeserialize>(
    transport: &T,
    request: &R,
) -> Result<S, Error> {
    // Wrap yaserde's plain-String errors in the typed Error variants.
    let ser = |obj: &R| yaserde::ser::to_string(obj).map_err(Error::Serialization);
    let de = |s: &str| yaserde::de::from_str(s).map_err(Error::Deserialization);
    de(&transport
        .request(&crop_xml_declaration(&ser(request)?))
        .await?)
}
/// Strips a leading XML declaration (`<?xml ... ?>`), returning everything
/// after the first `?>`. Input without a declaration is returned unchanged.
///
/// The previous `split("?>").skip(1).collect()` implementation (a) returned
/// an empty string when the input had no declaration and (b) silently
/// dropped every later `?>` occurrence (e.g. inside attribute values or
/// processing instructions), corrupting the document. Splitting exactly once
/// fixes both.
fn crop_xml_declaration(xml: &str) -> String {
    match xml.split_once("?>") {
        Some((_, rest)) => rest.to_string(),
        None => xml.to_string(),
    }
}
// The declaration must be removed while the document element is preserved.
#[test]
fn test_crop_xml_declaration() {
    assert_eq!(
        crop_xml_declaration(r#"<?xml version="1.0" encoding="utf-8"?><element />"#),
        "<element />"
    );
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/schema/src/lib.rs | schema/src/lib.rs | // Generated code contains upper-case acronyms. Ideally it shouldn't, but changing the codegen
// is not trivial (in addition to changing the casing being a breaking change, of course).
// This issue is being tracked at <https://github.com/lumeohq/xsd-parser-rs/issues/123>.
#![allow(clippy::upper_case_acronyms)]
pub use transport;
// xsd:
pub use common;
#[cfg(feature = "metadatastream")]
pub use metadatastream;
pub use onvif;
#[cfg(feature = "radiometry")]
pub use radiometry;
#[cfg(feature = "rules")]
pub use rules;
pub use soap_envelope;
pub use types;
pub use xmlmime;
pub use xop;
// wsdl:
#[cfg(feature = "accesscontrol")]
pub use accesscontrol;
#[cfg(feature = "accessrules")]
pub use accessrules;
#[cfg(feature = "actionengine")]
pub use actionengine;
#[cfg(feature = "advancedsecurity")]
pub use advancedsecurity;
#[cfg(feature = "analytics")]
pub use analytics;
#[cfg(feature = "authenticationbehavior")]
pub use authenticationbehavior;
pub use b_2;
#[cfg(feature = "bf_2")]
pub use bf_2;
#[cfg(feature = "credential")]
pub use credential;
#[cfg(feature = "deviceio")]
pub use deviceio;
#[cfg(feature = "devicemgmt")]
pub use devicemgmt;
#[cfg(feature = "display")]
pub use display;
#[cfg(feature = "doorcontrol")]
pub use doorcontrol;
#[cfg(feature = "event")]
pub use event;
#[cfg(feature = "imaging")]
pub use imaging;
#[cfg(feature = "media")]
pub use media;
#[cfg(feature = "media2")]
pub use media2;
#[cfg(feature = "provisioning")]
pub use provisioning;
#[cfg(feature = "ptz")]
pub use ptz;
#[cfg(feature = "receiver")]
pub use receiver;
#[cfg(feature = "recording")]
pub use recording;
#[cfg(feature = "replay")]
pub use replay;
#[cfg(feature = "schedule")]
pub use schedule;
#[cfg(feature = "search")]
pub use search;
pub use t_1;
#[cfg(feature = "thermal")]
pub use thermal;
#[cfg(feature = "uplink")]
pub use uplink;
pub use ws_addr;
pub use ws_discovery;
pub use xml_xsd;
#[cfg(test)]
mod tests;
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/schema/src/tests.rs | schema/src/tests.rs | use super::*;
use assert_approx_eq::assert_approx_eq;
use async_trait::async_trait;
use onvif as tt;
use utils::assert_xml_eq;
use xsd_types::types as xs;
mod utils;
/// Test double for [`transport::Transport`]: ignores the outgoing message
/// and always returns the canned `response`.
pub struct FakeTransport {
    pub response: String,
}
#[async_trait]
impl transport::Transport for FakeTransport {
    async fn request(&self, _message: &str) -> Result<String, transport::Error> {
        Ok(self.response.clone())
    }
}
// Field-by-field check that a GetSystemDateAndTimeResponse deserializes.
#[test]
#[cfg(feature = "devicemgmt")]
fn basic_deserialization() {
    let response = r#"<?xml version="1.0" encoding="utf-8"?>
<tds:GetSystemDateAndTimeResponse
xmlns:tt="http://www.onvif.org/ver10/schema"
xmlns:tds="http://www.onvif.org/ver10/device/wsdl">
<tds:SystemDateAndTime>
<tt:DateTimeType>NTP</tt:DateTimeType>
<tt:DaylightSavings>false</tt:DaylightSavings>
<tt:TimeZone>
<tt:TZ>PST7PDT</tt:TZ>
</tt:TimeZone>
<tt:UTCDateTime>
<tt:Time>
<tt:Hour>16</tt:Hour>
<tt:Minute>20</tt:Minute>
<tt:Second>9</tt:Second>
</tt:Time>
<tt:Date>
<tt:Year>2019</tt:Year>
<tt:Month>11</tt:Month>
<tt:Day>18</tt:Day>
</tt:Date>
</tt:UTCDateTime>
</tds:SystemDateAndTime>
</tds:GetSystemDateAndTimeResponse>
"#;
    let response: devicemgmt::GetSystemDateAndTimeResponse =
        yaserde::de::from_str(response).unwrap();
    let de = response.system_date_and_time;
    println!("{:#?}", de);
    assert_eq!(de.date_time_type, tt::SetDateTimeType::Ntp);
    assert!(!de.daylight_savings);
    assert_eq!(de.time_zone.unwrap().tz, "PST7PDT");
    assert_eq!(de.utc_date_time.as_ref().unwrap().date.year, 2019);
    assert_eq!(de.utc_date_time.as_ref().unwrap().date.month, 11);
    assert_eq!(de.utc_date_time.as_ref().unwrap().date.day, 18);
    assert_eq!(de.utc_date_time.as_ref().unwrap().time.hour, 16);
    assert_eq!(de.utc_date_time.as_ref().unwrap().time.minute, 20);
    assert_eq!(de.utc_date_time.as_ref().unwrap().time.second, 9);
}
// Minimal round-trip: an empty request serializes to a self-closed element.
#[cfg(feature = "devicemgmt")]
#[test]
fn basic_serialization() {
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<tds:GetSystemDateAndTime xmlns:tds="http://www.onvif.org/ver10/device/wsdl" />
"#;
    let request: devicemgmt::GetSystemDateAndTime = Default::default();
    let actual = yaserde::ser::to_string(&request).unwrap();
    assert_xml_eq(actual.as_str(), expected);
}
// Types generated from xsd extension (VideoSourceConfiguration extends a
// base with token/name/use_count) must deserialize inherited and own fields.
#[test]
fn extend_base_deserialization() {
    let ser = r#"
<tt:VideoSourceConfiguration token="V_SRC_CFG_000" xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Name>V_SRC_CFG_000</tt:Name>
<tt:UseCount>2</tt:UseCount>
<tt:SourceToken>V_SRC_000</tt:SourceToken>
<tt:Bounds height="720" width="1280" y="0" x="0"/>
</tt:VideoSourceConfiguration>
"#;
    let de: tt::VideoSourceConfiguration = yaserde::de::from_str(ser).unwrap();
    assert_eq!(
        de.token,
        common::ReferenceToken("V_SRC_CFG_000".to_string())
    );
    assert_eq!(de.name, tt::Name("V_SRC_CFG_000".to_string()));
    assert_eq!(de.use_count, 2);
    assert_eq!(
        de.source_token,
        common::ReferenceToken("V_SRC_000".to_string())
    );
    assert_eq!(de.bounds.x, 0);
    assert_eq!(de.bounds.y, 0);
    assert_eq!(de.bounds.width, 1280);
    assert_eq!(de.bounds.height, 720);
}
// Inverse direction: inherited and own fields serialize (element order as
// emitted by the generated code; assert_xml_eq ignores whitespace only).
#[test]
fn extend_base_serialization() {
    let model = tt::VideoSourceConfiguration {
        token: common::ReferenceToken("123abc".to_string()),
        name: tt::Name("MyName".to_string()),
        use_count: 2,
        source_token: common::ReferenceToken("456cde".to_string()),
        bounds: tt::IntRectangle {
            x: 1,
            y: 2,
            width: 3,
            height: 4,
        },
        ..Default::default()
    };
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:VideoSourceConfiguration xmlns:tt="http://www.onvif.org/ver10/schema" token="123abc">
<tt:SourceToken>456cde</tt:SourceToken>
<tt:Bounds x="1" y="2" width="3" height="4" />
<tt:Name>MyName</tt:Name>
<tt:UseCount>2</tt:UseCount>
</tt:VideoSourceConfiguration>
"#;
    let actual = yaserde::ser::to_string(&model).unwrap();
    println!("actual: {}", actual);
    println!("expected: {}", expected);
    assert_xml_eq(actual.as_str(), expected);
}
// xsd:choice mapping: repeated ColorspaceRange children must land in the
// ColorspaceRange variant of ColorOptionsChoice, preserving order.
#[test]
fn choice_deserialization() {
    let ser = r#"
<tt:ColorOptions tt:any_attribute="attr_value" xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:ColorspaceRange>
<tt:X><tt:Min>0.1</tt:Min><tt:Max>0.11</tt:Max></tt:X>
<tt:Y><tt:Min>0.2</tt:Min><tt:Max>0.22</tt:Max></tt:Y>
<tt:Z><tt:Min>0.3</tt:Min><tt:Max>0.33</tt:Max></tt:Z>
<tt:Colorspace>http://my.color.space</tt:Colorspace>
</tt:ColorspaceRange>
<tt:ColorspaceRange>
<tt:X><tt:Min>0.4</tt:Min><tt:Max>0.44</tt:Max></tt:X>
<tt:Y><tt:Min>0.5</tt:Min><tt:Max>0.55</tt:Max></tt:Y>
<tt:Z><tt:Min>0.6</tt:Min><tt:Max>0.66</tt:Max></tt:Z>
<tt:Colorspace>http://my.color.space</tt:Colorspace>
</tt:ColorspaceRange>
</tt:ColorOptions>
"#;
    let des: tt::ColorOptions = yaserde::de::from_str(ser).unwrap();
    match des.color_options_choice {
        tt::ColorOptionsChoice::ColorspaceRange(colors) => {
            assert_eq!(colors.len(), 2);
            assert_eq!(
                colors[0].x,
                tt::FloatRange {
                    min: 0.1,
                    max: 0.11
                }
            );
            assert_eq!(
                colors[0].y,
                tt::FloatRange {
                    min: 0.2,
                    max: 0.22
                }
            );
            assert_eq!(
                colors[0].z,
                tt::FloatRange {
                    min: 0.3,
                    max: 0.33
                }
            );
            assert_eq!(colors[0].colorspace, String::from("http://my.color.space"));
            assert_eq!(
                colors[1].x,
                tt::FloatRange {
                    min: 0.4,
                    max: 0.44
                }
            );
            assert_eq!(
                colors[1].y,
                tt::FloatRange {
                    min: 0.5,
                    max: 0.55
                }
            );
            assert_eq!(
                colors[1].z,
                tt::FloatRange {
                    min: 0.6,
                    max: 0.66
                }
            );
            assert_eq!(colors[1].colorspace, String::from("http://my.color.space"));
        }
        _ => panic!("Wrong variant: {:?}", des.color_options_choice),
    }
}
// Inverse direction: note the generated serializer emits the choice's child
// elements without the tt prefix (assert_xml_eq only ignores whitespace).
#[test]
fn choice_serialization() {
    let model = tt::ColorOptions {
        color_options_choice: tt::ColorOptionsChoice::ColorspaceRange(vec![
            tt::ColorspaceRange {
                x: tt::FloatRange {
                    min: 0.1,
                    max: 0.11,
                },
                y: tt::FloatRange {
                    min: 0.2,
                    max: 0.22,
                },
                z: tt::FloatRange {
                    min: 0.3,
                    max: 0.33,
                },
                colorspace: "http://my.color.space".to_string(),
            },
            tt::ColorspaceRange {
                x: tt::FloatRange {
                    min: 0.4,
                    max: 0.44,
                },
                y: tt::FloatRange {
                    min: 0.5,
                    max: 0.55,
                },
                z: tt::FloatRange {
                    min: 0.6,
                    max: 0.66,
                },
                colorspace: "http://my.color.space".to_string(),
            },
        ]),
    };
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:ColorOptions xmlns:tt="http://www.onvif.org/ver10/schema">
<ColorspaceRange>
<tt:X><tt:Min>0.1</tt:Min><tt:Max>0.11</tt:Max></tt:X>
<tt:Y><tt:Min>0.2</tt:Min><tt:Max>0.22</tt:Max></tt:Y>
<tt:Z><tt:Min>0.3</tt:Min><tt:Max>0.33</tt:Max></tt:Z>
<tt:Colorspace>http://my.color.space</tt:Colorspace>
</ColorspaceRange>
<ColorspaceRange>
<tt:X><tt:Min>0.4</tt:Min><tt:Max>0.44</tt:Max></tt:X>
<tt:Y><tt:Min>0.5</tt:Min><tt:Max>0.55</tt:Max></tt:Y>
<tt:Z><tt:Min>0.6</tt:Min><tt:Max>0.66</tt:Max></tt:Z>
<tt:Colorspace>http://my.color.space</tt:Colorspace>
</ColorspaceRange>
</tt:ColorOptions>
"#;
    let actual = yaserde::ser::to_string(&model).unwrap();
    println!("actual: {}", actual);
    println!("expected: {}", expected);
    assert_xml_eq(actual.as_str(), expected);
}
// xs:Duration round-trip: 60 seconds must serialize as ISO 8601 "PT60S".
#[test]
fn duration_serialization() {
    let model = tt::MediaUri {
        uri: "http://a/b/c".to_string(),
        invalid_after_connect: false,
        invalid_after_reboot: true,
        timeout: xs::Duration {
            seconds: 60.0,
            ..Default::default()
        },
    };
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:MediaUri xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Uri>http://a/b/c</tt:Uri>
<tt:InvalidAfterConnect>false</tt:InvalidAfterConnect>
<tt:InvalidAfterReboot>true</tt:InvalidAfterReboot>
<tt:Timeout>PT60S</tt:Timeout>
</tt:MediaUri>
"#;
    let actual = yaserde::ser::to_string(&model).unwrap();
    assert_xml_eq(actual.as_str(), expected);
}
// Inverse direction: "PT60S" parses back to 60.0 seconds (float compare via
// assert_approx_eq).
#[test]
fn duration_deserialization() {
    let ser = r#"
<tt:MediaUri xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Uri>http://a/b/c</tt:Uri>
<tt:InvalidAfterConnect>false</tt:InvalidAfterConnect>
<tt:InvalidAfterReboot>true</tt:InvalidAfterReboot>
<tt:Timeout>PT60S</tt:Timeout>
</tt:MediaUri>
"#;
    let des: tt::MediaUri = yaserde::de::from_str(ser).unwrap();
    assert_eq!(des.uri, "http://a/b/c".to_string());
    assert!(!des.invalid_after_connect);
    assert!(des.invalid_after_reboot);
    assert_approx_eq!(des.timeout.seconds, 60.0);
}
// End-to-end devicemgmt operation through FakeTransport: request is
// serialized, the canned response is deserialized and fields are reachable.
#[tokio::test]
#[cfg(feature = "devicemgmt")]
async fn operation_get_system_date_and_time() {
    let req: devicemgmt::GetSystemDateAndTime = Default::default();
    let transport = FakeTransport {
        response: r#"
<tds:GetSystemDateAndTimeResponse
xmlns:tds="http://www.onvif.org/ver10/device/wsdl"
xmlns:tt="http://www.onvif.org/ver10/schema">
<tds:SystemDateAndTime>
<tt:DateTimeType>NTP</tt:DateTimeType>
<tt:DaylightSavings>false</tt:DaylightSavings>
<tt:TimeZone>
<tt:TZ>PST7PDT</tt:TZ>
</tt:TimeZone>
<tt:UTCDateTime>
<tt:Time>
<tt:Hour>8</tt:Hour>
<tt:Minute>5</tt:Minute>
<tt:Second>40</tt:Second>
</tt:Time>
<tt:Date>
<tt:Year>2019</tt:Year>
<tt:Month>11</tt:Month>
<tt:Day>21</tt:Day>
</tt:Date>
</tt:UTCDateTime>
</tds:SystemDateAndTime>
</tds:GetSystemDateAndTimeResponse>"#
            .into(),
    };
    let resp = devicemgmt::get_system_date_and_time(&transport, &req)
        .await
        .unwrap();
    assert_eq!(
        resp.system_date_and_time.utc_date_time.unwrap().time.second,
        40
    );
}
// Same pattern for GetDeviceInformation: flat string fields deserialize.
#[tokio::test]
#[cfg(feature = "devicemgmt")]
async fn operation_get_device_information() {
    let req: devicemgmt::GetDeviceInformation = Default::default();
    let transport = FakeTransport {
        response: r#"
<tds:GetDeviceInformationResponse
xmlns:tds="http://www.onvif.org/ver10/device/wsdl"
xmlns:tt="http://www.onvif.org/ver10/schema">
<tds:Manufacturer>Somebody</tds:Manufacturer>
<tds:Model>IPCamera</tds:Model>
<tds:FirmwareVersion>1.5</tds:FirmwareVersion>
<tds:SerialNumber>a12b34</tds:SerialNumber>
<tds:HardwareId>2.0</tds:HardwareId>
</tds:GetDeviceInformationResponse>"#
            .into(),
    };
    let resp = devicemgmt::get_device_information(&transport, &req)
        .await
        .unwrap();
    assert_eq!(resp.manufacturer, "Somebody");
}
// WS-Discovery Probe envelope serializes with the expected SOAP header
// (MessageID / To / Action) and an empty Types element.
#[test]
fn probe_serialization() {
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<s:Envelope
xmlns:d="http://schemas.xmlsoap.org/ws/2005/04/discovery"
xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:w="http://schemas.xmlsoap.org/ws/2004/08/addressing">
<s:Header>
<w:MessageID>uuid:84ede3de-7dec-11d0-c360-f01234567890</w:MessageID>
<w:To>urn:schemas-xmlsoap-org:ws:2005:04:discovery</w:To>
<w:Action>http://schemas.xmlsoap.org/ws/2005/04/discovery/Probe</w:Action>
</s:Header>
<s:Body>
<d:Probe>
<d:Types></d:Types>
</d:Probe>
</s:Body>
</s:Envelope>
"#;
    use ws_discovery::probe::*;
    let probe = Envelope {
        header: Header {
            message_id: "uuid:84ede3de-7dec-11d0-c360-f01234567890".into(),
            action: "http://schemas.xmlsoap.org/ws/2005/04/discovery/Probe".into(),
            to: "urn:schemas-xmlsoap-org:ws:2005:04:discovery".into(),
        },
        ..Default::default()
    };
    let actual = yaserde::ser::to_string(&probe).unwrap();
    assert_xml_eq(&actual, expected);
}
#[test]
fn probe_match_deserialization() {
    // Following XML was taken from ONVIF guide
    // https://www.onvif.org/wp-content/uploads/2016/12/ONVIF_WG-APG-Application_Programmers_Guide-1.pdf
    let ser = r#"<?xml version="1.0" encoding="utf-8"?>
<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:d="http://schemas.xmlsoap.org/ws/2005/04/discovery"
xmlns:dn="http://www.onvif.org/ver10/network/wsdl">
<SOAP-ENV:Header>
<wsa:MessageID>uuid:84ede3de-e374-11df-b259-00408c1836b2</wsa:MessageID>
<wsa:RelatesTo>uuid:84ede3de-7dec-11d0-c360-F01234567890</wsa:RelatesTo>
<wsa:To SOAP-ENV:mustUnderstand="true">
http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
</wsa:To>
<wsa:Action SOAP-ENV:mustUnderstand="true">
http://schemas.xmlsoap.org/ws/2005/04/discovery/ProbeMatches
</wsa:Action>
<d:AppSequence SOAP-ENV:mustUnderstand="true" MessageNumber="3" InstanceId="1287607812">
</d:AppSequence>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<d:ProbeMatches>
<d:ProbeMatch>
<wsa:EndpointReference>
<wsa:Address>urn:uuid:a1f48ac2-dc8b-11df-b255-00408c1836b2</wsa:Address>
</wsa:EndpointReference>
<d:Types>dn:NetworkVideoTransmitter</d:Types>
<d:Scopes>
onvif://www.onvif.org/type/video_encoder
onvif://www.onvif.org/type/audio_encoder
onvif://www.onvif.org/hardware/MODEL
onvif://www.onvif.org/name/VENDOR%20MODEL
onvif://www.onvif.org/location/ANY
</d:Scopes>
<d:XAddrs>
http://169.254.76.145/onvif/services
http://192.168.1.24/onvif/services
</d:XAddrs>
<d:MetadataVersion>1</d:MetadataVersion>
</d:ProbeMatch>
</d:ProbeMatches>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"#;
    let des: ws_discovery::probe_matches::Envelope = yaserde::de::from_str(ser).unwrap();
    assert_eq!(
        des.header.relates_to,
        "uuid:84ede3de-7dec-11d0-c360-F01234567890".to_string()
    );
    // XAddrs is whitespace-separated; split to recover the individual URLs.
    assert_eq!(
        des.body.probe_matches.probe_match[0]
            .x_addrs
            .split_whitespace()
            .collect::<Vec<&str>>(),
        vec![
            "http://169.254.76.145/onvif/services",
            "http://192.168.1.24/onvif/services"
        ]
    );
}
// Space-separated attribute lists: StringAttrList joins items with a space.
#[test]
fn string_list_serialization() {
    let model = tt::FocusOptions20Extension {
        af_modes: Some(tt::StringAttrList(vec![
            "Auto".to_string(),
            "Manual".to_string(),
        ])),
    };
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:FocusOptions20Extension xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:AFModes>Auto Manual</tt:AFModes>
</tt:FocusOptions20Extension>
"#;
    let actual = yaserde::ser::to_string(&model).unwrap();
    assert_xml_eq(actual.as_str(), expected);
}
// Inverse direction: "Auto Manual" splits back into two entries.
#[test]
fn string_list_deserialization() {
    let ser = r#"
<tt:FocusOptions20Extension xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:AFModes>Auto Manual</tt:AFModes>
</tt:FocusOptions20Extension>
"#;
    let des: tt::FocusOptions20Extension = yaserde::de::from_str(ser).unwrap();
    assert_eq!(
        des.af_modes,
        Some(tt::StringAttrList(vec![
            "Auto".to_string(),
            "Manual".to_string()
        ]))
    );
}
// FloatAttrList serializes 1.0 as "1" (shortest representation).
#[test]
fn float_list_serialization() {
    let model = tt::FloatAttrList(vec![1.0, 2.3, 3.99]);
    let expected = r#"<?xml version="1.0" encoding="utf-8"?>
<FloatAttrList>1 2.3 3.99</FloatAttrList>
"#;
    let actual = yaserde::ser::to_string(&model).unwrap();
    assert_xml_eq(actual.as_str(), expected);
}
// Inverse direction for FloatAttrList.
#[test]
fn float_list_deserialization() {
    let ser = r#"<?xml version="1.0" encoding="utf-8"?>
<FloatAttrList>1 2.3 3.99</FloatAttrList>
"#;
    let des: tt::FloatAttrList = yaserde::de::from_str(ser).unwrap();
    assert_eq!(des, tt::FloatAttrList(vec![1.0, 2.3, 3.99]));
}
#[test]
fn nested_structs_with_same_named_attributes() {
    // Regression test for a yaserde attribute-shadowing bug:
    // https://github.com/media-io/yaserde/issues/12#issuecomment-601235031
    let ser = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:Profile token="a" xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:PTZConfiguration token="b" />
</tt:Profile>
"#;
    let des: tt::Profile = yaserde::de::from_str(ser).unwrap();
    assert_eq!(des.token.0.as_str(), "a");
    assert_eq!(des.ptz_configuration.unwrap().token.0.as_str(), "b");
}
#[test]
fn nested_structs_with_same_named_fields() {
    // There was an issue in yaserde which is now fixed
    // https://github.com/media-io/yaserde/issues/51
    let ser = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:Profile xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Extension />
</tt:Profile>
"#;
    let des: tt::Profile = yaserde::de::from_str(ser).unwrap();
    assert_eq!(
        des,
        tt::Profile {
            extension: Some(tt::ProfileExtension::default()),
            ..Default::default()
        }
    );
}
#[test]
fn extension_inside_extension() {
    // `Extension` inside `Extension` causes infinite loop at deserialization
    // https://github.com/media-io/yaserde/issues/76
    // If field `extension` in `SecurityCapabilitiesExtension` is uncommented accidentally
    // then this test will fail. Also note that there's a bunch of such cases in `onvif.rs`.
    let ser = r#"<?xml version="1.0" encoding="utf-8"?>
<tt:SecurityCapabilities xmlns:tt="http://www.onvif.org/ver10/schema">
<tt:Extension>
<tt:TLS1.0>false</tt:TLS1.0>
<tt:Extension>
<tt:RemoteUserHandling>false</tt:RemoteUserHandling>
</tt:Extension>
</tt:Extension>
</tt:SecurityCapabilities>
"#;
    // Only successful parsing matters here — a hang/panic is the failure mode.
    let _ = yaserde::de::from_str::<tt::SecurityCapabilities>(ser).unwrap();
}
// Guards the exact type names of the media2 request structs: yaserde derives
// the XML element name from the Rust type name, so renaming any of these
// would silently change the wire format.
#[test]
#[cfg(feature = "media2")]
fn media2_configs_name_serialization() {
    fn type_of<T>(_: &T) -> &str {
        std::any::type_name::<T>()
    }
    // Expands to one assert_eq! per type, comparing against "media2::<Name>".
    macro_rules! assert_type_name {
        ($($ty:ident),+ $(,)?) => {
            $(
                assert_eq!(
                    concat!("media2::", stringify!($ty)),
                    type_of(&media2::$ty::default())
                );
            )+
        };
    }
    assert_type_name!(
        GetConfiguration,
        GetVideoEncoderConfigurations,
        GetVideoSourceConfigurations,
        GetAudioEncoderConfigurations,
        GetAudioSourceConfigurations,
        GetAnalyticsConfigurations,
        GetMetadataConfigurations,
        GetAudioOutputConfigurations,
        GetAudioDecoderConfigurations,
        GetVideoSourceConfigurationOptions,
        GetVideoEncoderConfigurationOptions,
        GetAudioSourceConfigurationOptions,
        GetAudioEncoderConfigurationOptions,
        GetMetadataConfigurationOptions,
        GetAudioOutputConfigurationOptions,
        GetAudioDecoderConfigurationOptions,
    );
}
// PullMessages: notification source/data SimpleItem name/value pairs must
// survive the round trip through the event operation wrapper.
#[tokio::test]
#[cfg(feature = "event")]
async fn operation_pull_messages() {
    let req: event::PullMessages = Default::default();
    let transport = FakeTransport {
        response: r#"
<tev:PullMessagesResponse
xmlns:tt="http://www.onvif.org/ver10/schema"
xmlns:wsnt="http://docs.oasis-open.org/wsn/b-2"
xmlns:tev="http://www.onvif.org/ver10/events/wsdl"
xmlns:wsa5="http://www.w3.org/2005/08/addressing"
xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:wstop="http://docs.oasis-open.org/wsn/t-1"
xmlns:tns1="http://www.onvif.org/ver10/topics">
<tev:CurrentTime>
2023-09-28T16:01:15Z
</tev:CurrentTime>
<tev:TerminationTime>
2023-09-28T16:11:15Z
</tev:TerminationTime>
<wsnt:NotificationMessage>
<wsnt:Topic
Dialect="http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet">
tns1:RuleEngine/CellMotionDetector/Motion
</wsnt:Topic>
<wsnt:Message>
<tt:Message
UtcTime="2023-09-28T16:01:15Z"
PropertyOperation="Initialized">
<tt:Source>
<tt:SimpleItem
Name="VideoSourceConfigurationToken"
Value="00000"/>
<tt:SimpleItem
Name="VideoAnalyticsConfigurationToken"
Value="00000"/>
<tt:SimpleItem
Name="Rule"
Value="00000"/>
</tt:Source>
<tt:Data>
<tt:SimpleItem
Name="IsMotion"
Value="false"/>
</tt:Data>
</tt:Message>
</wsnt:Message>
</wsnt:NotificationMessage>
</tev:PullMessagesResponse>
"#
        .into(),
    };
    let response = event::pull_messages(&transport, &req).await;
    let resp = match response {
        Ok(resp) => resp,
        Err(err) => panic!("Error: {:?}", err),
    };
    assert_eq!(
        resp.notification_message[0].message.msg.source.simple_item[0].name,
        "VideoSourceConfigurationToken"
    );
    assert_eq!(
        resp.notification_message[0].message.msg.source.simple_item[0].value,
        "00000"
    );
    assert_eq!(
        resp.notification_message[0].message.msg.data.simple_item[0].name,
        "IsMotion"
    );
    assert_eq!(
        resp.notification_message[0].message.msg.data.simple_item[0].value,
        "false"
    );
}
// CreatePullPointSubscription: the subscription reference address must come
// back trimmed of the surrounding whitespace in the XML.
#[tokio::test]
#[cfg(feature = "event")]
async fn operation_create_pullpoint_subscription() {
    let req: event::CreatePullPointSubscription = Default::default();
    let transport = FakeTransport {
        response: r#"
<tev:CreatePullPointSubscriptionResponse
xmlns:tev="http://www.onvif.org/ver10/events/wsdl"
xmlns:wsnt="http://docs.oasis-open.org/wsn/b-2"
xmlns:wsa5="http://www.w3.org/2005/08/addressing">
<tev:SubscriptionReference>
<wsa5:Address>
http://192.168.88.108/onvif/Subscription?Idx=5
</wsa5:Address>
</tev:SubscriptionReference>
<wsnt:CurrentTime>
2023-09-28T16:01:15Z
</wsnt:CurrentTime>
<wsnt:TerminationTime>
2023-09-28T16:11:15Z
</wsnt:TerminationTime>
</tev:CreatePullPointSubscriptionResponse>
"#
        .into(),
    };
    let resp = event::create_pull_point_subscription(&transport, &req)
        .await
        .unwrap();
    assert_eq!(
        resp.subscription_reference.address,
        "http://192.168.88.108/onvif/Subscription?Idx=5"
    );
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/schema/src/tests/utils.rs | schema/src/tests/utils.rs | use xml::reader::XmlEvent;
/// Asserts that two XML documents are event-wise equal, ignoring
/// inter-element whitespace; the declared encoding is compared
/// case-insensitively, everything else exactly.
pub fn assert_xml_eq(actual: &str, expected: &str) {
    for (a, e) in without_whitespaces(actual).zip(without_whitespaces(expected)) {
        match (a, e) {
            // StartDocument needs special handling for the encoding case.
            (
                Ok(XmlEvent::StartDocument {
                    version,
                    encoding,
                    standalone,
                }),
                Ok(XmlEvent::StartDocument {
                    version: version_expected,
                    encoding: encoding_expected,
                    standalone: standalone_expected,
                }),
            ) => {
                assert_eq!(version, version_expected);
                assert_eq!(encoding.to_lowercase(), encoding_expected.to_lowercase());
                assert_eq!(standalone, standalone_expected);
            }
            // All other events (elements, text, errors) must match exactly.
            (a, e) => assert_eq!(a, e),
        }
    }
}
/// Parses `expected` into an XML event stream with pure-whitespace events
/// filtered out, so documents can be compared structurally.
fn without_whitespaces(
    expected: &str,
) -> impl Iterator<Item = Result<xml::reader::XmlEvent, xml::reader::Error>> + '_ {
    let reader = xml::EventReader::new(expected.as_bytes());
    reader
        .into_iter()
        .filter(|event| !matches!(event, Ok(xml::reader::XmlEvent::Whitespace(_))))
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/lib.rs | onvif/src/lib.rs | #[cfg(test)]
#[macro_use]
extern crate yaserde_derive;
pub mod discovery;
pub mod soap;
mod utils;
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/discovery/mod.rs | onvif/src/discovery/mod.rs | mod network_enumeration;
use crate::discovery::network_enumeration::enumerate_network_v4;
use futures::stream::{self, StreamExt};
use futures_core::stream::Stream;
use schema::ws_discovery::{probe, probe_matches};
use std::iter::Iterator;
use std::{
collections::HashSet,
fmt::{Debug, Formatter},
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::Arc,
};
use thiserror::Error;
use tokio::{
io,
net::UdpSocket,
sync::mpsc::channel,
time::{timeout, Duration},
};
use tokio_stream::wrappers::ReceiverStream;
use tracing::debug;
use url::Url;
use crate::utils::{display_list::DisplayList, hash::calculate_hash};
/// Errors that a discovery run can produce.
#[derive(Debug, Error)]
pub enum Error {
    /// Socket/UDP failure while probing or listening.
    #[error("Network error: {0}")]
    Network(#[from] io::Error),
    /// Failure to (de)serialize a WS-Discovery SOAP message.
    #[error("(De)serialization error: {0}")]
    Serde(String),
    /// Requested a capability this implementation does not provide
    /// (e.g. discovery over IPv6).
    #[error("Unsupported feature: {0}")]
    Unsupported(String),
}
/// How to discover the devices on the network. Officially, only [DiscoveryMode::Multicast] (the
/// default) is supported by all onvif devices. However, it is said that sending unicast packets
/// can work.
#[derive(Debug, Clone)]
pub enum DiscoveryMode {
    /// The normal WS-Discovery Mode
    Multicast,
    /// The unicast approach: the probe is sent individually to every host of
    /// the given network instead of to the multicast group.
    Unicast {
        /// The network IP address. Must be a valid network address, otherwise the behavior
        /// will be undefined
        network: Ipv4Addr,
        /// The network mask, written out in "dotted notation". Must be a valid network mask,
        /// otherwise the behavior will be undefined.
        network_mask: Ipv4Addr,
    },
}
/// A single device discovered on the network.
#[derive(Clone, Eq, Hash, PartialEq)]
pub struct Device {
    /// The WS-Discovery UUID / address reference
    pub address: String,
    // Hardware identifier from the probe match, when the device reports one.
    pub hardware: Option<String>,
    // Human-readable device name from the probe match (e.g. taken from the
    // "onvif://www.onvif.org/name/..." scope), when present.
    pub name: Option<String>,
    // Device types advertised in the probe match.
    pub types: Vec<String>,
    // Service URLs (XAddrs) where the device can be reached.
    pub urls: Vec<Url>,
}
// Manual `Debug`: prints only the most useful fields (name, urls, address)
// and renders the URL list via `DisplayList` for compact output.
impl Debug for Device {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Device")
            .field("name", &self.name)
            .field("url", &DisplayList(&self.urls))
            .field("address", &self.address)
            .finish()
    }
}
/// Builder for a WS-Discovery run; construct with `Default`, configure via
/// the setter methods, then call `run()`.
#[derive(Debug, Clone)]
pub struct DiscoveryBuilder {
    // How long to listen for responses (also bounds the whole run).
    duration: Duration,
    // Local address to bind the UDP socket(s) to.
    listen_address: IpAddr,
    // Multicast (default) or unicast probing; see `DiscoveryMode`.
    discovery_mode: DiscoveryMode,
}
impl Default for DiscoveryBuilder {
    /// Defaults: 5 s listen duration, bind to 0.0.0.0, multicast probing.
    fn default() -> Self {
        Self {
            duration: Duration::from_secs(5),
            listen_address: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
            discovery_mode: DiscoveryMode::Multicast,
        }
    }
}
impl DiscoveryBuilder {
const LOCAL_PORT: u16 = 0;
const MULTI_PORT: u16 = 3702;
const WS_DISCOVERY_BROADCAST_ADDR: Ipv4Addr = Ipv4Addr::new(239, 255, 255, 250);
const MAX_CONCURRENT_SOCK: usize = 32;
/// How long to listen for the responses from the network.
pub fn duration(&mut self, duration: Duration) -> &mut Self {
self.duration = duration;
self
}
/// Address to listen on.
///
/// By default, it is 0.0.0.0 which is fine for a single-NIC case. With multiple NICs, it's
/// problematic because 0.0.0.0 is routed to only one NIC, but you may want to run the discovery
/// on a specific network.
pub fn listen_address(&mut self, listen_address: IpAddr) -> &mut Self {
self.listen_address = listen_address;
self
}
/// Set the discovery mode. See [DiscoveryMode] for a description of how this works.
/// By default, the multicast mode is chosen.
pub fn discovery_mode(&mut self, discovery_mode: DiscoveryMode) -> &mut Self {
self.discovery_mode = discovery_mode;
self
}
async fn run_unicast(
&self,
duration: &Duration,
listen_address: &IpAddr,
network: &Ipv4Addr,
network_mask: &Ipv4Addr,
) -> Result<ReceiverStream<Device>, Error> {
let probe = Arc::new(build_probe());
let probe_xml = yaserde::ser::to_string(probe.as_ref()).map_err(Error::Serde)?;
debug!("Unicast Probe XML: {}. Since you are using unicast, some devices might not be detected", probe_xml);
let message_id = Arc::new(probe.header.message_id.clone());
let payload = Arc::new(probe_xml.as_bytes().to_vec());
let (device_sender, device_receiver) = channel(32);
let device_receiver = ReceiverStream::new(device_receiver);
let mut unicast_requests = vec![];
// Prepare the list of UDP queries to execute.
for target_address in enumerate_network_v4(*network, *network_mask) {
let local_sock_addr = SocketAddr::new(*listen_address, Self::LOCAL_PORT);
let target_sock_addr = SocketAddr::new(IpAddr::V4(target_address), Self::MULTI_PORT);
unicast_requests.push((
local_sock_addr,
target_sock_addr,
payload.clone(),
message_id.clone(),
));
}
let total_socks = unicast_requests.len();
let batches = (total_socks / Self::MAX_CONCURRENT_SOCK) as f64;
let max_time_per_sock = Duration::from_secs_f64(duration.as_secs_f64() / batches);
let produce_devices = async move {
let futures = unicast_requests
.iter()
.map(
|(local_sock_addr, target_sock_addr, payload, message_id)| async move {
let socket = UdpSocket::bind(local_sock_addr).await.ok()?;
socket.send_to(payload, target_sock_addr).await.ok()?;
let (xml, _) = timeout(max_time_per_sock, recv_string(&socket))
.await
.ok()?
.ok()?;
debug!("Probe match XML: {}", xml);
let envelope = match yaserde::de::from_str::<probe_matches::Envelope>(&xml)
{
Ok(envelope) => envelope,
Err(e) => {
debug!("Deserialization failed: {e}");
return None;
}
};
if envelope.header.relates_to != **message_id {
debug!("Unrelated message");
return None;
}
if let Some(device) = device_from_envelope(envelope) {
debug!("Found device {device:?}");
Some(device)
} else {
None
}
},
)
.collect::<Vec<_>>();
let mut stream = stream::iter(futures).buffer_unordered(Self::MAX_CONCURRENT_SOCK);
// Gets stopped by the timeout below, executing in a background task, but we can
// stop early as well
while let Some(device_or_empty) = stream.next().await {
if let Some(device) = device_or_empty {
// It's ok to ignore the sending error as user can drop the receiver soon
// (for example, after the first device discovered).
if device_sender.send(device).await.is_err() {
debug!("Failure to send to the device sender; Ignoring on purpose.")
}
}
}
};
// Give a grace of 100ms since we divided the time equally but some sockets will need a little more.
let global_timeout_duration = *duration + Duration::from_millis(100);
tokio::spawn(timeout(global_timeout_duration, produce_devices));
Ok(device_receiver)
}
async fn run_multicast(
&self,
duration: &Duration,
listen_address: &IpAddr,
) -> Result<ReceiverStream<Device>, Error> {
let probe = Arc::new(build_probe());
let probe_xml = yaserde::ser::to_string(probe.as_ref()).map_err(Error::Serde)?;
debug!("Probe XML: {}", probe_xml);
let socket = {
let local_socket_addr = SocketAddr::new(*listen_address, Self::LOCAL_PORT);
let multi_socket_addr = SocketAddr::new(
IpAddr::V4(Self::WS_DISCOVERY_BROADCAST_ADDR),
Self::MULTI_PORT,
);
let socket = UdpSocket::bind(local_socket_addr).await?;
match listen_address {
IpAddr::V4(addr) => {
socket.join_multicast_v4(Self::WS_DISCOVERY_BROADCAST_ADDR, *addr)?
}
IpAddr::V6(_) => return Err(Error::Unsupported("Discovery with IPv6".to_owned())),
}
socket
.send_to(probe_xml.as_bytes(), multi_socket_addr)
.await?;
socket
};
let (device_sender, device_receiver) = channel(32);
let device_receiver = ReceiverStream::new(device_receiver);
let mut known_responses = HashSet::new();
let produce_devices = async move {
while let Ok((xml, src)) = recv_string(&socket).await {
if !known_responses.insert(calculate_hash(&xml)) {
debug!("Duplicate response from {src}, skipping ...");
continue;
}
debug!("Probe match XML: {}", xml,);
let envelope = match yaserde::de::from_str::<probe_matches::Envelope>(&xml) {
Ok(envelope) => envelope,
Err(e) => {
debug!("Deserialization failed: {e}");
continue;
}
};
if envelope.header.relates_to != probe.header.message_id {
debug!("Unrelated message");
continue;
}
if let Some(device) = device_from_envelope(envelope) {
debug!("Found device {device:?}");
// It's ok to ignore the sending error as user can drop the receiver soon
// (for example, after the first device discovered).
let _ = device_sender.send(device).await;
} else {
debug!("No devices found");
}
}
};
tokio::spawn(timeout(*duration, produce_devices));
Ok(device_receiver)
}
/// Discovers devices on a local network asynchronously using WS-discovery.
///
/// Internally it sends a multicast probe and waits for responses for a specified amount of time.
/// You alternatively have the choice to send multiple unicast probes. See [DiscoveryMode]. This
/// is to allow the discovery process to operate within a Docker container or an environment where
/// the hosts network might be different than the target network.
///
/// The result is a stream of discovered devices.
/// The stream is terminated after provided amount of time.
///
/// There are many different ways to iterate over and process the values in a `Stream`
/// https://rust-lang.github.io/async-book/05_streams/02_iteration_and_concurrency.html
///
/// # Examples
///
/// You can access each element on the stream concurrently as soon as devices respond:
///
/// ```
/// use onvif::discovery;
/// use futures_util::stream::StreamExt; // to use for_each_concurrent
///
/// const MAX_CONCURRENT_JUMPERS: usize = 100;
///
/// async {
/// discovery::DiscoveryBuilder::default().run()
/// .await
/// .unwrap()
/// .for_each_concurrent(MAX_CONCURRENT_JUMPERS, |addr| {
/// async move {
/// println!("Device found: {:?}", addr);
/// }
/// })
/// .await;
/// };
/// ```
///
/// Or you can await on a collection of unique devices found in one second:
///
/// ```
/// use onvif::discovery;
/// use futures_util::stream::StreamExt; // to use collect
/// use std::collections::HashSet;
///
/// async {
/// let devices = discovery::DiscoveryBuilder::default().run()
/// .await
/// .unwrap()
/// .collect::<HashSet<_>>()
/// .await;
///
/// println!("Devices found: {:?}", devices);
/// };
/// ```
pub async fn run(&self) -> Result<impl Stream<Item = Device>, Error> {
let Self {
duration,
listen_address,
discovery_mode,
} = self;
match discovery_mode {
DiscoveryMode::Multicast => self.run_multicast(duration, listen_address).await,
DiscoveryMode::Unicast {
network,
network_mask,
} => {
self.run_unicast(duration, listen_address, network, network_mask)
.await
}
}
}
}
/// Receives one UDP datagram and returns it as a (lossily decoded) UTF-8
/// string together with the sender's address.
async fn recv_string(sock: &UdpSocket) -> io::Result<(String, SocketAddr)> {
    // 16 KiB comfortably covers a WS-Discovery probe-match datagram.
    let mut datagram = vec![0u8; 16 * 1024];
    let (len, peer) = sock.recv_from(&mut datagram).await?;
    datagram.truncate(len);
    Ok((String::from_utf8_lossy(&datagram).into_owned(), peer))
}
/// Extracts a [`Device`] from a probe-match envelope.
///
/// Only probe matches advertising an `onvif://www.onvif.org` scope are
/// considered; returns `None` when no such match is present.
fn device_from_envelope(envelope: probe_matches::Envelope) -> Option<Device> {
    let onvif_match = envelope
        .body
        .probe_matches
        .probe_match
        .iter()
        .find(|candidate| {
            candidate
                .find_in_scopes("onvif://www.onvif.org")
                .is_some()
        })?;

    Some(Device {
        name: onvif_match.name(),
        urls: onvif_match.x_addrs(),
        hardware: onvif_match.hardware(),
        address: onvif_match.endpoint_reference_address(),
        types: onvif_match.types().into_iter().map(Into::into).collect(),
    })
}
/// Builds the WS-Discovery Probe envelope with a fresh random message id.
fn build_probe() -> probe::Envelope {
    use probe::*;
    let header = Header {
        message_id: format!("uuid:{}", uuid::Uuid::new_v4()),
        action: "http://schemas.xmlsoap.org/ws/2005/04/discovery/Probe".into(),
        to: "urn:schemas-xmlsoap-org:ws:2005:04:discovery".into(),
    };
    Envelope {
        header,
        ..Default::default()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio_stream::StreamExt;

    /// This test serves more as an example of how the unicast discovery works.
    #[tokio::test]
    async fn test_unicast() {
        let devices = DiscoveryBuilder::default()
            .discovery_mode(DiscoveryMode::Unicast {
                network: Ipv4Addr::new(192, 168, 1, 0),
                network_mask: Ipv4Addr::new(255, 255, 255, 0),
            })
            .run()
            .await
            .unwrap()
            .collect::<Vec<_>>()
            .await;

        println!("Devices found: {:?}", devices);
    }

    // Verifies that only probe matches carrying an ONVIF scope are extracted,
    // that unrelated message ids are filtered out, and that multiple XAddrs
    // are split into separate URLs.
    #[test]
    fn test_xaddrs_extraction() {
        const DEVICE_ADDRESS: &str = "an address";

        // Minimal ProbeMatches fixture: the first ProbeMatch has no ONVIF
        // scope (must be skipped), the second carries the data under test.
        let make_xml = |relates_to: &str, xaddrs: &str| -> String {
            format!(
                r#"<?xml version="1.0" encoding="UTF-8"?>
                <SOAP-ENV:Envelope
                            xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
                            xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
                            xmlns:d="http://schemas.xmlsoap.org/ws/2005/04/discovery"
                            xmlns:dn="http://www.onvif.org/ver10/network/wsdl">
                    <SOAP-ENV:Header>
                        <wsa:RelatesTo>{relates_to}</wsa:RelatesTo>
                    </SOAP-ENV:Header>
                    <SOAP-ENV:Body>
                        <d:ProbeMatches>
                            <d:ProbeMatch>
                                <d:XAddrs>http://something.else</d:XAddrs>
                            </d:ProbeMatch>
                            <d:ProbeMatch>
                                <wsa:EndpointReference>
                                    <wsa:Address>{device_address}</wsa:Address>
                                </wsa:EndpointReference>
                                <d:Scopes>onvif://www.onvif.org/name/MyCamera2000</d:Scopes>
                                <d:XAddrs>{xaddrs}</d:XAddrs>
                            </d:ProbeMatch>
                        </d:ProbeMatches>
                    </SOAP-ENV:Body>
                </SOAP-ENV:Envelope>
                "#,
                relates_to = relates_to,
                xaddrs = xaddrs,
                device_address = DEVICE_ADDRESS
            )
        };

        let our_uuid = "uuid:84ede3de-7dec-11d0-c360-F01234567890";
        let bad_uuid = "uuid:84ede3de-7dec-11d0-c360-F00000000000";

        let input = [
            make_xml(our_uuid, "http://addr_20 http://addr_21 http://addr_22"),
            make_xml(bad_uuid, "http://addr_30 http://addr_31"),
        ];

        let actual = input
            .iter()
            .filter_map(|xml| yaserde::de::from_str::<probe_matches::Envelope>(xml).ok())
            .filter(|envelope| envelope.header.relates_to == our_uuid)
            .filter_map(device_from_envelope)
            .collect::<Vec<_>>();

        assert_eq!(actual.len(), 1);

        // OK: message UUID matches and addr responds
        assert_eq!(
            actual,
            &[Device {
                urls: vec![
                    Url::parse("http://addr_20").unwrap(),
                    Url::parse("http://addr_21").unwrap(),
                    Url::parse("http://addr_22").unwrap(),
                ],
                name: Some("MyCamera2000".to_string()),
                hardware: None,
                address: DEVICE_ADDRESS.to_string(),
                types: vec![],
            }]
        );
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/discovery/network_enumeration.rs | onvif/src/discovery/network_enumeration.rs | use std::net::Ipv4Addr;
/// Packs the four octets of an IPv4 address into a `u32` in big-endian
/// (network) order, so e.g. `[192, 168, 0, 1]` becomes `0xC0A8_0001`.
#[inline]
fn octets_to_u32(octets: [u8; 4]) -> u32 {
    // Identical to the manual shift-and-or, expressed with the std helper.
    u32::from_be_bytes(octets)
}
/// Enumerate the list of IPs on the network given the network address and the mask.
///
/// The network address itself (host part all zeros) and the broadcast address
/// (host part all ones) are excluded, matching the usable-host range, e.g.
/// a /24 network yields 254 addresses.
///
/// Uses the std `From<Ipv4Addr> for u32` conversion (big-endian) instead of
/// the local octet-packing helper, making the function self-contained.
pub fn enumerate_network_v4(network: Ipv4Addr, mask: Ipv4Addr) -> Vec<Ipv4Addr> {
    let base = u32::from(network);
    // The inverted mask is the size of the host part, i.e. one past the
    // highest host index (the broadcast address).
    let host_space = !u32::from(mask);
    (1..host_space)
        .map(|host| Ipv4Addr::from(base | host))
        .collect()
}
/// Tests the enumeration method. See http://jodies.de/ipcalc for examples.
#[cfg(test)]
mod test_enumerate_v4 {
    use super::*;

    #[test]
    pub fn test_basic_home_network() {
        // /24 network: 256 addresses minus network and broadcast = 254 hosts.
        let ips = enumerate_network_v4(
            Ipv4Addr::new(192, 168, 0, 0),
            Ipv4Addr::new(255, 255, 255, 0),
        );
        assert_eq!(254, ips.len())
    }

    #[test]
    pub fn test_more_complex_net() {
        // /23 network: 512 addresses minus network and broadcast = 510 hosts.
        let ips = enumerate_network_v4(
            Ipv4Addr::new(192, 168, 0, 0),
            Ipv4Addr::new(255, 255, 254, 0),
        );
        dbg!(&ips);
        assert_eq!(510, ips.len())
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/tests.rs | onvif/src/soap/tests.rs | use super::*;
use crate::utils::xml_eq::assert_xml_eq;
use schema::soap_envelope::{FaultcodeEnum, Reasontext, Subcode};
/// End-to-end check of `soap()`: application XML must be wrapped into a SOAP
/// 1.2 envelope with the payload's namespaces hoisted onto the envelope.
#[test]
fn test_soap() {
    let app_data = r#"
    <my:Book xmlns:my="http://www.example.my/schema">
        <my:Title>Such book</my:Title>
        <my:Pages>42</my:Pages>
    </my:Book>
    "#;

    let expected = r#"<?xml version="1.0" encoding="UTF-8"?>
    <s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
                xmlns:my="http://www.example.my/schema">
        <s:Body>
            <my:Book>
                <my:Title>Such book</my:Title>
                <my:Pages>42</my:Pages>
            </my:Book>
        </s:Body>
    </s:Envelope>
    "#;

    let actual = soap(app_data, &None).unwrap();

    println!("{}", actual);
    println!("{}", expected);

    // Event-based comparison: formatting/whitespace differences are ignored.
    assert_xml_eq(actual.as_str(), expected);
}
/// Checks `unsoap()`: the application payload must be extracted from the SOAP
/// body and remain deserializable (namespaces intact) with yaserde.
#[test]
fn test_unsoap() {
    #[derive(Default, Eq, PartialEq, Debug, YaDeserialize)]
    #[yaserde(prefix = "my", namespace = "my: http://www.example.my/schema")]
    pub struct Book {
        #[yaserde(prefix = "my", rename = "Title")]
        pub title: String,
        #[yaserde(prefix = "my", rename = "Pages")]
        pub pages: i32,
    }

    let input = r#"<?xml version="1.0" encoding="utf-8"?>
    <s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
                xmlns:my="http://www.example.my/schema">
        <s:Body>
            <my:Book>
                <my:Title>Such book</my:Title>
                <my:Pages>42</my:Pages>
            </my:Book>
        </s:Body>
    </s:Envelope>
    "#;

    let actual = unsoap(input).unwrap();

    println!("{:?}", actual);

    let parsed: Book = yaserde::de::from_str(&actual).unwrap();

    assert_eq!(parsed.title, "Such book");
    assert_eq!(parsed.pages, 42);
}
/// Checks that a SOAP `Fault` element deserializes into the typed structure:
/// fault code value, first-level subcode, and per-language reason texts.
#[test]
fn test_get_fault() {
    let response = r#"<?xml version="1.0" ?>
    <soapenv:Fault
            xmlns:soapenv="http://www.w3.org/2003/05/soap-envelope"
            xmlns:ter="http://www.onvif.org/ver10/error"
            xmlns:xs="http://www.w3.org/2000/10/XMLSchema">
        <soapenv:Code>
            <soapenv:Value>tns:DataEncodingUnknown</soapenv:Value>
            <soapenv:Subcode>
                <soapenv:Value>ter:fault subcode</soapenv:Value>
                <soapenv:Subcode>
                    <soapenv:Value>ter:fault subcode</soapenv:Value>
                </soapenv:Subcode>
            </soapenv:Subcode>
        </soapenv:Code>
        <soapenv:Reason>
            <soapenv:Text xml:lang="en">fault reason 1</soapenv:Text>
            <soapenv:Text xml:lang="en">fault reason 2</soapenv:Text>
        </soapenv:Reason>
        <soapenv:Node>http://www.w3.org/2003/05/soap-envelope/node/ultimateReceiver</soapenv:Node>
        <soapenv:Role>http://www.w3.org/2003/05/soap-envelope/role/ultimateReceiver</soapenv:Role>
        <soapenv:Detail>
            <soapenv:Text>fault detail</soapenv:Text>
        </soapenv:Detail>
    </soapenv:Fault>
    "#;

    let envelope = xmltree::Element::parse(response.as_bytes()).unwrap();
    let fault = deserialize_fault(&envelope).unwrap();

    assert_eq!(
        fault.code.value,
        FaultcodeEnum("tns:DataEncodingUnknown".to_string())
    );
    assert_eq!(
        fault.code.subcode,
        Some(Subcode {
            value: "ter:fault subcode".to_string(),
            // subcode: Vec::new()
        })
    );
    assert_eq!(
        fault.reason.text,
        vec![
            Reasontext {
                lang: "en".to_string()
            },
            Reasontext {
                lang: "en".to_string()
            }
        ]
    );
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/client.rs | onvif/src/soap/client.rs | #![allow(clippy::large_enum_variant)]
use crate::soap::{
self,
auth::{digest::Digest, username_token::UsernameToken},
};
use async_recursion::async_recursion;
use async_trait::async_trait;
use futures_util::lock::Mutex;
use schema::transport::{Error, Transport};
use std::ops::DerefMut;
use std::{
fmt::{Debug, Formatter},
sync::Arc,
time::Duration,
};
use tracing::{debug, instrument, trace};
use url::Url;
/// ONVIF SOAP client bound to a single service endpoint.
#[derive(Clone)]
pub struct Client {
    client: reqwest::Client,
    config: Config,
    // Wrapped in Arc<Mutex<..>> so clones of this client share (and
    // serialize access to) a single Digest challenge/response state.
    digest_auth_state: Arc<Mutex<Digest>>,
}
/// Builder for [`Client`]; start with [`ClientBuilder::new`], chain setters,
/// then call `build()`.
#[derive(Clone)]
pub struct ClientBuilder {
    // Caller-provided HTTP client; when `None`, a default one is built.
    client: Option<reqwest::Client>,
    config: Config,
}
impl ClientBuilder {
    pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);

    /// Starts a builder for the service at `uri`: anonymous access, `Any`
    /// auth, the default timeout, and no response patching.
    pub fn new(uri: &Url) -> Self {
        let config = Config {
            uri: uri.clone(),
            credentials: None,
            response_patcher: None,
            auth_type: AuthType::Any,
            reuse_digest_auth_headers: false,
            timeout: ClientBuilder::DEFAULT_TIMEOUT,
            fix_time_gap: None,
        };
        Self {
            client: None,
            config,
        }
    }

    /// Use a caller-provided HTTP client instead of the default one.
    pub fn http_client(mut self, client: reqwest::Client) -> Self {
        self.client = Some(client);
        self
    }

    /// Credentials for authentication; `None` means anonymous access.
    pub fn credentials(mut self, credentials: Option<Credentials>) -> Self {
        self.config.credentials = credentials;
        self
    }

    /// Hook that rewrites each successful response body before it is returned.
    pub fn response_patcher(mut self, response_patcher: Option<ResponsePatcher>) -> Self {
        self.config.response_patcher = response_patcher;
        self
    }

    /// Selects/restricts the authentication scheme (default: [`AuthType::Any`]).
    pub fn auth_type(mut self, auth_type: AuthType) -> Self {
        self.config.auth_type = auth_type;
        self
    }

    /// Keep answering with the same Digest challenge across requests.
    pub fn reuse_digest_auth_headers(mut self, reuse_digest_auth_headers: bool) -> Self {
        self.config.reuse_digest_auth_headers = reuse_digest_auth_headers;
        self
    }

    /// Request timeout for the default HTTP client (ignored when a custom
    /// client was supplied via `http_client`).
    pub fn timeout(mut self, timeout: Duration) -> Self {
        self.config.timeout = timeout;
        self
    }

    /// Clock offset applied to UsernameToken timestamps.
    pub fn fix_time_gap(mut self, time_gap: Option<chrono::Duration>) -> Self {
        self.config.fix_time_gap = time_gap;
        self
    }

    /// Finalizes the configuration into a ready-to-use [`Client`].
    pub fn build(self) -> Client {
        let Self { client, config } = self;
        let http_client = client.unwrap_or_else(|| {
            ClientBuilder::default_http_client_builder()
                .timeout(config.timeout)
                .build()
                .unwrap()
        });
        let digest = Digest::new(
            &config.uri,
            &config.credentials,
            config.reuse_digest_auth_headers,
        );
        Client {
            client: http_client,
            config,
            digest_auth_state: Arc::new(Mutex::new(digest)),
        }
    }

    /// Default reqwest builder: automatic redirects disabled (the client
    /// follows them manually) and, with the `tls` feature, native-tls with
    /// certificate validation disabled.
    pub fn default_http_client_builder() -> reqwest::ClientBuilder {
        #[allow(unused_mut)]
        let mut client_builder =
            reqwest::Client::builder().redirect(reqwest::redirect::Policy::none());
        #[cfg(feature = "tls")]
        {
            // hyper-rustls does not support IP hosts (like https://192.168.1.2) which are
            // very common for IP cameras. So we can use only native-tls for now.
            // https://github.com/ctz/hyper-rustls/issues/56
            client_builder = client_builder
                .use_native_tls()
                .danger_accept_invalid_certs(true);
        }
        client_builder
    }
}
// Internal client configuration, shared verbatim between builder and client.
#[derive(Clone)]
struct Config {
    // Service endpoint SOAP requests are POSTed to.
    uri: Url,
    // Credentials for Digest / UsernameToken auth; `None` for anonymous.
    credentials: Option<Credentials>,
    // Optional hook rewriting each successful response body.
    response_patcher: Option<ResponsePatcher>,
    auth_type: AuthType,
    // When true, keep reusing the last Digest challenge across requests.
    reuse_digest_auth_headers: bool,
    // Only used when building the default reqwest client in `build()`.
    timeout: Duration,
    // Clock offset added to UsernameToken timestamps to match the device clock.
    fix_time_gap: Option<chrono::Duration>,
}
/// Which SOAP/HTTP authentication scheme(s) the client may use.
#[derive(Clone, Debug)]
pub enum AuthType {
    /// First try to authorize with Digest and in case of error try UsernameToken auth
    Any,
    /// Use only Digest auth
    Digest,
    /// Use only UsernameToken auth
    UsernameToken,
}
/// Username/password pair used for device authentication.
/// The manual `Debug` impl below hides the password.
#[derive(Clone)]
pub struct Credentials {
    pub username: String,
    pub password: String,
}
impl Debug for Credentials {
    // Deliberately omits the password from debug/log output.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} [password hidden]", self.username)
    }
}
/// Hook applied to every successful, SOAP-unwrapped response body; returns
/// the patched XML or an error message.
pub type ResponsePatcher = Arc<dyn Fn(&str) -> Result<String, String> + Send + Sync>;
// Auth strategy selected for a single request chain: either a mutable borrow
// of the shared Digest state, or stateless UsernameToken.
#[derive(Debug)]
enum RequestAuthType<'a> {
    Digest(&'a mut Digest),
    UsernameToken,
}
#[async_trait]
impl Transport for Client {
    /// Sends one SOAP request and returns the SOAP-unwrapped response body.
    ///
    /// With [`AuthType::Any`], Digest auth is attempted first; UsernameToken
    /// is used as a fallback only when Digest fails with an authorization
    /// error. All other errors are returned as-is.
    #[instrument(skip_all, fields(uri = self.config.uri.as_str()))]
    async fn request(&self, message: &str) -> Result<String, Error> {
        match self.config.auth_type {
            AuthType::Any => {
                match self.request_with_digest(message).await {
                    Ok(success) => Ok(success),
                    Err(Error::Authorization(e)) => {
                        debug!("Failed to authorize with Digest auth: {e}. Trying UsernameToken auth ...");
                        self.request_with_username_token(message).await
                    }
                    Err(e) => Err(e),
                }
            }
            AuthType::Digest => self.request_with_digest(message).await,
            AuthType::UsernameToken => self.request_with_username_token(message).await,
        }
    }
}
impl Client {
    // Runs the request with Digest auth, holding the shared digest-state lock
    // for the whole call so concurrent requests don't interleave challenges.
    async fn request_with_digest(&self, message: &str) -> Result<String, Error> {
        let mut guard = self.digest_auth_state.lock().await;
        let mut auth_type = RequestAuthType::Digest(guard.deref_mut());
        self.request_recursive(message, &self.config.uri, &mut auth_type, 0)
            .await
    }

    async fn request_with_username_token(&self, message: &str) -> Result<String, Error> {
        let mut auth_type = RequestAuthType::UsernameToken;
        self.request_recursive(message, &self.config.uri, &mut auth_type, 0)
            .await
    }

    /// Sends the SOAP request, retrying on 401 (to answer a Digest challenge)
    /// and following at most one redirect manually.
    #[async_recursion]
    async fn request_recursive(
        &self,
        message: &str,
        uri: &Url,
        auth_type: &mut RequestAuthType,
        redirections: u32,
    ) -> Result<String, Error> {
        // UsernameToken is embedded in the SOAP header, so it must be
        // generated before wrapping the message; Digest goes in HTTP headers.
        let username_token = match auth_type {
            RequestAuthType::UsernameToken => self.username_token_auth(),
            _ => None,
        };

        debug!(?auth_type, %redirections, "About to make request.");

        let soap_msg =
            soap::soap(message, &username_token).map_err(|e| Error::Protocol(format!("{e:?}")))?;

        let mut request = self
            .client
            .post(uri.as_str())
            .header("Content-Type", "application/soap+xml; charset=utf-8;");

        if let RequestAuthType::Digest(digest) = auth_type {
            request = digest
                .add_headers(request)
                .map_err(|e| Error::Authorization(e.to_string()))?;

            debug!("Digest headers added");
        }

        trace!("Request body: {soap_msg}");

        // Map reqwest errors onto transport error categories.
        let response = request.body(soap_msg).send().await.map_err(|e| match e {
            e if e.is_connect() => Error::Connection(e.to_string()),
            e if e.is_timeout() => Error::Timeout(e.to_string()),
            e if e.is_redirect() => Error::Redirection(e.to_string()),
            e if e.is_decode() || e.is_body() => Error::Protocol(e.to_string()),
            e => Error::Other(e.to_string()),
        })?;

        let status = response.status();

        debug!("Response status: {status}");

        if status.is_success() {
            if let RequestAuthType::Digest(digest) = auth_type {
                digest.set_success();
            }

            response
                .text()
                .await
                .map_err(|e| Error::Protocol(e.to_string()))
                .and_then(|text| {
                    trace!("Response body: {text}");

                    let response =
                        soap::unsoap(&text).map_err(|e| Error::Protocol(format!("{e:?}")))?;

                    if let Some(response_patcher) = &self.config.response_patcher {
                        match response_patcher(&response) {
                            Ok(patched) => {
                                trace!("Response (SOAP unwrapped, patched): {patched}");
                                Ok(patched)
                            }
                            Err(e) => Err(Error::Protocol(format!("Patching failed: {e}"))),
                        }
                    } else {
                        Ok(response)
                    }
                })
        } else if status == reqwest::StatusCode::UNAUTHORIZED {
            match auth_type {
                // Feed the 401 challenge into the digest state and retry,
                // unless digest auth already failed too many times.
                RequestAuthType::Digest(digest) if !digest.is_failed() => {
                    digest.set_401(response);
                }
                _ => {
                    if let Ok(text) = response.text().await {
                        trace!("Got Unauthorized with body: {text}");
                    }

                    return Err(Error::Authorization("Unauthorized".to_string()));
                }
            }

            self.request_recursive(message, uri, auth_type, redirections)
                .await
        } else if status.is_redirection() {
            // reqwest changes method on 302, so we have to handle redirections ourselves
            // https://github.com/seanmonstar/reqwest/issues/912
            if redirections > 0 {
                return Err(Error::Redirection("Redirection limit exceeded".to_string()));
            }

            let new_url = Client::get_redirect_location(&response)?;

            debug!("Redirecting to {new_url} ...");

            self.request_recursive(message, &new_url, auth_type, redirections + 1)
                .await
        } else {
            if let Ok(text) = response.text().await {
                trace!("Got HTTP error with body: {text}");

                // Some devices report authorization failures as SOAP faults
                // with a non-401 HTTP status; surface those as auth errors.
                if let Err(soap::Error::Fault(f)) = soap::unsoap(&text) {
                    if f.is_unauthorized() {
                        return Err(Error::Authorization("Unauthorized".to_string()));
                    }
                }
            }

            Err(Error::Other(status.to_string()))
        }
    }

    // Extracts and parses the `Location` header of a redirect response.
    //
    // Bug fix: the previous `response.headers()[LOCATION]` indexing panics
    // when a (malformed) redirect response carries no Location header; a
    // missing header is now reported as a `Redirection` error instead.
    fn get_redirect_location(response: &reqwest::Response) -> Result<Url, Error> {
        response
            .headers()
            .get(reqwest::header::LOCATION)
            .ok_or_else(|| Error::Redirection("Missing Location header".to_string()))?
            .to_str()
            .map_err(|e| Error::Redirection(e.to_string()))?
            .parse::<Url>()
            .map_err(|e| Error::Redirection(e.to_string()))
    }

    /// Builds a WS-UsernameToken from the configured credentials, if any.
    pub fn username_token_auth(&self) -> Option<UsernameToken> {
        self.config
            .credentials
            .as_ref()
            .map(|c| UsernameToken::new(&c.username, &c.password, self.config.fix_time_gap))
    }

    /// Sets the clock offset applied to UsernameToken `Created` timestamps
    /// (to compensate for a device/client time gap).
    pub fn set_fix_time_gap(&mut self, time_gap: Option<chrono::Duration>) {
        self.config.fix_time_gap = time_gap;
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/auth.rs | onvif/src/soap/auth.rs | pub mod digest;
// WS-UsernameToken (password digest) authentication helper.
pub mod username_token;
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/mod.rs | onvif/src/soap/mod.rs | pub mod auth;
pub mod client;
#[cfg(test)]
mod tests;
use auth::username_token::UsernameToken;
use schema::soap_envelope;
use xmltree::{Element, Namespace, XMLNode};
// SOAP 1.2 envelope namespace, bound to the "s" prefix on outgoing messages.
const SOAP_URI: &str = "http://www.w3.org/2003/05/soap-envelope";
/// Errors produced while wrapping/unwrapping SOAP envelopes.
#[derive(Debug)]
pub enum Error {
    /// The input is not well-formed XML.
    ParseError,
    /// The root element is not a SOAP `Envelope`.
    EnvelopeNotFound,
    /// The envelope has no `Body` child.
    BodyNotFound,
    /// The `Body` contains no element children.
    BodyIsEmpty,
    /// The peer answered with a SOAP `Fault` (boxed to keep this enum small).
    Fault(Box<soap_envelope::Fault>),
    /// Serialization or XML-writing failure inside this module.
    InternalError(String),
}
/// Optional SOAP response payload.
// NOTE(review): not referenced anywhere within this module — presumably used
// by downstream code; confirm before removing.
#[derive(Debug)]
pub struct Response {
    pub response: Option<String>,
}
pub fn soap(xml: &str, username_token: &Option<UsernameToken>) -> Result<String, Error> {
let app_data = parse(xml)?;
let mut namespaces = app_data.namespaces.clone().unwrap_or_else(Namespace::empty);
namespaces.put("s", SOAP_URI);
let mut body = Element::new("Body");
body.prefix = Some("s".to_string());
body.children.push(XMLNode::Element(app_data));
let mut envelope = Element::new("Envelope");
envelope.namespaces = Some(namespaces);
envelope.prefix = Some("s".to_string());
if let Some(username_token) = username_token {
let mut header = Element::new("Header");
header.prefix = Some("s".to_string());
header
.children
.push(XMLNode::Element(parse(&username_token.to_xml())?));
envelope.children.push(XMLNode::Element(header));
}
envelope.children.push(XMLNode::Element(body));
xml_element_to_string(&envelope)
}
pub fn unsoap(xml: &str) -> Result<String, Error> {
let root = parse(xml)?;
if root.name != "Envelope" {
return Err(Error::EnvelopeNotFound);
}
let body = root.get_child("Body").ok_or(Error::BodyNotFound)?;
if let Some(fault) = body.get_child("Fault") {
let fault = deserialize_fault(fault)?;
return Err(Error::Fault(Box::new(fault)));
}
body.children
.iter()
.find_map(|node| match node {
XMLNode::Element(app_data) => Some(xml_element_to_string(app_data)),
_ => None,
})
.ok_or(Error::BodyIsEmpty)?
}
/// Parses `xml` into an element tree, mapping any parser failure to
/// [`Error::ParseError`] (the underlying cause is discarded).
fn parse(xml: &str) -> Result<Element, Error> {
    Element::parse(xml.as_bytes()).map_err(|_| Error::ParseError)
}
fn xml_element_to_string(el: &Element) -> Result<String, Error> {
let mut out = vec![];
el.write(&mut out)
.map_err(|_| Error::InternalError("Could not write XML element".to_string()))?;
String::from_utf8(out).map_err(|e| Error::InternalError(e.to_string()))
}
/// Re-serializes the `Fault` element and deserializes it into the typed
/// [`soap_envelope::Fault`] structure via yaserde.
fn deserialize_fault(envelope: &Element) -> Result<soap_envelope::Fault, Error> {
    let string = xml_element_to_string(envelope)?;
    yaserde::de::from_str(&string).map_err(Error::InternalError)
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/auth/digest.rs | onvif/src/soap/auth/digest.rs | use crate::soap::client::Credentials;
use nonzero_ext::nonzero;
use reqwest::{RequestBuilder, Response};
use std::fmt::{Debug, Formatter};
use std::num::NonZeroU8;
use thiserror::Error;
use url::Url;
/// Errors produced while preparing Digest authorization headers.
#[derive(Debug, Error)]
pub enum Error {
    // NOTE(review): `InvalidState` is never constructed within this module;
    // confirm whether external callers still need it before removing.
    #[error("Invalid state")]
    InvalidState,
    /// A Digest challenge was received but no credentials are configured.
    #[error("No credentials")]
    NoCredentials,
    /// Failure parsing or answering the `WWW-Authenticate` challenge.
    #[error("Digest {0}")]
    Digest(String),
}
/// Tracks HTTP Digest authentication state across requests to one endpoint.
pub struct Digest {
    creds: Option<Credentials>,
    uri: Url,
    // Whether we are still unauthenticated or answering a 401 challenge.
    state: State,
    // When true, keep reusing the last challenge after a success instead of
    // resetting to the unauthenticated state.
    reuse_headers: bool,
}
enum State {
    // No challenge received yet; requests go out without an Authorization header.
    Default,
    // At least one 401 received; `response` holds the latest challenge and
    // `count` how many consecutive 401s were seen (never zero).
    Got401 {
        response: Response,
        count: NonZeroU8,
    },
}
impl Digest {
    /// Creates a fresh, unauthenticated digest state for `uri`.
    pub fn new(uri: &Url, creds: &Option<Credentials>, reuse_headers: bool) -> Self {
        Self {
            creds: creds.clone(),
            uri: uri.clone(),
            state: State::Default,
            reuse_headers,
        }
    }
}
impl Digest {
    /// Call this when the authentication was successful.
    pub fn set_success(&mut self) {
        if !self.reuse_headers {
            // Since we don't need to preserve the headers, reset all the state to default.
            *self = Self::new(&self.uri, &self.creds, self.reuse_headers);
            return;
        }
        if let State::Got401 { count, .. } = &mut self.state {
            // We always store at least one request, so it's never zero.
            *count = nonzero!(1_u8);
        }
    }

    /// Call this when received 401 Unauthorized.
    pub fn set_401(&mut self, response: Response) {
        self.state = match self.state {
            // First challenge seen.
            State::Default => State::Got401 {
                response,
                count: nonzero!(1_u8),
            },
            // Repeated 401: keep the newest challenge, bump the counter.
            State::Got401 { count, .. } => State::Got401 {
                response,
                count: count.saturating_add(1),
            },
        }
    }

    /// Whether Digest auth should be considered failed (3+ consecutive 401s).
    pub fn is_failed(&self) -> bool {
        match &self.state {
            State::Default => false,
            // Possible scenarios:
            // - We've got 401 with a challenge for the first time, we calculate the answer, then
            //   we get 200 OK. So, a single 401 is never a failure.
            // - After successful auth the count is 1 because we always store at least one request,
            //   and the caller decided to reuse the same challenge for multiple requests. But at
            //   some point, we'll get a 401 with a new challenge and `stale=true`.
            //   So, we'll get a second 401, and this is also not a failure because after
            //   calculating the answer to the challenge, we'll get a 200 OK, and will reset the
            //   counter in `set_success()`.
            // - Three 401's in a row is certainly a failure.
            State::Got401 { count, .. } => count.get() >= 3,
        }
    }

    /// Adds an `Authorization` header answering the stored challenge, if any.
    ///
    /// # Errors
    /// [`Error::NoCredentials`] if a challenge is pending but no credentials
    /// were configured; [`Error::Digest`] if the challenge cannot be answered.
    pub fn add_headers(&self, mut request: RequestBuilder) -> Result<RequestBuilder, Error> {
        match &self.state {
            State::Default => Ok(request),
            State::Got401 { response, .. } => {
                let creds = self.creds.as_ref().ok_or(Error::NoCredentials)?;
                request = request.header("Authorization", digest_auth(response, creds, &self.uri)?);
                Ok(request)
            }
        }
    }
}
fn digest_auth(res: &reqwest::Response, creds: &Credentials, url: &Url) -> Result<String, Error> {
let www_authenticate = res
.headers()
.get(reqwest::header::WWW_AUTHENTICATE)
.ok_or_else(|| Error::Digest("No www-authenticate header".to_string()))?
.to_str()
.map_err(|e| Error::Digest(e.to_string()))?;
let mut context = digest_auth::AuthContext::new(&creds.username, &creds.password, url.path());
context.method = digest_auth::HttpMethod::POST;
Ok(digest_auth::parse(www_authenticate)
.map_err(|e| Error::Digest(e.to_string()))?
.respond(&context)
.map_err(|e| Error::Digest(e.to_string()))?
.to_string())
}
impl Debug for Digest {
    // Manual impl: only `creds` and `state` are shown (the URI and the
    // header-reuse flag seen in `Self::new` are omitted from the output).
    fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result {
        let mut dbg = fmt.debug_struct("Digest");
        dbg.field("creds", &self.creds);
        dbg.field("state", &self.state);
        dbg.finish()
    }
}
impl Debug for State {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            // Note: the `Default` variant is rendered as "FirstRequest".
            State::Default => fmt.write_str("FirstRequest"),
            State::Got401 { count, .. } => write!(fmt, "Got401({count})"),
        }
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/soap/auth/username_token.rs | onvif/src/soap/auth/username_token.rs | #[derive(Default, Debug, Clone)]
pub struct UsernameToken {
    /// Name of the user the token authenticates.
    pub username: String,
    /// Base64-encoded random nonce (a fresh UUIDv4's raw bytes; see `new`).
    pub nonce: String,
    /// Base64-encoded SHA-1 of `nonce + created + password`
    /// (WS-UsernameToken password digest).
    pub digest: String,
    /// Creation timestamp, RFC 3339 with millisecond precision, UTC ("Z").
    pub created: String,
}
impl UsernameToken {
    /// Builds a WS-UsernameToken for the given credentials.
    ///
    /// The nonce is a fresh UUIDv4's raw bytes; the password digest is
    /// `Base64(SHA-1(nonce + created + password))` per the WS-Security
    /// UsernameToken profile. `fix_time_gap`, when present, is added to the
    /// local clock so `created` lines up with the device's clock.
    pub fn new(
        username: &str,
        password: &str,
        fix_time_gap: Option<chrono::Duration>,
    ) -> UsernameToken {
        let uuid = uuid::Uuid::new_v4();
        let nonce = uuid.as_bytes();
        let mut created = chrono::Utc::now();
        if let Some(time_gap) = fix_time_gap {
            // On (theoretical) overflow, fall back to the unshifted clock.
            created = created
                .checked_add_signed(time_gap)
                .unwrap_or_else(chrono::Utc::now);
        }
        let created = created.to_rfc3339_opts(chrono::SecondsFormat::Millis, true);
        let mut message = Vec::with_capacity(nonce.len() + created.len() + password.len());
        message.extend_from_slice(nonce);
        message.extend_from_slice(created.as_bytes());
        message.extend_from_slice(password.as_bytes());
        let mut hasher = sha1::Sha1::new();
        hasher.update(&message);
        let digest = hasher.digest().bytes();
        UsernameToken {
            username: username.to_owned(),
            nonce: base64::encode(nonce),
            digest: base64::encode(digest),
            created,
        }
    }
    /// Serializes the token as a SOAP `wsse:Security` header.
    pub fn to_xml(&self) -> String {
        format!(
            r##"<?xml version="1.0" encoding="UTF-8"?>
            <wsse:Security
                xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
                xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
                <wsse:UsernameToken>
                    <wsse:Username>{}</wsse:Username>
                    <wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordDigest">{}</wsse:Password>
                    <wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary">{}</wsse:Nonce>
                    <wsu:Created>{}</wsu:Created>
                </wsse:UsernameToken>
            </wsse:Security>"##,
            self.username, self.digest, self.nonce, self.created
        )
    }
}
#[test]
fn ws_username_token_example() {
    // Known-answer test from the ONVIF Application Programmer's Guide
    // (6.1.1.3 ONVIF::AuthenticatingByWS-UsernameToken)
    // https://www.onvif.org/wp-content/uploads/2016/12/ONVIF_WG-APG-Application_Programmers_Guide-1.pdf
    let nonce = base64::decode("LKqI6G/AikKCQrN0zqZFlg==").unwrap();
    let date = "2010-09-16T07:50:45Z";
    let password = "userpassword";
    // digest = SHA-1(nonce || created || password)
    let mut message = Vec::new();
    message.extend_from_slice(&nonce);
    message.extend_from_slice(date.as_bytes());
    message.extend_from_slice(password.as_bytes());
    let digest = {
        let mut hasher = sha1::Sha1::new();
        hasher.update(&message);
        hasher.digest().bytes()
    };
    assert_eq!(
        base64::encode(digest),
        "tuOSpGlFlIXsozq4HFNeeGeFLEI=".to_string()
    )
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/utils/display_list.rs | onvif/src/utils/display_list.rs | use std::fmt::{Debug, Display, Formatter, Write};
pub struct DisplayList<'a, T>(pub &'a [T]);

/// Renders the slice like a `Vec`'s `Debug` output (`[a, b, c]`), but using
/// each element's `Display` implementation instead of its `Debug` one.
impl<T: Display> Debug for DisplayList<'_, T> {
    fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result {
        formatter.write_char('[')?;
        for (index, element) in self.0.iter().enumerate() {
            if index > 0 {
                formatter.write_str(", ")?;
            }
            element.fmt(formatter)?;
        }
        formatter.write_char(']')
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/utils/xml_eq.rs | onvif/src/utils/xml_eq.rs | pub fn assert_xml_eq(actual: &str, expected: &str) {
for (a, e) in without_whitespaces(actual).zip(without_whitespaces(expected)) {
assert_eq!(a, e);
}
}
/// Streams the XML events of `expected`, dropping pure-whitespace text nodes
/// so that two documents can be compared independent of formatting.
fn without_whitespaces(
    expected: &str,
) -> impl Iterator<Item = Result<xml::reader::XmlEvent, xml::reader::Error>> + '_ {
    let reader = xml::EventReader::new(expected.as_bytes());
    reader
        .into_iter()
        .filter(|event| !matches!(event, Ok(xml::reader::XmlEvent::Whitespace(_))))
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/utils/mod.rs | onvif/src/utils/mod.rs | pub mod display_list;
pub mod hash;
#[cfg(test)]
pub mod xml_eq;
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/src/utils/hash.rs | onvif/src/utils/hash.rs | use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
/// Hashes `t` with the standard library's default hasher and returns the
/// 64-bit result. Deterministic for a given value within a process.
pub fn calculate_hash<T: Hash>(t: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    t.hash(&mut hasher);
    hasher.finish()
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/examples/event.rs | onvif/examples/event.rs | // This example pulls messages related to the RuleEngine topic.
// RuleEngine topic consists of events related to motion detection.
// Tested on Dahua, uniview, reolink and axis ip cameras.
// Don't forget to set the camera's IP address, username and password.
use onvif::soap::client::{ClientBuilder, Credentials};
use schema::event::{self, CreatePullPointSubscription, PullMessages};
use url::Url;
#[derive(Debug, Clone)]
pub struct Camera {
    // URL of the ONVIF device (management) service.
    pub device_service_url: String,
    pub username: String,
    pub password: String,
    // URL of the ONVIF event service.
    pub event_service_url: String,
}

impl Default for Camera {
    /// Placeholder camera: `admin`/`admin` at 192.168.1.100.
    fn default() -> Self {
        let host = "192.168.1.100";
        Camera {
            device_service_url: format!("http://{}/onvif/device_service", host),
            username: "admin".to_string(),
            password: "admin".to_string(),
            event_service_url: format!("http://{}/onvif/event_service", host),
        }
    }
}
#[tokio::main]
async fn main() {
    // Hard-coded connection settings; edit these to match the target camera.
    let camera_ip = "192.168.1.50";
    let username = "admin";
    let password = "admin";
    let camera: Camera = Camera {
        device_service_url: format!("http://{}/onvif/device_service", camera_ip),
        username: username.to_string(),
        password: password.to_string(),
        event_service_url: format!("http://{}/onvif/event_service", camera_ip),
    };
    let creds: Credentials = Credentials {
        username: camera.username.to_string(),
        password: camera.password.to_string(),
    };
    let event_client = ClientBuilder::new(&Url::parse(&camera.event_service_url).unwrap())
        .credentials(Some(creds))
        .build();
    // Subscribe only to RuleEngine topics (motion-detection related events).
    let create_pull_sub_request = CreatePullPointSubscription {
        initial_termination_time: None,
        filter: Some(b_2::FilterType {
            topic_expression: Some(b_2::TopicExpressionType {
                dialect: "http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet".to_string(),
                inner_text: "tns1:RuleEngine//.".to_string(),
            }),
        }),
        subscription_policy: None,
    };
    let create_pull_puint_sub_response =
        event::create_pull_point_subscription(&event_client, &create_pull_sub_request).await;
    let camera_sub = match create_pull_puint_sub_response {
        Ok(sub) => sub,
        Err(e) => {
            println!("Error: {:?}", e);
            return;
        }
    };
    // Pull messages from the subscription endpoint the camera returned.
    // Digest auth is used here; some cameras require it on this endpoint.
    let uri: Url = Url::parse(&camera_sub.subscription_reference.address).unwrap();
    let creds: Credentials = Credentials {
        username: camera.username.to_string(),
        password: camera.password.to_string(),
    };
    let pull_msg_client = ClientBuilder::new(&uri)
        .credentials(Some(creds))
        .auth_type(onvif::soap::client::AuthType::Digest)
        .build();
    // Up to 256 messages per pull; the device waits at most 1 second.
    let pull_messages_request = PullMessages {
        message_limit: 256,
        timeout: xsd_types::types::Duration {
            seconds: 1.0,
            ..Default::default()
        },
    };
    // Main Loop: poll for notifications every 3 seconds, forever.
    loop {
        let pull_messages_response =
            event::pull_messages(&pull_msg_client, &pull_messages_request).await;
        let msg = match pull_messages_response {
            Ok(msg) => msg,
            Err(e) => {
                println!("Error: {:?}", e);
                continue;
            }
        };
        // Only the first notification of each batch is printed.
        if !msg.notification_message.is_empty() {
            println!("Notification Message: {:?}", msg.notification_message[0]);
        } else {
            println!("No new notification message");
        }
        tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/examples/discovery.rs | onvif/examples/discovery.rs | extern crate onvif;
use onvif::discovery;
#[tokio::main]
async fn main() {
    // Environment (.env) is optional; logging goes through `tracing`.
    dotenv::dotenv().ok();
    tracing_subscriber::fmt::init();
    use futures_util::stream::StreamExt;
    // Upper bound on how many discovery responses are handled concurrently.
    const MAX_CONCURRENT_JUMPERS: usize = 100;
    let devices = discovery::DiscoveryBuilder::default().run().await.unwrap();
    devices
        .for_each_concurrent(MAX_CONCURRENT_JUMPERS, |addr| async move {
            println!("Device found: {:?}", addr);
        })
        .await;
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/onvif/examples/camera.rs | onvif/examples/camera.rs | use chrono::{NaiveDate, Utc};
use onvif::soap::{self, client::AuthType};
use schema::{self, transport};
use structopt::StructOpt;
use tracing::{debug, warn};
use url::Url;
#[derive(StructOpt)]
#[structopt(name = "camera", about = "ONVIF camera control tool")]
struct Args {
    // NOTE: plain `//` comments are used below where no CLI help is wanted —
    // structopt turns `///` doc comments into help text.
    // Username and password must be given together (see `requires`).
    #[structopt(global = true, long, requires = "password")]
    username: Option<String>,
    #[structopt(global = true, long, requires = "username")]
    password: Option<String>,
    /// The device's base URI, typically just to the HTTP root.
    /// The service-specific path (such as `/onvif/device_support`) will be appended to this.
    // Note this is an `Option` because global options can't be required in clap.
    // https://github.com/clap-rs/clap/issues/1546
    #[structopt(global = true, long)]
    uri: Option<Url>,
    /// Service specific path
    #[structopt(global = true, long, default_value = "onvif/device_service")]
    service_path: String,
    /// Auto fix time gap between PC and the camera
    #[structopt(short = "t", long)]
    fix_time: bool,
    /// Authorization type [Any(Default), Digest, UsernameToken]
    // Matched case-insensitively in `Clients::new`; unknown values mean Any.
    #[structopt(short = "a", long, default_value = "Any")]
    auth_type: String,
    #[structopt(subcommand)]
    cmd: Cmd,
}
#[derive(StructOpt)]
#[structopt()]
enum Cmd {
    GetSystemDateAndTime,
    GetCapabilities,
    /// Gets the capabilities of all known ONVIF services supported by this device.
    GetServiceCapabilities,
    /// Gets RTSP URIs for all profiles, along with a summary of the video/audio streams.
    GetStreamUris,
    /// Gets JPEG URIs for all profiles
    GetSnapshotUris,
    GetHostname,
    /// Gets model, firmware, manufacturer and other information related to the device.
    // Fixed: this and the two comments below were plain `//` comments, so
    // (unlike their siblings) they never appeared as CLI help text.
    GetDeviceInformation,
    SetHostname {
        hostname: String,
    },
    /// Gets the PTZ status for the primary media profile.
    GetStatus,
    /// Attempts to enable a `vnd.onvif.metadata` RTSP stream with analytics.
    EnableAnalytics,
    /// Gets information about the currently enabled and supported video analytics.
    GetAnalytics,
    /// Try to get any possible information
    GetAll,
}
// Per-service SOAP clients. Only `devicemgmt` is always present (it is the
// bootstrap service); the rest are filled in from the device's `GetServices`
// response in `Clients::new` and stay `None` when not advertised.
struct Clients {
    devicemgmt: soap::client::Client,
    event: Option<soap::client::Client>,
    deviceio: Option<soap::client::Client>,
    media: Option<soap::client::Client>,
    media2: Option<soap::client::Client>,
    imaging: Option<soap::client::Client>,
    ptz: Option<soap::client::Client>,
    analytics: Option<soap::client::Client>,
}
impl Clients {
    /// Connects to the device-management service, optionally compensates for
    /// the clock offset between this PC and the camera, then builds one SOAP
    /// client per service advertised by the device's `GetServices` response.
    async fn new(args: &Args) -> Result<Self, String> {
        let creds = match (args.username.as_ref(), args.password.as_ref()) {
            (Some(username), Some(password)) => Some(soap::client::Credentials {
                username: username.clone(),
                password: password.clone(),
            }),
            (None, None) => None,
            _ => panic!("username and password must be specified together"),
        };
        let base_uri = args
            .uri
            .as_ref()
            .ok_or_else(|| "--uri must be specified.".to_string())?;
        let devicemgmt_uri = base_uri.join(&args.service_path).unwrap();
        // Unknown --auth-type values silently fall back to Any.
        let auth_type = match args.auth_type.to_ascii_lowercase().as_str() {
            "digest" => AuthType::Digest,
            "usernametoken" => AuthType::UsernameToken,
            _ => AuthType::Any,
        };
        let mut out = Self {
            devicemgmt: soap::client::ClientBuilder::new(&devicemgmt_uri)
                .credentials(creds.clone())
                .auth_type(auth_type.clone())
                .build(),
            imaging: None,
            ptz: None,
            event: None,
            deviceio: None,
            media: None,
            media2: None,
            analytics: None,
        };
        // Measure the camera/PC clock difference (only with --fix-time) so
        // request timestamps are accepted by the device.
        let time_gap = if args.fix_time {
            let device_time =
                schema::devicemgmt::get_system_date_and_time(&out.devicemgmt, &Default::default())
                    .await?
                    .system_date_and_time;
            if let Some(utc_time) = &device_time.utc_date_time {
                let pc_time = Utc::now();
                let date = &utc_time.date;
                let t = &utc_time.time;
                let device_time =
                    NaiveDate::from_ymd_opt(date.year, date.month as _, date.day as _)
                        .unwrap()
                        .and_hms_opt(t.hour as _, t.minute as _, t.second as _)
                        .unwrap()
                        .and_utc();
                let diff = device_time - pc_time;
                // Only compensate when the clocks differ by over a minute.
                if diff.num_seconds().abs() > 60 {
                    out.devicemgmt.set_fix_time_gap(Some(diff));
                }
                Some(diff)
            } else {
                warn!("GetSystemDateAndTimeResponse doesn't have utc_data_time value!");
                None
            }
        } else {
            None
        };
        // Build a client for every advertised service, mapping the service's
        // namespace onto the matching field of `Clients`.
        let services =
            schema::devicemgmt::get_services(&out.devicemgmt, &Default::default()).await?;
        for service in &services.service {
            let service_url = Url::parse(&service.x_addr).map_err(|e| e.to_string())?;
            // Reject service URLs outside the user-supplied base URI.
            if !service_url.as_str().starts_with(base_uri.as_str()) {
                return Err(format!(
                    "Service URI {} is not within base URI {}",
                    service_url, base_uri
                ));
            }
            let svc = Some(
                soap::client::ClientBuilder::new(&service_url)
                    .credentials(creds.clone())
                    .auth_type(auth_type.clone())
                    .fix_time_gap(time_gap)
                    .build(),
            );
            match service.namespace.as_str() {
                "http://www.onvif.org/ver10/device/wsdl" => {
                    // Sanity check: the advertised device-management URI must
                    // match the one we already connected to.
                    if service_url != devicemgmt_uri {
                        return Err(format!(
                            "advertised device mgmt uri {} not expected {}",
                            service_url, devicemgmt_uri
                        ));
                    }
                }
                "http://www.onvif.org/ver10/events/wsdl" => out.event = svc,
                "http://www.onvif.org/ver10/deviceIO/wsdl" => out.deviceio = svc,
                "http://www.onvif.org/ver10/media/wsdl" => out.media = svc,
                "http://www.onvif.org/ver20/media/wsdl" => out.media2 = svc,
                "http://www.onvif.org/ver20/imaging/wsdl" => out.imaging = svc,
                "http://www.onvif.org/ver20/ptz/wsdl" => out.ptz = svc,
                "http://www.onvif.org/ver20/analytics/wsdl" => out.analytics = svc,
                _ => debug!("unknown service: {:?}", service),
            }
        }
        Ok(out)
    }
}
/// Fetches and pretty-prints the device's capability document; a failure is
/// reported on stdout instead of being propagated.
async fn get_capabilities(clients: &Clients) {
    let result =
        schema::devicemgmt::get_capabilities(&clients.devicemgmt, &Default::default()).await;
    match result {
        Err(error) => println!("Failed to fetch capabilities: {}", error),
        Ok(capabilities) => println!("{:#?}", capabilities),
    }
}
/// Pretty-prints the GetDeviceInformation response (model, firmware,
/// manufacturer, ...), propagating any transport error.
async fn get_device_information(clients: &Clients) -> Result<(), transport::Error> {
    let info =
        schema::devicemgmt::get_device_information(&clients.devicemgmt, &Default::default())
            .await?;
    println!("{:#?}", &info);
    Ok(())
}
async fn get_service_capabilities(clients: &Clients) {
match schema::event::get_service_capabilities(&clients.devicemgmt, &Default::default()).await {
Ok(capability) => println!("devicemgmt: {:#?}", capability),
Err(error) => println!("Failed to fetch devicemgmt: {}", error),
}
if let Some(ref event) = clients.event {
match schema::event::get_service_capabilities(event, &Default::default()).await {
Ok(capability) => println!("event: {:#?}", capability),
Err(error) => println!("Failed to fetch event: {}", error),
}
}
if let Some(ref deviceio) = clients.deviceio {
match schema::event::get_service_capabilities(deviceio, &Default::default()).await {
Ok(capability) => println!("deviceio: {:#?}", capability),
Err(error) => println!("Failed to fetch deviceio: {}", error),
}
}
if let Some(ref media) = clients.media {
match schema::event::get_service_capabilities(media, &Default::default()).await {
Ok(capability) => println!("media: {:#?}", capability),
Err(error) => println!("Failed to fetch media: {}", error),
}
}
if let Some(ref media2) = clients.media2 {
match schema::event::get_service_capabilities(media2, &Default::default()).await {
Ok(capability) => println!("media2: {:#?}", capability),
Err(error) => println!("Failed to fetch media2: {}", error),
}
}
if let Some(ref imaging) = clients.imaging {
match schema::event::get_service_capabilities(imaging, &Default::default()).await {
Ok(capability) => println!("imaging: {:#?}", capability),
Err(error) => println!("Failed to fetch imaging: {}", error),
}
}
if let Some(ref ptz) = clients.ptz {
match schema::event::get_service_capabilities(ptz, &Default::default()).await {
Ok(capability) => println!("ptz: {:#?}", capability),
Err(error) => println!("Failed to fetch ptz: {}", error),
}
}
if let Some(ref analytics) = clients.analytics {
match schema::event::get_service_capabilities(analytics, &Default::default()).await {
Ok(capability) => println!("analytics: {:#?}", capability),
Err(error) => println!("Failed to fetch analytics: {}", error),
}
}
}
/// Pretty-prints the raw GetSystemDateAndTime result (including an eventual
/// transport error) without propagating it.
async fn get_system_date_and_time(clients: &Clients) {
    println!(
        "{:#?}",
        schema::devicemgmt::get_system_date_and_time(&clients.devicemgmt, &Default::default())
            .await
    );
}
/// Prints, for every media profile, its RTSP stream URI plus a short summary
/// of the configured video and audio encoders. Requires the media service.
async fn get_stream_uris(clients: &Clients) -> Result<(), transport::Error> {
    let media_client = clients
        .media
        .as_ref()
        .ok_or_else(|| transport::Error::Other("Client media is not available".into()))?;
    let profiles = schema::media::get_profiles(media_client, &Default::default()).await?;
    debug!("get_profiles response: {:#?}", &profiles);
    // One RTSP/RTP-unicast GetStreamUri request per profile.
    let requests: Vec<_> = profiles
        .profiles
        .iter()
        .map(|p: &schema::onvif::Profile| schema::media::GetStreamUri {
            profile_token: schema::onvif::ReferenceToken(p.token.0.clone()),
            stream_setup: schema::onvif::StreamSetup {
                stream: schema::onvif::StreamType::RtpUnicast,
                transport: schema::onvif::Transport {
                    protocol: schema::onvif::TransportProtocol::Rtsp,
                    tunnel: vec![],
                },
            },
        })
        .collect();
    // Issue all requests concurrently; fail on the first error.
    let responses = futures_util::future::try_join_all(
        requests
            .iter()
            .map(|r| schema::media::get_stream_uri(media_client, r)),
    )
    .await?;
    for (p, resp) in profiles.profiles.iter().zip(responses.iter()) {
        println!("token={} name={}", &p.token.0, &p.name.0);
        println!("  {}", &resp.media_uri.uri);
        if let Some(ref v) = p.video_encoder_configuration {
            println!(
                "    {:?}, {}x{}",
                v.encoding, v.resolution.width, v.resolution.height
            );
            if let Some(ref r) = v.rate_control {
                println!("    {} fps, {} kbps", r.frame_rate_limit, r.bitrate_limit);
            }
        }
        if let Some(ref a) = p.audio_encoder_configuration {
            println!(
                "    audio: {:?}, {} kbps, {} kHz",
                a.encoding, a.bitrate, a.sample_rate
            );
        }
    }
    Ok(())
}
/// Prints the JPEG snapshot URI for every media profile.
/// Requires the media service.
async fn get_snapshot_uris(clients: &Clients) -> Result<(), transport::Error> {
    let media_client = clients
        .media
        .as_ref()
        .ok_or_else(|| transport::Error::Other("Client media is not available".into()))?;
    let profiles = schema::media::get_profiles(media_client, &Default::default()).await?;
    debug!("get_profiles response: {:#?}", &profiles);
    // Build one GetSnapshotUri request per profile, then run them all
    // concurrently, failing on the first error.
    let requests: Vec<_> = profiles
        .profiles
        .iter()
        .map(|profile| schema::media::GetSnapshotUri {
            profile_token: schema::onvif::ReferenceToken(profile.token.0.clone()),
        })
        .collect();
    let futures = requests
        .iter()
        .map(|request| schema::media::get_snapshot_uri(media_client, request));
    let responses = futures_util::future::try_join_all(futures).await?;
    for (profile, response) in profiles.profiles.iter().zip(responses.iter()) {
        println!("token={} name={}", &profile.token.0, &profile.name.0);
        println!("  snapshot_uri={}", &response.media_uri.uri);
    }
    Ok(())
}
/// Prints the device's hostname, or "(unset)" when none is configured.
async fn get_hostname(clients: &Clients) -> Result<(), transport::Error> {
    let resp = schema::devicemgmt::get_hostname(&clients.devicemgmt, &Default::default()).await?;
    debug!("get_hostname response: {:#?}", &resp);
    let name = resp
        .hostname_information
        .name
        .as_deref()
        .unwrap_or("(unset)");
    println!("{}", name);
    Ok(())
}
/// Sets the device's hostname through the device-management service.
async fn set_hostname(clients: &Clients, hostname: String) -> Result<(), transport::Error> {
    let request = schema::devicemgmt::SetHostname { name: hostname };
    schema::devicemgmt::set_hostname(&clients.devicemgmt, &request).await?;
    Ok(())
}
/// Ensures analytics and event reporting are enabled in the (single) metadata
/// configuration, and that every profile includes that configuration, so a
/// `vnd.onvif.metadata` RTSP stream will carry analytics data.
async fn enable_analytics(clients: &Clients) -> Result<(), transport::Error> {
    let media_client = clients
        .media
        .as_ref()
        .ok_or_else(|| transport::Error::Other("Client media is not available".into()))?;
    let mut config =
        schema::media::get_metadata_configurations(media_client, &Default::default()).await?;
    // Bail out (without error) unless there is exactly one configuration.
    if config.configurations.len() != 1 {
        println!("Expected exactly one analytics config");
        return Ok(());
    }
    let mut c = config.configurations.pop().unwrap();
    let token_str = c.token.0.clone();
    println!("{:#?}", &c);
    // Turn analytics/events on only when not already enabled.
    if c.analytics != Some(true) || c.events.is_none() {
        println!(
            "Enabling analytics in metadata configuration {}",
            &token_str
        );
        c.analytics = Some(true);
        c.events = Some(schema::onvif::EventSubscription {
            filter: None,
            subscription_policy: None,
        });
        schema::media::set_metadata_configuration(
            media_client,
            &schema::media::SetMetadataConfiguration {
                configuration: c,
                force_persistence: true,
            },
        )
        .await?;
    } else {
        println!(
            "Analytics already enabled in metadata configuration {}",
            &token_str
        );
    }
    // Attach the metadata configuration to every profile that lacks one.
    let profiles = schema::media::get_profiles(media_client, &Default::default()).await?;
    let requests: Vec<_> = profiles
        .profiles
        .iter()
        .filter_map(
            |p: &schema::onvif::Profile| match p.metadata_configuration {
                Some(_) => None,
                None => Some(schema::media::AddMetadataConfiguration {
                    profile_token: schema::onvif::ReferenceToken(p.token.0.clone()),
                    configuration_token: schema::onvif::ReferenceToken(token_str.clone()),
                }),
            },
        )
        .collect();
    if !requests.is_empty() {
        println!(
            "Enabling metadata on {}/{} configs",
            requests.len(),
            profiles.profiles.len()
        );
        futures_util::future::try_join_all(
            requests
                .iter()
                .map(|r| schema::media::add_metadata_configuration(media_client, r)),
        )
        .await?;
    } else {
        println!(
            "Metadata already enabled on {} configs",
            profiles.profiles.len()
        );
    }
    Ok(())
}
/// Prints the video-analytics configurations and, when an analytics service
/// client exists, the supported analytics modules of the first configuration.
async fn get_analytics(clients: &Clients) -> Result<(), transport::Error> {
    let media_client = clients
        .media
        .as_ref()
        .ok_or_else(|| transport::Error::Other("Client media is not available".into()))?;
    let config =
        schema::media::get_video_analytics_configurations(media_client, &Default::default())
            .await?;
    println!("{:#?}", &config);
    // Nothing further to query if the device reports no configurations.
    let first = match config.configurations.first() {
        Some(configuration) => configuration,
        None => return Ok(()),
    };
    if let Some(analytics) = clients.analytics.as_ref() {
        let request = schema::analytics::GetSupportedAnalyticsModules {
            configuration_token: schema::onvif::ReferenceToken(first.token.0.clone()),
        };
        let modules =
            schema::analytics::get_supported_analytics_modules(analytics, &request).await?;
        println!("{:#?}", &modules);
    }
    Ok(())
}
async fn get_status(clients: &Clients) -> Result<(), transport::Error> {
if let Some(ref ptz) = clients.ptz {
let media_client = match clients.media.as_ref() {
Some(client) => client,
None => {
return Err(transport::Error::Other(
"Client media is not available".into(),
))
}
};
let profile = &schema::media::get_profiles(media_client, &Default::default())
.await?
.profiles[0];
let profile_token = schema::onvif::ReferenceToken(profile.token.0.clone());
let status =
&schema::ptz::get_status(ptz, &schema::ptz::GetStatus { profile_token }).await?;
println!("ptz status: {:#?}", status);
}
Ok(())
}
#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();
    let args = Args::from_args();
    // Connect and enumerate services up front; every subcommand needs it.
    let clients = Clients::new(&args).await.unwrap();
    match args.cmd {
        Cmd::GetSystemDateAndTime => get_system_date_and_time(&clients).await,
        Cmd::GetCapabilities => get_capabilities(&clients).await,
        Cmd::GetServiceCapabilities => get_service_capabilities(&clients).await,
        Cmd::GetStreamUris => get_stream_uris(&clients).await.unwrap(),
        Cmd::GetSnapshotUris => get_snapshot_uris(&clients).await.unwrap(),
        Cmd::GetHostname => get_hostname(&clients).await.unwrap(),
        Cmd::SetHostname { hostname } => set_hostname(&clients, hostname).await.unwrap(),
        Cmd::GetDeviceInformation => get_device_information(&clients).await.unwrap(),
        Cmd::EnableAnalytics => enable_analytics(&clients).await.unwrap(),
        Cmd::GetAnalytics => get_analytics(&clients).await.unwrap(),
        Cmd::GetStatus => get_status(&clients).await.unwrap(),
        Cmd::GetAll => {
            // Best-effort mode: run every read-only query, printing (rather
            // than propagating) the error when one fails.
            get_system_date_and_time(&clients).await;
            get_capabilities(&clients).await;
            get_service_capabilities(&clients).await;
            get_device_information(&clients)
                .await
                .unwrap_or_else(|error| {
                    println!("Error while fetching device information: {:#?}", error);
                });
            get_stream_uris(&clients).await.unwrap_or_else(|error| {
                println!("Error while fetching stream urls: {:#?}", error);
            });
            get_snapshot_uris(&clients).await.unwrap_or_else(|error| {
                println!("Error while fetching snapshot urls: {:#?}", error);
            });
            get_hostname(&clients).await.unwrap_or_else(|error| {
                println!("Error while fetching hostname: {:#?}", error);
            });
            get_analytics(&clients).await.unwrap_or_else(|error| {
                println!("Error while fetching analytics: {:#?}", error);
            });
            get_status(&clients).await.unwrap_or_else(|error| {
                println!("Error while fetching status: {:#?}", error);
            });
        }
    }
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
lumeohq/onvif-rs | https://github.com/lumeohq/onvif-rs/blob/8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6/validate/src/lib.rs | validate/src/lib.rs | pub trait Validate {
fn validate(&self) -> Result<(), String> {
Ok(())
}
}
| rust | MIT | 8f1490e2ce5e2ddd29dbd3ab2586d7a90da0b6d6 | 2026-01-04T20:20:57.434821Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch_macros/src/lib.rs | hotpatch_macros/src/lib.rs | #![feature(proc_macro_diagnostic)]
//! You probably want documentation for the [`hotpatch`](https://docs.rs/hotpatch) crate.
use proc_macro::TokenStream;
use std::sync::RwLock;
use syn::{parse::Nothing, ItemFn, ItemImpl, Path};
mod item_fn;
mod item_impl;
lazy_static::lazy_static! {
    // Process-wide counter assigning a unique index to every generated
    // `__HOTPATCH_EXPORT_N` static emitted by `#[patch]`.
    static ref EXPORTNUM: RwLock<usize> = RwLock::new(0);
}
/// Transforms a function into a [`Patchable`](struct.Patchable.html) capable of having
/// its behavior redefined at runtime.
///
/// Takes a single optional arguement: `modpath`. Used to spoof the module
/// path.
///
/// ## Example
/// ```
/// #[patchable]
/// fn foo() {}
///
/// #[patchable(mymod::baz)] // will look for the function ::mymod::baz instead of ::bar
/// fn bar() {
/// foo(); // foo is callable, just as a functor
/// }
/// ```
#[proc_macro_attribute]
pub fn patchable(attr: TokenStream, input: TokenStream) -> TokenStream {
    // Bail out (emitting nothing) when the attribute argument was invalid;
    // `get_modpath` has already emitted the diagnostic in that case.
    let modpath = match get_modpath(attr) {
        Ok(modpath) => modpath,
        Err(()) => return TokenStream::new(),
    };
    // Dispatch on what the attribute was attached to: a free function or an
    // `impl` block of associated functions.
    if let Ok(item) = syn::parse::<ItemFn>(input.clone()) {
        item_fn::patchable(item, modpath)
    } else if let Ok(item) = syn::parse::<ItemImpl>(input) {
        item_impl::patchable(item, modpath)
    } else {
        panic!("I can't hotpatch this yet!");
    }
}
/// Transforms a function into a [`HotpatchExport`](struct.HotpatchExport.html) capable of
/// being exported and changing the behavior of a function in a seperate binary
/// at runtime. **The original function is preserved.**
///
/// Takes a single optional arguement: `modpath`. Used to spoof the module
/// path.
///
/// ## Example
/// ```
/// #[patch]
/// fn foo() {}
///
/// #[patch(mymod::baz)] // looks like: mod mymod { fn baz() {} }
/// fn bar() {
/// foo(); // can still call foo
/// }
/// ```
#[proc_macro_attribute]
pub fn patch(attr: TokenStream, input: TokenStream) -> TokenStream {
    // Bail out (emitting nothing) when the attribute argument was invalid;
    // `get_modpath` has already emitted the diagnostic in that case.
    let modpath = match get_modpath(attr) {
        Ok(modpath) => modpath,
        Err(()) => return TokenStream::new(),
    };
    // Dispatch on what the attribute was attached to: a free function or an
    // `impl` block of associated functions.
    if let Ok(fn_item) = syn::parse::<ItemFn>(input.clone()) {
        item_fn::patch(fn_item, modpath)
    } else if let Ok(item) = syn::parse::<ItemImpl>(input) {
        item_impl::patch(item, modpath)
    } else {
        panic!("I can't turn this into a patch yet!");
    }
}
/// Parses the optional attribute argument as a spoofed module path.
///
/// Returns `Ok(None)` when no argument was given, `Ok(Some(path))` (with all
/// whitespace stripped) for a valid path, and `Err(())` after emitting a
/// compiler diagnostic when the argument is not a valid path.
fn get_modpath(attr: TokenStream) -> Result<Option<String>, ()> {
    if syn::parse::<Nothing>(attr.clone()).is_ok() {
        return Ok(None);
    }
    // Stringify before `parse` consumes the token stream.
    let spoofed = attr.to_string();
    if syn::parse::<Path>(attr).is_err() {
        proc_macro::Span::call_site()
            .error("Expected module path")
            .help("Just use #[patchable]; it's already module aware.")
            // Fixed typo in the user-facing diagnostic: "arguement" -> "argument".
            .help("If you're trying to spoof a module path, the supplied argument is an invalid path")
            .emit();
        return Err(());
    }
    // Token-stream stringification inserts spaces around `::`; strip them.
    Ok(Some(spoofed.replace(' ', "")))
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch_macros/src/item_impl.rs | hotpatch_macros/src/item_impl.rs | use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use quote::ToTokens;
use syn::{FnArg::Typed, Ident, ImplItemConst, ImplItemMethod, ItemImpl, ReturnType::Type};
use std::sync::RwLock;
use syn::spanned::Spanned;
use crate::EXPORTNUM;
lazy_static::lazy_static! {
    // Process-wide counter used to give each generated
    // `__hotpatch_internal_staticwrap_N` wrapper function a unique name.
    static ref WRAPPER_NUM: RwLock<usize> = RwLock::new(0);
}
/// Rewrites every method of the `impl` block into three items:
/// 1. a doc-only copy of the original method (kept under `#[cfg(doc)]` with a
///    hotpatch warning appended to its docs),
/// 2. an associated `MutConst<Patchable<...>>` constant carrying the original
///    method's name, and
/// 3. the original body renamed to `__hotpatch_internal_staticwrap_N`, which
///    the constant wraps as the default implementation.
pub fn patchable(mut fn_item: ItemImpl, modpath: Option<String>) -> TokenStream {
    let mut tt = proc_macro2::TokenStream::new();
    fn_item.self_ty.to_tokens(&mut tt);
    let self_ty = fn_item.self_ty.clone();
    let impl_name = tt.to_string();
    fn_item.items = fn_item
        .items
        .drain(..)
        .map(|item| {
            match item {
                syn::ImplItem::Method(m) => {
                    let (mut fargs, mut output_type, mut item, mut fn_name, sigtext) = gather_info(m);
                    let wrapper_num;
                    {
                        // scope is used so WRAPPER_NUM is unlocked faster
                        let mut r = WRAPPER_NUM.write().unwrap();
                        wrapper_num = *r;
                        *r += 1;
                    }
                    // transform arguments from Self notation to concrete type (only in intermediate variables)
                    if let syn::Type::Tuple(ref mut t) = fargs {
                        for farg in t.elems.iter_mut() {
                            transform_self(&impl_name, farg);
                        }
                    }
                    // same but for return value
                    transform_self(&impl_name, &mut output_type);
                    let vis = item.vis.clone(); // pass through pub
                    let mut docitem = item.clone();
                    docitem.attrs.append(
                        &mut syn::parse2::<syn::ItemStruct>(quote! {
                            ///
                            /// ---
                            /// ## Hotpatch
                            /// **Warning**: This item is [`#[patchable]`](hotpatch::patchable). Runtime behavior may not
                            /// follow the source implementation. See the
                            /// [Hotpatch Documentation](hotpatch) for more information.
                            #[cfg(doc)]
                            struct Dummy {}
                        })
                        .unwrap()
                        .attrs,
                    );
                    let item_name = fn_name.clone();
                    fn_name = Ident::new(&format!("__hotpatch_internal_staticwrap_{}", wrapper_num), Span::call_site());
                    item.sig.ident = fn_name.clone();
                    // Associated functions are registered under a synthetic
                    // "!__associated_fn:<Type>:<name>" key, optionally spoofed.
                    let mname = match &modpath {
                        Some(mpath) =>
                            format!("!__associated_fn:{}:{}", impl_name, mpath),
                        None =>
                            format!("!__associated_fn:{}:{}", impl_name, item_name),
                    };
                    let c_item = syn::parse2::<ImplItemConst>(quote! {
                        #[cfg(not(doc))]
                        #[allow(non_upper_case_globals)]
                        #vis const #item_name: hotpatch::MutConst<Patchable<dyn Fn#fargs -> #output_type + Send + Sync + 'static>> =hotpatch::MutConst::new(|| {
                        #[cfg(not(doc))]
                        #[allow(non_upper_case_globals)]
                        static __hotpatch_internal_pwrap: hotpatch::Patchable<
                            dyn Fn#fargs -> #output_type + Send + Sync + 'static,
                        > = hotpatch::Patchable::__new(|| {
                            hotpatch::Patchable::__new_internal(
                                Box::new(#self_ty::#fn_name)
                                    as Box<dyn Fn#fargs -> #output_type + Send + Sync + 'static>,
                                concat!(module_path!(), "::", #mname),
                                #sigtext,
                            )
                        });
                        &__hotpatch_internal_pwrap
                    });
                    }).unwrap();
                    let f_item = syn::parse2::<ImplItemMethod>(quote! {
                        #item
                    }).unwrap();
                    (syn::ImplItem::Method(docitem), syn::ImplItem::Const(c_item), syn::ImplItem::Method(f_item))
                }
                _ => panic!("There's something in this impl block I can't hotpatch yet"),
            }
        }).fold(vec![], |mut acc, (c1, c2, c3)| {acc.push(c1); acc.push(c2); acc.push(c3); acc});
    TokenStream::from(quote! {
        #fn_item
    })
}
/// Generates, for each method of the `impl` block, a public
/// `__HOTPATCH_EXPORT_N` static wrapping the (unchanged) method, so a shared
/// object built from this crate can replace the corresponding `#[patchable]`
/// associated function in the host binary. The original `impl` is preserved.
pub fn patch(mut fn_item: ItemImpl, modpath: Option<String>) -> TokenStream {
    let mut tt = proc_macro2::TokenStream::new();
    fn_item.self_ty.to_tokens(&mut tt);
    let impl_name = tt.to_string();
    let self_type = fn_item.self_ty.clone();
    let exports: Vec<_> = fn_item
        .items
        .iter_mut()
        .map(|item| {
            match item {
                syn::ImplItem::Method(m) => {
                    let (mut fargs, mut output_type, _item, fn_name, sigtext) = gather_info(m.clone());
                    // transform arguments from Self notation to concrete type (only in intermediate variables)
                    if let syn::Type::Tuple(ref mut t) = fargs {
                        for farg in t.elems.iter_mut() {
                            transform_self(&impl_name, farg);
                        }
                    }
                    // same but for return value
                    transform_self(&impl_name, &mut output_type);
                    let exnum;
                    {
                        // scope is used so EXPORTNUM is unlocked faster
                        let mut r = EXPORTNUM.write().unwrap();
                        exnum = *r;
                        *r += 1;
                    }
                    m.attrs.append(
                        &mut syn::parse2::<syn::ItemStruct>(quote! {
                            ///
                            /// ---
                            /// ## Hotpatch
                            /// This item is a [`#[patch]`](hotpatch::patch). It will silently define a public static
                            /// symbol `__HOTPATCH_EXPORT_N` for use in shared object files. See the
                            /// [Hotpatch Documentation](hotpatch) for more information.
                            struct Dummy {}
                        })
                        .unwrap()
                        .attrs,
                    );
                    let item_name = fn_name.clone();
                    // Fixed: a comma was missing between `":"` and `#mpath`,
                    // which made the generated `concat!` call syntactically
                    // invalid whenever a spoofed module path was supplied.
                    let mname = match &modpath {
                        Some(mpath) =>
                            quote! {
                                concat!("::!__associated_fn:", #impl_name, ":", #mpath)
                            },
                        None => quote! {
                            concat!(module_path!(), "::!__associated_fn:", #impl_name, ":", stringify!(#fn_name))
                        },
                    };
                    let hotpatch_name = Ident::new(&format!("__HOTPATCH_EXPORT_{}", exnum), Span::call_site());
                    quote! {
                        #[doc(hidden)]
                        #[no_mangle]
                        pub static #hotpatch_name: hotpatch::HotpatchExport<fn#fargs -> #output_type> =
                            hotpatch::HotpatchExport::__new(
                                #self_type :: #item_name,
                                #mname,
                                #sigtext,
                            );
                    }
                }
                _ => panic!("There's something in this impl block I can't hotpatch yet"),
            }
        }).collect();
    TokenStream::from(quote! {
        #fn_item
        #(#exports)*
    })
}
/// Pull apart an associated function into the pieces the code generators
/// need: (argument tuple type, return type, untouched item, identifier,
/// textual signature used for runtime compatibility checks).
fn gather_info(item: ImplItemMethod) -> (syn::Type, syn::Type, ImplItemMethod, Ident, String) {
    let fn_name = item.sig.ident.clone();
    // Normalize a missing return type to the unit type `()`.
    let output_type = match &item.sig.output {
        Type(_, t) => (**t).clone(),
        _ => syn::parse2::<syn::Type>(quote! { () }).unwrap(),
    };
    let mut rendered_output = proc_macro2::TokenStream::new();
    output_type.to_tokens(&mut rendered_output);
    // Every typed argument, in declaration order.
    let args: Vec<_> = item
        .sig
        .inputs
        .iter()
        .filter_map(|input| match input {
            Typed(arg) => Some(arg.ty.clone()),
            _ => None,
        })
        .collect();
    // Splice the argument types into a tuple type: `()` when empty,
    // otherwise `(A, B,)` — the trailing comma keeps 1-tuples valid.
    let fargs = syn::parse2::<syn::Type>(if args.is_empty() {
        quote! { () }
    } else {
        quote! { (#(#args),*,) }
    })
    .unwrap();
    // Textual signature, e.g. "fn(i32, String) -> ()".
    let rendered_args = item
        .sig
        .inputs
        .iter()
        .map(|input| match input {
            syn::FnArg::Typed(t) => {
                let mut tokens = proc_macro2::TokenStream::new();
                t.ty.to_tokens(&mut tokens);
                tokens.to_string()
            }
            _ => panic!("self parameter is not allowed. Can't hotpatch methods (yet!)"),
        })
        .collect::<Vec<String>>()
        .join(", ");
    let sigtext = format!("fn({}) -> {}", rendered_args, rendered_output);
    (fargs, output_type, item, fn_name, sigtext)
}
// TODO: is there a crate for this?
/// Recursively rewrite every occurrence of `Self` inside a type to the
/// concrete `impl` target named by `impl_name`, so generated statics can
/// refer to the type outside the `impl` block. Type forms that cannot be
/// handled (trait-bound constraints, macro types, const generics) emit
/// compiler diagnostics instead of silently passing through.
fn transform_self(impl_name: &str, farg: &mut syn::Type) {
    use syn::Type::*;
    match farg {
        Path(p) => {
            // `Self` can only be the first path segment; swap it in place,
            // preserving the original span for diagnostics.
            if p.path.segments.first().map(|s| s.ident.to_string()) == Some("Self".to_owned()) {
                let span = p.path.segments.first().unwrap().ident.span();
                p.path.segments.first_mut().unwrap().ident = Ident::new(&impl_name, span);
            }
            // generics too
            use syn::PathArguments::*;
            for seg in p.path.segments.iter_mut() {
                match &mut seg.arguments {
                    AngleBracketed(args) => {
                        // e.g. `Vec<Self>` or `HashMap<K, Self>`.
                        for arg in args.args.iter_mut() {
                            use syn::GenericArgument::*;
                            match arg {
                                Type(t) => transform_self(impl_name, t),
                                Binding(b) => transform_self(impl_name, &mut b.ty),
                                Constraint(c) => {
                                    c.ident.span().unwrap().error("Can't hotpatch a non-fully-defined function")
                                        .help("Trait bounds in functions are not allowed")
                                        .help("Patchable items cannot be generic")
                                        .emit();
                                },
                                Const(_c) => todo!("The hotpatch dev was lazy and doesn't want to figure out how to do recursive type analysis on const generics. File an issue on the github repo: https://github.com/Shizcow/hotpatch."),
                                Lifetime(_) => (),
                            }
                        }
                    },
                    Parenthesized(p) => {
                        // Fn-style path arguments, e.g. `Fn(Self) -> Self`.
                        for input in p.inputs.iter_mut() {
                            transform_self(impl_name, input);
                        }
                        use syn::ReturnType::*;
                        match &mut p.output {
                            Type(_, t) => transform_self(impl_name, t),
                            Default => (),
                        }
                    },
                    None => (),
                }
            }
        },
        Reference(r) => {
            transform_self(impl_name, &mut r.elem);
        },
        Group(g) => {
            transform_self(impl_name, &mut g.elem);
        },
        BareFn(b) => {
            // `fn(Self) -> Self` pointers: recurse into inputs and output.
            for input in b.inputs.iter_mut() {
                transform_self(impl_name, &mut input.ty);
            }
            use syn::ReturnType::*;
            match &mut b.output {
                Type(_, t) => transform_self(impl_name, t),
                Default => (),
            }
        },
        TraitObject(d) => {
            // Rewrite each trait bound's path by round-tripping through a
            // temporary `Type::Path` so the same logic applies.
            for bound in d.bounds.iter_mut() {
                if let syn::TypeParamBound::Trait(t) = bound {
                    // I can't think of a less stupid way to do this
                    let mut tpath = syn::Type::Path(syn::TypePath {
                        qself: None,
                        path: t.path.clone(),
                    });
                    transform_self(impl_name, &mut tpath);
                    if let syn::Type::Path(p) = tpath {
                        t.path = p.path;
                    }
                }
            }
        },
        ImplTrait(i) => {
            // Same round-trip trick as TraitObject above.
            for bound in i.bounds.iter_mut() {
                if let syn::TypeParamBound::Trait(t) = bound {
                    // I can't think of a less stupid way to do this
                    let mut tpath = syn::Type::Path(syn::TypePath {
                        qself: None,
                        path: t.path.clone(),
                    });
                    transform_self(impl_name, &mut tpath);
                    if let syn::Type::Path(p) = tpath {
                        t.path = p.path;
                    }
                }
            }
        },
        Array(a) => {
            transform_self(impl_name, &mut a.elem);
        },
        Infer(_) => (),
        Macro(m) =>
            m.mac.path.span().unwrap().error("Can't hotpatch an associated function/method with macro type arguements")
                .help("Try this as a bare function (not inside an impl) instead")
                .note("hotpatch is trying to make `Self` as a type work and can't guarentee this will pass through with macros")
                .emit(),
        Never(_) => (),
        Paren(p) => {
            transform_self(impl_name, &mut p.elem);
        },
        Ptr(p) => {
            transform_self(impl_name, &mut p.elem);
        },
        Slice(p) => {
            transform_self(impl_name, &mut p.elem);
        },
        Tuple(t) => {
            for elem in t.elems.iter_mut() {
                transform_self(impl_name, elem);
            }
        },
        Verbatim(_) => (), // not found in normal source code
        _ => (), // nonexhaustive
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch_macros/src/item_fn.rs | hotpatch_macros/src/item_fn.rs | use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use quote::ToTokens;
use syn::{FnArg::Typed, Ident, ItemFn, ReturnType::Type};
use crate::EXPORTNUM;
/// Expand `#[patchable]` on a free function: replace it with a public
/// `Patchable` static of the same name whose lazily-built default body is
/// the original function (renamed to an internal identifier).
pub fn patchable(fn_item: ItemFn, modpath: Option<String>) -> TokenStream {
    let (fargs, output_type, mut fn_name, sigtext, mut item) = gather_info(fn_item);
    // Patching `main` deadlocks unless one of the opt-in features is set;
    // emit a compile diagnostic rather than generating broken code.
    if !cfg!(feature = "allow-main") && !cfg!(feature = "redirect-main") && fn_name == "main" {
        fn_name.span().unwrap().error("Attempted to set main as patchable")
            .note("calling main.hotpatch() would cause a deadlock")
            .help("enable the 'allow-main' feature if you're using #[main] or #[start]")
            .help("enable the 'redirect-main' feature if you actually want main to be patchable (requires unsafe and nightly, read the docs on force functions)")
            .emit();
        return TokenStream::new();
    }
    let vis = item.vis.clone(); // pass through pub
    // `docitem` is a doc-only copy of the original fn (shown by rustdoc);
    // the real expansion below is `#[cfg(not(doc))]`.
    let mut docitem = item.clone();
    docitem.attrs.append(
        &mut syn::parse2::<syn::ItemStruct>(quote! {
            ///
            /// ---
            /// ## Hotpatch
            /// **Warning**: This item is [`#[patchable]`](hotpatch::patchable). Runtime behavior may not
            /// follow the source implementation. See the
            /// [Hotpatch Documentation](hotpatch) for more information.
            #[cfg(doc)]
            struct Dummy {}
        })
        .unwrap()
        .attrs,
    );
    // Rename the original fn so the `Patchable` static can take its name.
    let item_name = fn_name.clone();
    fn_name = Ident::new("__hotpatch_internal_fn_mangle_name", Span::call_site());
    item.sig.ident = fn_name.clone();
    // With "redirect-main", emit a stub `#[main]` that calls the Patchable.
    let redirected_main = if cfg!(feature = "redirect-main") && item_name == "main" {
        quote! {
            #[main]
            fn __hotpatch_redirect_main() -> #output_type {
                main()
            }
        }
    } else {
        quote! {}
    };
    // Fully-qualified name recorded for matching against library exports.
    let mname = match modpath {
        Some(mpath) => (quote! {concat!("::", #mpath)}),
        None => {
            quote! {
                concat!(module_path!(), "::", stringify!(#item_name))
            }
        }
    };
    TokenStream::from(quote! {
        #docitem
        #[cfg(not(doc))]
        #[allow(non_upper_case_globals)]
        #vis static #item_name: hotpatch::Patchable<dyn Fn#fargs -> #output_type + Send + Sync + 'static> = hotpatch::Patchable::__new(
            || {
                #[inline(always)]
                #item
                hotpatch::Patchable::__new_internal(Box::new(#fn_name) as Box<dyn Fn#fargs -> #output_type + Send + Sync + 'static>,
                    #mname,
                    #sigtext)
            });
        #redirected_main
    })
}
/// Expand `#[patch]` on a free function: keep the function as-is and emit a
/// numbered `#[no_mangle] pub static` `HotpatchExport` alongside it so the
/// compiled shared object can be scanned by `Patchable::hotpatch_lib`.
pub fn patch(fn_item: ItemFn, modpath: Option<String>) -> TokenStream {
    let (fargs, output_type, fn_name, sigtext, mut item) = gather_info(fn_item);
    let exnum;
    {
        // scope is used so EXPORTNUM is unlocked faster
        let mut r = EXPORTNUM.write().unwrap();
        exnum = *r;
        *r += 1;
    }
    // Append a rustdoc note to the user's function describing the hidden
    // export symbol this expansion generates.
    item.attrs.append(
        &mut syn::parse2::<syn::ItemStruct>(quote! {
            ///
            /// ---
            /// ## Hotpatch
            /// This item is a [`#[patch]`](hotpatch::patch). It will silently define a public static
            /// symbol `__HOTPATCH_EXPORT_N` for use in shared object files. See the
            /// [Hotpatch Documentation](hotpatch) for more information.
            struct Dummy {}
        })
        .unwrap()
        .attrs,
    );
    let hotpatch_name = Ident::new(&format!("__HOTPATCH_EXPORT_{}", exnum), Span::call_site());
    // Fully-qualified name recorded in the export for matching at load time.
    let mname = match modpath {
        Some(mpath) => (quote! {concat!("::", #mpath)}),
        None => {
            quote! {
                concat!(module_path!(), "::", stringify!(#fn_name))
            }
        }
    };
    TokenStream::from(quote! {
        #item
        #[doc(hidden)]
        #[no_mangle]
        pub static #hotpatch_name: hotpatch::HotpatchExport<fn#fargs -> #output_type> =
            hotpatch::HotpatchExport::__new(#fn_name,
                #mname,
                #sigtext);
    })
}
/// Collect everything the `patchable`/`patch` generators need from a free
/// function: the argument tuple type, the return type, the identifier, a
/// textual signature (used for runtime signature checks), and the item
/// itself.
fn gather_info(item: ItemFn) -> (syn::Type, syn::Type, Ident, String, ItemFn) {
    let fn_name = item.sig.ident.clone();
    // Normalize a missing return type to the unit type `()`.
    let output_type = if let Type(_, t) = &item.sig.output {
        *(t.clone())
    } else {
        syn::parse2::<syn::Type>(quote! {
            ()
        })
        .unwrap()
    };
    let mut ts = proc_macro2::TokenStream::new();
    output_type.to_tokens(&mut ts);
    // Textual signature, e.g. "fn(i32, String) -> ()".
    let sigtext = format!(
        "fn({}) -> {}",
        item.sig
            .inputs
            .clone()
            .into_iter()
            .map(|input| {
                if let syn::FnArg::Typed(t) = input {
                    let mut ts = proc_macro2::TokenStream::new();
                    t.ty.to_tokens(&mut ts);
                    ts.to_string()
                } else {
                    // Previously a bare `todo!()`; emit the same diagnostic
                    // the impl-block version uses so the user sees a real
                    // error message instead of "not yet implemented".
                    panic!("self parameter is not allowed. Can't hotpatch methods (yet!)")
                }
            })
            .collect::<Vec<String>>()
            .join(", "),
        ts
    );
    // Every typed argument, in declaration order.
    let args: Vec<_> = item
        .sig
        .inputs
        .iter()
        .filter_map(|input| {
            if let Typed(arg) = input {
                Some(arg.ty.clone())
            } else {
                None
            }
        })
        .collect();
    // Splice the argument types into a tuple type: `()` when empty,
    // otherwise `(A, B,)` — the trailing comma keeps 1-tuples valid.
    let fargs = syn::parse2::<syn::Type>(if args.is_empty() {
        quote! {
            ()
        }
    } else {
        quote! {
            (#(#args),*,)
        }
    })
    .unwrap();
    (fargs, output_type, fn_name, sigtext, item)
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch/src/lib.rs | hotpatch/src/lib.rs | #![feature(unboxed_closures)]
#![feature(fn_traits)]
#![feature(const_fn)]
#![feature(const_fn_fn_ptr_basics)]
//! Changing function definitions at runtime.
//!
//! This crate is primarily used to load new function definitions from shared
//! object files in an exceedingly easy way.
//!
//! ## Short Example
//! The following shows how
//! dead-simple this crate is to use:
//! ```
//! // main.rs
//! use hotpatch::*;
//!
//! #[patchable]
//! fn foo() { }
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//! foo(); // does nothing
//! foo.hotpatch_lib("libsomething.so")?;
//! foo(); // does something totally different!
//! Ok(())
//! }
//! ```
//! What about defining a patch? Also easy:
//! ```
//! // lib.rs
//! use hotpatch::patch;
//!
//! #[patch]
//! fn foo() { }
//! ```
//! For more examples see the [git repo](https://github.com/Shizcow/hotpatch).
//!
//! ## Methods and Free Functions
//!
//! `hotpatch` also works with methods.
//!
//! **Note:** `hotpatch` currently only works with free functions (methods without a self parameter).
//! Actual method support is in progress and expected in the next version.
//!
//! **Note:** [`#[patchable]`](patchable) and [`#[patch]`](patch) must be placed __outside__ of `impl` bodies!
//!
//! ### Example
//! Setting up is done like so:
//! ```
//! // main.rs
//! use hotpatch::*;
//!
//! struct Foo {}
//! #[patchable]
//! impl Foo {
//! pub fn bar() {
//! println!("I can be changed!");
//! }
//! }
//! ```
//! Patches are declared like this:
//! ```
//! struct Foo {}
//!
//! #[patch]
//! impl Foo {
//! /// remember, #[patch] is top-level
//! pub fn bar() {
//! println!("this is external!");
//! }
//! }
//! ```
//!
//! Finally, patching is done as so.
//! ```
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//! Foo::bar();
//! Foo::bar.hotpatch_fn(|| println!("this is patch!"))?;
//! Foo::bar();
//! Foo::bar.hotpatch_lib("target/debug/libmethods_obj.so")?;
//! Foo::bar();
//! Ok(())
//! }
//! ```
//!
//! ## Features
//! For reference, this crate recognizes the following features:
//! - `allow-main`: Allow setting `main` as [`#[patchable]`](patchable). Only useful if using `#[start]` or `#[main]`.
//! - `redirect-main`: Same as `allow-main` but also generates a stub `#[main]` to call the [`Patchable`](Patchable).
//! If you just want to hotpatch `main`, this is probably the right feature. Requires nightly and `#[feature(main)]`.
//! - `large-signatures`: Tweaks the variadic generics engine. See [`hotpatch_fn`](Patchable::hotpatch_fn).
//!
//! ## Warnings
//! Under normal operation, this crate provides type safety, thread safety,
//! namespace safety, and a whole bunch of other guarantees. However, use of this
//! crate is still playing with fire.
//!
//! The one thing that cannot be checked against is call stack safety. Because
//! [`Patchable`](Patchable) uses [`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html)s
//! the current thread is blocked when trying to hotpatch a function.
//! This ensures that an out-of-date function body cannot be run. However if the
//! function being hotpatched is the current function or anywhere within the
//! call stack (eg patching a function that called the current function) a
//! deadlock will occur. Be careful!
//!
//! The `try` methods within [`Patchable`](Patchable) provide additional checks
//! for this, but may cause other problems in multithreaded environments.
//!
//! ## Bypassing Thread Safety
//! The previous section mentions being unable to hotpatch currently running functions.
//! This is a deliberate safety feature. However, it can be bypassed by using the
//! `force` methods within [`Patchable`](Patchable). This allows multiple
//! functions definitions to run at once. This is unsafe, but allows for some really
//! interesting things such as hotpatching `main`.
use std::marker::PhantomData;
use simple_error::bail;
use std::sync::RwLock;
pub use hotpatch_macros::*;
#[doc(hidden)]
pub use once_cell::sync::Lazy;
use variadic_generics::*;
mod export;
pub use export::*;
mod docs;
pub use docs::*;
use std::mem::{transmute, transmute_copy};
// Type-erased storage form for every patched function pointer; the real
// signature is recovered via transmute using the `RealType` phantom
// parameter (see `HotpatchImportInternal::upcast_self`).
type FnVoid = dyn Fn() + Send + Sync + 'static;
// Expands its body once per supported arity via the `variadic_generics`
// crate. The "large-signatures" feature switches to the larger expansion so
// functions with more parameters are covered.
macro_rules! va_largesig {
    ($va_len:tt, $va_idents:tt, $va_indices:tt, $($tt:tt)+) => {
        #[cfg(not(feature = "large-signatures"))]
        va_expand_with_nil! { $va_len $va_idents $va_indices $($tt)* }
        #[cfg(feature = "large-signatures")]
        va_expand_more_with_nil! { $va_len $va_idents $va_indices $($tt)* }
    }
}
/// Created by [`#[patchable]`](patchable). A functor capable of overwriting its
/// own function.
pub struct Patchable<RealType: ?Sized + Send + Sync + 'static> {
    // Lazily-initialized state. The Option is `Some` during normal
    // operation; it is only `None` transiently while a `force_*` method has
    // temporarily taken the contents (see those methods' safety notes).
    lazy: Lazy<Option<RwLock<HotpatchImportInternal<RealType>>>>,
}
#[doc(hidden)]
pub struct HotpatchImportInternal<RealType: ?Sized + Send + Sync + 'static> {
    current_ptr: Box<FnVoid>, // void pointer to the active definition
    default_ptr: Box<FnVoid>, // void pointer to the compiled-in definition
    phantom: PhantomData<RealType>, // store the real type for correct casts
    // Textual signature ("fn(..) -> ..") checked against library exports.
    sig: &'static str,
    // Keeps the loaded shared object alive while its code is in use.
    lib: Option<libloading::Library>,
    // Module path with the crate-root segment stripped (starts at "::").
    mpath: &'static str,
}
impl<RealType: ?Sized + Send + Sync + 'static> HotpatchImportInternal<RealType> {
    /// Build the initial state pointing at the compiled-in definition.
    fn new<T>(ptr: T, mpath: &'static str, sig: &'static str) -> Self {
        // we know that ptr is a Box<'static raw fn ptr>, so it DOES impl Copy (kinda)
        // and because new is hidden, this assumption is safe
        let r = &ptr;
        unsafe {
            Self {
                current_ptr: transmute_copy(r),
                default_ptr: transmute_copy(r),
                phantom: PhantomData,
                lib: None,
                sig,
                // Drop the crate-name segment so paths compare across binaries.
                mpath: mpath.trim_start_matches(|c| c != ':'),
            }
        }
    }
    /// Close and drop the currently loaded shared object, if any.
    fn clean(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        if self.lib.is_some() {
            self.lib.take().unwrap().close()?;
        }
        Ok(())
    }
    /// Point back at the compiled-in definition and unload any library.
    fn restore_default(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        // see Self::new for why this is safe
        self.current_ptr = unsafe { transmute_copy(&self.default_ptr) };
        self.clean()
    }
    /// Reinterpret the type-erased pointer as the real callable type.
    fn upcast_self(&self) -> &RealType {
        unsafe { transmute_copy(&self.current_ptr) }
    }
}
// passthrough methods
impl<RealType: ?Sized + Send + Sync + 'static> Patchable<RealType> {
    /// Hidden constructor used by generated code; `const` so it can
    /// initialize a `static`.
    #[doc(hidden)]
    pub const fn __new(ptr: fn() -> Option<RwLock<HotpatchImportInternal<RealType>>>) -> Self {
        Self {
            lazy: Lazy::new(ptr),
        }
    }
    /// Hidden helper used by generated code to build the lazy contents.
    #[doc(hidden)]
    pub fn __new_internal<T>(
        ptr: T,
        mpath: &'static str,
        sig: &'static str,
    ) -> Option<RwLock<HotpatchImportInternal<RealType>>> {
        Some(RwLock::new(HotpatchImportInternal::new(ptr, mpath, sig)))
    }
    /// Hotpatch this functor back to its original definition.
    ///
    /// ## Example
    /// ```
    /// #[patchable]
    /// fn foo() {}
    ///
    /// fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     foo(); // does A
    ///     foo.hotpatch_lib("libtest.so")?;
    ///     foo(); // does B
    ///     foo.restore_default();
    ///     foo(); // does A again
    ///     Ok(())
    /// }
    /// ```
    pub fn restore_default(&self) -> Result<(), Box<dyn std::error::Error + '_>> {
        self.lazy.as_ref().unwrap().write()?.restore_default()
    }
    /// Like [`restore_default`](Patchable::restore_default) but uses
    /// [`RwLock::try_write`](https://doc.rust-lang.org/std/sync/struct.RwLock.html#method.try_write).
    pub fn try_restore_default(&self) -> Result<(), Box<dyn std::error::Error + '_>> {
        self.lazy.as_ref().unwrap().try_write()?.restore_default()
    }
    /// Like [`restore_default`](Patchable::restore_default) but uses
    /// unsafe features to completely bypass the
    /// [`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html).
    /// Can be used to patch the current function or parent functions.
    ///
    /// # Safety
    /// This mutates a global static, and has all the thread-safety issues associated with it.
    /// The main cause of unsafety is the fact that multiple function definitions can be in effect
    /// __at the same time__. This can cause a hotpatch to not work as intended (as nothing may have changed!).
    ///
    /// Additionally, as the internal value is `taken`, this causes a teeny tiny bit of undefined behavior
    /// if a thread tries to call a `Patchable` during a (small but nonzero duration) `force` transition.
    ///
    /// **Use with caution**.
    pub unsafe fn force_restore_default(&self) -> Result<(), Box<dyn std::error::Error + '_>> {
        // Steal the Lazy contents through a raw pointer to bypass the RwLock.
        let sref = self as *const Self as *mut Self;
        let mut rref = (*sref).lazy.take().unwrap();
        let reslt = rref.get_mut().unwrap().restore_default();
        *(*sref).lazy = Some(rref);
        reslt
    }
}
// Internal hook: replaces the stored type-erased pointer with a new closure
// of the same real signature. `Dummy` disambiguates the per-arity blanket
// impls generated below.
trait HotpatchFnInternal<T, Dummy> {
    unsafe fn hotpatch_fn(&mut self, c: T) -> Result<(), Box<dyn std::error::Error>>;
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + 'static, T, Ret, $($va_idents,)*> HotpatchFnInternal<T, (Ret, $($va_idents,)*)>
        for HotpatchImportInternal<RealType>
    where
        T: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        unsafe fn hotpatch_fn(&mut self, c: T) -> Result<(), Box<dyn std::error::Error>> {
            // Box the closure, unsize to the real dyn-Fn type, then erase the
            // signature; `upcast_self` reverses the erasure on call.
            let boxed: Box<T> = Box::new(c);
            let reboxed: Box<dyn Fn($($va_idents,)*) -> Ret> = boxed;
            let dbox: Box<FnVoid> = std::mem::transmute(reboxed);
            self.current_ptr = dbox;
            self.clean()
        }
    }
}
// Internal hook for loading a patched definition out of a shared object.
trait HotpatchLibInternal<Dummy> {
    fn hotpatch_lib(&mut self, lib_name: &str) -> Result<(), Box<dyn std::error::Error>>;
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + Send + Sync + 'static, Ret: 'static, $($va_idents: 'static,)*> HotpatchLibInternal<(Ret, $($va_idents,)*)>
        for HotpatchImportInternal<RealType>
    where
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        fn hotpatch_lib(&mut self, lib_name: &str) -> Result<(), Box<dyn std::error::Error>> {
            unsafe {
                let lib = libloading::Library::new(lib_name)?;
                // Walk the numbered export symbols until one's module path
                // matches; running off the end surfaces as the map_err below.
                let mut i: usize = 0;
                loop {
                    let symbol_name = format!("{}{}", "__HOTPATCH_EXPORT_", i);
                    let exports: libloading::Symbol<*mut HotpatchExport<fn($($va_idents,)*) -> Ret>> =
                        lib.get(symbol_name.as_bytes()).map_err(|_| {
                            format!(
                                "Hotpatch for {} failed: symbol not found in library {}",
                                self.mpath, lib_name
                            )
                        })?;
                    let export_obj = &**exports;
                    if export_obj.symbol.trim_start_matches(|c| c != ':') == self.mpath {
                        // found the correct symbol
                        if self.sig != export_obj.sig {
                            bail!("Hotpatch for {} failed: symbol found but of wrong type. Expected {} but found {}", self.mpath, self.sig, export_obj.sig);
                        }
                        let d: Box<fn($($va_idents,)*) -> Ret> = Box::new(export_obj.ptr);
                        let t: Box<dyn Fn($($va_idents,)*) -> Ret + Send + Sync + 'static> = d;
                        self.current_ptr = transmute(t);
                        self.clean()?;
                        // Keep the library open while its code is reachable.
                        self.lib = Some(lib);
                        break;
                    }
                    i += 1;
                }
            }
            Ok(())
        }
    }
}
/// Public interface for [Patchable::hotpatch_lib] and associated; requires import to use.
pub trait HotpatchLib<Dummy> {
    fn hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>>;
    fn try_hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>>;
    #[allow(clippy::missing_safety_doc)] // documentation is elsewhere and linked to
    unsafe fn force_hotpatch_lib(
        &self,
        lib_name: &str,
    ) -> Result<(), Box<dyn std::error::Error + '_>>;
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + Send + Sync + 'static, Ret: 'static, $($va_idents: 'static,)*> HotpatchLib<(Ret, $($va_idents,)*)>
        for Patchable<RealType>
    where
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        fn hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>> {
            self.lazy.as_ref().unwrap().write()?.hotpatch_lib(lib_name)
        }
        fn try_hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>> {
            self.lazy
                .as_ref()
                .unwrap()
                .try_write()?
                .hotpatch_lib(lib_name)
        }
        unsafe fn force_hotpatch_lib(
            &self,
            lib_name: &str,
        ) -> Result<(), Box<dyn std::error::Error + '_>> {
            // Steal the Lazy contents through a raw pointer to bypass the
            // RwLock entirely; see the crate docs for the safety discussion.
            let sref = self as *const Self as *mut Self;
            let mut rref = (*sref).lazy.take().unwrap();
            let reslt = rref.get_mut().unwrap().hotpatch_lib(lib_name);
            *(*sref).lazy = Some(rref);
            reslt
        }
    }
}
/// Public interface for [Patchable::hotpatch_fn] and associated; requires import to use
pub trait HotpatchFn<T, Dummy> {
    fn hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>>;
    fn try_hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>>;
    #[allow(clippy::missing_safety_doc)] // documentation is elsewhere and linked to
    unsafe fn force_hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>>;
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + Send + Sync + 'static, T, Ret, $($va_idents,)*> HotpatchFn<T, (Ret, $($va_idents,)*)>
        for Patchable<RealType>
    where
        T: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        fn hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>> {
            unsafe { self.lazy.as_ref().unwrap().write()?.hotpatch_fn(c) }
        }
        fn try_hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>> {
            unsafe { self.lazy.as_ref().unwrap().try_write()?.hotpatch_fn(c) }
        }
        unsafe fn force_hotpatch_fn(&self, c: T) -> Result<(), Box<dyn std::error::Error + '_>> {
            // Steal the Lazy contents through a raw pointer to bypass the
            // RwLock entirely; see the crate docs for the safety discussion.
            let sref = self as *const Self as *mut Self;
            let mut rref = (*sref).lazy.take().unwrap();
            let reslt = rref.get_mut().unwrap().hotpatch_fn(c);
            *(*sref).lazy = Some(rref);
            reslt
        }
    }
}
// Fn Traits
// Calling a `Patchable` takes a read lock for the duration of the call (so a
// concurrent hotpatch cannot swap the definition mid-execution), recovers
// the real closure type, and forwards the argument tuple. One impl per
// supported arity via va_largesig.
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + 'static, Ret, $($va_idents,)*> FnOnce<($($va_idents,)*)> for Patchable<RealType>
    where
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        type Output = Ret;
        extern "rust-call" fn call_once(self, args: ($($va_idents,)*)) -> Ret {
            let inner = self.lazy.as_ref().unwrap().read().unwrap();
            inner.upcast_self().call(args)
        }
    }
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + 'static, Ret, $($va_idents,)*> FnMut<($($va_idents,)*)> for Patchable<RealType>
    where
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        extern "rust-call" fn call_mut(&mut self, args: ($($va_idents,)*)) -> Ret {
            let inner = self.lazy.as_ref().unwrap().read().unwrap();
            inner.upcast_self().call(args)
        }
    }
}
#[cfg(not(doc))]
va_largesig! { ($va_len:tt), ($($va_idents:ident),*), ($($va_indices:tt),*),
    impl<RealType: ?Sized + 'static, Ret, $($va_idents,)*> Fn<($($va_idents,)*)> for Patchable<RealType>
    where
        RealType: Fn($($va_idents,)*) -> Ret + Send + Sync + 'static,
    {
        extern "rust-call" fn call(&self, args: ($($va_idents,)*)) -> Ret {
            let inner = self.lazy.as_ref().unwrap().read().unwrap();
            inner.upcast_self().call(args)
        }
    }
}
// methods stuff
/// A `Deref`-only wrapper that resolves to a `'static` reference on every
/// access. Stores a plain function pointer, which lets associated-function
/// `Patchable`s be exposed through a `const` item while the backing static
/// is initialized elsewhere.
pub struct MutConst<T: 'static> {
    thunk: fn() -> &'static T,
}
impl<T> MutConst<T> {
    /// Wrap a resolver function. `const` so it can appear in associated consts.
    pub const fn new(f: fn() -> &'static T) -> Self {
        MutConst { thunk: f }
    }
}
impl<T> std::ops::Deref for MutConst<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // Re-run the resolver on each dereference; it yields a 'static borrow.
        (self.thunk)()
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch/src/export.rs | hotpatch/src/export.rs | /// Created by [`#[patch]`](crate::patch). Internal use only.
///
/// Each `#[patch]` expands to one of these as a `#[no_mangle] pub static`
/// named `__HOTPATCH_EXPORT_N`, so that [`Patchable`](crate::Patchable)
/// methods can locate it inside a shared object, match its module path, and
/// verify its textual type signature before swapping in `ptr`.
pub struct HotpatchExport<T: 'static> {
    pub symbol: &'static str,
    pub sig: &'static str,
    pub ptr: T,
}
#[doc(hidden)]
impl<T: 'static> HotpatchExport<T> {
    /// Construct an export record; called only from generated code.
    pub const fn __new(ptr: T, symbol: &'static str, sig: &'static str) -> Self {
        HotpatchExport { ptr, symbol, sig }
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/hotpatch/src/docs.rs | hotpatch/src/docs.rs | //! A significant amount of source code uses variadic generics, which clutter
//! documentation. This module holds documentation for those, in a format
//! which is much easier to read.
#[cfg(doc)]
impl<RealType: ?Sized + Send + Sync + 'static, VaGen, Ret> crate::Patchable<RealType>
where
    RealType: Fn(VaGen) -> Ret,
{
    /// Hotpatch this functor with functionality defined in `ptr`.
    /// `ptr` can be a function pointer or `move` closure with the
    /// same type signature as the functor's function.
    ///
    /// ## Example
    /// ```
    /// #[patchable]
    /// fn foo(_: i32, _: i32, _: i32) {}
    ///
    /// fn bar(_: i32, _: i32, _: i32) {}
    ///
    /// fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     foo.hotpatch_fn(bar)?;
    ///     foo.hotpatch_fn(move |a, b, c| println!("{} {} {}", a, b, c))?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// Requires importing [crate::HotpatchFn].
    ///
    /// ## VaArgs Note
    /// Implementation is defined with the [`variadic_generics`](https://docs.rs/variadic_generics)
    /// crate. This means
    /// a macro is used to define a finite but large number of templated inputs.
    /// If using functions with large numbers of inputs and `hotpatch_fn` does not
    /// appear to be defined, compile `hotpatch` with the `large-signatures` feature
    /// to increase the number of supported arguments.
    pub fn hotpatch_fn<F>(&self, ptr: F) -> Result<(), Box<dyn std::error::Error + '_>>
    where
        F: Fn(VaGen) -> Ret,
    {
        // The actual implementation is in toplevel
    }
    /// Like [`hotpatch_fn`](crate::Patchable::hotpatch_fn) but uses
    /// [`RwLock::try_write`](https://doc.rust-lang.org/std/sync/struct.RwLock.html#method.try_write).
    pub fn try_hotpatch_fn<F>(&self, ptr: F) -> Result<(), Box<dyn std::error::Error + '_>>
    where
        F: Fn(VaGen) -> Ret,
    {
        // The actual implementation is in toplevel
    }
    /// Like [`hotpatch_fn`](crate::Patchable::hotpatch_fn) but uses
    /// unsafe features to completely bypass the
    /// [`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html).
    /// Can be used to patch the current function or parent functions.
    ///
    /// # Safety
    /// This mutates a global static, and has all the thread-safety issues associated with it.
    /// The main cause of unsafety is the fact that multiple function definitions can be in effect
    /// __at the same time__. This can cause a hotpatch to not work as intended (as nothing may have changed!).
    ///
    /// Additionally, as the internal value is `taken`, this causes a teeny tiny bit of undefined behavior
    /// if a thread tries to call a `Patchable` during a (small but nonzero duration) `force` transition.
    ///
    /// **Use with caution**.
    pub unsafe fn force_hotpatch_fn<F>(&self, ptr: F) -> Result<(), Box<dyn std::error::Error + '_>>
    where
        F: Fn(VaGen) -> Ret,
    {
        // The actual implementation is in toplevel
    }
    /// Hotpatch this functor with functionality defined in `lib_name`.
    /// Will search a shared object `cdylib` file for [`#[patch]`](crate::patch) exports,
    /// finding the definition that matches module path and signature.
    ///
    /// ## Example
    /// ```
    /// #[patchable]
    /// fn foo() {}
    ///
    /// fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     foo(); // does something
    ///     foo.hotpatch_lib("libtest.so")?;
    ///     foo(); // does something else
    ///     Ok(())
    /// }
    /// ```
    ///
    /// Requires importing [crate::HotpatchLib].
    ///
    /// ## VaArgs Note
    /// Implementation is defined with the [`variadic_generics`](https://docs.rs/variadic_generics)
    /// crate. This means
    /// a macro is used to define a finite but large number of templated inputs.
    /// If using functions with large numbers of inputs and `hotpatch_lib` does not
    /// appear to be defined, compile `hotpatch` with the `large-signatures` feature
    /// to increase the number of supported arguments.
    pub fn hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>> {
        // The actual implementation is in toplevel
    }
    /// Like [`hotpatch_lib`](crate::Patchable::hotpatch_lib) but uses
    /// [`RwLock::try_write`](https://doc.rust-lang.org/std/sync/struct.RwLock.html#method.try_write).
    pub fn try_hotpatch_lib(&self, lib_name: &str) -> Result<(), Box<dyn std::error::Error + '_>> {
        // The actual implementation is in toplevel
    }
    /// Like [`hotpatch_lib`](crate::Patchable::hotpatch_lib) but uses
    /// unsafe features to completely bypass the
    /// [`RwLock`](https://doc.rust-lang.org/std/sync/struct.RwLock.html).
    /// Can be used to patch the current function or parent functions.
    ///
    /// # Safety
    /// This mutates a global static, and has all the thread-safety issues associated with it.
    /// The main cause of unsafety is the fact that multiple function definitions can be in effect
    /// __at the same time__. This can cause a hotpatch to not work as intended (as nothing may have changed!).
    ///
    /// Additionally, as the internal value is `taken`, this causes a teeny tiny bit of undefined behavior
    /// if a thread tries to call a `Patchable` during a (small but nonzero duration) `force` transition.
    ///
    /// **Use with caution**.
    pub unsafe fn force_hotpatch_lib(
        &self,
        lib_name: &str,
    ) -> Result<(), Box<dyn std::error::Error + '_>> {
        // The actual implementation is in toplevel
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/unsafe/unsafe_bin/src/main.rs | examples/unsafe/unsafe_bin/src/main.rs | #![feature(main)]
use hotpatch::*;
// Demonstrates force-patching `main` itself (requires the `redirect-main`
// feature; see the #![feature(main)] at the top of this file).
#[patchable]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("You'll only see me once.");
    unsafe { // because we're patching the current function, this will
        // let multiple function definitions exist at the same time.
        // That's unsafe, so the unsafe block is required.
        main.force_hotpatch_lib("target/debug/libunsafe_obj.so")?;
    }
    main()?; // and now libunsafe_obj defines what happens
    unsafe { main.force_hotpatch_fn(foo)?; }
    main()?;
    main()?; // Something different happens each time!
    main()?;
    main()?;
    main() // And the base case remains the same
}
// Each helper re-patches `main` to the next definition in the chain,
// demonstrating that a force-patched function can swap itself while running.
fn foo() -> Result<(), Box<dyn std::error::Error>> {
    println!("Hello from foo");
    unsafe { main.force_hotpatch_fn(bar) }
}
fn bar() -> Result<(), Box<dyn std::error::Error>> {
    println!("Hello from bar");
    unsafe { main.force_hotpatch_fn(baz) }
}
fn baz() -> Result<(), Box<dyn std::error::Error>> {
    println!("Hello from baz");
    // Closures work too, as long as they match the patched signature.
    unsafe { main.force_hotpatch_fn(|| {
        println!("Hello from a closure");
        Ok(())
    }) }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/unsafe/unsafe_obj/src/lib.rs | examples/unsafe/unsafe_obj/src/lib.rs | use hotpatch::patch;
#[patch]
/// Replacement `main` body, loaded at runtime from this shared object by the
/// `unsafe_bin` example.
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("I'm from somewhere else!");
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/threads/threads_obj/src/lib.rs | examples/threads/threads_obj/src/lib.rs | use hotpatch::patch;
#[patch]
/// Replacement for the binary's `foo`; unlike the default, it does not sleep.
pub fn foo() -> &'static str {
    "Foo: Patched"
}
#[patch]
/// Replacement for the binary's `bar`; unlike the default, it does not sleep.
pub fn bar() -> &'static str {
    "Bar: Patched"
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/threads/threads_bin/src/main.rs | examples/threads/threads_bin/src/main.rs | use hotpatch::*;
use std::{thread, time};
#[patchable]
fn foo() -> &'static str {
    // The patched version does not sleep. This shows that
    // no two threads can execute different definitions at
    // the same time. Threads themselves may be out of order
    // but after patching the first definition will never be
    // called.
    std::thread::sleep(time::Duration::from_micros(100));
    "Foo: Default"
}
#[patchable]
fn bar() -> &'static str {
    // Same here, but slowed down for emphasis
    std::thread::sleep(time::Duration::from_secs(1));
    "Bar: Default"
}
/// Spawns many threads calling `foo`/`bar` while the definitions are being
/// hotpatched, demonstrating that patching is safe under concurrency.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // make some threads
    let mut children = vec![];
    for i in 0..10000 { // on my machine this does 9900+-50 before patch goes through
        children.push(thread::spawn(move || {
            // call foo before/during/after hotpatch
            std::thread::sleep(time::Duration::from_micros(i));
            println!("Hello from thread {}. {}", i, foo());
        }));
    }
    // hotpatch in the middle of execution
    std::thread::sleep(time::Duration::from_micros(5));
    foo.hotpatch_lib("target/debug/libthreads_obj.so")?;
    // wait for threads to finish
    for child in children {
        let _ = child.join();
    }
    // Here's a slower example
    children = vec![];
    children.push(thread::spawn(move || {
        println!("{}", bar()); // will not be patched
    }));
    children.push(thread::spawn(move || {
        std::thread::sleep(time::Duration::from_secs(2));
        println!("{}", bar()); // expected to be patched
    }));
    std::thread::sleep(time::Duration::from_millis(500));
    // This should patch after the first call but before the second
    bar.hotpatch_lib("target/debug/libthreads_obj.so")?;
    // wait for threads to finish
    for child in children.into_iter().rev() {
        let _ = child.join();
    }
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/multiple/multiple_obj1/src/lib.rs | examples/multiple/multiple_obj1/src/lib.rs | use hotpatch::patch;
#[patch]
/// First replacement definition for the `multiple` example's `foo`.
pub fn foo() {
    println!("Multiple 1");
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/multiple/multiple_obj2/src/lib.rs | examples/multiple/multiple_obj2/src/lib.rs | use hotpatch::patch;
#[patch]
/// Second replacement definition for the `multiple` example's `foo`.
pub fn foo() {
    println!("Multiple 2");
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/multiple/multiple_bin/src/main.rs | examples/multiple/multiple_bin/src/main.rs | use hotpatch::*;
#[patchable]
/// Default definition; swapped back in via `restore_default` below.
fn foo() {
    println!("Source");
}
/// Patches `foo` from two different shared objects in turn, then restores
/// the original definition.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    foo();
    foo.hotpatch_lib("target/debug/libmultiple_obj1.so")?;
    foo();
    foo.hotpatch_lib("target/debug/libmultiple_obj2.so")?;
    foo();
    foo.restore_default()?;
    foo();
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/methods/src/main.rs | examples/methods/src/main.rs | use hotpatch::*;
/// This is a struct
struct Foo {
    // Human-readable marker of which `new` definition built the value.
    description: &'static str,
}
/// And this is where free associated items can be defined
#[patchable]
impl Foo {
    /// Here's one of them!
    fn new() -> Self {
        Self {
            description: "This object was created with the original definition",
        }
    }
}
/// Patches the associated function `Foo::new` with a closure and then with a
/// definition loaded from a shared object.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let f = Foo::new();
    println!("First description: {}", f.description);
    Foo::new.hotpatch_fn(|| Foo {
        description: "Created with an anonymous definition",
    })?;
    let f = Foo::new();
    println!("Second description: {}", f.description);
    Foo::new.hotpatch_lib("target/debug/libmethods_obj.so")?;
    let f = Foo::new();
    println!("Third description: {}", f.description);
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/methods_extra_obj/src/lib.rs | examples/methods_extra_obj/src/lib.rs | use hotpatch::*;
/// There is a system of trust here
/// Foo is assumed to be the same struct everywhere
/// This may be possible to lock down even more with typeid, but that's WIP upstream
pub struct Foo {
    // Marker showing which definition constructed the value.
    pub description: &'static str,
}
#[patch]
impl Foo {
    /// remember, #[patch] is top-level
    pub fn new() -> Self {
        Self {
            description: "This object was created in a dynamic library",
        }
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/hello_world/hw_obj/src/lib.rs | examples/hello_world/hw_obj/src/lib.rs | use hotpatch::patch;
#[patch]
/// This is what a patch looks like. It's a normal function that can still be executed locally
pub fn foo() {
    println!("I am from patched foo.");
}
/// Patches are resolved by module path, so this mirrors the binary's `a` module.
mod a {
    use hotpatch::patch;
    #[patch]
    pub fn bar(a: i32) {
        println!(
            "I am from patched bar. I have {} as an arg. I am module aware.",
            a
        );
    }
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/hello_world/hw_bin/src/main.rs | examples/hello_world/hw_bin/src/main.rs | use hotpatch::*;
/// Module mirroring `hw_obj`'s `a`, showing that patches are module-path aware.
mod a {
    use hotpatch::patchable;
    #[patchable]
    pub fn bar(a: i32) {
        println!(
            "I am from source bar. I have {} as an arg. I am module aware.",
            a
        );
    }
}
#[patchable]
/// This is what a patchable item looks like. Just like a function!
fn foo() {
    println!("I am from source foo.");
}
/// Calls both functions before and after loading their patched definitions.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    foo(); // prints "I am from source"
    foo.hotpatch_lib("target/debug/libhw_obj.so")?;
    foo(); // prints something totally different
    use crate::a::bar;
    bar(1);
    bar.hotpatch_lib("target/debug/libhw_obj.so")?;
    bar(2);
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
Shizcow/hotpatch | https://github.com/Shizcow/hotpatch/blob/ce6a71d2e7188b7ed2af425a2ffce9b148a87034/examples/local/src/main.rs | examples/local/src/main.rs | use hotpatch::*;
/// I'm a functor
#[patchable]
fn foo(_: i32) {
    println!("I am Foo");
}
/// I'm a function with extra bits
#[patch]
fn tmp(_: i32) {}
// Plain function used as a patch target below.
fn bar(_: i32) {
    println!("Foo Becomes Bar");
}
/// Patches `foo` with a named local function, then with a capturing closure —
/// no dynamic library involved.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    foo(1);
    foo.hotpatch_fn(bar)?;
    foo(1);
    let a = 5;
    foo.hotpatch_fn(move |_: i32| println!("Foo becomes anonymous {}", a))?;
    foo(1);
    Ok(())
}
| rust | Apache-2.0 | ce6a71d2e7188b7ed2af425a2ffce9b148a87034 | 2026-01-04T20:21:03.153459Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/ws_fmp4.rs | src/ws_fmp4.rs | use async_tungstenite::tungstenite::handshake::server::{ErrorResponse, Request, Response};
use async_tungstenite::tungstenite::Message;
use crossbeam_utils::atomic::AtomicCell;
use futures::sink::SinkExt;
use futures::StreamExt;
use smol::net::{SocketAddr, TcpListener, TcpStream};
use crate::protocol::h264::Nalu;
use crate::rtmp_server::{eventbus_map, video_header_map, meta_data_map};
use crate::protocol::fmp4::{Fmp4Encoder, Track};
#[allow(unused)]
/// Accepts WebSocket connections on `addr` and serves each one an fMP4 stream.
///
/// # Errors
/// Returns an error if binding the listener fails; per-connection failures are
/// isolated to their own detached task.
pub async fn run_server(addr: String) -> anyhow::Result<()> {
    // Propagate bind failures instead of panicking (`expect`) — the function
    // already returns `anyhow::Result`, so the caller can handle them.
    let listener = TcpListener::bind(&addr).await?;
    log::info!("Websocket Listening on: {}", addr);
    // Let's spawn the handling of each connection in a separate task.
    while let Ok((stream, addr)) = listener.accept().await {
        smol::spawn(handle_connection(stream, addr)).detach();
    }
    Ok(())
}
/// Serves one WebSocket client: performs the handshake, extracts the stream
/// name from the request path (`/websocket/<name>`), wraps the stream's NAL
/// units into fragmented MP4, and pushes the fragments to the client.
async fn handle_connection(raw_stream: TcpStream, addr: SocketAddr) -> anyhow::Result<()> {
    log::info!("Incoming TCP connection from: {}", addr);
    // The request URI is only visible inside the handshake callback, so it is
    // smuggled out through an `AtomicCell`.
    let uri = AtomicCell::default();
    let callback = |req: &Request, res: Response| -> Result<Response, ErrorResponse> {
        uri.store(req.uri().clone());
        Ok(res)
    };
    let ws_stream = async_tungstenite::accept_hdr_async(raw_stream, callback).await?;
    let (mut outgoing, _incoming) = ws_stream.split();
    let uri = uri.take();
    let stream_name = uri.path().strip_prefix("/websocket/")
        .ok_or(anyhow::anyhow!("invalid uri path"))?;
    log::info!("WebSocket connection established: {}, stream_name={}", addr, stream_name);
    let meta_data = meta_data_map()
        .get(stream_name)
        .map(|it| it.value().clone())
        .ok_or_else(|| anyhow::anyhow!(format!("not found meta_data, stream={}", stream_name)))?;
    // Fixed copy-pasted error text: this lookup is for the video header, not
    // the metadata.
    let video_header = video_header_map()
        .get(stream_name)
        .map(|it| it.value().clone())
        .ok_or_else(|| anyhow::anyhow!(format!("not found video_header, stream={}", stream_name)))?;
    // Collect SPS/PPS parameter sets from the cached video header; they are
    // needed to build the fMP4 init segment.
    let mut sps_list = vec![];
    let mut pps_list = vec![];
    let pioneer_nalus = Nalu::from_rtmp_message(&video_header);
    for nalu in pioneer_nalus {
        match nalu.get_nal_unit_type() {
            Nalu::UNIT_TYPE_SPS => sps_list.push(nalu.as_ref().to_vec()),
            Nalu::UNIT_TYPE_PPS => pps_list.push(nalu.as_ref().to_vec()),
            _ => {}
        }
    }
    let rx = eventbus_map()
        .get(stream_name)
        .map(|it| it.register_receiver())
        .ok_or_else(|| anyhow::anyhow!(format!("not found eventbus, stream={}", stream_name)))?;
    let mut fmp4_encoder = Fmp4Encoder::new(Track {
        duration: meta_data.duration as u32,
        timescale: (meta_data.duration * meta_data.frame_rate) as u32,
        width: meta_data.width as _,
        height: meta_data.height as _,
        sps_list,
        pps_list,
        ..Default::default()
    });
    // send video header
    let header = fmp4_encoder.init_segment();
    outgoing.send(Message::binary(header)).await?;
    // Relay every published RTMP message as one or more fMP4 fragments until
    // the publisher stops or the client disconnects.
    while let Ok(msg) = rx.recv().await {
        let nalus = Nalu::from_rtmp_message(&msg);
        for nalu in nalus {
            let bytes = fmp4_encoder.wrap_frame(nalu.as_ref(), nalu.is_key_frame);
            outgoing.send(Message::binary(bytes)).await?;
        }
    }
    log::info!("WebSocket disconnected: {}, stream_name={}", addr, stream_name);
    Ok(())
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/lib.rs | src/lib.rs | #[macro_use]
extern crate num_derive;
mod eventbus;
pub mod http_flv;
pub mod http_player;
pub mod protocol;
pub mod rtmp_server;
pub mod util;
pub mod ws_h264;
pub mod ws_fmp4; | rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/ws_h264.rs | src/ws_h264.rs | use async_tungstenite::tungstenite::handshake::server::{ErrorResponse, Request, Response};
use async_tungstenite::tungstenite::Message;
use crossbeam_utils::atomic::AtomicCell;
use futures::sink::SinkExt;
use futures::StreamExt;
use smol::net::{SocketAddr, TcpListener, TcpStream};
use crate::protocol::h264::Nalu;
use crate::rtmp_server::{eventbus_map, video_header_map, audio_header_map};
use crate::protocol::rtmp::{ChunkMessageType, RtmpMessage};
use smol::channel::Receiver;
use smol::stream::{Stream};
use smol::stream;
use crate::protocol::aac::{AAC, ADTS};
#[allow(unused)]
/// Accepts WebSocket connections on `addr` and serves each one a raw
/// H.264/AAC `Mix` stream.
///
/// # Errors
/// Returns an error if binding the listener fails; per-connection failures are
/// isolated to their own detached task.
pub async fn run_server(addr: String) -> anyhow::Result<()> {
    // Propagate bind failures instead of panicking (`expect`) — the function
    // already returns `anyhow::Result`, so the caller can handle them.
    let listener = TcpListener::bind(&addr).await?;
    log::info!("Websocket Listening on: {}", addr);
    // Let's spawn the handling of each connection in a separate task.
    while let Ok((stream, addr)) = listener.accept().await {
        smol::spawn(handle_connection(stream, addr)).detach();
    }
    Ok(())
}
/// Serves one WebSocket client: after the handshake, replays the cached video
/// header and then relays the live stream as tagged binary frames
/// (one type byte followed by the raw payload — see `Mix::to_bytes`).
async fn handle_connection(raw_stream: TcpStream, addr: SocketAddr) -> anyhow::Result<()> {
    log::info!("Incoming TCP connection from: {}", addr);
    // The request URI is only visible inside the handshake callback, so it is
    // smuggled out through an `AtomicCell`.
    let uri = AtomicCell::default();
    let callback = |req: &Request, res: Response| -> Result<Response, ErrorResponse> {
        uri.store(req.uri().clone());
        Ok(res)
    };
    let ws_stream = async_tungstenite::accept_hdr_async(raw_stream, callback).await?;
    let (mut outgoing, _incoming) = ws_stream.split();
    let uri = uri.take();
    let stream_name = uri.path().strip_prefix("/websocket/")
        .ok_or(anyhow::anyhow!("invalid uri path"))?;
    log::info!("WebSocket connection established: {}, stream_name={}", addr, stream_name);
    // send video header
    if let Some(header) = video_header_map().get(stream_name) {
        for mix in Mix::from_rtmp_message(&header, &stream_name) {
            outgoing.send(Message::binary(mix.to_bytes())).await?;
        }
    }
    if let Some(el) = eventbus_map().get(stream_name) {
        let rx = el.register_receiver();
        // Release the map guard before awaiting on the receiver.
        std::mem::drop(el);
        let rx = rtmp_rx_into_mix_rx(rx, stream_name.to_string());
        futures::pin_mut!(rx);
        while let Some(mix) = StreamExt::next(&mut rx).await {
            outgoing.send(Message::binary(mix.to_bytes())).await?;
        }
    }
    log::info!("WebSocket disconnected: {}, stream_name={}", addr, stream_name);
    Ok(())
}
// Converts the RTMP message stream into a `Mix` stream, guaranteeing that the
// first video frame emitted is a key frame.
fn rtmp_rx_into_mix_rx(rx: Receiver<RtmpMessage>, stream_name: String) -> impl Stream<Item=Mix> {
    // State carried through `unfold`: (receiver, "key frame already seen", name).
    stream::unfold((rx, false, stream_name), |(rx, first_key_frame, stream_name)| async move {
        while let Ok(msg) = rx.recv().await {
            let mixes = Mix::from_rtmp_message(&msg, &stream_name);
            if mixes.is_empty() {
                continue;
            }
            if first_key_frame {
                return Some((stream::iter(mixes), (rx, first_key_frame, stream_name)));
            }
            // Still waiting for the first key frame: drop everything before it.
            let mut mixes = mixes.into_iter().skip_while(|mix| !mix.is_key_frame()).collect::<Vec<Mix>>();
            // Messages are piling up: drop non-key video frames to catch up.
            if rx.len() > 30 {
                mixes.retain(|x| x.is_audio() || x.is_key_frame());
            }
            if mixes.is_empty() {
                continue;
            }
            return Some((stream::iter(mixes), (rx, true, stream_name)));
        }
        None
    }).flatten()
}
/// Mixed media payload: a single video NAL unit or a single AAC ADTS frame.
enum Mix {
    Video(Nalu),
    Audio(ADTS),
}
impl Mix {
    /// Tag byte prepended to video payloads on the wire.
    const VIDEO_FLAG: u8 = 0x00;
    /// Tag byte prepended to audio payloads on the wire.
    const AUDIO_FLAG: u8 = 0x01;
    /// Splits an RTMP message into zero or more `Mix` items.
    ///
    /// Audio conversion requires the stream's cached AAC sequence header; if
    /// it is not available yet, the audio message yields nothing.
    pub fn from_rtmp_message(msg: &RtmpMessage, stream_name: &str) -> Vec<Self> {
        match msg.header.message_type {
            ChunkMessageType::VideoMessage => {
                // `msg` is already a reference; the former `&msg` double-borrow
                // only worked through auto-deref.
                Nalu::from_rtmp_message(msg).into_iter().map(Mix::Video).collect()
            }
            ChunkMessageType::AudioMessage => {
                if let Some(header) = audio_header_map().get(stream_name) {
                    // `flat_map` replaces the clippy-flagged `map(..).flatten()`.
                    AAC::from_rtmp_message(msg, header.value())
                        .into_iter()
                        .flat_map(|x| x.to_adts())
                        .map(Mix::Audio)
                        .collect()
                } else {
                    vec![]
                }
            }
            _ => vec![]
        }
    }
    /// True for video payloads.
    #[allow(unused)]
    pub fn is_video(&self) -> bool {
        matches!(self, Mix::Video(_))
    }
    /// True for audio payloads.
    #[allow(unused)]
    pub fn is_audio(&self) -> bool {
        matches!(self, Mix::Audio(_))
    }
    /// True only for video payloads flagged as key frames.
    pub fn is_key_frame(&self) -> bool {
        if let Mix::Video(nalu) = self {
            nalu.is_key_frame
        } else {
            false
        }
    }
    /// Serializes as a one-byte type tag followed by the raw payload.
    fn to_bytes(&self) -> Vec<u8> {
        match self {
            Mix::Video(nalu) => {
                let mut bytes = vec![Mix::VIDEO_FLAG];
                bytes.extend_from_slice(nalu.as_ref());
                bytes
            }
            Mix::Audio(aac) => {
                let mut bytes = vec![Mix::AUDIO_FLAG];
                bytes.extend_from_slice(&aac.to_bytes());
                bytes
            }
        }
    }
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/eventbus.rs | src/eventbus.rs | use crossbeam_utils::atomic::AtomicCell;
use dashmap::DashMap;
use smol::channel::{Receiver, Sender};
/// A simple broadcast bus: every registered receiver gets a clone of each
/// published event.
pub struct EventBus<E> {
    // Human-readable name used in log lines.
    label: String,
    // Monotonic counter used to hand out unique receiver keys.
    incr_val: AtomicCell<u64>,
    // Live subscriber channels, keyed by the id taken from `incr_val`.
    tx_map: DashMap<u64, Sender<E>>,
}
impl<E: 'static + Clone> EventBus<E> {
    /// Creates an empty bus whose log lines are tagged with `label`.
    pub fn with_label(label: String) -> Self {
        Self {
            label,
            incr_val: Default::default(),
            tx_map: Default::default(),
        }
    }
    /// Broadcasts `val` to every registered receiver, pruning receivers whose
    /// channel has been dropped.
    pub async fn publish(&self, val: E) {
        let mut dropped_senders: Vec<u64> = vec![];
        let mut keys: Vec<u64> = self.tx_map.iter().map(|x| x.key().to_owned()).collect();
        let last_key = keys.pop();
        for key in keys {
            if let Some(entry) = self.tx_map.get(&key) {
                // `.is_err()` replaces the clippy-flagged `if let Err(_)`.
                if entry.send(val.clone()).await.is_err() {
                    dropped_senders.push(key);
                }
            }
        }
        // The last receiver can take `val` by move, saving one clone.
        if let Some(key) = last_key {
            if let Some(entry) = self.tx_map.get(&key) {
                if entry.send(val).await.is_err() {
                    dropped_senders.push(key);
                }
            }
        }
        for key in dropped_senders.iter() {
            self.tx_map.remove(key);
            log::info!("[EventBus][{}] remove receiver {}", self.label, key);
        }
    }
    /// Registers a new subscriber and returns its receiving end.
    pub fn register_receiver(&self) -> Receiver<E> {
        let (tx, rx) = smol::channel::unbounded();
        let key = self.incr_val.fetch_add(1);
        self.tx_map.insert(key, tx);
        log::info!("[EventBus][{}] add receiver {}", self.label, key);
        rx
    }
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/util.rs | src/util.rs | use chrono::Local;
use rand::Rng;
use std::fmt::Debug;
use std::future::Future;
use std::io::Write;
/// Initializes the global `env_logger`, defaulting to `info` level and using
/// a `timestamp level - message` line format.
pub fn init_logger() {
    let env = env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info");
    // Configure the log line format.
    env_logger::Builder::from_env(env)
        .format(|buf, record| {
            writeln!(
                buf,
                "{} {} - {}",
                Local::now().format("%Y-%m-%d %H:%M:%S%.3f"),
                buf.default_styled_level(record.level()),
                &record.args()
            )
        })
        .init();
    log::info!("env_logger initialized.");
}
/// Formats `bytes` as a classic hex dump: 16 hex columns per row (grouped in
/// eights) followed by an ASCII gutter where non-printable bytes show as `.`.
pub fn bytes_hex_format(bytes: &[u8]) -> String {
    const COLUMN: usize = 16;
    const COL_SPACE: &str = "  ";
    // Rough upper bound: ~4 output chars per input byte; avoids repeated growth.
    let mut text = String::with_capacity(bytes.len() * 4);
    let mut i = 0;
    let mut arr: [char; COLUMN] = ['.'; COLUMN];
    for byte in bytes {
        text += &format!("{:02X}", byte);
        // Remember the printable form for the ASCII gutter. `*byte` replaces
        // the former `byte.clone()` (u8 is Copy).
        arr[i % COLUMN] = if byte.is_ascii_graphic() { *byte as char } else { '.' };
        // `push(' ')` replaces the former `format!(" ")` single-space append.
        text.push(' ');
        i += 1;
        // Extra spacer after every 8th column.
        if i % 8 == 0 {
            text += COL_SPACE;
        }
        if i % COLUMN == 0 {
            // Row complete: append the ASCII gutter with the same 8-column
            // spacers inserted into it.
            let mut ascii = arr.iter().collect::<String>();
            let mut index = 8;
            while index < COLUMN {
                ascii.insert_str((index - 8) / 8 * COL_SPACE.len() + index, COL_SPACE);
                index += 8;
            }
            text += &format!(" {}\n", ascii);
        }
    }
    // Pad and flush the final partial row, if any.
    if i % COLUMN > 0 {
        for _ in 0..(COLUMN - 1 - (i - 1) % COLUMN) {
            text += "   ";
        }
        for _ in 0..(COLUMN + 8 - i % COLUMN) / 8 {
            text += COL_SPACE;
        }
        let mut ascii = arr.iter().take(((i - 1) % COLUMN) + 1).collect::<String>();
        let mut index = 8;
        let ascii_len = ascii.len();
        while index < ascii_len {
            ascii.insert_str((index - 8) / 8 * COL_SPACE.len() + index, COL_SPACE);
            index += 8;
        }
        text += &format!(" {}\n", ascii);
    }
    text
}
/// Writes the hex dump of `bytes` to stdout.
pub fn print_hex(bytes: &[u8]) {
    let dump = bytes_hex_format(bytes);
    println!("{}", dump);
}
/// 执行一个新协程,并且在错误时打印错误信息
pub fn spawn_and_log_error<F, E>(fut: F)
where
F: Future<Output = Result<(), E>> + Send + 'static,
E: Debug,
{
smol::spawn(async move {
if let Err(e) = fut.await {
log::error!("spawn future error, {:?}", e)
}
})
.detach();
}
/// Returns `len` random bytes drawn from the thread-local RNG.
pub fn gen_random_bytes(len: u32) -> Vec<u8> {
    let mut rng = rand::thread_rng();
    // Collecting from a sized iterator allocates the Vec once, instead of the
    // former push-per-byte loop with no reserved capacity.
    (0..len).map(|_| rng.gen()).collect()
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/http_flv.rs | src/http_flv.rs | use crate::util::spawn_and_log_error;
use smol::io::{AsyncReadExt, AsyncWriteExt};
use smol::net::{TcpListener, TcpStream};
use smol::stream::StreamExt;
use crate::rtmp_server::{eventbus_map, video_header_map};
use crate::protocol::flv::{FLV_HEADER_ONLY_VIDEO_WITH_TAG0};
use crate::protocol::flv::FlvTag;
use chrono::Local;
use std::convert::TryFrom;
use crate::protocol::rtmp::ChunkMessageType;
/// Binds the HTTP-FLV listener on `addr` and dispatches every incoming
/// connection to `accept` on its own task.
pub async fn run_server(addr: String) -> anyhow::Result<()> {
    let listener = TcpListener::bind(addr).await?;
    let display_url = format!("http://{}", listener.local_addr()?);
    log::info!("HTTP-FLV Server is listening to {}", display_url);
    // One task per client; task errors are logged by the spawner.
    let mut conns = listener.incoming();
    while let Some(conn) = conns.next().await {
        spawn_and_log_error(accept(conn?));
    }
    Ok(())
}
// Take a TCP stream, and convert it into sequential HTTP request / response pairs.
// Serves one HTTP-FLV client: the GET path (minus leading '/') is the stream
// name; FLV tags are streamed over chunked transfer encoding.
async fn accept(mut stream: TcpStream) -> anyhow::Result<()> {
    log::info!("[HTTP] new connection from {}", stream.peer_addr()?);
    // NOTE(review): only up to 1024 bytes of the request are read, and the
    // read length is ignored — a longer request would be truncated. Confirm
    // this is acceptable for the expected clients.
    let mut buffer = [0; 1024];
    stream.read(&mut buffer).await?;
    let req = String::from_utf8_lossy(&buffer[..]);
    let stream_name = get_path(req.as_ref()).map(|x| x.trim_start_matches("/")).unwrap_or_default();
    if let Some(eventbus) = eventbus_map().get(stream_name) {
        let receiver = eventbus.register_receiver();
        // Backslash line-continuations skip the leading indentation, so the
        // header bytes are exactly the visible header lines.
        let header = "HTTP/1.1 200 OK\r\n\
        Server: river\r\n\
        Content-Type: video/x-flv\r\n\
        Connection: close\r\n\
        Transfer-Encoding: chunked\r\n\
        Cache-Control: no-cache\r\n\
        Access-Control-Allow-Origin: *\r\n\
        \r\n\
        ";
        stream.write_all(header.as_bytes()).await?;
        stream.flush().await?;
        write_chunk(&mut stream, &FLV_HEADER_ONLY_VIDEO_WITH_TAG0).await?;
        // send the SPS/PPS (video sequence header) tag first
        if let Some(msg) = video_header_map().get(stream_name) {
            let flv_tag = FlvTag::try_from(msg.value().clone())?;
            write_chunk(&mut stream, flv_tag.as_ref()).await?;
            write_chunk(&mut stream, &(flv_tag.as_ref().len() as u32).to_be_bytes()).await?;
        };
        // send the audio header tag (currently disabled)
        // if let Some(msg) = audio_header_map().get(stream_name) {
        //     let flv_tag = FlvTag::try_from(msg.value().clone())?;
        //     write_chunk(&mut stream, flv_tag.as_ref()).await?;
        //     write_chunk(&mut stream, &(flv_tag.as_ref().len() as u32).to_be_bytes()).await?;
        // };
        let ctx_begin_timestamp = Local::now().timestamp_millis();
        while let Ok(mut msg) = receiver.recv().await {
            if ChunkMessageType::VideoMessage == msg.header.message_type {
                // Rebase timestamps onto this connection's start time.
                msg.header.timestamp = (Local::now().timestamp_millis() - ctx_begin_timestamp) as u32;
                let flv_tag = FlvTag::try_from(msg)?;
                write_chunk(&mut stream, flv_tag.as_ref()).await?;
                write_chunk(&mut stream, &(flv_tag.as_ref().len() as u32).to_be_bytes()).await?;
                if receiver.len() > 2 {
                    log::warn!("receiver.len={}, stream_name={}", receiver.len(), stream_name);
                }
            }
        }
        // Terminating zero-length chunk ends the chunked response.
        write_chunk(&mut stream, b"").await?;
    } else {
        let header = "HTTP/1.1 404 Not Found\r\n\r\n";
        stream.write_all(header.as_bytes()).await?;
        stream.flush().await?;
    }
    Ok(())
}
/// Extracts the request path from a raw HTTP request; returns `Some(path)`
/// only when the request line is a GET.
fn get_path(req: &str) -> Option<&str> {
    let first_line = req.lines().next().unwrap_or_default();
    if first_line.starts_with("GET") {
        // `nth(1)` replaces the clippy-flagged `skip(1).next()` chain.
        return first_line.split_whitespace().nth(1);
    }
    None
}
// Writes one HTTP/1.1 chunked-transfer chunk: hex length line, payload, CRLF.
// An empty `bytes` slice produces the stream-terminating zero chunk.
async fn write_chunk(stream: &mut TcpStream, bytes: &[u8]) -> anyhow::Result<()> {
    stream.write_all(format!("{:X}\r\n", bytes.len()).as_bytes()).await?;
    stream.write_all(bytes).await?;
    stream.write_all(b"\r\n").await?;
    stream.flush().await?;
    Ok(())
}
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/rtmp_server.rs | src/rtmp_server.rs | use amf::amf0::Value;
use amf::Pair;
use byteorder::{BigEndian, ByteOrder};
use chrono::Local;
use dashmap::DashMap;
use once_cell::sync::OnceCell;
use smol::net::{TcpListener, TcpStream};
use smol::prelude::*;
use crate::eventbus::EventBus;
use crate::protocol::rtmp::{
ChunkMessageType, Handshake0, Handshake1, Handshake2, RtmpContext, RtmpMessage, RtmpMetaData,
};
use crate::util::{bytes_hex_format, gen_random_bytes, print_hex, spawn_and_log_error};
use std::convert::TryFrom;
use crate::protocol::fmp4::save_fmp4_background;
/// Global stream-name → broadcast bus map (publishers insert, players subscribe).
pub fn eventbus_map() -> &'static DashMap<String, EventBus<RtmpMessage>> {
    static INSTANCE: OnceCell<DashMap<String, EventBus<RtmpMessage>>> = OnceCell::new();
    // Pass the constructor directly instead of a redundant closure.
    INSTANCE.get_or_init(DashMap::new)
}
/// Global cache of each stream's video sequence-header message (SPS/PPS).
pub fn video_header_map() -> &'static DashMap<String, RtmpMessage> {
    static INSTANCE: OnceCell<DashMap<String, RtmpMessage>> = OnceCell::new();
    INSTANCE.get_or_init(DashMap::new)
}
/// Global cache of each stream's audio (AAC) sequence-header message.
pub fn audio_header_map() -> &'static DashMap<String, RtmpMessage> {
    static INSTANCE: OnceCell<DashMap<String, RtmpMessage>> = OnceCell::new();
    INSTANCE.get_or_init(DashMap::new)
}
/// Global cache of each stream's parsed metadata (`@setDataFrame`).
pub fn meta_data_map() -> &'static DashMap<String, RtmpMetaData> {
    static INSTANCE: OnceCell<DashMap<String, RtmpMetaData>> = OnceCell::new();
    INSTANCE.get_or_init(DashMap::new)
}
/// TCP accept loop for the RTMP server: one detached task per connection.
pub async fn accept_loop(addr: &str) -> anyhow::Result<()> {
    // `addr` is already a `&str`; the former `addr.clone()` only copied the
    // reference and was a no-op.
    let listener = TcpListener::bind(addr).await?;
    log::info!("RTMP Server is listening to {}", addr);
    let mut incoming = listener.incoming();
    while let Some(stream) = incoming.next().await {
        let stream = stream?;
        log::info!("new connection: {}", stream.peer_addr()?);
        spawn_and_log_error(connection_loop(stream));
    }
    Ok(())
}
/// Per-connection RTMP session loop: performs the handshake, then reads and
/// dispatches RTMP messages until the peer disconnects or an error occurs.
/// A publisher feeds the stream's event bus; a player subscribes to it
/// (playback starts on the "set buffer length" user-control event).
async fn connection_loop(stream: TcpStream) -> anyhow::Result<()> {
    let mut ctx = RtmpContext::new(stream);
    handle_rtmp_handshake(&mut ctx).await?;
    loop {
        let message = RtmpMessage::read_from(&mut ctx).await?;
        log::debug!(
            "[peer={}] C->S, [{}] csid={}, msid={}",
            ctx.peer_addr,
            message.message_type_desc(),
            &message.header.csid,
            &message.header.msid
        );
        match message.header.message_type {
            ChunkMessageType::SetChunkSize => {
                ctx.chunk_size = BigEndian::read_u32(&message.body);
                log::info!(
                    "[peer={}] C->S, [{}] value={}",
                    ctx.peer_addr,
                    message.message_type_desc(),
                    &ctx.chunk_size
                );
            }
            ChunkMessageType::UserControlMessage => {
                let bytes = &message.body;
                let event_type = BigEndian::read_u16(&bytes[0..2]);
                // set buffer length
                if event_type == 3 {
                    // equals the 4th field in the create_stream response
                    let stream_id = BigEndian::read_u32(&bytes[2..6]);
                    let buffer_length = BigEndian::read_u32(&bytes[6..10]);
                    log::info!(
                        "[peer={}] C->S, [{}] set buffer length={}, streamId={}",
                        ctx.peer_addr,
                        message.message_type_desc(),
                        buffer_length,
                        stream_id
                    );
                    response_play(&mut ctx, stream_id).await?;
                    if let Some(el) = meta_data_map().get(&ctx.stream_name) {
                        send_meta_data_for_play(&mut ctx, el.value()).await?;
                    } else {
                        log::warn!(
                            "[peer={}] not found meta_data, stream_name={}",
                            ctx.peer_addr,
                            ctx.stream_name
                        );
                        continue;
                    };
                    ctx.ctx_begin_timestamp = Local::now().timestamp_millis();
                    let src_begin_timestamp = meta_data_map()
                        .get(&ctx.stream_name)
                        .map(|x| x.begin_time)
                        .unwrap_or(ctx.ctx_begin_timestamp);
                    // Offset used to rebase the publisher's timestamps onto
                    // this player's session clock.
                    let begin_time_delta = (ctx.ctx_begin_timestamp - src_begin_timestamp + 1000) as u32;
                    log::info!("[RTMP] begin_time_delta={}", begin_time_delta);
                    // send the SPS/PPS (video sequence header) frame
                    if let Some(msg) = video_header_map().get(&ctx.stream_name) {
                        let chunks = msg.split_chunks_bytes(ctx.chunk_size);
                        for chunk in chunks {
                            ctx.write_to_peer(&chunk).await?;
                        }
                    } else {
                        log::warn!(
                            "[peer={}] not found video header, stream_name={}",
                            ctx.peer_addr,
                            ctx.stream_name
                        );
                    };
                    // send the AAC sequence header
                    if let Some(msg) = audio_header_map().get(&ctx.stream_name) {
                        let chunks = msg.split_chunks_bytes(ctx.chunk_size);
                        for chunk in chunks {
                            ctx.write_to_peer(&chunk).await?;
                        }
                    } else {
                        log::warn!(
                            "[peer={}] not found audio header, stream_name={}",
                            ctx.peer_addr,
                            ctx.stream_name
                        );
                    };
                    // Relay the live stream to this player until the bus closes.
                    if let Some(eventbus) = eventbus_map().get(&ctx.stream_name) {
                        let receiver = eventbus.register_receiver();
                        while let Ok(mut msg) = receiver.recv().await {
                            msg.header.timestamp = msg.header.timestamp - begin_time_delta;
                            let chunks = msg.split_chunks_bytes(ctx.chunk_size);
                            for chunk in chunks {
                                ctx.write_to_peer(&chunk).await?;
                            }
                        }
                    } else {
                        log::error!(
                            "[peer={}] not found eventbus, stream_name={}",
                            ctx.peer_addr,
                            ctx.stream_name
                        );
                        Err(anyhow::anyhow!("not found stream {}", ctx.stream_name))?;
                    }
                } else {
                    log::info!(
                        "[peer={}] C->S, [{}] \n{}",
                        ctx.peer_addr,
                        message.message_type_desc(),
                        bytes_hex_format(&message.body)
                    );
                }
            }
            ChunkMessageType::AMF0CommandMessage => {
                let option = message.try_read_body_to_amf0();
                if option.is_none() {
                    log::error!(
                        "[peer={}] C->S, expect AMF0 data, ctx={:#?} \n msg={:#?}",
                        ctx.peer_addr,
                        &ctx,
                        &message
                    );
                    Err(anyhow::anyhow!("[AMF0CommandMessage] expect AMF0 data"))?
                }
                let values = option.unwrap();
                let command = values[0].try_as_str().unwrap();
                for v in &values {
                    log::info!("[peer={}] C->S, {} part: {:?}", ctx.peer_addr, command, v);
                }
                match command {
                    "connect" => {
                        response_connect(&mut ctx).await?;
                    }
                    "createStream" => {
                        response_create_stream(&mut ctx, &values[1]).await?;
                    }
                    "publish" => {
                        ctx.stream_name = values[3].try_as_str().unwrap_or_default().to_string();
                        log::info!("[peer={}] stream_name={}", ctx.peer_addr, ctx.stream_name);
                        // the publisher creates the event bus for this stream
                        eventbus_map().insert(
                            ctx.stream_name.clone(),
                            EventBus::with_label(ctx.stream_name.clone()),
                        );
                        ctx.is_publisher = true;
                        response_publish(&mut ctx).await?;
                    }
                    "play" => {
                        ctx.stream_name = values[3].try_as_str().unwrap_or_default().to_string();
                        log::info!("[peer={}] stream_name={}", ctx.peer_addr, ctx.stream_name);
                    }
                    _ => (),
                }
            }
            ChunkMessageType::AMF0DataMessage => {
                let values = message.try_read_body_to_amf0().unwrap();
                let command = values[0].try_as_str().unwrap();
                for v in &values {
                    if let Value::EcmaArray { entries } = v {
                        log::info!("[peer={}] C->S, [{}] part Array: ", ctx.peer_addr, command);
                        for item in entries {
                            log::info!(
                                "[peer={}] C->S, [{}] item: {:?}",
                                ctx.peer_addr,
                                command,
                                item
                            );
                        }
                    } else {
                        log::info!("[peer={}] C->S, [{}] part: {:?}", ctx.peer_addr, command, v);
                    }
                }
                if command == "@setDataFrame" {
                    let meta_data = RtmpMetaData::try_from(&values[2])?;
                    meta_data_map().insert(ctx.stream_name.clone(), meta_data);
                    log::info!(
                        "[peer={}] C->S, cache meta_data, stream_name={}",
                        ctx.peer_addr,
                        ctx.stream_name
                    );
                }
            }
            ChunkMessageType::VideoMessage => {
                // 0x17 0x00 marks an AVC sequence header: cache it for late joiners.
                if message.body[0] == 0x17 && message.body[1] == 0x00 {
                    let mut message_clone = message.clone();
                    message_clone.header.timestamp = 0;
                    video_header_map().insert(ctx.stream_name.clone(), message_clone);
                    log::info!(
                        "[peer={}] C->S, cache video header, stream_name={}",
                        ctx.peer_addr,
                        ctx.stream_name
                    );
                    save_fmp4_background(&ctx.stream_name, ctx.peer_addr.clone());
                }
                if let Some(eventbus) = eventbus_map().get(&ctx.stream_name) {
                    eventbus.publish(message.clone()).await;
                }
            }
            ChunkMessageType::AudioMessage => {
                // 0xAF 0x00 marks an AAC sequence header: cache it for late joiners.
                if message.body[0] == 0xAF && message.body[1] == 0x00 {
                    let mut message_clone = message.clone();
                    message_clone.header.timestamp = 0;
                    audio_header_map().insert(ctx.stream_name.clone(), message_clone);
                    log::info!(
                        "[peer={}] C->S, cache audio header, stream_name={}",
                        ctx.peer_addr,
                        ctx.stream_name
                    );
                }
                if let Some(eventbus) = eventbus_map().get(&ctx.stream_name) {
                    eventbus.publish(message.clone()).await;
                }
            }
            _ => {
                log::info!(
                    "[peer={}] C->S, [{}] OTHER len={}",
                    ctx.peer_addr,
                    message.message_type_desc(),
                    message.header.message_length
                );
            }
        }
    }
}
/// Handles the RTMP handshake (C0/C1 → S0/S1/S2 → C2).
///
/// Some clients (e.g. OBS) send an ACK message in the middle of the
/// handshake; it is detected by peeking and consumed if present.
async fn handle_rtmp_handshake(ctx: &mut RtmpContext) -> anyhow::Result<()> {
    /* C0/C1 */
    let c0 = ctx.read_exact_from_peer(1).await?[0];
    log::info!("[peer={}] C0, version={}", ctx.peer_addr, c0);
    let c1_vec = ctx.read_exact_from_peer(Handshake1::PACKET_LENGTH).await?;
    let c1 = Handshake1 {
        time: BigEndian::read_u32(&c1_vec[0..4]),
        zero: BigEndian::read_u32(&c1_vec[4..8]),
        random_data: c1_vec[8..Handshake1::PACKET_LENGTH as usize].to_vec(),
    };
    log::info!("[peer={}] C1,time={}, zero={}, last12=0x{:02X?}", ctx.peer_addr, c1.time, c1.zero, &c1_vec[Handshake1::PACKET_LENGTH as usize - 12..]);
    /* S0/S1/S2 */
    ctx.write_to_peer(Handshake0::S0_V3.to_bytes().as_ref())
        .await?;
    log::info!("[peer={}] S0, version={:?}", ctx.peer_addr, Handshake0::S0_V3);
    let s1 = Handshake1 {
        time: (Local::now().timestamp_millis() - ctx.ctx_begin_timestamp) as u32,
        zero: 0,
        random_data: {
            let mut random_bytes = gen_random_bytes(1528);
            random_bytes[0] = 0x0; // first byte forced to 0
            random_bytes
        },
    };
    ctx.write_to_peer(s1.to_bytes().as_ref()).await?;
    log::info!("[peer={}] S1", ctx.peer_addr);
    let s2 = Handshake2 {
        time: c1.time,
        time2: 0,
        random_echo: c1.random_data,
    };
    ctx.write_to_peer(s2.to_bytes().as_ref()).await?;
    log::info!("[peer={}] S2", ctx.peer_addr);
    // Peek: if the next bytes do not echo S1, the client interleaved an
    // RTMP message (e.g. an ACK) before C2 — consume it first.
    let peek_len = 12;
    let peek_vec = ctx.peek_exact_from_peer(peek_len).await?;
    if peek_vec != &s1.to_bytes()[0..peek_len as usize] {
        log::info!("[peer={}] ACK in handshake, peek=0x{:02X?}, s1_part=0x{:02X?}", ctx.peer_addr, peek_vec, &s1.to_bytes()[0..peek_len as usize]);
        let _ = RtmpMessage::read_from(ctx).await?;
    }
    /* C2*/
    let c2_vec = ctx.read_exact_from_peer(Handshake2::PACKET_LENGTH).await?;
    let c2 = Handshake2 {
        time: BigEndian::read_u32(&c2_vec[0..4]),
        time2: BigEndian::read_u32(&c2_vec[4..8]),
        random_echo: c2_vec[8..Handshake2::PACKET_LENGTH as usize].to_vec(),
    };
    log::info!("[peer={}] C2, time=0x{:02X?}, time2=0x{:02X?}", ctx.peer_addr, &c2_vec[0..4], &c2_vec[4..8]);
    // NOTE(review): a non-conforming client that echoes the wrong bytes will
    // panic this task via assert_eq — consider returning an error instead.
    assert_eq!(s1.random_data, c2.random_echo);
    ctx.recv_bytes_num += 1 + Handshake1::PACKET_LENGTH + Handshake2::PACKET_LENGTH;
    Ok(())
}
/// Respond to the client's `connect` command.
///
/// Sends, in order: Window Acknowledgement Size, Set Peer Bandwidth,
/// Set Chunk Size, and the AMF0 `_result` command announcing
/// `NetConnection.Connect.Success`.
async fn response_connect(ctx: &mut RtmpContext) -> anyhow::Result<()> {
    {
        // Window Acknowledgement Size (message type 0x05), window = 0x1000.
        let ack_window_size = [
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x10, 0x00,
        ];
        ctx.write_to_peer(ack_window_size.as_ref()).await?;
        log::info!("[peer={}] S->C, ack_window_size_packet:", ctx.peer_addr);
        print_hex(ack_window_size.to_vec().as_ref());
    }
    // Bump the chunk size away from the RTMP default of 128 bytes.
    if ctx.chunk_size == 128 {
        ctx.chunk_size = 4096;
    }
    {
        // Set Peer Bandwidth (message type 0x06).
        let mut set_peer_bandwidth = vec![
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00,
        ];
        set_peer_bandwidth.append(&mut ctx.chunk_size.to_be_bytes().to_vec());
        // Limit type: 0-Hard, 1-Soft, 2-Dynamic
        set_peer_bandwidth.push(0x01);
        ctx.write_to_peer(&set_peer_bandwidth).await?;
        log::info!("[peer={}] S->C, set_peer_bandwidth:", ctx.peer_addr);
        print_hex(set_peer_bandwidth.to_vec().as_ref());
    }
    {
        // Set Chunk Size (message type 0x01).
        let mut set_chunk_size = vec![
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00,
        ];
        set_chunk_size.append(&mut ctx.chunk_size.to_be_bytes().to_vec());
        ctx.write_to_peer(set_chunk_size.as_ref()).await?;
        log::info!("[peer={}] S->C, set_chunk_size:", ctx.peer_addr);
        print_hex(set_chunk_size.to_vec().as_ref());
    }
    {
        // `_result` command (type 0x14, AMF0). Bytes 4..7 of the header hold
        // the 24-bit message length and are patched after the body is built.
        let mut response_result: Vec<u8> = vec![
            0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x14, 0x00, 0x00, 0x00, 0x00,
        ];
        amf::amf0::Value::String("_result".to_string()).write_to(&mut response_result)?;
        amf::amf0::Value::Number(1.0).write_to(&mut response_result)?;
        amf::amf0::Value::Object {
            class_name: None,
            entries: vec![
                Pair {
                    key: "fmsVer".to_owned(),
                    value: amf::amf0::Value::String("FMS/3,0,1,123".to_owned()),
                },
                Pair {
                    key: "capabilities".to_owned(),
                    value: amf::amf0::Value::Number(31.0),
                },
            ],
        }
        .write_to(&mut response_result)?;
        amf::amf0::Value::Object {
            class_name: None,
            entries: vec![
                Pair {
                    key: "level".to_owned(),
                    value: amf::amf0::Value::String("status".to_owned()),
                },
                Pair {
                    key: "code".to_owned(),
                    value: amf::amf0::Value::String("NetConnection.Connect.Success".to_owned()),
                },
                Pair {
                    key: "description".to_owned(),
                    value: amf::amf0::Value::String("Connection succeeded.".to_owned()),
                },
                Pair {
                    key: "objectEncoding".to_owned(),
                    value: amf::amf0::Value::Number(0.0),
                },
            ],
        }
        .write_to(&mut response_result)?;
        // Patch the 24-bit big-endian message length (header bytes 4..7).
        // Patching only byte 6, as before, silently corrupts the header once
        // the AMF payload reaches 256 bytes.
        let be_bytes = ((response_result.len() - 12) as u16).to_be_bytes();
        response_result[5] = be_bytes[0];
        response_result[6] = be_bytes[1];
        ctx.write_to_peer(response_result.as_ref()).await?;
        log::info!("[peer={}] S->C, response_result:", ctx.peer_addr);
        print_hex(response_result.as_ref());
    }
    Ok(())
}
/// Respond to the client's `createStream` command with `_result`,
/// echoing the client's transaction number and returning stream id 9.
async fn response_create_stream(
    ctx: &mut RtmpContext,
    prev_command_number: &amf::amf0::Value,
) -> anyhow::Result<()> {
    // Chunk header: csid=3, type=0x14 (AMF0 command), stream id 0.
    let mut response_result: Vec<u8> = vec![
        0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x14, 0x00, 0x00, 0x00, 0x00,
    ];
    amf::amf0::Value::String("_result".to_string()).write_to(&mut response_result)?;
    // Echo the transaction number from the createStream request.
    prev_command_number.write_to(&mut response_result)?;
    amf::amf0::Value::Null.write_to(&mut response_result)?;
    // The stream id handed back to the client.
    amf::amf0::Value::Number(9.0).write_to(&mut response_result)?;
    // Patch the 24-bit big-endian message length (header bytes 4..7);
    // patching only byte 6 would break for payloads >= 256 bytes.
    let be_bytes = ((response_result.len() - 12) as u16).to_be_bytes();
    response_result[5] = be_bytes[0];
    response_result[6] = be_bytes[1];
    ctx.write_to_peer(response_result.as_ref()).await?;
    log::info!("[peer={}] S->C, response_result:", ctx.peer_addr);
    print_hex(response_result.as_ref());
    Ok(())
}
/// Confirm the start of publishing with an `onStatus`
/// `NetStream.Publish.Start` message.
async fn response_publish(ctx: &mut RtmpContext) -> anyhow::Result<()> {
    // Chunk header: csid=5, type=0x14 (AMF0 command), stream id 1.
    let mut response_result: Vec<u8> = vec![
        0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x14, 0x00, 0x00, 0x00, 0x01,
    ];
    amf::amf0::Value::String("onStatus".to_string()).write_to(&mut response_result)?;
    amf::amf0::Value::Number(1.0).write_to(&mut response_result)?;
    amf::amf0::Value::Null.write_to(&mut response_result)?;
    amf::amf0::Value::Object {
        class_name: None,
        entries: vec![
            Pair {
                key: "level".to_owned(),
                value: amf::amf0::Value::String("status".to_owned()),
            },
            Pair {
                key: "code".to_owned(),
                value: amf::amf0::Value::String("NetStream.Publish.Start".to_owned()),
            },
            Pair {
                key: "description".to_owned(),
                value: amf::amf0::Value::String("Start publishing".to_owned()),
            },
        ],
    }
    .write_to(&mut response_result)?;
    // Patch the 24-bit big-endian message length (header bytes 4..7);
    // patching only byte 6 would break for payloads >= 256 bytes.
    let be_bytes = ((response_result.len() - 12) as u16).to_be_bytes();
    response_result[5] = be_bytes[0];
    response_result[6] = be_bytes[1];
    ctx.write_to_peer(response_result.as_ref()).await?;
    log::info!("[peer={}] S->C, Start publishing:", ctx.peer_addr);
    print_hex(response_result.as_ref());
    Ok(())
}
/// Respond to the client's `play` command.
///
/// Sends, in order: a Stream Begin user-control event, an `onStatus`
/// `NetStream.Play.Start` message, and the `|RtmpSampleAccess` data
/// message granting sample access.
async fn response_play(ctx: &mut RtmpContext, stream_id: u32) -> anyhow::Result<()> {
    {
        // User Control Message (type 0x04), event type 0 = Stream Begin.
        // NOTE(review): the payload carries only the 2-byte event type; the
        // RTMP spec expects a 4-byte stream ID to follow, and `stream_id` is
        // only logged here — confirm players tolerate this.
        let rs: Vec<u8> = vec![
            0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
        ];
        ctx.write_to_peer(rs.as_ref()).await?;
        log::info!(
            "[peer={}] S->C, Stream Begin, streamId={}",
            ctx.peer_addr,
            &stream_id
        );
    }
    {
        // onStatus NetStream.Play.Start (type 0x14, AMF0 command).
        let mut response_result: Vec<u8> = vec![
            0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01,
        ];
        amf::amf0::Value::String("onStatus".to_string()).write_to(&mut response_result)?;
        amf::amf0::Value::Number(0.0).write_to(&mut response_result)?;
        amf::amf0::Value::Null.write_to(&mut response_result)?;
        amf::amf0::Value::Object {
            class_name: None,
            entries: vec![
                Pair {
                    key: "level".to_owned(),
                    value: amf::amf0::Value::String("status".to_owned()),
                },
                Pair {
                    key: "code".to_owned(),
                    value: amf::amf0::Value::String("NetStream.Play.Start".to_owned()),
                },
                Pair {
                    key: "description".to_owned(),
                    value: amf::amf0::Value::String("Start live".to_owned()),
                },
            ],
        }
        .write_to(&mut response_result)?;
        // Patch the 24-bit big-endian message length (header bytes 4..7);
        // patching only byte 6 would break for payloads >= 256 bytes.
        let be_bytes = ((response_result.len() - 12) as u16).to_be_bytes();
        response_result[5] = be_bytes[0];
        response_result[6] = be_bytes[1];
        ctx.write_to_peer(response_result.as_ref()).await?;
        log::info!("[peer={}] S->C, Start play:", ctx.peer_addr);
        print_hex(response_result.as_ref());
    }
    {
        // |RtmpSampleAccess data message: allow audio and video sampling.
        let mut response_result: Vec<u8> = vec![
            0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01,
        ];
        amf::amf0::Value::String("|RtmpSampleAccess".to_string()).write_to(&mut response_result)?;
        amf::amf0::Value::Boolean(true).write_to(&mut response_result)?;
        amf::amf0::Value::Boolean(true).write_to(&mut response_result)?;
        // Same two-byte length patch as above.
        let be_bytes = ((response_result.len() - 12) as u16).to_be_bytes();
        response_result[5] = be_bytes[0];
        response_result[6] = be_bytes[1];
        ctx.write_to_peer(response_result.as_ref()).await?;
        log::info!("[peer={}] S->C, Start play:", ctx.peer_addr);
        print_hex(response_result.as_ref());
    }
    Ok(())
}
/// # Send the onMetaData script message to the peer
///
/// Once `publish` or `play` has been handled, media transfer begins.
/// Media data comes in three kinds: script data, video data and audio
/// data.
///
/// The first thing to transmit is the script message `onMetaData`
/// (also called metadata), which describes the encoding parameters of
/// the audio and video streams.
async fn send_meta_data_for_play(
    ctx: &mut RtmpContext,
    meta_data: &RtmpMetaData,
) -> anyhow::Result<()> {
    // Chunk header: csid=5, type=0x12 (AMF0 data message), stream id 1.
    // Bytes 5..7 (low two bytes of the 24-bit message length) are patched
    // once the AMF body has been serialized.
    let mut packet: Vec<u8> = vec![
        0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x01,
    ];
    amf::amf0::Value::String("onMetaData".to_string()).write_to(&mut packet)?;
    // Small constructors to keep the metadata object readable.
    let str_pair = |key: &str, value: String| Pair {
        key: key.to_owned(),
        value: amf::amf0::Value::String(value),
    };
    let num_pair = |key: &str, value| Pair {
        key: key.to_owned(),
        value: amf::amf0::Value::Number(value),
    };
    amf::amf0::Value::Object {
        class_name: None,
        entries: vec![
            str_pair("Server", "RIVER".to_owned()),
            num_pair("width", meta_data.width),
            num_pair("height", meta_data.height),
            num_pair("displayWidth", meta_data.width),
            num_pair("displayHeight", meta_data.height),
            num_pair("duration", meta_data.duration),
            num_pair("framerate", meta_data.frame_rate),
            num_pair("fps", meta_data.frame_rate),
            str_pair("videocodecid", meta_data.video_codec_id.to_string()),
            num_pair("videodatarate", meta_data.video_data_rate),
            str_pair("audiocodecid", meta_data.audio_codec_id.to_string()),
            num_pair("audiodatarate", meta_data.audio_data_rate),
            str_pair("profile", Default::default()),
            str_pair("level", Default::default()),
        ],
    }
    .write_to(&mut packet)?;
    // Write the payload length (big-endian) into header bytes 5 and 6.
    let len_bytes: [u8; 2] = ((packet.len() - 12) as u16).to_be_bytes();
    packet[5] = len_bytes[0];
    packet[6] = len_bytes[1];
    ctx.write_to_peer(packet.as_ref()).await?;
    log::info!("[peer={}] S->C, Start play:", ctx.peer_addr);
    print_hex(packet.as_ref());
    Ok(())
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/main.rs | src/main.rs | use clap::crate_version;
use clap::Clap;
use river::{ws_h264, ws_fmp4, util, http_flv, http_player};
use river::rtmp_server::accept_loop;
use river::util::spawn_and_log_error;
#[derive(Clap, Debug)]
#[clap(version = crate_version ! (), author = "Ninthakeey <ninthakeey@hotmail.com>")]
struct Opts {
#[clap(long, default_value = "0", about = "disabled if port is 0")]
http_flv_port: u16,
#[clap(long, default_value = "18000", about = "disabled if port is 0")]
http_player_port: u16,
#[clap(long, default_value = "18001", about = "disabled if port is 0")]
ws_h264_port: u16,
#[clap(long, default_value = "0", about = "disabled if port is 0")]
ws_fmp4_port: u16,
#[clap(long, default_value = "1935")]
rtmp_port: u16,
}
fn main() -> anyhow::Result<()> {
util::init_logger();
let opts: Opts = Opts::parse();
log::info!("{:?}", &opts);
let player_html = include_str!("../static/player.html");
let player_html = player_html.replace("{/*$INJECTED_CONTEXT*/}", &format!("{{port: {}}}", opts.ws_h264_port));
if opts.http_player_port > 0 {
spawn_and_log_error(http_player::run_server(format!("0.0.0.0:{}", opts.http_player_port), player_html));
}
if opts.http_flv_port > 0 {
spawn_and_log_error(http_flv::run_server(format!("0.0.0.0:{}", opts.http_flv_port)));
}
if opts.ws_h264_port > 0 {
spawn_and_log_error(ws_h264::run_server(format!("0.0.0.0:{}", opts.ws_h264_port)));
}
if opts.ws_fmp4_port > 0 {
spawn_and_log_error(ws_fmp4::run_server(format!("0.0.0.0:{}", opts.ws_fmp4_port)));
}
smol::block_on(accept_loop(&format!("0.0.0.0:{}", opts.rtmp_port)))
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/http_player.rs | src/http_player.rs | use smol::io::{AsyncWriteExt, AsyncReadExt};
use smol::net::{TcpListener, TcpStream};
use smol::stream::StreamExt;
use crate::util::spawn_and_log_error;
pub async fn run_server(addr: String, player_html: String) -> anyhow::Result<()> {
// Open up a TCP connection and create a URL.
let listener = TcpListener::bind(addr).await?;
let addr = format!("http://{}", listener.local_addr()?);
log::info!("HTTP-Player Server is listening to {}", addr);
// For each incoming TCP connection, spawn a task and call `accept`.
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
spawn_and_log_error(accept(stream, player_html.clone()));
}
Ok(())
}
async fn accept(mut stream: TcpStream, player_html: String) -> anyhow::Result<()> {
log::info!("[HTTP] new connection from {}", stream.peer_addr()?);
let mut buffer = [0; 1024];
stream.read(&mut buffer).await?;
let header = format!("HTTP/1.1 200 OK\r\n\
Content-Type: text/html;charset=UTF-8\r\n\
Connection: close\r\n\
Content-Length: {}\r\n\
Cache-Control: no-cache\r\n\
Access-Control-Allow-Origin: *\r\n\
\r\n\
{}", player_html.len(), player_html);
stream.write_all(header.as_bytes()).await?;
stream.flush().await?;
Ok(())
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/aac.rs | src/protocol/aac.rs | use crate::protocol::rtmp::{RtmpMessage, ChunkMessageType};
/// # AAC rtmp头部信息封装
/// AAC音频文件的每一帧都由一个ADTS头和AAC ES(AAC音频数据)组成。
///
/// 以下是AAC音频数据格式
/// 第一个byte包含音频的编码参数:
/// ```
/// 1-4bit: audioCodeId
/// 5-6bit: 采样率 00 5.5KHZ, 01 11KHZ, 10 22KHZ, 11 44KHZ
/// 7 bit: 采样长度 0 8bit, 1 16bit
/// 8 bit: 立体声 0 单声道, 1 双声道
/// ```
/// ## 第一帧 AAC sequence header
/// 第一帧共4个byte:
/// 1. 第1个byte : audioCodeId=10,如果是44KHZ、16bit、双声道,
/// 第一个byte是0xAF。如果实际采样率不是5.5KHZ、11KHZ、22KHZ、44KHZ,
/// 就选一个接近的。
/// 2. 第2个byte : 0x00 表示是sequence header
/// 3. 第3-4个byte : 0x14,0x10
///
/// 其他帧 AAC raw data
/// 1) 第1个byte : audioCodeId=10,如果是44KHZ、16bit、双声道,
/// 第一个byte是0xAF。如果实际采样率不是5.5KHZ、11KHZ、22KHZ、44KHZ,
/// 就选一个接近的。
/// 2) 第2个byte : 0x01 表示是raw data
/// 3) 第3byte开始 : 去掉前7个byte的AAC头之后的AAC数据。
pub struct AAC {
inner: Vec<u8>,
}
#[allow(unused)]
impl AAC {
pub fn from_rtmp_message(msg: &RtmpMessage, header: &RtmpMessage) -> Option<Self> {
if msg.header.message_type != ChunkMessageType::AudioMessage {
return None;
}
Some(Self {
inner: msg.body.to_owned(),
})
}
pub fn is_sequence_header(&self) -> bool {
self.inner[1] == 0x00
}
pub fn is_raw_data(&self) -> bool {
self.inner[1] != 0x00
}
/// raw_data -> ADTS
pub fn to_adts(&self) -> Option<ADTS> {
if self.is_raw_data() {
Some(ADTS::with_data(self.inner[2..].to_vec()))
} else {
None
}
}
}
impl AsRef<[u8]> for AAC {
fn as_ref(&self) -> &[u8] {
&self.inner
}
}
pub struct ADTS {
// 1 bit; 0: MPEG-4, 1: MPEG-2
pub id: bool,
/// 2 bits; 0-Main Profile, 1-Low Complexity, 2-Scalable Sampling Rate
pub profile: u8,
/// 4 bits; 15 is forbidden
pub sampling_frequency_index: u8,
/// set to 0 when encoding, ignore when decoding
pub private_bit: bool,
/// 3 bits;
pub channel_configuration: u8,
pub copyright_identification_bit: bool,
pub copyright_identification_start: bool,
// 13 bits;
pub aac_frame_length: u16,
// 2 bits; 表示ADTS帧中有N + 1个AAC原始帧
pub num_of_raw_data_blocks_in_frame: u8,
// 音频数据
pub raw_data: Vec<u8>,
}
impl ADTS {
pub const HEADER_LEN: u16 = 7;
/// 12 bits
pub const SYNC_WORD: u16 = 0xFFF;
/// 2 bits
pub const LAYER: u8 = 0;
/// 1 bit
pub const PROTECTION_ABSENT: bool = true;
/// set to 0 when encoding, ignore when decoding,
pub const ORIGINALITY: bool = false;
/// set to 0 when encoding, ignore when decoding
pub const HOME: bool = false;
/// 11bits; 0x7FF 说明是码率可变的码流
pub const ADTS_BUFFER_FULLNESS: u16 = 0x7FF;
/// `len` must be less than or equals to 2^13 - 7
pub fn with_data(data: Vec<u8>) -> Self {
if data.len() + ADTS::HEADER_LEN as usize > (2usize << 13) {
unreachable!("ADTS len must be less than or equals to 2^13 - 7");
}
Self {
id: false,
profile: 1,
sampling_frequency_index: 4,
private_bit: false,
channel_configuration: 1,
copyright_identification_bit: false,
copyright_identification_start: false,
aac_frame_length: data.len() as u16 + ADTS::HEADER_LEN,
num_of_raw_data_blocks_in_frame: 0,
raw_data: data,
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut data = vec![0u8; 7];
data[0] = (ADTS::SYNC_WORD >> 4) as u8;
data[1] = 0xF0 | bool2u8(self.id) << 3 | ADTS::LAYER << 1 | bool2u8(ADTS::PROTECTION_ABSENT);
data[2] = self.profile << 6
| self.sampling_frequency_index << 2
| bool2u8(self.private_bit) << 1
| self.channel_configuration >> 2;
data[3] = self.channel_configuration << 6
| bool2u8(ADTS::ORIGINALITY) << 5
| bool2u8(ADTS::HOME) << 4
| bool2u8(self.copyright_identification_bit) << 3
| bool2u8(self.copyright_identification_start) << 2
| (self.aac_frame_length >> 11) as u8;
data[4] = (self.aac_frame_length >> 3) as u8;
data[5] = (self.aac_frame_length as u8) << 5 | (ADTS::ADTS_BUFFER_FULLNESS >> 6) as u8;
data[6] = (ADTS::ADTS_BUFFER_FULLNESS as u8) << 2 | self.num_of_raw_data_blocks_in_frame;
data.extend_from_slice(&self.raw_data);
data
}
}
fn bool2u8(v: bool) -> u8 {
if v { 0x01 } else { 0x00 }
} | rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/fmp4.rs | src/protocol/fmp4.rs | use std::{u32, vec};
use crate::rtmp_server::{eventbus_map, meta_data_map, video_header_map};
use crate::util::spawn_and_log_error;
use smol::channel::Receiver;
use crate::protocol::rtmp::RtmpMessage;
use crate::protocol::h264::Nalu;
use smol::io::AsyncWriteExt;
/// fps = timescale / duration
#[derive(Clone)]
pub struct Track {
pub id: u32,
pub duration: u32,
pub timescale: u32,
pub width: u16,
pub height: u16,
pub volume: u16,
pub dts: u32,
pub pps_list: Vec<Vec<u8>>,
pub sps_list: Vec<Vec<u8>>,
}
impl Track {
pub const DEFAULT_TIMESCALE: u32 = 1000_000;
pub const DEFAULT_ID: u32 = 1;
}
impl Default for Track {
fn default() -> Self {
Self {
id: Track::DEFAULT_ID,
duration: 0,
timescale: Track::DEFAULT_TIMESCALE,
width: 0,
height: 0,
volume: 0,
dts: 0,
pps_list: vec![],
sps_list: vec![],
}
}
}
#[derive(Clone)]
pub struct Sample {
pub size: u32,
pub duration: u32,
pub cts: u32,
pub flags: Flags,
}
impl Sample {
pub fn new(size: u32, duration: u32, cts: u32, key_frame: bool) -> Self {
Self {
size,
duration,
cts,
flags: Flags {
is_leading: 0,
is_depended_on: 0,
has_redundancy: 0,
depands_on: if key_frame { 2 } else { 1 },
padding_value: 0,
is_non_sync: if key_frame { 0 } else { 1 },
degrad_prio: 0,
},
}
}
}
#[derive(Clone)]
pub struct Flags {
pub is_leading: u8,
pub is_depended_on: u8,
pub has_redundancy: u8,
pub depands_on: u8,
pub padding_value: u8,
pub is_non_sync: u8,
pub degrad_prio: u16,
}
impl Flags {
pub fn as_byte(&self) -> u8 {
self.depands_on << 4 | self.is_depended_on << 2 | self.has_redundancy as u8
}
/// in trun box
pub fn as_four_byte(&self) -> [u8; 4] {
[
self.is_leading << 2 | self.depands_on,
self.is_depended_on << 6 | self.has_redundancy << 6 | self.padding_value << 1 | self.is_non_sync,
(self.degrad_prio >> 8) as u8,
self.degrad_prio as u8,
]
}
}
pub struct Fmp4Encoder {
track: Track,
sn: u32,
}
impl Fmp4Encoder {
pub fn new(track: Track) -> Self {
Self {
track,
sn: 0,
}
}
pub fn init_segment(&self) -> Vec<u8> {
let mut ftyp = ftyp();
let mut movie = moov(&vec![self.track.clone()], Track::DEFAULT_TIMESCALE, self.track.timescale);
let total_len = ftyp.len() + movie.len();
let mut buffer = Vec::with_capacity(total_len);
buffer.append(&mut ftyp);
buffer.append(&mut movie);
buffer
}
pub fn wrap_frame(&mut self, data: &[u8], key_frame: bool) -> Vec<u8> {
let sample = Sample::new(
data.len() as u32,
self.track.duration,
0,
key_frame,
);
let mut buffer = moof(self.sn, self.track.dts, &self.track, &vec![sample]);
buffer.append(&mut mdat(data));
self.track.dts += self.track.duration;
self.sn += 1;
buffer
}
}
fn moof(sn: u32, base_media_decode_time: u32, track: &Track, samples: &[Sample]) -> Vec<u8> {
mp4_box(b"moof", vec![&mfhd(sn), &traf(track, base_media_decode_time, samples)])
}
/// movie data
fn mdat(data: &[u8]) -> Vec<u8> {
mp4_box(b"mdat", vec![data])
}
fn mp4_box(box_type: &[u8; 4], payloads: Vec<&[u8]>) -> Vec<u8> {
let size: u32 = 8 + payloads.iter().map(|x| x.len() as u32).sum::<u32>();
let mut buffer = Vec::with_capacity(size as usize);
buffer.extend_from_slice(&size.to_be_bytes());
buffer.extend_from_slice(box_type);
for p in payloads {
buffer.extend_from_slice(p);
}
buffer
}
fn mfhd(sn: u32) -> Vec<u8> {
let bytes: [u8; 8] = [
0x00,
0x00, 0x00, 0x00, // flags
(sn >> 24) as u8,
(sn >> 16) as u8,
(sn >> 8) as u8,
sn as u8, // sequence_number
];
mp4_box(b"mfhd", vec![&bytes])
}
fn traf(track: &Track, base_media_decode_time: u32, samples: &[Sample]) -> Vec<u8> {
let sample_dependency_table = sdtp(samples);
let id = track.id;
let tfhd = {
let bytes: [u8; 8] = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
(id >> 24) as u8,
(id >> 16) as u8,
(id >> 8) as u8,
(id as u8), // track_ID
];
mp4_box(b"tfhd", vec![&bytes])
};
let tfdt = {
let bytes: [u8; 8] = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
(base_media_decode_time >> 24) as u8,
(base_media_decode_time >> 16) as u8,
(base_media_decode_time >> 8) as u8,
(base_media_decode_time as u8), // baseMediaDecodeTime
];
mp4_box(b"tfdt", vec![&bytes])
};
let trun = trun(track, sample_dependency_table.len() as u32 +
16 + // tfhd
16 + // tfdt
8 + // traf header
16 + // mfhd
8 + // moof header
8, samples);
mp4_box(b"traf", vec![&tfhd, &tfdt, &trun, &sample_dependency_table])
}
fn trun(_track: &Track, offset: u32, samples: &[Sample]) -> Vec<u8> {
let sample_count = samples.len() as u32;
let data_offset = offset + 8 + 12 + 16 * sample_count;
let mut buffer = vec![];
buffer.push(0x00); // version 0
buffer.extend_from_slice(&[0x00, 0x0F, 0x01]); // flags
buffer.extend_from_slice(&sample_count.to_be_bytes());
buffer.extend_from_slice(&data_offset.to_be_bytes());
for s in samples {
buffer.extend_from_slice(&s.duration.to_be_bytes());
buffer.extend_from_slice(&s.size.to_be_bytes());
buffer.extend_from_slice(&s.flags.as_four_byte());
buffer.extend_from_slice(&s.cts.to_be_bytes());
}
mp4_box(b"trun", vec![&buffer])
}
fn sdtp(samples: &[Sample]) -> Vec<u8> {
let mut buffer = Vec::with_capacity(samples.len() + 4);
// leave the full box header (4 bytes) all zero
buffer.extend_from_slice(&[0x00, 0x00, 0x00, 0x00, ]);
for s in samples {
buffer.push(s.flags.as_byte());
}
mp4_box(b"sdtp", vec![&buffer])
}
/// file type
fn ftyp() -> Vec<u8> {
const MAJOR_BRAND: [u8; 4] = *b"isom";
const MINOR_VERSION: [u8; 4] = [0, 0, 0, 1];
const AVC_BRAND: [u8; 4] = *b"avc1";
mp4_box(b"ftyp", vec![&MAJOR_BRAND, &MINOR_VERSION, &MAJOR_BRAND, &AVC_BRAND])
}
fn mvhd(timescale: u32, duration: u32) -> Vec<u8> {
let bytes = vec![
0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01, // creation_time
0x00, 0x00, 0x00, 0x02, // modification_time
(timescale >> 24) as u8,
(timescale >> 16) as u8,
(timescale >> 8) as u8,
timescale as u8, // timescale
(duration >> 24) as u8,
(duration >> 16) as u8,
(duration >> 8) as u8,
duration as u8, // duration
0x00, 0x01, 0x00, 0x00, // 1.0 rate
0x01, 0x00, // 1.0 volume
0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // pre_defined
0xff, 0xff, 0xff, 0xff, // next_track_ID
];
mp4_box(b"mvhd", vec![&bytes])
}
fn trak(track: &Track) -> Vec<u8> {
mp4_box(b"trak", vec![&tkhd(&track), &mdia(&track)])
}
fn tkhd(track: &Track) -> Vec<u8> {
let bytes = vec![
0x00, // version 0
0x00, 0x00, 0x07, // flags
0x00, 0x00, 0x00, 0x00, // creation_time
0x00, 0x00, 0x00, 0x00, // modification_time
(track.id >> 24) as u8,
(track.id >> 16) as u8,
(track.id >> 8) as u8,
track.id as u8, // track_ID
0x00, 0x00, 0x00, 0x00, // reserved
(track.duration >> 24) as u8,
(track.duration >> 16) as u8,
(track.duration >> 8) as u8,
track.duration as u8, // duration
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, // layer
0x00, 0x00, // alternate_group
(track.volume >> 0) as u8, (((track.volume % 1) * 10) >> 0) as u8, // track volume
0x00, 0x00, // reserved
0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
(track.width >> 8) as u8,
track.width as u8,
0x00, 0x00, // width
(track.height >> 8) as u8,
track.height as u8,
0x00, 0x00, // height
];
mp4_box(b"tkhd", vec![&bytes])
}
fn mdia(track: &Track) -> Vec<u8> {
mp4_box(b"mdia", vec![&mdhd(track.timescale, track.duration), &hdlr(), &minf(track)])
}
fn minf(track: &Track) -> Vec<u8> {
const VMHD: [u8; 12] = [
0x00, // version
0x00, 0x00, 0x01, // flags
0x00, 0x00, // graphicsmode
0x00, 0x00,
0x00, 0x00,
0x00, 0x00, // opcolor
];
const DREF: [u8; 20] = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01, // entry_count
0x00, 0x00, 0x00, 0x0c, // entry_size
0x75, 0x72, 0x6c, 0x20, // 'url' type
0x00, // version 0
0x00, 0x00, 0x01, // entry_flags
];
let dinf = mp4_box(b"dinf", vec![&mp4_box(b"dref", vec![&DREF])]);
mp4_box(b"minf", vec![&mp4_box(b"vmhd", vec![&VMHD]), &dinf, &stbl(&track)])
}
fn mdhd(timescale: u32, duration: u32) -> Vec<u8> {
let bytes = vec![
0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x02, // creation_time
0x00, 0x00, 0x00, 0x03, // modification_time
(timescale >> 24) as u8,
(timescale >> 16) as u8,
(timescale >> 8) as u8,
timescale as u8, // timescale
(duration >> 24) as u8,
(duration >> 16) as u8,
(duration >> 8) as u8,
duration as u8, // duration
0x55, 0xc4, // 'und' language (undetermined)
0x00, 0x00,
];
mp4_box(b"mdhd", vec![&bytes])
}
fn hdlr() -> Vec<u8> {
const VIDEO_HDLR: [u8; 37] = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // pre_defined
0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00, // reserved
0x56, 0x69, 0x64, 0x65,
0x6f, 0x48, 0x61, 0x6e,
0x64, 0x6c, 0x65, 0x72, 0x00, // name: 'VideoHandler'
];
mp4_box(b"hdlr", vec![&VIDEO_HDLR])
}
fn stbl(track: &Track) -> Vec<u8> {
const STCO: [u8; 8] = [
0x00, // version
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // entry_count
];
const STTS: [u8; 8] = STCO;
const STSC: [u8; 8] = STCO;
const STSZ: [u8; 12] = [
0x00, // version
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x00, // sample_size
0x00, 0x00, 0x00, 0x00, // sample_count
];
mp4_box(b"stbl", vec![
&stsd(track),
&mp4_box(b"stts", vec![&STTS]),
&mp4_box(b"stsc", vec![&STSC]),
&mp4_box(b"stsz", vec![&STSZ]),
&mp4_box(b"stco", vec![&STCO])
])
}
fn stsd(track: &Track) -> Vec<u8> {
const STSD: [u8; 8] = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
0x00, 0x00, 0x00, 0x01
];
mp4_box(b"stsd", vec![&STSD, &avc1(track)])
}
fn avc1(track: &Track) -> Vec<u8> {
let mut sps = vec![];
let mut pps = vec![];
for item in &track.sps_list {
let length = item.len() as u16;
sps.extend_from_slice(&length.to_be_bytes());
sps.extend_from_slice(&item);
}
for item in &track.pps_list {
let length = item.len() as u16;
pps.extend_from_slice(&length.to_be_bytes());
pps.extend_from_slice(&item);
}
let width = track.width;
let height = track.height;
let bytes = vec![
0x00, 0x00, 0x00, // reserved
0x00, 0x00, 0x00, // reserved
0x00, 0x01, // data_reference_index
0x00, 0x00, // pre_defined
0x00, 0x00, // reserved
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // pre_defined
(width >> 8) as u8,
width as u8, // width
(height >> 8) as u8,
height as u8, // height
0x00, 0x48, 0x00, 0x00, // horizresolution
0x00, 0x48, 0x00, 0x00, // vertresolution
0x00, 0x00, 0x00, 0x00, // reserved
0x00, 0x01, // frame_count
0x12,
0x62, 0x69, 0x6E, 0x65, // binelpro.ru
0x6C, 0x70, 0x72, 0x6F,
0x2E, 0x72, 0x75, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, // compressorname
0x00, 0x18, // depth = 24
0xFF, 0xFF
];
mp4_box(b"avc1", vec![&bytes, &avcc(track, &sps, &pps), &btrt()])
}
/// AVCConfigurationBox
fn avcc(track: &Track, sps: &[u8], pps: &[u8]) -> Vec<u8> {
let mut bytes = vec![
0x01, // version
sps[3], // profile
sps[4], // profile compat
sps[5], // level
0xFC | 3, // lengthSizeMinusOne, hard-coded to 4 bytes
0xE0 | track.sps_list.len() as u8, // 3bit reserved (111) + numOfSequenceParameterSets
];
bytes.extend_from_slice(sps);
bytes.push(track.pps_list.len() as u8);
bytes.extend_from_slice(pps);
mp4_box(b"avcC", vec![&bytes])
}
fn btrt() -> Vec<u8> {
const BTRT: [u8; 12] = [
0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB
0x00, 0x2d, 0xc6, 0xc0, // maxBitrate
0x00, 0x2d, 0xc6, 0xc0
];
mp4_box(b"btrt", vec![&BTRT])
}
/// movie extend
fn mvex(tracks: &[Track]) -> Vec<u8> {
let boxes = tracks.into_iter().map(|t| trex(t)).collect::<Vec<Vec<u8>>>();
mp4_box(b"mvex", boxes.iter().map(AsRef::as_ref).collect())
}
fn trex(track: &Track) -> Vec<u8> {
let bytes = [
0x00, // version 0
0x00, 0x00, 0x00, // flags
(track.id >> 24) as u8,
(track.id >> 16) as u8,
(track.id >> 8) as u8,
track.id as u8, // track_ID
0x00, 0x00, 0x00, 0x01, // default_sample_description_index
0x00, 0x00, 0x00, 0x00, // default_sample_duration
0x00, 0x00, 0x00, 0x00, // default_sample_size
0x00, 0x01, 0x00, 0x01, // default_sample_flags
];
mp4_box(b"trex", vec![&bytes])
}
/// movie box
fn moov(tracks: &[Track], duration: u32, timescale: u32) -> Vec<u8> {
let boxes = tracks.iter().map(|t| trak(t)).collect::<Vec<Vec<u8>>>();
let mvhd = mvhd(timescale, duration);
let mvex = mvex(&tracks);
let mut payloads: Vec<&[u8]> = vec![];
payloads.push(&mvhd);
boxes.iter().for_each(|x| payloads.push(x));
payloads.push(&mvex);
mp4_box(b"moov", payloads)
}
/// 后台保存FLV文件
#[allow(unused)]
pub fn save_fmp4_background(stream_name: &str, peer_addr: String) {
if let Some(eventbus) = eventbus_map().get(stream_name) {
log::warn!("[peer={}] save_fmp4_background, stream_name={}", peer_addr, stream_name);
let rx = eventbus.register_receiver();
spawn_and_log_error(handle_fmp4_rx(rx, stream_name.to_owned(), peer_addr));
}
}
/// Mux an RTMP video stream into a fragmented-MP4 file (`tmp/output.mp4`).
///
/// Looks up the stream's cached metadata and AVC sequence header, extracts
/// the SPS/PPS parameter sets, writes the fMP4 init segment, then wraps each
/// incoming video NALU into media fragments. Frames arriving before the
/// first key frame are dropped so the output starts on a decodable frame.
async fn handle_fmp4_rx(
    rx: Receiver<RtmpMessage>,
    stream_name: String,
    peer_addr: String,
) -> anyhow::Result<()> {
    // Ensure the output directory exists before opening the file.
    let tmp_dir = "tmp";
    if smol::fs::read_dir(tmp_dir).await.is_err() {
        smol::fs::create_dir_all(tmp_dir).await?;
    }
    let mut file = smol::fs::OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open("tmp/output.mp4")
        .await?;
    let meta_data = meta_data_map()
        .get(&stream_name)
        .map(|it| it.value().clone())
        .ok_or_else(|| anyhow::anyhow!(format!("not found meta_data, stream={}", stream_name)))?;
    let video_header = video_header_map()
        .get(&stream_name)
        .map(|it| it.value().clone())
        // BUGFIX: this message previously said "meta_data" (copy-paste),
        // hiding which of the two lookups actually failed.
        .ok_or_else(|| anyhow::anyhow!(format!("not found video_header, stream={}", stream_name)))?;
    // Pull SPS/PPS out of the AVC sequence header; the 4-byte AVCC length
    // prefix is stripped before handing them to the encoder.
    let mut sps_list = vec![];
    let mut pps_list = vec![];
    let pioneer_nalus = Nalu::from_rtmp_message(&video_header);
    for nalu in pioneer_nalus {
        let bytes = nalu.to_avcc_format()[4..].to_vec();
        match nalu.get_nal_unit_type() {
            Nalu::UNIT_TYPE_SPS => sps_list.push(bytes),
            Nalu::UNIT_TYPE_PPS => pps_list.push(bytes),
            _ => {}
        }
    }
    log::info!("[peer={}], sps={:?}, pps={:?}", peer_addr, sps_list, pps_list);
    // NOTE(review): a zero frame_rate in the metadata would make this
    // duration non-finite before the cast — confirm upstream validation.
    let mut fmp4_encoder = Fmp4Encoder::new(Track {
        duration: (Track::DEFAULT_TIMESCALE as f64 / meta_data.frame_rate) as _,
        timescale: Track::DEFAULT_TIMESCALE,
        width: meta_data.width as _,
        height: meta_data.height as _,
        sps_list,
        pps_list,
        ..Default::default()
    });
    // send video header (fMP4 init segment: ftyp + moov)
    let header = fmp4_encoder.init_segment();
    file.write_all(&header).await?;
    let mut found_key_frame = false;
    while let Ok(msg) = rx.recv().await {
        let nalus = Nalu::from_rtmp_message(&msg);
        for nalu in nalus {
            // Skip everything until the first key frame.
            if !found_key_frame {
                if nalu.is_key_frame {
                    found_key_frame = true;
                } else {
                    continue;
                }
            }
            let bytes = fmp4_encoder.wrap_frame(&nalu.to_avcc_format(), nalu.is_key_frame);
            file.write_all(&bytes).await?;
        }
        file.flush().await?
    }
    log::warn!("[peer={}][handle_fmp4_rx] closed, stream_name={}", peer_addr, stream_name);
    Ok(())
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/mod.rs | src/protocol/mod.rs | pub mod flv;
pub mod rtmp;
pub mod h264;
pub mod aac;
pub mod fmp4;
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/flv.rs | src/protocol/flv.rs | use byteorder::{BigEndian, ByteOrder};
use crate::protocol::rtmp::{ChunkMessageType, RtmpMessage};
use crate::util::spawn_and_log_error;
use smol::channel::Receiver;
use std::convert::TryFrom;
use crate::rtmp_server::eventbus_map;
use chrono::Local;
use smol::io::AsyncWriteExt;
use std::time::{Duration, Instant};
/// Standard 13-byte FLV file prologue: 9-byte header (audio + video flags
/// set) followed by the 4-byte PreviousTagSize0 field, which is always 0.
pub const FLV_HEADER_WITH_TAG0: [u8; 13] = [
    0x46, 0x4c, 0x56, // signature "FLV"
    0x01, // version
    0x05, // audio and video flag
    0x00, 0x00, 0x00, 0x09, // header length
    0x00, 0x00, 0x00, 0x00, // tag0 length
];
/// Same prologue but with only the video flag set (no audio track).
pub const FLV_HEADER_ONLY_VIDEO_WITH_TAG0: [u8; 13] = [
    0x46, 0x4c, 0x56, // signature "FLV"
    0x01, // version
    0x01, // video flag
    0x00, 0x00, 0x00, 0x09, // header length
    0x00, 0x00, 0x00, 0x00, // tag0 length
];
/// A single FLV tag: the 11-byte tag header followed by the tag body.
pub struct FlvTag {
    // Complete serialized tag:
    // [type:1][data_size:3][timestamp:3][ts_ext:1][stream_id:3][body...]
    raw_data: Vec<u8>,
}
#[allow(unused)]
impl FlvTag {
    /// Tag type byte: 0x08=audio, 0x09=video, 0x12=script
    pub fn tag_type(&self) -> u8 {
        self.raw_data[0]
    }
    /// Length of the tag body in bytes (24-bit big-endian field).
    pub fn data_size(&self) -> u32 {
        BigEndian::read_u24(&self.raw_data[1..4])
    }
    /// Timestamp in milliseconds: 24-bit base value with the extension
    /// byte supplying bits 24..32.
    pub fn timestamp(&self) -> u32 {
        let timestamp_u24 = BigEndian::read_u24(&self.raw_data[4..7]);
        timestamp_u24 | (self.raw_data[7] as u32) << 24
    }
    /// audio/video/script payload (everything after the 11-byte tag header)
    pub fn body(&self) -> &[u8] {
        &self.raw_data[11..]
    }
}
impl TryFrom<RtmpMessage> for FlvTag {
    type Error = anyhow::Error;

    /// Builds an FLV tag from an RTMP audio/video message; any other
    /// message type yields an `Err`.
    fn try_from(mut msg: RtmpMessage) -> Result<Self, Self::Error> {
        let tag_type: u8 = match msg.header.message_type {
            ChunkMessageType::AudioMessage => 0x08,
            ChunkMessageType::VideoMessage => 0x09,
            _ => {
                return Err(anyhow::anyhow!(
                    "[FlvTag] invalid message type, {:?}",
                    msg.header.message_type
                ))
            }
        };
        let timestamp = msg.header.timestamp;
        let mut raw_data = Vec::with_capacity(11 + msg.body.len());
        raw_data.push(tag_type);
        // data size (24-bit big-endian)
        raw_data.extend_from_slice(&(msg.body.len() as u32).to_be_bytes()[1..4]);
        // timestamp: low 24 bits, then the extension byte carrying bits 24..32
        raw_data.extend_from_slice(&(timestamp & 0xFFFFFF).to_be_bytes()[1..4]);
        raw_data.push((timestamp >> 24) as u8);
        // stream id (always 0)
        raw_data.extend_from_slice(&0u32.to_be_bytes()[1..4]);
        // body
        raw_data.append(&mut msg.body);
        Ok(FlvTag { raw_data })
    }
}
impl AsRef<[u8]> for FlvTag {
    /// Exposes the complete serialized tag bytes.
    fn as_ref(&self) -> &[u8] {
        &self.raw_data
    }
}
/// Spawn a background task that records the named stream to an FLV file.
#[allow(unused)]
pub fn save_flv_background(stream_name: &str, peer_addr: String) {
    let Some(eventbus) = eventbus_map().get(stream_name) else {
        return;
    };
    let flv_rx = eventbus.register_receiver();
    spawn_and_log_error(handle_flv_rx(flv_rx, stream_name.to_owned(), peer_addr));
}
/// Write an RTMP stream out to an FLV file (`tmp/output.flv`).
async fn handle_flv_rx(
    flv_rx: Receiver<RtmpMessage>,
    stream_name: String,
    peer_addr: String,
) -> anyhow::Result<()> {
    // Make sure the output directory exists.
    let tmp_dir = "tmp";
    if smol::fs::read_dir(tmp_dir).await.is_err() {
        smol::fs::create_dir_all(tmp_dir).await?;
    }
    let mut file = smol::fs::OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open("tmp/output.flv")
        .await?;
    // FLV prologue: file header plus PreviousTagSize0.
    file.write_all(&FLV_HEADER_WITH_TAG0).await?;
    let ctx_begin_timestamp = Local::now().timestamp_millis();
    // Throttle flushes to at most once every two seconds.
    let min_flush_duration = Duration::from_secs(2);
    let mut last_flush_time = Instant::now();
    while let Ok(mut msg) = flv_rx.recv().await {
        // Rebase timestamps so the file starts at zero (wall-clock based).
        msg.header.timestamp = (Local::now().timestamp_millis() - ctx_begin_timestamp) as u32;
        let flv_tag = FlvTag::try_from(msg)?;
        file.write_all(flv_tag.as_ref()).await?;
        // Each tag is followed by its own total size (PreviousTagSize).
        file.write_all(&(flv_tag.as_ref().len() as u32).to_be_bytes())
            .await?;
        if last_flush_time.elapsed() > min_flush_duration {
            last_flush_time = Instant::now();
            file.flush().await?
        }
    }
    log::warn!("[peer={}][handle_flv_rx] closed, stream_name={}", peer_addr, stream_name);
    Ok(())
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/h264.rs | src/protocol/h264.rs | use byteorder::{BigEndian, ByteOrder};
use crate::protocol::rtmp::{RtmpContext, RtmpMessage, ChunkMessageType};
/// Basic unit of H.264 coded data for storage or transport (NAL unit).
pub struct Nalu {
    // Annex-B bytes: 4-byte start code (00 00 00 01) followed by the NAL unit.
    inner: Vec<u8>,
    // True when the carrying RTMP video tag marked this frame as a key frame.
    pub is_key_frame: bool,
}
impl Nalu {
    /// nal_unit_type value for a sequence parameter set.
    pub const UNIT_TYPE_SPS: u8 = 7;
    /// nal_unit_type value for a picture parameter set.
    pub const UNIT_TYPE_PPS: u8 = 8;
    /// Split an RTMP video message into its NAL units.
    ///
    /// An AVC sequence header (AVCPacketType 0) yields the SPS/PPS sets;
    /// an AVC NALU packet (type 1) yields the contained frame NALUs.
    /// Non-video messages produce an empty vector.
    pub fn from_rtmp_message(msg: &RtmpMessage) -> Vec<Nalu> {
        if msg.header.message_type != ChunkMessageType::VideoMessage {
            return vec![];
        }
        let bytes = &msg.body;
        let mut nalus = vec![];
        // FLV VideoTagHeader byte: frame type nibble + codec id nibble.
        // 0x17 == key frame (1) + AVC codec (7).
        let frame_type = bytes[0];
        let is_key_frame = frame_type == 0x17;
        let mut read_index = 1;
        let acv_packet_type = bytes[read_index];
        // Skip the AVCPacketType byte plus the 3-byte composition time.
        read_index += 4;
        // AVCDecoderConfigurationRecord(AVC sequence header)
        if acv_packet_type == 0 {
            // Skip configurationVersion .. lengthSizeMinusOne (5 bytes).
            read_index += 5;
            let sps_num = &bytes[read_index] & 0x1F;
            read_index += 1;
            for _ in 0..sps_num as usize {
                let data_len = BigEndian::read_u16(&bytes[read_index..]);
                read_index += 2;
                let data = &bytes[read_index..(read_index + data_len as usize)];
                read_index += data_len as usize;
                // Re-emit in Annex-B form with a 4-byte start code.
                let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
                nalu_bytes.extend_from_slice(data);
                nalus.push(Self { inner: nalu_bytes, is_key_frame });
            }
            let num_of_pps = &bytes[read_index] & 0x1F;
            read_index += 1;
            for _ in 0..num_of_pps as usize {
                let data_len = BigEndian::read_u16(&bytes[read_index..]);
                read_index += 2;
                let data = &bytes[read_index..(read_index + data_len as usize)];
                read_index += data_len as usize;
                let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
                nalu_bytes.extend_from_slice(data);
                nalus.push(Self { inner: nalu_bytes, is_key_frame });
            }
        }
        // One or more NALUs (Full frames are required)
        else if acv_packet_type == 1 {
            loop {
                if read_index >= bytes.len() {
                    break;
                }
                // AVCC framing: 4-byte big-endian length prefix per NALU.
                let data_len = BigEndian::read_u32(&bytes[read_index..]);
                read_index += 4;
                let data = &bytes[read_index..(read_index + data_len as usize)];
                read_index += data_len as usize;
                let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
                nalu_bytes.extend_from_slice(data);
                nalus.push(Self { inner: nalu_bytes, is_key_frame });
            }
        } else {
            log::warn!("unknown acv packet type");
        };
        nalus
    }
    /// NAL priority (nal_ref_idc, bits 6..5 of the NAL header byte).
    #[allow(unused)]
    pub fn get_nal_ref_idc(&self) -> u8 {
        // BUGFIX: read the NAL header byte *after* the 4-byte start code.
        // The old `self.inner[0] >> 5` read a start-code byte (always 0x00),
        // so every NALU reported priority 0 ("DISPOSABLE").
        (self.inner[4] >> 5) & 0x03
    }
    /// NAL unit type (low 5 bits of the NAL header byte).
    #[allow(unused)]
    pub fn get_nal_unit_type(&self) -> u8 {
        self.inner[4] & 0x1F
    }
    /// Human-readable "priority::type" label for logging.
    #[allow(unused)]
    pub fn nalu_type_desc(&self) -> String {
        let priority: String = match self.get_nal_ref_idc() {
            0 => "DISPOSABLE".into(),
            1 => "LOW".into(),
            2 => "HIGH".into(),
            3 => "HIGHEST".into(),
            _ => "UNKNOWN".into(),
        };
        let t: String = match self.get_nal_unit_type() {
            1 => "SLICE".into(),
            2 => "DPA".into(),
            3 => "DPB".into(),
            4 => "DPC".into(),
            5 => "IDR".into(),
            6 => "SEI".into(),
            7 => "SPS".into(),
            8 => "PPS".into(),
            9 => "AUD".into(),
            10 => "EOSEQ".into(),
            11 => "EOSTREAM".into(),
            12 => "FILL".into(),
            _ => "UNKNOWN".into(),
        };
        format!("{}::{}", priority, t)
    }
    /// Convert from Annex-B (start-code) framing to AVCC framing:
    /// the 4-byte start code is replaced by a 4-byte big-endian length.
    pub fn to_avcc_format(&self) -> Vec<u8> {
        let origin = self.as_ref();
        let mut bytes = vec![0x00, 0x00, 0x00, 0x00];
        for i in 4..origin.len() {
            // NOTE(review): emulation-prevention-byte removal is disabled;
            // payload bytes are copied through verbatim — confirm intended.
            // remove prevention byte
            // if origin[i - 2] == 0 && origin[i - 1] == 0 && origin[i] == 3 {
            //     if i < origin.len() && [0u8, 1, 2, 3].contains(&origin[i + 1]) {
            //         continue;
            //     }
            // }
            bytes.push(origin[i]);
        }
        let len = (bytes.len() - 4) as u32;
        bytes[0] = (len >> 24) as u8;
        bytes[1] = (len >> 16) as u8;
        bytes[2] = (len >> 8) as u8;
        bytes[3] = len as u8;
        bytes
    }
}
impl AsRef<[u8]> for Nalu {
    /// Borrow the Annex-B bytes (start code included).
    fn as_ref(&self) -> &[u8] {
        &self.inner
    }
}
/// # VideoTagHeader
///
/// ## Frame Type
///
/// Type: UB [4]
///
/// Type of video frame. The following values are defined:
/// 1 = key frame (for AVC, a seekable frame)
/// 2 = inter frame (for AVC, a non-seekable frame)
/// 3 = disposable inter frame (H.263 only)
/// 4 = generated key frame (reserved for server use only)
/// 5 = video info/command frame
///
/// ## CodecID
///
/// Type: UB [4]
///
/// Codec Identifier. The following values are defined:
/// 2 = Sorenson H.263
/// 3 = Screen video
/// 4 = On2 VP6
/// 5 = On2 VP6 with alpha channel
/// 6 = Screen video version 2
/// 7 = AVC
///
/// ## AVCPacketType
///
/// Type: IF CodecID == 7, UI8
///
/// The following values are defined:
/// 0 = AVC sequence header
/// 1 = AVC NALU
/// 2 = AVC end of sequence (lower level NALU sequence ender is not required or supported)
///
/// ## CompositionTime
///
/// Type: IF CodecID == 7, SI24
///
/// IF AVCPacketType == 1
/// Composition time offset
/// ELSE
/// 0
/// See ISO 14496-12, 8.15.3 for an explanation of composition
/// times. The offset in an FLV file is always in milliseconds.
#[allow(unused)]
// Reference/debug walk of an FLV VideoTagHeader + AVC payload. The inner
// `handle_nalu` is an empty stub, so this currently only exercises parsing;
// `Nalu::from_rtmp_message` is the live equivalent of this routine.
pub fn handle_video_data(bytes: &[u8], ctx: &RtmpContext) {
    // Frame type nibble + codec id nibble (see the doc comment above).
    let frame_type = bytes[0];
    let mut read_index = 1;
    let acv_packet_type = bytes[read_index];
    read_index += 1;
    // For AVC this 3-byte field is the composition time offset (all zero here).
    let _composition_time_offset = &bytes[read_index..read_index + 3];
    read_index += 3;
    log::debug!(
        "[peer={}] video frame type = {:#04X}, acv_packet_type={:#04X}",
        &ctx.peer_addr,
        frame_type,
        acv_packet_type
    );
    // AVCDecoderConfigurationRecord(AVC sequence header)
    if acv_packet_type == 0 {
        // let config_version = &bytes[read_index];
        // let avc_profile_indication = &bytes[read_index + 1];
        // let profile_compatibility = &bytes[read_index + 2];
        // let avc_level_indication = &bytes[read_index + 3];
        // let length_size_minus_one = &bytes[read_index + 4];
        read_index += 5;
        // Low 5 bits: number of SPS entries that follow.
        let sps_num = &bytes[read_index] & 0x1F;
        read_index += 1;
        // println!("sps_num={}", sps_num);
        // println!("config_version={:#04X}", config_version);
        // println!("avc_profile_indication={:#04X}", avc_profile_indication);
        // println!("profile_compatibility={:#04X}", profile_compatibility);
        // println!("avc_level_indication={:#04X}", avc_level_indication);
        // println!("length_size_minus_one={:#04X}", length_size_minus_one);
        for _ in 0..sps_num as usize {
            // Each set is a 16-bit length followed by that many bytes.
            let data_len = BigEndian::read_u16(&bytes[read_index..]);
            read_index += 2;
            let data = &bytes[read_index..(read_index + data_len as usize)];
            read_index += data_len as usize;
            // println!("len={}, sps data:\n{}", data_len, bytes_hex_format(data));
            let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
            nalu_bytes.extend_from_slice(data);
            handle_nalu(nalu_bytes);
        }
        let num_of_pps = &bytes[read_index] & 0x1F;
        read_index += 1;
        // println!("pps num = {}", num_of_pps);
        for _ in 0..num_of_pps as usize {
            let data_len = BigEndian::read_u16(&bytes[read_index..]);
            read_index += 2;
            let data = &bytes[read_index..(read_index + data_len as usize)];
            read_index += data_len as usize;
            // println!("len={}, pps data:\n{}", data_len, bytes_hex_format(data));
            let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
            nalu_bytes.extend_from_slice(data);
            handle_nalu(nalu_bytes);
        }
    }
    // One or more NALUs (Full frames are required)
    else if acv_packet_type == 1 {
        loop {
            if read_index >= bytes.len() {
                break;
            }
            // AVCC framing: 4-byte big-endian length prefix per NALU.
            let data_len = BigEndian::read_u32(&bytes[read_index..]);
            read_index += 4;
            let data = &bytes[read_index..(read_index + data_len as usize)];
            read_index += data_len as usize;
            // println!("NALU Type: {}, len={}", nalu_type_desc(&data[0]), data_len);
            // println!("len={}, nalu data:\n{}", data_len, bytes_hex_format(data));
            let mut nalu_bytes: Vec<u8> = vec![0x00, 0x00, 0x00, 0x01];
            nalu_bytes.extend_from_slice(data);
            handle_nalu(nalu_bytes);
        }
    } else {
        // NOTE(review): unlike from_rtmp_message (which only warns), an
        // unknown packet type panics here — acceptable only for debug use.
        unreachable!("unknown acv packet type")
    };
    // Intentionally empty: placeholder sink for parsed NAL units.
    fn handle_nalu(nalu_bytes: Vec<u8>) {}
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
nintha/river | https://github.com/nintha/river/blob/56a63312dd7fef48f73dbd3985022604c739ba40/src/protocol/rtmp.rs | src/protocol/rtmp.rs | use std::fmt::{Debug, Formatter};
use amf::amf0;
use amf::amf0::Value;
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
use chrono::Local;
use num::FromPrimitive;
use smol::io::{AsyncReadExt, AsyncWriteExt};
use smol::net::TcpStream;
use crate::rtmp_server::eventbus_map;
use crate::util::bytes_hex_format;
use std::convert::TryFrom;
#[derive(Clone, Debug)]
pub struct Handshake0 {
    /// Version (8 bits): In C0, this field identifies the RTMP version
    /// requested by the client. In S0, this field identifies the RTMP
    /// version selected by the server. The version defined by this
    /// specification is 3. Values 0-2 are deprecated values used by
    /// earlier proprietary products; 4-31 are reserved for future
    /// implementations; and 32-255 are not allowed (to allow
    /// distinguishing RTMP from text-based protocols, which always start
    /// with a printable character). A server that does not recognize the
    /// client’s requested version SHOULD respond with 3. The client MAY
    /// choose to degrade to version 3, or to abandon the handshake.
    pub version: u8,
}
impl Handshake0 {
    /// Canonical S0 reply advertising protocol version 3.
    pub const S0_V3: Handshake0 = Handshake0 { version: 3 };

    /// Serialize as the single-byte C0/S0 packet.
    pub fn to_bytes(&self) -> Vec<u8> {
        vec![self.version]
    }
}
#[derive(Clone, Debug)]
pub struct Handshake1 {
    /// Time (4 bytes): This field contains a timestamp, which SHOULD be
    /// used as the epoch for all future chunks sent from this endpoint.
    /// This may be 0, or some arbitrary value. To synchronize multiple
    /// chunkstreams, the endpoint may wish to send the current value of
    /// the other chunkstream’s timestamp.
    pub time: u32,
    /// Zero (4 bytes): This field MUST be all 0s.
    pub zero: u32,
    /// Random data (1528 bytes): This field can contain any arbitrary
    /// values. Since each endpoint has to distinguish between the
    /// response to the handshake it has initiated and the handshake
    /// initiated by its peer,this data SHOULD send something sufficiently
    /// random. But there is no need for cryptographically-secure
    /// randomness, or even dynamic values.
    pub random_data: Vec<u8>,
}
impl Handshake1 {
    /// Total C1/S1 packet length mandated by the spec.
    pub const PACKET_LENGTH: u32 = 1536;

    /// Serialize as `time(4, BE) | zero(4, BE) | random_data`.
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(8 + self.random_data.len());
        out.extend_from_slice(&self.time.to_be_bytes());
        out.extend_from_slice(&self.zero.to_be_bytes());
        out.extend_from_slice(&self.random_data);
        out
    }
}
#[derive(Clone, Debug)]
pub struct Handshake2 {
    /// Time (4 bytes): This field MUST contain the timestamp sent by the
    /// peer in S1 (for C2) or C1 (for S2).
    pub time: u32,
    /// Time2 (4 bytes): This field MUST contain the timestamp at which the
    /// previous packet(s1 or c1) sent by the peer was read.
    pub time2: u32,
    /// Random echo (1528 bytes): This field MUST contain the random data
    /// field sent by the peer in S1 (for C2) or S2 (for C1). Either peer
    /// can use the time and time2 fields together with the current
    /// timestamp as a quick estimate of the bandwidth and/or latency of
    /// the connection, but this is unlikely to be useful.
    pub random_echo: Vec<u8>,
}
impl Handshake2 {
    /// Total C2/S2 packet length mandated by the spec.
    pub const PACKET_LENGTH: u32 = 1536;

    /// Serialize as `time(4, BE) | time2(4, BE) | random_echo`.
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(8 + self.random_echo.len());
        out.extend_from_slice(&self.time.to_be_bytes());
        out.extend_from_slice(&self.time2.to_be_bytes());
        out.extend_from_slice(&self.random_echo);
        out
    }
}
/// Per-connection RTMP session state: the TCP stream, the chunk-decoding
/// "registers" inherited between chunks, and the peer/stream identity.
#[derive(Debug)]
pub struct RtmpContext {
    pub stream: TcpStream,
    // Wall-clock millis when this context was created.
    pub ctx_begin_timestamp: i64,
    // Chunk-header registers: compressed (fmt 1/2/3) chunk headers omit
    // fields, which are then inherited from the previous chunk via these.
    pub last_timestamp: u32,
    pub last_timestamp_delta: u32,
    pub last_message_length: u32,
    pub last_message_type_id: u8,
    pub last_message_stream_id: u32,
    // Negotiated maximum chunk payload size (RTMP default is 128).
    pub chunk_size: u32,
    // Bytes of the current message still pending in follow-up chunks.
    pub remain_message_length: u32,
    // Running count of bytes received (used for acknowledgements).
    pub recv_bytes_num: u32,
    pub peer_addr: String,
    pub stream_name: String,
    // True once this connection issued a publish command.
    pub is_publisher: bool,
}
impl RtmpContext {
    /// Create a fresh session context around an accepted TCP connection.
    /// Chunk registers start zeroed and chunk_size starts at the RTMP
    /// default of 128 bytes.
    pub fn new(stream: TcpStream) -> Self {
        let peer_addr = stream
            .peer_addr()
            .map(|a| a.to_string())
            .unwrap_or_default();
        RtmpContext {
            stream,
            ctx_begin_timestamp: Local::now().timestamp_millis(),
            last_timestamp_delta: 0,
            last_timestamp: 0,
            last_message_length: 0,
            last_message_type_id: 0,
            last_message_stream_id: 0,
            chunk_size: 128,
            remain_message_length: 0,
            recv_bytes_num: 0,
            peer_addr,
            stream_name: Default::default(),
            is_publisher: false,
        }
    }
    /// Read exactly `bytes_num` bytes from the peer, failing on EOF.
    pub async fn read_exact_from_peer(&mut self, bytes_num: u32) -> anyhow::Result<Vec<u8>> {
        let mut data = vec![0u8; bytes_num as usize];
        AsyncReadExt::read_exact(&mut self.stream, &mut data).await?;
        Ok(data)
    }
    /// Receives data without removing it from the queue.
    // NOTE(review): peek may fill fewer than `bytes_num` bytes; the returned
    // length is not checked here — confirm callers tolerate short peeks.
    pub async fn peek_exact_from_peer(&mut self, bytes_num: u32) -> anyhow::Result<Vec<u8>> {
        let mut data = vec![0u8; bytes_num as usize];
        self.stream.peek(&mut data).await?;
        Ok(data)
    }
    /// Write the whole buffer to the peer.
    pub async fn write_to_peer(&mut self, bytes: &[u8]) -> anyhow::Result<()> {
        self.stream.write_all(bytes).await?;
        Ok(())
    }
}
impl Drop for RtmpContext {
    /// When a publisher connection goes away, tear down the event bus that
    /// distributed its stream so subscribers observe the close.
    fn drop(&mut self) {
        if !self.is_publisher {
            return;
        }
        eventbus_map().remove(&self.stream_name);
        log::warn!(
            "[{}][RtmpContext] remove eventbus, stream_name={}",
            self.peer_addr,
            self.stream_name
        );
    }
}
/// Decoded RTMP chunk-message header fields.
#[derive(Debug, Clone)]
pub struct RtmpMessageHeader {
    /// chunk stream id
    pub csid: u8,
    // Absolute timestamp of the message (already delta-accumulated).
    pub timestamp: u32,
    pub message_length: u32,
    pub message_type_id: u8,
    // Same value as message_type_id, decoded into the enum.
    pub message_type: ChunkMessageType,
    /// message stream id
    /// 0 => control/command messages,
    /// 1 => play/publish commands and audio/video data
    pub msid: u32,
}
impl RtmpMessageHeader {
    /// Serialize as a full (type-0) chunk header: basic header byte,
    /// 24-bit timestamp (or 0xFFFFFF sentinel), 24-bit length, type id,
    /// stream id, and the optional 32-bit extended timestamp.
    // NOTE(review): the first byte is the raw csid, which encodes fmt=0
    // only while csid < 64; and msid is written big-endian although the
    // RTMP spec stores it little-endian — confirm peers accept this.
    pub fn to_bytes(&self) -> Vec<u8> {
        let enable_extend_timestamp_field = self.timestamp >= 0xFFFFFF;
        let mut rs = vec![self.csid];
        if enable_extend_timestamp_field {
            // Sentinel telling the reader to use the extended field below.
            rs.write_u24::<BigEndian>(0xFFFFFF).unwrap();
        } else {
            rs.write_u24::<BigEndian>(self.timestamp).unwrap();
        }
        rs.write_u24::<BigEndian>(self.message_length).unwrap();
        rs.write_u8(self.message_type_id).unwrap();
        rs.write_u32::<BigEndian>(self.msid).unwrap();
        if enable_extend_timestamp_field {
            rs.write_u32::<BigEndian>(self.timestamp).unwrap();
        }
        rs
    }
}
/// A complete RTMP message reassembled from one or more chunks.
#[derive(Clone)]
pub struct RtmpMessage {
    pub header: RtmpMessageHeader,
    // Reassembled payload (all chunk bodies concatenated).
    pub body: Vec<u8>,
    // Number of chunks combined to form this message.
    pub chunk_count: u32,
}
impl RtmpMessage {
    /// Read one complete message, merging follow-up chunks until the
    /// declared message length has fully arrived.
    pub async fn read_from(ctx: &mut RtmpContext) -> anyhow::Result<Self> {
        let mut chunk = RtmpMessage::read_chunk_from(ctx).await?;
        while ctx.remain_message_length > 0 {
            let mut remain_chunk = RtmpMessage::read_chunk_from(ctx).await?;
            chunk.body.append(&mut remain_chunk.body);
            chunk.chunk_count += 1;
        }
        Ok(chunk)
    }
    /// Read a single chunk, updating the per-connection header registers
    /// according to the chunk's fmt (header-compression) type.
    async fn read_chunk_from(ctx: &mut RtmpContext) -> anyhow::Result<Self> {
        let one = ctx.read_exact_from_peer(1).await?[0];
        // Basic header byte: fmt in the top 2 bits, csid in the low 6.
        let fmt = one >> 6;
        let csid = one << 2 >> 2;
        let (timestamp, message_length, message_type_id, message_stream_id) = match fmt {
            // Type 0: full 11-byte header carrying an absolute timestamp.
            0 => {
                let h = ctx.read_exact_from_peer(11).await?;
                // print_hex(&h);
                // Absolute timestamp here, so the running delta resets.
                ctx.last_timestamp_delta = 0;
                ctx.last_timestamp = BigEndian::read_u24(&h[0..3]);
                ctx.last_message_length = BigEndian::read_u24(&h[3..6]);
                ctx.remain_message_length = 0;
                ctx.last_message_type_id = h[6];
                ctx.last_message_stream_id = BigEndian::read_u32(&h[7..11]);
                ctx.recv_bytes_num += 12;
                // 0xFFFFFF means the real timestamp follows as 4 bytes.
                if ctx.last_timestamp >= 0xFFFFFF {
                    let extend = ctx.read_exact_from_peer(4).await?;
                    ctx.last_timestamp = BigEndian::read_u32(&extend[0..4]);
                    ctx.recv_bytes_num += 4;
                }
                (
                    ctx.last_timestamp,
                    ctx.last_message_length,
                    ctx.last_message_type_id,
                    ctx.last_message_stream_id,
                )
            }
            // Type 1: 7-byte header; timestamp delta + length + type id,
            // stream id inherited from the previous chunk.
            1 => {
                let h = ctx.read_exact_from_peer(7).await?;
                // bytes_hex_format(&h);
                let timestamp_delta = BigEndian::read_u24(&h[0..3]);
                // NOTE(review): unlike the fmt=2 branch, the delta is not
                // stored into last_timestamp_delta here, so a following
                // fmt=3 chunk reuses an older delta — confirm intended.
                ctx.last_message_length = BigEndian::read_u24(&h[3..6]);
                ctx.remain_message_length = 0;
                ctx.last_message_type_id = h[6];
                ctx.last_timestamp += timestamp_delta;
                ctx.recv_bytes_num += 8;
                (
                    ctx.last_timestamp,
                    ctx.last_message_length,
                    ctx.last_message_type_id,
                    ctx.last_message_stream_id,
                )
            }
            // Type 2: 3-byte header; only a timestamp delta is present.
            2 => {
                let h = ctx.read_exact_from_peer(3).await?;
                let timestamp_delta = BigEndian::read_u24(&h[0..3]);
                ctx.last_timestamp_delta = timestamp_delta;
                ctx.last_timestamp += timestamp_delta;
                ctx.recv_bytes_num += 4;
                (
                    ctx.last_timestamp,
                    ctx.last_message_length,
                    ctx.last_message_type_id,
                    ctx.last_message_stream_id,
                )
            }
            // Type 3: no header; everything inherited, delta re-applied.
            3 => {
                ctx.last_timestamp += ctx.last_timestamp_delta;
                (
                    ctx.last_timestamp,
                    ctx.last_message_length,
                    ctx.last_message_type_id,
                    ctx.last_message_stream_id,
                )
            }
            // fmt is 2 bits wide, so 0..=3 is exhaustive.
            _ => unreachable!(),
        };
        // Payload length of this particular chunk: the message remainder,
        // capped at the negotiated chunk size.
        let read_num = {
            let remain_length = if ctx.remain_message_length > 0 {
                ctx.remain_message_length
            } else {
                message_length
            };
            if remain_length > ctx.chunk_size {
                ctx.remain_message_length = remain_length - ctx.chunk_size;
                ctx.chunk_size
            } else {
                ctx.remain_message_length = 0;
                remain_length
            }
        };
        let message_data = ctx.read_exact_from_peer(read_num).await?;
        ctx.recv_bytes_num += read_num;
        let message_type = FromPrimitive::from_u8(message_type_id).ok_or(anyhow::anyhow!(
            format!("invalid message type: {}", message_type_id)
        ))?;
        Ok(RtmpMessage {
            header: RtmpMessageHeader {
                csid,
                msid: message_stream_id,
                message_length,
                timestamp,
                message_type_id,
                message_type,
            },
            body: message_data,
            chunk_count: 1,
        })
    }
    /// Human-readable name of the message type id, for logging.
    pub fn message_type_desc(&self) -> String {
        match self.header.message_type_id {
            1 => "ProtocolControlMessages::SetChunkSize",
            2 => "ProtocolControlMessages::AbortMessage",
            3 => "ProtocolControlMessages::Acknowledgement",
            4 => "ProtocolControlMessages::UserControlMessage",
            5 => "ProtocolControlMessages::WindowAcknowledgementSize",
            6 => "ProtocolControlMessages::SetPeerBandwidth",
            17 => "CommandMessages::AMF3CommandMessage",
            20 => "CommandMessages::AMF0CommandMessage",
            15 => "CommandMessages::AMF3DataMessage",
            18 => "CommandMessages::AMF0DataMessage",
            16 => "CommandMessages::AMF3SharedObjectMessage",
            19 => "CommandMessages::AMF0SharedObjectMessage",
            8 => "CommandMessages::AudioMessage",
            9 => "CommandMessages::VideoMessage",
            22 => "CommandMessages::AggregateMessage",
            _ => "UnknownMessage",
        }
        .to_string()
    }
    /// Parse the body as AMF0 values. Only AMF0 data (18), shared-object
    /// (19) and command (20) messages are attempted; others yield `None`.
    pub fn try_read_body_to_amf0(&self) -> Option<Vec<Value>> {
        match self.header.message_type_id {
            18 | 19 | 20 => read_all_amf_value(&self.body),
            _ => None,
        }
    }
    /// Split a long message into wire chunks: the first chunk carries a
    /// type-0 header, every following chunk a 1-byte type-3 header.
    pub fn split_chunks_bytes(&self, chunk_size: u32) -> Vec<Vec<u8>> {
        let chunk_size = chunk_size as usize;
        let mut rs = vec![];
        let mut remain = self.body.clone();
        while remain.len() > chunk_size {
            let right = remain.split_off(chunk_size);
            rs.push(remain);
            remain = right;
        }
        rs.push(remain);
        // Prepend the full type-0 header to the first chunk.
        for item in self.header.to_bytes().iter().rev() {
            (&mut rs[0]).insert(0, item.clone());
        }
        // Prepend the type-3 basic header (fmt=3 | csid) to the rest.
        if rs.len() > 1 {
            let type3_fmt = 0xC0 | self.header.csid;
            for item in &mut rs[1..] {
                item.insert(0, type3_fmt);
            }
        }
        rs
    }
}
impl Debug for RtmpMessage {
    /// Multi-line dump: header, decoded type name, chunk count, hex body.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let body_dump = bytes_hex_format(&self.body);
        write!(
            f,
            "ChunkMessage {{\nheader: {:?}\nmessage type: {}\nchunk count={}\nbody:\n{}}}",
            self.header,
            self.message_type_desc(),
            self.chunk_count,
            body_dump
        )
    }
}
/// RTMP message type ids; discriminant values match the wire format's
/// message-type-id byte (decoded via `FromPrimitive` in `read_chunk_from`).
#[derive(Debug, PartialEq, FromPrimitive, Clone, Copy)]
pub enum ChunkMessageType {
    SetChunkSize = 1,
    AbortMessage = 2,
    Acknowledgement = 3,
    UserControlMessage = 4,
    WindowAcknowledgementSize = 5,
    SetPeerBandwidth = 6,
    AMF3CommandMessage = 17,
    AMF0CommandMessage = 20,
    AMF3DataMessage = 15,
    AMF0DataMessage = 18,
    AMF3SharedObjectMessage = 16,
    AMF0SharedObjectMessage = 19,
    AudioMessage = 8,
    VideoMessage = 9,
    AggregateMessage = 22,
}
/// Stream properties extracted from the AMF0 `onMetaData` message.
#[derive(Default, Clone)]
pub struct RtmpMetaData {
    pub width: f64,
    pub height: f64,
    pub video_codec_id: String,
    pub video_data_rate: f64,
    pub audio_codec_id: String,
    pub audio_data_rate: f64,
    pub frame_rate: f64,
    pub duration: f64,
    // Wall-clock millis when the metadata was received.
    pub begin_time: i64,
}
impl TryFrom<&amf::amf0::Value> for RtmpMetaData {
    type Error = anyhow::Error;

    /// Extract stream metadata from an AMF0 `onMetaData` ECMA array.
    /// Unknown keys are ignored; unparsable values fall back to defaults.
    fn try_from(value: &amf::amf0::Value) -> Result<Self, Self::Error> {
        let entries = match value {
            Value::EcmaArray { entries } => entries,
            _ => return Err(anyhow::anyhow!("value is not Value::EcmaArray")),
        };
        let mut meta_data = RtmpMetaData::default();
        for item in entries {
            let val = &item.value;
            match item.key.as_ref() {
                "duration" => meta_data.duration = val.try_as_f64().unwrap_or_default(),
                "width" => meta_data.width = val.try_as_f64().unwrap_or_default(),
                "height" => meta_data.height = val.try_as_f64().unwrap_or_default(),
                "videocodecid" => {
                    meta_data.video_codec_id = val.try_as_str().unwrap_or_default().to_owned()
                }
                "videodatarate" => {
                    meta_data.video_data_rate = val.try_as_f64().unwrap_or_default()
                }
                "framerate" => meta_data.frame_rate = val.try_as_f64().unwrap_or_default(),
                "audiocodecid" => {
                    meta_data.audio_codec_id = val.try_as_str().unwrap_or_default().to_owned()
                }
                "audiodatarate" => {
                    meta_data.audio_data_rate = val.try_as_f64().unwrap_or_default()
                }
                _ => {}
            }
        }
        meta_data.begin_time = Local::now().timestamp_millis();
        Ok(meta_data)
    }
}
/// 计算一个AMF值的字节长度
pub fn calc_amf_byte_len(v: &amf0::Value) -> usize {
match v {
Value::Number(_) => 9,
Value::Boolean(_) => 2,
Value::String(s) => (s.len() + 3),
Value::Object { entries, .. } => {
// marker and tail
let mut len = 4;
for en in entries {
len += en.key.len() + 2;
len += calc_amf_byte_len(&en.value);
}
len
}
Value::Null => 1,
Value::Undefined => 1,
Value::EcmaArray { entries } => {
// marker and tail
let mut len = 8;
for en in entries {
len += en.key.len() + 2;
len += calc_amf_byte_len(&en.value);
}
len
}
Value::Array { entries: _ } => unimplemented!(),
Value::Date { unix_time: _ } => unimplemented!(),
Value::XmlDocument(_) => unimplemented!(),
Value::AvmPlus(_) => unimplemented!(),
}
}
/// Decode every consecutive AMF0 value contained in `bytes`.
/// Returns `None` if any value (including the first on empty input)
/// fails to parse.
pub fn read_all_amf_value(bytes: &[u8]) -> Option<Vec<Value>> {
    let mut list = Vec::new();
    let mut read_num = 0;
    loop {
        // Advance by the value's computed serialized length rather than
        // relying on how many bytes the reader consumed.
        let v = amf::amf0::Value::read_from(&mut &bytes[read_num..]).ok()?;
        read_num += calc_amf_byte_len(&v);
        list.push(v);
        if read_num >= bytes.len() {
            return Some(list);
        }
    }
}
| rust | Apache-2.0 | 56a63312dd7fef48f73dbd3985022604c739ba40 | 2026-01-04T20:21:06.830703Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/build.rs | build.rs | // Purpose: This build script configures the build environment for GStreamer integration.
//
// What this does:
// - Sets up necessary paths, environment variables and linker flags for GStreamer
// - Currently handles macOS-specific configuration for the GStreamer framework
//
// Customization for different environments:
// - macOS: If your GStreamer framework is installed in a non-standard location,
// update the paths in the macOS section
// - Windows: You'll need to add a Windows-specific section similar to the macOS one,
// typically pointing to your GStreamer installation directory (e.g.,
// C:\gstreamer\1.0\msvc_x86_64\lib for MSVC builds)
// - Linux: For standard installations, pkg-config should find GStreamer without
// any special configuration. For custom installations, add a Linux section
// that sets PKG_CONFIG_PATH to your GStreamer lib/pkgconfig directory.
//
// For more information on build scripts, see:
// https://doc.rust-lang.org/cargo/reference/build-scripts.html
//
// Note that, installation of Gstreamer is not a hard task (please open issue if you have a trouble), I hope these explanations are not making you feel like it is a hard task. In
// windows for instance, just download the installer and click next, next, next, finish. That's all, it should automatically set the environment variables for you.
// And you will able to use Gstreamer in this project. Bellow is my own configuration for Gstreamer in my mac machine which I used via PKG_CONFIG_PATH.
// You can also use the same configuration in your mac machine. And I strongly recommend you to install it with PKG_CONFIG_PATH.
// Please see how I build the project in github actions, you can use it as a reference:
// github.com/altunenes/cuneus/blob/main/.github/workflows/release.yaml
use std::env;
/// Build script entry: on macOS, point pkg-config and the dynamic linker at
/// the GStreamer.framework install so GStreamer-backed features link. Other
/// platforms rely on pkg-config's default search path (see notes above).
fn main() {
    // CARGO_CFG_TARGET_OS is provided by Cargo for the compilation target.
    let target = env::var("CARGO_CFG_TARGET_OS");
    // Compare against the Ok payload without allocating a fresh String.
    if target.as_deref() == Ok("macos") {
        env::set_var(
            "PKG_CONFIG_PATH",
            "/Library/Frameworks/GStreamer.framework/Versions/Current/lib/pkgconfig",
        );
        let lib = "/Library/Frameworks/GStreamer.framework/Versions/Current/lib";
        env::set_var("GST_PLUGIN_PATH", lib);
        env::set_var("DYLD_FALLBACK_LIBRARY_PATH", lib);
        println!("cargo:rustc-link-search=framework=/Library/Frameworks");
        println!("cargo:rustc-link-arg=-Wl,-rpath,/Library/Frameworks/GStreamer.framework/Versions/Current/lib");
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/app.rs | src/app.rs | use crate::{Core, ShaderManager};
use winit::{
application::ApplicationHandler,
dpi::LogicalSize,
event::*,
event_loop::{ActiveEventLoop, EventLoop},
window::WindowAttributes,
};
/// Application shell owning the window configuration and, once the window
/// exists, the GPU core.
pub struct ShaderApp {
    window_title: String,
    // Requested logical window size as (width, height).
    window_size: (u32, u32),
    // Created on the first `resumed` event; None until then.
    core: Option<Core>,
}
impl ShaderApp {
    /// Build the app shell plus the winit event loop. The window itself is
    /// created lazily in the `resumed` handler (winit 0.30 model).
    pub fn new(window_title: &str, width: u32, height: u32) -> (Self, EventLoop<()>) {
        let event_loop = EventLoop::builder()
            .build()
            .expect("Failed to create event loop");
        //note: No window creation here - will happen in resumed event
        let app = Self {
            window_title: window_title.to_owned(),
            window_size: (width, height),
            core: None,
        };
        (app, event_loop)
    }

    /// Hand control to winit. `shader_creator` runs once the GPU core
    /// exists (first `resumed`); blocks until the event loop exits.
    pub fn run<S: ShaderManager + 'static>(
        self,
        event_loop: EventLoop<()>,
        shader_creator: impl FnOnce(&Core) -> S + 'static,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let mut handler = ShaderAppHandler {
            app: self,
            shader_creator: Some(Box::new(shader_creator)),
            shader: None,
            first_render: true,
        };
        Ok(event_loop.run_app(&mut handler)?)
    }

    /// The GPU core, if the window has already been created.
    pub fn core(&self) -> Option<&Core> {
        self.core.as_ref()
    }
}
// This struct implements ApplicationHandler to handle winit events
struct ShaderAppHandler<S: ShaderManager> {
    app: ShaderApp,
    // Consumed on the first `resumed` to build the shader once a Core exists.
    shader_creator: Option<Box<dyn FnOnce(&Core) -> S + 'static>>,
    shader: Option<S>,
    // True until the first successful render completes.
    first_render: bool,
}
impl<S: ShaderManager> ApplicationHandler for ShaderAppHandler<S> {
    /// Create the window and GPU core, then build the shader exactly once
    /// (the creator closure is taken out of its Option).
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        let window_attributes = WindowAttributes::default()
            .with_inner_size(LogicalSize::new(
                self.app.window_size.0,
                self.app.window_size.1,
            ))
            .with_title(&self.app.window_title)
            .with_resizable(true);
        let window = event_loop
            .create_window(window_attributes)
            .expect("Failed to create window");
        window.set_window_level(winit::window::WindowLevel::AlwaysOnTop);
        // GPU setup is async; block here since we're on the event thread.
        let core = pollster::block_on(Core::new(window));
        // Initialize the shader with the core if it hasn't been initialized yet
        if let Some(shader_creator) = self.shader_creator.take() {
            let shader = shader_creator(&core);
            self.shader = Some(shader);
        }
        self.app.core = Some(core);
    }
    /// Dispatch window events; the shader gets first refusal via
    /// `handle_input`, then close/resize/redraw are handled here.
    fn window_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        window_id: winit::window::WindowId,
        event: WindowEvent,
    ) {
        // Only process events if core and shader are initialized
        if let (Some(core), Some(shader)) = (&self.app.core, &mut self.shader) {
            if window_id == core.window().id() && !shader.handle_input(core, &event) {
                match event {
                    WindowEvent::CloseRequested => {
                        event_loop.exit();
                    }
                    WindowEvent::Resized(size) => {
                        if let Some(core) = &mut self.app.core {
                            // Ignore no-op resizes to avoid churn.
                            if core.size == size {
                                return;
                            }
                            core.resize(size);
                            shader.resize(core);
                        }
                    }
                    WindowEvent::RedrawRequested => {
                        shader.update(core);
                        match shader.render(core) {
                            Ok(_) => {
                                if self.first_render {
                                    self.first_render = false;
                                }
                            }
                            // Surface lost: recreate it by resizing to the
                            // current size.
                            Err(wgpu::SurfaceError::Lost) => {
                                if let Some(core) = &mut self.app.core {
                                    core.resize(core.size);
                                }
                            }
                            Err(wgpu::SurfaceError::OutOfMemory) => event_loop.exit(),
                            Err(e) => eprintln!("Render error: {e:?}"),
                        }
                    }
                    _ => {}
                }
            }
        }
    }
    /// Continuous rendering: request a redraw every time the loop idles.
    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
        if let Some(core) = &self.app.core {
            core.window().request_redraw();
        }
    }
    fn new_events(&mut self, _event_loop: &ActiveEventLoop, _cause: StartCause) {
        // No special handling needed for new events
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/controls.rs | src/controls.rs | #[cfg(feature = "media")]
use crate::gst::video::VideoTextureManager;
use crate::hdri::HdriMetadata;
use std::path::PathBuf;
/// One-shot request from the UI layer to the shader runtime.
///
/// Built fresh every frame; `Option`/`bool` fields left at their defaults
/// mean "no change requested".
#[derive(Clone)]
pub struct ControlsRequest {
    // Global playback state
    pub is_paused: bool,
    pub should_reset: bool,
    pub should_clear_buffers: bool,
    // Informational values echoed back to the UI (seconds / pixels / fps)
    pub current_time: Option<f32>,
    pub window_size: Option<(u32, u32)>,
    pub current_fps: Option<f32>,
    // Video reqs
    pub load_media_path: Option<PathBuf>,
    pub play_video: bool,
    pub pause_video: bool,
    pub restart_video: bool,
    pub seek_position: Option<f64>, // target position in seconds
    pub set_loop: Option<bool>,
    // Audio reqs
    pub set_volume: Option<f64>, // 0.0..=1.0
    pub mute_audio: Option<bool>,
    pub toggle_mute: bool,
    // HDRI reqs
    pub hdri_exposure: Option<f32>,
    pub hdri_gamma: Option<f32>,
    // Webcam reqs
    pub start_webcam: bool,
    pub stop_webcam: bool,
    pub webcam_device_index: Option<u32>,
}
impl Default for ControlsRequest {
    /// Builds a request with everything off, except that when the
    /// `CUNEUS_MEDIA` environment variable is set its (optionally
    /// double-quoted) path is queued for loading and playback is started.
    fn default() -> Self {
        let mut default_media = None;
        let mut should_play_video = false;
        if let Ok(media_dir) = std::env::var("CUNEUS_MEDIA") {
            println!("CUNEUS_MEDIA: {media_dir}");
            // Accept values wrapped in double quotes (common when the
            // variable is set from a shell). `strip_prefix`/`strip_suffix`
            // also avoids the panic the previous byte-slicing
            // (`&media_dir[1..len - 1]`) had on the one-character value `"`.
            let path = media_dir
                .strip_prefix('"')
                .and_then(|s| s.strip_suffix('"'))
                .unwrap_or(&media_dir);
            default_media = Some(PathBuf::from(path));
            should_play_video = true;
        }
        Self {
            is_paused: false,
            should_reset: false,
            should_clear_buffers: false,
            current_time: None,
            window_size: None,
            current_fps: None,
            // Video-related stuff
            load_media_path: default_media,
            play_video: should_play_video,
            pause_video: false,
            restart_video: false,
            seek_position: None,
            set_loop: None,
            // Audio-related stuff
            set_volume: None,
            mute_audio: None,
            toggle_mute: false,
            // HDRI-related stuff
            hdri_exposure: None,
            hdri_gamma: None,
            // Webcam-related stuff
            start_webcam: false,
            stop_webcam: false,
            webcam_device_index: None,
        }
    }
}
/// VideoInfo type alias
/// (duration, position, dimensions, framerate, is_looping, has_audio, volume, is_muted)
pub type VideoInfo = (
    Option<f32>, // duration in seconds (None when the pipeline can't report it)
    f32,         // current playback position in seconds
    (u32, u32),  // frame dimensions (width, height)
    Option<f32>, // framerate in frames per second, if reported
    bool,        // is_looping
    bool,        // has_audio
    f64,         // volume
    bool,        // is_muted
);
/// Pause/frame/time bookkeeping shared by all shader examples.
pub struct ShaderControls {
    is_paused: bool,
    // Instant the current pause began; None while running.
    pause_start: Option<std::time::Instant>,
    // Sum of all *completed* pause intervals, in seconds.
    total_pause_duration: f32,
    current_frame: u32,
    // Guards the one-time auto-load from the CUNEUS_MEDIA env var.
    media_loaded_once: bool,
}
impl Default for ShaderControls {
    /// Fresh controller: running (not paused), no pause bookkeeping,
    /// frame counter at zero, and no media auto-loaded yet.
    fn default() -> Self {
        Self {
            media_loaded_once: false,
            current_frame: 0,
            total_pause_duration: 0.0,
            pause_start: None,
            is_paused: false,
        }
    }
}
impl ShaderControls {
    /// Creates a controller in its default (running) state.
    pub fn new() -> Self {
        Self::default()
    }

    /// Advances and returns the frame counter. The counter freezes while
    /// paused and wraps around at `u32::MAX`.
    pub fn get_frame(&mut self) -> u32 {
        if !self.is_paused {
            self.current_frame = self.current_frame.wrapping_add(1);
        }
        self.current_frame
    }

    /// Returns shader time in seconds: wall-clock time since `start_time`
    /// minus all time spent paused (including the in-progress pause).
    pub fn get_time(&self, start_time: &std::time::Instant) -> f32 {
        let raw_time = start_time.elapsed().as_secs_f32();
        if self.is_paused {
            if let Some(pause_start) = self.pause_start {
                raw_time - self.total_pause_duration - pause_start.elapsed().as_secs_f32()
            } else {
                raw_time - self.total_pause_duration
            }
        } else {
            raw_time - self.total_pause_duration
        }
    }

    /// Builds the per-frame `ControlsRequest` handed to the UI layer.
    ///
    /// On the first call (and again after a reset) a media path from the
    /// `CUNEUS_MEDIA` environment variable is queued for loading.
    pub fn get_ui_request(
        &mut self,
        start_time: &std::time::Instant,
        size: &winit::dpi::PhysicalSize<u32>,
    ) -> ControlsRequest {
        let mut load_media_path = None;
        let mut play_video = false;
        if !self.media_loaded_once {
            if let Ok(media_dir) = std::env::var("CUNEUS_MEDIA") {
                println!("CUNEUS_MEDIA: {media_dir}");
                // Accept values wrapped in double quotes (common when the
                // variable is set from a shell). `strip_prefix`/`strip_suffix`
                // also avoids the panic the previous byte-slicing
                // (`&media_dir[1..len - 1]`) had on the one-character value `"`.
                let path = media_dir
                    .strip_prefix('"')
                    .and_then(|s| s.strip_suffix('"'))
                    .unwrap_or(&media_dir);
                load_media_path = Some(PathBuf::from(path));
                play_video = true;
                self.media_loaded_once = true;
            }
        }
        ControlsRequest {
            is_paused: self.is_paused,
            should_reset: false,
            should_clear_buffers: false,
            current_time: Some(self.get_time(start_time)),
            window_size: Some((size.width, size.height)),
            current_fps: None,
            load_media_path,
            play_video,
            pause_video: false,
            restart_video: false,
            seek_position: None,
            set_loop: None,
            set_volume: None,
            mute_audio: None,
            toggle_mute: false,
            hdri_exposure: None,
            hdri_gamma: None,
            start_webcam: false,
            stop_webcam: false,
            webcam_device_index: None,
        }
    }

    /// Applies the UI's request back onto the controller, maintaining the
    /// pause bookkeeping that `get_time` relies on.
    pub fn apply_ui_request(&mut self, request: ControlsRequest) {
        if request.should_reset {
            // Full reset: also re-arm the one-time CUNEUS_MEDIA auto-load.
            self.is_paused = false;
            self.pause_start = None;
            self.total_pause_duration = 0.0;
            self.current_frame = 0;
            self.media_loaded_once = false;
        } else if request.is_paused && !self.is_paused {
            // Transition running -> paused: remember when the pause began.
            self.pause_start = Some(std::time::Instant::now());
        } else if !request.is_paused && self.is_paused {
            // Transition paused -> running: fold the pause into the total.
            if let Some(pause_start) = self.pause_start {
                self.total_pause_duration += pause_start.elapsed().as_secs_f32();
            }
            self.pause_start = None;
        }
        self.is_paused = request.is_paused;
    }

    /// Extract video info from a video texture manager
    #[cfg(feature = "media")]
    pub fn get_video_info(
        using_video_texture: bool,
        video_manager: Option<&VideoTextureManager>,
    ) -> Option<VideoInfo> {
        if using_video_texture {
            video_manager.map(|vm| {
                (
                    vm.duration().map(|d| d.seconds() as f32),
                    vm.position().seconds() as f32,
                    vm.dimensions(),
                    vm.framerate().map(|(num, den)| num as f32 / den as f32),
                    vm.is_looping(),
                    vm.has_audio(),
                    vm.volume(),
                    vm.is_muted(),
                )
            })
        } else {
            None
        }
    }

    /// media control panel (image, video, hdri, webcam): renders load
    /// buttons plus per-media-kind sub-panels, writing any user action
    /// into `request`.
    pub fn render_media_panel(
        ui: &mut egui::Ui,
        request: &mut ControlsRequest,
        using_video_texture: bool,
        video_info: Option<VideoInfo>,
        using_hdri_texture: bool,
        hdri_info: Option<HdriMetadata>,
        using_webcam_texture: bool,
        webcam_info: Option<(u32, u32)>,
    ) {
        ui.group(|ui| {
            ui.horizontal(|ui| {
                ui.heading("Media");
                ui.with_layout(egui::Layout::right_to_left(egui::Align::Center), |ui| {
                    if using_webcam_texture {
                        if ui.button("🔴 Stop Webcam").clicked() {
                            request.stop_webcam = true;
                        }
                    } else if ui.button("📹 Webcam").clicked() {
                        request.start_webcam = true;
                    }
                    if ui.button("Load").clicked() {
                        if let Some(path) = rfd::FileDialog::new()
                            .add_filter(
                                "Media Files",
                                &["png", "jpg", "jpeg", "mp4", "avi", "mkv", "webm", "mov", "mp3", "wav", "ogg"],
                            )
                            .add_filter("Images", &["png", "jpg", "jpeg", "webp", "bmp", "tiff"])
                            .add_filter("Videos", &["mp4", "avi", "mkv", "webm", "mov"])
                            .add_filter("HDRI", &["hdr", "exr"])
                            .pick_file()
                        {
                            request.load_media_path = Some(path);
                        }
                    }
                });
            });
            // Only show video controls if we're using a video texture
            if using_video_texture {
                ui.collapsing("Controls", |ui| {
                    // Main video controls
                    ui.horizontal(|ui| {
                        if ui.button("⏵").clicked() {
                            request.play_video = true;
                        }
                        if ui.button("⏸").clicked() {
                            request.pause_video = true;
                        }
                        if ui.button("⏮").clicked() {
                            request.restart_video = true;
                        }
                    });
                    if let Some((
                        duration_opt,
                        position_secs,
                        dimensions,
                        framerate_opt,
                        is_looping,
                        has_audio,
                        volume,
                        is_muted,
                    )) = video_info
                    {
                        ui.separator();
                        if let Some(duration_secs) = duration_opt {
                            ui.label(format!(
                                "Position: {position_secs:.1}s / {duration_secs:.1}s"
                            ));
                            let mut pos = position_secs;
                            if ui
                                .add(
                                    egui::Slider::new(&mut pos, 0.0..=duration_secs)
                                        .text("Timeline"),
                                )
                                .changed()
                            {
                                request.seek_position = Some(pos as f64);
                            }
                        }
                        // only show if video has audio
                        if has_audio {
                            ui.separator();
                            ui.heading("Audio");
                            let mut vol = volume;
                            if ui
                                .add(
                                    egui::Slider::new(&mut vol, 0.0..=1.0)
                                        .text("Volume")
                                        .show_value(true),
                                )
                                .changed()
                            {
                                request.set_volume = Some(vol);
                            }
                            ui.horizontal(|ui| {
                                let mut muted = is_muted;
                                if ui.checkbox(&mut muted, "Mute").changed() {
                                    request.mute_audio = Some(muted);
                                }
                            });
                        }
                        ui.separator();
                        ui.collapsing("Properties", |ui| {
                            ui.label(format!("Dimensions: {}x{}", dimensions.0, dimensions.1));
                            if let Some(fps) = framerate_opt {
                                ui.label(format!("Framerate: {fps:.2} fps"));
                            }
                            let mut looping = is_looping;
                            if ui.checkbox(&mut looping, "Loop").changed() {
                                request.set_loop = Some(looping);
                            }
                            if has_audio {
                                ui.label("Audio: Yes");
                            } else {
                                ui.label("Audio: No");
                            }
                        });
                    }
                });
            }
            if using_hdri_texture {
                ui.collapsing("HDRI Settings", |ui| {
                    if let Some(hdri_metadata) = &hdri_info {
                        ui.label(format!(
                            "Dimensions: {}x{}",
                            hdri_metadata.width, hdri_metadata.height
                        ));
                        ui.label("Type: High Dynamic Range Image");
                        let mut exposure = hdri_metadata.exposure;
                        if ui
                            .add(
                                egui::Slider::new(&mut exposure, 0.1..=6.28)
                                    .text("Exposure")
                                    .logarithmic(true),
                            )
                            .changed()
                        {
                            request.hdri_exposure = Some(exposure);
                        }
                        let mut gamma = hdri_metadata.gamma;
                        if ui
                            .add(egui::Slider::new(&mut gamma, 0.1..=6.28).text("Gamma"))
                            .changed()
                        {
                            request.hdri_gamma = Some(gamma);
                        }
                    } else {
                        ui.label("HDRI metadata not available");
                    }
                });
            }
            if using_webcam_texture {
                ui.collapsing("Webcam Settings", |ui| {
                    if let Some((width, height)) = webcam_info {
                        ui.label(format!("Resolution: {width}x{height}"));
                        ui.label("Type: Live Camera Feed");
                        ui.label("Status: Active");
                    } else {
                        ui.label("Webcam information not available");
                    }
                });
            }
        });
    }

    /// Renders the common play/pause/reset widget plus time, FPS, and
    /// resolution readouts, writing any user action into `request`.
    pub fn render_controls_widget(ui: &mut egui::Ui, request: &mut ControlsRequest) {
        ui.vertical(|ui| {
            ui.horizontal(|ui| {
                if ui
                    .button(if request.is_paused {
                        "▶ Resume"
                    } else {
                        "⏸ Pause"
                    })
                    .clicked()
                {
                    request.is_paused = !request.is_paused;
                }
                if ui.button("↺ Reset").clicked() {
                    request.should_reset = true;
                    request.should_clear_buffers = true;
                }
                if let Some(time) = request.current_time {
                    ui.label(format!("Time: {time:.2}s"));
                }
                if let Some(fps) = request.current_fps {
                    ui.label(format!("FPS: {fps:.1}"));
                }
            });
            if let Some((width, height)) = request.window_size {
                ui.horizontal(|ui| {
                    ui.label(format!("Resolution: {width}x{height}"));
                });
            }
        });
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/lib.rs | src/lib.rs | use std::sync::Arc;
use winit::window::Window;
pub use anyhow;
pub use bytemuck;
pub use egui;
pub use env_logger;
pub use wgpu;
pub use winit;
pub use bytemuck::{Pod, Zeroable};
pub use wgpu::SurfaceError;
pub use winit::event::WindowEvent;
mod app;
mod atomic;
pub mod compute;
mod controls;
mod export;
mod font;
mod fps;
#[cfg(feature = "media")]
pub mod gst;
pub mod hdri;
mod hot;
mod keyinputs;
mod mouse;
mod renderer;
mod renderkit;
mod shader;
mod spectrum;
mod texture;
mod uniforms;
pub use app::*;
pub use atomic::AtomicBuffer;
pub use controls::{ControlsRequest, ShaderControls};
pub use export::{save_frame, ExportError, ExportManager, ExportSettings, ExportUiState};
pub use font::{CharInfo, FontSystem, FontUniforms};
pub use hdri::*;
pub use hot::ShaderHotReload;
pub use keyinputs::KeyInputHandler;
pub use mouse::*;
pub use renderer::*;
pub use renderkit::*;
pub use shader::*;
pub use texture::*;
pub use uniforms::*;
#[cfg(feature = "media")]
pub mod audio {
pub use crate::gst::audio::{
AudioDataProvider, AudioSynthManager, AudioSynthUniform, AudioWaveform, EnvelopeConfig,
MusicalNote, SynthesisManager, SynthesisUniform, SynthesisWaveform,
};
}
pub mod prelude {
pub use crate::{
compute::ComputeShader, compute::ComputeShaderBuilder, compute::MultiPassManager,
save_frame, AtomicBuffer, CharInfo, ControlsRequest, Core, ExportManager, FontSystem,
FontUniforms, KeyInputHandler, RenderKit, Renderer, ShaderApp, ShaderControls,
ShaderHotReload, ShaderManager, TextureManager, UniformBinding, UniformProvider,
};
#[cfg(feature = "media")]
pub use crate::{
audio::{
AudioWaveform, MusicalNote, SynthesisManager, SynthesisUniform, SynthesisWaveform,
},
gst,
};
pub use crate::anyhow;
pub use crate::bytemuck;
pub use crate::egui;
pub use crate::wgpu;
pub use crate::winit;
pub use crate::SurfaceError;
pub use crate::WindowEvent;
pub use env_logger;
pub use bytemuck::{bytes_of, cast_slice, Pod, Zeroable};
pub use wgpu::{
BindGroup, BindGroupLayout, Buffer, ComputePipeline, Device, Queue, RenderPipeline,
ShaderModule, Surface, SurfaceConfiguration, TextureFormat, TextureView,
};
pub use winit::{dpi::PhysicalSize, event_loop::EventLoop, window::Window};
}
/// Central GPU/window state shared by every shader example: the wgpu
/// surface/device/queue, the current swapchain configuration, and the
/// winit window itself.
pub struct Core {
    pub surface: wgpu::Surface<'static>,
    // Device is Arc'd so subsystems (e.g. hot reload) can hold a handle.
    pub device: Arc<wgpu::Device>,
    pub queue: wgpu::Queue,
    pub config: wgpu::SurfaceConfiguration,
    // Cached inner size; kept in sync with `config` by `resize`.
    pub size: winit::dpi::PhysicalSize<u32>,
    pub window: Window,
}
impl Core {
    /// Creates the wgpu instance, surface, adapter, device, and queue for
    /// `window`, and configures the swapchain to the window's inner size.
    ///
    /// NOTE(review): the surface is created through a raw pointer to a
    /// boxed `Window` which is then moved back out into `Self`. This leans
    /// on the surface only keeping raw window/display handles (not the box
    /// address) — confirm this stays sound across wgpu upgrades.
    pub async fn new(window: Window) -> Self {
        let size = window.inner_size();
        let instance_desc = wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            backend_options: wgpu::BackendOptions::default(),
            ..Default::default()
        };
        let instance = wgpu::Instance::new(&instance_desc);
        // Box the window and leak it temporarily so the surface can borrow
        // a stable address while we finish device setup.
        let window_box = Box::new(window);
        let window_ptr = Box::into_raw(window_box);
        // SAFETY: window_ptr is valid as we just created it
        let surface = unsafe { instance.create_surface(&*window_ptr) }.unwrap();
        // Prefer high-performance power only when a discrete GPU exists;
        // otherwise let wgpu pick its default.
        let power_preference = instance
            .enumerate_adapters(wgpu::Backends::all())
            .iter()
            .find(|p| p.get_info().device_type == wgpu::DeviceType::DiscreteGpu)
            .map(|_| wgpu::PowerPreference::HighPerformance)
            .unwrap_or(wgpu::PowerPreference::default());
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference,
                compatible_surface: Some(&surface),
                force_fallback_adapter: false,
            })
            .await
            .unwrap();
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                required_limits: wgpu::Limits::default(),
                memory_hints: Default::default(),
                experimental_features: Default::default(),
                trace: wgpu::Trace::default(),
            })
            .await
            .unwrap();
        let device = Arc::new(device);
        let surface_caps = surface.get_capabilities(&adapter);
        // Prefer an sRGB format matching CAPTURE_FORMAT (used by the frame
        // exporter); otherwise fall back to the first supported format.
        let surface_format = surface_caps
            .formats
            .iter()
            .copied()
            .find(|f| f.is_srgb() && *f == CAPTURE_FORMAT)
            .unwrap_or(surface_caps.formats[0]);
        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: size.width,
            height: size.height,
            // Fifo = vsync; always supported.
            present_mode: wgpu::PresentMode::Fifo,
            alpha_mode: surface_caps.alpha_modes[0],
            view_formats: vec![],
            desired_maximum_frame_latency: 2,
        };
        surface.configure(&device, &config);
        // SAFETY: window_ptr is still valid and we're taking back ownership
        let window = unsafe { *Box::from_raw(window_ptr) };
        Self {
            surface,
            device,
            queue,
            config,
            size,
            window,
        }
    }

    /// Borrow the underlying winit window.
    pub fn window(&self) -> &Window {
        &self.window
    }

    /// Reconfigures the surface for `new_size`; zero-sized requests (e.g.
    /// while minimized) are ignored.
    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        println!("Core resize called with size: {new_size:?}");
        if new_size.width > 0 && new_size.height > 0 {
            self.size = new_size;
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);
            println!("Surface reconfigured");
        }
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/renderer.rs | src/renderer.rs | use wgpu::util::DeviceExt;
/// A single 2D vertex as laid out in the GPU vertex buffer.
/// `repr(C)` + `Pod` guarantee the byte layout matches what the shader
/// expects when the buffer is filled via `bytemuck::cast_slice`.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
    pub position: [f32; 2],
}
impl Vertex {
    // Shader location 0 holds the vec2 position.
    const ATTRIBS: [wgpu::VertexAttribute; 1] = wgpu::vertex_attr_array![0 => Float32x2];

    /// Describes this vertex type's buffer layout for pipeline creation.
    pub fn desc() -> wgpu::VertexBufferLayout<'static> {
        wgpu::VertexBufferLayout {
            array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
            step_mode: wgpu::VertexStepMode::Vertex,
            attributes: &Self::ATTRIBS,
        }
    }
}
/// Thin newtype around `wgpu::RenderPass` (see the `Deref`/`DerefMut`
/// impls below, which forward all pass methods).
#[derive(Debug)]
pub struct RenderPassWrapper<'a> {
    render_pass: wgpu::RenderPass<'a>,
}
impl<'a> RenderPassWrapper<'a> {
    /// Extract the inner RenderPass for special cases like egui's forget_lifetime()
    ///
    /// We need this because Deref gives us &RenderPass but some methods
    /// (like forget_lifetime) need owned RenderPass to consume it.
    pub fn into_inner(self) -> wgpu::RenderPass<'a> {
        self.render_pass
    }
}
/// Fullscreen-quad renderer: one pipeline plus the four-vertex
/// triangle-strip buffer it draws.
pub struct Renderer {
    pub render_pipeline: wgpu::RenderPipeline,
    pub vertex_buffer: wgpu::Buffer,
}
impl Renderer {
    /// Builds the fullscreen-quad pipeline from the given vertex/fragment
    /// modules, output `format`, and pipeline `layout`.
    ///
    /// `fragment_entry` overrides the fragment entry point; it defaults to
    /// `"fs_main"` when `None`.
    pub fn new(
        device: &wgpu::Device,
        vs_module: &wgpu::ShaderModule,
        fs_module: &wgpu::ShaderModule,
        format: wgpu::TextureFormat,
        layout: &wgpu::PipelineLayout,
        fragment_entry: Option<&str>,
    ) -> Self {
        // Four corners of the NDC square, drawn as a triangle strip.
        const VERTICES: &[Vertex] = &[
            Vertex {
                position: [-1.0, -1.0],
            },
            Vertex {
                position: [1.0, -1.0],
            },
            Vertex {
                position: [-1.0, 1.0],
            },
            Vertex {
                position: [1.0, 1.0],
            },
        ];
        println!("Creating vertex buffer");
        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::cast_slice(VERTICES),
            usage: wgpu::BufferUsages::VERTEX,
        });
        // No blending: shader output overwrites the target.
        let color_target_state = [Some(wgpu::ColorTargetState {
            format,
            blend: Some(wgpu::BlendState {
                color: wgpu::BlendComponent::REPLACE,
                alpha: wgpu::BlendComponent::REPLACE,
            }),
            write_mask: wgpu::ColorWrites::ALL,
        })];
        println!("Creating render pipeline");
        let pipeline_desc = wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(layout),
            vertex: wgpu::VertexState {
                module: vs_module,
                entry_point: Some("vs_main"),
                buffers: &[Vertex::desc()],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: fs_module,
                entry_point: Some(fragment_entry.unwrap_or("fs_main")),
                targets: &color_target_state,
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleStrip,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                unclipped_depth: false,
                polygon_mode: wgpu::PolygonMode::Fill,
                conservative: false,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
            cache: None,
        };
        let render_pipeline = device.create_render_pipeline(&pipeline_desc);
        Self {
            render_pipeline,
            vertex_buffer,
        }
    }

    /// Begins a single-color-attachment render pass on `view` with the
    /// given load op, wrapped so callers get `Deref`-forwarded pass methods.
    pub fn begin_render_pass<'a>(
        encoder: &'a mut wgpu::CommandEncoder,
        view: &'a wgpu::TextureView,
        load_op: wgpu::LoadOp<wgpu::Color>,
        label: Option<&'a str>,
    ) -> RenderPassWrapper<'a> {
        let render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label,
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: load_op,
                    store: wgpu::StoreOp::Store,
                },
                depth_slice: None,
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        RenderPassWrapper { render_pass }
    }
}
// Forward all immutable `wgpu::RenderPass` methods through the wrapper.
impl<'a> std::ops::Deref for RenderPassWrapper<'a> {
    type Target = wgpu::RenderPass<'a>;
    fn deref(&self) -> &Self::Target {
        &self.render_pass
    }
}
// Forward mutable pass methods (set_pipeline, draw, ...) as well.
impl<'a> std::ops::DerefMut for RenderPassWrapper<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.render_pass
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/texture.rs | src/texture.rs | pub struct TextureManager {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
pub bind_group: wgpu::BindGroup,
}
impl TextureManager {
    /// Creates an sRGB RGBA8 texture sized to `image`, builds a clamped
    /// linear sampler and a bind group against `layout` (texture at
    /// binding 0, sampler at binding 1), then uploads the pixels.
    pub fn new(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        image: &image::RgbaImage,
        layout: &wgpu::BindGroupLayout,
    ) -> Self {
        let dimensions = image.dimensions();
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Shader Texture"),
            size: wgpu::Extent3d {
                width: dimensions.0,
                height: dimensions.1,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            // COPY_DST so `update` can overwrite the pixels later.
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
            label: Some("Texture Bind Group"),
        });
        let manager = Self {
            texture,
            view,
            sampler,
            bind_group,
        };
        // Reuse `update` for the initial pixel upload so the copy layout
        // (bytes_per_row etc.) lives in exactly one place.
        manager.update(queue, image);
        manager
    }

    /// Re-uploads `image` into the existing texture.
    ///
    /// NOTE(review): assumes `image` has the same dimensions the texture
    /// was created with — a mismatched size would make the copy invalid.
    pub fn update(&self, queue: &wgpu::Queue, image: &image::RgbaImage) {
        let dimensions = image.dimensions();
        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: &self.texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            image,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // 4 bytes per RGBA8 pixel, tightly packed rows.
                bytes_per_row: Some(4 * dimensions.0),
                rows_per_image: Some(dimensions.1),
            },
            wgpu::Extent3d {
                width: dimensions.0,
                height: dimensions.1,
                depth_or_array_layers: 1,
            },
        );
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/hot.rs | src/hot.rs | use notify::{Event, EventKind, RecursiveMode, Watcher};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::time::{Duration, Instant};
/// Which kind of shader set a `ShaderHotReload` instance watches.
pub enum ShaderType {
    RenderPair, // Vertex + Fragment
    Compute,    // Compute
}
/// Watches WGSL files on disk and recompiles shader modules when they
/// change, with debouncing and content comparison to suppress spurious
/// filesystem events.
pub struct ShaderHotReload {
    pub vs_module: wgpu::ShaderModule,
    pub fs_module: wgpu::ShaderModule,
    // Present only in Compute mode.
    compute_module: Option<wgpu::ShaderModule>,
    device: Arc<wgpu::Device>,
    // [vs, fs] for RenderPair; [compute] for Compute.
    shader_paths: Vec<PathBuf>,
    // Last-seen file contents, used to skip no-op reloads.
    last_vs_content: String,
    last_fs_content: String,
    last_compute_content: Option<String>,
    #[allow(dead_code)]
    watcher: notify::RecommendedWatcher,
    rx: Receiver<notify::Event>,
    // Kept alive so the channel never closes while the watcher lives.
    _watcher_tx: std::sync::mpsc::Sender<notify::Event>,
    // Per-path debounce bookkeeping.
    last_update_times: HashMap<PathBuf, Instant>,
    debounce_duration: Duration,
    shader_type: ShaderType,
    // Compute entry-point name (Compute mode only).
    entry_point: Option<String>,
}
impl ShaderHotReload {
    /// Sets up hot reload for a vertex + fragment pair.
    ///
    /// `shader_paths[0]` is the vertex shader, `shader_paths[1]` the
    /// fragment shader; their parent directories are created if missing
    /// and watched recursively (with a non-recursive fallback on Windows).
    pub fn new(
        device: Arc<wgpu::Device>,
        shader_paths: Vec<PathBuf>,
        vs_module: wgpu::ShaderModule,
        fs_module: wgpu::ShaderModule,
    ) -> notify::Result<Self> {
        let (tx, rx) = channel();
        let watcher_tx = tx.clone();
        let mut watcher = notify::recommended_watcher(move |res: Result<Event, _>| {
            if let Ok(event) = res {
                match event.kind {
                    // Only file-content-affecting events are forwarded.
                    EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_) => {
                        tx.send(event).unwrap_or_default();
                    }
                    _ => {}
                }
            }
        })?;
        //normalize for Windows
        let normalized_paths: Vec<PathBuf> = shader_paths
            .iter()
            .map(|path| Self::normalize_path(path))
            .collect();
        for path in &normalized_paths {
            if let Some(parent) = path.parent() {
                if !parent.exists() {
                    fs::create_dir_all(parent).unwrap_or_else(|e| {
                        println!("Failed to create shader directory: {e}");
                    });
                }
                if let Err(e) = watcher.watch(parent, RecursiveMode::Recursive) {
                    println!(
                        "Warning: Could not watch shader directory {}: {}",
                        parent.display(),
                        e
                    );
                    if cfg!(windows) {
                        if let Err(e) = watcher.watch(parent, RecursiveMode::NonRecursive) {
                            println!("Fallback watch failed: {e}");
                        }
                    }
                }
            }
        }
        // Snapshot current contents so an unchanged file never triggers a
        // recompile.
        let last_vs_content = fs::read_to_string(&normalized_paths[0]).unwrap_or_default();
        let last_fs_content = fs::read_to_string(&normalized_paths[1]).unwrap_or_default();
        Ok(Self {
            vs_module,
            fs_module,
            compute_module: None,
            device,
            shader_paths: normalized_paths,
            last_vs_content,
            last_fs_content,
            last_compute_content: None,
            watcher,
            rx,
            _watcher_tx: watcher_tx,
            last_update_times: HashMap::new(),
            debounce_duration: Duration::from_millis(100),
            shader_type: ShaderType::RenderPair,
            entry_point: None,
        })
    }

    /// Sets up hot reload for a single compute shader with the given
    /// entry point; mirrors `new` but watches one file.
    pub fn new_compute(
        device: Arc<wgpu::Device>,
        shader_path: PathBuf,
        compute_module: wgpu::ShaderModule,
        entry_point: &str,
    ) -> notify::Result<Self> {
        let (tx, rx) = channel();
        let watcher_tx = tx.clone();
        let mut watcher = notify::recommended_watcher(move |res: Result<Event, _>| {
            if let Ok(event) = res {
                match event.kind {
                    EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_) => {
                        tx.send(event).unwrap_or_default();
                    }
                    _ => {}
                }
            }
        })?;
        let normalized_path = Self::normalize_path(&shader_path);
        let shader_paths = vec![normalized_path.clone()];
        if let Some(parent) = normalized_path.parent() {
            if !parent.exists() {
                fs::create_dir_all(parent).unwrap_or_else(|e| {
                    println!("Failed to create shader directory: {e}");
                });
            }
            if let Err(e) = watcher.watch(parent, RecursiveMode::Recursive) {
                println!(
                    "Warning: Could not watch shader directory {}: {}",
                    parent.display(),
                    e
                );
                if cfg!(windows) {
                    if let Err(e) = watcher.watch(parent, RecursiveMode::NonRecursive) {
                        println!("Fallback watch failed: {e}");
                    }
                }
            }
        }
        let last_compute_content = fs::read_to_string(&normalized_path).unwrap_or_default();
        // The struct always carries vs/fs modules; give Compute mode an
        // empty placeholder module for those slots.
        let dummy_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Dummy Shader Module"),
            source: wgpu::ShaderSource::Wgsl("".into()),
        });
        Ok(Self {
            vs_module: dummy_shader.clone(),
            fs_module: dummy_shader,
            compute_module: Some(compute_module),
            device,
            shader_paths,
            last_vs_content: String::new(),
            last_fs_content: String::new(),
            last_compute_content: Some(last_compute_content),
            watcher,
            rx,
            _watcher_tx: watcher_tx,
            last_update_times: HashMap::new(),
            debounce_duration: Duration::from_millis(100),
            shader_type: ShaderType::Compute,
            entry_point: Some(entry_point.to_string()),
        })
    }

    /// Canonicalizes the path on Windows (falling back to the original on
    /// error); returns it unchanged elsewhere.
    fn normalize_path(path: &Path) -> PathBuf {
        if cfg!(windows) {
            path.components()
                .collect::<PathBuf>()
                .canonicalize()
                .unwrap_or_else(|_| path.to_path_buf())
        } else {
            path.to_path_buf()
        }
    }

    /// Polls for file changes and recompiles if needed.
    ///
    /// Returns the new (vs, fs) pair in RenderPair mode; in Compute mode
    /// it reloads internally and always returns `None` — use
    /// `get_compute_module` to fetch the result.
    pub fn check_and_reload(&mut self) -> Option<(&wgpu::ShaderModule, &wgpu::ShaderModule)> {
        match self.shader_type {
            ShaderType::RenderPair => self.reload_render_shaders(),
            ShaderType::Compute => {
                self.reload_compute_shader();
                None
            }
        }
    }

    /// Drains pending watch events (debounced per path), and if any
    /// survive, re-reads and recompiles both shaders when their content
    /// actually changed.
    fn reload_render_shaders(&mut self) -> Option<(&wgpu::ShaderModule, &wgpu::ShaderModule)> {
        let mut should_reload = false;
        // Process all pending events
        while let Ok(event) = self.rx.try_recv() {
            for path in event.paths {
                let now = Instant::now();
                if let Some(last_update) = self.last_update_times.get(&path) {
                    if now.duration_since(*last_update) < self.debounce_duration {
                        continue;
                    }
                }
                self.last_update_times.insert(path.clone(), now);
                should_reload = true;
            }
        }
        if !should_reload {
            return None;
        }
        let vs_content = match fs::read_to_string(&self.shader_paths[0]) {
            Ok(content) => content,
            Err(e) => {
                eprintln!("Failed to read vertex shader: {e}");
                return None;
            }
        };
        let fs_content = match fs::read_to_string(&self.shader_paths[1]) {
            Ok(content) => content,
            Err(e) => {
                eprintln!("Failed to read fragment shader: {e}");
                return None;
            }
        };
        if vs_content == self.last_vs_content && fs_content == self.last_fs_content {
            return None;
        }
        // Compile both before committing either, so a broken edit leaves
        // the previous working pair in place.
        let new_vs = self.create_shader_module(&vs_content, "Vertex Shader")?;
        let new_fs = self.create_shader_module(&fs_content, "Fragment Shader")?;
        self.last_vs_content = vs_content;
        self.last_fs_content = fs_content;
        self.vs_module = new_vs;
        self.fs_module = new_fs;
        Some((&self.vs_module, &self.fs_module))
    }

    /// Compute-mode counterpart of `reload_render_shaders`; returns the
    /// new module only when a recompile actually happened.
    pub fn reload_compute_shader(&mut self) -> Option<&wgpu::ShaderModule> {
        let mut should_reload = false;
        while let Ok(event) = self.rx.try_recv() {
            for path in event.paths {
                let now = Instant::now();
                if let Some(last_update) = self.last_update_times.get(&path) {
                    if now.duration_since(*last_update) < self.debounce_duration {
                        continue;
                    }
                }
                self.last_update_times.insert(path.clone(), now);
                should_reload = true;
            }
        }
        if !should_reload {
            return None;
        }
        let compute_content = match fs::read_to_string(&self.shader_paths[0]) {
            Ok(content) => content,
            Err(e) => {
                eprintln!("Failed to read compute shader: {e}");
                return None;
            }
        };
        if let Some(ref last_content) = self.last_compute_content {
            if compute_content == *last_content {
                return None;
            }
        }
        let new_compute = self.create_shader_module(&compute_content, "Compute Shader")?;
        self.last_compute_content = Some(compute_content);
        self.compute_module = Some(new_compute);
        self.compute_module.as_ref()
    }

    /// Compiles `source` as WGSL, returning `None` on failure.
    ///
    /// NOTE(review): failures are detected via `catch_unwind`; whether an
    /// invalid shader panics (vs. reporting through error scopes) depends
    /// on wgpu's validation configuration — confirm for the wgpu version
    /// in use.
    fn create_shader_module(&self, source: &str, label: &str) -> Option<wgpu::ShaderModule> {
        let desc = wgpu::ShaderModuleDescriptor {
            label: Some(label),
            source: wgpu::ShaderSource::Wgsl(source.into()),
        };
        let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            self.device.create_shader_module(desc)
        }));
        match result {
            Ok(module) => Some(module),
            Err(e) => {
                if let Some(error_msg) = e.downcast_ref::<String>() {
                    eprintln!("Shader compilation error in {label}: {error_msg}");
                } else {
                    eprintln!("Shader compilation error in {label}");
                }
                None
            }
        }
    }

    /// Compute entry-point name, if this instance is in Compute mode.
    pub fn entry_point(&self) -> Option<&str> {
        self.entry_point.as_deref()
    }

    /// Current compute module, if this instance is in Compute mode.
    pub fn get_compute_module(&self) -> Option<&wgpu::ShaderModule> {
        self.compute_module.as_ref()
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/mouse.rs | src/mouse.rs | use crate::UniformProvider;
use winit::event::WindowEvent;
/// Mouse state uploaded to shaders as a uniform. `repr(C)` + `Pod` keep
/// the byte layout stable; field order must stay in sync with the
/// WGSL-side struct (NOTE(review): confirm against the shaders).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct MouseUniform {
    // Cursor position normalized to [0, 1] in window coordinates.
    pub position: [f32; 2],
    // Normalized position at the most recent button press.
    pub click_position: [f32; 2],
    // Accumulated scroll deltas (x, y) until reset_wheel is called.
    pub wheel: [f32; 2],
    // buttons[0] is a bitmask of held buttons; buttons[1] is padding.
    pub buttons: [u32; 2],
}
impl Default for MouseUniform {
    /// Zero-initialized state: cursor at the origin, no recorded click,
    /// no accumulated wheel motion, and no buttons held.
    fn default() -> Self {
        Self {
            position: [0.0; 2],
            click_position: [0.0; 2],
            wheel: [0.0; 2],
            buttons: [0; 2],
        }
    }
}
impl UniformProvider for MouseUniform {
    // Raw bytes for the GPU upload; layout is fixed by repr(C)/Pod above.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// CPU-side mouse state: the shader-facing uniform plus raw pixel
/// coordinates and window-hover tracking.
pub struct MouseTracker {
    pub uniform: MouseUniform,
    // Last cursor position in physical pixels (un-normalized).
    pub raw_position: [f32; 2],
    pub is_inside_window: bool,
}
impl Default for MouseTracker {
    /// Starts with an all-zero uniform, the raw cursor at the origin, and
    /// the cursor assumed to be outside the window.
    fn default() -> Self {
        Self {
            is_inside_window: false,
            raw_position: [0.0, 0.0],
            uniform: MouseUniform::default(),
        }
    }
}
impl MouseTracker {
    /// Creates a tracker in its default state.
    pub fn new() -> Self {
        Self::default()
    }

    /// Updates mouse state from a window event.
    ///
    /// Returns `true` when the event was mouse-related and consumed.
    /// Events already handled by the UI (`ui_handled`) are ignored so the
    /// shader doesn't react to clicks on egui widgets.
    ///
    /// NOTE(review): `window_size` is used as a divisor for normalization;
    /// assumes a non-zero size (a minimized window would yield inf/NaN).
    pub fn handle_mouse_input(
        &mut self,
        event: &WindowEvent,
        window_size: [f32; 2],
        ui_handled: bool,
    ) -> bool {
        // If UI already handled the event, don't update mouse for shader
        if ui_handled {
            return false;
        }
        match event {
            WindowEvent::CursorMoved { position, .. } => {
                let x = position.x as f32;
                let y = position.y as f32;
                self.raw_position = [x, y];
                // Normalize to [0, 1] for the shader.
                self.uniform.position[0] = x / window_size[0];
                self.uniform.position[1] = y / window_size[1];
                true
            }
            WindowEvent::MouseInput { state, button, .. } => {
                use winit::event::{ElementState, MouseButton};
                let pressed = *state == ElementState::Pressed;
                // One bit per button in buttons[0]; Other(n) buttons map to
                // bits 5..=31, anything beyond is dropped (mask 0).
                let bit_mask = match button {
                    MouseButton::Left => 1,
                    MouseButton::Right => 2,
                    MouseButton::Middle => 4,
                    MouseButton::Back => 8,
                    MouseButton::Forward => 16,
                    MouseButton::Other(b) => {
                        if *b < 27 {
                            1 << (b + 5)
                        } else {
                            0
                        }
                    }
                };
                if pressed {
                    self.uniform.buttons[0] |= bit_mask;
                    // Record where the press happened (normalized).
                    self.uniform.click_position = self.uniform.position;
                } else {
                    self.uniform.buttons[0] &= !bit_mask;
                }
                true
            }
            WindowEvent::MouseWheel { delta, .. } => {
                use winit::event::MouseScrollDelta;
                // Accumulate deltas; pixel deltas are scaled down to be
                // roughly comparable to line deltas.
                match delta {
                    MouseScrollDelta::LineDelta(x, y) => {
                        self.uniform.wheel[0] += *x;
                        self.uniform.wheel[1] += *y;
                    }
                    MouseScrollDelta::PixelDelta(pos) => {
                        self.uniform.wheel[0] += pos.x as f32 / 100.0;
                        self.uniform.wheel[1] += pos.y as f32 / 100.0;
                    }
                }
                true
            }
            WindowEvent::CursorLeft { .. } => {
                self.is_inside_window = false;
                true
            }
            WindowEvent::CursorEntered { .. } => {
                self.is_inside_window = true;
                true
            }
            _ => false,
        }
    }

    /// Clears the accumulated wheel deltas (call once per frame after the
    /// shader has consumed them).
    pub fn reset_wheel(&mut self) {
        self.uniform.wheel = [0.0, 0.0];
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/keyinputs.rs | src/keyinputs.rs | use winit::event::{ElementState, KeyEvent};
use winit::keyboard::Key;
use winit::window::Window;
/// Global hotkey handling: F toggles fullscreen, H toggles UI visibility.
pub struct KeyInputHandler {
    // Cached fullscreen flag (kept in sync by toggle_fullscreen).
    is_fullscreen: bool,
    // Whether the egui overlay should currently be drawn.
    pub show_ui: bool,
}
impl Default for KeyInputHandler {
    // Delegates to `new`: windowed, UI visible.
    fn default() -> Self {
        Self::new()
    }
}
impl KeyInputHandler {
    /// Starts windowed with the UI visible.
    pub fn new() -> Self {
        Self {
            is_fullscreen: false,
            show_ui: true,
        }
    }

    /// Handles a keyboard event; returns `true` when it was consumed.
    ///
    /// On initial press (key repeats ignored): `F` toggles fullscreen,
    /// `H` toggles UI visibility.
    pub fn handle_keyboard_input(&mut self, window: &Window, event: &KeyEvent) -> bool {
        if event.state == ElementState::Pressed && !event.repeat {
            if let Key::Character(ch) = &event.logical_key {
                match ch.as_str() {
                    "f" | "F" => {
                        self.toggle_fullscreen(window);
                        return true;
                    }
                    "h" | "H" => {
                        self.show_ui = !self.show_ui;
                        return true;
                    }
                    _ => {}
                }
            }
        }
        false
    }

    /// Toggles borderless fullscreen on `window`.
    ///
    /// Queries the window's actual state rather than trusting the cached
    /// flag, so the toggle stays correct even if fullscreen was changed
    /// outside our control (e.g. via an OS gesture or titlebar button) —
    /// the old flag-only logic would invert in that case.
    fn toggle_fullscreen(&mut self, window: &Window) {
        let currently_fullscreen = window.fullscreen().is_some();
        if currently_fullscreen {
            window.set_fullscreen(None);
        } else {
            window.set_fullscreen(Some(winit::window::Fullscreen::Borderless(None)));
        }
        // Keep the cached flag in sync with the real state.
        self.is_fullscreen = !currently_fullscreen;
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/font.rs | src/font.rs | use crate::{Core, TextureManager, UniformBinding, UniformProvider};
use bytemuck::{Pod, Zeroable};
use std::collections::HashMap;
// font system using texture atlas
/// GPU uniform data describing the font atlas layout.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct FontUniforms {
    /// Atlas texture dimensions in pixels (width, height).
    pub atlas_size: [f32; 2],
    /// Size of one glyph cell in pixels (width, height).
    pub char_size: [f32; 2],
    /// Current render-target size in pixels.
    pub screen_size: [f32; 2],
    /// Number of cells per atlas row and column.
    pub grid_size: [f32; 2],
}
impl UniformProvider for FontUniforms {
    // Reinterpret the #[repr(C)] struct as raw bytes for a uniform upload.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// UV rectangle of one character's cell in the font atlas.
#[derive(Clone, Copy, Debug)]
pub struct CharInfo {
    /// Minimum (smaller u, v) corner in normalized [0, 1] atlas coordinates.
    pub uv_min: [f32; 2],
    /// Maximum (larger u, v) corner in normalized [0, 1] atlas coordinates.
    pub uv_max: [f32; 2],
    /// ASCII code of the character this cell represents.
    pub char_code: u8,
}
/// Texture-atlas text rendering state: the glyph atlas, per-character UV
/// lookup table, and the uniforms/layout that shaders bind to.
pub struct FontSystem {
    /// GPU atlas texture plus its view, sampler, and display bind group.
    pub atlas_texture: TextureManager,
    /// Printable-ASCII character → atlas-cell lookup.
    pub char_map: HashMap<char, CharInfo>,
    /// CPU + GPU copy of the font uniforms (atlas/char/screen/grid sizes).
    pub font_uniforms: UniformBinding<FontUniforms>,
    /// Compute-stage layout: uniforms at binding 0, atlas texture at binding 1.
    pub font_bind_group_layout: wgpu::BindGroupLayout,
    /// Atlas width in pixels.
    pub atlas_width: u32,
    /// Atlas height in pixels.
    pub atlas_height: u32,
    /// Cells per atlas row/column (16).
    pub grid_size: u32,
    /// Cell edge length in pixels (atlas_width / grid_size).
    pub char_size: u32,
}
impl FontSystem {
    /// Builds the font system: loads the embedded atlas PNG, uploads it as a
    /// texture, and prepares the uniforms and bind-group layout used by the
    /// text-rendering compute shaders.
    pub fn new(core: &Core) -> Self {
        //note that: I always use following:
        // _ATLAS_SIZE: u32 = 1024;
        // _CELL_SIZE: u32 = 64;
        // _GRID_SIZE: u32 = 16;
        let font_texture_bytes = include_bytes!("../assets/fonts/fonttexture.png");
        let font_image = image::load_from_memory(font_texture_bytes)
            .expect("Failed to load font texture")
            .into_rgba8();
        let atlas_width = font_image.width();
        let atlas_height = font_image.height();
        // Fixed 16x16 grid of glyph cells; the cell size is derived from the
        // atlas width so a differently-sized atlas still divides evenly.
        let grid_size = 16u32;
        let char_size = atlas_width / grid_size;
        // Compute-stage layout with the font uniforms at binding 0 and the
        // atlas texture at binding 1.
        let font_bind_group_layout =
            core.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    entries: &[
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStages::COMPUTE,
                            ty: wgpu::BindingType::Buffer {
                                ty: wgpu::BufferBindingType::Uniform,
                                has_dynamic_offset: false,
                                min_binding_size: None,
                            },
                            count: None,
                        },
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStages::COMPUTE,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                sample_type: wgpu::TextureSampleType::Float { filterable: true },
                                view_dimension: wgpu::TextureViewDimension::D2,
                            },
                            count: None,
                        },
                    ],
                    label: Some("Font Bind Group Layout"),
                });
        let font_uniforms_data = FontUniforms {
            atlas_size: [atlas_width as f32, atlas_height as f32],
            char_size: [char_size as f32, char_size as f32],
            screen_size: [core.size.width as f32, core.size.height as f32],
            grid_size: [grid_size as f32, grid_size as f32],
        };
        // Separate single-binding layout used only to build the uniform
        // buffer's own bind group inside UniformBinding::new.
        let font_uniform_layout =
            core.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    entries: &[wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    }],
                    label: Some("Font Uniforms Layout"),
                });
        let font_uniforms = UniformBinding::new(
            &core.device,
            "Font Uniforms",
            font_uniforms_data,
            &font_uniform_layout,
            0,
        );
        let atlas_texture = Self::create_font_texture(core, &font_image);
        let char_map = Self::generate_character_map(grid_size);
        Self {
            atlas_texture,
            char_map,
            font_uniforms,
            font_bind_group_layout,
            atlas_width,
            atlas_height,
            grid_size,
            char_size,
        }
    }
    /// Uploads the decoded atlas image into a new RGBA8 texture and wraps it,
    /// together with a clamping linear sampler and a fragment-stage display
    /// bind group, in a TextureManager.
    fn create_font_texture(core: &Core, font_image: &image::RgbaImage) -> TextureManager {
        let (width, height) = font_image.dimensions();
        let texture = core.device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Shadertoy Font Texture"),
            size: wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8Unorm,
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        // Copy the CPU-side pixels into the texture (tightly packed rows,
        // 4 bytes per texel).
        core.queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: &texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            font_image.as_raw(),
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(width * 4),
                rows_per_image: Some(height),
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        // Clamped addressing with linear mag/min filtering; no mips are
        // generated, so the mipmap filter is effectively unused.
        let sampler = core.device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        // Fragment-stage layout/bind group for displaying the atlas directly
        // (separate from the compute-stage font_bind_group_layout).
        let texture_bind_group_layout =
            core.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    label: Some("Font Texture Display Layout"),
                    entries: &[
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                sample_type: wgpu::TextureSampleType::Float { filterable: true },
                                view_dimension: wgpu::TextureViewDimension::D2,
                            },
                            count: None,
                        },
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                            count: None,
                        },
                    ],
                });
        let bind_group = core.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
            label: Some("Font Texture Atlas Bind Group"),
        });
        TextureManager {
            texture,
            view,
            sampler,
            bind_group,
        }
    }
fn generate_character_map(grid_size: u32) -> HashMap<char, CharInfo> {
let mut char_map = HashMap::new();
for ascii_code in 32..127 {
let char = ascii_code as u8 as char;
let grid_index = ascii_code as usize;
if grid_index >= 256 {
break;
}
let grid_x = grid_index % (grid_size as usize);
let grid_y = grid_index / (grid_size as usize);
let char_info = CharInfo {
uv_min: [
grid_x as f32 / grid_size as f32,
grid_y as f32 / grid_size as f32,
],
uv_max: [
(grid_x + 1) as f32 / grid_size as f32,
(grid_y + 1) as f32 / grid_size as f32,
],
char_code: ascii_code as u8,
};
char_map.insert(char, char_info);
}
char_map
}
pub fn update_screen_size(&mut self, width: u32, height: u32, queue: &wgpu::Queue) {
self.font_uniforms.data.screen_size = [width as f32, height as f32];
self.font_uniforms.update(queue);
}
    /// Looks up the atlas cell for `ch`; None for unmapped (non-printable-ASCII) characters.
    pub fn get_char_info(&self, ch: char) -> Option<&CharInfo> {
        self.char_map.get(&ch)
    }
pub fn create_font_bind_group(&self, device: &wgpu::Device) -> wgpu::BindGroup {
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.font_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: self.font_uniforms.buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&self.atlas_texture.view),
},
],
label: Some("Font Bind Group"),
})
}
    /// Atlas texture size in pixels as (width, height).
    pub fn get_atlas_dimensions(&self) -> (u32, u32) {
        (self.atlas_width, self.atlas_height)
    }
    /// Edge length of one glyph cell in pixels.
    pub fn get_char_size(&self) -> u32 {
        self.char_size
    }
    /// Number of glyph cells per atlas row/column.
    pub fn get_grid_size(&self) -> u32 {
        self.grid_size
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/uniforms.rs | src/uniforms.rs | use wgpu::util::DeviceExt;
/// Types that can expose themselves as raw bytes for upload into a GPU
/// uniform buffer.
pub trait UniformProvider {
    /// Returns the value's byte representation, exactly as written to the buffer.
    fn as_bytes(&self) -> &[u8];
}
/// Per-frame resolution + audio data shared with shaders.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ResolutionUniform {
    /// Render-target size in pixels (width, height).
    pub dimensions: [f32; 2],
    /// Padding after `dimensions` — presumably for GPU uniform alignment;
    /// verify against the WGSL struct layout.
    pub _padding: [f32; 2],
    /// Smoothed spectrum bands packed as 32 vec4s (128 slots; the spectrum
    /// analyzer fills the first 64).
    pub audio_data: [[f32; 4]; 32],
    /// Estimated beats-per-minute of the current audio stream.
    pub bpm: f32,
    /// Average band energy in the bass range.
    pub bass_energy: f32,
    /// Average band energy in the mid range.
    pub mid_energy: f32,
    /// Average band energy in the high range.
    pub high_energy: f32,
    /// Bass-weighted combination of the three energy bands.
    pub total_energy: f32,
    /// Trailing padding — presumably keeps the struct a multiple of 16 bytes.
    pub _energy_padding: [f32; 3],
}
impl UniformProvider for ResolutionUniform {
    // Reinterpret the #[repr(C)] struct as raw bytes for a uniform upload.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// A CPU-side uniform value paired with its GPU buffer and bind group.
pub struct UniformBinding<T: UniformProvider> {
    /// GPU buffer holding the serialized `data`.
    pub buffer: wgpu::Buffer,
    /// Bind group exposing `buffer` at the binding slot chosen in `new`.
    pub bind_group: wgpu::BindGroup,
    /// CPU copy; mutate it, then call `update` to sync to the GPU.
    pub data: T,
}
impl<T: UniformProvider> UniformBinding<T> {
    /// Creates a GPU uniform buffer initialized with `data` plus a bind
    /// group exposing it at `binding` within `layout`.
    pub fn new(
        device: &wgpu::Device,
        label: &str,
        data: T,
        layout: &wgpu::BindGroupLayout,
        binding: u32,
    ) -> Self {
        let gpu_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some(label),
            contents: data.as_bytes(),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });
        let entries = [wgpu::BindGroupEntry {
            binding,
            resource: gpu_buffer.as_entire_binding(),
        }];
        let group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout,
            entries: &entries,
            label: Some(label),
        });
        Self {
            buffer: gpu_buffer,
            bind_group: group,
            data,
        }
    }

    /// Re-uploads the CPU-side `data` into the GPU buffer.
    pub fn update(&self, queue: &wgpu::Queue) {
        queue.write_buffer(&self.buffer, 0, self.data.as_bytes());
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/fps.rs | src/fps.rs | use std::collections::VecDeque;
use std::time::Instant;
/// Rolling-average frames-per-second counter.
pub struct FpsTracker {
    // Timestamp of the previous `update` call.
    last_frame_time: Instant,
    // Recent frame durations in seconds, capped at 30 entries.
    frame_times: VecDeque<f32>,
    // Latest averaged FPS value.
    current_fps: f32,
}
impl Default for FpsTracker {
fn default() -> Self {
Self::new()
}
}
impl FpsTracker {
    /// Starts a tracker anchored at the current instant with no history.
    pub fn new() -> Self {
        Self {
            last_frame_time: Instant::now(),
            frame_times: VecDeque::with_capacity(60),
            current_fps: 0.0,
        }
    }

    /// Records the time since the previous call and refreshes the rolling
    /// average FPS over (at most) the last 30 frames.
    pub fn update(&mut self) {
        let now = Instant::now();
        let dt = now.duration_since(self.last_frame_time).as_secs_f32();
        self.last_frame_time = now;
        // Ignore obviously bogus deltas (paused app, clock hiccups) so a
        // single spike doesn't wreck the average.
        if !(dt > 0.0 && dt < 1.0) {
            return;
        }
        self.frame_times.push_back(dt);
        while self.frame_times.len() > 30 {
            self.frame_times.pop_front();
        }
        let count = self.frame_times.len();
        if count > 0 {
            let mean = self.frame_times.iter().sum::<f32>() / count as f32;
            self.current_fps = 1.0 / mean;
        }
    }

    /// Most recently computed average FPS (0.0 until enough samples exist).
    pub fn fps(&self) -> f32 {
        self.current_fps
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/hdri.rs | src/hdri.rs | use crate::TextureManager;
use image::codecs::hdr::HdrDecoder;
use image::{ImageDecoder, RgbaImage};
use std::io::Cursor;
/// Dimensions and tone-mapping parameters of a loaded HDRI image.
#[derive(Clone, Debug, Copy)]
pub struct HdriMetadata {
    /// Image width in pixels.
    pub width: u32,
    /// Image height in pixels.
    pub height: u32,
    /// Linear exposure multiplier applied during conversion.
    pub exposure: f32,
    /// Gamma used when encoding to 8-bit (the loader uses a fixed 2.2).
    pub gamma: f32,
}
impl Default for HdriMetadata {
fn default() -> Self {
Self {
width: 0,
height: 0,
exposure: 1.0,
gamma: 2.2,
}
}
}
/// Decodes HDR/EXR bytes — tone-mapped on the CPU with `exposure` and a
/// fixed 2.2 gamma — uploads the result into an RGBA8 sRGB texture with a
/// clamping linear sampler, and returns the texture bundle plus the
/// metadata used for the conversion.
pub fn load_hdri_texture(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    data: &[u8],
    layout: &wgpu::BindGroupLayout,
    exposure: f32,
) -> Result<(TextureManager, HdriMetadata), String> {
    let format = detect_format(data)?;
    let gamma = 2.2;
    // CPU-side conversion to 8-bit RGBA based on the detected container.
    let hdri_image = match format {
        HdriFormat::Hdr => hdr_to_rgba8(data, exposure, Some(gamma))?,
        HdriFormat::Exr => exr_to_rgba8(data, exposure, Some(gamma))?,
    };
    let dimensions = hdri_image.dimensions();
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("HDRI Texture"),
        size: wgpu::Extent3d {
            width: dimensions.0,
            height: dimensions.1,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
        view_formats: &[],
    });
    let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        address_mode_u: wgpu::AddressMode::ClampToEdge,
        address_mode_v: wgpu::AddressMode::ClampToEdge,
        address_mode_w: wgpu::AddressMode::ClampToEdge,
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Linear,
        ..Default::default()
    });
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
        label: Some("HDRI Texture Bind Group"),
    });
    // Upload the converted pixels (tightly packed rows, 4 bytes per texel).
    queue.write_texture(
        wgpu::TexelCopyTextureInfo {
            texture: &texture,
            mip_level: 0,
            origin: wgpu::Origin3d::ZERO,
            aspect: wgpu::TextureAspect::All,
        },
        &hdri_image,
        wgpu::TexelCopyBufferLayout {
            offset: 0,
            bytes_per_row: Some(4 * dimensions.0),
            rows_per_image: Some(dimensions.1),
        },
        wgpu::Extent3d {
            width: dimensions.0,
            height: dimensions.1,
            depth_or_array_layers: 1,
        },
    );
    let metadata = HdriMetadata {
        width: dimensions.0,
        height: dimensions.1,
        exposure,
        gamma,
    };
    Ok((
        TextureManager {
            texture,
            view,
            sampler,
            bind_group,
        },
        metadata,
    ))
}
/// HDRI container formats this loader can decode.
enum HdriFormat {
    /// Radiance RGBE (.hdr).
    Hdr,
    /// OpenEXR (.exr).
    Exr,
}
/// Heuristically detects whether `data` is OpenEXR or Radiance HDR.
///
/// EXR is identified by its 4-byte magic number, HDR by its textual
/// "#?RADIANCE" / "#?RGBE" signature. Unknown input falls back to HDR so
/// the actual decoder reports the real error.
fn detect_format(data: &[u8]) -> Result<HdriFormat, String> {
    // OpenEXR magic number: 0x76 0x2f 0x31 0x01 ("v/1\x01").
    if data.starts_with(&[0x76, 0x2f, 0x31, 0x01]) {
        return Ok(HdriFormat::Exr);
    }
    // Radiance files begin with an ASCII signature line; inspect at most the
    // first 10 bytes (the length of "#?RADIANCE").
    let header = data.get(..10).unwrap_or(data);
    let header = String::from_utf8_lossy(header);
    if header.starts_with("#?RADIANCE") || header.starts_with("#?RGBE") {
        return Ok(HdriFormat::Hdr);
    }
    // I know looks NOT good to you :-P. This could be improved with more robust format detection
    Ok(HdriFormat::Hdr)
}
/// Decodes Radiance HDR bytes and converts them to 8-bit RGBA, scaling by
/// `exposure` and encoding with 1/gamma (default gamma 2.2); alpha is 255.
fn hdr_to_rgba8(hdr_data: &[u8], exposure: f32, gamma: Option<f32>) -> Result<RgbaImage, String> {
    let decoder = HdrDecoder::new(Cursor::new(hdr_data)).map_err(|e| e.to_string())?;
    let meta = decoder.metadata();
    let dynamic_img = image::DynamicImage::from_decoder(decoder)
        .map_err(|e| format!("Failed to decode HDR: {e}"))?;
    let rgb = dynamic_img.to_rgb8();
    let inv_gamma = 1.0 / gamma.unwrap_or(2.2);
    // Scale a 0..255 channel by exposure, gamma-encode, and clamp back to u8.
    let encode = |c: u8| -> u8 {
        let linear = (c as f32 / 255.0) * exposure;
        (linear.powf(inv_gamma).min(1.0) * 255.0) as u8
    };
    let mut out = RgbaImage::new(meta.width, meta.height);
    for (x, y, px) in rgb.enumerate_pixels() {
        out.put_pixel(
            x,
            y,
            image::Rgba([encode(px[0]), encode(px[1]), encode(px[2]), 255]),
        );
    }
    Ok(out)
}
/// Decodes OpenEXR bytes and converts the float pixels to 8-bit RGBA,
/// scaling color by `exposure` and encoding with 1/gamma (default 2.2);
/// alpha is clamped but not gamma-encoded.
fn exr_to_rgba8(exr_data: &[u8], exposure: f32, gamma: Option<f32>) -> Result<RgbaImage, String> {
    use image::codecs::openexr::OpenExrDecoder;
    let decoder = OpenExrDecoder::new(Cursor::new(exr_data))
        .map_err(|e| format!("Failed to decode EXR: {e}"))?;
    let (width, height) = decoder.dimensions();
    let dynamic_img = image::DynamicImage::from_decoder(decoder)
        .map_err(|e| format!("Failed to create DynamicImage from EXR: {e}"))?;
    let rgba = dynamic_img.to_rgba32f();
    let inv_gamma = 1.0 / gamma.unwrap_or(2.2);
    // Gamma-encode an exposure-scaled linear channel and clamp to u8.
    let encode = |linear: f32| -> u8 { (linear.powf(inv_gamma).min(1.0) * 255.0) as u8 };
    let mut out = RgbaImage::new(width, height);
    for (x, y, px) in rgba.enumerate_pixels() {
        let alpha = (px[3].min(1.0) * 255.0) as u8;
        out.put_pixel(
            x,
            y,
            image::Rgba([
                encode(px[0] * exposure),
                encode(px[1] * exposure),
                encode(px[2] * exposure),
                alpha,
            ]),
        );
    }
    Ok(out)
}
/// Re-converts the original HDRI bytes with a new exposure/gamma and writes
/// the result into the existing texture (no reallocation).
pub fn update_hdri_exposure(
    _device: &wgpu::Device,
    queue: &wgpu::Queue,
    data: &[u8],
    _layout: &wgpu::BindGroupLayout,
    texture_manager: &mut TextureManager,
    new_exposure: f32,
    gamma: Option<f32>,
) -> Result<(), String> {
    let rgba = match detect_format(data)? {
        HdriFormat::Hdr => hdr_to_rgba8(data, new_exposure, gamma)?,
        HdriFormat::Exr => exr_to_rgba8(data, new_exposure, gamma)?,
    };
    texture_manager.update(queue, &rgba);
    Ok(())
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/export.rs | src/export.rs | use image::ImageError;
use std::path::PathBuf;
use std::sync::mpsc;
/// Errors that can occur while saving exported frames.
#[derive(Debug)]
pub enum ExportError {
    /// Filesystem failure (e.g. creating the output directory).
    IoError(std::io::Error),
    /// Image-buffer construction or PNG encoding failure.
    ImageError(ImageError),
}
impl From<std::io::Error> for ExportError {
fn from(err: std::io::Error) -> Self {
ExportError::IoError(err)
}
}
impl From<ImageError> for ExportError {
fn from(err: ImageError) -> Self {
ExportError::ImageError(err)
}
}
/// Active configuration for an image-sequence export.
#[derive(Debug, Clone)]
pub struct ExportSettings {
    /// Directory the PNG frames are written into.
    pub export_path: PathBuf,
    /// Output frame width in pixels.
    pub width: u32,
    /// Output frame height in pixels.
    pub height: u32,
    /// Shader time (seconds) of the first exported frame.
    pub start_time: f32,
    /// Duration of the exported sequence in seconds.
    pub total_time: f32,
    /// Frames per second of the sequence.
    pub fps: u32,
    /// True while an export is in progress.
    pub is_exporting: bool,
}
impl Default for ExportSettings {
fn default() -> Self {
Self {
export_path: PathBuf::from("./export"),
width: 1920,
height: 1080,
start_time: 0.0,
total_time: 5.0,
fps: 60,
is_exporting: false,
}
}
}
/// Snapshot of the export parameters exchanged with the settings UI.
#[derive(Clone)]
pub struct ExportUiRequest {
    /// Output frame width in pixels.
    pub width: u32,
    /// Output frame height in pixels.
    pub height: u32,
    /// Shader time (seconds) of the first frame.
    pub start_time: f32,
    /// Sequence duration in seconds.
    pub total_time: f32,
    /// Frames per second.
    pub fps: u32,
    /// Output directory.
    pub path: PathBuf,
    /// True while an export is running (the UI shows "Exporting..." then).
    pub is_exporting: bool,
}
/// Scratch state for the export settings window; the temp_* fields mirror
/// `ExportSettings` while being edited.
#[derive(Default)]
pub struct ExportUiState {
    /// Whether the export window should be shown.
    pub show_window: bool,
    /// Pending width edit (not yet applied).
    pub temp_width: u32,
    /// Pending height edit.
    pub temp_height: u32,
    /// Pending start-time edit.
    pub temp_start_time: f32,
    /// Pending total-time edit.
    pub temp_total_time: f32,
    /// Pending FPS edit.
    pub temp_fps: u32,
}
/// Manages the export process and UI state
pub struct ExportManager {
    /// Active settings; `start_export` copies `temp_state` into these.
    settings: ExportSettings,
    /// Receives (frame_index, time) pairs from the producer thread while exporting.
    export_channel: Option<mpsc::Receiver<(u32, f32)>>,
    /// Egui window state exposed via `get_ui_elements`.
    ui_state: ExportUiState,
    /// Staging copy of the settings edited by the UI before an export starts.
    temp_state: TempExportState,
}
/// UI-editable copy of the export parameters, committed by `start_export`.
#[derive(Clone)]
struct TempExportState {
    /// Pending output width in pixels.
    width: u32,
    /// Pending output height in pixels.
    height: u32,
    /// Pending start time in seconds.
    start_time: f32,
    /// Pending total duration in seconds.
    total_time: f32,
    /// Pending frames per second.
    fps: u32,
    /// Pending output directory.
    path: PathBuf,
}
impl Default for ExportManager {
fn default() -> Self {
Self::new()
}
}
impl ExportManager {
pub fn new() -> Self {
let settings = ExportSettings::default();
let ui_state = ExportUiState::default();
let temp_state = TempExportState {
width: settings.width,
height: settings.height,
start_time: settings.start_time,
total_time: settings.total_time,
fps: settings.fps,
path: settings.export_path.clone(),
};
Self {
settings,
export_channel: None,
ui_state,
temp_state,
}
}
pub fn get_ui_request(&self) -> ExportUiRequest {
ExportUiRequest {
width: self.temp_state.width,
height: self.temp_state.height,
start_time: self.temp_state.start_time,
total_time: self.temp_state.total_time,
fps: self.temp_state.fps,
path: self.temp_state.path.clone(),
is_exporting: self.settings.is_exporting,
}
}
pub fn apply_ui_request(&mut self, request: ExportUiRequest) {
self.temp_state.width = request.width;
self.temp_state.height = request.height;
self.temp_state.start_time = request.start_time;
self.temp_state.total_time = request.total_time;
self.temp_state.fps = request.fps;
self.temp_state.path = request.path;
}
    /// Returns a reference to the current (active, not temp) export settings.
    pub fn settings(&self) -> &ExportSettings {
        &self.settings
    }
    /// Returns whether an export is currently in progress.
    pub fn is_exporting(&self) -> bool {
        self.settings.is_exporting
    }
    /// Mutable access to the active export settings.
    pub fn settings_mut(&mut self) -> &mut ExportSettings {
        &mut self.settings
    }
    /// Attempts to get the next frame for export.
    /// Non-blocking: `None` when no export is active or no (frame_index,
    /// time) pair is currently queued — `handle_export` treats that as done.
    pub fn try_get_next_frame(&mut self) -> Option<(u32, f32)> {
        self.export_channel.as_ref()?.try_recv().ok()
    }
pub fn start_export(&mut self) {
if self.settings.is_exporting {
return;
}
// Apply the temporary state to settings before starting export
self.settings.width = self.temp_state.width;
self.settings.height = self.temp_state.height;
self.settings.start_time = self.temp_state.start_time;
self.settings.total_time = self.temp_state.total_time;
self.settings.fps = self.temp_state.fps;
self.settings.export_path = self.temp_state.path.clone();
// Then start the export process
self.settings.is_exporting = true;
let settings = self.settings.clone();
let (tx, rx) = mpsc::channel();
std::thread::spawn(move || {
let total_frames = (settings.total_time * settings.fps as f32) as u32;
for frame in 0..total_frames {
let time = settings.start_time + (frame as f32 / settings.fps as f32);
if tx.send((frame, time)).is_err() {
break;
}
}
});
self.export_channel = Some(rx);
}
    /// Completes the export process.
    /// Dropping the receiver also makes the producer thread's next send fail,
    /// which stops it.
    pub fn complete_export(&mut self) {
        self.settings.is_exporting = false;
        self.export_channel = None;
    }
    /// Returns references to both UI state and settings for the UI to use.
    /// Note: hands out the live settings, not the staging temp state.
    pub fn get_ui_elements(&mut self) -> (&mut ExportUiState, &mut ExportSettings) {
        (&mut self.ui_state, &mut self.settings)
    }
    /// Draws the collapsible "Export" section into `ui`, editing `request`
    /// in place; returns true when the user clicked "Start Export".
    pub fn render_export_ui_widget(ui: &mut egui::Ui, request: &mut ExportUiRequest) -> bool {
        let mut should_start_export = false;
        ui.separator();
        ui.collapsing("Export", |ui| {
            if !request.is_exporting {
                // Resolution section
                ui.collapsing("Resolution", |ui| {
                    ui.add(
                        egui::DragValue::new(&mut request.width)
                            .range(1..=7680)
                            .prefix("Width: "),
                    );
                    ui.add(
                        egui::DragValue::new(&mut request.height)
                            .range(1..=4320)
                            .prefix("Height: "),
                    );
                });
                ui.collapsing("Time Settings", |ui| {
                    ui.add(
                        egui::DragValue::new(&mut request.start_time)
                            .prefix("Start Time: ")
                            .speed(0.1),
                    );
                    ui.add(
                        egui::DragValue::new(&mut request.total_time)
                            .prefix("Total Time: ")
                            .speed(0.1),
                    );
                    ui.add(
                        egui::DragValue::new(&mut request.fps)
                            .range(1..=240)
                            .prefix("FPS: "),
                    );
                });
                ui.collapsing("Output", |ui| {
                    ui.horizontal(|ui| {
                        ui.label("Export Path:");
                        if ui.button("Browse").clicked() {
                            if let Some(path) = rfd::FileDialog::new()
                                .set_directory(&request.path)
                                .pick_folder()
                            {
                                request.path = path;
                            }
                        }
                    });
                    ui.horizontal(|ui| {
                        let path_text = request.path.to_str().unwrap_or("Invalid path");
                        ui.add(egui::Label::new(
                            egui::RichText::new(path_text)
                                .monospace()
                                .weak()
                                .color(egui::Color32::from_rgb(150, 150, 150)),
                        ));
                    });
                    // Warn (but don't block) on a missing directory;
                    // save_frame creates it on demand.
                    if !request.path.exists() {
                        ui.horizontal(|ui| {
                            ui.label(
                                egui::RichText::new("⚠ Path doesn't exist!")
                                    .color(egui::Color32::from_rgb(255, 190, 0)),
                            );
                        });
                    }
                });
                ui.separator();
                if ui.button("Start Export").clicked() {
                    should_start_export = true;
                }
            } else {
                ui.label("Exporting...");
            }
        });
        should_start_export
    }
pub fn handle_export<F, E>(&mut self, capture_fn: F)
where
F: FnMut(u32, f32) -> Result<Vec<u8>, E>,
E: std::fmt::Debug,
{
let mut capture_fn = capture_fn;
if let Some((frame, time)) = self.try_get_next_frame() {
match capture_fn(frame, time) {
Ok(data) => {
let settings = self.settings();
if let Err(e) = save_frame(data, frame, settings) {
eprintln!("Error saving frame: {e:?}");
}
}
Err(e) => {
eprintln!("Error capturing frame: {e:?}");
}
}
} else {
self.complete_export();
}
}
}
/// Writes one captured frame to `export_path/frame_NNNNN.png`, creating the
/// directory if needed. `data` must be `width * height * 4` bytes of RGBA
/// (BGRA on macOS — see the channel swap below).
#[allow(unused_mut)]
pub fn save_frame(
    mut data: Vec<u8>,
    frame: u32,
    settings: &ExportSettings,
) -> Result<(), ExportError> {
    let frame_path = settings.export_path.join(format!("frame_{frame:05}.png"));
    if let Some(parent) = frame_path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    // On macOS the capture format is BGRA (see CAPTURE_FORMAT); swap the
    // B and R bytes of every 4-byte pixel so the PNG encoder receives RGBA.
    #[cfg(target_os = "macos")]
    {
        for chunk in data.chunks_mut(4) {
            chunk.swap(0, 2);
        }
    }
    // from_raw returns None when data.len() != width * height * 4.
    let image = image::ImageBuffer::<image::Rgba<u8>, Vec<u8>>::from_raw(
        settings.width,
        settings.height,
        data,
    )
    .ok_or_else(|| {
        ImageError::Parameter(image::error::ParameterError::from_kind(
            image::error::ParameterErrorKind::Generic("Failed to create image buffer".to_string()),
        ))
    })?;
    image.save(&frame_path)?;
    Ok(())
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/atomic.rs | src/atomic.rs | pub struct AtomicBuffer {
pub buffer: wgpu::Buffer,
pub bind_group: wgpu::BindGroup,
pub size: u32,
}
impl AtomicBuffer {
pub fn new(device: &wgpu::Device, size: u32, layout: &wgpu::BindGroupLayout) -> Self {
let buffer_size = (size * 4 * 4) as u64;
let max_binding_size = device.limits().max_storage_buffer_binding_size as u64;
let max_size = (max_binding_size / (4 * 4)) as u32;
let (actual_size, actual_buffer_size) = if buffer_size > max_binding_size {
println!(
"Requested buffer size {buffer_size} exceeds device max_storage_buffer_binding_size {max_binding_size}. Reducing size to {max_size}."
);
(max_size, max_binding_size)
} else {
(size, buffer_size)
};
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Atomic Buffer"),
size: actual_buffer_size,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: buffer.as_entire_binding(),
}],
label: Some("Atomic Buffer Bind Group"),
});
Self {
buffer,
bind_group,
size: actual_size,
}
}
pub fn clear(&self, queue: &wgpu::Queue) {
let clear_data = vec![0u32; (self.size * 4) as usize];
queue.write_buffer(&self.buffer, 0, bytemuck::cast_slice(&clear_data));
}
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/spectrum.rs | src/spectrum.rs | // This file is part of the gstreamer, and its inits the spectrum analyzer and bpm.
// I also did some smoothing related to audio data for the spectrum analyzer.
#[cfg(feature = "media")]
use crate::gst::video::VideoTextureManager;
#[cfg(feature = "media")]
use crate::ResolutionUniform;
#[cfg(feature = "media")]
use crate::UniformBinding;
#[cfg(feature = "media")]
use log::info;
/// Smooths per-frame audio spectrum data before it reaches shader uniforms.
pub struct SpectrumAnalyzer {
    // Previous frame's smoothed band values (32 vec4s; the first 64 slots
    // are used), kept for temporal attack/decay filtering.
    #[cfg(feature = "media")]
    prev_audio_data: [[f32; 4]; 32],
}
#[cfg(feature = "media")]
impl Default for SpectrumAnalyzer {
    fn default() -> Self {
        // Defer to the canonical constructor.
        SpectrumAnalyzer::new()
    }
}
#[cfg(feature = "media")]
impl SpectrumAnalyzer {
    /// Starts with a silent (all-zero) smoothing history.
    pub fn new() -> Self {
        Self {
            prev_audio_data: [[0.0; 4]; 32],
        }
    }
    /// Copies the current video soundtrack's spectrum into
    /// `resolution_uniform`, applying adaptive gain (RMS normalization),
    /// per-band shaping, temporal attack/decay smoothing, and beat-driven
    /// boosts, then uploads the uniform buffer to the GPU.
    pub fn update_spectrum(
        &mut self,
        queue: &wgpu::Queue,
        resolution_uniform: &mut UniformBinding<ResolutionUniform>,
        video_texture_manager: &Option<VideoTextureManager>,
        using_video_texture: bool,
    ) {
        // Initialize audio data arrays to zero
        for i in 0..32 {
            for j in 0..4 {
                resolution_uniform.data.audio_data[i][j] = 0.0;
            }
        }
        if using_video_texture {
            if let Some(video_manager) = video_texture_manager {
                if video_manager.has_audio() {
                    let spectrum_data = video_manager.spectrum_data();
                    let audio_level = video_manager.audio_level();
                    resolution_uniform.data.bpm = video_manager.get_bpm();
                    if !spectrum_data.magnitudes.is_empty() {
                        let bands = spectrum_data.bands;
                        // Highly sensitive threshold for detecting subtle high frequencies
                        let threshold: f32 = -60.0;
                        // Calculate adaptive gain based on RMS
                        // Target RMS: -20dB (moderate loudness)
                        // Loud songs (metal): RMS ~ -10dB → gain < 1.0 (reduce)
                        // Quiet songs: RMS ~ -30dB → gain > 1.0 (boost)
                        let target_rms_db = -20.0;
                        let current_rms_db = audio_level.rms_db as f32;
                        let adaptive_gain = if current_rms_db > -100.0 {
                            // Calculate gain to bring current RMS closer to target
                            let db_diff = target_rms_db - current_rms_db;
                            // Convert dB difference to linear gain (every 6dB ≈ 2x amplitude)
                            let gain = 10.0_f32.powf(db_diff / 20.0);
                            // Clamp to reasonable range (0.3 to 3.0)
                            gain.max(0.3).min(3.0)
                        } else {
                            1.0 // No adjustment if RMS is invalid
                        };
                        // Sanity log: shows the RMS normalization values each update.
                        info!(
                            "Audio Level - RMS: {:.2}dB, Peak: {:.3}, Gain: {:.2}x",
                            current_rms_db, audio_level.peak, adaptive_gain
                        );
                        // Process only the first 64 bands (128 exist, but using them all is expensive).
                        for i in 0..64 {
                            let band_percent = i as f32 / 64.0;
                            // Map to source index with slight emphasis on higher frequencies
                            let source_idx = (band_percent * (bands as f32 / 2.0)) as usize;
                            // Use narrow width for all frequencies for accuracy
                            let width = 1;
                            let end_idx = (source_idx + width).min(bands);
                            if source_idx < bands {
                                // Get peak value in this range
                                let mut peak: f32 = -120.0;
                                for j in source_idx..end_idx {
                                    if j < bands {
                                        let val = spectrum_data.magnitudes[j];
                                        peak = peak.max(val);
                                    }
                                }
                                // Map from dB scale to 0-1
                                let mut normalized =
                                    ((peak - threshold) / -threshold).max(0.0).min(1.0);
                                normalized = (normalized * adaptive_gain).min(1.0);
                                // Apply frequency-specific processing that's balanced
                                // Lower boost for bass, higher boost for treble
                                let enhanced = if band_percent < 0.2 {
                                    // Bass - slightly reduced
                                    (normalized.powf(0.75) * 0.85).min(1.0)
                                } else if band_percent < 0.4 {
                                    // Low-mids - neutral
                                    normalized.powf(0.7).min(1.0)
                                } else if band_percent < 0.6 {
                                    // Mids - slight boost
                                    (normalized.powf(0.65) * 1.1).min(1.0)
                                } else if band_percent < 0.8 {
                                    // Upper-mids - moderate boost
                                    (normalized.powf(0.55) * 1.6).min(1.0)
                                } else {
                                    // Highs - significant boost with lower power
                                    // The critical adjustment for high frequency sensitivity
                                    (normalized.powf(0.4) * 3.0).min(1.0)
                                };
                                // No minimum thresholds - let silent frequencies be silent
                                // Temporal smoothing with frequency-specific parameters
                                let vec_idx = i / 4;
                                let vec_component = i % 4;
                                if vec_idx < 32 {
                                    let prev_value = self.prev_audio_data[vec_idx][vec_component];
                                    // Fast attack for all frequencies - slightly faster for highs
                                    let attack = if band_percent < 0.6 { 0.6 } else { 0.7 };
                                    let decay = if band_percent < 0.6 { 0.3 } else { 0.25 };
                                    // Apply smoothing
                                    let smoothing_factor = if enhanced > prev_value {
                                        attack // Rising
                                    } else {
                                        decay // Falling
                                    };
                                    // Calculate smoothed value
                                    let smoothed = prev_value * (1.0 - smoothing_factor)
                                        + enhanced * smoothing_factor;
                                    // Store the result
                                    resolution_uniform.data.audio_data[vec_idx][vec_component] =
                                        smoothed;
                                    // Store for next frame
                                    self.prev_audio_data[vec_idx][vec_component] = smoothed;
                                }
                            }
                        }
                        // Compute audio energy for bass/mid/high ranges
                        let mut bass_sum = 0.0f32;
                        let mut mid_sum = 0.0f32;
                        let mut high_sum = 0.0f32;
                        for i in 0..64 {
                            let vec_idx = i / 4;
                            let component = i % 4;
                            let value = resolution_uniform.data.audio_data[vec_idx][component];
                            let freq = i as f32 / 64.0;
                            if freq < 0.2 {
                                bass_sum += value;
                            } else if freq < 0.6 {
                                mid_sum += value;
                            } else {
                                high_sum += value;
                            }
                        }
                        // Normalize by band count
                        let bass_energy = bass_sum / 13.0; // bands 0-12
                        let mid_energy = mid_sum / 26.0; // bands 13-38
                        let high_energy = high_sum / 25.0; // bands 39-63
                        let total_energy = (bass_energy * 1.5 + mid_energy + high_energy) / 3.5;
                        // Store in resolution uniform for shaders to access
                        resolution_uniform.data.bass_energy = bass_energy;
                        resolution_uniform.data.mid_energy = mid_energy;
                        resolution_uniform.data.high_energy = high_energy;
                        resolution_uniform.data.total_energy = total_energy;
                        // If we detect a beat, provide progressive boost to mid/high frequencies
                        if bass_energy > 0.5 {
                            // First quarter - bass
                            let q1 = 16 / 4;
                            // Second quarter - low-mids
                            let q2 = 16 / 2;
                            // Third quarter - upper-mids
                            let q3 = 3 * 16 / 4;
                            for i in 0..16 {
                                for j in 0..4 {
                                    if i < q1 {
                                        // No boost for bass (prevent dominance)
                                        // Actually reduce bass slightly on beats
                                        resolution_uniform.data.audio_data[i][j] *= 0.9;
                                    } else if i < q2 {
                                        // Small boost for low-mids
                                        resolution_uniform.data.audio_data[i][j] *= 1.1;
                                    } else if i < q3 {
                                        // Moderate boost for upper-mids
                                        resolution_uniform.data.audio_data[i][j] *= 1.3;
                                    } else {
                                        // Strong boost for highs during beats
                                        resolution_uniform.data.audio_data[i][j] *= 1.7;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        // Upload the (possibly all-zero) uniform data to the GPU.
        resolution_uniform.update(queue);
        }
    }
}
#[cfg(not(feature = "media"))]
impl SpectrumAnalyzer {
    /// Stub constructor for builds without the "media" feature (no state).
    pub fn new() -> Self {
        Self {}
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/shader.rs | src/shader.rs | use crate::Core;
use winit::event::WindowEvent;
/// Lifecycle hooks a cuneus shader application implements.
pub trait ShaderManager {
    /// Builds the application and its GPU resources.
    fn init(core: &Core) -> Self
    where
        Self: Sized;
    /// Called when the surface size changes; default does nothing.
    fn resize(&mut self, _core: &Core) {}
    /// Per-frame state update; default does nothing.
    fn update(&mut self, _core: &Core) {}
    /// Renders one frame to the surface.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError>;
    /// Returns true when the event was consumed; default ignores all input.
    fn handle_input(&mut self, _core: &Core, _event: &WindowEvent) -> bool {
        false
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/renderkit.rs | src/renderkit.rs | use crate::compute::ComputeShader;
#[cfg(feature = "media")]
use crate::gst::video::VideoTextureManager;
#[cfg(feature = "media")]
use crate::gst::webcam::WebcamTextureManager;
use crate::load_hdri_texture;
use crate::mouse::MouseTracker;
use crate::mouse::MouseUniform;
use crate::spectrum::SpectrumAnalyzer;
use crate::HdriMetadata;
use crate::{
fps, ControlsRequest, Core, ExportManager, KeyInputHandler, Renderer, ResolutionUniform,
ShaderControls, TextureManager, UniformBinding, UniformProvider,
};
use egui::ViewportId;
use egui_wgpu::ScreenDescriptor;
#[cfg(feature = "media")]
use log::warn;
use log::{error, info};
use std::path::Path;
use std::time::Instant;
use winit::event::WindowEvent;
// Texture format used for frame capture/export. macOS uses BGRA while other
// platforms use RGBA (both 8-bit sRGB) — presumably to match the native
// surface format on each platform (NOTE(review): confirm).
#[cfg(target_os = "macos")]
pub const CAPTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Bgra8UnormSrgb;
#[cfg(not(target_os = "macos"))]
pub const CAPTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
/// GPU uniform carrying elapsed time and a frame counter.
/// `repr(C)` + Pod/Zeroable: the field order defines the GPU buffer layout.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct TimeUniform {
    /// Seconds elapsed since the kit's `start_time`.
    pub time: f32,
    /// Frame counter.
    pub frame: u32,
}
impl UniformProvider for TimeUniform {
    /// Reinterprets the uniform as raw bytes for upload to a GPU buffer.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Bundles the state most cuneus applications need: the blit renderer,
/// optional media sources (video/webcam/HDRI), egui UI integration, core
/// uniforms (time/resolution/mouse), export, controls and an optional
/// compute pass.
pub struct RenderKit {
    pub renderer: Renderer,
    #[cfg(feature = "media")]
    pub video_texture_manager: Option<VideoTextureManager>,
    #[cfg(feature = "media")]
    pub using_video_texture: bool,
    #[cfg(feature = "media")]
    pub webcam_texture_manager: Option<WebcamTextureManager>,
    #[cfg(feature = "media")]
    pub using_webcam_texture: bool,
    /// Currently displayed texture (image, HDRI, or the 1x1 placeholder).
    pub texture_manager: Option<TextureManager>,
    pub egui_renderer: egui_wgpu::Renderer,
    pub egui_state: egui_winit::State,
    pub context: egui::Context,
    /// Layout used when (re)creating texture bind groups for media sources.
    pub texture_bind_group_layout: wgpu::BindGroupLayout,
    /// Wall-clock origin for the time uniform; reset on UI request.
    pub start_time: Instant,
    pub time_uniform: UniformBinding<TimeUniform>,
    /// Also carries audio spectrum/energy data for shaders.
    pub resolution_uniform: UniformBinding<ResolutionUniform>,
    pub key_handler: KeyInputHandler,
    pub export_manager: ExportManager,
    pub controls: ShaderControls,
    pub spectrum_analyzer: SpectrumAnalyzer,
    /// Optional compute pass; `None` until `create_compute_shader` is called.
    pub compute_shader: Option<ComputeShader>,
    pub fps_tracker: fps::FpsTracker,
    pub mouse_tracker: MouseTracker,
    /// Created lazily by `setup_mouse_uniform`.
    pub mouse_uniform: Option<UniformBinding<MouseUniform>>,
    pub mouse_bind_group_layout: Option<wgpu::BindGroupLayout>,
    pub using_hdri_texture: bool,
    pub hdri_metadata: Option<HdriMetadata>,
    /// Raw HDRI file bytes, kept so exposure/gamma can be re-applied.
    pub hdri_file_data: Option<Vec<u8>>,
}
impl RenderKit {
/// Vertex shader source for the built-in blit pipeline, embedded at compile time.
const VERTEX_SHADER: &'static str = include_str!("../shaders/vertex.wgsl");
/// Fragment shader source that blits a sampled texture to the render target.
const BLIT_SHADER: &'static str = include_str!("../shaders/blit.wgsl");
/// Creates a bind group layout with a 2D float texture at binding 0 and a
/// filtering sampler at binding 1, used to display compute shader output.
pub fn create_standard_texture_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
    let texture_entry = wgpu::BindGroupLayoutEntry {
        binding: 0,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            multisampled: false,
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension: wgpu::TextureViewDimension::D2,
        },
        count: None,
    };
    let sampler_entry = wgpu::BindGroupLayoutEntry {
        binding: 1,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
        count: None,
    };
    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("Standard Texture Layout"),
        entries: &[texture_entry, sampler_entry],
    })
}
/// Create RenderKit with standard texture layout
pub fn new_with_standard_layout(core: &Core) -> Self {
let layout = Self::create_standard_texture_layout(&core.device);
Self::new(core, &layout, None)
}
pub fn new(core: &Core, layout: &wgpu::BindGroupLayout, fragment_entry: Option<&str>) -> Self {
let bind_group_layouts = &[layout];
let time_bind_group_layout =
core.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("time_bind_group_layout"),
});
let resolution_bind_group_layout =
core.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("resolution_bind_group_layout"),
});
let time_uniform = UniformBinding::new(
&core.device,
"Time Uniform",
TimeUniform {
time: 0.0,
frame: 0,
},
&time_bind_group_layout,
0,
);
let resolution_uniform = UniformBinding::new(
&core.device,
"Resolution Uniform",
ResolutionUniform {
dimensions: [core.size.width as f32, core.size.height as f32],
_padding: [0.0, 0.0],
audio_data: [[0.0; 4]; 32],
bpm: 0.0,
bass_energy: 0.0,
mid_energy: 0.0,
high_energy: 0.0,
total_energy: 0.0,
_energy_padding: [0.0, 0.0, 0.0],
},
&resolution_bind_group_layout,
0,
);
let vs_shader = core
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Vertex Shader"),
source: wgpu::ShaderSource::Wgsl(Self::VERTEX_SHADER.into()),
});
let fs_shader = core
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Fragment Shader"),
source: wgpu::ShaderSource::Wgsl(Self::BLIT_SHADER.into()),
});
let texture_bind_group_layout =
core.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let pipeline_layout = core
.device
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts,
push_constant_ranges: &[],
});
let renderer = Renderer::new(
&core.device,
&vs_shader,
&fs_shader,
core.config.format,
&pipeline_layout,
fragment_entry,
);
let context = egui::Context::default();
let egui_state = egui_winit::State::new(
context.clone(),
ViewportId::default(),
core.window(),
None,
None,
None,
);
let egui_renderer = egui_wgpu::Renderer::new(
&core.device,
core.config.format,
egui_wgpu::RendererOptions::default(),
);
// default texture manager
let texture_manager =
Self::create_default_texture_manager(core, &texture_bind_group_layout);
let fps_tracker = fps::FpsTracker::new();
let mouse_tracker = MouseTracker::new();
Self {
renderer,
#[cfg(feature = "media")]
video_texture_manager: None,
#[cfg(feature = "media")]
using_video_texture: false,
#[cfg(feature = "media")]
webcam_texture_manager: None,
#[cfg(feature = "media")]
using_webcam_texture: false,
texture_manager: Some(texture_manager),
egui_renderer,
egui_state,
context,
texture_bind_group_layout,
start_time: Instant::now(),
time_uniform,
resolution_uniform,
key_handler: KeyInputHandler::new(),
export_manager: ExportManager::new(),
controls: ShaderControls::new(),
spectrum_analyzer: SpectrumAnalyzer::new(),
compute_shader: None,
fps_tracker,
mouse_tracker,
mouse_uniform: None,
mouse_bind_group_layout: None,
using_hdri_texture: false,
hdri_metadata: None,
hdri_file_data: None,
}
}
/// Refreshes the time uniform from the wall-clock elapsed time and uploads it.
pub fn update_time(&mut self, queue: &wgpu::Queue) {
    let elapsed = self.start_time.elapsed().as_secs_f32();
    self.time_uniform.data.time = elapsed;
    self.time_uniform.update(queue);
}
/// Writes the new surface size into the resolution uniform and uploads it.
pub fn update_resolution(
    &mut self,
    queue: &wgpu::Queue,
    new_size: winit::dpi::PhysicalSize<u32>,
) {
    let dims = [new_size.width as f32, new_size.height as f32];
    self.resolution_uniform.data.dimensions = dims;
    self.resolution_uniform.update(queue);
}
/// Builds a placeholder 1x1 RGBA texture manager so the blit pipeline always
/// has something bound before any media is loaded.
pub fn create_default_texture_manager(
    core: &Core,
    texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> TextureManager {
    let texture = core.device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Default Texture"),
        size: wgpu::Extent3d {
            width: 1,
            height: 1,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
        view_formats: &[],
    });
    let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    let sampler = core
        .device
        .create_sampler(&wgpu::SamplerDescriptor::default());
    let bind_group = core.device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Default Texture Bind Group"),
        layout: texture_bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
    });
    TextureManager {
        texture,
        view,
        sampler,
        bind_group,
    }
}
/// Runs one egui frame: collects pending window input and executes
/// `ui_builder` against the egui context, returning the frame's output.
pub fn render_ui<F>(&mut self, core: &Core, mut ui_builder: F) -> egui::FullOutput
where
    F: FnMut(&egui::Context),
{
    let input = self.egui_state.take_egui_input(core.window());
    let output = self.context.run(input, |ctx| ui_builder(ctx));
    output
}
/// Tessellates and draws the egui output produced by [`Self::render_ui`] on
/// top of the already-rendered frame, then frees textures egui released.
/// Order matters: texture uploads and buffer updates must happen before the
/// render pass, and texture frees after it.
pub fn handle_render_output(
    &mut self,
    core: &Core,
    view: &wgpu::TextureView,
    full_output: egui::FullOutput,
    encoder: &mut wgpu::CommandEncoder,
) {
    let screen_descriptor = ScreenDescriptor {
        size_in_pixels: [core.config.width, core.config.height],
        pixels_per_point: core.window().scale_factor() as f32,
    };
    let clipped_primitives = self
        .context
        .tessellate(full_output.shapes, screen_descriptor.pixels_per_point);
    // Upload any new/changed egui textures before drawing.
    for (id, image_delta) in &full_output.textures_delta.set {
        self.egui_renderer
            .update_texture(&core.device, &core.queue, *id, image_delta);
    }
    self.egui_renderer.update_buffers(
        &core.device,
        &core.queue,
        encoder,
        &clipped_primitives,
        &screen_descriptor,
    );
    {
        // LoadOp::Load keeps the shader output beneath the UI.
        let render_pass = crate::Renderer::begin_render_pass(
            encoder,
            view,
            wgpu::LoadOp::Load,
            Some("Egui Render Pass"),
        );
        // Detach the pass's borrowed lifetime so it can be handed to egui-wgpu.
        let mut render_pass = render_pass.into_inner().forget_lifetime();
        self.egui_renderer
            .render(&mut render_pass, &clipped_primitives, &screen_descriptor);
    }
    // Free textures egui no longer needs (must come after rendering).
    for id in &full_output.textures_delta.free {
        self.egui_renderer.free_texture(id);
    }
}
/// Loads media at `path` and switches the active texture source.
///
/// Supported by extension: still images (png/jpg/jpeg/bmp/gif/tiff/webp),
/// HDR images (hdr/exr), and — with the `media` feature — video and audio
/// files (mp4/avi/mkv/mov/webm/mp3/wav/ogg).
///
/// # Errors
/// Returns an error for unsupported extensions or when decoding fails.
pub fn load_media<P: AsRef<Path>>(&mut self, core: &Core, path: P) -> anyhow::Result<()> {
    let path_ref = path.as_ref();
    let extension = path_ref
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.to_lowercase());
    match extension {
        // Image formats
        Some(ext)
            if ["png", "jpg", "jpeg", "bmp", "gif", "tiff", "webp"].contains(&ext.as_str()) =>
        {
            info!("Loading image: {path_ref:?}");
            // Propagate the decoder error (its cause was previously discarded).
            let img = image::open(path_ref)
                .map_err(|e| anyhow::anyhow!("Failed to open image: {e}"))?;
            let rgba_image = img.into_rgba8();
            let new_texture_manager = TextureManager::new(
                &core.device,
                &core.queue,
                &rgba_image,
                &self.texture_bind_group_layout,
            );
            self.texture_manager = Some(new_texture_manager);
            #[cfg(feature = "media")]
            {
                self.using_video_texture = false;
                self.video_texture_manager = None;
                self.using_webcam_texture = false;
                self.webcam_texture_manager = None;
            }
            // Clear stale HDRI state (mirrors `start_webcam`).
            self.using_hdri_texture = false;
            Ok(())
        }
        Some(ext) if ["hdr", "exr"].contains(&ext.as_str()) => {
            info!("Loading HDRI: {path_ref:?}");
            // Keep the raw bytes so exposure/gamma can be re-applied later.
            let file_data = std::fs::read(path_ref)?;
            self.hdri_file_data = Some(file_data.clone());
            let default_exposure = 1.0;
            match load_hdri_texture(
                &core.device,
                &core.queue,
                &file_data,
                &self.texture_bind_group_layout,
                default_exposure,
            ) {
                Ok((texture_manager, metadata)) => {
                    self.texture_manager = Some(texture_manager);
                    #[cfg(feature = "media")]
                    {
                        self.using_video_texture = false;
                        self.video_texture_manager = None;
                        self.using_webcam_texture = false;
                        self.webcam_texture_manager = None;
                    }
                    self.using_hdri_texture = true;
                    self.hdri_metadata = Some(metadata);
                    Ok(())
                }
                Err(e) => {
                    error!("Failed to load HDRI: {e}");
                    Err(anyhow::anyhow!("Failed to load HDRI: {}", e))
                }
            }
        }
        #[cfg(feature = "media")]
        Some(ext)
            if ["mp4", "avi", "mkv", "mov", "webm", "mp3", "wav", "ogg"]
                .contains(&ext.as_str()) =>
        {
            info!("Loading video: {path_ref:?}");
            match VideoTextureManager::new(
                &core.device,
                &core.queue,
                &self.texture_bind_group_layout,
                path_ref,
            ) {
                Ok(video_manager) => {
                    self.video_texture_manager = Some(video_manager);
                    self.using_video_texture = true;
                    self.using_webcam_texture = false;
                    self.webcam_texture_manager = None;
                    // Clear stale HDRI state (mirrors `start_webcam`).
                    self.using_hdri_texture = false;
                    // Auto-play and loop by default.
                    if let Err(e) = self.play_video() {
                        warn!("Failed to play video: {e}");
                    }
                    self.set_video_loop(true);
                    Ok(())
                }
                Err(e) => {
                    error!("Failed to load video: {e}");
                    Err(e)
                }
            }
        }
        _ => Err(anyhow::anyhow!("Unsupported media format: {:?}", path_ref)),
    }
}
/// Pulls the next decoded video frame into the GPU texture, if a video is
/// the active source. Returns `true` when the texture content changed;
/// errors and inactive states yield `false`.
#[cfg(feature = "media")]
pub fn update_video_texture(&mut self, core: &Core, queue: &wgpu::Queue) -> bool {
    if !self.using_video_texture {
        return false;
    }
    match self.video_texture_manager.as_mut() {
        Some(vm) => vm
            .update_texture(&core.device, queue, &self.texture_bind_group_layout)
            .unwrap_or(false),
        None => false,
    }
}
/// Starts or resumes playback of the loaded video; no-op when none is loaded.
#[cfg(feature = "media")]
pub fn play_video(&mut self) -> anyhow::Result<()> {
    if let Some(vm) = self.video_texture_manager.as_mut() {
        vm.play()?;
    }
    Ok(())
}
/// Pauses playback of the loaded video; no-op when none is loaded.
#[cfg(feature = "media")]
pub fn pause_video(&mut self) -> anyhow::Result<()> {
    if let Some(vm) = self.video_texture_manager.as_mut() {
        vm.pause()?;
    }
    Ok(())
}
/// Seeks the loaded video to `position_seconds` (clamped to >= 0);
/// no-op when no video is loaded.
///
/// Converts to nanoseconds so fractional seek positions are honored; the
/// previous `as u64` cast truncated to whole seconds.
#[cfg(feature = "media")]
pub fn seek_video(&mut self, position_seconds: f64) -> anyhow::Result<()> {
    if let Some(video_manager) = &mut self.video_texture_manager {
        let clamped = position_seconds.max(0.0);
        let position = gstreamer::ClockTime::from_nseconds((clamped * 1_000_000_000.0) as u64);
        video_manager.seek(position)?;
    }
    Ok(())
}
/// Enables or disables looping on the loaded video; no-op when none is loaded.
#[cfg(feature = "media")]
pub fn set_video_loop(&mut self, should_loop: bool) {
    if let Some(vm) = self.video_texture_manager.as_mut() {
        vm.set_loop(should_loop);
    }
}
/// Opens the webcam (optionally a specific device index) and makes it the
/// active texture source, displacing any video or HDRI source.
#[cfg(feature = "media")]
pub fn start_webcam(&mut self, core: &Core, device_index: Option<u32>) -> anyhow::Result<()> {
    info!("Starting webcam");
    let mut manager = WebcamTextureManager::new(
        &core.device,
        &core.queue,
        &self.texture_bind_group_layout,
        device_index,
    )?;
    manager.start()?;
    self.webcam_texture_manager = Some(manager);
    self.using_webcam_texture = true;
    self.using_video_texture = false;
    self.video_texture_manager = None;
    self.using_hdri_texture = false;
    Ok(())
}
/// Stops the webcam pipeline (if running) and clears the webcam source.
#[cfg(feature = "media")]
pub fn stop_webcam(&mut self) -> anyhow::Result<()> {
    info!("Stopping webcam");
    if let Some(manager) = self.webcam_texture_manager.as_mut() {
        manager.stop()?;
    }
    self.using_webcam_texture = false;
    self.webcam_texture_manager = None;
    Ok(())
}
/// Copies the newest webcam frame into the GPU texture, if the webcam is the
/// active source. Returns `true` when the texture content changed; errors
/// and inactive states yield `false`.
#[cfg(feature = "media")]
pub fn update_webcam_texture(&mut self, core: &Core, queue: &wgpu::Queue) -> bool {
    if !self.using_webcam_texture {
        return false;
    }
    match self.webcam_texture_manager.as_mut() {
        Some(wm) => wm
            .update_texture(&core.device, queue, &self.texture_bind_group_layout)
            .unwrap_or(false),
        None => false,
    }
}
pub fn load_image(&mut self, core: &Core, path: std::path::PathBuf) {
if let Ok(img) = image::open(path) {
let rgba_image = img.into_rgba8();
let new_texture_manager = TextureManager::new(
&core.device,
&core.queue,
&rgba_image,
&self.texture_bind_group_layout,
);
self.texture_manager = Some(new_texture_manager);
#[cfg(feature = "media")]
{
self.using_video_texture = false;
self.video_texture_manager = None;
self.using_webcam_texture = false;
self.webcam_texture_manager = None;
}
}
}
/// Creates an offscreen render target in [`CAPTURE_FORMAT`] plus a mappable
/// readback buffer whose rows are padded to wgpu's 256-byte copy alignment.
pub fn create_capture_texture(
    &self,
    device: &wgpu::Device,
    width: u32,
    height: u32,
) -> (wgpu::Texture, wgpu::Buffer) {
    let capture_texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Capture Texture"),
        size: wgpu::Extent3d {
            width,
            height,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: CAPTURE_FORMAT,
        usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
        view_formats: &[],
    });
    // Rows of 4-byte pixels, rounded up to the 256-byte alignment wgpu
    // requires for texture-to-buffer copies.
    let align = 256;
    let row_bytes = width * 4;
    let padded_bytes_per_row = row_bytes + (align - row_bytes % align) % align;
    let buffer_size = padded_bytes_per_row * height;
    let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Capture Buffer"),
        size: buffer_size as u64,
        usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
        mapped_at_creation: false,
    });
    (capture_texture, output_buffer)
}
/// Applies a UI control request: restarts the time origin when a reset was
/// asked for, then forwards the remaining fields to the shader controls.
pub fn apply_control_request(&mut self, request: ControlsRequest) {
    if request.should_reset {
        self.start_time = Instant::now();
    }
    self.controls.apply_ui_request(request);
}
/// Feeds the latest audio spectrum/level data from the active video into the
/// resolution uniform so shaders can react to audio.
#[cfg(feature = "media")]
pub fn update_audio_spectrum(&mut self, queue: &wgpu::Queue) {
    self.spectrum_analyzer.update_spectrum(
        queue,
        &mut self.resolution_uniform,
        &self.video_texture_manager,
        self.using_video_texture,
    );
}
/// Executes video-related UI requests: media loading, transport control
/// (play/pause/restart/seek/loop), and audio volume/mute changes. Transport
/// and audio errors are deliberately ignored (best-effort controls).
#[cfg(feature = "media")]
pub fn handle_video_requests(&mut self, core: &Core, request: &ControlsRequest) {
    if let Some(path) = &request.load_media_path {
        if let Err(e) = self.load_media(core, path) {
            error!("Failed to load media: {e}");
        }
    }
    if request.play_video {
        let _ = self.play_video();
    }
    if request.pause_video {
        let _ = self.pause_video();
    }
    if request.restart_video {
        // Restart = seek to the beginning, then resume playback.
        let _ = self.seek_video(0.0);
        let _ = self.play_video();
    }
    if let Some(position) = request.seek_position {
        let _ = self.seek_video(position);
    }
    if let Some(should_loop) = request.set_loop {
        self.set_video_loop(should_loop);
    }
    // Audio controls act directly on the video manager, if one exists.
    if let Some(vm) = self.video_texture_manager.as_mut() {
        if let Some(volume) = request.set_volume {
            let _ = vm.set_volume(volume);
        }
        if let Some(muted) = request.mute_audio {
            let _ = vm.set_mute(muted);
        }
        if request.toggle_mute {
            let _ = vm.toggle_mute();
        }
    }
}
/// Executes webcam start/stop requests coming from the UI, logging failures.
#[cfg(feature = "media")]
pub fn handle_webcam_requests(&mut self, core: &Core, request: &ControlsRequest) {
    if request.start_webcam {
        if let Err(e) = self.start_webcam(core, request.webcam_device_index) {
            error!("Failed to start webcam: {e}");
        }
    }
    if request.stop_webcam {
        if let Err(e) = self.stop_webcam() {
            error!("Failed to stop webcam: {e}");
        }
    }
}
/// Applies exposure/gamma changes from the UI to the loaded HDRI.
///
/// Returns `true` if the HDRI texture was re-processed. No-op unless an
/// HDRI is the active texture source.
pub fn handle_hdri_requests(&mut self, core: &Core, request: &ControlsRequest) -> bool {
    if !self.using_hdri_texture {
        return false;
    }
    let mut updated = false;
    let mut new_exposure = None;
    let mut new_gamma = None;
    // Only re-process when a value actually moved (> 0.001 delta) to avoid
    // redundant texture rebuilds while a slider sits still.
    if let (Some(exposure), Some(hdri_meta)) = (request.hdri_exposure, &mut self.hdri_metadata)
    {
        if (exposure - hdri_meta.exposure).abs() > 0.001 {
            hdri_meta.exposure = exposure;
            new_exposure = Some(exposure);
            updated = true;
        }
    }
    if let (Some(gamma), Some(hdri_meta)) = (request.hdri_gamma, &mut self.hdri_metadata) {
        if (gamma - hdri_meta.gamma).abs() > 0.001 {
            hdri_meta.gamma = gamma;
            new_gamma = Some(gamma);
            updated = true;
        }
    }
    if updated {
        // Re-decode from the retained HDRI file bytes with the new parameters.
        if let (Some(hdri_data), Some(texture_manager)) =
            (&self.hdri_file_data, &mut self.texture_manager)
        {
            let exposure = new_exposure
                .unwrap_or_else(|| self.hdri_metadata.map(|meta| meta.exposure).unwrap_or(1.0));
            if let Err(e) = crate::update_hdri_exposure(
                &core.device,
                &core.queue,
                hdri_data,
                &self.texture_bind_group_layout,
                texture_manager,
                exposure,
                new_gamma,
            ) {
                error!("Failed to update HDRI parameters: {e}");
            }
        }
    }
    updated
}
/// Returns the HDRI metadata when an HDRI is the active texture source.
pub fn get_hdri_info(&self) -> Option<HdriMetadata> {
    self.hdri_metadata.filter(|_| self.using_hdri_texture)
}
/// Creates (or replaces) the kit's compute shader from WGSL `shader_source`.
///
/// NOTE(review): the entry point, workgroup size/count and dispatch-once
/// parameters are accepted for future use but currently ignored.
pub fn create_compute_shader(
    &mut self,
    core: &Core,
    shader_source: &str,
    _entry_point: &str,
    _workgroup_size: [u32; 3],
    _workgroup_count: Option<[u32; 3]>,
    _dispatch_once: bool,
) {
    // WIP until I complete everything in compute folder
    self.compute_shader = Some(ComputeShader::new(core, shader_source));
}
pub fn enable_compute_hot_reload(
&mut self,
core: &Core,
shader_path: &Path,
) -> Result<(), notify::Error> {
if let Some(compute_shader) = &mut self.compute_shader {
let shader_source = std::fs::read_to_string(shader_path)?;
let shader_module = core
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Compute Shader Hot Reload"),
source: wgpu::ShaderSource::Wgsl(shader_source.into()),
});
compute_shader.enable_hot_reload(
core.device.clone(),
shader_path.to_path_buf(),
shader_module,
)?;
println!(
"Compute shader hot reload enabled for: {}",
shader_path.display()
);
Ok(())
} else {
Err(notify::Error::generic("No compute shader initialized"))
}
}
/// Records the compute pass into `encoder`, if a compute shader exists.
pub fn dispatch_compute_shader(&mut self, encoder: &mut wgpu::CommandEncoder, core: &Core) {
    if let Some(shader) = self.compute_shader.as_mut() {
        shader.dispatch(encoder, core);
    }
}
/// Returns the compute shader's output texture, if a compute shader exists.
pub fn get_compute_output_texture(&self) -> Option<&TextureManager> {
    self.compute_shader
        .as_ref()
        .map(|shader| shader.get_output_texture())
}
/// Resizes the compute shader's resources to the current surface size.
pub fn resize_compute_shader(&mut self, core: &Core) {
    if let Some(shader) = self.compute_shader.as_mut() {
        shader.resize(core, core.size.width, core.size.height);
    }
}
/// Forwards elapsed/delta time to the compute shader's uniforms.
pub fn update_compute_shader_time(&mut self, elapsed: f32, delta: f32, queue: &wgpu::Queue) {
    if let Some(shader) = self.compute_shader.as_mut() {
        shader.set_time(elapsed, delta, queue);
    }
}
/// Get video information if a video texture is loaded.
///
/// Returns `(duration_s, position_s, (width, height), fps, looping,
/// has_audio, volume, muted)`.
#[cfg(feature = "media")]
pub fn get_video_info(
    &self,
) -> Option<(
    Option<f32>,
    f32,
    (u32, u32),
    Option<f32>,
    bool,
    bool,
    f64,
    bool,
)> {
    if !self.using_video_texture {
        return None;
    }
    let vm = self.video_texture_manager.as_ref()?;
    Some((
        vm.duration().map(|d| d.seconds() as f32),
        vm.position().seconds() as f32,
        vm.dimensions(),
        vm.framerate().map(|(num, den)| num as f32 / den as f32),
        vm.is_looping(),
        vm.has_audio(),
        vm.volume(),
        vm.is_muted(),
    ))
}
/// Returns `(width, height)` of the active webcam stream, if any.
#[cfg(feature = "media")]
pub fn get_webcam_info(&self) -> Option<(u32, u32)> {
    if !self.using_webcam_texture {
        return None;
    }
    self.webcam_texture_manager
        .as_ref()
        .map(|wm| wm.dimensions())
}
/// Lazily creates the mouse uniform buffer and its bind group layout;
/// does nothing if they already exist.
pub fn setup_mouse_uniform(&mut self, core: &Core) {
    if self.mouse_uniform.is_some() {
        return;
    }
    let layout = core
        .device
        .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                // Mouse data is readable from both fragment and compute stages.
                visibility: wgpu::ShaderStages::FRAGMENT | wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
            label: Some("mouse_bind_group_layout"),
        });
    let uniform = UniformBinding::new(
        &core.device,
        "Mouse Uniform",
        self.mouse_tracker.uniform,
        &layout,
        0,
    );
    self.mouse_bind_group_layout = Some(layout);
    self.mouse_uniform = Some(uniform);
}
/// Copies the tracker's latest mouse state into the uniform and uploads it;
/// no-op until `setup_mouse_uniform` has run.
pub fn update_mouse_uniform(&mut self, queue: &wgpu::Queue) {
    if let Some(uniform) = self.mouse_uniform.as_mut() {
        uniform.data = self.mouse_tracker.uniform;
        uniform.update(queue);
    }
}
pub fn handle_mouse_input(
&mut self,
core: &Core,
event: &WindowEvent,
ui_handled: bool,
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | true |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/gst/video.rs | src/gst/video.rs | use crate::texture::TextureManager;
use anyhow::{anyhow, Result};
use gst::glib::ControlFlow;
use gst::prelude::*;
use gstreamer as gst;
use gstreamer_app as gst_app;
use gstreamer_video as gst_video;
use log::{debug, error, info, warn};
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use wgpu;
/// One snapshot of audio spectrum analysis, populated from GStreamer
/// `spectrum` element messages on the pipeline bus.
#[derive(Debug, Clone, Default)]
pub struct SpectrumData {
    /// Number of frequency bands
    pub bands: usize,
    /// Magnitude values for each frequency band in dB
    pub magnitudes: Vec<f32>,
    /// Phase values for each frequency band
    pub phases: Option<Vec<f32>>,
    /// Timestamp of the spectrum data
    pub timestamp: Option<gst::ClockTime>,
}
/// Audio loudness measurements, populated from GStreamer `level` element
/// messages; used to normalize the spectrum visualization.
#[derive(Debug, Clone, Default)]
pub struct AudioLevel {
    /// RMS in linear scale (0.0 to 1.0)
    pub rms: f64,
    /// RMS in decibels
    pub rms_db: f64,
    /// Peak value in linear scale (0.0 to 1.0)
    pub peak: f64,
}
/// Manages a video texture that can be updated frame by frame: a GStreamer
/// pipeline decodes the file and the newest frame is copied into the wrapped
/// wgpu texture on demand. Also exposes audio analysis (spectrum, level, BPM)
/// collected from the pipeline bus.
pub struct VideoTextureManager {
    /// The underlying TextureManager that handles the WGPU resources
    texture_manager: TextureManager,
    /// The GStreamer pipeline for video decoding
    pipeline: gst::Pipeline,
    /// The AppSink element that receives decoded frames
    appsink: gst_app::AppSink,
    /// Whether the video has an audio track
    has_audio: bool,
    /// Whether this is an audio-only file (no video track)
    audio_only: Arc<Mutex<bool>>,
    /// Audio volume (0.0 to 1.0)
    volume: Arc<Mutex<f64>>,
    /// Whether audio is muted
    is_muted: Arc<Mutex<bool>>,
    /// Current video dimensions
    dimensions: (u32, u32),
    /// Video duration in nanoseconds (if available)
    duration: Option<gst::ClockTime>,
    /// Current position in the video
    position: Arc<Mutex<gst::ClockTime>>,
    /// Frame rate of the video
    framerate: Option<gst::Fraction>,
    /// Whether the video is currently playing
    is_playing: Arc<Mutex<bool>>,
    /// Whether to loop the video when it ends
    loop_playback: Arc<Mutex<bool>>,
    /// Last frame update time
    last_update: Instant,
    /// Frame buffer for the most recently decoded frame
    current_frame: Arc<Mutex<Option<image::RgbaImage>>>,
    /// Path to the video file
    video_path: String,
    /// Whether the video texture has been initialized
    texture_initialized: bool,
    /// Frame counter for debugging
    frame_count: usize,
    /// Spectrum analysis enabled
    spectrum_enabled: bool,
    /// Number of frequency bands for spectrum analysis
    spectrum_bands: usize,
    /// Threshold in dB for spectrum analysis
    spectrum_threshold: i32,
    /// Spectrum data from the most recent analysis
    spectrum_data: Arc<Mutex<SpectrumData>>,
    /// Audio level data (RMS, peak) for normalization
    audio_level: Arc<Mutex<AudioLevel>>,
    /// bpm
    bpm_value: Arc<Mutex<f32>>,
    /// Whether video track was found
    has_video: Arc<Mutex<bool>>,
}
impl VideoTextureManager {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
bind_group_layout: &wgpu::BindGroupLayout,
video_path: impl AsRef<Path>,
) -> Result<Self> {
// Create a default 1x1 texture initially, note that, this going to be replaced with first video frame
let default_image = image::RgbaImage::new(1, 1);
let texture_manager = TextureManager::new(device, queue, &default_image, bind_group_layout);
let path_str = video_path
.as_ref()
.to_str()
.ok_or_else(|| anyhow!("Invalid video path"))?
.to_string();
info!("Creating video texture from: {path_str}");
let pipeline = gst::Pipeline::new();
// Source element - read from file
let filesrc = gst::ElementFactory::make("filesrc")
.name("source")
.property("location", &path_str)
.build()
.map_err(|_| anyhow!("Failed to create filesrc element"))?;
// Decoding element
let decodebin = gst::ElementFactory::make("decodebin")
.name("decoder")
.build()
.map_err(|_| anyhow!("Failed to create decodebin element"))?;
// videorate element to enforce correct frame timing
let videorate = gst::ElementFactory::make("videorate")
.name("rate")
.build()
.map_err(|_| anyhow!("Failed to create videorate element"))?;
// Convert to proper format
let videoconvert = gst::ElementFactory::make("videoconvert")
.name("convert")
.build()
.map_err(|_| anyhow!("Failed to create videoconvert element"))?;
//caps filter to force framerate if needed
let capsfilter = gst::ElementFactory::make("capsfilter")
.name("capsfilter")
.build()
.map_err(|_| anyhow!("Failed to create capsfilter element"))?;
// Output sink for video
let appsink = gst::ElementFactory::make("appsink")
.name("sink")
.build()
.map_err(|_| anyhow!("Failed to create appsink element"))?;
let appsink = appsink
.dynamic_cast::<gst_app::AppSink>()
.map_err(|_| anyhow!("Failed to cast to AppSink"))?;
// Configure appsink
appsink.set_caps(Some(
&gst::Caps::builder("video/x-raw")
.field("format", gst_video::VideoFormat::Rgba.to_str())
.build(),
));
appsink.set_max_buffers(2);
// Drop old buffers when full
appsink.set_drop(true);
appsink.set_sync(true);
// video elements goes to the pipeline
pipeline
.add_many([
&filesrc,
&decodebin,
&videorate,
&videoconvert,
&capsfilter,
appsink.upcast_ref(),
])
.map_err(|_| anyhow!("Failed to add video elements to pipeline"))?;
// Link elements that can be linked statically
gst::Element::link_many([&videorate, &videoconvert, &capsfilter, appsink.upcast_ref()])
.map_err(|_| anyhow!("Failed to link video elements"))?;
gst::Element::link_many([&filesrc, &decodebin])
.map_err(|_| anyhow!("Failed to link filesrc to decodebin"))?;
// Set up pad-added signal for dynamic linking from decodebin -> videorate
let videorate_weak = videorate.downgrade();
let has_audio = Arc::new(Mutex::new(false));
let has_audio_clone = has_audio.clone();
let has_video = Arc::new(Mutex::new(false));
let has_video_clone = has_video.clone();
// now audio elements reference holders
let audioconvert_weak = Arc::new(Mutex::new(None));
// Default spectrum configuration
let spectrum_bands = 128;
let spectrum_threshold = -60;
let spectrum_enabled = true;
let spectrum_data = Arc::new(Mutex::new(SpectrumData::default()));
let audio_level = Arc::new(Mutex::new(AudioLevel::default()));
// bus watch for spectrum messages with debug
let bus = pipeline.bus().expect("Pipeline has no bus");
let spectrum_data_clone2 = spectrum_data.clone();
// Bus watch for pipeline messages (errors, warnings, EOS)
let watch_result = bus.add_watch(move |_, message| {
match message.view() {
gst::MessageView::Element(element) => {
if let Some(structure) = element.structure() {
info!("Element message structure name: '{}'", structure.name());
// Explicitly check for spectrum messages
if structure.name() == "spectrum" {
info!("SPECTRUM MESSAGE DETECTED ");
info!("Full structure: {structure}");
// Try ALL possible ways to extract spectrum data
let mut magnitude_values = Vec::new();
// Method 1: Direct indexing
for i in 0..5 {
// Just try first 5 bands initially
let field_name = format!("magnitude[{i}]");
match structure.get::<f32>(&field_name) {
Ok(value) => {
info!("Method 1 - Band {i}: {value} dB");
magnitude_values.push(value);
}
Err(e) => {
info!("Method 1 failed: {e:?}");
break;
}
}
}
// Method 2: Try to access magnitude as array field
if structure.has_field("magnitude") {
info!("Structure has 'magnitude' field");
} else {
info!("Structure does NOT have 'magnitude' field");
}
// Method 3: Parse from structure string
let struct_str = structure.to_string();
info!("Structure string: {struct_str}");
// If we found magnitude values through any method, process them
if !magnitude_values.is_empty() {
// Continue extracting all magnitude values
let mut i = magnitude_values.len();
loop {
let field_name = format!("magnitude[{i}]");
if let Ok(value) = structure.get::<f32>(&field_name) {
magnitude_values.push(value);
i += 1;
} else {
break;
}
}
// Log summary of extracted data
info!("Extracted {} magnitude values", magnitude_values.len());
// Calculate average magnitude
let avg_magnitude = magnitude_values.iter().sum::<f32>()
/ magnitude_values.len() as f32;
info!("Average magnitude: {avg_magnitude:.2} dB");
// Find peak frequency
if let Some((peak_idx, &peak_val)) = magnitude_values
.iter()
.enumerate()
.max_by(|(_, a), (_, b)| {
a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)
})
{
info!("Peak frequency: band {peak_idx} at {peak_val:.2} dB");
}
// Update spectrum data
if let Ok(mut data) = spectrum_data_clone2.lock() {
*data = SpectrumData {
bands: magnitude_values.len(),
magnitudes: magnitude_values,
phases: None,
timestamp: structure.get("timestamp").ok(),
};
}
} else {
warn!(
"Failed to extract any magnitude values from spectrum message"
);
}
}
// Handle level messages for RMS/peak/decay
if structure.name() == "level" {
info!("LEVEL MESSAGE DETECTED");
info!("Level structure: {structure}");
// Extract RMS, peak, and decay values
if let Ok(rms_list) = structure.get::<gst::glib::ValueArray>("rms") {
let mut rms_values = Vec::new();
for val in rms_list.iter() {
if let Ok(rms_db) = val.get::<f64>() {
rms_values.push(rms_db);
}
}
if !rms_values.is_empty() {
info!("Level RMS values: {rms_values:?}");
}
}
}
// Note: BPM messages are handled in poll_audio_messages()
}
}
gst::MessageView::Tag(tag) => {
let tags = tag.tags();
// Note: BPM tags are handled in poll_audio_messages()
if let Some(bpm) = tags.get::<gst::tags::BeatsPerMinute>() {
info!("BPM tag detected: {:.1}", bpm.get());
}
}
gst::MessageView::Error(err) => {
error!(
"Pipeline error: {} ({})",
err.error(),
err.debug().unwrap_or_default()
);
}
_ => (),
}
ControlFlow::Continue
});
if let Err(e) = watch_result {
error!("Failed to add bus watch: {e:?}");
}
decodebin.connect_pad_added(move |_, pad| {
let caps = match pad.current_caps() {
Some(caps) => caps,
_ => return,
};
let structure = match caps.structure(0) {
Some(s) => s,
_ => return,
};
// Check if this is a video or audio stream. maybe there could be other way to handle this but I love simpicity
if structure.name().starts_with("video/") {
// Mark that we have a video track
if let Ok(mut has_video_lock) = has_video_clone.lock() {
*has_video_lock = true;
info!("Video track detected");
}
// Handle video path
if let Some(videorate) = videorate_weak.upgrade() {
let sink_pad = match videorate.static_pad("sink") {
Some(pad) => pad,
_ => return,
};
if !sink_pad.is_linked() {
let _ = pad.link(&sink_pad);
info!("Linked decoder to videorate successfully");
}
}
} else if structure.name().starts_with("audio/") {
// has_audio flag to true - we've detected an audio stream
if let Ok(mut has_audio_lock) = has_audio_clone.lock() {
*has_audio_lock = true;
info!("Audio track detected in video");
}
// Now lets dynamically create the audio processing chain
if let Ok(mut audioconvert_lock) = audioconvert_weak.lock() {
// Only create audio elements once when first audio pad is detected
if audioconvert_lock.is_none() {
// Create audio elements
let audioconvert = match gst::ElementFactory::make("audioconvert")
.name("audioconvert")
.build()
{
Ok(e) => e,
Err(_) => {
warn!("Failed to create audioconvert");
return;
}
};
let audioresample = match gst::ElementFactory::make("audioresample")
.name("audioresample")
.build()
{
Ok(e) => e,
Err(_) => {
warn!("Failed to create audioresample");
return;
}
};
// level element for RMS/peak/decay analysis
let level = match gst::ElementFactory::make("level")
.name("level")
.property("interval", 50000000u64) // 50ms intervals (matches spectrum)
.property("message", true)
.property("post-messages", true)
.build()
{
Ok(e) => {
info!("Created level analyzer element");
e
}
Err(_) => {
warn!("Failed to create level element");
return;
}
};
let bpmdetect = match gst::ElementFactory::make("bpmdetect")
.name("bpmdetect")
.build()
{
Ok(e) => {
info!("Created BPM detector");
e
}
Err(_) => {
warn!("Failed to create BPM detector");
return;
}
};
let spectrum = match gst::ElementFactory::make("spectrum")
.name("spectrum")
.property("bands", spectrum_bands as u32)
.property("threshold", spectrum_threshold)
.property("post-messages", true)
.property("message-magnitude", true)
.property("message-phase", false)
.property("interval", 50000000u64)
.build()
{
Ok(e) => {
info!(
"Created spectrum analyzer with {spectrum_bands} bands and threshold {spectrum_threshold}dB"
);
e
}
Err(_) => {
warn!("Failed to create spectrum analyzer");
return;
}
};
let volume = match gst::ElementFactory::make("volume")
.name("volume")
.property("volume", 1.0)
.build()
{
Ok(e) => e,
Err(_) => {
warn!("Failed to create volume");
return;
}
};
// autoaudiosink should works on all platforms: https://gstreamer.freedesktop.org/documentation/autodetect/autoaudiosink.html?gi-language=c
let audio_sink = match gst::ElementFactory::make("autoaudiosink")
.name("audiosink")
.build()
{
Ok(e) => e,
Err(_) => {
warn!("Failed to create autoaudiosink");
return;
}
};
// Add elements to pipeline
if let Err(e) = pad
.parent_element()
.unwrap()
.parent()
.unwrap()
.downcast_ref::<gst::Pipeline>()
.unwrap()
.add_many([
&audioconvert,
&audioresample,
&level,
&bpmdetect,
&spectrum,
&volume,
&audio_sink,
])
{
warn!("Failed to add audio elements: {e:?}");
return;
}
// Link audio elements
if let Err(e) = gst::Element::link_many([
&audioconvert,
&audioresample,
&level,
&bpmdetect,
&spectrum,
&volume,
&audio_sink,
]) {
warn!("Failed to link audio elements: {e:?}");
return;
}
// Set elements to PAUSED state
let _ = audioconvert.sync_state_with_parent();
let _ = audioresample.sync_state_with_parent();
let _ = level.sync_state_with_parent();
let _ = bpmdetect.sync_state_with_parent();
let _ = spectrum.sync_state_with_parent();
let _ = volume.sync_state_with_parent();
let _ = audio_sink.sync_state_with_parent();
*audioconvert_lock = Some(audioconvert.clone());
}
// Link decoder pad to audioconvert
if let Some(audioconvert) = &*audioconvert_lock {
let sink_pad = match audioconvert.static_pad("sink") {
Some(pad) => pad,
_ => return,
};
if !sink_pad.is_linked() {
match pad.link(&sink_pad) {
Ok(_) => {
info!("Linked decoder to audioconvert successfully");
}
Err(err) => {
warn!("Failed to link audio pad: {err:?}");
}
}
}
}
}
}
});
// Create shared state
let current_frame = Arc::new(Mutex::new(None));
let current_frame_clone = current_frame.clone();
let position = Arc::new(Mutex::new(gst::ClockTime::ZERO));
let is_playing = Arc::new(Mutex::new(false));
let volume_val = Arc::new(Mutex::new(1.0));
let is_muted = Arc::new(Mutex::new(false));
// Setup callbacks to receive frames
appsink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
.new_sample(move |sink| {
let sample = match sink.pull_sample() {
Ok(sample) => sample,
Err(_) => return Err(gst::FlowError::Eos),
};
let buffer = match sample.buffer() {
Some(buffer) => buffer,
_ => return Err(gst::FlowError::Error),
};
let caps = match sample.caps() {
Some(caps) => caps,
_ => return Err(gst::FlowError::Error),
};
let video_info = match gst_video::VideoInfo::from_caps(caps) {
Ok(info) => info,
Err(_) => return Err(gst::FlowError::Error),
};
let map = match buffer.map_readable() {
Ok(map) => map,
Err(_) => return Err(gst::FlowError::Error),
};
// Access the raw frame data
let frame_data = map.as_slice();
let width = video_info.width() as usize;
let height = video_info.height() as usize;
// Create an RgbaImage from the frame data
// (We need to copy the data because buffer will be unmapped after this function)
let mut rgba_image = image::RgbaImage::new(width as u32, height as u32);
// Stride might be larger than width * 4
let stride = video_info.stride()[0] as usize;
for y in 0..height {
let src_start = y * stride;
let src_end = src_start + width * 4;
let dst_start = y * width * 4;
let dst_end = dst_start + width * 4;
// Copy row by row to handle stride correctly
let dst_buffer = rgba_image.as_mut();
if src_end <= frame_data.len() && dst_end <= dst_buffer.len() {
dst_buffer[dst_start..dst_end]
.copy_from_slice(&frame_data[src_start..src_end]);
}
}
// Store the frame
if let Ok(mut frame_lock) = current_frame_clone.lock() {
*frame_lock = Some(rgba_image);
}
Ok(gst::FlowSuccess::Ok)
})
.build(),
);
// init the object
let mut video_texture = Self {
texture_manager,
pipeline,
appsink,
has_audio: false,
audio_only: Arc::new(Mutex::new(false)),
volume: volume_val,
is_muted,
dimensions: (1, 1),
duration: None,
position,
framerate: None,
is_playing,
loop_playback: Arc::new(Mutex::new(true)),
last_update: Instant::now(),
current_frame,
video_path: path_str,
texture_initialized: false,
frame_count: 0,
spectrum_enabled,
spectrum_bands,
spectrum_threshold,
spectrum_data,
audio_level,
bpm_value: Arc::new(Mutex::new(0.0)),
has_video: has_video.clone(),
};
// Start pipeline in paused state to get video info
if video_texture
.pipeline
.set_state(gst::State::Paused)
.is_err()
{
return Err(anyhow!("Failed to set pipeline to PAUSED state"));
}
// Wait a bit for pipeline to settle
std::thread::sleep(Duration::from_millis(200));
let state_result = video_texture
.pipeline
.state(gst::ClockTime::from_seconds(1));
if let (_, gst::State::Paused, _) = state_result {
video_texture.query_video_info()?;
} else {
warn!("Pipeline not in PAUSED state, may not be able to query info");
}
// Set specific framerate in the caps filter if we detected one
if let Some(framerate) = video_texture.framerate {
if let Some(capsfilter_elem) = video_texture.pipeline.by_name("capsfilter") {
let caps = gst::Caps::builder("video/x-raw")
.field("framerate", framerate)
.build();
capsfilter_elem.set_property("caps", &caps);
info!(
"Set capsfilter to force framerate {}/{}",
framerate.numer(),
framerate.denom()
);
}
}
video_texture.has_audio = *has_audio.lock().unwrap();
let has_video_track = *has_video.lock().unwrap();
// Determine if this is an audio-only file
if video_texture.has_audio && !has_video_track {
*video_texture.audio_only.lock().unwrap() = true;
info!("Audio-only file detected - no video track present");
// For audio-only files, remove unlinked video elements from the pipeline
if let Some(videorate_elem) = video_texture.pipeline.by_name("rate") {
let _ = videorate_elem.set_state(gst::State::Null);
let _ = video_texture.pipeline.remove(&videorate_elem);
info!("Removed unused videorate element");
}
if let Some(convert_elem) = video_texture.pipeline.by_name("convert") {
let _ = convert_elem.set_state(gst::State::Null);
let _ = video_texture.pipeline.remove(&convert_elem);
info!("Removed unused videoconvert element");
}
if let Some(capsfilter_elem) = video_texture.pipeline.by_name("capsfilter") {
let _ = capsfilter_elem.set_state(gst::State::Null);
let _ = video_texture.pipeline.remove(&capsfilter_elem);
info!("Removed unused capsfilter element");
}
if let Some(sink_elem) = video_texture.pipeline.by_name("sink") {
let _ = sink_elem.set_state(gst::State::Null);
let _ = video_texture.pipeline.remove(&sink_elem);
info!("Removed unused appsink element");
}
}
info!("Video has audio: {}, has video: {}, audio_only: {}",
video_texture.has_audio,
has_video_track,
*video_texture.audio_only.lock().unwrap());
info!("Video texture manager created successfully");
Ok(video_texture)
}
/// Query video information (dimensions, duration, framerate)
fn query_video_info(&mut self) -> Result<()> {
// Query duration
if let Some(duration) = self.pipeline.query_duration::<gst::ClockTime>() {
self.duration = Some(duration);
info!(
"Video duration: {:?} ({:.2} seconds)",
duration,
duration.seconds() as f64
);
}
// Now try to get video info
if let Some(pad) = self.appsink.static_pad("sink") {
if let Some(caps) = pad.current_caps() {
if let Some(s) = caps.structure(0) {
// dims
if let (Ok(width), Ok(height)) = (s.get::<i32>("width"), s.get::<i32>("height"))
{
self.dimensions = (width as u32, height as u32);
info!("Video dimensions: {width}x{height}");
}
// framerate
if let Ok(framerate) = s.get::<gst::Fraction>("framerate") {
self.framerate = Some(framerate);
info!(
"Video framerate: {}/{} (approx. {:.2} fps)",
framerate.numer(),
framerate.denom(),
framerate.numer() as f64 / framerate.denom() as f64
);
}
}
}
}
Ok(())
}
    /// Get the texture manager (for binding to shaders)
    ///
    /// Borrow of the internally owned manager; callers clone nothing.
    pub fn texture_manager(&self) -> &TextureManager {
        &self.texture_manager
    }
/// Update the texture with the current video frame
pub fn update_texture(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
bind_group_layout: &wgpu::BindGroupLayout,
) -> Result<bool> {
// No update needed if video is not playing
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | true |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/gst/webcam.rs | src/gst/webcam.rs | use crate::texture::TextureManager;
use anyhow::{anyhow, Result};
use gst::prelude::*;
use gstreamer as gst;
use gstreamer_app as gst_app;
use gstreamer_video as gst_video;
use log::{debug, info};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use wgpu;
/// Manages a webcam texture that can be updated frame by frame. My approach is actually same for src/gst/video.rs
pub struct WebcamTextureManager {
    /// The underlying TextureManager that handles the WGPU resources
    texture_manager: TextureManager,
    /// The GStreamer pipeline for webcam capture
    pipeline: gst::Pipeline,
    /// The AppSink element that receives decoded frames
    appsink: gst_app::AppSink,
    /// Current webcam dimensions
    dimensions: (u32, u32),
    /// Whether the webcam is currently active
    is_active: Arc<Mutex<bool>>,
    /// Last frame update time
    last_update: Instant,
    /// Frame buffer for the most recently captured frame
    /// (written by the appsink callback on a streaming thread, consumed by
    /// `update_texture` on the render thread)
    current_frame: Arc<Mutex<Option<image::RgbaImage>>>,
    /// Whether the webcam texture has been initialized
    texture_initialized: bool,
    /// Frame counter for debugging
    frame_count: usize,
    /// Webcam device name/index
    device_name: String,
}
impl WebcamTextureManager {
    /// Build the webcam capture pipeline (source → capsfilter → videorate →
    /// videoconvert → appsink) and wrap it together with a GPU texture.
    ///
    /// * `device_index` - optional camera index; `None` falls back to the
    ///   default device.
    ///
    /// The texture starts as a 1x1 placeholder and is replaced by real frame
    /// data once `update_texture` consumes the first captured frame.
    ///
    /// # Errors
    /// Fails if any GStreamer element cannot be created or linked.
    pub fn new(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        bind_group_layout: &wgpu::BindGroupLayout,
        device_index: Option<u32>,
    ) -> Result<Self> {
        // Create a default 1x1 texture initially, this will be replaced with first webcam frame
        let default_image = image::RgbaImage::new(1, 1);
        let texture_manager = TextureManager::new(device, queue, &default_image, bind_group_layout);
        // NOTE(review): the "/dev/video{i}" form only matters on Linux; the
        // macOS/Windows sources below use the numeric index instead and this
        // string is informational there.
        let device_name = device_index
            .map(|i| format!("/dev/video{i}"))
            .unwrap_or_else(|| "0".to_string());
        info!("Creating webcam capture from device: {device_name}");
        let pipeline = gst::Pipeline::new();
        //MAC : https://gstreamer.freedesktop.org/documentation/applemedia/avfvideosrc.html?gi-language=c#avfvideosrc-page
        //Linux: https://gstreamer.freedesktop.org/documentation/video4linux2/v4l2src.html?gi-language=c#v4l2src-page
        //Win : https://gstreamer.freedesktop.org/documentation/winks/index.html?gi-language=c#ksvideosrc-page
        // Will be traditional pipeline.
        // Create source element for webcam
        #[cfg(target_os = "linux")]
        let source = gst::ElementFactory::make("v4l2src")
            .name("webcam_source")
            .property("device", &device_name)
            .build()
            .map_err(|_| anyhow!("Failed to create v4l2src element"))?;
        #[cfg(target_os = "macos")]
        let source = gst::ElementFactory::make("avfvideosrc")
            .name("webcam_source")
            .property("device-index", device_index.unwrap_or(0) as i32)
            .build()
            .map_err(|_| anyhow!("Failed to create avfvideosrc element"))?;
        #[cfg(target_os = "windows")]
        let source = gst::ElementFactory::make("ksvideosrc")
            .name("webcam_source")
            .property("device-index", device_index.unwrap_or(0) as i32)
            .build()
            .map_err(|_| anyhow!("Failed to create ksvideosrc element"))?;
        // Create caps filter to set resolution and framerate
        let caps_filter = gst::ElementFactory::make("capsfilter")
            .name("caps")
            .build()
            .map_err(|_| anyhow!("Failed to create capsfilter element"))?;
        // Set preferred webcam format: EXPERIMENTAL
        // Requests 1280x720 @ 30 fps; the device may negotiate something else.
        let caps = gst::Caps::builder("video/x-raw")
            .field("width", 1280i32)
            .field("height", 720i32)
            .field("framerate", gst::Fraction::new(30, 1))
            .build();
        caps_filter.set_property("caps", &caps);
        // videorate element to stabilize frame timing
        let videorate = gst::ElementFactory::make("videorate")
            .name("rate")
            .build()
            .map_err(|_| anyhow!("Failed to create videorate element"))?;
        // Convert to proper format
        let videoconvert = gst::ElementFactory::make("videoconvert")
            .name("convert")
            .build()
            .map_err(|_| anyhow!("Failed to create videoconvert element"))?;
        // Output sink for video
        let appsink = gst::ElementFactory::make("appsink")
            .name("sink")
            .build()
            .map_err(|_| anyhow!("Failed to create appsink element"))?;
        let appsink = appsink
            .dynamic_cast::<gst_app::AppSink>()
            .map_err(|_| anyhow!("Failed to cast to AppSink"))?;
        // Configure appsink
        // Force RGBA so frames can be copied straight into the wgpu texture.
        appsink.set_caps(Some(
            &gst::Caps::builder("video/x-raw")
                .field("format", gst_video::VideoFormat::Rgba.to_str())
                .build(),
        ));
        // Keep latency low: small queue, drop stale frames, no clock sync.
        appsink.set_max_buffers(2);
        appsink.set_drop(true);
        appsink.set_sync(false);
        pipeline
            .add_many([
                &source,
                &caps_filter,
                &videorate,
                &videoconvert,
                appsink.upcast_ref(),
            ])
            .map_err(|_| anyhow!("Failed to add webcam elements to pipeline"))?;
        // Link elements
        gst::Element::link_many([
            &source,
            &caps_filter,
            &videorate,
            &videoconvert,
            appsink.upcast_ref(),
        ])
        .map_err(|_| anyhow!("Failed to link webcam elements"))?;
        // Create shared state
        let current_frame = Arc::new(Mutex::new(None));
        let current_frame_clone = current_frame.clone();
        let is_active = Arc::new(Mutex::new(false));
        // Setup callbacks to receive frames
        // This closure runs on a GStreamer streaming thread; it only writes
        // the shared frame slot and never touches wgpu resources.
        appsink.set_callbacks(
            gst_app::AppSinkCallbacks::builder()
                .new_sample(move |sink| {
                    let sample = match sink.pull_sample() {
                        Ok(sample) => sample,
                        Err(_) => return Err(gst::FlowError::Eos),
                    };
                    let buffer = match sample.buffer() {
                        Some(buffer) => buffer,
                        _ => return Err(gst::FlowError::Error),
                    };
                    let caps = match sample.caps() {
                        Some(caps) => caps,
                        _ => return Err(gst::FlowError::Error),
                    };
                    let video_info = match gst_video::VideoInfo::from_caps(caps) {
                        Ok(info) => info,
                        Err(_) => return Err(gst::FlowError::Error),
                    };
                    let map = match buffer.map_readable() {
                        Ok(map) => map,
                        Err(_) => return Err(gst::FlowError::Error),
                    };
                    // Access the raw frame data
                    let frame_data = map.as_slice();
                    let width = video_info.width() as usize;
                    let height = video_info.height() as usize;
                    // Create an RgbaImage from the frame data
                    let mut rgba_image = image::RgbaImage::new(width as u32, height as u32);
                    // Stride might be larger than width * 4
                    let stride = video_info.stride()[0] as usize;
                    for y in 0..height {
                        let src_start = y * stride;
                        let src_end = src_start + width * 4;
                        let dst_start = y * width * 4;
                        let dst_end = dst_start + width * 4;
                        let dst_buffer = rgba_image.as_mut();
                        // Copy row by row; rows that would read or write out
                        // of bounds are skipped rather than panicking.
                        if src_end <= frame_data.len() && dst_end <= dst_buffer.len() {
                            dst_buffer[dst_start..dst_end]
                                .copy_from_slice(&frame_data[src_start..src_end]);
                        }
                    }
                    if let Ok(mut frame_lock) = current_frame_clone.lock() {
                        *frame_lock = Some(rgba_image);
                    }
                    Ok(gst::FlowSuccess::Ok)
                })
                .build(),
        );
        // init the obj
        let webcam_texture = Self {
            texture_manager,
            pipeline,
            appsink,
            dimensions: (1280, 720),
            is_active,
            last_update: Instant::now(),
            current_frame,
            texture_initialized: false,
            frame_count: 0,
            device_name,
        };
        info!("Webcam texture manager created successfully");
        Ok(webcam_texture)
    }
pub fn start(&mut self) -> Result<()> {
info!("Starting webcam capture");
match self.pipeline.set_state(gst::State::Playing) {
Ok(_) => {
*self.is_active.lock().unwrap() = true;
// lets wait a moment for the pipeline to start
std::thread::sleep(std::time::Duration::from_millis(100));
// Try to get actual dimensions from the pipeline
if let Some(pad) = self.appsink.static_pad("sink") {
if let Some(caps) = pad.current_caps() {
if let Some(s) = caps.structure(0) {
if let (Ok(width), Ok(height)) =
(s.get::<i32>("width"), s.get::<i32>("height"))
{
self.dimensions = (width as u32, height as u32);
info!("Webcam dimensions: {width}x{height}");
}
}
}
}
Ok(())
}
Err(e) => Err(anyhow!("Failed to start webcam: {:?}", e)),
}
}
pub fn stop(&mut self) -> Result<()> {
info!("Stopping webcam capture");
match self.pipeline.set_state(gst::State::Null) {
Ok(_) => {
*self.is_active.lock().unwrap() = false;
Ok(())
}
Err(e) => Err(anyhow!("Failed to stop webcam: {:?}", e)),
}
}
    /// I need this for wgpu
    ///
    /// Accessor for the underlying `TextureManager` (bind group, view, etc.).
    pub fn texture_manager(&self) -> &TextureManager {
        &self.texture_manager
    }
pub fn update_texture(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
bind_group_layout: &wgpu::BindGroupLayout,
) -> Result<bool> {
// No update needed if webcam is not active
if !*self.is_active.lock().unwrap() {
return Ok(false);
}
// Check if we have a NEW frame to process
let frame_to_process = {
let mut frame_lock = self.current_frame.lock().unwrap();
frame_lock.take()
};
// If we have a frame, update the texture
if let Some(frame) = frame_to_process {
self.frame_count += 1;
let width = frame.width();
let height = frame.height();
// Log less frequently to reduce spam
if self.frame_count % 60 == 0 {
debug!(
"Processing webcam frame #{} (dimensions: {}x{})",
self.frame_count, width, height
);
}
// ALWAYS recreate the texture for the first frame or if dimensions don't match
let should_recreate = !self.texture_initialized
|| self.dimensions != (width, height)
|| self.dimensions.0 <= 1
|| self.dimensions.1 <= 1
|| self.frame_count <= 3;
if should_recreate {
info!("Creating new webcam texture with dimensions: {width}x{height}");
// Create a completely new texture with the frame's dimensions
let new_texture_manager =
TextureManager::new(device, queue, &frame, bind_group_layout);
self.texture_manager = new_texture_manager;
self.dimensions = (width, height);
self.texture_initialized = true;
} else {
self.texture_manager.update(queue, &frame);
}
// Update the last update time
self.last_update = Instant::now();
Ok(true)
} else {
Ok(false)
}
}
    /// Current capture dimensions (width, height) in pixels.
    pub fn dimensions(&self) -> (u32, u32) {
        self.dimensions
    }
    /// Whether the capture pipeline is currently running.
    pub fn is_active(&self) -> bool {
        *self.is_active.lock().unwrap()
    }
    /// Device identifier chosen at construction time.
    pub fn device_name(&self) -> &str {
        &self.device_name
    }
/// Get available webcam devices:
/// DEVICE LISTS: I tested only macos: https://www.ffmpeg.org/ffmpeg-devices.html
/// But according here, this should work anyway...
pub fn list_devices() -> Vec<String> {
let mut devices = Vec::new();
#[cfg(target_os = "linux")]
{
// On Linux, check for /dev/video* devices
for i in 0..10 {
let device_path = format!("/dev/video{}", i);
if std::path::Path::new(&device_path).exists() {
devices.push(device_path);
}
}
}
#[cfg(target_os = "macos")]
{
// On macOS, AVFoundation devices are usually indexed 0, 1, 2...
for i in 0..5 {
devices.push(format!("Camera {i}"));
}
}
#[cfg(target_os = "windows")]
{
// On Windows, DirectShow devices are usually indexed 0, 1, 2...
for i in 0..5 {
devices.push(format!("Camera {}", i));
}
}
if devices.is_empty() {
devices.push("Default Camera".to_string());
}
devices
}
}
impl Drop for WebcamTextureManager {
    /// Tear down the GStreamer pipeline so the camera device is released.
    fn drop(&mut self) {
        info!("Shutting down webcam pipeline");
        // A failed state change is ignored: nothing useful can be done in drop.
        let _ = self.pipeline.set_state(gst::State::Null);
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/gst/mod.rs | src/gst/mod.rs | #[cfg(feature = "media")]
pub mod audio;
#[cfg(feature = "media")]
pub mod video;
#[cfg(feature = "media")]
pub mod webcam;
use log::info;
#[cfg(feature = "media")]
/// Initialize GStreamer for media playback and analysis.
///
/// Call once before constructing any pipeline-backed manager.
///
/// # Errors
/// Propagates `gstreamer::init` failure.
pub fn init() -> anyhow::Result<()> {
    // These are active untill I merge the PR
    //std::env::set_var("GST_DEBUG", "bpmdetect:5,pitch:5,soundtouch:5,bus:4,element:4");
    info!("Setting up GStreamer with enhanced logging");
    gstreamer::init()?;
    info!("GStreamer initialized successfully");
    Ok(())
}
#[cfg(not(feature = "media"))]
/// No-op stand-in used when the `media` feature is disabled.
pub fn init() -> anyhow::Result<()> {
    info!("Media support disabled - skipping GStreamer initialization");
    Ok(())
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/gst/audio.rs | src/gst/audio.rs | use anyhow::{anyhow, Result};
use gst::glib::ControlFlow;
use gst::prelude::*;
use gstreamer as gst;
use log::{debug, info, warn};
use std::sync::{Arc, Mutex};
use std::time::Instant;
/// Envelope phase for ADSR
#[derive(Debug, Clone, Copy, PartialEq)]
enum EnvelopePhase {
    /// Silent; level held at 0.
    Idle,
    /// Rising from the triggered level toward full amplitude.
    Attack,
    /// Falling from full amplitude toward the sustain level.
    Decay,
    /// Holding at the configured sustain level.
    Sustain,
    /// Fading from the current level back to silence.
    Release,
}
/// ADSR envelope configuration
#[derive(Debug, Clone, Copy)]
pub struct EnvelopeConfig {
    /// Attack duration in seconds (compared against elapsed wall-clock time).
    pub attack_time: f32,
    /// Decay duration in seconds.
    pub decay_time: f32,
    /// Level held during the sustain phase (expected 0.0..=1.0).
    pub sustain_level: f32,
    /// Release duration in seconds.
    pub release_time: f32,
}
impl Default for EnvelopeConfig {
    /// Snappy default: 10 ms attack, 100 ms decay, 70% sustain, 300 ms release.
    fn default() -> Self {
        Self {
            attack_time: 0.01,
            decay_time: 0.1,
            sustain_level: 0.7,
            release_time: 0.3,
        }
    }
}
/// Envelope state for a single voice
#[derive(Debug, Clone)]
struct EnvelopeState {
    // Current ADSR phase.
    phase: EnvelopePhase,
    // Most recently computed output level.
    current_level: f32,
    // When the current phase began; progress is measured from here.
    phase_start_time: Instant,
    // Level at the instant the current phase began (interpolation origin).
    phase_start_level: f32,
    // Frequency requested by the most recent trigger, in Hz.
    target_frequency: f64,
    // ADSR timing/level parameters in effect for this note.
    config: EnvelopeConfig,
}
impl EnvelopeState {
    /// Fresh, silent envelope in the `Idle` phase.
    fn new() -> Self {
        Self {
            phase: EnvelopePhase::Idle,
            current_level: 0.0,
            phase_start_time: Instant::now(),
            phase_start_level: 0.0,
            target_frequency: 440.0,
            config: EnvelopeConfig::default(),
        }
    }
    /// Start (or retrigger) a note: enter `Attack` starting from the current
    /// level, so retriggering a sounding voice does not click back to zero.
    fn trigger(&mut self, frequency: f64, config: EnvelopeConfig) {
        self.target_frequency = frequency;
        self.config = config;
        self.phase_start_level = self.current_level;
        self.phase_start_time = Instant::now();
        self.phase = EnvelopePhase::Attack;
    }
fn release(&mut self) {
if self.phase != EnvelopePhase::Idle && self.phase != EnvelopePhase::Release {
self.phase_start_level = self.current_level;
self.phase_start_time = Instant::now();
self.phase = EnvelopePhase::Release;
}
}
    /// Advance the envelope using wall-clock time elapsed in the current
    /// phase and return the new output level.
    ///
    /// Phase transitions (Attack→Decay→Sustain, Release→Idle) occur inline
    /// once a phase's duration has elapsed; zero-length phases are skipped
    /// immediately.
    fn update(&mut self) -> f32 {
        let elapsed = self.phase_start_time.elapsed().as_secs_f32();
        match self.phase {
            EnvelopePhase::Idle => {
                self.current_level = 0.0;
            }
            EnvelopePhase::Attack => {
                if self.config.attack_time > 0.0 {
                    let progress = (elapsed / self.config.attack_time).min(1.0);
                    // Smooth curve for attack
                    let curve = smooth_step(progress);
                    // Interpolate from the level the attack started at up to 1.0.
                    self.current_level =
                        self.phase_start_level + (1.0 - self.phase_start_level) * curve;
                    if progress >= 1.0 {
                        self.phase = EnvelopePhase::Decay;
                        self.phase_start_time = Instant::now();
                        self.phase_start_level = 1.0;
                    }
                } else {
                    // Zero-length attack: jump straight to full level.
                    self.current_level = 1.0;
                    self.phase = EnvelopePhase::Decay;
                    self.phase_start_time = Instant::now();
                    self.phase_start_level = 1.0;
                }
            }
            EnvelopePhase::Decay => {
                if self.config.decay_time > 0.0 {
                    let progress = (elapsed / self.config.decay_time).min(1.0);
                    let curve = smooth_step(progress);
                    // Fall from 1.0 down to the configured sustain level.
                    self.current_level = 1.0 - (1.0 - self.config.sustain_level) * curve;
                    if progress >= 1.0 {
                        self.phase = EnvelopePhase::Sustain;
                        self.phase_start_time = Instant::now();
                    }
                } else {
                    self.current_level = self.config.sustain_level;
                    self.phase = EnvelopePhase::Sustain;
                    self.phase_start_time = Instant::now();
                }
            }
            EnvelopePhase::Sustain => {
                // Hold steady until release() is called.
                self.current_level = self.config.sustain_level;
            }
            EnvelopePhase::Release => {
                if self.config.release_time > 0.0 {
                    let progress = (elapsed / self.config.release_time).min(1.0);
                    // Smoothstep-shaped fade-out from the level held when
                    // release began down to zero.
                    let curve = 1.0 - smooth_step(progress);
                    self.current_level = self.phase_start_level * curve;
                    // Also finish early once the level is inaudible (< 0.001).
                    if progress >= 1.0 || self.current_level < 0.001 {
                        self.phase = EnvelopePhase::Idle;
                        self.current_level = 0.0;
                    }
                } else {
                    self.phase = EnvelopePhase::Idle;
                    self.current_level = 0.0;
                }
            }
        }
        self.current_level
    }
    /// True while the envelope is in any phase other than `Idle`.
    fn is_active(&self) -> bool {
        self.phase != EnvelopePhase::Idle
    }
    /// True only during the release tail.
    fn is_releasing(&self) -> bool {
        self.phase == EnvelopePhase::Release
    }
}
/// Attempt smoothstep for smoother transitions
///
/// Hermite interpolation `3t² - 2t³` over the unit interval; inputs outside
/// `[0, 1]` are clamped first, so the output always lies in `[0, 1]`.
fn smooth_step(t: f32) -> f32 {
    let x = t.clamp(0.0, 1.0);
    x * x * (3.0 - 2.0 * x)
}
/// Unison oscillator - one of multiple oscillators per voice
struct UnisonOscillator {
    // The tone generator (`audiotestsrc`) for this oscillator.
    audiotestsrc: gst::Element,
    // Per-oscillator `volume` element; the envelope writes levels here.
    volume: gst::Element,
    // Fixed detune offset in cents relative to the voice's base frequency.
    detune_cents: f64,
}
/// Individual audio voice for polyphonic synthesis with envelope
struct AudioVoice {
    // Detuned unison oscillators that together form this voice.
    oscillators: Vec<UnisonOscillator>,
    // ADSR state driving this voice's amplitude.
    envelope: EnvelopeState,
    // Last volume written to GStreamer; used for click-free smoothing.
    last_applied_volume: f64,
    // Undetuned frequency of the current note, in Hz.
    base_frequency: f64,
    // Note currently assigned to this voice, if any.
    note: Option<MusicalNote>,
}
impl AudioVoice {
/// Apply detuned frequency to all oscillators
fn set_frequency(&mut self, freq: f64) {
self.base_frequency = freq;
for osc in &self.oscillators {
// Convert cents to frequency multiplier: 2^(cents/1200)
let detune_mult = 2.0_f64.powf(osc.detune_cents / 1200.0);
let detuned_freq = freq * detune_mult;
osc.audiotestsrc.set_property("freq", detuned_freq);
}
}
    /// Advance this voice's ADSR envelope one step and write the resulting
    /// volume to every oscillator, smoothing changes to avoid clicks.
    fn apply_envelope(&mut self) {
        let level = self.envelope.update();
        // Scale by 0.35 for headroom and divide across the unison
        // oscillators so their mixed sum stays at the intended voice level.
        let target_volume = (level as f64 * 0.35) / self.oscillators.len() as f64;
        // Smooth the volume changes to avoid clicks
        let smoothed = self.last_applied_volume + (target_volume - self.last_applied_volume) * 0.3;
        // Skip the GStreamer property writes when the change is negligible.
        if (smoothed - self.last_applied_volume).abs() > 0.0001 {
            for osc in &self.oscillators {
                osc.volume.set_property("volume", smoothed);
                osc.audiotestsrc.set_property("volume", smoothed);
            }
            self.last_applied_volume = smoothed;
        }
    }
/// Set waveform for all oscillators
fn set_waveform(&self, wave: &str) {
for osc in &self.oscillators {
osc.audiotestsrc.set_property_from_str("wave", wave);
}
}
}
pub struct AudioSynthManager {
    // Top-level GStreamer pipeline owning every element below.
    pipeline: gst::Pipeline,
    // Mixer all voices feed into; retained so it lives with the pipeline.
    _audiomixer: gst::Element,
    // Fixed pool of polyphonic voices.
    voices: Vec<AudioVoice>,
    // Currently selected waveform.
    current_waveform: AudioWaveform,
    // Shared master volume level.
    master_volume: Arc<Mutex<f64>>,
    // Output sample rate in Hz.
    sample_rate: u32,
    // Timestamp of the last update pass.
    last_update: Instant,
    // Notes currently sounding, shared across threads.
    active_notes: Arc<Mutex<Vec<MusicalNote>>>,
    // ADSR configuration applied by default.
    default_envelope: EnvelopeConfig,
    // Minimum interval between envelope update passes.
    update_interval: std::time::Duration,
    // When envelopes were last advanced.
    last_envelope_update: Instant,
}
impl AudioSynthManager {
    /// Build the polyphonic synth pipeline: 16 voices → audiomixer →
    /// audioconvert → audioresample → master volume → autoaudiosink.
    ///
    /// * `sample_rate` - output rate in Hz; defaults to 44100 when `None`.
    ///
    /// # Errors
    /// Fails if any GStreamer element cannot be created or linked.
    pub fn new(sample_rate: Option<u32>) -> Result<Self> {
        let sample_rate = sample_rate.unwrap_or(44100);
        let max_voices = 16;
        info!("Creating polyphonic audio synthesis manager with {max_voices} voices at {sample_rate} Hz");
        let pipeline = gst::Pipeline::new();
        let audiomixer = gst::ElementFactory::make("audiomixer")
            .name("audio_mixer")
            .build()
            .map_err(|_| anyhow!("Failed to create audiomixer element"))?;
        let final_convert = gst::ElementFactory::make("audioconvert")
            .name("final_convert")
            .build()
            .map_err(|_| anyhow!("Failed to create final audioconvert element"))?;
        let final_resample = gst::ElementFactory::make("audioresample")
            .name("final_resample")
            .build()
            .map_err(|_| anyhow!("Failed to create final audioresample element"))?;
        // Add a limiter to prevent clipping with multiple voices
        let master_volume = gst::ElementFactory::make("volume")
            .name("master_volume")
            .property("volume", 0.5f64)
            .build()
            .map_err(|_| anyhow!("Failed to create master volume element"))?;
        let audiosink = gst::ElementFactory::make("autoaudiosink")
            .name("audio_sink")
            .build()
            .map_err(|_| anyhow!("Failed to create autoaudiosink element"))?;
        pipeline
            .add_many([
                &audiomixer,
                &final_convert,
                &final_resample,
                &master_volume,
                &audiosink,
            ])
            .map_err(|_| anyhow!("Failed to add output elements to pipeline"))?;
        gst::Element::link_many([
            &audiomixer,
            &final_convert,
            &final_resample,
            &master_volume,
            &audiosink,
        ])
        .map_err(|_| anyhow!("Failed to link output elements"))?;
        // Pre-allocate the fixed voice pool; each voice links itself into the mixer.
        let mut voices = Vec::new();
        for i in 0..max_voices {
            let voice = Self::create_voice(&pipeline, &audiomixer, i, sample_rate)?;
            voices.push(voice);
        }
        // Watch the bus to surface errors/warnings/EOS in the logs.
        let bus = pipeline.bus().expect("Pipeline has no bus");
        let _ = bus.add_watch(move |_, message| {
            match message.view() {
                gst::MessageView::Error(err) => {
                    warn!(
                        "Audio pipeline error: {} ({})",
                        err.error(),
                        err.debug().unwrap_or_default()
                    );
                }
                gst::MessageView::Warning(warning) => {
                    debug!("Audio pipeline warning: {}", warning.error());
                }
                gst::MessageView::Eos(_) => {
                    info!("Audio pipeline reached end of stream");
                }
                _ => (),
            }
            ControlFlow::Continue
        });
        Ok(Self {
            pipeline,
            _audiomixer: audiomixer,
            voices,
            current_waveform: AudioWaveform::Sine,
            master_volume: Arc::new(Mutex::new(0.5)),
            sample_rate,
            last_update: Instant::now(),
            active_notes: Arc::new(Mutex::new(Vec::new())),
            default_envelope: EnvelopeConfig::default(),
            update_interval: std::time::Duration::from_millis(5),
            last_envelope_update: Instant::now(),
        })
    }
    /// Create one voice: three `audiotestsrc` oscillators detuned by
    /// -12/0/+12 cents, each routed through audioconvert → audioresample →
    /// volume and linked into the shared mixer.
    ///
    /// All oscillators start silent (volume 0.0); the envelope raises them.
    fn create_voice(
        pipeline: &gst::Pipeline,
        mixer: &gst::Element,
        voice_id: usize,
        _sample_rate: u32,
    ) -> Result<AudioVoice> {
        // Unison detune amounts in cents (100 cents = 1 semitone)
        let detune_values = [-12.0, 0.0, 12.0];
        let mut oscillators = Vec::new();
        for (osc_idx, &detune_cents) in detune_values.iter().enumerate() {
            let audiotestsrc = gst::ElementFactory::make("audiotestsrc")
                .name(format!("voice_{voice_id}_osc_{osc_idx}"))
                .property("freq", 440.0f64)
                .property("volume", 0.0f64)
                .property("samplesperbuffer", 512i32)
                .property("is-live", true)
                .build()
                .map_err(|_| {
                    anyhow!(
                        "Failed to create audiotestsrc for voice {} osc {}",
                        voice_id,
                        osc_idx
                    )
                })?;
            audiotestsrc.set_property_from_str("wave", "sine");
            let audioconvert = gst::ElementFactory::make("audioconvert")
                .name(format!("voice_{voice_id}_osc_{osc_idx}_convert"))
                .build()
                .map_err(|_| {
                    anyhow!(
                        "Failed to create audioconvert for voice {} osc {}",
                        voice_id,
                        osc_idx
                    )
                })?;
            let audioresample = gst::ElementFactory::make("audioresample")
                .name(format!("voice_{voice_id}_osc_{osc_idx}_resample"))
                .build()
                .map_err(|_| {
                    anyhow!(
                        "Failed to create audioresample for voice {} osc {}",
                        voice_id,
                        osc_idx
                    )
                })?;
            let volume = gst::ElementFactory::make("volume")
                .name(format!("voice_{voice_id}_osc_{osc_idx}_volume"))
                .property("volume", 0.0f64)
                .build()
                .map_err(|_| {
                    anyhow!(
                        "Failed to create volume for voice {} osc {}",
                        voice_id,
                        osc_idx
                    )
                })?;
            pipeline
                .add_many([&audiotestsrc, &audioconvert, &audioresample, &volume])
                .map_err(|_| {
                    anyhow!("Failed to add voice {} osc {} elements", voice_id, osc_idx)
                })?;
            gst::Element::link_many([&audiotestsrc, &audioconvert, &audioresample, &volume])
                .map_err(|_| {
                    anyhow!("Failed to link voice {} osc {} elements", voice_id, osc_idx)
                })?;
            volume.link(mixer).map_err(|_| {
                anyhow!("Failed to link voice {} osc {} to mixer", voice_id, osc_idx)
            })?;
            oscillators.push(UnisonOscillator {
                audiotestsrc,
                volume,
                detune_cents,
            });
        }
        debug!(
            "Created voice {} with {} unison oscillators",
            voice_id,
            oscillators.len()
        );
        Ok(AudioVoice {
            oscillators,
            envelope: EnvelopeState::new(),
            last_applied_volume: 0.0,
            base_frequency: 440.0,
            note: None,
        })
    }
/// Replace the default ADSR envelope applied when a note or frequency is
/// triggered without an explicit envelope configuration.
pub fn set_envelope_config(&mut self, config: EnvelopeConfig) {
    self.default_envelope = config;
}
/// Bring the GStreamer pipeline into the `Playing` state.
///
/// # Errors
/// Returns an error if the pipeline refuses the state change.
pub fn start(&mut self) -> Result<()> {
    info!("Starting polyphonic audio synthesis");
    self.pipeline
        .set_state(gst::State::Playing)
        .map_err(|e| anyhow!("Failed to start audio synthesis: {:?}", e))?;
    // Give the pipeline a brief moment to settle before the first note.
    std::thread::sleep(std::time::Duration::from_millis(50));
    Ok(())
}
/// Silence every voice, reset envelopes and note bookkeeping, then drive
/// the pipeline to `Null`.
///
/// # Errors
/// Returns an error if the pipeline refuses the state change.
pub fn stop(&mut self) -> Result<()> {
    info!("Stopping audio synthesis");
    for voice in self.voices.iter_mut() {
        // Hard-mute every unison oscillator in this voice.
        for osc in &voice.oscillators {
            osc.volume.set_property("volume", 0.0f64);
            osc.audiotestsrc.set_property("volume", 0.0f64);
        }
        voice.last_applied_volume = 0.0;
        voice.envelope = EnvelopeState::new();
        voice.note = None;
    }
    self.active_notes.lock().unwrap().clear();
    self.pipeline
        .set_state(gst::State::Null)
        .map_err(|e| anyhow!("Failed to stop audio synthesis: {:?}", e))?;
    Ok(())
}
/// Set the overall output volume, clamping the input into [0.0, 1.0].
///
/// `f64::clamp` replaces the old `max().min()` chain (clippy:
/// `manual_clamp`); NaN input is out of contract for a volume value.
pub fn set_master_volume(&mut self, vol: f64) -> Result<()> {
    let clamped_volume = vol.clamp(0.0, 1.0);
    *self.master_volume.lock().unwrap() = clamped_volume;
    // The master volume element is looked up by name; skip silently if the
    // pipeline was built without one.
    if let Some(master_vol) = self.pipeline.by_name("master_volume") {
        master_vol.set_property("volume", clamped_volume);
    }
    debug!("Set master volume to {clamped_volume:.3}");
    Ok(())
}
/// Switch every voice's oscillators to `wave_type` and remember the choice.
pub fn set_waveform(&mut self, wave_type: AudioWaveform) -> Result<()> {
    self.current_waveform = wave_type;
    // Translate to the string value the `audiotestsrc` "wave" property expects.
    let wave_str = match wave_type {
        AudioWaveform::Sine => "sine",
        AudioWaveform::Square => "square",
        AudioWaveform::Saw => "saw",
        AudioWaveform::Triangle => "triangle",
    };
    self.voices.iter().for_each(|v| v.set_waveform(wave_str));
    debug!("Set waveform to {wave_type:?}");
    Ok(())
}
/// Trigger a raw frequency on `voice_id` using the default envelope.
pub fn play_frequency(&mut self, frequency: f64, voice_id: usize) -> Result<()> {
    self.play_frequency_with_config(frequency, voice_id, self.default_envelope)
}
/// Trigger a raw frequency on `voice_id` with a custom envelope.
///
/// # Errors
/// Returns an error if `voice_id` is out of range.
pub fn play_frequency_with_config(
    &mut self,
    frequency: f64,
    voice_id: usize,
    envelope_config: EnvelopeConfig,
) -> Result<()> {
    // Checked lookup replaces the manual bounds test + index.
    let voice = self
        .voices
        .get_mut(voice_id)
        .ok_or_else(|| anyhow!("Voice ID {} out of range", voice_id))?;
    // Set frequency on all oscillators (with detuning)
    voice.set_frequency(frequency);
    // Trigger envelope
    voice.envelope.trigger(frequency, envelope_config);
    // Raw-frequency playback is not tied to a musical note.
    voice.note = None;
    debug!("Triggered frequency {frequency:.2} Hz on voice {voice_id}");
    Ok(())
}
/// Release a specific voice (start release phase, don't stop immediately)
///
/// # Errors
/// Returns an error if `voice_id` is out of range.
pub fn release_voice(&mut self, voice_id: usize) -> Result<()> {
    let voice = self
        .voices
        .get_mut(voice_id)
        .ok_or_else(|| anyhow!("Voice ID {} out of range", voice_id))?;
    voice.envelope.release();
    debug!("Released voice {voice_id}");
    Ok(())
}
/// Stop a voice immediately (use sparingly, prefer release_voice)
///
/// # Errors
/// Returns an error if `voice_id` is out of range.
pub fn stop_voice(&mut self, voice_id: usize) -> Result<()> {
    let voice = self
        .voices
        .get_mut(voice_id)
        .ok_or_else(|| anyhow!("Voice ID {} out of range", voice_id))?;
    voice.envelope = EnvelopeState::new();
    // Silence all oscillators
    for osc in &voice.oscillators {
        osc.volume.set_property("volume", 0.0f64);
        osc.audiotestsrc.set_property("volume", 0.0f64);
    }
    voice.last_applied_volume = 0.0;
    voice.note = None;
    debug!("Stopped voice {voice_id}");
    Ok(())
}
/// Update frequency and amplitude for a voice with smooth transition
///
/// Only glides voices whose envelope is still active; `_amplitude` is
/// currently unused.
///
/// # Errors
/// Returns an error if `voice_id` is out of range.
pub fn update_voice_frequency(
    &mut self,
    voice_id: usize,
    frequency: f64,
    _amplitude: f64,
) -> Result<()> {
    let voice = self
        .voices
        .get_mut(voice_id)
        .ok_or_else(|| anyhow!("Voice ID {} out of range", voice_id))?;
    if voice.envelope.is_active() {
        voice.set_frequency(frequency);
        voice.envelope.target_frequency = frequency;
    }
    Ok(())
}
/// Voice-allocation policy: prefer a fully idle voice, otherwise steal a
/// voice that is already in its release phase; `None` when every voice is
/// still busy sustaining.
fn find_available_voice(&mut self) -> Option<usize> {
    let idle = self.voices.iter().position(|v| !v.envelope.is_active());
    idle.or_else(|| self.voices.iter().position(|v| v.envelope.is_releasing()))
}
/// Play a note with proper voice allocation
pub fn play_note(&mut self, note: MusicalNote) -> Result<()> {
    self.play_note_with_config(note, self.default_envelope)
}
/// Play a note with custom envelope
///
/// Retriggers the note in place if it is already sounding on a
/// non-releasing voice; otherwise allocates a voice via
/// `find_available_voice`. When no voice is available the note is dropped
/// with a warning (voice stealing only targets releasing voices).
pub fn play_note_with_config(
    &mut self,
    note: MusicalNote,
    envelope_config: EnvelopeConfig,
) -> Result<()> {
    // Check if note is already playing on a non-releasing voice
    if let Some(voice) = self
        .voices
        .iter_mut()
        .find(|v| v.note == Some(note) && v.envelope.is_active() && !v.envelope.is_releasing())
    {
        // Retrigger the existing voice
        voice.envelope.trigger(note.to_frequency(), envelope_config);
        return Ok(());
    }
    // Find an available voice
    if let Some(voice_idx) = self.find_available_voice() {
        let voice = &mut self.voices[voice_idx];
        let freq = note.to_frequency();
        voice.set_frequency(freq);
        voice.envelope.trigger(freq, envelope_config);
        voice.note = Some(note);
        // Record the note for consumers (uniforms/visualization); it is
        // removed again in `update` once the envelope finishes.
        let mut active_notes = self.active_notes.lock().unwrap();
        if !active_notes.contains(&note) {
            active_notes.push(note);
        }
        debug!("Playing note {note:?} ({freq:.2} Hz) on voice {voice_idx}");
    } else {
        warn!("No available voice for note {note:?}");
    }
    Ok(())
}
/// Release every non-releasing voice currently sounding `note`.
pub fn stop_note(&mut self, note: MusicalNote) -> Result<()> {
    self.voices
        .iter_mut()
        .filter(|v| v.note == Some(note) && !v.envelope.is_releasing())
        .for_each(|v| v.envelope.release());
    // The note leaves `active_notes` once its envelope finishes in `update`.
    debug!("Released note {note:?}");
    Ok(())
}
/// Start the release phase on every active voice.
pub fn stop_all_notes(&mut self) -> Result<()> {
    self.voices
        .iter_mut()
        .filter(|v| v.envelope.is_active())
        .for_each(|v| v.envelope.release());
    debug!("Released all notes");
    Ok(())
}
/// Must be called regularly (every frame) to update envelopes
///
/// Envelope values are only applied every `update_interval` to bound the
/// rate of GStreamer property writes; notes whose release finished are
/// pruned from `active_notes`.
pub fn update(&mut self) {
    let now = Instant::now();
    // Update envelopes at regular intervals
    if now.duration_since(self.last_envelope_update) >= self.update_interval {
        self.last_envelope_update = now;
        let mut finished_notes = Vec::new();
        for voice in &mut self.voices {
            voice.apply_envelope();
            // Track notes that finished their release phase
            if !voice.envelope.is_active() && voice.note.is_some() {
                finished_notes.push(voice.note.take().unwrap());
            }
        }
        // Remove finished notes from active list
        if !finished_notes.is_empty() {
            let mut active_notes = self.active_notes.lock().unwrap();
            for note in finished_notes {
                active_notes.retain(|&n| n != note);
            }
        }
    }
    self.last_update = now;
}
/// Currently selected oscillator waveform.
pub fn waveform(&self) -> AudioWaveform {
    self.current_waveform
}
/// Master volume; `set_master_volume` keeps it within [0.0, 1.0].
pub fn master_volume(&self) -> f64 {
    *self.master_volume.lock().unwrap()
}
/// True while any voice's envelope is still producing sound.
pub fn is_active(&self) -> bool {
    self.voices.iter().any(|v| v.envelope.is_active())
}
/// Snapshot of the notes currently held or releasing.
pub fn active_notes(&self) -> Vec<MusicalNote> {
    self.active_notes.lock().unwrap().clone()
}
/// Sample rate the synthesizer was created with.
pub fn sample_rate(&self) -> u32 {
    self.sample_rate
}
/// Get the current envelope level for a voice (useful for visualization)
///
/// Returns 0.0 for out-of-range voice ids.
pub fn get_voice_level(&self, voice_id: usize) -> f32 {
    // Checked lookup replaces the manual bounds test + index.
    self.voices
        .get(voice_id)
        .map_or(0.0, |v| v.envelope.current_level)
}
/// Check if a specific voice is active
///
/// Returns false for out-of-range voice ids.
pub fn is_voice_active(&self, voice_id: usize) -> bool {
    self.voices
        .get(voice_id)
        .map_or(false, |v| v.envelope.is_active())
}
}
impl Drop for AudioSynthManager {
    /// Drive the pipeline to `Null` so GStreamer threads and audio device
    /// handles are released; shutdown errors are deliberately ignored.
    fn drop(&mut self) {
        info!("Shutting down audio synthesis pipeline");
        let _ = self.pipeline.set_state(gst::State::Null);
    }
}
/// Supported waveform types for audio synthesis
///
/// The variants map to the `audiotestsrc` "wave" property values
/// ("sine", "square", "saw", "triangle") in `set_waveform`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AudioWaveform {
    Sine,
    Square,
    Saw,
    Triangle,
}
/// Musical notes with their frequencies
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MusicalNote {
    C4,
    CSharp4,
    D4,
    DSharp4,
    E4,
    F4,
    FSharp4,
    G4,
    GSharp4,
    A4,
    ASharp4,
    B4,
    C5,
}
impl MusicalNote {
    /// Chromatic table of `(note, frequency in Hz, display name)` covering
    /// one octave from C4 up to C5.
    const TABLE: [(MusicalNote, f64, &'static str); 13] = [
        (MusicalNote::C4, 261.63, "C4"),
        (MusicalNote::CSharp4, 277.18, "C#4"),
        (MusicalNote::D4, 293.66, "D4"),
        (MusicalNote::DSharp4, 311.13, "D#4"),
        (MusicalNote::E4, 329.63, "E4"),
        (MusicalNote::F4, 349.23, "F4"),
        (MusicalNote::FSharp4, 369.99, "F#4"),
        (MusicalNote::G4, 392.00, "G4"),
        (MusicalNote::GSharp4, 415.30, "G#4"),
        (MusicalNote::A4, 440.00, "A4"),
        (MusicalNote::ASharp4, 466.16, "A#4"),
        (MusicalNote::B4, 493.88, "B4"),
        (MusicalNote::C5, 523.25, "C5"),
    ];
    /// Position of `self` within [`Self::TABLE`] (chromatic order).
    fn chromatic_index(self) -> usize {
        Self::TABLE
            .iter()
            .position(|&(note, _, _)| note == self)
            .expect("every variant is listed in TABLE")
    }
    /// Frequency of the note in Hz (A4 = 440 Hz reference).
    pub fn to_frequency(self) -> f64 {
        Self::TABLE[self.chromatic_index()].1
    }
    /// Map number keys to notes: 1-8 are the C-major scale C4..C5, 9 is
    /// C#4; any other number yields `None`.
    pub fn from_keyboard_number(num: u32) -> Option<Self> {
        const KEYS: [MusicalNote; 9] = [
            MusicalNote::C4,
            MusicalNote::D4,
            MusicalNote::E4,
            MusicalNote::F4,
            MusicalNote::G4,
            MusicalNote::A4,
            MusicalNote::B4,
            MusicalNote::C5,
            MusicalNote::CSharp4,
        ];
        match num {
            1..=9 => Some(KEYS[(num - 1) as usize]),
            _ => None,
        }
    }
    /// Human-readable note name, e.g. `"C#4"`.
    pub fn name(self) -> &'static str {
        Self::TABLE[self.chromatic_index()].2
    }
}
/// Simple frequency-to-audio-data converter for visualization
pub struct AudioDataProvider {
    // Monotonic counter advanced by `update`; not read by the generator.
    sample_count: usize,
}
impl AudioDataProvider {
    /// Provider with a zeroed sample counter.
    pub fn new() -> Self {
        Self { sample_count: 0 }
    }
    /// Advance the internal sample counter; note/volume state is unused.
    pub fn update(&mut self, _active_notes: &[MusicalNote], _master_volume: f64) {
        self.sample_count += 1;
    }
    /// Render the active notes into a 128-slot amplitude field packed as
    /// 32 vec4s (uniform-buffer friendly). Each of the 9 playable notes
    /// owns a contiguous band of slots; amplitude falls off linearly from
    /// the band centre and overlapping contributions are summed, clamped
    /// to 1.0.
    pub fn generate_audio_data(
        &self,
        active_notes: &[MusicalNote],
        master_volume: f64,
    ) -> [[f32; 4]; 32] {
        let mut audio_data = [[0.0f32; 4]; 32];
        // Same note order as the keyboard mapping (1-8 = C-major scale, 9 = C#4).
        let note_mapping = [
            MusicalNote::C4,
            MusicalNote::D4,
            MusicalNote::E4,
            MusicalNote::F4,
            MusicalNote::G4,
            MusicalNote::A4,
            MusicalNote::B4,
            MusicalNote::C5,
            MusicalNote::CSharp4,
        ];
        if master_volume > 0.0 {
            // Loop-invariant values hoisted out of the per-note loop.
            let positions_per_note = 128 / 9;
            let amplitude = (master_volume * 0.9) as f32;
            for (note_index, &mapped_note) in note_mapping.iter().enumerate() {
                if !active_notes.contains(&mapped_note) {
                    continue;
                }
                let start_pos = note_index * positions_per_note;
                for i in 0..positions_per_note.min(10) {
                    let pos = start_pos + i;
                    if pos >= 128 {
                        continue;
                    }
                    let array_index = pos / 4;
                    let component_index = pos % 4;
                    if array_index < 32 {
                        // Linear falloff from the centre of this note's band.
                        let distance_from_center =
                            (i as f32 - positions_per_note as f32 / 2.0).abs();
                        let falloff = (1.0
                            - distance_from_center / (positions_per_note as f32 / 2.0))
                            .max(0.0);
                        let final_amplitude = amplitude * falloff;
                        audio_data[array_index][component_index] =
                            (audio_data[array_index][component_index] + final_amplitude)
                                .min(1.0);
                    }
                }
            }
        }
        audio_data
    }
}
impl Default for AudioDataProvider {
    /// Equivalent to [`AudioDataProvider::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// GPU uniform mirroring the synthesizer state for shader-side visuals:
/// up to 16 notes packed into four vec4s of frequencies and amplitudes.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct AudioSynthUniform {
    pub note_frequencies: [[f32; 4]; 4],
    pub note_amplitudes: [[f32; 4]; 4],
    pub master_volume: f32,
    // 0 = sine, 1 = square, 2 = saw, 3 = triangle (set in `update_from_synthesis`).
    pub waveform_type: u32,
    pub active_note_count: u32,
    // Explicit padding; keeps the struct free of implicit padding bytes.
    pub _padding: u32,
}
// SAFETY: `AudioSynthUniform` is `#[repr(C)]` and contains only `f32`/`u32`
// fields (no pointers or references); the explicit `_padding` leaves no
// implicit padding bytes, so any bit pattern is a valid value.
unsafe impl bytemuck::Pod for AudioSynthUniform {}
// SAFETY: all fields are plain numerics for which the all-zero bit pattern
// is a valid value.
unsafe impl bytemuck::Zeroable for AudioSynthUniform {}
impl crate::UniformProvider for AudioSynthUniform {
    /// Raw byte view for uploading the uniform to the GPU.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
impl Default for AudioSynthUniform {
    /// Equivalent to [`AudioSynthUniform::new`]: silent, no notes, sine.
    fn default() -> Self {
        Self::new()
    }
}
impl AudioSynthUniform {
    /// All-zero uniform: no notes, silent, sine waveform selected.
    pub fn new() -> Self {
        Self {
            note_frequencies: [[0.0; 4]; 4],
            note_amplitudes: [[0.0; 4]; 4],
            master_volume: 0.0,
            waveform_type: 0,
            active_note_count: 0,
            _padding: 0,
        }
    }
    /// Refresh the uniform from the current synthesis state. Up to 16
    /// active notes are packed into the four vec4 slots; extras are dropped.
    pub fn update_from_synthesis(
        &mut self,
        active_notes: &[MusicalNote],
        master_volume: f64,
        waveform: AudioWaveform,
    ) {
        // Reset both tables before repacking.
        self.note_frequencies = [[0.0; 4]; 4];
        self.note_amplitudes = [[0.0; 4]; 4];
        self.master_volume = master_volume as f32;
        self.waveform_type = match waveform {
            AudioWaveform::Sine => 0,
            AudioWaveform::Square => 1,
            AudioWaveform::Saw => 2,
            AudioWaveform::Triangle => 3,
        };
        self.active_note_count = active_notes.len().min(16) as u32;
        for (slot, note) in active_notes.iter().take(16).enumerate() {
            let (row, col) = (slot / 4, slot % 4);
            self.note_frequencies[row][col] = note.to_frequency() as f32;
            self.note_amplitudes[row][col] = master_volume as f32;
        }
    }
}
/// Voice state for the synthesis manager
#[derive(Clone)]
struct VoiceState {
    // Oscillator frequency in Hz.
    frequency: f32,
    // Linear amplitude; 0.0 = silent.
    amplitude: f32,
    // Whether this voice slot is currently in use.
    active: bool,
}
impl Default for VoiceState {
    /// Idle voice: A4 reference frequency (440 Hz), silent, inactive.
    fn default() -> Self {
        Self {
            frequency: 440.0,
            amplitude: 0.0,
            active: false,
        }
    }
}
// High-level synthesis manager that bridges GPU-computed parameters to GStreamer audio
pub struct SynthesisManager {
    // Backing GStreamer synth; `None` when audio initialization failed, in
    // which case the control methods become no-ops.
    audio_manager: Option<AudioSynthManager>,
    sample_rate: u32,
    last_update: Instant,
    synthesis_enabled: bool,
    voice_states: Vec<VoiceState>,
    envelope_config: EnvelopeConfig,
}
impl SynthesisManager {
/// Create the manager with 16 voice slots at 44.1 kHz.
///
/// Audio output is best-effort: if the GStreamer backend fails to
/// initialize, the failure is reported on stderr and the manager runs
/// without sound instead of returning an error.
pub fn new() -> anyhow::Result<Self> {
    let audio_manager = AudioSynthManager::new(Some(44100))
        .map_err(|e| eprintln!("Failed to create GStreamer audio manager: {e}"))
        .ok();
    Ok(Self {
        audio_manager,
        sample_rate: 44100,
        last_update: Instant::now(),
        synthesis_enabled: false,
        voice_states: vec![VoiceState::default(); 16],
        envelope_config: EnvelopeConfig::default(),
    })
}
pub fn start_gpu_synthesis(&mut self) -> anyhow::Result<()> {
if let Some(ref mut manager) = self.audio_manager {
manager.start()?;
self.synthesis_enabled = true;
}
Ok(())
}
pub fn stop_gpu_synthesis(&mut self) -> anyhow::Result<()> {
if let Some(ref mut manager) = self.audio_manager {
manager.stop()?;
self.synthesis_enabled = false;
}
Ok(())
}
/// Set the global envelope configuration
pub fn set_envelope(&mut self, config: EnvelopeConfig) {
    self.envelope_config = config;
    if let Some(ref mut manager) = self.audio_manager {
        manager.set_envelope_config(config);
    }
}
/// Set envelope from ADSR values
///
/// NOTE(review): times appear to be seconds and `sustain` a level in
/// [0, 1] — confirm against `EnvelopeConfig`'s definition.
pub fn set_adsr(&mut self, attack: f32, decay: f32, sustain: f32, release: f32) {
    self.set_envelope(EnvelopeConfig {
        attack_time: attack,
        decay_time: decay,
        sustain_level: sustain,
        release_time: release,
    });
}
/// Drive voice 0 with `frequency` at a fixed 0.3 amplitude.
pub fn update_frequency(&mut self, frequency: f32) {
    self.set_voice(0, frequency, 0.3, true);
}
pub fn update_waveform(&mut self, waveform_type: u32) {
if let Some(ref mut manager) = self.audio_manager {
let gst_waveform = match waveform_type {
0 => AudioWaveform::Sine,
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | true |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/compute/resource.rs | src/compute/resource.rs | use std::collections::HashMap;
use wgpu;
/// The kinds of GPU resources a compute pipeline can declare.
#[derive(Debug, Clone)]
pub enum ResourceType {
    /// Uniform buffer of `size` bytes.
    UniformBuffer {
        size: u64,
    },
    /// Storage buffer of `size` bytes; `read_only` selects the shader access mode.
    StorageBuffer {
        size: u64,
        read_only: bool,
    },
    /// Storage texture with an explicit format and access mode.
    StorageTexture {
        format: wgpu::TextureFormat,
        access: wgpu::StorageTextureAccess,
    },
    /// Sampled input texture (filterable float, 2D).
    InputTexture,
    ChannelTexture, // External texture channels (channel0, channel1, etc.)
    /// Filtering sampler.
    Sampler,
}
/// One resource slot: its bind group index, binding index, lookup name,
/// and resource kind.
#[derive(Debug, Clone)]
pub struct ResourceBinding {
    pub group: u32,
    pub binding: u32,
    pub name: String,
    pub resource_type: ResourceType,
}
/// Ordered collection of resource bindings from which wgpu bind group
/// layouts are generated.
#[derive(Debug, Default)]
pub struct ResourceLayout {
    pub bindings: Vec<ResourceBinding>,
}
impl ResourceLayout {
pub fn new() -> Self {
Self {
bindings: Vec::new(),
}
}
pub fn add_resource(&mut self, group: u32, name: &str, resource_type: ResourceType) {
let binding = self.next_binding_in_group(group);
self.bindings.push(ResourceBinding {
group,
binding,
name: name.to_string(),
resource_type,
});
}
fn next_binding_in_group(&self, group: u32) -> u32 {
self.bindings
.iter()
.filter(|b| b.group == group)
.map(|b| b.binding)
.max()
.map(|max| max + 1)
.unwrap_or(0)
}
pub fn create_bind_group_layouts(
&self,
device: &wgpu::Device,
) -> HashMap<u32, wgpu::BindGroupLayout> {
let mut groups: HashMap<u32, Vec<&ResourceBinding>> = HashMap::new();
for binding in &self.bindings {
groups.entry(binding.group).or_default().push(binding);
}
// layout for each group
groups
.into_iter()
.map(|(group_idx, bindings)| {
let entries: Vec<wgpu::BindGroupLayoutEntry> = bindings
.iter()
.map(|binding| self.create_layout_entry(binding))
.collect();
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some(&format!("Dynamic Group {group_idx} Layout")),
entries: &entries,
});
(group_idx, layout)
})
.collect()
}
fn create_layout_entry(&self, binding: &ResourceBinding) -> wgpu::BindGroupLayoutEntry {
wgpu::BindGroupLayoutEntry {
binding: binding.binding,
visibility: wgpu::ShaderStages::COMPUTE,
ty: match &binding.resource_type {
ResourceType::UniformBuffer { .. } => wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
ResourceType::StorageBuffer { read_only, .. } => wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage {
read_only: *read_only,
},
has_dynamic_offset: false,
min_binding_size: None,
},
ResourceType::StorageTexture { format, access } => {
wgpu::BindingType::StorageTexture {
access: *access,
format: *format,
view_dimension: wgpu::TextureViewDimension::D2,
}
}
ResourceType::InputTexture => wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
ResourceType::ChannelTexture => wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
ResourceType::Sampler => {
wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering)
}
},
count: None,
}
}
/// Get all bindings for a specific group
pub fn get_bindings_for_group(&self, group: u32) -> Vec<&ResourceBinding> {
self.bindings.iter().filter(|b| b.group == group).collect()
}
/// Get binding by name
pub fn get_binding_by_name(&self, name: &str) -> Option<&ResourceBinding> {
self.bindings.iter().find(|b| b.name == name)
}
}
/// 4-Group Convention Implementation:
/// @group(0): Per-Frame Resources (TimeUniform)
/// @group(1): Primary Pass I/O & Parameters (output texture, shader params, input textures)
/// @group(2): Global Engine Resources (fonts, audio, atomics, mouse)
/// @group(3): User-Defined Data Buffers (custom storage buffers)
impl ResourceLayout {
    // GROUP 0: Per-Frame Resources
    /// Register the per-frame "time" uniform at @group(0).
    pub fn add_time_uniform(&mut self) {
        self.add_resource(
            0,
            "time",
            ResourceType::UniformBuffer {
                size: std::mem::size_of::<super::ComputeTimeUniform>() as u64,
            },
        );
    }
    // GROUP 1: Primary Pass I/O & Parameters
    /// Register the write-only "output" storage texture at @group(1).
    pub fn add_output_texture(&mut self, format: wgpu::TextureFormat) {
        self.add_resource(
            1,
            "output",
            ResourceType::StorageTexture {
                format,
                access: wgpu::StorageTextureAccess::WriteOnly,
            },
        );
    }
    /// Register "input_texture" and "input_sampler" at @group(1).
    pub fn add_input_texture(&mut self) {
        self.add_resource(1, "input_texture", ResourceType::InputTexture);
        self.add_resource(1, "input_sampler", ResourceType::Sampler);
    }
    // GROUP 2: Engine Resources including Channels
    /// Add channel textures (channel0-channel3) for external media accessible from all passes
    pub fn add_channel_textures(&mut self, num_channels: u32) {
        for i in 0..num_channels {
            let channel_name = format!("channel{i}");
            let sampler_name = format!("channel{i}_sampler");
            self.add_resource(2, &channel_name, ResourceType::ChannelTexture);
            self.add_resource(2, &sampler_name, ResourceType::Sampler);
        }
    }
    /// Add multi-pass input textures to Group 3 (up to 3 input textures with samplers)
    pub fn add_multipass_input_textures(&mut self) {
        // Add 3 input texture pairs for multi-pass dependencies
        for i in 0..3 {
            self.add_resource(3, &format!("input_texture{i}"), ResourceType::InputTexture);
            self.add_resource(3, &format!("input_sampler{i}"), ResourceType::Sampler);
        }
    }
    /// Register the user parameter uniform (`name`, `size` bytes) at @group(1).
    pub fn add_custom_uniform(&mut self, name: &str, size: u64) {
        self.add_resource(1, name, ResourceType::UniformBuffer { size });
    }
    // GROUP 2: Global Engine Resources
    /// Register the "mouse" uniform at @group(2).
    pub fn add_mouse_uniform(&mut self) {
        self.add_resource(
            2,
            "mouse",
            ResourceType::UniformBuffer {
                size: std::mem::size_of::<crate::MouseUniform>() as u64,
            },
        );
    }
    /// Register the font uniform and atlas texture at @group(2).
    pub fn add_font_resources(&mut self) {
        self.add_resource(
            2,
            "font_texture_uniform",
            ResourceType::UniformBuffer {
                size: std::mem::size_of::<crate::FontUniforms>() as u64,
            },
        );
        self.add_resource(2, "font_texture_atlas", ResourceType::InputTexture);
    }
    /// Register a writable "audio_buffer" sized for `size` f32 samples at @group(2).
    pub fn add_audio_buffer(&mut self, size: usize) {
        self.add_resource(
            2,
            "audio_buffer",
            ResourceType::StorageBuffer {
                size: (size * std::mem::size_of::<f32>()) as u64,
                read_only: false,
            },
        );
    }
    /// Register a read-only "audio_spectrum" buffer sized for `size` f32 bins at @group(2).
    pub fn add_audio_spectrum_buffer(&mut self, size: usize) {
        self.add_resource(
            2,
            "audio_spectrum",
            ResourceType::StorageBuffer {
                size: (size * std::mem::size_of::<f32>()) as u64,
                read_only: true,
            },
        );
    }
    /// Register a writable "atomic_buffer" of `size` bytes at @group(2).
    pub fn add_atomic_buffer(&mut self, size: u64) {
        self.add_resource(
            2,
            "atomic_buffer",
            ResourceType::StorageBuffer {
                size,
                read_only: false,
            },
        );
    }
    // GROUP 3: User-Defined Data Buffers
    /// Register a writable user storage buffer (`name`, `size` bytes) at @group(3).
    pub fn add_storage_buffer(&mut self, name: &str, size: u64) {
        self.add_resource(
            3,
            name,
            ResourceType::StorageBuffer {
                size,
                read_only: false,
            },
        );
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/compute/builder.rs | src/compute/builder.rs | use crate::UniformProvider;
use wgpu;
/// Pass description for multi-pass shaders
#[derive(Debug, Clone)]
pub struct PassDescription {
    pub name: String,
    pub inputs: Vec<String>,
    pub workgroup_size: Option<[u32; 3]>,
}
impl PassDescription {
    /// Describe a pass by its entry-point name and the names of the passes
    /// whose outputs it reads; no workgroup-size override.
    pub fn new(name: &str, inputs: &[&str]) -> Self {
        Self {
            name: name.to_owned(),
            inputs: inputs.iter().map(|&input| input.to_owned()).collect(),
            workgroup_size: None,
        }
    }
    /// Builder-style override of the dispatch workgroup size for this pass.
    pub fn with_workgroup_size(mut self, size: [u32; 3]) -> Self {
        self.workgroup_size = Some(size);
        self
    }
}
/// User-defined storage buffer specification
#[derive(Debug, Clone)]
pub struct StorageBufferSpec {
    pub name: String,
    pub size_bytes: u64,
}
impl StorageBufferSpec {
    /// Specification for a named storage buffer of `size_bytes` bytes.
    pub fn new(name: &str, size_bytes: u64) -> Self {
        let name = name.to_owned();
        Self { name, size_bytes }
    }
}
/// Configuration built by the builder
#[derive(Debug)]
pub struct ComputeConfiguration {
    // Compute entry-point names; one per pass for multi-pass shaders.
    pub entry_points: Vec<String>,
    // Multi-pass descriptions; `None` for single-pass shaders.
    pub passes: Option<Vec<PassDescription>>,
    // Byte size of the user parameter uniform, if one was requested.
    pub custom_uniform_size: Option<u64>,
    pub has_input_texture: bool,
    pub has_mouse: bool,
    pub has_fonts: bool,
    pub has_audio: bool,
    pub has_atomic_buffer: bool,
    // Audio buffer length in f32 elements (see ResourceLayout::add_audio_buffer).
    pub audio_buffer_size: usize,
    pub has_audio_spectrum: bool,
    pub audio_spectrum_size: usize,
    // User storage buffers, bound at @group(3).
    pub storage_buffers: Vec<StorageBufferSpec>,
    pub workgroup_size: [u32; 3],
    // When true the shader is dispatched a single time (initialization shaders).
    pub dispatch_once: bool,
    pub texture_format: wgpu::TextureFormat,
    pub label: String,
    // Number of external media channels bound at @group(2), if any.
    pub num_channels: Option<u32>,
}
/// Builder for compute shader configurations
/// @group(0): Per-Frame Resources (TimeUniform)
/// @group(1): Primary Pass I/O & Parameters (output texture, shader params, input textures)
/// @group(2): Global Engine Resources (fonts, audio, atomics, mouse)
/// @group(3): User-Defined Data Buffers (custom storage buffers)
pub struct ComputeShaderBuilder {
    // Accumulated configuration, finalized by `build`.
    config: ComputeConfiguration,
}
impl ComputeShaderBuilder {
    /// Start from the single-pass defaults: entry point "main", 16x16x1
    /// workgroups, Rgba16Float output, and no optional engine resources.
    pub fn new() -> Self {
        Self {
            config: ComputeConfiguration {
                entry_points: vec!["main".to_string()],
                passes: None,
                custom_uniform_size: None,
                has_input_texture: false,
                has_mouse: false,
                has_fonts: false,
                has_audio: false,
                has_atomic_buffer: false,
                audio_buffer_size: 1024,
                has_audio_spectrum: false,
                audio_spectrum_size: 128,
                storage_buffers: Vec::new(),
                workgroup_size: [16, 16, 1],
                dispatch_once: false,
                texture_format: wgpu::TextureFormat::Rgba16Float,
                label: "Compute Shader".to_string(),
                num_channels: None,
            },
        }
    }
    /// Set the entry point for single-pass shaders
    pub fn with_entry_point(mut self, entry_point: &str) -> Self {
        self.config.entry_points = vec![entry_point.to_string()];
        self
    }
    /// Configure multi-pass execution with ping-pong buffers
    ///
    /// Replaces any previously set entry points with one per pass.
    pub fn with_multi_pass(mut self, passes: &[PassDescription]) -> Self {
        self.config.passes = Some(passes.to_vec());
        self.config.entry_points = passes.iter().map(|p| p.name.clone()).collect();
        self
    }
    /// Add custom uniform parameters (goes to @group(1))
    pub fn with_custom_uniforms<T: UniformProvider>(mut self) -> Self {
        self.config.custom_uniform_size = Some(std::mem::size_of::<T>() as u64);
        self
    }
    /// Enable input texture support (goes to @group(1))
    pub fn with_input_texture(mut self) -> Self {
        self.config.has_input_texture = true;
        self
    }
    /// Enable channel textures for external media (goes to @group(2))
    pub fn with_channels(mut self, num_channels: u32) -> Self {
        self.config.num_channels = Some(num_channels);
        self
    }
    /// Enable mouse input (goes to @group(2))
    pub fn with_mouse(mut self) -> Self {
        self.config.has_mouse = true;
        self
    }
    /// Enable font rendering (goes to @group(2))
    pub fn with_fonts(mut self) -> Self {
        self.config.has_fonts = true;
        self
    }
    /// Enable audio buffer (goes to @group(2)); `buffer_size` is in f32 elements
    pub fn with_audio(mut self, buffer_size: usize) -> Self {
        self.config.has_audio = true;
        self.config.audio_buffer_size = buffer_size;
        self
    }
    /// Enable audio spectrum data buffer for visualizers (goes to @group(2))
    pub fn with_audio_spectrum(mut self, spectrum_size: usize) -> Self {
        self.config.has_audio_spectrum = true;
        self.config.audio_spectrum_size = spectrum_size;
        self
    }
    /// Enable atomic buffer for particle systems (goes to @group(2))
    pub fn with_atomic_buffer(mut self) -> Self {
        self.config.has_atomic_buffer = true;
        self
    }
    /// Add user-defined storage buffers (goes to @group(3))
    pub fn with_storage_buffer(mut self, buffer: StorageBufferSpec) -> Self {
        self.config.storage_buffers.push(buffer);
        self
    }
    /// Add multiple storage buffers
    pub fn with_storage_buffers(mut self, buffers: &[StorageBufferSpec]) -> Self {
        self.config.storage_buffers.extend_from_slice(buffers);
        self
    }
    /// Set workgroup size
    pub fn with_workgroup_size(mut self, size: [u32; 3]) -> Self {
        self.config.workgroup_size = size;
        self
    }
    /// Run only once (for initialization shaders)
    pub fn dispatch_once(mut self) -> Self {
        self.config.dispatch_once = true;
        self
    }
    /// Set output texture format
    pub fn with_texture_format(mut self, format: wgpu::TextureFormat) -> Self {
        self.config.texture_format = format;
        self
    }
    /// Set debug label
    pub fn with_label(mut self, label: &str) -> Self {
        self.config.label = label.to_string();
        self
    }
    /// Build the configuration (will be used by ComputeShader::from_builder)
    pub fn build(self) -> ComputeConfiguration {
        self.config
    }
}
impl Default for ComputeShaderBuilder {
    /// Equivalent to [`ComputeShaderBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/compute/core.rs | src/compute/core.rs | use log::info;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use wgpu;
use super::builder::ComputeConfiguration;
use super::multipass::MultiPassManager;
use super::resource::ResourceLayout;
use crate::{Core, FontSystem, ShaderHotReload, TextureManager, UniformBinding, UniformProvider};
/// Per-frame timing data uploaded at @group(0) (see
/// `ResourceLayout::add_time_uniform`).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ComputeTimeUniform {
    // Accumulated time value supplied by the render loop.
    pub time: f32,
    // Time elapsed since the previous frame.
    pub delta: f32,
    // Frame counter.
    pub frame: u32,
    // Keeps the struct a 16-byte multiple for uniform-buffer layout.
    pub _padding: u32,
}
impl UniformProvider for ComputeTimeUniform {
    /// Raw byte view of the uniform for buffer uploads.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// A configured compute shader: its pipelines, output target, and every
/// resource bound according to the 4-group convention (group 0 per-frame,
/// group 1 primary I/O, group 2 engine resources, group 3 user data).
pub struct ComputeShader {
    // Core resources
    pub pipelines: Vec<wgpu::ComputePipeline>,
    pub output_texture: TextureManager,
    pub time_uniform: UniformBinding<ComputeTimeUniform>,
    pub workgroup_size: [u32; 3],
    pub dispatch_once: bool,
    pub current_frame: u32,
    // Layouts following the 4-group convention
    pub bind_group_layouts: HashMap<u32, wgpu::BindGroupLayout>,
    pub pipeline_layout: wgpu::PipelineLayout,
    // Bind groups organized by convention
    pub group0_bind_group: wgpu::BindGroup, // Per-frame (time)
    pub group1_bind_group: wgpu::BindGroup, // Primary I/O & params
    pub group2_bind_group: Option<wgpu::BindGroup>, // Engine resources
    pub group3_bind_group: Option<wgpu::BindGroup>, // User data
    // Custom uniform parameters (Group 1)
    pub custom_uniform: Option<wgpu::Buffer>,
    pub custom_uniform_size: Option<u64>,
    // Input texture support (Group 1); stands in until a real input is bound.
    pub placeholder_input_texture: Option<TextureManager>,
    // Multi-pass support
    pub multipass_manager: Option<MultiPassManager>,
    pub pass_dependencies: Option<HashMap<String, Vec<String>>>,
    pub pass_descriptions: Option<Vec<crate::compute::PassDescription>>,
    // Engine resources (Group 2)
    pub font_system: Option<FontSystem>,
    pub atomic_buffer_raw: Option<wgpu::Buffer>,
    pub audio_buffer: Option<wgpu::Buffer>,
    pub audio_staging_buffer: Option<wgpu::Buffer>,
    pub audio_spectrum_buffer: Option<wgpu::Buffer>,
    pub mouse_uniform: Option<UniformBinding<crate::MouseUniform>>,
    // Channel system for external textures (Group 2)
    pub channel_textures: HashMap<u32, Option<(wgpu::TextureView, wgpu::Sampler)>>,
    pub num_channels: u32,
    // User storage buffers (Group 3)
    pub storage_buffers: Vec<wgpu::Buffer>,
    // Empty bind groups for contiguous layout requirement
    pub empty_bind_groups: std::collections::HashMap<u32, wgpu::BindGroup>,
    // Configuration and hot reload
    pub entry_points: Vec<String>,
    pub hot_reload: Option<ShaderHotReload>,
    pub label: String,
    pub has_input_texture: bool,
}
impl ComputeShader {
/// Create a compute shader from builder configuration
/// Assembles a complete `ComputeShader` from WGSL source and a
/// `ComputeConfiguration` (normally produced by `ComputeShaderBuilder`).
///
/// Resources follow the engine's 4-group convention:
/// - group 0: per-frame time uniform (always present)
/// - group 1: output storage texture, optional custom params, optional input texture
/// - group 2: engine resources (mouse, fonts, audio, spectrum, atomics, channels)
/// - group 3: user storage buffers, or multi-pass ping-pong input textures
///
/// WebGPU requires the bind group indices of a pipeline layout to be
/// contiguous, so any group the configuration does not use is backed by an
/// empty layout (Step 3) and an empty bind group (Step 7.5).
pub fn from_builder(core: &Core, shader_source: &str, config: ComputeConfiguration) -> Self {
    // Step 1: Create resource layout following 4-group convention.
    // NOTE: the registration order here must stay in sync with the entry
    // order used by create_group1_bind_group / create_group2_bind_group.
    let mut resource_layout = ResourceLayout::new();
    // Group 0: Always has time uniform
    resource_layout.add_time_uniform();
    // Group 1: Primary I/O & Parameters
    resource_layout.add_output_texture(config.texture_format);
    if let Some(uniform_size) = config.custom_uniform_size {
        resource_layout.add_custom_uniform("params", uniform_size);
    }
    if config.has_input_texture {
        resource_layout.add_input_texture();
    }
    // Group 2: Engine Resources
    if config.has_mouse {
        resource_layout.add_mouse_uniform();
    }
    if config.has_fonts {
        resource_layout.add_font_resources();
    }
    if config.has_audio {
        resource_layout.add_audio_buffer(config.audio_buffer_size);
    }
    if config.has_audio_spectrum {
        resource_layout.add_audio_spectrum_buffer(config.audio_spectrum_size);
    }
    if config.has_atomic_buffer {
        // Buffer holds 3 u32s (4 bytes each) per pixel.
        // The shader accesses: atomic_buffer[idx], atomic_buffer[idx + w*h], atomic_buffer[idx + 2*w*h]
        let atomic_size = (core.size.width * core.size.height * 3 * 4) as u64;
        resource_layout.add_atomic_buffer(atomic_size);
    }
    if let Some(num_channels) = config.num_channels {
        resource_layout.add_channel_textures(num_channels);
    }
    // Group 3: User-defined storage buffers with optional multi-pass input textures
    if !config.storage_buffers.is_empty() {
        // User storage buffers take precedence over multi-pass input textures.
        for buffer_spec in &config.storage_buffers {
            resource_layout.add_storage_buffer(&buffer_spec.name, buffer_spec.size_bytes);
        }
    } else if config.passes.is_some() {
        // Fallback: Multi-pass input textures only if no storage buffers requested
        resource_layout.add_multipass_input_textures();
    }
    // Step 2: Create bind group layouts
    let bind_group_layouts = resource_layout.create_bind_group_layouts(&core.device);
    // Step 3: Create pipeline layout - WebGPU requires contiguous bind group
    // indices, so all groups 0-3 must be present: create empty layouts for
    // any group the configuration did not populate.
    let mut layouts_vec: Vec<wgpu::BindGroupLayout> = Vec::new();
    for i in 0..4 {
        if let Some(layout) = bind_group_layouts.get(&i) {
            layouts_vec.push(layout.clone()); // Clone the existing layout
        } else {
            // Create an empty bind group layout for missing groups
            let empty_layout =
                core.device
                    .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                        label: Some(&format!("Empty Group {i} Layout")),
                        entries: &[],
                    });
            layouts_vec.push(empty_layout);
        }
    }
    let layout_refs: Vec<&wgpu::BindGroupLayout> = layouts_vec.iter().collect();
    let pipeline_layout = core
        .device
        .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some(&format!("{} Pipeline Layout", config.label)),
            bind_group_layouts: &layout_refs,
            push_constant_ranges: &[],
        });
    // Step 4: Create time uniform (Group 0).
    // unwrap is safe: add_time_uniform() above always registers group 0.
    let time_bind_group_layout = bind_group_layouts.get(&0).unwrap();
    let time_uniform = UniformBinding::new(
        &core.device,
        &format!("{} Time Uniform", config.label),
        ComputeTimeUniform {
            time: 0.0,
            delta: 0.0,
            frame: 0,
            _padding: 0,
        },
        time_bind_group_layout,
        0,
    );
    let group0_bind_group = time_uniform.bind_group.clone();
    // Step 5: Create output texture (sized to the window surface)
    let output_texture = Self::create_output_texture(
        &core.device,
        core.size.width,
        core.size.height,
        config.texture_format,
        &format!("{} Output Texture", config.label),
    );
    // Step 5.5: Create custom uniform buffer if needed
    let custom_uniform = if let Some(uniform_size) = config.custom_uniform_size {
        Some(core.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(&format!("{} Custom Uniform Buffer", config.label)),
            size: uniform_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        }))
    } else {
        None
    };
    // Create placeholder input texture for shaders that need input texture
    // support, so group 1 can be bound before a real texture is loaded.
    let placeholder_input_texture = if config.has_input_texture {
        Some(Self::create_placeholder_input_texture(
            &core.device,
            &format!("{} Placeholder Input", config.label),
        ))
    } else {
        None
    };
    // unwrap is safe: add_output_texture() above always registers group 1.
    let group1_bind_group = Self::create_group1_bind_group(
        &core.device,
        bind_group_layouts.get(&1).unwrap(),
        &output_texture,
        &config,
        custom_uniform.as_ref(),
        placeholder_input_texture.as_ref().map(|t| &t.view),
        placeholder_input_texture.as_ref().map(|t| &t.sampler),
    );
    // Step 6: Create engine resources (Group 2) if needed
    let (
        font_system,
        atomic_buffer_raw,
        audio_buffer,
        audio_staging_buffer,
        audio_spectrum_buffer,
        mouse_uniform,
        group2_bind_group,
    ) = Self::create_engine_resources(core, &bind_group_layouts, &config);
    // Step 7: Create user storage buffers (Group 3) if needed
    let (storage_buffers, group3_bind_group) = if !config.storage_buffers.is_empty() {
        // Create storage buffers (works for both single-pass and multi-pass with storage)
        Self::create_user_storage_buffers(core, &bind_group_layouts, &config)
    } else if config.passes.is_some() {
        // Pure multi-pass mode: Group 3 will be managed dynamically by MultiPassManager
        (Vec::new(), None)
    } else {
        // No storage buffers needed
        (Vec::new(), None)
    };
    // Step 7.5: Create empty bind groups for empty layouts (needed when we create contiguous layouts)
    let mut empty_bind_groups: std::collections::HashMap<u32, wgpu::BindGroup> =
        std::collections::HashMap::new();
    for i in 0..4 {
        if !bind_group_layouts.contains_key(&i) {
            // This group was missing and got an empty layout in Step 3;
            // create the matching empty bind group.
            let empty_bind_group = core.device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some(&format!("Empty Group {i} Bind Group")),
                layout: &layouts_vec[i as usize],
                entries: &[],
            });
            empty_bind_groups.insert(i, empty_bind_group);
        }
    }
    // Step 8: Create multi-pass manager if needed (only for texture ping-pong, not storage buffers)
    let (multipass_manager, pass_dependencies) = if let Some(passes) = &config.passes {
        if config.storage_buffers.is_empty() {
            // Pure multi-pass mode with texture ping-pong: Group 3 managed by MultiPassManager.
            // unwrap is safe: in this branch Step 1 registered group 3 via
            // add_multipass_input_textures() (storage_buffers is empty).
            let buffer_names: Vec<String> = passes.iter().map(|p| p.name.clone()).collect();
            let dependencies: HashMap<String, Vec<String>> = passes
                .iter()
                .map(|p| (p.name.clone(), p.inputs.clone()))
                .collect();
            let manager = MultiPassManager::new(
                core,
                &buffer_names,
                config.texture_format,
                bind_group_layouts.get(&3).unwrap().clone(),
            );
            (Some(manager), Some(dependencies))
        } else {
            // Multi-pass with storage buffers: no texture ping-pong needed.
            // Passes use shared storage buffers instead of ping-pong textures.
            let dependencies: HashMap<String, Vec<String>> = passes
                .iter()
                .map(|p| (p.name.clone(), p.inputs.clone()))
                .collect();
            (None, Some(dependencies))
        }
    } else {
        (None, None)
    };
    // Step 9: Create compute pipelines - one per configured entry point,
    // all sharing the same shader module and pipeline layout.
    let shader_module = core
        .device
        .create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some(&format!("{} Module", config.label)),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });
    let mut pipelines = Vec::new();
    for entry_point in &config.entry_points {
        let pipeline = core
            .device
            .create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                label: Some(&format!("{} Pipeline - {}", config.label, entry_point)),
                layout: Some(&pipeline_layout),
                module: &shader_module,
                entry_point: Some(entry_point),
                compilation_options: wgpu::PipelineCompilationOptions::default(),
                cache: None,
            });
        pipelines.push(pipeline);
    }
    Self {
        pipelines,
        output_texture,
        time_uniform,
        workgroup_size: config.workgroup_size,
        dispatch_once: config.dispatch_once,
        current_frame: 0,
        bind_group_layouts,
        pipeline_layout,
        group0_bind_group,
        group1_bind_group,
        group2_bind_group,
        group3_bind_group,
        multipass_manager,
        pass_dependencies,
        pass_descriptions: config.passes.clone(),
        font_system,
        atomic_buffer_raw,
        audio_buffer,
        audio_staging_buffer,
        audio_spectrum_buffer,
        mouse_uniform,
        storage_buffers,
        empty_bind_groups,
        custom_uniform,
        custom_uniform_size: config.custom_uniform_size,
        placeholder_input_texture,
        channel_textures: Self::initialize_channel_textures(config.num_channels.unwrap_or(0)),
        num_channels: config.num_channels.unwrap_or(0),
        entry_points: config.entry_points,
        hot_reload: None,
        label: config.label,
        has_input_texture: config.has_input_texture,
    }
}
fn create_output_texture(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
label: &str,
) -> TextureManager {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some(label),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor::default());
// Create a dummy bind group for display purposes - this is only used by the display renderer
let dummy_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Display Texture Layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &dummy_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
label: Some(&format!("{label} Display Bind Group")),
});
TextureManager {
texture,
view,
sampler,
bind_group,
}
}
/// Builds a minimal 1x1 stand-in input texture so that group 1 can always be
/// bound, even before the application supplies a real input texture.
fn create_placeholder_input_texture(device: &wgpu::Device, label: &str) -> TextureManager {
    let one_pixel = wgpu::Extent3d {
        width: 1,
        height: 1,
        depth_or_array_layers: 1,
    };
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some(label),
        size: one_pixel,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb, // Match real texture format
        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
        view_formats: &[],
    });
    let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor::default());
    // Pixel contents are left untouched here; shaders are expected to handle
    // an unloaded/placeholder input texture gracefully.

    // Layout/bind group only exist to satisfy TextureManager's shape; the
    // compute pass binds the view and sampler through group 1 instead.
    let texture_entry = wgpu::BindGroupLayoutEntry {
        binding: 0,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            multisampled: false,
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension: wgpu::TextureViewDimension::D2,
        },
        count: None,
    };
    let sampler_entry = wgpu::BindGroupLayoutEntry {
        binding: 1,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
        count: None,
    };
    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some(&format!("{label} Placeholder Layout")),
        entries: &[texture_entry, sampler_entry],
    });
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some(&format!("{label} Placeholder Bind Group")),
        layout: &bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
    });

    TextureManager {
        texture,
        view,
        sampler,
        bind_group,
    }
}
/// Builds the Group 1 bind group (primary I/O & parameters).
///
/// Binding layout, mirroring `ResourceLayout`'s group-1 registration order:
/// - binding 0: output storage texture (always)
/// - binding 1: custom uniform buffer (if configured)
/// - bindings 2/3: input texture + sampler (if `config.has_input_texture`)
///
/// Fix: previously this created a second `TextureView` of the output texture
/// even though `create_output_texture` already produced an identical view
/// (same default descriptor); the existing `output_texture.view` is reused.
fn create_group1_bind_group(
    device: &wgpu::Device,
    layout: &wgpu::BindGroupLayout,
    output_texture: &TextureManager,
    config: &ComputeConfiguration,
    custom_uniform_buffer: Option<&wgpu::Buffer>,
    input_texture_view: Option<&wgpu::TextureView>,
    input_sampler: Option<&wgpu::Sampler>,
) -> wgpu::BindGroup {
    // Binding 0: the storage texture the compute shader writes to.
    let mut entries = vec![wgpu::BindGroupEntry {
        binding: 0,
        resource: wgpu::BindingResource::TextureView(&output_texture.view),
    }];
    // Binding 1: custom shader parameters, only when the configuration
    // declared a uniform size (which is what put the binding in the layout).
    if let (Some(buffer), Some(_size)) = (custom_uniform_buffer, config.custom_uniform_size) {
        entries.push(wgpu::BindGroupEntry {
            binding: 1,
            resource: buffer.as_entire_binding(),
        });
    }
    // Bindings 2/3: input texture and sampler (e.g. for FFT-style shaders).
    if config.has_input_texture {
        if let (Some(view), Some(sampler)) = (input_texture_view, input_sampler) {
            entries.push(wgpu::BindGroupEntry {
                binding: 2,
                resource: wgpu::BindingResource::TextureView(view),
            });
            entries.push(wgpu::BindGroupEntry {
                binding: 3,
                resource: wgpu::BindingResource::Sampler(sampler),
            });
        } else {
            // The layout expects these bindings, so omitting them will fail
            // bind group creation - surface the architectural problem loudly.
            log::error!("Input texture required but not provided during bind group creation");
        }
    }
    device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout,
        entries: &entries,
        label: Some("Group 1 Bind Group"),
    })
}
fn create_engine_resources(
core: &Core,
layouts: &HashMap<u32, wgpu::BindGroupLayout>,
config: &ComputeConfiguration,
) -> (
Option<FontSystem>,
Option<wgpu::Buffer>,
Option<wgpu::Buffer>,
Option<wgpu::Buffer>,
Option<wgpu::Buffer>,
Option<UniformBinding<crate::MouseUniform>>,
Option<wgpu::BindGroup>,
) {
let layout = layouts.get(&2);
if layout.is_none() {
return (None, None, None, None, None, None, None);
}
let layout = layout.unwrap();
// Create font system if needed
let font_system = if config.has_fonts {
Some(FontSystem::new(core))
} else {
None
};
// Create atomic buffer if needed (raw buffer, not old AtomicBuffer struct)
// buffer size: 3 u32s * 4 bytes per pixel
let atomic_buffer_raw = if config.has_atomic_buffer {
let buffer_size = (core.size.width * core.size.height * 3 * 4) as u64;
Some(core.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Atomic Storage Buffer"),
size: buffer_size,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
}))
} else {
None
};
// Create audio buffers if needed
let (audio_buffer, audio_staging_buffer) = if config.has_audio {
let buffer_size = config.audio_buffer_size * std::mem::size_of::<f32>();
let audio_buffer = core.device.create_buffer(&wgpu::BufferDescriptor {
label: Some(&format!("{} Audio Buffer", config.label)),
size: buffer_size as u64,
usage: wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
let staging_buffer = core.device.create_buffer(&wgpu::BufferDescriptor {
label: Some(&format!("{} Audio Staging Buffer", config.label)),
size: buffer_size as u64,
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
(Some(audio_buffer), Some(staging_buffer))
} else {
(None, None)
};
// Create audio spectrum buffer if needed
let audio_spectrum_buffer = if config.has_audio_spectrum {
let buffer_size = config.audio_spectrum_size * std::mem::size_of::<f32>();
let buffer = core.device.create_buffer(&wgpu::BufferDescriptor {
label: Some(&format!("{} Audio Spectrum Buffer", config.label)),
size: buffer_size as u64,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
Some(buffer)
} else {
None
};
// Create mouse uniform if needed
let mouse_uniform = if config.has_mouse {
// Create a temporary bind group layout for UniformBinding compatibility
let temp_layout =
core.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("Temp Mouse Layout"),
});
Some(UniformBinding::new(
&core.device,
"Mouse Uniform",
crate::MouseUniform::default(),
&temp_layout,
0,
))
} else {
None
};
// Create Group 2 bind group
// Create empty channel textures map for initial bind group creation
let empty_channels = std::collections::HashMap::new();
let num_channels = config.num_channels.unwrap_or(0);
let bind_group = Self::create_group2_bind_group(
&core.device,
&core.queue,
layout,
&font_system,
&atomic_buffer_raw,
&audio_buffer,
&audio_spectrum_buffer,
&mouse_uniform,
&empty_channels,
num_channels,
);
(
font_system,
atomic_buffer_raw,
audio_buffer,
audio_staging_buffer,
audio_spectrum_buffer,
mouse_uniform,
bind_group,
)
}
fn create_group2_bind_group(
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
font_system: &Option<FontSystem>,
atomic_buffer_raw: &Option<wgpu::Buffer>,
audio_buffer: &Option<wgpu::Buffer>,
audio_spectrum_buffer: &Option<wgpu::Buffer>,
mouse_uniform: &Option<UniformBinding<crate::MouseUniform>>,
channel_textures: &HashMap<u32, Option<(wgpu::TextureView, wgpu::Sampler)>>,
num_channels: u32,
) -> Option<wgpu::BindGroup> {
// Create entries based on expected layout from ResourceLayout
// Order must match ResourceLayout creation order:
// 1. mouse (if has_mouse) -> binding 0
// 2. fonts (if has_fonts) -> bindings 1,2,3
// 3. audio (if has_audio) -> binding N
// 4. audio_spectrum (if has_audio_spectrum) -> binding N+1
// 5. atomic_buffer (if has_atomic_buffer) -> binding N+2
// 6. channels (if num_channels > 0) -> bindings N+3 onwards (texture + sampler pairs)
// Create a default 1x1 magenta texture for unassigned channels
let default_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Default Channel Texture"),
size: wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
// Fill with magenta color so we can see when default texture is used
let magenta_data: [u8; 4] = [255, 0, 255, 255];
queue.write_texture(
wgpu::TexelCopyTextureInfo {
texture: &default_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
&magenta_data,
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(4),
rows_per_image: Some(1),
},
wgpu::Extent3d {
width: 1,
height: 1,
depth_or_array_layers: 1,
},
);
let default_texture_view =
default_texture.create_view(&wgpu::TextureViewDescriptor::default());
let default_sampler = device.create_sampler(&wgpu::SamplerDescriptor::default());
let mut entries = Vec::new();
let mut binding_counter = 0;
// Add mouse uniform (binding 0)
if let Some(mouse) = mouse_uniform {
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: mouse.buffer.as_entire_binding(),
});
binding_counter += 1;
}
// Add font texture resources (Shadertoy-style)
if let Some(font_tex) = font_system {
entries.extend_from_slice(&[
wgpu::BindGroupEntry {
binding: binding_counter,
resource: font_tex.font_uniforms.buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: binding_counter + 1,
resource: wgpu::BindingResource::TextureView(&font_tex.atlas_texture.view),
},
]);
binding_counter += 2;
}
// Add audio buffer
if let Some(audio) = audio_buffer {
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: audio.as_entire_binding(),
});
binding_counter += 1;
}
// Add audio spectrum buffer
if let Some(audio_spectrum) = audio_spectrum_buffer {
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: audio_spectrum.as_entire_binding(),
});
binding_counter += 1;
}
// Add atomic buffer (if provided)
if let Some(atomic_buf) = atomic_buffer_raw {
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: atomic_buf.as_entire_binding(),
});
binding_counter += 1;
}
// Add channel textures (channel0, channel1, etc. with their samplers)
for i in 0..num_channels {
// Channel texture binding
let (texture_view, sampler) = if let Some(Some((view, samp))) = channel_textures.get(&i)
{
(view, samp)
} else {
(&default_texture_view, &default_sampler)
};
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: wgpu::BindingResource::TextureView(texture_view),
});
binding_counter += 1;
// Channel sampler binding
entries.push(wgpu::BindGroupEntry {
binding: binding_counter,
resource: wgpu::BindingResource::Sampler(sampler),
});
binding_counter += 1;
}
if entries.is_empty() {
return None;
}
Some(device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &entries,
label: Some("Group 2 Bind Group"),
}))
}
fn create_user_storage_buffers(
core: &Core,
layouts: &HashMap<u32, wgpu::BindGroupLayout>,
config: &ComputeConfiguration,
) -> (Vec<wgpu::Buffer>, Option<wgpu::BindGroup>) {
if config.storage_buffers.is_empty() {
return (Vec::new(), None);
}
let layout = layouts.get(&3);
if layout.is_none() {
return (Vec::new(), None);
}
let layout = layout.unwrap();
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | true |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/compute/multipass.rs | src/compute/multipass.rs | use crate::Core;
use std::collections::HashMap;
use wgpu;
/// Manages ping-pong buffers for multi-pass compute shaders
pub struct MultiPassManager {
buffers: HashMap<String, (wgpu::Texture, wgpu::Texture)>,
bind_groups: HashMap<String, (wgpu::BindGroup, wgpu::BindGroup)>,
output_texture: wgpu::Texture,
output_bind_group: wgpu::BindGroup,
storage_layout: wgpu::BindGroupLayout,
input_layout: wgpu::BindGroupLayout,
frame_flip: bool,
width: u32,
height: u32,
texture_format: wgpu::TextureFormat,
}
/// Note: storage layout currently un-used. I try to create our own storage-only layout
impl MultiPassManager {
pub fn new(
core: &Core,
buffer_names: &[String],
texture_format: wgpu::TextureFormat,
_storage_layout: wgpu::BindGroupLayout,
) -> Self {
let width = core.size.width;
let height = core.size.height;
// Create dedicated storage layout (only storage texture, no custom uniform)
let storage_layout =
core.device
.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("Multi-Pass Storage Layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::StorageTexture {
access: wgpu::StorageTextureAccess::WriteOnly,
format: texture_format,
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
}],
});
// Create input texture layout for multi-buffer reading
let input_layout = Self::create_input_layout(&core.device);
let mut buffers = HashMap::new();
let mut bind_groups = HashMap::new();
// Create ping-pong texture pairs for each buffer
for name in buffer_names {
let texture0 = Self::create_storage_texture(
&core.device,
width,
height,
texture_format,
&format!("{name}_0"),
);
let texture1 = Self::create_storage_texture(
&core.device,
width,
height,
texture_format,
&format!("{name}_1"),
);
let bind_group0 = Self::create_storage_bind_group(
&core.device,
&storage_layout,
&texture0,
&format!("{name}_0_bind"),
);
let bind_group1 = Self::create_storage_bind_group(
&core.device,
&storage_layout,
&texture1,
&format!("{name}_1_bind"),
);
buffers.insert(name.clone(), (texture0, texture1));
bind_groups.insert(name.clone(), (bind_group0, bind_group1));
}
// Create output texture
let output_texture = Self::create_storage_texture(
&core.device,
width,
height,
texture_format,
"multipass_output",
);
let output_bind_group = Self::create_storage_bind_group(
&core.device,
&storage_layout,
&output_texture,
"output_bind",
);
Self {
buffers,
bind_groups,
output_texture,
output_bind_group,
storage_layout,
input_layout,
frame_flip: false,
width,
height,
texture_format,
}
}
fn create_storage_texture(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
label: &str,
) -> wgpu::Texture {
device.create_texture(&wgpu::TextureDescriptor {
label: Some(label),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage: wgpu::TextureUsages::STORAGE_BINDING
| wgpu::TextureUsages::TEXTURE_BINDING
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::COPY_DST,
view_formats: &[],
})
}
fn create_storage_bind_group(
device: &wgpu::Device,
layout: &wgpu::BindGroupLayout,
texture: &wgpu::Texture,
label: &str,
) -> wgpu::BindGroup {
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&view),
}],
label: Some(label),
})
}
fn create_input_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 4,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 5,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("Multi-Pass Input Layout"),
})
}
/// Get the write bind group for current frame
pub fn get_write_bind_group(&self, buffer_name: &str) -> &wgpu::BindGroup {
let bind_groups = self.bind_groups.get(buffer_name).expect("Buffer not found");
if self.frame_flip {
&bind_groups.1
} else {
&bind_groups.0
}
}
/// Get the write texture for current frame
pub fn get_write_texture(&self, buffer_name: &str) -> &wgpu::Texture {
let textures = self.buffers.get(buffer_name).expect("Buffer not found");
if self.frame_flip {
&textures.1
} else {
&textures.0
}
}
/// Get the read texture for previous frame
pub fn get_read_texture(&self, buffer_name: &str) -> &wgpu::Texture {
let textures = self.buffers.get(buffer_name).expect("Buffer not found");
if self.frame_flip {
&textures.0
} else {
&textures.1
}
}
/// Create input bind group for a pass with its dependencies
pub fn create_input_bind_group(
&self,
device: &wgpu::Device,
sampler: &wgpu::Sampler,
input_buffers: &[String],
) -> wgpu::BindGroup {
let mut views = Vec::new();
// Create views for up to 3 input textures
for i in 0..3 {
let buffer_name = if input_buffers.is_empty() {
// For first pass with no dependencies, use the first buffer or create a dummy
self.buffers
.keys()
.next()
.cloned()
.unwrap_or_else(|| "buffer_a".to_string())
} else {
input_buffers.get(i).unwrap_or(&input_buffers[0]).clone()
};
let texture = self.get_read_texture(&buffer_name);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
views.push(view);
}
let entries = [
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&views[0]),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&views[1]),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(sampler),
},
wgpu::BindGroupEntry {
binding: 4,
resource: wgpu::BindingResource::TextureView(&views[2]),
},
wgpu::BindGroupEntry {
binding: 5,
resource: wgpu::BindingResource::Sampler(sampler),
},
];
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.input_layout,
entries: &entries,
label: Some("Multi-Pass Input"),
})
}
/// Get output bind group
pub fn get_output_bind_group(&self) -> &wgpu::BindGroup {
&self.output_bind_group
}
/// Get output texture
pub fn get_output_texture(&self) -> &wgpu::Texture {
&self.output_texture
}
/// Flip ping-pong buffers
pub fn flip_buffers(&mut self) {
self.frame_flip = !self.frame_flip;
}
/// Clear all buffers
pub fn clear_all(&mut self, core: &Core) {
// Recreate all buffer textures
for (name, textures) in &mut self.buffers {
textures.0 = Self::create_storage_texture(
&core.device,
self.width,
self.height,
self.texture_format,
&format!("{name}_0"),
);
textures.1 = Self::create_storage_texture(
&core.device,
self.width,
self.height,
self.texture_format,
&format!("{name}_1"),
);
}
// Recreate all bind groups
for (name, bind_groups) in &mut self.bind_groups {
let textures = self.buffers.get(name).unwrap();
bind_groups.0 = Self::create_storage_bind_group(
&core.device,
&self.storage_layout,
&textures.0,
&format!("{name}_0_bind"),
);
bind_groups.1 = Self::create_storage_bind_group(
&core.device,
&self.storage_layout,
&textures.1,
&format!("{name}_1_bind"),
);
}
// Recreate output texture and bind group
self.output_texture = Self::create_storage_texture(
&core.device,
self.width,
self.height,
self.texture_format,
"multipass_output",
);
self.output_bind_group = Self::create_storage_bind_group(
&core.device,
&self.storage_layout,
&self.output_texture,
"output_bind",
);
self.frame_flip = false;
}
/// Resize all buffers
pub fn resize(&mut self, core: &Core, width: u32, height: u32) {
self.width = width;
self.height = height;
self.clear_all(core);
}
/// Get the input layout for pipeline creation
pub fn get_input_layout(&self) -> &wgpu::BindGroupLayout {
&self.input_layout
}
/// Get the storage layout for pipeline creation
pub fn get_storage_layout(&self) -> &wgpu::BindGroupLayout {
&self.storage_layout
}
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/src/compute/mod.rs | src/compute/mod.rs | // @group(0): Per-Frame Resources (TimeUniform)
// @group(1): Primary Pass I/O & Parameters (output texture, shader params, input textures)
// @group(2): Global Engine Resources (fonts, audio, atomics, mouse)
// @group(3): User-Defined Data Buffers (custom storage buffers)
pub mod builder;
pub mod core;
pub mod multipass;
pub mod resource;
pub use builder::*;
pub use core::*;
pub use multipass::*;
pub use resource::*;
// Texture format constants
pub const COMPUTE_TEXTURE_FORMAT_RGBA16: wgpu::TextureFormat = wgpu::TextureFormat::Rgba16Float;
pub const COMPUTE_TEXTURE_FORMAT_RGBA8: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;
use crate::Core;
/// Main entry point for creating compute shaders
impl ComputeShader {
/// Create a compute shader using the builder pattern
/// This is the primary API for all compute shader creation
pub fn builder() -> ComputeShaderBuilder {
ComputeShaderBuilder::new()
}
/// Create a simple compute shader with basic configuration
pub fn new(core: &Core, shader_source: &str) -> Self {
let config = ComputeShaderBuilder::new()
.with_label("Simple Compute Shader")
.build();
Self::from_builder(core, shader_source, config)
}
/// Create a compute shader with custom uniform parameters
pub fn with_uniforms<T: crate::UniformProvider>(
core: &Core,
shader_source: &str,
label: &str,
) -> Self {
let config = ComputeShaderBuilder::new()
.with_custom_uniforms::<T>()
.with_label(label)
.build();
Self::from_builder(core, shader_source, config)
}
/// Create a multi-pass compute shader
pub fn with_multi_pass(
core: &Core,
shader_source: &str,
passes: &[PassDescription],
label: &str,
) -> Self {
let config = ComputeShaderBuilder::new()
.with_multi_pass(passes)
.with_label(label)
.build();
Self::from_builder(core, shader_source, config)
}
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/blockgame.rs | examples/blockgame.rs | // Block Game, Enes Altun, 2025, MIT License
use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::*;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct BlockGameParams {
// 0=menu, 1=playing, 2=game_over
game_state: i32,
score: u32,
current_block: u32,
total_blocks: u32,
block_x: f32,
block_y: f32,
block_z: f32,
block_width: f32,
block_height: f32,
block_depth: f32,
movement_speed: f32,
movement_range: f32,
drop_triggered: i32,
camera_height: f32,
camera_angle: f32,
camera_scale: f32,
// Game mech
perfect_placement: i32,
game_over: i32,
_padding: [f32; 2],
}
impl Default for BlockGameParams {
fn default() -> Self {
Self {
game_state: 0,
score: 0,
current_block: 0,
total_blocks: 1,
block_x: 0.0,
block_y: 1.0,
block_z: 0.0,
block_width: 3.0,
block_height: 0.6,
block_depth: 3.0,
movement_speed: 2.0,
movement_range: 2.5,
drop_triggered: 0,
camera_height: 0.0,
camera_angle: 0.0,
camera_scale: 65.0,
perfect_placement: 0,
game_over: 0,
_padding: [0.0; 2],
}
}
}
impl UniformProvider for BlockGameParams {
    /// Reinterpret the `#[repr(C)]` struct as raw bytes for GPU upload.
    /// Safe because the type derives `bytemuck::Pod`.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Application state for the block-tower example: shared render plumbing
/// plus the game's compute shader and UI-tweakable parameters.
struct BlockTowerGame {
    // Common windowing/render helpers (egui state, controls, FPS tracker, ...).
    base: RenderKit,
    // Unified compute shader that renders the game (shaders/blockgame.wgsl).
    compute_shader: ComputeShader,
    // Primary-button state captured each frame in `update`; never read in
    // this file — presumably the click edge is handled GPU-side. Confirm.
    last_mouse_click: bool,
    // Parameters whose camera portion is mirrored to the GPU each frame.
    game_params: BlockGameParams,
}
impl ShaderManager for BlockTowerGame {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
// Create single-pass compute shader with mouse, fonts, and game storage
let config = ComputeShader::builder()
.with_entry_point("main")
.with_mouse()
.with_fonts()
.with_audio(1024) // Used for game state storage, not audio
.with_workgroup_size([8, 8, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Block Tower Game Unified")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/blockgame.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/blockgame.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Block Tower Game Hot Reload"),
source: wgpu::ShaderSource::Wgsl(include_str!("shaders/blockgame.wgsl").into()),
}),
) {
eprintln!("Failed to enable hot reload for blockgame shader: {e}");
}
Self {
base,
compute_shader,
last_mouse_click: false,
game_params: BlockGameParams::default(),
}
}
/// Per-frame CPU-side update: push time, mouse and camera state to the
/// shader and refresh bookkeeping.
fn update(&mut self, core: &Core) {
    // Shader time advances with a fixed 60 Hz delta, not measured frame time.
    let current_time = self.base.controls.get_time(&self.base.start_time);
    let delta = 1.0 / 60.0;
    self.compute_shader
        .set_time(current_time, delta, &core.queue);
    self.compute_shader
        .update_mouse_uniform(&self.base.mouse_tracker.uniform, &core.queue);
    self.base.fps_tracker.update();
    // Check for hot reload updates
    self.compute_shader.check_hot_reload(&core.device);
    // Mirror the UI-controlled camera values into the shader's storage buffer.
    self.update_camera_in_shader(&core.queue);
    // Bit 0 of the first button word = primary button held.
    // NOTE(review): `last_mouse_click` is written but never read in this
    // file — confirm it is still needed before removing the field.
    let mouse_buttons = self.base.mouse_tracker.uniform.buttons[0];
    let mouse_pressed = mouse_buttons & 1 != 0;
    self.last_mouse_click = mouse_pressed;
}
/// Keep the shared resolution uniform and the compute output texture in
/// sync with the new surface size.
fn resize(&mut self, core: &Core) {
    self.base.update_resolution(&core.queue, core.size);
    self.compute_shader
        .resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Block Tower")
.collapsible(true)
.resizable(true)
.default_width(220.0)
.show(ctx, |ui| {
egui::CollapsingHeader::new("Camera")
.default_open(true)
.show(ui, |ui| {
ui.add(
egui::Slider::new(
&mut self.game_params.camera_height,
0.0..=20.0,
)
.text("Height"),
);
ui.add(
egui::Slider::new(
&mut self.game_params.camera_angle,
-3.14159..=3.14159,
)
.text("Angle"),
);
ui.add(
egui::Slider::new(
&mut self.game_params.camera_scale,
20.0..=200.0,
)
.text("Scale"),
);
ui.separator();
ui.label("Controls:");
ui.label("Q/E: Move up/down");
ui.label("W/S: Rotate left/right");
ui.separator();
ui.label("Scale presets:");
ui.horizontal(|ui| {
if ui.button("1080p").clicked() {
self.game_params.camera_scale = 50.0;
}
if ui.button("1440p").clicked() {
self.game_params.camera_scale = 65.0;
}
if ui.button("4K").clicked() {
self.game_params.camera_scale = 100.0;
}
});
if ui.button("Reset Camera").clicked() {
self.game_params.camera_height = 8.0;
self.game_params.camera_angle = 0.0;
self.game_params.camera_scale = 65.0;
}
});
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Block Game Render Encoder"),
});
self.compute_shader.dispatch(&mut encoder, core);
{
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Block Game Render Pass"),
);
let compute_texture = self.compute_shader.get_output_texture();
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
Ok(())
}
/// Route window events: egui first, then mouse tracking, then the game's
/// camera hotkeys (Q/E height, W/S rotation), finally the generic key
/// handler. Returns `true` when the event was consumed.
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
    // Let egui observe the event; it reports whether it consumed it.
    let ui_handled = self
        .base
        .egui_state
        .on_window_event(core.window(), event)
        .consumed;
    if self.base.handle_mouse_input(core, event, ui_handled) {
        return true;
    }
    if let WindowEvent::KeyboardInput { event, .. } = event {
        if let winit::keyboard::PhysicalKey::Code(key_code) = event.physical_key {
            // Only react on press; releases fall through to the key handler.
            if event.state == ElementState::Pressed {
                let camera_speed = 0.5;
                match key_code {
                    winit::keyboard::KeyCode::KeyQ => {
                        self.game_params.camera_height += camera_speed;
                        return true;
                    }
                    winit::keyboard::KeyCode::KeyE => {
                        self.game_params.camera_height -= camera_speed;
                        return true;
                    }
                    winit::keyboard::KeyCode::KeyW => {
                        self.game_params.camera_angle += 0.1;
                        return true;
                    }
                    winit::keyboard::KeyCode::KeyS => {
                        self.game_params.camera_angle -= 0.1;
                        return true;
                    }
                    _ => {}
                }
            }
        }
        // Unhandled keys (e.g. UI toggle) go to the shared key handler.
        return self
            .base
            .key_handler
            .handle_keyboard_input(core.window(), event);
    }
    false
}
}
impl BlockTowerGame {
    /// Upload the UI camera values (height, angle, scale) into the compute
    /// shader's audio storage buffer, which this example repurposes for
    /// game state rather than audio.
    ///
    /// The three floats land at float index `CAMERA_FLOAT_OFFSET`; the WGSL
    /// side must read them from the same slot (assumed — confirm against
    /// shaders/blockgame.wgsl).
    fn update_camera_in_shader(&self, queue: &wgpu::Queue) {
        // Float index within the storage buffer where camera data begins.
        const CAMERA_FLOAT_OFFSET: usize = 5;
        if let Some(audio_buffer) = self.compute_shader.get_audio_buffer() {
            let camera_data: [f32; 3] = [
                self.game_params.camera_height,
                self.game_params.camera_angle,
                self.game_params.camera_scale,
            ];
            let byte_offset = (CAMERA_FLOAT_OFFSET * std::mem::size_of::<f32>()) as u64;
            queue.write_buffer(audio_buffer, byte_offset, bytemuck::cast_slice(&camera_data));
        }
    }
}
/// Entry point: set up logging and GStreamer, then run the winit/wgpu app
/// with `BlockTowerGame` driving the frames.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    // NOTE(review): gst is initialized although this example loads no media
    // in the visible code — presumably required elsewhere; confirm.
    cuneus::gst::init()?;
    let (app, event_loop) = ShaderApp::new("Block Tower Game", 600, 800);
    app.run(event_loop, BlockTowerGame::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/gabor.rs | examples/gabor.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side parameter block for the Gabor patch visualizer.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow a byte-for-byte GPU upload, so
/// field order must match the WGSL struct (assumed; confirm against
/// shaders/gabor.wgsl).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct GaborParams {
    // Gabor function: carrier frequency, orientation and phase (radians),
    // animation speed, Gaussian envelope sigmas, amplitude, aspect ratio.
    frequency: f32,
    orientation: f32,
    phase: f32,
    speed: f32,
    sigma_x: f32,
    sigma_y: f32,
    amplitude: f32,
    aspect_ratio: f32,
    // 3D presentation: depth scale and camera rotations; click_state is an
    // i32 flag written by the UI.
    z_scale: f32,
    rotation_x: f32,
    rotation_y: f32,
    click_state: i32,
    brightness: f32,
    // Colors stored as individual f32 components (positive / negative lobes)
    // to keep the struct Pod-compatible.
    color1_r: f32,
    color1_g: f32,
    color1_b: f32,
    color2_r: f32,
    color2_g: f32,
    color2_b: f32,
    // Depth-of-field strength and focal distance.
    dof_amount: f32,
    dof_focal_dist: f32,
}
impl UniformProvider for GaborParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
/// Application state for the Gabor example: shared render plumbing, the
/// two-stage compute shader (splat + image), and the last UI parameters.
struct GaborShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Cache of the most recently applied UI parameters; re-uploaded to the
    // GPU only when the UI reports a change.
    current_params: GaborParams,
}
impl GaborShader {
    /// Reset all of the compute shader's buffers (used when the UI requests
    /// a clear, e.g. after parameter changes that invalidate accumulation).
    fn clear_buffers(&mut self, core: &Core) {
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for GaborShader {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let initial_params = GaborParams {
frequency: 5.0,
orientation: 0.0,
phase: 0.0,
speed: 1.0,
sigma_x: 1.0,
sigma_y: 1.0,
amplitude: 1.0,
aspect_ratio: 1.0,
z_scale: 0.5,
rotation_x: 0.0,
rotation_y: 0.0,
click_state: 0,
brightness: 0.00006,
color1_r: 0.0,
color1_g: 0.7,
color1_b: 1.0,
color2_r: 1.0,
color2_g: 0.3,
color2_b: 0.0,
dof_amount: 1.0,
dof_focal_dist: 0.5,
};
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let mut config = ComputeShader::builder()
.with_entry_point("Splat")
.with_custom_uniforms::<GaborParams>()
.with_atomic_buffer()
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Gabor Patch Unified")
.build();
// Add second entry point manually (no ping-pong needed)
config.entry_points.push("main_image".to_string());
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/gabor.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/gabor.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Gabor Hot Reload"),
source: wgpu::ShaderSource::Wgsl(include_str!("shaders/gabor.wgsl").into()),
}),
) {
eprintln!("Failed to enable hot reload for gabor shader: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
}
}
fn update(&mut self, core: &Core) {
self.base.fps_tracker.update();
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Handle export
self.compute_shader.handle_export(core, &mut self.base);
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Gabor Patch")
.collapsible(true)
.resizable(true)
.default_width(250.0)
.show(ctx, |ui| {
egui::CollapsingHeader::new("Gabor Parameters")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.frequency, 0.1..=10.0)
.text("Frequency"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(
&mut params.orientation,
-std::f32::consts::PI..=std::f32::consts::PI,
)
.text("Orientation"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(
&mut params.phase,
-std::f32::consts::PI..=std::f32::consts::PI,
)
.text("Phase"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.speed, 0.0..=5.0)
.text("Speed"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.sigma_x, 0.1..=3.0)
.text("Sigma X"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.sigma_y, 0.1..=3.0)
.text("Sigma Y"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.amplitude, 0.0..=2.0)
.text("Amplitude"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.aspect_ratio, 0.5..=2.0)
.text("Aspect Ratio"),
)
.changed();
ui.separator();
});
egui::CollapsingHeader::new("Visual Settings")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.z_scale, 0.0..=1.0)
.text("Z Depth Scale"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.brightness, 0.00001..=0.0001)
.logarithmic(true)
.text("Brightness"),
)
.changed();
ui.separator();
ui.label("Camera Controls:");
changed |= ui
.add(
egui::Slider::new(&mut params.rotation_x, -1.0..=1.0)
.text("Rotation X"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.rotation_y, -1.0..=1.0)
.text("Rotation Y"),
)
.changed();
});
egui::CollapsingHeader::new("Depth of Field")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.dof_amount, 0.0..=3.0)
.text("DOF Amount"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.dof_focal_dist, 0.0..=1.0)
.text("Focal Distance"),
)
.changed();
params.click_state = 1;
});
egui::CollapsingHeader::new("Colors")
.default_open(false)
.show(ui, |ui| {
ui.horizontal(|ui| {
ui.label("Positive Part:");
let mut color =
[params.color1_r, params.color1_g, params.color1_b];
if ui.color_edit_button_rgb(&mut color).changed() {
params.color1_r = color[0];
params.color1_g = color[1];
params.color1_b = color[2];
changed = true;
}
});
ui.horizontal(|ui| {
ui.label("Negative Part:");
let mut color =
[params.color2_r, params.color2_g, params.color2_b];
if ui.color_edit_button_rgb(&mut color).changed() {
params.color2_r = color[0];
params.color2_g = color[1];
params.color2_b = color[2];
changed = true;
}
});
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
self.base.export_manager.apply_ui_request(export_request);
if controls_request.should_clear_buffers {
self.clear_buffers(core);
}
self.base.apply_control_request(controls_request);
let current_time = self.base.controls.get_time(&self.base.start_time);
let delta = 1.0 / 60.0;
self.compute_shader
.set_time(current_time, delta, &core.queue);
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
if should_start_export {
self.base.export_manager.start_export();
}
// Stage 0: Generate and splat particles (workgroup size [256, 1, 1])
self.compute_shader
.dispatch_stage_with_workgroups(&mut encoder, 0, [4096, 1, 1]);
// Stage 1: Render to screen (workgroup size [16, 16, 1])
self.compute_shader.dispatch_stage(&mut encoder, core, 1);
{
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
Ok(())
}
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
/// Entry point: initialize logging and run the winit/wgpu app with
/// `GaborShader` driving the frames. No media pipeline is needed here, so
/// gst is not initialized (unlike the media-capable examples).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let (app, event_loop) = cuneus::ShaderApp::new("Gabor Patch Visualizer", 800, 600);
    app.run(event_loop, GaborShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/scenecolor.rs | examples/scenecolor.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side parameter block for the scene color-palette extractor.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow byte-for-byte GPU upload; field
/// order must match the WGSL struct (assumed; confirm against
/// shaders/scenecolor.wgsl).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct SceneColorParams {
    // Number of palette segments and the palette strip's screen height.
    num_segments: f32,
    palette_height: f32,
    // Sampling grid resolution used when averaging scene colors.
    samples_x: i32,
    samples_y: i32,
    // Explicit padding — presumably rounds the uniform block up to the
    // GPU-side size; confirm against the WGSL declaration.
    _pad1: f32,
    _pad2: f32,
    _pad3: f32,
    _pad4: f32,
}
impl UniformProvider for SceneColorParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
/// Application state for the scene-color example: shared render plumbing,
/// the single-pass compute shader, and the last applied UI parameters.
struct SceneColorShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Cached UI parameters; re-uploaded only when the UI reports a change.
    current_params: SceneColorParams,
}
impl SceneColorShader {
    /// Reset all of the compute shader's buffers (invoked when the UI's
    /// clear-buffers control is triggered).
    fn clear_buffers(&mut self, core: &Core) {
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for SceneColorShader {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let initial_params = SceneColorParams {
num_segments: 16.0,
palette_height: 0.2,
samples_x: 8,
samples_y: 8,
_pad1: 0.0,
_pad2: 0.0,
_pad3: 0.0,
_pad4: 0.0,
};
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let config = ComputeShader::builder()
.with_entry_point("main")
.with_input_texture() // Enable input texture support
.with_custom_uniforms::<SceneColorParams>()
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Scene Color Unified")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/scenecolor.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/scenecolor.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Scene Color Compute Shader"),
source: wgpu::ShaderSource::Wgsl(
include_str!("shaders/scenecolor.wgsl").into(),
),
}),
) {
eprintln!("Failed to enable Scene Color compute shader hot reload: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
}
}
/// Per-frame update: advance shader time, refresh the current media frame
/// (image/video/webcam/HDRI) and rebind it as the compute input texture.
fn update(&mut self, core: &Core) {
    // Update time
    // Shader time advances with a fixed 60 Hz delta, not measured frame time.
    let current_time = self.base.controls.get_time(&self.base.start_time);
    let delta = 1.0 / 60.0;
    self.compute_shader
        .set_time(current_time, delta, &core.queue);
    // Update input textures for media processing
    self.base.update_current_texture(core, &core.queue);
    if let Some(texture_manager) = self.base.get_current_texture_manager() {
        // Rebind view + sampler each frame; the underlying texture may have
        // been replaced (e.g. new video frame or dropped file).
        self.compute_shader.update_input_texture(
            &texture_manager.view,
            &texture_manager.sampler,
            &core.device,
        );
    }
    self.base.fps_tracker.update();
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Scene Color Render Encoder"),
});
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
let using_video_texture = self.base.using_video_texture;
let using_hdri_texture = self.base.using_hdri_texture;
let using_webcam_texture = self.base.using_webcam_texture;
let video_info = self.base.get_video_info();
let hdri_info = self.base.get_hdri_info();
let webcam_info = self.base.get_webcam_info();
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Scene Color Palette")
.collapsible(true)
.resizable(true)
.default_width(280.0)
.show(ctx, |ui| {
// Media controls
ShaderControls::render_media_panel(
ui,
&mut controls_request,
using_video_texture,
video_info,
using_hdri_texture,
hdri_info,
using_webcam_texture,
webcam_info,
);
ui.separator();
egui::CollapsingHeader::new("Palette Parameters")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.num_segments, 1.0..=64.0)
.text("Segments"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.palette_height, 0.05..=0.5)
.text("Height"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.samples_x, 1..=32)
.text("Samples X"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.samples_y, 1..=32)
.text("Samples Y"),
)
.changed();
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
ui.separator();
ui.label("Color palette extractor from scene");
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
// Apply controls
self.base.export_manager.apply_ui_request(export_request);
if controls_request.should_clear_buffers {
self.clear_buffers(core);
}
self.base.apply_control_request(controls_request.clone());
self.base.handle_video_requests(core, &controls_request);
self.base.handle_webcam_requests(core, &controls_request);
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
if should_start_export {
self.base.export_manager.start_export();
}
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Single stage dispatch
self.compute_shader.dispatch(&mut encoder, core);
{
let mut render_pass = Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Scene Color Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
/// Route window events: egui first, then keyboard to the shared key
/// handler, and drag-and-dropped files into the media loader.
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
    // egui gets first refusal; consumed events go no further.
    if self
        .base
        .egui_state
        .on_window_event(core.window(), event)
        .consumed
    {
        return true;
    }
    if let WindowEvent::KeyboardInput { event, .. } = event {
        return self
            .base
            .key_handler
            .handle_keyboard_input(core.window(), event);
    }
    // Drag-and-drop loads the file as the input media; failure is logged,
    // not fatal.
    if let WindowEvent::DroppedFile(path) = event {
        if let Err(e) = self.base.load_media(core, path) {
            eprintln!("Failed to load dropped file: {e:?}");
        }
        return true;
    }
    false
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
cuneus::gst::init()?;
env_logger::init();
let (app, event_loop) = ShaderApp::new("Scene Color Palette", 800, 600);
app.run(event_loop, SceneColorShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/veridisquo.rs | examples/veridisquo.rs | use cuneus::audio::{EnvelopeConfig, SynthesisManager};
use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side parameter block for the Veridis Quo audio/visual example.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow byte-for-byte GPU upload; field
/// order must match the WGSL struct (assumed; confirm against
/// shaders/veridisquo.wgsl).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct SongParams {
    // Master volume (0..=1 in the UI) and pitch/tempo adjustments.
    volume: f32,
    octave_shift: f32,
    tempo_multiplier: f32,
    // Oscillator selector: 0=sine, 1=square, 2=saw, 3=triangle (per the UI).
    waveform_type: u32,
    // Effects: legato crossfade, reverb mix, chorus rate.
    crossfade: f32,
    reverb_mix: f32,
    chorus_rate: f32,
    // Explicit padding to the GPU-side struct size — confirm against WGSL.
    _padding: f32,
}
impl UniformProvider for SongParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
/// Application state for the Veridis Quo example: render plumbing, the
/// unified compute shader, cached UI parameters, and the optional CPU-side
/// audio synthesizer (None when audio setup failed at startup).
struct VeridisQuo {
    base: RenderKit,
    compute_shader: ComputeShader,
    current_params: SongParams,
    audio_synthesis: Option<SynthesisManager>,
}
impl ShaderManager for VeridisQuo {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let initial_params = SongParams {
volume: 0.5,
octave_shift: 0.0,
tempo_multiplier: 1.0,
waveform_type: 1,
crossfade: 0.0,
reverb_mix: 0.0,
chorus_rate: 0.0,
_padding: 0.0,
};
let config = ComputeShader::builder()
.with_entry_point("main")
.with_custom_uniforms::<SongParams>()
.with_fonts()
.with_audio(4096)
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Veridis Quo Unified")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/veridisquo.wgsl"), config);
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/veridisquo.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Veridisquo Hot Reload"),
source: wgpu::ShaderSource::Wgsl(
include_str!("shaders/veridisquo.wgsl").into(),
),
}),
) {
eprintln!("Failed to enable hot reload for veridisquo shader: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
let audio_synthesis = match SynthesisManager::new() {
Ok(mut synth) => {
// For continuous song playback, use minimal envelope
// Quick attack, full sustain, quick release for smooth note transitions
synth.set_envelope(EnvelopeConfig {
attack_time: 0.005, // 5ms - very quick attack
decay_time: 0.01, // 10ms decay
sustain_level: 1.0, // Full sustain for continuous playback
release_time: 0.05, // 50ms release for smooth transitions
});
if let Err(e) = synth.start_gpu_synthesis() {
eprintln!("Failed to start audio synthesis: {e}");
None
} else {
println!("Audio synthesis started.");
Some(synth)
}
}
Err(e) => {
eprintln!("Failed to create audio synthesis: {e}");
None
}
};
Self {
base,
compute_shader,
current_params: initial_params,
audio_synthesis,
}
}
/// Per-frame update: advance shader time, then read the melody/bass
/// frequencies the GPU wrote into the audio buffer and feed them to the
/// CPU synthesizer's two voices.
fn update(&mut self, core: &Core) {
    self.compute_shader.check_hot_reload(&core.device);
    // Fixed 60 Hz delta, not measured frame time.
    let current_time = self.base.controls.get_time(&self.base.start_time);
    let delta = 1.0 / 60.0;
    self.compute_shader
        .set_time(current_time, delta, &core.queue);
    self.base.fps_tracker.update();
    // Read GPU audio parameters and update synthesis
    if let Some(ref mut synth) = self.audio_synthesis {
        // Update waveform type
        synth.update_waveform(self.current_params.waveform_type);
        synth.set_master_volume(self.current_params.volume as f64);
        // Read melody and bass frequencies from GPU's audio buffer.
        // NOTE(review): this is a blocking GPU readback every frame
        // (pollster::block_on) — it stalls the frame on buffer mapping;
        // consider an async/double-buffered readback if it shows up in
        // profiles.
        if let Ok(audio_data) = pollster::block_on(
            self.compute_shader
                .read_audio_buffer(&core.device, &core.queue),
        ) {
            // Buffer layout (indices 3..=6) is defined by the WGSL side:
            // [3]=melody freq, [4]=melody amp, [5]=bass freq, [6]=bass amp
            // (assumed — confirm against shaders/veridisquo.wgsl).
            if audio_data.len() >= 7 {
                let final_melody_freq = audio_data[3];
                let melody_amp = audio_data[4];
                let final_bass_freq = audio_data[5];
                let bass_amp = audio_data[6];
                // Voice 0: Melody — gate on both amplitude and a sane
                // frequency floor to avoid triggering on near-zero data.
                let melody_active = melody_amp > 0.01 && final_melody_freq > 10.0;
                synth.set_voice(0, final_melody_freq, melody_amp * 0.8, melody_active);
                // Voice 1: Bass
                let bass_active = bass_amp > 0.01 && final_bass_freq > 10.0;
                synth.set_voice(1, final_bass_freq, bass_amp * 0.6, bass_active);
            }
        }
        // Must call update() every frame to process envelopes
        synth.update();
    }
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut params = self.current_params;
let mut changed = false;
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Veridis Quo")
.collapsible(true)
.resizable(true)
.default_width(250.0)
.show(ctx, |ui| {
egui::CollapsingHeader::new("Audio Controls")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.volume, 0.0..=1.0)
.text("Volume"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.octave_shift, -2.0..=2.0)
.text("Octave"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.tempo_multiplier, 0.5..=4.0)
.text("Tempo"),
)
.changed();
});
egui::CollapsingHeader::new("Waveforms")
.default_open(false)
.show(ui, |ui| {
let waveform_names =
[("Sine", 0), ("Square", 1), ("Saw", 2), ("Triangle", 3)];
for (name, wave_type) in waveform_names.iter() {
let selected = params.waveform_type == *wave_type;
if ui.selectable_label(selected, *name).clicked() {
params.waveform_type = *wave_type;
changed = true;
}
}
});
egui::CollapsingHeader::new("Effects")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.crossfade, 0.0..=1.0)
.text("Legato"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.reverb_mix, 0.0..=1.0)
.text("Reverb"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.chorus_rate, 0.1..=8.0)
.text("Chorus Rate"),
)
.changed();
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
self.base.apply_control_request(controls_request);
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Veridis Quo Render Encoder"),
});
self.compute_shader.dispatch(&mut encoder, core);
{
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Veridis Quo Render Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
Ok(())
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
/// Route window events: egui first, then an 'R' hotkey that restarts the
/// song clock, then the shared key handler.
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
    if self
        .base
        .egui_state
        .on_window_event(core.window(), event)
        .consumed
    {
        return true;
    }
    if let WindowEvent::KeyboardInput { event, .. } = event {
        if event.state == winit::event::ElementState::Pressed {
            // Match on the logical character so both 'r' and 'R' work.
            if let winit::keyboard::Key::Character(ref s) = event.logical_key {
                match s.as_str() {
                    "r" | "R" => {
                        // Restart playback by resetting the time origin.
                        self.base.start_time = std::time::Instant::now();
                        return true;
                    }
                    _ => {}
                }
            }
        }
        return self
            .base
            .key_handler
            .handle_keyboard_input(core.window(), event);
    }
    false
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
cuneus::gst::init()?;
let (app, event_loop) = ShaderApp::new("Veridis Quo", 800, 600);
app.run(event_loop, VeridisQuo::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/pathtracing.rs | examples/pathtracing.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side free-fly camera state for the path tracer: which movement keys
/// are held, speed/time bookkeeping, and mouse-look orientation.
struct CameraMovement {
    // Held-key flags for the six movement directions.
    forward: bool,
    backward: bool,
    left: bool,
    right: bool,
    up: bool,
    down: bool,
    // Movement speed in world units per second.
    speed: f32,
    // Timestamp of the previous update_camera call, for frame-rate-independent motion.
    last_update: std::time::Instant,
    // Orientation in radians; pitch is clamped in handle_mouse_movement.
    yaw: f32,
    pitch: f32,
    // Radians of rotation per pixel of mouse travel.
    mouse_sensitivity: f32,
    // Previous cursor position; valid only once mouse_initialized is true.
    last_mouse_x: f32,
    last_mouse_y: f32,
    mouse_initialized: bool,
    mouse_look_enabled: bool,
    // Set when mouse look changed orientation; consumed by update_camera.
    look_changed: bool,
}
impl Default for CameraMovement {
fn default() -> Self {
Self {
forward: false,
backward: false,
left: false,
right: false,
up: false,
down: false,
speed: 2.0,
last_update: std::time::Instant::now(),
yaw: 0.0,
pitch: 0.0,
mouse_sensitivity: 0.005,
last_mouse_x: 0.0,
last_mouse_y: 0.0,
mouse_initialized: false,
mouse_look_enabled: true,
look_changed: false,
}
}
}
impl CameraMovement {
/// Advance the camera by the held movement keys and any accumulated mouse
/// look, writing position/target into `params`. Returns `true` when
/// anything changed (so the caller can restart accumulation — presumably;
/// confirm at the call site).
fn update_camera(&mut self, params: &mut PathTracingParams) -> bool {
    // Frame-rate-independent step from the wall-clock delta.
    let now = std::time::Instant::now();
    let dt = now.duration_since(self.last_update).as_secs_f32();
    self.last_update = now;
    let mut changed = false;
    // Consume the mouse-look dirty flag set by handle_mouse_movement.
    if self.look_changed {
        changed = true;
        self.look_changed = false;
    }
    // View direction from spherical angles (yaw around Y, pitch up/down).
    let forward = [
        self.pitch.cos() * self.yaw.cos(),
        self.pitch.sin(),
        self.pitch.cos() * self.yaw.sin(),
    ];
    let world_up = [0.0, 1.0, 0.0];
    // right = forward x world_up (cross product, then normalized below).
    let right = [
        forward[1] * world_up[2] - forward[2] * world_up[1],
        forward[2] * world_up[0] - forward[0] * world_up[2],
        forward[0] * world_up[1] - forward[1] * world_up[0],
    ];
    // Normalization is safe: pitch is clamped to ±0.49π in
    // handle_mouse_movement, so forward is never parallel to world_up and
    // right_len is never zero.
    let right_len = (right[0] * right[0] + right[1] * right[1] + right[2] * right[2]).sqrt();
    let right = [
        right[0] / right_len,
        right[1] / right_len,
        right[2] / right_len,
    ];
    let delta = self.speed * dt;
    // Accumulate the movement vector from all held keys.
    let mut move_vec = [0.0, 0.0, 0.0];
    if self.forward {
        move_vec[0] += forward[0] * delta;
        move_vec[1] += forward[1] * delta;
        move_vec[2] += forward[2] * delta;
        changed = true;
    }
    if self.backward {
        move_vec[0] -= forward[0] * delta;
        move_vec[1] -= forward[1] * delta;
        move_vec[2] -= forward[2] * delta;
        changed = true;
    }
    if self.right {
        move_vec[0] += right[0] * delta;
        move_vec[1] += right[1] * delta;
        move_vec[2] += right[2] * delta;
        changed = true;
    }
    if self.left {
        move_vec[0] -= right[0] * delta;
        move_vec[1] -= right[1] * delta;
        move_vec[2] -= right[2] * delta;
        changed = true;
    }
    // Vertical movement is along world up, not the view direction.
    if self.up {
        move_vec[1] += delta;
        changed = true;
    }
    if self.down {
        move_vec[1] -= delta;
        changed = true;
    }
    params.camera_pos_x += move_vec[0];
    params.camera_pos_y += move_vec[1];
    params.camera_pos_z += move_vec[2];
    // The look target is always re-derived one unit ahead of the position,
    // even when nothing changed this frame.
    let look_distance = 1.0;
    params.camera_target_x = params.camera_pos_x + forward[0] * look_distance;
    params.camera_target_y = params.camera_pos_y + forward[1] * look_distance;
    params.camera_target_z = params.camera_pos_z + forward[2] * look_distance;
    changed
}
fn handle_mouse_movement(&mut self, x: f32, y: f32) -> bool {
if !self.mouse_look_enabled {
return false;
}
if !self.mouse_initialized {
self.last_mouse_x = x;
self.last_mouse_y = y;
self.mouse_initialized = true;
return false;
}
let dx = x - self.last_mouse_x;
let dy = y - self.last_mouse_y;
self.last_mouse_x = x;
self.last_mouse_y = y;
self.yaw += dx * self.mouse_sensitivity;
self.pitch -= dy * self.mouse_sensitivity;
self.pitch = self
.pitch
.clamp(-std::f32::consts::PI * 0.49, std::f32::consts::PI * 0.49);
self.look_changed = true;
true
}
fn toggle_mouse_look(&mut self) {
self.mouse_look_enabled = !self.mouse_look_enabled;
self.mouse_initialized = false;
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct PathTracingParams {
camera_pos_x: f32,
camera_pos_y: f32,
camera_pos_z: f32,
camera_target_x: f32,
camera_target_y: f32,
camera_target_z: f32,
fov: f32,
aperture: f32,
max_bounces: u32,
samples_per_pixel: u32,
accumulate: u32,
num_spheres: u32,
_padding1: f32,
_padding2: f32,
rotation_speed: f32,
exposure: f32,
}
impl UniformProvider for PathTracingParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
struct PathTracingShader {
base: RenderKit,
compute_shader: ComputeShader,
current_params: PathTracingParams,
camera_movement: CameraMovement,
frame_count: u32,
should_reset_accumulation: bool,
}
impl PathTracingShader {
fn clear_buffers(&mut self, core: &Core) {
self.compute_shader.clear_all_buffers(core);
self.frame_count = 0;
self.should_reset_accumulation = false;
}
}
impl ShaderManager for PathTracingShader {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let initial_params = PathTracingParams {
camera_pos_x: 0.0,
camera_pos_y: 1.0,
camera_pos_z: 6.0,
camera_target_x: 0.0,
camera_target_y: 0.0,
camera_target_z: -1.0,
fov: 40.0,
aperture: 0.00,
max_bounces: 4,
samples_per_pixel: 2,
accumulate: 1,
num_spheres: 15,
_padding1: 0.0,
_padding2: 0.0,
rotation_speed: 0.2,
exposure: 1.5,
};
let config = ComputeShader::builder()
.with_entry_point("main")
.with_input_texture() // Enable input texture support for background
.with_custom_uniforms::<PathTracingParams>()
.with_mouse()
.with_storage_buffer(StorageBufferSpec::new(
"atomic_buffer",
(core.size.width * core.size.height * 3 * 4) as u64,
)) // 3 channels * u32 per pixel
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Path Tracing Unified")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/pathtracing.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/pathtracing.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Path Tracing Compute Shader"),
source: wgpu::ShaderSource::Wgsl(
include_str!("shaders/pathtracing.wgsl").into(),
),
}),
) {
eprintln!("Failed to enable Path Tracing compute shader hot reload: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
camera_movement: CameraMovement::default(),
frame_count: 0,
should_reset_accumulation: true,
}
}
fn update(&mut self, core: &Core) {
// Update time
let current_time = self.base.controls.get_time(&self.base.start_time);
let delta = 1.0 / 60.0;
self.compute_shader
.set_time(current_time, delta, &core.queue);
// Update input textures for background
self.base.update_current_texture(core, &core.queue);
if let Some(texture_manager) = self.base.get_current_texture_manager() {
self.compute_shader.update_input_texture(
&texture_manager.view,
&texture_manager.sampler,
&core.device,
);
}
if self.camera_movement.update_camera(&mut self.current_params) {
self.compute_shader
.set_custom_params(self.current_params, &core.queue);
self.should_reset_accumulation = true;
}
self.base.fps_tracker.update();
// Handle export
self.compute_shader.handle_export(core, &mut self.base);
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
self.should_reset_accumulation = true;
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Path Tracing Render Encoder"),
});
// Handle UI and parameter updates
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let current_fps = self.base.fps_tracker.fps();
let using_video_texture = self.base.using_video_texture;
let using_hdri_texture = self.base.using_hdri_texture;
let using_webcam_texture = self.base.using_webcam_texture;
let video_info = self.base.get_video_info();
let hdri_info = self.base.get_hdri_info();
let webcam_info = self.base.get_webcam_info();
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Path Tracer")
.collapsible(true)
.resizable(true)
.default_width(300.0)
.show(ctx, |ui| {
ui.label("Camera Controls:");
ui.label("W/A/S/D - Movements");
ui.label("Q/E - down/up");
ui.label("Mouse - Look around");
ui.label("Right Click - Toggle mouse look");
ui.label("Space - Toggle progressive rendering");
ui.separator();
ShaderControls::render_media_panel(
ui,
&mut controls_request,
using_video_texture,
video_info,
using_hdri_texture,
hdri_info,
using_webcam_texture,
webcam_info,
);
ui.separator();
egui::CollapsingHeader::new("Render Settings")
.default_open(false)
.show(ui, |ui| {
let old_samples = params.samples_per_pixel;
changed |= ui
.add(
egui::Slider::new(&mut params.samples_per_pixel, 1..=16)
.text("Samples/pixel"),
)
.changed();
if params.samples_per_pixel != old_samples {
self.should_reset_accumulation = true;
}
let old_bounces = params.max_bounces;
changed |= ui
.add(
egui::Slider::new(&mut params.max_bounces, 1..=16)
.text("Max Bounces"),
)
.changed();
if params.max_bounces != old_bounces {
self.should_reset_accumulation = true;
}
let old_accumulate = params.accumulate;
let mut accumulate_bool = params.accumulate > 0;
changed |= ui
.checkbox(&mut accumulate_bool, "Progressive Rendering")
.changed();
params.accumulate = if accumulate_bool { 1 } else { 0 };
if params.accumulate != old_accumulate {
self.should_reset_accumulation = true;
}
changed |= ui
.add(
egui::Slider::new(&mut params.exposure, 0.1..=5.0)
.text("Exposure"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.aperture, 0.0..=0.5)
.text("Depth of Field"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.rotation_speed, 0.0..=2.0)
.text("Animation Speed"),
)
.changed();
if ui.button("Reset Accumulation").clicked() {
self.should_reset_accumulation = true;
changed = true;
}
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
ui.separator();
ui.label(format!("Accumulated Samples: {}", self.frame_count));
ui.label(format!(
"Resolution: {}x{}",
core.size.width, core.size.height
));
ui.label(format!("FPS: {current_fps:.1}"));
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
// Apply controls
self.base.export_manager.apply_ui_request(export_request);
if controls_request.should_clear_buffers || self.should_reset_accumulation {
self.clear_buffers(core);
}
self.base.apply_control_request(controls_request.clone());
self.base.handle_video_requests(core, &controls_request);
self.base.handle_webcam_requests(core, &controls_request);
if should_start_export {
self.base.export_manager.start_export();
}
// Update mouse
self.compute_shader
.update_mouse_uniform(&self.base.mouse_tracker.uniform, &core.queue);
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Set frame count for random number generation
self.compute_shader.time_uniform.data.frame = self.frame_count;
self.compute_shader.time_uniform.update(&core.queue);
// Single stage dispatch
self.compute_shader.dispatch(&mut encoder, core);
// Render compute output to screen
{
let mut render_pass = Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Path Tracing Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
// Increment frame count for progressive rendering and noise generation
if self.current_params.accumulate > 0 {
self.frame_count += 1;
} else {
self.frame_count = (self.frame_count + 1) % 1000;
}
Ok(())
}
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
if let winit::keyboard::Key::Character(ch) = &event.logical_key {
match ch.as_str() {
"w" | "W" => {
self.camera_movement.forward =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
"s" | "S" => {
self.camera_movement.backward =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
"a" | "A" => {
self.camera_movement.left =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
"d" | "D" => {
self.camera_movement.right =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
"q" | "Q" => {
self.camera_movement.down =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
"e" | "E" => {
self.camera_movement.up =
event.state == winit::event::ElementState::Pressed;
self.should_reset_accumulation = true;
return true;
}
" " => {
if event.state == winit::event::ElementState::Released {
self.current_params.accumulate = 1 - self.current_params.accumulate;
self.should_reset_accumulation = true;
self.compute_shader
.set_custom_params(self.current_params, &core.queue);
return true;
}
}
_ => {}
}
}
}
if let WindowEvent::CursorMoved { position, .. } = event {
let x = position.x as f32;
let y = position.y as f32;
self.base.handle_mouse_input(core, event, false);
if self.camera_movement.handle_mouse_movement(x, y) {
self.should_reset_accumulation = true;
return true;
}
}
if let WindowEvent::MouseInput { state, button, .. } = event {
if *button == winit::event::MouseButton::Right
&& *state == winit::event::ElementState::Released
{
self.camera_movement.toggle_mouse_look();
return true;
}
}
if let WindowEvent::DroppedFile(path) = event {
if let Err(e) = self.base.load_media(core, path) {
eprintln!("Failed to load dropped file: {e:?}");
}
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
if self
.base
.key_handler
.handle_keyboard_input(core.window(), event)
{
return true;
}
}
false
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
cuneus::gst::init()?;
let (app, event_loop) = ShaderApp::new("Path Tracer", 800, 600);
app.run(event_loop, PathTracingShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/cliffordcompute.rs | examples/cliffordcompute.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct CliffordParams {
a: f32,
b: f32,
c: f32,
d: f32,
motion_speed: f32,
rotation_x: f32,
rotation_y: f32,
click_state: i32,
brightness: f32,
color1_r: f32,
color1_g: f32,
color1_b: f32,
color2_r: f32,
color2_g: f32,
color2_b: f32,
scale: f32,
dof_amount: f32,
dof_focal_dist: f32,
}
impl UniformProvider for CliffordParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
struct CliffordShader {
base: RenderKit,
compute_shader: ComputeShader,
current_params: CliffordParams,
}
impl CliffordShader {
fn clear_buffers(&mut self, core: &Core) {
self.compute_shader.clear_all_buffers(core);
}
}
impl ShaderManager for CliffordShader {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let initial_params = CliffordParams {
a: 1.7,
b: 1.7,
c: 0.6,
d: 1.2,
motion_speed: 1.0,
rotation_x: 0.0,
rotation_y: 0.0,
click_state: 0,
brightness: 0.00004,
color1_r: 0.0,
color1_g: 0.7,
color1_b: 1.0,
color2_r: 1.0,
color2_g: 0.3,
color2_b: 0.5,
scale: 0.6,
dof_amount: 1.0,
dof_focal_dist: 0.5,
};
let mut config = ComputeShader::builder()
.with_entry_point("Splat")
.with_custom_uniforms::<CliffordParams>()
.with_atomic_buffer()
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Clifford Attractor Unified")
.build();
config.entry_points.push("main_image".to_string());
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/cliffordcompute.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/cliffordcompute.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Clifford Compute Hot Reload"),
source: wgpu::ShaderSource::Wgsl(
include_str!("shaders/cliffordcompute.wgsl").into(),
),
}),
) {
eprintln!("Failed to enable hot reload for cliffordcompute shader: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
}
}
fn update(&mut self, core: &Core) {
self.base.fps_tracker.update();
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Handle export with custom dispatch pattern for cliffordcompute
self.compute_shader.handle_export_dispatch(
core,
&mut self.base,
|shader, encoder, core| {
shader.dispatch_stage_with_workgroups(encoder, 0, [2048, 1, 1]);
shader.dispatch_stage(encoder, core, 1);
},
);
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Clifford Attractor")
.collapsible(true)
.resizable(true)
.default_width(250.0)
.show(ctx, |ui| {
egui::CollapsingHeader::new("Attractor Parameters")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.a, -3.0..=3.0)
.text("Parameter A"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.b, -3.0..=3.0)
.text("Parameter B"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.c, -3.0..=3.0)
.text("Parameter C"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.d, -3.0..=3.0)
.text("Parameter D"),
)
.changed();
ui.separator();
ui.label("Interesting presets:");
if ui.button("Classic (1.5, 1.5, 1.4, 1.4)").clicked() {
params.a = 1.5;
params.b = 1.5;
params.c = 1.4;
params.d = 1.4;
changed = true;
}
if ui.button("Chaotic (1.7, 1.7, 0.6, 1.2)").clicked() {
params.a = 1.7;
params.b = 1.7;
params.c = 0.6;
params.d = 1.2;
changed = true;
}
if ui.button("Symmetric (2.0, -2.0, 1.0, 0.5)").clicked() {
params.a = 2.0;
params.b = -2.0;
params.c = 1.0;
params.d = 0.5;
changed = true;
}
});
egui::CollapsingHeader::new("Visual Settings")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.motion_speed, 0.0..=3.0)
.text("Animation Speed"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.brightness, 0.00001..=0.0001)
.logarithmic(true)
.text("Brightness"),
)
.changed();
ui.separator();
ui.label("Camera Controls:");
changed |= ui
.add(
egui::Slider::new(&mut params.rotation_x, -1.0..=1.0)
.text("Rotation X"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.rotation_y, -1.0..=1.0)
.text("Rotation Y"),
)
.changed();
ui.separator();
changed |= ui
.add(
egui::Slider::new(&mut params.scale, 0.1..=2.0)
.text("Attractor Scale"),
)
.changed();
});
egui::CollapsingHeader::new("Depth of Field")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.dof_amount, 0.0..=3.0)
.text("DOF Amount"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.dof_focal_dist, 0.0..=1.0)
.text("Focal Distance"),
)
.changed();
params.click_state = 1;
});
egui::CollapsingHeader::new("Colors")
.default_open(false)
.show(ui, |ui| {
ui.horizontal(|ui| {
ui.label("Color 1:");
let mut color =
[params.color1_r, params.color1_g, params.color1_b];
if ui.color_edit_button_rgb(&mut color).changed() {
params.color1_r = color[0];
params.color1_g = color[1];
params.color1_b = color[2];
changed = true;
}
});
ui.horizontal(|ui| {
ui.label("Color 2:");
let mut color =
[params.color2_r, params.color2_g, params.color2_b];
if ui.color_edit_button_rgb(&mut color).changed() {
params.color2_r = color[0];
params.color2_g = color[1];
params.color2_b = color[2];
changed = true;
}
});
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
self.base.export_manager.apply_ui_request(export_request);
if controls_request.should_clear_buffers {
self.clear_buffers(core);
}
self.base.apply_control_request(controls_request);
let current_time = self.base.controls.get_time(&self.base.start_time);
let delta = 1.0 / 60.0;
self.compute_shader
.set_time(current_time, delta, &core.queue);
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
if should_start_export {
self.base.export_manager.start_export();
}
// Stage 0: Generate and splat particles (workgroup size [256, 1, 1])
self.compute_shader
.dispatch_stage_with_workgroups(&mut encoder, 0, [2048, 1, 1]);
// Stage 1: Render to screen (workgroup size [16, 16, 1])
self.compute_shader.dispatch_stage(&mut encoder, core, 1);
{
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
Ok(())
}
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
let (app, event_loop) = cuneus::ShaderApp::new("Clifford", 800, 600);
app.run(event_loop, CliffordShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/2dneuron.rs | examples/2dneuron.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct NeuronParams {
pixel_offset: f32,
pixel_offset2: f32,
lights: f32,
exp: f32,
frame: f32,
col1: f32,
col2: f32,
decay: f32,
}
impl UniformProvider for NeuronParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
struct NeuronShader {
base: RenderKit,
compute_shader: ComputeShader,
current_params: NeuronParams,
}
impl ShaderManager for NeuronShader {
fn init(core: &Core) -> Self {
let initial_params = NeuronParams {
pixel_offset: -1.0,
pixel_offset2: 1.0,
lights: 2.2,
exp: 4.0,
frame: 1.0,
col1: 100.0,
col2: 1.0,
decay: 1.0,
};
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
// Create multipass system: buffer_a -> buffer_b -> buffer_c -> main_image
let passes = vec![
PassDescription::new("buffer_a", &[]), // no dependencies, generates pattern
PassDescription::new("buffer_b", &["buffer_a"]), // reads buffer_a
PassDescription::new("buffer_c", &["buffer_c", "buffer_b"]), // self-feedback + buffer_b
PassDescription::new("main_image", &["buffer_c"]),
];
let config = ComputeShader::builder()
.with_entry_point("buffer_a")
.with_multi_pass(&passes)
.with_custom_uniforms::<NeuronParams>()
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("2D Neuron Unified")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/2dneuron.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/2dneuron.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("2dneuron Hot Reload"),
source: wgpu::ShaderSource::Wgsl(include_str!("shaders/2dneuron.wgsl").into()),
}),
) {
eprintln!("Failed to enable hot reload for 2dneuron shader: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
}
}
fn update(&mut self, core: &Core) {
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Handle export
self.compute_shader.handle_export(core, &mut self.base);
// Update time uniform - this is crucial for accumulation!
let current_time = self.base.controls.get_time(&self.base.start_time);
let delta = 1.0 / 60.0;
self.compute_shader
.set_time(current_time, delta, &core.queue);
self.base.fps_tracker.update();
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Neuron Render Encoder"),
});
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("2D Neuron")
.collapsible(true)
.resizable(true)
.default_width(280.0)
.show(ctx, |ui| {
egui::CollapsingHeader::new("Neuron Parameters")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.pixel_offset, -3.14..=3.14)
.text("Pixel Offset Y"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.pixel_offset2, -3.14..=3.14)
.text("Pixel Offset X"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.lights, 0.0..=12.2)
.text("Lights"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.exp, 1.0..=120.0).text("Exp"),
)
.changed();
});
egui::CollapsingHeader::new("Visual Settings")
.default_open(false)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.frame, 0.0..=5.2)
.text("Frame"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.col1, 0.0..=150.0)
.text("Iterations"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.col2, 0.0..=20.0)
.text("Color 2"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.decay, 0.0..=1.0)
.text("Feedback"),
)
.changed();
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
ui.separator();
ui.label(format!("Frame: {}", self.compute_shader.current_frame));
ui.label("Multi-buffer neuron with particle tracing");
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
// Handle controls and clear buffers if requested
if controls_request.should_clear_buffers {
// Reset frame count to restart accumulation - this is crucial
self.compute_shader.current_frame = 0;
}
// Execute multi-pass compute shader: buffer_a -> buffer_b -> buffer_c -> main_image
self.compute_shader.dispatch(&mut encoder, core);
// Render compute output to screen
{
let compute_texture = self.compute_shader.get_output_texture();
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Neuron Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base.apply_control_request(controls_request);
self.base.export_manager.apply_ui_request(export_request);
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
if should_start_export {
self.base.export_manager.start_export();
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
let (app, event_loop) = ShaderApp::new("2D Neuron", 600, 800);
app.run(event_loop, NeuronShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/sinh.rs | examples/sinh.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct SinhParams {
aa: i32,
camera_x: f32,
camera_y: f32,
camera_z: f32,
orbit_speed: f32,
magic_number: f32,
cv_min: f32,
cv_max: f32,
os_base: f32,
os_scale: f32,
base_color_r: f32,
base_color_g: f32,
base_color_b: f32,
light_color_r: f32,
light_color_g: f32,
light_color_b: f32,
ambient_r: f32,
ambient_g: f32,
ambient_b: f32,
gamma: f32,
iterations: i32,
bound: f32,
fractal_scale: f32,
vignette_offset: f32,
}
impl UniformProvider for SinhParams {
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
}
struct SinhShader {
base: RenderKit,
compute_shader: ComputeShader,
current_params: SinhParams,
}
impl SinhShader {
fn clear_buffers(&mut self, core: &Core) {
self.compute_shader.clear_all_buffers(core);
}
}
impl ShaderManager for SinhShader {
    /// Builds the render kit, configures the sinh compute pipeline, enables
    /// WGSL hot reload, and uploads the default parameter set.
    fn init(core: &Core) -> Self {
        // Create texture bind group layout for displaying compute shader output
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        // Defaults tuned for the bundled scene; keep in range of the sliders
        // exposed in `render` below.
        let initial_params = SinhParams {
            aa: 2,
            camera_x: 0.1,
            camera_y: 10.0,
            camera_z: 10.0,
            orbit_speed: 0.3,
            magic_number: 36.0,
            cv_min: 2.197,
            cv_max: 2.99225,
            os_base: 0.00004,
            os_scale: 0.02040101,
            base_color_r: 0.5,
            base_color_g: 0.25,
            base_color_b: 0.05,
            light_color_r: 0.8,
            light_color_g: 1.0,
            light_color_b: 0.3,
            ambient_r: 1.2,
            ambient_g: 1.0,
            ambient_b: 0.8,
            gamma: 0.4,
            iterations: 65,
            bound: 12.25,
            fractal_scale: 0.05,
            vignette_offset: 0.0,
        };
        // Single-pass compute pipeline writing an RGBA16 offscreen texture.
        let config = ComputeShader::builder()
            .with_entry_point("main")
            .with_custom_uniforms::<SinhParams>()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_label("Sinh Unified")
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/sinh.wgsl"), config);
        // Enable hot reload: watch the on-disk WGSL file, falling back to the
        // compiled-in copy as the initial module.
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/sinh.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Sinh Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/sinh.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for Sinh shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    /// Per-frame bookkeeping: hot reload polling, export handling, FPS stats.
    fn update(&mut self, core: &Core) {
        // Check for hot reload updates
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
        self.base.fps_tracker.update();
    }
    /// Propagates a window resize to the resolution uniform and the
    /// compute pipeline's output texture.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    /// Draws one frame: egui parameter window, compute dispatch, then a
    /// fullscreen blit of the compute output to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // Work on a copy of the params; only re-upload if a widget changed.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Build the egui frame (or an empty one when the UI is hidden).
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Sinh")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(280.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("Rendering")
                            .default_open(true)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(egui::Slider::new(&mut params.aa, 1..=4).text("AA"))
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.gamma, 0.2..=1.1)
                                            .text("Gamma"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.vignette_offset, 0.0..=1.0)
                                            .text("Vignette"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Camera")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.camera_x, -1.0..=1.0)
                                            .text("X"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.camera_y, 5.0..=20.0)
                                            .text("Y"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.camera_z, 5.0..=20.0)
                                            .text("Z"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.orbit_speed, 0.0..=1.0)
                                            .text("speed"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Fractal")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.iterations, 10..=100)
                                            .text("Iterations"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.bound, 1.0..=25.0)
                                            .text("Bound"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.magic_number, 1.0..=100.0)
                                            .text("Magic Number"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.cv_min, 1.0..=3.0)
                                            .text("CV Min"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.cv_max, 2.0..=4.0)
                                            .text("CV Max"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.os_base, 0.00001..=0.001)
                                            .logarithmic(true)
                                            .text("OS Base"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.os_scale, 0.001..=0.1)
                                            .text("OS Scale"),
                                    )
                                    .changed();
                                ui.separator();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.fractal_scale, 0.01..=1.0)
                                            .text("Fractal Scale"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Colors")
                            .default_open(false)
                            .show(ui, |ui| {
                                // RGB scalars are bridged through a temporary
                                // [f32; 3] for egui's color picker widget.
                                ui.horizontal(|ui| {
                                    ui.label("Base Color:");
                                    let mut color = [
                                        params.base_color_r,
                                        params.base_color_g,
                                        params.base_color_b,
                                    ];
                                    if ui.color_edit_button_rgb(&mut color).changed() {
                                        params.base_color_r = color[0];
                                        params.base_color_g = color[1];
                                        params.base_color_b = color[2];
                                        changed = true;
                                    }
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Light Color:");
                                    let mut color = [
                                        params.light_color_r,
                                        params.light_color_g,
                                        params.light_color_b,
                                    ];
                                    if ui.color_edit_button_rgb(&mut color).changed() {
                                        params.light_color_r = color[0];
                                        params.light_color_g = color[1];
                                        params.light_color_b = color[2];
                                        changed = true;
                                    }
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Ambient Color:");
                                    let mut color =
                                        [params.ambient_r, params.ambient_g, params.ambient_b];
                                    if ui.color_edit_button_rgb(&mut color).changed() {
                                        params.ambient_r = color[0];
                                        params.ambient_g = color[1];
                                        params.ambient_b = color[2];
                                        changed = true;
                                    }
                                });
                            });
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Apply UI requests gathered above.
        self.base.export_manager.apply_ui_request(export_request);
        if controls_request.should_clear_buffers {
            self.clear_buffers(core);
        }
        self.base.apply_control_request(controls_request);
        let current_time = self.base.controls.get_time(&self.base.start_time);
        // Fixed timestep; assumes a 60 Hz presentation rate.
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        if should_start_export {
            self.base.export_manager.start_export();
        }
        // Run the fractal compute pass, then blit its output texture.
        self.compute_shader.dispatch(&mut encoder, core);
        {
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Display Pass"),
            );
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        // Draw egui on top, then submit everything in one command buffer.
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
    /// Routes window events: egui first (it may consume them), then the
    /// keyboard shortcut handler. Returns `true` when the event was handled.
    fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
        if self
            .base
            .egui_state
            .on_window_event(core.window(), event)
            .consumed
        {
            return true;
        }
        if let WindowEvent::KeyboardInput { event, .. } = event {
            return self
                .base
                .key_handler
                .handle_keyboard_input(core.window(), event);
        }
        false
    }
}
/// Entry point: initialise logging, create the window/event-loop pair, and
/// run the event loop with `SinhShader` as the shader factory.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let (shader_app, ev_loop) = cuneus::ShaderApp::new("Sinh 3D", 800, 300);
    shader_app.run(ev_loop, SinhShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/orbits.rs | examples/orbits.rs | use cuneus::prelude::ComputeShader;
use cuneus::{Core, RenderKit, ShaderApp, ShaderManager, UniformProvider};
use winit::event::*;
/// CPU-side uniform parameters for the orbit-trap fractal shader.
///
/// `#[repr(C)]` + bytemuck derives give a byte-for-byte GPU upload. The
/// `[f32; 3]` colors are each followed by a scalar field, presumably to fill
/// the 16-byte alignment slot of a WGSL `vec3<f32>` — TODO confirm against
/// `shaders/orbits.wgsl`.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ShaderParams {
    // Interior base color; `x` doubles as the view X position (packed after the vec3).
    base_color: [f32; 3],
    x: f32,
    // Orbit-trap color; `y` is the view Y position.
    rim_color: [f32; 3],
    y: f32,
    // Exterior color; gamma shares its alignment slot.
    accent_color: [f32; 3],
    gamma_correction: f32,
    // Animation speed of the travel effect.
    travel_speed: f32,
    // Fractal iteration count.
    iteration: i32,
    // Exterior coloring strength.
    col_ext: f32,
    // View zoom (smaller = deeper).
    zoom: f32,
    // Orbit-trap shaping parameters; semantics defined by the WGSL shader.
    trap_pow: f32,
    trap_x: f32,
    trap_y: f32,
    trap_c1: f32,
    // Anti-aliasing sample count.
    aa: i32,
    trap_s1: f32,
    // Wave animation speed.
    wave_speed: f32,
    // Folding effect intensity.
    fold_intensity: f32,
}
/// Lets the render kit upload `ShaderParams` directly to a GPU uniform buffer.
impl UniformProvider for ShaderParams {
    fn as_bytes(&self) -> &[u8] {
        // Equivalent to `bytemuck::bytes_of(self)`: view the single Pod value
        // as a one-element slice, then reinterpret that slice as raw bytes.
        bytemuck::cast_slice(std::slice::from_ref(self))
    }
}
/// Application state for the orbits example: shared plumbing, the compute
/// pipeline, and mouse-driven pan/zoom navigation state.
struct Shader {
    // Common render/UI/export helpers shared by all cuneus examples.
    base: RenderKit,
    // Compute pipeline rendering the fractal.
    compute_shader: ComputeShader,
    // True while the left mouse button is held for panning.
    mouse_dragging: bool,
    // Normalized mouse position where the current drag started.
    drag_start: [f32; 2],
    // View (x, y) at the moment the drag started.
    drag_start_pos: [f32; 2],
    // Current zoom; mirrored into `current_params.zoom`.
    zoom_level: f32,
    // Parameters currently on the GPU.
    current_params: ShaderParams,
}
/// Entry point: initialise logging, create the window/event-loop pair, and
/// run the event loop with `Shader` as the shader factory.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let (shader_app, ev_loop) = ShaderApp::new("orbits", 800, 600);
    shader_app.run(ev_loop, Shader::init)
}
impl ShaderManager for Shader {
    /// Builds the render kit, configures the orbits compute pipeline (with
    /// mouse uniform support), enables hot reload, and uploads the defaults.
    fn init(core: &Core) -> Self {
        // Initial view: deep zoom centered on an interesting region.
        let initial_zoom = 0.0004;
        let initial_x = 2.14278;
        let initial_y = 2.14278;
        // Create texture display layout
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        let config = ComputeShader::builder()
            .with_entry_point("main")
            .with_custom_uniforms::<ShaderParams>()
            .with_mouse()
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/orbits.wgsl"), config);
        let initial_params = ShaderParams {
            base_color: [0.0, 0.5, 1.0],
            x: initial_x,
            rim_color: [0.0, 0.5, 1.0],
            y: initial_y,
            accent_color: [0.018, 0.018, 0.018],
            gamma_correction: 0.4,
            travel_speed: 1.0,
            iteration: 355,
            col_ext: 2.0,
            zoom: initial_zoom,
            trap_pow: 1.0,
            trap_x: -0.5,
            trap_y: 2.0,
            trap_c1: 0.2,
            aa: 1,
            trap_s1: 0.8,
            wave_speed: 0.1,
            fold_intensity: 1.0,
        };
        // Enable hot reload
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/orbits.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Orbits Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/orbits.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for orbits shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            mouse_dragging: false,
            drag_start: [0.0, 0.0],
            drag_start_pos: [initial_x, initial_y],
            zoom_level: initial_zoom,
            current_params: initial_params,
        }
    }
    /// Per-frame bookkeeping: FPS stats, hot reload polling, export handling.
    fn update(&mut self, core: &Core) {
        self.base.fps_tracker.update();
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
    }
    /// Draws one frame: egui parameter window, uniform updates, compute
    /// dispatch, then a fullscreen blit to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        // Work on a copy of the params; only re-upload if a widget changed.
        let mut params = self.current_params;
        let mut changed = false;
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Orbits")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(280.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("Colors")
                            .default_open(false)
                            .show(ui, |ui| {
                                ui.horizontal(|ui| {
                                    ui.label("Base:");
                                    changed |=
                                        ui.color_edit_button_rgb(&mut params.base_color).changed();
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Orbit:");
                                    changed |=
                                        ui.color_edit_button_rgb(&mut params.rim_color).changed();
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Exterior:");
                                    changed |= ui
                                        .color_edit_button_rgb(&mut params.accent_color)
                                        .changed();
                                });
                            });
                        egui::CollapsingHeader::new("Rendering")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.iteration, 50..=500)
                                            .text("Iterations"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.aa, 1..=4)
                                            .text("Anti-aliasing"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.gamma_correction, 0.1..=2.0)
                                            .text("Gamma"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Traps")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.trap_x, -5.0..=5.0)
                                            .text("Trap X"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.trap_y, -5.0..=5.0)
                                            .text("Trap Y"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.trap_pow, 0.0..=3.0)
                                            .text("Trap Power"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.trap_c1, 0.0..=1.0)
                                            .text("Trap Mix"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.trap_s1, 0.0..=2.0)
                                            .text("Trap Blend"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Animation")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.travel_speed, 0.0..=2.0)
                                            .text("Travel Speed"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.wave_speed, 0.0..=2.0)
                                            .text("Wave Speed"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.fold_intensity, 0.0..=3.0)
                                            .text("Fold Intensity"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.col_ext, 0.0..=10.0)
                                            .text("Color Extension"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Navigation")
                            .default_open(false)
                            .show(ui, |ui| {
                                ui.label("Left-click + drag: Pan view");
                                ui.label("Mouse wheel: Zoom");
                                ui.separator();
                                // Keep the cached zoom_level in sync when the
                                // slider (not the wheel) changes the zoom.
                                let old_zoom = params.zoom;
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.zoom, 0.0001..=1.0)
                                            .text("Zoom")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                if old_zoom != params.zoom {
                                    self.zoom_level = params.zoom;
                                }
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.x, 0.0..=3.0)
                                            .text("X Position"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.y, 0.0..=6.0)
                                            .text("Y Position"),
                                    )
                                    .changed();
                            });
                        ui.separator();
                        cuneus::ShaderControls::render_controls_widget(ui, &mut controls_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        self.base.apply_control_request(controls_request);
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        // Create command encoder
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // Update time uniform (fixed 60 Hz timestep assumed)
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let delta_time = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta_time, &core.queue);
        // Update mouse uniform
        self.compute_shader
            .update_mouse_uniform(&self.base.mouse_tracker.uniform, &core.queue);
        // Dispatch compute shader
        self.compute_shader.dispatch(&mut encoder, core);
        // Render compute output to screen
        {
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Main Render Pass"),
            );
            let compute_texture = self.compute_shader.get_output_texture();
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
    /// Propagates a window resize to the resolution uniform and the
    /// compute pipeline's output texture.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    /// Routes window events: egui first, then keyboard shortcuts, then the
    /// mouse-based pan (left drag) and zoom-to-cursor (wheel) navigation.
    fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
        if self
            .base
            .egui_state
            .on_window_event(core.window(), event)
            .consumed
        {
            return true;
        }
        if let WindowEvent::KeyboardInput { event, .. } = event {
            return self
                .base
                .key_handler
                .handle_keyboard_input(core.window(), event);
        }
        match event {
            WindowEvent::MouseInput { state, button, .. } => {
                if button == &MouseButton::Left {
                    match state {
                        ElementState::Pressed => {
                            // Anchor the drag at the current mouse position
                            // and remember where the view was.
                            let mouse_pos = self.base.mouse_tracker.uniform.position;
                            self.mouse_dragging = true;
                            self.drag_start = mouse_pos;
                            self.drag_start_pos = [self.current_params.x, self.current_params.y];
                            return true;
                        }
                        ElementState::Released => {
                            self.mouse_dragging = false;
                            return true;
                        }
                    }
                }
                false
            }
            WindowEvent::CursorMoved { .. } => {
                if self.mouse_dragging {
                    // Pan proportionally to the zoom level so dragging feels
                    // consistent at any magnification; factors 3/6 match the
                    // X/Y slider ranges.
                    let current_pos = self.base.mouse_tracker.uniform.position;
                    let dx = (current_pos[0] - self.drag_start[0]) * 3.0 * self.zoom_level;
                    let dy = (current_pos[1] - self.drag_start[1]) * 6.0 * self.zoom_level;
                    let mut new_x = self.drag_start_pos[0] + dx;
                    let mut new_y = self.drag_start_pos[1] + dy;
                    new_x = new_x.clamp(0.0, 3.0);
                    new_y = new_y.clamp(0.0, 6.0);
                    self.current_params.x = new_x;
                    self.current_params.y = new_y;
                    self.compute_shader
                        .set_custom_params(self.current_params, &core.queue);
                }
                self.base.handle_mouse_input(core, event, false)
            }
            WindowEvent::MouseWheel { delta, .. } => {
                let zoom_delta = match delta {
                    MouseScrollDelta::LineDelta(_, y) => *y * 0.1,
                    MouseScrollDelta::PixelDelta(pos) => (pos.y as f32) * 0.001,
                };
                if zoom_delta != 0.0 {
                    // Zoom toward the cursor: shift the view center by the
                    // cursor's offset from screen center, scaled by the
                    // zoom change.
                    let mouse_pos = self.base.mouse_tracker.uniform.position;
                    let center_x = self.current_params.x;
                    let center_y = self.current_params.y;
                    let rel_x = mouse_pos[0] - 0.5;
                    let rel_y = mouse_pos[1] - 0.5;
                    let zoom_factor = if zoom_delta > 0.0 { 0.9 } else { 1.1 };
                    self.zoom_level = (self.zoom_level * zoom_factor).clamp(0.0001, 1.5);
                    let scale_change = 1.0 - zoom_factor;
                    let dx = rel_x * scale_change * 3.0 * self.zoom_level;
                    let dy = rel_y * scale_change * 6.0 * self.zoom_level;
                    self.current_params.zoom = self.zoom_level;
                    self.current_params.x = (center_x + dx).clamp(0.0, 3.0);
                    self.current_params.y = (center_y + dy).clamp(0.0, 6.0);
                    self.compute_shader
                        .set_custom_params(self.current_params, &core.queue);
                }
                self.base.handle_mouse_input(core, event, false)
            }
            _ => self.base.handle_mouse_input(core, event, false),
        }
    }
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/jfa.rs | examples/jfa.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side uniform parameters for the JFA (jump-flood-style) multi-pass shader.
///
/// `#[repr(C)]` + bytemuck derives give a byte-for-byte GPU upload; the
/// trailing `_padding*` fields presumably pad the struct to the WGSL uniform
/// buffer layout — TODO confirm against `shaders/jfa.wgsl`.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct JfaParams {
    // Clifford attractor coefficients.
    a: f32,
    b: f32,
    c: f32,
    d: f32,
    // Attractor plot scale.
    scale: f32,
    // Frame-cycle length used by the shader.
    n: f32,
    // Output gamma correction.
    gamma: f32,
    color_intensity: f32,
    // Color pattern components; `color_w` is a fourth channel-like scalar
    // whose meaning is defined in the WGSL shader.
    color_r: f32,
    color_g: f32,
    color_b: f32,
    color_w: f32,
    // Temporal accumulation controls.
    accumulation_speed: f32,
    fade_speed: f32,
    // Treated as a bool by the CPU side: > 0.5 means paused/frozen.
    freeze_accumulation: f32,
    // Pattern shaping constants consumed by the shader.
    pattern_floor_add: f32,
    pattern_temp_add: f32,
    pattern_v_offset: f32,
    pattern_temp_mul1: f32,
    pattern_temp_mul2_3: f32,
    _padding0: f32,
    _padding1: f32,
    _padding2: f32,
}
/// Lets the render kit upload `JfaParams` directly to a GPU uniform buffer.
impl UniformProvider for JfaParams {
    fn as_bytes(&self) -> &[u8] {
        // Equivalent to `bytemuck::bytes_of(self)`: view the single Pod value
        // as a one-element slice, then reinterpret that slice as raw bytes.
        bytemuck::cast_slice(std::slice::from_ref(self))
    }
}
/// Application state for the JFA example: shared plumbing, the multi-pass
/// compute pipeline, and the last applied parameter set.
struct JfaShader {
    // Common render/UI/export helpers shared by all cuneus examples.
    base: RenderKit,
    // Multi-pass compute pipeline (buffer_a -> buffer_b -> buffer_c -> main_image).
    compute_shader: ComputeShader,
    // Parameters currently on the GPU; re-uploaded only when the UI changes them.
    current_params: JfaParams,
}
impl ShaderManager for JfaShader {
    /// Builds the render kit and a four-stage ping-pong compute pipeline with
    /// hot reload, then uploads the default parameter set.
    fn init(core: &Core) -> Self {
        let initial_params = JfaParams {
            a: -2.7,
            b: 0.7,
            c: 0.2,
            d: 0.2,
            scale: 0.3,
            n: 10.0,
            gamma: 2.1,
            color_intensity: 1.0,
            color_r: 1.0,
            color_g: 2.0,
            color_b: 3.0,
            color_w: 4.0,
            accumulation_speed: 0.1,
            fade_speed: 0.99,
            freeze_accumulation: 0.0,
            pattern_floor_add: 1.0,
            pattern_temp_add: 0.1,
            pattern_v_offset: 0.7,
            pattern_temp_mul1: 0.7,
            pattern_temp_mul2_3: 3.0,
            _padding0: 0.0,
            _padding1: 0.0,
            _padding2: 0.0,
        };
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        // Create multipass system: buffer_a -> buffer_b -> buffer_c -> main_image
        let passes = vec![
            PassDescription::new("buffer_a", &["buffer_a"]), // self-feedback
            PassDescription::new("buffer_b", &["buffer_a", "buffer_b"]), // reads buffer_a + self-feedback
            PassDescription::new("buffer_c", &["buffer_a", "buffer_b", "buffer_c"]), // reads ALL 3 buffers
            PassDescription::new("main_image", &["buffer_c"]),
        ];
        let config = ComputeShader::builder()
            .with_entry_point("buffer_a")
            .with_multi_pass(&passes)
            .with_custom_uniforms::<JfaParams>()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_label("JFA Unified")
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/jfa.wgsl"), config);
        // Enable hot reload: watch the on-disk WGSL, falling back to the
        // compiled-in copy as the initial module.
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/jfa.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("JFA Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/jfa.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for JFA shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    /// Per-frame bookkeeping: hot reload polling, export handling, time
    /// uniform update (fixed 60 Hz timestep), FPS stats.
    fn update(&mut self, core: &Core) {
        // Check for hot reload updates
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        self.base.fps_tracker.update();
    }
    /// Propagates a window resize to the resolution uniform and the
    /// compute pipeline's textures.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    /// Draws one frame: dispatch all compute passes, blit the result, then
    /// build the egui window and apply its requests (note: unlike the other
    /// examples, parameter changes here take effect on the NEXT frame because
    /// dispatch happens before the UI is read).
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("JFA Render Encoder"),
            });
        // Execute multi-pass compute shader: buffer_a -> buffer_b -> buffer_c -> main_image
        self.compute_shader.dispatch(&mut encoder, core);
        // Render compute output to screen
        {
            let compute_texture = self.compute_shader.get_output_texture();
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("JFA Display Pass"),
            );
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        // Handle UI and controls
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("JFA - Simplified")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(280.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("JFA Parameters")
                            .default_open(true)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.n, 1.0..=50.0)
                                            .text("N (Frame Cycle)"),
                                    )
                                    .changed();
                                ui.separator();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(
                                            &mut params.accumulation_speed,
                                            0.0..=3.0,
                                        )
                                        .text("Accumulation Speed"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.fade_speed, 0.9..=1.0)
                                            .text("Fade Speed"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Clifford Attractor")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(egui::Slider::new(&mut params.a, -5.0..=5.0).text("a"))
                                    .changed();
                                changed |= ui
                                    .add(egui::Slider::new(&mut params.b, -5.0..=5.0).text("b"))
                                    .changed();
                                changed |= ui
                                    .add(egui::Slider::new(&mut params.c, -5.0..=5.0).text("c"))
                                    .changed();
                                changed |= ui
                                    .add(egui::Slider::new(&mut params.d, -5.0..=5.0).text("d"))
                                    .changed();
                                ui.separator();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.scale, 0.1..=1.0)
                                            .text("Scale"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Colors")
                            .default_open(false)
                            .show(ui, |ui| {
                                // RGB scalars bridged through a temporary
                                // [f32; 3] for egui's color picker widget.
                                ui.horizontal(|ui| {
                                    ui.label("Color Pattern:");
                                    let mut color =
                                        [params.color_r, params.color_g, params.color_b];
                                    if ui.color_edit_button_rgb(&mut color).changed() {
                                        params.color_r = color[0];
                                        params.color_g = color[1];
                                        params.color_b = color[2];
                                        changed = true;
                                    }
                                });
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.color_w, 0.0..=10.0)
                                            .text("Color W"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.color_intensity, 0.1..=3.0)
                                            .text("Color Intensity"),
                                    )
                                    .changed();
                                ui.separator();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.gamma, 0.1..=4.0)
                                            .text("Gamma"),
                                    )
                                    .changed();
                            });
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                        ui.separator();
                        ui.label(format!("Frame: {}", self.compute_shader.current_frame));
                        ui.label("JFA with Clifford Attractor (Simplified)");
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Handle control requests: the pause toggle is mirrored into the
        // shader's freeze_accumulation flag (0.0 / 1.0).
        if controls_request.is_paused != (params.freeze_accumulation > 0.5) {
            params.freeze_accumulation = if controls_request.is_paused { 1.0 } else { 0.0 };
            changed = true;
        }
        self.base.export_manager.apply_ui_request(export_request);
        if controls_request.should_clear_buffers {
            // Reset frame count to restart accumulation
            self.compute_shader.current_frame = 0;
        }
        self.base.apply_control_request(controls_request);
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        if should_start_export {
            self.base.export_manager.start_export();
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(std::iter::once(encoder.finish()));
        output.present();
        Ok(())
    }
    /// Routes window events: egui first (it may consume them), then the
    /// keyboard shortcut handler. Returns `true` when the event was handled.
    fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
        if self
            .base
            .egui_state
            .on_window_event(core.window(), event)
            .consumed
        {
            return true;
        }
        if let WindowEvent::KeyboardInput { event, .. } = event {
            return self
                .base
                .key_handler
                .handle_keyboard_input(core.window(), event);
        }
        false
    }
}
/// Entry point: initialise logging, create the window/event-loop pair, and
/// run the event loop with `JfaShader` as the shader factory.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let (shader_app, ev_loop) = ShaderApp::new("JFA", 800, 600);
    shader_app.run(ev_loop, JfaShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/computecolors.rs | examples/computecolors.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side uniform parameters for the particle-splatting color projection
/// shader.
///
/// `#[repr(C)]` + bytemuck derives give a byte-for-byte GPU upload; the
/// trailing padding fields presumably round the struct up to the WGSL uniform
/// alignment — TODO confirm against `shaders/computecolors.wgsl`.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct SplattingParams {
    // Global animation speed.
    animation_speed: f32,
    // Size of each color splat.
    splat_size: f32,
    // How far particles are scrambled from their source position.
    particle_spread: f32,
    // Splat intensity multiplier.
    intensity: f32,
    // Fraction of source pixels emitted as particles.
    particle_density: f32,
    // Output brightness (UI range 36..=48).
    brightness: f32,
    // Strength of the force pulling particles back to their origin.
    physics_strength: f32,
    // Flow-trail controls.
    trail_length: f32,
    trail_decay: f32,
    flow_strength: f32,
    _padding1: f32,
    _padding2: u32,
}
/// Lets the render kit upload `SplattingParams` directly to a GPU uniform buffer.
impl UniformProvider for SplattingParams {
    fn as_bytes(&self) -> &[u8] {
        // Equivalent to `bytemuck::bytes_of(self)`: view the single Pod value
        // as a one-element slice, then reinterpret that slice as raw bytes.
        bytemuck::cast_slice(std::slice::from_ref(self))
    }
}
/// Application state for the color-projection example: shared plumbing, the
/// multi-stage compute pipeline, and the last applied parameter set.
struct ColorProjection {
    // Common render/UI/export/media helpers shared by all cuneus examples.
    base: RenderKit,
    // Three-stage compute pipeline (clear -> project -> generate).
    compute_shader: ComputeShader,
    // Parameters currently on the GPU; re-uploaded only when the UI changes them.
    current_params: SplattingParams,
}
impl ColorProjection {
    /// Clears every GPU buffer owned by the compute pipeline (including the
    /// atomic accumulation buffer); invoked when the UI requests a reset.
    fn clear_buffers(&mut self, core: &Core) {
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for ColorProjection {
fn init(core: &Core) -> Self {
let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
let base = RenderKit::new(core, &texture_bind_group_layout, None);
let initial_params = SplattingParams {
animation_speed: 1.0,
splat_size: 0.8,
particle_spread: 0.0,
intensity: 2.0,
particle_density: 0.4,
brightness: 36.0,
physics_strength: 0.5,
trail_length: 0.0,
trail_decay: 0.95,
flow_strength: 1.0,
_padding1: 0.0,
_padding2: 0,
};
// Define the multi-stage passes
let passes = vec![
PassDescription::new("clear_buffer", &[]), // Stage 0: Clear atomic buffer
PassDescription::new("project_colors", &[]), // Stage 1: Project colors to 3D space
PassDescription::new("generate_image", &[]), // Stage 2: Generate final image
];
let config = ComputeShader::builder()
.with_entry_point("clear_buffer")
.with_multi_pass(&passes)
.with_input_texture() // Enable input texture support
.with_custom_uniforms::<SplattingParams>()
.with_storage_buffer(StorageBufferSpec::new(
"atomic_buffer",
(core.size.width * core.size.height * 4 * 4) as u64,
))
.with_workgroup_size([16, 16, 1])
.with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
.with_label("Particle Splatting Multi-Pass")
.build();
let mut compute_shader =
ComputeShader::from_builder(core, include_str!("shaders/computecolors.wgsl"), config);
// Enable hot reload
if let Err(e) = compute_shader.enable_hot_reload(
core.device.clone(),
std::path::PathBuf::from("examples/shaders/computecolors.wgsl"),
core.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("ComputeColors Compute Shader"),
source: wgpu::ShaderSource::Wgsl(
include_str!("shaders/computecolors.wgsl").into(),
),
}),
) {
eprintln!("Failed to enable ComputeColors hot reload: {e}");
}
compute_shader.set_custom_params(initial_params, &core.queue);
Self {
base,
compute_shader,
current_params: initial_params,
}
}
fn update(&mut self, core: &Core) {
// Update time
let current_time = self.base.controls.get_time(&self.base.start_time);
let delta = 1.0 / 60.0;
self.compute_shader
.set_time(current_time, delta, &core.queue);
// Update input textures for media processing
self.base.update_current_texture(core, &core.queue);
if let Some(texture_manager) = self.base.get_current_texture_manager() {
self.compute_shader.update_input_texture(
&texture_manager.view,
&texture_manager.sampler,
&core.device,
);
}
self.base.fps_tracker.update();
}
fn resize(&mut self, core: &Core) {
self.base.update_resolution(&core.queue, core.size);
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
let output = core.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = core
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Color Projection Render Encoder"),
});
// Handle UI and controls
let mut params = self.current_params;
let mut changed = false;
let mut should_start_export = false;
let mut export_request = self.base.export_manager.get_ui_request();
let mut controls_request = self
.base
.controls
.get_ui_request(&self.base.start_time, &core.size);
let using_video_texture = self.base.using_video_texture;
let using_hdri_texture = self.base.using_hdri_texture;
let using_webcam_texture = self.base.using_webcam_texture;
let video_info = self.base.get_video_info();
let hdri_info = self.base.get_hdri_info();
let webcam_info = self.base.get_webcam_info();
controls_request.current_fps = Some(self.base.fps_tracker.fps());
let full_output = if self.base.key_handler.show_ui {
self.base.render_ui(core, |ctx| {
ctx.style_mut(|style| {
style.visuals.window_fill =
egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
style
.text_styles
.get_mut(&egui::TextStyle::Body)
.unwrap()
.size = 11.0;
style
.text_styles
.get_mut(&egui::TextStyle::Button)
.unwrap()
.size = 10.0;
});
egui::Window::new("Particle Splatting")
.collapsible(true)
.resizable(true)
.default_width(250.0)
.show(ctx, |ui| {
ShaderControls::render_media_panel(
ui,
&mut controls_request,
using_video_texture,
video_info,
using_hdri_texture,
hdri_info,
using_webcam_texture,
webcam_info,
);
ui.separator();
egui::CollapsingHeader::new("Particles")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.particle_density, 0.1..=1.0)
.text("Density"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.splat_size, 0.1..=2.0)
.text("Splat Size"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.intensity, 0.1..=6.0)
.text("Intensity"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.brightness, 36.0..=48.0)
.text("Brightness"),
)
.changed();
});
egui::CollapsingHeader::new("Effects")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.animation_speed, 0.0..=3.0)
.text("Speed"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.particle_spread, 0.0..=1.0)
.text("Scramble Amount"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.physics_strength, 0.0..=12.0)
.text("Return Force"),
)
.changed();
});
egui::CollapsingHeader::new("Flow Trails")
.default_open(true)
.show(ui, |ui| {
changed |= ui
.add(
egui::Slider::new(&mut params.trail_length, 0.0..=2.0)
.text("Trail Length"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.trail_decay, 0.8..=1.0)
.text("Trail Decay"),
)
.changed();
changed |= ui
.add(
egui::Slider::new(&mut params.flow_strength, 0.0..=3.0)
.text("Flow Strength"),
)
.changed();
});
ui.separator();
ShaderControls::render_controls_widget(ui, &mut controls_request);
ui.separator();
should_start_export =
ExportManager::render_export_ui_widget(ui, &mut export_request);
});
})
} else {
self.base.render_ui(core, |_ctx| {})
};
// Apply controls
self.base.export_manager.apply_ui_request(export_request);
if controls_request.should_clear_buffers {
self.clear_buffers(core);
}
self.base.apply_control_request(controls_request.clone());
self.base.handle_video_requests(core, &controls_request);
self.base.handle_webcam_requests(core, &controls_request);
if should_start_export {
self.base.export_manager.start_export();
}
if changed {
self.current_params = params;
self.compute_shader.set_custom_params(params, &core.queue);
}
// Check for hot reload updates
self.compute_shader.check_hot_reload(&core.device);
// Handle export
self.compute_shader.handle_export(core, &mut self.base);
// Color projection multi-stage dispatch - run all stages every frame for animation
// Stage 0: Clear atomic buffer (16x16 workgroups)
self.compute_shader.dispatch_stage(&mut encoder, core, 0);
// Stage 1: Project colors to 3D space (uses input texture dimensions)
if let Some(texture_manager) = self.base.get_current_texture_manager() {
let input_workgroups = [
texture_manager.texture.width().div_ceil(16),
texture_manager.texture.height().div_ceil(16),
1,
];
self.compute_shader
.dispatch_stage_with_workgroups(&mut encoder, 1, input_workgroups);
} else {
// Fallback to screen size if no input texture
self.compute_shader.dispatch_stage(&mut encoder, core, 1);
}
// Stage 2: Generate final image (16x16 workgroups, screen size)
self.compute_shader.dispatch_stage(&mut encoder, core, 2);
// Display result
{
let mut render_pass = cuneus::Renderer::begin_render_pass(
&mut encoder,
&view,
wgpu::LoadOp::Clear(wgpu::Color::BLACK),
Some("Display Pass"),
);
render_pass.set_pipeline(&self.base.renderer.render_pipeline);
render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
self.base
.handle_render_output(core, &view, full_output, &mut encoder);
core.queue.submit(Some(encoder.finish()));
output.present();
Ok(())
}
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
if let WindowEvent::DroppedFile(path) = event {
if let Err(e) = self.base.load_media(core, path) {
eprintln!("Failed to load dropped file: {e:?}");
}
return true;
}
false
}
}
/// Entry point: initialize media + logging backends, then run the shader app.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // GStreamer must be up before any video/webcam texture can be decoded.
    cuneus::gst::init()?;
    env_logger::init();
    let title = "Particle Splatting";
    let (app, event_loop) = cuneus::ShaderApp::new(title, 800, 600);
    app.run(event_loop, ColorProjection::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/rorschach.rs | examples/rorschach.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side mirror of the Rorschach shader's custom uniform block.
///
/// `#[repr(C)]` plus the bytemuck derives let the whole struct be uploaded as
/// raw bytes; field order must stay in sync with the WGSL struct — do not
/// reorder fields here without changing the shader side as well.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct RorschachParams {
    // Matrix 1
    m1_scale: f32,
    m1_y_scale: f32,
    // Matrix 2
    m2_scale: f32,
    m2_shear: f32,
    m2_shift: f32,
    // Matrix 3
    m3_scale: f32,
    m3_shear: f32,
    m3_shift: f32,
    // Matrix 4
    m4_scale: f32,
    m4_shift: f32,
    // Matrix 5
    m5_scale: f32,
    m5_shift: f32,
    // Animation / camera state (rotation_* and click_state are fed from mouse input)
    time_scale: f32,
    decay: f32,
    intensity: f32,
    rotation_x: f32,
    rotation_y: f32,
    click_state: i32,
    // Tone mapping and rendering controls
    brightness: f32,
    exposure: f32,
    gamma: f32,
    particle_count: f32,
    scale: f32,
    // Depth of field
    dof_amount: f32,
    dof_focal_dist: f32,
    // Two-color gradient, split into scalars to keep the C layout flat
    color1_r: f32,
    color1_g: f32,
    color1_b: f32,
    color2_r: f32,
    color2_g: f32,
    color2_b: f32,
}
impl UniformProvider for RorschachParams {
    /// Raw byte view of the params, used when writing the uniform buffer.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Example app state: shared render kit plus the IFS compute pipeline.
struct RorschachShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Last params uploaded to the GPU; re-sent only when the UI changes them.
    current_params: RorschachParams,
}
impl RorschachShader {
    /// Reset accumulation by zeroing every buffer owned by the compute shader.
    fn clear_buffers(&mut self, core: &Core) {
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for RorschachShader {
    /// Build the render kit, default parameters, and the two-stage compute
    /// pipeline ("Splat" accumulation + "main_image" resolve), then upload the
    /// initial uniform values.
    fn init(core: &Core) -> Self {
        // Create texture bind group layout for displaying compute shader output
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let mut base = RenderKit::new(core, &texture_bind_group_layout, None);
        base.setup_mouse_uniform(core);
        let initial_params = RorschachParams {
            m1_scale: 0.8,
            m1_y_scale: 0.5,
            m2_scale: 0.4,
            m2_shear: 0.2,
            m2_shift: 0.3,
            m3_scale: 0.4,
            m3_shear: 0.2,
            m3_shift: 0.3,
            m4_scale: 0.3,
            m4_shift: 0.2,
            m5_scale: 0.2,
            m5_shift: 0.4,
            time_scale: 0.5,
            decay: 0.0,
            intensity: 0.0,
            rotation_x: 0.0,
            rotation_y: 0.0,
            click_state: 0,
            brightness: 0.003,
            exposure: 1.5,
            gamma: 0.4,
            particle_count: 100000.0,
            scale: 1.0,
            dof_amount: 0.0,
            dof_focal_dist: 0.5,
            color1_r: 1.0,
            color1_g: 0.3,
            color1_b: 0.1,
            color2_r: 0.1,
            color2_g: 0.5,
            color2_b: 1.0,
        };
        let mut config = ComputeShader::builder()
            .with_entry_point("Splat")
            .with_custom_uniforms::<RorschachParams>()
            .with_atomic_buffer()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_label("Rorschach IFS Unified")
            .build();
        // Add second entry point manually (no ping-pong needed)
        config.entry_points.push("main_image".to_string());
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/rorschach.wgsl"), config);
        // Enable hot reload
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/rorschach.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Rorschach Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/rorschach.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for Rorschach shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    /// Per-frame housekeeping that records no GPU work itself:
    /// shader hot reload, export handling, mouse uniform, FPS tracking.
    fn update(&mut self, core: &Core) {
        // Check for hot reload updates
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
        self.base.update_mouse_uniform(&core.queue);
        self.base.fps_tracker.update();
    }
    /// Frame loop: read mouse state into params, build the egui window, apply
    /// UI requests, upload changed params, dispatch the two compute stages
    /// (particle splat then resolve), and blit the result to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        // Work on a copy of the params; commit to GPU only if the UI changed them.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Left button held (bit 0): drive camera rotation from mouse position.
        if self.base.mouse_tracker.uniform.buttons[0] & 1 != 0 {
            params.rotation_x = self.base.mouse_tracker.uniform.position[0];
            params.rotation_y = self.base.mouse_tracker.uniform.position[1];
            params.click_state = 1;
            changed = true;
        } else if self.base.mouse_tracker.uniform.buttons[0] & 2 != 0 {
            params.click_state = 0;
        } else {
            params.click_state = 0;
        }
        // Build the UI (or an empty frame when the UI is hidden).
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Rorschach IFS")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(280.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("IFS Matrices")
                            .default_open(false)
                            .show(ui, |ui| {
                                ui.label("Matrix 1:");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m1_scale, 0.1..=1.2)
                                            .text("Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m1_y_scale, 0.1..=1.2)
                                            .text("Y Scale"),
                                    )
                                    .changed();
                                ui.separator();
                                ui.label("Matrix 2:");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m2_scale, 0.1..=1.0)
                                            .text("Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m2_shear, -0.5..=0.5)
                                            .text("Shear"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m2_shift, -0.5..=0.5)
                                            .text("Shift"),
                                    )
                                    .changed();
                                ui.separator();
                                ui.label("Matrix 3:");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m3_scale, 0.1..=1.0)
                                            .text("Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m3_shear, -0.5..=0.5)
                                            .text("Shear"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m3_shift, -0.5..=0.5)
                                            .text("Shift"),
                                    )
                                    .changed();
                                ui.separator();
                                ui.label("Matrix 4 & 5:");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m4_scale, 0.1..=1.0)
                                            .text("M4 Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m4_shift, -0.5..=0.5)
                                            .text("M4 Shift"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m5_scale, 0.1..=1.0)
                                            .text("M5 Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.m5_shift, -0.5..=0.5)
                                            .text("M5 Shift"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Colors")
                            .default_open(false)
                            .show(ui, |ui| {
                                ui.horizontal(|ui| {
                                    ui.label("Primary:");
                                    let mut color1 =
                                        [params.color1_r, params.color1_g, params.color1_b];
                                    if ui.color_edit_button_rgb(&mut color1).changed() {
                                        params.color1_r = color1[0];
                                        params.color1_g = color1[1];
                                        params.color1_b = color1[2];
                                        changed = true;
                                    }
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Secondary:");
                                    let mut color2 =
                                        [params.color2_r, params.color2_g, params.color2_b];
                                    if ui.color_edit_button_rgb(&mut color2).changed() {
                                        params.color2_r = color2[0];
                                        params.color2_g = color2[1];
                                        params.color2_b = color2[2];
                                        changed = true;
                                    }
                                });
                            });
                        egui::CollapsingHeader::new("Rendering")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(
                                            &mut params.particle_count,
                                            10000.0..=200000.0,
                                        )
                                        .text("Particles"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.scale, 0.1..=3.0)
                                            .text("Scale"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.brightness, 0.001..=0.01)
                                            .text("Brightness"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.exposure, 0.5..=3.0)
                                            .text("Exposure"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.gamma, 0.1..=2.0)
                                            .text("Gamma"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Depth of Field")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.dof_amount, 0.0..=2.0)
                                            .text("DOF Amount"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.dof_focal_dist, 0.0..=1.0)
                                            .text("Focal Distance"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Animation")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.time_scale, 0.0..=2.0)
                                            .text("Animation Speed"),
                                    )
                                    .changed();
                            });
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Apply UI requests collected above.
        self.base.export_manager.apply_ui_request(export_request);
        if controls_request.should_clear_buffers {
            self.clear_buffers(core);
        }
        self.base.apply_control_request(controls_request);
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // Fixed timestep assumption (60 fps) for the shader's delta time.
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        if should_start_export {
            self.base.export_manager.start_export();
        }
        // Stage 0: Generate and splat particles (workgroup size [256, 1, 1])
        let workgroups = (self.current_params.particle_count as u32 / 256).max(1);
        self.compute_shader
            .dispatch_stage_with_workgroups(&mut encoder, 0, [workgroups, 1, 1]);
        // Stage 1: Render to screen (workgroup size [16, 16, 1])
        self.compute_shader.dispatch_stage(&mut encoder, core, 1);
        // Blit the compute output to the swapchain via the fullscreen pipeline.
        {
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Display Pass"),
            );
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
let ui_handled = self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed;
if ui_handled {
return true;
}
if self.base.handle_mouse_input(core, event, false) {
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
    /// Propagate a surface resize to the resolution uniform and the compute
    /// shader's render targets.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
}
/// Entry point for the Rorschach IFS example window.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let title = "Rorschach IFS";
    let (app, event_loop) = cuneus::ShaderApp::new(title, 800, 600);
    app.run(event_loop, RorschachShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/lich.rs | examples/lich.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side mirror of the Lich lightning shader's uniform block.
///
/// `#[repr(C)]` plus the bytemuck derives make the struct uploadable as raw
/// bytes; the `_pad*` fields appear to exist for WGSL struct alignment —
/// keep them when editing the layout (TODO: confirm against lich.wgsl).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct LichParams {
    cloud_density: f32,
    lightning_intensity: f32,
    branch_count: f32,
    feedback_decay: f32,
    base_color: [f32; 3],
    _pad1: f32,
    color_shift: f32,
    spectrum_mix: f32,
    _pad2: [f32; 2],
}
impl UniformProvider for LichParams {
    /// Raw byte view of the params, used when writing the uniform buffer.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Example app state: shared render kit plus the multi-pass lightning pipeline.
struct LichShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Last params uploaded to the GPU; re-sent only when the UI changes them.
    current_params: LichParams,
}
impl LichShader {
    /// Reset the feedback accumulation to a blank state.
    fn clear_buffers(&mut self, core: &Core) {
        // Clear multipass ping-pong buffers
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for LichShader {
    /// Build the render kit and a three-pass compute pipeline
    /// (buffer_a -> buffer_b with self-feedback -> main_image), then upload
    /// the initial uniform values.
    fn init(core: &Core) -> Self {
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        let passes = vec![
            PassDescription::new("buffer_a", &[]),
            PassDescription::new("buffer_b", &["buffer_a", "buffer_b"]), // Self-feedback!
            PassDescription::new("main_image", &["buffer_b"]),
        ];
        let config = ComputeShader::builder()
            .with_multi_pass(&passes)
            .with_custom_uniforms::<LichParams>()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(cuneus::compute::COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_label("Lich Lightning")
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/lich.wgsl"), config);
        // Enable hot reload
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/lich.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Lich Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/lich.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for Lich shader: {e}");
        }
        let initial_params = LichParams {
            cloud_density: 3.0,
            lightning_intensity: 1.0,
            branch_count: 1.0,
            feedback_decay: 0.98,
            base_color: [1.0, 1.0, 1.0],
            _pad1: 0.0,
            color_shift: 2.0,
            spectrum_mix: 0.5,
            _pad2: [0.0; 2],
        };
        // Initialize custom uniform with initial parameters
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    /// Per-frame housekeeping: hot reload, export, time uniform, FPS tracking.
    fn update(&mut self, core: &Core) {
        // Check for hot reload updates
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
        // Update time
        let current_time = self.base.controls.get_time(&self.base.start_time);
        // Fixed timestep assumption (60 fps) for the shader's delta time.
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        self.base.fps_tracker.update();
    }
fn resize(&mut self, core: &Core) {
self.compute_shader
.resize(core, core.size.width, core.size.height);
}
    /// Frame loop: build the UI, dispatch the three-pass compute pipeline,
    /// blit to the swapchain, apply UI requests, and flip the ping-pong
    /// buffers so the feedback pass reads last frame's output.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Lich Render Encoder"),
            });
        // Work on a copy of the params; commit to GPU only if the UI changed them.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Build the UI (or an empty frame when the UI is hidden).
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Lich Lightning")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(300.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("Lightning Parameters")
                            .default_open(true)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.cloud_density, 0.0..=24.0)
                                            .text("Seed"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(
                                            &mut params.lightning_intensity,
                                            0.1..=6.0,
                                        )
                                        .text("Lightning"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.branch_count, 0.0..=2.0)
                                            .text("Branch"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.feedback_decay, 0.1..=1.5)
                                            .text("Decay"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Color Settings")
                            .default_open(false)
                            .show(ui, |ui| {
                                let mut color = params.base_color;
                                if ui.color_edit_button_rgb(&mut color).changed() {
                                    params.base_color = color;
                                    changed = true;
                                }
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.color_shift, 0.1..=20.0)
                                            .text("Temperature"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.spectrum_mix, 0.0..=1.0)
                                            .text("Spectral"),
                                    )
                                    .changed();
                            });
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                        ui.separator();
                        ui.label("Electric lightning with atomic buffer accumulation");
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Run all configured compute passes for this frame.
        self.compute_shader.dispatch(&mut encoder, core);
        // Render compute output to screen
        {
            let compute_texture = self.compute_shader.get_output_texture();
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Lich Display Pass"),
            );
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        // Apply UI changes
        if controls_request.should_clear_buffers {
            self.clear_buffers(core);
        }
        self.base.apply_control_request(controls_request.clone());
        self.base.export_manager.apply_ui_request(export_request);
        if should_start_export {
            self.base.export_manager.start_export();
        }
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(std::iter::once(encoder.finish()));
        output.present();
        // Flip ping-pong buffers for next frame (required for multi-pass)
        self.compute_shader.flip_buffers();
        Ok(())
    }
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
/// Entry point for the Lich lightning example window.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let title = "Lich Lightning";
    let (app, event_loop) = ShaderApp::new(title, 800, 600);
    app.run(event_loop, LichShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/voronoi.rs | examples/voronoi.rs | use cuneus::prelude::ComputeShader;
use cuneus::{Core, RenderKit, ShaderApp, ShaderManager, UniformProvider};
use winit::event::*;
/// CPU-side mirror of the Voronoi shader's uniform block.
/// `#[repr(C)]` + bytemuck derives allow upload as raw bytes; field order
/// must match the WGSL struct.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct ShaderParams {
    scale: f32,
    offset_value: f32,
    cell_index: f32,
    edge_width: f32,
    highlight: f32,
}
impl UniformProvider for ShaderParams {
    /// Raw byte view of the params, used when writing the uniform buffer.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Entry point: initialize media + logging backends, then run the shader app.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // GStreamer backs the video/webcam texture sources used by this example.
    cuneus::gst::init()?;
    env_logger::init();
    let title = "voronoi";
    let (app, event_loop) = ShaderApp::new(title, 800, 600);
    app.run(event_loop, Voronoi::init)
}
/// Example app state: shared render kit plus the single-pass Voronoi compute.
struct Voronoi {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Last params uploaded to the GPU; re-sent only when the UI changes them.
    current_params: ShaderParams,
}
impl ShaderManager for Voronoi {
    /// Build the render kit and a single-entry compute pipeline with an input
    /// texture binding (for media), then upload the initial uniform values.
    fn init(core: &Core) -> Self {
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        let initial_params = ShaderParams {
            scale: 24.0,
            offset_value: -1.0,
            cell_index: 0.0,
            edge_width: 0.1,
            highlight: 0.15,
        };
        let config = ComputeShader::builder()
            .with_entry_point("main")
            .with_input_texture()
            .with_custom_uniforms::<ShaderParams>()
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/voronoi.wgsl"), config);
        // Enable hot reload
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/voronoi.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Voronoi Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/voronoi.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for voronoi shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    /// Per-frame housekeeping: time uniform, media input texture rebinding,
    /// FPS tracking, export, and shader hot reload.
    fn update(&mut self, core: &Core) {
        // Update time
        let current_time = self.base.controls.get_time(&self.base.start_time);
        // Fixed timestep assumption (60 fps) for the shader's delta time.
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        // Update input textures for media processing
        self.base.update_current_texture(core, &core.queue);
        if let Some(texture_manager) = self.base.get_current_texture_manager() {
            // Rebind whatever media texture (image/video/webcam/HDRI) is active.
            self.compute_shader.update_input_texture(
                &texture_manager.view,
                &texture_manager.sampler,
                &core.device,
            );
        }
        self.base.fps_tracker.update();
        // Handle export
        self.compute_shader.handle_export(core, &mut self.base);
        self.compute_shader.check_hot_reload(&core.device);
    }
    /// Frame loop: refresh media textures, build the UI, apply UI requests,
    /// upload changed params, run the compute pass, and blit to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        // Pull the next frame from video/webcam sources when active.
        let _video_updated = if self.base.using_video_texture {
            self.base.update_video_texture(core, &core.queue)
        } else {
            false
        };
        let _webcam_updated = if self.base.using_webcam_texture {
            self.base.update_webcam_texture(core, &core.queue)
        } else {
            false
        };
        // Work on a copy of the params; commit to GPU only if the UI changed them.
        let mut params = self.current_params;
        let mut changed = false;
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Snapshot media state for the UI closure (which cannot borrow self).
        let using_video_texture = self.base.using_video_texture;
        let using_hdri_texture = self.base.using_hdri_texture;
        let using_webcam_texture = self.base.using_webcam_texture;
        let video_info = self.base.get_video_info();
        let hdri_info = self.base.get_hdri_info();
        let webcam_info = self.base.get_webcam_info();
        // Build the UI (or an empty frame when the UI is hidden).
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                });
                egui::Window::new("Voronoi Settings")
                    .collapsible(true)
                    .default_size([300.0, 100.0])
                    .show(ctx, |ui| {
                        ui.collapsing("Media", |ui: &mut egui::Ui| {
                            cuneus::ShaderControls::render_media_panel(
                                ui,
                                &mut controls_request,
                                using_video_texture,
                                video_info,
                                using_hdri_texture,
                                hdri_info,
                                using_webcam_texture,
                                webcam_info,
                            );
                        });
                        ui.separator();
                        // Pattern Settings
                        ui.collapsing("Pattern Settings", |ui| {
                            changed |= ui
                                .add(
                                    egui::Slider::new(&mut params.scale, 1.0..=100.0)
                                        .text("Cell Scale"),
                                )
                                .changed();
                            changed |= ui
                                .add(
                                    egui::Slider::new(&mut params.offset_value, -1.0..=2.0)
                                        .text("Pattern Offset"),
                                )
                                .changed();
                        });
                        // Cell Settings
                        ui.collapsing("Cell Settings", |ui| {
                            changed |= ui
                                .add(
                                    egui::Slider::new(&mut params.cell_index, 0.0..=3.0)
                                        .text("Cell Index"),
                                )
                                .changed();
                        });
                        // Edge Settings
                        ui.collapsing("Edge Settings", |ui| {
                            changed |= ui
                                .add(
                                    egui::Slider::new(&mut params.edge_width, 0.0..=1.0)
                                        .text("Edge Width"),
                                )
                                .changed();
                            changed |= ui
                                .add(
                                    egui::Slider::new(&mut params.highlight, 0.0..=15.0)
                                        .text("Edge Highlight"),
                                )
                                .changed();
                        });
                        ui.separator();
                        cuneus::ShaderControls::render_controls_widget(ui, &mut controls_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Apply UI requests collected above.
        self.base.apply_control_request(controls_request.clone());
        self.base.handle_video_requests(core, &controls_request);
        self.base.handle_webcam_requests(core, &controls_request);
        self.base.handle_hdri_requests(core, &controls_request);
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        // Create command encoder
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // Update time uniform
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let delta_time = 1.0 / 60.0; // Approximate delta time
        self.compute_shader
            .set_time(current_time, delta_time, &core.queue);
        // Dispatch compute shader
        self.compute_shader.dispatch(&mut encoder, core);
        // Render compute output to screen
        {
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Main Render Pass"),
            );
            let compute_texture = self.compute_shader.get_output_texture();
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
    /// Propagate a surface resize to the resolution uniform and the compute
    /// shader's render targets.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/lorenz.rs | examples/lorenz.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
/// CPU-side mirror of the Lorenz attractor shader's uniform block.
/// `#[repr(C)]` + bytemuck derives allow upload as raw bytes; field order
/// must match the WGSL struct.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct LorenzParams {
    // Classic Lorenz system coefficients
    sigma: f32,
    rho: f32,
    beta: f32,
    // Integration and animation controls
    step_size: f32,
    motion_speed: f32,
    // Camera state (rotation_x/y driven by mouse when mouse-look is on)
    rotation_x: f32,
    rotation_y: f32,
    rotation_z: f32,
    click_state: i32,
    // Rendering / tone mapping
    brightness: f32,
    // Two-color gradient, split into scalars to keep the C layout flat
    color1_r: f32,
    color1_g: f32,
    color1_b: f32,
    color2_r: f32,
    color2_g: f32,
    color2_b: f32,
    scale: f32,
    // Depth of field
    dof_amount: f32,
    dof_focal_dist: f32,
    gamma: f32,
    exposure: f32,
    particle_count: f32,
    decay_speed: f32,
}
impl UniformProvider for LorenzParams {
    /// Raw byte view of the params, used when writing the uniform buffer.
    fn as_bytes(&self) -> &[u8] {
        bytemuck::bytes_of(self)
    }
}
/// Example app state: shared render kit, the Lorenz compute pipeline, and a
/// UI toggle for mouse-driven camera rotation.
struct LorenzShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    // Last params uploaded to the GPU; re-sent only when the UI changes them.
    current_params: LorenzParams,
    // When true, rotation_x/rotation_y follow the mouse instead of the sliders.
    mouse_look_enabled: bool,
}
impl LorenzShader {
    /// Reset accumulation by zeroing every buffer owned by the compute shader.
    fn clear_buffers(&mut self, core: &Core) {
        self.compute_shader.clear_all_buffers(core);
    }
}
impl ShaderManager for LorenzShader {
    /// Build the render kit, default parameters, and the two-stage compute
    /// pipeline ("Splat" accumulation + "main_image" resolve), then upload
    /// the initial uniform values.
    fn init(core: &Core) -> Self {
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let initial_params = LorenzParams {
            sigma: 40.0,
            rho: 33.0,
            beta: 30.0 / 3.0,
            step_size: 0.02,
            motion_speed: 2.2,
            rotation_x: 0.0,
            rotation_y: 0.0,
            rotation_z: 0.0,
            click_state: 0,
            brightness: 0.0005,
            color1_r: 1.0,
            color1_g: 0.5,
            color1_b: 0.0,
            color2_r: 0.0,
            color2_g: 0.5,
            color2_b: 1.0,
            scale: 0.013,
            dof_amount: 0.1,
            dof_focal_dist: 0.5,
            gamma: 2.2,
            exposure: 1.0,
            particle_count: 1000.0,
            decay_speed: 8.0,
        };
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        let mut config = ComputeShader::builder()
            .with_entry_point("Splat")
            .with_custom_uniforms::<LorenzParams>()
            .with_atomic_buffer()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_label("Lorenz Unified")
            .build();
        // Second entry point added manually to the builder-produced config.
        config.entry_points.push("main_image".to_string());
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/lorenz.wgsl"), config);
        // Enable hot reload
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/lorenz.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Lorenz Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/lorenz.wgsl").into()),
                }),
        ) {
            // Hot reload is a dev convenience; failure is non-fatal.
            eprintln!("Failed to enable hot reload for Lorenz shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
            mouse_look_enabled: false,
        }
    }
    /// Per-frame housekeeping: hot reload, export (re-running both compute
    /// stages for the exported frame), and FPS tracking.
    fn update(&mut self, core: &Core) {
        // Check for hot reload updates
        self.compute_shader.check_hot_reload(&core.device);
        // Handle export
        self.compute_shader.handle_export_dispatch(
            core,
            &mut self.base,
            |shader, encoder, core| {
                // One thread per particle, 256 threads per workgroup, at least one group.
                let particle_workgroups = (self.current_params.particle_count as u32 / 256).max(1);
                shader.dispatch_stage_with_workgroups(encoder, 0, [particle_workgroups, 1, 1]);
                shader.dispatch_stage(encoder, core, 1);
            },
        );
        self.base.fps_tracker.update();
    }
    /// Propagate a surface resize to the resolution uniform and the compute
    /// shader's render targets.
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    // Per-frame render: build the egui UI, fold UI edits back into the params,
    // run the two-stage compute (particle splat, then screen composite), and
    // blit the compute output to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // Work on a copy; it is committed back via set_custom_params below.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Build the UI only when visible; render_ui returns egui output either way.
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Volumetric Lorenz")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(350.0)
                    .show(ctx, |ui| {
                        egui::CollapsingHeader::new("Attractor Parameters")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.sigma, 0.0..=80.0)
                                            .text("Sigma (σ)"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.rho, 0.0..=100.0)
                                            .text("Rho (ρ)"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.beta, 0.0..=10.0)
                                            .text("Beta (β)"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.step_size, 0.001..=0.02)
                                            .text("Step Size")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.motion_speed, 0.0..=5.0)
                                            .text("Motion Speed"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Camera")
                            .default_open(true)
                            .show(ui, |ui| {
                                ui.checkbox(&mut self.mouse_look_enabled, "Enable Mouse Look");
                                ui.separator();
                                // Manual rotation sliders only make sense while the
                                // mouse is not driving the camera.
                                if !self.mouse_look_enabled {
                                    changed |= ui
                                        .add(
                                            egui::Slider::new(&mut params.rotation_x, -1.0..=1.0)
                                                .text("Rotation X"),
                                        )
                                        .changed();
                                    changed |= ui
                                        .add(
                                            egui::Slider::new(&mut params.rotation_y, -1.0..=1.0)
                                                .text("Rotation Y"),
                                        )
                                        .changed();
                                } else {
                                    ui.label("Mouse Look Active - Move mouse to control camera");
                                }
                                ui.separator();
                                ui.label("Z Rotation");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.rotation_z, -1.0..=1.0)
                                            .text("Rotation Z"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.scale, 0.001..=0.1)
                                            .text("Zoom")
                                            .logarithmic(true),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Rendering")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.brightness, 0.0001..=0.01)
                                            .text("Brightness")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.exposure, 0.1..=5.0)
                                            .text("Exposure"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.gamma, 0.5..=4.0)
                                            .text("Gamma"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(
                                            &mut params.particle_count,
                                            100.0..=5000.0,
                                        )
                                        .text("Particle Count"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.dof_amount, 0.0..=1.0)
                                            .text("DOF Amount"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.dof_focal_dist, 0.0..=1.0)
                                            .text("DOF Focal Distance"),
                                    )
                                    .changed();
                                ui.separator();
                                ui.label("Trail Settings:");
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.decay_speed, 1.0..=50.0)
                                            .text("Decay Speed (higher = faster fade)"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Colors")
                            .default_open(false)
                            .show(ui, |ui| {
                                // Pack the flattened RGB fields into arrays for egui's
                                // color picker, then write back on change.
                                let mut color1 =
                                    [params.color1_r, params.color1_g, params.color1_b];
                                let mut color2 =
                                    [params.color2_r, params.color2_g, params.color2_b];
                                ui.horizontal(|ui| {
                                    ui.label("Color 1:");
                                    if ui.color_edit_button_rgb(&mut color1).changed() {
                                        params.color1_r = color1[0];
                                        params.color1_g = color1[1];
                                        params.color1_b = color1[2];
                                        changed = true;
                                    }
                                });
                                ui.horizontal(|ui| {
                                    ui.label("Color 2:");
                                    if ui.color_edit_button_rgb(&mut color2).changed() {
                                        params.color2_r = color2[0];
                                        params.color2_g = color2[1];
                                        params.color2_b = color2[2];
                                        changed = true;
                                    }
                                });
                                ui.separator();
                                ui.label("Presets:");
                                ui.horizontal(|ui| {
                                    if ui.button("Fire").clicked() {
                                        params.color1_r = 1.0;
                                        params.color1_g = 0.3;
                                        params.color1_b = 0.0;
                                        params.color2_r = 1.0;
                                        params.color2_g = 1.0;
                                        params.color2_b = 0.0;
                                        changed = true;
                                    }
                                    if ui.button("Ocean").clicked() {
                                        params.color1_r = 0.0;
                                        params.color1_g = 0.3;
                                        params.color1_b = 1.0;
                                        params.color2_r = 0.0;
                                        params.color2_g = 0.8;
                                        params.color2_b = 1.0;
                                        changed = true;
                                    }
                                    if ui.button("Purple").clicked() {
                                        params.color1_r = 0.5;
                                        params.color1_g = 0.0;
                                        params.color1_b = 1.0;
                                        params.color2_r = 1.0;
                                        params.color2_g = 0.0;
                                        params.color2_b = 0.5;
                                        changed = true;
                                    }
                                });
                            });
                        ui.separator();
                        ui.separator();
                        ui.label("Controls:");
                        ui.horizontal(|ui| {
                            ui.label("• Mouse:");
                            if self.mouse_look_enabled {
                                ui.colored_label(egui::Color32::GREEN, "Active");
                            } else {
                                ui.colored_label(egui::Color32::RED, "Disabled");
                            }
                        });
                        ui.label("• Right click: Toggle mouse control");
                        ui.label("• H: Toggle UI");
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        self.base.export_manager.apply_ui_request(export_request);
        if controls_request.should_clear_buffers {
            self.clear_buffers(core);
        }
        self.base.apply_control_request(controls_request);
        let current_time = self.base.controls.get_time(&self.base.start_time);
        // Fixed timestep; presumably assumes a 60 Hz display — TODO confirm.
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        // Mouse data is read from tracker and passed through custom uniform parameters
        if self.mouse_look_enabled {
            params.rotation_x = self.base.mouse_tracker.uniform.position[0];
            params.rotation_y = self.base.mouse_tracker.uniform.position[1];
        }
        // Bit 0 of the first buttons word → click_state (1 while held).
        // NOTE(review): assumed to be the left button — confirm against MouseTracker.
        params.click_state = if self.base.mouse_tracker.uniform.buttons[0] & 1 > 0 {
            1
        } else {
            0
        };
        // NOTE(review): unconditionally marking params dirty re-uploads the uniform
        // every frame and makes the `changed` tracking above moot. Possibly
        // intentional (mouse/click fields can change every frame) — worth confirming.
        changed = true;
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        if should_start_export {
            self.base.export_manager.start_export();
        }
        // Stage 0: Generate and splat particles (workgroup size [256, 1, 1])
        let particle_workgroups = (self.current_params.particle_count as u32 / 256).max(1);
        self.compute_shader.dispatch_stage_with_workgroups(
            &mut encoder,
            0,
            [particle_workgroups, 1, 1],
        );
        // Stage 1: Render to screen (workgroup size [16, 16, 1])
        self.compute_shader.dispatch_stage(&mut encoder, core, 1);
        {
            // Fullscreen blit of the compute output texture (triangle strip, 4 verts).
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Display Pass"),
            );
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &self.compute_shader.output_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
if self
.base
.egui_state
.on_window_event(core.window(), event)
.consumed
{
return true;
}
if let WindowEvent::MouseInput { state, button, .. } = event {
if *button == winit::event::MouseButton::Right
&& *state == winit::event::ElementState::Released
{
self.mouse_look_enabled = !self.mouse_look_enabled;
return true;
}
}
if self.mouse_look_enabled && self.base.handle_mouse_input(core, event, false) {
return true;
}
if let WindowEvent::KeyboardInput { event, .. } = event {
return self
.base
.key_handler
.handle_keyboard_input(core.window(), event);
}
false
}
}
// Entry point: set up logging, then hand control to the winit event loop.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let (app, event_loop) = cuneus::ShaderApp::new("Volumetric Lorenz", 800, 600);
    app.run(event_loop, LorenzShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/kuwahara.rs | examples/kuwahara.rs | use cuneus::compute::*;
use cuneus::prelude::*;
use winit::event::WindowEvent;
// GPU uniform block for the Kuwahara filter passes.
// #[repr(C)] + Pod/Zeroable: the layout must match the WGSL params struct,
// so field order and the trailing padding are significant.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct KuwaharaParams {
    radius: f32,          // filter radius (UI slider 2..=16)
    q: f32,               // not exposed in the UI — presumably sector sharpness; confirm against shader
    alpha: f32,           // anisotropy strength (UI "Anisotropy", shown when filter_mode == 1)
    filter_strength: f32, // overall filter strength (UI slider 0..=16)
    sigma_d: f32,         // not exposed in the UI — TODO confirm meaning against shader
    sigma_r: f32,         // not exposed in the UI — TODO confirm meaning against shader
    edge_threshold: f32,  // not exposed in the UI — TODO confirm meaning against shader
    color_enhance: f32,   // post-process color adjustment (UI "Color Filter", 0.5..=2.0)
    blur_samples: f32,    // blur sample count (UI "Samples")
    blur_lod: f32,        // blur mip/LOD level (UI "LOD")
    blur_slod: f32,       // blur step size (UI "Step")
    filter_mode: i32,     // 0 = isotropic, 1 = anisotropic (driven by the "Anisotropy?" checkbox)
    show_tensors: i32,    // always 0 in this example — presumably a tensor-field debug view; confirm
    _padding: [u32; 3],   // pads the struct to 16 four-byte words for uniform layout
}
// Exposes the raw bytes of the uniform struct for GPU buffer uploads.
impl UniformProvider for KuwaharaParams {
    fn as_bytes(&self) -> &[u8] {
        // Sound reinterpretation: the struct derives Pod (fixed repr(C) layout).
        bytemuck::bytes_of(self)
    }
}
// Application state for the Kuwahara example: shared render kit, the
// multi-pass compute pipeline, and the last-applied parameter set.
struct KuwaharaShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    current_params: KuwaharaParams, // mirrors what was last uploaded to the GPU
}
// Wires the four-pass Kuwahara pipeline (structure tensor → tensor field →
// kuwahara filter → composite) into the cuneus application lifecycle.
impl ShaderManager for KuwaharaShader {
    fn init(core: &Core) -> Self {
        let initial_params = KuwaharaParams {
            radius: 5.0,
            q: 1.5,
            alpha: 4.0,
            filter_strength: 0.8,
            sigma_d: 0.8,
            sigma_r: 1.2,
            edge_threshold: 0.2,
            color_enhance: 1.0,
            blur_samples: 15.0,
            blur_lod: 2.0,
            blur_slod: 4.0,
            filter_mode: 1,
            show_tensors: 0,
            _padding: [0; 3],
        };
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        // Each pass lists the passes whose output it reads.
        let passes = vec![
            PassDescription::new("structure_tensor", &[]),
            PassDescription::new("tensor_field", &["structure_tensor"]),
            PassDescription::new("kuwahara_filter", &["tensor_field"]),
            PassDescription::new("main_image", &["kuwahara_filter"]),
        ];
        let config = ComputeShader::builder()
            .with_entry_point("structure_tensor")
            .with_multi_pass(&passes)
            .with_custom_uniforms::<KuwaharaParams>()
            .with_workgroup_size([16, 16, 1])
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .with_channels(2)
            .with_label("Kuwahara Multi-Pass")
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/kuwahara.wgsl"), config);
        // Hot reload is best-effort: a failure only disables live editing.
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/kuwahara.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Kuwahara Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/kuwahara.wgsl").into()),
                }),
        ) {
            eprintln!("Failed to enable hot reload for Kuwahara shader: {e}");
        }
        compute_shader.set_custom_params(initial_params, &core.queue);
        Self {
            base,
            compute_shader,
            current_params: initial_params,
        }
    }
    // Per-frame upkeep: advance time, refresh the source texture (image,
    // video, webcam or HDRI), and service hot reload / export.
    fn update(&mut self, core: &Core) {
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        self.base.update_current_texture(core, &core.queue);
        if let Some(texture_manager) = self.base.get_current_texture_manager() {
            // Channel 0 is the input image the filter reads.
            self.compute_shader.update_channel_texture(
                0,
                &texture_manager.view,
                &texture_manager.sampler,
                &core.device,
                &core.queue,
            );
        }
        self.base.fps_tracker.update();
        self.compute_shader.check_hot_reload(&core.device);
        self.compute_shader.handle_export(core, &mut self.base);
    }
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    // Builds the UI, applies UI-driven state changes, then runs the full
    // multi-pass dispatch and blits the result to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Kuwahara Render Encoder"),
            });
        // Local copy edited by the UI; committed only when something changed.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        // Snapshot media state before the UI closure borrows `self.base`.
        let using_video_texture = self.base.using_video_texture;
        let using_hdri_texture = self.base.using_hdri_texture;
        let using_webcam_texture = self.base.using_webcam_texture;
        let video_info = self.base.get_video_info();
        let hdri_info = self.base.get_hdri_info();
        let webcam_info = self.base.get_webcam_info();
        let current_fps = self.base.fps_tracker.fps();
        controls_request.current_fps = Some(current_fps);
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("Filter")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(320.0)
                    .show(ctx, |ui| {
                        ShaderControls::render_media_panel(
                            ui,
                            &mut controls_request,
                            using_video_texture,
                            video_info,
                            using_hdri_texture,
                            hdri_info,
                            using_webcam_texture,
                            webcam_info,
                        );
                        ui.separator();
                        // filter_mode is an i32 flag; expose it as a checkbox.
                        let mut anisotropy_enabled = params.filter_mode == 1;
                        if ui
                            .checkbox(&mut anisotropy_enabled, "Anisotropy?")
                            .changed()
                        {
                            params.filter_mode = if anisotropy_enabled { 1 } else { 0 };
                            changed = true;
                        }
                        egui::CollapsingHeader::new("Filter Parameters")
                            .default_open(true)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.radius, 2.0..=16.0)
                                            .text("Radius"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.filter_strength, 0.0..=16.0)
                                            .text("Filter Strength"),
                                    )
                                    .changed();
                                if params.filter_mode == 1 {
                                    ui.separator();
                                    ui.label("Anisotropic Controls:");
                                    changed |= ui
                                        .add(
                                            egui::Slider::new(&mut params.alpha, 0.1..=16.0)
                                                .text("Anisotropy"),
                                        )
                                        .changed();
                                }
                            });
                        egui::CollapsingHeader::new("Blur Settings")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.blur_samples, 5.0..=25.0)
                                            .text("Samples"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.blur_lod, 0.0..=5.0)
                                            .text("LOD"),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.blur_slod, 2.0..=5.0)
                                            .text("Step"),
                                    )
                                    .changed();
                            });
                        egui::CollapsingHeader::new("Post-Processing")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.color_enhance, 0.5..=2.0)
                                            .text("Color Filter"),
                                    )
                                    .changed();
                                ui.separator();
                                if ui.button("Reset to Defaults").clicked() {
                                    // NOTE(review): these reset values differ from the
                                    // init() defaults (radius 5.0, q 1.5, alpha 4.0, …).
                                    // Confirm which set is canonical.
                                    params = KuwaharaParams {
                                        radius: 8.0,
                                        q: 8.0,
                                        alpha: 1.0,
                                        filter_strength: 1.0,
                                        sigma_d: 1.0,
                                        sigma_r: 2.0,
                                        edge_threshold: 0.2,
                                        color_enhance: 1.0,
                                        blur_samples: 35.0,
                                        blur_lod: 2.0,
                                        blur_slod: 4.0,
                                        filter_mode: params.filter_mode,
                                        show_tensors: 0,
                                        _padding: [0; 3],
                                    };
                                    changed = true;
                                }
                            });
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                        ui.separator();
                        ui.label(format!(
                            "Resolution: {}x{}",
                            core.size.width, core.size.height
                        ));
                        ui.label(format!("FPS: {current_fps:.1}"));
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Apply the collected UI requests back onto the app state.
        self.base.apply_control_request(controls_request.clone());
        self.base.handle_video_requests(core, &controls_request);
        self.base.handle_webcam_requests(core, &controls_request);
        self.base.handle_hdri_requests(core, &controls_request);
        self.base.export_manager.apply_ui_request(export_request);
        if should_start_export {
            self.base.export_manager.start_export();
        }
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        self.compute_shader.dispatch(&mut encoder, core);
        {
            // Fullscreen blit of the final pass output.
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Kuwahara Display Pass"),
            );
            let compute_texture = self.compute_shader.get_output_texture();
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
    // egui first, then keyboard shortcuts; dropped files become the new input media.
    fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
        if self
            .base
            .egui_state
            .on_window_event(core.window(), event)
            .consumed
        {
            return true;
        }
        if let WindowEvent::KeyboardInput { event, .. } = event {
            return self
                .base
                .key_handler
                .handle_keyboard_input(core.window(), event);
        }
        if let WindowEvent::DroppedFile(path) = event {
            if let Err(e) = self.base.load_media(core, path) {
                eprintln!("Failed to load dropped file: {e:?}");
            }
            return true;
        }
        false
    }
}
// Entry point. GStreamer must be initialized before any video/webcam
// textures can be created.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    cuneus::gst::init()?;
    env_logger::init();
    let (app, event_loop) = ShaderApp::new("Kuwahara Filter", 800, 600);
    app.run(event_loop, KuwaharaShader::init)
}
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
altunenes/cuneus | https://github.com/altunenes/cuneus/blob/4ab4a90169f70265c17a065d0db53bc168aeecc2/examples/gaussian.rs | examples/gaussian.rs | use cuneus::compute::{ComputeShader, ComputeShaderBuilder, PassDescription, StorageBufferSpec, COMPUTE_TEXTURE_FORMAT_RGBA16};
use cuneus::{Core, RenderKit, ShaderApp, ShaderControls, ShaderManager};
use cuneus::{ExportManager, UniformProvider};
use winit::event::*;
// GPU uniform block for the 2D Gaussian-splatting trainer.
// #[repr(C)] + Pod/Zeroable: layout must match the WGSL params struct,
// so field order and padding are significant (20 four-byte words total).
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct GaussianParams {
    num_gaussians: u32,       // active Gaussian count (UI 100..=20000; upper bound matches buffer sizing)
    learning_rate: f32,       // position learning rate (UI "pos LR")
    color_learning_rate: f32, // color learning rate (UI "col LR")
    reset_training: u32,      // 1 requests a re-init; cleared again in render()
    show_target: u32,         // 1 = display the target image instead of the splat
    show_error: u32,          // 1 = display the error visualization
    temperature: f32,         // UI "temp" — effect defined by the shader; confirm
    error_scale: f32,         // gain for the error view (UI "Error Scale")
    min_sigma: f32,           // lower clamp for Gaussian extent
    max_sigma: f32,           // upper clamp for Gaussian extent
    position_noise: f32,      // UI "Position" — presumably init jitter; confirm against shader
    random_seed: u32,         // RNG seed for initialization (UI "seed" / dice button)
    iteration: u32,           // training step counter, incremented once per frame in update()
    sigma_learning_rate: f32, // sigma learning rate (UI "Sigma LR")
    _padding0: u32,           // pad to keep the uniform size/alignment WGSL-compatible
    _padding1: u32,
}
// Starting hyperparameters for a fresh training run.
impl Default for GaussianParams {
    fn default() -> Self {
        Self {
            // Matches `max_gaussians` used to size the storage buffers in init().
            num_gaussians: 20000,
            learning_rate: 0.01,
            color_learning_rate: 0.008,
            reset_training: 0,
            show_target: 0,
            show_error: 0,
            temperature: 1.0,
            error_scale: 2.0,
            min_sigma: 0.001,
            max_sigma: 0.05,
            position_noise: 0.5,
            random_seed: 42,
            iteration: 0,
            sigma_learning_rate: 0.001,
            _padding0: 0,
            _padding1: 0,
        }
    }
}
// Exposes the raw bytes of the uniform struct for GPU buffer uploads.
impl UniformProvider for GaussianParams {
    fn as_bytes(&self) -> &[u8] {
        // Sound reinterpretation: the struct derives Pod (fixed repr(C) layout).
        bytemuck::bytes_of(self)
    }
}
// Application state for the Gaussian-splatting trainer: shared render kit,
// the four-pass training pipeline, and the current hyperparameters.
struct GaussianShader {
    base: RenderKit,
    compute_shader: ComputeShader,
    current_params: GaussianParams, // mirrors what was last uploaded to the GPU
}
// Drives the GPU training loop: one init/clear/render/update cycle per frame.
impl ShaderManager for GaussianShader {
    fn init(core: &Core) -> Self {
        let texture_bind_group_layout = RenderKit::create_standard_texture_layout(&core.device);
        let base = RenderKit::new(core, &texture_bind_group_layout, None);
        // 1. init_gaussians: Initialize/reset Gaussian parameters
        // 2. clear_gradients: Clear gradient buffer before each iteration
        // 3. render_display: Render Gaussians + compute gradients via backprop
        // 4. update_gaussians: Adam to update parameters
        let passes = vec![
            PassDescription::new("init_gaussians", &[]),
            PassDescription::new("clear_gradients", &[]),
            PassDescription::new("render_display", &[]),
            PassDescription::new("update_gaussians", &[]),
        ];
        // Storage buffers for training
        // Each Gaussian: position(2f32) + sigma(3f32) + color(3f32) + opacity(1f32) = 9 f32 (gradient data)
        // GaussianData struct: 10 f32 (includes padding)
        let max_gaussians = 20000u32;
        let gaussian_buffer_size = (max_gaussians * 40) as u64; // 10 f32 × 4 bytes per Gaussian
        let gradient_buffer_size = (max_gaussians * 36) as u64; // 9 f32 × 4 bytes per Gaussian
        let adam_buffer_size = (max_gaussians * 36) as u64; // one moment slot per gradient entry
        let config = ComputeShaderBuilder::new()
            .with_label("Gaussian Splatting Training")
            .with_multi_pass(&passes)
            .with_channels(1)
            .with_custom_uniforms::<GaussianParams>()
            .with_storage_buffer(StorageBufferSpec::new("gaussian_params", gaussian_buffer_size))
            .with_storage_buffer(StorageBufferSpec::new("gradient_buffer", gradient_buffer_size))
            .with_storage_buffer(StorageBufferSpec::new("adam_first_moment", adam_buffer_size))
            .with_storage_buffer(StorageBufferSpec::new("adam_second_moment", adam_buffer_size))
            .with_texture_format(COMPUTE_TEXTURE_FORMAT_RGBA16)
            .build();
        let mut compute_shader =
            ComputeShader::from_builder(core, include_str!("shaders/gaussian.wgsl"), config);
        // Enable hot reload (best-effort: failure only disables live editing)
        if let Err(e) = compute_shader.enable_hot_reload(
            core.device.clone(),
            std::path::PathBuf::from("examples/shaders/gaussian.wgsl"),
            core.device
                .create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some("Gaussian Hot Reload"),
                    source: wgpu::ShaderSource::Wgsl(include_str!("shaders/gaussian.wgsl").into()),
                }),
        ) {
            eprintln!("Failed to enable hot reload for gaussian shader: {e}");
        }
        let initial_params = GaussianParams::default();
        let shader = Self {
            base,
            compute_shader,
            current_params: initial_params,
        };
        shader
            .compute_shader
            .set_custom_params(initial_params, &core.queue);
        shader
    }
    // Per-frame upkeep: time, target texture refresh, and one training step.
    fn update(&mut self, core: &Core) {
        let current_time = self.base.controls.get_time(&self.base.start_time);
        let delta = 1.0 / 60.0;
        self.compute_shader
            .set_time(current_time, delta, &core.queue);
        // Update target texture from media
        self.base.update_current_texture(core, &core.queue);
        if let Some(texture_manager) = self.base.get_current_texture_manager() {
            // Channel 0 is the training target the shader fits against.
            self.compute_shader.update_channel_texture(
                0,
                &texture_manager.view,
                &texture_manager.sampler,
                &core.device,
                &core.queue,
            );
        }
        // Auto-increment iteration counter (paused while a reset is pending)
        if self.current_params.reset_training == 0 {
            self.current_params.iteration = self.current_params.iteration.wrapping_add(1);
            self.compute_shader.set_custom_params(self.current_params, &core.queue);
        }
        self.base.fps_tracker.update();
        self.compute_shader.check_hot_reload(&core.device);
        self.compute_shader.handle_export(core, &mut self.base);
    }
    fn resize(&mut self, core: &Core) {
        self.base.update_resolution(&core.queue, core.size);
        self.compute_shader
            .resize(core, core.size.width, core.size.height);
    }
    // Builds the UI, applies hyperparameter edits and reset requests, then
    // dispatches the training passes and blits the result to the swapchain.
    fn render(&mut self, core: &Core) -> Result<(), wgpu::SurfaceError> {
        let output = core.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut controls_request = self
            .base
            .controls
            .get_ui_request(&self.base.start_time, &core.size);
        controls_request.current_fps = Some(self.base.fps_tracker.fps());
        // Local copy edited by the UI; committed only when something changed.
        let mut params = self.current_params;
        let mut changed = false;
        let mut should_start_export = false;
        let mut export_request = self.base.export_manager.get_ui_request();
        // Snapshot media state before the UI closure borrows `self.base`.
        let using_video_texture = self.base.using_video_texture;
        let using_hdri_texture = self.base.using_hdri_texture;
        let using_webcam_texture = self.base.using_webcam_texture;
        let video_info = self.base.get_video_info();
        let hdri_info = self.base.get_hdri_info();
        let webcam_info = self.base.get_webcam_info();
        let full_output = if self.base.key_handler.show_ui {
            self.base.render_ui(core, |ctx| {
                ctx.style_mut(|style| {
                    style.visuals.window_fill =
                        egui::Color32::from_rgba_premultiplied(0, 0, 0, 180);
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Body)
                        .unwrap()
                        .size = 11.0;
                    style
                        .text_styles
                        .get_mut(&egui::TextStyle::Button)
                        .unwrap()
                        .size = 10.0;
                });
                egui::Window::new("gaussian splatting")
                    .collapsible(true)
                    .resizable(true)
                    .default_width(280.0)
                    .show(ctx, |ui| {
                        ui.label(format!("Iteration: {}", params.iteration));
                        egui::CollapsingHeader::new("Training")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.num_gaussians, 100..=20000)
                                            .text("N Gauss")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.learning_rate, 0.0001..=0.1)
                                            .text("pos LR")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.color_learning_rate, 0.001..=0.2)
                                            .text("col LR")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.temperature, 0.1..=5.0)
                                            .text("temp"),
                                    )
                                    .changed();
                                ui.separator();
                                ui.horizontal(|ui| {
                                    changed |= ui
                                        .add(
                                            egui::Slider::new(&mut params.random_seed, 1..=10000)
                                                .text("seed"),
                                        )
                                        .changed();
                                    if ui.button("🎲").on_hover_text("Randomize seed").clicked() {
                                        // Wall-clock-derived seed; also forces a re-init.
                                        params.random_seed = (std::time::SystemTime::now()
                                            .duration_since(std::time::UNIX_EPOCH)
                                            .unwrap()
                                            .as_millis() % 10000) as u32;
                                        params.reset_training = 1;
                                        changed = true;
                                    }
                                });
                                if ui.button("res training").clicked() {
                                    params.reset_training = 1;
                                    params.iteration = 0;
                                    changed = true;
                                }
                            });
                        egui::CollapsingHeader::new("vis")
                            .default_open(false)
                            .show(ui, |ui| {
                                let mut show_target = params.show_target != 0;
                                if ui.checkbox(&mut show_target, "Show Target").changed() {
                                    params.show_target = if show_target { 1 } else { 0 };
                                    changed = true;
                                }
                                let mut show_error = params.show_error != 0;
                                if ui.checkbox(&mut show_error, "Show Error").changed() {
                                    params.show_error = if show_error { 1 } else { 0 };
                                    changed = true;
                                }
                                if params.show_error != 0 {
                                    changed |= ui
                                        .add(
                                            egui::Slider::new(&mut params.error_scale, 0.5..=10.0)
                                                .text("Error Scale"),
                                        )
                                        .changed();
                                }
                            });
                        egui::CollapsingHeader::new("Advanced")
                            .default_open(false)
                            .show(ui, |ui| {
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.sigma_learning_rate, 0.001..=0.1)
                                            .text("Sigma LR")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.min_sigma, 0.001..=0.05)
                                            .text("Min Sigma")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.max_sigma, 0.02..=0.3)
                                            .text("Max Sigma")
                                            .logarithmic(true),
                                    )
                                    .changed();
                                changed |= ui
                                    .add(
                                        egui::Slider::new(&mut params.position_noise, 0.0..=1.0)
                                            .text("Position"),
                                    )
                                    .changed();
                            });
                        ui.separator();
                        ui.separator();
                        ShaderControls::render_media_panel(
                            ui,
                            &mut controls_request,
                            using_video_texture,
                            video_info,
                            using_hdri_texture,
                            hdri_info,
                            using_webcam_texture,
                            webcam_info,
                        );
                        ui.separator();
                        ShaderControls::render_controls_widget(ui, &mut controls_request);
                        ui.separator();
                        should_start_export =
                            ExportManager::render_export_ui_widget(ui, &mut export_request);
                    });
            })
        } else {
            self.base.render_ui(core, |_ctx| {})
        };
        // Apply the collected UI requests back onto the app state.
        self.base.export_manager.apply_ui_request(export_request);
        self.base.apply_control_request(controls_request.clone());
        self.base.handle_video_requests(core, &controls_request);
        self.base.handle_webcam_requests(core, &controls_request);
        self.base.handle_hdri_requests(core, &controls_request);
        if controls_request.should_clear_buffers || params.reset_training != 0 {
            // Rewind the compute frame counter so the shader can re-run its
            // init pass — presumably init_gaussians keys off frame 0; confirm
            // against gaussian.wgsl.
            self.compute_shader.current_frame = 0;
            self.compute_shader.time_uniform.data.frame = 0;
            self.compute_shader.time_uniform.update(&core.queue);
            params.iteration = 0;
            params.reset_training = 0;
            changed = true;
        }
        if changed {
            self.current_params = params;
            self.compute_shader.set_custom_params(params, &core.queue);
        }
        if should_start_export {
            self.base.export_manager.start_export();
        }
        let mut encoder = core
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Gaussian Render Encoder"),
            });
        self.compute_shader.dispatch(&mut encoder, core);
        {
            // Fullscreen blit of the training pipeline's display output.
            let mut render_pass = cuneus::Renderer::begin_render_pass(
                &mut encoder,
                &view,
                wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                Some("Gaussian Render Pass"),
            );
            let compute_texture = self.compute_shader.get_output_texture();
            render_pass.set_pipeline(&self.base.renderer.render_pipeline);
            render_pass.set_vertex_buffer(0, self.base.renderer.vertex_buffer.slice(..));
            render_pass.set_bind_group(0, &compute_texture.bind_group, &[]);
            render_pass.draw(0..4, 0..1);
        }
        self.base
            .handle_render_output(core, &view, full_output, &mut encoder);
        core.queue.submit(Some(encoder.finish()));
        output.present();
        Ok(())
    }
    // egui first, then keyboard shortcuts; a dropped file becomes the new
    // training target and restarts training from iteration 0.
    fn handle_input(&mut self, core: &Core, event: &WindowEvent) -> bool {
        if self
            .base
            .egui_state
            .on_window_event(core.window(), event)
            .consumed
        {
            return true;
        }
        if let WindowEvent::KeyboardInput { event, .. } = event {
            return self
                .base
                .key_handler
                .handle_keyboard_input(core.window(), event);
        }
        if let WindowEvent::DroppedFile(path) = event {
            if let Err(e) = self.base.load_media(core, path) {
                eprintln!("Failed to load dropped file: {e:?}");
            }
            self.current_params.reset_training = 1;
            self.current_params.iteration = 0;
            self.compute_shader.set_custom_params(self.current_params, &core.queue);
            return true;
        }
        false
    }
}
// Entry point. GStreamer must be initialized before any video/webcam
// textures can be created.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    cuneus::gst::init()?;
    env_logger::init();
    let (app, event_loop) = ShaderApp::new("2D Gaussian Splatting", 450, 350);
    app.run(event_loop, GaussianShader::init)
}
</main>
| rust | MIT | 4ab4a90169f70265c17a065d0db53bc168aeecc2 | 2026-01-04T20:21:00.414612Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.