file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
crates/package-manager/src/storage/models/mod.rs | Rust | mod image_entry;
mod known_package;
mod migration;
mod wit_interface;
pub use image_entry::{ImageEntry, InsertResult};
pub use known_package::KnownPackage;
pub(crate) use known_package::TagType;
pub(crate) use migration::Migrations;
pub use wit_interface::WitInterface;
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
crates/package-manager/src/storage/models/wit_interface.rs | Rust | use rusqlite::Connection;
/// A WIT interface extracted from a WebAssembly component.
#[derive(Debug, Clone)]
pub struct WitInterface {
    /// Database row id; private — read via [`WitInterface::id`].
    id: i64,
    /// The package name (e.g., "wasi:http@0.2.0")
    pub package_name: Option<String>,
    /// The full WIT text representation
    pub wit_text: String,
    /// The world name if available
    pub world_name: Option<String>,
    /// Number of imports
    pub import_count: i32,
    /// Number of exports
    pub export_count: i32,
    /// When this was created
    pub created_at: String,
}
impl WitInterface {
/// Returns the ID of this WIT interface.
#[must_use]
pub fn id(&self) -> i64 {
self.id
}
/// Create a new WitInterface for testing purposes
#[must_use]
pub fn new_for_testing(
id: i64,
package_name: Option<String>,
wit_text: String,
world_name: Option<String>,
import_count: i32,
export_count: i32,
created_at: String,
) -> Self {
Self {
id,
package_name,
wit_text,
world_name,
import_count,
export_count,
created_at,
}
}
/// Insert a new WIT interface and return its ID.
/// Uses content-addressable storage - if the same WIT text already exists, returns existing ID.
pub(crate) fn insert(
conn: &Connection,
wit_text: &str,
package_name: Option<&str>,
world_name: Option<&str>,
import_count: i32,
export_count: i32,
) -> anyhow::Result<i64> {
// Check if this exact WIT text already exists
let existing: Option<i64> = conn
.query_row(
"SELECT id FROM wit_interface WHERE wit_text = ?1",
[wit_text],
|row| row.get(0),
)
.ok();
if let Some(id) = existing {
return Ok(id);
}
// Insert new WIT interface
conn.execute(
"INSERT INTO wit_interface (wit_text, package_name, world_name, import_count, export_count) VALUES (?1, ?2, ?3, ?4, ?5)",
(wit_text, package_name, world_name, import_count, export_count),
)?;
Ok(conn.last_insert_rowid())
}
/// Link an image to a WIT interface.
pub(crate) fn link_to_image(
conn: &Connection,
image_id: i64,
wit_interface_id: i64,
) -> anyhow::Result<()> {
conn.execute(
"INSERT OR IGNORE INTO image_wit_interface (image_id, wit_interface_id) VALUES (?1, ?2)",
(image_id, wit_interface_id),
)?;
Ok(())
}
/// Get WIT interface for an image by image ID.
#[allow(dead_code)]
pub(crate) fn get_for_image(conn: &Connection, image_id: i64) -> anyhow::Result<Option<Self>> {
let result = conn.query_row(
"SELECT w.id, w.package_name, w.wit_text, w.world_name, w.import_count, w.export_count, w.created_at
FROM wit_interface w
JOIN image_wit_interface iwi ON w.id = iwi.wit_interface_id
WHERE iwi.image_id = ?1",
[image_id],
|row| {
Ok(WitInterface {
id: row.get(0)?,
package_name: row.get(1)?,
wit_text: row.get(2)?,
world_name: row.get(3)?,
import_count: row.get(4)?,
export_count: row.get(5)?,
created_at: row.get(6)?,
})
},
);
match result {
Ok(interface) => Ok(Some(interface)),
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e.into()),
}
}
/// Get all WIT interfaces with their associated image references.
pub(crate) fn get_all_with_images(conn: &Connection) -> anyhow::Result<Vec<(Self, String)>> {
let mut stmt = conn.prepare(
"SELECT w.id, w.package_name, w.wit_text, w.world_name, w.import_count, w.export_count, w.created_at,
i.ref_registry || '/' || i.ref_repository || COALESCE(':' || i.ref_tag, '') as reference
FROM wit_interface w
JOIN image_wit_interface iwi ON w.id = iwi.wit_interface_id
JOIN image i ON iwi.image_id = i.id
ORDER BY w.package_name ASC, w.world_name ASC, i.ref_repository ASC",
)?;
let rows = stmt.query_map([], |row| {
Ok((
WitInterface {
id: row.get(0)?,
package_name: row.get(1)?,
wit_text: row.get(2)?,
world_name: row.get(3)?,
import_count: row.get(4)?,
export_count: row.get(5)?,
created_at: row.get(6)?,
},
row.get::<_, String>(7)?,
))
})?;
let mut result = Vec::new();
for row in rows {
result.push(row?);
}
Ok(result)
}
/// Get all unique WIT interfaces.
#[allow(dead_code)]
pub(crate) fn get_all(conn: &Connection) -> anyhow::Result<Vec<Self>> {
let mut stmt = conn.prepare(
"SELECT id, package_name, wit_text, world_name, import_count, export_count, created_at
FROM wit_interface
ORDER BY package_name ASC, world_name ASC",
)?;
let rows = stmt.query_map([], |row| {
Ok(WitInterface {
id: row.get(0)?,
package_name: row.get(1)?,
wit_text: row.get(2)?,
world_name: row.get(3)?,
import_count: row.get(4)?,
export_count: row.get(5)?,
created_at: row.get(6)?,
})
})?;
let mut result = Vec::new();
for row in rows {
result.push(row?);
}
Ok(result)
}
/// Delete a WIT interface by ID (also removes links).
#[allow(dead_code)]
pub(crate) fn delete(conn: &Connection, id: i64) -> anyhow::Result<bool> {
let rows = conn.execute("DELETE FROM wit_interface WHERE id = ?1", [id])?;
Ok(rows > 0)
}
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
crates/package-manager/src/storage/store.rs | Rust | use anyhow::Context;
use std::collections::HashSet;
use std::path::Path;
use super::config::StateInfo;
use super::models::{ImageEntry, InsertResult, KnownPackage, Migrations, TagType, WitInterface};
use super::wit_parser::extract_wit_metadata;
use futures_concurrency::prelude::*;
use oci_client::{Reference, client::ImageData};
use rusqlite::Connection;
/// Calculate the total size in bytes of all files under `path`, recursively.
///
/// Best-effort: directories or entries that cannot be read are silently
/// skipped rather than aborting the scan. Uses an explicit stack instead of
/// recursion (async fns cannot easily recurse).
async fn dir_size(path: &Path) -> u64 {
    let mut size = 0u64;
    let mut pending = vec![path.to_path_buf()];
    while let Some(current) = pending.pop() {
        let Ok(mut entries) = tokio::fs::read_dir(&current).await else {
            continue;
        };
        while let Ok(Some(entry)) = entries.next_entry().await {
            let Ok(metadata) = entry.metadata().await else {
                continue;
            };
            if metadata.is_dir() {
                pending.push(entry.path());
            } else {
                size += metadata.len();
            }
        }
    }
    size
}
/// Handle to the on-disk package store: a SQLite metadata database plus a
/// cacache-backed, content-addressed layer cache.
#[derive(Debug)]
pub(crate) struct Store {
    /// Paths and size information describing the store on disk.
    pub(crate) state_info: StateInfo,
    /// Open SQLite connection to the metadata database.
    conn: Connection,
}
impl Store {
    /// Open the store and run any pending migrations.
    ///
    /// Creates the data/store/db directories if needed, opens the SQLite
    /// database, runs migrations, and computes current on-disk sizes.
    pub(crate) async fn open() -> anyhow::Result<Self> {
        let data_dir = dirs::data_local_dir()
            .context("No local data dir known for the current OS")?
            .join("wasm");
        let store_dir = data_dir.join("store");
        let db_dir = data_dir.join("db");
        let metadata_file = db_dir.join("metadata.db3");
        // TODO: remove me once we're done testing
        // tokio::fs::remove_dir_all(&data_dir).await?;
        // Create all three directories concurrently.
        let a = tokio::fs::create_dir_all(&data_dir);
        let b = tokio::fs::create_dir_all(&store_dir);
        let c = tokio::fs::create_dir_all(&db_dir);
        let _ = (a, b, c)
            .try_join()
            .await
            .context("Could not create config directories on disk")?;
        let conn = Connection::open(&metadata_file)?;
        Migrations::run_all(&conn)?;
        let migration_info = Migrations::get(&conn)?;
        let store_size = dir_size(&store_dir).await;
        // A missing metadata file is reported as size 0 rather than an error.
        let metadata_size = tokio::fs::metadata(&metadata_file)
            .await
            .map(|m| m.len())
            .unwrap_or(0);
        let state_info = StateInfo::new_at(data_dir, migration_info, store_size, metadata_size);
        let store = Self { state_info, conn };
        // Re-scan known package tags after migrations to ensure derived data is up-to-date
        // Suppress errors as they shouldn't prevent the store from opening
        if let Err(e) = store.rescan_known_package_tags() {
            eprintln!("Warning: Failed to re-scan known package tags: {}", e);
        }
        Ok(store)
    }

    /// Insert a pulled image into the database and cache its layers on disk.
    ///
    /// Layers are stored content-addressed by their manifest digest, and a WIT
    /// interface is extracted from each layer on a best-effort basis.
    pub(crate) async fn insert(
        &self,
        reference: &Reference,
        image: ImageData,
    ) -> anyhow::Result<InsertResult> {
        // Prefer the digest pinned in the reference; fall back to the image's own.
        let digest = reference.digest().map(|s| s.to_owned()).or(image.digest);
        let manifest_str = serde_json::to_string(&image.manifest)?;
        // Calculate total size on disk from all layers
        let size_on_disk: u64 = image.layers.iter().map(|l| l.data.len() as u64).sum();
        let (result, image_id) = ImageEntry::insert(
            &self.conn,
            reference.registry(),
            reference.repository(),
            reference.tag(),
            digest.as_deref(),
            &manifest_str,
            size_on_disk,
        )?;
        // Only store layers if this is a new entry
        if result == InsertResult::Inserted {
            // Store layers by their content digest (content-addressable storage)
            // The manifest.layers and image.layers should be in the same order
            if let Some(ref manifest) = image.manifest {
                for (idx, layer) in image.layers.iter().enumerate() {
                    let cache = self.state_info.store_dir();
                    // Use the layer's content digest from the manifest as the key;
                    // fall back to the whole reference when the manifest is short.
                    let fallback_key = reference.whole().to_string();
                    let key = manifest
                        .layers
                        .get(idx)
                        .map(|l| l.digest.as_str())
                        .unwrap_or(&fallback_key);
                    let data = &layer.data;
                    let _integrity = cacache::write(&cache, key, data).await?;
                    // Try to extract WIT interface from this layer
                    if let Some(image_id) = image_id {
                        self.try_extract_wit_interface(image_id, data);
                    }
                }
            }
        }
        Ok(result)
    }

    /// Attempt to extract WIT interface from wasm component bytes.
    /// This is best-effort - if extraction fails, we silently skip.
    fn try_extract_wit_interface(&self, image_id: i64, wasm_bytes: &[u8]) {
        let Some(metadata) = extract_wit_metadata(wasm_bytes) else {
            return; // Not a valid wasm component, skip
        };
        // Insert the WIT interface
        let wit_id = match WitInterface::insert(
            &self.conn,
            &metadata.wit_text,
            metadata.package_name.as_deref(),
            Some(&metadata.world_name),
            metadata.import_count,
            metadata.export_count,
        ) {
            Ok(id) => id,
            Err(_) => return, // Failed to insert, skip
        };
        // Link to image
        let _ = WitInterface::link_to_image(&self.conn, image_id, wit_id);
    }

    /// Returns all currently stored images and their metadata.
    pub(crate) fn list_all(&self) -> anyhow::Result<Vec<ImageEntry>> {
        ImageEntry::get_all(&self.conn)
    }

    /// Deletes an image by its reference.
    /// Only removes cached layers if no other images reference them.
    pub(crate) async fn delete(&self, reference: &Reference) -> anyhow::Result<bool> {
        // Get all images to find which layers are still needed
        let all_entries = ImageEntry::get_all(&self.conn)?;
        // One predicate shared by both the "find target" and "exclude target"
        // steps below, so the two matching rules cannot drift apart.
        let is_target = |e: &ImageEntry| {
            e.ref_registry == reference.registry()
                && e.ref_repository == reference.repository()
                && e.ref_tag.as_deref() == reference.tag()
                && e.ref_digest.as_deref() == reference.digest()
        };
        // Find the entry we're deleting to get its layer digests
        if let Some(entry) = all_entries.iter().find(|&e| is_target(e)) {
            // Collect all layer digests from the entry we're deleting
            let layers_to_delete: HashSet<&str> = entry
                .manifest
                .layers
                .iter()
                .map(|l| l.digest.as_str())
                .collect();
            // Collect all layer digests from OTHER entries (excluding the one we're deleting)
            let layers_still_needed: HashSet<&str> = all_entries
                .iter()
                .filter(|&e| !is_target(e))
                .flat_map(|e| e.manifest.layers.iter().map(|l| l.digest.as_str()))
                .collect();
            // Only delete layers that are not needed by other entries.
            // Cache removal failures are ignored: the DB row is the source of truth.
            for layer_digest in layers_to_delete {
                if !layers_still_needed.contains(layer_digest) {
                    let _ = cacache::remove(self.state_info.store_dir(), layer_digest).await;
                }
            }
        }
        // Delete from database
        ImageEntry::delete_by_reference(
            &self.conn,
            reference.registry(),
            reference.repository(),
            reference.tag(),
            reference.digest(),
        )
    }

    /// Search for known packages by query string.
    pub(crate) fn search_known_packages(&self, query: &str) -> anyhow::Result<Vec<KnownPackage>> {
        KnownPackage::search(&self.conn, query)
    }

    /// Get all known packages.
    pub(crate) fn list_known_packages(&self) -> anyhow::Result<Vec<KnownPackage>> {
        KnownPackage::get_all(&self.conn)
    }

    /// Add or update a known package.
    pub(crate) fn add_known_package(
        &self,
        registry: &str,
        repository: &str,
        tag: Option<&str>,
        description: Option<&str>,
    ) -> anyhow::Result<()> {
        KnownPackage::upsert(&self.conn, registry, repository, tag, description)
    }

    /// Re-scan known package tags to update derived data after migrations.
    /// This re-classifies tag types based on tag naming conventions:
    /// - Tags ending in ".sig" are classified as "signature"
    /// - Tags ending in ".att" are classified as "attestation"
    /// - All other tags are classified as "release"
    ///
    /// Returns the number of tags whose type was changed.
    pub(crate) fn rescan_known_package_tags(&self) -> anyhow::Result<usize> {
        // Get all unique package IDs and their tags
        let mut stmt = self.conn.prepare(
            "SELECT DISTINCT kpt.known_package_id, kpt.tag
             FROM known_package_tag kpt",
        )?;
        let tags: Vec<(i64, String)> = stmt
            .query_map([], |row| {
                Ok((row.get::<_, i64>(0)?, row.get::<_, String>(1)?))
            })?
            .collect::<Result<Vec<_>, _>>()?;
        let mut updated_count = 0;
        // Re-process each tag to ensure it has the correct tag_type
        for (package_id, tag) in tags {
            // Determine the correct tag type using existing logic
            let tag_type = TagType::from_tag(&tag).as_str();
            // Update the tag type if needed; the `tag_type != ?1` guard makes
            // the UPDATE a no-op (0 rows) when the type is already correct.
            let rows_affected = self.conn.execute(
                "UPDATE known_package_tag
                 SET tag_type = ?1
                 WHERE known_package_id = ?2 AND tag = ?3 AND tag_type != ?1",
                (tag_type, package_id, &tag),
            )?;
            if rows_affected > 0 {
                updated_count += 1;
            }
        }
        Ok(updated_count)
    }

    /// Get all WIT interfaces.
    #[allow(dead_code)]
    pub(crate) fn list_wit_interfaces(&self) -> anyhow::Result<Vec<WitInterface>> {
        WitInterface::get_all(&self.conn)
    }

    /// Get all WIT interfaces with their associated component references.
    pub(crate) fn list_wit_interfaces_with_components(
        &self,
    ) -> anyhow::Result<Vec<(WitInterface, String)>> {
        WitInterface::get_all_with_images(&self.conn)
    }
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
crates/package-manager/src/storage/wit_parser.rs | Rust | use wit_parser::decoding::{DecodedWasm, decode};
/// Metadata extracted from a WIT component.
pub(crate) struct WitMetadata {
    /// Fully-qualified package name (e.g. "wasi:http@0.2.0"), when known.
    pub package_name: Option<String>,
    /// Name of the component's world (falls back to the package's short name
    /// when a WIT package declares no world — see `extract_wit_metadata`).
    pub world_name: String,
    /// Number of world imports.
    pub import_count: i32,
    /// Number of world exports.
    pub export_count: i32,
    /// Human-readable WIT-style text rendering of the interface.
    pub wit_text: String,
}
/// Attempt to extract WIT metadata from wasm component bytes.
/// Returns `None` if the bytes are not a valid wasm component.
pub(crate) fn extract_wit_metadata(wasm_bytes: &[u8]) -> Option<WitMetadata> {
    // Try to decode the wasm bytes as a component
    let decoded = decode(wasm_bytes).ok()?;
    // Extract metadata based on decoded type
    let (package_name, world_name, import_count, export_count) = match &decoded {
        DecodedWasm::WitPackage(resolve, package_id) => {
            // The id came from this same decode, so the arena lookup should
            // only fail on a wit-parser bug — hence the expect.
            let package = resolve
                .packages
                .get(*package_id)
                .expect("Package ID should be valid");
            let pkg_name = format!("{}", package.name);
            // Use the first world name if available
            let world = package.worlds.iter().next().map(|(name, world_id)| {
                let w = resolve
                    .worlds
                    .get(*world_id)
                    .expect("World ID should be valid");
                (name.clone(), w.imports.len() as i32, w.exports.len() as i32)
            });
            // No world declared: fall back to the package's short name with
            // zero imports/exports.
            let (world_name, imports, exports) = world.unwrap_or((package.name.name.clone(), 0, 0));
            (Some(pkg_name), world_name, imports, exports)
        }
        DecodedWasm::Component(resolve, world_id) => {
            let world = resolve
                .worlds
                .get(*world_id)
                .expect("World ID should be valid");
            // Try to get package name from world's package reference
            let pkg_name = world
                .package
                .and_then(|pid| resolve.packages.get(pid))
                .map(|p| format!("{}", p.name));
            (
                pkg_name,
                world.name.clone(),
                world.imports.len() as i32,
                world.exports.len() as i32,
            )
        }
    };
    // Generate a WIT text representation from the decoded structure
    let wit_text = generate_wit_text(&decoded);
    Some(WitMetadata {
        package_name,
        world_name,
        import_count,
        export_count,
        wit_text,
    })
}
/// Generate WIT text representation from decoded component.
///
/// NOTE(review): this is a human-readable approximation, not guaranteed to be
/// syntactically valid WIT (e.g. function results are elided as "-> ...").
/// The unit tests below assert on substrings of this exact output format.
fn generate_wit_text(decoded: &DecodedWasm) -> String {
    let resolve = decoded.resolve();
    let mut output = String::new();
    match decoded {
        DecodedWasm::WitPackage(_, package_id) => {
            let package = resolve
                .packages
                .get(*package_id)
                .expect("Package ID should be valid");
            output.push_str(&format!("package {};\n\n", package.name));
            // Print interfaces
            for (name, interface_id) in &package.interfaces {
                output.push_str(&format!("interface {} {{\n", name));
                let interface = resolve
                    .interfaces
                    .get(*interface_id)
                    .expect("Interface ID should be valid");
                // Print types (kind is rendered via Debug of its name string)
                for (type_name, type_id) in &interface.types {
                    let type_def = resolve
                        .types
                        .get(*type_id)
                        .expect("Type ID should be valid");
                    output.push_str(&format!(
                        " type {}: {:?};\n",
                        type_name,
                        type_def.kind.as_str()
                    ));
                }
                // Print functions (parameter types and results are elided)
                for (func_name, func) in &interface.functions {
                    let params: Vec<String> =
                        func.params.iter().map(|(name, _ty)| name.clone()).collect();
                    let has_result = func.result.is_some();
                    output.push_str(&format!(
                        " func {}({}){};\n",
                        func_name,
                        params.join(", "),
                        if has_result { " -> ..." } else { "" }
                    ));
                }
                output.push_str("}\n\n");
            }
            // Print worlds
            for (name, world_id) in &package.worlds {
                let world = resolve
                    .worlds
                    .get(*world_id)
                    .expect("World ID should be valid");
                output.push_str(&format!("world {} {{\n", name));
                for (key, _item) in &world.imports {
                    output.push_str(&format!(" import {};\n", world_key_to_string(key)));
                }
                for (key, _item) in &world.exports {
                    output.push_str(&format!(" export {};\n", world_key_to_string(key)));
                }
                output.push_str("}\n\n");
            }
        }
        DecodedWasm::Component(_, world_id) => {
            let world = resolve
                .worlds
                .get(*world_id)
                .expect("World ID should be valid");
            output.push_str("// Inferred component interface\n");
            output.push_str(&format!("world {} {{\n", world.name));
            for (key, _item) in &world.imports {
                output.push_str(&format!(" import {};\n", world_key_to_string(key)));
            }
            for (key, _item) in &world.exports {
                output.push_str(&format!(" export {};\n", world_key_to_string(key)));
            }
            output.push_str("}\n");
        }
    }
    output
}
/// Convert a WorldKey to a string representation.
///
/// Named keys map to the plain name; interface keys have no name to show, so
/// they fall back to a debug-formatted arena id ("interface-…").
fn world_key_to_string(key: &wit_parser::WorldKey) -> String {
    match key {
        wit_parser::WorldKey::Interface(id) => format!("interface-{id:?}"),
        wit_parser::WorldKey::Name(name) => name.to_owned(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extract_returns_none_for_invalid_bytes() {
        let invalid_bytes = b"not a wasm component";
        assert!(extract_wit_metadata(invalid_bytes).is_none());
    }

    #[test]
    fn extract_returns_none_for_empty_bytes() {
        let empty_bytes: &[u8] = &[];
        assert!(extract_wit_metadata(empty_bytes).is_none());
    }

    #[test]
    fn extract_handles_core_wasm_module() {
        // A minimal valid core WebAssembly module (not a component)
        // Magic number + version + empty sections
        let core_module = [
            0x00, 0x61, 0x73, 0x6d, // \0asm magic
            0x01, 0x00, 0x00, 0x00, // version 1
        ];
        // Core modules may or may not be decoded - just ensure we don't panic
        let _ = extract_wit_metadata(&core_module);
    }

    #[test]
    fn extract_returns_none_for_random_bytes() {
        let random_bytes = [0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x11, 0x22, 0x33];
        assert!(extract_wit_metadata(&random_bytes).is_none());
    }

    #[test]
    fn world_key_name_converts_correctly() {
        let key = wit_parser::WorldKey::Name("my-import".to_string());
        assert_eq!(world_key_to_string(&key), "my-import");
    }

    #[test]
    fn world_key_interface_converts_to_debug_format() {
        use wit_parser::{Interface, Resolve};
        let mut resolve = Resolve::default();
        let interface = Interface {
            name: Some("test".to_string()),
            docs: Default::default(),
            types: Default::default(),
            functions: Default::default(),
            package: None,
            stability: Default::default(),
        };
        let id = resolve.interfaces.alloc(interface);
        let key = wit_parser::WorldKey::Interface(id);
        let result = world_key_to_string(&key);
        // Only the prefix is asserted: the debug format of the arena id is an
        // implementation detail of wit-parser.
        assert!(result.starts_with("interface-"), "got: {}", result);
    }

    #[test]
    fn generate_wit_text_for_wit_package() {
        use wit_parser::{Interface, Package, PackageName, Resolve, World};
        let mut resolve = Resolve::default();
        // Create interface
        let interface = Interface {
            name: Some("greeter".to_string()),
            docs: Default::default(),
            types: Default::default(),
            functions: Default::default(),
            package: None,
            stability: Default::default(),
        };
        let interface_id = resolve.interfaces.alloc(interface);
        // Create world
        let world = World {
            name: "hello".to_string(),
            docs: Default::default(),
            imports: Default::default(),
            exports: Default::default(),
            includes: Default::default(),
            include_names: Default::default(),
            package: None,
            stability: Default::default(),
        };
        let world_id = resolve.worlds.alloc(world);
        // Create package
        let package = Package {
            name: PackageName {
                namespace: "test".to_string(),
                name: "example".to_string(),
                version: None,
            },
            docs: Default::default(),
            interfaces: [("greeter".to_string(), interface_id)]
                .into_iter()
                .collect(),
            worlds: [("hello".to_string(), world_id)].into_iter().collect(),
        };
        let package_id = resolve.packages.alloc(package);
        // Update back-references
        resolve.interfaces[interface_id].package = Some(package_id);
        resolve.worlds[world_id].package = Some(package_id);
        // Create decoded structure directly (without encoding to binary)
        let decoded = DecodedWasm::WitPackage(resolve, package_id);
        let wit_text = generate_wit_text(&decoded);
        assert!(
            wit_text.contains("package test:example"),
            "should contain package name, got: {}",
            wit_text
        );
        assert!(
            wit_text.contains("interface greeter"),
            "should contain interface name, got: {}",
            wit_text
        );
        assert!(
            wit_text.contains("world hello"),
            "should contain world name, got: {}",
            wit_text
        );
    }

    #[test]
    fn generate_wit_text_for_component() {
        use wit_parser::{Resolve, World};
        let mut resolve = Resolve::default();
        // Create a world for a component
        let world = World {
            name: "my-component".to_string(),
            docs: Default::default(),
            imports: Default::default(),
            exports: Default::default(),
            includes: Default::default(),
            include_names: Default::default(),
            package: None,
            stability: Default::default(),
        };
        let world_id = resolve.worlds.alloc(world);
        let decoded = DecodedWasm::Component(resolve, world_id);
        let wit_text = generate_wit_text(&decoded);
        assert!(
            wit_text.contains("// Inferred component interface"),
            "should have component comment, got: {}",
            wit_text
        );
        assert!(
            wit_text.contains("world my-component"),
            "should contain world name, got: {}",
            wit_text
        );
    }

    #[test]
    fn generate_wit_text_with_imports_and_exports() {
        use wit_parser::{Function, FunctionKind, Resolve, World, WorldItem, WorldKey};
        let mut resolve = Resolve::default();
        let mut world = World {
            name: "test-world".to_string(),
            docs: Default::default(),
            imports: Default::default(),
            exports: Default::default(),
            includes: Default::default(),
            include_names: Default::default(),
            package: None,
            stability: Default::default(),
        };
        // Add named imports and exports using functions (which don't need TypeIds)
        world.imports.insert(
            WorldKey::Name("read-stdin".to_string()),
            WorldItem::Function(Function {
                name: "read-stdin".to_string(),
                kind: FunctionKind::Freestanding,
                params: vec![],
                result: None,
                docs: Default::default(),
                stability: Default::default(),
            }),
        );
        world.exports.insert(
            WorldKey::Name("run".to_string()),
            WorldItem::Function(Function {
                name: "run".to_string(),
                kind: FunctionKind::Freestanding,
                params: vec![],
                result: None,
                docs: Default::default(),
                stability: Default::default(),
            }),
        );
        let world_id = resolve.worlds.alloc(world);
        let decoded = DecodedWasm::Component(resolve, world_id);
        let wit_text = generate_wit_text(&decoded);
        assert!(
            wit_text.contains("import read-stdin"),
            "should contain import, got: {}",
            wit_text
        );
        assert!(
            wit_text.contains("export run"),
            "should contain export, got: {}",
            wit_text
        );
    }
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
crates/wasm-detector/src/lib.rs | Rust | //! A library to detect local `.wasm` files in a repository.
//!
//! This crate provides functionality to find WebAssembly files while:
//! - Respecting `.gitignore` rules
//! - Including well-known `.wasm` locations that are typically ignored
//! (e.g., `target/wasm32-*`, `pkg/`, `dist/`)
//!
//! # Example
//!
//! ```no_run
//! use wasm_detector::WasmDetector;
//! use std::path::Path;
//!
//! let detector = WasmDetector::new(Path::new("."));
//! for result in detector {
//! match result {
//! Ok(entry) => println!("Found: {}", entry.path().display()),
//! Err(e) => eprintln!("Error: {}", e),
//! }
//! }
//! ```
use ignore::WalkBuilder;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
/// Well-known directories that typically contain `.wasm` files but are often ignored.
///
/// These directories are scanned separately without respecting `.gitignore` rules
/// to ensure important wasm output locations are always included.
pub const WELL_KNOWN_WASM_DIRS: &[&str] = &[
    // Rust wasm targets (the target directory is scanned for wasm32-* subdirs)
    "target",
    // wasm-pack output
    "pkg",
    // JavaScript/jco output
    "dist",
];
/// Patterns to match within the target directory for wasm-specific subdirectories.
const TARGET_WASM_PREFIXES: &[&str] = &["wasm32-"];
/// A discovered WebAssembly file entry.
#[derive(Debug, Clone)]
pub struct WasmEntry {
    path: PathBuf,
}

impl WasmEntry {
    /// Wrap a discovered path in a `WasmEntry`.
    fn new(path: PathBuf) -> Self {
        Self { path }
    }

    /// Creates a new WasmEntry for testing purposes.
    #[cfg(any(test, feature = "test-helpers"))]
    #[must_use]
    pub fn new_for_testing(path: PathBuf) -> Self {
        Self::new(path)
    }

    /// Returns the path to the `.wasm` file.
    #[must_use]
    pub fn path(&self) -> &Path {
        self.path.as_path()
    }

    /// Returns the file name of the `.wasm` file.
    #[must_use]
    pub fn file_name(&self) -> Option<&str> {
        self.path.file_name()?.to_str()
    }

    /// Consumes the entry and returns the underlying path.
    #[must_use]
    pub fn into_path(self) -> PathBuf {
        let Self { path } = self;
        path
    }
}
/// A detector that finds `.wasm` files in a directory tree.
///
/// The detector:
/// - Respects `.gitignore` rules by default
/// - Automatically includes well-known `.wasm` locations that are typically ignored
/// - Returns an iterator over discovered `.wasm` files
///
/// # Example
///
/// ```no_run
/// use wasm_detector::WasmDetector;
/// use std::path::Path;
///
/// let detector = WasmDetector::new(Path::new("."));
/// let wasm_files: Vec<_> = detector.into_iter().filter_map(Result::ok).collect();
/// println!("Found {} wasm files", wasm_files.len());
/// ```
#[derive(Debug, Clone)]
pub struct WasmDetector {
    /// Root directory the search starts from.
    root: PathBuf,
    /// Whether hidden files/directories are included (default: false).
    include_hidden: bool,
    /// Whether symbolic links are followed (default: false).
    follow_symlinks: bool,
}
impl WasmDetector {
/// Create a new detector that will search from the given root directory.
#[must_use]
pub fn new(root: &Path) -> Self {
Self {
root: root.to_path_buf(),
include_hidden: false,
follow_symlinks: false,
}
}
/// Set whether to include hidden files and directories.
///
/// By default, hidden files are excluded.
#[must_use]
pub fn include_hidden(mut self, include: bool) -> Self {
self.include_hidden = include;
self
}
/// Set whether to follow symbolic links.
///
/// By default, symbolic links are not followed.
#[must_use]
pub fn follow_symlinks(mut self, follow: bool) -> Self {
self.follow_symlinks = follow;
self
}
/// Detect `.wasm` files and return all results as a vector.
///
/// This is a convenience method that collects all results.
/// For large directories, consider using the iterator interface instead.
///
/// # Errors
///
/// Returns an error if the detection fails to complete.
pub fn detect(&self) -> Result<Vec<WasmEntry>, ignore::Error> {
self.into_iter().collect()
}
/// Find all well-known wasm directories that exist in the root.
fn find_well_known_dirs(&self) -> Vec<PathBuf> {
let mut dirs = Vec::new();
// Check for pkg/ and dist/ directories
for dir_name in &["pkg", "dist"] {
let dir_path = self.root.join(dir_name);
if dir_path.is_dir() {
dirs.push(dir_path);
}
}
// Check for target/wasm32-* directories
let target_dir = self.root.join("target");
if target_dir.is_dir()
&& let Ok(entries) = std::fs::read_dir(&target_dir)
{
for entry in entries.filter_map(Result::ok) {
let path = entry.path();
if path.is_dir()
&& let Some(name) = path.file_name().and_then(|n| n.to_str())
{
for prefix in TARGET_WASM_PREFIXES {
if name.starts_with(prefix) {
dirs.push(path);
break;
}
}
}
}
}
dirs
}
}
impl IntoIterator for WasmDetector {
    type Item = Result<WasmEntry, ignore::Error>;
    type IntoIter = WasmDetectorIter;

    /// Consume the detector and start walking. Walkers are built eagerly
    /// inside `WasmDetectorIter::new`.
    fn into_iter(self) -> Self::IntoIter {
        WasmDetectorIter::new(self)
    }
}
impl IntoIterator for &WasmDetector {
    type Item = Result<WasmEntry, ignore::Error>;
    type IntoIter = WasmDetectorIter;

    /// Borrowing form: clones the detector's configuration (cheap — one path
    /// and two booleans) so iteration does not consume the detector.
    fn into_iter(self) -> Self::IntoIter {
        WasmDetectorIter::new(self.clone())
    }
}
/// Iterator over discovered `.wasm` files.
///
/// This iterator combines results from multiple walks:
/// 1. A main walk that respects `.gitignore`
/// 2. Additional walks for well-known directories (ignoring `.gitignore`)
pub struct WasmDetectorIter {
    /// The main walker that respects gitignore
    main_walker: ignore::Walk,
    /// Walkers for well-known directories (ignoring gitignore)
    well_known_walkers: Vec<ignore::Walk>,
    /// Current index in well_known_walkers
    current_well_known_idx: usize,
    /// Set of paths already seen (to avoid duplicates between the walks)
    seen_paths: HashSet<PathBuf>,
    /// Whether we've finished the main walk
    main_walk_done: bool,
}
impl std::fmt::Debug for WasmDetectorIter {
    /// Manual Debug impl: `ignore::Walk` does not implement Debug, so only
    /// the iterator's bookkeeping state is shown (non-exhaustively).
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut state = formatter.debug_struct("WasmDetectorIter");
        state.field("main_walk_done", &self.main_walk_done);
        state.field("current_well_known_idx", &self.current_well_known_idx);
        state.field("seen_paths_count", &self.seen_paths.len());
        state.finish_non_exhaustive()
    }
}
impl WasmDetectorIter {
    /// Build all walkers up front from the detector's configuration.
    fn new(detector: WasmDetector) -> Self {
        // Build the main walker that respects gitignore
        let main_walker = WalkBuilder::new(&detector.root)
            .hidden(!detector.include_hidden)
            .follow_links(detector.follow_symlinks)
            .git_ignore(true)
            .git_global(true)
            .git_exclude(true)
            .build();
        // Build walkers for well-known directories (ignoring gitignore)
        let well_known_dirs = detector.find_well_known_dirs();
        let well_known_walkers: Vec<_> = well_known_dirs
            .into_iter()
            .map(|dir| {
                WalkBuilder::new(dir)
                    .hidden(!detector.include_hidden)
                    .follow_links(detector.follow_symlinks)
                    .git_ignore(false) // Don't respect gitignore for well-known dirs
                    .git_global(false)
                    .git_exclude(false)
                    .build()
            })
            .collect();
        Self {
            main_walker,
            well_known_walkers,
            current_well_known_idx: 0,
            seen_paths: HashSet::new(),
            main_walk_done: false,
        }
    }

    /// Try to get the next .wasm file from the main walker.
    /// Sets `main_walk_done` once the walker is exhausted.
    fn next_from_main(&mut self) -> Option<Result<WasmEntry, ignore::Error>> {
        loop {
            match self.main_walker.next() {
                Some(Ok(entry)) => {
                    let path = entry.path();
                    if path.is_file() && path.extension().is_some_and(|ext| ext == "wasm") {
                        let path_buf = path.to_path_buf();
                        // Record the path so the well-known walks don't yield
                        // the same file again.
                        self.seen_paths.insert(path_buf.clone());
                        return Some(Ok(WasmEntry::new(path_buf)));
                    }
                    // Continue to next entry
                }
                Some(Err(e)) => return Some(Err(e)),
                None => {
                    self.main_walk_done = true;
                    return None;
                }
            }
        }
    }

    /// Try to get the next .wasm file from well-known walkers.
    /// Walkers are drained one at a time, tracked by `current_well_known_idx`.
    fn next_from_well_known(&mut self) -> Option<Result<WasmEntry, ignore::Error>> {
        while self.current_well_known_idx < self.well_known_walkers.len() {
            if let Some(walker) = self.well_known_walkers.get_mut(self.current_well_known_idx) {
                loop {
                    match walker.next() {
                        Some(Ok(entry)) => {
                            let path = entry.path();
                            if path.is_file() && path.extension().is_some_and(|ext| ext == "wasm") {
                                let path_buf = path.to_path_buf();
                                // Skip if we've already seen this path
                                if self.seen_paths.contains(&path_buf) {
                                    continue;
                                }
                                self.seen_paths.insert(path_buf.clone());
                                return Some(Ok(WasmEntry::new(path_buf)));
                            }
                            // Continue to next entry
                        }
                        Some(Err(e)) => return Some(Err(e)),
                        None => {
                            // Move to next well-known walker
                            self.current_well_known_idx += 1;
                            break;
                        }
                    }
                }
            } else {
                // Defensive: unreachable given the `while` guard, but advance
                // rather than risk looping forever.
                self.current_well_known_idx += 1;
            }
        }
        None
    }
}
impl Iterator for WasmDetectorIter {
    type Item = Result<WasmEntry, ignore::Error>;

    /// Yields the next discovered `.wasm` file: the gitignore-aware main walk
    /// is drained first, then the well-known-directory walks.
    fn next(&mut self) -> Option<Self::Item> {
        // First, exhaust the main walker
        if !self.main_walk_done {
            if let Some(found) = self.next_from_main() {
                return Some(found);
            }
        }
        // Then, go through well-known directories
        self.next_from_well_known()
    }
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
crates/wasm-detector/tests/tests.rs | Rust | //! Integration tests for the wasm-detector crate.
use std::fs::{self, File};
use tempfile::TempDir;
use wasm_detector::WasmDetector;
/// Create a test directory structure with some .wasm files
fn setup_test_dir() -> TempDir {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let root = temp_dir.path();

    // Fixture .wasm files covering regular source, cargo target output,
    // wasm-pack (pkg) output, and jco (dist) output layouts.
    let wasm_files = [
        "src/module.wasm",
        "target/wasm32-wasip2/release/app.wasm",
        "target/wasm32-unknown-unknown/debug/lib.wasm",
        "pkg/my_crate_bg.wasm",
        "dist/component.wasm",
    ];
    for rel in wasm_files {
        let path = root.join(rel);
        fs::create_dir_all(path.parent().unwrap()).unwrap();
        File::create(path).unwrap();
    }

    // A non-.wasm file, to make sure the detector filters by extension.
    File::create(root.join("src/main.rs")).unwrap();

    temp_dir
}
#[test]
fn test_detector_finds_wasm_files() {
    let temp_dir = setup_test_dir();
    let found: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();

    // Should find all .wasm files
    assert!(
        found.len() >= 5,
        "Expected at least 5 .wasm files, found {}",
        found.len()
    );

    // Every hit must carry the .wasm extension.
    for entry in &found {
        assert!(
            entry.path().extension().is_some_and(|e| e == "wasm"),
            "Expected .wasm extension for {:?}",
            entry.path()
        );
    }
}
#[test]
fn test_detector_finds_target_wasm_files() {
    let temp_dir = setup_test_dir();
    let results: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();

    // Count hits living under a `target` directory.
    let target_count = results
        .iter()
        .filter(|e| e.path().to_string_lossy().contains("target"))
        .count();
    assert!(
        target_count >= 2,
        "Expected at least 2 files in target directory, found {}",
        target_count
    );
}
#[test]
fn test_detector_finds_pkg_wasm_files() {
    let temp_dir = setup_test_dir();
    let results: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();

    // Exactly one .wasm file lives under pkg/ in the fixture.
    let pkg_count = results
        .iter()
        .filter(|e| e.path().to_string_lossy().contains("pkg"))
        .count();
    assert_eq!(pkg_count, 1, "Expected 1 file in pkg directory");
}
#[test]
fn test_detector_finds_dist_wasm_files() {
    let temp_dir = setup_test_dir();
    let results: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();

    // Exactly one .wasm file lives under dist/ in the fixture.
    let dist_count = results
        .iter()
        .filter(|e| e.path().to_string_lossy().contains("dist"))
        .count();
    assert_eq!(dist_count, 1, "Expected 1 file in dist directory");
}
#[test]
fn test_wasm_entry_methods() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    File::create(temp_dir.path().join("module.wasm")).unwrap();

    let results: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();
    assert_eq!(results.len(), 1);

    // Checked access instead of indexing; the assert above guarantees one hit.
    let entry = results.first().expect("exactly one entry");
    assert!(entry.path().ends_with("module.wasm"));
    assert_eq!(entry.file_name(), Some("module.wasm"));
}
/// End-to-end check that `.gitignore` rules hide ordinary paths while the
/// well-known output directories (e.g. `target/`) are still scanned.
/// Requires a `git` binary on PATH to initialize the repository.
#[test]
fn test_detector_with_gitignore() {
    use std::process::Command;

    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let root = temp_dir.path();

    // Initialize a git repository so that .gitignore is respected
    Command::new("git")
        .args(["init"])
        .current_dir(root)
        .output()
        .expect("Failed to initialize git repository");

    // Create a .gitignore that ignores the "ignored" directory
    fs::write(root.join(".gitignore"), "ignored/\n").unwrap();

    // Create .wasm files in various locations
    fs::create_dir_all(root.join("src")).unwrap();
    File::create(root.join("src/visible.wasm")).unwrap();
    fs::create_dir_all(root.join("ignored")).unwrap();
    File::create(root.join("ignored/hidden.wasm")).unwrap();

    // Create files in well-known directories (should be included despite gitignore)
    fs::create_dir_all(root.join("target/wasm32-wasip2")).unwrap();
    File::create(root.join("target/wasm32-wasip2/app.wasm")).unwrap();

    let detector = WasmDetector::new(root);
    let results: Vec<_> = detector.into_iter().filter_map(Result::ok).collect();

    // Should find src/visible.wasm and target/wasm32-wasip2/app.wasm
    // but NOT ignored/hidden.wasm
    let paths: Vec<_> = results.iter().map(|e| e.path().to_owned()).collect();
    assert!(
        paths.iter().any(|p| p.ends_with("visible.wasm")),
        "Should find visible.wasm"
    );
    assert!(
        paths.iter().any(|p| p.ends_with("app.wasm")),
        "Should find app.wasm in target"
    );
    assert!(
        !paths.iter().any(|p| p.ends_with("hidden.wasm")),
        "Should NOT find hidden.wasm (gitignored)"
    );
}
#[test]
fn test_detect_convenience_method() {
    let temp_dir = setup_test_dir();
    // `detect()` is the eager counterpart of the iterator API.
    let results = WasmDetector::new(temp_dir.path())
        .detect()
        .expect("Detect should succeed");
    assert!(
        results.len() >= 5,
        "Expected at least 5 .wasm files, found {}",
        results.len()
    );
}
#[test]
fn test_detector_empty_directory() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let found: Vec<_> = WasmDetector::new(temp_dir.path())
        .into_iter()
        .filter_map(Result::ok)
        .collect();
    assert!(
        found.is_empty(),
        "Empty directory should yield no results"
    );
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
xtask/src/main.rs | Rust | //! xtask - Build automation and task orchestration for the wasm project
//!
//! This binary provides a unified interface for running common development tasks
//! like testing, linting, and formatting checks.
use std::process::Command;
use anyhow::Result;
use clap::Parser;
/// Top-level task selector for the `cargo xtask` pattern; each variant is a
/// subcommand dispatched by `main`.
#[derive(Parser)]
#[command(name = "xtask")]
#[command(about = "Build automation and task orchestration")]
enum Xtask {
    /// Run tests, clippy, and formatting checks
    Test,
}
/// Entry point: parse the CLI and dispatch to the selected task.
fn main() -> Result<()> {
    match Xtask::parse() {
        Xtask::Test => run_tests(),
    }
}
/// Run the full check suite (test, clippy, fmt) in order, failing fast.
fn run_tests() -> Result<()> {
    // Table of (banner, cargo args) pairs, executed in order.
    let steps: [(&str, &[&str]); 3] = [
        ("Running cargo test...", &["test", "--all"]),
        ("\nRunning cargo clippy...", &["clippy", "--all", "--", "-D", "warnings"]),
        ("\nRunning cargo fmt check...", &["fmt", "--all", "--", "--check"]),
    ];
    for (banner, args) in steps {
        println!("{banner}");
        run_command("cargo", args)?;
    }
    println!("\n✓ All checks passed!");
    Ok(())
}
/// Spawn `cmd` with `args`, wait for it, and convert a non-zero exit status
/// into an error carrying the exit code.
fn run_command(cmd: &str, args: &[&str]) -> Result<()> {
    let status = Command::new(cmd).args(args).status()?;
    if status.success() {
        Ok(())
    } else {
        anyhow::bail!("{} failed with exit code: {:?}", cmd, status.code())
    }
}
| yoshuawuyts/wasm | 0 | Unified developer tools for WebAssembly | Rust | yoshuawuyts | Yosh | |
src/lib.rs | Rust | //! Wasm CLI runner
//!
//! # Examples
//!
//! ```
//! // tbi
//! ```
#![forbid(unsafe_code, rust_2018_idioms)]
#![deny(missing_debug_implementations, nonstandard_style)]
#![warn(missing_docs, future_incompatible, unreachable_pub)]
| yoshuawuyts/wasm-cli-runner | 1 | Rust | yoshuawuyts | Yosh | ||
src/main.rs | Rust | use std::io;
use std::path::PathBuf;
use clap::{Command, Parser};
/// Command-line arguments for the runner.
#[derive(clap::Parser)]
#[command(version)]
struct Arg {
    /// The path to a .wasm binary
    path: PathBuf,
}
/// CLI entry point: validate the given .wasm path, then assemble and parse
/// the dynamic subcommand interface.
fn main() -> io::Result<()> {
    let Arg { path } = Arg::parse();
    // Fail fast with an io::Error if the path does not exist on disk; the
    // canonical form itself is not needed yet.
    path.canonicalize()?;
    let commands = vec![make_command("bar"), make_command("foo")];
    let mut cli = Command::new("test")
        .subcommand_required(true)
        .arg_required_else_help(true);
    for cmd in commands {
        cli = cli.subcommand(cmd);
    }
    // Augment with derived subcommands. The matches are not inspected yet, so
    // no binding is kept (avoids an unused-variable warning).
    cli.get_matches();
    Ok(())
}
/// Build a stub subcommand with the given name.
fn make_command(name: &'static str) -> Command {
    // `Command::new` and `about` accept owned-convertible values directly;
    // the previous `&name` / `&format!(..)` forms added needless borrows
    // (of a `&'static str` and of a temporary `String`, respectively).
    Command::new(name).about(format!("the {name} command"))
}
| yoshuawuyts/wasm-cli-runner | 1 | Rust | yoshuawuyts | Yosh | ||
tests/cli.rs | Rust | use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use std::process::Command;
/// Build a `Command` that invokes the compiled `wasm-cli-runner` binary.
fn cli() -> Command {
    Command::new(get_cargo_bin("wasm-cli-runner"))
}
// Snapshot: `--version` prints the crate version and exits successfully.
#[test]
fn has_a_version() {
    assert_cmd_snapshot!(cli().arg("--version"));
}
// Snapshot: running with no arguments fails, since the path is mandatory.
#[test]
fn path_is_required() {
    assert_cmd_snapshot!(cli());
}
// Snapshot: a path that does not exist on disk produces an error.
#[test]
fn path_is_reachable_on_disk() {
    assert_cmd_snapshot!(cli().arg("noop.wasm"));
}
// Snapshot: a real .wasm fixture is accepted by the runner.
#[test]
fn path_points_to_a_wasm_binary() {
    assert_cmd_snapshot!(cli().arg("tests/binaries/time.wasm"));
}
| yoshuawuyts/wasm-cli-runner | 1 | Rust | yoshuawuyts | Yosh | ||
bin/cli.js | JavaScript | #!/usr/bin/env node
/**
* git-hierarchies CLI
* Reveal the real org chart by analyzing who approves whose PRs
*/
import { program } from 'commander';
import chalk from 'chalk';
import ora from 'ora';
import fs from 'fs';
import path from 'path';
import { createClient, fetchMergedPRsWithReviews, parseRepoInput } from '../lib/github.js';
import { buildApprovalGraph, analyzeHierarchy } from '../lib/hierarchy.js';
import { formatAnalysis, generateHTML } from '../lib/output.js';
/**
 * Parse relative date strings
 *
 * Accepts named shortcuts ("last week", "last month", "last quarter",
 * "last year"), generic "<n> <unit>s ago" forms, or anything `new Date`
 * can parse. Returns null for falsy input; throws for unparseable input.
 */
function parseDate(str) {
  if (!str) return null;

  const now = new Date();
  const lower = str.toLowerCase().trim();

  const DAY_MS = 24 * 60 * 60 * 1000;
  const monthsBack = (n) => new Date(now.getFullYear(), now.getMonth() - n, now.getDate());

  // Named shortcuts map directly onto a concrete offset from "now".
  if (lower === 'last week' || lower === '1 week ago') {
    return new Date(now.getTime() - 7 * DAY_MS);
  }
  if (lower === 'last month' || lower === '1 month ago') {
    return monthsBack(1);
  }
  if (lower === 'last year' || lower === '1 year ago') {
    return new Date(now.getFullYear() - 1, now.getMonth(), now.getDate());
  }
  if (lower === 'last quarter' || lower === '3 months ago') {
    return monthsBack(3);
  }

  // Generic "<n> <unit>s ago" forms.
  const relativeMatch = lower.match(/^(\d+)\s+(day|week|month|year)s?\s+ago$/);
  if (relativeMatch) {
    const n = parseInt(relativeMatch[1], 10);
    switch (relativeMatch[2]) {
      case 'day': return new Date(now.getTime() - n * DAY_MS);
      case 'week': return new Date(now.getTime() - n * 7 * DAY_MS);
      case 'month': return monthsBack(n);
      case 'year': return new Date(now.getFullYear() - n, now.getMonth(), now.getDate());
    }
  }

  // Anything else: let Date try, rejecting invalid results.
  const parsed = new Date(str);
  if (!isNaN(parsed.getTime())) return parsed;
  throw new Error(`Could not parse date: ${str}`);
}
// Wire up the CLI surface. A single optional [repo] argument plus flags is
// parsed by commander; all real work happens in the async action below.
program
  .name('git-hierarchies')
  .description('Reveal the real org chart by analyzing who approves whose PRs')
  .version('0.1.0')
  .argument('[repo]', 'GitHub repo (owner/repo or URL)')
  .option('-n, --limit <number>', 'Maximum PRs to analyze (default: unlimited)')
  .option('-t, --token <token>', 'GitHub token (or set GITHUB_TOKEN env)')
  .option('--since <date>', 'Only include PRs merged after this date (e.g., "2024-01-01", "1 month ago")')
  .option('--last-week', 'Shortcut for --since "1 week ago"')
  .option('--last-month', 'Shortcut for --since "1 month ago"')
  .option('--last-quarter', 'Shortcut for --since "3 months ago"')
  .option('--last-year', 'Shortcut for --since "1 year ago"')
  .option('--html <file>', 'Generate HTML visualization')
  .option('--json <file>', 'Output raw data as JSON')
  .action(async (repoArg, options) => {
    try {
      // Get GitHub token
      const token = options.token || process.env.GITHUB_TOKEN || process.env.GH_TOKEN;

      // Parse repo input
      let owner, repo;
      if (repoArg) {
        ({ owner, repo } = parseRepoInput(repoArg));
      } else {
        // Try to detect from current directory
        try {
          const gitRemote = fs.readFileSync('.git/config', 'utf-8');
          const match = gitRemote.match(/github\.com[:/]([^/]+)\/([^/\s.]+)/);
          if (match) {
            owner = match[1];
            repo = match[2].replace(/\.git$/, '');
          }
        } catch {
          // .git/config unreadable or absent — cannot infer the repo.
          console.error(chalk.red('Error: No repo specified and not in a git directory.'));
          console.error(chalk.gray('Usage: git-hierarchies <owner/repo>'));
          process.exit(1);
        }
      }

      if (!owner || !repo) {
        console.error(chalk.red('Error: Could not determine repository.'));
        process.exit(1);
      }
      const repoName = `${owner}/${repo}`;

      if (!token) {
        console.error(chalk.red('Error: GitHub token required.'));
        console.error(chalk.gray('Set GITHUB_TOKEN environment variable or use --token'));
        process.exit(1);
      }

      // Parse date filters — shortcuts take precedence over --since.
      let sinceDate = null;
      if (options.lastWeek) sinceDate = parseDate('last week');
      else if (options.lastMonth) sinceDate = parseDate('last month');
      else if (options.lastQuarter) sinceDate = parseDate('last quarter');
      else if (options.lastYear) sinceDate = parseDate('last year');
      else if (options.since) sinceDate = parseDate(options.since);

      const limit = options.limit ? parseInt(options.limit, 10) : Infinity;

      console.log('');
      console.log(chalk.bold.cyan(`🔀 git-hierarchies`));
      console.log(chalk.gray(` Analyzing ${repoName}...`));
      if (sinceDate) {
        console.log(chalk.gray(` Filtering PRs merged since ${sinceDate.toISOString().split('T')[0]}`));
      }
      console.log('');

      const spinner = ora('Fetching merged PRs...').start();
      const client = createClient(token);

      // Fetch PRs
      let prs = await fetchMergedPRsWithReviews(client, owner, repo, {
        limit,
        since: sinceDate,
        onProgress: (current) => {
          spinner.text = `Fetching PRs... ${current}`;
        },
      });

      spinner.succeed(`Fetched ${prs.length} PRs`);

      // Apply limit if set
      if (limit !== Infinity && prs.length > limit) {
        prs = prs.slice(0, limit);
        console.log(chalk.gray(` ✂️ Limited to ${limit} PRs`));
      }

      if (prs.length === 0) {
        console.log(chalk.yellow('No merged PRs found in the specified range.'));
        process.exit(0);
      }

      // Build and analyze
      const analysisSpinner = ora('Building approval graph...').start();
      const graph = buildApprovalGraph(prs);
      const analysis = analyzeHierarchy(graph);
      analysisSpinner.succeed('Analysis complete');

      // Output
      console.log(formatAnalysis(analysis, repoName));

      // Show date range (prs are sorted newest-first by the fetcher)
      if (prs.length > 0) {
        const oldest = prs[prs.length - 1].merged_at.split('T')[0];
        const newest = prs[0].merged_at.split('T')[0];
        console.log(chalk.gray(` Date range: ${oldest} to ${newest} (${prs.length} PRs)`));
        console.log('');
      }

      // Generate HTML if requested
      if (options.html) {
        const html = generateHTML(analysis, prs, repoName);
        fs.writeFileSync(options.html, html);
        console.log(chalk.green(`✓ HTML visualization saved to ${options.html}`));
        console.log(chalk.gray(` Open in browser: file://${path.resolve(options.html)}`));
      }

      // Generate JSON if requested
      if (options.json) {
        const jsonData = {
          repo: repoName,
          prsAnalyzed: prs.length,
          dateRange: {
            oldest: prs[prs.length - 1]?.merged_at,
            newest: prs[0]?.merged_at,
          },
          analysis,
          generatedAt: new Date().toISOString(),
        };
        fs.writeFileSync(options.json, JSON.stringify(jsonData, null, 2));
        console.log(chalk.green(`✓ JSON data saved to ${options.json}`));
      }
    } catch (error) {
      // Map common GitHub API failures onto friendlier messages.
      if (error.status === 401) {
        console.error(chalk.red('Error: Invalid GitHub token.'));
      } else if (error.status === 404) {
        console.error(chalk.red('Error: Repository not found (or no access).'));
      } else {
        console.error(chalk.red(`Error: ${error.message}`));
        if (process.env.DEBUG) console.error(error.stack);
      }
      process.exit(1);
    }
  });

program.parse();
| youknowriad/git-hierarchies | 0 | Reveal the real org chart by analyzing who approves whose PRs | JavaScript | youknowriad | Riad Benguella | Automattic |
lib/github.js | JavaScript | /**
* GitHub API interactions for fetching PR and review data
* Uses GraphQL for efficient batched queries
*/
import { Octokit } from '@octokit/rest';
/**
 * Create an authenticated Octokit instance
 * @param {string} token - GitHub personal access token
 * @returns {Octokit}
 */
export function createClient(token) {
  const options = {
    auth: token,
    userAgent: 'git-hierarchies/0.1.0',
    // Per-request timeout in milliseconds.
    request: {
      timeout: 60000,
    },
  };
  return new Octokit(options);
}
/**
 * Generate monthly time windows between two dates
 *
 * Each window is { start, end }; the final window is clamped to `until`.
 */
function generateTimeWindows(since, until) {
  const end = new Date(until);
  const windows = [];

  let cursor = new Date(since);
  while (cursor < end) {
    const windowStart = new Date(cursor);
    // Advance one calendar month, then clamp the window to `end`.
    cursor.setMonth(cursor.getMonth() + 1);
    windows.push({
      start: windowStart,
      end: cursor < end ? new Date(cursor) : new Date(end),
    });
  }

  return windows;
}
/**
 * Fetch PRs for a single time window
 *
 * Pages through GitHub's GraphQL search API (50 results per page) for PRs
 * merged between `windowStart` and `windowEnd`, stopping early once the
 * global `limit` (counting PRs collected by earlier windows) is reached.
 *
 * @param client - authenticated Octokit client (uses `.graphql`)
 * @param {string} owner
 * @param {string} repo
 * @param {Date} windowStart - lower bound on merge date
 * @param {Date} windowEnd - upper bound on merge date
 * @param {number} limit - global cap across all windows
 * @param {function} onProgress - called with the running total per PR
 * @param {number} currentCount - PRs already collected by earlier windows
 * @returns {Promise<Array>} normalized PR records for this window
 */
async function fetchWindowPRs(client, owner, repo, windowStart, windowEnd, limit, onProgress, currentCount) {
  const results = [];
  let cursor = null;
  let hasNextPage = true;

  const startStr = windowStart.toISOString().split('T')[0];
  const endStr = windowEnd.toISOString().split('T')[0];
  const searchQuery = `repo:${owner}/${repo} is:pr is:merged merged:${startStr}..${endStr}`;

  while (hasNextPage && (currentCount + results.length) < limit) {
    const query = `
      query($searchQuery: String!, $cursor: String) {
        search(query: $searchQuery, type: ISSUE, first: 50, after: $cursor) {
          pageInfo {
            hasNextPage
            endCursor
          }
          nodes {
            ... on PullRequest {
              number
              title
              mergedAt
              author {
                login
              }
              reviews(first: 20, states: [APPROVED]) {
                nodes {
                  author {
                    login
                  }
                  state
                }
              }
            }
          }
        }
      }
    `;

    const response = await client.graphql(query, {
      searchQuery,
      cursor,
    });

    const { search } = response;
    hasNextPage = search.pageInfo.hasNextPage;
    cursor = search.pageInfo.endCursor;

    for (const pr of search.nodes) {
      if (!pr || !pr.mergedAt) continue;
      if ((currentCount + results.length) >= limit) break;

      const author = pr.author?.login;
      const approverLogins = pr.reviews.nodes
        .map(r => r.author?.login)
        .filter(Boolean);

      // A PR is "self-merged" when nobody other than the author approved it.
      const isSelfMerge = approverLogins.length === 0 ||
        (approverLogins.length === 1 && approverLogins[0] === author);

      results.push({
        number: pr.number,
        author,
        title: pr.title,
        merged_at: pr.mergedAt,
        approvers: approverLogins.filter(a => a !== author),
        selfMerge: isSelfMerge,
      });

      if (onProgress) {
        onProgress(currentCount + results.length);
      }
    }
  }

  return results;
}
/**
 * Fetch merged PRs with their reviews using GraphQL
 * Uses time-windowed queries to bypass GitHub's 1000 result limit
 * @param {Octokit} client
 * @param {string} owner
 * @param {string} repo
 * @param {object} options
 * @param {number} options.limit - Max PRs to fetch
 * @param {Date} options.since - Only fetch PRs merged after this date
 * @param {function} options.onProgress - Progress callback
 * @returns {Promise<Array>} Array of PR data, newest first, deduped by number
 */
export async function fetchMergedPRsWithReviews(client, owner, repo, options = {}) {
  const { limit = Infinity, since = null, onProgress } = options;

  // NOTE(review): without an explicit `since`, the lookback starts at
  // 2020-01-01 — older PRs are silently excluded.
  const sinceDate = since || new Date('2020-01-01');
  const untilDate = new Date();

  // Generate monthly windows to stay under 1000 limit per window
  const windows = generateTimeWindows(sinceDate, untilDate);

  if (process.env.DEBUG) {
    console.error(`[DEBUG] Fetching with ${windows.length} time windows`);
  }

  const allResults = [];

  // Fetch newest first (reverse order)
  for (let i = windows.length - 1; i >= 0 && allResults.length < limit; i--) {
    const window = windows[i];
    const windowResults = await fetchWindowPRs(
      client, owner, repo,
      window.start, window.end,
      limit, onProgress, allResults.length
    );
    allResults.push(...windowResults);

    if (process.env.DEBUG) {
      console.error(`[DEBUG] Window ${window.start.toISOString().split('T')[0]} to ${window.end.toISOString().split('T')[0]}: ${windowResults.length} PRs (total: ${allResults.length})`);
    }
  }

  // Sort by merged date descending and dedupe by PR number
  // (adjacent windows share a boundary day, so duplicates can occur).
  const seen = new Set();
  const deduped = allResults
    .sort((a, b) => new Date(b.merged_at) - new Date(a.merged_at))
    .filter(pr => {
      if (seen.has(pr.number)) return false;
      seen.add(pr.number);
      return true;
    });

  if (process.env.DEBUG) {
    console.error(`[DEBUG] Total PRs after dedup: ${deduped.length}`);
  }

  return deduped;
}
/**
 * Parse a GitHub repo URL or owner/repo string
 * @param {string} input - https URL, SSH remote (git@github.com:owner/repo),
 *   or bare owner/repo; a trailing ".git" is stripped in all forms
 * @returns {{ owner: string, repo: string }}
 * @throws {Error} when the input matches none of the supported shapes
 */
export function parseRepoInput(input) {
  // Handle full URLs and SSH remotes. The `[:/]` separator matches both the
  // https path form and the SSH colon form, mirroring the remote-detection
  // regex used by the CLI when reading .git/config.
  const urlMatch = input.match(/github\.com[:/]([^/]+)\/([^/\s]+)/);
  if (urlMatch) {
    return { owner: urlMatch[1], repo: urlMatch[2].replace(/\.git$/, '') };
  }

  // Handle owner/repo format. Both parts must be non-empty, and a trailing
  // ".git" is stripped for consistency with the URL branch.
  const parts = input.split('/');
  if (parts.length === 2 && parts[0] && parts[1]) {
    return { owner: parts[0], repo: parts[1].replace(/\.git$/, '') };
  }

  throw new Error(`Invalid repo format: ${input}. Use owner/repo or a GitHub URL.`);
}
| youknowriad/git-hierarchies | 0 | Reveal the real org chart by analyzing who approves whose PRs | JavaScript | youknowriad | Riad Benguella | Automattic |
lib/hierarchy.js | JavaScript | /**
* Build and analyze the approval hierarchy from PR data
*/
/**
 * Build a directed graph of author -> approver relationships
 * @param {Array} prs - Array of PR data with author and approvers
 * @returns {object} Graph data structure
 */
export function buildApprovalGraph(prs) {
  const edges = new Map(); // "author->approver" -> count
  const authorStats = new Map(); // author -> { prs, receivedApprovals, selfMerges }
  const approverStats = new Map(); // approver -> { approved }

  for (const { author, approvers, selfMerge } of prs) {
    if (!author) continue;

    // Lazily create the author's record, then bump counters.
    let authorData = authorStats.get(author);
    if (!authorData) {
      authorData = { prs: 0, receivedApprovals: 0, selfMerges: 0 };
      authorStats.set(author, authorData);
    }
    authorData.prs += 1;
    if (selfMerge) authorData.selfMerges += 1;

    for (const approver of approvers) {
      if (!approver || approver === author) continue; // self-approvals don't count

      const edgeKey = `${author}->${approver}`;
      edges.set(edgeKey, (edges.get(edgeKey) ?? 0) + 1);
      authorData.receivedApprovals += 1;

      let approverData = approverStats.get(approver);
      if (!approverData) {
        approverData = { approved: 0 };
        approverStats.set(approver, approverData);
      }
      approverData.approved += 1;
    }
  }

  return { edges, authorStats, approverStats };
}
/**
 * Analyze the graph to find hierarchy patterns
 * @param {object} graph - Graph from buildApprovalGraph
 * @returns {object} Analysis results: top approvers/authors, strongest
 *   relationships, power rankings, self-mergers, and likely reporting lines
 */
export function analyzeHierarchy(graph) {
  const { edges, authorStats, approverStats } = graph;

  // Find top approvers (who approves the most)
  const topApprovers = [...approverStats.entries()]
    .map(([name, stats]) => ({ name, ...stats }))
    .sort((a, b) => b.approved - a.approved)
    .slice(0, 10);

  // Find top authors (who submits the most)
  const topAuthors = [...authorStats.entries()]
    .map(([name, stats]) => ({ name, ...stats }))
    .sort((a, b) => b.prs - a.prs)
    .slice(0, 10);

  // Find strongest relationships (most approvals between two people)
  const relationships = [...edges.entries()]
    .map(([key, count]) => {
      const [author, approver] = key.split('->');
      return { author, approver, count };
    })
    .sort((a, b) => b.count - a.count)
    .slice(0, 20);

  // Calculate "power score" - combination of approval authority, submissions, and self-merges
  const powerScores = new Map();

  // First pass: add approvers
  for (const [name, stats] of approverStats) {
    const approvalPower = stats.approved;
    const authorData = authorStats.get(name);
    const submissionActivity = authorData?.prs || 0;
    const selfMerges = authorData?.selfMerges || 0;
    // Power = approvals given * 2 + submissions + self-merges * 3 (self-merge = high trust)
    const power = approvalPower * 2 + submissionActivity + selfMerges * 3;
    powerScores.set(name, { power, approved: approvalPower, submitted: submissionActivity, selfMerges });
  }

  // Second pass: add authors who don't approve much
  // (pure submitters score half per PR, but self-merges still weigh heavily)
  for (const [name, stats] of authorStats) {
    if (!powerScores.has(name)) {
      const selfMerges = stats.selfMerges || 0;
      // Self-mergers without approval authority still have power
      const power = stats.prs * 0.5 + selfMerges * 3;
      powerScores.set(name, { power, approved: 0, submitted: stats.prs, selfMerges });
    }
  }

  const rankedByPower = [...powerScores.entries()]
    .map(([name, data]) => ({
      name,
      power: data.power,
      approved: data.approved,
      submitted: data.submitted,
      selfMerges: data.selfMerges,
    }))
    .sort((a, b) => b.power - a.power)
    .slice(0, 15);

  // Find self-mergers (people who merge without approval)
  const selfMergers = [...authorStats.entries()]
    .filter(([, stats]) => stats.selfMerges > 0)
    .map(([name, stats]) => ({
      name,
      selfMerges: stats.selfMerges,
      totalPRs: stats.prs,
      selfMergeRate: Math.round((stats.selfMerges / stats.prs) * 100),
    }))
    .sort((a, b) => b.selfMerges - a.selfMerges)
    .slice(0, 10);

  // Detect potential "reporting lines" - if A always gets approved by B
  const reportingPatterns = [];
  for (const [name, stats] of authorStats) {
    if (stats.prs < 3 || stats.receivedApprovals === 0) continue; // Need enough data with approvals

    // Find who approves this person most
    const approversForThisPerson = [...edges.entries()]
      .filter(([key]) => key.startsWith(`${name}->`))
      .map(([key, count]) => ({
        approver: key.split('->')[1],
        count,
        percentage: Math.round((count / stats.receivedApprovals) * 100),
      }))
      .sort((a, b) => b.count - a.count);

    if (approversForThisPerson.length > 0) {
      const primary = approversForThisPerson[0];
      if (primary.percentage >= 40) { // At least 40% of approvals from one person
        reportingPatterns.push({
          author: name,
          likelyManager: primary.approver,
          approvalPercentage: primary.percentage,
          totalApprovals: stats.receivedApprovals,
        });
      }
    }
  }

  return {
    topApprovers,
    topAuthors,
    relationships,
    rankedByPower,
    selfMergers,
    reportingPatterns: reportingPatterns.sort((a, b) => b.approvalPercentage - a.approvalPercentage),
  };
}
| youknowriad/git-hierarchies | 0 | Reveal the real org chart by analyzing who approves whose PRs | JavaScript | youknowriad | Riad Benguella | Automattic |
lib/output.js | JavaScript | /**
* Format and output the analysis results
*/
import chalk from 'chalk';
import Table from 'cli-table3';
/**
 * Format the analysis as a beautiful CLI output
 * @param {object} analysis - From analyzeHierarchy
 * @param {string} repoName - Repository name for header
 * @returns {string} the complete, colorized report, ready for console.log
 */
export function formatAnalysis(analysis, repoName) {
  const { topApprovers, topAuthors, relationships, rankedByPower, reportingPatterns, selfMergers } = analysis;
  const output = [];

  // Header
  output.push('');
  output.push(chalk.bold.cyan(`🔀 git-hierarchies: ${repoName}`));
  output.push(chalk.gray(' The real org chart, revealed by PR approvals'));
  output.push('');

  // Power Rankings (The Real Hierarchy)
  output.push(chalk.bold.yellow('👑 POWER RANKINGS'));
  output.push(chalk.gray(' Who really runs things around here'));
  output.push('');

  const powerTable = new Table({
    head: [
      chalk.white('Rank'),
      chalk.white('Name'),
      chalk.white('Power'),
      chalk.white('Approved'),
      chalk.white('Submitted'),
      chalk.white('Self-Merged'),
      chalk.white('Role'),
    ],
    style: { head: [], border: [] },
  });

  rankedByPower.forEach((person, i) => {
    const role = getRole(person);
    // Medal emoji for the top three ranks only.
    const rankEmoji = i === 0 ? '👑' : i === 1 ? '🥈' : i === 2 ? '🥉' : ' ';
    powerTable.push([
      `${rankEmoji} ${i + 1}`,
      chalk.bold(person.name),
      chalk.cyan(person.power.toFixed(0)),
      chalk.green(person.approved),
      chalk.blue(person.submitted),
      person.selfMerges > 0 ? chalk.magenta(person.selfMerges) : chalk.gray('-'),
      chalk.gray(role),
    ]);
  });
  output.push(powerTable.toString());
  output.push('');

  // Reporting Patterns (Likely Reporting Lines) — only shown when detected.
  if (reportingPatterns.length > 0) {
    output.push(chalk.bold.magenta('📊 LIKELY REPORTING LINES'));
    output.push(chalk.gray(' Who approves whose work most consistently'));
    output.push('');

    const reportingTable = new Table({
      head: [
        chalk.white('Developer'),
        chalk.white('→'),
        chalk.white('Likely Manager'),
        chalk.white('% of Approvals'),
      ],
      style: { head: [], border: [] },
    });

    reportingPatterns.slice(0, 10).forEach(pattern => {
      reportingTable.push([
        chalk.blue(pattern.author),
        chalk.gray('→'),
        chalk.green(pattern.likelyManager),
        chalk.yellow(`${pattern.approvalPercentage}%`),
      ]);
    });
    output.push(reportingTable.toString());
    output.push('');
  }

  // Strongest Relationships
  output.push(chalk.bold.green('🤝 STRONGEST APPROVAL RELATIONSHIPS'));
  output.push(chalk.gray(' Who approves whose PRs most often'));
  output.push('');

  const relTable = new Table({
    head: [
      chalk.white('Author'),
      chalk.white('→'),
      chalk.white('Approved By'),
      chalk.white('Count'),
      chalk.white('Vibe'),
    ],
    style: { head: [], border: [] },
  });

  relationships.slice(0, 10).forEach(rel => {
    const vibe = getRelationshipVibe(rel.count);
    relTable.push([
      chalk.blue(rel.author),
      chalk.gray('→'),
      chalk.green(rel.approver),
      chalk.cyan(rel.count),
      vibe,
    ]);
  });
  output.push(relTable.toString());
  output.push('');

  // Top Approvers
  output.push(chalk.bold.blue('🛡️ TOP GATEKEEPERS'));
  output.push(chalk.gray(' Who approves the most PRs'));
  output.push('');

  const approverTable = new Table({
    head: [chalk.white('Name'), chalk.white('PRs Approved')],
    style: { head: [], border: [] },
  });

  topApprovers.forEach(approver => {
    approverTable.push([chalk.bold(approver.name), chalk.green(approver.approved)]);
  });
  output.push(approverTable.toString());
  output.push('');

  // Footer
  output.push(chalk.gray('─'.repeat(50)));
  output.push(chalk.gray.italic(' Remember: correlation ≠ causation. But still... 👀'));
  output.push('');

  return output.join('\n');
}
/**
 * Determine a person's role based on their stats
 *
 * Rules are evaluated in priority order; the first match wins.
 */
function getRole(person) {
  const { approved, submitted, selfMerges = 0 } = person;

  // High self-merge rate = core contributor with commit access
  const selfMergeRate = submitted > 0 ? selfMerges / submitted : 0;

  const rules = [
    [() => selfMergeRate >= 0.5 && selfMerges >= 3, 'Tech Lead'],
    [() => approved > 50 && submitted < 10, 'Gatekeeper'],
    [() => approved > 30 && submitted > 20, 'Tech Lead'],
    [() => selfMerges >= 2 && approved > 5, 'Core Team'],
    [() => approved > submitted * 2, 'Senior/Reviewer'],
    [() => submitted > approved * 2, 'Builder'],
    [() => approved > 10 && submitted > 10, 'Core Team'],
  ];

  for (const [matches, role] of rules) {
    if (matches()) return role;
  }
  return 'Contributor';
}
/**
 * Get a fun description of the relationship strength
 *
 * Thresholds are checked from strongest to weakest; first match wins.
 */
function getRelationshipVibe(count) {
  const tiers = [
    [50, () => chalk.red('🔥 inseparable')],
    [30, () => chalk.yellow('💪 tight')],
    [15, () => chalk.green('✨ solid')],
    [8, () => chalk.blue('👍 regular')],
  ];
  for (const [min, paint] of tiers) {
    if (count >= min) return paint();
  }
  return chalk.gray('📝 occasional');
}
/**
* Generate HTML visualization
* @param {object} analysis - Analysis results
* @param {Array} prs - Raw PR data
* @param {string} repoName - Repo name
* @returns {string} HTML content
*/
export function generateHTML(analysis, prs, repoName) {
const { rankedByPower, relationships, reportingPatterns } = analysis;
// Build nodes with full stats
const nodeMap = new Map();
rankedByPower.forEach((person, rank) => {
nodeMap.set(person.name, {
id: person.name,
power: person.power,
approved: person.approved,
submitted: person.submitted,
selfMerges: person.selfMerges || 0,
role: getRole(person),
rank: rank + 1,
});
});
// Add any nodes from relationships not in power rankings
relationships.forEach(r => {
if (!nodeMap.has(r.author)) {
nodeMap.set(r.author, { id: r.author, power: 1, approved: 0, submitted: 1, selfMerges: 0, role: 'Contributor', rank: 999 });
}
if (!nodeMap.has(r.approver)) {
nodeMap.set(r.approver, { id: r.approver, power: 1, approved: 1, submitted: 0, selfMerges: 0, role: 'Reviewer', rank: 999 });
}
});
const nodes = [...nodeMap.values()];
// Build links with relationship context
const links = relationships.map(r => ({
source: r.author,
target: r.approver,
value: r.count,
}));
// Calculate date range
const dates = prs.map(p => new Date(p.merged_at)).sort((a, b) => a - b);
const dateRange = dates.length > 0
? `${dates[0].toISOString().split('T')[0]} to ${dates[dates.length - 1].toISOString().split('T')[0]}`
: 'Unknown';
// Role colors
const roleColors = {
'Tech Lead': '#f0883e',
'Gatekeeper': '#a371f7',
'Core Team': '#58a6ff',
'Senior/Reviewer': '#3fb950',
'Builder': '#79c0ff',
'Contributor': '#8b949e',
'Reviewer': '#a371f7',
};
const html = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>git-hierarchies: ${repoName}</title>
<script src="https://d3js.org/d3.v7.min.js"></script>
<style>
* { box-sizing: border-box; margin: 0; padding: 0; }
html, body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%);
color: #c9d1d9;
height: 100%;
overflow: hidden;
}
.header {
position: absolute;
top: 16px;
left: 50%;
transform: translateX(-50%);
text-align: center;
padding: 12px 24px;
background: rgba(22, 27, 34, 0.85);
backdrop-filter: blur(10px);
border-radius: 12px;
border: 1px solid #30363d;
z-index: 100;
}
h1 {
margin: 0 0 4px 0;
font-size: 18px;
font-weight: 600;
}
h1 .icon { margin-right: 6px; }
h1 .repo { color: #58a6ff; }
.subtitle {
color: #8b949e;
margin: 0 0 8px 0;
font-size: 12px;
}
.stats {
display: flex;
justify-content: center;
gap: 16px;
font-size: 11px;
}
.stat { color: #8b949e; }
.stat strong { color: #c9d1d9; }
.container {
display: flex;
height: 100vh;
width: 100vw;
}
.graph-container {
width: 100%;
height: 100%;
position: relative;
overflow: hidden;
}
svg {
width: 100%;
height: 100%;
cursor: grab;
}
svg:active { cursor: grabbing; }
.sidebar {
position: absolute;
top: 16px;
right: 16px;
width: 280px;
max-height: calc(100vh - 32px);
background: rgba(22, 27, 34, 0.92);
backdrop-filter: blur(10px);
border: 1px solid #30363d;
border-radius: 12px;
padding: 16px;
overflow-y: auto;
z-index: 100;
}
.sidebar h2 {
font-size: 14px;
font-weight: 600;
color: #8b949e;
text-transform: uppercase;
letter-spacing: 0.5px;
margin: 0 0 16px 0;
}
.legend {
margin-bottom: 24px;
}
.legend-item {
display: flex;
align-items: center;
gap: 10px;
padding: 6px 0;
font-size: 13px;
}
.legend-dot {
width: 12px;
height: 12px;
border-radius: 50%;
flex-shrink: 0;
}
.top-list {
margin-bottom: 24px;
}
.top-item {
display: flex;
align-items: center;
gap: 10px;
padding: 8px 10px;
margin: 4px 0;
border-radius: 6px;
background: rgba(48, 54, 61, 0.5);
font-size: 13px;
cursor: pointer;
transition: background 0.15s;
}
.top-item:hover {
background: rgba(48, 54, 61, 0.9);
}
.top-rank {
width: 24px;
height: 24px;
border-radius: 50%;
background: #30363d;
display: flex;
align-items: center;
justify-content: center;
font-size: 11px;
font-weight: 600;
flex-shrink: 0;
}
.top-rank.gold { background: #f0883e; color: #0d1117; }
.top-rank.silver { background: #8b949e; color: #0d1117; }
.top-rank.bronze { background: #a37150; color: #0d1117; }
.top-name { flex: 1; font-weight: 500; }
.top-power { color: #58a6ff; font-weight: 600; }
.controls {
margin-bottom: 24px;
}
.control-group {
margin-bottom: 12px;
}
.control-label {
font-size: 12px;
color: #8b949e;
margin-bottom: 6px;
}
input[type="range"] {
width: 100%;
height: 4px;
border-radius: 2px;
background: #30363d;
outline: none;
-webkit-appearance: none;
}
input[type="range"]::-webkit-slider-thumb {
-webkit-appearance: none;
width: 14px;
height: 14px;
border-radius: 50%;
background: #58a6ff;
cursor: pointer;
}
/* Tooltip */
.tooltip {
position: absolute;
padding: 12px 16px;
background: rgba(22, 27, 34, 0.98);
border: 1px solid #30363d;
border-radius: 8px;
font-size: 13px;
pointer-events: none;
opacity: 0;
transition: opacity 0.15s;
max-width: 280px;
box-shadow: 0 8px 24px rgba(0,0,0,0.4);
z-index: 1000;
}
.tooltip.visible { opacity: 1; }
.tooltip-header {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 10px;
padding-bottom: 10px;
border-bottom: 1px solid #30363d;
}
.tooltip-name {
font-weight: 600;
font-size: 15px;
}
.tooltip-role {
font-size: 11px;
padding: 2px 8px;
border-radius: 12px;
font-weight: 500;
}
.tooltip-stats {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 8px;
}
.tooltip-stat {
display: flex;
flex-direction: column;
}
.tooltip-stat-value {
font-size: 18px;
font-weight: 600;
color: #c9d1d9;
}
.tooltip-stat-label {
font-size: 11px;
color: #8b949e;
}
/* Graph elements */
.link {
fill: none;
stroke-opacity: 0.6;
stroke-linecap: round;
transition: stroke-opacity 0.2s, stroke-width 0.2s;
}
.link:hover {
stroke-opacity: 1;
}
.link.highlighted {
stroke-opacity: 1;
stroke-width: 3px !important;
}
.link.dimmed {
stroke-opacity: 0.08;
}
.node circle {
cursor: pointer;
transition: all 0.2s ease;
filter: drop-shadow(0 2px 4px rgba(0,0,0,0.3));
}
.node:hover circle {
filter: drop-shadow(0 0 12px currentColor) drop-shadow(0 4px 8px rgba(0,0,0,0.4));
transform: scale(1.1);
}
.node.highlighted circle {
filter: drop-shadow(0 0 16px currentColor) drop-shadow(0 4px 8px rgba(0,0,0,0.4));
}
.node.dimmed circle {
opacity: 0.2;
filter: none;
}
.node text {
fill: #e6edf3;
font-size: 11px;
font-weight: 500;
pointer-events: none;
text-shadow: 0 1px 3px rgba(0,0,0,0.9), 0 0 8px rgba(0,0,0,0.5);
}
.node.dimmed text {
opacity: 0.2;
}
.zoom-controls {
position: absolute;
bottom: 20px;
left: 20px;
display: flex;
flex-direction: column;
gap: 4px;
z-index: 100;
}
.zoom-btn {
width: 36px;
height: 36px;
border: 1px solid #30363d;
border-radius: 6px;
background: rgba(22, 27, 34, 0.9);
color: #c9d1d9;
font-size: 18px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: background 0.15s;
}
.zoom-btn:hover {
background: rgba(48, 54, 61, 0.9);
}
.search-box {
margin-bottom: 16px;
}
.search-input {
width: 100%;
padding: 10px 12px;
border: 1px solid #30363d;
border-radius: 6px;
background: #0d1117;
color: #c9d1d9;
font-size: 13px;
outline: none;
}
.search-input:focus {
border-color: #58a6ff;
}
.search-input::placeholder {
color: #484f58;
}
.sidebar-toggle {
position: absolute;
top: 16px;
right: 16px;
width: 36px;
height: 36px;
border: 1px solid #30363d;
border-radius: 8px;
background: rgba(22, 27, 34, 0.9);
color: #c9d1d9;
font-size: 16px;
cursor: pointer;
display: none;
align-items: center;
justify-content: center;
z-index: 101;
transition: background 0.15s;
}
.sidebar-toggle:hover {
background: rgba(48, 54, 61, 0.9);
}
.sidebar-toggle.visible {
display: flex;
}
.sidebar.collapsed {
display: none;
}
</style>
</head>
<body>
<div class="header">
<h1><span class="icon">🔀</span>git-hierarchies: <span class="repo">${repoName}</span></h1>
<p class="subtitle">The real org chart, revealed by PR approvals</p>
<div class="stats">
<span class="stat"><strong>${prs.length}</strong> PRs analyzed</span>
<span class="stat"><strong>${nodes.length}</strong> contributors</span>
<span class="stat">${dateRange}</span>
</div>
</div>
<div class="container">
<div class="graph-container">
<svg></svg>
<div class="tooltip"></div>
<div class="zoom-controls">
<button class="zoom-btn" id="zoom-in">+</button>
<button class="zoom-btn" id="zoom-out">−</button>
<button class="zoom-btn" id="zoom-reset">⟲</button>
</div>
</div>
<button class="sidebar-toggle" id="sidebar-toggle" title="Show panel">☰</button>
<div class="sidebar" id="sidebar">
<div class="search-box" style="display: flex; gap: 8px;">
<input type="text" class="search-input" placeholder="Search contributors..." id="search" style="flex: 1;">
<button class="zoom-btn" id="sidebar-close" title="Hide panel">✕</button>
</div>
<div class="legend">
<h2>Roles</h2>
${Object.entries(roleColors).map(([role, color]) => `
<div class="legend-item">
<div class="legend-dot" style="background: ${color}"></div>
<span>${role}</span>
</div>
`).join('')}
</div>
<div class="top-list">
<h2>👑 Power Rankings</h2>
${rankedByPower.slice(0, 10).map((p, i) => `
<div class="top-item" data-name="${p.name}">
<div class="top-rank ${i === 0 ? 'gold' : i === 1 ? 'silver' : i === 2 ? 'bronze' : ''}">${i + 1}</div>
<span class="top-name">${p.name}</span>
<span class="top-power">${p.power.toFixed(0)}</span>
</div>
`).join('')}
</div>
<div class="controls">
<h2>Display</h2>
<div class="control-group">
<div class="control-label">Link Strength Filter</div>
<input type="range" id="link-filter" min="1" max="20" value="1">
</div>
<div class="control-group">
<div class="control-label">Node Size</div>
<input type="range" id="node-size" min="0.5" max="3" step="0.1" value="1.5">
</div>
</div>
</div>
</div>
<script>
const nodes = ${JSON.stringify(nodes)};
const links = ${JSON.stringify(links)};
const roleColors = ${JSON.stringify(roleColors)};
const svg = d3.select("svg");
const container = document.querySelector('.graph-container');
const width = container.clientWidth;
const height = container.clientHeight;
// Create zoom behavior
const zoom = d3.zoom()
.scaleExtent([0.1, 4])
.on("zoom", (event) => {
g.attr("transform", event.transform);
});
svg.call(zoom);
// Main group for zoom/pan
const g = svg.append("g");
// Defs for gradients and markers
const defs = svg.append("defs");
// Improved arrow marker
defs.append("marker")
.attr("id", "arrowhead")
.attr("viewBox", "-6 -6 12 12")
.attr("refX", 22)
.attr("refY", 0)
.attr("orient", "auto")
.attr("markerWidth", 8)
.attr("markerHeight", 8)
.append("circle")
.attr("r", 4)
.attr("fill", "#58a6ff");
// Glow filter for nodes
const glow = defs.append("filter")
.attr("id", "glow")
.attr("x", "-50%")
.attr("y", "-50%")
.attr("width", "200%")
.attr("height", "200%");
glow.append("feGaussianBlur")
.attr("stdDeviation", "3")
.attr("result", "coloredBlur");
const glowMerge = glow.append("feMerge");
glowMerge.append("feMergeNode").attr("in", "coloredBlur");
glowMerge.append("feMergeNode").attr("in", "SourceGraphic");
const simulation = d3.forceSimulation(nodes)
.force("link", d3.forceLink(links).id(d => d.id).distance(120).strength(0.5))
.force("charge", d3.forceManyBody().strength(-400))
.force("center", d3.forceCenter(width / 2, height / 2))
.force("collision", d3.forceCollide().radius(d => Math.sqrt(d.power) * 3 + 20));
// Links with arrows - color by strength
const maxLinkValue = Math.max(...links.map(l => l.value));
const link = g.append("g")
.selectAll("path")
.data(links)
.join("path")
.attr("class", "link")
.attr("stroke", d => {
const intensity = Math.min(1, d.value / (maxLinkValue * 0.5));
return d3.interpolateRgb("#30363d", "#58a6ff")(intensity);
})
.attr("stroke-width", d => Math.max(1.5, Math.min(6, Math.sqrt(d.value) * 1.2)))
.attr("marker-end", "url(#arrowhead)");
// Nodes
const node = g.append("g")
.selectAll("g")
.data(nodes)
.join("g")
.attr("class", "node")
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
let nodeSizeMultiplier = 1.5;
// Add outer glow circle
node.append("circle")
.attr("class", "node-glow")
.attr("r", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier) + 4)
.attr("fill", d => roleColors[d.role] || '#8b949e')
.attr("opacity", 0.15)
.attr("filter", "url(#glow)");
// Main node circle with gradient-like effect
node.append("circle")
.attr("class", "node-main")
.attr("r", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier))
.attr("fill", d => roleColors[d.role] || '#8b949e')
.attr("stroke", d => d3.color(roleColors[d.role] || '#8b949e').brighter(0.5))
.attr("stroke-width", 2)
.attr("stroke-opacity", 0.6);
node.append("text")
.attr("dx", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier) + 6)
.attr("dy", 4)
.text(d => d.id);
// Tooltip
const tooltip = d3.select(".tooltip");
node.on("mouseenter", (event, d) => {
const roleColor = roleColors[d.role] || '#8b949e';
tooltip.html(\`
<div class="tooltip-header">
<span class="tooltip-name">\${d.id}</span>
<span class="tooltip-role" style="background: \${roleColor}22; color: \${roleColor}">\${d.role}</span>
</div>
<div class="tooltip-stats">
<div class="tooltip-stat">
<span class="tooltip-stat-value" style="color: #f0883e">\${d.power.toFixed(0)}</span>
<span class="tooltip-stat-label">Power Score</span>
</div>
<div class="tooltip-stat">
<span class="tooltip-stat-value" style="color: #3fb950">\${d.approved}</span>
<span class="tooltip-stat-label">PRs Approved</span>
</div>
<div class="tooltip-stat">
<span class="tooltip-stat-value" style="color: #58a6ff">\${d.submitted}</span>
<span class="tooltip-stat-label">PRs Submitted</span>
</div>
<div class="tooltip-stat">
<span class="tooltip-stat-value" style="color: #a371f7">\${d.selfMerges}</span>
<span class="tooltip-stat-label">Self-Merged</span>
</div>
</div>
\`)
.classed("visible", true)
.style("left", (event.pageX + 15) + "px")
.style("top", (event.pageY - 10) + "px");
// Highlight connected nodes and links
highlightConnections(d);
})
.on("mousemove", (event) => {
tooltip
.style("left", (event.pageX + 15) + "px")
.style("top", (event.pageY - 10) + "px");
})
.on("mouseleave", () => {
tooltip.classed("visible", false);
clearHighlights();
});
function highlightConnections(d) {
const connected = new Set([d.id]);
links.forEach(l => {
if (l.source.id === d.id) connected.add(l.target.id);
if (l.target.id === d.id) connected.add(l.source.id);
});
node.classed("highlighted", n => n.id === d.id)
.classed("dimmed", n => !connected.has(n.id));
link.classed("highlighted", l => l.source.id === d.id || l.target.id === d.id)
.classed("dimmed", l => l.source.id !== d.id && l.target.id !== d.id);
}
function clearHighlights() {
node.classed("highlighted", false).classed("dimmed", false);
link.classed("highlighted", false).classed("dimmed", false);
}
// Curved links for directed graph
simulation.on("tick", () => {
link.attr("d", d => {
const dx = d.target.x - d.source.x;
const dy = d.target.y - d.source.y;
const dr = Math.sqrt(dx * dx + dy * dy) * 2;
return \`M\${d.source.x},\${d.source.y}A\${dr},\${dr} 0 0,1 \${d.target.x},\${d.target.y}\`;
});
node.attr("transform", d => \`translate(\${d.x},\${d.y})\`);
});
function dragstarted(event) {
if (!event.active) simulation.alphaTarget(0.3).restart();
event.subject.fx = event.subject.x;
event.subject.fy = event.subject.y;
}
function dragged(event) {
event.subject.fx = event.x;
event.subject.fy = event.y;
}
function dragended(event) {
if (!event.active) simulation.alphaTarget(0);
event.subject.fx = null;
event.subject.fy = null;
}
// Zoom controls
document.getElementById('zoom-in').addEventListener('click', () => {
svg.transition().call(zoom.scaleBy, 1.5);
});
document.getElementById('zoom-out').addEventListener('click', () => {
svg.transition().call(zoom.scaleBy, 0.67);
});
document.getElementById('zoom-reset').addEventListener('click', () => {
svg.transition().call(zoom.transform, d3.zoomIdentity);
});
// Link filter
document.getElementById('link-filter').addEventListener('input', (e) => {
const minValue = parseInt(e.target.value);
link.style("display", d => d.value >= minValue ? null : "none");
});
// Node size
document.getElementById('node-size').addEventListener('input', (e) => {
nodeSizeMultiplier = parseFloat(e.target.value);
node.select(".node-glow").attr("r", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier) + 4);
node.select(".node-main").attr("r", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier));
node.select("text").attr("dx", d => Math.max(8, Math.sqrt(d.power) * nodeSizeMultiplier) + 6);
});
// Search
document.getElementById('search').addEventListener('input', (e) => {
const query = e.target.value.toLowerCase();
if (!query) {
clearHighlights();
return;
}
const matches = nodes.filter(n => n.id.toLowerCase().includes(query));
if (matches.length === 1) {
highlightConnections(matches[0]);
// Center on the matched node
const n = matches[0];
svg.transition().duration(500).call(
zoom.transform,
d3.zoomIdentity.translate(width/2 - n.x, height/2 - n.y)
);
} else {
node.classed("dimmed", d => !d.id.toLowerCase().includes(query));
link.classed("dimmed", true);
}
});
// Sidebar item click
document.querySelectorAll('.top-item').forEach(item => {
item.addEventListener('click', () => {
const name = item.dataset.name;
const n = nodes.find(n => n.id === name);
if (n) {
highlightConnections(n);
svg.transition().duration(500).call(
zoom.transform,
d3.zoomIdentity.translate(width/2 - n.x, height/2 - n.y).scale(1.5)
);
}
});
});
// Sidebar toggle
const sidebar = document.getElementById('sidebar');
const sidebarToggle = document.getElementById('sidebar-toggle');
const sidebarClose = document.getElementById('sidebar-close');
sidebarClose.addEventListener('click', () => {
sidebar.classList.add('collapsed');
sidebarToggle.classList.add('visible');
});
sidebarToggle.addEventListener('click', () => {
sidebar.classList.remove('collapsed');
sidebarToggle.classList.remove('visible');
});
// Initial zoom to fit
setTimeout(() => {
const bounds = g.node().getBBox();
const fullWidth = bounds.width;
const fullHeight = bounds.height;
const midX = bounds.x + fullWidth / 2;
const midY = bounds.y + fullHeight / 2;
const scale = 0.85 / Math.max(fullWidth / width, fullHeight / height);
svg.transition().duration(750).call(
zoom.transform,
d3.zoomIdentity.translate(width/2 - midX * scale, height/2 - midY * scale).scale(scale)
);
}, 1500);
</script>
</body>
</html>`;
return html;
}
| youknowriad/git-hierarchies | 0 | Reveal the real org chart by analyzing who approves whose PRs | JavaScript | youknowriad | Riad Benguella | Automattic |
src/components/graphiql.js | JavaScript | import React, { Component, PropTypes } from 'react';
import GraphiQL from 'graphiql';
import { graphql } from 'graphql';
export default class GraphiQLWrapper extends Component {
static contextTypes = {
graph: PropTypes.object.isRequired
};
fetch = ( { query, variables } ) => {
return graphql( this.context.graph.schema, query, this.context.graph.root, { uid: this.uid }, variables );
};
render() {
return (
<GraphiQL fetcher={ this.fetch } />
);
}
}
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/components/provider.js | JavaScript | import { Component, PropTypes, Children } from 'react';
export default class GraphProvider extends Component {
static propTypes = {
store: PropTypes.object.isRequired,
schema: PropTypes.object.isRequired,
root: PropTypes.object.isRequired,
children: PropTypes.element.isRequired
};
static childContextTypes = {
graph: PropTypes.object.isRequired
};
getChildContext() {
return { graph: this.graph };
}
constructor( props, context ) {
super( props, context );
const { store, root, schema } = props;
this.graph = { store, root, schema };
}
render() {
return Children.only( this.props.children );
}
}
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/components/query.js | JavaScript | import React, { Component, PropTypes } from 'react';
import { isString, uniqueId, throttle } from 'lodash';
import { graphql } from 'graphql';
import { quickGraphql, parse } from '../quick-graphql';
import { makePromiseCancelable } from '../utils/promises';
import { clearRequests } from '../redux/actions';
const THROTTLE_DELAY = 50;
const query = ( mapPropsToQuery, mapPropsToVariables = () => ( {} ) ) => ( WrappedComponent ) => {
return class GraphQueryComponent extends Component {
state = {
data: null,
errors: null
};
uid = uniqueId();
static contextTypes = {
graph: PropTypes.object
};
constructor( props, context ) {
super( props, context );
this.buildQuery( props );
}
componentDidMount() {
const throttledRequest = throttle( this.request, THROTTLE_DELAY, { leading: true } );
this.unsubscribe = this.context.graph.store.subscribe( throttledRequest );
this.request();
}
componentWillUnmount() {
this.cancelRequest();
this.unsubscribe && this.unsubscribe();
this.context.graph.store.dispatch( clearRequests( this.uid ) );
}
componentWillReceiveProps( newProps ) {
this.buildQuery( newProps );
this.request();
}
buildQuery( props ) {
if ( isString( mapPropsToQuery ) ) {
this.query = mapPropsToQuery;
} else {
this.query = mapPropsToQuery( props );
}
this.variables = mapPropsToVariables( props );
this.parsedQuery = parse( this.query, this.variables );
}
cancelRequest() {
this.cancelRequestPromise && this.cancelRequestPromise();
}
request = () => {
this.cancelRequest();
const cancelablePromise = makePromiseCancelable( this.triggerGraphRequest() );
this.cancelRequestPromise = cancelablePromise.cancel;
cancelablePromise.promise
.then( results => {
this.setState( results );
this.cancelRequestPromise = false;
} )
.catch( () => {} ); // avoid console warnings
};
triggerGraphRequest() {
if ( process.env.NODE_ENV === 'development' ) {
return graphql( this.context.graph.schema, this.query, this.context.graph.root, { uid: this.uid }, this.variables );
}
return quickGraphql( this.parsedQuery, this.context.graph.root, { uid: this.uid } );
}
render() {
return (
<WrappedComponent { ...this.props } { ...this.state } />
);
}
};
};
export default query;
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/helpers/refresh.js | JavaScript | import { getRequest, getRequestIgnoringUid } from '../redux/selectors';
import { addRequest, removeRequest } from '../redux/actions';
export const refreshByUid = ( store, uid, type, options, triggerRequest ) => {
const state = store.getState();
const request = getRequest( state, uid, type, options );
if ( ! request ) {
store.dispatch( addRequest( uid, type, options ) );
triggerRequest();
}
};
export const refreshWhenExpired = ( store, type, options, timeout, triggerRequest ) => {
const state = store.getState();
const request = getRequestIgnoringUid( state, type, options );
const refresh = ! request || ( Date.now() - request.createdAt >= timeout );
if ( refresh ) {
if ( request ) {
store.dispatch( removeRequest( request.uid, type, options ) );
}
store.dispatch( addRequest( '', type, options ) );
triggerRequest();
}
};
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/index.js | JavaScript | export { default as query } from './components/query';
export { default as GraphProvider } from './components/provider';
export { default as GraphiQL } from './components/graphiql';
export { default as GraphReducer } from './redux/reducer';
export * from './helpers/refresh';
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/quick-graphql/execute.js | JavaScript | import { isArray, isPlainObject, isFunction } from 'lodash';
const resolveNode = ( node, resolver, context ) => {
let resolved = resolver;
if ( isFunction( resolver ) ) {
resolved = resolver( node.arguments, context );
}
if ( isPlainObject( resolved ) ) {
return resolveNodes( node.nodes, resolved, context ); // eslint-disable-line no-use-before-define
}
if ( isArray( resolved ) ) {
return resolved.map(
resolvedItem => resolveNodes( node.nodes, resolvedItem, context ) // eslint-disable-line no-use-before-define
);
}
return resolved;
};
const resolveNodes = ( nodes, resolvers = {}, context ) => {
return nodes.reduce( ( memo, node ) => {
memo[ node.name ] = resolveNode( node, resolvers[ node.name ], context );
return memo;
}, {} );
};
export default ( query, rootResolver, context ) =>
new Promise( ( resolve ) => {
const data = resolveNodes( query.nodes, rootResolver, context );
resolve( { data } );
} );
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/quick-graphql/index.js | JavaScript | export { default as quickGraphql } from './execute';
export { default as parse } from './parse';
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/quick-graphql/parse.js | JavaScript | import { parse, visit } from 'graphql';
export default ( queryString, variables ) => {
const ast = parse( queryString );
const parsed = visit( ast, {
Document: { leave: node => node.definitions[ 0 ] },
OperationDefinition: { leave: node => {
return {
nodes: node.selectionSet
};
} },
SelectionSet: { leave: node => node.selections },
Field: { leave: node => {
return {
name: node.name.value,
nodes: node.selectionSet,
arguments: node.arguments.reduce( ( memo, arg ) => {
memo[ arg.name ] = arg.value;
return memo;
}, {} )
};
} },
Argument: { leave: node => {
return {
name: node.name.value,
value: node.value
};
} },
Variable: { leave: node => variables[ node.name.value ] },
ObjectValue: { leave: node => {
return node.fields.reduce( ( memo, field ) => {
memo[ field.name ] = field.value;
return memo;
}, {} );
} },
ObjectField: { leave: node => {
return {
name: node.name.value,
value: node.value
};
} },
StringValue: { leave: node => node.value },
IntValue: { leave: node => parseInt( node.value, 10 ) },
BooleanValue: { leave: node => node.value },
NullValue: { leave: () => null },
FloatValue: { leave: node => parseFloat( node.value ) },
ListValue: { leave: node => node.values },
} );
return parsed;
};
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/redux/actions.js | JavaScript | export const GRAPH_RESOLVER_REQUEST_ADD = 'GRAPH_RESOLVER_REQUEST_ADD';
export const GRAPHQL_RESOLVER_REQUEST_CLEAR = 'GRAPHQL_RESOLVER_REQUEST_CLEAR';
export const GRAPH_RESOLVER_REQUEST_REMOVE = 'GRAPH_RESOLVER_REQUEST_REMOVE';
export function addRequest( uid, type, options = {} ) {
const createdAt = Date.now();
return {
type: GRAPH_RESOLVER_REQUEST_ADD,
payload: {
uid,
type,
options,
createdAt
}
};
}
export function removeRequest( uid, type, options = {} ) {
return {
type: GRAPH_RESOLVER_REQUEST_REMOVE,
payload: {
uid,
type,
options
}
};
}
export function clearRequests( uid ) {
return {
type: GRAPHQL_RESOLVER_REQUEST_CLEAR,
payload: uid
};
}
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/redux/reducer.js | JavaScript | import { filter, omit } from 'lodash';
import { GRAPH_RESOLVER_REQUEST_ADD, GRAPH_RESOLVER_REQUEST_REMOVE, GRAPHQL_RESOLVER_REQUEST_CLEAR } from './actions';
const handleAdd = ( state, { payload: { uid, type, options, createdAt } } ) => {
const optionsSerialization = JSON.stringify( options );
return {
...state,
[ uid ]: [
...( state[ uid ] ? state[ uid ] : [] ),
{ type, options: optionsSerialization, createdAt }
]
};
}
const handleRemove = ( state, { payload: { uid, type, options } } ) => {
const optionsSerialization = JSON.stringify( options );
return {
...state,
[ uid ]: filter( state[ uid ], request => {
return request.type !== type || request.options !== optionsSerialization;
} )
};
};
const handleClear = ( state, { payload: uid } ) => {
return omit( state, [ uid ] );
};
const reducer = ( state = {}, action ) => {
switch ( action.type ) {
case GRAPH_RESOLVER_REQUEST_ADD:
return handleAdd( state, action );
case GRAPH_RESOLVER_REQUEST_REMOVE:
return handleRemove( state, action );
case GRAPHQL_RESOLVER_REQUEST_CLEAR:
return handleClear( state, action );
default:
return state;
}
}
export default reducer;
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/redux/selectors.js | JavaScript | import { find, flatten, get, values } from 'lodash';
export const getRequest = ( state, uid, type, options = {} ) => {
const optionsSerialization = JSON.stringify( options );
return find( get( state.graphqlResolvers, [ uid ], [] ), request => {
return request.type === type && request.options === optionsSerialization;
} );
};
export const getRequestIgnoringUid = ( state, type, options = {} ) => {
const optionsSerialization = JSON.stringify( options );
return find( flatten( values( state.graphqlResolvers ) ), request => {
return request.type === type && request.options === optionsSerialization;
} );
};
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
src/utils/promises.js | JavaScript | /**
* Takes a promise and transform it to a cancelable promise by adding a "cancel" method
* @param {Promise} promise Promise to make cancelable
* @return {Promise} Cancelble promise
*/
export const makePromiseCancelable = promise => {
let hasCanceled_ = false;
const wrappedPromise = new Promise( ( resolve, reject ) => {
promise.then( val =>
hasCanceled_ ? reject( { isCanceled: true } ) : resolve( val )
);
promise.catch( error =>
hasCanceled_ ? reject( { isCanceled: true } ) : reject( error )
);
} );
return {
promise: wrappedPromise,
cancel() {
hasCanceled_ = true;
},
};
};
| youknowriad/react-graphql-redux | 49 | This library allows you to use GraphQL to query your Redux store | JavaScript | youknowriad | Riad Benguella | Automattic |
.devcontainer/post-create.sh | Shell | #!/usr/bin/env bash
if [ -f package.json ]; then
bash -i -c "nvm install --lts && nvm install-latest-npm"
npm i
npm run build
fi
# Install dependencies for shfmt extension
curl -sS https://webi.sh/shfmt | sh &>/dev/null
# Add OMZ plugins
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
sed -i -E "s/^(plugins=\()(git)(\))/\1\2 zsh-syntax-highlighting zsh-autosuggestions\3/" ~/.zshrc
# Avoid git log use less
echo -e "\nunset LESS" >>~/.zshrc
| youtalk/chirpy-starter | 0 | Shell | youtalk | Yutaka Kondo | tier4 | |
_plugins/posts-lastmod-hook.rb | Ruby | #!/usr/bin/env ruby
#
# Check for changed posts
Jekyll::Hooks.register :posts, :post_init do |post|
commit_num = `git rev-list --count HEAD "#{ post.path }"`
if commit_num.to_i > 1
lastmod_date = `git log -1 --pretty="%ad" --date=iso "#{ post.path }"`
post.data['last_modified_at'] = lastmod_date
end
end
| youtalk/chirpy-starter | 0 | Shell | youtalk | Yutaka Kondo | tier4 | |
index.html | HTML | ---
layout: home
# Index page
---
| youtalk/chirpy-starter | 0 | Shell | youtalk | Yutaka Kondo | tier4 | |
tools/run.sh | Shell | #!/usr/bin/env bash
#
# Run jekyll serve and then launch the site
prod=false
command="bundle exec jekyll s -l"
host="127.0.0.1"
help() {
echo "Usage:"
echo
echo " bash /path/to/run [options]"
echo
echo "Options:"
echo " -H, --host [HOST] Host to bind to."
echo " -p, --production Run Jekyll in 'production' mode."
echo " -h, --help Print this help information."
}
while (($#)); do
opt="$1"
case $opt in
-H | --host)
host="$2"
shift 2
;;
-p | --production)
prod=true
shift
;;
-h | --help)
help
exit 0
;;
*)
echo -e "> Unknown option: '$opt'\n"
help
exit 1
;;
esac
done
command="$command -H $host"
if $prod; then
command="JEKYLL_ENV=production $command"
fi
if [ -e /proc/1/cgroup ] && grep -q docker /proc/1/cgroup; then
command="$command --force_polling"
fi
echo -e "\n> $command\n"
eval "$command"
| youtalk/chirpy-starter | 0 | Shell | youtalk | Yutaka Kondo | tier4 | |
tools/test.sh | Shell | #!/usr/bin/env bash
#
# Build and test the site content
#
# Requirement: html-proofer, jekyll
#
# Usage: See help information
set -eu
SITE_DIR="_site"
_config="_config.yml"
_baseurl=""
help() {
echo "Build and test the site content"
echo
echo "Usage:"
echo
echo " bash $0 [options]"
echo
echo "Options:"
echo ' -c, --config "<config_a[,config_b[...]]>" Specify config file(s)'
echo " -h, --help Print this information."
}
read_baseurl() {
if [[ $_config == *","* ]]; then
# multiple config
IFS=","
read -ra config_array <<<"$_config"
# reverse loop the config files
for ((i = ${#config_array[@]} - 1; i >= 0; i--)); do
_tmp_baseurl="$(grep '^baseurl:' "${config_array[i]}" | sed "s/.*: *//;s/['\"]//g;s/#.*//")"
if [[ -n $_tmp_baseurl ]]; then
_baseurl="$_tmp_baseurl"
break
fi
done
else
# single config
_baseurl="$(grep '^baseurl:' "$_config" | sed "s/.*: *//;s/['\"]//g;s/#.*//")"
fi
}
main() {
# clean up
if [[ -d $SITE_DIR ]]; then
rm -rf "$SITE_DIR"
fi
read_baseurl
# build
JEKYLL_ENV=production bundle exec jekyll b \
-d "$SITE_DIR$_baseurl" -c "$_config"
# test
bundle exec htmlproofer "$SITE_DIR" \
--disable-external \
--ignore-urls "/^http:\/\/127.0.0.1/,/^http:\/\/0.0.0.0/,/^http:\/\/localhost/"
}
while (($#)); do
opt="$1"
case $opt in
-c | --config)
_config="$2"
shift
shift
;;
-h | --help)
help
exit 0
;;
*)
# unknown option
help
exit 1
;;
esac
done
main
| youtalk/chirpy-starter | 0 | Shell | youtalk | Yutaka Kondo | tier4 | |
lib/cargo-miri-wrapper.sh | Shell | #!@bash@ -e
src_dir="@out@/lib/rustlib/src/rust/library"
if [[ ! -v XARGO_RUST_SRC ]]; then
if [[ ! -d "$src_dir" ]]; then
echo '`rust-src` is required by miri but not installed.' >&2
echo 'Please either install component `rust-src` or set `XARGO_RUST_SRC`.' >&2
exit 1
fi
export XARGO_RUST_SRC="$src_dir"
fi
exec -a "$0" "@cargo_miri@" "$@"
| yshui/rustup.nix | 0 | declaratively download rust toolchains with nix | Nix | yshui | Yuxuan Shui | CodeWeavers |
gl-bindings/build.rs | Rust | use gl_generator::{Api, Fallbacks, Profile, Registry, StructGenerator};
fn main() -> anyhow::Result<()> {
let out_dir = std::env::var("OUT_DIR")?;
let out_dir = std::path::Path::new(&out_dir);
let target = std::env::var("TARGET").unwrap();
if target.contains("linux")
|| target.contains("dragonfly")
|| target.contains("freebsd")
|| target.contains("netbsd")
|| target.contains("openbsd")
{
let mut file = std::fs::File::create(out_dir.join("egl_bindings.rs")).unwrap();
let reg = Registry::new(Api::Egl, (1, 5), Profile::Core, Fallbacks::All, [
"EGL_ANDROID_native_fence_sync",
"EGL_KHR_platform_x11",
"EGL_EXT_device_base",
"EGL_EXT_device_drm",
"EGL_EXT_device_drm_render_node",
"EGL_EXT_device_query",
"EGL_KHR_fence_sync",
"EGL_EXT_image_dma_buf_import",
"EGL_EXT_image_dma_buf_import_modifiers",
]);
reg.write_bindings(StructGenerator, &mut file).unwrap();
let mut file = std::fs::File::create(out_dir.join("gl_bindings.rs")).unwrap();
Registry::new(Api::Gl, (3, 3), Profile::Core, Fallbacks::All, ["GL_EXT_EGL_image_storage"])
.write_bindings(StructGenerator, &mut file)
.unwrap();
}
Ok(())
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
gl-bindings/src/lib.rs | Rust | pub mod egl {
#![cfg(any(
target_os = "linux",
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(non_camel_case_types)]
#![allow(clippy::missing_safety_doc)]
#![allow(clippy::manual_non_exhaustive)]
#![allow(clippy::unnecessary_cast)]
#![allow(clippy::missing_transmute_annotations)]
pub mod sys {
pub type khronos_utime_nanoseconds_t = super::khronos_utime_nanoseconds_t;
pub type khronos_uint64_t = super::khronos_uint64_t;
pub type khronos_ssize_t = super::khronos_ssize_t;
pub type EGLNativeDisplayType = super::EGLNativeDisplayType;
pub type EGLNativePixmapType = super::EGLNativePixmapType;
pub type EGLNativeWindowType = super::EGLNativeWindowType;
pub type EGLint = super::EGLint;
pub type NativeDisplayType = super::EGLNativeDisplayType;
pub type NativePixmapType = super::EGLNativePixmapType;
pub type NativeWindowType = super::EGLNativeWindowType;
include!(concat!(env!("OUT_DIR"), "/egl_bindings.rs"));
// TODO should upstream these:
// EGL_EXT_platform_xcb
pub const PLATFORM_XCB_EXT: super::EGLenum = 0x31DC;
pub const PLATFORM_XCB_SCREEN_EXT: super::EGLenum = 0x31DE;
// EGL_EXT_device_query_name
pub const RENDERER_EXT: super::EGLenum = 0x335F;
// EGL_ANGLE_platform_angle - https://chromium.googlesource.com/angle/angle/+/HEAD/extensions/EGL_ANGLE_platform_angle.txt
pub const PLATFORM_ANGLE_ANGLE: super::EGLenum = 0x3202;
pub const PLATFORM_ANGLE_TYPE_ANGLE: super::EGLenum = 0x3203;
pub const PLATFORM_ANGLE_MAX_VERSION_MAJOR_ANGLE: super::EGLenum = 0x3204;
pub const PLATFORM_ANGLE_MAX_VERSION_MINOR_ANGLE: super::EGLenum = 0x3205;
pub const PLATFORM_ANGLE_DEBUG_LAYERS_ENABLED: super::EGLenum = 0x3451;
pub const PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE: super::EGLenum = 0x348F;
pub const PLATFORM_ANGLE_TYPE_DEFAULT_ANGLE: super::EGLenum = 0x3206;
pub const PLATFORM_ANGLE_DEVICE_TYPE_HARDWARE_ANGLE: super::EGLenum = 0x320A;
pub const PLATFORM_ANGLE_DEVICE_TYPE_NULL_ANGLE: super::EGLenum = 0x345E;
pub const DRM_RENDER_NODE_FILE_EXT: super::EGLenum = 0x3377;
}
use std::os::raw;
pub use self::sys::types::{EGLContext, EGLDisplay};
pub type khronos_utime_nanoseconds_t = khronos_uint64_t;
pub type khronos_uint64_t = u64;
pub type khronos_ssize_t = raw::c_long;
pub type EGLint = i32;
pub type EGLenum = raw::c_uint;
pub type EGLNativeDisplayType = *const raw::c_void;
// FIXME: egl_native_pixmap_t instead
#[cfg(not(windows))]
pub type EGLNativePixmapType = *const raw::c_void;
#[cfg(not(windows))]
pub type EGLNativeWindowType = *const raw::c_void;
}
pub mod gl {
include!(concat!(env!("OUT_DIR"), "/gl_bindings.rs"));
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/build.rs | Rust | #[derive(Debug)]
struct ParseCallbacks;
impl bindgen::callbacks::ParseCallbacks for ParseCallbacks {
fn enum_variant_name(
&self,
enum_name: Option<&str>,
original_variant_name: &str,
_variant_value: bindgen::callbacks::EnumVariantValue,
) -> Option<String> {
let enum_name = enum_name?.trim_start_matches("enum ").trim_end_matches("_t");
if original_variant_name.to_ascii_lowercase().starts_with(enum_name) {
Some(original_variant_name[enum_name.len() + 1..].to_string())
} else {
None
}
}
}
fn main() -> anyhow::Result<()> {
let pixman = pkg_config::probe_library("pixman-1")?;
let libxcb = pkg_config::probe_library("xcb")?;
let egl = pkg_config::probe_library("egl")?;
let manifest_dir = std::env::var("CARGO_MANIFEST_DIR")?;
let manifest_dir = std::path::Path::new(&manifest_dir);
let out_dir = std::env::var("OUT_DIR")?;
let out_dir = std::path::Path::new(&out_dir);
println!("cargo:rustc-link-search=native={}", egl.link_paths[0].display());
let bindings = bindgen::Builder::default()
.header("picom.h")
.clang_arg(format!("-I{}", manifest_dir.display()))
.clang_args(pixman.include_paths.iter().map(|p| format!("-I{}", p.display())))
.clang_args(libxcb.include_paths.iter().map(|p| format!("-I{}", p.display())))
.allowlist_function("picom_api_get_interfaces")
.allowlist_function("backend_register")
.opaque_type("image_handle")
.newtype_enum(".*")
.parse_callbacks(Box::new(ParseCallbacks))
.generate()?;
bindings.write_to_file(out_dir.join("bindings.rs"))?;
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=picom");
Ok(())
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/picom.h | C/C++ Header | #include <picom/api.h>
#include <picom/backend.h>
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/picom/api.h | C/C++ Header | // SPDX-License-Identifier: MPL-2.0
// Copyright (c) Yuxuan Shui <yshuiv7@gmail.com>
#pragma once
#include <stdbool.h>
#include <stdint.h>
#define PICOM_API_MAJOR (0UL)
#define PICOM_API_MINOR (1UL)
struct backend_base;
/// The entry point of a backend plugin. Called after the backend is initialized.
typedef void (*picom_backend_plugin_entrypoint)(struct backend_base *backend, void *user_data);
struct picom_api {
/// Add a plugin for a specific backend. The plugin's entry point will be called
/// when the specified backend is initialized.
///
/// @param backend_name The name of the backend to add the plugin to.
/// @param major The major version of the backend API interface this plugin
/// is compatible with.
/// @param minor The minor version of the backend API interface this plugin
/// is compatible with.
/// @param entrypoint The entry point of the plugin.
/// @param user_data The user data to pass to the plugin's entry point.
bool (*add_backend_plugin)(const char *backend_name, uint64_t major, uint64_t minor,
picom_backend_plugin_entrypoint entrypoint,
void *user_data);
};
const struct picom_api *
picom_api_get_interfaces(uint64_t major, uint64_t minor, const char *context);
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/picom/backend.h | C/C++ Header | // SPDX-License-Identifier: MPL-2.0
// Copyright (c) Yuxuan Shui <yshuiv7@gmail.com>
#pragma once
#include <pixman-1/pixman.h>
#include <stdbool.h>
#include <xcb/xproto.h>
#include "types.h"
#define PICOM_BACKEND_MAJOR (1UL)
#define PICOM_BACKEND_MINOR (0UL)
#define PICOM_BACKEND_MAKE_VERSION(major, minor) ((major) * 1000 + (minor))
typedef pixman_region32_t region_t;
struct xvisual_info {
/// Bit depth of the red component
int red_size;
/// Bit depth of the green component
int green_size;
/// Bit depth of the blue component
int blue_size;
/// Bit depth of the alpha component
int alpha_size;
/// The depth of X visual
int visual_depth;
xcb_visualid_t visual;
};
typedef struct session session_t;
struct managed_win;
struct ev_loop;
struct backend_operations;
typedef struct backend_base backend_t;
// This mimics OpenGL's ARB_robustness extension, which enables detection of GPU context
// resets.
// See: https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_robustness.txt, section
// 2.6 "Graphics Reset Recovery".
enum device_status {
DEVICE_STATUS_NORMAL,
DEVICE_STATUS_RESETTING,
};
enum shader_attributes {
// Whether the shader needs to be render regardless of whether the window is
// updated.
SHADER_ATTRIBUTE_ANIMATED = 1,
};
struct gaussian_blur_args {
int size;
double deviation;
};
struct box_blur_args {
int size;
};
struct kernel_blur_args {
struct conv **kernels;
int kernel_count;
};
struct dual_kawase_blur_args {
int size;
int strength;
};
typedef struct image_handle {
// Intentionally left blank
} *image_handle;
/// A mask for various backend operations.
///
/// The mask is composed of both a mask region and a mask image. The resulting mask
/// is the intersection of the two. The mask image can be modified by the `corner_radius`
/// and `inverted` properties. Note these properties have no effect on the mask region.
struct backend_mask_image {
/// Mask image, can be NULL.
///
/// Mask image must be an image that was created with the
/// `BACKEND_IMAGE_FORMAT_MASK` format. Using an image with a wrong format as mask
/// is undefined behavior.
image_handle image;
/// Corner radius of the mask image, the corners of the mask image will be
/// rounded.
double corner_radius;
/// Origin of the mask image, in the source image's coordinate.
ivec2 origin;
/// Whether the mask image should be inverted.
bool inverted;
};
struct backend_blur_args {
/// The blur context
void *blur_context;
/// The source mask for the blur operation, may be NULL. Only parts of the source
/// image covered by the mask should participate in the blur operation.
const struct backend_mask_image *source_mask;
/// Region of the target image that will be covered by the blur operation, in the
/// source image's coordinate.
const region_t *target_mask;
/// Source image
image_handle source_image;
/// Opacity of the blurred image
double opacity;
};
struct backend_blit_args {
/// Source image, can be NULL.
image_handle source_image;
/// Mask for the source image. may be NULL. Only contents covered by the mask
/// should participate in the blit operation. This applies to the source image
/// before it's scaled.
const struct backend_mask_image *source_mask;
/// Mask for the target image. Only regions of the target image covered by this
/// mask should be modified. This is the target's coordinate system.
const region_t *target_mask;
/// Custom shader for this blit operation.
void *shader;
/// Opacity of the source image.
double opacity;
/// Dim level of the source image.
double dim;
/// Brightness limit of the source image. Source image
/// will be normalized so that the maximum brightness is
/// this value.
double max_brightness;
/// Scale factor for the horizontal and vertical direction (X for horizontal,
/// Y for vertical).
vec2 scale;
/// Corner radius of the source image BEFORE scaling. The corners of
/// the source image will be rounded.
double corner_radius;
/// Effective size of the source image BEFORE scaling, set where the corners
/// of the image are.
ivec2 effective_size;
/// Border width of the source image BEFORE scaling. This is used with
/// `corner_radius` to create a border for the rounded corners.
/// Setting this has no effect if `corner_radius` is 0.
int border_width;
/// Whether the source image should be inverted.
bool color_inverted;
};
enum backend_image_format {
/// A format that can be used for normal rendering, and binding
/// X pixmaps.
/// Images created with `bind_pixmap` have this format automatically.
BACKEND_IMAGE_FORMAT_PIXMAP,
/// Like `BACKEND_IMAGE_FORMAT_PIXMAP`, but the image has a higher
/// precision. Support is optional.
BACKEND_IMAGE_FORMAT_PIXMAP_HIGH,
/// A format that can be used for masks.
BACKEND_IMAGE_FORMAT_MASK,
};
enum backend_image_capability {
/// Image can be sampled from. This is required for `blit` and `blur` source
/// images. All images except the back buffer should have this capability.
/// Note that `copy_area` should work without this capability, this is so that
/// blurring the back buffer could be done.
BACKEND_IMAGE_CAP_SRC = 1 << 0,
/// Image can be rendered to. This is required for target images of any operation.
/// All images except bound X pixmaps should have this capability.
BACKEND_IMAGE_CAP_DST = 1 << 1,
};
enum backend_command_op {
BACKEND_COMMAND_INVALID = -1,
BACKEND_COMMAND_BLIT,
BACKEND_COMMAND_BLUR,
BACKEND_COMMAND_COPY_AREA,
};
/// Symbolic references used as render command source images. The actual `image_handle`
/// will later be filled in by the renderer using this symbolic reference.
enum backend_command_source {
BACKEND_COMMAND_SOURCE_WINDOW,
BACKEND_COMMAND_SOURCE_SHADOW,
BACKEND_COMMAND_SOURCE_BACKGROUND,
};
// TODO(yshui) might need better names
struct backend_command {
enum backend_command_op op;
ivec2 origin;
enum backend_command_source source;
union {
struct {
struct backend_blit_args blit;
/// Region of the screen that will be covered by this blit
/// operations, in screen coordinates.
region_t opaque_region;
};
struct {
image_handle source_image;
const region_t *region;
} copy_area;
struct backend_blur_args blur;
};
/// Source mask for the operation.
/// If the `source_mask` of the operation's argument points to this, a mask image
/// will be created for the operation for the renderer.
struct backend_mask_image source_mask;
/// Target mask for the operation.
region_t target_mask;
};
enum backend_quirk {
/// Backend cannot do blur quickly. The compositor will avoid using blur to create
/// shadows on this backend
BACKEND_QUIRK_SLOW_BLUR = 1 << 0,
};
struct backend_operations {
// =========== Initialization ===========
/// Initialize the backend, prepare for rendering to the target window.
backend_t *(*init)(session_t *, xcb_window_t) __attribute__((nonnull(1)));
void (*deinit)(backend_t *backend_data) __attribute__((nonnull(1)));
/// Called when rendering will be stopped for an unknown amount of
/// time (e.g. when screen is unredirected). Free some resources.
///
/// Optional, not yet used
void (*pause)(backend_t *backend_data, session_t *ps);
/// Called before rendering is resumed
///
/// Optional, not yet used
void (*resume)(backend_t *backend_data, session_t *ps);
/// Called when root window size changed. All existing image data ever
/// returned by this backend should remain valid after this call
/// returns.
///
/// Optional
void (*root_change)(backend_t *backend_data, session_t *ps);
// =========== Rendering ============
/// Called before when a new frame starts.
///
/// Optional
void (*prepare)(backend_t *backend_data, const region_t *reg_damage);
/// Multiply the alpha channel of the target image by a given value.
///
/// @param backend_data backend data
/// @param target an image handle, cannot be NULL.
/// @param alpha the alpha value to multiply
/// @param region the region to apply the alpha, in the target image's
/// coordinate.
bool (*apply_alpha)(struct backend_base *backend_data, image_handle target,
double alpha, const region_t *region)
__attribute__((nonnull(1, 2, 4)));
/// Copy pixels from a source image on to the target image.
///
/// Some effects may be applied. If the region specified by the mask
/// contains parts that are outside the source image, the source image
/// will be repeated to fit.
///
/// Source and target MUST NOT be the same image.
///
/// @param backend_data backend data
/// @param origin the origin of the operation, in the target image's
/// coordinate.
/// @param target an image handle, cannot be NULL.
/// @param args arguments for blit
/// @return whether the operation is successful
bool (*blit)(struct backend_base *backend_data, ivec2 origin, image_handle target,
const struct backend_blit_args *args) __attribute__((nonnull(1, 3, 4)));
/// Blur a given region of a source image and store the result in the
/// target image.
///
/// The blur operation might access pixels outside the mask region, the
/// amount of pixels accessed can be queried with `get_blur_size`. If
/// pixels outside the source image are accessed, the result will be
/// clamped to the edge of the source image.
///
/// Source and target may be the same image.
///
/// @param backend_data backend data
/// @param origin the origin of the operation, in the target image's
/// coordinate.
/// @param target an image handle, cannot be NULL.
/// @param args argument for blur
/// @return whether the operation is successful
bool (*blur)(struct backend_base *backend_data, ivec2 origin, image_handle target,
const struct backend_blur_args *args) __attribute__((nonnull(1, 3, 4)));
/// Direct copy of pixels from a source image on to the target image.
/// This is a simpler version of `blit`, without any effects. Note unlike `blit`,
/// if `region` tries to sample from outside the source image, instead of
/// repeating, the result will be clamped to the edge of the source image.
/// Blending should not be applied for the copy.
///
/// Source and target MUST NOT be the same image.
///
/// @param backend_data backend data
/// @param origin the origin of the operation, in the target image's
/// coordinate.
/// @param target an image handle, cannot be NULL.
/// @param source an image handle, cannot be NULL.
/// @param region the region to copy, in the target image's coordinate.
/// @return whether the operation is successful
bool (*copy_area)(struct backend_base *backend_data, ivec2 origin,
image_handle target, image_handle source, const region_t *region)
__attribute__((nonnull(1, 3, 4, 5)));
/// Similar to `copy_area`, but is specialized for copying from a higher
/// precision format to a lower precision format. It has 2 major differences from
/// `copy_area`:
///
/// 1. This function _may_ use dithering when copying from a higher precision
/// format to a lower precision format. But this is not required.
/// 2. This function only needs to support copying from an image with the SRC
/// capability. Unlike `copy_area`, which supports copying from any image.
///
/// It's perfectly legal to have this pointing to the same function as
/// `copy_area`, if the backend doesn't support dithering.
///
/// @param backend_data backend data
/// @param origin the origin of the operation, in the target image's
/// coordinate.
/// @param target an image handle, cannot be NULL.
/// @param source an image handle, cannot be NULL.
/// @param region the region to copy, in the target image's coordinate.
/// @return whether the operation is successful
bool (*copy_area_quantize)(struct backend_base *backend_data, ivec2 origin,
image_handle target, image_handle source,
const region_t *region)
__attribute__((nonnull(1, 3, 4, 5)));
/// Initialize an image with a given color value. If the image has a mask format,
/// only the alpha channel of the color is used.
///
/// @param backend_data backend data
/// @param target an image handle, cannot be NULL.
/// @param color the color to fill the image with
/// @return whether the operation is successful
bool (*clear)(struct backend_base *backend_data, image_handle target,
struct color color) __attribute__((nonnull(1, 2)));
/// Present the back buffer to the target window. Ideally the backend should keep
/// track of the region of the back buffer that has been updated, and use relevant
/// mechanism (when possible) to present only the updated region.
bool (*present)(struct backend_base *backend_data) __attribute__((nonnull(1)));
// ============ Resource management ===========
/// Create a shader object from a shader source.
///
/// Optional
void *(*create_shader)(backend_t *backend_data, const char *source)
__attribute__((nonnull(1, 2)));
/// Free a shader object.
///
/// Required if create_shader is present.
void (*destroy_shader)(backend_t *backend_data, void *shader)
__attribute__((nonnull(1, 2)));
/// Create a new, uninitialized image with the given format and size.
///
/// @param backend_data backend data
/// @param format the format of the image
/// @param size the size of the image
image_handle (*new_image)(struct backend_base *backend_data,
enum backend_image_format format, ivec2 size)
__attribute__((nonnull(1)));
/// Bind a X pixmap to the backend's internal image data structure.
///
/// @param backend_data backend data
/// @param pixmap X pixmap to bind
/// @param fmt information of the pixmap's visual
/// @return backend specific image handle for the pixmap. May be
/// NULL.
image_handle (*bind_pixmap)(struct backend_base *backend_data, xcb_pixmap_t pixmap,
struct xvisual_info fmt) __attribute__((nonnull(1)));
/// Acquire the image handle of the back buffer.
///
/// @param backend_data backend data
image_handle (*back_buffer)(struct backend_base *backend_data);
/// Free resources associated with an image data structure. Releasing the image
/// returned by `back_buffer` should be a no-op.
///
/// @param image the image to be released, cannot be NULL.
/// @return if this image is created by `bind_pixmap`, the X pixmap; 0
/// otherwise.
xcb_pixmap_t (*release_image)(struct backend_base *backend_data, image_handle image)
__attribute__((nonnull(1, 2)));
// =========== Query ===========
/// Get backend quirks
/// @return a bitmask of `enum backend_quirk`.
uint32_t (*quirks)(struct backend_base *backend_data) __attribute__((nonnull(1)));
/// Get the version of the backend
void (*version)(struct backend_base *backend_data, uint64_t *major, uint64_t *minor)
__attribute__((nonnull(1, 2, 3)));
/// Check if an optional image format is supported by the backend.
bool (*is_format_supported)(struct backend_base *backend_data,
enum backend_image_format format)
__attribute__((nonnull(1)));
/// Return the capabilities of an image.
uint32_t (*image_capabilities)(struct backend_base *backend_data, image_handle image)
__attribute__((nonnull(1, 2)));
/// Get the attributes of a shader.
///
/// Optional, Returns a bitmask of attributes, see `shader_attributes`.
uint64_t (*get_shader_attributes)(backend_t *backend_data, void *shader)
__attribute__((nonnull(1, 2)));
/// Get the age of the buffer content we are currently rendering on top
/// of. The buffer that has just been `present`ed has a buffer age of 1.
/// Every time `present` is called, buffers get older. Return -1 if the
/// buffer is empty.
///
/// Optional
int (*buffer_age)(backend_t *backend_data);
/// Get the render time of the last frame. If the render is still in progress,
/// returns false. The time is returned in `ts`. Frames are delimited by the
/// present() calls. i.e. after a present() call, last_render_time() should start
/// reporting the time of the just presented frame.
///
/// Optional, if not available, the most conservative estimation will be used.
bool (*last_render_time)(backend_t *backend_data, struct timespec *ts);
/// The maximum number buffer_age might return.
int (*max_buffer_age)(backend_t *backend_data);
// =========== Post-processing ============
/// Create a blur context that can be used to call `blur` for images with a
/// specific format.
void *(*create_blur_context)(backend_t *base, enum blur_method,
enum backend_image_format format, void *args);
/// Destroy a blur context
void (*destroy_blur_context)(backend_t *base, void *ctx);
/// Get how many pixels outside of the blur area is needed for blur
void (*get_blur_size)(void *blur_context, int *width, int *height);
// =========== Misc ============
/// Return the driver that is been used by the backend
enum driver (*detect_driver)(backend_t *backend_data);
void (*diagnostics)(backend_t *backend_data);
enum device_status (*device_status)(backend_t *backend_data);
};
struct backend_base {
struct backend_operations ops;
struct x_connection *c;
struct ev_loop *loop;
/// Whether the backend can accept new render request at the moment
bool busy;
// ...
};
/// Register a new backend, `major` and `minor` should be the version of the picom backend
/// interface. You should just pass `PICOM_BACKEND_MAJOR` and `PICOM_BACKEND_MINOR` here.
/// `name` is the name of the backend, `init` is the function to initialize the backend,
/// `can_present` should be true if the backend can present the back buffer to the screen,
/// false otherwise (e.g. if the backend does off screen rendering, etc.)
bool backend_register(uint64_t major, uint64_t minor, const char *name,
struct backend_base *(*init)(session_t *ps, xcb_window_t target),
bool can_present);
/// Define a backend entry point. (Note constructor priority 202 is used here because 1xx
/// is reversed by test.h, and 201 is used for logging initialization.)
#define BACKEND_ENTRYPOINT(func) static void __attribute__((constructor(202))) func(void)
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/picom/types.h | C/C++ Header | // SPDX-License-Identifier: MPL-2.0
// Copyright (c) Yuxuan Shui <yshuiv7@gmail.com>
#pragma once
/// Some common types
#include <limits.h>
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
enum blur_method {
BLUR_METHOD_NONE = 0,
BLUR_METHOD_KERNEL,
BLUR_METHOD_BOX,
BLUR_METHOD_GAUSSIAN,
BLUR_METHOD_DUAL_KAWASE,
BLUR_METHOD_INVALID,
};
/// Enumeration type to represent switches.
typedef enum {
OFF = 0, // false
ON, // true
UNSET
} switch_t;
enum tristate { TRI_FALSE = -1, TRI_UNKNOWN = 0, TRI_TRUE = 1 };
/// A structure representing margins around a rectangle.
typedef struct {
int top;
int left;
int bottom;
int right;
} margin_t;
struct color {
double red, green, blue, alpha;
};
typedef uint32_t opacity_t;
typedef struct vec2 {
union {
double x;
double width;
};
union {
double y;
double height;
};
} vec2;
typedef struct ivec2 {
union {
int x;
int width;
};
union {
int y;
int height;
};
} ivec2;
struct ibox {
ivec2 origin;
ivec2 size;
};
static const vec2 SCALE_IDENTITY = {1.0, 1.0};
static inline vec2 ivec2_as(ivec2 a) {
return (vec2){
.x = a.x,
.y = a.y,
};
}
static inline ivec2 ivec2_add(ivec2 a, ivec2 b) {
return (ivec2){
.x = a.x + b.x,
.y = a.y + b.y,
};
}
static inline ivec2 ivec2_sub(ivec2 a, ivec2 b) {
return (ivec2){
.x = a.x - b.x,
.y = a.y - b.y,
};
}
static inline bool ivec2_eq(ivec2 a, ivec2 b) {
return a.x == b.x && a.y == b.y;
}
static inline ivec2 ivec2_neg(ivec2 a) {
return (ivec2){
.x = -a.x,
.y = -a.y,
};
}
/// Saturating cast from a vec2 to a ivec2
static inline ivec2 vec2_as(vec2 a) {
return (ivec2){
.x = (int)fmin(fmax(a.x, INT_MIN), INT_MAX),
.y = (int)fmin(fmax(a.y, INT_MIN), INT_MAX),
};
}
static inline vec2 vec2_add(vec2 a, vec2 b) {
return (vec2){
.x = a.x + b.x,
.y = a.y + b.y,
};
}
static inline vec2 vec2_ceil(vec2 a) {
return (vec2){
.x = ceil(a.x),
.y = ceil(a.y),
};
}
static inline vec2 vec2_floor(vec2 a) {
return (vec2){
.x = floor(a.x),
.y = floor(a.y),
};
}
static inline bool vec2_eq(vec2 a, vec2 b) {
return a.x == b.x && a.y == b.y;
}
static inline vec2 vec2_scale(vec2 a, vec2 scale) {
return (vec2){
.x = a.x * scale.x,
.y = a.y * scale.y,
};
}
/// Check if two boxes have a non-zero intersection area.
static inline bool ibox_overlap(struct ibox a, struct ibox b) {
if (a.size.width <= 0 || a.size.height <= 0 || b.size.width <= 0 || b.size.height <= 0) {
return false;
}
if (a.origin.x <= INT_MAX - a.size.width && a.origin.y <= INT_MAX - a.size.height &&
(a.origin.x + a.size.width <= b.origin.x ||
a.origin.y + a.size.height <= b.origin.y)) {
return false;
}
if (b.origin.x <= INT_MAX - b.size.width && b.origin.y <= INT_MAX - b.size.height &&
(b.origin.x + b.size.width <= a.origin.x ||
b.origin.y + b.size.height <= a.origin.y)) {
return false;
}
return true;
}
static inline bool ibox_eq(struct ibox a, struct ibox b) {
return ivec2_eq(a.origin, b.origin) && ivec2_eq(a.size, b.size);
}
static inline ivec2 ivec2_scale_ceil(ivec2 a, vec2 scale) {
vec2 scaled = vec2_scale(ivec2_as(a), scale);
return vec2_as(vec2_ceil(scaled));
}
static inline ivec2 ivec2_scale_floor(ivec2 a, vec2 scale) {
vec2 scaled = vec2_scale(ivec2_as(a), scale);
return vec2_as(vec2_floor(scaled));
}
#define MARGIN_INIT \
{ 0, 0, 0, 0 }
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/src/cursor.rs | Rust | //! Cursor monitor
//!
//! Monitor cursor changes and import them into GL textures
use std::collections::{HashMap, HashSet};
use gl_bindings::{egl, gl};
use x11rb::{connection::Connection as _, protocol::xfixes::ConnectionExt as _};
#[derive(Debug)]
pub(crate) struct Cursor {
pub(crate) hotspot_x: u32,
pub(crate) hotspot_y: u32,
pub(crate) width: u32,
pub(crate) height: u32,
pub(crate) texture: gl::types::GLuint,
}
#[derive(Debug)]
pub(crate) enum CursorMessage {
NewCursor { serial: u32, cursor: Cursor },
ChangeCursor { serial: u32 },
}
struct CursorMonitorImpl {
egl: egl::sys::Egl,
gl: gl::Gl,
textures: Vec<gl::types::GLuint>,
cursor_serials: HashSet<u32>,
x11: x11rb::rust_connection::RustConnection,
egl_ctx: egl::sys::types::EGLContext,
egl_display: egl::sys::types::EGLDisplay,
tx: std::sync::mpsc::Sender<CursorMessage>,
}
impl CursorMonitorImpl {
fn run_inner(&mut self, screen: usize) -> anyhow::Result<()> {
let root = self.x11.setup().roots[screen].root;
let (major, minor) = x11rb::protocol::xfixes::X11_XML_VERSION;
self.x11.xfixes_query_version(major, minor)?.reply()?;
self.x11
.xfixes_select_cursor_input(
root,
x11rb::protocol::xfixes::CursorNotifyMask::DISPLAY_CURSOR,
)?
.check()?;
loop {
use x11rb::protocol::Event::*;
let event = self.x11.wait_for_event()?;
#[allow(clippy::single_match)]
match event {
XfixesCursorNotify(notify) => {
if self.cursor_serials.contains(¬ify.cursor_serial) {
if self
.tx
.send(CursorMessage::ChangeCursor { serial: notify.cursor_serial })
.is_err()
{
break;
}
} else {
let cursor = self.x11.xfixes_get_cursor_image()?.reply()?;
let mut texture = 0;
unsafe {
self.gl.GenTextures(1, &mut texture);
self.gl.BindTexture(gl::TEXTURE_2D, texture);
self.gl.TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA as _,
cursor.width as _,
cursor.height as _,
0,
gl::BGRA,
gl::UNSIGNED_BYTE,
cursor.cursor_image.as_ptr() as _,
);
self.gl.TexParameteri(
gl::TEXTURE_2D,
gl::TEXTURE_MIN_FILTER,
gl::NEAREST as _,
);
self.gl.TexParameteri(
gl::TEXTURE_2D,
gl::TEXTURE_MAG_FILTER,
gl::NEAREST as _,
);
self.gl.TexParameteri(
gl::TEXTURE_2D,
gl::TEXTURE_WRAP_S,
gl::CLAMP_TO_EDGE as _,
);
self.gl.TexParameteri(
gl::TEXTURE_2D,
gl::TEXTURE_WRAP_T,
gl::CLAMP_TO_EDGE as _,
);
self.gl.Flush();
self.gl.Finish();
}
self.textures.push(texture);
self.cursor_serials.insert(cursor.cursor_serial);
let err = unsafe { self.gl.GetError() };
if err != gl::NO_ERROR {
tracing::error!("GL error: {}", err);
}
if self
.tx
.send(CursorMessage::NewCursor {
serial: cursor.cursor_serial,
cursor: Cursor {
hotspot_x: cursor.xhot as _,
hotspot_y: cursor.yhot as _,
width: cursor.width as _,
height: cursor.height as _,
texture,
},
})
.is_err()
{
break;
}
}
}
_ => {}
}
}
Ok(())
}
fn run(mut self, screen: usize) {
if let Err(e) = self.run_inner(screen) {
tracing::error!("Error in cursor monitor: {:?}", e);
}
for texture in self.textures {
unsafe {
self.gl.DeleteTextures(1, &texture);
}
}
unsafe {
self.egl.MakeCurrent(
self.egl_display,
egl::sys::NO_SURFACE,
egl::sys::NO_SURFACE,
egl::sys::NO_CONTEXT,
);
self.egl.DestroyContext(self.egl_display, self.egl_ctx);
};
}
}
unsafe impl Send for CursorMonitorImpl {
}
pub(crate) struct CursorMonitor {
rx: std::sync::mpsc::Receiver<CursorMessage>,
cursors: HashMap<u32, Cursor>,
current_cursor: Option<u32>,
}
impl CursorMonitor {
    /// Non-blocking poll of the monitor thread's channel. Returns `Ok(None)`
    /// when the channel is empty; an error only if the sender disconnected.
    fn next(&self) -> anyhow::Result<Option<CursorMessage>> {
        Ok(self.rx.try_recv().map(Some).or_else(|e| {
            if e == std::sync::mpsc::TryRecvError::Empty {
                Ok(None)
            } else {
                Err(e)
            }
        })?)
    }
    /// Drain all pending cursor messages, then return the cursor that is
    /// currently active (if any has been reported yet).
    pub(crate) fn current_cursor(&mut self) -> anyhow::Result<Option<&Cursor>> {
        while let Some(msg) = self.next()? {
            tracing::debug!("Cursor message: {:?}", msg);
            match msg {
                CursorMessage::NewCursor { serial, cursor } => {
                    self.cursors.insert(serial, cursor);
                    self.current_cursor = Some(serial);
                }
                CursorMessage::ChangeCursor { serial } => {
                    self.current_cursor = Some(serial);
                }
            }
        }
        Ok(self.current_cursor.and_then(|c| self.cursors.get(&c)))
    }
    /// Spawn the cursor-monitor thread with its own X11 connection and a new
    /// EGL context on `dpy` that shares objects with `ctx` (the share-context
    /// argument to `CreateContext` below).
    pub(crate) fn new(
        ctx: egl::sys::types::EGLContext,
        dpy: egl::sys::types::EGLDisplay,
    ) -> anyhow::Result<Self> {
        let egl = egl::sys::Egl::load_with(|sym| {
            std::ffi::CString::new(sym.as_bytes())
                .map(|s| unsafe { crate::eglGetProcAddress(s.as_ptr()) })
                .unwrap()
        });
        let gl = gl::Gl::load_with(|sym| {
            std::ffi::CString::new(sym.as_bytes())
                .map(|s| unsafe { crate::eglGetProcAddress(s.as_ptr()) })
                .unwrap()
        });
        let (tx, rx) = std::sync::mpsc::channel();
        let (x11, screen) = x11rb::rust_connection::RustConnection::connect(None)?;
        // Null config + null attribs; the new context shares objects with the
        // caller-supplied `ctx`.
        let ctx = unsafe { egl.CreateContext(dpy, std::ptr::null(), ctx, std::ptr::null()) };
        if ctx == egl::sys::NO_CONTEXT {
            return Err(anyhow::anyhow!("Failed to create context"));
        }
        let monitor = CursorMonitorImpl {
            x11,
            egl,
            gl,
            egl_ctx: ctx,
            egl_display: dpy,
            tx,
            textures: Vec::new(),
            cursor_serials: HashSet::new(),
        };
        std::thread::spawn(move || {
            // Make the freshly created context current on the monitor thread;
            // bail out silently if that fails.
            if unsafe {
                monitor.egl.MakeCurrent(
                    monitor.egl_display,
                    egl::sys::NO_SURFACE,
                    egl::sys::NO_SURFACE,
                    monitor.egl_ctx,
                )
            } == egl::sys::FALSE
            {
                return;
            }
            monitor.run(screen);
            tracing::warn!("Cursor monitor thread exited");
        });
        Ok(Self { rx, cursors: HashMap::new(), current_cursor: None })
    }
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/src/ffi.rs | Rust | use std::os::raw::c_void;
use libffi::{
high::CType,
low::CodePtr,
raw::{self, ffi_closure},
};
// Converts the raw status type to a `Result`.
fn status_to_result(status: raw::ffi_status) -> Result<(), libffi::low::Error> {
if status == raw::ffi_status_FFI_OK {
Ok(())
} else if status == raw::ffi_status_FFI_BAD_TYPEDEF {
Err(libffi::low::Error::Typedef)
}
// If we don't recognize the status, that is an ABI error:
else {
Err(libffi::low::Error::Abi)
}
}
/// Heap-allocated state backing a [`Closure1`]: the Rust callback, its
/// userdata, and the libffi call description. It must stay at a stable
/// address for as long as the closure is callable, since `cif` and `atypes`
/// are referenced by the registered trampoline.
struct ClosureData1<A, U, R> {
    callback: fn(&mut A, *mut U) -> R,
    userdata: U,
    cif: libffi::low::ffi_cif,
    // Argument type array referenced by `cif`; kept alive alongside it.
    atypes: [*mut libffi::low::ffi_type; 1],
}
/// A C-callable trampoline with one foreign argument, wrapping a Rust `fn`
/// plus userdata. The closure allocation and the boxed data are freed on drop.
pub struct Closure1<A, U, R> {
    closure: *mut ffi_closure,
    // The C function pointer handed out to foreign code.
    fnptr: CodePtr,
    data: *mut ClosureData1<A, U, R>,
}
impl<A, U, R> Closure1<A, U, R> {
    /// Raw C function pointer; must not be called after `self` is dropped.
    pub fn code_ptr(&self) -> *mut c_void { self.fnptr.0 }
}
impl<A, U, R> Drop for Closure1<A, U, R> {
    fn drop(&mut self) {
        unsafe {
            // SAFETY: both `closure` and `data` were allocated by
            // `make_ffi_closure1` and are exclusively owned by this value.
            libffi::low::closure_free(self.closure);
            drop(Box::from_raw(self.data));
        }
    }
}
/// C-ABI entry point invoked by libffi: unpacks the single foreign argument,
/// forwards it to the stored Rust callback, and writes the converted result
/// into `ret_`.
unsafe extern "C" fn closure1_trampoline<A, U, R: CType>(
    _: *mut libffi::raw::ffi_cif,
    ret_: *mut c_void,
    args: *mut *mut c_void,
    userdata_: *mut c_void,
) {
    let args = args as *mut *mut A;
    // Calling `callback` can free `userdata_`, so we can't keep mut reference to
    // it.
    let callback = (*(userdata_ as *mut ClosureData1<A, U, R>)).callback;
    let userdata = &mut (*(userdata_ as *mut ClosureData1<A, U, R>)).userdata as *mut U;
    let ret = &mut *(ret_ as *mut R::RetType);
    *ret = (callback)(&mut **args, userdata).into();
}
/// Create a new FFI closure that calls `callback` with `userdata` as the second
/// argument.
///
/// On failure the closure allocation and the boxed data are both released, so
/// the error path does not leak.
///
/// # Safety
///
/// - The returned function pointer must not be called after `closure` is
///   dropped.
/// - userdata must have been initialized before the returned function pointer
///   is called the first time.
pub unsafe fn make_ffi_closure1<U, A: CType, R: CType>(
    callback: fn(&mut A, *mut U) -> R,
    userdata: U,
) -> anyhow::Result<Closure1<A, U, R>>
where
    R::RetType: CType,
{
    let (closure, fnptr) = libffi::low::closure_alloc();
    let data = Box::into_raw(Box::new(ClosureData1 {
        callback,
        userdata,
        cif: Default::default(),
        atypes: [A::reify().into_middle().as_raw_ptr()],
    }));
    let rtype = <R::RetType as CType>::reify().into_middle().as_raw_ptr();
    // Prepare the call interface, then attach the trampoline. Collect the
    // outcome so the error path below can free both allocations (the previous
    // version returned early and leaked `closure` and `data`).
    let result = libffi::low::prep_cif(
        &mut (*data).cif,
        libffi::low::ffi_abi_FFI_DEFAULT_ABI,
        1,
        rtype,
        (*data).atypes.as_mut_ptr(),
    )
    .map_err(|e| anyhow::anyhow!("{e:?}"))
    .and_then(|()| {
        status_to_result(libffi::raw::ffi_prep_closure_loc(
            closure,
            &mut (*data).cif,
            Some(closure1_trampoline::<A, U, R>),
            data as _,
            fnptr.0,
        ))
        .map_err(|e| anyhow::anyhow!("{e:?}"))
    });
    if let Err(e) = result {
        // SAFETY: on failure the trampoline was never published, so we still
        // exclusively own both allocations.
        libffi::low::closure_free(closure);
        drop(Box::from_raw(data));
        return Err(e);
    }
    Ok(Closure1 { closure, fnptr, data })
}
/// Two-argument counterpart of [`Closure1`]: a C-callable trampoline wrapping
/// a Rust `fn` plus userdata. Freed (closure and boxed data) on drop.
pub struct Closure2<A, B, U, R> {
    closure: *mut ffi_closure,
    // The C function pointer handed out to foreign code.
    fnptr: CodePtr,
    data: *mut ClosureData2<A, B, U, R>,
}
impl<A, B, U, R> Closure2<A, B, U, R> {
    /// Raw C function pointer; must not be called after `self` is dropped.
    pub fn code_ptr(&self) -> *mut c_void { self.fnptr.0 }
}
/// Heap-allocated state backing a [`Closure2`]; see [`ClosureData1`] — the
/// address must stay stable while the closure is callable.
struct ClosureData2<A, B, U, R> {
    callback: fn(&mut A, &mut B, *mut U) -> R,
    userdata: U,
    cif: libffi::low::ffi_cif,
    // Argument type array referenced by `cif`; kept alive alongside it.
    atypes: [*mut libffi::low::ffi_type; 2],
}
impl<A, B, U, R> Drop for Closure2<A, B, U, R> {
    fn drop(&mut self) {
        unsafe {
            // SAFETY: both `closure` and `data` were allocated by
            // `make_ffi_closure2` and are exclusively owned by this value.
            libffi::low::closure_free(self.closure);
            drop(Box::from_raw(self.data));
        }
    }
}
/// C-ABI entry point invoked by libffi: unpacks the two foreign arguments,
/// forwards them to the stored Rust callback, and writes the converted result
/// into `ret_`.
unsafe extern "C" fn closure2_trampoline<A, B, U, R: CType>(
    _: *mut libffi::raw::ffi_cif,
    ret_: *mut c_void,
    args: *mut *mut c_void,
    userdata_: *mut c_void,
) {
    let [arg1, arg2] = *(args as *const [*mut c_void; 2]);
    let ret = ret_ as *mut R::RetType;
    // Calling `callback` can free `userdata_`, so we can't keep mut reference to
    // it.
    let callback = (*(userdata_ as *mut ClosureData2<A, B, U, R>)).callback;
    let userdata = &mut (*(userdata_ as *mut ClosureData2<A, B, U, R>)).userdata as *mut U;
    *ret = (callback)(&mut *(arg1 as *mut A), &mut *(arg2 as *mut B), userdata).into();
}
/// Create a new FFI closure that calls `callback` with `userdata` as the third
/// argument, for two-argument foreign signatures.
///
/// On failure the closure allocation and the boxed data are both released, so
/// the error path does not leak.
///
/// # Safety
///
/// - The returned function pointer must not be called after the closure is
///   dropped.
/// - userdata must have been initialized before the returned function pointer
///   is called the first time.
pub unsafe fn make_ffi_closure2<U, A: CType, B: CType, R: CType>(
    callback: fn(&mut A, &mut B, *mut U) -> R,
    userdata: U,
) -> anyhow::Result<Closure2<A, B, U, R>>
where
    R::RetType: CType,
{
    let (closure, fnptr) = libffi::low::closure_alloc();
    let data = Box::into_raw(Box::new(ClosureData2 {
        callback,
        userdata,
        cif: Default::default(),
        atypes: [A::reify().into_middle().as_raw_ptr(), B::reify().into_middle().as_raw_ptr()],
    }));
    let rtype = <R::RetType as CType>::reify().into_middle().as_raw_ptr();
    // Prepare the call interface, then attach the trampoline. Collect the
    // outcome so the error path below can free both allocations (the previous
    // version returned early and leaked `closure` and `data`).
    let result = libffi::low::prep_cif(
        &mut (*data).cif,
        libffi::low::ffi_abi_FFI_DEFAULT_ABI,
        2,
        rtype,
        (*data).atypes.as_mut_ptr(),
    )
    .map_err(|e| anyhow::anyhow!("{e:?}"))
    .and_then(|()| {
        status_to_result(libffi::raw::ffi_prep_closure_loc(
            closure,
            &mut (*data).cif,
            Some(closure2_trampoline::<A, B, U, R>),
            data as _,
            fnptr.0,
        ))
        .map_err(|e| anyhow::anyhow!("{e:?}"))
    });
    if let Err(e) = result {
        // SAFETY: on failure the trampoline was never published, so we still
        // exclusively own both allocations.
        libffi::low::closure_free(closure);
        drop(Box::from_raw(data));
        return Err(e);
    }
    Ok(Closure2 { closure, fnptr, data })
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/src/lib.rs | Rust | use std::{
cell::{OnceCell, UnsafeCell},
collections::HashSet,
os::{
fd::{AsFd, AsRawFd},
raw::c_void,
},
rc::Rc,
sync::{
mpsc::{Receiver, Sender},
Arc,
},
};
use anyhow::Context as _;
use cursor::Cursor;
use slotmap::{DefaultKey, SlotMap};
use smallvec::SmallVec;
mod cursor;
mod ffi;
mod pipewire;
mod server;
use drm_fourcc::DrmModifier;
use gl_bindings::{
egl,
gl::{
self,
types::{GLboolean, GLint, GLuint},
},
};
use x11rb::{
connection::Connection,
protocol::xproto::{ConnectionExt as _, PropMode},
};
/// Bindgen-generated bindings to picom's plugin/backend API (emitted by the
/// build script into `OUT_DIR`).
#[allow(dead_code, non_camel_case_types, non_snake_case)]
mod picom {
    include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
// Resolve GL/EGL entry points at runtime through libEGL.
#[link(name = "EGL")]
extern "C" {
    fn eglGetProcAddress(procname: *const std::os::raw::c_char) -> *const std::os::raw::c_void;
}
/// Backend vtable entries saved before we install our hooks, so the hooks can
/// chain to picom's original implementations.
struct SavedFnPtrs {
    deinit: unsafe extern "C" fn(*mut picom::backend_base),
    present: Option<unsafe extern "C" fn(*mut picom::backend_base) -> bool>,
    root_change: Option<unsafe extern "C" fn(*mut picom::backend_base, *mut picom::session)>,
}
/// A width/height pair (e.g. the size of the root window).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, bytemuck::NoUninit)]
#[repr(C)]
struct Extent {
    width: u32,
    height: u32,
}
// Convenience conversion from an X11 geometry reply.
impl From<x11rb::protocol::xproto::GetGeometryReply> for Extent {
    fn from(r: x11rb::protocol::xproto::GetGeometryReply) -> Self {
        Self { width: r.width as _, height: r.height as _ }
    }
}
/// Handle for sending messages to the pipewire thread: `tx` carries the
/// payload, `waker` pokes the pipewire mainloop so it notices the message.
#[derive(Clone)]
struct PipewireSender {
    waker: ::pipewire::channel::Sender<()>,
    tx: Sender<MessagesToPipewire>,
}
// RAII batching helper: records in its `bool` whether anything was sent, and
// wakes the pipewire loop at most once, on drop.
struct PipewireSendGuard<'a>(&'a PipewireSender, bool);
impl<'a> Drop for PipewireSendGuard<'a> {
fn drop(&mut self) {
if self.1 {
self.0.waker.send(()).ok();
}
}
}
impl<'a> PipewireSendGuard<'a> {
    /// Queue a message for the pipewire thread and mark the guard dirty so the
    /// loop gets woken when the guard drops.
    fn send(&mut self, msg: MessagesToPipewire) -> anyhow::Result<()> {
        self.1 = true;
        self.0.tx.send(msg).map_err(|_| anyhow::anyhow!("send"))
    }
    /// Request a wakeup on drop without sending any message.
    fn wake(&mut self) { self.1 = true; }
}
impl PipewireSender {
    /// Begin a batch of sends; the returned guard wakes the loop once on drop.
    fn start_send(&self) -> PipewireSendGuard<'_> { PipewireSendGuard(self, false) }
    fn new(waker: ::pipewire::channel::Sender<()>, tx: Sender<MessagesToPipewire>) -> Self {
        Self { waker, tx }
    }
}
// Vertex shader for the cursor blit: translates the quad by `offset`
// (uniform location 1) and maps it into clip space via `scale` (location 2).
const COPY_VS: &std::ffi::CStr = c"
#version 330
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0) in vec2 pos;
layout(location = 1) in vec2 uv;
out vec2 v_uv;
layout(location = 1)
uniform vec2 offset;
layout(location = 2)
uniform vec2 scale;
void main() {
	gl_Position = vec4((pos + offset) * scale - vec2(1.0, 1.0), 0.0, 1.0);
	v_uv = uv;
}
";
// Fragment shader: samples the cursor texture bound to `tex` (location 0).
// NOTE(review): uses gl_FragColor under `#version 330`; presumably accepted by
// the target drivers — confirm if strict core contexts must be supported.
const COPY_FS: &std::ffi::CStr = c"
#version 330
#extension GL_ARB_explicit_uniform_location : enable
in vec2 v_uv;
layout(location = 0)
uniform sampler2D tex;
void main() {
	gl_FragColor = texture(tex, v_uv);
	//gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
";
/// All state the plugin keeps on picom's main thread.
struct PluginContext {
    // Tracks the current X11 cursor (fed by the cursor-monitor thread).
    cursor_monitor: cursor::CursorMonitor,
    x11: x11rb::xcb_ffi::XCBConnection,
    // Root window id and its cached geometry (refreshed on root_change).
    root: u32,
    root_size: Extent,
    // We use UnsafeCell here because only one callback can be called at any given time,
    // so we can safely get mutable reference to the context from the `Rc``.
    deinit: Option<ffi::Closure1<*mut picom::backend_base, Rc<UnsafeCell<Self>>, ()>>,
    present: Option<ffi::Closure1<*mut picom::backend_base, Rc<UnsafeCell<Self>>, u8>>,
    root_change: Option<
        ffi::Closure2<*mut picom::backend_base, *mut picom::session, Rc<UnsafeCell<Self>>, ()>,
    >,
    // Linked program built from COPY_VS/COPY_FS, used to draw the cursor.
    cursor_shader: GLuint,
    // Channel pair to/from the pipewire thread; `pw_tx` is dropped (set to
    // None) once communication fails, disabling capture.
    pw_rx: Receiver<MessagesFromPipewire>,
    pw_tx: Option<PipewireSender>,
    // Original backend vtable entries, chained to from our hooks.
    saved_fn_ptrs: SavedFnPtrs,
    // Imported capture buffers, keyed by the ids we hand to pipewire.
    buffers: SlotMap<DefaultKey, CaptureReceiver>,
    cookie: Arc<String>,
}
/// Snapshot of the GL state our blitting code clobbers (bindings, program,
/// blend state); restored on drop.
#[derive(Default)]
struct GlStateGuard {
    texture_2d: GLint,
    draw_fbo: GLint,
    read_fbo: GLint,
    program: GLint,
    blend_enabled: GLboolean,
    blend_dst_rgb: GLint,
    blend_src_rgb: GLint,
    blend_dst_alpha: GLint,
    blend_src_alpha: GLint,
}
impl GlStateGuard {
    /// Capture the current texture/FBO bindings, active program and blend
    /// configuration so they can be restored when the guard drops.
    fn save() -> Self {
        let mut ret = Self::default();
        GL.with(|gl| {
            let gl = gl.get().unwrap();
            unsafe {
                gl.GetIntegerv(gl::TEXTURE_BINDING_2D, &mut ret.texture_2d);
                gl.GetIntegerv(gl::DRAW_FRAMEBUFFER_BINDING, &mut ret.draw_fbo);
                gl.GetIntegerv(gl::READ_FRAMEBUFFER_BINDING, &mut ret.read_fbo);
                gl.GetIntegerv(gl::CURRENT_PROGRAM, &mut ret.program);
                gl.GetBooleanv(gl::BLEND, &mut ret.blend_enabled);
                gl.GetIntegerv(gl::BLEND_DST_RGB, &mut ret.blend_dst_rgb);
                gl.GetIntegerv(gl::BLEND_SRC_RGB, &mut ret.blend_src_rgb);
                gl.GetIntegerv(gl::BLEND_DST_ALPHA, &mut ret.blend_dst_alpha);
                gl.GetIntegerv(gl::BLEND_SRC_ALPHA, &mut ret.blend_src_alpha);
            }
        });
        ret
    }
}
impl Drop for GlStateGuard {
    /// Restore every piece of GL state captured by [`GlStateGuard::save`].
    fn drop(&mut self) {
        GL.with(|gl| {
            let gl = gl.get().unwrap();
            unsafe {
                gl.BindTexture(gl::TEXTURE_2D, self.texture_2d as _);
                gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, self.draw_fbo as _);
                gl.BindFramebuffer(gl::READ_FRAMEBUFFER, self.read_fbo as _);
                gl.UseProgram(self.program as _);
                if self.blend_enabled == gl::TRUE {
                    gl.Enable(gl::BLEND);
                } else {
                    gl.Disable(gl::BLEND);
                }
                gl.BlendFuncSeparate(
                    self.blend_src_rgb as _,
                    self.blend_dst_rgb as _,
                    self.blend_src_alpha as _,
                    self.blend_dst_alpha as _,
                );
            }
        });
    }
}
impl PluginContext {
    /// Free our GL resources, then chain to picom's original deinit.
    fn deinit(&mut self, backend: *mut picom::backend_base) {
        unsafe {
            GL.with(|gl| {
                let gl = gl.get().unwrap();
                gl.DeleteProgram(self.cursor_shader);
            });
            (self.saved_fn_ptrs.deinit)(backend)
        };
    }
    /// Bind the cursor texture and shader, and upload a quad (VAO + pos/uv
    /// buffers) positioned at the cursor's hotspot-adjusted location, ready
    /// for `DrawElements`. Returns the VAO and the two VBOs so the caller can
    /// delete them afterwards.
    ///
    /// NOTE(review): only the sampler uniform (location 0) is set here, and
    /// the caller sets `offset` (location 1); the `scale` uniform (location 2
    /// in COPY_VS) is not set anywhere in this chunk — presumably initialized
    /// during plugin init, confirm.
    fn blit_cursor_prepare(
        shader: GLuint,
        x: i32,
        y: i32,
        cursor: &Cursor,
        texture_unit: GLint,
    ) -> (GLuint, [GLuint; 2]) {
        GL.with(|gl| {
            let gl = gl.get().unwrap();
            // Shift so the cursor's hotspot lands on the pointer position.
            let (x, y) = (x - cursor.hotspot_x as i32, y - cursor.hotspot_y as i32);
            unsafe {
                gl.BindTexture(gl::TEXTURE_2D, cursor.texture);
                gl.UseProgram(shader);
                gl.Uniform1i(0, (texture_unit as u32 - gl::TEXTURE0) as _);
                let mut vao = 0;
                gl.GenVertexArrays(1, &mut vao);
                gl.BindVertexArray(vao);
                let mut vbo = [0; 2];
                gl.GenBuffers(2, vbo.as_mut_ptr());
                gl.BindBuffer(gl::ARRAY_BUFFER, vbo[0]);
                gl.BindBuffer(gl::ELEMENT_ARRAY_BUFFER, vbo[1]);
                // First 8 floats are the quad corners, last 8 the uv coords.
                let vertices: [f32; 16] = [
                    // pos
                    x as f32,
                    y as f32,
                    (x + cursor.width as i32) as f32,
                    y as f32,
                    (x + cursor.width as i32) as f32,
                    (y + cursor.height as i32) as f32,
                    x as f32,
                    (y + cursor.height as i32) as f32,
                    // uv
                    0.0,
                    0.0,
                    1.0,
                    0.0,
                    1.0,
                    1.0,
                    0.0,
                    1.0,
                ];
                gl.BufferData(
                    gl::ARRAY_BUFFER,
                    (vertices.len() * std::mem::size_of::<f32>()) as _,
                    vertices.as_ptr() as _,
                    gl::STATIC_DRAW,
                );
                let indices: [u32; 6] = [0, 1, 2, 2, 3, 0];
                gl.BufferData(
                    gl::ELEMENT_ARRAY_BUFFER,
                    (indices.len() * std::mem::size_of::<u32>()) as _,
                    indices.as_ptr() as _,
                    gl::STATIC_DRAW,
                );
                gl.EnableVertexAttribArray(0);
                gl.EnableVertexAttribArray(1);
                gl.VertexAttribPointer(
                    0,
                    2,
                    gl::FLOAT,
                    gl::FALSE,
                    (2 * std::mem::size_of::<f32>()) as _,
                    std::ptr::null(),
                );
                // uv attribute starts after the 8 position floats.
                gl.VertexAttribPointer(
                    1,
                    2,
                    gl::FLOAT,
                    gl::FALSE,
                    (2 * std::mem::size_of::<f32>()) as _,
                    (8 * std::mem::size_of::<f32>()) as _,
                );
                (vao, vbo)
            }
        })
    }
    /// Drain pending pipewire messages (imports/activations/removals), then
    /// blit the just-rendered back buffer — plus, optionally, the cursor —
    /// into every active capture buffer, fencing each one.
    fn copy_back_buffer(&mut self) -> anyhow::Result<()> {
        let Self { pw_tx, x11, root, cursor_monitor, pw_rx, root_size, cursor_shader, .. } = self;
        // No pipewire connection means capture is disabled; nothing to do.
        let Some(pw_send) = pw_tx.as_ref() else {
            return Ok(());
        };
        let pw_send = pw_send.clone();
        let pointer = x11.query_pointer(*root)?.reply()?;
        let cursor = cursor_monitor.current_cursor()?;
        let mut pw_tx = pw_send.start_send();
        let mut active_buffers = HashSet::new();
        while let Ok(msg) = pw_rx.try_recv() {
            match msg {
                MessagesFromPipewire::AddBuffer {
                    dma_buf,
                    stream_id,
                    x,
                    y,
                    embed_cursor,
                    reply,
                } => {
                    let image = CaptureReceiver::import(&dma_buf, stream_id, x, y, embed_cursor)
                        .context("import")?;
                    let id = self.buffers.insert(image);
                    reply.send((id, dma_buf)).ok();
                }
                MessagesFromPipewire::ActivateBuffer { id } => {
                    assert!(!active_buffers.contains(&id));
                    active_buffers.insert(id);
                }
                MessagesFromPipewire::RemoveBuffers { ids } => {
                    for id in &ids {
                        self.buffers.remove(*id);
                        active_buffers.remove(id);
                    }
                }
                MessagesFromPipewire::WakeMeUp => {
                    pw_tx.wake();
                }
            }
        }
        if active_buffers.is_empty() {
            return Ok(());
        }
        GL.with(|gl| {
            let gl = gl.get().unwrap();
            unsafe {
                let mut texture_unit = 0;
                gl.GetIntegerv(gl::ACTIVE_TEXTURE, &mut texture_unit);
                // Save picom's GL state; restored when `_guard` drops.
                let _guard = GlStateGuard::save();
                gl.Enable(gl::BLEND);
                gl.BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
                // Read from the default framebuffer (the back buffer).
                gl.BindFramebuffer(gl::READ_FRAMEBUFFER, 0);
                let resources = cursor.map(|cursor| {
                    Self::blit_cursor_prepare(
                        *cursor_shader,
                        pointer.root_x as _,
                        pointer.root_y as _,
                        cursor,
                        texture_unit,
                    )
                });
                for i in active_buffers {
                    let b = &self.buffers[i];
                    gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, b.fbo);
                    tracing::trace!("fbo: {}, size: {}x{}", b.fbo, b.width, b.height);
                    // Source Y coordinates are inverted relative to the
                    // destination, flipping between GL's bottom-left origin
                    // and the buffer's orientation.
                    gl.BlitFramebuffer(
                        // src
                        b.x as _,
                        (root_size.height as i32 - b.y) as _,
                        (b.x + b.width as i32) as _,
                        (root_size.height as i32 - b.y - b.height as i32) as _,
                        // dst
                        0,
                        0,
                        b.width as _,
                        b.height as _,
                        gl::COLOR_BUFFER_BIT,
                        gl::NEAREST,
                    );
                    // Draw the cursor quad prepared above, offset into this
                    // buffer's capture region.
                    if b.embed_cursor && cursor.is_some() {
                        let offset = [-b.x as f32, -b.y as f32];
                        gl.Uniform2fv(1, 1, offset.as_ptr());
                        gl.DrawElements(gl::TRIANGLES, 6, gl::UNSIGNED_INT, std::ptr::null());
                    }
                    // Fence the buffer so pipewire consumers wait for the GPU;
                    // on failure tell the pipewire thread to drop the stream.
                    match self.buffers[i].insert_fence() {
                        Ok(fence) => {
                            pw_tx
                                .send(MessagesToPipewire::NewFrame {
                                    id: i,
                                    fence,
                                    stream_id: self.buffers[i].stream_id,
                                })
                                .unwrap()
                        }
                        Err(e) => {
                            tracing::error!("insert_fence failed: {}", e);
                            pw_tx
                                .send(MessagesToPipewire::BufferError {
                                    id: i,
                                    stream_id: self.buffers[i].stream_id,
                                })
                                .unwrap()
                        }
                    };
                }
                let error = gl.GetError();
                if error != gl::NO_ERROR {
                    tracing::error!("GL errorB: {}", error);
                }
                // Clean up the cursor quad's VAO/VBOs, if we created them.
                if let Some((vao, vbo)) = resources {
                    gl.BindBuffer(gl::ARRAY_BUFFER, 0);
                    gl.BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
                    gl.BindVertexArray(0);
                    gl.DeleteVertexArrays(1, &vao);
                    gl.DeleteBuffers(2, vbo.as_ptr());
                }
            }
        });
        Ok(())
    }
    /// Hook for picom's present: copy into the capture buffers first (on
    /// failure, drop the pipewire sender which disables capture), then chain
    /// to the original present.
    fn present(&mut self, backend: *mut picom::backend_base) -> bool {
        match self.copy_back_buffer() {
            Ok(()) => (),
            Err(e) => {
                tracing::debug!("copy_back_buffer failed: {:?}, bail", e);
                self.pw_tx.take();
            }
        }
        if let Some(present) = self.saved_fn_ptrs.present {
            if !unsafe { present(backend) } {
                return false;
            }
        }
        true
    }
    /// Hook for picom's root_change: chain to the original, then refresh the
    /// cached root-window geometry.
    fn root_change(
        &mut self,
        backend: *mut picom::backend_base,
        session: *mut picom::session,
    ) -> anyhow::Result<()> {
        if let Some(root_change) = self.saved_fn_ptrs.root_change {
            unsafe { root_change(backend, session) }
        }
        let root = self.x11.setup().roots.first().unwrap().root;
        self.root_size = self.x11.get_geometry(root)?.reply()?.into();
        Ok(())
    }
}
/// C-side deinit hook: runs the plugin's deinit, then carefully drops the
/// closure that transitively owns the `PluginContext` itself.
fn deinit_trampoline(
    backend: &mut *mut picom::backend_base,
    userdata: *mut Rc<UnsafeCell<PluginContext>>,
) {
    tracing::debug!("userdata refcount: {}", Rc::strong_count(unsafe { &*userdata }));
    let deinit = {
        // This is extremely unsafe. Dropping `userdata.deinit` transitively drops
        // `userdata`, which we still have a mut reference to (!). So we must
        // keep it alive, until we have gotten rid of the mut reference to
        // `userdata`.
        let userdata = unsafe { &mut *(*userdata).get() };
        userdata.deinit(*backend);
        userdata.present.take();
        userdata.root_change.take();
        userdata.deinit.take()
    };
    // Here we don't have mut reference to `userdata` anymore, only a raw ptr. So
    // it's safe to drop `deinit`.
    drop(deinit);
}
/// C-side present hook: forwards to `PluginContext::present`, returning the
/// bool as a `u8` for the C ABI.
fn present_trampoline(
    backend: &mut *mut picom::backend_base,
    userdata: *mut Rc<UnsafeCell<PluginContext>>,
) -> u8 {
    unsafe { &mut *(*userdata).get() }.present(*backend) as _
}
/// C-side root_change hook: forwards to `PluginContext::root_change`, logging
/// (and otherwise swallowing) any error.
fn root_change_trampoline(
    backend: &mut *mut picom::backend_base,
    session: &mut *mut picom::session,
    userdata: *mut Rc<UnsafeCell<PluginContext>>,
) {
    let ctx = unsafe { &mut *(*userdata).get() };
    if let Err(e) = ctx.root_change(*backend, *session) {
        tracing::debug!("root_change failed: {}", e);
    }
}
/// Query the dma-buf import formats supported by `dpy`, using the usual
/// two-call pattern (first call to get the count, second to fill the buffer).
fn egl_get_dma_buf_formats(egl: &egl::sys::Egl, dpy: egl::EGLDisplay) -> Vec<egl::EGLint> {
    let mut num: i32 = 0;
    let mut formats = Vec::new();
    unsafe {
        egl.QueryDmaBufFormatsEXT(dpy, 0, std::ptr::null_mut(), &mut num);
        formats.resize(num as usize, 0);
        egl.QueryDmaBufFormatsEXT(dpy, num, formats.as_mut_ptr(), &mut num);
    }
    tracing::debug!("num: {}", num);
    formats
}
/// Query the modifiers (and their external-only flags) `dpy` supports for a
/// given DRM fourcc, via the same two-call count/fill pattern.
fn egl_get_dma_buf_modifiers(
    egl: &egl::sys::Egl,
    dpy: egl::EGLDisplay,
    format: drm_fourcc::DrmFourcc,
) -> Vec<(egl::sys::types::EGLuint64KHR, egl::sys::types::EGLBoolean)> {
    let mut num: i32 = 0;
    let mut modifiers = Vec::new();
    let mut external_only = Vec::new();
    unsafe {
        egl.QueryDmaBufModifiersEXT(
            dpy,
            format as u32 as i32,
            0,
            std::ptr::null_mut(),
            std::ptr::null_mut(),
            &mut num,
        );
        modifiers.resize(num as usize, 0);
        external_only.resize(num as usize, 0);
        egl.QueryDmaBufModifiersEXT(
            dpy,
            format as u32 as i32,
            num,
            modifiers.as_mut_ptr(),
            external_only.as_mut_ptr(),
            &mut num,
        );
    }
    modifiers.into_iter().zip(external_only).collect()
}
// Extensions this plugin depends on, checked at init: client-level,
// device-level, and display-level respectively.
const REQUIRED_EGL_CLIENT_EXTENSIONS: &[&str] = &["EGL_EXT_device_base", "EGL_EXT_device_query"];
const REQUIRED_EGL_DEVICE_EXTENSIONS: &[&str] = &["EGL_EXT_device_drm_render_node"];
const REQUIRED_EGL_EXTENSIONS: &[&str] = &["EGL_EXT_image_dma_buf_import_modifiers"];
/// Verify that both the client-level and the display-level EGL extension
/// strings contain everything this plugin relies on.
unsafe fn egl_check_extensions(egl: &egl::sys::Egl, dpy: egl::EGLDisplay) -> anyhow::Result<()> {
    let client_extensions: HashSet<&str> =
        std::ffi::CStr::from_ptr(egl.QueryString(egl::sys::NO_DISPLAY, egl::sys::EXTENSIONS as _))
            .to_str()?
            .split(' ')
            .collect();
    if let Some(missing) =
        REQUIRED_EGL_CLIENT_EXTENSIONS.iter().find(|r| !client_extensions.contains(**r))
    {
        return Err(anyhow::anyhow!("Required EGL client extension {} not found", missing));
    }
    let display_extensions: HashSet<&str> =
        std::ffi::CStr::from_ptr(egl.QueryString(dpy, egl::sys::EXTENSIONS as _))
            .to_str()?
            .split(' ')
            .collect();
    if let Some(missing) =
        REQUIRED_EGL_EXTENSIONS.iter().find(|r| !display_extensions.contains(**r))
    {
        return Err(anyhow::anyhow!("Required EGL extension {} not found", missing));
    }
    Ok(())
}
/// Newtype over the opened DRM render node file, so we can implement
/// `drm::Device` (which requires `AsFd`) for it.
struct DrmRenderNode(std::fs::File);
impl AsFd for DrmRenderNode {
    fn as_fd(&self) -> std::os::unix::io::BorrowedFd<'_> { self.0.as_fd() }
}
impl drm::Device for DrmRenderNode {
}
/// A pipewire-provided dma-buf imported as an EGLImage-backed GL texture, plus
/// an FBO wrapping it so the back buffer can be blitted in.
struct CaptureReceiver {
    texture: GLuint,
    image: egl::sys::types::EGLImage,
    fbo: GLuint,
    // Which pipewire stream this buffer belongs to.
    stream_id: DefaultKey,
    // Capture-region origin (in root-window coordinates).
    x: i32,
    y: i32,
    width: u32,
    height: u32,
    // Whether the cursor should be drawn into this buffer.
    embed_cursor: bool,
}
// We have 3 threads:
// 1. picom thread, that's the compositor's main thread, which calls into hooks
//    registered by our plugin.
// 2. cursor monitor thread (spawned in `cursor::CursorMonitor::new`), which
//    watches for X11 cursor changes and uploads cursor images as GL textures.
// 3. pipewire thread. This is the thread that runs the pipewire mainloop and
//    handles pipewire communication.
#[derive(Debug)]
enum MessagesToPipewire {
    /// Sent when picom presents a new frame, after we have sent commands to
    /// copy this frame into our buffer. After sending this, buffer `id`
    /// will stop being active.
    NewFrame {
        id: DefaultKey,
        // Sync handle the consumer can wait on before reading the buffer.
        fence: drm::control::syncobj::Handle,
        stream_id: DefaultKey,
    },
    /// Error occurred for the given buffer. The pipewire thread should drop
    /// this buffer, and the stream it's associated with.
    BufferError {
        id: DefaultKey,
        stream_id: DefaultKey,
    },
    /// Create a new stream capturing the given region; the reply carries the
    /// resulting pipewire node id.
    CreateStream {
        x: i32,
        y: i32,
        width: u32,
        height: u32,
        embed_cursor: bool,
        reply: oneshot::Sender<anyhow::Result<u32>>,
    },
    /// Tear down the streams with the given pipewire node ids.
    CloseStreams {
        node_ids: SmallVec<[u32; 6]>,
    },
}
enum MessagesFromPipewire {
    /// A new buffer is created on the pipewire's side, the main thread
    /// should import it, and send back an id in a `BufferImported` response.
    AddBuffer {
        dma_buf: gbm::BufferObject<()>,
        stream_id: DefaultKey,
        // Capture-region origin for this buffer.
        x: i32,
        y: i32,
        embed_cursor: bool,
        // Replies with the assigned buffer id and the dma-buf handed back.
        reply: oneshot::Sender<(DefaultKey, gbm::BufferObject<()>)>,
    },
    /// Set buffer active. There can be multiple active buffers at the same
    /// time.
    ActivateBuffer {
        id: DefaultKey,
    },
    /// Remove the listed imported buffers; also used to clear everything when
    /// the pipewire thread is shutting down.
    RemoveBuffers {
        ids: SmallVec<[DefaultKey; 2]>,
    },
    /// Ask the main thread to poke the pipewire loop's waker after it has
    /// processed pending messages.
    WakeMeUp,
}
thread_local! {
    // Per-thread GL/EGL function tables. They are initialized elsewhere (not
    // in this chunk); users assume initialization via `get().unwrap()`.
    static EGL: OnceCell<egl::sys::Egl> = const{ OnceCell::new() };
    static GL: OnceCell<gl::Gl> = const { OnceCell::new() };
}
impl Drop for CaptureReceiver {
    /// Release the GL texture/FBO and the EGLImage. Must run on a thread with
    /// the right context current, since it uses `GetCurrentDisplay`.
    fn drop(&mut self) {
        GL.with(|gl| {
            let gl = gl.get().unwrap();
            unsafe {
                gl.DeleteTextures(1, &self.texture);
                gl.DeleteFramebuffers(1, &self.fbo);
            }
        });
        EGL.with(|egl| {
            let egl = egl.get().unwrap();
            unsafe {
                let dpy = egl.GetCurrentDisplay();
                egl.DestroyImage(dpy, self.image);
            }
        });
    }
}
/// The set of per-plane EGL attribute ids needed to describe one dma-buf plane
/// to `CreateImage`. (Name keeps the existing "Paramter" typo because it is
/// referenced elsewhere in this file.)
struct EglDmaBufParamterIds {
    fd_ext: egl::EGLenum,
    offset_ext: egl::EGLenum,
    pitch_ext: egl::EGLenum,
    modifier_lo_ext: egl::EGLenum,
    modifier_hi_ext: egl::EGLenum,
}
// Per-plane attribute ids for up to 4 dma-buf planes, indexed by plane number.
const EGL_DMA_BUF_PARAMETER_IDS: [EglDmaBufParamterIds; 4] = [
    EglDmaBufParamterIds {
        fd_ext: egl::sys::DMA_BUF_PLANE0_FD_EXT,
        offset_ext: egl::sys::DMA_BUF_PLANE0_OFFSET_EXT,
        pitch_ext: egl::sys::DMA_BUF_PLANE0_PITCH_EXT,
        modifier_lo_ext: egl::sys::DMA_BUF_PLANE0_MODIFIER_LO_EXT,
        modifier_hi_ext: egl::sys::DMA_BUF_PLANE0_MODIFIER_HI_EXT,
    },
    EglDmaBufParamterIds {
        fd_ext: egl::sys::DMA_BUF_PLANE1_FD_EXT,
        offset_ext: egl::sys::DMA_BUF_PLANE1_OFFSET_EXT,
        pitch_ext: egl::sys::DMA_BUF_PLANE1_PITCH_EXT,
        modifier_lo_ext: egl::sys::DMA_BUF_PLANE1_MODIFIER_LO_EXT,
        modifier_hi_ext: egl::sys::DMA_BUF_PLANE1_MODIFIER_HI_EXT,
    },
    EglDmaBufParamterIds {
        fd_ext: egl::sys::DMA_BUF_PLANE2_FD_EXT,
        offset_ext: egl::sys::DMA_BUF_PLANE2_OFFSET_EXT,
        pitch_ext: egl::sys::DMA_BUF_PLANE2_PITCH_EXT,
        modifier_lo_ext: egl::sys::DMA_BUF_PLANE2_MODIFIER_LO_EXT,
        modifier_hi_ext: egl::sys::DMA_BUF_PLANE2_MODIFIER_HI_EXT,
    },
    EglDmaBufParamterIds {
        fd_ext: egl::sys::DMA_BUF_PLANE3_FD_EXT,
        offset_ext: egl::sys::DMA_BUF_PLANE3_OFFSET_EXT,
        pitch_ext: egl::sys::DMA_BUF_PLANE3_PITCH_EXT,
        modifier_lo_ext: egl::sys::DMA_BUF_PLANE3_MODIFIER_LO_EXT,
        modifier_hi_ext: egl::sys::DMA_BUF_PLANE3_MODIFIER_HI_EXT,
    },
];
impl CaptureReceiver {
    /// Flush pending GL work and export a native fence fd for it; the dup'd fd
    /// value is wrapped into a `syncobj::Handle` for the pipewire thread.
    fn insert_fence(&mut self) -> anyhow::Result<drm::control::syncobj::Handle> {
        EGL.with(|egl| {
            let egl = egl.get().unwrap();
            unsafe {
                let dpy = egl.GetCurrentDisplay();
                let fence =
                    egl.CreateSyncKHR(dpy, egl::sys::SYNC_NATIVE_FENCE_ANDROID, std::ptr::null());
                if fence == egl::sys::NO_SYNC {
                    return Err(anyhow::anyhow!("CreateSyncKHR failed"));
                }
                // Flush so the sync object gets submitted to the GPU.
                GL.with(|gl| {
                    let gl = gl.get().unwrap();
                    gl.Flush();
                });
                let fence_fd = egl.DupNativeFenceFDANDROID(dpy, fence);
                if fence_fd == egl::sys::NO_NATIVE_FENCE_FD_ANDROID {
                    return Err(anyhow::anyhow!("GetSyncAttrib failed"));
                };
                let fence_fd = std::num::NonZeroU32::new(fence_fd as u32)
                    .ok_or_else(|| anyhow::anyhow!("DupNativeFenceFD failed: fence_fd is zero"))?;
                // The sync object itself is no longer needed once the fd is dup'd.
                egl.DestroySyncKHR(dpy, fence);
                Ok(fence_fd.into())
            }
        })
    }
    /// Import a gbm dma-buf as an EGLImage, bind it to a GL texture, and wrap
    /// it in a draw framebuffer. `x`/`y` record the buffer's capture-region
    /// origin for later blits.
    fn import(
        dma_buf: &gbm::BufferObject<()>,
        stream_id: DefaultKey,
        x: i32,
        y: i32,
        embed_cursor: bool,
    ) -> anyhow::Result<Self> {
        let modifier = dma_buf.modifier();
        let raw_modifier: u64 = modifier.into();
        // Keep the per-plane fds alive until CreateImage has consumed them.
        let fds = (0..dma_buf.plane_count())
            .map(|i| dma_buf.fd_for_plane(i as i32))
            .collect::<Result<Vec<_>, _>>()?;
        let width = dma_buf.width();
        let height = dma_buf.height();
        tracing::debug!("Importing: {}x{}", width, height);
        let image = EGL.with(|egl| {
            let egl = egl.get().unwrap();
            // Build the EGL attribute list: fourcc + size, then fd / offset /
            // pitch / modifier halves for every plane.
            let mut attribs = vec![
                egl::sys::LINUX_DRM_FOURCC_EXT as isize,
                dma_buf.format() as _,
                egl::sys::WIDTH as _,
                width as _,
                egl::sys::HEIGHT as _,
                height as _,
            ];
            for plane_id in 0..(dma_buf.plane_count() as i32) {
                let param_ids = &EGL_DMA_BUF_PARAMETER_IDS[plane_id as usize];
                attribs.extend_from_slice(&[
                    param_ids.fd_ext as isize,
                    fds[plane_id as usize].as_raw_fd() as _,
                    param_ids.offset_ext as isize,
                    dma_buf.offset(plane_id) as _,
                    param_ids.pitch_ext as isize,
                    dma_buf.stride_for_plane(plane_id) as _,
                    param_ids.modifier_lo_ext as isize,
                    (raw_modifier & 0xffffffff) as _,
                    param_ids.modifier_hi_ext as isize,
                    (raw_modifier >> 32) as _,
                ]);
            }
            attribs.extend(&[egl::sys::NONE as isize]);
            Ok::<_, anyhow::Error>(unsafe {
                egl.CreateImage(
                    egl.GetCurrentDisplay(),
                    egl::sys::NO_CONTEXT,
                    egl::sys::LINUX_DMA_BUF_EXT,
                    std::ptr::null(),
                    attribs.as_ptr(),
                )
            })
        })?;
        if image == egl::sys::NO_IMAGE {
            return Err(anyhow::anyhow!(
                "CreateImage failed {:x}",
                EGL.with(|egl| unsafe { egl.get().unwrap().GetError() })
            ));
        }
        let (fbo, texture) = GL.with(|gl| {
            let gl = gl.get().unwrap();
            let mut texture = 0;
            let mut fbo = 0;
            unsafe {
                // Save and restore the caller's bindings around the setup.
                let mut old_texture = 0;
                let mut old_draw_fbo = 0;
                gl.GetIntegerv(gl::TEXTURE_BINDING_2D, &mut old_texture);
                gl.GetIntegerv(gl::DRAW_FRAMEBUFFER_BINDING, &mut old_draw_fbo);
                gl.GenTextures(1, &mut texture);
                gl.BindTexture(gl::TEXTURE_2D, texture);
                gl.EGLImageTargetTexStorageEXT(gl::TEXTURE_2D, image, std::ptr::null());
                gl.BindTexture(gl::TEXTURE_2D, old_texture as _);
                gl.GenFramebuffers(1, &mut fbo);
                gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, fbo);
                gl.FramebufferTexture2D(
                    gl::DRAW_FRAMEBUFFER,
                    gl::COLOR_ATTACHMENT0,
                    gl::TEXTURE_2D,
                    texture,
                    0,
                );
                let status = gl.CheckFramebufferStatus(gl::DRAW_FRAMEBUFFER);
                if status != gl::FRAMEBUFFER_COMPLETE {
                    return Err(anyhow::anyhow!("Framebuffer incomplete: {:x}", status));
                }
                // NOTE(review): clears the imported buffer to opaque red —
                // presumably a debugging aid to make unwritten buffers
                // visible; confirm before shipping. The BlendFunc set here is
                // not restored.
                gl.BlendFunc(gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
                gl.ClearColor(1.0, 0.0, 0.0, 1.0);
                gl.Clear(gl::COLOR_BUFFER_BIT);
                tracing::debug!("GL error: {:x}", gl.GetError());
                gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, old_draw_fbo as _);
            }
            Ok((fbo, texture))
        })?;
        Ok(Self { fbo, texture, image, stream_id, width, height, x, y, embed_cursor })
    }
}
// Field layout of an AMD DRM format modifier; bit positions presumably mirror
// the AMD_FMT_MOD_* definitions in drm_fourcc.h — confirm against that header.
bitfield::bitfield! {
    struct AmdModifier(u64);
    impl Debug;
    pub tile_version, _: 7, 0;
    pub tile, _: 12, 8;
    pub dcc, _: 13;
    pub dcc_retile, _: 14;
    pub dcc_pipe_align, _: 15;
    pub dcc_independent_64b, _: 16;
    pub dcc_independent_128b, _: 17;
    pub dcc_max_compressed_block, _: 19, 18;
    pub dcc_constant_encode, _: 20;
    pub pipe_xor_bits, _: 23, 21;
    pub bank_xor_bits, _: 26, 24;
    pub packers, _: 29, 27;
    pub rb, _: 32, 30;
    pub pipe, _: 35, 33;
    pub reserved, _: 55, 36;
    pub vendor, _: 63, 56;
}
// Vendor-agnostic view of a DRM format modifier: the top byte is the vendor id.
bitfield::bitfield! {
    struct Modifier(u64);
    pub reserved, _: 55, 0;
    pub vendor, _: 63, 56;
}
// Vendor id / tile-version constants used by `extra_modifier_check`.
const MODIFIER_VENDOR_AMD: u64 = 0x2;
#[allow(dead_code)]
const AMD_FMT_MOD_TILE_VER_GFX9: u64 = 0x1;
const AMD_FMT_MOD_TILE_VER_GFX10: u64 = 0x2;
#[allow(dead_code)]
const AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: u64 = 0x3;
#[allow(dead_code)]
const AMD_FMT_MOD_TILE_VER_GFX11: u64 = 0x4;
/// Returns whether a buffer of `width` x `height` with this modifier should be
/// allowed; filters out problematic AMD DCC modifiers for large buffers.
fn extra_modifier_check(modifier: u64, width: u16, height: u16) -> bool {
    let generic = Modifier(modifier);
    // Non-AMD modifiers are accepted unconditionally.
    if generic.vendor() != MODIFIER_VENDOR_AMD {
        return true;
    }
    let amd = AmdModifier(generic.0);
    // Allocating 2560x2560 buffers are always possible, and GPUs earlier than
    // GFX10 don't have the size restriction.
    let small_enough = width <= 2560 && height <= 2560;
    let pre_gfx10 = amd.tile_version() < AMD_FMT_MOD_TILE_VER_GFX10;
    if small_enough || pre_gfx10 {
        return true;
    }
    !amd.dcc() || amd.dcc_independent_64b()
}
// Rust-side view of the struct behind `backend_base::c`; field order must
// match picom's C layout (presumably its x_connection struct — confirm
// against the picom headers).
#[repr(C)]
struct PicomXConnection {
    xcb: *mut c_void,
    display: *mut c_void,
    screen: libc::c_int,
}
// Standard base64 alphabet (RFC 4648); usage is not in this chunk —
// presumably for generating the session `cookie`.
const BASE64_CHARSET: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
unsafe fn compile_shader(gl: &gl::Gl, source: &str, t: GLuint) -> anyhow::Result<GLuint> {
let shader = gl.CreateShader(t);
gl.ShaderSource(
shader,
1,
[source.as_ptr() as *const _].as_ptr(),
[source.len() as _].as_ptr(),
);
gl.CompileShader(shader);
let mut success = 0;
gl.GetShaderiv(shader, gl::COMPILE_STATUS, &mut success);
if success != gl::TRUE as i32 {
let mut len = 0;
gl.GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = vec![0; len as usize];
gl.GetShaderInfoLog(shader, len, std::ptr::null_mut(), buf.as_mut_ptr() as _);
tracing::error!("Shader compilation failed: {}", std::str::from_utf8(&buf).unwrap());
gl.DeleteShader(shader);
return Err(anyhow::anyhow!("Shader compilation failed"));
}
Ok(shader)
}
unsafe fn backend_plugin_init_inner(backend: &mut picom::backend_base) -> anyhow::Result<()> {
tracing::debug!("backend_plugin_init called {backend:p}");
{
let mut major = 0;
let mut minor = 0;
(backend.ops.version.unwrap())(backend, &mut major, &mut minor);
if major != 0 || minor < 1 {
return Err(anyhow::anyhow!("Unsupported picom egl backend version"));
}
}
let egl = egl::sys::Egl::load_with(|name| {
let name = std::ffi::CString::new(name).unwrap();
eglGetProcAddress(name.as_ptr())
});
let dpy = egl.GetCurrentDisplay();
egl_check_extensions(&egl, dpy)?;
let mut device: egl::sys::types::EGLAttrib = 0;
if egl.QueryDisplayAttribEXT(dpy, egl::sys::DEVICE_EXT as i32, &mut device) == 0 {
return Err(anyhow::anyhow!("QueryDisplayAttribEXT failed"));
}
tracing::debug!("device: {:#x}", device);
let egl_extensions: HashSet<&str> = std::ffi::CStr::from_ptr(
egl.QueryDeviceStringEXT(device as u64 as _, egl::sys::EXTENSIONS as _),
)
.to_str()?
.split(' ')
.collect();
for required in REQUIRED_EGL_DEVICE_EXTENSIONS {
if !egl_extensions.contains(required) {
return Err(anyhow::anyhow!("Required EGL device extension {} not found", required));
}
}
let render_node =
egl.QueryDeviceStringEXT(device as u64 as _, egl::sys::DRM_RENDER_NODE_FILE_EXT as i32);
if render_node.is_null() {
return Err(anyhow::anyhow!("QueryDeviceStringEXT failed"));
}
let render_node = std::ffi::CStr::from_ptr(render_node).to_str().unwrap();
tracing::debug!("render_node: {}", render_node);
let render_node = std::fs::File::open(render_node).map(DrmRenderNode)?;
let gbm = gbm::Device::new(render_node)?;
let formats: HashSet<egl::EGLint> = egl_get_dma_buf_formats(&egl, dpy).into_iter().collect();
for format in formats.iter() {
tracing::debug!(
"format: {}{}{}{}",
(format & 0xff) as u8 as char,
((format >> 8) & 0xff) as u8 as char,
((format >> 16) & 0xff) as u8 as char,
((format >> 24) & 0xff) as u8 as char
);
}
let c = unsafe { &mut *(backend.c as *mut PicomXConnection) };
let x11 = x11rb::xcb_ffi::XCBConnection::from_raw_xcb_connection(c.xcb, false)?;
let root = x11.setup().roots.first().ok_or_else(|| anyhow::anyhow!("No root found"))?;
let r = x11.get_geometry(root.root)?.reply()?;
tracing::info!("Root size: {}x{}", r.width, r.height);
let compositor_selection = format!("_NET_WM_CM_S{}", c.screen);
let atom = x11.intern_atom(false, compositor_selection.as_bytes())?.reply()?.atom;
let selection_owner = x11.get_selection_owner(atom)?.reply()?.owner;
tracing::info!("Selection owner: {selection_owner:#x}");
let formats_modifiers: Vec<_> = formats
.into_iter()
.filter_map(|format| (format as u32).try_into().ok())
.filter_map(|format| {
let spa_format = pipewire::fourcc_to_spa_format(format)?;
let modifiers = egl_get_dma_buf_modifiers(&egl, dpy, format);
for modifier in &modifiers {
tracing::debug!(
"format {format} modifier: {:x}, external_only: {}",
modifier.0,
modifier.1
);
}
let modifiers: Vec<DrmModifier> = modifiers
.into_iter()
.filter(|(modifier, external_only)| {
*external_only == 0 && extra_modifier_check(*modifier, r.width, r.height)
})
.map(|(modifiers, _)| modifiers.into())
.collect();
if !modifiers.is_empty() {
Some((spa_format, modifiers))
} else {
None
}
})
.collect();
if formats_modifiers.is_empty() {
return Err(anyhow::anyhow!("No suitable modifier/formats found"));
};
let backend = &mut *backend;
let (our_pw_waker, pw_waker) = ::pipewire::channel::channel();
let (our_pw_tx, pw_rx) = std::sync::mpsc::channel();
let (pw_tx, our_pw_rx) = std::sync::mpsc::channel();
std::thread::spawn(move || {
match pipewire::pipewire_main(gbm, pw_waker, pw_rx, pw_tx, formats_modifiers) {
Ok(()) => {}
Err(e) => {
tracing::debug!("pipewire_main failed: {e:?}");
}
}
});
let cursor_monitor = cursor::CursorMonitor::new(egl.GetCurrentContext(), dpy)?;
// Compile shaders
let gl = gl::Gl::load_with(|name| {
let name = std::ffi::CString::new(name).unwrap();
eglGetProcAddress(name.as_ptr())
});
let vs = compile_shader(&gl, COPY_VS.to_str().unwrap(), gl::VERTEX_SHADER)?;
let fs = compile_shader(&gl, COPY_FS.to_str().unwrap(), gl::FRAGMENT_SHADER)?;
let program = gl.CreateProgram();
gl.AttachShader(program, vs);
gl.AttachShader(program, fs);
gl.LinkProgram(program);
gl.DeleteShader(vs);
gl.DeleteShader(fs);
let mut success = 0;
gl.GetProgramiv(program, gl::LINK_STATUS, &mut success);
if success != gl::TRUE as i32 {
let mut len = 0;
gl.GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = vec![0; len as usize];
gl.GetProgramInfoLog(program, len, std::ptr::null_mut(), buf.as_mut_ptr() as _);
tracing::error!("Program link failed: {}", std::str::from_utf8(&buf).unwrap());
gl.DeleteProgram(program);
return Err(anyhow::anyhow!("Program link failed"));
}
let mut viewport_size = [0; 4];
gl.GetIntegerv(gl::VIEWPORT, viewport_size.as_mut_ptr());
assert_eq!(viewport_size[0], 0);
assert_eq!(viewport_size[1], 0);
let [_, _, width, height] = viewport_size;
let scale = [2.0 / width as f32, 2.0 / height as f32];
let mut old_program = 0;
gl.GetIntegerv(gl::CURRENT_PROGRAM, &mut old_program);
gl.UseProgram(program);
gl.Uniform2fv(2, 1, scale.as_ptr());
gl.UseProgram(old_program as _);
let context = Rc::new(UnsafeCell::new(PluginContext {
deinit: None,
present: None,
root_change: None,
cursor_monitor,
cursor_shader: program,
saved_fn_ptrs: SavedFnPtrs {
deinit: backend.ops.deinit.unwrap_unchecked(),
present: backend.ops.present,
root_change: backend.ops.root_change,
},
root: root.root,
x11,
root_size: r.into(),
buffers: Default::default(),
pw_rx: our_pw_rx,
pw_tx: Some(PipewireSender::new(our_pw_waker, our_pw_tx)),
cookie: Arc::new(random_string::generate(88, BASE64_CHARSET)),
}));
{
let context_mut = unsafe { &mut *context.get() };
backend.ops.deinit = Some(std::mem::transmute::<
*mut libc::c_void,
unsafe extern "C" fn(*mut picom::backend_base),
>(
context_mut
.deinit
.insert(ffi::make_ffi_closure1(deinit_trampoline, context.clone())?)
.code_ptr(),
));
backend.ops.present = Some(std::mem::transmute::<
*mut libc::c_void,
unsafe extern "C" fn(*mut picom::backend_base) -> bool,
>(
context_mut
.present
.insert(ffi::make_ffi_closure1(present_trampoline, context.clone())?)
.code_ptr(),
));
backend.ops.root_change = Some(std::mem::transmute::<
*mut libc::c_void,
unsafe extern "C" fn(*mut picom::backend_base, *mut picom::session),
>(
context_mut
.root_change
.insert(ffi::make_ffi_closure2(root_change_trampoline, context.clone())?)
.code_ptr(),
));
let egl_screencast_atom =
context_mut.x11.intern_atom(false, b"EGL_SCREENCAST_COOKIE")?.reply()?.atom;
let utf_string_atom = context_mut.x11.intern_atom(false, b"UTF8_STRING")?.reply()?.atom;
context_mut
.x11
.change_property(
PropMode::REPLACE,
selection_owner,
egl_screencast_atom,
utf_string_atom,
8,
context_mut.cookie.len() as _,
context_mut.cookie.as_bytes(),
)?
.check()?;
std::thread::spawn({
let cookie = context_mut.cookie.clone();
let pw_tx = context_mut.pw_tx.clone().unwrap();
move || server::start_server(cookie, pw_tx, selection_owner)
});
}
EGL.with(|egl_| assert!(egl_.set(egl).is_ok()));
GL.with(|gl_| assert!(gl_.set(gl).is_ok()));
Ok(())
}
/// FFI entry point registered with picom via `add_backend_plugin`; runs the
/// real initialization and logs (rather than propagates) any failure so a
/// broken plugin does not take the compositor down.
///
/// # Safety
///
/// `backend` must be a valid, non-null pointer to a live `picom::backend_base`
/// for the duration of the call; picom guarantees this when invoking the
/// plugin-init callback.
pub unsafe extern "C" fn backend_plugin_init(backend: *mut picom::backend_base, _: *mut c_void) {
    if let Err(e) = backend_plugin_init_inner(&mut *backend) {
        tracing::debug!("backend_plugin_init failed: {e:?}");
    }
}
// Library constructor: runs when the shared object is loaded into picom.
// Sets up logging, then registers `backend_plugin_init` against the "egl"
// backend (plugin API version 0.1 per the (0, 1) / (1, 0) arguments —
// NOTE(review): which pair is major/minor is not visible here; confirm
// against the picom plugin API headers).
#[cfg(not(test))]
#[ctor::ctor]
unsafe fn init() {
    tracing_subscriber::fmt::init();
    tracing::debug!("init called");
    let api = &*picom::picom_api_get_interfaces(0, 1, c"egl-screencast".as_ptr());
    (api.add_backend_plugin.unwrap())(
        c"egl".as_ptr(),
        1,
        0,
        Some(backend_plugin_init),
        std::ptr::null_mut(),
    );
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/src/pipewire.rs | Rust | use std::{
borrow::Cow,
cell::{Cell, RefCell},
collections::HashMap,
os::{
fd::{FromRawFd, OwnedFd},
unix::io::IntoRawFd,
},
rc::Rc,
};
use anyhow::Context;
use drm_fourcc::DrmModifier;
use gbm::BufferObjectFlags;
use gl_bindings::gl;
use pipewire::{
loop_::IoSource,
properties::properties,
spa::{
self,
param::{
format::{FormatProperties, MediaSubtype, MediaType},
ParamType,
},
pod::{
deserialize::{PodDeserialize, PodDeserializer},
serialize::{PodSerialize, PodSerializer},
Pod, PropertyFlags,
},
support::system::IoFlags,
utils::{Direction, SpaTypes},
},
stream::StreamFlags,
};
use slotmap::{DefaultKey, SlotMap};
use smallvec::smallvec;
use crate::{DrmRenderNode, MessagesFromPipewire, MessagesToPipewire};
pub(crate) fn fourcc_to_spa_format(
fourcc: drm_fourcc::DrmFourcc,
) -> Option<spa::param::video::VideoFormat> {
use drm_fourcc::DrmFourcc::*;
use spa::param::video::VideoFormat;
Some(match fourcc {
Rgb888 => VideoFormat::BGR,
Bgr888 => VideoFormat::RGB,
Bgra8888 => VideoFormat::ARGB,
Abgr8888 => VideoFormat::RGBA,
Rgba8888 => VideoFormat::ABGR,
Argb8888 => VideoFormat::BGRA,
Bgrx8888 => VideoFormat::xRGB,
Xbgr8888 => VideoFormat::RGBx,
Xrgb8888 => VideoFormat::BGRx,
Rgbx8888 => VideoFormat::xBGR,
Abgr2101010 => VideoFormat::ABGR_210LE,
Xbgr2101010 => VideoFormat::xBGR_210LE,
Argb2101010 => VideoFormat::ARGB_210LE,
Xrgb2101010 => VideoFormat::xRGB_210LE,
_ => return None,
})
}
fn spa_format_to_fourcc(format: spa::param::video::VideoFormat) -> drm_fourcc::DrmFourcc {
use drm_fourcc::DrmFourcc::*;
use spa::param::video::VideoFormat;
match format {
VideoFormat::RGB => Bgr888,
VideoFormat::BGR => Rgb888,
VideoFormat::BGRA => Argb8888,
VideoFormat::ABGR => Rgba8888,
VideoFormat::RGBA => Abgr8888,
VideoFormat::ARGB => Bgra8888,
VideoFormat::BGRx => Xrgb8888,
VideoFormat::xBGR => Rgbx8888,
VideoFormat::xRGB => Bgrx8888,
VideoFormat::RGBx => Xbgr8888,
VideoFormat::ABGR_210LE => Abgr2101010,
VideoFormat::xBGR_210LE => Xbgr2101010,
VideoFormat::ARGB_210LE => Argb2101010,
VideoFormat::xRGB_210LE => Xrgb2101010,
_ => unimplemented!(),
}
}
/// Draw texture `source` as a full quad into framebuffer `target` using
/// `program`. Currently unused (kept for debugging).
///
/// Creates and destroys a throwaway VAO plus vertex/index buffers on every
/// call, so this is not suitable for a per-frame hot path.
#[allow(dead_code)]
fn blit(
    gl: &gl::Gl,
    program: gl::types::GLuint,
    source: gl::types::GLuint,
    target: gl::types::GLuint,
) {
    unsafe {
        gl.UseProgram(program);
        gl.ActiveTexture(gl::TEXTURE0);
        gl.BindTexture(gl::TEXTURE_2D, source);
        gl.Uniform1i(gl.GetUniformLocation(program, c"tex".as_ptr()), 0);
        gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, target);
        let mut vao = 0;
        gl.GenVertexArrays(1, &mut vao);
        gl.BindVertexArray(vao);
        let mut vbo = [0; 2];
        gl.GenBuffers(2, vbo.as_mut_ptr());
        gl.BindBuffer(gl::ARRAY_BUFFER, vbo[0]);
        gl.BindBuffer(gl::ELEMENT_ARRAY_BUFFER, vbo[1]);
        // Unit quad as two triangles; indices reference the four corners.
        let vertex: [gl::types::GLfloat; 8] = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0];
        let indices: [gl::types::GLuint; 6] = [0, 1, 2, 0, 2, 3];
        // BUGFIX: glBufferData takes a size in BYTES. The previous code passed
        // element counts (8 and 12), uploading truncated buffers.
        gl.BufferData(
            gl::ARRAY_BUFFER,
            std::mem::size_of_val(&vertex) as isize,
            vertex.as_ptr() as *const std::ffi::c_void,
            gl::STATIC_DRAW,
        );
        gl.BufferData(
            gl::ELEMENT_ARRAY_BUFFER,
            std::mem::size_of_val(&indices) as isize,
            indices.as_ptr() as *const std::ffi::c_void,
            gl::STATIC_DRAW,
        );
        gl.EnableVertexAttribArray(0);
        gl.VertexAttribPointer(
            0,
            2,
            gl::FLOAT,
            gl::FALSE,
            (std::mem::size_of::<gl::types::GLfloat>() * 2) as _,
            std::ptr::null(),
        );
        gl.DrawElements(gl::TRIANGLES, 6, gl::UNSIGNED_INT, std::ptr::null());
        gl.BindFramebuffer(gl::DRAW_FRAMEBUFFER, 0);
        // BUGFIX: the VAO and both buffers were previously leaked on every call.
        gl.BindVertexArray(0);
        gl.DeleteBuffers(2, vbo.as_ptr());
        gl.DeleteVertexArrays(1, &vao);
    }
}
/// A (possibly not-yet-fixated) video format negotiated with a pipewire
/// client, mirroring the fields of an SPA `Format` param object.
#[derive(Debug)]
struct ParamFormat<'a> {
    /// Pixel format; `None` until one has been negotiated.
    format: Option<spa::param::video::VideoFormat>,
    /// Candidate DRM modifiers; a fixated format carries exactly one.
    modifiers: Cow<'a, [DrmModifier]>,
    /// Frame width in pixels.
    width: u32,
    /// Frame height in pixels.
    height: u32,
    /// True when the modifier choice is final (SPA `DONT_FIXATE` absent).
    fixate: bool,
}
impl<'a> ParamFormat<'a> {
    /// Whether `other` is compatible with `self`: identical format and
    /// dimensions, and every modifier of `self` appears among `other`'s
    /// candidates (i.e. `self`'s modifier set is a subset of `other`'s).
    fn satisfies(&self, other: &ParamFormat<'_>) -> bool {
        self.format == other.format
            && self.width == other.width
            && self.height == other.height
            && self.modifiers.iter().all(|m| other.modifiers.contains(m))
    }
}
// Deserialize an SPA `Format` object pod into a `ParamFormat`. Properties may
// arrive either as plain values or wrapped in `Choice` pods (the client can
// offer alternatives); both shapes are accepted where it makes sense.
impl<'a> PodDeserialize<'a> for ParamFormat<'static> {
    fn deserialize(
        deserializer: spa::pod::deserialize::PodDeserializer<'a>,
    ) -> Result<
        (Self, spa::pod::deserialize::DeserializeSuccess<'a>),
        spa::pod::deserialize::DeserializeError<&'a [u8]>,
    >
    where
        Self: Sized,
    {
        struct Visitor;
        impl<'de> spa::pod::deserialize::Visitor<'de> for Visitor {
            type ArrayElem = std::convert::Infallible;
            type Value = ParamFormat<'static>;
            fn visit_object(
                &self,
                object_deserializer: &mut spa::pod::deserialize::ObjectPodDeserializer<'de>,
            ) -> Result<Self::Value, spa::pod::deserialize::DeserializeError<&'de [u8]>>
            {
                use spa::{
                    pod::{ChoiceValue, Value},
                    utils::{Choice, ChoiceEnum, Id},
                };
                // Start from an empty, owned ParamFormat and fill fields in as
                // properties are encountered.
                let mut ret = ParamFormat {
                    format: None,
                    modifiers: Cow::Borrowed(&[]),
                    width: 0,
                    height: 0,
                    fixate: false,
                };
                let modifiers = ret.modifiers.to_mut();
                while let Some((prop, id, flags)) =
                    object_deserializer.deserialize_property::<Value>()?
                {
                    let id = FormatProperties::from_raw(id);
                    match id {
                        FormatProperties::VideoSize => {
                            // Accept a bare rectangle or a degenerate
                            // single-value Choice.
                            match prop {
                                Value::Rectangle(rect) => {
                                    ret.width = rect.width;
                                    ret.height = rect.height;
                                }
                                Value::Choice(ChoiceValue::Rectangle(Choice(
                                    _flags,
                                    ChoiceEnum::None(rect),
                                ))) => {
                                    ret.width = rect.width;
                                    ret.height = rect.height;
                                }
                                _ => {
                                    tracing::debug!("Invalid type for VideoSize: {:?}", prop);
                                    return Err(
                                        spa::pod::deserialize::DeserializeError::InvalidType,
                                    );
                                }
                            }
                        }
                        FormatProperties::VideoModifier => {
                            // DONT_FIXATE set means the client is still
                            // offering alternatives, so the format is NOT
                            // fixated yet.
                            ret.fixate = !flags.contains(PropertyFlags::DONT_FIXATE);
                            match prop {
                                Value::Long(m) => {
                                    modifiers.push(DrmModifier::from(m as u64));
                                }
                                Value::Choice(ChoiceValue::Long(Choice(_flags, choices))) => {
                                    match choices {
                                        ChoiceEnum::Enum { default, alternatives } => {
                                            // Keep the default first; order is
                                            // meaningful downstream (index 0 is
                                            // picked when fixating).
                                            modifiers.push(DrmModifier::from(default as u64));
                                            modifiers.extend(
                                                alternatives
                                                    .iter()
                                                    .map(|&m| DrmModifier::from(m as u64)),
                                            );
                                        }
                                        ChoiceEnum::None(m) => {
                                            modifiers.push(DrmModifier::from(m as u64));
                                        }
                                        _ => {
                                            tracing::debug!(
                                                "Invalid choices for VideoModifier: {:?}",
                                                choices
                                            );
                                            return Err(
                                                spa::pod::deserialize::DeserializeError::InvalidChoiceType,
                                            );
                                        }
                                    }
                                }
                                _ => {
                                    tracing::debug!("Invalid type for VideoModifier: {:?}", prop);
                                    return Err(
                                        spa::pod::deserialize::DeserializeError::InvalidChoiceType,
                                    );
                                }
                            }
                        }
                        FormatProperties::VideoFormat => {
                            match prop {
                                Value::Id(id) => {
                                    let format = spa::param::video::VideoFormat::from_raw(id.0);
                                    ret.format = Some(format);
                                }
                                Value::Choice(ChoiceValue::Id(Choice(
                                    _,
                                    ChoiceEnum::None(Id(format)),
                                ))) => {
                                    let format = spa::param::video::VideoFormat::from_raw(format);
                                    ret.format = Some(format);
                                }
                                _ => {
                                    tracing::debug!("Invalid type for VideoFormat: {:?}", prop);
                                    return Err(
                                        spa::pod::deserialize::DeserializeError::InvalidType,
                                    );
                                }
                            }
                        }
                        _ => {
                            // Unknown properties (framerate, media type, ...)
                            // are ignored rather than rejected.
                            tracing::debug!("Unknown property: {:?}", id);
                        }
                    }
                }
                Ok(ret)
            }
        }
        deserializer.deserialize_object(Visitor)
    }
}
// Serialize a `ParamFormat` into an SPA `EnumFormat` object pod, advertising
// video/raw media with either a single fixated modifier or an enumeration of
// candidate modifiers.
impl<'a> PodSerialize for ParamFormat<'a> {
    fn serialize<O: std::io::Write + std::io::Seek>(
        &self,
        serializer: PodSerializer<O>,
    ) -> Result<spa::pod::serialize::SerializeSuccess<O>, spa::pod::serialize::GenError> {
        use spa::utils::{Choice, ChoiceEnum, ChoiceFlags, Fraction, Id, Rectangle};
        // A fixated format must carry exactly one modifier by construction.
        assert!(!self.fixate || self.modifiers.len() == 1);
        let mut serializer =
            serializer.serialize_object(SpaTypes::ObjectParamFormat.0, ParamType::EnumFormat.0)?;
        serializer.serialize_property(
            FormatProperties::MediaType.0,
            &Id(MediaType::Video.0),
            PropertyFlags::empty(),
        )?;
        serializer.serialize_property(
            FormatProperties::MediaSubtype.0,
            &Id(MediaSubtype::Raw.0),
            PropertyFlags::empty(),
        )?;
        if let Some(format) = self.format {
            serializer.serialize_property(
                FormatProperties::VideoFormat.0,
                &Id(format.0),
                PropertyFlags::empty(),
            )?;
        }
        if self.modifiers.len() == 1 && self.modifiers[0] == DrmModifier::Invalid {
            // Only the "invalid" (implicit/linear-negotiation) modifier:
            // serialize as a plain mandatory Long rather than a Choice.
            serializer.serialize_property(
                FormatProperties::VideoModifier.0,
                &(u64::from(DrmModifier::Invalid) as i64),
                PropertyFlags::MANDATORY,
            )?;
        } else {
            let mut modifiers: Vec<_> =
                self.modifiers.iter().map(|m| u64::from(*m) as i64).collect();
            // When still enumerating, also offer Invalid as a fallback unless
            // it is already present.
            if !self.modifiers.contains(&DrmModifier::Invalid) && !self.fixate {
                modifiers.push(u64::from(DrmModifier::Invalid) as _);
            }
            let choice = Choice(ChoiceFlags::empty(), ChoiceEnum::Enum {
                default: u64::from(self.modifiers[0]) as _,
                alternatives: modifiers,
            });
            // DONT_FIXATE tells the client the modifier list is still open.
            let flags = if self.fixate {
                PropertyFlags::MANDATORY | PropertyFlags::DONT_FIXATE
            } else {
                PropertyFlags::MANDATORY
            };
            serializer.serialize_property(FormatProperties::VideoModifier.0, &choice, flags)?;
        }
        serializer.serialize_property(
            FormatProperties::VideoSize.0,
            &Rectangle { width: self.width, height: self.height },
            PropertyFlags::empty(),
        )?;
        serializer.serialize_property(
            FormatProperties::VideoFramerate.0,
            &Fraction { num: 0, denom: 1 }, // Variable framerate?
            PropertyFlags::empty(),
        )?;
        serializer.end()
    }
}
/// Parameters advertised to the client as an SPA `Buffers` param: how many
/// buffers we want, their geometry, and which data types we accept.
struct BufferInfo<'a> {
    /// (min, max) acceptable number of buffers; max doubles as the default.
    buffers: (u32, u32),
    /// Number of memory blocks (planes) per buffer.
    blocks: u32,
    /// Per-buffer size in bytes.
    size: u32,
    /// Row stride in bytes.
    stride: u32,
    /// Accepted SPA data types (e.g. DmaBuf).
    data_type: &'a [spa::buffer::DataType],
}
// Serialize `BufferInfo` into an SPA `Buffers` param object pod.
impl<'a> PodSerialize for BufferInfo<'a> {
    fn serialize<O: std::io::prelude::Write + std::io::prelude::Seek>(
        &self,
        serializer: PodSerializer<O>,
    ) -> Result<spa::pod::serialize::SerializeSuccess<O>, spa::pod::serialize::GenError> {
        use spa::{
            sys,
            utils::{Choice, ChoiceEnum, ChoiceFlags},
        };
        let mut serializer =
            serializer.serialize_object(SpaTypes::ObjectParamBuffers.0, ParamType::Buffers.0)?;
        // Buffer count: a Range choice when min != max, otherwise a plain Int.
        if self.buffers.0 != self.buffers.1 {
            serializer.serialize_property(
                sys::SPA_PARAM_BUFFERS_buffers,
                &Choice::<i32>(ChoiceFlags::empty(), ChoiceEnum::Range {
                    default: self.buffers.1 as _,
                    min: self.buffers.0 as _,
                    max: self.buffers.1 as _,
                }),
                PropertyFlags::empty(),
            )?;
        } else {
            serializer.serialize_property(
                sys::SPA_PARAM_BUFFERS_buffers,
                &(self.buffers.0 as i32),
                PropertyFlags::empty(),
            )?;
        }
        serializer.serialize_property(
            sys::SPA_PARAM_BUFFERS_blocks,
            &(self.blocks as i32),
            PropertyFlags::empty(),
        )?;
        serializer.serialize_property(
            sys::SPA_PARAM_BUFFERS_size,
            &(self.size as i32),
            PropertyFlags::empty(),
        )?;
        serializer.serialize_property(
            sys::SPA_PARAM_BUFFERS_stride,
            &(self.stride as i32),
            PropertyFlags::empty(),
        )?;
        // dataType is a Flags choice: each accepted type contributes one bit.
        let data_type_choices = Choice::<i32>(ChoiceFlags::empty(), ChoiceEnum::Flags {
            default: (1 << self.data_type[0].as_raw()) as _,
            flags: self.data_type.iter().map(|t| (1 << t.as_raw()) as _).collect(),
        });
        serializer.serialize_property(
            sys::SPA_PARAM_BUFFERS_dataType,
            &data_type_choices,
            PropertyFlags::empty(),
        )?;
        serializer.end()
    }
}
/// Messages the pipewire thread receives from the compositor side.
type Incoming = MessagesToPipewire;
/// Messages the pipewire thread sends back to the compositor side.
type Outgoing = MessagesFromPipewire;
/// Sender for outgoing messages (held by the pipewire thread).
type Tx = std::sync::mpsc::Sender<Outgoing>;
/// Receiver for incoming messages (drained when the waker fires).
type Rx = std::sync::mpsc::Receiver<Incoming>;
/// Per-stream user data attached to a pipewire stream's listener callbacks.
struct StreamData {
    /// Back-reference to the owning `Pipewire` instance.
    this: Rc<Pipewire>,
    /// Capture region origin in root-window coordinates.
    x: i32,
    y: i32,
    /// Capture region size in pixels.
    width: u32,
    height: u32,
    /// Buffer dequeued but not yet queued back; requeued on `process`.
    outstanding_buffer: Cell<Option<*mut pipewire::sys::pw_buffer>>,
    /// The negotiated format once fixation has completed.
    fixated_format: RefCell<Option<ParamFormat<'static>>>,
    /// GBM buffer objects backing the stream's buffers, keyed by slotmap id.
    buffers: RefCell<HashMap<DefaultKey, gbm::BufferObject<()>>>,
    /// Test allocation buffer
    test_buffer: RefCell<Option<gbm::BufferObject<()>>>,
    /// One-shot used to report the stream's node id (or an error) back to the
    /// requester; consumed on the first transition to Paused.
    reply: Option<oneshot::Sender<Result<u32, anyhow::Error>>>,
}
/// A live pipewire stream together with its listener registration; the
/// listener is installed after insertion into the slotmap (hence `Option`).
struct StreamHandle {
    stream: pipewire::stream::Stream,
    listener: Option<pipewire::stream::StreamListener<StreamData>>,
}
/// State owned by the pipewire thread: the main loop, connection, GBM device
/// for DMA-BUF allocation, and all active screencast streams.
struct Pipewire {
    mainloop: pipewire::main_loop::MainLoop,
    /// Supported (SPA format, DRM modifiers) pairs, advertised on connect.
    formats_modifiers: Vec<(spa::param::video::VideoFormat, Vec<DrmModifier>)>,
    core: pipewire::core::Core,
    /// GBM device backed by the render node; allocates stream buffers.
    gbm: gbm::Device<DrmRenderNode>,
    streams: RefCell<SlotMap<DefaultKey, StreamHandle>>,
    /// Maps a pipewire node id back to its slotmap key in `streams`.
    node_id_to_stream: RefCell<HashMap<u32, DefaultKey>>,
    /// SAFETY: This `IoSource` cannot outlive `self.mainloop`
    fence_waits: RefCell<HashMap<DefaultKey, *mut IoSource<'static, OwnedFd>>>,
    tx: Tx,
    rx: Rx,
}
/// Put `stream` into the error state with a message derived from `err`, then
/// poke the compositor side so the errored stream is noticed and cleaned up
/// (removal happens in the waker handler in `pipewire_main`).
fn stream_set_error(stream: &pipewire::stream::StreamRef, err: impl std::fmt::Debug, tx: &Tx) {
    let err = format!("Error: {:?}", err);
    tracing::debug!("Stream error: {}", err);
    let err = std::ffi::CString::new(err).unwrap();
    // SAFETY: `stream` is a live stream and `err` outlives the call.
    unsafe { pipewire::sys::pw_stream_set_error(stream.as_raw_ptr(), -libc::EIO, err.as_ptr()) };
    tx.send(Outgoing::WakeMeUp).unwrap();
}
impl Pipewire {
    /// Create a new output stream capturing the `width`x`height` region at
    /// (`x`, `y`). The stream's pipewire node id (or an error) is delivered
    /// through `reply` once the stream reaches the Paused state.
    fn new_stream(
        self: &Rc<Self>,
        width: u32,
        height: u32,
        x: i32,
        y: i32,
        embed_cursor: bool,
        reply: oneshot::Sender<Result<u32, anyhow::Error>>,
    ) -> anyhow::Result<()> {
        let stream_props = properties! {
            "media.class" => "Video/Source",
            "media.name" => "Screen",
            "node.name" => "picom-egl-screencast",
        };
        let stream =
            pipewire::stream::Stream::new(&self.core, "picom-egl-screencast", stream_props)?;
        // One EnumFormat pod per supported (format, modifiers) pair.
        let pods: Vec<_> = self
            .formats_modifiers
            .iter()
            .map(|(format, modifiers)| {
                let mut buf = Vec::new();
                PodSerializer::serialize(std::io::Cursor::new(&mut buf), &ParamFormat {
                    format: Some(*format),
                    modifiers: modifiers.into(),
                    width,
                    height,
                    fixate: false,
                })?;
                Ok(buf)
            })
            .collect::<Result<_, anyhow::Error>>()?;
        let mut pods: Vec<_> = pods.iter().map(|p| Pod::from_bytes(p).unwrap()).collect();
        // ALLOC_BUFFERS: we allocate the DMA-BUFs ourselves in add_buffer.
        stream.connect(
            Direction::Output,
            None,
            StreamFlags::DRIVER | StreamFlags::ALLOC_BUFFERS,
            &mut pods,
        )?;
        let data = StreamData {
            outstanding_buffer: Cell::new(None),
            this: self.clone(),
            x,
            y,
            width,
            height,
            fixated_format: RefCell::new(None),
            buffers: Default::default(),
            test_buffer: RefCell::new(None),
            reply: Some(reply),
        };
        let stream_id = self.streams.borrow_mut().insert(StreamHandle { stream, listener: None });
        let listener = self.streams.borrow()[stream_id]
            .stream
            .add_local_listener_with_user_data(data)
            .add_buffer(move |stream, data, buf| {
                data.this
                    .handle_add_buffer(
                        &data.fixated_format.borrow(),
                        &mut data.test_buffer.borrow_mut(),
                        &mut data.buffers.borrow_mut(),
                        unsafe { &mut *buf },
                        stream_id,
                        x,
                        y,
                        embed_cursor,
                    )
                    .unwrap_or_else(|e| stream_set_error(stream, e, &data.this.tx))
            })
            .process(move |stream, data| {
                // Requeue the previously dequeued buffer (its frame is done),
                // then try to dequeue the next one.
                let buffer = data.outstanding_buffer.take();
                tracing::trace!("Process {buffer:?}");
                if let Some(buffer) = buffer {
                    unsafe { stream.queue_raw_buffer(buffer) };
                }
                data.this.send_buffer(stream, data)
            })
            .state_changed(move |stream, data, old_state, state| {
                tracing::info!("State changed: {:?} -> {:?}", old_state, state);
                if state == pipewire::stream::StreamState::Paused {
                    // First Paused transition: the node id is now known,
                    // report it back to the requester exactly once.
                    if let Some(reply) = data.reply.take() {
                        let mut node_id_to_stream = data.this.node_id_to_stream.borrow_mut();
                        node_id_to_stream.insert(stream.node_id(), stream_id);
                        reply.send(Ok(stream.node_id())).unwrap()
                    }
                } else if state == pipewire::stream::StreamState::Streaming {
                    data.this.send_buffer(stream, data)
                }
            })
            .param_changed(
                |stream, StreamData { this, fixated_format, test_buffer, .. }, id, pod| {
                    this.handle_param_changed(
                        &mut fixated_format.borrow_mut(),
                        &mut test_buffer.borrow_mut(),
                        stream,
                        id,
                        pod,
                    )
                    .unwrap_or_else(|e| stream_set_error(stream, e, &this.tx))
                },
            )
            .remove_buffer(|_, StreamData { this, .. }, buf| {
                let buf = unsafe { &mut *buf };
                // Reclaim the leaked slotmap key stored in add_buffer.
                let id = *unsafe { Box::from_raw(buf.user_data as *mut DefaultKey) };
                tracing::info!("Remove buffer: {id:?}");
                let inner_buf = unsafe { &mut *buf.buffer };
                let datas = unsafe {
                    std::slice::from_raw_parts_mut(inner_buf.datas, inner_buf.n_datas as usize)
                };
                // Close each plane's DMA-BUF fd by taking ownership and
                // dropping it immediately.
                for data in datas {
                    unsafe {
                        OwnedFd::from_raw_fd(data.fd as _);
                        data.fd = -1;
                    }
                }
                this.tx.send(Outgoing::RemoveBuffers { ids: smallvec![id] }).unwrap();
            })
            .register()?;
        self.streams.borrow_mut()[stream_id].listener = Some(listener);
        Ok(())
    }
    /// React to a Format param change: fixate the modifier choice if needed,
    /// then test-allocate a GBM buffer and announce buffer requirements.
    fn handle_param_changed(
        &self,
        out_fixated_format: &mut Option<ParamFormat<'static>>,
        out_test_buffer: &mut Option<gbm::BufferObject<()>>,
        stream: &pipewire::stream::StreamRef,
        prop: u32,
        data: Option<&Pod>,
    ) -> anyhow::Result<()> {
        tracing::debug!("Param changed: {:x} {}", prop, data.is_none());
        if prop != spa::sys::SPA_PARAM_Format {
            return Ok(());
        }
        let Some(data) = data else {
            // Format cleared after negotiation completed: treat as the client
            // winding the stream down.
            if out_fixated_format.is_some() {
                tracing::debug!("Stream completed successfully");
                stream_set_error(stream, "Format removed, stopping.", &self.tx);
            }
            return Ok(());
        };
        unsafe { spa::sys::spa_debug_format(1, std::ptr::null(), data.as_raw_ptr()) };
        let (_, params): (_, ParamFormat<'_>) = PodDeserializer::deserialize_from(data.as_bytes())
            .map_err(|e| anyhow::anyhow!("{e:?}"))?;
        tracing::debug!("Format: {params:?}");
        if params.modifiers.is_empty() {
            return Err(anyhow::anyhow!("Client doesn't support DMA-Buf"));
        }
        if let Some(old_fixated_format) = out_fixated_format {
            // Renegotiation must stay compatible with what we already fixated.
            if !old_fixated_format.satisfies(&params) {
                return Err(anyhow::anyhow!("Client requested a different format"));
            }
        } else if !params.fixate {
            // Announcing fixated format
            let fixated_params = ParamFormat {
                format: params.format,
                modifiers: Cow::Owned(vec![params.modifiers[0]]),
                width: params.width,
                height: params.height,
                fixate: true,
            };
            let mut buf = Vec::new();
            PodSerializer::serialize(std::io::Cursor::new(&mut buf), &fixated_params)?;
            let pod = Pod::from_bytes(&buf).unwrap();
            *out_fixated_format = Some(fixated_params);
            stream.update_params(&mut [pod])?;
        } else {
            // Now we have format, try to test allocate a buffer
            tracing::debug!("Test allocation");
            let test_buffer =
                out_test_buffer.insert(self.gbm.create_buffer_object_with_modifiers2(
                    params.width,
                    params.height,
                    spa_format_to_fourcc(
                        params.format.with_context(|| anyhow::anyhow!("Format missing"))?,
                    ),
                    [params.modifiers[0]].into_iter(),
                    BufferObjectFlags::RENDERING,
                )?);
            let stride = test_buffer.stride();
            let size = stride * test_buffer.height();
            let buffer_info = BufferInfo {
                buffers: (2, 3),
                blocks: test_buffer.plane_count(),
                size,
                stride,
                data_type: &[spa::buffer::DataType::DmaBuf],
            };
            let mut buf = Vec::new();
            PodSerializer::serialize(std::io::Cursor::new(&mut buf), &buffer_info)?;
            let pod = Pod::from_bytes(&buf).unwrap();
            *out_fixated_format = Some(params);
            stream.update_params(&mut [pod])?;
        }
        Ok(())
    }
    /// Allocate (or reuse the test) GBM buffer for a new pw_buffer, register
    /// it with the compositor side, and fill in the per-plane DMA-BUF fds.
    fn handle_add_buffer(
        &self,
        format: &Option<ParamFormat<'_>>,
        test_buffer: &mut Option<gbm::BufferObject<()>>,
        buffers: &mut HashMap<DefaultKey, gbm::BufferObject<()>>,
        buf: &mut pipewire::sys::pw_buffer,
        stream_id: DefaultKey,
        x: i32,
        y: i32,
        embed_cursor: bool,
    ) -> anyhow::Result<()> {
        let Some(format) = format else {
            return Err(anyhow::anyhow!("add_buffer called without a negotiated format"));
        };
        tracing::debug!("Add buffer: {format:?}");
        let inner_buf = unsafe { &mut *buf.buffer };
        // Reuse the buffer from the test allocation if it's still around.
        let dma_buf = if let Some(test_buffer) = test_buffer.take() {
            test_buffer
        } else {
            self.gbm.create_buffer_object_with_modifiers2(
                format.width,
                format.height,
                spa_format_to_fourcc(format.format.unwrap()),
                format.modifiers.iter().copied(),
                BufferObjectFlags::RENDERING,
            )?
        };
        assert!(inner_buf.n_datas == dma_buf.plane_count());
        // Hand the buffer to the compositor side and wait for its id.
        let (tx, rx) = oneshot::channel();
        self.tx
            .send(Outgoing::AddBuffer { dma_buf, stream_id, x, y, embed_cursor, reply: tx })
            .unwrap();
        let Ok((id, dma_buf)) = rx.recv() else {
            panic!("picom failed to add buffer");
        };
        // Stash the id in user_data; reclaimed via Box::from_raw in
        // remove_buffer.
        buf.user_data = Box::leak(Box::new(id)) as *mut _ as *mut _;
        let datas =
            unsafe { std::slice::from_raw_parts_mut(inner_buf.datas, inner_buf.n_datas as usize) };
        let height = dma_buf.height();
        for (i, data) in datas.iter_mut().enumerate() {
            let stride = dma_buf.stride_for_plane(i as _);
            // fd ownership transfers to the pw_buffer; closed in
            // remove_buffer.
            data.fd = dma_buf.fd_for_plane(i as _)?.into_raw_fd() as _;
            data.type_ = spa::sys::SPA_DATA_DmaBuf;
            data.data = std::ptr::null_mut();
            data.maxsize = stride * height;
            data.flags = spa::sys::SPA_DATA_FLAG_READWRITE;
            let chunk = unsafe { &mut *data.chunk };
            chunk.offset = dma_buf.offset(i as _);
            chunk.size = stride * height;
            chunk.stride = stride as _;
            chunk.flags = spa::sys::SPA_CHUNK_FLAG_NONE as _;
        }
        buffers.insert(id, dma_buf);
        Ok(())
    }
    /// Dequeue the next buffer (if streaming and none is outstanding) and ask
    /// the compositor side to render into it.
    fn send_buffer(&self, stream: &pipewire::stream::StreamRef, data: &StreamData) {
        if !matches!(stream.state(), pipewire::stream::StreamState::Streaming)
            || data.outstanding_buffer.get().is_some()
        {
            return;
        }
        let Some(buffer) = (unsafe { stream.dequeue_raw_buffer().as_mut() }) else {
            return;
        };
        let id = unsafe { *(buffer.user_data as *const DefaultKey) };
        data.outstanding_buffer.set(Some(buffer));
        self.tx.send(Outgoing::ActivateBuffer { id }).unwrap();
    }
    /// Dispatch one message from the compositor side.
    fn handle_message(self: &Rc<Self>, msg: Incoming) -> anyhow::Result<()> {
        match msg {
            Incoming::CreateStream { x, y, width, height, reply, embed_cursor } => {
                tracing::debug!("Creating stream with size {}x{}", width, height);
                self.new_stream(width, height, x, y, embed_cursor, reply)?;
            }
            Incoming::NewFrame { id, fence, stream_id } => {
                tracing::trace!("New frame: {id:?} {fence:?} {stream_id:?}");
                let fd = unsafe { OwnedFd::from_raw_fd(u32::from(fence) as _) };
                if self.streams.borrow().get(stream_id).is_some() {
                    let this = self.clone();
                    // Wait for the GPU fence fd to signal before triggering
                    // stream processing. The IoSource is leaked into a raw
                    // pointer (fence_waits) and reclaimed inside its own
                    // callback once it fires.
                    let io_source = self.mainloop.loop_().add_io(fd, IoFlags::IN, move |_| {
                        tracing::trace!("Fence triggered: {id:?} {stream_id:?}");
                        let streams = this.streams.borrow();
                        if let Some(stream) = streams.get(stream_id) {
                            stream.stream.trigger_process().unwrap_or_else(|e| {
                                stream_set_error(&streams[stream_id].stream, e, &this.tx)
                            });
                        }
                        let _ = unsafe {
                            Box::from_raw(this.fence_waits.borrow_mut().remove(&id).unwrap())
                        };
                    });
                    let io_source = Box::leak(Box::new(io_source)) as *mut IoSource<'_, OwnedFd>;
                    self.fence_waits.borrow_mut().insert(id, io_source.cast());
                }
            }
            Incoming::BufferError { id, stream_id } => {
                tracing::debug!("Buffer error: {id:?} {stream_id:?}");
                let streams = self.streams.borrow();
                stream_set_error(&streams[stream_id].stream, "Buffer error", &self.tx);
            }
            Incoming::CloseStreams { node_ids } => {
                tracing::debug!("Close streams: {node_ids:?}");
                // Mark each stream errored; actual removal happens in the
                // waker handler in `pipewire_main`.
                let streams = self.streams.borrow();
                let node_id_to_stream = self.node_id_to_stream.borrow();
                for id in node_ids {
                    if let Some(stream_id) = node_id_to_stream.get(&id).copied() {
                        stream_set_error(
                            &streams[stream_id].stream,
                            "Client requested close",
                            &self.tx,
                        );
                    }
                }
            }
        }
        Ok(())
    }
}
/// Entry point of the dedicated pipewire thread: connects to pipewire, then
/// runs a main loop that is woken through `waker` whenever messages arrive on
/// `rx`. Errored streams are garbage-collected on each wakeup.
///
/// # Safety
///
/// NOTE(review): no raw-pointer dereference is visible in this body; the
/// `unsafe` marker presumably reflects requirements of the underlying
/// pipewire FFI initialization — confirm and document the exact contract.
pub unsafe fn pipewire_main(
    gbm: gbm::Device<DrmRenderNode>,
    waker: pipewire::channel::Receiver<()>,
    rx: std::sync::mpsc::Receiver<crate::MessagesToPipewire>,
    tx: std::sync::mpsc::Sender<crate::MessagesFromPipewire>,
    formats_modifiers: Vec<(spa::param::video::VideoFormat, Vec<DrmModifier>)>,
) -> anyhow::Result<()> {
    pipewire::init();
    tracing::debug!("Starting pipewire thread, #formats {}", formats_modifiers.len());
    // Log the supported formats and decode a few AMD modifier bits (DCC
    // compression flags) for debugging.
    for (format, modifiers) in &formats_modifiers {
        tracing::debug!("  format: {format:?}");
        for modifier in modifiers {
            let raw_modifier = u64::from(*modifier);
            let amd_modifier = crate::AmdModifier(raw_modifier);
            tracing::debug!(
                "    modifier: {amd_modifier:x?}, dcc: {}, dcc 64b: {}, dcc 128b {}",
                (raw_modifier >> 13) & 1,
                (raw_modifier >> 16) & 1,
                (raw_modifier >> 17) & 1
            );
        }
    }
    let mainloop = ::pipewire::main_loop::MainLoop::new(None)?;
    let context = pipewire::context::Context::new(&mainloop)?;
    let core = context.connect(None)?;
    let _attached = waker.attach(mainloop.as_ref(), {
        let mainloop = mainloop.clone();
        let pipewire = Rc::new(Pipewire {
            mainloop,
            gbm,
            formats_modifiers,
            fence_waits: Default::default(),
            core,
            streams: Default::default(),
            tx,
            rx,
            node_id_to_stream: Default::default(),
        });
        move |()| {
            tracing::trace!("Woken");
            // Drain all pending messages; quit the loop on handler error or
            // when the compositor side has disconnected.
            loop {
                match pipewire.rx.try_recv() {
                    Ok(msg) => {
                        match pipewire.handle_message(msg) {
                            Ok(()) => {}
                            Err(e) => {
                                tracing::error!("Error handling message: {:?}", e);
                                pipewire.mainloop.quit();
                                return;
                            }
                        }
                    }
                    Err(std::sync::mpsc::TryRecvError::Empty) => break,
                    Err(std::sync::mpsc::TryRecvError::Disconnected) => {
                        pipewire.mainloop.quit();
                        return;
                    }
                }
            }
            // Drop streams that entered the error state (set via
            // stream_set_error) and forget their node-id mapping.
            let mut node_id_to_stream = pipewire.node_id_to_stream.borrow_mut();
            pipewire.streams.borrow_mut().retain(|_, StreamHandle { stream, .. }| {
                if matches!(stream.state(), pipewire::stream::StreamState::Error(_)) {
                    tracing::info!("Removing errored stream");
                    let key = node_id_to_stream.remove(&stream.node_id()).unwrap();
                    tracing::info!("Removed stream: {key:?}");
                    false
                } else {
                    true
                }
            });
        }
    });
    mainloop.run();
    Ok(())
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
plugin/src/server.rs | Rust | use std::{
io::{Read as _, Write as _},
os::{
fd::AsRawFd as _,
unix::net::{UnixListener, UnixStream},
},
path::{Path, PathBuf},
pin::pin,
sync::Arc,
};
use async_channel::{Receiver, Sender};
use futures_util::{
io::{ReadHalf, WriteHalf},
stream::FuturesOrdered,
AsyncReadExt, AsyncWriteExt as _, Sink, SinkExt as _, Stream, StreamExt as _, TryFutureExt,
};
use smallvec::SmallVec;
use smol::Async;
use x11rb::protocol::xproto::{ConnectionExt as _, PropMode};
/// Choose a path for a runtime file named `name`.
///
/// Prefers the XDG runtime directory (`$XDG_RUNTIME_DIR/picom/<name>`); if
/// that is unavailable, falls back to `$TMPDIR` (or `/tmp`) with a `picom-`
/// prefix to keep the name distinguishable.
fn place_runtime_file(name: &str) -> PathBuf {
    if let Some(path) = xdg::BaseDirectories::with_prefix("picom")
        .ok()
        .and_then(|base| base.place_runtime_file(name).ok())
    {
        return path;
    }
    let name = format!("picom-{}", name);
    // BUGFIX: an empty-but-set $TMPDIR previously produced a relative path
    // (`Path::new("").join(name)`); treat empty the same as unset.
    match std::env::var_os("TMPDIR") {
        Some(tmp) if !tmp.is_empty() => Path::new(&tmp).join(name),
        _ => Path::new("/tmp").join(name),
    }
}
use protocol::{ClientMessage, ServerMessage};
/// One connected control-socket client, exposed as a combined `Stream` of
/// incoming [`ClientMessage`]s and `Sink` of outgoing [`ServerMessage`]s.
#[pin_project::pin_project]
struct Client {
    /// Outgoing messages; drained by the detached write-side task.
    #[pin]
    tx: Sender<ServerMessage>,
    /// Incoming messages (or the read error that terminated the connection).
    #[pin]
    rx: Receiver<anyhow::Result<ClientMessage>>,
    // Held (not awaited) so the read-side task is cancelled when the Client
    // is dropped.
    read_task: smol::Task<()>,
}
impl Client {
    /// Read loop: parse length-prefixed (u32 big-endian) JSON messages from
    /// the socket and forward them to `tx`. A clean EOF at a message boundary
    /// ends the loop without error.
    ///
    /// NOTE(review): `len` comes from the (unauthenticated at this point)
    /// client and is used unchecked in `buf.resize` — consider an upper bound
    /// to avoid huge allocations.
    async fn read_side_inner(
        mut stream: ReadHalf<Async<UnixStream>>,
        tx: &Sender<anyhow::Result<ClientMessage>>,
    ) -> anyhow::Result<()> {
        let mut buf = Vec::new();
        loop {
            let mut len = [0u8; 4];
            match stream.read_exact(&mut len).await {
                Ok(()) => (),
                Err(e) => {
                    // EOF between messages is a normal disconnect.
                    if e.kind() == std::io::ErrorKind::UnexpectedEof {
                        return Ok(());
                    }
                    return Err(e.into());
                }
            }
            let len = u32::from_be_bytes(len);
            tracing::debug!("Read message of length {}", len);
            buf.resize(len as usize, 0u8);
            stream.read_exact(&mut buf).await?;
            let msg = serde_json::from_slice(&buf)?;
            tracing::debug!("Received message: {:?}", msg);
            tx.send(Ok(msg)).await?;
        }
    }
    /// Wrapper around [`Self::read_side_inner`] that forwards a terminal read
    /// error into the channel so the consumer observes it.
    #[tracing::instrument(skip(stream, tx))]
    async fn read_side(
        stream: ReadHalf<Async<UnixStream>>,
        tx: Sender<anyhow::Result<ClientMessage>>,
    ) {
        match Self::read_side_inner(stream, &tx).await {
            Ok(()) => (),
            Err(e) => {
                tracing::error!("Client read side error: {:?}", e);
                tx.send(Err(e)).await.ok();
            }
        }
        tracing::debug!("Client read side exited");
    }
    /// Write loop: serialize each queued message as length-prefixed JSON and
    /// write it to the socket; ends when the channel closes.
    #[tracing::instrument(skip(stream, rx))]
    async fn write_side(
        mut stream: WriteHalf<Async<UnixStream>>,
        rx: Receiver<ServerMessage>,
    ) -> anyhow::Result<()> {
        loop {
            let Ok(msg) = rx.recv().await else {
                break Ok(());
            };
            let msg = serde_json::to_string(&msg)?;
            let msg = msg.as_bytes();
            let mut len = [0u8; 4];
            len.copy_from_slice(&(msg.len() as u32).to_be_bytes());
            stream.write_all(&len).await?;
            stream.write_all(msg).await?;
        }
    }
    /// Split `stream` into read/write halves and spawn the pump tasks. The
    /// write task is detached; the read task's handle is kept so it is
    /// cancelled with the `Client`.
    fn new(stream: Async<UnixStream>) -> Self {
        let (tx1, rx1) = async_channel::unbounded();
        let (tx2, rx2) = async_channel::unbounded();
        let (read, write) = stream.split();
        let read_task = smol::spawn(Self::read_side(read, tx2));
        smol::spawn(async {
            Self::write_side(write, rx1)
                .unwrap_or_else(|e| {
                    tracing::error!("Client write side error: {:?}", e);
                })
                .await;
        })
        .detach();
        Self { tx: tx1, rx: rx2, read_task }
    }
}
// Incoming messages: items are pulled from the read-side channel; the stream
// ends when the channel closes (client disconnected).
impl Stream for Client {
    type Item = anyhow::Result<ClientMessage>;
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = self.project();
        // Fast path via try_recv; fall back to poll_next only when empty so
        // the waker gets registered.
        match this.rx.try_recv() {
            Err(async_channel::TryRecvError::Empty) => this.rx.poll_next(cx),
            Ok(msg) => std::task::Poll::Ready(Some(msg)),
            Err(async_channel::TryRecvError::Closed) => std::task::Poll::Ready(None),
        }
    }
}
// Outgoing messages: sends go into the unbounded write-side channel, so the
// sink is always ready/flushed while the channel is open (`Full` cannot
// happen on an unbounded channel).
impl Sink<ServerMessage> for Client {
    type Error = anyhow::Error;
    fn poll_ready(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        if self.tx.is_closed() {
            return std::task::Poll::Ready(Err(anyhow::anyhow!("Channel closed")));
        }
        std::task::Poll::Ready(Ok(()))
    }
    fn start_send(self: std::pin::Pin<&mut Self>, item: ServerMessage) -> Result<(), Self::Error> {
        let this = self.project();
        match this.tx.try_send(item) {
            Ok(_) => Ok(()),
            Err(async_channel::TrySendError::Closed(_)) => Err(anyhow::anyhow!("Channel closed")),
            // Unreachable: the channel is unbounded.
            Err(async_channel::TrySendError::Full(_)) => unreachable!(),
        }
    }
    fn poll_flush(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        if self.tx.is_closed() {
            return std::task::Poll::Ready(Err(anyhow::anyhow!("Channel closed")));
        }
        std::task::Poll::Ready(Ok(()))
    }
    fn poll_close(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.tx.close();
        std::task::Poll::Ready(Ok(()))
    }
}
/// Handles a single client connection end-to-end.
///
/// The protocol is one request per connection: read exactly one
/// `ClientMessage`, act on it (replying where the protocol defines a reply),
/// then return and let the connection drop.
async fn client_task_inner(
    our_cookie: Arc<String>,
    stream: Async<UnixStream>,
    pw_tx: &crate::PipewireSender,
) -> anyhow::Result<()> {
    let client = Box::pin(Client::new(stream));
    let mut client = client.fuse();
    tracing::info!("Client task started");
    let Some(msg) = client.next().await else {
        tracing::warn!("Client disconnected before sending any message");
        return Ok(());
    };
    tracing::info!("Message: {:?}", msg);
    match msg? {
        ClientMessage::CloseStreams { node_ids } => {
            tracing::info!("CloseStreams: {:?}", node_ids);
            let pw_tx = pw_tx.clone();
            // The pipewire sender blocks; run it off the async executor.
            smol::unblock(move || {
                pw_tx.start_send().send(crate::MessagesToPipewire::CloseStreams { node_ids })
            })
            .await?;
            tracing::info!("Request sent");
        }
        ClientMessage::CreateStream { cookie, rectangles, embed_cursor } => {
            tracing::info!("CreateStream: {:?}", cookie);
            if cookie != *our_cookie {
                // BUGFIX: this error used to interpolate `our_cookie`, which
                // leaked the shared secret to an unauthenticated peer. Report
                // the (peer-supplied) cookie we received instead.
                return Err(anyhow::anyhow!("Invalid cookie {}", cookie));
            }
            // Queue one CreateStream request per rectangle; FuturesOrdered
            // keeps the oneshot replies in request order.
            let rxs = smol::unblock({
                let pw_tx = pw_tx.clone();
                move || {
                    let mut pw_tx = pw_tx.start_send();
                    let mut rxs = FuturesOrdered::new();
                    for r in rectangles {
                        let (tx, rx) = oneshot::channel();
                        pw_tx.send(crate::MessagesToPipewire::CreateStream {
                            width: r.width,
                            height: r.height,
                            x: r.x,
                            y: r.y,
                            embed_cursor,
                            reply: tx,
                        })?;
                        rxs.push_back(rx);
                    }
                    Ok::<_, anyhow::Error>(rxs)
                }
            })
            .await?;
            // Two layers of results: the outer one is the oneshot channel
            // itself failing (reply sender dropped -> hard error via `?`),
            // the inner one is the stream-creation outcome, which is
            // reported back to the client below.
            let node_ids: SmallVec<[_; 6]> = rxs.collect().await;
            let node_ids: SmallVec<[_; 6]> = node_ids.into_iter().collect::<Result<_, _>>()?;
            let node_ids: Result<SmallVec<[_; 6]>, _> = node_ids.into_iter().collect();
            tracing::info!("CreateStream reply: {:?}", node_ids);
            match node_ids {
                Ok(node_ids) => {
                    client.send(ServerMessage::StreamCreated { node_ids }).await?;
                }
                Err(e) => {
                    client
                        .send(ServerMessage::StreamCreationError { error: format!("{:?}", e) })
                        .await?;
                }
            }
        }
    }
    tracing::info!("Client task exited");
    Ok(())
}
#[tracing::instrument(skip(our_cookie, stream, pw_tx), fields(stream = stream.get_ref().as_raw_fd()))]
async fn client_task(
our_cookie: Arc<String>,
stream: Async<UnixStream>,
pw_tx: crate::PipewireSender,
) {
match client_task_inner(our_cookie, stream, &pw_tx).await {
Ok(_) => (),
Err(e) => tracing::error!("Error: {:?}", e),
}
}
/// Server accept loop: takes the single-instance pid-file lock, binds the
/// unix socket, advertises its path via a property on the compositor's
/// selection-owner window, and spawns one task per incoming client.
async fn run(
    our_cookie: Arc<String>,
    pw_tx: crate::PipewireSender,
    selection_owner: u32,
) -> anyhow::Result<()> {
    let file_name = format!("egl-screencast-{}", std::env::var("DISPLAY")?);
    // Open without truncating so that, if another instance already holds the
    // lock, we can still read its pid out of the file below.
    let mut pidfile = std::fs::OpenOptions::new()
        .create(true)
        .truncate(false)
        .read(true)
        .write(true)
        .open(place_runtime_file(&format!("{}.pid", file_name)))?;
    match pidfile.try_lock() {
        Ok(_) => {
            // BUGFIX: truncate now that we own the lock. Previously a shorter
            // pid written over a longer stale one left trailing digits behind
            // (e.g. "678" over "12345" read back as "67845").
            pidfile.set_len(0)?;
            pidfile.write_all(format!("{}", std::process::id()).as_bytes())?;
        }
        Err(e) => {
            let mut buf = String::new();
            pidfile.read_to_string(&mut buf)?;
            eprintln!("Another instance of picom-egl-screencast is running: {}, pid: {}", e, buf);
            return Ok(());
        }
    }
    let socket_path = place_runtime_file(&file_name);
    // Remove a stale socket from a previous run; "not found" is fine.
    std::fs::remove_file(&socket_path).or_else(|e| {
        if e.kind() == std::io::ErrorKind::NotFound {
            Ok(())
        } else {
            Err(e)
        }
    })?;
    {
        // Publish the socket path on the selection-owner window so the
        // portal backend can discover it via X11.
        let (x11, _) = x11rb::rust_connection::RustConnection::connect(None)?;
        let egl_screencast_socket_atom =
            x11.intern_atom(false, b"EGL_SCREENCAST_SOCKET")?.reply()?.atom;
        let utf8_string = x11.intern_atom(false, b"UTF8_STRING")?.reply()?.atom;
        let path_bytes = socket_path.to_str().unwrap().as_bytes();
        x11.change_property(
            PropMode::REPLACE,
            selection_owner,
            egl_screencast_socket_atom,
            utf8_string,
            8,
            path_bytes.len() as u32,
            path_bytes,
        )?
        .check()?;
    }
    let listener = UnixListener::bind(&socket_path)?;
    let listener = async_io::Async::new(listener)?;
    let mut incoming = pin!(listener.incoming().fuse());
    println!("Listening on {:?}", listener.get_ref().local_addr()?);
    while let Some(new_client) = incoming.next().await {
        match new_client {
            Ok(new_client) => {
                tracing::info!("New client from {:?}", new_client.get_ref().peer_addr());
                smol::spawn(client_task(our_cookie.clone(), new_client, pw_tx.clone())).detach();
            }
            Err(e) => tracing::error!("Error accepting new client: {:?}", e),
        }
    }
    Ok(())
}
/// Blocking entry point for the screencast server; returns when the accept
/// loop ends or fails.
pub fn start_server(our_cookie: Arc<String>, pw_tx: crate::PipewireSender, selection_owner: u32) {
    // A client closing its socket mid-write would otherwise deliver SIGPIPE
    // and kill the whole process; ignore it and surface EPIPE as an error.
    unsafe { libc::signal(libc::SIGPIPE, libc::SIG_IGN) };
    smol::block_on(async {
        match run(our_cookie, pw_tx, selection_owner).await {
            Err(e) => println!("Error: {:?}", e),
            Ok(()) => println!("Server exited."),
        }
    });
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
portal/build.rs | Rust | fn main() -> anyhow::Result<()> {
pkg_config::probe_library("x11-xcb")?;
pkg_config::probe_library("egl")?;
println!("cargo:rerun-if-changed=build.rs");
Ok(())
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
portal/src/main.rs | Rust | use std::{
os::unix::{ffi::OsStrExt, net::UnixStream},
pin::Pin,
sync::atomic::AtomicUsize,
task::ready,
};
use anyhow::Context;
use async_io::Async;
use futures_util::{
stream::FuturesOrdered, AsyncRead as _, AsyncWrite, Sink, SinkExt, Stream, StreamExt as _,
};
use itertools::izip;
use serde::{Deserialize, Serialize};
use smallvec::SmallVec;
use x11rb_async::{
connection::Connection,
protocol::{randr::ConnectionExt, xproto::ConnectionExt as _},
};
use zbus::zvariant;
use zvariant::{DeserializeDict, OwnedValue, SerializeDict, Type};
/// A length-prefixed-JSON framing layer over the unix socket connecting the
/// portal backend to the compositor (picom).
struct Picom {
    // Socket to the compositor's screencast server.
    conn: Async<UnixStream>,
    // Incoming frame assembly buffer (header or payload bytes read so far).
    buf: Vec<u8>,
    // Length of the frame currently being read; `None` while still reading
    // the 4-byte big-endian length header.
    len: Option<u32>,
    // Number of bytes of the current header/payload already read.
    pos: usize,
    // Shared secret read from the compositor's X11 property; proves we are
    // attached to the same X server (see `Picom::new`).
    cookie: String,
    // Outgoing frame staged by `start_send`, drained by `poll_ready`.
    out_buf: Vec<u8>,
    // Number of bytes of `out_buf` already written to the socket.
    out_pos: usize,
}
// The socket and buffers carry no useful debug information; print just the
// type name (identical output to `debug_struct("Picom").finish()`).
impl std::fmt::Debug for Picom {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Picom")
    }
}
/// Sink half of the picom connection: `start_send` serializes one message
/// into `out_buf` behind a 4-byte big-endian length prefix; `poll_ready` and
/// `poll_flush` drain that buffer into the socket.
impl Sink<protocol::ClientMessage> for Picom {
    type Error = anyhow::Error;
    fn poll_close(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.conn).poll_close(cx).map_err(Into::into)
    }
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        // First push out any staged frame, then flush the socket itself.
        ready!(self.as_mut().poll_ready(cx)?);
        Pin::new(&mut self.conn).poll_flush(cx).map_err(Into::into)
    }
    fn poll_ready(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        // Write out the staged frame. Per the Sink contract `poll_ready` must
        // complete before the next `start_send`, so `out_buf` is empty again
        // whenever a new message is staged.
        while self.out_pos < self.out_buf.len() {
            let Self { conn, out_buf, out_pos, .. } = &mut *self;
            let written = ready!(Pin::new(&mut *conn).poll_write(cx, &out_buf[*out_pos..]))?;
            tracing::info!("written: {}", written);
            self.out_pos += written;
        }
        self.out_buf.clear();
        self.out_pos = 0;
        std::task::Poll::Ready(Ok(()))
    }
    fn start_send(
        mut self: Pin<&mut Self>,
        item: protocol::ClientMessage,
    ) -> Result<(), Self::Error> {
        // Serialize starting at offset 4; a Cursor over a Vec zero-fills the
        // gap when writing past the end, and the real payload length is
        // patched into bytes 0..4 afterwards.
        let mut cursor = std::io::Cursor::new(&mut self.out_buf);
        cursor.set_position(4);
        serde_json::to_writer(&mut cursor, &item)?;
        let len = cursor.position() as u32 - 4;
        self.out_buf[..4].copy_from_slice(&len.to_be_bytes()[..]);
        Ok(())
    }
}
/// Stream half of the picom connection: reassembles length-prefixed JSON
/// frames (4-byte big-endian header, then payload) into `ServerMessage`s.
impl Stream for Picom {
    type Item = anyhow::Result<protocol::ServerMessage>;
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        loop {
            if let Some(len) = self.len {
                // Payload phase: read `len` bytes of JSON.
                let len = len as usize;
                let Self { conn, buf, pos, .. } = &mut *self;
                buf.resize(len, 0);
                if *pos < len {
                    let nbytes = ready!(Pin::new(&mut *conn).poll_read(cx, &mut buf[*pos..]))?;
                    if nbytes == 0 {
                        // BUGFIX: `poll_read` returning Ok(0) means EOF.
                        // Previously this fell through to `continue` and the
                        // loop spun forever inside a single `poll_next` call
                        // when the compositor closed the connection.
                        return std::task::Poll::Ready(Some(Err(anyhow::anyhow!(
                            "connection closed in the middle of a frame"
                        ))));
                    }
                    *pos += nbytes;
                    if *pos < len {
                        continue;
                    }
                }
                let ret = serde_json::from_slice(&self.buf)?;
                self.len = None;
                self.pos = 0;
                self.buf.clear();
                break std::task::Poll::Ready(Some(Ok(ret)));
            } else {
                // Header phase: read the 4-byte big-endian length prefix.
                self.buf.resize(4, 0);
                let Self { conn, buf, pos, .. } = &mut *self;
                let nbytes = ready!(Pin::new(&mut *conn).poll_read(cx, &mut buf[*pos..]))?;
                if nbytes == 0 {
                    // EOF between frames is a clean shutdown; EOF inside a
                    // partially-read header means the peer died mid-send.
                    break std::task::Poll::Ready(if *pos == 0 {
                        None
                    } else {
                        Some(Err(anyhow::anyhow!("connection closed mid-header")))
                    });
                }
                *pos += nbytes;
                if *pos < 4 {
                    continue;
                }
                self.len = Some(u32::from_be_bytes(self.buf[..4].try_into()?));
                tracing::info!("incoming len: {:?}", self.len);
                self.pos = 0;
                self.buf.clear();
            }
        }
    }
}
/// Interns `name` on the X server and returns the resulting atom id.
async fn get_atom(
    x11: &x11rb_async::rust_connection::RustConnection,
    name: &[u8],
) -> anyhow::Result<u32> {
    let request = x11.intern_atom(false, name).await?;
    let reply = request.reply().await?;
    Ok(reply.atom)
}
impl Picom {
    /// Discovers the compositor's screencast socket and cookie via X11
    /// properties on the `_NET_WM_CM_Sn` selection-owner window, then
    /// connects to that socket.
    async fn new(
        x11: &x11rb_async::rust_connection::RustConnection,
        screen: usize,
    ) -> anyhow::Result<Self> {
        let compositor_selection = format!("_NET_WM_CM_S{}", screen);
        // Intern all four atoms concurrently; FuturesOrdered preserves order,
        // so the array destructuring below lines up with this list.
        let futs: FuturesOrdered<_> = [
            get_atom(x11, compositor_selection.as_bytes()),
            get_atom(x11, b"EGL_SCREENCAST_COOKIE"),
            get_atom(x11, b"EGL_SCREENCAST_SOCKET"),
            get_atom(x11, b"UTF8_STRING"),
        ]
        .into_iter()
        .collect();
        let [compositor_selection, egl_screencast_cookie_atom, egl_screencast_socket_atom, utf8_string_atom] =
            <[_; 4]>::try_from(futs.collect::<Vec<_>>().await).unwrap();
        let selection_owner =
            x11.get_selection_owner(compositor_selection?).await?.reply().await?.owner;
        tracing::info!("selection_owner: {selection_owner:#x}");
        let utf8_string_atom = utf8_string_atom?;
        let (cookie, path) = if selection_owner != x11rb::NONE {
            // The compositor publishes the shared-secret cookie and the
            // socket path as properties on its selection-owner window.
            let egl_screencast_cookie = x11
                .get_property(
                    false,
                    selection_owner,
                    egl_screencast_cookie_atom?,
                    utf8_string_atom,
                    0,
                    128,
                )
                .await?
                .reply()
                .await?;
            if egl_screencast_cookie.type_ == x11rb::NONE {
                return Err(anyhow::anyhow!("No cookie found"));
            }
            let egl_screencast_socket = x11
                .get_property(
                    false,
                    selection_owner,
                    egl_screencast_socket_atom?,
                    utf8_string_atom,
                    0,
                    1024,
                )
                .await?
                .reply()
                .await?;
            if egl_screencast_socket.type_ == x11rb::NONE {
                return Err(anyhow::anyhow!("No socket found"));
            }
            (egl_screencast_cookie.value, egl_screencast_socket.value)
        } else {
            // No selection owner => no compositor offering this protocol.
            return Err(anyhow::anyhow!("No compatible compositor found"));
        };
        let path = std::ffi::OsStr::from_bytes(&path);
        println!("path: {:?} {}", path, std::str::from_utf8(&cookie)?);
        let socket = std::path::Path::new(path);
        let conn = Async::new(UnixStream::connect(socket)?)?;
        Ok(Self {
            conn,
            cookie: String::from_utf8(cookie).context("Invalid cookie")?,
            buf: Vec::new(),
            len: None,
            pos: 0,
            out_buf: Vec::new(),
            out_pos: 0,
        })
    }
}
/// State for one screencast session object exported on the bus.
#[derive(Debug)]
struct Session {
    // D-Bus object path this session is served at; used to remove the
    // object again in `close`.
    path: zbus::zvariant::ObjectPath<'static>,
    // Source types chosen via SelectSources (intersected with what we offer).
    source_type: SourceType,
    // Whether the requester can handle more than one stream.
    allow_multiple: bool,
    cursor_mode: CursorMode,
    // True when the client asked for restore data (persist_mode != 0).
    persist: bool,
    // Pipewire node ids of the streams created for this session.
    node_ids: SmallVec<[u32; 6]>,
    // Monitor-name payload from a previous session, used by `start` to pick
    // the same monitor again.
    restore_data: Option<RestoreDataInner>,
}
#[zbus::interface(name = "org.freedesktop.impl.portal.Session")]
impl Session {
    #[zbus(property, name = "version")]
    fn version(&self) -> u32 { 1 }
    #[zbus(signal)]
    async fn closed(signal_ctx: zbus::SignalContext<'_>) -> zbus::Result<()>;
    /// Tear down the session: ask picom to close its streams, emit the
    /// `Closed` signal, and unregister this object from the bus.
    async fn close(
        &self,
        #[zbus(object_server)] server: &zbus::ObjectServer,
        #[zbus(signal_context)] signal_ctx: zbus::SignalContext<'_>,
    ) -> zbus::fdo::Result<()> {
        tracing::debug!("Session::Close called");
        // No connection to picom is kept across calls; re-discover it
        // through X11 for this single request.
        let (x11, screen, fut) = x11rb_async::rust_connection::RustConnection::connect(None)
            .await
            .fdo_context("Failed to connect to X11")?;
        let _task = smol::spawn(fut);
        let mut picom = Picom::new(&x11, screen).await.fdo_context("Failed to connect to picom")?;
        picom
            .send(protocol::ClientMessage::CloseStreams { node_ids: self.node_ids.clone() })
            .await
            .fdo_context("Failed to close streams")?;
        picom.flush().await.fdo_context("Failed to flush")?;
        Self::closed(signal_ctx).await?;
        server.remove::<Self, _>(&self.path).await.unwrap();
        Ok(())
    }
}
/// Root object implementing org.freedesktop.impl.portal.ScreenCast.
#[derive(Debug)]
struct ScreenCast {
    // NOTE(review): this map is never read or written in the visible code
    // beyond its construction in `main` — sessions live as standalone D-Bus
    // objects instead. Confirm whether it can be removed.
    sessions: std::collections::HashMap<String, Session>,
    /// The monitor to pick when the requester doesn't support allow_multiple.
    default_monitor: AtomicUsize,
}
/// Adapter letting a `zvariant::Array` be built through `Extend` /
/// `FromIterator` (e.g. via `collect()`), appending each item as a
/// `zvariant::Value`.
struct ArrayExtend<'a>(zbus::zvariant::Array<'a>);
impl<'a, T: Into<zvariant::Value<'a>>> Extend<T> for ArrayExtend<'a> {
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        for item in iter {
            // `append` only fails on a signature mismatch, which the array
            // being created from `T::signature()` should rule out.
            self.0.append(item.into()).unwrap();
        }
    }
}
impl<'a, T: zvariant::Type + Into<zvariant::Value<'a>>> FromIterator<T> for ArrayExtend<'a> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut array = Self(zvariant::Array::new(T::signature()));
        array.extend(iter);
        array
    }
}
impl<'a> From<ArrayExtend<'a>> for zvariant::Value<'a> {
    fn from(value: ArrayExtend<'a>) -> Self { zvariant::Value::Array(value.0) }
}
impl<'a> TryFrom<ArrayExtend<'a>> for zvariant::OwnedValue {
    type Error = zvariant::Error;
    fn try_from(value: ArrayExtend<'a>) -> Result<Self, Self::Error> {
        zvariant::Value::Array(value.0).try_into()
    }
}
bitflags::bitflags! {
    /// How the cursor should appear in the stream; crosses the bus as a raw
    /// `u32` bitmask (see the `Type` impl below).
    #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
    struct CursorMode: u32 {
        const None = 0;
        const Hidden = 1;
        const Embedded = 2;
        const Metadata = 4;
    }
}
bitflags::bitflags! {
    /// Kinds of capture sources; crosses the bus as a raw `u32` bitmask.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
    struct SourceType: u32 {
        const Monitor = 1;
        const Window = 2;
        const Virtual = 4;
    }
}
// Both bitflag types serialize on D-Bus with a plain `u32` signature.
impl Type for SourceType {
    fn signature() -> zvariant::Signature<'static> { u32::signature() }
}
impl Type for CursorMode {
    fn signature() -> zvariant::Signature<'static> { u32::signature() }
}
trait FdoContext<T, E> {
fn fdo_context(self, msg: &str) -> zbus::fdo::Result<T>;
fn with_fdo_context(self, f: impl FnOnce(E) -> String) -> zbus::fdo::Result<T>;
}
impl<T, E: std::fmt::Debug> FdoContext<T, E> for Result<T, E> {
fn with_fdo_context(self, f: impl FnOnce(E) -> String) -> zbus::fdo::Result<T> {
self.map_err(|e| zbus::fdo::Error::Failed(f(e)))
}
fn fdo_context(self, msg: &str) -> zbus::fdo::Result<T> {
self.with_fdo_context(|e| format!("{msg}: {e:?}"))
}
}
/// Opaque payload stored inside the portal's `restore_data`; currently the
/// raw monitor-name bytes (see `start`).
#[derive(Serialize, Deserialize, Debug, OwnedValue)]
struct RestoreDataInner {
    data: Vec<u8>,
}
/// The `(vendor, version, data)` triple exchanged through the portal's
/// `restore_data` option; `select_sources` only accepts `vendor == "picom"`,
/// `version == 0`.
#[derive(Serialize, Deserialize, Debug, Type)]
struct RestoreData {
    vendor: String,
    version: u32,
    data: zvariant::OwnedValue,
}
/// Per-stream properties returned from `Start`.
#[derive(SerializeDict, Type)]
#[zvariant(signature = "a{sv}")]
struct StreamDict {
    // (x, y) in root coordinates.
    position: (i32, i32),
    size: (i32, i32),
    source_type: SourceType,
    // Monitor name the stream maps to, when known.
    mapping_id: Option<String>,
}
/// Results dictionary of `Start`.
#[derive(SerializeDict, Type)]
#[zvariant(signature = "a{sv}")]
struct StartResponse {
    streams: Vec<(u32, StreamDict)>,
    restore_data: Option<RestoreData>,
}
/// Options dictionary accepted by `SelectSources`; all keys optional.
#[derive(DeserializeDict, Type, Debug)]
#[zvariant(signature = "a{sv}")]
struct SelectSourcesOptions {
    multiple: Option<bool>,
    types: Option<SourceType>,
    cursor_mode: Option<CursorMode>,
    persist_mode: Option<u32>,
    restore_data: Option<RestoreData>,
}
#[zbus::interface(name = "org.freedesktop.impl.portal.ScreenCast")]
impl ScreenCast {
    // Source types this backend can actually provide (no Window capture).
    const AVAILABLE_SOURCE_TYPE: SourceType = SourceType::Monitor.union(SourceType::Virtual);
    #[zbus(property, name = "version")]
    fn version(&self) -> u32 { 5 }
    // We can hide the cursor or composite it into the stream, but we don't
    // provide cursor metadata.
    #[zbus(property)]
    fn available_cursor_modes(&self) -> u32 { (CursorMode::Embedded | CursorMode::Hidden).bits() }
    #[zbus(property)]
    fn available_source_types(&self) -> u32 { Self::AVAILABLE_SOURCE_TYPE.bits() }
    /// ScreenCast.CreateSession: registers a fresh `Session` object with
    /// default settings at `session_handle`.
    #[zbus(out_args("response", "results"))]
    async fn create_session(
        &self,
        #[zbus(object_server)] server: &zbus::ObjectServer,
        _handle: zbus::zvariant::ObjectPath<'_>,
        session_handle: zbus::zvariant::ObjectPath<'_>,
        _app_id: &str,
        _options: std::collections::HashMap<&str, zbus::zvariant::Value<'_>>,
    ) -> zbus::fdo::Result<(u32, std::collections::HashMap<String, zbus::zvariant::OwnedValue>)>
    {
        server
            .at(&session_handle, Session {
                path: session_handle.to_owned(),
                allow_multiple: false,
                source_type: SourceType::Monitor,
                cursor_mode: CursorMode::Hidden,
                persist: false,
                restore_data: None,
                node_ids: Default::default(),
            })
            .await?;
        tracing::info!("create_session: {session_handle}");
        // Response code 0 == success; no extra results.
        Ok((0, Default::default()))
    }
    /// ScreenCast.SelectSources: stores the client's requested source types,
    /// multiplicity, cursor mode, persist mode and restore data on the
    /// session for `start` to use.
    #[zbus(out_args("response", "results"))]
    async fn select_sources(
        &self,
        #[zbus(object_server)] server: &zbus::ObjectServer,
        _handle: zbus::zvariant::ObjectPath<'_>,
        session_handle: zbus::zvariant::ObjectPath<'_>,
        _app_id: &str,
        options: SelectSourcesOptions,
    ) -> zbus::fdo::Result<(u32, std::collections::HashMap<String, zbus::zvariant::OwnedValue>)>
    {
        tracing::info!("select_sources: {options:?}");
        let session = server.interface::<_, Session>(session_handle).await?;
        let mut session = session.get_mut().await;
        if let Some(source_type) = options.types {
            // Silently drop source types we can't provide (e.g. Window);
            // error out only if nothing usable remains.
            session.source_type = source_type & Self::AVAILABLE_SOURCE_TYPE;
            if session.source_type.is_empty() {
                return Err(zbus::fdo::Error::InvalidArgs("Invalid source type".to_string()));
            }
        }
        if let Some(allow_multiple) = options.multiple {
            session.allow_multiple = allow_multiple;
        }
        if let Some(cursor_mode) = options.cursor_mode {
            session.cursor_mode = cursor_mode;
        }
        if let Some(persist_mode) = options.persist_mode {
            session.persist = persist_mode != 0;
        }
        // Only accept restore data that this backend produced itself.
        let restore_data: Option<RestoreDataInner> = options.restore_data.and_then(|d| {
            if d.vendor == "picom" && d.version == 0 {
                d.data.try_into().ok()
            } else {
                None
            }
        });
        session.restore_data = restore_data;
        // TODO(yshui): handle `cursor_mode`
        Ok((0, Default::default()))
    }
    /// ScreenCast.Start: builds the list of capture rectangles from the
    /// session's selected sources, asks picom to create pipewire streams for
    /// them, and returns the node ids plus geometry to the caller.
    #[zbus(out_args("response", "results"))]
    async fn start(
        &self,
        #[zbus(object_server)] server: &zbus::ObjectServer,
        _handle: zbus::zvariant::ObjectPath<'_>,
        session_handle: zbus::zvariant::ObjectPath<'_>,
        _app_id: &str,
        _parent_window: &str,
        _options: std::collections::HashMap<&str, zbus::zvariant::Value<'_>>,
    ) -> zbus::fdo::Result<(u32, StartResponse)> {
        let session = server.interface::<_, Session>(session_handle).await?;
        let (x11, screen, fut) = x11rb_async::rust_connection::RustConnection::connect(None)
            .await
            .fdo_context("Failed to connect to X11")?;
        let _task = smol::spawn(fut);
        let mut picom =
            Picom::new(&x11, screen).await.map_err(|e| zbus::fdo::Error::Failed(e.to_string()))?;
        let root = x11.setup().roots[screen].root;
        let cookie = picom.cookie.clone();
        let mut session = session.get_mut().await;
        // Clients that can't handle multiple streams get only the first
        // selected source type (defaulting to Monitor).
        let source_type = if !session.allow_multiple {
            session.source_type.iter().next().unwrap_or(SourceType::Monitor)
        } else {
            session.source_type
        };
        let mut rectangles = SmallVec::<[_; 6]>::new();
        let mut types = SmallVec::<[_; 6]>::new();
        let mut output_monitor_names = SmallVec::<[_; 8]>::new();
        if source_type.contains(SourceType::Monitor) {
            let monitors = x11
                .randr_get_monitors(root, true)
                .await
                .fdo_context("Failed to get monitors")?
                .reply()
                .await
                .fdo_context("Failed to get monitors")?;
            let monitor_count = monitors.monitors.len();
            // Resolve all monitor-name atoms concurrently, preserving order.
            let monitor_names: FuturesOrdered<_> = monitors
                .monitors
                .iter()
                .map(|m| {
                    async {
                        Ok::<_, anyhow::Error>(x11.get_atom_name(m.name).await?.reply().await?)
                    }
                })
                .collect();
            let monitor_names: SmallVec<[_; 8]> = monitor_names.collect().await;
            let monitor_names: SmallVec<[_; 8]> = monitor_names
                .into_iter()
                .collect::<Result<_, _>>()
                .fdo_context("get monitor names")?;
            let mut m = monitors.monitors.iter().map(|m| {
                protocol::Rectangle {
                    x: m.x as i32,
                    y: m.y as i32,
                    width: m.width as u32,
                    height: m.height as u32,
                }
            });
            if session.allow_multiple {
                // One stream per monitor.
                output_monitor_names = monitor_names;
                rectangles.extend(m)
            } else {
                // Pick a single monitor: the one named in the restore data
                // if still present, otherwise round-robin over monitors.
                let mut monitor_index = None;
                if let Some(restore_data) = &session.restore_data {
                    monitor_index = monitor_names.iter().position(|n| n.name == restore_data.data);
                    tracing::info!(
                        "Monitor {} is {:?}",
                        String::from_utf8_lossy(&restore_data.data),
                        monitor_index
                    );
                }
                let monitor_index = monitor_index.unwrap_or_else(|| {
                    let old_default_monitor =
                        self.default_monitor.load(std::sync::atomic::Ordering::Relaxed);
                    let mut default_monitor = old_default_monitor;
                    if default_monitor >= monitor_count {
                        default_monitor = 0;
                    }
                    tracing::info!("default_monitor: {}", default_monitor);
                    // Best-effort advance; losing the race only means two
                    // sessions may start on the same monitor.
                    let _ = self.default_monitor.compare_exchange(
                        old_default_monitor,
                        default_monitor + 1,
                        std::sync::atomic::Ordering::Relaxed,
                        std::sync::atomic::Ordering::Relaxed,
                    );
                    default_monitor
                });
                output_monitor_names.push(monitor_names[monitor_index].clone());
                rectangles.extend(m.nth(monitor_index));
            };
            types.extend(rectangles.iter().map(|_| SourceType::Monitor));
        }
        if source_type.contains(SourceType::Virtual)
            && (session.allow_multiple || rectangles.is_empty())
        {
            // "Virtual" output == the whole root window.
            let root = x11.setup().roots[screen].root;
            let geom = x11
                .get_geometry(root)
                .await
                .fdo_context("get root geometry send")?
                .reply()
                .await
                .fdo_context("get root geometry reply")?;
            rectangles.push(protocol::Rectangle {
                x: 0,
                y: 0,
                width: geom.width as _,
                height: geom.height as _,
            });
            types.push(SourceType::Virtual);
        }
        picom
            .send(protocol::ClientMessage::CreateStream {
                cookie,
                rectangles: rectangles.clone(),
                embed_cursor: session.cursor_mode.contains(CursorMode::Embedded),
            })
            .await
            .map_err(|e| zbus::fdo::Error::Failed(e.to_string()))?;
        let node_ids = match picom
            .next()
            .await
            .unwrap()
            .map_err(|e| zbus::fdo::Error::Failed(e.to_string()))?
        {
            protocol::ServerMessage::StreamCreated { node_ids } => node_ids,
            protocol::ServerMessage::StreamCreationError { error } => {
                return Err(zbus::fdo::Error::Failed(error))
            }
        };
        session.node_ids.extend(node_ids.iter().copied());
        // NOTE(review): `izip!` stops at its shortest input, and
        // `output_monitor_names` has no entry for a Virtual stream — so a
        // Virtual stream never appears in `streams` even though its pipewire
        // node was created. Confirm whether that is intended.
        let streams = izip!(node_ids, types, rectangles, &output_monitor_names)
            .map(|(node_id, type_, rectangle, name)| {
                (node_id, StreamDict {
                    position: (rectangle.x, rectangle.y),
                    size: (rectangle.width as i32, rectangle.height as i32),
                    source_type: type_,
                    mapping_id: Some(String::from_utf8_lossy(&name.name).into_owned()),
                })
            })
            .collect();
        // Only a single-monitor selection is remembered for session restore.
        let monitor_name = if output_monitor_names.len() == 1 {
            Some(output_monitor_names[0].clone())
        } else {
            None
        };
        Ok((0, StartResponse {
            streams,
            restore_data: monitor_name.map(|name| {
                RestoreData {
                    vendor: "picom".to_string(),
                    version: 0,
                    data: RestoreDataInner { data: name.name }.try_into().unwrap(),
                }
            }),
        }))
    }
}
/// Entry point: registers the ScreenCast portal backend on the session bus
/// and then serves forever.
fn main() -> anyhow::Result<()> {
    tracing_subscriber::fmt::init();
    let screen_cast =
        ScreenCast { sessions: Default::default(), default_monitor: AtomicUsize::new(0) };
    let zbus = zbus::connection::Builder::session()?
        .name("org.freedesktop.impl.portal.desktop.picom")?
        .serve_at("/org/freedesktop/portal/desktop", screen_cast)?;
    // `_tx` stays alive in this scope for the whole `block_on`, so the
    // receive below never completes and the connection keeps serving.
    let (_tx, zbus_cancel) = oneshot::channel::<()>();
    smol::block_on(async move {
        let _conn = zbus.build().await.unwrap();
        zbus_cancel.await.unwrap();
    });
    Ok(())
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
protocol/src/lib.rs | Rust | /// Client-server protocol uses length delimited JSON.
use serde::{Deserialize, Serialize};
use smallvec::SmallVec;
/// A capture region in X11 root-window coordinates.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct Rectangle {
    pub x: i32,
    pub y: i32,
    pub width: u32,
    pub height: u32,
}
/// Messages sent from the portal backend to the compositor.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ClientMessage {
    // The cookie is here to prevent "privilege escalation". All clients that are connected
    // to the same X server already have the ability to capture the screen. We just try not to
    // expand that privilege to other clients. So we set a cookie on one of our windows and
    // only clients that know the cookie can create streams; this way we can be sure that the
    // client is connected to the same X server as us.
    /// Create a new stream per rectangle.
    CreateStream {
        cookie: String,
        rectangles: SmallVec<[Rectangle; 6]>,
        embed_cursor: bool,
    },
    /// Tear down the streams identified by these node ids.
    CloseStreams { node_ids: SmallVec<[u32; 6]> },
}
/// Replies sent from the compositor back to the portal backend.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ServerMessage {
    /// One node id per requested rectangle, in request order.
    StreamCreated { node_ids: SmallVec<[u32; 6]> },
    StreamCreationError { error: String },
}
| yshui/x11screencast-portal | 5 | xdg-desktop-portal ScreenCast implementation for X11 | Rust | yshui | Yuxuan Shui | CodeWeavers |
experiment/src/main.rs | Rust | #![deny(rust_2018_idioms)]
use smallvec::smallvec;
use winit::{event::WindowEvent, event_loop::EventLoop};
use std::{collections::HashSet, sync::Arc};
use anyhow::{Context, Result, anyhow};
use ::xr_passthrough_layer::{CAMERA_SIZE, camera, find_index_camera, pipeline, steam};
use glam::UVec2;
use v4l::video::Capture;
use vulkano::{
command_buffer::{
AutoCommandBufferBuilder, BlitImageInfo, ClearColorImageInfo, CommandBufferUsage,
ImageBlit,
allocator::{
CommandBufferAllocator, StandardCommandBufferAllocator,
StandardCommandBufferAllocatorCreateInfo,
},
},
descriptor_set::allocator::{
StandardDescriptorSetAllocator, StandardDescriptorSetAllocatorCreateInfo,
},
device::{Device, Queue},
format,
image::{Image, ImageLayout, ImageUsage},
memory::allocator::StandardMemoryAllocator,
swapchain::{SurfaceInfo, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo},
sync::GpuFuture,
};
// Application name handed to OpenXR; the trailing NUL is embedded here —
// presumably the consumer expects a C string. TODO confirm.
static APP_NAME: &str = "Camera\0";
static APP_VERSION: u32 = 0;
// Image passed to `CameraThread::new` in `main`; presumably shown until real
// camera frames arrive.
static SPLASH_IMAGE: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/../splash.png"));
/// Per-window presentation state.
#[derive(Clone)]
struct Window {
    // Swapchain presenting to `inner`.
    swapchain: Arc<Swapchain>,
    // The swapchain's images, indexed by the acquired image index.
    images: Vec<Arc<Image>>,
    // The underlying winit window.
    inner: Arc<winit::window::Window>,
}
/// Application state shared across winit callbacks.
struct App {
    // Camera-frame post-processing pipeline.
    pp: pipeline::Pipeline,
    device: Arc<Device>,
    cmdbuf_allocator: Arc<dyn CommandBufferAllocator>,
    queue: Arc<Queue>,
    // Background thread producing camera frames.
    camera: camera::CameraThread,
    // Presentation target; `None` until `resumed` creates it.
    window: Option<Window>,
    instance: Arc<vulkano::instance::Instance>,
    // GPU future of the previously submitted frame, joined into the next
    // frame's submission in `redraw`.
    previous_frame_end: Option<Box<dyn GpuFuture>>,
}
// NOTE(review): the 3-channel B8G8R8/R8G8B8 formats are listed before the
// 4-channel ones; presentation support for 24-bit formats is uncommon —
// confirm this ordering is intentional.
/// Swapchain formats we are willing to blit into, in order of preference.
const PREFERRED_FORMATS: &[format::Format] = &[
    format::Format::B8G8R8_UNORM,
    format::Format::R8G8B8_UNORM,
    format::Format::B8G8R8A8_UNORM,
    format::Format::R8G8B8A8_UNORM,
];
impl App {
    /// Creates a surface + swapchain for `window` and stores them on `self`.
    fn setup_window(&mut self, window: Arc<winit::window::Window>) -> Result<()> {
        log::info!("setting up window");
        let surface = vulkano::swapchain::Surface::from_window(&self.instance, &window)?;
        let surface_capabilities = self
            .device
            .physical_device()
            .surface_capabilities(&surface, &SurfaceInfo::default())?;
        let swapchain_formats = self
            .device
            .physical_device()
            .surface_formats(&surface, &SurfaceInfo::default())?
            .into_iter()
            .map(|(f, _)| f)
            .collect::<HashSet<_>>();
        log::info!("{swapchain_formats:?}");
        // Pick the first of our preferred formats that the surface supports.
        let swapchain_format = PREFERRED_FORMATS
            .iter()
            .find(|f| swapchain_formats.contains(f))
            .context("cannot find a suitable format for swapchain images")?;
        let (swapchain, images) = vulkano::swapchain::Swapchain::new(
            &self.device,
            &surface,
            &vulkano::swapchain::SwapchainCreateInfo {
                min_image_count: surface_capabilities.min_image_count.max(2),
                image_format: *swapchain_format,
                image_extent: window.inner_size().into(),
                // We only blit into the swapchain images; no render passes.
                image_usage: vulkano::image::ImageUsage::TRANSFER_DST,
                composite_alpha: vulkano::swapchain::CompositeAlpha::Opaque,
                // FIFO present mode (vsync-style pacing).
                present_mode: vulkano::swapchain::PresentMode::Fifo,
                ..Default::default()
            },
        )?;
        self.window = Some(Window {
            swapchain,
            images,
            inner: window,
        });
        //self.previous_frame_end = Some(vulkano::sync::now(self.device.clone()).boxed());
        Ok(())
    }
    /// Rebuilds the swapchain after a resize/out-of-date event, keeping the
    /// previous creation parameters except the image extent.
    fn recreate_swapchain(&mut self) -> Result<Window> {
        log::info!("recreating swapchain");
        let Some(window) = &mut self.window else {
            panic!("recreate non-existent swapchain")
        };
        (window.swapchain, window.images) = window.swapchain.recreate(&SwapchainCreateInfo {
            image_extent: window.inner.inner_size().into(),
            ..window.swapchain.create_info()
        })?;
        Ok(window.clone())
    }
    /// Acquires a swapchain image, blits the latest post-processed camera
    /// frame into it with letterboxing, and presents.
    fn redraw(&mut self) -> Result<()> {
        let Some(mut window) = self.window.clone() else {
            return Ok(());
        };
        // Acquire an image, transparently recreating the swapchain when it
        // is suboptimal or out of date.
        let (image_index, future) = loop {
            match vulkano::swapchain::acquire_next_image(
                window.swapchain.clone(),
                Some(std::time::Duration::from_secs(1)),
            ) {
                Ok((image_index, false, future)) => break (image_index, future),
                Ok((_, true, future)) => {
                    // Suboptimal: still usable, but rebuild before drawing.
                    window = self.recreate_swapchain()?;
                    self.previous_frame_end = Some(future.boxed());
                    continue;
                }
                Err(vulkano::Validated::Error(vulkano::VulkanError::OutOfDate)) => {
                    window = self.recreate_swapchain()?;
                    continue;
                }
                Err(e) => return Err(e.into()),
            };
        };
        log::trace!("acquired {image_index}");
        let mut cmdbuf = AutoCommandBufferBuilder::primary(
            self.cmdbuf_allocator.clone(),
            self.queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )?;
        self.pp.maybe_postprocess(&self.camera.frame())?;
        // NOTE(review): the original comment here was truncated; it appears
        // `pp_fut` must be joined into the submission below so this image is
        // not reused by the camera thread prematurely — confirm intent.
        let (src_image, pp_fut) = self.pp.image();
        let dst_image = window.images[image_index as usize].clone();
        // Letterbox math: scale the source to fit the destination while
        // preserving its aspect ratio, centering the result.
        let [w, h, _] = src_image.extent();
        let [dw, dh, _] = dst_image.extent();
        let aspect_ratio = w as f64 / h as f64;
        let (mut target_w, mut target_h) = (dh as f64 * aspect_ratio, dh as f64);
        if target_w > dw as _ {
            target_w = dw as _;
            target_h = target_w / aspect_ratio;
        }
        let crop_x = (dw as f64 - target_w).max(0.) / 2.0;
        let crop_y = (dh as f64 - target_h).max(0.) / 2.0;
        cmdbuf
            // Clear first so the letterbox borders are a solid color.
            .clear_color_image(ClearColorImageInfo::new(dst_image.clone()))?
            .blit_image(BlitImageInfo {
                src_image_layout: ImageLayout::TransferSrcOptimal,
                dst_image_layout: ImageLayout::TransferDstOptimal,
                filter: vulkano::image::sampler::Filter::Linear,
                regions: smallvec![ImageBlit {
                    src_offsets: [[0, 0, 0], src_image.extent(),],
                    src_subresource: src_image.subresource_layers(),
                    dst_offsets: [
                        [crop_x as u32, crop_y as u32, 0],
                        [(crop_x + target_w) as u32, (crop_y + target_h) as u32, 1]
                    ],
                    dst_subresource: dst_image.subresource_layers(),
                    ..Default::default()
                }],
                ..BlitImageInfo::new(src_image.clone(), dst_image.clone())
            })?;
        let mut previous_future = self.previous_frame_end.take().unwrap();
        previous_future.cleanup_finished();
        // Wait on: image acquisition, the previous frame, and postprocess.
        let future = future
            .join(previous_future)
            .join(pp_fut)
            .then_execute(self.queue.clone(), cmdbuf.build()?)?;
        window.inner.pre_present_notify();
        log::trace!("presenting");
        self.previous_frame_end = Some(
            future
                .then_swapchain_present(
                    self.queue.clone(),
                    SwapchainPresentInfo::new(window.swapchain.clone(), image_index),
                )
                .then_signal_fence_and_flush()?
                .boxed(),
        );
        Ok(())
    }
}
/// winit glue: drives `App` from window and system events.
impl winit::application::ApplicationHandler for App {
    fn about_to_wait(&mut self, _event_loop: &winit::event_loop::ActiveEventLoop) {
        // Continuously request redraws while a window exists.
        if let Some(window) = &self.window {
            window.inner.request_redraw();
        }
    }
    fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
        log::info!("resumed");
        let window = match event_loop.create_window(winit::window::Window::default_attributes()) {
            Ok(window) => Arc::new(window),
            Err(e) => {
                log::warn!("Failed to create window {e:#}");
                event_loop.exit();
                return;
            }
        };
        // Inverted `let else`: the success path lives in the `else` block
        // and returns early; fallthrough is the error path.
        let Err(e) = self.setup_window(window.clone()) else {
            self.camera.resume().unwrap();
            window.request_redraw();
            return;
        };
        log::warn!("Failed to setup rendering surface {e:#}");
        event_loop.exit();
    }
    fn window_event(
        &mut self,
        event_loop: &winit::event_loop::ActiveEventLoop,
        _window_id: winit::window::WindowId,
        event: WindowEvent,
    ) {
        match event {
            WindowEvent::CloseRequested => event_loop.exit(),
            WindowEvent::RedrawRequested => {
                let Err(e) = self.redraw() else { return };
                log::warn!("Failed to redraw {e:#}");
                event_loop.exit();
            }
            _ => (),
        }
    }
    // The Ctrl-C handler installed in `main` sends a unit user event; treat
    // it as a request to quit.
    fn user_event(&mut self, event_loop: &winit::event_loop::ActiveEventLoop, (): ()) {
        event_loop.exit();
    }
}
/// Entry point: opens the index camera, builds the Vulkan pipeline and runs
/// the winit event loop until the window closes or Ctrl-C is received.
fn main() -> Result<()> {
    let env = env_logger::Env::default().default_filter_or("info");
    env_logger::Builder::from_env(env)
        .format_timestamp_millis()
        .init();
    let event_loop = EventLoop::new()?;
    let required_extensions = vulkano::swapchain::Surface::required_extensions(&event_loop)?;
    let (xr, _frame_waiter, _frame_stream) = xr::OpenXr::new(
        required_extensions,
        &Default::default(),
        &[],
        APP_NAME,
        APP_VERSION,
    )?;
    let instance = xr.vk_instance();
    let (device, queue) = xr.vk_device();
    let allocator = Arc::new(StandardMemoryAllocator::new(&device, &Default::default())) as _;
    let cmdbuf_allocator = Arc::new(StandardCommandBufferAllocator::new(
        &device,
        &StandardCommandBufferAllocatorCreateInfo::default(),
    )) as _;
    let descriptor_set_allocator = Arc::new(StandardDescriptorSetAllocator::new(
        &device,
        &StandardDescriptorSetAllocatorCreateInfo::default(),
    )) as _;
    let camera =
        v4l::Device::with_path(find_index_camera()?).context("cannot open camera device")?;
    if !camera
        .query_caps()?
        .capabilities
        .contains(v4l::capability::Flags::VIDEO_CAPTURE)
    {
        return Err(anyhow!("Cannot capture from index camera"));
    }
    // Width is CAMERA_SIZE * 2 — presumably the two stereo eye views side by
    // side; TODO confirm against the camera's format.
    let format = camera.set_format(&v4l::Format::new(
        CAMERA_SIZE * 2,
        CAMERA_SIZE,
        v4l::FourCC::new(b"YUYV"),
    ))?;
    let pipeline_cache =
        xr_passthrough_layer::config::load_pipeline_cache(&device, &xdg::BaseDirectories::new()?)?;
    let camera_config = steam::find_steam_config();
    log::info!("{}", format);
    let pp = pipeline::Pipeline::new(
        &device,
        &allocator,
        &cmdbuf_allocator,
        &queue,
        &descriptor_set_allocator,
        true,
        camera_config,
        ImageLayout::TransferSrcOptimal,
        ImageUsage::TRANSFER_SRC,
        pipeline_cache,
        UVec2::new(CAMERA_SIZE, CAMERA_SIZE),
        UVec2::new(CAMERA_SIZE, CAMERA_SIZE),
    )?;
    log::info!("pipeline: {pp:?}");
    camera.set_params(&v4l::video::capture::Parameters::with_fps(54))?;
    let camera = camera::CameraThread::new(camera, SPLASH_IMAGE);
    // Ctrl-C -> user event -> `user_event` exits the event loop.
    let proxy = event_loop.create_proxy();
    ctrlc::set_handler({
        move || {
            let _ = proxy.send_event(());
        }
    })
    .expect("Error setting Ctrl-C handler");
    // Event loop
    let mut app = App {
        device: device.clone(),
        window: None,
        camera,
        cmdbuf_allocator,
        queue,
        pp,
        instance: instance.clone(),
        previous_frame_end: Some(vulkano::sync::now(device).boxed()),
    };
    log::info!("event loop start");
    event_loop.run_app(&mut app)?;
    log::info!("event loop exited");
    app.camera.exit()?;
    Ok(())
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
hello/src/main.rs | Rust | use glam::{Mat4, Quat, Vec3, Vec4};
use smallvec::smallvec;
use std::{collections::HashSet, sync::Arc, thread::JoinHandle};
use winit::{
event::WindowEvent,
event_loop::{EventLoop, EventLoopProxy},
};
use anyhow::{Context as _, Result};
use openxr::{
CompositionLayerFlags, EnvironmentBlendMode, Extent2Di, FrameState, Offset2Di, Rect2Di,
SwapchainSubImage, ViewConfigurationType, ViewStateFlags, sys::Handle,
};
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
AutoCommandBufferBuilder, CommandBufferUsage, PrimaryAutoCommandBuffer,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassBeginInfo, SubpassContents,
SubpassEndInfo,
allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
},
descriptor_set::{
DescriptorSet, WriteDescriptorSet,
allocator::{DescriptorSetAllocator, StandardDescriptorSetAllocator},
},
device::{Device, Queue},
format::ClearValue,
image::{
Image, ImageCreateInfo, ImageLayout, ImageUsage, SampleCount,
view::{ImageView, ImageViewCreateInfo},
},
instance::Instance,
memory::allocator::{
AllocationCreateInfo, MemoryAllocatePreference, MemoryAllocator, MemoryTypeFilter,
StandardMemoryAllocator,
},
pipeline::{
self, GraphicsPipeline, Pipeline, PipelineLayout, PipelineShaderStageCreateInfo,
graphics::{
GraphicsPipelineCreateInfo,
color_blend::ColorBlendState,
depth_stencil::{DepthState, DepthStencilState},
input_assembly::{InputAssemblyState, PrimitiveTopology},
multisample::MultisampleState,
rasterization::RasterizationState,
vertex_input::{Vertex as _, VertexDefinition},
viewport::{Viewport, ViewportState},
},
layout::PipelineDescriptorSetLayoutCreateInfo,
},
render_pass::{
AttachmentDescription, AttachmentLoadOp, AttachmentReference, AttachmentStoreOp,
Framebuffer, FramebufferCreateInfo, RenderPass, RenderPassCreateInfo, Subpass,
SubpassDescription,
},
shader::ShaderModule,
swapchain::{SurfaceInfo, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo},
sync::{GpuFuture, future::FenceSignalFuture},
};
use xr::XrContext as _;
/// A desktop mirror window together with the Vulkan swapchain presenting to it.
#[derive(Clone)]
struct Window {
    /// Swapchain created for `inner`'s surface.
    swapchain: Arc<Swapchain>,
    /// The underlying winit window.
    inner: Arc<winit::window::Window>,
}
/// State of the background thread that blocks on `xrWaitFrame`.
///
/// Transitions are driven by `impl SessionState`; `Invalid` is a transient
/// placeholder used while a transition moves the old state out by value.
/// (Note: "Waitting" is a historical misspelling of "Waiting", kept because
/// the variant names are referenced throughout this file.)
enum SessionState {
    /// Waiter thread is running; holds its join handle.
    Running(JoinHandle<()>),
    /// A stop was requested; waiting for the thread to hand the waiter back.
    StopWaitting,
    /// A start was requested while stopping; start as soon as the waiter returns.
    StartWaitting(EventLoopProxy<AppMessage>),
    /// No thread running; we own the frame waiter.
    Idle(openxr::FrameWaiter),
    /// Transient placeholder during transitions; never observed otherwise.
    Invalid,
}
// State machine driving the frame-wait thread. Every transition parks the
// state as `Invalid` via `mem::replace` so the old value can be moved out.
impl SessionState {
    /// Start the waiter thread: it blocks on `xrWaitFrame` and forwards each
    /// `FrameState` to the event loop as an `AppMessage::Frame`. If a stop is
    /// still in flight, remember the proxy and start once the waiter is
    /// returned. Panics if already running.
    fn start(&mut self, proxy: EventLoopProxy<AppMessage>) {
        let this = std::mem::replace(self, SessionState::Invalid);
        match this {
            SessionState::Running(_) | SessionState::StartWaitting(_) => {
                panic!("Session already running");
            }
            SessionState::StopWaitting => {
                *self = SessionState::StartWaitting(proxy);
            }
            SessionState::Idle(mut waiter) => {
                let handle = std::thread::spawn(move || {
                    log::info!("Frame waiter started");
                    loop {
                        log::debug!("Waiting for frame");
                        match waiter.wait() {
                            Ok(state) => {
                                log::debug!("Waiting for frame end");
                                proxy.send_event(AppMessage::Frame(state)).unwrap();
                            }
                            Err(openxr::sys::Result::ERROR_SESSION_NOT_RUNNING) => {
                                log::info!("session stopped, stop frame waiter");
                                break;
                            }
                            Err(e) => {
                                log::warn!("Frame waiter error: {e:#}");
                                break;
                            }
                        }
                    }
                    // Hand the waiter back so the session can restart later.
                    proxy.send_event(AppMessage::WaiterExited(waiter)).unwrap();
                });
                *self = SessionState::Running(handle);
            }
            SessionState::Invalid => unreachable!(),
        }
    }
    /// End the XR session and join the waiter thread. Returns `true` if a
    /// stop was actually initiated, `false` if nothing was running.
    fn stop(&mut self, session: &openxr::Session<openxr::Vulkan>) -> bool {
        let this = std::mem::replace(self, SessionState::Invalid);
        match this {
            SessionState::Running(handle) => {
                // Ending the session makes `wait()` fail with
                // ERROR_SESSION_NOT_RUNNING, which terminates the thread.
                session.end().unwrap();
                handle.join().unwrap();
                *self = SessionState::StopWaitting;
                true
            }
            SessionState::StartWaitting(_) => {
                *self = SessionState::StopWaitting;
                true
            }
            SessionState::Idle(_) | SessionState::StopWaitting => false,
            SessionState::Invalid => unreachable!(),
        }
    }
    /// Like [`stop`](Self::stop), but panics if already stopped.
    fn ensure_stop(&mut self, session: &openxr::Session<openxr::Vulkan>) {
        if self.stop(session) {
            log::info!("Session stopped");
        } else {
            panic!("Session already stopped");
        }
    }
    /// Whether the waiter thread is currently not running.
    fn is_stopped(&self) -> bool {
        match self {
            SessionState::Idle(_) | SessionState::StopWaitting => true,
            SessionState::Running(_) | SessionState::StartWaitting(_) => false,
            SessionState::Invalid => unreachable!(),
        }
    }
    /// Accept the `FrameWaiter` handed back by an exiting waiter thread.
    /// If a start request arrived while stopping, restart immediately.
    fn put_frame_waiter(&mut self, waiter: openxr::FrameWaiter) {
        let this = std::mem::replace(self, SessionState::Invalid);
        match this {
            SessionState::Running(_) => {
                unreachable!("Got frame wait while running");
            }
            SessionState::Idle(_) => {
                unreachable!("Got frame wait while idle");
            }
            SessionState::StopWaitting => {
                *self = SessionState::Idle(waiter);
            }
            SessionState::StartWaitting(proxy) => {
                *self = SessionState::Idle(waiter);
                self.start(proxy);
            }
            SessionState::Invalid => unreachable!(),
        }
    }
    /// How long the winit event loop may sleep: poll every 100 ms while idle
    /// (so XR events are still serviced), otherwise block on events.
    fn wait_time(&self) -> Option<std::time::Duration> {
        match self {
            SessionState::Running(_) => None,
            SessionState::StopWaitting => None,
            SessionState::StartWaitting(_) => None,
            SessionState::Idle(_) => Some(std::time::Duration::from_millis(100)),
            SessionState::Invalid => unreachable!(),
        }
    }
}
type FenceFut = Arc<FenceSignalFuture<Box<dyn GpuFuture + Send + Sync>>>;
/// Application state shared by the winit handler and the XR frame loop.
struct App {
    /// Set once the XR session reported EXITING; exit after the waiter returns.
    exiting: bool,
    /// Frame-wait thread state machine.
    state: SessionState,
    /// For sending `AppMessage`s back into the event loop.
    proxy: EventLoopProxy<AppMessage>,
    allocator: Arc<dyn MemoryAllocator>,
    descriptor_set_allocator: Arc<dyn DescriptorSetAllocator>,
    cmdbuf_allocator: Arc<dyn CommandBufferAllocator>,
    /// Cube geometry (see `VERTICES` / `INDICES`).
    vertices: Subbuffer<[Vertex]>,
    indices: Subbuffer<[u32]>,
    /// Pre-recorded command buffers for the mirror window, one per swapchain image.
    cmdbufs: Vec<Arc<PrimaryAutoCommandBuffer>>,
    /// Pre-recorded command buffers for the XR swapchain, one per image.
    xr_cmdbufs: Vec<Arc<PrimaryAutoCommandBuffer>>,
    device: Arc<Device>,
    queue: Arc<Queue>,
    instance: Arc<Instance>,
    /// Desktop mirror window; `None` until `resumed` creates it.
    window: Option<Window>,
    /// Per-window-image fences guarding uniform-buffer reuse.
    uniform_buffer_gpu_use_end: Vec<Option<FenceFut>>,
    previous_frame_end: Option<Box<dyn GpuFuture + Send + Sync>>,
    vs: Arc<ShaderModule>,
    fs: Arc<ShaderModule>,
    /// Optional RenderDoc capture hook (present if the library loaded).
    renderdoc: Option<renderdoc::RenderDoc<renderdoc::V141>>,
    /// Most recent per-eye model-view-projection matrices.
    latest_mvps: [Mat4; 2],
    /// Per-window-image MVP uniform buffers.
    uniform_buffers: Vec<Subbuffer<vs::MVP>>,
    /// Single MVP uniform buffer used by the XR render path.
    xr_uniform_buffer: Subbuffer<vs::MVP>,
    frame_stream: openxr::FrameStream<openxr::Vulkan>,
    /// HTC passthrough handle, if the extension is active.
    passthrough: Option<openxr::sys::PassthroughHTC>,
    /// Used to animate the cube's rotation.
    render_start: std::time::Instant,
    xr: xr::OpenXr,
}
/// Vertex layout for the demo cube: position + RGBA color.
#[derive(
    vulkano::pipeline::graphics::vertex_input::Vertex,
    bytemuck::Pod,
    Clone,
    Copy,
    bytemuck::Zeroable,
)]
#[repr(C)]
struct Vertex {
    #[format(R32G32B32_SFLOAT)]
    in_position: [f32; 3],
    #[format(R32G32B32A32_SFLOAT)]
    in_color: [f32; 4],
}
/// Index buffer for the cube: 6 faces x 2 triangles x 3 indices into `VERTICES`.
const INDICES: [u32; 36] = [
    0, 1, 3, 1, 3, 5, // back
    3, 5, 6, 5, 6, 7, // top
    6, 7, 2, 7, 2, 4, // front
    2, 4, 1, 2, 1, 0, // bottom
    1, 4, 5, 4, 5, 7, // right
    0, 2, 3, 2, 3, 6, // left
];
/// The 8 corners of a 0.2-unit cube centered at the origin, each with a color.
const VERTICES: [Vertex; 8] = [
    // 0
    Vertex {
        in_position: [-0.1, -0.1, -0.1],
        in_color: [1., 0.0, 0.0, 1.],
    },
    // 1
    Vertex {
        in_position: [0.1, -0.1, -0.1],
        in_color: [0.0, 1., 0.0, 1.],
    },
    // 2
    Vertex {
        in_position: [-0.1, 0.1, -0.1],
        in_color: [0.0, 0.0, 1., 1.],
    },
    // 3
    Vertex {
        in_position: [-0.1, -0.1, 0.1],
        in_color: [1., 0.0, 0.0, 1.],
    },
    // 4
    Vertex {
        in_position: [0.1, 0.1, -0.1],
        in_color: [1., 1., 1., 1.],
    },
    // 5
    Vertex {
        in_position: [0.1, -0.1, 0.1],
        in_color: [0.0, 1., 0.0, 1.],
    },
    // 6
    Vertex {
        in_position: [-0.1, 0.1, 0.1],
        in_color: [0.0, 0.0, 1., 1.],
    },
    // 7
    Vertex {
        in_position: [0.1, 0.1, 0.1],
        in_color: [1., 1., 1., 1.],
    },
];
// winit lifecycle: window creation/redraw drive the desktop mirror, while
// user events carry XR frame-loop messages from the waiter thread.
impl winit::application::ApplicationHandler<AppMessage> for App {
    fn about_to_wait(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
        // Service OpenXR events every time the loop is about to sleep.
        self.process_xr_events().unwrap();
        // While the session is idle we must still poll for XR events, so cap
        // the sleep; otherwise wait indefinitely for winit/user events.
        if let Some(dur) = self.state.wait_time() {
            event_loop.set_control_flow(winit::event_loop::ControlFlow::wait_duration(dur));
        } else {
            event_loop.set_control_flow(winit::event_loop::ControlFlow::Wait);
        }
        log::debug!("about to wait");
        if let Some(window) = &self.window {
            window.inner.request_redraw();
        }
    }
    fn exiting(&mut self, _event_loop: &winit::event_loop::ActiveEventLoop) {
        // Best-effort stop; `stop` is a no-op if already stopped.
        self.state.stop(self.xr.xr_session());
    }
    fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
        log::info!("resumed");
        let window = match event_loop.create_window(winit::window::Window::default_attributes()) {
            Ok(window) => Arc::new(window),
            Err(e) => {
                log::warn!("Failed to create window {e:#}");
                event_loop.exit();
                return;
            }
        };
        // `let Err(..) = .. else` returns early on success; fall through on error.
        let Err(e) = self.setup_window(window.clone()) else {
            window.request_redraw();
            return;
        };
        log::warn!("Failed to setup rendering surface {e:#}");
        event_loop.exit();
    }
    fn window_event(
        &mut self,
        event_loop: &winit::event_loop::ActiveEventLoop,
        _window_id: winit::window::WindowId,
        event: WindowEvent,
    ) {
        match event {
            WindowEvent::CloseRequested => {
                // Ask the runtime to end the session; actual exit happens via
                // the SessionStateChanged(EXITING) event.
                self.xr.xr_session().request_exit().unwrap();
            }
            WindowEvent::RedrawRequested => {
                let Err(e) = self.redraw() else { return };
                log::warn!("Failed to redraw {e:#}");
                event_loop.exit();
            }
            _ => (),
        }
    }
    fn user_event(&mut self, event_loop: &winit::event_loop::ActiveEventLoop, msg: AppMessage) {
        match msg {
            // Runtime says not to render this frame: still begin/end it.
            AppMessage::Frame(state) if !state.should_render => {
                if !self.state.is_stopped() {
                    self.skip_frame(state).unwrap();
                }
            }
            AppMessage::Frame(state) => {
                if !self.state.is_stopped() {
                    self.render_and_submit(state).unwrap();
                }
            }
            AppMessage::WaiterExited(waiter) => {
                log::info!("Frame waiter exited");
                self.state.put_frame_waiter(waiter);
                if self.exiting {
                    event_loop.exit();
                }
            }
        }
    }
}
impl App {
    /// Begin and immediately end an XR frame with no layers, for frames the
    /// runtime told us not to render (`should_render == false`).
    fn skip_frame(&mut self, state: FrameState) -> Result<(), openxr::sys::Result> {
        // `queue.with` serializes access with vulkano's queue usage, as the
        // OpenXR runtime may touch the queue internally.
        self.queue.with(|_| {
            self.frame_stream.begin()?;
            self.frame_stream.end(
                state.predicted_display_time,
                EnvironmentBlendMode::OPAQUE,
                &[],
            )
        })
    }
    /// Drain the OpenXR event queue, reacting to session state changes:
    /// READY starts the frame loop, STOPPING stops it, EXITING schedules
    /// application shutdown.
    fn process_xr_events(&mut self) -> Result<()> {
        let mut buf = openxr::EventDataBuffer::new();
        while let Some(event) = self.xr.xr_instance().poll_event(&mut buf)? {
            match event {
                openxr::Event::SessionStateChanged(state_change) => {
                    if state_change.state() == openxr::SessionState::READY {
                        log::info!("Session is ready");
                        self.xr
                            .xr_session()
                            .begin(ViewConfigurationType::PRIMARY_STEREO)?;
                        self.state.start(self.proxy.clone());
                    } else if state_change.state() == openxr::SessionState::EXITING {
                        log::info!("Session is exiting");
                        // Actual exit happens once the waiter thread returns
                        // its FrameWaiter (see `user_event`).
                        self.exiting = true;
                        break;
                    } else if state_change.state() == openxr::SessionState::STOPPING {
                        self.state.ensure_stop(self.xr.xr_session());
                    } else {
                        log::info!("Session state changed: {:?}", state_change.state());
                    }
                }
                _ => {
                    log::info!("Event: {:?}", std::mem::discriminant(&event));
                }
            }
        }
        Ok(())
    }
    /// Create a Vulkan surface + swapchain for the mirror window, record its
    /// command buffers, and store the result in `self.window`.
    fn setup_window(&mut self, window: Arc<winit::window::Window>) -> Result<()> {
        // Preference order for the swapchain image format; the first one the
        // surface supports wins.
        const PREFERRED_FORMATS: &[vulkano::format::Format] = &[
            vulkano::format::Format::B8G8R8_UNORM,
            vulkano::format::Format::R8G8B8_UNORM,
            vulkano::format::Format::B8G8R8A8_UNORM,
            vulkano::format::Format::R8G8B8A8_UNORM,
        ];
        log::info!("setting up window");
        let surface = vulkano::swapchain::Surface::from_window(&self.instance, &window)?;
        let surface_capabilities = self
            .device
            .physical_device()
            .surface_capabilities(&surface, &SurfaceInfo::default())?;
        let swapchain_formats = self
            .device
            .physical_device()
            .surface_formats(&surface, &SurfaceInfo::default())?
            .into_iter()
            .map(|(f, _)| f)
            .collect::<HashSet<_>>();
        log::info!("{swapchain_formats:?}");
        let swapchain_format = PREFERRED_FORMATS
            .iter()
            .find(|f| swapchain_formats.contains(f))
            .context("cannot find a suitable format for swapchain images")?;
        let (swapchain, images) = vulkano::swapchain::Swapchain::new(
            &self.device,
            &surface,
            &vulkano::swapchain::SwapchainCreateInfo {
                min_image_count: surface_capabilities.min_image_count.max(2),
                image_format: *swapchain_format,
                image_extent: window.inner_size().into(),
                image_usage: ImageUsage::TRANSFER_DST | ImageUsage::COLOR_ATTACHMENT,
                composite_alpha: vulkano::swapchain::CompositeAlpha::Opaque,
                present_mode: vulkano::swapchain::PresentMode::Fifo,
                ..Default::default()
            },
        )?;
        self.recreate_window_command_buffers(&images, &swapchain)?;
        self.window = Some(Window {
            swapchain,
            inner: window,
        });
        Ok(())
    }
    /// Rebuild the window swapchain (e.g. after a resize or OUT_OF_DATE) and
    /// re-record the window command buffers. Panics if no window exists.
    fn recreate_swapchain(&mut self) -> Result<()> {
        log::info!("recreating swapchain");
        let Some(window) = &mut self.window else {
            panic!("recreate non-existent swapchain")
        };
        let (swapchain, images) = window.swapchain.recreate(&SwapchainCreateInfo {
            image_extent: window.inner.inner_size().into(),
            ..window.swapchain.create_info()
        })?;
        log::info!("swapchain recreated");
        self.recreate_window_command_buffers(&images, &swapchain)?;
        self.window.as_mut().unwrap().swapchain = swapchain;
        Ok(())
    }
    /// Render one frame to the desktop mirror window: acquire a swapchain
    /// image (recreating the swapchain if needed), update that image's MVP
    /// uniform buffer, execute the pre-recorded command buffer, and present.
    fn redraw(&mut self) -> Result<()> {
        if self.window.is_none() {
            return Ok(());
        }
        if let Some(renderdoc) = &mut self.renderdoc {
            log::debug!("Starting frame capture");
            renderdoc.start_frame_capture(std::ptr::null(), std::ptr::null());
        }
        log::debug!("Debug rendering wait");
        // Non-blocking acquire; loop to handle suboptimal/out-of-date
        // swapchains, and bail out quietly when no image is ready yet.
        let (image_index, future) = loop {
            match vulkano::swapchain::acquire_next_image(
                self.window.as_ref().unwrap().swapchain.clone(),
                Some(std::time::Duration::from_secs(0)),
            ) {
                Ok((image_index, false, future)) => break (image_index, future),
                Ok((_, true, future)) => {
                    self.recreate_swapchain()?;
                    self.previous_frame_end = Some(Box::new(future));
                    continue;
                }
                Err(vulkano::Validated::Error(vulkano::VulkanError::OutOfDate)) => {
                    self.recreate_swapchain()?;
                    continue;
                }
                Err(vulkano::Validated::Error(vulkano::VulkanError::Timeout))
                | Err(vulkano::Validated::Error(vulkano::VulkanError::NotReady)) => {
                    return Ok(());
                }
                Err(e) => return Err(e.into()),
            };
        };
        log::debug!("Debug rendering");
        {
            // Last render to the same image should have finished, since we got this image again.
            if let Some(last_use) = self.uniform_buffer_gpu_use_end[image_index as usize].take() {
                last_use.wait(None).unwrap();
            }
            let mut mvp = self.uniform_buffers[image_index as usize].write().unwrap();
            mvp.mvps = self.latest_mvps.map(|m| m.to_cols_array_2d());
        }
        let mut previous_future = self.previous_frame_end.take().unwrap();
        previous_future.cleanup_finished();
        let future = (Box::new(future.join(previous_future).then_execute(
            self.queue.clone(),
            self.cmdbufs[image_index as usize].clone(),
        )?) as Box<dyn GpuFuture + Send + Sync>)
            .then_signal_fence();
        let future = Arc::new(future);
        // Remember the fence so the uniform buffer isn't rewritten while the
        // GPU still reads it.
        self.uniform_buffer_gpu_use_end[image_index as usize] = Some(future.clone());
        let window = self.window.as_ref().unwrap();
        window.inner.pre_present_notify();
        self.previous_frame_end = Some(Box::new(
            future
                .then_swapchain_present(
                    self.queue.clone(),
                    SwapchainPresentInfo::new(window.swapchain.clone(), image_index),
                )
                .then_signal_fence_and_flush()?,
        ));
        if let Some(renderdoc) = &mut self.renderdoc {
            log::debug!("End frame capture");
            renderdoc.end_frame_capture(std::ptr::null(), std::ptr::null());
        }
        log::debug!("Debug rendering end");
        Ok(())
    }
    /// (Re)build everything the mirror window needs for a given set of
    /// swapchain images: depth buffer, render pass, graphics pipeline,
    /// per-image uniform buffers, and one pre-recorded command buffer per
    /// swapchain image (stored in `self.cmdbufs`).
    fn recreate_window_command_buffers(
        &mut self,
        images: &[Arc<Image>],
        swapchain: &vulkano::swapchain::Swapchain,
    ) -> Result<()> {
        let vs_main = self.vs.entry_point("main").unwrap();
        let stages = smallvec![
            PipelineShaderStageCreateInfo::new(vs_main.clone()),
            PipelineShaderStageCreateInfo::new(self.fs.entry_point("main").unwrap()),
        ];
        let layout = PipelineLayout::new(
            self.device.clone(),
            PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
                .into_pipeline_layout_create_info(self.device.clone())?,
        )?;
        let extent = swapchain.image_extent();
        // Single depth buffer shared by all framebuffers; only one frame is
        // recorded against it at a time.
        let depth_image = Image::new(
            &self.allocator,
            &ImageCreateInfo {
                format: vulkano::format::Format::D32_SFLOAT,
                extent: [extent[0], extent[1], 1],
                samples: SampleCount::Sample1,
                array_layers: 1,
                usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT,
                ..Default::default()
            },
            &AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                    | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                allocate_preference: MemoryAllocatePreference::Unknown,
                ..Default::default()
            },
        )?;
        let depth_image =
            ImageView::new(&depth_image, &ImageViewCreateInfo::from_image(&depth_image))?;
        assert!(self.uniform_buffers.len() == self.uniform_buffer_gpu_use_end.len());
        // Grow (never shrink) the per-image uniform buffer pool so existing
        // fences stay paired with their buffers.
        if self.uniform_buffers.len() < images.len() {
            for _ in self.uniform_buffers.len()..images.len() {
                self.uniform_buffers.push(Buffer::new_sized::<vs::MVP>(
                    &self.allocator,
                    &BufferCreateInfo {
                        usage: BufferUsage::UNIFORM_BUFFER,
                        ..Default::default()
                    },
                    &AllocationCreateInfo {
                        memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                            | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                        allocate_preference: MemoryAllocatePreference::Unknown,
                        ..Default::default()
                    },
                )?);
                self.uniform_buffer_gpu_use_end.push(None);
            }
        }
        // Simple one-subpass render pass: color attachment + depth attachment.
        let render_pass = RenderPass::new(
            self.device.clone(),
            RenderPassCreateInfo {
                attachments: [
                    AttachmentDescription {
                        format: swapchain.image_format(),
                        store_op: AttachmentStoreOp::Store,
                        load_op: AttachmentLoadOp::Clear,
                        initial_layout: ImageLayout::Undefined,
                        final_layout: ImageLayout::ColorAttachmentOptimal,
                        ..Default::default()
                    },
                    AttachmentDescription {
                        format: vulkano::format::Format::D32_SFLOAT,
                        store_op: AttachmentStoreOp::Store,
                        load_op: AttachmentLoadOp::Clear,
                        initial_layout: ImageLayout::Undefined,
                        final_layout: ImageLayout::DepthStencilAttachmentOptimal,
                        ..Default::default()
                    },
                ]
                .into(),
                subpasses: [SubpassDescription {
                    color_attachments: [Some(AttachmentReference {
                        attachment: 0,
                        layout: ImageLayout::ColorAttachmentOptimal,
                        ..Default::default()
                    })]
                    .into(),
                    depth_stencil_attachment: Some(AttachmentReference {
                        attachment: 1,
                        layout: ImageLayout::DepthStencilAttachmentOptimal,
                        ..Default::default()
                    }),
                    ..Default::default()
                }]
                .into(),
                ..Default::default()
            },
        )?;
        let pipeline = GraphicsPipeline::new(
            self.device.clone(),
            None,
            GraphicsPipelineCreateInfo {
                vertex_input_state: Some(Vertex::per_vertex().definition(&vs_main)?),
                stages: stages.clone(),
                input_assembly_state: Some(InputAssemblyState {
                    topology: PrimitiveTopology::TriangleList,
                    ..Default::default()
                }),
                viewport_state: Some(ViewportState {
                    viewports: smallvec![Viewport {
                        offset: [0., 0.],
                        extent: [extent[0] as f32, extent[1] as f32],
                        ..Default::default()
                    }],
                    ..Default::default()
                }),
                depth_stencil_state: Some(DepthStencilState {
                    depth: Some(DepthState::simple()),
                    ..Default::default()
                }),
                multisample_state: Some(MultisampleState {
                    ..Default::default()
                }),
                color_blend_state: Some(ColorBlendState::with_attachment_states(
                    1,
                    Default::default(),
                )),
                rasterization_state: Some(RasterizationState::default()),
                subpass: Some(Subpass::from(render_pass.clone(), 0).unwrap().into()),
                ..GraphicsPipelineCreateInfo::new(layout.clone())
            },
        )?;
        log::info!("Creating cmdbufs. extent: {extent:?}");
        // One command buffer per swapchain image, each bound to its own
        // uniform buffer and framebuffer.
        self.cmdbufs = images
            .iter()
            .zip(self.uniform_buffers.iter().take(images.len()))
            .map(|(i, mvp)| {
                let descriptor_set = DescriptorSet::new(
                    self.descriptor_set_allocator.clone(),
                    pipeline.layout().set_layouts().first().unwrap().clone(),
                    [WriteDescriptorSet::buffer(0, mvp.clone())],
                    [],
                )?;
                let framebuffer = Framebuffer::new(
                    render_pass.clone(),
                    FramebufferCreateInfo {
                        attachments: vec![
                            ImageView::new(i, &ImageViewCreateInfo::from_image(i))?,
                            depth_image.clone(),
                        ],
                        ..Default::default()
                    },
                )?;
                let mut cmdbuf = AutoCommandBufferBuilder::primary(
                    self.cmdbuf_allocator.clone(),
                    self.queue.queue_family_index(),
                    CommandBufferUsage::MultipleSubmit,
                )?;
                cmdbuf
                    .begin_render_pass(
                        RenderPassBeginInfo {
                            clear_values: vec![
                                Some(ClearValue::Float([0.0, 0.0, 0.0, 0.0])),
                                Some(ClearValue::Depth(1.0)),
                            ],
                            ..RenderPassBeginInfo::framebuffer(framebuffer)
                        },
                        SubpassBeginInfo {
                            contents: SubpassContents::Inline,
                            ..Default::default()
                        },
                    )?
                    .bind_pipeline_graphics(pipeline.clone())?
                    .bind_descriptor_sets(
                        pipeline::PipelineBindPoint::Graphics,
                        pipeline.layout().clone(),
                        0,
                        descriptor_set.clone(),
                    )?
                    .bind_vertex_buffers(0, self.vertices.clone())?
                    .bind_index_buffer(self.indices.clone())?;
                unsafe { cmdbuf.draw_indexed(INDICES.len() as u32, 1, 0, 0, 0)? }
                    .end_render_pass(SubpassEndInfo::default())?;
                cmdbuf.build()
            })
            .collect::<Result<_, _>>()?;
        Ok(())
    }
    /// Build the application: create the multiview (stereo) render pass and
    /// pipeline for the XR swapchain, allocate geometry/uniform buffers and
    /// a depth buffer (using the runtime's depth swapchain if available),
    /// and pre-record one command buffer per XR swapchain image.
    fn new(
        mut xr: xr::OpenXr,
        proxy: EventLoopProxy<AppMessage>,
        frame_waiter: openxr::FrameWaiter,
        frame_stream: openxr::FrameStream<openxr::Vulkan>,
    ) -> Result<Self> {
        // RenderDoc is optional; log and continue if it isn't loadable.
        let renderdoc = renderdoc::RenderDoc::<renderdoc::V141>::new()
            .map_err(|e| log::info!("cannot load renderdoc: {e}"))
            .ok();
        let (device, queue) = xr.vk_device();
        let xr::RenderInfo {
            swapchain_images,
            depth_swapchain_images,
            ..
        } = xr.render_info();
        let representative_image = &swapchain_images[0];
        // Multiview render pass: view mask 0b11 renders both eye layers in
        // one pass.
        let render_pass = RenderPass::new(
            device.clone(),
            RenderPassCreateInfo {
                attachments: [
                    AttachmentDescription {
                        format: representative_image.format(),
                        store_op: AttachmentStoreOp::Store,
                        load_op: AttachmentLoadOp::Clear,
                        final_layout: ImageLayout::ColorAttachmentOptimal,
                        ..Default::default()
                    },
                    AttachmentDescription {
                        format: vulkano::format::Format::D32_SFLOAT,
                        store_op: AttachmentStoreOp::Store,
                        load_op: AttachmentLoadOp::Clear,
                        final_layout: ImageLayout::DepthStencilAttachmentOptimal,
                        ..Default::default()
                    },
                ]
                .into(),
                subpasses: [SubpassDescription {
                    color_attachments: [Some(AttachmentReference {
                        attachment: 0,
                        layout: ImageLayout::ColorAttachmentOptimal,
                        ..Default::default()
                    })]
                    .into(),
                    view_mask: 0b11,
                    depth_stencil_attachment: Some(AttachmentReference {
                        attachment: 1,
                        layout: ImageLayout::DepthStencilAttachmentOptimal,
                        ..Default::default()
                    }),
                    ..Default::default()
                }]
                .into(),
                correlated_view_masks: vec![0b11],
                ..Default::default()
            },
        )?;
        let allocator = Arc::new(StandardMemoryAllocator::new(&device, &Default::default()));
        let descriptor_set_allocator = Arc::new(StandardDescriptorSetAllocator::new(
            &device,
            &Default::default(),
        ));
        let vs = vs::load(device.clone())?;
        let fs = fs::load(device.clone())?;
        // Uniform buffer holding both eyes' MVP matrices for the XR path.
        let mvp = Buffer::new_sized::<vs::MVP>(
            &allocator,
            &BufferCreateInfo {
                usage: BufferUsage::UNIFORM_BUFFER,
                ..Default::default()
            },
            &AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                    | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                allocate_preference: MemoryAllocatePreference::Unknown,
                ..Default::default()
            },
        )?;
        let vs_main = vs.entry_point("main").unwrap();
        let stages = smallvec![
            PipelineShaderStageCreateInfo::new(vs_main.clone()),
            PipelineShaderStageCreateInfo::new(fs.entry_point("main").unwrap()),
        ];
        let layout = PipelineLayout::new(
            device.clone(),
            PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
                .into_pipeline_layout_create_info(device.clone())?,
        )?;
        let extent = representative_image.extent();
        let pipeline = GraphicsPipeline::new(
            device.clone(),
            None,
            GraphicsPipelineCreateInfo {
                vertex_input_state: Some(Vertex::per_vertex().definition(&vs_main)?),
                stages: stages.clone(),
                input_assembly_state: Some(InputAssemblyState {
                    topology: PrimitiveTopology::TriangleList,
                    ..Default::default()
                }),
                viewport_state: Some(ViewportState {
                    viewports: smallvec![Viewport {
                        offset: [0., 0.],
                        extent: [extent[0] as f32, extent[1] as f32],
                        ..Default::default()
                    }],
                    ..Default::default()
                }),
                depth_stencil_state: Some(DepthStencilState {
                    depth: Some(DepthState::simple()),
                    ..Default::default()
                }),
                multisample_state: Some(MultisampleState {
                    rasterization_samples: representative_image.samples(),
                    ..Default::default()
                }),
                color_blend_state: Some(ColorBlendState::with_attachment_states(
                    1,
                    Default::default(),
                )),
                rasterization_state: Some(RasterizationState::default()),
                subpass: Some(Subpass::from(render_pass.clone(), 0).unwrap().into()),
                ..GraphicsPipelineCreateInfo::new(layout.clone())
            },
        )?;
        // Vertices for a cube
        let vertices = Buffer::from_iter::<Vertex, _>(
            &allocator,
            &BufferCreateInfo {
                usage: BufferUsage::VERTEX_BUFFER,
                ..Default::default()
            },
            &AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                    | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                allocate_preference: MemoryAllocatePreference::Unknown,
                ..Default::default()
            },
            VERTICES.into_iter(),
        )?;
        let indices = Buffer::from_iter::<u32, _>(
            &allocator,
            &BufferCreateInfo {
                usage: BufferUsage::INDEX_BUFFER,
                ..Default::default()
            },
            &AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                    | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                allocate_preference: MemoryAllocatePreference::Unknown,
                ..Default::default()
            },
            INDICES.into_iter(),
        )?;
        let cmdbuf_allocator = Arc::new(StandardCommandBufferAllocator::new(
            &device,
            &Default::default(),
        ));
        // Use the runtime-provided depth swapchain if there is one; otherwise
        // allocate a single depth image and share it across all framebuffers.
        let depth_images: Vec<_> = if let Some(depth_images) = depth_swapchain_images {
            depth_images
                .iter()
                .map(|i| ImageView::new(i, &ImageViewCreateInfo::from_image(i)))
                .collect::<Result<_, _>>()?
        } else {
            let depth_image = Image::new(
                &allocator,
                &ImageCreateInfo {
                    format: vulkano::format::Format::D32_SFLOAT,
                    extent,
                    samples: representative_image.samples(),
                    array_layers: representative_image.array_layers(),
                    usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT,
                    ..Default::default()
                },
                &AllocationCreateInfo {
                    memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                        | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
                    allocate_preference: MemoryAllocatePreference::Unknown,
                    ..Default::default()
                },
            )?;
            let depth_image =
                ImageView::new(&depth_image, &ImageViewCreateInfo::from_image(&depth_image))?;
            swapchain_images
                .iter()
                .map(|_| depth_image.clone())
                .collect()
        };
        let descriptor_set = DescriptorSet::new(
            descriptor_set_allocator.clone(),
            pipeline.layout().set_layouts().first().unwrap().clone(),
            [WriteDescriptorSet::buffer(0, mvp.clone())],
            [],
        )?;
        // One pre-recorded command buffer per XR swapchain image.
        let cmdbufs = swapchain_images
            .iter()
            .zip(depth_images.into_iter())
            .map(|(i, di)| {
                let framebuffer = Framebuffer::new(
                    render_pass.clone(),
                    FramebufferCreateInfo {
                        attachments: vec![
                            ImageView::new(i, &ImageViewCreateInfo::from_image(i))?,
                            di,
                        ],
                        ..Default::default()
                    },
                )?;
                let mut cmdbuf = AutoCommandBufferBuilder::primary(
                    cmdbuf_allocator.clone(),
                    queue.queue_family_index(),
                    CommandBufferUsage::MultipleSubmit,
                )?;
                cmdbuf
                    .begin_render_pass(
                        RenderPassBeginInfo {
                            clear_values: vec![
                                Some(ClearValue::Float([0.0, 0.0, 0.0, 0.0])),
                                Some(ClearValue::Depth(1.0)),
                            ],
                            ..RenderPassBeginInfo::framebuffer(framebuffer)
                        },
                        SubpassBeginInfo {
                            contents: SubpassContents::Inline,
                            ..Default::default()
                        },
                    )?
                    .bind_pipeline_graphics(pipeline.clone())?
                    .bind_descriptor_sets(
                        pipeline::PipelineBindPoint::Graphics,
                        pipeline.layout().clone(),
                        0,
                        descriptor_set.clone(),
                    )?
                    .bind_vertex_buffers(0, vertices.clone())?
                    .bind_index_buffer(indices.clone())?;
                unsafe { cmdbuf.draw_indexed(INDICES.len() as u32, 1, 0, 0, 0)? }
                    .end_render_pass(SubpassEndInfo::default())?;
                cmdbuf.build()
            })
            .collect::<Result<_, _>>()?;
        log::info!("xr swapchain extent {:?}", representative_image.extent());
        Ok(App {
            exiting: false,
            state: SessionState::Idle(frame_waiter),
            queue: queue.clone(),
            allocator,
            cmdbuf_allocator,
            descriptor_set_allocator,
            instance: xr.vk_instance(),
            previous_frame_end: Some(Box::new(vulkano::sync::now(device.clone()))),
            window: None,
            cmdbufs: Vec::new(),
            vertices,
            device,
            proxy,
            indices,
            vs,
            fs,
            renderdoc,
            xr_uniform_buffer: mvp,
            uniform_buffers: Vec::new(),
            uniform_buffer_gpu_use_end: Vec::new(),
            latest_mvps: [Mat4::IDENTITY, Mat4::IDENTITY],
            xr_cmdbufs: cmdbufs,
            xr,
            passthrough: None,
            frame_stream,
            render_start: std::time::Instant::now(),
        })
    }
    /// Render one XR frame: locate the per-eye views, compute their MVP
    /// matrices, execute the pre-recorded command buffer for the acquired
    /// swapchain image, and submit the projection (plus optional depth and
    /// passthrough) composition layers.
    fn render_and_submit(&mut self, state: FrameState) -> Result<()> {
        log::debug!("XR rendering");
        // Elapsed time since startup drives the cube's rotation animation.
        let delta = self.render_start.elapsed();
        let xr::RenderInfo {
            session: xr_session,
            swapchain,
            depth_swapchain,
            space,
            render_size,
            ..
        } = self.xr.render_info();
        self.queue.with(|_| self.frame_stream.begin())?;
        let (view_flags, views) = xr_session
            .locate_views(
                ViewConfigurationType::PRIMARY_STEREO,
                state.predicted_display_time,
                space,
            )
            .unwrap();
        // Without valid tracking data, submit an empty frame instead of
        // rendering from garbage poses.
        if !view_flags.contains(ViewStateFlags::POSITION_VALID | ViewStateFlags::ORIENTATION_VALID)
        {
            log::warn!("View state is not valid");
            self.queue.with(|_| {
                self.frame_stream.end(
                    state.predicted_display_time,
                    EnvironmentBlendMode::OPAQUE,
                    &[],
                )
            })?;
            return Ok(());
        }
        log::trace!("{:?}", views[0].fov);
        log::trace!("{:?}", views[1].fov);
        log::trace!("{:?}", views[0].pose);
        log::trace!("{:?}", views[1].pose);
        let views: [_; 2] = (&views[..]).try_into()?;
        let image_index = self.queue.with(|_| swapchain.acquire_image())? as usize;
        swapchain.wait_image(openxr::Duration::INFINITE).unwrap();
        // The depth swapchain (if present) must advance in lockstep with the
        // color swapchain — assert their indices match.
        let depth_swapchain = depth_swapchain
            .map(|sc| {
                let i = self.queue.with(|_| sc.acquire_image())? as usize;
                assert_eq!(i, image_index);
                sc.wait_image(openxr::Duration::INFINITE)?;
                Ok::<_, openxr::sys::Result>(sc)
            })
            .transpose()?;
        // Convert views to mvp
        self.latest_mvps = [0, 1].map(|i| {
            let view = views[i];
            let translation = glam::Vec3::new(
                view.pose.position.x,
                view.pose.position.y,
                view.pose.position.z,
            );
            let rotation = glam::Quat::from_xyzw(
                view.pose.orientation.x,
                view.pose.orientation.y,
                view.pose.orientation.z,
                view.pose.orientation.w,
            );
            // Build an asymmetric-FoV projection from the per-eye frustum
            // tangents; near/far must match the depth layer info below.
            let l = view.fov.angle_left.tan();
            let r = view.fov.angle_right.tan();
            let t = view.fov.angle_up.tan();
            let b = view.fov.angle_down.tan();
            let (near, far) = (0.05, 100.0);
            #[rustfmt::skip]
            let projection = Mat4::from_cols(
                Vec4::new(2.0 / (r - l), 0.0          , (r + l) / (r - l)  , 0.0                     ),
                Vec4::new(0.0          , -2.0 / (t - b), -(t + b) / (t - b), 0.0                     ),
                Vec4::new(0.0          , 0.0          , -far / (far - near), -far*near / (far - near)),
                Vec4::new(0.0          , 0.0          , -1.0               , 0.0                     ),
            ).transpose(); // We gave the matrix in row major, so transpose it
            let model = Mat4::from_rotation_translation(Quat::from_axis_angle(Vec3::new(0., 1., 0.), (delta.as_millis() as f32) / 1000.), Vec3::new(0.0, 0.0, -1.0));
            projection * Mat4::from_rotation_translation(rotation, translation).inverse() * model
        });
        {
            let mut mvp = self.xr_uniform_buffer.write()?;
            mvp.mvps = self.latest_mvps.map(|m| m.to_cols_array_2d());
        };
        // Single shared uniform buffer for the XR path, so block until the
        // GPU finishes before releasing the image.
        self.xr_cmdbufs[image_index]
            .clone()
            .execute(self.queue.clone())?
            .then_signal_fence_and_flush()?
            .wait(None)?;
        self.queue.with(|_| swapchain.release_image())?;
        // One projection view per eye; both sample the same array image at
        // different layers.
        let views = [
            openxr::CompositionLayerProjectionView::new()
                .pose(views[0].pose)
                .fov(views[0].fov)
                .sub_image(
                    SwapchainSubImage::new()
                        .swapchain(swapchain)
                        .image_array_index(0)
                        .image_rect(Rect2Di {
                            offset: Offset2Di { x: 0, y: 0 },
                            extent: Extent2Di {
                                width: render_size.x as _,
                                height: render_size.y as _,
                            },
                        }),
                ),
            openxr::CompositionLayerProjectionView::new()
                .pose(views[1].pose)
                .fov(views[1].fov)
                .sub_image(
                    SwapchainSubImage::new()
                        .swapchain(swapchain)
                        .image_array_index(1)
                        .image_rect(Rect2Di {
                            offset: Offset2Di { x: 0, y: 0 },
                            extent: Extent2Di {
                                width: render_size.x as _,
                                height: render_size.y as _,
                            },
                        }),
                ),
        ];
        // Optional XR_KHR_composition_layer_depth info, chained onto each
        // projection view below.
        let depths = if let Some(sc) = depth_swapchain {
            let mut i = 0;
            self.queue.with(|_| sc.release_image())?;
            Some(views.each_ref().map(|_| {
                let sub_img = SwapchainSubImage::new()
                    .swapchain(sc)
                    .image_array_index(i)
                    .image_rect(Rect2Di {
                        offset: Offset2Di { x: 0, y: 0 },
                        extent: Extent2Di {
                            width: render_size.x as _,
                            height: render_size.y as _,
                        },
                    });
                i += 1;
                openxr::sys::CompositionLayerDepthInfoKHR {
                    ty: openxr::sys::CompositionLayerDepthInfoKHR::TYPE,
                    next: std::ptr::null(),
                    sub_image: sub_img.into_raw(),
                    max_depth: 1.0,
                    min_depth: 0.0,
                    near_z: 0.05,
                    far_z: 100.0,
                }
            }))
        } else {
            None
        };
        let views = if let Some(depths) = &depths {
            let mut i = 0;
            views.map(|v| {
                let mut v = v.into_raw();
                v.next = &depths[i] as *const _ as *const _;
                i += 1;
                unsafe { openxr::CompositionLayerProjectionView::from_raw(v) }
            })
        } else {
            views
        };
        let layer = openxr::CompositionLayerProjection::new()
            .space(space)
            .layer_flags(CompositionLayerFlags::BLEND_TEXTURE_SOURCE_ALPHA)
            .views(&views);
        // If passthrough is enabled, composite the passthrough layer beneath
        // the projection layer.
        let layers: &[&openxr::CompositionLayerBase<_>] = if let Some(p) = self.passthrough {
            let passthrough_layer = openxr::sys::CompositionLayerPassthroughHTC {
                ty: openxr::sys::CompositionLayerPassthroughHTC::TYPE,
                next: std::ptr::null(),
                // Spec: layer_flags must not be 0. don't know why, let's just use a
                // noop flag.
                layer_flags: openxr::sys::CompositionLayerFlags::CORRECT_CHROMATIC_ABERRATION,
                space: openxr::sys::Space::NULL,
                passthrough: p,
                color: openxr::sys::PassthroughColorHTC {
                    ty: openxr::sys::PassthroughColorHTC::TYPE,
                    next: std::ptr::null(),
                    alpha: 1.0,
                },
            };
            // Safety: `openxr::CompositionLayerBase` is a transparent wrapper of
            // `openxr::sys::CompositionLayerBaseHeader`, which is a prefix of
            // `openxr::sys::CompositionLayerPassthroughHTC`.
            let passthrough_layer = unsafe {
                &*(&passthrough_layer as *const openxr::sys::CompositionLayerPassthroughHTC).cast()
            };
            &[passthrough_layer, &layer]
        } else {
            &[&layer]
        };
        self.queue.with(|_| {
            self.frame_stream.end(
                state.predicted_display_time,
                EnvironmentBlendMode::OPAQUE,
                layers,
            )
        })?;
        log::debug!("XR rendering end");
        Ok(())
    }
}
/// Messages sent from the frame-waiter thread to the winit event loop.
enum AppMessage {
    /// A frame slot is ready; carries the `xrWaitFrame` result for it.
    Frame(openxr::FrameState),
    /// The frame-waiter thread exited; returns ownership of the waiter.
    WaiterExited(openxr::FrameWaiter),
}
impl std::fmt::Debug for AppMessage {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AppMessage::Frame(state) => write!(f, "Frame({state:?})"),
AppMessage::WaiterExited(_) => write!(f, "WaiterExited"),
}
}
}
/// Entry point: sets up logging, creates the OpenXR instance (enabling the
/// HTC passthrough extension — either natively or via an API layer that
/// provides it — and KHR composition-layer depth when available), then runs
/// the winit event loop with the XR renderer.
fn main() -> Result<()> {
    env_logger::Builder::from_default_env()
        .format_timestamp_millis()
        .init();
    // SAFETY: loads the OpenXR loader shared library; assumes it is
    // well-behaved.
    let entry = unsafe { openxr::Entry::load() }?;
    let supported_xr_extensions = entry.enumerate_extensions()?;
    let mut xr_extensions = openxr::ExtensionSet::default();
    let layers = entry.enumerate_layers()?;
    let mut api_layers = vec![
        "XR_APILAYER_LUNARG_api_dump",
        //"XR_APILAYER_LUNARG_core_validation", // has bugs for passthrough
    ];
    if supported_xr_extensions.htc_passthrough {
        log::info!("Has HTC passthrough extension");
        xr_extensions.htc_passthrough = true;
    } else {
        // Runtime lacks passthrough: look for an API layer that provides the
        // extension and enable that layer instead.
        for layer in &layers {
            let layer_exts = entry.enumerate_layer_extensions(&layer.layer_name)?;
            if layer_exts.htc_passthrough {
                xr_extensions.htc_passthrough = true;
                api_layers.push(layer.layer_name.as_str());
                break;
            }
        }
    }
    if supported_xr_extensions.khr_composition_layer_depth {
        log::info!("Has KHR composition layer depth extension");
        xr_extensions.khr_composition_layer_depth = true;
    }
    //xr_extensions.htc_passthrough = true;
    let (xr, frame_waiter, frame_stream) = xr::OpenXr::new(
        vulkano::instance::InstanceExtensions {
            khr_xlib_surface: true,
            ..Default::default()
        },
        &xr_extensions,
        &api_layers,
        "openxr hello world",
        1,
    )?;
    println!("Hello, world!");
    let event_loop = EventLoop::<AppMessage>::with_user_event().build()?;
    let mut renderer = App::new(xr, event_loop.create_proxy(), frame_waiter, frame_stream)?;
    // Eagerly create the HTC passthrough object when the extension is
    // enabled; it is consumed later when submitting the passthrough layer.
    renderer.passthrough = renderer
        .xr
        .xr_instance()
        .exts()
        .htc_passthrough
        .map(|passthrough_fp| {
            let mut passthrough = openxr::sys::PassthroughHTC::NULL;
            let passthrough_info = openxr::sys::PassthroughCreateInfoHTC {
                form: openxr::sys::PassthroughFormHTC::PLANAR,
                ty: openxr::sys::PassthroughCreateInfoHTC::TYPE,
                next: std::ptr::null(),
            };
            // SAFETY: valid create-info struct and out pointer; session
            // handle is live.
            unsafe {
                (passthrough_fp.create_passthrough)(
                    renderer.xr.xr_session().as_raw(),
                    &passthrough_info,
                    &mut passthrough,
                )
            }
            .context("failed to create passthrough")?;
            Ok::<_, anyhow::Error>(passthrough)
        })
        .transpose()?;
    event_loop.run_app(&mut renderer)?;
    log::info!("Session ended");
    Ok(())
}
// Shader for a cube
/// Vertex shader: renders both eyes in one pass via `GL_EXT_multiview`,
/// selecting the per-eye MVP matrix with `gl_ViewIndex`.
mod vs {
    vulkano_shaders::shader! {
        ty: "vertex",
        src: "
#version 450
#extension GL_EXT_multiview : enable
layout(location = 0) in vec3 in_position;
layout(location = 1) in vec4 in_color;
layout(location = 0) out vec4 out_color;
layout(binding = 0) uniform MVP {
mat4 mvps[2];
};
void main () {
gl_Position = mvps[gl_ViewIndex] * vec4(in_position, 1.0);
out_color = in_color;
}
"
    }
}
/// Fragment shader: passes the interpolated vertex color straight through.
mod fs {
    vulkano_shaders::shader! {
        ty: "fragment",
        src: "
#version 450
layout(location = 0) in vec4 out_color;
layout(location = 0) out vec4 frag_color;
void main () {
frag_color = out_color;
}
"
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/api_layer/instance.rs | Rust | use crate::api_layer::{LOG_INIT, REQUIRED_VK_DEVICE_EXTENSIONS, REQUIRED_VK_INSTANCE_EXTENSIONS};
use log::{debug, warn};
use openxr::sys::Result as XrErr;
use quark::{Hooked as _, Low as _, try_xr};
use std::{
collections::{HashMap, HashSet},
ffi::{CStr, c_char},
};
use vulkano::Handle as _;
/// Shared implementation of the `xrGetVulkan{Instance,Device}ExtensionsKHR`
/// overrides.
///
/// Queries the runtime's space-separated extension string via `original_fn`
/// (OpenXR two-call idiom), then appends every extension from
/// `required_extensions` the runtime did not already list. `*count` always
/// receives the required buffer size including the trailing NUL; the string
/// is only written when `cap` is large enough.
///
/// # Safety
///
/// `count` must be a valid out pointer; `buffer` must point to at least
/// `cap` writable bytes when `cap != 0`; `original_fn` must honor the
/// two-call convention.
unsafe fn get_vulkan_extensions_override(
    instance: openxr::sys::Instance,
    system_id: openxr::sys::SystemId,
    cap: u32,
    count: *mut u32,
    buffer: *mut c_char,
    required_extensions: &[&CStr],
    original_fn: unsafe extern "system" fn(
        openxr::sys::Instance,
        openxr::sys::SystemId,
        u32,
        *mut u32,
        *mut c_char,
    ) -> XrErr,
) -> XrErr {
    if cap != 0 && buffer.is_null() {
        return XrErr::ERROR_VALIDATION_FAILURE;
    }
    // First call: ask the runtime how long its extension string is.
    let mut len = 0;
    let ret = unsafe { (original_fn)(instance, system_id, 0, &mut len, std::ptr::null_mut()) };
    if ret != XrErr::SUCCESS {
        return ret;
    }
    // Second call: fetch the actual string into `buf`.
    let mut buf: Vec<u8> = Vec::with_capacity(len as _);
    let ret = unsafe {
        (original_fn)(
            instance,
            system_id,
            len,
            &mut len,
            buf.spare_capacity_mut().as_mut_ptr() as *mut _,
        )
    };
    if ret != XrErr::SUCCESS {
        return ret;
    }
    // SAFETY: `original_fn` should've written `len` bytes into `buf`.
    // NOTE(review): assumes `len >= 1` (runtime reports at least the NUL) —
    // confirm against runtime behavior.
    unsafe {
        buf.set_len((len - 1) as _); // ignore the nul
    }
    let extensions = buf.split(|b| *b == b' ').collect::<HashSet<_>>();
    // Extensions we need that the runtime did not report.
    let extra_extensions = required_extensions
        .iter()
        .map(|e| e.to_bytes())
        .filter(|e| !extensions.contains(e));
    // First, check if there is enough space.
    let extra_len = extra_extensions.clone().fold(0, |a, b| a + b.len() + 1);
    let total_len = buf.len() + extra_len
        - if buf.is_empty() && extra_len > 0 {
            // If buf is empty string, then we don't need to put a space between it and
            // extra_extensions
            1
        } else {
            0
        };
    let total_len: u32 = total_len.try_into().unwrap();
    // The caller's buffer must also hold the trailing NUL.
    let required = total_len + 1;
    unsafe { *count = required };
    if required > cap {
        // Fix: previously this proceeded when `cap == total_len`, writing
        // `total_len + 1` bytes (content + NUL) into a `cap`-byte buffer —
        // a one-byte overflow. Per the OpenXR buffer-size convention,
        // `cap == 0` is a size query (SUCCESS); a non-zero but too-small
        // capacity is ERROR_SIZE_INSUFFICIENT.
        return if cap == 0 {
            XrErr::SUCCESS
        } else {
            XrErr::ERROR_SIZE_INSUFFICIENT
        };
    }
    // Can't create &mut [] from `buffer` because it might be uninitialized.
    unsafe {
        buffer.copy_from_nonoverlapping(buf.as_ptr() as *const _, buf.len());
        let mut pos = buffer.add(buf.len());
        for e in extra_extensions {
            if !std::ptr::eq(pos, buffer) {
                pos.write(b' ' as _);
                pos = pos.add(1);
            }
            pos.copy_from_nonoverlapping((*e).as_ptr() as *const _, e.len());
            pos = pos.add(e.len());
        }
        pos.write(0);
        assert_eq!(pos.offset_from(buffer), total_len as isize);
    }
    XrErr::SUCCESS
}
/// Layer override for `xrGetVulkanInstanceExtensionsKHR`: forwards to the
/// runtime's implementation while injecting this layer's required Vulkan
/// instance extensions into the returned list.
pub(super) unsafe extern "system" fn get_vulkan_instance_extensions(
    instance: openxr::sys::Instance,
    system_id: openxr::sys::SystemId,
    cap: u32,
    count: *mut u32,
    buffer: *mut c_char,
) -> XrErr {
    let wrapped_instance = try_xr!(instance.registered_with_hook());
    // Without KHR_vulkan_enable the original function pointer does not exist.
    let original_fn = match wrapped_instance.get().exts().khr_vulkan_enable {
        Some(vulkan_enable) => vulkan_enable.get_vulkan_instance_extensions,
        None => return XrErr::ERROR_VALIDATION_FAILURE,
    };
    unsafe {
        get_vulkan_extensions_override(
            instance,
            system_id,
            cap,
            count,
            buffer,
            REQUIRED_VK_INSTANCE_EXTENSIONS,
            original_fn,
        )
    }
}
/// Layer override for `xrGetVulkanDeviceExtensionsKHR`: forwards to the
/// runtime's implementation while injecting this layer's required Vulkan
/// device extensions into the returned list.
pub(super) unsafe extern "system" fn get_vulkan_device_extensions(
    instance: openxr::sys::Instance,
    system_id: openxr::sys::SystemId,
    cap: u32,
    count: *mut u32,
    buffer: *mut c_char,
) -> XrErr {
    let wrapped_instance = try_xr!(instance.registered_with_hook());
    // Without KHR_vulkan_enable the original function pointer does not exist.
    let original_fn = match wrapped_instance.get().exts().khr_vulkan_enable {
        Some(vulkan_enable) => vulkan_enable.get_vulkan_device_extensions,
        None => return XrErr::ERROR_VALIDATION_FAILURE,
    };
    unsafe {
        get_vulkan_extensions_override(
            instance,
            system_id,
            cap,
            count,
            buffer,
            REQUIRED_VK_DEVICE_EXTENSIONS,
            original_fn,
        )
    }
}
pub(super) unsafe extern "system" fn create_vulkan_instance(
instance: openxr::sys::Instance,
create_info: *const openxr::sys::VulkanInstanceCreateInfoKHR,
out_vk_instance: *mut ash::vk::Instance,
result: *mut ash::vk::Result,
) -> XrErr {
debug!("Creating Vulkan instance");
let mut wrapped_instance = try_xr!(instance.registered_with_hook_mut());
let Some(vulkan_enable2) = wrapped_instance.get().exts().khr_vulkan_enable2 else {
warn!("Calling create_vulkan_instance without khr_vulkan_enable2");
return XrErr::ERROR_VALIDATION_FAILURE;
};
let mut vk_create_info =
unsafe { *((*create_info).vulkan_create_info as *const ash::vk::InstanceCreateInfo<'_>) };
let requested_extensions = if vk_create_info.enabled_extension_count > 0 {
let extensions = unsafe {
std::slice::from_raw_parts(
vk_create_info.pp_enabled_extension_names,
vk_create_info.enabled_extension_count as _,
)
};
extensions
.iter()
.map(|&e| unsafe { CStr::from_ptr(e) })
.collect::<HashSet<_>>()
} else {
HashSet::new()
};
let extra_extensions = REQUIRED_VK_INSTANCE_EXTENSIONS
.iter()
.filter(|&&e| !requested_extensions.contains(e));
debug!("Extensions: {:?}", requested_extensions);
let mut new_create_info = unsafe { *create_info };
let mut new_extensions = Vec::new();
if extra_extensions.clone().count() > 0 {
new_extensions.extend(extra_extensions.map(|e| e.as_ptr()));
new_extensions.extend(requested_extensions.into_iter().map(|e| e.as_ptr()));
vk_create_info.enabled_extension_count = new_extensions.len() as _;
vk_create_info.pp_enabled_extension_names = new_extensions.as_ptr();
new_create_info.vulkan_create_info = &vk_create_info as *const _ as *const _;
}
let ret = unsafe {
(vulkan_enable2.create_vulkan_instance)(
instance,
&new_create_info,
out_vk_instance as *mut _,
result as *mut _,
)
};
if ret != XrErr::SUCCESS {
return ret;
}
wrapped_instance.hook().instance_api_version.insert(
unsafe { *out_vk_instance }.as_raw(),
unsafe { *vk_create_info.p_application_info }
.api_version
.into(),
);
ret
}
/// Layer override for `xrCreateVulkanDeviceKHR`.
///
/// Forwards device creation to the runtime after augmenting the
/// application's `VkDeviceCreateInfo` with the device extensions this layer
/// requires (deduplicated against what the app already requested).
pub(super) unsafe extern "system" fn create_vulkan_device(
    instance: openxr::sys::Instance,
    create_info: *const openxr::sys::VulkanDeviceCreateInfoKHR,
    device: *mut ash::vk::Device,
    result: *mut ash::vk::Result,
) -> XrErr {
    debug!("Creating Vulkan device");
    let wrapped_instance = try_xr!(instance.registered_with_hook());
    let Some(vulkan_enable2) = wrapped_instance.get().exts().khr_vulkan_enable2 else {
        return XrErr::ERROR_VALIDATION_FAILURE;
    };
    let mut vk_create_info =
        unsafe { *((*create_info).vulkan_create_info as *const ash::vk::DeviceCreateInfo<'_>) };
    // Extensions the application requested, used for deduplication below.
    let requested_extensions = if vk_create_info.enabled_extension_count > 0 {
        let extensions = unsafe {
            std::slice::from_raw_parts(
                vk_create_info.pp_enabled_extension_names,
                vk_create_info.enabled_extension_count as _,
            )
        };
        extensions
            .iter()
            .map(|&e| unsafe { CStr::from_ptr(e) })
            .collect::<HashSet<_>>()
    } else {
        HashSet::new()
    };
    let extra_extensions = REQUIRED_VK_DEVICE_EXTENSIONS
        .iter()
        .filter(|&&e| !requested_extensions.contains(e));
    debug!("Extensions: {:?}", requested_extensions);
    debug!(
        "Extra Extensions: {:?}",
        extra_extensions.clone().collect::<Vec<_>>()
    );
    let mut new_create_info = unsafe { *create_info };
    // Must outlive the FFI call below; the create-info only borrows it.
    let mut new_extensions = Vec::new();
    if extra_extensions.clone().count() > 0 {
        new_extensions.extend(extra_extensions.map(|e| e.as_ptr()));
        new_extensions.extend(requested_extensions.into_iter().map(|e| e.as_ptr()));
        vk_create_info.enabled_extension_count = new_extensions.len() as _;
        vk_create_info.pp_enabled_extension_names = new_extensions.as_ptr();
        new_create_info.vulkan_create_info = &vk_create_info as *const _ as *const _;
    }
    unsafe {
        (vulkan_enable2.create_vulkan_device)(
            instance,
            &new_create_info,
            device as *mut _,
            result as *mut _,
        )
    }
}
// Define your instance data
/// Per-`XrInstance` state kept by this API layer.
pub struct InstanceData {
    /// Whether the XR_HTC_passthrough extension is enabled. Note passthrough resources are created
    /// regardless to support the ALPHA_BLEND environment blend mode, which is not behind an
    /// extension.
    _is_passthrough_enabled: bool,
    /// Mapping raw vulkan VkInstance handles to the api version it was created with.
    instance_api_version: HashMap<u64, vulkano::Version>,
}
impl InstanceData {
    /// Read-only view of the VkInstance → API-version map populated by
    /// `create_vulkan_instance`.
    pub(super) fn instance_api_version(&self) -> &HashMap<u64, vulkano::Version> {
        &self.instance_api_version
    }
}
/// Factory that creates the next instance in the layer chain and the
/// associated [`InstanceData`] hook state.
pub struct InstanceFactory;
unsafe impl quark::Factory<InstanceData> for InstanceFactory {
    // SAFETY: `args` must be valid
    /// Strips `XR_HTC_passthrough` from the requested extension list before
    /// calling down the layer chain (this layer implements that extension,
    /// so the runtime below must not see it), and records whether the
    /// application requested it.
    unsafe fn create(
        args: quark::CreateArgs<openxr::sys::Instance>,
    ) -> Result<(openxr::Instance, InstanceData), XrErr> {
        // Instance creation is the layer's first entry point; initialize the
        // logger exactly once.
        LOG_INIT.get_or_init(env_logger::init);
        debug!("Creating OpenXR instance");
        let (info, api_layer_info, instance) = args;
        let mut instance_info = unsafe { *info };
        // Filtered extension list; must outlive the create call below.
        let mut new_exts = Vec::new();
        let enabled = if instance_info.enabled_extension_count != 0 {
            let exts = unsafe {
                std::slice::from_raw_parts(
                    instance_info.enabled_extension_names,
                    instance_info.enabled_extension_count as _,
                )
            };
            let it = exts.iter().map(|e| unsafe { CStr::from_ptr(*e) });
            let dbge = it.clone().collect::<Vec<_>>();
            for e in dbge {
                debug!("Extension: {:?}", e.to_str());
            }
            let enabled = it.clone().any(|e| e == c"XR_HTC_passthrough");
            if enabled {
                debug!("Passthrough extension enabled");
                // Forward the same list minus XR_HTC_passthrough.
                new_exts.reserve(exts.len());
                new_exts.extend(
                    it.filter(|e| *e != c"XR_HTC_passthrough")
                        .map(|e| e.as_ptr()),
                );
                instance_info.enabled_extension_names = new_exts.as_ptr();
                instance_info.enabled_extension_count = new_exts.len() as _;
            }
            enabled
        } else {
            false
        };
        let layer_info = unsafe { &*api_layer_info };
        // Call down the layer chain to create the actual instance.
        let r = unsafe {
            ((*layer_info.next_info).next_create_api_layer_instance)(
                &instance_info,
                api_layer_info,
                instance,
            )
        };
        if r != XrErr::SUCCESS {
            return Err(r);
        }
        let (high, _create_info) = unsafe { (*instance).into_high(args) }?;
        let this = InstanceData {
            _is_passthrough_enabled: enabled,
            instance_api_version: HashMap::new(),
        };
        Ok((high, this))
    }
}
// SAFETY: `fp` must be a function that takes arguments like this: `fp($args..., capacity, count,
// out_array)`. It must return the number of elements it has written to `out_array` via `count`,
// and it must not write more than `capacity` elements into `out_array`.
#[allow(edition_2024_expr_fragment_specifier)]
macro_rules! call_enumerate {
    ($f:expr => [$size:literal]; $($args:expr),*) => {
        {
            let fp = $f;
            // Inline buffer: most enumerations fit within `$size` elements
            // without touching the heap.
            let mut buf = smallvec::SmallVec::<[_; $size]>::new();
            let mut size_out = 0u32;
            let ret = fp($($args),* , ($size) as u32, &mut size_out, buf.as_mut_ptr());
            if ret != XrErr::SUCCESS && ret != XrErr::ERROR_SIZE_INSUFFICIENT {
                Err(ret)
            } else if ret == XrErr::SUCCESS {
                // Fix: writing exactly `$size` elements is within capacity,
                // so this must be `<=` (was `<`). Also `stringify!($fp)`
                // referenced an unbound macro variable; the matcher binds
                // `$f`.
                assert!((size_out as usize) <= $size, "{} written more than capacity", stringify!($f));
                buf.set_len(size_out as usize);
                Ok(buf)
            } else {
                // ERROR_SIZE_INSUFFICIENT: grow to the reported size and
                // retry once.
                buf.reserve(size_out as usize);
                let ret = fp($($args),* , size_out, &mut size_out, buf.as_mut_ptr());
                if ret != XrErr::SUCCESS {
                    Err(ret)
                } else {
                    assert!(size_out as usize <= buf.capacity(), "{} written more than capacity", stringify!($f));
                    buf.set_len(size_out as usize);
                    Ok(buf)
                }
            }
        }
    };
}
/// Layer override for `xrEnumerateEnvironmentBlendModes`.
///
/// Forwards the runtime's list of blend modes, appending `ALPHA_BLEND` when
/// the runtime does not support it natively (this layer emulates it via
/// camera passthrough). Follows the OpenXR two-call buffer convention.
pub(super) unsafe extern "system" fn enumerate_environment_blend_modes(
    raw_instance: openxr::sys::Instance,
    system_id: openxr::sys::SystemId,
    view_config_type: openxr::sys::ViewConfigurationType,
    capacity: u32,
    count: *mut u32,
    modes: *mut openxr::sys::EnvironmentBlendMode,
) -> XrErr {
    let instance = try_xr!(raw_instance.registered());
    // Fetch the runtime's own list first; 3 inline slots cover the three
    // core blend modes.
    let original_modes = try_xr!(unsafe {
        call_enumerate!(instance.fp().enumerate_environment_blend_modes => [3];
            raw_instance, system_id, view_config_type)
    });
    if original_modes.contains(&openxr::sys::EnvironmentBlendMode::ALPHA_BLEND) {
        // Already has ALPHA_BLEND, just forward the call
        unsafe {
            (instance.fp().enumerate_environment_blend_modes)(
                raw_instance,
                system_id,
                view_config_type,
                capacity,
                count,
                modes,
            )
        }
    } else {
        debug!("Insert ALPHA_BLEND into supported environment blend modes");
        if (capacity as usize) < original_modes.len() + 1 {
            // Report the required capacity either way.
            unsafe { *count = (original_modes.len() + 1) as _ };
            if capacity == 0 {
                // capacity == 0 is the size query: success.
                XrErr::SUCCESS
            } else {
                XrErr::ERROR_SIZE_INSUFFICIENT
            }
        } else {
            unsafe {
                modes.copy_from_nonoverlapping(original_modes.as_ptr(), original_modes.len());
                // Append our emulated mode after the runtime's own modes.
                modes
                    .add(original_modes.len())
                    .write(openxr::sys::EnvironmentBlendMode::ALPHA_BLEND);
                *count = (original_modes.len() + 1) as _;
            }
            XrErr::SUCCESS
        }
    }
}
/// Registers [`InstanceData`] as the per-`XrInstance` hook state for the
/// quark API-layer machinery.
impl quark::Hook for InstanceData {
    type Target = openxr::sys::Instance;
    type Factory = InstanceFactory;
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/api_layer/mod.rs | Rust | use glam::Vec3;
use log::debug;
use openxr::{
AsHandle,
sys::{Handle, Result as XrErr},
};
use quark::{Hooked as _, Low as _};
use std::{
ffi::CStr,
mem::MaybeUninit,
sync::{Arc, LazyLock, OnceLock},
};
mod instance;
mod session;
use instance::{
InstanceData, create_vulkan_device, create_vulkan_instance, enumerate_environment_blend_modes,
get_vulkan_device_extensions, get_vulkan_instance_extensions,
};
use session::{SessionData, begin_frame, begin_session, end_frame, end_session, wait_frame};
/// Vulkan instance extensions this layer injects when the application
/// creates its instance (see `create_vulkan_instance` /
/// `get_vulkan_instance_extensions`).
const REQUIRED_VK_INSTANCE_EXTENSIONS: &[&CStr] = &[
    ash::vk::KHR_EXTERNAL_MEMORY_CAPABILITIES_NAME,
    ash::vk::KHR_GET_PHYSICAL_DEVICE_PROPERTIES2_NAME,
    ash::vk::KHR_XCB_SURFACE_NAME,
];
/// Vulkan device extensions this layer injects when the application creates
/// its device.
const REQUIRED_VK_DEVICE_EXTENSIONS: &[&CStr] = &[ash::vk::KHR_COPY_COMMANDS2_NAME];
/// Converts a raw OpenXR status code into a `Result`: `SUCCESS` becomes
/// `Ok(())`, every other code (including non-`SUCCESS` success codes) is
/// returned as `Err`.
fn xrcvt(e: XrErr) -> Result<(), XrErr> {
    (e == XrErr::SUCCESS).then_some(()).ok_or(e)
}
/// Per-session resources backing the camera passthrough path.
struct CameraResources {
    // Background thread capturing frames from the headset camera.
    camera: crate::camera::CameraThread,
    // GPU pipeline that postprocesses captured camera frames.
    pp: crate::pipeline::Pipeline,
}
/// Borrowed mesh data extracted from an `XrPassthroughMeshTransformInfoHTC`
/// chained onto a passthrough composition layer.
struct PassthroughMesh<'a> {
    vertices: &'a [openxr::Vector3f],
    indices: &'a [u32],
    base_space: openxr::sys::Space,
    time: openxr::sys::Time,
    pose: openxr::Posef,
    scale: Vec3,
}
impl<'a> PassthroughMesh<'a> {
    /// Walks the `next` chain of a passthrough composition layer looking for
    /// an `XrPassthroughMeshTransformInfoHTC` structure and borrows its
    /// vertex/index data. Returns `None` when no such structure is chained.
    ///
    /// # Safety
    ///
    /// Every pointer in `layer`'s `next` chain must be null or point to a
    /// valid, correctly-typed OpenXR structure, and the vertex/index arrays
    /// it references must be live for `'a`.
    unsafe fn find_mesh(layer: &'a openxr::sys::CompositionLayerPassthroughHTC) -> Option<Self> {
        // Fix: the chain may be empty — don't dereference a null `next`
        // pointer (previously this deref happened unconditionally).
        if layer.next.is_null() {
            return None;
        }
        let mut curr = unsafe { &*(layer.next as *const openxr::sys::CompositionLayerBaseHeader) };
        while curr.ty != openxr::sys::PassthroughMeshTransformInfoHTC::TYPE {
            if curr.next.is_null() {
                return None;
            }
            curr = unsafe { &*(curr.next as *const _) };
        }
        // SAFETY: type tag checked above, so this reinterpretation is valid.
        let curr =
            unsafe { &*(curr as *const _ as *const openxr::sys::PassthroughMeshTransformInfoHTC) };
        Some(Self {
            vertices: unsafe { std::slice::from_raw_parts(curr.vertices, curr.vertex_count as _) },
            indices: unsafe { std::slice::from_raw_parts(curr.indices, curr.index_count as _) },
            base_space: curr.base_space,
            time: curr.time,
            scale: Vec3::new(curr.scale.x, curr.scale.y, curr.scale.z),
            pose: curr.pose,
        })
    }
}
/// Shared state behind a passthrough object; held by both the session state
/// and any app-created `XrPassthroughHTC` handles.
#[derive(Default)]
pub struct PassthroughInner {}
impl PassthroughInner {
    /// Stereo-camera calibration from the SteamVR config, if found.
    fn camera_cfg() -> Option<&'static crate::steam::StereoCamera> {
        CAMERA_CONFIG.as_ref()
    }
    /// PNG shown while the camera feed is not yet available.
    fn splash() -> &'static [u8] {
        SPLASH_IMAGE
    }
}
impl Drop for PassthroughInner {
    fn drop(&mut self) {
        debug!("Passthrough destroyed")
    }
}
/// Per-`XrPassthroughHTC` hook state.
pub struct PassthroughData {
    // Shared with the owning session's state.
    inner: Arc<PassthroughInner>,
    // Requested passthrough form (currently recorded but see `form` usage
    // elsewhere).
    form: openxr::sys::PassthroughFormHTC,
    /// Allocate 1 byte whose address is used as an unique id for the passthrough
    /// object.
    unique: Box<MaybeUninit<u8>>,
}
/// Factory creating [`PassthroughData`] hook state for app-created
/// passthrough handles.
pub struct PassthroughFactory;
unsafe impl quark::Factory<PassthroughData> for PassthroughFactory {
    /// Creates a passthrough object for a session. The handle value returned
    /// to the application is the address of a layer-owned allocation
    /// (`unique`), not a runtime handle.
    unsafe fn create(
        args: quark::CreateArgs<openxr::sys::PassthroughHTC>,
    ) -> Result<(quark::Facade<openxr::sys::PassthroughHTC>, PassthroughData), XrErr> {
        let mut session = args.0.registered_with_hook_mut()?;
        let (session_data, session) = session.both();
        debug!("Creating passthrough {:#x}", session.as_handle().into_raw());
        let form = unsafe { *args.1 }.form;
        let passthrough = session_data.maybe_get_or_add_passthrough()?;
        // `None` means passthrough is unavailable on this session.
        let passthrough = passthrough.ok_or(XrErr::ERROR_FEATURE_UNSUPPORTED)?;
        let ret = PassthroughData {
            inner: passthrough,
            form,
            unique: Box::new(MaybeUninit::uninit()),
        };
        // The boxed byte's address doubles as the unique handle value.
        let handle =
            openxr::sys::PassthroughHTC::from_raw(&*ret.unique as *const _ as usize as u64);
        unsafe { *args.2 = handle };
        let (high, _) = unsafe { handle.into_high(args) }?;
        Ok((high, ret))
    }
}
/// Registers [`PassthroughData`] as the per-`XrPassthroughHTC` hook state.
impl quark::Hook for PassthroughData {
    type Target = openxr::sys::PassthroughHTC;
    type Factory = PassthroughFactory;
}
// Process-wide Vulkan loader, shared by all sessions.
static VULKAN_LIBRARY: LazyLock<Arc<vulkano::library::VulkanLibrary>> =
    LazyLock::new(|| vulkano::library::VulkanLibrary::new().unwrap());
// Ensures the logger is initialized exactly once (see `InstanceFactory`).
static LOG_INIT: OnceLock<()> = OnceLock::new();
// Stereo-camera calibration read from the SteamVR config, if present.
static CAMERA_CONFIG: LazyLock<Option<crate::steam::StereoCamera>> =
    LazyLock::new(crate::steam::find_steam_config);
// Splash image embedded at build time, shown before the camera is ready.
static SPLASH_IMAGE: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/splash.png"));
// Entry point of the API layer: registers the per-handle hook state types
// and the OpenXR entry points this layer intercepts.
quark::api_layer! {
    hooks: {
        Session: SessionData,
        Instance: InstanceData,
        PassthroughHTC: PassthroughData,
    },
    override_fns: {
        xrGetVulkanInstanceExtensionsKHR: get_vulkan_instance_extensions,
        xrGetVulkanDeviceExtensionsKHR: get_vulkan_device_extensions,
        xrCreateVulkanInstanceKHR: create_vulkan_instance,
        xrCreateVulkanDeviceKHR: create_vulkan_device,
        xrBeginSession: begin_session,
        xrEndSession: end_session,
        xrWaitFrame: wait_frame,
        xrBeginFrame: begin_frame,
        xrEndFrame: end_frame,
        xrEnumerateEnvironmentBlendModes: enumerate_environment_blend_modes,
    },
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/api_layer/session.rs | Rust | use glam::UVec2;
use log::{debug, error, warn};
use openxr::{
AsHandle, CompositionLayerFlags, EnvironmentBlendMode, Extent2Di, Offset2Di, Rect2Di,
ReferenceSpaceType, SwapchainCreateFlags, SwapchainCreateInfo, SwapchainSubImage,
SwapchainUsageFlags, ViewStateFlags, sys::Handle as _,
};
use quark::{Hooked as _, Low as _, prelude::XrResult, try_xr, types::AnySession};
use smallvec::smallvec;
use std::{collections::HashSet, sync::Arc};
use vulkano::{
Handle as _,
command_buffer::{
AutoCommandBufferBuilder, BlitImageInfo, CommandBufferUsage, ImageBlit,
allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
},
descriptor_set::allocator::{DescriptorSetAllocator, StandardDescriptorSetAllocator},
device::QueueCreateInfo,
image::{ImageCreateInfo, ImageLayout, ImageUsage},
memory::allocator::{MemoryAllocator, StandardMemoryAllocator},
sync::GpuFuture as _,
};
use crate::api_layer::{
CameraResources, PassthroughInner, REQUIRED_VK_DEVICE_EXTENSIONS,
REQUIRED_VK_INSTANCE_EXTENSIONS, XrErr, xrcvt,
};
/// Lifecycle state of a wrapped OpenXR session.
#[allow(clippy::large_enum_variant)]
enum SessionState {
    /// Session created but not begun (or ended again).
    Idle {
        // Kept alive across begin/end cycles so app-held passthrough
        // handles remain valid.
        passthrough: Option<Arc<PassthroughInner>>,
    },
    /// Session begun via `xrBeginSession`.
    Running {
        view_type: openxr::sys::ViewConfigurationType,
        // Passthrough object plus the camera/pipeline resources feeding it.
        passthrough: Option<(Arc<PassthroughInner>, CameraResources)>,
        /// Extra swapchain for our own rendering needs
        swapchain: openxr::Swapchain<openxr::Vulkan>,
        // vulkano wrappers around `swapchain`'s runtime-owned images.
        images: Vec<Arc<vulkano::image::Image>>,
        // Per-eye render size; the swapchain image is `2 * size.x` wide.
        size: UVec2,
        // Image index acquired in begin_frame, consumed by end_frame.
        image_index: Option<u32>,
    },
}
impl SessionState {
    /// The active view configuration type, or `None` while the session is
    /// idle.
    fn view_type(&self) -> Option<openxr::sys::ViewConfigurationType> {
        if let Self::Running { view_type, .. } = self {
            Some(*view_type)
        } else {
            None
        }
    }
}
/// State for a Vulkan session this layer actively manages.
struct SessionDataInner {
    // Vulkan device/queue the session renders with.
    device: Arc<vulkano::device::Device>,
    queue: Arc<vulkano::device::Queue>,
    // Idle/Running state machine, including passthrough resources.
    state: SessionState,
    system_id: openxr::SystemId,
    // Allocators shared by the camera pipeline and frame compositing.
    allocator: Arc<dyn MemoryAllocator>,
    cmdbuf_allocator: Arc<dyn CommandBufferAllocator>,
    descriptor_set_allocator: Arc<dyn DescriptorSetAllocator>,
    // Reference space used to locate views at frame end.
    space: openxr::Space,
}
impl SessionDataInner {
    /// Builds the camera capture thread and the postprocessing pipeline used
    /// to feed passthrough rendering.
    ///
    /// `camera_cfg` is the optional stereo calibration, `splash` the image
    /// shown until real frames arrive, `size` the per-eye output size.
    /// All failures are logged and mapped to `ERROR_RUNTIME_FAILURE`.
    fn create_camera_resources(
        &self,
        camera_cfg: Option<&crate::steam::StereoCamera>,
        splash: &[u8],
        size: UVec2,
    ) -> Result<CameraResources, XrErr> {
        let xdg = xdg::BaseDirectories::new();
        let pipeline_cache =
            crate::config::load_pipeline_cache(&self.device, &xdg).map_err(|e| {
                warn!("Failed to load pipeline cache {e:#}");
                XrErr::ERROR_RUNTIME_FAILURE
            })?;
        let pp = crate::pipeline::Pipeline::new(
            &self.device,
            &self.allocator,
            &self.cmdbuf_allocator,
            &self.queue,
            &self.descriptor_set_allocator,
            true,
            camera_cfg,
            ImageLayout::ShaderReadOnlyOptimal,
            ImageUsage::SAMPLED | ImageUsage::TRANSFER_SRC,
            pipeline_cache,
            UVec2::new(crate::CAMERA_SIZE, crate::CAMERA_SIZE),
            size,
        )
        .map_err(|e| {
            warn!("Failed to create pipeline {e:#}");
            XrErr::ERROR_RUNTIME_FAILURE
        })?;
        let camera = crate::find_index_camera().map_err(|e| {
            warn!("Cannot find camera {e:#}");
            XrErr::ERROR_RUNTIME_FAILURE
        })?;
        let camera = v4l::Device::with_path(camera).map_err(|e| {
            warn!("Failed to open camera {e:#}");
            XrErr::ERROR_RUNTIME_FAILURE
        })?;
        let camera = crate::camera::CameraThread::new(camera, splash);
        // NOTE(review): `resume` is assumed infallible once the thread is
        // created — confirm `CameraThread::resume`'s failure modes.
        camera.resume().unwrap();
        Ok(CameraResources { camera, pp })
    }
    /// Returns the session's passthrough object, creating it on first use.
    ///
    /// While the session is running this also creates the camera resources
    /// that feed passthrough rendering, which can fail; in the idle state
    /// creation cannot fail (the camera is set up later in `begin`).
    fn get_or_add_passthrough(&mut self) -> Result<Arc<PassthroughInner>, XrErr> {
        Ok(match &mut self.state {
            &mut SessionState::Running {
                ref passthrough,
                size,
                ..
            } => {
                if let Some(passthrough) = passthrough {
                    passthrough.0.clone()
                } else {
                    // Create camera resources first: it borrows `self`
                    // immutably, so the state can only be mutated afterwards.
                    let camera = self.create_camera_resources(
                        PassthroughInner::camera_cfg(),
                        PassthroughInner::splash(),
                        size,
                    )?;
                    let new_pt = Arc::new(PassthroughInner::default());
                    // Re-match to regain mutable access to the state.
                    match &mut self.state {
                        SessionState::Running { passthrough, .. } => {
                            *passthrough = Some((new_pt.clone(), camera))
                        }
                        _ => unreachable!(),
                    }
                    new_pt
                }
            }
            SessionState::Idle { passthrough } => passthrough.get_or_insert_default().clone(),
        })
    }
fn is_running(&self) -> bool {
matches!(&self.state, SessionState::Running { .. })
}
fn begin(
&mut self,
session: &openxr::Session<openxr::Vulkan>,
view_type: openxr::ViewConfigurationType,
) -> Result<(), XrErr> {
match &self.state {
SessionState::Running { .. } => Err(XrErr::ERROR_SESSION_RUNNING),
SessionState::Idle { passthrough } => {
let instance = session.instance();
let cfgs = instance.enumerate_view_configuration_views(
self.system_id,
self.state
.view_type()
.ok_or(XrErr::ERROR_SESSION_NOT_RUNNING)?,
)?;
if cfgs.len() != 1 && cfgs.len() != 2 {
error!("unsupported view count? {}", cfgs.len());
}
let width = cfgs[0]
.recommended_image_rect_width
.max(cfgs[1].recommended_image_rect_width);
let height = cfgs[0]
.recommended_image_rect_height
.max(cfgs[1].recommended_image_rect_height);
let size = UVec2::new(width, height);
let passthrough = if let Some(p) = passthrough {
let camera = self.create_camera_resources(
PassthroughInner::camera_cfg(),
PassthroughInner::splash(),
size,
)?;
Some((p.clone(), camera))
} else {
None
};
let formats = session
.enumerate_swapchain_formats()?
.into_iter()
.map(|f| vulkano::format::Format::try_from(ash::vk::Format::from_raw(f as i32)))
.collect::<Result<HashSet<_>, _>>()
.map_err(|()| {
warn!("Invalid swapchain formats");
XrErr::ERROR_RUNTIME_FAILURE
})?;
const PREFERRED_FORMATS: [vulkano::format::Format; 4] = [
vulkano::format::Format::R8G8B8A8_UNORM,
vulkano::format::Format::B8G8R8A8_UNORM,
vulkano::format::Format::R8G8B8A8_SRGB,
vulkano::format::Format::B8G8R8A8_SRGB,
];
let Some(format) = PREFERRED_FORMATS
.iter()
.find(|f| formats.contains(f))
.copied()
else {
warn!("No suitable format found for swapchain");
return Err(XrErr::ERROR_RUNTIME_FAILURE);
};
let swapchain = session.create_swapchain(&SwapchainCreateInfo {
array_size: 1,
face_count: 1,
format: format as u32,
mip_count: 1,
sample_count: cfgs[0].recommended_swapchain_sample_count,
usage_flags: SwapchainUsageFlags::COLOR_ATTACHMENT
| SwapchainUsageFlags::TRANSFER_DST,
create_flags: SwapchainCreateFlags::EMPTY,
width: width * 2,
height,
})?;
let images = swapchain
.enumerate_images()?
.into_iter()
.map(|raw_img| unsafe {
Ok::<_, XrErr>(Arc::new(
vulkano::image::sys::RawImage::from_handle_borrowed(
&self.device,
ash::vk::Image::from_raw(raw_img),
&ImageCreateInfo {
format,
extent: [width * 2, height, 1],
array_layers: 1,
mip_levels: 1,
usage: ImageUsage::COLOR_ATTACHMENT | ImageUsage::TRANSFER_DST,
..Default::default()
},
)
.map_err(|e| {
warn!("Failed to wrap image {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?
.assume_bound(),
))
})
.collect::<Result<_, _>>()?;
self.state = SessionState::Running {
view_type,
passthrough,
image_index: None,
swapchain,
images,
size,
};
Ok(())
}
}
}
/// # Panic
///
/// panics if session is not running.
fn end(&mut self) {
self.state = match &self.state {
SessionState::Idle { .. } => {
panic!("session not running")
}
SessionState::Running { passthrough, .. } => SessionState::Idle {
passthrough: passthrough.as_ref().map(|(p, _)| p.clone()),
},
};
}
}
/// Per-`XrSession` hook state. `inner` is populated for Vulkan sessions this
/// layer manages and `None` for sessions it merely forwards.
#[derive(Default)]
pub struct SessionData {
    inner: Option<SessionDataInner>,
}
/// Borrowed, safe view of an `XrFrameEndInfo` structure.
struct FrameEndInfo<'a> {
    display_time: openxr::Time,
    environment_blend_mode: openxr::EnvironmentBlendMode,
    // `None` entries mirror NULL layer pointers from the application.
    layers: &'a [Option<&'a openxr::sys::CompositionLayerBaseHeader>],
}
impl FrameEndInfo<'_> {
    /// Borrows the fields of a raw `XrFrameEndInfo`.
    ///
    /// # Safety
    ///
    /// `info.layers` must point to `info.layer_count` valid (or NULL) layer
    /// pointers that outlive the returned value.
    unsafe fn from_raw(info: &openxr::sys::FrameEndInfo) -> Self {
        Self {
            display_time: info.display_time,
            environment_blend_mode: info.environment_blend_mode,
            layers: unsafe {
                std::slice::from_raw_parts(
                    // Safety: Option<&T> and *const T are bitwise identical.
                    info.layers as *const Option<&openxr::sys::CompositionLayerBaseHeader>,
                    info.layer_count as _,
                )
            },
        }
    }
    /// Rebuilds a raw `XrFrameEndInfo` from this view.
    ///
    /// NOTE(review): `next` is set to null, so any structures chained on the
    /// application's original `next` pointer are dropped when the call is
    /// forwarded — confirm no extension structs need to survive this
    /// round-trip.
    fn as_raw(&self) -> openxr::sys::FrameEndInfo {
        openxr::sys::FrameEndInfo {
            ty: openxr::sys::FrameEndInfo::TYPE,
            next: std::ptr::null_mut(),
            display_time: self.display_time,
            environment_blend_mode: self.environment_blend_mode,
            layer_count: self.layers.len() as _,
            layers: self.layers.as_ptr() as *const _,
        }
    }
}
impl SessionData {
unsafe fn end_frame(
&mut self,
session: &quark::types::AnySession,
instance: &openxr::Instance,
info: &FrameEndInfo<'_>,
) -> Result<(), XrErr> {
let Some(data) = &mut self.inner else {
// We are not wrapping this session, passed it through.
debug!("Unhandled session");
return xrcvt(unsafe {
(instance.fp().end_frame)(session.as_handle(), &info.as_raw())
});
};
let quark::types::AnySession::Vulkan(xr_vk_session) = session else {
unreachable!()
};
let has_passthrough = info
.layers
.iter()
.filter(|l| {
l.is_some_and(|l| l.ty == openxr::sys::CompositionLayerPassthroughHTC::TYPE)
})
.count();
if has_passthrough > 1 {
warn!("More than one passthrough layer, not supported");
return Err(XrErr::ERROR_VALIDATION_FAILURE);
}
let passthrough_layer = info.layers.iter().find_map(|l| {
if let &Some(l) = l
&& l.ty == openxr::sys::CompositionLayerPassthroughHTC::TYPE
{
Some(l)
} else {
None
}
});
if passthrough_layer.is_none()
&& info.environment_blend_mode != EnvironmentBlendMode::ALPHA_BLEND
{
// No passthrough layer, we can just pass the frame to openxr.
return xrcvt(unsafe {
(instance.fp().end_frame)(session.as_handle(), &info.as_raw())
});
}
let passthrough = data.get_or_add_passthrough()?;
if let Some(layer) = passthrough_layer {
// SAFETY: we checked the type is CompositionLayerPassthroughHTC::TYPE
let layer: &openxr::sys::CompositionLayerPassthroughHTC =
unsafe { &*(layer as *const openxr::sys::CompositionLayerBaseHeader).cast() };
let obj = layer.passthrough.registered_with_hook()?;
let data = obj.hook();
if !Arc::ptr_eq(&passthrough, &data.inner) {
warn!("app supplied passthrough object is invalid");
return Err(XrErr::ERROR_VALIDATION_FAILURE);
}
}
let SessionState::Running {
image_index,
view_type,
passthrough,
images,
swapchain,
size,
..
} = &mut data.state
else {
return Err(XrErr::ERROR_SESSION_NOT_RUNNING);
};
let (_, camera) = passthrough.as_mut().unwrap();
let (view_state_flags, view_locations) =
xr_vk_session.locate_views(*view_type, info.display_time, &data.space)?;
if !view_state_flags
.contains(ViewStateFlags::POSITION_VALID | ViewStateFlags::ORIENTATION_VALID)
|| image_index.is_none()
{
if image_index.is_none() {
warn!("end_frame called with begin_frame");
} else {
warn!("Pose or orientation invalid {view_state_flags:?}, skip passthrough layer");
}
let layers = info
.layers
.iter()
.filter(|l| {
l.is_none_or(|l| l.ty != openxr::sys::CompositionLayerPassthroughHTC::TYPE)
})
.collect::<Vec<_>>();
let mut info = info.as_raw();
info.layer_count = layers.len() as _;
info.layers = layers.as_ptr() as *const _;
return xrcvt(unsafe { (instance.fp().end_frame)(session.as_handle(), &info) });
}
let image_index = image_index.take().unwrap();
// Copy camera image to swapchain
let mut cmdbuf = AutoCommandBufferBuilder::primary(
data.cmdbuf_allocator.clone(),
data.queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.map_err(|e| {
warn!("Failed to create command buffer {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?;
let camera_frame = camera.camera.frame();
camera.pp.maybe_postprocess(&camera_frame).map_err(|e| {
warn!("Failed to postprocess camera frame {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?;
let (camera_image, fut) = camera.pp.image();
let camera_extent = camera.pp.image_extent();
cmdbuf
.blit_image(BlitImageInfo {
src_image: camera_image.clone(),
dst_image: images[image_index as usize].clone(),
regions: smallvec![ImageBlit {
src_subresource: camera_image.subresource_layers(),
src_offsets: [[0, 0, 0], [camera_extent[0], camera_extent[1], 1],],
dst_subresource: images[image_index as usize].subresource_layers(),
dst_offsets: [[0, 0, 0], [size.x * 2, size.y, 1],],
..Default::default()
}],
..BlitImageInfo::new(camera_image.clone(), images[image_index as usize].clone())
})
.map_err(|e| {
warn!("Failed to blit image {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?;
let cmdbuf = cmdbuf.build().map_err(|e| {
warn!("Failed to build command buffer {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?;
fut.then_execute(data.queue.clone(), cmdbuf)
.map_err(|e| {
warn!("Failed to execute command buffer {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?
.then_signal_fence_and_flush()
.map_err(|e| {
warn!("Failed to flush command buffer {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?
.wait(None)
.map_err(|e| {
warn!("Failed to wait for fence {e:#}");
XrErr::ERROR_RUNTIME_FAILURE
})?;
assert_eq!(view_locations.len(), 2);
swapchain.release_image()?;
log::trace!("{:?}", view_locations[0].fov);
log::trace!("{:?}", view_locations[1].fov);
log::trace!("{:?}", view_locations[0].pose);
log::trace!("{:?}", view_locations[1].pose);
let views = [
openxr::CompositionLayerProjectionView::new()
.sub_image(
SwapchainSubImage::new()
.swapchain(swapchain)
.image_rect(Rect2Di {
offset: Offset2Di { x: 0, y: 0 },
extent: Extent2Di {
width: size.x as _,
height: size.y as _,
},
}),
)
.pose(view_locations[0].pose)
.fov(view_locations[0].fov),
openxr::CompositionLayerProjectionView::new()
.sub_image(
SwapchainSubImage::new()
.swapchain(swapchain)
.image_rect(Rect2Di {
offset: Offset2Di {
x: size.x as _,
y: 0,
},
extent: Extent2Di {
width: size.x as _,
height: size.y as _,
},
}),
)
.pose(view_locations[1].pose)
.fov(view_locations[1].fov),
];
let replaced_passthrough_layer = openxr::CompositionLayerProjection::new()
.space(&data.space)
.layer_flags(CompositionLayerFlags::BLEND_TEXTURE_SOURCE_ALPHA)
.views(&views);
let new_layer = unsafe {
std::mem::transmute::<
Option<&openxr::sys::CompositionLayerProjection>,
Option<&openxr::sys::CompositionLayerBaseHeader>,
>(Some(replaced_passthrough_layer.as_raw()))
};
let pos = info.layers.iter().position(|l| {
l.is_some_and(|l| l.ty == openxr::sys::CompositionLayerPassthroughHTC::TYPE)
});
let new_layers = if let Some(pos) = pos {
let mut layers = info.layers.to_vec();
layers[pos] = new_layer;
layers
} else {
// ALPHA_BLEND mode, insert the camera layer at the 0th position.
assert_eq!(
info.environment_blend_mode,
EnvironmentBlendMode::ALPHA_BLEND
);
let mut layers = vec![new_layer];
layers.extend(info.layers.iter());
layers
};
let mut info2 = info.as_raw();
if info2.environment_blend_mode == EnvironmentBlendMode::ALPHA_BLEND {
info2.environment_blend_mode = EnvironmentBlendMode::OPAQUE;
}
info2.layers = new_layers.as_ptr() as *const _;
xrcvt(unsafe { (instance.fp().end_frame)(session.as_handle(), &info2) })
}
/// Returns the passthrough object if one is already attached. Otherwise try to create one.
/// Returns `Ok(None)` if the current session is not supported.
pub(super) fn maybe_get_or_add_passthrough(
&mut self,
) -> Result<Option<Arc<PassthroughInner>>, XrErr> {
let Some(inner) = self.inner.as_mut() else {
return Ok(None);
};
Ok(Some(inner.get_or_add_passthrough()?))
}
}
impl quark::Hook for SessionData {
    type Target = openxr::sys::Session;
    type Factory = quark::FactoryOf<Self>;
    /// Called when the app creates an OpenXR session. Wraps the app-owned
    /// Vulkan instance/physical device/device/queue handles in borrowed
    /// vulkano objects so the passthrough layer can render with them later.
    ///
    /// Returns `Self::default()` (an inert hook, `inner: None`) for sessions
    /// we cannot or choose not to handle, so they pass through untouched.
    fn on_create(
        session: &AnySession,
        create_info: quark::types::SessionCreateInfo,
    ) -> XrResult<Self> {
        debug!("on_create(): OpenXR session");
        // Do we have vulkan?
        let AnySession::Vulkan(xr_vk_session) = session else {
            warn!("Not a vulkan session, don't know how to handle it");
            return Ok(Self::default());
        };
        let gb = create_info.graphics_binding.unwrap();
        let quark::types::GraphicsBinding::Vulkan(gb) = gb else {
            warn!("Session type is vulkan, but no graphics binding offered");
            return Err(XrErr::ERROR_VALIDATION_FAILURE);
        };
        let instance = xr_vk_session
            .instance()
            .as_handle()
            .registered_with_hook()?;
        // Look up the API version recorded for this Vulkan instance by our
        // instance hook, keyed by the raw VkInstance handle.
        let api_version = instance
            .hook()
            .instance_api_version()
            .get(&(gb.instance as u64));
        let api_version = if let Some(v) = api_version {
            *v
        } else {
            let req = xr_vk_session
                .instance()
                .graphics_requirements::<openxr::Vulkan>(create_info.system_id)?;
            // This vulkan instance wasn't created via `xrCreateVulkanInstanceKHR`, we have to
            // assume minimum supported api version.
            (req.min_api_version_supported.into_raw() as u32).into()
        };
        log::info!("Vulkan API version: {api_version}");
        let enabled_vk_instance_extensions = REQUIRED_VK_INSTANCE_EXTENSIONS
            .iter()
            .map(|&e| e.to_str().unwrap())
            .collect();
        let vk_create_info = vulkano::instance::InstanceCreateInfo {
            enabled_extensions: &enabled_vk_instance_extensions,
            max_api_version: Some(api_version),
            ..Default::default()
        };
        // Wrap (do not take ownership of) the app's VkInstance; vulkano will
        // not destroy a borrowed handle on drop.
        let vk_instance = unsafe {
            vulkano::instance::Instance::from_handle_borrowed(
                &super::VULKAN_LIBRARY,
                ash::vk::Handle::from_raw(gb.instance as usize as u64),
                &vk_create_info,
            )
        };
        let physical_device = match unsafe {
            vulkano::device::physical::PhysicalDevice::from_handle(
                &vk_instance,
                ash::vk::Handle::from_raw(gb.physical_device as usize as u64),
            )
        } {
            Ok(pd) => pd,
            Err(e) => {
                warn!("Failed to wrap vulkan physical device {e}");
                return Ok(Self::default());
            }
        };
        // One priority entry per queue index up to (and including) the one the
        // app requested, so `queue.nth(gb.queue_index)` below can resolve it.
        let queues = vec![0.0; gb.queue_index as usize + 1];
        let enabled_vk_device_extensions = REQUIRED_VK_DEVICE_EXTENSIONS
            .iter()
            .map(|&e| e.to_str().unwrap())
            .collect();
        let vk_create_info = vulkano::device::DeviceCreateInfo {
            queue_create_infos: &[QueueCreateInfo {
                queue_family_index: gb.queue_family_index,
                queues: &queues,
                ..Default::default()
            }],
            enabled_extensions: &enabled_vk_device_extensions,
            ..Default::default()
        };
        // Borrow the app's VkDevice as well; `queue` iterates the queues
        // described by `vk_create_info`.
        let (device, mut queue) = unsafe {
            vulkano::device::Device::from_handle_borrowed(
                &physical_device,
                ash::vk::Handle::from_raw(gb.device as usize as u64),
                &vk_create_info,
            )
        };
        let Some(queue) = queue.nth(gb.queue_index as _) else {
            warn!("Failed to get requested Vulkan queue");
            return Ok(Self::default());
        };
        let allocator = Arc::new(StandardMemoryAllocator::new(&device, &Default::default()));
        let cmdbuf_allocator = Arc::new(StandardCommandBufferAllocator::new(
            &device,
            &Default::default(),
        ));
        let descriptor_set_allocator = Arc::new(StandardDescriptorSetAllocator::new(
            &device,
            &Default::default(),
        ));
        // STAGE reference space with identity pose; used later to locate views
        // and to anchor the replacement projection layer.
        let space = xr_vk_session
            .create_reference_space(ReferenceSpaceType::STAGE, openxr::Posef::IDENTITY)?;
        Ok(Self {
            inner: Some(SessionDataInner {
                device,
                queue,
                state: SessionState::Idle { passthrough: None },
                space,
                system_id: create_info.system_id,
                allocator,
                cmdbuf_allocator,
                descriptor_set_allocator,
            }),
        })
    }
}
/// Hook for `xrBeginSession`: forwards to the runtime, and transitions our
/// per-session state to running for sessions we wrap.
///
/// # Safety
/// `info` must be a valid pointer to an `XrSessionBeginInfo`; it is
/// dereferenced below. Called by the OpenXR loader with runtime-provided
/// pointers.
pub(super) unsafe extern "system" fn begin_session(
    session: openxr::sys::Session,
    info: *const openxr::sys::SessionBeginInfo,
) -> XrErr {
    debug!("begin session {:#x}", session.into_raw());
    let mut wrapped_session = try_xr!(session.registered_with_hook_mut());
    let instance = try_xr!(quark::find_instance(session));
    let info = &unsafe { *info };
    let wrapped_instance = try_xr!(instance.registered_with_hook());
    let (data, wrapped_session) = wrapped_session.both();
    let Some(data) = &mut data.inner else {
        // We are not wrapping this session, passed it through.
        debug!("Unhandled session");
        return unsafe { (wrapped_instance.get().fp().begin_session)(session, info) };
    };
    // `inner` is only populated for Vulkan sessions (see `on_create`).
    let quark::types::AnySession::Vulkan(xr_vk_session) = wrapped_session else {
        unreachable!()
    };
    if data.is_running() {
        return XrErr::ERROR_SESSION_RUNNING;
    }
    // Transition first, if `session.begin` failed then the session will still not be running, and
    // we don't need to do anything.
    try_xr!(xr_vk_session.begin(info.primary_view_configuration_type));
    try_xr!(data.begin(xr_vk_session, info.primary_view_configuration_type));
    XrErr::SUCCESS
}
/// Hook for `xrEndSession`: forwards to the runtime and tears down our
/// per-session running state for sessions we wrap.
///
/// # Safety
/// Called by the OpenXR loader; `raw_session` must be a session handle
/// previously seen by this layer.
pub(super) unsafe extern "system" fn end_session(raw_session: openxr::sys::Session) -> XrErr {
    debug!("end session {:#x}", raw_session.into_raw());
    let instance = try_xr!(quark::find_instance(raw_session));
    let mut session = try_xr!(raw_session.registered_with_hook_mut());
    let (data, session) = session.both();
    let Some(data) = &mut data.inner else {
        // We are not wrapping this session, passed it through.
        debug!("Unhandled session, like passthrough extension wasn't enabled");
        return unsafe { (try_xr!(instance.registered()).fp().end_session)(raw_session) };
    };
    if !data.is_running() {
        return XrErr::ERROR_SESSION_NOT_RUNNING;
    }
    // End the runtime session first; only drop our state once that succeeded.
    try_xr!(session.end());
    data.end();
    XrErr::SUCCESS
}
/// Hook for `xrBeginFrame`: forwards to the runtime, then acquires a
/// swapchain image for the passthrough layer (stored in `image_index` and
/// consumed by `end_frame`).
///
/// # Safety
/// Called by the OpenXR loader; `info` is forwarded verbatim to the runtime.
pub(super) unsafe extern "system" fn begin_frame(
    raw_session: openxr::sys::Session,
    info: *mut openxr::sys::FrameBeginInfo,
) -> XrErr {
    debug!("begin frame {:#x} before", raw_session.into_raw());
    let instance = try_xr!(quark::find_instance(raw_session));
    // Lock the registry entry for the xrSession. this is because xrBeginFrame might unblock
    // a currently blocked xrWaitFrame. Our override for xrWaitFrame will acquire the xrSession
    // from the registry to update `should_render`. We want to make sure it will only get the lock
    // after we have set `should_render` to None.
    let mut session = try_xr!(raw_session.registered_with_hook_mut());
    let ret = unsafe { (try_xr!(instance.registered()).fp().begin_frame)(raw_session, info) };
    if ret != XrErr::SUCCESS {
        return ret;
    }
    debug!("begin frame {:#x} after", raw_session.into_raw());
    let data = session.hook();
    let Some(data) = &mut data.inner else {
        // We are not wrapping this session, passed it through.
        debug!("Unhandled session");
        return XrErr::SUCCESS;
    };
    let SessionState::Running {
        image_index,
        swapchain,
        ..
    } = &mut data.state
    else {
        return XrErr::ERROR_SESSION_NOT_RUNNING;
    };
    if image_index.is_some() {
        // The app might have called two begin_frames in a row, or maybe its last
        // end_frame didn't have a passthrough layer, etc. We can use the image we
        // already have.
        return XrErr::SUCCESS;
    }
    *image_index = Some(try_xr!(swapchain.acquire_image()));
    try_xr!(swapchain.wait_image(openxr::Duration::INFINITE));
    XrErr::SUCCESS
}
/// Hook for `xrEndFrame`: delegates to `SessionData::end_frame`, which
/// replaces/injects the passthrough layer before submitting to the runtime.
///
/// # Safety
/// `info` must be a valid pointer to an `XrFrameEndInfo`; it is dereferenced
/// here. Called by the OpenXR loader.
pub(super) unsafe extern "system" fn end_frame(
    raw_session: openxr::sys::Session,
    info: *const openxr::sys::FrameEndInfo,
) -> XrErr {
    debug!("end frame {:#x}", raw_session.into_raw());
    let instance = try_xr!(quark::find_instance(raw_session));
    let instance = try_xr!(instance.registered());
    let mut session = try_xr!(raw_session.registered_with_hook_mut());
    let info = unsafe { FrameEndInfo::from_raw(&*info) };
    let (data, session) = session.both();
    try_xr!(unsafe { data.end_frame(session, &instance, &info) });
    XrErr::SUCCESS
}
/// Hook for `xrWaitFrame`: a thin pass-through that deliberately avoids
/// holding the object-registry lock while blocked in the runtime's wait.
///
/// # Safety
/// `wait_info` and `frame_state` are forwarded verbatim to the runtime and
/// must satisfy `xrWaitFrame`'s contract. Called by the OpenXR loader.
pub(super) unsafe extern "system" fn wait_frame(
    raw_session: openxr::sys::Session,
    wait_info: *const openxr::sys::FrameWaitInfo,
    frame_state: *mut openxr::sys::FrameState,
) -> XrErr {
    debug!("wait frame {:#x}, before", raw_session.into_raw());
    let fp = {
        // Can't keep the `registered` high-level instance object, because it
        // locks the object registry. but `xrWaitFrame` must be callable from any thread, while
        // `Begin/EndFrame` might be called concurrently from other threads, which needs this
        // registry entry lock too, and `xrWaitFrame` might enter into wait.
        let instance = try_xr!(quark::find_instance(raw_session));
        try_xr!(instance.registered()).fp().wait_frame
    };
    // We didn't keep the frame waiter given by openxr crate, just call the raw function.
    unsafe { (fp)(raw_session, wait_info, frame_state) }
    // We don't check `should_render` here. Because we need to be prepared to deal with a
    // misbehaving application that submits frames despite `should_render` being false.
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/camera.rs | Rust | use std::{
sync::{Arc, mpsc},
thread::JoinHandle,
};
use super::FrameInfo;
use anyhow::{Context, anyhow};
use arc_swap::{ArcSwap, Guard};
use glam::UVec2;
use log::{info as debug, warn};
use smallvec::SmallVec;
use v4l::video::Capture;
/// Commands sent from `CameraThread`'s public methods to the capture thread.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
enum Control {
    /// Pause the camera thread so it's not capturing new frames, but keep the camera device open
    Pause,
    /// Resume capturing
    Resume,
    /// Shut the capture thread down.
    Exit,
}
/// Owns the background thread that captures frames from the camera device.
pub struct CameraThread {
    /// Latest frame; atomically swapped in by the capture thread.
    frame: Arc<ArcSwap<FrameInfo>>,
    /// Channel for pause/resume/exit commands to the capture thread.
    control: mpsc::Sender<Control>,
    /// Handle used to join the capture thread in `exit`.
    join: JoinHandle<()>,
}
impl CameraThread {
pub fn new(camera: v4l::Device, splash_png: &[u8]) -> Self {
let (tx, rx) = mpsc::channel();
let img = image::load_from_memory_with_format(splash_png, image::ImageFormat::Png)
.unwrap()
.into_rgba8();
let extent = [img.width(), img.height()];
assert!(extent[0] % 2 == 0);
let frame = FrameInfo {
frame: img.into_raw(),
frame_time: std::time::Instant::now(),
needs_postprocess: false, // splash image doesn't need postprocessing
size: UVec2::new(extent[0] / 2, extent[1]),
};
let frame = Arc::new(ArcSwap::new(Arc::new(frame)));
let join = std::thread::spawn({
let frame = frame.clone();
move || Self::run(rx, camera, frame)
});
Self {
frame,
join,
control: tx,
}
}
pub fn frame(&self) -> Guard<Arc<FrameInfo>> {
self.frame.load()
}
pub fn pause(&self) -> anyhow::Result<()> {
Ok(self.control.send(Control::Pause)?)
}
pub fn resume(&self) -> anyhow::Result<()> {
Ok(self.control.send(Control::Resume)?)
}
pub fn exit(self) -> anyhow::Result<()> {
self.control.send(Control::Exit)?;
self.join.join().map_err(|e| anyhow!("{e:?}"))
}
fn run(control: mpsc::Receiver<Control>, camera: v4l::Device, frame: Arc<ArcSwap<FrameInfo>>) {
// hold the thread until we get a go signal
match control.recv() {
Err(std::sync::mpsc::RecvError) | Ok(Control::Exit) => {
debug!("camera thread stopped early");
return;
}
Ok(Control::Resume) => (),
Ok(Control::Pause) => panic!("Invalid pause command"),
}
let Err(e) = Self::run_inner(control, camera, frame) else {
debug!("camera thread stopped");
return;
};
warn!("Camera thread stopped: {e:#}");
}
fn run_inner(
control: mpsc::Receiver<Control>,
camera: v4l::Device,
frame: Arc<ArcSwap<FrameInfo>>,
) -> anyhow::Result<()> {
let mut first_frame_time = None;
// We want to make the latency as low as possible, so only set a single buffer.
let mut video_stream =
v4l::prelude::MmapStream::with_buffers(&camera, v4l::buffer::Type::VideoCapture, 1)
.context("cannot open camera mmap stream")?;
let mut is_splash = true;
const MAX_POOL_SIZE: usize = 2;
let mut pool = SmallVec::<[Arc<FrameInfo>; 2]>::new();
let camera_format = camera.format()?;
if camera_format.width % 2 != 0 {
return Err(anyhow!("Camera width is not even"));
}
let find_free = |pool: &mut SmallVec<_>| {
// Find unused image from pool
let index = pool.iter_mut().position(|i| Arc::get_mut(i).is_some())?;
Some(pool.swap_remove(index))
};
loop {
if let Some(c) = match control.try_recv() {
Ok(c) => Some(c),
Err(mpsc::TryRecvError::Empty) => None,
Err(mpsc::TryRecvError::Disconnected) => break Ok(()),
} {
match c {
Control::Pause => {
let Ok(c) = control.recv() else {
break Ok(());
};
assert_eq!(c, Control::Resume, "unexpected command {c:?}");
}
Control::Exit => break Ok(()),
Control::Resume => panic!("unexpected resume"),
}
}
log::trace!("getting camera frame");
let (frame_data, metadata) = v4l::io::traits::CaptureStream::next(&mut video_stream)?;
let frame_time = if let Some((camera_reference, reference)) = first_frame_time {
let camera_elapsed =
std::time::Duration::from(metadata.timestamp) - camera_reference;
reference + camera_elapsed
} else {
let now = std::time::Instant::now();
first_frame_time = Some((metadata.timestamp.into(), now));
now
};
log::trace!("got camera frame {:?}", frame_time);
let new_frame = find_free(&mut pool)
.map(|mut fi| {
log::trace!("Reusing frame");
let mfi = Arc::get_mut(&mut fi).unwrap();
mfi.frame.copy_from_slice(frame_data);
mfi.frame_time = frame_time;
mfi.needs_postprocess = true;
fi
})
.unwrap_or_else(|| {
log::debug!("Allocated new frame");
FrameInfo {
frame: frame_data.to_vec(),
frame_time: std::time::Instant::now(),
needs_postprocess: true,
size: UVec2::new(camera_format.width / 2, camera_format.height),
}
.into()
});
let old_frame = frame.swap(new_frame);
// splash image isn't necessarily `frame_data` sized, so don't reuse it.
// if we already have too many frames, just release the old one.
if !is_splash && pool.len() < MAX_POOL_SIZE {
pool.push(old_frame);
} else {
log::info!("Releasing frame image, is_splash {is_splash}");
}
is_splash = false;
// log::debug!("got camera frame {}", frame_data.len());
}
}
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/config.rs | Rust | use ed25519_dalek::Signer;
use std::{io::Write, sync::Arc};
use log::warn;
use serde::{Deserialize, Serialize};
/// Because your eye and the camera is at different physical locations, it is impossible
/// to project camera view into VR space perfectly. There are trade offs approximating
/// this projection. (viewing range means things too close to you will give you double vision).
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize, Clone, Copy, PartialOrd, Ord, Default)]
pub enum ProjectionMode {
    /// in this mode, we assume your eyes are at the cameras' physical location. this mode
    /// has larger viewing range, but everything will look smaller to you.
    #[default]
    FromCamera,
    /// in this mode, we assume your cameras are at your eyes' physical location. everything will
    /// have the right scale in this mode, but the viewing range is smaller.
    FromEye,
}
/// Identifies one of the two eyes / cameras of the stereo pair.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Copy, PartialOrd, Ord)]
pub enum Eye {
    Left,
    Right,
}
/// Default eye for display settings: the left eye.
/// NOTE(review): presumably used as a serde default function — confirm at the use site.
pub const fn default_display_eye() -> Eye {
    Eye::Left
}
/// Index camera passthrough configuration, deserialized from
/// `index_camera_passthrough.toml` (see `load_config`).
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
    /// camera device to use. auto detect if not set
    #[serde(default)]
    pub camera_device: String,
    /// enable debug option, including:
    /// - use trigger button to do renderdoc capture
    #[serde(default)]
    pub debug: bool,
}
impl Default for Config {
    /// Empty device string (auto-detect) with debugging disabled.
    fn default() -> Self {
        Self {
            camera_device: String::new(),
            debug: false,
        }
    }
}
use anyhow::{Context, Result};
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
AutoCommandBufferBuilder, CommandBufferUsage, CopyBufferToImageInfo,
PrimaryCommandBufferAbstract as _, allocator::CommandBufferAllocator,
},
device::Queue,
image::{ImageCreateInfo, ImageUsage},
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
pipeline::cache::{PipelineCache, PipelineCacheCreateInfo, PipelineCacheData},
sync::GpuFuture as _,
};
use xdg::BaseDirectories;
use crate::utils::DeviceExt as _;
/// Read `index_camera_passthrough.toml` from the XDG config directories,
/// falling back to the built-in defaults when no config file exists.
pub fn load_config(xdg: &BaseDirectories) -> Result<Config> {
    match xdg.find_config_file("index_camera_passthrough.toml") {
        Some(path) => {
            let text = std::fs::read_to_string(path)?;
            Ok(toml::from_str(&text)?)
        }
        None => Ok(Default::default()),
    }
}
/// Wrapper around a `PipelineCache` that persists it to disk when dropped.
pub struct AutoSavingPipelineCache(Arc<PipelineCache>);
// Deref to the wrapped cache so it can be used anywhere an
// `Arc<PipelineCache>` is expected.
impl std::ops::Deref for AutoSavingPipelineCache {
    type Target = Arc<PipelineCache>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<Arc<PipelineCache>> for AutoSavingPipelineCache {
    /// Wrap an existing cache so that it gets saved to disk on drop.
    fn from(value: Arc<PipelineCache>) -> Self {
        Self(value)
    }
}
impl AutoSavingPipelineCache {
    /// Serialize the pipeline cache into the XDG cache directory, prefixed
    /// with a freshly generated ed25519 public key and a signature over the
    /// data. On-disk layout: `[public key][signature][cache data]`.
    ///
    /// NOTE(review): because the key pair is generated here on every save and
    /// the public key is stored next to the signature, the signature detects
    /// corruption but not deliberate tampering.
    fn save(&self) -> Result<()> {
        let data = self.0.get_data().context("get pipeline cache data")?;
        let xdg = xdg::BaseDirectories::new();
        let mut f = std::fs::OpenOptions::new()
            .truncate(true)
            .write(true)
            .create(true)
            .open(
                xdg.place_cache_file(std::path::Path::new("xr_passthrough").join("pipeline_cache"))
                    .context("create pipeline cache file")?,
            )
            .context("open pipeline cache file")?;
        let key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
        let signature = key.sign(&data);
        let verifying_key = key.verifying_key();
        f.write_all(&verifying_key.as_bytes()[..])?;
        f.write_all(&signature.to_bytes()[..])?;
        f.write_all(&data)?;
        Ok(())
    }
}
impl Drop for AutoSavingPipelineCache {
    /// Persist the cache on drop; failures are logged, never propagated.
    fn drop(&mut self) {
        if let Err(e) = self.save() {
            warn!("Failed to save pipeline cache {e:#}");
        }
    }
}
/// Load pipeline cache from file, if file not found or fails validation, create empty
/// PipelineCache.
///
/// The on-disk layout (written by `AutoSavingPipelineCache::save`) is
/// `[ed25519 public key][ed25519 signature][cache data]`.
pub fn load_pipeline_cache(
    device: &Arc<vulkano::device::Device>,
    xdg: &BaseDirectories,
) -> Result<Arc<PipelineCache>> {
    const HEADER_LEN: usize = ed25519_dalek::PUBLIC_KEY_LENGTH + ed25519_dalek::SIGNATURE_LENGTH;
    if let Some(data) = xdg
        .find_cache_file(std::path::Path::new("xr_passthrough").join("pipeline_cache"))
        .and_then(|f| std::fs::read(f).ok())
        .and_then(|mut data| {
            // A truncated or otherwise too-short cache file must fall back to
            // an empty cache instead of panicking on the slicing below.
            if data.len() < HEADER_LEN {
                return None;
            }
            let buf = &data[..];
            let verifying_key = ed25519_dalek::VerifyingKey::from_bytes(
                &buf[..ed25519_dalek::PUBLIC_KEY_LENGTH].try_into().unwrap(),
            )
            .ok()?;
            let buf = &buf[ed25519_dalek::PUBLIC_KEY_LENGTH..];
            let signature = &ed25519_dalek::Signature::from_bytes(
                buf[..ed25519_dalek::SIGNATURE_LENGTH].try_into().unwrap(),
            );
            let buf = &buf[ed25519_dalek::SIGNATURE_LENGTH..];
            verifying_key.verify_strict(buf, signature).ok()?;
            // Strip the header, leaving only the raw cache payload.
            data.drain(..HEADER_LEN);
            Some(data)
        })
    {
        PipelineCache::new(
            device.clone(),
            PipelineCacheCreateInfo {
                // SAFETY: we validated the signature
                initial_data: Some(unsafe { PipelineCacheData::new(data) }),
                ..Default::default()
            },
        )
    } else {
        PipelineCache::new(device.clone(), PipelineCacheCreateInfo::default())
    }
    .map_err(Into::into)
}
/// Decode the splash PNG and upload it into a new device-local RGBA8 Vulkan
/// image. Blocks until the upload completes, so the returned image is
/// immediately usable.
pub fn load_splash(
    device: &Arc<vulkano::device::Device>,
    allocator: &Arc<dyn MemoryAllocator>,
    cmdbuf_allocator: &Arc<dyn CommandBufferAllocator>,
    queue: &Arc<Queue>,
    data: &[u8],
) -> Result<Arc<vulkano::image::Image>> {
    log::debug!("loading splash");
    let img = image::load_from_memory_with_format(data, image::ImageFormat::Png)?.into_rgba8();
    let extent = [img.width(), img.height()];
    let img = img.into_raw();
    log::debug!("splash loaded");
    // TRANSFER_SRC/SAMPLED in addition to TRANSFER_DST so the image can later
    // be blitted from or sampled.
    let vkimg = device.new_image(
        &ImageCreateInfo {
            format: vulkano::format::Format::R8G8B8A8_UNORM,
            extent: [extent[0], extent[1], 1],
            usage: ImageUsage::TRANSFER_DST | ImageUsage::TRANSFER_SRC | ImageUsage::SAMPLED,
            ..Default::default()
        },
        MemoryTypeFilter::PREFER_DEVICE,
    )?;
    let mut cmdbuf = AutoCommandBufferBuilder::primary(
        cmdbuf_allocator.clone(),
        queue.queue_family_index(),
        CommandBufferUsage::OneTimeSubmit,
    )?;
    // Host-visible staging buffer for the decoded pixel data.
    let buffer = Buffer::new_unsized::<[u8]>(
        allocator,
        &BufferCreateInfo {
            usage: BufferUsage::TRANSFER_SRC,
            ..Default::default()
        },
        &AllocationCreateInfo {
            memory_type_filter: MemoryTypeFilter::HOST_SEQUENTIAL_WRITE
                | MemoryTypeFilter::PREFER_DEVICE,
            ..Default::default()
        },
        img.len() as _,
    )?;
    buffer.write()?.copy_from_slice(&img);
    cmdbuf.copy_buffer_to_image(CopyBufferToImageInfo::new(buffer, vkimg.clone()))?;
    // Submit and synchronously wait for the copy to finish.
    cmdbuf
        .build()?
        .execute(queue.clone())?
        .then_signal_fence()
        .wait(None)?;
    Ok(vkimg)
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/events.rs | Rust | use std::time::{Duration, Instant};
/// What the caller should do with the overlay after a state-machine turn.
pub enum Action {
    /// Nothing to do.
    None,
    /// Make the overlay visible.
    ShowOverlay,
    /// Hide the overlay.
    HideOverlay,
}
/// Phases of the two-button toggle state machine (see `State::handle`/`turn`).
enum InternalState {
    /// Both buttons pressed while armed; holds when the combo started.
    Activated(Instant),
    /// A toggle was consumed; wait for the buttons to be released before re-arming.
    Refractory,
    /// Ready to accept a new two-button press.
    Armed,
}
/// Tracks overlay visibility, toggled by pressing both controller buttons.
pub struct State {
    /// Whether the overlay is currently visible.
    visible: bool,
    /// Current phase of the toggle state machine.
    state: InternalState,
    /// How long the button combo must be held before the overlay is shown.
    delay: Duration,
}
impl State {
    /// Whether the overlay should be visible
    pub fn is_visible(&self) -> bool {
        self.visible
    }
    /// New state machine: the overlay starts out visible and the button
    /// combo is armed.
    pub fn new(delay: Duration) -> Self {
        Self {
            visible: true,
            state: InternalState::Armed,
            delay,
        }
    }
    /// Poll the two controller buttons and advance the internal state machine.
    pub(crate) fn handle<Vr: crate::vrapi::Vr + ?Sized>(
        &mut self,
        vrsys: &Vr,
    ) -> Result<(), Vr::Error> {
        let mut button_pressed = 0;
        for action in [crate::vrapi::Action::Button1, crate::vrapi::Action::Button2] {
            if vrsys.get_action_state(action)? {
                button_pressed += 1;
            }
        }
        log::trace!("Button pressed: {}", button_pressed);
        match (&self.state, button_pressed) {
            // Both buttons released after a toggle: ready for the next one.
            (InternalState::Refractory, 0) => {
                log::debug!("Refractory -> Armed");
                self.state = InternalState::Armed;
            }
            // Combo (partially) released before `turn` consumed it.
            (InternalState::Activated(_), 0 | 1) => {
                log::debug!("Activated -> Armed");
                self.state = InternalState::Armed;
            }
            // Both buttons pressed while armed: start the activation timer.
            (InternalState::Armed, 2) => {
                log::debug!("Armed -> Activated");
                self.state = InternalState::Activated(Instant::now());
            }
            // Any other combination leaves the state unchanged.
            _ => (),
        }
        Ok(())
    }
    /// Consume a pending activation and report the action the caller should
    /// perform on the overlay.
    pub fn turn(&mut self) -> Action {
        let InternalState::Activated(start) = self.state else {
            return Action::None;
        };
        if self.visible {
            // Hiding takes effect immediately.
            log::debug!("Hide overlay, Activated -> Refactory");
            self.state = InternalState::Refractory;
            self.visible = false;
            Action::HideOverlay
        } else if start.elapsed() > self.delay {
            // Showing only happens after the combo has been held for `delay`.
            log::debug!("Show overlay, Activated -> Refactory");
            self.state = InternalState::Refractory;
            self.visible = true;
            Action::ShowOverlay
        } else {
            Action::None
        }
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/lib.rs | Rust | #![deny(rust_2018_idioms, rust_2024_compatibility, rust_2021_compatibility)]
pub mod api_layer;
pub mod camera;
pub mod config;
pub mod pipeline;
pub mod steam;
pub mod utils;
use anyhow::{Context, Result, anyhow};
/// Camera image will be (size * 2, size)
pub const CAMERA_SIZE: u32 = 960;
use glam::UVec2;
#[allow(unused_imports)]
use log::info;
/// A single captured (or splash) camera frame plus metadata.
pub struct FrameInfo {
    /// Raw frame bytes: RGBA8 for the splash image; otherwise whatever the
    /// camera delivers (presumably YUYV — see `pipeline.rs`; confirm).
    pub frame: Vec<u8>,
    /// Capture time, mapped onto the monotonic clock.
    pub frame_time: std::time::Instant,
    /// Size of a single eye view; the full frame is `(size.x * 2, size.y)`.
    pub size: UVec2,
    /// Whether the postprocess pass (YUYV decode / undistortion) still needs to run.
    pub needs_postprocess: bool,
}
/// Locate the Valve Index HMD camera's video4linux device node via udev,
/// matching on its USB vendor/model id.
pub fn find_index_camera() -> Result<std::path::PathBuf> {
    let mut enumerator = udev::Enumerator::new()?;
    enumerator.match_subsystem("video4linux")?;
    enumerator.match_property("ID_VENDOR_ID", "28de")?;
    enumerator.match_property("ID_MODEL_ID", "2400")?;
    let device = enumerator
        .scan_devices()?
        .next()
        .with_context(|| anyhow!("Index camera not found"))?;
    device
        .devnode()
        .map(|node| node.to_owned())
        .with_context(|| anyhow!("Index camera cannot be accessed"))
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/pipeline.rs | Rust | use glam::{
UVec2,
f64::{DVec2 as Vec2, DVec4 as Vec4},
};
use smallvec::smallvec;
use std::sync::Arc;
use crate::{steam::StereoCamera, utils::DeviceExt as _};
use anyhow::Result;
use log::{info, trace};
use vulkano::{
Handle, VulkanObject,
buffer::{Buffer, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
AutoCommandBufferBuilder, BlitImageInfo, CommandBufferUsage, CopyBufferToImageInfo,
ImageBlit, PrimaryAutoCommandBuffer, PrimaryCommandBufferAbstract as _,
RenderPassBeginInfo, SubpassBeginInfo, SubpassContents, SubpassEndInfo,
allocator::CommandBufferAllocator,
},
descriptor_set::{DescriptorSet, WriteDescriptorSet, allocator::DescriptorSetAllocator},
device::{Device, Queue},
format::{Format, FormatFeatures},
image::{
Image as VkImage, ImageCreateInfo, ImageLayout, ImageTiling, ImageUsage,
sampler::{
Filter, Sampler, SamplerCreateInfo,
ycbcr::{
SamplerYcbcrConversion, SamplerYcbcrConversionCreateInfo,
SamplerYcbcrModelConversion,
},
},
view::{ImageView, ImageViewCreateInfo},
},
memory::allocator::{
AllocationCreateInfo, MemoryAllocatePreference, MemoryAllocator, MemoryTypeFilter,
},
padded::Padded,
pipeline::{
GraphicsPipeline, Pipeline as _, PipelineBindPoint, PipelineLayout,
PipelineShaderStageCreateInfo,
cache::PipelineCache,
graphics::{
GraphicsPipelineCreateInfo,
color_blend::ColorBlendState,
input_assembly::{InputAssemblyState, PrimitiveTopology},
rasterization::RasterizationState,
vertex_input::{self, Vertex as _, VertexDefinition},
viewport::{Viewport, ViewportState},
},
layout::PipelineDescriptorSetLayoutCreateInfo,
},
render_pass::{Framebuffer, Subpass},
shader::ShaderModule,
sync::{GpuFuture, future::FenceSignalFuture},
};
/// Lens distortion correction parameters for a side-by-side stereo image.
/// Index convention for all per-eye arrays: 0 = left eye, 1 = right eye.
#[derive(Debug)]
pub struct StereoUndistortParams {
    /// field-of-view parameter, 0 = left eye, 1 = right eye
    fov: [Vec2; 2],
    /// Output scaling so the undistorted image fills the visible field of view.
    scale: [Vec2; 2],
    /// Focal lengths, normalized by the image size.
    focal: [Vec2; 2],
    /// Optical centers, normalized by the image size.
    center: [Vec2; 2],
    /// Radial distortion coefficients k1..k4.
    coeff: [Vec4; 2],
    /// Image size (pixels) used to normalize the intrinsics.
    size: Vec2,
}
impl StereoUndistortParams {
    /// Adjusted per-eye field-of-view parameters (0 = left, 1 = right).
    pub fn fov(&self) -> [Vec2; 2] {
        self.fov
    }
    /// i.e. solving Undistort(src) = dst for the smallest non-zero root.
    ///
    /// Uses Newton's method on the radial distortion polynomial; returns
    /// `None` when it fails to converge or hits a zero derivative.
    fn undistort_inverse(coeff: &Vec4, dst: f64) -> Option<f64> {
        // solving: x * (1 + k1*x^2 + k2*x^4 + k3*x^6 + k4*x^8) - dst = 0
        let f = |x: f64| {
            let x2 = x * x;
            x * (1.0 + x2 * (coeff[0] + x2 * (coeff[1] + x2 * (coeff[2] + x2 * coeff[3])))) - dst
        };
        // First derivative of `f`, for the Newton step.
        let fp = |x: f64| {
            let x2 = x * x;
            1.0 + x2
                * (3.0 * coeff[0]
                    + x2 * (5.0 * coeff[1] + x2 * (7.0 * coeff[2] + x2 * 9.0 * coeff[3])))
        };
        const MAX_ITER: u32 = 100;
        let mut x = 0.0;
        for _ in 0..MAX_ITER {
            if fp(x) == 0.0 {
                // Give up
                info!("Divided by zero");
                return None;
            }
            trace!("{} {} {}", x, f(x), fp(x));
            if f(x).abs() < 1e-6 {
                info!("Inverse is: {}, {} {}", x, f(x), dst);
                return Some(x);
            }
            x = x - f(x) / fp(x);
        }
        // Give up
        info!("Cannot find scale");
        None
    }
    // Find a scale that maps the middle point of 4 edges of the undistorted image to
    // the edge of the field of view of the distorted image.
    //
    // Returns the scales and the adjusted fovs
    fn find_scale(coeff: &Vec4, center: &Vec2, focal: &Vec2) -> (Vec2, Vec2) {
        let ret = [0, 1].map(|i| {
            let min_edge_dist = center[i].min(1.0 - center[i]) / focal[i];
            // Find the input theta angle where Undistort(theta) = min_edge_dist
            if let Some(theta) = Self::undistort_inverse(coeff, min_edge_dist) {
                if theta >= std::f64::consts::PI / 2.0 {
                    // infinity?
                    (1.0, focal[i])
                } else {
                    // Find the input coordinates that will give us that theta
                    let target_edge = theta.tan();
                    log::info!("{}", target_edge);
                    (target_edge / (0.5 / focal[i]), 1.0 / min_edge_dist / 2.0)
                }
            } else {
                // Cannot find scale so just don't scale
                (1.0, focal[i])
            }
        });
        (Vec2::new(ret[0].0, ret[1].0), Vec2::new(ret[0].1, ret[1].1))
    }
    /// Input size is (size * 2, size)
    /// returns also the adjusted FOV for left and right
    pub fn new(size: UVec2, camera_calib: &StereoCamera) -> Result<Self> {
        let size = size.as_dvec2();
        let center = [
            Vec2::new(
                camera_calib.left.intrinsics.center_x / size.x,
                camera_calib.left.intrinsics.center_y / size.y,
            ),
            Vec2::new(
                camera_calib.right.intrinsics.center_x / size.x,
                camera_calib.right.intrinsics.center_y / size.y,
            ),
        ];
        let focal = [
            Vec2::new(
                camera_calib.left.intrinsics.focal_x / size.x,
                camera_calib.left.intrinsics.focal_y / size.y,
            ),
            Vec2::new(
                camera_calib.right.intrinsics.focal_x / size.x,
                camera_calib.right.intrinsics.focal_y / size.y,
            ),
        ];
        // Fix: use each eye's own distortion coefficients. This previously
        // used the left eye's coefficients for both slots, while the
        // per-eye convention is 0 = left, 1 = right (matching center/focal).
        let coeff: [Vec4; 2] = [
            camera_calib.left.intrinsics.distort.coeffs.into(),
            camera_calib.right.intrinsics.distort.coeffs.into(),
        ];
        let scale_fov = [0, 1].map(|i| Self::find_scale(&coeff[i], &center[i], &focal[i]));
        Ok(Self {
            fov: [scale_fov[0].1, scale_fov[1].1],
            scale: [scale_fov[0].0, scale_fov[1].0],
            focal,
            center,
            coeff,
            size,
        })
    }
}
/// Vertex input for the postprocess pipeline: a bare 2D position.
#[derive(vertex_input::Vertex, Default, Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
#[allow(non_snake_case)]
#[repr(C)]
struct Vertex {
    #[format(R32G32_SFLOAT)]
    position: [f32; 2],
}
/// GPU postprocessing pipeline for camera frames: uploads the raw frame and
/// converts it (YUYV decode and/or lens undistortion) into an RGBA image.
pub struct Pipeline {
    /// Lens-undistortion parameters; `None` disables the correction pass.
    correction: Option<StereoUndistortParams>,
    /// Debug-capture flag — presumably gates renderdoc captures (see
    /// `Config::debug`); confirm against the render loop.
    capture: bool,
    /// A cpu buffer for storing and uploading the input image.
    input_image_buffer: Arc<Buffer>,
    /// GPU copy of the raw input image.
    input_image_gpu: Arc<VkImage>,
    /// The input image after post-processing (e.g. undistortion, yuv to rgb conversion)
    postprocessed_image: Arc<VkImage>,
    /// Stereo camera calibration, when available.
    camera_config: Option<StereoCamera>,
    allocator: Arc<dyn MemoryAllocator>,
    cmdbuf_allocator: Arc<dyn CommandBufferAllocator>,
    device: Arc<Device>,
    queue: Arc<Queue>,
    /// Command buffer for uploading the image to the GPU
    cmdbuf: Arc<PrimaryAutoCommandBuffer>,
    /// Fence future of the last submitted upload — presumably used to
    /// sequence the next frame's work; confirm in the postprocess path.
    previous_upload_end: Option<Arc<FenceSignalFuture<Box<dyn GpuFuture + Send + Sync>>>>,
    /// Timestamp of the last frame that went through the pipeline.
    previous_frame_time: Option<std::time::Instant>,
}
// Manual `Debug`: most fields (allocators, queue, futures) are not `Debug`,
// so only summarize the interesting ones.
impl std::fmt::Debug for Pipeline {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Pipeline")
            .field("correction", &self.correction)
            .field("capture", &self.capture)
            .field("input_texture", &self.input_image_gpu.handle().as_raw())
            .field("camera_config", &self.camera_config)
            .finish_non_exhaustive()
    }
}
/// A pipeline stage that turns raw camera frames into presentable GPU images.
pub trait PostprocessPipeline {
    /// Allocate an output image suitable for `postprocess`.
    fn allocate_image(&self) -> Result<Arc<VkImage>>;
    /// Create command buffers for a given set of swapchain images. If a set of command buffers
    /// were created previously, they will be replaced.
    fn create_command_buffers(&mut self, images: &[Arc<VkImage>], input_len: usize) -> Result<()>;
    /// Postprocess the camera image, taking input from a in memory buffer.
    fn postprocess(&self, input: &[u8], output: Arc<VkImage>) -> Result<Box<dyn GpuFuture>>;
    // /// Postprocess the camera image, taking input from a dmabuf file descriptor.
    // fn postprocess_dmabuf(&self, input: OwnedFd, output: &Self::Image) -> Result<Self::Future>;
}
impl Pipeline {
/// Load the vertex shader plus the fragment-shader variant matching the
/// input format (YUYV vs. RGBA) and whether undistortion is required.
pub fn load_shader(
    device: &Arc<Device>,
    source_is_yuyv: bool,
    has_camera_config: bool,
    has_yuyv_sampler: bool,
) -> anyhow::Result<(Arc<ShaderModule>, Arc<ShaderModule>)> {
    // The vertex stage is shared by every fragment variant.
    let vertex = vs::load(device.clone())?;
    // Shader-side YUYV decoding is only needed when no ycbcr sampler does it.
    let needs_yuyv_decode = source_is_yuyv && !has_yuyv_sampler;
    let fragment = if needs_yuyv_decode {
        if has_camera_config {
            fs::yuyv_undistort::load(device.clone())?
        } else {
            fs::yuyv::load(device.clone())?
        }
    } else if has_camera_config {
        fs::undistort::load(device.clone())?
    } else {
        fs::unprocessed::load(device.clone())?
    };
    Ok((vertex, fragment))
}
/// Create post-processing stages.
///
/// The camera image is two `camera_size` images stitched together
/// side-by-side; the output is a `render_size.x * 2` by `render_size.y`
/// side-by-side RGBA image.
///
/// # Errors
///
/// Returns an error when any Vulkan object (shaders, images, buffers,
/// render pass, graphics pipeline, descriptor set, command buffer) fails
/// to be created.
#[allow(clippy::too_many_arguments)]
pub fn new(
    device: &Arc<Device>,
    allocator: &Arc<dyn MemoryAllocator>,
    cmdbuf_allocator: &Arc<dyn CommandBufferAllocator>,
    queue: &Arc<Queue>,
    descriptor_set_allocator: &Arc<dyn DescriptorSetAllocator>,
    source_is_yuyv: bool,
    camera_config: Option<&StereoCamera>,
    final_layout: ImageLayout,
    output_usage: ImageUsage,
    pipeline_cache: Arc<PipelineCache>,
    camera_size: UVec2,
    render_size: UVec2,
) -> Result<Self> {
    // Use the hardware ycbcr sampler for YUYV input when the device supports
    // sampling the interleaved 4:2:2 format; otherwise decode YUYV in-shader.
    let exts = device.enabled_extensions();
    let feats = device.enabled_features();
    let has_yuyv_sampler =
        if exts.khr_sampler_ycbcr_conversion && feats.sampler_ycbcr_conversion {
            let format_feats = device
                .physical_device()
                .format_properties(Format::G8B8G8R8_422_UNORM)?
                .format_features(ImageTiling::Optimal, &[]);
            format_feats.contains(FormatFeatures::MIDPOINT_CHROMA_SAMPLES)
        } else {
            false
        };
    let format = if has_yuyv_sampler && source_is_yuyv {
        Format::G8B8G8R8_422_UNORM
    } else {
        Format::R8G8B8A8_UNORM
    };
    let (vs, fs) = Self::load_shader(
        &device,
        source_is_yuyv,
        camera_config.is_some(),
        has_yuyv_sampler,
    )?;
    let vs_main = vs.entry_point("main").unwrap();
    let fs_main = fs.entry_point("main").unwrap();
    // Allocate intermediate textures
    let input_texture = device.new_image(
        &ImageCreateInfo {
            extent: if source_is_yuyv && !has_yuyv_sampler {
                // Source is raw, unconverted yuyv, therefore is downsampled 2x in the X
                // direction.
                [camera_size.x, camera_size.y, 1]
            } else {
                [camera_size.x * 2, camera_size.y, 1]
            },
            format,
            usage: ImageUsage::TRANSFER_DST
                | ImageUsage::TRANSFER_SRC
                | ImageUsage::SAMPLED
                | ImageUsage::COLOR_ATTACHMENT,
            ..Default::default()
        },
        MemoryTypeFilter::PREFER_DEVICE,
    )?;
    // Output image: both eyes side-by-side at render resolution.
    let postprocessed_image = device.new_image(
        &ImageCreateInfo {
            extent: [render_size.x * 2, render_size.y, 1],
            format: Format::R8G8B8A8_UNORM,
            usage: ImageUsage::COLOR_ATTACHMENT | ImageUsage::TRANSFER_DST | output_usage,
            ..Default::default()
        },
        MemoryTypeFilter::PREFER_DEVICE,
    )?;
    // Staging buffer for frame uploads: both eyes (`* 2`), with 2 bytes/pixel
    // for YUYV or 4 for RGBA.
    let cpu_buffer = device.new_buffer(
        &BufferCreateInfo {
            usage: BufferUsage::TRANSFER_SRC,
            size: camera_size.x as u64
                * camera_size.y as u64
                * 2
                * if source_is_yuyv { 2 } else { 4 },
            ..Default::default()
        },
        MemoryTypeFilter::HOST_SEQUENTIAL_WRITE | MemoryTypeFilter::PREFER_DEVICE,
    )?;
    let render_pass = vulkano::single_pass_renderpass!(device.clone(),
        attachments: {
            color: {
                format: vulkano::format::Format::R8G8B8A8_UNORM,
                samples: 1,
                load_op: Load,
                store_op: Store,
                final_layout: final_layout,
            }
        },
        pass: {
            color: [color],
            depth_stencil: {},
        })
    .unwrap();
    let correction = camera_config
        .map(|c| StereoUndistortParams::new(camera_size, &c))
        .transpose()?;
    log::debug!("correction fov: {:?}", correction.as_ref().map(|x| x.fov()));
    let fov = correction
        .as_ref()
        .map(|c| c.fov())
        .unwrap_or([Vec2::new(1.19, 1.19); 2]); // default to roughly 100 degrees fov, hopefully this is sensible
    let stages = smallvec![
        PipelineShaderStageCreateInfo::new(vs_main.clone()),
        PipelineShaderStageCreateInfo::new(fs_main),
    ];
    let layout = PipelineLayout::new(
        device.clone(),
        PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
            .into_pipeline_layout_create_info(device.clone())?,
    )?;
    // Linear sampler; attaches a ycbcr conversion when hardware decodes YUYV.
    let sampler = Sampler::new(
        device,
        &SamplerCreateInfo {
            min_filter: Filter::Linear,
            mag_filter: Filter::Linear,
            sampler_ycbcr_conversion: has_yuyv_sampler
                .then(|| {
                    SamplerYcbcrConversion::new(
                        device,
                        &SamplerYcbcrConversionCreateInfo {
                            format,
                            ycbcr_model: SamplerYcbcrModelConversion::Ycbcr709,
                            ..Default::default()
                        },
                    )
                })
                .transpose()?
                .as_ref(),
            ..Default::default()
        },
    )?;
    // Uniform buffer holding the undistortion coefficients, only when calibrated.
    let distortion_params = correction
        .as_ref()
        .map(|c| {
            Buffer::from_data(
                allocator,
                &BufferCreateInfo {
                    usage: BufferUsage::UNIFORM_BUFFER,
                    ..Default::default()
                },
                &AllocationCreateInfo {
                    memory_type_filter: MemoryTypeFilter::HOST_SEQUENTIAL_WRITE
                        | MemoryTypeFilter::PREFER_DEVICE,
                    allocate_preference: MemoryAllocatePreference::Unknown,
                    ..Default::default()
                },
                fs::yuyv_undistort::DistortionParameters {
                    center: c.center.map(|v| Padded(*v.as_vec2().as_ref())),
                    dcoef: c.coeff.map(|v| *v.as_vec4().as_ref()),
                    focal: c.focal.map(|v| Padded(*v.as_vec2().as_ref())),
                    scale: c.scale.map(|v| Padded(*v.as_vec2().as_ref())),
                },
            )
            .map_err(anyhow::Error::from)
        })
        .transpose()?;
    let pipeline = GraphicsPipeline::new(
        device.clone(),
        Some(pipeline_cache),
        GraphicsPipelineCreateInfo {
            vertex_input_state: Some(Vertex::per_vertex().definition(&vs_main)?),
            stages,
            input_assembly_state: Some(InputAssemblyState {
                topology: PrimitiveTopology::TriangleFan,
                ..Default::default()
            }),
            viewport_state: Some(ViewportState {
                viewports: smallvec![Viewport {
                    offset: [0., 0.],
                    extent: [(render_size.x * 2) as _, render_size.y as _],
                    ..Default::default()
                }],
                ..Default::default()
            }),
            subpass: Some(Subpass::from(render_pass.clone(), 0).unwrap().into()),
            multisample_state: Some(Default::default()),
            color_blend_state: Some(ColorBlendState::with_attachment_states(
                1,
                Default::default(),
            )),
            rasterization_state: Some(RasterizationState::default()),
            ..GraphicsPipelineCreateInfo::new(layout)
        },
    )?;
    // Binding 1: sampled input image; binding 2 (optional): distortion uniforms.
    let desc_set_writes = [WriteDescriptorSet::image_view_sampler(
        1,
        ImageView::new(
            &input_texture,
            &ImageViewCreateInfo::from_image(&input_texture),
        )?,
        sampler.clone(),
    )]
    .into_iter()
    .chain(
        distortion_params
            .clone()
            .map(|b| WriteDescriptorSet::buffer(2, b)),
    );
    // A quad covering the left half of the output; the vertex shader shifts
    // instance 1 one unit right to cover the right half.
    let vertices = Buffer::from_iter::<Vertex, _>(
        allocator,
        &BufferCreateInfo {
            usage: BufferUsage::VERTEX_BUFFER,
            ..Default::default()
        },
        &AllocationCreateInfo {
            memory_type_filter: MemoryTypeFilter::HOST_SEQUENTIAL_WRITE
                | MemoryTypeFilter::PREFER_DEVICE,
            allocate_preference: MemoryAllocatePreference::Unknown,
            ..Default::default()
        },
        [
            // Left eye quad
            Vertex {
                position: [-1.0, -1.0],
            },
            Vertex {
                position: [-1.0, 1.0],
            },
            Vertex {
                position: [0.0, 1.0],
            },
            Vertex {
                position: [0.0, -1.0],
            },
        ]
        .iter()
        .cloned(),
    )
    .unwrap();
    let desc_set = DescriptorSet::new(
        descriptor_set_allocator.clone(),
        pipeline.layout().set_layouts().first().unwrap().clone(),
        desc_set_writes,
        None,
    )?;
    let buffer = Subbuffer::new(cpu_buffer.clone());
    let ivci = ImageViewCreateInfo::from_image(&postprocessed_image);
    let framebuffer = Framebuffer::new(
        render_pass.clone(),
        vulkano::render_pass::FramebufferCreateInfo {
            attachments: vec![ImageView::new(&postprocessed_image, &ivci)?],
            ..Default::default()
        },
    )?;
    // Pre-record the upload + render command buffer once; it is re-submitted
    // every frame (hence MultipleSubmit).
    let mut cmdbuf = AutoCommandBufferBuilder::primary(
        cmdbuf_allocator.clone(),
        queue.queue_family_index(),
        CommandBufferUsage::MultipleSubmit,
    )?;
    cmdbuf
        .copy_buffer_to_image(CopyBufferToImageInfo::new(buffer, input_texture.clone()))?
        .begin_render_pass(
            RenderPassBeginInfo {
                clear_values: vec![None],
                ..RenderPassBeginInfo::framebuffer(framebuffer.clone())
            },
            SubpassBeginInfo {
                contents: SubpassContents::Inline,
                ..Default::default()
            },
        )?
        .bind_pipeline_graphics(pipeline.clone())?
        .bind_descriptor_sets(
            PipelineBindPoint::Graphics,
            pipeline.layout().clone(),
            0,
            desc_set.clone(),
        )?
        .bind_vertex_buffers(0, vertices.clone())?;
    // SAFETY: the vertex buffer bound above holds exactly `vertices.len()`
    // vertices, and the pipeline/descriptor sets the draw requires are bound.
    // Two instances are drawn: one per eye.
    unsafe { cmdbuf.draw(vertices.len() as u32, 2, 0, 0)? }
        .end_render_pass(SubpassEndInfo::default())?;
    let cmdbuf = cmdbuf.build()?;
    log::info!("Adjusted FOV: {:?}", fov);
    Ok(Self {
        correction,
        capture: false,
        camera_config: camera_config.copied(),
        input_image_buffer: cpu_buffer,
        input_image_gpu: input_texture,
        postprocessed_image,
        previous_upload_end: None,
        previous_frame_time: None,
        device: device.clone(),
        allocator: allocator.clone(),
        cmdbuf,
        cmdbuf_allocator: cmdbuf_allocator.clone(),
        queue: queue.clone(),
    })
}
/// Field of view for each eye; falls back to a roughly-100-degree default
/// when no camera calibration is configured.
pub fn fov(&self) -> [Vec2; 2] {
    match &self.correction {
        Some(c) => c.fov(),
        None => [Vec2::new(1.19, 1.19); 2],
    }
}
/// Run the pipeline
///
/// Uploads `frame` to the GPU and submits the work that produces the
/// post-processed side-by-side image. A frame whose `frame_time` matches
/// the previously processed frame is skipped.
///
/// # Arguments
///
/// - time: Time offset into the past when the camera frame is captured
pub fn maybe_postprocess(&mut self, frame: &crate::FrameInfo) -> Result<()> {
    // Skip frames we have already processed.
    if Some(frame.frame_time) == self.previous_frame_time {
        return Ok(());
    }
    // Wait for the previous submission before overwriting the upload buffer.
    let mut previous_fut = self.previous_upload_end.take();
    if let Some(f) = &mut previous_fut {
        f.wait(None)?;
        f.cleanup_finished();
    }
    self.previous_upload_end = Some(Arc::new(if frame.needs_postprocess {
        {
            let buffer = Subbuffer::new(self.input_image_buffer.clone());
            buffer.write()?.copy_from_slice(&frame.frame);
        }
        // Chain after the previous frame's work when there is any, so the
        // pre-recorded command buffer never overlaps with itself.
        if let Some(f) = previous_fut {
            f.then_execute(self.queue.clone(), self.cmdbuf.clone())?
                .boxed_send_sync()
                .then_signal_fence_and_flush()?
        } else {
            self.cmdbuf
                .clone()
                .execute(self.queue.clone())?
                .boxed_send_sync()
                .then_signal_fence_and_flush()?
        }
    } else {
        // Frame is already RGBA: upload and blit (with linear scaling)
        // straight into the output image, no shader pass needed.
        assert_eq!(
            frame.frame.len(),
            frame.size.x as usize * 2 * frame.size.y as usize * 4
        );
        let buffer = Buffer::new_slice::<u8>(
            &self.allocator,
            &BufferCreateInfo {
                usage: BufferUsage::TRANSFER_SRC,
                ..Default::default()
            },
            &AllocationCreateInfo {
                memory_type_filter: MemoryTypeFilter::HOST_SEQUENTIAL_WRITE
                    | MemoryTypeFilter::PREFER_DEVICE,
                allocate_preference: MemoryAllocatePreference::Unknown,
                ..Default::default()
            },
            frame.size.x as u64 * 2 * frame.size.y as u64 * 4,
        )?;
        buffer.write()?.copy_from_slice(&frame.frame);
        let vkimg = self.device.new_image(
            &ImageCreateInfo {
                format: vulkano::format::Format::R8G8B8A8_UNORM,
                extent: [frame.size.x * 2, frame.size.y, 1],
                usage: ImageUsage::TRANSFER_DST
                    | ImageUsage::TRANSFER_SRC
                    | ImageUsage::SAMPLED,
                ..Default::default()
            },
            MemoryTypeFilter::PREFER_DEVICE,
        )?;
        let mut cmdbuf = AutoCommandBufferBuilder::primary(
            self.cmdbuf_allocator.clone(),
            self.queue.queue_family_index(),
            CommandBufferUsage::OneTimeSubmit,
        )?;
        cmdbuf
            .copy_buffer_to_image(CopyBufferToImageInfo::new(buffer, vkimg.clone()))?
            .blit_image(BlitImageInfo {
                src_image_layout: ImageLayout::TransferSrcOptimal,
                dst_image_layout: ImageLayout::TransferDstOptimal,
                filter: Filter::Linear,
                regions: smallvec![ImageBlit {
                    src_subresource: vkimg.subresource_layers(),
                    dst_subresource: self.postprocessed_image.subresource_layers(),
                    src_offsets: [[0, 0, 0], vkimg.extent()],
                    dst_offsets: [[0, 0, 0], self.postprocessed_image.extent()],
                    ..Default::default()
                }],
                ..BlitImageInfo::new(vkimg, self.postprocessed_image.clone())
            })?;
        cmdbuf
            .build()?
            .execute(self.queue.clone())?
            .boxed_send_sync()
            .then_signal_fence_and_flush()?
    }));
    // Fix: record the frame time so the duplicate-frame check above can
    // actually trigger. Previously `previous_frame_time` was never assigned,
    // leaving the early-return dead and every duplicate frame re-processed.
    self.previous_frame_time = Some(frame.frame_time);
    Ok(())
}
/// Return the post-processed image together with the fence signal future
/// that must complete before the image contents are valid.
///
/// # Panic
///
/// panics if `maybe_postprocess` was not called before this function.
pub fn image(
    &self,
) -> (
    Arc<VkImage>,
    Arc<FenceSignalFuture<Box<dyn GpuFuture + Send + Sync>>>,
) {
    let output = self.postprocessed_image.clone();
    let ready = self.previous_upload_end.clone().unwrap();
    (output, ready)
}
/// Extent (width, height, depth) of the post-processed output image.
pub fn image_extent(&self) -> [u32; 3] {
    self.postprocessed_image.extent()
}
}
/// Fragment-shader variants compiled from one source file with different
/// preprocessor defines; `Pipeline::load_shader` picks the matching one.
mod fs {
    /// Shader-side YUYV decode plus lens undistortion.
    pub mod yuyv_undistort {
        vulkano_shaders::shader! {
            ty: "fragment",
            path: "shaders/combined.frag",
            define: [
                ("INPUT_IS_YUYV", "1"),
                ("UNDISTORT", "1"),
            ],
            custom_derives: [Copy, Clone, Debug],
        }
    }
    /// Lens undistortion only (input already RGB, or decoded by a ycbcr sampler).
    pub mod undistort {
        vulkano_shaders::shader! {
            ty: "fragment",
            path: "shaders/combined.frag",
            define: [
                ("UNDISTORT", "1"),
            ],
            custom_derives: [Copy, Clone, Debug],
        }
    }
    /// Shader-side YUYV decode only, no undistortion.
    pub mod yuyv {
        vulkano_shaders::shader! {
            ty: "fragment",
            path: "shaders/combined.frag",
            define: [
                ("INPUT_IS_YUYV", "1"),
            ],
            custom_derives: [Copy, Clone, Debug],
        }
    }
    /// Plain passthrough sampling (no decode, no undistortion).
    pub mod unprocessed {
        vulkano_shaders::shader! {
            ty: "fragment",
            path: "shaders/combined.frag",
            custom_derives: [Copy, Clone, Debug],
        }
    }
}
/// Vertex shader shared by every fragment variant: draws a half-screen quad
/// per instance, shifting instance 1 one unit right so the two instances
/// cover the left and right eyes of the side-by-side output.
mod vs {
    vulkano_shaders::shader! {
        ty: "vertex",
        src: "#version 450
layout(location = 0) in vec2 position;
layout(location = 0) out flat uint instanceId;
layout(location = 1) out vec2 eyeRelativeCoord;
void main() {
gl_Position = vec4(position, 0, 1) + vec4(1.0, 0.0, 0.0, 0.0) * float(gl_InstanceIndex);
instanceId = gl_InstanceIndex;
eyeRelativeCoord = position * vec2(2.0, 1.0) + vec2(1.0, 0);
}"
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/steam.rs | Rust | use serde::{Deserialize, Serialize};
/// Camera extrinsic calibration: the camera's pose relative to the HMD.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
pub struct Extrinsics {
    /// Offset of the camera from Hmd
    pub position: [f64; 3],
}
/// Lens distortion model coefficients as stored in Steam's config.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
pub struct Distort {
    pub coeffs: [f64; 4],
}
/// Pinhole camera intrinsic calibration, expressed in device pixels.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
pub struct Intrinsics {
    /// Optical center X
    pub center_x: f64,
    /// Optical center Y
    pub center_y: f64,
    /// X focal length in device pixels
    pub focal_x: f64,
    /// Y focal length in device pixels
    pub focal_y: f64,
    /// Height of the camera output in pixels
    pub height: f64,
    /// Width of the camera output in pixels
    pub width: f64,
    /// Lens distortion coefficients.
    pub distort: Distort,
}
/// Which eye a tracked camera belongs to. Serialized lowercase
/// ("left"/"right") to match Steam's config.json.
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Copy, Debug)]
#[serde(rename_all = "lowercase")]
pub enum Camera {
    Left,
    Right,
}
/// One camera's full calibration record from Steam's lighthouse config.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
pub struct TrackedCamera {
    pub extrinsics: Extrinsics,
    pub intrinsics: Intrinsics,
    /// Which eye this camera covers.
    pub name: Camera,
}
/// Calibration for the left/right camera pair of a stereo HMD camera.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
pub struct StereoCamera {
    pub left: TrackedCamera,
    pub right: TrackedCamera,
}
/// Extract relevant bits of information from steam config files:
/// the list of tracked cameras with their calibration.
#[derive(Serialize, Deserialize)]
pub struct LighthouseConfig {
    pub tracked_cameras: Vec<TrackedCamera>,
}
use anyhow::{Context, Result, anyhow};
/// Try to find a stereo camera calibration by scanning every device
/// directory under Steam's `config/lighthouse` data dir.
///
/// Returns `None` when Steam, the config files, or either camera entry
/// cannot be found; all I/O and parse errors are treated as "not found"
/// (best effort by design).
pub fn find_steam_config() -> Option<StereoCamera> {
    let xdg = xdg::BaseDirectories::new();
    log::debug!("Base directories: {:?}", xdg);
    // Steam's data dir is spelled "steam" or "Steam" depending on the install.
    let steam = xdg
        .find_data_file("steam")
        .or_else(|| xdg.find_data_file("Steam"))?;
    log::debug!("Steam directory: {:?}", steam);
    let steam_config = steam.join("config").join("lighthouse");
    log::debug!("Enumerating steam config dir {:?}", steam_config);
    let mut files = steam_config.read_dir().ok()?;
    // Use the first device directory whose config parses and contains both cameras.
    files.find_map(|dir| {
        log::debug!("Trying to find config in {:?}", dir);
        let dir = dir.ok()?;
        let config = dir.path().join("config.json");
        // Fix: log message previously read "Trying to config from" (missing verb).
        log::debug!("Trying to read config from {:?}", config);
        let json = std::fs::read_to_string(config).ok()?;
        log::debug!("Trying to parse config");
        let lhconfig: LighthouseConfig = serde_json::from_str(&json).ok()?;
        log::debug!("Trying to find left camera");
        let left = lhconfig
            .tracked_cameras
            .iter()
            .copied()
            .find(|p| p.name == Camera::Left)?;
        log::debug!("Trying to find right camera");
        let right = lhconfig
            .tracked_cameras
            .iter()
            .copied()
            .find(|p| p.name == Camera::Right)?;
        Some(StereoCamera { left, right })
    })
}
pub fn load_steam_config(hmd_serial: &str) -> Result<StereoCamera> {
let xdg = xdg::BaseDirectories::new();
let steam = xdg
.find_data_file("steam")
.or_else(|| xdg.find_data_file("Steam"))
.with_context(|| anyhow!("Cannot find steam directory"))?;
let lhconfig = std::fs::read_to_string(
steam
.join("config")
.join("lighthouse")
.join(hmd_serial.to_lowercase())
.join("config.json"),
)?;
let lhconfig: LighthouseConfig = serde_json::from_str(&lhconfig)?;
let left = *lhconfig
.tracked_cameras
.iter()
.find(|p| p.name == Camera::Left)
.with_context(|| anyhow!("No left camera found"))?;
let right = *lhconfig
.tracked_cameras
.iter()
.find(|p| p.name == Camera::Right)
.with_context(|| anyhow!("No right camera found"))?;
Ok(StereoCamera { left, right })
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
src/utils.rs | Rust | use std::sync::Arc;
use vulkano::{
Validated,
buffer::{AllocateBufferError, Buffer, BufferCreateInfo, RawBuffer},
device::Device,
image::{AllocateImageError, Image, ImageCreateFlags, ImageCreateInfo, sys::RawImage},
memory::{
DedicatedAllocation, DeviceMemory, MemoryAllocateInfo, MemoryMapInfo, MemoryPropertyFlags,
MemoryRequirements, ResourceMemory,
allocator::{
FreeListAllocator, GenericMemoryAllocator, GenericMemoryAllocatorCreateInfo,
MemoryAllocator, MemoryAllocatorError, MemoryTypeFilter,
},
},
};
/// Convenience constructors for dedicated-allocation images and buffers,
/// plus a ready-made host-to-device upload allocator.
pub trait DeviceExt {
    /// Allocator type returned by `host_to_device_allocator`.
    type HostToDeviceAllocator: MemoryAllocator;
    /// Create an image backed by its own dedicated device-memory allocation.
    fn new_image(
        self: &Arc<Self>,
        create_info: &ImageCreateInfo<'_>,
        filter: MemoryTypeFilter,
    ) -> Result<Arc<Image>, Validated<AllocateImageError>>;
    /// Create a buffer backed by its own dedicated, host-mapped allocation.
    fn new_buffer(
        self: &Arc<Self>,
        create_info: &BufferCreateInfo<'_>,
        filter: MemoryTypeFilter,
    ) -> Result<Arc<Buffer>, Validated<AllocateBufferError>>;
    /// An allocator used to allocate a small amount of memory intended for host-to-device upload,
    /// e.g. small vertex buffers, uniform buffers, etc.
    fn host_to_device_allocator(self: &Arc<Self>) -> Self::HostToDeviceAllocator;
}
/// Memory requirements of the resource backing a dedicated allocation.
fn dedicated_allocation_memory_requirements(
    dedicate_allocation: DedicatedAllocation<'_>,
) -> &'_ MemoryRequirements {
    match dedicate_allocation {
        // Images report per-plane requirements; index 0 covers the whole
        // image for the non-disjoint images this crate creates.
        DedicatedAllocation::Image(image) => &image.memory_requirements()[0],
        DedicatedAllocation::Buffer(buffer) => buffer.memory_requirements(),
    }
}
/// Pick the best memory type index for `filter` among the memory types
/// allowed by `memory_type_bits` (the bitmask from `VkMemoryRequirements`).
///
/// Types missing any required flag are rejected outright; among the rest the
/// one scoring lowest on (missing preferred flags + present not-preferred
/// flags) wins. Returns `None` when no type qualifies.
fn find_memory_type_index(
    device: Arc<Device>,
    memory_type_bits: u32,
    filter: MemoryTypeFilter,
) -> Option<u32> {
    let memory_properties = device.physical_device().memory_properties();
    let MemoryTypeFilter {
        required_flags,
        preferred_flags,
        not_preferred_flags,
    } = filter;
    memory_properties
        .memory_types
        .iter()
        .enumerate()
        .filter(|&(index, memory_type)| {
            // Keep only types permitted by the resource AND having every required flag.
            (memory_type_bits & (1 << index)) != 0
                && memory_type.property_flags.contains(required_flags)
        })
        .min_by_key(|&(_, memory_type)| {
            // Lower score is better; ties resolve to the lowest index
            // (min_by_key returns the first minimum).
            (preferred_flags - memory_type.property_flags).count()
                + (memory_type.property_flags & not_preferred_flags).count()
        })
        .map(|(index, _)| index as u32)
}
/// Allocate a dedicated `DeviceMemory` for a single buffer or image and wrap
/// it in a `ResourceMemory`, optionally mapping the whole allocation for
/// host access.
fn allocate_dedicated(
    device: &Arc<Device>,
    dedicate_allocation: DedicatedAllocation<'_>,
    filter: MemoryTypeFilter,
    should_map: bool,
) -> Result<ResourceMemory, MemoryAllocatorError> {
    let memory_requirements = dedicated_allocation_memory_requirements(dedicate_allocation);
    let memory_type_index =
        find_memory_type_index(device.clone(), memory_requirements.memory_type_bits, filter)
            .ok_or(MemoryAllocatorError::FindMemoryType)?;
    let mut device_memory = DeviceMemory::allocate(
        device,
        &MemoryAllocateInfo {
            allocation_size: memory_requirements.layout.size(),
            // Telling the driver this allocation backs exactly one resource
            // lets it optimize placement.
            dedicated_allocation: Some(dedicate_allocation),
            memory_type_index,
            ..Default::default()
        },
    )
    .map_err(MemoryAllocatorError::AllocateDeviceMemory)?;
    if should_map {
        // Persistently map the full allocation so callers can write directly.
        device_memory
            .map(&MemoryMapInfo {
                offset: 0,
                size: device_memory.allocation_size(),
                ..Default::default()
            })
            .map_err(MemoryAllocatorError::AllocateDeviceMemory)?;
    }
    Ok(ResourceMemory::new_dedicated(device_memory))
}
impl DeviceExt for Device {
    type HostToDeviceAllocator = GenericMemoryAllocator<FreeListAllocator>;
    /// Create an image with its own dedicated (unmapped) memory allocation.
    fn new_image(
        self: &Arc<Self>,
        create_info: &ImageCreateInfo<'_>,
        filter: MemoryTypeFilter,
    ) -> Result<Arc<Image>, Validated<AllocateImageError>> {
        // Disjoint multi-planar images would need one allocation per plane,
        // which this helper does not support.
        assert!(!create_info.flags.intersects(ImageCreateFlags::DISJOINT));
        let raw_image =
            RawImage::new(self, create_info).map_err(|x| x.map(AllocateImageError::CreateImage))?;
        let resource_memory =
            allocate_dedicated(self, DedicatedAllocation::Image(&raw_image), filter, false)
                .map_err(|x| Validated::Error(AllocateImageError::AllocateMemory(x)))?;
        raw_image
            .bind_memory(Some(resource_memory))
            .map_err(|(x, _, _)| x.map(AllocateImageError::BindMemory))
            .map(Arc::new)
    }
    /// Create a buffer with its own dedicated memory allocation,
    /// persistently mapped for host writes (`should_map = true`).
    fn new_buffer(
        self: &Arc<Self>,
        create_info: &BufferCreateInfo<'_>,
        filter: MemoryTypeFilter,
    ) -> Result<Arc<Buffer>, Validated<AllocateBufferError>> {
        let buffer = RawBuffer::new(self, create_info)
            .map_err(|x| x.map(AllocateBufferError::CreateBuffer))?;
        let resource_memory =
            allocate_dedicated(self, DedicatedAllocation::Buffer(&buffer), filter, true)
                .map_err(|x| Validated::Error(AllocateBufferError::AllocateMemory(x)))?;
        buffer
            .bind_memory(resource_memory)
            .map_err(|(x, _, _)| x.map(AllocateBufferError::BindMemory))
            .map(Arc::new)
    }
    fn host_to_device_allocator(self: &Arc<Self>) -> Self::HostToDeviceAllocator {
        // Find a memory type suitable for host-to-device upload.
        // Suitable types get a 1 MiB block size; unsuitable ones get 0 and
        // are additionally excluded via `memory_type_bits` below.
        let block_sizes: Vec<_> = self
            .physical_device()
            .memory_properties()
            .memory_types
            .iter()
            .map(|memory_type| {
                if memory_type
                    .property_flags
                    .contains(MemoryPropertyFlags::HOST_VISIBLE)
                    && !memory_type.property_flags.intersects(
                        MemoryPropertyFlags::DEVICE_COHERENT | MemoryPropertyFlags::RDMA_CAPABLE,
                    )
                {
                    1024 * 1024
                } else {
                    0
                }
            })
            .collect();
        // Bitmask of the memory types given a non-zero block size above.
        let memory_type_bits = block_sizes
            .iter()
            .enumerate()
            .map(|(index, &size)| if size != 0 { 1 << index } else { 0 })
            .sum();
        log::debug!(
            "host_to_device_allocator: block_sizes={block_sizes:?}, memory_type_bits={memory_type_bits:#b}"
        );
        GenericMemoryAllocator::new(
            self,
            &GenericMemoryAllocatorCreateInfo {
                block_sizes: &block_sizes,
                memory_type_bits,
                ..Default::default()
            },
        )
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
xrhelper/src/lib.rs | Rust | use anyhow::{Context, Result, anyhow};
use glam::UVec2;
use itertools::Itertools;
use nalgebra::{Affine3, Matrix3, UnitQuaternion};
use openxr::{
ApplicationInfo, FrameStream, FrameWaiter, ReferenceSpaceType, ViewConfigurationType,
sys::Handle as _,
};
use std::{
collections::HashSet,
sync::{Arc, OnceLock},
};
use vulkano::{
Handle as _, VulkanObject,
device::{Device, DeviceExtensions, DeviceFeatures, Queue, QueueFlags},
image::Image,
instance::{Instance, InstanceExtensions as VkInstanceExtensions},
};
use vulkano::{
device::QueueCreateInfo,
image::{ImageCreateInfo, ImageUsage},
};
/// Process-wide Vulkan library handle, loaded lazily on first use.
static VULKAN_LIBRARY: OnceLock<Arc<vulkano::VulkanLibrary>> = OnceLock::new();
/// Load the Vulkan library once and return the shared handle.
/// Panics if the Vulkan loader cannot be found.
fn get_vulkan_library() -> &'static Arc<vulkano::VulkanLibrary> {
    VULKAN_LIBRARY.get_or_init(|| vulkano::VulkanLibrary::new().unwrap())
}
/// Keeps the Vulkan instance/device/queue alive for as long as the OpenXR
/// session created on top of them (stored as the session's user data).
struct VulkanKeepAlive {
    _instance: Arc<Instance>,
    _device: Arc<Device>,
    _queue: Arc<Queue>,
}
/// Bundles an OpenXR session together with the Vulkan objects and swapchains
/// needed to render into it.
pub struct OpenXr {
    device: Arc<Device>,
    queue: Arc<Queue>,
    vk_instance: Arc<Instance>,
    // NOTE(review): created in `new` — presumably a reference space; confirm.
    space: openxr::Space,
    /// Vulkano wrappers around the runtime-owned color swapchain images.
    swapchain_images: Vec<Arc<Image>>,
    swapchain: openxr::Swapchain<openxr::Vulkan>,
    /// Depth swapchain images, only when the runtime supports
    /// `XR_KHR_composition_layer_depth`.
    depth_swapchain_images: Option<Vec<Arc<Image>>>,
    depth_swapchain: Option<openxr::Swapchain<openxr::Vulkan>>,
    session: openxr::Session<openxr::Vulkan>,
    instance: openxr::Instance,
    /// Render size of a single eye.
    render_size: UVec2,
}
/// Convert an affine transform into an OpenXR pose (rotation + translation).
///
/// The 3x3 linear block is re-orthonormalized via `Rotation3::from_matrix`
/// before conversion to a quaternion, so slightly non-rigid inputs are tolerated.
pub fn affine_to_posef(t: Affine3<f32>) -> openxr::Posef {
    let homogeneous = t.to_homogeneous();
    // Upper-left 3x3 block: the rotational part of the transform.
    let linear: Matrix3<f32> = homogeneous
        .fixed_columns::<3>(0)
        .fixed_rows::<3>(0)
        .into();
    let rot = nalgebra::geometry::Rotation3::from_matrix(&linear);
    let quat = UnitQuaternion::from_rotation_matrix(&rot);
    let coords = &quat.as_ref().coords;
    // Fourth column of the (column-major) homogeneous matrix: translation.
    let translation: nalgebra::Vector3<f32> = [
        homogeneous.data.0[3][0],
        homogeneous.data.0[3][1],
        homogeneous.data.0[3][2],
    ]
    .into();
    openxr::Posef {
        orientation: openxr::Quaternionf {
            x: coords.x,
            y: coords.y,
            z: coords.z,
            w: coords.w,
        },
        position: openxr::Vector3f {
            x: translation.x,
            y: translation.y,
            z: translation.z,
        },
    }
}
/// Convert an OpenXR pose into a nalgebra (rotation, translation) pair.
pub fn posef_to_nalgebra(posef: openxr::Posef) -> (UnitQuaternion<f32>, nalgebra::Vector3<f32>) {
    let o = posef.orientation;
    let p = posef.position;
    // nalgebra's Quaternion::new takes (w, x, y, z); normalize defensively.
    let rotation =
        UnitQuaternion::new_normalize(nalgebra::Quaternion::new(o.w, o.x, o.y, o.z));
    let translation = nalgebra::Vector3::new(p.x, p.y, p.z);
    (rotation, translation)
}
/// Borrowed view of everything needed to record one frame of rendering.
pub struct RenderInfo<'a> {
    pub session: &'a openxr::Session<openxr::Vulkan>,
    pub swapchain: &'a mut openxr::Swapchain<openxr::Vulkan>,
    pub swapchain_images: &'a Vec<Arc<Image>>,
    /// Present only when the runtime supports depth composition layers.
    pub depth_swapchain: Option<&'a mut openxr::Swapchain<openxr::Vulkan>>,
    pub depth_swapchain_images: Option<&'a Vec<Arc<Image>>>,
    pub space: &'a openxr::Space,
    /// Render size of a single eye.
    pub render_size: UVec2,
}
impl OpenXr {
/// Create the Vulkan logical device and a graphics queue through the OpenXR
/// runtime, on the physical device the runtime selects for `xr_system`.
fn create_vk_device(
    xr_instance: &openxr::Instance,
    xr_system: openxr::SystemId,
    instance: &Arc<Instance>,
) -> Result<(Arc<Device>, Arc<Queue>)> {
    let vk_requirements = xr_instance.graphics_requirements::<openxr::Vulkan>(xr_system)?;
    // SAFETY: the raw physical-device handle comes straight from the OpenXR
    // runtime for this very Vulkan instance, so it is valid for `from_handle`.
    let physical_device = unsafe {
        let physical_device =
            xr_instance.vulkan_graphics_device(xr_system, instance.handle().as_raw() as _)?;
        vulkano::device::physical::PhysicalDevice::from_handle(
            instance,
            ash::vk::PhysicalDevice::from_raw(physical_device as _),
        )
    }?;
    let min_version = vulkano::Version::major_minor(
        vk_requirements.min_api_version_supported.major() as u32,
        vk_requirements.min_api_version_supported.minor() as u32,
    );
    if physical_device.api_version() < min_version {
        return Err(anyhow!(
            "Vulkan API version not supported {}",
            physical_device.api_version(),
        ));
    }
    let ext = DeviceExtensions {
        khr_swapchain: true,
        khr_multiview: true,
        ..Default::default()
    };
    // Keep the CString storage alive; `create_info` below borrows the raw pointers.
    let raw_extensions = ext
        .into_iter()
        .filter(|&(_, enabled)| enabled)
        .map(|(name, _)| std::ffi::CString::new(name).unwrap())
        .collect::<Vec<_>>();
    let raw_extensions = raw_extensions
        .iter()
        .map(|s| s.as_ptr())
        .collect::<Vec<_>>();
    let queue_family = physical_device
        .queue_family_properties()
        .iter()
        .position(|qf| qf.queue_flags.contains(QueueFlags::GRAPHICS))
        .context("No graphics queue found")?;
    log::debug!("queue family: {queue_family}");
    let queue_create_info = ash::vk::DeviceQueueCreateInfo::default()
        .queue_family_index(queue_family as u32)
        .queue_priorities(std::slice::from_ref(&1.0));
    // Multiview is enabled for single-pass stereo rendering.
    let mut multiview_features =
        ash::vk::PhysicalDeviceMultiviewFeatures::default().multiview(true);
    let features = ash::vk::PhysicalDeviceFeatures2 {
        p_next: &mut multiview_features as *mut _ as _,
        ..Default::default()
    };
    let mut create_info = ash::vk::DeviceCreateInfo::default()
        .enabled_extension_names(&raw_extensions)
        .queue_create_infos(std::slice::from_ref(&queue_create_info));
    create_info.p_next = &features as *const _ as _;
    // Vulkano-side mirror of `create_info`; the two must stay in sync so
    // vulkano's view of the device matches what was actually created.
    let vulkano_create_info = vulkano::device::DeviceCreateInfo {
        queue_create_infos: &[QueueCreateInfo {
            queue_family_index: queue_family as u32,
            queues: &[1.0],
            ..Default::default()
        }],
        enabled_extensions: &ext,
        enabled_features: &DeviceFeatures {
            multiview: true,
            ..Default::default()
        },
        physical_devices: &[&physical_device],
        ..Default::default()
    };
    // SAFETY: the device handle is freshly created by the runtime with the
    // parameters described by `create_info`/`vulkano_create_info`, and we
    // take ownership of it here.
    let (device, mut queues) = unsafe {
        vulkano::device::Device::from_handle(
            &physical_device,
            ash::vk::Device::from_raw(
                xr_instance
                    .create_vulkan_device(
                        xr_system,
                        get_instance_proc_addr,
                        physical_device.handle().as_raw() as _,
                        (&create_info) as *const _ as _,
                    )?
                    .map_err(ash::vk::Result::from_raw)? as _,
            ),
            &vulkano_create_info,
        )
    };
    Ok((device, queues.next().unwrap()))
}
/// Create the Vulkan instance through the OpenXR runtime, honoring the
/// runtime's supported Vulkan API version range and required extensions.
fn create_vk_instance(
    mut vk_instance_extensions: VkInstanceExtensions,
    xr_instance: &openxr::Instance,
    xr_system: openxr::SystemId,
) -> Result<Arc<Instance>> {
    let vk_requirements = xr_instance.graphics_requirements::<openxr::Vulkan>(xr_system)?;
    log::info!("Vulkan requirements: {vk_requirements:?}");
    let extensions = *get_vulkan_library().supported_extensions();
    vk_instance_extensions.khr_surface = true;
    // Fail early if any requested extension is unsupported by the loader.
    if let Some(unsupported) = vk_instance_extensions
        .difference(&extensions)
        .into_iter()
        .find_map(|(name, enabled)| enabled.then_some(name))
    {
        return Err(anyhow!(
            "Required instance extension {unsupported} not supported"
        ));
    }
    // Cap the requested API version at what the loader actually provides.
    let vk_version = vulkano::Version::major_minor(
        vk_requirements.max_api_version_supported.major() as u32,
        vk_requirements.max_api_version_supported.minor() as u32,
    );
    let vk_version = vk_version.min(get_vulkan_library().api_version());
    // NOTE(review): the validation layer is unconditionally enabled here and
    // in the raw create-info below — presumably a development setting;
    // consider gating it behind a debug flag.
    let vulkano_create_info = vulkano::instance::InstanceCreateInfo {
        max_api_version: Some(vk_version),
        enabled_extensions: &vk_instance_extensions,
        enabled_layers: &[
            "VK_LAYER_KHRONOS_validation",
            //"VK_LAYER_LUNARG_api_dump".to_owned(),
            //"VK_LAYER_LUNARG_gfxreconstruct".to_owned(),
        ],
        ..Default::default()
    };
    let extensions = vk_instance_extensions
        .into_iter()
        .filter(|(_, enabled)| *enabled)
        .map(|(ext, _)| std::ffi::CString::new(ext).unwrap())
        .collect::<Vec<_>>();
    let extensions = extensions
        .iter()
        .map(|s| s.as_c_str().as_ptr())
        .collect::<Vec<_>>();
    let application_info =
        ash::vk::ApplicationInfo::default().api_version(vk_version.try_into().unwrap());
    // NOTE(review): debug-only query of the runtime's required Vulkan
    // instance extensions. The XrResult is discarded, so `buf` may be stale
    // on failure, and `from_bytes_until_nul` below panics if no NUL byte is
    // present — consider checking the result and using log::debug! instead.
    let mut buf = [0u8; 1024];
    let mut buflen = 0;
    unsafe {
        (xr_instance
            .exts()
            .khr_vulkan_enable
            .unwrap()
            .get_vulkan_instance_extensions)(
            xr_instance.as_raw(),
            xr_system,
            buf.len() as _,
            &mut buflen,
            buf.as_mut_ptr() as _,
        )
    };
    println!(
        "{:?}",
        std::ffi::CStr::from_bytes_until_nul(&buf).unwrap().to_str()
    );
    // SAFETY: `create_vulkan_instance` receives a valid VkInstanceCreateInfo
    // and the loader's getInstanceProcAddr; we own the returned handle.
    let instance = unsafe {
        xr_instance.create_vulkan_instance(
            xr_system,
            get_instance_proc_addr,
            (&ash::vk::InstanceCreateInfo::default()
                .enabled_extension_names(&extensions)
                .application_info(&application_info)
                .enabled_layer_names(&[
                    c"VK_LAYER_KHRONOS_validation".as_ptr(),
                    //c"VK_LAYER_LUNARG_api_dump".as_ptr(),
                    //c"VK_LAYER_LUNARG_gfxreconstruct".as_ptr(),
                ])) as *const _ as _,
        )?
    }
    .map_err(ash::vk::Result::from_raw)?;
    let instance = ash::vk::Instance::from_raw(instance as _);
    // SAFETY: the handle was created with parameters matching `vulkano_create_info`.
    Ok(unsafe { Instance::from_handle(get_vulkan_library(), instance, &vulkano_create_info) })
}
/// render_size: Resolution of the swapchain image for a *single* eye.
pub fn new(
vk_instance_extensions: VkInstanceExtensions,
xr_instance_extensions: &openxr::ExtensionSet,
api_layers: &[&str],
app_name: &str,
app_version: u32,
) -> Result<(Self, FrameWaiter, FrameStream<openxr::Vulkan>)> {
let entry = unsafe { openxr::Entry::load()? };
let mut xr_instance_extensions = xr_instance_extensions.clone();
xr_instance_extensions.khr_vulkan_enable = true;
xr_instance_extensions.khr_vulkan_enable2 = true;
xr_instance_extensions.khr_convert_timespec_time = true;
let instance = entry.create_instance(
&ApplicationInfo {
application_name: app_name,
application_version: app_version,
api_version: openxr::Version::new(1, 0, 0),
engine_name: "engine",
engine_version: 0,
},
&xr_instance_extensions,
api_layers,
)?;
let system = instance.system(openxr::FormFactor::HEAD_MOUNTED_DISPLAY)?;
let blend_modes = instance.enumerate_environment_blend_modes(
system,
openxr::ViewConfigurationType::PRIMARY_STEREO,
)?;
log::info!("{:?}", blend_modes);
if !blend_modes.contains(&openxr::EnvironmentBlendMode::OPAQUE) {
return Err(anyhow!("OpenXR runtime doesn't support opaque blend mode"));
}
let vk_instance = Self::create_vk_instance(vk_instance_extensions, &instance, system)?;
let (device, queue) = Self::create_vk_device(&instance, system, &vk_instance)?;
let binding = openxr::sys::GraphicsBindingVulkanKHR {
ty: openxr::sys::GraphicsBindingVulkanKHR::TYPE,
next: std::ptr::null(),
instance: vk_instance.handle().as_raw() as _,
physical_device: device.physical_device().handle().as_raw() as _,
device: device.handle().as_raw() as _,
queue_family_index: queue.queue_family_index(),
queue_index: queue.queue_index(),
};
let info = openxr::sys::SessionCreateInfo {
ty: openxr::sys::SessionCreateInfo::TYPE,
next: &binding as *const _ as *const _,
create_flags: Default::default(),
system_id: system,
};
let mut out = openxr::sys::Session::NULL;
let ret = unsafe { (instance.fp().create_session)(instance.as_raw(), &info, &mut out) };
if ret.into_raw() < 0 {
log::warn!("Cannot create session {ret}");
return Err(ret.into());
}
let cfgs = instance
.enumerate_view_configuration_views(system, ViewConfigurationType::PRIMARY_STEREO)?;
if cfgs.len() != 2 {
return Err(anyhow!("Stereo view has unexpected number of configs"));
}
if cfgs[0].recommended_image_rect_height != cfgs[1].recommended_image_rect_height
|| cfgs[0].recommended_image_rect_width != cfgs[1].recommended_image_rect_width
{
log::warn!("Stereo view has different recommended image rect sizes");
}
log::info!(
"Recommended image rect sizes: {:?} {:?}",
cfgs[0].recommended_image_rect_width,
cfgs[0].recommended_image_rect_height
);
let width = cfgs[0]
.recommended_image_rect_width
.max(cfgs[1].recommended_image_rect_width);
let height = cfgs[0]
.recommended_image_rect_height
.max(cfgs[1].recommended_image_rect_height);
let sample_count = cfgs[0]
.recommended_swapchain_sample_count
.max(cfgs[1].recommended_swapchain_sample_count);
let (session, frame_waiter, frame_stream) = unsafe {
openxr::Session::<openxr::Vulkan>::from_raw(
instance.clone(),
out,
Box::new(VulkanKeepAlive {
// Since the XR session uses the Vulkan device, these
// vulkan objects must not be dropped until the session
// is destroyed.
_instance: vk_instance.clone(),
_device: device.clone(),
_queue: queue.clone(),
}),
)
};
let formats = session.enumerate_swapchain_formats()?;
let formats = formats
.into_iter()
.map(|f| vulkano::format::Format::try_from(ash::vk::Format::from_raw(f as _)).unwrap())
.collect::<HashSet<_>>();
log::info!("swapchain formats: {:?}", formats);
const PREFERRED_FORMATS: [vulkano::format::Format; 4] = [
vulkano::format::Format::R8G8B8A8_UNORM,
vulkano::format::Format::B8G8R8A8_UNORM,
vulkano::format::Format::R8G8B8A8_SRGB,
vulkano::format::Format::B8G8R8A8_SRGB,
];
let Some(format) = PREFERRED_FORMATS
.iter()
.find(|f| formats.contains(f))
.copied()
else {
return Err(anyhow!("No suitable format found for swapchain"));
};
let swapchain = session.create_swapchain(&openxr::SwapchainCreateInfo {
array_size: 2,
face_count: 1,
create_flags: Default::default(),
usage_flags: openxr::SwapchainUsageFlags::COLOR_ATTACHMENT
| openxr::SwapchainUsageFlags::TRANSFER_DST,
format: format as u32,
sample_count,
width,
height,
mip_count: 1,
})?;
log::debug!("created swapchain");
let swapchain_images: Vec<_> = swapchain
.enumerate_images()?
.into_iter()
.map(|handle| {
let handle = ash::vk::Image::from_raw(handle);
let raw_image = unsafe {
vulkano::image::sys::RawImage::from_handle_borrowed(
&device,
handle,
&ImageCreateInfo {
format,
array_layers: 2,
extent: [width, height, 1],
usage: ImageUsage::COLOR_ATTACHMENT | ImageUsage::TRANSFER_DST,
..Default::default()
},
)?
};
// SAFETY: OpenXR guarantees that the image is a swapchain image, thus has memory backing it.
let image = unsafe { raw_image.assume_bound() };
Ok::<_, anyhow::Error>(Arc::new(image))
})
.try_collect()?;
let (depth_swapchain, depth_swapchain_images) =
if instance.exts().khr_composition_layer_depth.is_some() {
const PREFERRED_DEPTH_FORMATS: [vulkano::format::Format; 5] = [
vulkano::format::Format::D32_SFLOAT,
vulkano::format::Format::D16_UNORM,
vulkano::format::Format::D32_SFLOAT_S8_UINT,
vulkano::format::Format::D24_UNORM_S8_UINT,
vulkano::format::Format::D16_UNORM_S8_UINT,
];
if let Some(format) = PREFERRED_DEPTH_FORMATS
.iter()
.find(|f| formats.contains(f))
.copied()
{
let depth_swapchain =
session.create_swapchain(&openxr::SwapchainCreateInfo {
array_size: 2,
face_count: 1,
create_flags: Default::default(),
usage_flags: openxr::SwapchainUsageFlags::DEPTH_STENCIL_ATTACHMENT,
format: format as u32,
sample_count,
width,
height,
mip_count: 1,
})?;
log::debug!("created depth swapchain");
let depth_swapchain_images: Vec<_> = depth_swapchain
.enumerate_images()?
.into_iter()
.map(|handle| {
let handle = ash::vk::Image::from_raw(handle);
let raw_image = unsafe {
vulkano::image::sys::RawImage::from_handle_borrowed(
&device,
handle,
&ImageCreateInfo {
format,
array_layers: 2,
extent: [width, height, 1],
usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT,
..Default::default()
},
)?
};
// SAFETY: OpenXR guarantees that the image is a swapchain image, thus has memory backing it.
let image = unsafe { raw_image.assume_bound() };
Ok::<_, anyhow::Error>(Arc::new(image))
})
.try_collect()?;
assert_eq!(depth_swapchain_images.len(), swapchain_images.len());
(Some(depth_swapchain), Some(depth_swapchain_images))
} else {
log::warn!("No suitable depth format found for swapchain");
(None, None)
}
} else {
(None, None)
};
log::debug!("got swapchain images");
let space =
session.create_reference_space(ReferenceSpaceType::STAGE, openxr::Posef::IDENTITY)?;
log::debug!("created actions");
Ok((
Self {
instance,
session,
swapchain,
swapchain_images,
depth_swapchain,
depth_swapchain_images,
space,
vk_instance,
device,
queue,
render_size: UVec2::new(width, height),
},
frame_waiter,
frame_stream,
))
}
/// Borrow everything one frame's render pass needs in a single bundle.
///
/// The returned [`RenderInfo`] holds a mutable borrow of the color
/// swapchain (and, when present, the depth swapchain) plus shared borrows
/// of the session, swapchain images, and reference space, so it must be
/// dropped before `self` can be borrowed again.
pub fn render_info(&mut self) -> RenderInfo {
    RenderInfo {
        session: &self.session,
        swapchain: &mut self.swapchain,
        swapchain_images: &self.swapchain_images,
        depth_swapchain: self.depth_swapchain.as_mut(),
        depth_swapchain_images: self.depth_swapchain_images.as_ref(),
        space: &self.space,
        render_size: self.render_size,
    }
}
/// Clones of the Vulkan device and queue bound to the XR session.
pub fn vk_device(&self) -> (Arc<Device>, Arc<Queue>) {
    (self.device.clone(), self.queue.clone())
}
/// The Vulkan instance the session was created against.
pub fn vk_instance(&self) -> Arc<Instance> {
    self.vk_instance.clone()
}
/// The underlying OpenXR instance.
pub fn xr_instance(&self) -> &openxr::Instance {
    &self.instance
}
/// The OpenXR session (Vulkan graphics binding).
pub fn xr_session(&self) -> &openxr::Session<openxr::Vulkan> {
    &self.session
}
/// Render target size in pixels, shared by both eyes (max of the two
/// per-view recommended sizes chosen at creation).
pub fn render_size(&self) -> UVec2 {
    self.render_size
}
}
/// `vkGetInstanceProcAddr` shim that resolves Vulkan entry points through
/// our own Vulkan library handle (presumably handed to OpenXR during
/// session/binding setup — confirm at the call site).
///
/// # Safety
/// Caller must uphold the `vkGetInstanceProcAddr` contract: `name` is a
/// valid NUL-terminated C string and `instance` is a valid (or null)
/// `VkInstance` handle.
unsafe extern "system" fn get_instance_proc_addr(
    instance: openxr::sys::platform::VkInstance,
    name: *const std::ffi::c_char,
) -> Option<unsafe extern "system" fn()> {
    let instance = ash::vk::Instance::from_raw(instance as _);
    let library = get_vulkan_library();
    unsafe { library.get_instance_proc_addr(instance, name) }
}
/// `anyhow`-style context helpers for raw OpenXR result codes.
///
/// Mirrors `anyhow::Context`: `SUCCESS` maps to `Ok(())`, while any other
/// code becomes an `anyhow` error annotated with the supplied context.
pub trait XrContext {
    /// Convert `self` into a `Result`, attaching `ctx` on failure.
    fn context<C>(self, ctx: C) -> anyhow::Result<()>
    where
        C: std::fmt::Display + Send + Sync + 'static;
    /// Like [`XrContext::context`], but the context is built lazily from
    /// the failing result value.
    fn with_context<C>(self, f: impl FnOnce(&Self) -> C) -> anyhow::Result<()>
    where
        C: std::fmt::Display + Send + Sync + 'static;
}
/// Exactly `SUCCESS` is treated as Ok; every other code (including other
/// non-negative "qualified success" values) is surfaced as an error.
impl XrContext for openxr::sys::Result {
    fn context<C>(self, ctx: C) -> anyhow::Result<()>
    where
        C: std::fmt::Display + Send + Sync + 'static,
    {
        if self != openxr::sys::Result::SUCCESS {
            return Err(self).context(ctx);
        }
        Ok(())
    }
    fn with_context<C>(self, f: impl FnOnce(&Self) -> C) -> anyhow::Result<()>
    where
        C: std::fmt::Display + Send + Sync + 'static,
    {
        if self != openxr::sys::Result::SUCCESS {
            // `openxr::sys::Result` is `Copy`, so keep a copy for the lazy
            // context closure while `self` is moved into the error.
            let err = self;
            return Err(err).with_context(|| f(&err));
        }
        Ok(())
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
xtask/src/main.rs | Rust | use serde::Serialize;
use std::path::PathBuf;
/// One OpenXR instance extension advertised by the API-layer manifest.
#[derive(Serialize)]
struct Extension {
    name: &'static str,
    extension_version: &'static str,
}
/// The `api_layer` object of an OpenXR API-layer manifest.
#[derive(Serialize)]
struct ApiLayer {
    name: &'static str,
    // Absolute path to the installed layer shared library.
    library_path: PathBuf,
    api_version: &'static str,
    implementation_version: &'static str,
    description: &'static str,
    instance_extensions: &'static [Extension],
}
/// Top-level OpenXR API-layer manifest, serialized to JSON for the loader.
#[derive(Serialize)]
struct Manifest {
    file_format_version: &'static str,
    api_layer: ApiLayer,
}
/// `cargo xtask install`: build the layer in release mode, then install
/// the shared library and its OpenXR API-layer manifest under `$HOME/.local`.
fn main() {
    let cmd = std::env::args().nth(1).expect("Expect one verb: `install`");
    if cmd == "install" {
        // Work from the workspace root (parent of the xtask crate).
        std::env::set_current_dir(
            std::path::Path::new(&std::env::var_os("CARGO_MANIFEST_DIR").unwrap()).join(".."),
        )
        .unwrap();
        // Build first; forward cargo's exit code if the build fails.
        let mut cmd = std::process::Command::new("cargo");
        cmd.args(["build", "--release"]);
        let status = cmd.status().expect("Failed to run cargo build");
        if !status.success() {
            std::process::exit(status.code().unwrap_or(1));
        }
        let home = std::env::var_os("HOME").unwrap();
        let home = std::path::Path::new(&home);
        let local_share = home.join(".local").join("share");
        // The OpenXR loader discovers explicit API layers in this directory.
        let api_layer_dir = local_share
            .join("openxr")
            .join("1")
            .join("api_layers")
            .join("explicit.d");
        let libdir = home.join(".local").join("lib").join("xr_passthrough_layer");
        // Fix: use create_dir_all for BOTH directories. The previous
        // create_dir(&libdir) failed with NotFound when an intermediate
        // directory (e.g. ~/.local/lib) did not exist; create_dir_all also
        // succeeds when the directory is already present, making the old
        // AlreadyExists special-casing unnecessary.
        if let Err(e) = std::fs::create_dir_all(&libdir) {
            panic!("Failed to create libdir: {}", e);
        }
        if let Err(e) = std::fs::create_dir_all(&api_layer_dir) {
            panic!("Failed to create api_layer_dir: {}", e);
        }
        std::fs::copy(
            std::path::Path::new("target")
                .join("release")
                .join("libxr_passthrough_layer.so"),
            libdir.join("liblayer.so"),
        )
        .unwrap();
        let manifest = Manifest {
            file_format_version: "1.0.0",
            api_layer: ApiLayer {
                name: "XR_APILAYER_YX_passthrough",
                library_path: libdir.join("liblayer.so"),
                api_version: "1.0",
                implementation_version: "1",
                description: "Passthrough",
                instance_extensions: &[Extension {
                    name: "XR_HTC_passthrough",
                    extension_version: "1",
                }],
            },
        };
        // truncate + create: reinstalling overwrites any stale manifest.
        let json_file = std::fs::OpenOptions::new()
            .truncate(true)
            .write(true)
            .create(true)
            .open(api_layer_dir.join("XR_APILAYER_YX_passthrough.json"))
            .unwrap();
        serde_json::to_writer_pretty(json_file, &manifest).unwrap();
    } else {
        panic!("Unknown command: {}", cmd);
    }
}
| yshui/xr_passthrough_layer | 6 | Rust | yshui | Yuxuan Shui | CodeWeavers | |
pg_rrf--0.0.1--0.0.2.sql | SQL | /*
Upgrade script from pg_rrf v0.0.1 to v0.0.2.
*/
-- Add rrf_fuse (SRF)
-- Fuses two ranked id lists with Reciprocal Rank Fusion: each id scores the
-- sum of 1/(k + rank) over the lists it appears in (rank is the 1-based
-- position; k defaults to the conventional 60).
CREATE FUNCTION "rrf_fuse"(
    "ids_a" bigint[],
    "ids_b" bigint[],
    "k" bigint DEFAULT 60
) RETURNS TABLE (
    "id" bigint,
    "score" double precision,
    "rank_a" integer, -- 1-based rank in ids_a, NULL if absent
    "rank_b" integer  -- 1-based rank in ids_b, NULL if absent
)
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf_fuse_wrapper';
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
pg_rrf--0.0.2--0.0.3.sql | SQL | /*
Upgrade script from pg_rrf v0.0.2 to v0.0.3.
*/
-- Add rrfn
-- Array form of RRF: sums 1/(k + rank) over all positive, non-NULL ranks;
-- returns 0 when nothing contributes.
CREATE FUNCTION "rrfn"(
    "ranks" bigint[],
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrfn_wrapper';
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
pg_rrf--0.0.2.sql | SQL | /*
This file defines extension objects for pg_rrf v0.0.2.
*/
-- rrf
-- Two-way Reciprocal Rank Fusion: 1/(k + rank_a) + 1/(k + rank_b);
-- returns NULL when neither rank contributes (NULL or non-positive).
CREATE FUNCTION "rrf"(
    "rank_a" bigint,
    "rank_b" bigint,
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf_wrapper';
-- rrf3
-- Three-way variant of rrf; same NULL semantics.
CREATE FUNCTION "rrf3"(
    "rank_a" bigint,
    "rank_b" bigint,
    "rank_c" bigint,
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf3_wrapper';
-- rrf_fuse (SRF)
-- Fuses two ranked id lists: one row per distinct id with the fused score
-- and the (best) 1-based rank in each list, NULL when absent.
CREATE FUNCTION "rrf_fuse"(
    "ids_a" bigint[],
    "ids_b" bigint[],
    "k" bigint DEFAULT 60
) RETURNS TABLE (
    "id" bigint,
    "score" double precision,
    "rank_a" integer,
    "rank_b" integer
)
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf_fuse_wrapper';
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
pg_rrf--0.0.3.sql | SQL | /*
This file defines extension objects for pg_rrf v0.0.3.
*/
-- rrfn
-- Array form of RRF: sums 1/(k + rank) over all positive, non-NULL ranks;
-- returns 0 when nothing contributes.
CREATE FUNCTION "rrfn"(
    "ranks" bigint[],
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrfn_wrapper';
-- rrf
-- Two-way Reciprocal Rank Fusion; NULL when neither rank contributes.
CREATE FUNCTION "rrf"(
    "rank_a" bigint,
    "rank_b" bigint,
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf_wrapper';
-- rrf3
-- Three-way variant of rrf; same NULL semantics.
CREATE FUNCTION "rrf3"(
    "rank_a" bigint,
    "rank_b" bigint,
    "rank_c" bigint,
    "k" bigint
) RETURNS double precision
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf3_wrapper';
-- rrf_fuse (SRF)
-- Fuses two ranked id lists: one row per distinct id with the fused score
-- and the (best) 1-based rank in each list, NULL when absent.
CREATE FUNCTION "rrf_fuse"(
    "ids_a" bigint[],
    "ids_b" bigint[],
    "k" bigint DEFAULT 60
) RETURNS TABLE (
    "id" bigint,
    "score" double precision,
    "rank_a" integer,
    "rank_b" integer
)
LANGUAGE c
AS 'MODULE_PATHNAME', 'rrf_fuse_wrapper';
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
src/bin/pgrx_embed.rs | Rust | ::pgrx::pgrx_embed!();
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
src/lib.rs | Rust | use pgrx::prelude::*;
use std::collections::{HashMap, HashSet};
::pgrx::pg_module_magic!(name, version);
/// Sum the reciprocal-rank contributions `1 / (k + rank)` over every
/// positive, non-`None` rank, returning `(sum, ranks_used)`.
///
/// Raises a Postgres error when `k` is not positive.
fn rrf_score(ranks: &[Option<i64>], k: i64) -> (f64, usize) {
    if k <= 0 {
        error!("rrf k must be positive");
    }
    let kf = k as f64;
    ranks
        .iter()
        .flatten()
        .filter(|&&rank| rank > 0)
        .fold((0.0f64, 0usize), |(sum, used), &rank| {
            (sum + 1.0 / (kf + rank as f64), used + 1)
        })
}
/// RRF score over an arbitrary number of ranks; a SQL NULL array is
/// treated as empty. Returns 0.0 when no rank contributed (all ranks
/// NULL or non-positive).
#[pg_extern]
fn rrfn(ranks: Option<Vec<Option<i64>>>, k: i64) -> f64 {
    let ranks = ranks.unwrap_or_default();
    match rrf_score(&ranks, k) {
        (_, 0) => 0.0,
        (sum, _) => sum,
    }
}
/// Two-way RRF; SQL NULL when neither rank contributes.
#[pg_extern]
fn rrf(rank_a: Option<i64>, rank_b: Option<i64>, k: i64) -> Option<f64> {
    let score = rrfn(Some(vec![rank_a, rank_b]), k);
    // A zero score means no rank contributed; surface that as SQL NULL.
    Some(score).filter(|&s| s != 0.0)
}
/// Three-way RRF; SQL NULL when no rank contributes.
#[pg_extern]
fn rrf3(
    rank_a: Option<i64>,
    rank_b: Option<i64>,
    rank_c: Option<i64>,
    k: i64,
) -> Option<f64> {
    let score = rrfn(Some(vec![rank_a, rank_b, rank_c]), k);
    // A zero score means no rank contributed; surface that as SQL NULL.
    Some(score).filter(|&s| s != 0.0)
}
/// Map each id in `ids` to its best (smallest) 1-based rank.
/// `NULL` elements are skipped; duplicate ids keep their earliest position.
fn best_ranks(ids: Option<Vec<Option<i64>>>) -> HashMap<i64, i32> {
    let mut ranks = HashMap::<i64, i32>::new();
    if let Some(ids) = ids {
        for (idx, id) in ids.into_iter().enumerate() {
            if let Some(id) = id {
                let rank = (idx + 1) as i32;
                ranks
                    .entry(id)
                    .and_modify(|r| {
                        if rank < *r {
                            *r = rank;
                        }
                    })
                    .or_insert(rank);
            }
        }
    }
    ranks
}
/// Fuse two ranked id lists with Reciprocal Rank Fusion.
///
/// Returns one row per distinct id with the fused score and the (best)
/// 1-based rank in each input list (`NULL` when the id is absent from a
/// list). Rows are emitted in ascending id order so output is
/// deterministic (previously it followed HashSet iteration order, which
/// SQL callers could not rely on anyway).
#[pg_extern]
fn rrf_fuse(
    ids_a: Option<Vec<Option<i64>>>,
    ids_b: Option<Vec<Option<i64>>>,
    k: default!(i64, 60),
) -> TableIterator<
    'static,
    (
        name!(id, i64),
        name!(score, f64),
        name!(rank_a, Option<i32>),
        name!(rank_b, Option<i32>),
    ),
> {
    // The two lists get identical treatment; share the logic instead of
    // duplicating the rank-map construction.
    let ranks_a = best_ranks(ids_a);
    let ranks_b = best_ranks(ids_b);
    let mut ids = HashSet::<i64>::new();
    ids.extend(ranks_a.keys().copied());
    ids.extend(ranks_b.keys().copied());
    let mut ids: Vec<i64> = ids.into_iter().collect();
    ids.sort_unstable();
    let mut rows = Vec::with_capacity(ids.len());
    for id in ids {
        let rank_a = ranks_a.get(&id).copied();
        let rank_b = ranks_b.get(&id).copied();
        let score = rrfn(
            Some(vec![rank_a.map(|r| r as i64), rank_b.map(|r| r as i64)]),
            k,
        );
        rows.push((id, score, rank_a, rank_b));
    }
    TableIterator::new(rows.into_iter())
}
/// In-database unit tests, executed inside a Postgres backend by
/// `cargo pgrx test`.
#[cfg(any(test, feature = "pg_test"))]
#[pg_schema]
mod tests {
    use super::*;
    use std::collections::HashMap;
    #[pg_test]
    fn test_rrf_basic() {
        let score = rrf(Some(1), Some(2), 60).unwrap();
        let expected = 1.0 / 61.0 + 1.0 / 62.0;
        assert!((score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrf_nulls() {
        // A NULL rank contributes nothing; all-NULL yields SQL NULL.
        let score = rrf(Some(1), None, 60).unwrap();
        let expected = 1.0 / 61.0;
        assert!((score - expected).abs() < 1e-12);
        let score = rrf(None, None, 60);
        assert!(score.is_none());
    }
    #[pg_test]
    fn test_rrf_ignores_non_positive_ranks() {
        // Ranks are 1-based; 0 and negatives are treated like NULL.
        let score = rrf(Some(0), Some(2), 60).unwrap();
        let expected = 1.0 / 62.0;
        assert!((score - expected).abs() < 1e-12);
        let score = rrf(Some(-1), None, 60);
        assert!(score.is_none());
    }
    #[pg_test]
    #[should_panic(expected = "rrf k must be positive")]
    fn test_rrf_invalid_k() {
        let _ = rrf(Some(1), Some(2), 0);
    }
    #[pg_test]
    fn test_rrf3_basic() {
        let score = rrf3(Some(1), Some(2), Some(3), 60).unwrap();
        let expected = 1.0 / 61.0 + 1.0 / 62.0 + 1.0 / 63.0;
        assert!((score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrfn_matches_rrf() {
        let score = rrfn(Some(vec![Some(1), Some(2)]), 60);
        let expected = rrf(Some(1), Some(2), 60).unwrap();
        assert!((score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrfn_matches_rrf3() {
        let score = rrfn(Some(vec![Some(1), Some(2), Some(3)]), 60);
        let expected = rrf3(Some(1), Some(2), Some(3), 60).unwrap();
        assert!((score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrfn_ignores_nulls_and_non_positive() {
        let score = rrfn(Some(vec![Some(1), None, Some(0), Some(-1), Some(3)]), 60);
        let expected = 1.0 / 61.0 + 1.0 / 63.0;
        assert!((score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrfn_empty_array_returns_zero() {
        let score = rrfn(Some(vec![]), 60);
        assert!((score - 0.0).abs() < 1e-12);
    }
    #[pg_test]
    #[should_panic(expected = "rrf k must be positive")]
    fn test_rrfn_invalid_k() {
        let _ = rrfn(Some(vec![Some(1)]), 0);
    }
    // rrf_fuse emits rows in unspecified order; index them by id so the
    // assertions below are order-independent.
    fn rows_to_map(
        rows: Vec<(i64, f64, Option<i32>, Option<i32>)>,
    ) -> HashMap<i64, (f64, Option<i32>, Option<i32>)> {
        rows.into_iter()
            .map(|(id, score, rank_a, rank_b)| (id, (score, rank_a, rank_b)))
            .collect()
    }
    #[pg_test]
    fn test_rrf_fuse_overlap() {
        let rows: Vec<(i64, f64, Option<i32>, Option<i32>)> =
            rrf_fuse(
                Some(vec![Some(10), Some(20), Some(30)]),
                Some(vec![Some(20), Some(40)]),
                60,
            )
            .collect();
        let map = rows_to_map(rows);
        assert_eq!(map.len(), 4);
        let (score, rank_a, rank_b) = map.get(&20).unwrap();
        assert_eq!(*rank_a, Some(2));
        assert_eq!(*rank_b, Some(1));
        let expected = rrf(Some(2), Some(1), 60).unwrap();
        assert!((*score - expected).abs() < 1e-12);
    }
    #[pg_test]
    fn test_rrf_fuse_disjoint_and_null_list() {
        let rows: Vec<(i64, f64, Option<i32>, Option<i32>)> =
            rrf_fuse(None, Some(vec![Some(1), Some(2)]), 60).collect();
        let map = rows_to_map(rows);
        assert_eq!(map.len(), 2);
        let (_, rank_a, rank_b) = map.get(&1).unwrap();
        assert_eq!(*rank_a, None);
        assert_eq!(*rank_b, Some(1));
    }
    #[pg_test]
    fn test_rrf_fuse_duplicates_take_best_rank() {
        let rows: Vec<(i64, f64, Option<i32>, Option<i32>)> =
            rrf_fuse(
                Some(vec![Some(10), Some(20), Some(10)]),
                Some(vec![Some(10)]),
                60,
            )
            .collect();
        let map = rows_to_map(rows);
        let (_, rank_a, rank_b) = map.get(&10).unwrap();
        assert_eq!(*rank_a, Some(1));
        assert_eq!(*rank_b, Some(1));
    }
    #[pg_test]
    #[should_panic(expected = "rrf k must be positive")]
    fn test_rrf_fuse_invalid_k() {
        let _: Vec<(i64, f64, Option<i32>, Option<i32>)> =
            rrf_fuse(Some(vec![Some(1)]), None, 0).collect();
    }
}
/// This module is required by `cargo pgrx test` invocations.
/// It must be visible at the root of your extension crate.
#[cfg(test)]
pub mod pg_test {
    pub fn setup(_options: Vec<&str>) {
        // perform one-off initialization when the pg_test framework starts
    }
    #[must_use]
    pub fn postgresql_conf_options() -> Vec<&'static str> {
        // return any postgresql.conf settings that are required for your tests
        vec![]
    }
}
| yuiseki/pg_rrf | 1 | RFF (Reciprocal Rank Fusion) Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
src/bin/pgrx_embed.rs | Rust | ::pgrx::pgrx_embed!();
| yuiseki/pg_s2 | 0 | S2 Geometry Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
src/lib.rs | Rust | use pgrx::callconv::{ArgAbi, BoxRet};
use pgrx::datum::Datum;
use pgrx::guc::{GucContext, GucFlags, GucRegistry, GucSetting};
use pgrx::iter::SetOfIterator;
use pgrx::pg_sys::Point;
use pgrx::pg_sys::BOX;
use pgrx::pg_sys::Oid;
use pgrx::pgrx_sql_entity_graph::metadata::{
ArgumentError, Returns, ReturnsError, SqlMapping, SqlTranslatable,
};
use pgrx::prelude::*;
use pgrx::{rust_regtypein, StringInfo};
use s2::cap::Cap;
use s2::cell::Cell;
use s2::cellid::{CellID, NUM_FACES, POS_BITS};
use s2::latlng::LatLng;
use s2::point::Point as S2Point;
use s2::region::RegionCoverer;
use s2::rect::Rect;
use s2::s1::{Angle, Rad};
use std::ffi::CStr;
::pgrx::pg_module_magic!(name, version);
// Flipping the sign bit maps unsigned 64-bit cell ids onto i64 while
// preserving their numeric order, so `s2cellid` (stored LIKE int8) sorts
// and range-scans the same way raw u64 cell ids do.
const S2CELLID_ORDER_MASK: u64 = 0x8000_0000_0000_0000;
// A valid cell id's lowest set bit sits at an even bit position; this mask
// has exactly those positions set (see `s2_cellid_is_valid_raw`).
const S2CELLID_LSB_MASK: u64 = 0x1555_5555_5555_5555;
// Default `max_cells` for the region-covering functions.
const DEFAULT_MAX_CELLS: i32 = 8;
// Mean Earth radius in meters, used for meter <-> radian conversions.
const EARTH_RADIUS_M_DEFAULT: f64 = 6_371_008.8;
// Session-configurable defaults, registered as GUCs in `_PG_init`.
static DEFAULT_LEVEL: GucSetting<i32> = GucSetting::<i32>::new(14);
static EARTH_RADIUS_M: GucSetting<f64> = GucSetting::<f64>::new(EARTH_RADIUS_M_DEFAULT);
// GUC names and descriptions must be C strings. Every byte-string literal
// below ends in `\0`, which is what keeps `from_bytes_with_nul_unchecked`
// sound here.
static DEFAULT_LEVEL_NAME: &CStr =
    unsafe { CStr::from_bytes_with_nul_unchecked(b"pg_s2.default_level\0") };
static DEFAULT_LEVEL_SHORT: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(b"Default S2 level for s2_lat_lng_to_cell(point).\0")
};
static DEFAULT_LEVEL_DESC: &CStr =
    unsafe { CStr::from_bytes_with_nul_unchecked(b"Used when level is not explicitly provided.\0") };
static EARTH_RADIUS_M_NAME: &CStr =
    unsafe { CStr::from_bytes_with_nul_unchecked(b"pg_s2.earth_radius_m\0") };
static EARTH_RADIUS_M_SHORT: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(b"Earth radius in meters for distance and cap conversions.\0")
};
static EARTH_RADIUS_M_DESC: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(
        b"Used to convert between meters and radians in s2_great_circle_distance and s2_cover_cap.\0",
    )
};
static DEFAULT_COVER_LEVEL: GucSetting<i32> = GucSetting::<i32>::new(12);
static DEFAULT_COVER_LEVEL_NAME: &CStr =
    unsafe { CStr::from_bytes_with_nul_unchecked(b"pg_s2.default_cover_level\0") };
static DEFAULT_COVER_LEVEL_SHORT: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(b"Default S2 level for s2_cover_rect(point).\0")
};
static DEFAULT_COVER_LEVEL_DESC: &CStr = unsafe {
    CStr::from_bytes_with_nul_unchecked(b"Used when cover level is not explicitly provided.\0")
};
/// Shared-library load hook: registers the `pg_s2.*` GUCs (all Userset,
/// levels clamped to the S2 range 0..=30).
#[pg_guard]
pub extern "C-unwind" fn _PG_init() {
    GucRegistry::define_int_guc(
        DEFAULT_LEVEL_NAME,
        DEFAULT_LEVEL_SHORT,
        DEFAULT_LEVEL_DESC,
        &DEFAULT_LEVEL,
        0,
        30,
        GucContext::Userset,
        GucFlags::default(),
    );
    GucRegistry::define_float_guc(
        EARTH_RADIUS_M_NAME,
        EARTH_RADIUS_M_SHORT,
        EARTH_RADIUS_M_DESC,
        &EARTH_RADIUS_M,
        0.0,
        1.0e9,
        GucContext::Userset,
        GucFlags::default(),
    );
    GucRegistry::define_int_guc(
        DEFAULT_COVER_LEVEL_NAME,
        DEFAULT_COVER_LEVEL_SHORT,
        DEFAULT_COVER_LEVEL_DESC,
        &DEFAULT_COVER_LEVEL,
        0,
        30,
        GucContext::Userset,
        GucFlags::default(),
    );
}
/// Raw u64 cell id -> the order-preserving i64 stored in `s2cellid`.
#[inline]
fn u64_to_i64_norm(cellid: u64) -> i64 {
    (cellid ^ S2CELLID_ORDER_MASK) as i64
}
/// Inverse of [`u64_to_i64_norm`].
#[inline]
fn i64_norm_to_u64(norm: i64) -> u64 {
    (norm as u64) ^ S2CELLID_ORDER_MASK
}
/// Structural validity check on a raw cell id: the face must be below
/// NUM_FACES and the lowest set bit must sit at an even bit position.
#[inline]
fn s2_cellid_is_valid_raw(raw: u64) -> bool {
    let face = (raw >> POS_BITS) as u8;
    if face >= NUM_FACES {
        return false;
    }
    // `x & x.wrapping_neg()` isolates the lowest set bit of `x`.
    let lsb = raw & raw.wrapping_neg();
    (lsb & S2CELLID_LSB_MASK) != 0
}
/// The `s2cellid` SQL type: an S2 cell id stored as an order-preserving
/// `i64` (the raw u64 id with its sign bit flipped), so int8 comparison,
/// btree ordering, and hashing all agree with u64 cell-id order.
#[repr(transparent)]
#[derive(
    Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, PostgresEq, PostgresOrd, PostgresHash,
)]
pub struct S2CellId {
    // Normalized representation; see S2CELLID_ORDER_MASK.
    value: i64,
}
impl S2CellId {
    /// Wrap a raw u64 cell id (normalizes it for int8 ordering).
    #[inline]
    fn from_u64(cellid: u64) -> Self {
        Self {
            value: u64_to_i64_norm(cellid),
        }
    }
    /// Recover the raw u64 cell id.
    #[inline]
    fn to_u64(self) -> u64 {
        i64_norm_to_u64(self.value)
    }
}
/// Displays as the cell's S2 token (the compact canonical text form).
impl std::fmt::Display for S2CellId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let token = CellID(self.to_u64()).to_token();
        write!(f, "{token}")
    }
}
// pgrx glue: map the Rust type onto the SQL name `s2cellid` and pass the
// i64 value directly as a by-value Datum (matches `LIKE = int8` storage).
unsafe impl SqlTranslatable for S2CellId {
    fn argument_sql() -> Result<SqlMapping, ArgumentError> {
        Ok(SqlMapping::As("s2cellid".into()))
    }
    fn return_sql() -> Result<Returns, ReturnsError> {
        Ok(Returns::One(SqlMapping::As("s2cellid".into())))
    }
}
impl FromDatum for S2CellId {
    unsafe fn from_polymorphic_datum(datum: pg_sys::Datum, is_null: bool, _: Oid) -> Option<Self>
    where
        Self: Sized,
    {
        if is_null {
            None
        } else {
            Some(S2CellId {
                value: datum.value() as _,
            })
        }
    }
}
impl IntoDatum for S2CellId {
    fn into_datum(self) -> Option<pg_sys::Datum> {
        Some(pg_sys::Datum::from(self.value))
    }
    fn type_oid() -> Oid {
        rust_regtypein::<Self>()
    }
}
unsafe impl<'fcx> ArgAbi<'fcx> for S2CellId
where
    Self: 'fcx,
{
    unsafe fn unbox_arg_unchecked(arg: ::pgrx::callconv::Arg<'_, 'fcx>) -> Self {
        arg.unbox_arg_using_from_datum().unwrap()
    }
}
unsafe impl BoxRet for S2CellId {
    unsafe fn box_into<'fcx>(self, fcinfo: &mut pgrx::callconv::FcInfo<'fcx>) -> Datum<'fcx> {
        fcinfo.return_raw_datum(pg_sys::Datum::from(self.value))
    }
}
/// `s2cellid` type input function: parses an S2 token string.
#[pg_extern(immutable, parallel_safe, requires = ["shell_type"])]
fn s2cellid_in(input: &CStr) -> S2CellId {
    let token = input
        .to_str()
        .unwrap_or_else(|_| error!("invalid s2cellid token"))
;
    let cellid = CellID::from_token(token);
    if !s2_cellid_is_valid_raw(cellid.0) {
        error!("invalid s2cellid token");
    }
    S2CellId::from_u64(cellid.0)
}
/// `s2cellid` type output function: formats as an S2 token.
#[pg_extern(immutable, parallel_safe, requires = ["shell_type"])]
fn s2cellid_out(value: S2CellId) -> &'static CStr {
    let mut s = StringInfo::new();
    s.push_str(&value.to_string());
    // The StringInfo buffer is handed to Postgres via `leak_cstr`;
    // NOTE(review): presumably freed with the current memory context —
    // confirm against the pgrx `StringInfo::leak_cstr` contract.
    unsafe { s.leak_cstr() }
}
// Two-phase type creation: declare a shell type first so the in/out
// functions above can reference it, then attach the concrete definition.
extension_sql!(
    r#"
CREATE TYPE s2cellid;
"#,
    name = "shell_type",
    bootstrap
);
extension_sql!(
    r#"
CREATE TYPE s2cellid (
INPUT = s2cellid_in,
OUTPUT = s2cellid_out,
LIKE = int8
);
"#,
    name = "concrete_type",
    creates = [Type(S2CellId)],
    requires = ["shell_type", s2cellid_in, s2cellid_out],
);
// Convenience casts: text <-> token form, bigint <-> normalized storage.
extension_sql!(
    r#"
CREATE CAST (s2cellid AS text) WITH FUNCTION s2_cell_to_token(s2cellid);
CREATE CAST (text AS s2cellid) WITH FUNCTION s2_cell_from_token(text);
CREATE CAST (s2cellid AS bigint) WITH FUNCTION s2_cell_to_bigint(s2cellid);
CREATE CAST (bigint AS s2cellid) WITH FUNCTION s2_cell_from_bigint(bigint);
"#,
    name = "s2cellid_casts",
    requires = [
        "concrete_type",
        s2_cell_to_token,
        s2_cell_from_token,
        s2_cell_to_bigint,
        s2_cell_from_bigint,
    ],
);
/// Version of this extension crate, baked in at compile time.
#[pg_extern]
fn s2_get_extension_version() -> String {
    env!("CARGO_PKG_VERSION").to_string()
}
/// Parse an S2 token into an `s2cellid`; errors on invalid tokens.
#[pg_extern(immutable)]
fn s2_cell_from_token(token: &str) -> S2CellId {
    let cellid = CellID::from_token(token);
    if !s2_cellid_is_valid_raw(cellid.0) {
        error!("invalid s2cellid token");
    }
    S2CellId::from_u64(cellid.0)
}
/// Format an `s2cellid` as its S2 token; errors on invalid cell ids.
#[pg_extern(immutable)]
fn s2_cell_to_token(cell: S2CellId) -> String {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    CellID(raw).to_token()
}
/// The order-preserving int8 representation (see S2CELLID_ORDER_MASK);
/// NOT the raw u64 cell id.
#[pg_extern(immutable)]
fn s2_cell_to_bigint(cell: S2CellId) -> i64 {
    cell.value
}
/// Reinterpret an int8 produced by `s2_cell_to_bigint`; no validation.
#[pg_extern(immutable)]
fn s2_cell_from_bigint(id: i64) -> S2CellId {
    S2CellId { value: id }
}
/// True when the value is a structurally valid S2 cell id.
#[pg_extern(immutable)]
fn s2_is_valid_cell(cell: S2CellId) -> bool {
    s2_cellid_is_valid_raw(cell.to_u64())
}
/// Cell subdivision level, 0 (face) through 30 (leaf); errors on invalid ids.
#[pg_extern(immutable)]
fn s2_get_level(cell: S2CellId) -> i32 {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    CellID(raw).level() as i32
}
/// Cube face number (0..=5); errors on invalid ids.
#[pg_extern(immutable)]
fn s2_get_face(cell: S2CellId) -> i32 {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    CellID(raw).face() as i32
}
/// Snap a Postgres `point` (x = lng, y = lat, degrees) to the containing
/// cell at `level`; errors on an out-of-range level or coordinates.
#[pg_extern(immutable)]
fn s2_lat_lng_to_cell(latlng: Point, level: i32) -> S2CellId {
    if !(0..=30).contains(&level) {
        error!("invalid level");
    }
    let ll = LatLng::from_degrees(latlng.y, latlng.x);
    if !ll.is_valid() {
        error!("invalid latlng");
    }
    let cellid = CellID::from(ll).parent(level as u64);
    S2CellId::from_u64(cellid.0)
}
/// Overload using the `pg_s2.default_level` GUC; declared `stable`, not
/// `immutable`, because the GUC can change per session.
#[pg_extern(stable, name = "s2_lat_lng_to_cell")]
fn s2_lat_lng_to_cell_default(latlng: Point) -> S2CellId {
    let level = DEFAULT_LEVEL.get();
    s2_lat_lng_to_cell(latlng, level)
}
/// Lat/lng of the cell as a Postgres `point` (x = lng, y = lat, degrees),
/// via the s2 crate's CellID -> LatLng conversion.
#[pg_extern(immutable)]
fn s2_cell_to_lat_lng(cell: S2CellId) -> Point {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let ll = LatLng::from(CellID(raw));
    Point {
        x: ll.lng.deg(),
        y: ll.lat.deg(),
    }
}
/// Latitude/longitude bounding box of a cell as a Postgres `box`
/// (x = lng, y = lat, degrees).
///
/// S2 rect bounds that wrap the antimeridian come back "inverted"; since a
/// Postgres `box` cannot wrap, those are widened to the full [-180, 180]
/// longitude span.
#[pg_extern(immutable)]
fn s2_cell_bbox(cell: S2CellId) -> BOX {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let rect = Cell::from(CellID(raw)).rect_bound();
    let (lng_lo, lng_hi) = if rect.is_inverted() {
        (-180.0, 180.0)
    } else {
        (rect.lng_lo().deg(), rect.lng_hi().deg())
    };
    let low = Point {
        x: lng_lo,
        y: rect.lat_lo().deg(),
    };
    let high = Point {
        x: lng_hi,
        y: rect.lat_hi().deg(),
    };
    BOX { low, high }
}
/// The four corner vertices of a cell as `point`s (x = lng, y = lat).
#[pg_extern(immutable)]
fn s2_cell_to_vertices(cell: S2CellId) -> Vec<Point> {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let verts = Cell::from(CellID(raw)).vertices();
    verts
        .iter()
        .map(|v| {
            let ll = LatLng::from(*v);
            Point {
                x: ll.lng.deg(),
                y: ll.lat.deg(),
            }
        })
        .collect()
}
/// Render points as a Postgres `polygon` literal, `((x1,y1),(x2,y2),...)`,
/// with 15-digit fixed precision for each coordinate.
#[inline]
fn format_polygon_points(points: &[Point]) -> String {
    let coords: Vec<String> = points
        .iter()
        .map(|p| format!("({:.15},{:.15})", p.x, p.y))
        .collect();
    format!("({})", coords.join(","))
}
#[pg_extern(immutable)]
fn s2_cell_boundary_text(cell: S2CellId) -> String {
let raw = cell.to_u64();
if !s2_cellid_is_valid_raw(raw) {
error!("invalid s2cellid");
}
let verts = Cell::from(CellID(raw)).vertices();
let points: Vec<Point> = verts
.iter()
.map(|v| {
let ll = LatLng::from(*v);
Point {
x: ll.lng.deg(),
y: ll.lat.deg(),
}
})
.collect();
format_polygon_points(&points)
}
/// The four edge-adjacent neighbors of a cell, at the cell's own level.
#[pg_extern(immutable)]
fn s2_cell_edge_neighbors(cell: S2CellId) -> Vec<S2CellId> {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let neighbors = CellID(raw).edge_neighbors();
    neighbors.into_iter().map(|n| S2CellId::from_u64(n.0)).collect()
}
/// All neighbors (edge and corner) of a cell, at the cell's own level.
#[pg_extern(immutable)]
fn s2_cell_all_neighbors(cell: S2CellId) -> Vec<S2CellId> {
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let cellid = CellID(raw);
    let level = cellid.level();
    cellid
        .all_neighbors(level)
        .into_iter()
        .map(|n| S2CellId::from_u64(n.0))
        .collect()
}
/// Cover a lat/lng `box` (x = lng, y = lat, degrees) with at most
/// `max_cells` cells, all at exactly `level` (min_level == max_level).
///
/// Corners are normalized with min/max, so a box intended to wrap the
/// antimeridian is treated as the non-wrapping span instead —
/// NOTE(review): confirm callers never pass wrapping boxes.
#[pg_extern(stable)]
fn s2_cover_rect(
    rect: pg_sys::BOX,
    level: i32,
    max_cells: i32,
) -> SetOfIterator<'static, S2CellId> {
    if !(0..=30).contains(&level) {
        error!("invalid level");
    }
    if max_cells <= 0 {
        error!("invalid max_cells");
    }
    let lat_lo = rect.low.y.min(rect.high.y);
    let lat_hi = rect.low.y.max(rect.high.y);
    let lng_lo = rect.low.x.min(rect.high.x);
    let lng_hi = rect.low.x.max(rect.high.x);
    let s2_rect = Rect::from_degrees(lat_lo, lng_lo, lat_hi, lng_hi);
    let coverer = RegionCoverer {
        min_level: level as u8,
        max_level: level as u8,
        level_mod: 1,
        max_cells: max_cells as usize,
    };
    let iter = coverer
        .covering(&s2_rect)
        .0
        .into_iter()
        .map(|c| S2CellId::from_u64(c.0));
    SetOfIterator::new(iter)
}
/// Overload using the `pg_s2.default_cover_level` GUC and the default
/// max_cells.
#[pg_extern(stable, name = "s2_cover_rect")]
fn s2_cover_rect_default(rect: pg_sys::BOX) -> SetOfIterator<'static, S2CellId> {
    let level = DEFAULT_COVER_LEVEL.get();
    s2_cover_rect(rect, level, DEFAULT_MAX_CELLS)
}
/// Overload with an explicit level but the default max_cells.
#[pg_extern(stable, name = "s2_cover_rect")]
fn s2_cover_rect_with_default_max_cells(
    rect: pg_sys::BOX,
    level: i32,
) -> SetOfIterator<'static, S2CellId> {
    s2_cover_rect(rect, level, DEFAULT_MAX_CELLS)
}
/// Cover a spherical cap (center `point`, radius in meters) with at most
/// `max_cells` cells, all at exactly `level`. The radius is converted to
/// a central angle using the `pg_s2.earth_radius_m` GUC.
#[pg_extern(stable)]
fn s2_cover_cap(
    center: Point,
    radius_m: f64,
    level: i32,
    max_cells: i32,
) -> SetOfIterator<'static, S2CellId> {
    if !(0..=30).contains(&level) {
        error!("invalid level");
    }
    if max_cells <= 0 {
        error!("invalid max_cells");
    }
    if radius_m < 0.0 {
        error!("invalid radius");
    }
    let ll = LatLng::from_degrees(center.y, center.x);
    if !ll.is_valid() {
        error!("invalid latlng");
    }
    let center_point = S2Point::from(ll);
    let angle = Angle::from(Rad(radius_m / EARTH_RADIUS_M.get()));
    let cap = Cap::from_center_angle(&center_point, &angle);
    let coverer = RegionCoverer {
        min_level: level as u8,
        max_level: level as u8,
        level_mod: 1,
        max_cells: max_cells as usize,
    };
    let iter = coverer
        .covering(&cap)
        .0
        .into_iter()
        .map(|c| S2CellId::from_u64(c.0));
    SetOfIterator::new(iter)
}
/// Overload using the `pg_s2.default_cover_level` GUC and the default
/// max_cells.
#[pg_extern(stable, name = "s2_cover_cap")]
fn s2_cover_cap_default(center: Point, radius_m: f64) -> SetOfIterator<'static, S2CellId> {
    let level = DEFAULT_COVER_LEVEL.get();
    s2_cover_cap(center, radius_m, level, DEFAULT_MAX_CELLS)
}
/// Overload with an explicit level but the default max_cells.
#[pg_extern(stable, name = "s2_cover_cap")]
fn s2_cover_cap_with_default_max_cells(
    center: Point,
    radius_m: f64,
    level: i32,
) -> SetOfIterator<'static, S2CellId> {
    s2_cover_cap(center, radius_m, level, DEFAULT_MAX_CELLS)
}
// SQL helper: expand each covering cell into its inclusive leaf-id range
// as an int8range, for range-containment joins against stored leaf cells.
extension_sql!(
    r#"
CREATE FUNCTION s2_cover_cap_ranges(
center point,
radius_m double precision,
level integer,
max_cells integer DEFAULT 8
)
RETURNS SETOF int8range
STABLE PARALLEL SAFE
LANGUAGE SQL
AS $$
SELECT int8range(
s2_cell_to_bigint(s2_cell_range_min(cell)),
s2_cell_to_bigint(s2_cell_range_max(cell)),
'[]'
)
FROM s2_cover_cap($1, $2, $3, $4) AS cell
$$;
"#,
    name = "s2_cover_cap_ranges",
    requires = [s2_cover_cap, s2_cell_range_min, s2_cell_range_max, s2_cell_to_bigint],
);
// Same expansion for rectangle coverings.
extension_sql!(
    r#"
CREATE FUNCTION s2_cover_rect_ranges(
rect box,
level integer,
max_cells integer DEFAULT 8
)
RETURNS SETOF int8range
STABLE PARALLEL SAFE
LANGUAGE SQL
AS $$
SELECT int8range(
s2_cell_to_bigint(s2_cell_range_min(cell)),
s2_cell_to_bigint(s2_cell_range_max(cell)),
'[]'
)
FROM s2_cover_rect($1, $2, $3) AS cell
$$;
"#,
    name = "s2_cover_rect_ranges",
    requires = [s2_cover_rect, s2_cell_range_min, s2_cell_range_max, s2_cell_to_bigint],
);
// Thin cast wrapper: boundary text -> the native `polygon` type.
extension_sql!(
    r#"
CREATE FUNCTION s2_cell_to_boundary(cell s2cellid)
RETURNS polygon
IMMUTABLE PARALLEL SAFE
LANGUAGE SQL
AS $$ SELECT s2_cell_boundary_text($1)::polygon $$;
"#,
    name = "s2_cell_to_boundary",
    requires = [s2_cell_boundary_text],
);
#[pg_extern(immutable)]
fn s2_cell_range_min(cell: S2CellId) -> S2CellId {
let raw = cell.to_u64();
if !s2_cellid_is_valid_raw(raw) {
error!("invalid s2cellid");
}
let min = CellID(raw).range_min();
S2CellId::from_u64(min.0)
}
#[pg_extern(immutable)]
fn s2_cell_range_max(cell: S2CellId) -> S2CellId {
let raw = cell.to_u64();
if !s2_cellid_is_valid_raw(raw) {
error!("invalid s2cellid");
}
let max = CellID(raw).range_max();
S2CellId::from_u64(max.0)
}
#[pg_extern(immutable)]
fn s2_cell_to_parent(cell: S2CellId, level: i32) -> S2CellId {
if !(0..=30).contains(&level) {
error!("invalid level");
}
let raw = cell.to_u64();
if !s2_cellid_is_valid_raw(raw) {
error!("invalid s2cellid");
}
let cellid = CellID(raw);
let cell_level = cellid.level() as i32;
if level > cell_level {
error!("invalid level");
}
let parent = cellid.parent(level as u64);
S2CellId::from_u64(parent.0)
}
#[pg_extern(immutable, name = "s2_cell_to_parent")]
fn s2_cell_to_parent_default(cell: S2CellId) -> S2CellId {
let raw = cell.to_u64();
if !s2_cellid_is_valid_raw(raw) {
error!("invalid s2cellid");
}
let cellid = CellID(raw);
let level = cellid.level();
if level == 0 {
error!("invalid level");
}
s2_cell_to_parent(cell, level as i32 - 1)
}
/// Expands `cell` into all of its descendants at the given finer `level`,
/// returned as a set of rows.
///
/// Errors if `level` is outside 0..=30, if `cell` is invalid, or if `level`
/// is not strictly finer than the cell's own level.
#[pg_extern(immutable)]
fn s2_cell_to_children(cell: S2CellId, level: i32) -> SetOfIterator<'static, S2CellId> {
    if !(0..=30).contains(&level) {
        error!("invalid level");
    }
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let cellid = CellID(raw);
    let cell_level = cellid.level() as i32;
    if level <= cell_level {
        error!("invalid level");
    }
    // Descendants at `level` form the contiguous id range
    // [child_begin, child_end); walk it lazily with `next()`.
    let mut cur = cellid.child_begin_at_level(level as u64);
    let end = cellid.child_end_at_level(level as u64);
    let iter = std::iter::from_fn(move || {
        if cur == end {
            None
        } else {
            let out = S2CellId::from_u64(cur.0);
            cur = cur.next();
            Some(out)
        }
    });
    SetOfIterator::new(iter)
}
/// Returns the four direct children (one level finer) of `cell`.
///
/// Errors if `cell` is invalid or is already at the maximum level (30).
#[pg_extern(immutable, name = "s2_cell_to_children")]
fn s2_cell_to_children_default(cell: S2CellId) -> SetOfIterator<'static, S2CellId> {
    let bits = cell.to_u64();
    if !s2_cellid_is_valid_raw(bits) {
        error!("invalid s2cellid");
    }
    let current = CellID(bits).level();
    if current == 30 {
        error!("invalid level");
    }
    s2_cell_to_children(cell, current as i32 + 1)
}
/// Returns the descendant of `cell` at `level` that contains the cell's
/// center point.
///
/// Errors if `level` is outside 0..=30, if `cell` is invalid, or if `level`
/// is not strictly finer than the cell's own level.
#[pg_extern(immutable)]
fn s2_cell_to_center_child(cell: S2CellId, level: i32) -> S2CellId {
    if !(0..=30).contains(&level) {
        error!("invalid level");
    }
    let raw = cell.to_u64();
    if !s2_cellid_is_valid_raw(raw) {
        error!("invalid s2cellid");
    }
    let cellid = CellID(raw);
    let cell_level = cellid.level() as i32;
    if level <= cell_level {
        error!("invalid level");
    }
    // Find the leaf containing the geometric center, then coarsen to `level`.
    let center = Cell::from(cellid).center();
    let child = CellID::from(center).parent(level as u64);
    S2CellId::from_u64(child.0)
}
/// Returns the center child one level finer than `cell`.
///
/// Errors if `cell` is invalid or is already at the maximum level (30).
#[pg_extern(immutable, name = "s2_cell_to_center_child")]
fn s2_cell_to_center_child_default(cell: S2CellId) -> S2CellId {
    let bits = cell.to_u64();
    if !s2_cellid_is_valid_raw(bits) {
        error!("invalid s2cellid");
    }
    let current = CellID(bits).level();
    if current == 30 {
        error!("invalid level");
    }
    s2_cell_to_center_child(cell, current as i32 + 1)
}
/// Computes the great-circle distance between two points given as Postgres
/// `point`s with x = longitude and y = latitude, in degrees.
///
/// `unit` selects the result unit: "m" (meters), "km" (kilometers), or
/// "rad" (radians of arc). Metric conversions use the configurable earth
/// radius (`EARTH_RADIUS_M` GUC). Errors on invalid coordinates or units.
#[pg_extern(immutable)]
fn s2_great_circle_distance(a: Point, b: Point, unit: &str) -> f64 {
    let ll_a = LatLng::from_degrees(a.y, a.x);
    let ll_b = LatLng::from_degrees(b.y, b.x);
    if !ll_a.is_valid() || !ll_b.is_valid() {
        error!("invalid latlng");
    }
    let angle = ll_a.distance(&ll_b).rad();
    let earth_radius = EARTH_RADIUS_M.get();
    // Unit matching is case-insensitive and ignores surrounding whitespace.
    match unit.trim().to_ascii_lowercase().as_str() {
        "m" => angle * earth_radius,
        "km" => angle * earth_radius / 1000.0,
        "rad" => angle,
        _ => error!("invalid unit"),
    }
}
/// Great-circle distance with the default unit of meters.
#[pg_extern(immutable, name = "s2_great_circle_distance")]
fn s2_great_circle_distance_default(a: Point, b: Point) -> f64 {
    s2_great_circle_distance(a, b, "m")
}
#[cfg(any(test, feature = "pg_test"))]
#[pg_schema]
mod tests {
use super::*;
use s2::cell::Cell;
use s2::cellid::CellID;
use s2::latlng::LatLng;
use pgrx::spi::Spi;
#[pg_test]
fn test_s2_get_extension_version_matches_pkg() {
let v = s2_get_extension_version();
assert_eq!(v, env!("CARGO_PKG_VERSION"));
}
#[pg_test]
fn test_i64_norm_roundtrip() {
let cases = [
0u64,
1u64,
0x7fff_ffff_ffff_ffff,
0x8000_0000_0000_0000,
u64::MAX,
];
for &v in &cases {
let norm = u64_to_i64_norm(v);
let back = i64_norm_to_u64(norm);
assert_eq!(back, v);
}
}
#[pg_test]
fn test_i64_norm_order_preserving_unsigned() {
let pairs = [
(0u64, 1u64),
(1u64, 2u64),
(0x7fff_ffff_ffff_fffe, 0x7fff_ffff_ffff_ffff),
(0x7fff_ffff_ffff_ffff, 0x8000_0000_0000_0000),
(0x8000_0000_0000_0000, u64::MAX),
];
for &(a, b) in &pairs {
let na = u64_to_i64_norm(a);
let nb = u64_to_i64_norm(b);
assert!(na < nb, "order violated for {a:#x} < {b:#x}");
}
}
#[pg_test]
fn test_s2cellid_sql_ordering() {
let a = "47a1cbd595522b39";
let b = "b09dff882a7809e1";
let expected = CellID::from_token(a).0 < CellID::from_token(b).0;
let got = Spi::get_one::<bool>(&format!(
"SELECT ('{a}'::text::s2cellid) < ('{b}'::text::s2cellid)"
))
.expect("spi");
assert_eq!(got, Some(expected));
}
#[pg_test]
fn test_s2_cell_token_roundtrip() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let back = s2_cell_to_token(cell);
assert_eq!(back, token);
}
#[pg_test]
fn test_s2_cell_token_roundtrip_high_bit() {
let token = "b112966aaaaaaaab";
let cell = s2_cell_from_token(token);
let back = s2_cell_to_token(cell);
assert_eq!(back, token);
}
#[pg_test]
#[should_panic(expected = "invalid s2cellid token")]
fn test_s2_cell_from_token_invalid() {
let _ = s2_cell_from_token("zz");
}
#[pg_test]
fn test_s2_cell_to_bigint() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let expected = u64_to_i64_norm(CellID::from_token(token).0);
assert_eq!(s2_cell_to_bigint(cell), expected);
}
#[pg_test]
fn test_s2_cell_from_bigint_roundtrip() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let id = s2_cell_to_bigint(cell);
let back = s2_cell_from_bigint(id);
assert_eq!(s2_cell_to_token(back), token);
}
#[pg_test]
fn test_s2_is_valid_cell() {
let valid = s2_cell_from_token("47a1cbd595522b39");
assert!(s2_is_valid_cell(valid));
let valid_high_bit = s2_cell_from_token("b112966aaaaaaaab");
assert!(s2_is_valid_cell(valid_high_bit));
let invalid = s2_cell_from_bigint(0);
assert!(!s2_is_valid_cell(invalid));
}
#[pg_test]
fn test_s2_get_level_and_face() {
let face0 = s2_cell_from_token("1");
assert_eq!(s2_get_level(face0), 0);
assert_eq!(s2_get_face(face0), 0);
let face1 = s2_cell_from_token("3");
assert_eq!(s2_get_level(face1), 0);
assert_eq!(s2_get_face(face1), 1);
}
#[pg_test]
fn test_s2_lat_lng_to_cell_level() {
let lat = 49.703498679;
let lng = 11.770681595;
let level = 12i32;
let ll = LatLng::from_degrees(lat, lng);
let expected = CellID::from(ll).parent(level as u64).to_token();
let got = s2_lat_lng_to_cell(Point { x: lng, y: lat }, level);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
#[should_panic(expected = "invalid level")]
fn test_s2_lat_lng_to_cell_level_invalid() {
let _ = s2_lat_lng_to_cell(Point { x: 0.0, y: 0.0 }, 31);
}
#[pg_test]
fn test_s2_lat_lng_to_cell_default_level() {
let lat = 49.703498679;
let lng = 11.770681595;
let expected = s2_lat_lng_to_cell(Point { x: lng, y: lat }, 14);
let got = s2_lat_lng_to_cell_default(Point { x: lng, y: lat });
assert_eq!(s2_cell_to_token(got), s2_cell_to_token(expected));
}
#[pg_test]
fn test_s2_lat_lng_to_cell_default_level_guc() {
let lat = 49.703498679;
let lng = 11.770681595;
Spi::run("SET pg_s2.default_level = 10").expect("set GUC");
let expected = s2_lat_lng_to_cell(Point { x: lng, y: lat }, 10);
let got = s2_lat_lng_to_cell_default(Point { x: lng, y: lat });
assert_eq!(s2_cell_to_token(got), s2_cell_to_token(expected));
}
#[pg_test]
fn test_s2_cell_to_lat_lng() {
let cell = s2_cell_from_token("47a1cbd595522b39");
let ll = s2_cell_to_lat_lng(cell);
assert!((ll.y - 49.703498679).abs() < 1e-6);
assert!((ll.x - 11.770681595).abs() < 1e-6);
}
#[pg_test]
fn test_s2_cell_bbox() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let bbox = s2_cell_bbox(cell);
let rect = Cell::from(CellID::from_token(token)).rect_bound();
assert!((bbox.low.x - rect.lng_lo().deg()).abs() < 1e-12);
assert!((bbox.low.y - rect.lat_lo().deg()).abs() < 1e-12);
assert!((bbox.high.x - rect.lng_hi().deg()).abs() < 1e-12);
assert!((bbox.high.y - rect.lat_hi().deg()).abs() < 1e-12);
}
#[pg_test]
#[should_panic(expected = "invalid s2cellid")]
fn test_s2_cell_to_lat_lng_invalid() {
let _ = s2_cell_to_lat_lng(s2_cell_from_bigint(0));
}
#[pg_test]
fn test_s2_cell_range_min() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let expected = CellID::from_token(token).range_min().to_token();
let got = s2_cell_range_min(cell);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_range_max() {
let token = "47a1cbd595522b39";
let cell = s2_cell_from_token(token);
let expected = CellID::from_token(token).range_max().to_token();
let got = s2_cell_range_max(cell);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_to_parent_level() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
assert!(cell_raw.level() > 0);
let level = cell_raw.level() as i32 - 1;
let expected = cell_raw.parent(level as u64).to_token();
let cell = s2_cell_from_token(token);
let got = s2_cell_to_parent(cell, level);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_to_parent_default() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
assert!(cell_raw.level() > 0);
let expected = cell_raw.parent(cell_raw.level() - 1).to_token();
let cell = s2_cell_from_token(token);
let got = s2_cell_to_parent_default(cell);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_to_children_level() {
let ll = LatLng::from_degrees(49.703498679, 11.770681595);
let cell_raw = CellID::from(ll).parent(10);
let level = cell_raw.level() as i32 + 1;
let expected: Vec<String> = cell_raw.children().iter().map(|c| c.to_token()).collect();
let cell = s2_cell_from_token(&cell_raw.to_token());
let got: Vec<String> = s2_cell_to_children(cell, level)
.map(s2_cell_to_token)
.collect();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cell_to_children_default() {
let ll = LatLng::from_degrees(49.703498679, 11.770681595);
let cell_raw = CellID::from(ll).parent(10);
let expected: Vec<String> = cell_raw.children().iter().map(|c| c.to_token()).collect();
let cell = s2_cell_from_token(&cell_raw.to_token());
let got: Vec<String> = s2_cell_to_children_default(cell)
.map(s2_cell_to_token)
.collect();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cell_to_center_child_level() {
let ll = LatLng::from_degrees(49.703498679, 11.770681595);
let cell_raw = CellID::from(ll).parent(10);
let level = cell_raw.level() as i32 + 1;
let expected = CellID::from(Cell::from(cell_raw).center())
.parent(level as u64)
.to_token();
let cell = s2_cell_from_token(&cell_raw.to_token());
let got = s2_cell_to_center_child(cell, level);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_to_center_child_default() {
let ll = LatLng::from_degrees(49.703498679, 11.770681595);
let cell_raw = CellID::from(ll).parent(10);
let level = cell_raw.level() as i32 + 1;
let expected = CellID::from(Cell::from(cell_raw).center())
.parent(level as u64)
.to_token();
let cell = s2_cell_from_token(&cell_raw.to_token());
let got = s2_cell_to_center_child_default(cell);
assert_eq!(s2_cell_to_token(got), expected);
}
#[pg_test]
fn test_s2_cell_to_vertices() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
let expected: Vec<Point> = Cell::from(cell_raw)
.vertices()
.iter()
.map(|v| {
let ll = LatLng::from(*v);
Point {
x: ll.lng.deg(),
y: ll.lat.deg(),
}
})
.collect();
let cell = s2_cell_from_token(token);
let got = s2_cell_to_vertices(cell);
assert_eq!(got.len(), expected.len());
for (a, b) in got.iter().zip(expected.iter()) {
assert!((a.x - b.x).abs() < 1e-6);
assert!((a.y - b.y).abs() < 1e-6);
}
}
#[pg_test]
fn test_s2_cell_boundary_text() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
let points: Vec<Point> = Cell::from(cell_raw)
.vertices()
.iter()
.map(|v| {
let ll = LatLng::from(*v);
Point {
x: ll.lng.deg(),
y: ll.lat.deg(),
}
})
.collect();
let expected = format_polygon_points(&points);
let cell = s2_cell_from_token(token);
let got = s2_cell_boundary_text(cell);
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cell_to_boundary_sql() {
let token = "47a1cbd595522b39";
let query = format!(
"SELECT s2_cell_to_boundary(s2_cell_from_token('{token}')) IS NOT NULL"
);
let got = Spi::get_one::<bool>(&query).expect("spi");
assert_eq!(got, Some(true));
}
#[pg_test]
fn test_s2_cover_rect_level() {
let rect = pg_sys::BOX {
low: Point { x: 11.70, y: 49.68 },
high: Point { x: 11.82, y: 49.76 },
};
let level = 12i32;
let max_cells = 8i32;
let s2_rect = s2::rect::Rect::from_degrees(
rect.low.y,
rect.low.x,
rect.high.y,
rect.high.x,
);
let coverer = s2::region::RegionCoverer {
min_level: level as u8,
max_level: level as u8,
level_mod: 1,
max_cells: max_cells as usize,
};
let mut expected: Vec<String> =
coverer.covering(&s2_rect).0.iter().map(|c| c.to_token()).collect();
expected.sort();
let mut got: Vec<String> = s2_cover_rect(rect, level, max_cells)
.map(s2_cell_to_token)
.collect();
got.sort();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cover_rect_default_level() {
let rect = pg_sys::BOX {
low: Point { x: 11.70, y: 49.68 },
high: Point { x: 11.82, y: 49.76 },
};
let expected: Vec<String> = s2_cover_rect(rect, 12, 8).map(s2_cell_to_token).collect();
let got: Vec<String> = s2_cover_rect_default(rect).map(s2_cell_to_token).collect();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cover_rect_default_level_guc() {
let rect = pg_sys::BOX {
low: Point { x: 11.70, y: 49.68 },
high: Point { x: 11.82, y: 49.76 },
};
Spi::run("SET pg_s2.default_cover_level = 10").expect("set GUC");
let expected: Vec<String> = s2_cover_rect(rect, 10, 8).map(s2_cell_to_token).collect();
let got: Vec<String> = s2_cover_rect_default(rect).map(s2_cell_to_token).collect();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cover_cap_level() {
let center = Point { x: 11.77, y: 49.70 };
let radius_m = 2000.0;
let level = 12i32;
let max_cells = 8i32;
let center_ll = LatLng::from_degrees(center.y, center.x);
let center_point = s2::point::Point::from(center_ll);
let angle = s2::s1::Angle::from(s2::s1::Rad(radius_m / EARTH_RADIUS_M.get()));
let cap = s2::cap::Cap::from_center_angle(¢er_point, &angle);
let coverer = s2::region::RegionCoverer {
min_level: level as u8,
max_level: level as u8,
level_mod: 1,
max_cells: max_cells as usize,
};
let mut expected: Vec<String> =
coverer.covering(&cap).0.iter().map(|c| c.to_token()).collect();
expected.sort();
let mut got: Vec<String> = s2_cover_cap(center, radius_m, level, max_cells)
.map(s2_cell_to_token)
.collect();
got.sort();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cover_cap_default_level() {
let center = Point { x: 11.77, y: 49.70 };
let radius_m = 2000.0;
let expected: Vec<String> = s2_cover_cap(center, radius_m, 12, 8)
.map(s2_cell_to_token)
.collect();
let got: Vec<String> = s2_cover_cap_default(center, radius_m)
.map(s2_cell_to_token)
.collect();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cover_cap_ranges_sql() {
let center = Point { x: 11.77, y: 49.70 };
let radius_m = 2000.0;
let level = 12i32;
let max_cells = 8i32;
let center_ll = LatLng::from_degrees(center.y, center.x);
let center_point = s2::point::Point::from(center_ll);
let angle = s2::s1::Angle::from(s2::s1::Rad(radius_m / EARTH_RADIUS_M.get()));
let cap = s2::cap::Cap::from_center_angle(¢er_point, &angle);
let coverer = s2::region::RegionCoverer {
min_level: level as u8,
max_level: level as u8,
level_mod: 1,
max_cells: max_cells as usize,
};
let mut expected: Vec<String> = coverer
.covering(&cap)
.0
.iter()
.map(|c| {
let min = u64_to_i64_norm(c.range_min().0);
let max = u64_to_i64_norm(c.range_max().0);
let max_exclusive = max.saturating_add(1);
format!("[{min},{max_exclusive})")
})
.collect();
expected.sort();
let query = format!(
"SELECT string_agg(r::text, ',' ORDER BY r::text) \
FROM s2_cover_cap_ranges(point({}, {}), {}::double precision, {}, {}) r",
center.x, center.y, radius_m, level, max_cells
);
let got = Spi::get_one::<String>(&query).expect("spi");
let got_list = got.unwrap_or_default();
let expected_list = expected.join(",");
assert_eq!(got_list, expected_list);
}
#[pg_test]
fn test_s2_cover_rect_ranges_sql() {
let rect = pg_sys::BOX {
low: Point { x: 11.70, y: 49.68 },
high: Point { x: 11.82, y: 49.76 },
};
let level = 12i32;
let max_cells = 8i32;
let s2_rect = s2::rect::Rect::from_degrees(
rect.low.y,
rect.low.x,
rect.high.y,
rect.high.x,
);
let coverer = s2::region::RegionCoverer {
min_level: level as u8,
max_level: level as u8,
level_mod: 1,
max_cells: max_cells as usize,
};
let mut expected: Vec<String> = coverer
.covering(&s2_rect)
.0
.iter()
.map(|c| {
let min = u64_to_i64_norm(c.range_min().0);
let max = u64_to_i64_norm(c.range_max().0);
let max_exclusive = max.saturating_add(1);
format!("[{min},{max_exclusive})")
})
.collect();
expected.sort();
let query = format!(
"SELECT string_agg(r::text, ',' ORDER BY r::text) \
FROM s2_cover_rect_ranges(box(point({}, {}), point({}, {})), {}, {}) r",
rect.low.x, rect.low.y, rect.high.x, rect.high.y, level, max_cells
);
let got = Spi::get_one::<String>(&query).expect("spi");
let got_list = got.unwrap_or_default();
let expected_list = expected.join(",");
assert_eq!(got_list, expected_list);
}
#[pg_test]
fn test_s2_great_circle_distance_units() {
let a = Point { x: 0.0, y: 0.0 };
let b = Point { x: 90.0, y: 0.0 };
let ll_a = LatLng::from_degrees(a.y, a.x);
let ll_b = LatLng::from_degrees(b.y, b.x);
let angle = ll_a.distance(&ll_b).rad();
let earth_radius = EARTH_RADIUS_M.get();
let expected_m = angle * earth_radius;
let expected_km = expected_m / 1000.0;
let got_m = s2_great_circle_distance(a, b, "m");
let got_km = s2_great_circle_distance(a, b, "km");
let got_default = s2_great_circle_distance_default(a, b);
let got_rad = s2_great_circle_distance(a, b, "rad");
assert!((got_m - expected_m).abs() < 1e-6);
assert!((got_km - expected_km).abs() < 1e-9);
assert!((got_default - expected_m).abs() < 1e-6);
assert!((got_rad - angle).abs() < 1e-12);
}
#[pg_test]
fn test_s2_great_circle_distance_guc() {
let a = Point { x: 0.0, y: 0.0 };
let b = Point { x: 90.0, y: 0.0 };
let ll_a = LatLng::from_degrees(a.y, a.x);
let ll_b = LatLng::from_degrees(b.y, b.x);
let angle = ll_a.distance(&ll_b).rad();
Spi::run("SET pg_s2.earth_radius_m = 1000000").expect("set GUC");
let got = s2_great_circle_distance(a, b, "m");
let expected = angle * 1_000_000.0;
assert!((got - expected).abs() < 1e-6);
}
#[pg_test]
fn test_s2_cellid_casts() {
let token = "47a1cbd595522b39";
let cast_token = Spi::get_one::<String>(&format!(
"SELECT ('{token}'::text::s2cellid)::text"
))
.expect("spi");
assert_eq!(cast_token, Some(token.to_string()));
let cell = s2_cell_from_token(token);
let expected_bigint = s2_cell_to_bigint(cell);
let cast_bigint = Spi::get_one::<i64>(&format!(
"SELECT (s2_cell_from_token('{token}')::bigint)"
))
.expect("spi");
assert_eq!(cast_bigint, Some(expected_bigint));
let cast_back = Spi::get_one::<String>(&format!(
"SELECT (CAST({expected_bigint} AS bigint)::s2cellid)::text"
))
.expect("spi");
assert_eq!(cast_back, Some(token.to_string()));
}
#[pg_test]
fn test_s2_cell_edge_neighbors() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
let mut expected: Vec<String> = cell_raw
.edge_neighbors()
.iter()
.map(|c| c.to_token())
.collect();
expected.sort();
let cell = s2_cell_from_token(token);
let mut got: Vec<String> = s2_cell_edge_neighbors(cell)
.iter()
.map(|c| s2_cell_to_token(*c))
.collect();
got.sort();
assert_eq!(got, expected);
}
#[pg_test]
fn test_s2_cell_all_neighbors() {
let token = "47a1cbd595522b39";
let cell_raw = CellID::from_token(token);
let mut expected: Vec<String> = cell_raw
.all_neighbors(cell_raw.level())
.iter()
.map(|c| c.to_token())
.collect();
expected.sort();
let cell = s2_cell_from_token(token);
let mut got: Vec<String> = s2_cell_all_neighbors(cell)
.iter()
.map(|c| s2_cell_to_token(*c))
.collect();
got.sort();
assert_eq!(got, expected);
}
}
/// This module is required by `cargo pgrx test` invocations.
/// It must be visible at the root of your extension crate.
#[cfg(test)]
pub mod pg_test {
    // No per-test setup is needed; the hook is kept for the pgrx harness.
    pub fn setup(_options: Vec<&str>) {}
    /// Extra `postgresql.conf` options for the test instance (none needed).
    #[must_use]
    pub fn postgresql_conf_options() -> Vec<&'static str> {
        vec![]
    }
}
| yuiseki/pg_s2 | 0 | S2 Geometry Extension for PostgreSQL | Rust | yuiseki | yuiseki | Yuiseki Inc. |
src/cli.rs | Rust | use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::{cmp, fs, io, str};
use anyhow::Context as _;
use bstr::BStr;
use clap::Parser as _;
use tracing_subscriber::prelude::*;
use crate::keymap::{LAYER_DATA_LEN, PROFILE_DATA_LEN};
use crate::{keymap, layout, scancode};
// Command codes understood by the keyboard firmware over the hidraw channel.
const GET_PRODUCT_NAME: u16 = 0x1001;
const GET_KEYBOARD_LAYOUT: u16 = 0x1002;
const GET_BOOT_LOADER_VERSION: u16 = 0x1003; // ?
const GET_MODEL_NAME: u16 = 0x1005;
const GET_SERIAL_NUMBER: u16 = 0x1007;
const GET_FIRMWARE_VERSION: u16 = 0x100b;
const GET_DIPSW: u16 = 0x1103;
// Get and set share code 0x1101; the message type byte distinguishes them
// (0x02 read in `get_simple`, 0x03 write in `set_current_profile`).
const GET_CURRENT_PROFILE: u16 = 0x1101;
const SET_CURRENT_PROFILE: u16 = 0x1101;
#[derive(Clone, Debug, clap::Parser)]
struct Cli {
#[command(subcommand)]
command: Command,
}
#[derive(Clone, Debug, clap::Subcommand)]
enum Command {
Info(InfoArgs),
ReadProfile(ReadProfileArgs),
WriteProfile(WriteProfileArgs),
ShowProfile(ShowProfileArgs),
}
#[derive(Clone, Debug, clap::Args)]
struct ConnectionArgs {
/// Path to device file to communicate over
#[arg(long, default_value = "/dev/hidraw1")]
device: PathBuf,
}
/// CLI entry point: initializes tracing (output to stderr, filtered via the
/// `RUST_LOG`-style environment filter) and dispatches the parsed subcommand.
pub fn run() -> anyhow::Result<()> {
    tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer().with_writer(io::stderr))
        .with(tracing_subscriber::EnvFilter::from_default_env())
        .init();
    let cli = Cli::parse();
    match &cli.command {
        Command::Info(args) => run_info(args),
        Command::ReadProfile(args) => run_read_profile(args),
        Command::WriteProfile(args) => run_write_profile(args),
        Command::ShowProfile(args) => run_show_profile(args),
    }
}
/// Print information about the connected keyboard
#[derive(Clone, Debug, clap::Args)]
struct InfoArgs {
#[command(flatten)]
connection: ConnectionArgs,
/// Show fetched data without interpreting
#[arg(long)]
raw: bool,
}
/// Implements the `info` subcommand: queries identity and configuration
/// values from the keyboard and prints them. With `--raw`, dumps the
/// uninterpreted responses for command codes 0x1000..0x1010 instead.
fn run_info(args: &InfoArgs) -> anyhow::Result<()> {
    let mut dev = open_device(&args.connection)?;
    if args.raw {
        for code in 0x1000..0x1010 {
            let message = get_simple(&mut dev, code)?;
            // Response payload starts at byte 3.
            println!("{code:04x}: {:?}", &BStr::new(&message[3..]));
        }
    } else {
        let message = get_simple(&mut dev, GET_PRODUCT_NAME)?;
        println!("Product name: {}", truncate_nul_str(&message[3..]));
        let message = get_simple(&mut dev, GET_MODEL_NAME)?;
        println!("Model name: {}", truncate_nul_str(&message[3..]));
        let message = get_simple(&mut dev, GET_SERIAL_NUMBER)?;
        println!("Serial number: {}", truncate_nul_str(&message[3..]));
        let message = get_simple(&mut dev, GET_KEYBOARD_LAYOUT)?;
        println!("Keyboard layout: {}", truncate_nul_str(&message[3..]));
        let message = get_simple(&mut dev, GET_BOOT_LOADER_VERSION)?;
        println!("Boot loader version?: {}", truncate_nul_str(&message[3..]));
        let message = get_simple(&mut dev, GET_FIRMWARE_VERSION)?;
        println!("Firmware version: {}", truncate_nul_str(&message[3..]))
;
        let message = get_simple(&mut dev, GET_DIPSW)?;
        // DIP switch state arrives one bit per byte; pack for binary display.
        println!("DIP Sw: {:06b}", pack_dipsw(&message[3..9]));
        let index = get_current_profile(&mut dev)?;
        println!("Current profile: {index}");
    }
    Ok(())
}
/// Fetch keymap profile and save to file
#[derive(Clone, Debug, clap::Args)]
struct ReadProfileArgs {
#[command(flatten)]
connection: ConnectionArgs,
/// Output file [default: stdout]
#[arg(short, long)]
output: Option<PathBuf>,
/// Output raw binary data
#[arg(long)]
raw: bool,
/// Profile index to fetch [default: current profile]
#[arg(long, value_parser = clap::value_parser!(u16).range(0..4))]
index: Option<u16>,
}
/// Implements `read-profile`: fetches a full keymap profile from the device
/// and writes it to `--output` (or stdout), as TOML text unless `--raw`.
fn run_read_profile(args: &ReadProfileArgs) -> anyhow::Result<()> {
    let mut dev = open_device(&args.connection)?;
    // Read the whole profile, temporarily switching profiles if `--index`
    // was given.
    let data = maybe_switch_profile(&mut dev, args.index, |dev| {
        read_data(dev, 0, PROFILE_DATA_LEN.try_into().unwrap())
    })?;
    let serialized = if args.raw {
        data
    } else {
        keymap::serialize_to_toml_string(&data).into_bytes()
    };
    if let Some(path) = &args.output {
        fs::write(path, serialized)
            .with_context(|| format!("failed to write {}", path.display()))?;
    } else {
        io::stdout().write_all(&serialized)?;
    }
    Ok(())
}
/// Load keymap profile from file
#[derive(Clone, Debug, clap::Args)]
struct WriteProfileArgs {
#[command(flatten)]
connection: ConnectionArgs,
/// Input file [default: stdin]
#[arg(short, long)]
input: Option<PathBuf>,
/// Profile index to write [default: current profile]
#[arg(long, value_parser = clap::value_parser!(u16).range(0..4))]
index: Option<u16>,
}
/// Implements `write-profile`: loads profile data (raw binary or TOML text)
/// and uploads it to the device, optionally into a specific profile slot.
fn run_write_profile(args: &WriteProfileArgs) -> anyhow::Result<()> {
    let data = read_profile_data(args.input.as_deref())?;
    let mut dev = open_device(&args.connection)?;
    maybe_switch_profile(&mut dev, args.index, |dev| write_data(dev, 0, &data))?;
    Ok(())
}
/// Show keymap profile data
#[derive(Clone, Debug, clap::Args)]
struct ShowProfileArgs {
/// Input file [default: stdin]
#[arg(short, long)]
input: Option<PathBuf>,
/// Print each row ignoring physical layout
#[arg(long)]
no_layout: bool,
}
/// Implements `show-profile`: decodes each of the four layers into scancodes
/// and prints them — either as flat rows of 15 (`--no-layout`) or arranged
/// according to the US physical layout, with label annotations.
fn run_show_profile(args: &ShowProfileArgs) -> anyhow::Result<()> {
    let profile_data = read_profile_data(args.input.as_deref())?;
    for (i, data) in profile_data.chunks_exact(LAYER_DATA_LEN).enumerate() {
        println!("Layer #{i}");
        // Scancodes are stored big-endian, two bytes each.
        let scancodes: Vec<_> = data
            .chunks_exact(2)
            .map(|d| u16::from_be_bytes(d.try_into().unwrap()))
            .collect();
        if args.no_layout {
            // 15 scancodes per keyboard row.
            for codes in scancodes.chunks(15) {
                println!("  {codes:04x?}");
            }
        } else {
            let widths_map = &layout::US_LAYOUT_WIDTHS_MAP;
            for (codes, widths) in scancodes.chunks(15).zip(widths_map) {
                let formatted_codes =
                    layout::format_row(widths, codes.iter().map(|n| format!("{n:04x}")));
                let formatted_labels = layout::format_row(
                    widths,
                    codes
                        .iter()
                        .map(|n| scancode::scancode_to_label(*n).unwrap_or("")),
                );
                println!("  {formatted_codes}");
                println!("  {formatted_labels}");
            }
        }
    }
    Ok(())
}
/// Loads profile bytes from `maybe_path` (stdin when `None`), accepting
/// either raw binary data or the TOML text produced by `read-profile`.
fn read_profile_data(maybe_path: Option<&Path>) -> anyhow::Result<Vec<u8>> {
    let data = if let Some(path) = &maybe_path {
        fs::read(path).with_context(|| format!("failed to read {}", path.display()))?
    } else {
        let mut buf = Vec::with_capacity(PROFILE_DATA_LEN);
        io::stdin().read_to_end(&mut buf)?;
        buf
    };
    // Format sniffing: TOML text never contains NUL bytes, while raw profile
    // data effectively always does (high bytes of small scancodes).
    // NOTE(review): heuristic — assumes no valid raw profile is NUL-free.
    if data.contains(&b'\0') {
        anyhow::ensure!(
            data.len() == PROFILE_DATA_LEN,
            "unexpected profile data length"
        );
        Ok(data)
    } else {
        let serialized = str::from_utf8(&data).context("invalid profile text")?;
        keymap::parse_toml_string(serialized)
    }
}
/// Runs `f` against `dev`, temporarily switching to `profile_index` first
/// (when given) and restoring the previously active profile afterwards.
///
/// The old profile is restored even if `f` returns an error; if restoring
/// itself fails, that restore error is returned instead of `f`'s result.
fn maybe_switch_profile<D: Read + Write, O>(
    dev: &mut D,
    profile_index: Option<u16>,
    f: impl FnOnce(&mut D) -> io::Result<O>,
) -> io::Result<O> {
    let old_profile_index = if let Some(index) = profile_index {
        let old_index = get_current_profile(dev)?;
        set_current_profile(dev, index)?;
        Some(old_index)
    } else {
        None
    };
    let res = f(dev);
    if let Some(index) = old_profile_index {
        set_current_profile(dev, index)?;
    }
    res
}
/// Opens the configured hidraw device file for read/write access.
fn open_device(args: &ConnectionArgs) -> anyhow::Result<File> {
    OpenOptions::new()
        .read(true)
        .write(true)
        .open(&args.device)
        .with_context(|| format!("failed to open device {}", args.device.display()))
}
/// Sends a 32-byte "get" request (message type 0x02, big-endian `command` at
/// bytes 1..3) and returns the raw 32-byte response.
#[tracing::instrument(skip(dev))]
fn get_simple<D: Read + Write>(dev: &mut D, command: u16) -> io::Result<[u8; 32]> {
    let mut message = [0; 32];
    message[0] = 0x02;
    message[1..3].copy_from_slice(&command.to_be_bytes());
    tracing::trace!(?message, "write");
    dev.write_all(&message)?;
    // The request buffer is reused to receive the reply.
    dev.read_exact(&mut message)?;
    tracing::trace!(?message, "read");
    Ok(message)
}
/// Queries the active profile index (big-endian u16 at response bytes 3..5).
#[tracing::instrument(skip(dev))]
fn get_current_profile<D: Read + Write>(dev: &mut D) -> io::Result<u16> {
    let message = get_simple(dev, GET_CURRENT_PROFILE)?;
    Ok(u16::from_be_bytes(message[3..5].try_into().unwrap()))
}
/// Switches the active profile by sending a 32-byte "set" request (message
/// type 0x03, big-endian `id` at bytes 3..5).
///
/// The device sends two 32-byte reply messages; both are consumed.
/// NOTE(review): their contents are not yet interpreted (see TODO).
#[tracing::instrument(skip(dev))]
fn set_current_profile<D: Read + Write>(dev: &mut D, id: u16) -> io::Result<()> {
    let mut message = [0; 32];
    message[0] = 0x03;
    message[1..3].copy_from_slice(&SET_CURRENT_PROFILE.to_be_bytes());
    message[3..5].copy_from_slice(&id.to_be_bytes());
    tracing::trace!(?message, "write");
    dev.write_all(&message)?;
    // TODO: process response
    dev.read_exact(&mut message)?;
    tracing::trace!(?message, "read");
    dev.read_exact(&mut message)?;
    tracing::trace!(?message, "read");
    Ok(())
}
// Upper bound on payload bytes carried per transfer message.
const MAX_DATA_CHUNK_LEN: u16 = 26; // or 28?
// TODO: Is this a generic function or specific to the profile data?
/// Reads `len` bytes starting at offset `start` from the device, issuing one
/// type-0x12 request per chunk of at most `MAX_DATA_CHUNK_LEN` bytes.
#[tracing::instrument(skip(dev))]
fn read_data<D: Read + Write>(dev: &mut D, start: u16, len: u16) -> io::Result<Vec<u8>> {
    let mut data = Vec::with_capacity(len.into());
    for offset in (0..len).step_by(MAX_DATA_CHUNK_LEN.into()) {
        // The final chunk may be shorter than the maximum.
        let n: u8 = cmp::min(MAX_DATA_CHUNK_LEN, len - offset)
            .try_into()
            .unwrap();
        let mut message = [0; 32];
        message[0] = 0x12;
        message[1..3].copy_from_slice(&(start + offset).to_be_bytes());
        message[3] = n;
        tracing::trace!(?message, "write");
        dev.write_all(&message)?;
        dev.read_exact(&mut message)?;
        tracing::trace!(?message, "read");
        // Response payload starts at byte 4.
        data.extend_from_slice(&message[4..][..n.into()]);
    }
    Ok(data)
}
// TODO: Is this a generic function or specific to the profile data?
/// Writes `data` to the device starting at offset `start`, sending one
/// type-0x13 message per chunk of at most `MAX_DATA_CHUNK_LEN` bytes.
#[tracing::instrument(skip(dev, data))]
fn write_data<D: Read + Write>(dev: &mut D, start: u16, data: &[u8]) -> io::Result<()> {
    for (i, chunk) in data.chunks(MAX_DATA_CHUNK_LEN.into()).enumerate() {
        let offset: u16 = MAX_DATA_CHUNK_LEN * u16::try_from(i).unwrap();
        let mut message = [0; 32];
        message[0] = 0x13;
        message[1..3].copy_from_slice(&(start + offset).to_be_bytes());
        message[3] = chunk.len().try_into().unwrap();
        message[4..][..chunk.len()].copy_from_slice(chunk);
        tracing::trace!(?message, "write");
        dev.write_all(&message)?;
        dev.read_exact(&mut message)?;
        tracing::trace!(?message, "read");
        // TODO: process response
    }
    Ok(())
}
/// Packs one-bit-per-byte DIP switch data into a single byte.
///
/// The first input byte becomes the most significant bit so the `{:06b}`
/// rendering in `run_info` reads left-to-right.
fn pack_dipsw(data: &[u8]) -> u8 {
    let mut packed = 0u8;
    for bit in data {
        packed = (packed << 1) | (bit & 1);
    }
    packed
}
/// Returns `data` truncated at the first NUL byte, or unchanged if there is
/// no NUL terminator.
fn truncate_nul_str(data: &[u8]) -> &BStr {
    let end = data.iter().position(|&b| b == b'\0').unwrap_or(data.len());
    BStr::new(&data[..end])
}
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
src/keymap.rs | Rust | //! Utility to process keymap data.
use std::fmt::Write as _;
/// Size in bytes of one keymap layer (120 two-byte scancodes).
pub const LAYER_DATA_LEN: usize = 0xf0;
/// Size in bytes of a full profile: four layers.
pub const PROFILE_DATA_LEN: usize = LAYER_DATA_LEN * 4;
/// Serializes raw profile bytes into a human-editable TOML document with one
/// `[[layers]]` table (holding a `scancodes` array) per keymap layer.
///
/// Panics if `profile_data` is not exactly `PROFILE_DATA_LEN` bytes.
pub fn serialize_to_toml_string(profile_data: &[u8]) -> String {
    assert_eq!(profile_data.len(), PROFILE_DATA_LEN);
    let mut buffer = String::new();
    for layer_data in profile_data.chunks_exact(LAYER_DATA_LEN) {
        buffer.push_str("[[layers]]\nscancodes = ");
        serialize_layer_scancodes_to_toml_string(&mut buffer, layer_data);
        buffer.push('\n');
    }
    // Collapse trailing blank lines to exactly one final newline.
    buffer.truncate(buffer.trim_end_matches('\n').len() + 1);
    // Sanity check (debug builds only): output must round-trip as TOML.
    debug_assert!(buffer.parse::<toml::Table>().is_ok());
    buffer
}
/// Appends the layer's scancodes to `buffer` as a TOML array of hex
/// literals, broken into lines of 15 entries to mirror the keyboard rows.
fn serialize_layer_scancodes_to_toml_string(buffer: &mut String, layer_data: &[u8]) {
    // Scancodes are stored big-endian, two bytes each.
    let scancodes = layer_data
        .chunks_exact(2)
        .map(|d| u16::from_be_bytes(d.try_into().unwrap()));
    // Build formatted array split per keyboard row.
    buffer.push_str("[\n");
    for (i, code) in scancodes.enumerate() {
        if i % 15 == 0 {
            buffer.push_str("    ");
        }
        write!(buffer, "0x{code:04x},").unwrap();
        buffer.push(if i % 15 == 14 { '\n' } else { ' ' });
    }
    buffer.push_str("]\n");
}
/// Parses the TOML produced by [`serialize_to_toml_string`] back into raw
/// profile bytes.
///
/// # Errors
/// Fails on invalid TOML, a missing or mistyped `layers` array, a layer
/// count other than 4, or a layer whose `scancodes` array is not exactly
/// `LAYER_DATA_LEN / 2` entries.
pub fn parse_toml_string(serialized: &str) -> anyhow::Result<Vec<u8>> {
    let doc: toml::Table = serialized.parse()?;
    let layers = doc
        .get("layers")
        .ok_or_else(|| anyhow::anyhow!("layers not found"))?
        .as_array()
        .ok_or_else(|| anyhow::anyhow!("unexpected type of layers"))?;
    anyhow::ensure!(layers.len() == 4, "unexpected number of layers");
    let mut profile_data = Vec::with_capacity(PROFILE_DATA_LEN);
    for layer in layers {
        let scancodes: Vec<u16> = layer
            .get("scancodes")
            .ok_or_else(|| anyhow::anyhow!("scancodes not found"))?
            .clone()
            .try_into()?;
        anyhow::ensure!(
            scancodes.len() == LAYER_DATA_LEN / 2,
            "unexpected number of scancodes"
        );
        for code in &scancodes {
            // Re-encode big-endian, matching the device's on-wire format.
            profile_data.extend(code.to_be_bytes());
        }
    }
    assert_eq!(profile_data.len(), PROFILE_DATA_LEN);
    Ok(profile_data)
}
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
src/layout.rs | Rust | use std::fmt::{Display, Write as _};
use std::iter;
/// Flag bit marking a layout cell as blank (no key rendered there).
const B: u8 = 0x80;
/// Physical layout of US keymap.
#[rustfmt::skip]
pub const US_LAYOUT_WIDTHS_MAP: [[u8; 15]; 8] = [
    [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5], // Esc, 0, .., ~
    [7, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 8, 8], // Tab, Q, .., Delete (/BS)
    [9, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, B, B, 11], // Control, A, .., Return
    [11, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, B, B, 9, 5], // Shift, Z, .., Fn
    [7 | B, B, 5, 8, B, 29, B, B, 8, 5, B, B, B, B, B], // Alt, ..., Alt
    [23 | B, B, B, B, 8, 5, 8, B, B, B, 31 | B, 5, 5, B, B], // Left, Middle, Right, gesture pad?
    [75 | B, B, B, B, B, B, B, B, B, B, B, 5, 5, B, B], // gesture pad?
    [75 | B, B, B, 5, 5, B, B, B, B, B, B, 5, 5, B, B], // gesture pad?
];
/// Formats the row `labels` based on the given `widths` layout table.
pub fn format_row<I>(widths: &[u8], labels: I) -> String
where
    I: IntoIterator,
    I::Item: Display,
{
    let mut line = String::new();
    let mut prev_blank = true;
    for (label, &w) in labels.into_iter().zip(widths) {
        let blank = w & B != 0;
        let cell_width = usize::from(w & !B);
        // Zero-width cells are placeholders and render nothing.
        if cell_width == 0 {
            continue;
        }
        // Adjacent blank cells merge: only a non-blank neighbor draws a bar.
        line.push(if blank && prev_blank { ' ' } else { '|' });
        let inner = cell_width - 1;
        if blank {
            line.push_str(&" ".repeat(inner));
        } else {
            // Pad the label to the cell width, clipping overlong labels.
            let mut cell = String::new();
            write!(&mut cell, "{label:inner$}").unwrap();
            cell.truncate(inner);
            line.push_str(&cell);
        }
        prev_blank = blank;
    }
    if !prev_blank {
        line.push('|');
    }
    line
}
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
src/lib.rs | Rust | pub mod cli;
mod keymap;
mod layout;
mod scancode;
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
src/main.rs | Rust | fn main() -> anyhow::Result<()> {
    // Delegate to the CLI entry point; any error becomes a nonzero exit status.
    hhkb_studio_tools::cli::run()
}
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
src/scancode.rs | Rust | /// Translates HHKB Studio scancode to short string label.
pub fn scancode_to_label(code: u16) -> Option<&'static str> {
    // Returns a short keycap label; None means the code has no known label
    // (reserved, unassigned, or not used by the HHKB Studio firmware).
    match code {
        // 0x0000..0x00e8: USB HID Keyboard (with some HHKB specific mappings)
        0x0000 => None, // Reserved
        0x0001 => None, // Error roll over
        0x0002 => None, // POST fail
        0x0003 => None, // Error undefined
        0x0004 => Some("A"),
        0x0005 => Some("B"),
        0x0006 => Some("C"),
        0x0007 => Some("D"),
        0x0008 => Some("E"),
        0x0009 => Some("F"),
        0x000a => Some("G"),
        0x000b => Some("H"),
        0x000c => Some("I"),
        0x000d => Some("J"),
        0x000e => Some("K"),
        0x000f => Some("L"),
        0x0010 => Some("M"),
        0x0011 => Some("N"),
        0x0012 => Some("O"),
        0x0013 => Some("P"),
        0x0014 => Some("Q"),
        0x0015 => Some("R"),
        0x0016 => Some("S"),
        0x0017 => Some("T"),
        0x0018 => Some("U"),
        0x0019 => Some("V"),
        0x001a => Some("W"),
        0x001b => Some("X"),
        0x001c => Some("Y"),
        0x001d => Some("Z"),
        0x001e => Some("1 !"),
        0x001f => Some("2 @"),
        0x0020 => Some("3 #"),
        0x0021 => Some("4 $"),
        0x0022 => Some("5 %"),
        0x0023 => Some("6 ^"),
        0x0024 => Some("7 &"),
        0x0025 => Some("8 *"),
        0x0026 => Some("9 ("),
        0x0027 => Some("0 )"),
        0x0028 => Some("Return"),
        0x0029 => Some("Esc"),
        0x002a => Some("Backspace"),
        0x002b => Some("Tab"),
        0x002c => Some("Space"),
        0x002d => Some("- _"),
        0x002e => Some("= +"),
        0x002f => Some("[ {"),
        0x0030 => Some("] }"),
        0x0031 => Some("\\ |"),
        0x0032 => Some("# ~"), // Int
        0x0033 => Some("; :"),
        0x0034 => Some("' \""),
        0x0035 => Some("` ~"),
        0x0036 => Some(", <"),
        0x0037 => Some(". >"),
        0x0038 => Some("/ ?"),
        0x0039 => Some("Caps"),
        0x003a => Some("F1"),
        0x003b => Some("F2"),
        0x003c => Some("F3"),
        0x003d => Some("F4"),
        0x003e => Some("F5"),
        0x003f => Some("F6"),
        0x0040 => Some("F7"),
        0x0041 => Some("F8"),
        0x0042 => Some("F9"),
        0x0043 => Some("F10"),
        0x0044 => Some("F11"),
        0x0045 => Some("F12"),
        0x0046 => Some("PrtSc"),
        0x0047 => Some("ScrLock"),
        0x0048 => Some("Pause"),
        0x0049 => Some("Insert"),
        0x004a => Some("Home"),
        0x004b => Some("PgUp"),
        0x004c => Some("Delete"),
        0x004d => Some("End"),
        0x004e => Some("PgDn"),
        0x004f => Some("Right"),
        0x0050 => Some("Left"),
        0x0051 => Some("Down"),
        0x0052 => Some("Up"),
        0x0053 => Some("KP NumLock"),
        0x0054 => Some("KP /"),
        0x0055 => Some("KP *"),
        0x0056 => Some("KP -"),
        0x0057 => Some("KP +"),
        0x0058 => Some("KP Enter"),
        0x0059 => Some("KP 1"),
        0x005a => Some("KP 2"),
        0x005b => Some("KP 3"),
        0x005c => Some("KP 4"),
        0x005d => Some("KP 5"),
        0x005e => Some("KP 6"),
        0x005f => Some("KP 7"),
        0x0060 => Some("KP 8"),
        0x0061 => Some("KP 9"),
        0x0062 => Some("KP 0"),
        0x0063 => Some("KP ."),
        0x0064 => Some("\\ |"), // Int
        0x0065 => Some("Application"),
        0x0066 => None, // Sun Power
        0x0067 => None, // KP =
        0x0068 => Some("F13"),
        0x0069 => Some("F14"),
        0x006a => Some("F15"),
        0x006b => Some("F16"),
        0x006c => Some("F17"),
        0x006d => Some("F18"),
        0x006e => Some("F19"),
        0x006f => None, // F20
        0x0070 => None, // F21
        0x0071 => None, // F22
        0x0072 => None, // F23
        0x0073 => None, // F24
        0x0074 => None, // Execute
        0x0075 => None, // Help
        0x0076 => None, // Menu
        0x0077 => None, // Select
        0x0078 => None, // Stop
        0x0079 => None, // Again
        0x007a => None, // Undo
        0x007b => None, // Cut
        0x007c => None, // Copy
        0x007d => None, // Paste
        0x007e => None, // Find
        0x007f => None, // Sun Mute
        0x0080 => None, // Sun VolumeUp
        0x0081 => None, // Sun VolumeDown
        0x0082 => None, // Locking CapsLock
        0x0083 => None, // Locking NumLock
        0x0084 => None, // Locking ScrollLock
        0x0085 => None, // KP ,
        0x0086 => None, // KP EqualsSign
        0x0087 => Some("\\ Ro"), // Int1
        0x0088 => Some("Kana"), // Int2
        0x0089 => Some("Yen"), // Int3
        0x008a => Some("Xfer"), // Int4
        0x008b => Some("Nfer"), // Int5
        0x008c => None, // Int6
        0x008d => None, // Int7
        0x008e => None, // Int8
        0x008f => None, // Int9
        0x0090 => Some("Kana"), // Lang1
        0x0091 => Some("Eisu"), // Lang2
        0x0092 => None, // Lang3
        0x0093 => None, // Lang4
        0x0094 => None, // Lang5
        0x0095 => None, // Lang6
        0x0096 => None, // Lang7
        0x0097 => None, // Lang8
        0x0098 => None, // Lang9
        0x0099 => None, // Alternative Erase
        0x009a => None, // SysReq
        0x009b => None, // Cancel
        0x009c => None, // Clear
        0x009d => None, // Prior
        0x009e => None, // Return
        0x009f => None, // Separator
        0x00a0 => None, // Out
        0x00a1 => None, // Oper
        0x00a2 => None, // Clear
        0x00a3 => None, // ClSel
        0x00a4 => None, // ExSel
        0x00a5 => Some("Power"), // HHKB
        0x00a6 => None,
        0x00a7 => None,
        0x00a8 => Some("Mute"), // HHKB
        0x00a9 => Some("VolUP"), // HHKB
        0x00aa => Some("VolDn"), // HHKB
        0x00ab => None,
        0x00ac => None,
        0x00ad => None,
        0x00ae => None,
        0x00af => None,
        // NOTE(review): 0x00b0..0x00be are keypad usages in the HID tables;
        // the Eject/BriUp/BriDn labels look like HHKB-specific remaps of
        // those usages — confirm on hardware.
        0x00b0 => Some("Eject"), // KP 00
        0x00b1 => None, // KP 000
        0x00b2 => None, // Thousands Separator
        0x00b3 => None, // Decimal Separator
        0x00b4 => None, // Currency Unit
        0x00b5 => None, // Currency Sub-unit
        0x00b6 => None, // KP (
        0x00b7 => None, // KP )
        0x00b8 => None, // KP {
        0x00b9 => None, // KP }
        0x00ba => None, // KP Tab
        0x00bb => None, // KP Backspace
        0x00bc => None, // KP A
        0x00bd => Some("BriUp"), // KP B
        0x00be => Some("BriDn"), // KP C
        0x00bf => None, // KP D
        0x00c0 => None, // KP E
        0x00c1 => None, // KP F
        0x00c2 => None, // KP XOR
        0x00c3 => None, // KP ^
        0x00c4 => None, // KP %
        0x00c5 => None, // KP <
        0x00c6 => None, // KP >
        0x00c7 => None, // KP &
        0x00c8 => None, // KP &&
        0x00c9 => None, // KP |
        0x00ca => None, // KP ||
        0x00cb => None, // KP :
        0x00cc => None, // KP #
        0x00cd => None, // KP Space
        0x00ce => None, // KP @
        0x00cf => None, // KP !
        0x00d0 => None, // KP Memory Store
        0x00d1 => None, // KP Memory Recall
        0x00d2 => None, // KP Memory Clear
        0x00d3 => None, // KP Memory Add
        0x00d4 => None, // KP Memory Subtract
        0x00d5 => None, // KP Memory Multiply
        0x00d6 => None, // KP Memory Divide
        0x00d7 => None, // KP +/-
        0x00d8 => None, // KP Clear
        0x00d9 => None, // KP Clear Entry
        0x00da => None, // KP Binary
        0x00db => None, // KP Octal
        0x00dc => None, // KP Decimal
        0x00dd => None, // KP Hexadecimal
        0x00de => None,
        0x00df => None,
        // 0x00e0..0x00e7: HID modifier keys
        0x00e0 => Some("LControl"),
        0x00e1 => Some("LShift"),
        0x00e2 => Some("LAlt"),
        0x00e3 => Some("LMeta"), // or Super
        0x00e4 => Some("RControl"),
        0x00e5 => Some("RShift"),
        0x00e6 => Some("RAlt"),
        0x00e7 => Some("RMeta"), // or Super
        0x00e8 => None,
        0x00e9 => None,
        0x00ea => None,
        0x00eb => None,
        0x00ec => None,
        0x00ed => None,
        0x00ee => None,
        0x00ef => None,
        // 0x00f0..: HHKB mouse-key emulation
        0x00f0 => Some("MsUp"), // HHKB Mouse Cursor Up
        0x00f1 => Some("MsDn"), // HHKB Mouse Cursor Down
        0x00f2 => Some("MsLeft"), // HHKB Mouse Cursor Left
        0x00f3 => Some("MsRight"), // HHKB Mouse Cursor Right
        0x00f4 => Some("LB"), // HHKB Mouse Left Click
        0x00f5 => Some("MB"), // HHKB Mouse Middle Click
        0x00f6 => Some("RB"), // HHKB Mouse Right Click
        0x00f7 => None,
        0x00f8 => None,
        0x00f9 => Some("MwUp"), // HHKB Mouse Wheel Up
        0x00fa => Some("MwDn"), // HHKB Mouse Wheel Down
        0x00fb => Some("MwLeft"), // HHKB Mouse Wheel Left
        0x00fc => Some("MwRight"), // HHKB Mouse Wheel Right
        0x00fd => None,
        0x00fe => None,
        0x00ff => None,
        // TODO: 0x094f => Some("Right"),
        // TODO: 0x0950 => Some("Left"),
        // 0x51xx: HHKB layer (Fn) keys
        0x5101 => Some("Fn1"),
        0x5102 => Some("Fn2"),
        0x5103 => Some("Fn3"),
        // 0x5fxx: HHKB device controls (gesture pad, pointing stick,
        // mouse keys, Alt/Cmd tab switching)
        0x5f80 => Some("GLS On"), // HHKB GesturePad SideLeft On
        0x5f81 => Some("GFL On"), // HHKB GesturePad FrontLeft On
        0x5f82 => Some("GFR On"), // HHKB GesturePad FrontRight On
        0x5f83 => Some("GRS On"), // HHKB GesturePad SideRight On
        0x5f84 => Some("GLS Off"),
        0x5f85 => Some("GFL Off"),
        0x5f86 => Some("GFR Off"),
        0x5f87 => Some("GRS Off"),
        0x5f88 => Some("GLS Toggle"),
        0x5f89 => Some("GFL Toggle"),
        0x5f8a => Some("GFR Toggle"),
        0x5f8b => Some("GRS Toggle"),
        0x5f8c => Some("Alt TabL"),
        0x5f8d => Some("Alt TabR"),
        0x5f8e => Some("Cmd TabL"),
        0x5f8f => Some("Cmd TabR"),
        0x5f90 => Some("Pstck On"), // HHKB Pointing Stick On
        0x5f91 => Some("Pstck Off"),
        0x5f92 => Some("Pstck Toggle"),
        0x5f93 => Some("Ms On"), // HHKB Mouse Key On
        0x5f94 => Some("Ms Off"),
        0x5f95 => Some("Ms Toggle"),
        0x5f9e => Some("Gspd Low"), // HHKB GesturePad Sensitivity Low
        0x5f9f => Some("Gspd Mid"),
        0x5fa0 => Some("Gspd Hi"),
        0x5fa1 => Some("Gspd Max"),
        0x5fa2 => Some("Spd +"), // HHKB Pointing Stick Speed+
        0x5fa3 => Some("Spd -"),
        0x5fa4 => Some("Spd 1"),
        0x5fa5 => Some("Spd 2"),
        0x5fa6 => Some("Spd 3"),
        0x5fa7 => Some("Spd 4"),
        _ => None,
    }
}
| yuja/hhkb-studio-tools | 30 | Linux tool to modify HHKB Studio keymap | Rust | yuja | Yuya Nishihara | |
main.cpp | C++ | #include <QCommandLineParser>
#include <QGuiApplication>
#include <QQuickItem>
#include <QQuickItemGrabResult>
#include <QQuickView>
#include <QUrl>
#include <QWindow>
#include <QtDebug>
#include <set>
namespace {
// Depth-first walk over the item tree: schedules an offscreen grab for every
// item that exposes a non-empty "fileName" property and saves each grab to
// that file when it becomes ready. Pending grab results are kept alive both
// by `grabResults` and by each lambda's by-value capture of `grab`. When
// `quitOnLastItemGrabbed` is set, the application quits after the last
// pending grab has been handled.
void grabItemsRecursively(std::set<QSharedPointer<QQuickItemGrabResult>> &grabResults,
                          QQuickItem *item, bool quitOnLastItemGrabbed)
{
    // Recurse into children first so nested items are scheduled as well.
    for (auto *child : item->childItems()) {
        grabItemsRecursively(grabResults, child, quitOnLastItemGrabbed);
    }
    // Items opt in to grabbing by defining a "fileName" property.
    const auto fileName = item->property("fileName").toString();
    if (fileName.isEmpty())
        return;
    auto grab = item->grabToImage();
    if (!grab) {
        qWarning() << "Failed to grab item" << item;
        return;
    }
    grabResults.insert(grab);
    // NOTE: `grabResults` is captured by reference; the caller must keep it
    // alive until all ready() signals have fired (main() does, across exec()).
    QObject::connect(grab.get(), &QQuickItemGrabResult::ready,
                     [fileName, grab, &grabResults, quitOnLastItemGrabbed]() {
                         auto ok = grab->saveToFile(fileName);
                         if (ok) {
                             qInfo() << "Saved file" << fileName;
                         } else {
                             qWarning() << "Failed to save file" << fileName;
                         }
                         // Drop our ownership; quit once the last grab is done.
                         grabResults.erase(grab);
                         if (grabResults.empty() && quitOnLastItemGrabbed) {
                             QCoreApplication::quit();
                         }
                     });
}
} // namespace
// Loads a QML file, grabs every item that defines a "fileName" property to an
// image file, and (unless --show-window is given) quits when all grabs are
// saved.
int main(int argc, char *argv[])
{
    QGuiApplication app(argc, argv);

    QCommandLineParser parser;
    parser.addHelpOption();
    parser.addOption({ "show-window", "Show QML file in window" });
    parser.addPositionalArgument("filename", "Source QML file to process.");
    parser.process(app);
    const auto arguments = parser.positionalArguments();
    if (arguments.empty()) {
        qWarning() << "No filename specified";
        return -1;
    }
    const auto fileName = arguments.front();
    const bool showWindow = parser.isSet("show-window");

    // Without --show-window, parent the view to an invisible dummy window so
    // that show() below doesn't put anything on screen.
    QWindow dummyWindow;
    QQuickView offscreenView;
    if (!showWindow) {
        dummyWindow.create();
        offscreenView.setParent(&dummyWindow);
    }
    offscreenView.setSource(QUrl::fromLocalFile(fileName));
    auto *rootItem = offscreenView.rootObject();
    if (!rootItem) {
        qWarning() << "No root item created from" << fileName;
        return -1;
    }
    // the parent window has to be visible to grabToImage()
    offscreenView.show();

    // Must outlive app.exec(): pending grabs are erased from this set as
    // their ready() signals fire.
    std::set<QSharedPointer<QQuickItemGrabResult>> grabResults;
    grabItemsRecursively(grabResults, rootItem, /*quitOnLastItemGrabbed=*/!showWindow);
    if (grabResults.empty()) {
        qWarning() << "No item to grab found in " << fileName;
        qWarning() << "(Set fileName property to items to be grabbed.)";
        return -1;
    }
    qInfo() << "Found" << grabResults.size() << "items to grab";
    return app.exec();
}
| yuja/qmlseen | 4 | Mini tool to generate prerendered images from QML file | C++ | yuja | Yuya Nishihara | |
cmake/QmluicMacros.cmake | CMake | cmake_minimum_required(VERSION 3.12)
# Registers .qml sources on <target> and generates the matching .ui files
# (plus uisupport_*.h dynamic-binding headers unless NO_DYNAMIC_BINDING is
# given) by running qmluic at build time.
#
# qmluic_target_qml_sources(<target> [NO_DYNAMIC_BINDING]
#                           [OUTPUT_DIRECTORY <dir>] <qml-files>...)
function(qmluic_target_qml_sources target)
    cmake_parse_arguments(PARSE_ARGV 1 arg "NO_DYNAMIC_BINDING" "OUTPUT_DIRECTORY" "")
    set(qml_files ${arg_UNPARSED_ARGUMENTS})
    set(no_dynamic_binding ${arg_NO_DYNAMIC_BINDING})
    set(output_directory ${arg_OUTPUT_DIRECTORY})
    if(NOT output_directory)
        set(output_directory ${CMAKE_CURRENT_BINARY_DIR})
    endif()
    set(generate_ui_opts)
    list(TRANSFORM qml_files PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/" OUTPUT_VARIABLE abs_qml_files)
    # Generated file names follow the lowercased QML file names.
    list(TRANSFORM qml_files TOLOWER OUTPUT_VARIABLE ui_files)
    # "\\." keeps the dot literal in the regex; a bare "\." reaches CMake's
    # regex engine as "." (any character), which could mangle unexpected names.
    list(TRANSFORM ui_files REPLACE "\\.qml$" ".ui")
    list(TRANSFORM ui_files PREPEND "${output_directory}/" OUTPUT_VARIABLE abs_ui_files)
    if(no_dynamic_binding)
        set(ui_support_h_files)
        set(abs_ui_support_h_files)
        list(APPEND generate_ui_opts "--no-dynamic-binding")
    else()
        list(TRANSFORM qml_files TOLOWER OUTPUT_VARIABLE ui_support_h_files)
        list(TRANSFORM ui_support_h_files REPLACE "([^/]*)\\.qml$" "uisupport_\\1.h")
        list(TRANSFORM ui_support_h_files PREPEND "${output_directory}/" OUTPUT_VARIABLE abs_ui_support_h_files)
    endif()
    target_sources(${target} PRIVATE ${qml_files})
    get_target_property(QMAKE_EXECUTABLE Qt::qmake LOCATION)
    # One command regenerates all .ui / uisupport_*.h outputs whenever any
    # source .qml (or qmluic itself) changes.
    add_custom_command(
        OUTPUT ${abs_ui_files} ${abs_ui_support_h_files}
        COMMAND
            Qmluic::qmluic generate-ui -O "${output_directory}"
            --qmake "${QMAKE_EXECUTABLE}"
            ${generate_ui_opts}
            -- ${qml_files}
        DEPENDS ${abs_qml_files} Qmluic::qmluic Qt::qmake
        WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
        COMMENT "Generating UI from QML"
    )
    target_sources(${target} PRIVATE ${abs_ui_support_h_files})
    # Run uic over the generated .ui files to produce the ui_*.h headers.
    qt_wrap_ui(header_files ${abs_ui_files})
    target_sources(${target} PRIVATE ${header_files})
endfunction()
# Adds custom target that creates QML type stubs.
#
# This is primarily designed for internal use, but you can use this to generate
# type stubs by yourself.
#
# Example:
# qmluic_add_qmldir(qmluic.QtWidgets Qt5::Widgets) to
# generate imports/qmluic/QtWidgets/{qmldir,plugins.qmltypes}
function(qmluic_add_qmldir module_name lib)
    get_target_property(lib_location ${lib} LOCATION)
    get_filename_component(lib_directory "${lib_location}" DIRECTORY)
    # Qt 5 layout: metatypes live next to the libraries.
    set(metatypes_directory "${lib_directory}/metatypes")
    # FIXME: Look for the QT_INSTALL_ARCHDATA path instead. This works only on
    # Debian-like systems.
    set(metatypes_directory_qt6 "${lib_directory}/qt6/metatypes")
    # Look up metatypes.json by pattern matching. Maybe this can be resolved from
    # the INTERFACE_SOURCES property with TARGET_PROPERTY:QT_CONSUMES_METATYPES on Qt 6,
    # but doing that would be tedious.
    # e.g. "Qt5::Widgets" -> "qt5widgets" file-name prefix.
    string(REPLACE "::" "" metatypes_prefix ${lib})
    string(TOLOWER "${metatypes_prefix}" metatypes_prefix)
    file(GLOB input_metatypes_file
        "${metatypes_directory}/${metatypes_prefix}_*.json"
        "${metatypes_directory_qt6}/${metatypes_prefix}_*.json")
    # Exactly one metatypes.json must match; anything else is a setup error.
    list(LENGTH input_metatypes_file count)
    if(count EQUAL 0)
        message(FATAL_ERROR "No metatypes.json found for ${lib}")
    elseif(NOT count EQUAL 1)
        message(FATAL_ERROR "Multiple metatypes.json found for ${lib}: ${input_metatypes_file}")
    endif()
    # "qmluic.QtWidgets" -> imports/qmluic/QtWidgets/ output directory.
    string(REPLACE "." "/" module_directory ${module_name})
    set(output_directory "${CMAKE_CURRENT_BINARY_DIR}/imports/${module_directory}")
    set(output_qmldir "${output_directory}/qmldir")
    set(output_qmltypes "${output_directory}/plugins.qmltypes")
    # Synthesize the qmldir file line by line via `cmake -E echo` redirection.
    add_custom_command(
        OUTPUT "${output_qmldir}"
        COMMAND ${CMAKE_COMMAND} -E make_directory "${output_directory}"
        COMMAND ${CMAKE_COMMAND} -E echo "module ${module_name}" > "${output_qmldir}"
        COMMAND ${CMAKE_COMMAND} -E echo "typeinfo plugins.qmltypes" >> "${output_qmldir}"
        COMMAND ${CMAKE_COMMAND} -E echo "depends QtQml" >> "${output_qmldir}"
    )
    get_target_property(QMAKE_EXECUTABLE Qt::qmake LOCATION)
    # Convert metatypes.json into plugins.qmltypes with qmluic.
    add_custom_command(
        OUTPUT "${output_qmltypes}"
        COMMAND
            Qmluic::qmluic dump-metatypes --qmake "${QMAKE_EXECUTABLE}"
            --output-qmltypes "${output_qmltypes}"
            "${input_metatypes_file}"
        MAIN_DEPENDENCY "${input_metatypes_file}"
        DEPENDS Qmluic::qmluic Qt::qmake
    )
    add_custom_target("${module_name}" ALL DEPENDS "${output_qmldir}" "${output_qmltypes}")
endfunction()
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
cmake/QmluicShim.cmake | CMake | # Drop-in replacement for QmluicMacros.cmake
#
# This module should be loaded only when qmluic isn't available:
#
# find_package(Qmluic QUIET)
# if(NOT Qmluic_FOUND)
# include("${CMAKE_SOURCE_DIR}/cmake/QmluicShim.cmake")
# endif()
# Shim variant: registers pre-generated .ui / uisupport_*.h files without
# running qmluic, so OUTPUT_DIRECTORY (the checked-in output location) is
# mandatory.
function(qmluic_target_qml_sources target)
    cmake_parse_arguments(PARSE_ARGV 1 arg "NO_DYNAMIC_BINDING" "OUTPUT_DIRECTORY" "")
    set(qml_files ${arg_UNPARSED_ARGUMENTS})
    set(no_dynamic_binding ${arg_NO_DYNAMIC_BINDING})
    set(output_directory ${arg_OUTPUT_DIRECTORY})
    if(NOT output_directory)
        message(FATAL_ERROR "OUTPUT_DIRECTORY must be set (shim can't generate .ui from .qml)")
    endif()
    # Derived names must match what qmluic would have produced (lowercased).
    list(TRANSFORM qml_files TOLOWER OUTPUT_VARIABLE ui_files)
    # "\\." keeps the dot literal in the regex; a bare "\." reaches CMake's
    # regex engine as "." (any character), which could mangle unexpected names.
    list(TRANSFORM ui_files REPLACE "\\.qml$" ".ui")
    list(TRANSFORM ui_files PREPEND "${output_directory}/" OUTPUT_VARIABLE abs_ui_files)
    if(no_dynamic_binding)
        set(ui_support_h_files)
        set(abs_ui_support_h_files)
    else()
        list(TRANSFORM qml_files TOLOWER OUTPUT_VARIABLE ui_support_h_files)
        list(TRANSFORM ui_support_h_files REPLACE "([^/]*)\\.qml$" "uisupport_\\1.h")
        list(TRANSFORM ui_support_h_files PREPEND "${output_directory}/" OUTPUT_VARIABLE abs_ui_support_h_files)
    endif()
    target_sources(${target} PRIVATE ${qml_files})
    target_sources(${target} PRIVATE ${abs_ui_support_h_files})
    # Run uic over the pre-generated .ui files to produce the ui_*.h headers.
    qt_wrap_ui(header_files ${abs_ui_files})
    target_sources(${target} PRIVATE ${header_files})
endfunction()
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/bindingloop.cpp | C++ | #include "bindingloop.h"
#include "ui_bindingloop.h"
#include "uisupport_bindingloop.h"
// Builds the widget tree from the uic-generated form, then activates the
// qmluic-generated dynamic bindings.
BindingLoop::BindingLoop(QWidget *parent)
    : QWidget(parent),
      ui_(std::make_unique<Ui::BindingLoop>()),
      uiSupport_(std::make_unique<UiSupport::BindingLoop>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
BindingLoop::~BindingLoop() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/bindingloop.h | C/C++ Header | #pragma once
#include <QWidget>
#include <memory>
namespace Ui {
class BindingLoop;
}
namespace UiSupport {
class BindingLoop;
}
// Example widget backed by a uic-generated Ui class and qmluic
// dynamic-binding support.
class BindingLoop : public QWidget
{
    Q_OBJECT

public:
    explicit BindingLoop(QWidget *parent = nullptr);
    ~BindingLoop() override;

private:
    std::unique_ptr<Ui::BindingLoop> ui_; // uic-generated form
    std::unique_ptr<UiSupport::BindingLoop> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/common/mydialogbuttonbox.cpp | C++ | #include "mydialogbuttonbox.h"
#include "ui_mydialogbuttonbox.h"
// Builds the button box contents from the uic-generated form.
MyDialogButtonBox::MyDialogButtonBox(QWidget *parent)
    : QDialogButtonBox(parent), ui_(std::make_unique<Ui::MyDialogButtonBox>())
{
    ui_->setupUi(this);
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui type.
MyDialogButtonBox::~MyDialogButtonBox() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/common/mydialogbuttonbox.h | C/C++ Header | #pragma once
#include <QDialogButtonBox>
#include <memory>
namespace Ui {
class MyDialogButtonBox;
}
// Custom dialog button box whose contents come from a uic-generated form.
class MyDialogButtonBox : public QDialogButtonBox
{
    Q_OBJECT

public:
    // `explicit` matches the sibling widget headers and prevents accidental
    // implicit conversion from QWidget*.
    explicit MyDialogButtonBox(QWidget *parent = nullptr);
    ~MyDialogButtonBox() override;

private:
    std::unique_ptr<Ui::MyDialogButtonBox> ui_; // uic-generated form
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/main.cpp | C++ | #include <QApplication>
#include "maindialog.h"
// Shows the custom-widget example dialog and runs the Qt event loop.
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    MainDialog dialog;
    dialog.show();
    return app.exec();
}
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/maindialog.cpp | C++ | #include "maindialog.h"
#include "ui_maindialog.h"
// Builds the dialog contents from the uic-generated form.
MainDialog::MainDialog(QWidget *parent) : QDialog(parent), ui_(std::make_unique<Ui::MainDialog>())
{
    ui_->setupUi(this);
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui type.
MainDialog::~MainDialog() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/maindialog.h | C/C++ Header | #pragma once
#include <QDialog>
#include <memory>
namespace Ui {
class MainDialog;
}
// Main dialog of the custom-widget example; contents come from a
// uic-generated form.
class MainDialog : public QDialog
{
    Q_OBJECT

public:
    // `explicit` matches the sibling widget headers and prevents accidental
    // implicit conversion from QWidget*.
    explicit MainDialog(QWidget *parent = nullptr);
    ~MainDialog() override;

private:
    std::unique_ptr<Ui::MainDialog> ui_; // uic-generated form
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/settingsform.cpp | C++ | #include "settingsform.h"
#include "ui_settingsform.h"
// Builds the form contents from the uic-generated Ui class.
SettingsForm::SettingsForm(QWidget *parent)
    : QWidget(parent), ui_(std::make_unique<Ui::SettingsForm>())
{
    ui_->setupUi(this);
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui type.
SettingsForm::~SettingsForm() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/customwidget/settingsform.h | C/C++ Header | #pragma once
#include <QWidget>
#include <memory>
namespace Ui {
class SettingsForm;
}
// Settings form widget; contents come from a uic-generated form.
class SettingsForm : public QWidget
{
    Q_OBJECT

public:
    // `explicit` matches the sibling widget headers and prevents accidental
    // implicit conversion from QWidget*.
    explicit SettingsForm(QWidget *parent = nullptr);
    ~SettingsForm() override;

private:
    std::unique_ptr<Ui::SettingsForm> ui_; // uic-generated form
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/hgemaildialog.cpp | C++ | #include "hgemaildialog.h"
#include "ui_hgemaildialog.h"
#include "uisupport_hgemaildialog.h"
// Builds the dialog from the uic-generated form, then activates the
// qmluic-generated dynamic bindings.
HgEmailDialog::HgEmailDialog(QWidget *parent)
    : QDialog(parent),
      ui_(std::make_unique<Ui::HgEmailDialog>()),
      uiSupport_(std::make_unique<UiSupport::HgEmailDialog>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
HgEmailDialog::~HgEmailDialog() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/hgemaildialog.h | C/C++ Header | #pragma once
#include <QDialog>
#include <memory>
namespace Ui {
class HgEmailDialog;
}
namespace UiSupport {
class HgEmailDialog;
}
// Example dialog backed by a uic-generated Ui class and qmluic
// dynamic-binding support.
class HgEmailDialog : public QDialog
{
    Q_OBJECT

public:
    explicit HgEmailDialog(QWidget *parent = nullptr);
    ~HgEmailDialog() override;

private:
    std::unique_ptr<Ui::HgEmailDialog> ui_; // uic-generated form
    std::unique_ptr<UiSupport::HgEmailDialog> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/itemviews.cpp | C++ | #include <QDir>
#include <QFileSystemModel>
#include "itemviews.h"
#include "ui_itemviews.h"
#include "uisupport_itemviews.h"
// Builds the form and bindings, then wires a shared QFileSystemModel into
// both views so they have something to render.
ItemViews::ItemViews(QWidget *parent)
    : QWidget(parent),
      ui_(std::make_unique<Ui::ItemViews>()),
      uiSupport_(std::make_unique<UiSupport::ItemViews>(this, ui_.get())),
      fsModel_(std::make_unique<QFileSystemModel>())
{
    ui_->setupUi(this);
    uiSupport_->setup();
    // Install a model to render views. Here we use QFileSystemModel because it's easy.
    ui_->treeView->setModel(fsModel_.get());
    ui_->tableView->setModel(fsModel_.get());
    auto homeIndex = fsModel_->setRootPath(QDir::homePath());
    ui_->treeView->setRootIndex(homeIndex);
    ui_->tableView->setRootIndex(homeIndex);
    // Activating a directory in the tree retargets the table to it; activating
    // a file retargets the table to the file's parent directory.
    connect(ui_->treeView, &QTreeView::activated, this, [this](const QModelIndex &index) {
        ui_->tableView->setRootIndex(fsModel_->isDir(index) ? index : index.parent());
    });
}

// Out-of-line so unique_ptr can destroy the forward-declared types.
ItemViews::~ItemViews() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/itemviews.h | C/C++ Header | #pragma once
#include <QWidget>
#include <memory>
class QFileSystemModel;
namespace Ui {
class ItemViews;
}
namespace UiSupport {
class ItemViews;
}
// Example widget demonstrating tree/table views driven by a shared model.
class ItemViews : public QWidget
{
    Q_OBJECT

public:
    explicit ItemViews(QWidget *parent = nullptr);
    ~ItemViews() override;

private:
    std::unique_ptr<Ui::ItemViews> ui_; // uic-generated form
    std::unique_ptr<UiSupport::ItemViews> uiSupport_; // qmluic bindings
    std::unique_ptr<QFileSystemModel> fsModel_; // shared by both views
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/layoutflow.cpp | C++ | #include "layoutflow.h"
#include "ui_layoutflow.h"
#include "uisupport_layoutflow.h"
// Builds the widget tree from the uic-generated form, then activates the
// qmluic-generated dynamic bindings.
LayoutFlow::LayoutFlow(QWidget *parent)
    : QWidget(parent),
      ui_(std::make_unique<Ui::LayoutFlow>()),
      uiSupport_(std::make_unique<UiSupport::LayoutFlow>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
LayoutFlow::~LayoutFlow() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/layoutflow.h | C/C++ Header | #pragma once
#include <QWidget>
#include <memory>
namespace Ui {
class LayoutFlow;
}
namespace UiSupport {
class LayoutFlow;
}
// Example widget backed by a uic-generated Ui class and qmluic
// dynamic-binding support.
class LayoutFlow : public QWidget
{
    Q_OBJECT

public:
    explicit LayoutFlow(QWidget *parent = nullptr);
    ~LayoutFlow() override;

private:
    std::unique_ptr<Ui::LayoutFlow> ui_; // uic-generated form
    std::unique_ptr<UiSupport::LayoutFlow> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/main.cpp | C++ | #include <QApplication>
#include "mainwindow.h"
// Shows the examples browser window and runs the Qt event loop.
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    MainWindow window;
    window.show();
    return app.exec();
}
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/mainwindow.cpp | C++ | #include <QCoreApplication>
#include <QDir>
#include <QFile>
#include <QFileInfo>
#include <QtDebug>
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "uisupport_mainwindow.h"
// Builds the window from the generated form/bindings and populates the
// source viewer for the initially selected example file.
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent),
      ui_(std::make_unique<Ui::MainWindow>()),
      uiSupport_(std::make_unique<UiSupport::MainWindow>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
    // Reload the source view whenever another example file is selected.
    connect(ui_->fileNameEdit, &QComboBox::currentIndexChanged, this,
            &MainWindow::updateSourceEdit);
    updateSourceEdit();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
MainWindow::~MainWindow() = default;

// Loads the currently selected file (relative to this source file's
// directory) into the source viewer; clears the viewer on failure.
void MainWindow::updateSourceEdit()
{
    // __FILE__ lets the example locate its sibling sources in the source tree.
    QDir baseDir(QFileInfo(__FILE__).dir());
    QFile file(baseDir.filePath(ui_->fileNameEdit->currentText()));
    if (file.open(QIODevice::ReadOnly)) {
        ui_->sourceEdit->setPlainText(QString::fromUtf8(file.readAll()));
    } else {
        qWarning() << "failed to open source file:" << file.fileName() << file.errorString();
        ui_->sourceEdit->setPlainText("");
    }
}
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/mainwindow.h | C/C++ Header | #pragma once
#include <QMainWindow>
#include <memory>
namespace Ui {
class MainWindow;
}
namespace UiSupport {
class MainWindow;
}
// Examples browser window: a file selector plus a read-only source viewer.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = nullptr);
    ~MainWindow() override;

private slots:
    // Reloads the source viewer for the selected file.
    void updateSourceEdit();

private:
    std::unique_ptr<Ui::MainWindow> ui_; // uic-generated form
    std::unique_ptr<UiSupport::MainWindow> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/settingsdialog.cpp | C++ | #include "settingsdialog.h"
#include "ui_settingsdialog.h"
#include "uisupport_settingsdialog.h"
// Builds the dialog from the uic-generated form, then activates the
// qmluic-generated dynamic bindings.
SettingsDialog::SettingsDialog(QWidget *parent)
    : QDialog(parent),
      ui_(std::make_unique<Ui::SettingsDialog>()),
      uiSupport_(std::make_unique<UiSupport::SettingsDialog>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
SettingsDialog::~SettingsDialog() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/settingsdialog.h | C/C++ Header | #pragma once
#include <QDialog>
#include <memory>
namespace Ui {
class SettingsDialog;
}
namespace UiSupport {
class SettingsDialog;
}
// Example dialog backed by a uic-generated Ui class and qmluic
// dynamic-binding support.
class SettingsDialog : public QDialog
{
    Q_OBJECT

public:
    explicit SettingsDialog(QWidget *parent = nullptr);
    ~SettingsDialog() override;

private:
    std::unique_ptr<Ui::SettingsDialog> ui_; // uic-generated form
    std::unique_ptr<UiSupport::SettingsDialog> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/staticitemmodel.cpp | C++ | #include "staticitemmodel.h"
#include "ui_staticitemmodel.h"
#include "uisupport_staticitemmodel.h"
// Builds the widget tree from the uic-generated form, then activates the
// qmluic-generated dynamic bindings.
StaticItemModel::StaticItemModel(QWidget *parent)
    : QWidget(parent),
      ui_(std::make_unique<Ui::StaticItemModel>()),
      uiSupport_(std::make_unique<UiSupport::StaticItemModel>(this, ui_.get()))
{
    ui_->setupUi(this);
    uiSupport_->setup();
}

// Out-of-line so unique_ptr can destroy the forward-declared Ui/UiSupport types.
StaticItemModel::~StaticItemModel() = default;
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara | |
examples/staticitemmodel.h | C/C++ Header | #pragma once
#include <QWidget>
#include <memory>
namespace Ui {
class StaticItemModel;
}
namespace UiSupport {
class StaticItemModel;
}
// Example widget backed by a uic-generated Ui class and qmluic
// dynamic-binding support.
class StaticItemModel : public QWidget
{
    Q_OBJECT

public:
    explicit StaticItemModel(QWidget *parent = nullptr);
    ~StaticItemModel() override;

private:
    std::unique_ptr<Ui::StaticItemModel> ui_; // uic-generated form
    std::unique_ptr<UiSupport::StaticItemModel> uiSupport_; // qmluic bindings
};
| yuja/qmluic | 9 | QML -> QtWidgets UI/C++ transpiler | Rust | yuja | Yuya Nishihara |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.