repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/traits.rs | crates/builder/src/artifact_qa/traits.rs | //! Generic abstractions for post‑build actions.
use crate::artifact_qa::diagnostics::DiagnosticCollector;
use crate::artifact_qa::reports::Report;
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use std::future::Future;
/// A single post-build QA step executed against the staging directory.
///
/// Implementors are either read-only checks ([`Validator`]) or steps that
/// rewrite staged artifacts in place ([`Patcher`]); both are driven through
/// the same `run` entry point.
pub trait Action: Send + Sync + 'static {
    /// Human readable label (emitted in events).
    const NAME: &'static str;

    /// Execute the action and return a [`Report`].
    ///
    /// Validators should ignore the findings parameter.
    /// Patchers may use the findings to target specific files.
    fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        findings: Option<&DiagnosticCollector>,
    ) -> impl Future<Output = Result<Report, Error>> + Send;
}

/// Marker trait for read-only validation actions.
pub trait Validator: Action {}

/// Marker trait for actions that may modify files in the staging directory.
pub trait Patcher: Action {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/headers.rs | crates/builder/src/artifact_qa/patchers/headers.rs | //! Converts absolute include paths in headers to <relative> form.
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use ignore::WalkBuilder;
use regex::Regex;
use sps2_errors::Error;
/// Rewrites absolute build-prefix `#include "..."` paths in staged headers
/// to relative form so installed headers do not reference build directories.
pub struct HeaderPatcher;

impl crate::artifact_qa::traits::Action for HeaderPatcher {
    const NAME: &'static str = "Header include‑fixer";

    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";

        // Match `#include "<build-path>..."` directives. Alternatives are
        // ordered most-specific first so the longest prefix wins.
        let re = Regex::new(&format!(
            r#"#\s*include\s*"({}|{}|{})[^"]+""#,
            regex::escape(&build_src),
            regex::escape(&build_prefix),
            regex::escape(build_base)
        ))
        .expect("static include-path regex must compile");

        let mut changed = Vec::new();
        for dir in ["include", "Headers"] {
            let root = env.staging_dir().join(dir);
            if !root.exists() {
                continue;
            }
            for entry in WalkBuilder::new(&root).build().flatten() {
                let p = entry.into_path();
                if !p.is_file() {
                    continue;
                }
                // Unreadable or non-UTF-8 files are skipped silently.
                let Ok(src) = std::fs::read_to_string(&p) else {
                    continue;
                };
                if !re.is_match(&src) {
                    continue;
                }
                // BUGFIX: the closure type was corrupted to `®ex::Captures`
                // (HTML-entity mangling of `&regex::Captures`); restored.
                let repl = re.replace_all(&src, |caps: &regex::Captures| {
                    // Strip the build prefix from the quoted path, keeping
                    // the `#include "…"` form.
                    // NOTE(review): `trim_start_matches("#include ")` does not
                    // strip variants like `# include`; confirm those never
                    // occur in practice or the match is left untouched.
                    let full = caps.get(0).unwrap().as_str();
                    let inner = full.trim_start_matches("#include ").trim();
                    let stripped = inner
                        .trim_matches('"')
                        .trim_start_matches(&build_src)
                        .trim_start_matches(&build_prefix)
                        .trim_start_matches(build_base)
                        .trim_start_matches('/');
                    format!("#include \"{stripped}\"")
                });
                std::fs::write(&p, repl.as_bytes())?;
                changed.push(p);
            }
        }
        Ok(Report {
            changed_files: changed,
            ..Default::default()
        })
    }
}

impl Patcher for HeaderPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/placeholder.rs | crates/builder/src/artifact_qa/patchers/placeholder.rs | //! Replaces `BUILD_PLACEHOLDER` and build‑prefix strings in *text* files.
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use globset::{Glob, GlobSetBuilder};
use ignore::WalkBuilder;
/// Replaces build-prefix strings in *text* files with the live install prefix.
pub struct PlaceholderPatcher;

impl crate::artifact_qa::traits::Action for PlaceholderPatcher {
    const NAME: &'static str = "Placeholder / build‑path replacer";

    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        use std::collections::HashSet;
        use std::fs::{self, File};
        use std::io::{BufWriter, Read, Write};

        let actual_prefix = sps2_config::fixed_paths::LIVE_DIR;
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";

        // ----------- build the globset of *binary* extensions we skip -----------
        let mut gsb = GlobSetBuilder::new();
        for pat in &[
            "*.png", "*.jpg", "*.jpeg", "*.gif", "*.ico", "*.gz", "*.bz2", "*.xz", "*.zip",
            "*.tar", "*.a", "*.so", "*.dylib", "*.o", "*.rlib",
        ] {
            gsb.add(Glob::new(pat).unwrap());
        }
        let binaries = gsb.build().unwrap();

        let mut changed = Vec::new();
        let mut warnings = Vec::new();

        // Choose the file set: precise validator findings when available,
        // otherwise a full staging-directory walk (old behavior).
        let files_to_process: Box<dyn Iterator<Item = std::path::PathBuf>> =
            if let Some(findings) = findings {
                let files_with_issues = findings.get_files_with_hardcoded_paths();
                let paths: HashSet<std::path::PathBuf> =
                    files_with_issues.keys().map(|&p| p.to_path_buf()).collect();
                Box::new(paths.into_iter())
            } else {
                Box::new(
                    WalkBuilder::new(env.staging_dir())
                        .hidden(false)
                        .parents(false)
                        .build()
                        .filter_map(Result::ok)
                        .map(ignore::DirEntry::into_path)
                        .filter(|p| p.is_file()),
                )
            };

        for path in files_to_process {
            // Skip binary files based on extension
            if binaries.is_match(&path) {
                continue;
            }
            if let Ok(mut f) = File::open(&path) {
                let mut buf = Vec::new();
                if f.read_to_end(&mut buf).is_ok() {
                    if let Ok(txt) = String::from_utf8(buf) {
                        let mut modified = false;
                        // Take ownership directly; the original text is never
                        // used again, so the previous `.clone()` was wasted.
                        let mut result = txt;
                        // Replace build paths in order of specificity (most specific first)
                        if result.contains(&build_src) {
                            result = result.replace(&build_src, actual_prefix);
                            modified = true;
                        }
                        if result.contains(&build_prefix) {
                            result = result.replace(&build_prefix, actual_prefix);
                            modified = true;
                        }
                        if result.contains(build_base) {
                            result = result.replace(build_base, actual_prefix);
                            modified = true;
                        }
                        if modified {
                            // Best effort; the parent should already exist for
                            // a file found on disk (no panic on a root path).
                            if let Some(parent) = path.parent() {
                                let _ = fs::create_dir_all(parent);
                            }
                            if let Ok(file) = File::create(&path) {
                                let mut writer = BufWriter::new(file);
                                // Flush explicitly: BufWriter's Drop swallows
                                // errors, so only count the file as changed
                                // once the bytes are actually written out.
                                if writer.write_all(result.as_bytes()).is_ok()
                                    && writer.flush().is_ok()
                                {
                                    changed.push(path);
                                }
                            }
                        }
                    }
                }
            }
        }

        // Event message
        if !changed.is_empty() {
            warnings.push(format!("Replaced placeholders in {} files", changed.len()));
        }
        Ok(Report {
            changed_files: changed,
            warnings,
            ..Default::default()
        })
    }
}

impl Patcher for PlaceholderPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/permissions.rs | crates/builder/src/artifact_qa/patchers/permissions.rs | //! Ensures binaries and dynamic libraries have proper execute permissions
//!
//! This patcher comprehensively handles all types of executables:
//! - Dynamic libraries (.dylib, .so)
//! - All files in bin/, sbin/ directories
//! - Mach-O executables in libexec/
//! - Scripts with shebang lines (#!/bin/sh, etc.)
//! - Mach-O binaries anywhere in the package
//! - Common script files (.sh, .py, .pl, etc.)
//! - Build outputs (target/release/, .build/debug/, etc.)
//!
//! Some build systems don't set proper permissions, so this ensures
//! all executables are actually executable after installation.
use crate::artifact_qa::{macho_utils, reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
/// Fixes missing execute permissions on staged binaries, libraries and scripts.
#[derive(Default)]
pub struct PermissionsFixer {
    // When true, apply the broader location/extension heuristics of
    // `needs_execute_permission_aggressive`; the default (false) only
    // touches dynamic libraries and Mach-O files.
    aggressive: bool,
}

impl PermissionsFixer {
    /// Create a new permissions fixer
    ///
    /// Set `aggressive` to true for more aggressive permission fixing (used with explicit calls).
    #[must_use]
    pub fn new(aggressive: bool) -> Self {
        Self { aggressive }
    }
}
impl PermissionsFixer {
    /// Check if a file is a dynamic library that needs execute permissions
    ///
    /// NOTE(review): substring matching means any name merely *containing*
    /// ".dylib"/".so" matches (e.g. versioned `libfoo.1.dylib`, but also
    /// unrelated names embedding ".so"); confirm the looseness is intended.
    fn is_dynamic_library(path: &Path) -> bool {
        if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
            // Dynamic libraries need +x on macOS
            name.contains(".dylib") || name.contains(".so")
        } else {
            false
        }
    }

    /// Check if a file has a shebang (#!) indicating it's a script
    fn has_shebang(path: &Path) -> bool {
        use std::fs::File;
        use std::io::{BufRead, BufReader};
        if let Ok(file) = File::open(path) {
            let mut reader = BufReader::new(file);
            let mut first_line = String::new();
            // A shebang requires at least the two bytes `#!` on the first line.
            if reader.read_line(&mut first_line).is_ok() && first_line.len() >= 2 {
                return first_line.starts_with("#!");
            }
        }
        false
    }

    /// Check if file needs execute permissions (conservative default mode)
    ///
    /// Conservative mode flags only dynamic libraries and Mach-O binaries.
    fn needs_execute_permission(path: &Path) -> bool {
        // Dynamic libraries need +x
        if Self::is_dynamic_library(path) {
            return true;
        }
        // Only check for Mach-O binaries by default
        macho_utils::is_macho_file(path)
    }

    /// Check if file needs execute permissions (aggressive mode for explicit `fix_permissions()` calls)
    ///
    /// This comprehensive check should be used to determine if a file needs execute permissions
    /// in aggressive mode, checking file location, type, and content.
    #[must_use]
    pub fn needs_execute_permission_aggressive(path: &Path) -> bool {
        // Dynamic libraries need +x
        if Self::is_dynamic_library(path) {
            return true;
        }
        // Check if file is in any common executable directory
        // We check parent directories to be more precise than string matching
        let mut current = path.parent();
        while let Some(parent) = current {
            if let Some(dir_name) = parent.file_name() {
                let dir_str = dir_name.to_string_lossy();
                // Standard executable directories
                if dir_str == "bin" || dir_str == "sbin" {
                    return true;
                }
                // libexec is special - only make Mach-O files executable
                if dir_str == "libexec" {
                    return macho_utils::is_macho_file(path);
                }
                // Cargo/Rust build directories (target/release, .build/debug, …)
                if dir_str == "release" || dir_str == "debug" {
                    if let Some(grandparent) = parent.parent() {
                        if let Some(gp_name) = grandparent.file_name() {
                            if gp_name == ".build" || gp_name == "target" {
                                return true;
                            }
                        }
                    }
                }
            }
            // Walk upward toward the filesystem root.
            current = parent.parent();
        }
        // Check for scripts with shebang (#!/bin/sh, #!/usr/bin/env python, etc.)
        if Self::has_shebang(path) {
            return true;
        }
        // Check for Mach-O binaries anywhere in the package
        if macho_utils::is_macho_file(path) {
            return true;
        }
        // Check for files with common executable extensions
        if let Some(ext) = path.extension() {
            let ext_str = ext.to_string_lossy();
            if ext_str == "sh"
                || ext_str == "bash"
                || ext_str == "zsh"
                || ext_str == "fish"
                || ext_str == "py"
                || ext_str == "pl"
                || ext_str == "rb"
                || ext_str == "lua"
            {
                return true;
            }
        }
        false
    }

    /// Fix permissions on a file if needed
    ///
    /// Returns `Ok(true)` when permissions were changed, `Ok(false)` when an
    /// execute bit was already present; propagates filesystem errors.
    fn fix_permissions(path: &Path) -> Result<bool, std::io::Error> {
        let metadata = std::fs::metadata(path)?;
        let mut perms = metadata.permissions();
        let current_mode = perms.mode();
        // Check if any execute bit is already set
        if current_mode & 0o111 != 0 {
            return Ok(false); // Already has execute permissions
        }
        // Add execute permissions matching read permissions
        // If readable by owner, make executable by owner, etc.
        let new_mode = current_mode | ((current_mode & 0o444) >> 2); // Convert read bits to execute bits
        perms.set_mode(new_mode);
        std::fs::set_permissions(path, perms)?;
        Ok(true)
    }
}
impl crate::artifact_qa::traits::Action for PermissionsFixer {
    const NAME: &'static str = "Permissions fixer";

    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // Default instance uses conservative mode
        // (i.e. `aggressive == false`, so the aggressive branch below is
        // never taken on this code path).
        let fixer = Self::default();
        let mut fixed_count = 0;
        let mut warnings = Vec::new();
        let mut errors = Vec::new();
        // Walk through all files in staging directory
        for entry in ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if !path.is_file() {
                continue;
            }
            // Use aggressive or conservative mode based on instance
            let needs_fix = if fixer.aggressive {
                Self::needs_execute_permission_aggressive(&path)
            } else {
                Self::needs_execute_permission(&path)
            };
            if !needs_fix {
                continue;
            }
            match Self::fix_permissions(&path) {
                Ok(true) => fixed_count += 1,
                Ok(false) => {} // Already had correct permissions
                Err(e) => {
                    errors.push(format!(
                        "Failed to fix permissions on {}: {}",
                        path.display(),
                        e
                    ));
                }
            }
        }
        if fixed_count > 0 {
            warnings.push(format!("Adjusted permissions for {fixed_count} files"));
        }
        Ok(Report {
            errors,
            warnings,
            changed_files: vec![], // Permissions changes don't count as file content changes
            ..Default::default()
        })
    }
}

impl Patcher for PermissionsFixer {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/object_cleaner.rs | crates/builder/src/artifact_qa/patchers/object_cleaner.rs | //! Cleaner that removes object (.o) files
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
/// Removes intermediate object (`.o`) files left behind in the staging tree.
pub struct ObjectFileCleaner;

impl crate::artifact_qa::traits::Action for ObjectFileCleaner {
    const NAME: &'static str = "Object file cleaner";

    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // Visit every entry under the staging directory and delete regular
        // files whose extension is exactly `o`; deletion failures are ignored.
        let mut removed: Vec<std::path::PathBuf> = Vec::new();
        let walker = ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build();
        for path in walker
            .filter_map(Result::ok)
            .map(ignore::DirEntry::into_path)
        {
            let is_object_file = path.is_file()
                && path.extension().and_then(|e| e.to_str()) == Some("o");
            if is_object_file && std::fs::remove_file(&path).is_ok() {
                removed.push(path);
            }
        }

        let warnings = if removed.is_empty() {
            Vec::new()
        } else {
            vec![format!("Removed {} object files", removed.len())]
        };

        Ok(Report {
            changed_files: removed,
            warnings,
            ..Default::default()
        })
    }
}

impl Patcher for ObjectFileCleaner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/rpath.rs | crates/builder/src/artifact_qa/patchers/rpath.rs | //! Fixes install‑name / `LC_RPATH` of Mach‑O dylibs & executables.
use crate::artifact_qa::{macho_utils, reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use sps2_events::{AppEvent, EventSender, GeneralEvent};
use sps2_platform::{PlatformContext, PlatformManager};
use sps2_types::RpathStyle;
use ignore::WalkBuilder;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
/// Fixes install-name / `LC_RPATH` entries of staged Mach-O dylibs and
/// executables.
pub struct RPathPatcher {
    // Rewriting strategy: `Modern` keeps `@rpath` references and adds an
    // RPATH entry; `Absolute` rewrites `@rpath/…` to absolute install paths.
    style: RpathStyle,
    // Process-wide platform abstraction used to drive the binary tools.
    platform: &'static sps2_platform::Platform,
}
impl RPathPatcher {
/// Create a new `RPathPatcher` with the specified style
///
/// The patcher will fix install names and RPATHs according to the given style.
#[must_use]
pub fn new(style: RpathStyle) -> Self {
    Self {
        style,
        platform: PlatformManager::instance().platform(),
    }
}

/// Create a platform context for this patcher
#[must_use]
pub fn create_platform_context(&self, event_sender: Option<EventSender>) -> PlatformContext {
    self.platform.create_context(event_sender)
}

/// Check if a file is a dylib based on its name pattern
/// Handles versioned dylibs like libfoo.1.dylib, libbar.2.3.4.dylib
fn is_dylib(path: &Path) -> bool {
    if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
        // Check if the filename contains .dylib anywhere
        // This catches: libfoo.dylib, libfoo.1.dylib, libfoo.1.2.3.dylib
        name.contains(".dylib")
    } else {
        false
    }
}

/// Check if a file should be processed by `RPathPatcher`
///
/// This includes dylibs, shared objects, and Mach-O executables. Returns true
/// if the file needs RPATH or install name processing.
#[must_use]
pub fn should_process_file(path: &Path) -> bool {
    if !path.is_file() {
        return false;
    }
    // Check if it's a dylib (including versioned ones)
    if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
        if name.contains(".dylib") || name.contains(".so") {
            return true;
        }
    }
    // Use the shared MachO detection logic as a fallback
    // This will catch any Mach-O files we might have missed with filename patterns
    macho_utils::is_macho_file(path)
}

/// Get the install name of a Mach-O file using platform abstraction
///
/// Returns `None` both when the tool call fails (error collapsed via
/// `unwrap_or_default`) and when the file reports no install name.
async fn get_install_name(&self, ctx: &PlatformContext, path: &Path) -> Option<String> {
    (self.platform.binary().get_install_name(ctx, path).await).unwrap_or_default()
}

/// Check if an install name needs fixing based on the style
fn needs_install_name_fix(&self, install_name: &str, _file_path: &Path) -> bool {
    match self.style {
        RpathStyle::Modern => {
            // Only fix install names that contain build paths
            // Do NOT fix @rpath/, @loader_path/, or @executable_path/ install names
            if install_name.starts_with("@rpath/")
                || install_name.starts_with("@loader_path/")
                || install_name.starts_with("@executable_path/")
            {
                return false;
            }
            // Check if the install name contains a build path
            install_name.contains("/opt/pm/build") || install_name.contains("/private/")
        }
        RpathStyle::Absolute => {
            // Absolute style: Fix @rpath references AND build paths
            // Keep @loader_path/ and @executable_path/ as they're relative to the binary
            if install_name.starts_with("@loader_path/")
                || install_name.starts_with("@executable_path/")
            {
                return false;
            }
            // Fix @rpath references and build paths
            install_name.starts_with("@rpath/")
                || install_name.contains("/opt/pm/build")
                || install_name.contains("/private/")
        }
    }
}

/// Fix the install name of a dylib to use absolute path
///
/// On failure returns a formatted error string; headerpad failures are tagged
/// with the `HEADERPAD_ERROR:` prefix so callers can apply a fallback.
async fn fix_install_name(
    &self,
    ctx: &PlatformContext,
    path: &Path,
    new_install_name: &str,
) -> Result<bool, String> {
    match self
        .platform
        .binary()
        .set_install_name(ctx, path, new_install_name)
        .await
    {
        Ok(()) => Ok(true),
        Err(platform_err) => {
            let err_msg = platform_err.to_string();
            // Check if this is a headerpad error
            // (install_name_tool cannot grow the load commands in place)
            if err_msg.contains("larger updated load commands do not fit") {
                Err(format!("HEADERPAD_ERROR: {}", path.display()))
            } else {
                Err(format!(
                    "install_name_tool failed on {}: {}",
                    path.display(),
                    err_msg
                ))
            }
        }
    }
}

/// Find all executables that link to a dylib and update their references
async fn update_dylib_references(
    &self,
    ctx: &PlatformContext,
    staging_dir: &Path,
    old_dylib_name: &str,
    new_dylib_path: &str,
) -> Result<Vec<PathBuf>, String> {
    let mut updated_files = Vec::new();
    let mut checked_files = HashSet::new();
    // Walk through all Mach-O files in the staging directory
    for entry in ignore::WalkBuilder::new(staging_dir)
        .hidden(false)
        .parents(false)
        .build()
        .filter_map(Result::ok)
    {
        let path = entry.into_path();
        if !path.is_file() || !macho_utils::is_macho_file(&path) {
            continue;
        }
        // Skip if we've already checked this file
        if !checked_files.insert(path.clone()) {
            continue;
        }
        // Check if this file references the old dylib
        let Ok(deps) = self.platform.binary().get_dependencies(ctx, &path).await else {
            continue;
        };
        if deps.iter().any(|dep| dep.contains(old_dylib_name)) {
            // This file references our dylib - update the reference
            if let Ok(()) = self
                .platform
                .binary()
                .change_dependency(ctx, &path, old_dylib_name, new_dylib_path)
                .await
            {
                updated_files.push(path);
            } else {
                // Silently continue on error
            }
        }
    }
    Ok(updated_files)
}

/// Fix dependencies that use @rpath by converting them to absolute paths (Absolute style)
///
/// Returns the `(old, new)` dependency pairs that were rewritten; a no-op
/// (empty result) for `Modern` style.
async fn fix_rpath_dependencies(
    &self,
    ctx: &PlatformContext,
    path: &Path,
    lib_path: &str,
) -> Result<Vec<(String, String)>, String> {
    let mut fixed_deps = Vec::new();
    // Skip if not using Absolute style
    if self.style != RpathStyle::Absolute {
        return Ok(fixed_deps);
    }
    // Get all dependencies using platform abstraction
    let Ok(deps) = self.platform.binary().get_dependencies(ctx, path).await else {
        return Ok(fixed_deps);
    };
    // Process each dependency
    for dep in deps {
        // Extract the library name after @rpath/
        if let Some(lib_name) = dep.strip_prefix("@rpath/") {
            let new_path = format!("{lib_path}/{lib_name}");
            // Update the dependency reference
            if let Ok(()) = self
                .platform
                .binary()
                .change_dependency(ctx, path, &dep, &new_path)
                .await
            {
                fixed_deps.push((dep, new_path));
            } else {
                // Continue on error - some dependencies might fail to update
            }
        }
    }
    Ok(fixed_deps)
}
/// Process a single file for RPATH and install name fixes
///
/// Returns `(needed_changes, install_name_fixed, bad_rpaths, error)`:
/// - `needed_changes`: the file required an RPATH addition or removal
/// - `install_name_fixed`: the dylib's own install name was rewritten
/// - `bad_rpaths`: RPATH entries that point into build directories
/// - `error`: failure description; `HEADERPAD_ERROR:`-prefixed when the
///   Mach-O header had no room for the updated load command
pub async fn process_file(
    &self,
    ctx: &PlatformContext,
    path: &Path,
    lib_path: &str,
    build_paths: &[String],
) -> (bool, bool, Vec<String>, Option<String>) {
    let _path_s = path.to_string_lossy().into_owned();
    let mut bad_rpaths = Vec::new();
    let mut need_good = false;
    let mut install_name_was_fixed = false;
    // Get RPATH entries using platform abstraction
    let Ok(rpath_entries) = self.platform.binary().get_rpath_entries(ctx, path).await else {
        return (false, false, bad_rpaths, None);
    };
    // Check RPATH entries and gather bad ones
    let mut has_good_rpath = false;
    for rpath in &rpath_entries {
        if rpath == lib_path {
            has_good_rpath = true;
        } else if build_paths.iter().any(|bp| rpath.contains(bp)) {
            // Flag any build paths as bad
            bad_rpaths.push(rpath.clone());
        }
    }
    // Check if binary needs @rpath by examining dependencies
    let Ok(deps) = self.platform.binary().get_dependencies(ctx, path).await else {
        return (false, false, bad_rpaths, None);
    };
    for dep in &deps {
        if dep.contains("@rpath/") {
            need_good = true;
            break;
        }
    }
    // Fix RPATHs
    // Only add RPATH for Modern style (for Absolute style, we convert @rpath to absolute paths)
    if need_good && self.style == RpathStyle::Modern && !has_good_rpath {
        let _ = self.platform.binary().add_rpath(ctx, path, lib_path).await;
    }
    // Remove every RPATH entry that points into a build directory.
    for bad in &bad_rpaths {
        let _ = self.platform.binary().delete_rpath(ctx, path, bad).await;
    }
    // Check and fix install names for dylibs
    if Self::is_dylib(path) {
        if let Some(install_name) = self.get_install_name(ctx, path).await {
            if self.needs_install_name_fix(&install_name, path) {
                // Fix the install name to absolute path
                let file_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
                let new_install_name = format!("{lib_path}/{file_name}");
                match self.fix_install_name(ctx, path, &new_install_name).await {
                    Ok(true) => install_name_was_fixed = true,
                    Ok(false) => {} // Should not happen with current implementation
                    Err(msg) => {
                        // Store error for reporting later
                        return (
                            need_good || !bad_rpaths.is_empty(),
                            false,
                            bad_rpaths,
                            Some(msg),
                        );
                    }
                }
            }
        }
    }
    // Fix @rpath dependencies if using Absolute style
    if self.style == RpathStyle::Absolute {
        match self.fix_rpath_dependencies(ctx, path, lib_path).await {
            Ok(fixed_deps) => {
                if !fixed_deps.is_empty() {
                    // Dependencies were fixed
                    // Note: We don't set a flag for this as it's part of Absolute-style processing
                }
            }
            Err(msg) => {
                // Store error for reporting later
                return (
                    need_good || !bad_rpaths.is_empty(),
                    install_name_was_fixed,
                    bad_rpaths,
                    Some(msg),
                );
            }
        }
    }
    (
        need_good || !bad_rpaths.is_empty(),
        install_name_was_fixed,
        bad_rpaths,
        None,
    )
}
/// Handle headerpad errors by updating references in dependent binaries
async fn handle_headerpad_errors(
patcher: &RPathPatcher,
platform_ctx: &PlatformContext,
headerpad_errors: &[PathBuf],
lib_path: &str,
staging_dir: &Path,
_build_ctx: &BuildContext,
) -> (Vec<PathBuf>, Vec<String>) {
let mut fixed_files = Vec::new();
let mut warnings = Vec::new();
if headerpad_errors.is_empty() {
return (fixed_files, warnings);
}
warnings.push(format!(
"Found {} dylibs with headerpad errors; attempting fallback reference updates",
headerpad_errors.len()
));
for dylib_path in headerpad_errors {
if let Some(file_name) = dylib_path.file_name().and_then(|n| n.to_str()) {
// Get the current install name that may need fixing
if let Some(current_install_name) =
patcher.get_install_name(platform_ctx, dylib_path).await
{
if patcher.needs_install_name_fix(¤t_install_name, dylib_path) {
// The desired new install name
let new_install_name = format!("{lib_path}/{file_name}");
// Update all binaries that reference this dylib
match patcher
.update_dylib_references(
platform_ctx,
staging_dir,
¤t_install_name,
&new_install_name,
)
.await
{
Ok(updated_files) => {
if !updated_files.is_empty() {
fixed_files.extend(updated_files);
}
}
Err(e) => {
warnings.push(format!(
"Failed to update references for {file_name}: {e}"
));
}
}
}
}
}
}
(fixed_files, warnings)
}
}
impl RPathPatcher {
    /// Process all files that need rpath patching
    ///
    /// Returns `(changed_files, rpath_fixes, install_name_fixes,
    /// headerpad_errors, warnings)`.
    async fn process_files(
        &self,
        files: Vec<PathBuf>,
        lib_path: &str,
        build_paths: &[String],
        platform_ctx: &PlatformContext,
        build_ctx: &BuildContext,
    ) -> (Vec<PathBuf>, usize, usize, Vec<PathBuf>, Vec<String>) {
        let mut changed = Vec::new();
        let mut install_name_fixes = 0;
        let mut rpath_fixes = 0;
        let mut warnings = Vec::new();
        let mut headerpad_errors = Vec::new();
        for path in files {
            let (rpath_changed, name_was_fixed, _, error_msg) = self
                .process_file(platform_ctx, &path, lib_path, build_paths)
                .await;
            if let Some(msg) = &error_msg {
                // Headerpad failures are collected for a fallback pass;
                // everything else is surfaced as a warning event.
                if msg.starts_with("HEADERPAD_ERROR:") {
                    headerpad_errors.push(path.clone());
                } else {
                    crate::utils::events::send_event(
                        build_ctx,
                        AppEvent::General(GeneralEvent::warning("Install name fix failed")),
                    );
                    warnings.push(format!("{}: install name fix failed", path.display()));
                }
            }
            if rpath_changed {
                rpath_fixes += 1;
            }
            if name_was_fixed {
                install_name_fixes += 1;
            }
            if rpath_changed || name_was_fixed {
                changed.push(path.clone());
            }
        }
        (
            changed,
            rpath_fixes,
            install_name_fixes,
            headerpad_errors,
            warnings,
        )
    }
}
impl crate::artifact_qa::traits::Action for RPathPatcher {
    const NAME: &'static str = "install_name_tool patcher";

    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // This entry point always uses the Modern (@rpath-preserving) style.
        let patcher = Self::new(RpathStyle::Modern);
        // Create platform context from build context
        let platform_ctx = patcher.platform.create_context(ctx.event_sender.clone());
        let mut files_to_process = HashSet::new();
        // Collect files from validator findings
        if let Some(findings) = findings {
            let files_with_macho_issues = findings.get_files_with_macho_issues();
            for (path, _) in files_with_macho_issues {
                files_to_process.insert(path.to_path_buf());
            }
        }
        // Add files with @rpath references
        // (full staging walk; the HashSet de-duplicates against findings).
        for entry in WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if Self::should_process_file(&path) {
                files_to_process.insert(path);
            }
        }
        let files: Vec<_> = files_to_process.into_iter().collect();
        let lib_path = &format!("{}/lib", sps2_config::fixed_paths::LIVE_DIR);
        // Build locations that must not leak into shipped binaries.
        let build_paths = vec![
            "/opt/pm/build".to_string(),
            env.build_prefix().to_string_lossy().into_owned(),
            format!("{}/src", env.build_prefix().to_string_lossy()),
        ];
        // Process all files
        let (mut changed, rpath_fixes, install_name_fixes, headerpad_errors, mut warnings) =
            patcher
                .process_files(files, lib_path, &build_paths, &platform_ctx, ctx)
                .await;
        // Handle headerpad errors
        let (headerpad_fixed, headerpad_warnings) = Self::handle_headerpad_errors(
            &patcher,
            &platform_ctx,
            &headerpad_errors,
            lib_path,
            env.staging_dir(),
            ctx,
        )
        .await;
        changed.extend(headerpad_fixed);
        warnings.extend(headerpad_warnings);
        // Report results
        if !changed.is_empty() {
            let mut operations = Vec::new();
            if rpath_fixes > 0 {
                operations.push(format!(
                    "adjusted {} RPATH{}",
                    rpath_fixes,
                    if rpath_fixes > 1 { "s" } else { "" }
                ));
            }
            if install_name_fixes > 0 {
                operations.push(format!(
                    "updated {} install name{}",
                    install_name_fixes,
                    if install_name_fixes > 1 { "s" } else { "" }
                ));
            }
            if !operations.is_empty() {
                warnings.push(operations.join(", "));
            }
        }
        Ok(Report {
            changed_files: changed,
            warnings,
            ..Default::default()
        })
    }
}

impl Patcher for RPathPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/python_bytecode_cleanup.rs | crates/builder/src/artifact_qa/patchers/python_bytecode_cleanup.rs | //! Patcher that removes Python bytecode and build artifacts from staging directory
//!
//! This patcher cleans up dynamic files generated during Python package installation
//! that should not be included in the final .sp packages. These files are automatically
//! regenerated when Python packages are used at runtime.
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use std::path::Path;
use tokio::fs;
/// Removes Python bytecode and build artifacts from the staging directory;
/// these files are regenerated at runtime and must not ship in packages.
#[derive(Default)]
pub struct PythonBytecodeCleanupPatcher;

impl PythonBytecodeCleanupPatcher {
    /// Remove all __pycache__ directories and their contents
    async fn remove_pycache_dirs(
        &self,
        staging_dir: &Path,
    ) -> Result<Vec<std::path::PathBuf>, Error> {
        let mut removed_dirs = Vec::new();
        for entry in ignore::WalkBuilder::new(staging_dir)
            .hidden(false)
            .parents(false)
            .build()
        {
            let path = match entry {
                Ok(e) => e.into_path(),
                Err(_) => continue,
            };
            if path.is_dir() && path.file_name().and_then(|n| n.to_str()) == Some("__pycache__") {
                // Best effort: removal failures are deliberately ignored.
                if let Ok(()) = fs::remove_dir_all(&path).await {
                    removed_dirs.push(path);
                }
            }
        }
        Ok(removed_dirs)
    }

    /// Remove individual bytecode files (.pyc, .pyo, etc.)
    async fn remove_bytecode_files(
        &self,
        staging_dir: &Path,
    ) -> Result<Vec<std::path::PathBuf>, Error> {
        let mut removed_files = Vec::new();
        for entry in ignore::WalkBuilder::new(staging_dir)
            .hidden(false)
            .parents(false)
            .build()
        {
            let path = match entry {
                Ok(e) => e.into_path(),
                Err(_) => continue,
            };
            if !path.is_file() {
                continue;
            }
            // Check for bytecode file extensions
            if let Some(ext) = path.extension().and_then(|e| e.to_str()) {
                if matches!(ext, "pyc" | "pyo") {
                    if let Ok(()) = fs::remove_file(&path).await {
                        removed_files.push(path);
                    }
                    continue;
                }
            }
            // Check for complex bytecode patterns (.cpython-*.pyc, etc.)
            // NOTE(review): lowercase `.pyc` names already `continue` above,
            // so this branch appears to matter only for case variants like
            // `.PYC` — confirm it is still needed.
            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
                let has_pyc_extension = path
                    .extension()
                    .is_some_and(|ext| ext.eq_ignore_ascii_case("pyc"));
                if has_pyc_extension
                    && (filename.contains(".cpython-")
                        || filename.contains(".pypy")
                        || filename.contains(".opt-"))
                {
                    if let Ok(()) = fs::remove_file(&path).await {
                        removed_files.push(path);
                    }
                }
            }
        }
        Ok(removed_files)
    }

    /// Remove build artifacts and development files
    async fn remove_build_artifacts(
        &self,
        staging_dir: &Path,
    ) -> Result<Vec<std::path::PathBuf>, Error> {
        let mut removed_items = Vec::new();
        for entry in ignore::WalkBuilder::new(staging_dir)
            .hidden(false)
            .parents(false)
            .build()
        {
            let path = match entry {
                Ok(e) => e.into_path(),
                Err(_) => continue,
            };
            let Some(name) = path.file_name().and_then(|n| n.to_str()) else {
                continue;
            };
            // Remove only clearly safe build artifacts and development directories
            // Be conservative - don't remove "test" or "tests" dirs as they might be runtime modules
            let should_remove = matches!(
                name,
                "build"
                    | "dist"
                    | ".eggs"
                    | ".tox"
                    | ".pytest_cache"
                    | ".mypy_cache"
                    | ".ruff_cache"
                    | "htmlcov"
                    | ".DS_Store"
                    | "Thumbs.db"
                    | ".vscode"
                    | ".idea"
            ) || name.ends_with(".egg-info")
                || name.starts_with("pip-build-env-")
                || name.starts_with("pip-req-build-");
            if should_remove {
                // Directories and plain files need different removal calls.
                let remove_result = if path.is_dir() {
                    fs::remove_dir_all(&path).await
                } else {
                    fs::remove_file(&path).await
                };
                if remove_result.is_ok() {
                    removed_items.push(path);
                }
            }
        }
        Ok(removed_items)
    }

    /// Remove pip cache and metadata files
    async fn remove_pip_artifacts(
        &self,
        staging_dir: &Path,
    ) -> Result<Vec<std::path::PathBuf>, Error> {
        let mut removed_items = Vec::new();
        for entry in ignore::WalkBuilder::new(staging_dir)
            .hidden(false)
            .parents(false)
            .build()
        {
            let path = match entry {
                Ok(e) => e.into_path(),
                Err(_) => continue,
            };
            if !path.is_file() {
                continue;
            }
            let Some(filename) = path.file_name().and_then(|n| n.to_str()) else {
                continue;
            };
            // Remove pip installation metadata that contains hardcoded paths
            if matches!(filename, "INSTALLER" | "REQUESTED" | "direct_url.json") {
                if let Ok(()) = fs::remove_file(&path).await {
                    removed_items.push(path);
                }
            }
        }
        Ok(removed_items)
    }
}
impl crate::artifact_qa::traits::Action for PythonBytecodeCleanupPatcher {
    const NAME: &'static str = "Python Bytecode Cleanup";
    /// Remove Python bytecode, build artifacts and pip metadata from the
    /// staging tree. No-op for non-Python packages. Each cleanup pass is
    /// best-effort; the removed paths are reported as changed files and a
    /// single summary warning is emitted when anything was deleted.
    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // Only run for Python packages
        if !env.is_python_package() {
            return Ok(Report::ok());
        }
        // The patcher is a unit struct; instantiate it to call its methods.
        let self_instance = Self;
        let staging_dir = env.staging_dir();
        let mut all_removed = Vec::new();
        // Remove __pycache__ directories
        let removed_pycache = self_instance.remove_pycache_dirs(staging_dir).await?;
        all_removed.extend(removed_pycache);
        // Remove bytecode files
        let removed_bytecode = self_instance.remove_bytecode_files(staging_dir).await?;
        all_removed.extend(removed_bytecode);
        // Remove build artifacts
        let removed_artifacts = self_instance.remove_build_artifacts(staging_dir).await?;
        all_removed.extend(removed_artifacts);
        // Remove pip metadata
        let removed_pip = self_instance.remove_pip_artifacts(staging_dir).await?;
        all_removed.extend(removed_pip);
        let mut warnings = Vec::new();
        if !all_removed.is_empty() {
            warnings.push(format!(
                "Removed {} Python bytecode artifacts",
                all_removed.len()
            ));
        }
        Ok(Report {
            changed_files: all_removed,
            warnings,
            ..Default::default()
        })
    }
}
// Marker: registers this action with the patcher (not validator) pipeline.
impl Patcher for PythonBytecodeCleanupPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/mod.rs | crates/builder/src/artifact_qa/patchers/mod.rs | //! Registry of all post-build patcher modules.
pub mod binary_string;
pub mod codesigner;
pub mod headers;
pub mod la_cleaner;
pub mod object_cleaner;
pub mod permissions;
pub mod pkgconfig;
pub mod placeholder;
pub mod python_bytecode_cleanup;
pub mod python_isolation;
pub mod rpath;
// Re-export the concrete types so callers can use
// `patchers::PlaceholderPatcher`, etc.
pub use binary_string::BinaryStringPatcher;
pub use codesigner::CodeSigner;
pub use headers::HeaderPatcher;
pub use la_cleaner::LaFileCleaner;
pub use object_cleaner::ObjectFileCleaner;
pub use permissions::PermissionsFixer;
pub use pkgconfig::PkgConfigPatcher;
pub use placeholder::PlaceholderPatcher;
pub use python_bytecode_cleanup::PythonBytecodeCleanupPatcher;
pub use python_isolation::PythonIsolationPatcher;
pub use rpath::RPathPatcher;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/python_isolation.rs | crates/builder/src/artifact_qa/patchers/python_isolation.rs | //! Python isolation patcher - creates wrapper scripts for isolated Python packages
use crate::artifact_qa::{
reports::Report,
traits::{Action, Patcher},
};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use tokio::fs;
/// Creates `bin/` wrapper scripts so executables from isolated Python
/// packages run with the package's own `site-packages` on `PYTHONPATH`.
#[derive(Default)]
pub struct PythonIsolationPatcher;
impl PythonIsolationPatcher {
    /// Detect Python version from package-specific lib directory
    ///
    /// Scans `<package_prefix>/lib` for a directory named `python3.X` and
    /// returns that directory name (e.g. `"python3.11"`). Errors if the lib
    /// directory is missing or no versioned Python directory is found.
    async fn detect_python_version(&self, package_prefix: &Path) -> Result<String, Error> {
        let lib_dir = package_prefix.join("lib");
        if !lib_dir.exists() {
            return Err(BuildError::InstallFailed {
                message: "No lib directory found in package prefix".to_string(),
            }
            .into());
        }
        let mut entries = fs::read_dir(&lib_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.is_dir() {
                // Safe unwrap: paths produced by read_dir always end in a
                // final component, so file_name() is Some.
                let dir_name = path.file_name().unwrap().to_string_lossy();
                if dir_name.starts_with("python3.") {
                    // Extract version from directory name like "python3.11"
                    return Ok(dir_name.to_string());
                }
            }
        }
        Err(BuildError::InstallFailed {
            message: "Could not detect Python version from package lib directory".to_string(),
        }
        .into())
    }
    /// Find all executables in package bin directory
    ///
    /// Returns the file names (not full paths) of regular files in
    /// `package_bin_dir` that have any execute bit set. A missing bin
    /// directory yields an empty list rather than an error.
    async fn find_package_executables(&self, package_bin_dir: &Path) -> Result<Vec<String>, Error> {
        let mut executables = Vec::new();
        if !package_bin_dir.exists() {
            return Ok(executables);
        }
        let mut entries = fs::read_dir(package_bin_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.is_file() {
                if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
                    // Check if file is executable (any of user/group/other
                    // execute bits). Metadata failures skip the entry.
                    if let Ok(metadata) = path.metadata() {
                        let permissions = metadata.permissions();
                        if permissions.mode() & 0o111 != 0 {
                            // File is executable
                            executables.push(filename.to_string());
                        }
                    }
                }
            }
        }
        Ok(executables)
    }
    /// Create wrapper script for a Python executable
    ///
    /// Writes `<main_bin_dir>/<executable_name>` as a bash script that
    /// prepends the package's site-packages to `PYTHONPATH` and then execs
    /// the real script under `<live>/bin/<python_version>`, and marks the
    /// wrapper 0o755.
    async fn create_wrapper_script(
        &self,
        main_bin_dir: &Path,
        executable_name: &str,
        package_name: &str,
        python_version: &str,
    ) -> Result<(), Error> {
        let wrapper_path = main_bin_dir.join(executable_name);
        // Create wrapper script content. The script lines are intentionally
        // unindented inside the raw string so the shebang stays at column 0.
        let wrapper_content = format!(
            r#"#!/bin/bash
# Wrapper for {executable_name} from {package_name} package
# Generated by sps2 Python isolation patcher
export PYTHONPATH="{live}/python/{package_name}/lib/{python_version}/site-packages:${{PYTHONPATH}}"
exec {live}/bin/{python_version} "{live}/python/{package_name}/bin/{executable_name}" "$@"
"#,
            live = sps2_config::fixed_paths::LIVE_DIR
        );
        // Write wrapper script
        fs::write(&wrapper_path, wrapper_content).await?;
        // Make wrapper script executable
        let mut permissions = fs::metadata(&wrapper_path).await?.permissions();
        permissions.set_mode(0o755);
        fs::set_permissions(&wrapper_path, permissions).await?;
        Ok(())
    }
}
impl Action for PythonIsolationPatcher {
    const NAME: &'static str = "Python Isolation";
    /// Generate wrapper scripts for a Python package's entry points.
    ///
    /// No-op unless the package is a Python package AND the build recorded
    /// entry points in the `PYTHON_ENTRY_POINTS` extra-env JSON. For each
    /// executable found under the package-specific `bin/`, a wrapper is
    /// written into the shared staging `bin/`; the created wrapper paths are
    /// returned as changed files.
    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let self_instance = Self;
        // Only run for Python packages
        if !env.is_python_package() {
            return Ok(Report::ok());
        }
        let staging_dir = env.staging_dir();
        // Strip the leading '/' so the live prefix can be joined under the
        // staging directory.
        let live_prefix = env.get_live_prefix().trim_start_matches('/');
        let package_name = env.package_name();
        // Check if we have entry points from build metadata; malformed JSON
        // degrades to "no entry points" rather than failing the build.
        let entry_points_json = env
            .get_extra_env("PYTHON_ENTRY_POINTS")
            .unwrap_or_else(|| "{}".to_string());
        let entry_points: HashMap<String, String> =
            serde_json::from_str(&entry_points_json).unwrap_or_default();
        if entry_points.is_empty() {
            return Ok(Report::ok());
        }
        // Isolated layout: <staging>/<live>/python/<package_name>
        let package_specific_prefix = staging_dir
            .join(live_prefix)
            .join("python")
            .join(package_name);
        // Detect Python version from package lib directory
        let python_version = self_instance
            .detect_python_version(&package_specific_prefix)
            .await?;
        // Find all executables in package bin directory
        let package_bin_dir = package_specific_prefix.join("bin");
        let executables = self_instance
            .find_package_executables(&package_bin_dir)
            .await?;
        if executables.is_empty() {
            return Ok(Report::ok());
        }
        // Create main bin directory if it doesn't exist
        let main_bin_dir = staging_dir.join(live_prefix).join("bin");
        fs::create_dir_all(&main_bin_dir).await?;
        // Create wrapper scripts for all executables
        let mut created_wrappers = Vec::new();
        for executable in &executables {
            self_instance
                .create_wrapper_script(&main_bin_dir, executable, package_name, &python_version)
                .await?;
            created_wrappers.push(main_bin_dir.join(executable));
        }
        Ok(Report {
            changed_files: created_wrappers,
            ..Default::default()
        })
    }
}
// Marker: registers this action with the patcher pipeline.
impl Patcher for PythonIsolationPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/pkgconfig.rs | crates/builder/src/artifact_qa/patchers/pkgconfig.rs | //! Fixes *.pc and *Config.cmake so downstream builds never see /opt/pm/build/…
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use ignore::WalkBuilder;
use sps2_errors::Error;
/// Rewrites build-tree prefixes inside pkg-config (`*.pc`) and CMake
/// (`*Config.cmake`) files to the live install prefix.
pub struct PkgConfigPatcher;
impl crate::artifact_qa::traits::Action for PkgConfigPatcher {
    const NAME: &'static str = "pkg‑config / CMake patcher";
    /// Rewrite build-time prefixes in `*.pc` and `*Config.cmake` files under
    /// the staging directory so downstream builds resolve against the live
    /// install prefix. Returns the list of files that were modified.
    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";
        let actual = sps2_config::fixed_paths::LIVE_DIR;
        // Collect every pkg-config file and CMake package-config file in the
        // staging tree. (Renamed from the misleading `pat` — this is a file
        // list, not a pattern.)
        let files = WalkBuilder::new(env.staging_dir())
            .build()
            .filter_map(Result::ok)
            .map(ignore::DirEntry::into_path)
            .filter(|p| {
                p.is_file() && {
                    p.extension().and_then(|e| e.to_str()) == Some("pc")
                        || p.file_name()
                            .and_then(|n| n.to_str())
                            .is_some_and(|n| n.ends_with("Config.cmake"))
                }
            })
            .collect::<Vec<_>>();
        let mut changed = Vec::new();
        for f in files {
            // Non-UTF-8 files fail read_to_string and are skipped.
            if let Ok(s) = std::fs::read_to_string(&f) {
                let mut modified = false;
                // `s` is not needed afterwards, so consume it directly
                // instead of cloning (avoids a redundant allocation).
                let mut result = s;
                // Replace build paths in order of specificity (most specific
                // first) so `…/src` is rewritten before its parent prefix.
                if result.contains(&build_src) {
                    result = result.replace(&build_src, actual);
                    modified = true;
                }
                if result.contains(&build_prefix) {
                    result = result.replace(&build_prefix, actual);
                    modified = true;
                }
                if result.contains(build_base) {
                    result = result.replace(build_base, actual);
                    modified = true;
                }
                if modified {
                    std::fs::write(&f, result)?;
                    changed.push(f);
                }
            }
        }
        Ok(Report {
            changed_files: changed,
            ..Default::default()
        })
    }
}
impl Patcher for PkgConfigPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/la_cleaner.rs | crates/builder/src/artifact_qa/patchers/la_cleaner.rs | //! Cleaner that removes libtool archive (.la) files
use crate::artifact_qa::{reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
/// Removes libtool archive (`.la`) files from the staging tree; they embed
/// build paths and serve no runtime purpose.
pub struct LaFileCleaner;
impl crate::artifact_qa::traits::Action for LaFileCleaner {
    const NAME: &'static str = "Libtool archive cleaner";
    /// Delete every `*.la` file found under the staging directory.
    ///
    /// Removal is best-effort (failures are ignored); the deleted paths are
    /// reported as changed files, plus one summary warning when any were
    /// removed.
    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // Walk the staging tree, including hidden entries, without consulting
        // ignore files above the root.
        let la_files: Vec<std::path::PathBuf> = ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
            .map(ignore::DirEntry::into_path)
            .filter(|p| p.is_file())
            .filter(|p| p.extension().and_then(|e| e.to_str()) == Some("la"))
            .collect();
        // Best-effort removal: files that fail to delete are simply left in
        // place and not reported.
        let mut removed = Vec::new();
        for path in la_files {
            if std::fs::remove_file(&path).is_ok() {
                removed.push(path);
            }
        }
        let warnings = if removed.is_empty() {
            Vec::new()
        } else {
            vec![format!("Removed {} libtool archives", removed.len())]
        };
        Ok(Report {
            changed_files: removed,
            warnings,
            ..Default::default()
        })
    }
}
impl Patcher for LaFileCleaner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/codesigner.rs | crates/builder/src/artifact_qa/patchers/codesigner.rs | //! Re-signs binaries after patching to fix code signature issues on macOS
use crate::artifact_qa::{macho_utils, reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use sps2_platform::{PlatformContext, PlatformManager};
use std::path::Path;
/// Re-signs Mach-O binaries whose code signature was invalidated by earlier
/// patching passes (macOS only).
pub struct CodeSigner {
    // Platform abstraction used to verify and (ad-hoc) sign binaries.
    platform: &'static sps2_platform::Platform,
}
impl CodeSigner {
    /// Create a new `CodeSigner` with platform abstraction
    #[must_use]
    pub fn new() -> Self {
        Self {
            platform: PlatformManager::instance().platform(),
        }
    }
    /// Check if a file is a Mach-O binary (executable or dylib)
    ///
    /// Fast path: name-based match for dynamic libraries; otherwise falls
    /// back to content-based Mach-O magic detection.
    fn is_macho_binary(path: &Path) -> bool {
        // Check if it's a dynamic library (including versioned ones)
        if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
            if name.contains(".dylib") || name.contains(".so") {
                return true;
            }
        }
        // Use the shared MachO detection logic
        macho_utils::is_macho_file(path)
    }
    /// Re-sign a binary with ad-hoc signature
    ///
    /// Returns `Ok(true)` if the binary was re-signed, `Ok(false)` if the
    /// existing signature was still valid and nothing was done.
    async fn resign_binary(
        &self,
        ctx: &PlatformContext,
        path: &Path,
    ) -> Result<bool, sps2_errors::Error> {
        // First check if the signature is valid. A verification *error* is
        // treated the same as an invalid signature (unwrap_or_default →
        // false), so such binaries get re-signed.
        let is_valid =
            (self.platform.binary().verify_signature(ctx, path).await).unwrap_or_default();
        // If signature is invalid or modified, re-sign it
        if is_valid {
            Ok(false) // No re-signing needed
        } else {
            // Re-sign with ad-hoc signature (identity = None)
            match self.platform.binary().sign_binary(ctx, path, None).await {
                Ok(()) => Ok(true),
                Err(e) => Err(e.into()),
            }
        }
    }
}
impl crate::artifact_qa::traits::Action for CodeSigner {
    const NAME: &'static str = "Code re-signer";
    /// Walk the staging tree and ad-hoc re-sign every Mach-O binary whose
    /// signature is no longer valid. No-op on non-macOS targets. Signing
    /// failures are collected as report errors; successes are summarized in
    /// a single warning.
    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // Only run on macOS
        if !cfg!(target_os = "macos") {
            return Ok(Report::ok());
        }
        let signer = Self::new();
        // Create platform context from build context
        let platform_ctx = signer.platform.create_context(ctx.event_sender.clone());
        let mut resigned_count = 0;
        let mut errors = Vec::new();
        // Walk through all files in staging directory
        for entry in ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if !path.is_file() || !Self::is_macho_binary(&path) {
                continue;
            }
            match signer.resign_binary(&platform_ctx, &path).await {
                Ok(true) => resigned_count += 1,
                Ok(false) => {} // No re-signing needed
                Err(e) => {
                    errors.push(format!("Failed to re-sign {}: {}", path.display(), e));
                }
            }
        }
        let mut warnings = Vec::new();
        if resigned_count > 0 {
            warnings.push(format!("Re-signed {resigned_count} binaries"));
        }
        // NOTE(review): re-signed binaries are intentionally(?) not listed in
        // changed_files even though their bytes changed — confirm downstream
        // consumers don't need them.
        Ok(Report {
            changed_files: Vec::new(),
            warnings,
            errors,
            ..Default::default()
        })
    }
}
impl Default for CodeSigner {
    fn default() -> Self {
        Self::new()
    }
}
// Marker: registers this action with the patcher pipeline.
impl Patcher for CodeSigner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/patchers/binary_string.rs | crates/builder/src/artifact_qa/patchers/binary_string.rs | //! Binary-safe string patcher for embedded paths in executables and libraries
use crate::artifact_qa::{macho_utils, reports::Report, traits::Patcher};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
use std::collections::HashMap;
use std::path::Path;
/// Find all occurrences of needle in haystack and return their byte offsets
/// Return the byte offsets of every (possibly overlapping) occurrence of
/// `needle` inside `haystack`.
///
/// An empty needle, or a haystack shorter than the needle, yields no
/// matches.
fn find_binary_strings(haystack: &[u8], needle: &[u8]) -> Vec<usize> {
    if needle.is_empty() || haystack.len() < needle.len() {
        return Vec::new();
    }
    // Slide a needle-sized window over the haystack and record every
    // position where the window matches exactly.
    haystack
        .windows(needle.len())
        .enumerate()
        .filter(|(_, window)| *window == needle)
        .map(|(offset, _)| offset)
        .collect()
}
/// Replace a string in binary data with null-padding to maintain file structure
/// Returns true if replacement was made, false if new string was too long
/// Overwrite the string at `data[offset..]` with `new_str`, zero-filling the
/// remainder of the original allocation so the file layout is unchanged.
///
/// The allocation size is `allocated_len` when provided; otherwise it is
/// measured by scanning for the NUL terminator at/after the end of
/// `old_str` (terminator included). If no NUL is found, the allocation is
/// conservatively assumed to be exactly `old_str.len()` bytes.
///
/// Returns `false` (leaving `data` untouched) when `new_str` plus its NUL
/// terminator would not fit in the allocation.
fn replace_binary_string(
    data: &mut [u8],
    offset: usize,
    old_str: &str,
    new_str: &str,
    allocated_len: Option<usize>,
) -> bool {
    let old_len = old_str.len();
    let new_bytes = new_str.as_bytes();
    // Resolve the allocated length: caller-provided, or distance to the
    // first NUL (inclusive), or the old string's length as a fallback.
    let alloc_len = allocated_len.unwrap_or_else(|| {
        data.iter()
            .enumerate()
            .skip(offset + old_len)
            .find(|&(_, &byte)| byte == 0)
            .map_or(old_len, |(i, _)| i - offset + 1)
    });
    // The replacement must fit together with its NUL terminator.
    if new_bytes.len() + 1 > alloc_len {
        return false;
    }
    // Write the new string in place.
    data[offset..offset + new_bytes.len()].copy_from_slice(new_bytes);
    // Zero-pad the rest of the allocation, never writing past the buffer.
    let pad_end = (offset + alloc_len).min(data.len());
    for byte in &mut data[offset + new_bytes.len()..pad_end] {
        *byte = 0;
    }
    true
}
/// Patches hardcoded build-directory strings embedded in binaries in place,
/// NUL-padding shortened strings so the file layout is preserved.
pub struct BinaryStringPatcher;
impl crate::artifact_qa::traits::Action for BinaryStringPatcher {
    const NAME: &'static str = "Binary string patcher";
    /// Rewrite embedded build paths in binaries to the live install prefix.
    ///
    /// When validator findings are available, only flagged binary files are
    /// processed; otherwise the whole staging tree is walked. Paths that are
    /// too long to patch in place are reported via a warning event.
    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        findings: Option<&crate::artifact_qa::diagnostics::DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let staging_dir = env.staging_dir();
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build".to_string();
        let install_prefix = sps2_config::fixed_paths::LIVE_DIR.to_string(); // Actual runtime installation prefix
        // Replacement table. NOTE(review): HashMap iteration order is
        // arbitrary, so relative ordering of these patterns is NOT
        // guaranteed. The outcome is still order-independent here because
        // every key maps to the same value and replace_binary_string
        // NUL-pads the whole terminated string — but do not add keys that
        // rely on "most specific first" semantics.
        let mut replacements = HashMap::new();
        replacements.insert(build_src, install_prefix.clone());
        replacements.insert(build_prefix, install_prefix.clone());
        replacements.insert(build_base, install_prefix.clone());
        let mut patched_files = Vec::new();
        let mut skipped_files = Vec::new();
        // Helper to check if file is a binary we should process
        let is_binary_file = |path: &std::path::Path| -> bool {
            let has_binary_extension = if let Some(name) = path.file_name().and_then(|n| n.to_str())
            {
                // Check for dynamic libraries (including versioned ones)
                name.contains(".so")
                    || name.contains(".dylib")
                    || std::path::Path::new(name)
                        .extension()
                        .is_some_and(|ext| ext.eq_ignore_ascii_case("a"))
            } else {
                false
            };
            has_binary_extension || macho_utils::is_macho_file(path)
        };
        // Get the list of files to process
        let files_to_process: Box<dyn Iterator<Item = std::path::PathBuf>> =
            if let Some(findings) = findings {
                // Use validator findings - only process files with hardcoded paths that are binaries
                let files_with_issues = findings.get_files_with_hardcoded_paths();
                let paths: Vec<std::path::PathBuf> = files_with_issues
                    .keys()
                    .map(|&p| p.to_path_buf())
                    .filter(|p| is_binary_file(p))
                    .collect();
                Box::new(paths.into_iter())
            } else {
                // Fall back to walking the entire directory (old behavior)
                Box::new(
                    ignore::WalkBuilder::new(staging_dir)
                        .hidden(false)
                        .parents(false)
                        .build()
                        .filter_map(Result::ok)
                        .map(ignore::DirEntry::into_path)
                        .filter(|p| p.is_file() && is_binary_file(p)),
                )
            };
        for path in files_to_process {
            // Process the file; read/write errors are silently skipped so a
            // single unreadable file does not abort the pass.
            if let Ok((was_patched, was_skipped)) = process_binary_file(&path, &replacements) {
                if was_patched {
                    patched_files.push(path.clone());
                }
                if was_skipped {
                    skipped_files.push((path, "Path too long".to_string()));
                }
            }
        }
        let patched = patched_files;
        let skipped = skipped_files;
        if !skipped.is_empty() {
            // Send warning event about skipped files
            crate::utils::events::send_event(
                ctx,
                AppEvent::General(GeneralEvent::warning_with_context(
                    format!(
                        "Binary string patcher: {} paths too long to patch in {} files",
                        skipped.len(),
                        skipped
                            .iter()
                            .map(|(p, _)| p)
                            .collect::<std::collections::HashSet<_>>()
                            .len()
                    ),
                    "Some embedded paths could not be patched due to length constraints",
                )),
            );
        }
        if !patched.is_empty() {
            crate::utils::events::send_event(
                ctx,
                AppEvent::General(GeneralEvent::OperationCompleted {
                    operation: format!("Patched {} binary files", patched.len()),
                    success: true,
                }),
            );
        }
        Ok(Report {
            changed_files: patched,
            ..Default::default()
        })
    }
}
/// Apply all `replacements` to the binary file at `path`.
///
/// Returns `(patched, skipped)`: `patched` is true when at least one string
/// was rewritten (and the file was persisted), `skipped` is true when at
/// least one occurrence could not be patched because the replacement was
/// longer than the available allocation.
///
/// Note: `replacements` is a `HashMap`, so patterns are applied in
/// unspecified order; callers must not rely on relative ordering.
fn process_binary_file(
    path: &Path,
    replacements: &HashMap<String, String>,
) -> Result<(bool, bool), Error> {
    let mut data = std::fs::read(path)?;
    let mut any_patched = false;
    let mut any_skipped = false;
    for (old_path, new_path) in replacements {
        for offset in find_binary_strings(&data, old_path.as_bytes()) {
            if replace_binary_string(&mut data, offset, old_path, new_path, None) {
                any_patched = true;
            } else {
                any_skipped = true;
            }
        }
    }
    if any_patched {
        // Write to a sibling temp file and rename over the original so
        // readers never observe a half-written binary. The temp name is
        // appended to the FULL file name: `with_extension("tmp")` would map
        // e.g. `libfoo.dylib` and `libfoo.a` to the same `libfoo.tmp` and
        // could clobber an unrelated existing file.
        let mut temp_name = path.as_os_str().to_os_string();
        temp_name.push(".sps2-patch.tmp");
        let temp_path = std::path::PathBuf::from(temp_name);
        std::fs::write(&temp_path, &data)?;
        // `fs::write` creates the temp file with default permissions; copy
        // the original mode (notably the executable bit) onto it before the
        // rename, otherwise patched executables stop being runnable.
        let perms = std::fs::metadata(path)?.permissions();
        std::fs::set_permissions(&temp_path, perms)?;
        std::fs::rename(&temp_path, path)?;
    }
    Ok((any_patched, any_skipped))
}
impl Patcher for BinaryStringPatcher {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/scanners/macho.rs | crates/builder/src/artifact_qa/scanners/macho.rs | //! Validator that inspects Mach‑O headers without spawning `otool`.
use crate::artifact_qa::{
diagnostics::{DiagnosticCollector, IssueType},
reports::Report,
traits::Validator,
};
use crate::{BuildContext, BuildEnvironment};
use object::{
macho::{MachHeader32, MachHeader64},
read::macho::{
FatArch, LoadCommandVariant, MachHeader, MachOFatFile32, MachOFatFile64, MachOFile,
},
Endianness, FileKind,
};
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
/// Scans Mach-O load commands for install names and RPATHs that point into
/// the build tree.
pub struct MachOScanner;
impl crate::artifact_qa::traits::Action for MachOScanner {
    const NAME: &'static str = "Mach‑O load‑command scanner";
    /// Parse every file in the staging tree as Mach-O (thin or fat) and
    /// collect findings for load commands that reference build paths.
    /// Findings are emitted as warning events and returned as report errors
    /// (not `Err`) so the QA pipeline keeps running and downstream patchers
    /// can consume the collector.
    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";
        let mut collector = DiagnosticCollector::new();
        for entry in ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if !path.is_file() {
                continue;
            }
            // Non-Mach-O files fail FileKind::parse and are skipped.
            if let Ok(data) = std::fs::read(&path) {
                if let Ok(kind) = FileKind::parse(&*data) {
                    let build_paths = vec![build_base, &build_prefix, &build_src];
                    check_macho_file(&data, kind, &build_paths, &path, &mut collector);
                }
            }
        }
        if collector.has_findings() {
            // Emit detailed diagnostics as warning events
            let diagnostic_messages = collector.generate_diagnostic_messages();
            // Emit each file's diagnostics as a separate warning
            for msg in &diagnostic_messages {
                crate::utils::events::send_event(
                    ctx,
                    AppEvent::General(GeneralEvent::warning_with_context(
                        "Mach-O validation failed",
                        msg,
                    )),
                );
            }
            // Return report with errors (not Err!) so pipeline continues
            let error_count = collector.count();
            let mut report = Report::default();
            // Add the summary as an error so is_fatal() returns true
            report.errors.push(format!(
                "Mach‑O contains bad install‑name or RPATH ({error_count} file(s)). Check warnings above for details."
            ));
            // Include the collector in the report so patchers can use it
            report.findings = Some(collector);
            Ok(report)
        } else {
            Ok(Report::ok())
        }
    }
}
/// Dispatch on the Mach-O container kind and check load commands.
///
/// Thin 32/64-bit images are checked directly; fat binaries are sliced per
/// architecture and each slice is re-dispatched recursively. Parse failures
/// and out-of-range slice bounds are silently skipped (scan stays
/// best-effort). Findings are accumulated into `collector` against
/// `file_path`.
fn check_macho_file(
    data: &[u8],
    kind: FileKind,
    build_paths: &[&str],
    file_path: &std::path::Path,
    collector: &mut DiagnosticCollector,
) {
    match kind {
        FileKind::MachO32 => {
            if let Ok(file) = MachOFile::<MachHeader32<Endianness>, _>::parse(data) {
                check_load_commands(&file, build_paths, file_path, collector);
            }
        }
        FileKind::MachO64 => {
            if let Ok(file) = MachOFile::<MachHeader64<Endianness>, _>::parse(data) {
                check_load_commands(&file, build_paths, file_path, collector);
            }
        }
        // The fat arms are near-duplicates because the 32- and 64-bit fat
        // headers are distinct types in the `object` crate.
        FileKind::MachOFat32 => {
            if let Ok(fat) = MachOFatFile32::parse(data) {
                for arch in fat.arches() {
                    // (offset, size) of this architecture's slice in the fat file.
                    let (off, sz) = arch.file_range();
                    let Ok(start): Result<usize, _> = off.try_into() else {
                        continue;
                    };
                    let Ok(size): Result<usize, _> = sz.try_into() else {
                        continue;
                    };
                    // checked_add guards against overflow in start + size.
                    let Some(end) = start.checked_add(size) else {
                        continue;
                    };
                    if let Some(slice) = data.get(start..end) {
                        if let Ok(kind) = FileKind::parse(slice) {
                            check_macho_file(slice, kind, build_paths, file_path, collector);
                        }
                    }
                }
            }
        }
        FileKind::MachOFat64 => {
            if let Ok(fat) = MachOFatFile64::parse(data) {
                for arch in fat.arches() {
                    let (off, sz) = arch.file_range();
                    let Ok(start): Result<usize, _> = off.try_into() else {
                        continue;
                    };
                    let Ok(size): Result<usize, _> = sz.try_into() else {
                        continue;
                    };
                    let Some(end) = start.checked_add(size) else {
                        continue;
                    };
                    if let Some(slice) = data.get(start..end) {
                        if let Ok(kind) = FileKind::parse(slice) {
                            check_macho_file(slice, kind, build_paths, file_path, collector);
                        }
                    }
                }
            }
        }
        // Non-Mach-O kinds: nothing to scan.
        _ => {}
    }
}
/// Inspect one parsed Mach-O image's load commands for build paths.
///
/// Examines LC_LOAD_DYLIB / LC_ID_DYLIB install names and LC_RPATH entries;
/// any path that starts with one of `build_paths` is recorded in `collector`
/// as a `BadRPath` or `BadInstallName` finding (one finding per offending
/// command). Non-UTF-8 path strings are skipped.
fn check_load_commands<'data, Mach, R>(
    file: &MachOFile<'data, Mach, R>,
    build_paths: &[&str],
    file_path: &std::path::Path,
    collector: &mut DiagnosticCollector,
) where
    Mach: MachHeader,
    R: object::ReadRef<'data>,
{
    let endian = file.endian();
    if let Ok(mut commands) = file.macho_load_commands() {
        while let Ok(Some(cmd)) = commands.next() {
            if let Ok(variant) = cmd.variant() {
                // Extract the path payload for the command kinds we care about.
                let path_bytes = match variant {
                    LoadCommandVariant::Dylib(d) | LoadCommandVariant::IdDylib(d) => {
                        cmd.string(endian, d.dylib.name).ok()
                    }
                    LoadCommandVariant::Rpath(r) => cmd.string(endian, r.path).ok(),
                    _ => None,
                };
                if let Some(bytes) = path_bytes {
                    if let Ok(path_str) = std::str::from_utf8(bytes) {
                        // Check for any build paths - those are always bad
                        // /opt/pm/live paths are always OK
                        for build_path in build_paths {
                            if path_str.starts_with(build_path) {
                                let issue_type = match variant {
                                    LoadCommandVariant::Rpath(_) => IssueType::BadRPath {
                                        rpath: path_str.to_string(),
                                    },
                                    _ => IssueType::BadInstallName {
                                        install_name: path_str.to_string(),
                                    },
                                };
                                collector.add_macho_issue(file_path, issue_type);
                                break; // One match is enough
                            }
                        }
                        // Note: We intentionally do NOT check for self-referencing install names
                        // when they use @rpath/. This is the correct, modern way to build
                        // relocatable libraries on macOS. The @rpath/ prefix tells the dynamic
                        // linker to search for the library using the runtime path search paths.
                        //
                        // Example: A library with install name "@rpath/libfoo.1.dylib" is
                        // correctly configured for runtime path loading and should not be flagged
                        // as an error.
                    }
                }
            }
        }
    }
}
// Marker: registers this action with the validator pipeline.
impl Validator for MachOScanner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/scanners/archive.rs | crates/builder/src/artifact_qa/scanners/archive.rs | //! Validator that looks into static archives (*.a) files.
use crate::artifact_qa::{
diagnostics::{DiagnosticCollector, IssueType},
reports::Report,
traits::Validator,
};
use crate::{BuildContext, BuildEnvironment};
use object::read::archive::ArchiveFile;
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
/// Scans static archives (`*.a`) for member names that embed build paths.
pub struct ArchiveScanner;
impl crate::artifact_qa::traits::Action for ArchiveScanner {
    const NAME: &'static str = "Static‑archive scanner";
    /// Scan every `*.a` archive in the staging tree and record a finding for
    /// each archive whose member names embed a build path.
    ///
    /// Findings are emitted as warning events and returned as report errors
    /// (not `Err`) so the QA pipeline keeps running; the collector is handed
    /// to downstream patchers via `report.findings`.
    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";
        // Candidate paths ordered most specific first, matching the
        // convention used by the other build-path scanners/patchers. (The
        // previous version tested the base path first, so the more specific
        // prefixes were never the ones reported.)
        let candidates: [&str; 3] = [build_src.as_str(), build_prefix.as_str(), build_base];
        let mut collector = DiagnosticCollector::new();
        for entry in ignore::WalkBuilder::new(env.staging_dir())
            .hidden(false)
            .parents(false)
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if !path.is_file() {
                continue;
            }
            if path.extension().and_then(|e| e.to_str()) != Some("a") {
                continue;
            }
            let Ok(bytes) = std::fs::read(&path) else {
                continue;
            };
            // Check static archives using the object crate; unparsable files
            // are skipped.
            let Ok(archive) = ArchiveFile::parse(&*bytes) else {
                continue;
            };
            // One finding per archive is enough: record the first (most
            // specific) matching build path and move on.
            'members: for member in archive.members().flatten() {
                if let Ok(name) = std::str::from_utf8(member.name()) {
                    for candidate in candidates {
                        if name.contains(candidate) {
                            collector.add_finding(
                                crate::artifact_qa::diagnostics::ValidationFinding {
                                    file_path: path.clone(),
                                    issue_type: IssueType::BuildPathInArchive {
                                        path: candidate.to_string(),
                                        member: Some(name.to_string()),
                                    },
                                    context: std::collections::HashMap::new(),
                                },
                            );
                            break 'members;
                        }
                    }
                }
            }
        }
        if collector.has_findings() {
            // Emit detailed diagnostics as warning events, one per file.
            let diagnostic_messages = collector.generate_diagnostic_messages();
            for msg in &diagnostic_messages {
                crate::utils::events::send_event(
                    ctx,
                    AppEvent::General(GeneralEvent::warning_with_context(
                        "Archive validation failed",
                        msg,
                    )),
                );
            }
            // Return report with errors (not Err!) so pipeline continues;
            // the summary error makes Report::is_fatal() true.
            let error_count = collector.count();
            let mut report = Report::default();
            report.errors.push(format!(
                "Static archives contain build paths ({error_count} file(s)). Check warnings above for details."
            ));
            // Include the collector in the report so patchers can use it
            report.findings = Some(collector);
            Ok(report)
        } else {
            Ok(Report::ok())
        }
    }
}
impl Validator for ArchiveScanner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/scanners/mod.rs | crates/builder/src/artifact_qa/scanners/mod.rs | //! Registry of all scanner (validator) modules.
pub mod archive;
pub mod hardcoded;
pub mod macho;
pub mod staging;
// Re-export the concrete types for convenient access elsewhere.
pub use archive::ArchiveScanner;
pub use hardcoded::HardcodedScanner;
pub use macho::MachOScanner;
pub use staging::StagingScanner;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/scanners/staging.rs | crates/builder/src/artifact_qa/scanners/staging.rs | //! Validator that checks if the staging directory contains any files.
//!
//! This is a fundamental check that runs for all build system profiles.
//! An empty staging directory indicates that the build succeeded but no files
//! were installed, which usually means the install step failed or was skipped.
use crate::artifact_qa::{diagnostics::DiagnosticCollector, reports::Report, traits::Validator};
use crate::{BuildContext, BuildEnvironment};
use sps2_errors::Error;
use std::path::Path;
/// Validates that the staging directory exists and contains at least one
/// entry (an empty tree means the install step never ran).
pub struct StagingScanner;
impl crate::artifact_qa::traits::Action for StagingScanner {
    const NAME: &'static str = "Staging directory scanner";
    /// Fail the QA run when the staging directory is missing or empty —
    /// both are symptoms of an install step that failed or was skipped.
    async fn run(
        _ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&DiagnosticCollector>,
    ) -> Result<Report, Error> {
        let staging_dir = env.staging_dir();
        // Helper: build a failing report carrying a single error message.
        let failure = |message: String| {
            let mut report = Report::default();
            report.errors.push(message);
            report
        };
        if !staging_dir.exists() {
            return Ok(failure(format!(
                "Staging directory does not exist: {}",
                staging_dir.display()
            )));
        }
        if is_directory_empty(staging_dir)? {
            return Ok(failure(format!(
                "Staging directory is empty: {}. This usually indicates that the build's install step failed or was not run. Check the build recipe for proper 'make install' or equivalent installation commands.",
                staging_dir.display()
            )));
        }
        // Staging directory exists and has at least one entry — success.
        Ok(Report::ok())
    }
}
impl Validator for StagingScanner {}
/// Return `true` when `dir` contains no entries at all.
///
/// Only the first directory entry is probed, so this stays O(1) regardless
/// of how many files the directory holds.
fn is_directory_empty(dir: &Path) -> Result<bool, Error> {
    match std::fs::read_dir(dir) {
        Ok(mut entries) => Ok(entries.next().is_none()),
        Err(e) => Err(sps2_errors::BuildError::ValidationFailed {
            message: format!("Failed to read staging directory {}: {}", dir.display(), e),
        }
        .into()),
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/artifact_qa/scanners/hardcoded.rs | crates/builder/src/artifact_qa/scanners/hardcoded.rs | //! Validator that searches every byte for `/opt/pm/build/...` or the
//! placeholder prefix. It is binary‑safe and SIMD‑accelerated.
use crate::artifact_qa::{diagnostics::DiagnosticCollector, reports::Report, traits::Validator};
use crate::{BuildContext, BuildEnvironment};
use bstr::ByteSlice;
use ignore::WalkBuilder;
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
pub struct HardcodedScanner;
impl crate::artifact_qa::traits::Action for HardcodedScanner {
    const NAME: &'static str = "Hardcoded‑path scanner";
    /// Scan every staged file's raw bytes for build-time paths and collect
    /// the findings so downstream patchers can fix the affected files.
    ///
    /// Validation failures are reported inside the returned [`Report`]
    /// (with `findings` attached), not as `Err`, so the QA pipeline keeps
    /// running and patchers still get a chance to repair the files.
    async fn run(
        ctx: &BuildContext,
        env: &BuildEnvironment,
        _findings: Option<&DiagnosticCollector>,
    ) -> Result<Report, Error> {
        // The three needles: the shared build base, this build's concrete
        // prefix, and the prefix's src subdirectory.
        let build_prefix = env.build_prefix().to_string_lossy().into_owned();
        let build_src = format!("{build_prefix}/src");
        let build_base = "/opt/pm/build";
        // Debug: Print the build prefixes we're scanning for
        crate::utils::events::send_event(
            ctx,
            AppEvent::General(GeneralEvent::debug(format!(
                "Hardcoded path scanner: checking for {build_base} | {build_prefix} | {build_src}"
            ))),
        );
        let mut collector = DiagnosticCollector::new();
        for entry in WalkBuilder::new(env.staging_dir())
            .hidden(false) // include dotfiles in the walk
            .parents(false) // do not consult ignore files from parent dirs
            .build()
            .filter_map(Result::ok)
        {
            let path = entry.into_path();
            if path.is_file() {
                // Skip Python bytecode files - they contain paths but are regenerated at runtime
                if let Some(ext) = path.extension() {
                    if ext == "pyc" || ext == "pyo" {
                        continue;
                    }
                }
                if let Ok(data) = std::fs::read(&path) {
                    let hay = data.as_slice();
                    // Check for any build-related paths. bstr's `find` is a
                    // byte-level search, so binaries are handled the same as
                    // text. At most one finding is recorded per file (the
                    // first matching needle wins).
                    if hay.find(build_base.as_bytes()).is_some() {
                        collector.add_hardcoded_path(&path, build_base, false);
                    } else if hay.find(build_prefix.as_bytes()).is_some() {
                        collector.add_hardcoded_path(&path, &build_prefix, false);
                    } else if hay.find(build_src.as_bytes()).is_some() {
                        collector.add_hardcoded_path(&path, &build_src, false);
                    }
                }
            }
        }
        if collector.has_findings() {
            // Emit detailed diagnostics as warning events
            let diagnostic_messages = collector.generate_diagnostic_messages();
            // Emit each file's diagnostics as a separate warning
            for msg in &diagnostic_messages {
                crate::utils::events::send_event(
                    ctx,
                    AppEvent::General(GeneralEvent::warning_with_context(
                        "Hardcoded path validation failed",
                        msg,
                    )),
                );
            }
            // Return report with errors (not Err!) so pipeline continues
            let error_count = collector.count();
            let mut report = Report::default();
            // Add the summary as an error so is_fatal() returns true
            report.errors.push(format!(
                "Hardcoded path(s) found in {error_count} file(s). Check warnings above for details."
            ));
            // Include the collector in the report so patchers can use it
            report.findings = Some(collector);
            Ok(report)
        } else {
            Ok(Report::ok())
        }
    }
}
impl Validator for HardcodedScanner {}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/recipe/parser.rs | crates/builder/src/recipe/parser.rs | //! YAML recipe parser with validation and variable expansion
use super::model::{Build, ParsedStep, PostCommand, PostOption, YamlRecipe};
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::Path;
/// Parse a YAML recipe from a file on disk.
///
/// Reads the file asynchronously, then delegates to
/// [`parse_yaml_recipe_from_string`] for parsing, validation, and variable
/// expansion.
///
/// # Errors
///
/// Returns an error if:
/// - The file cannot be read
/// - The YAML is invalid
/// - Required fields are missing
/// - Validation fails
pub async fn parse_yaml_recipe(path: &Path) -> Result<YamlRecipe, Error> {
    match tokio::fs::read_to_string(path).await {
        Ok(content) => parse_yaml_recipe_from_string(&content),
        Err(e) => Err(BuildError::RecipeError {
            message: format!("failed to read recipe: {e}"),
        }
        .into()),
    }
}
/// Parse a YAML recipe from an in-memory string.
///
/// Pipeline: deserialize → validate required fields → expand
/// `${VAR}`/`$VAR` references in place.
///
/// # Errors
///
/// Returns an error if:
/// - The YAML is invalid
/// - Required fields are missing
/// - Validation fails
pub fn parse_yaml_recipe_from_string(content: &str) -> Result<YamlRecipe, Error> {
    let parsed: Result<YamlRecipe, _> = serde_yaml2::from_str(content);
    let mut recipe = parsed.map_err(|e| BuildError::RecipeError {
        message: format!("failed to parse YAML: {e}"),
    })?;

    validate_recipe(&recipe)?;
    expand_variables(&mut recipe);
    Ok(recipe)
}
/// Validate a parsed recipe
fn validate_recipe(recipe: &YamlRecipe) -> Result<(), Error> {
// Validate metadata
if recipe.metadata.name.is_empty() {
return Err(BuildError::RecipeError {
message: "metadata.name cannot be empty".to_string(),
}
.into());
}
if recipe.metadata.version.is_empty() {
return Err(BuildError::RecipeError {
message: "metadata.version cannot be empty".to_string(),
}
.into());
}
// Validate build stage
match &recipe.build {
Build::System { system, args: _ } => {
// System builds are always valid
let _ = system; // Use to avoid unused warning
}
Build::Steps { steps } => {
if steps.is_empty() {
return Err(BuildError::RecipeError {
message: "build.steps cannot be empty".to_string(),
}
.into());
}
}
}
Ok(())
}
/// Expand `${VAR}`/`$VAR` references throughout the recipe.
///
/// The substitution context is layered in order: built-in variables
/// (`NAME`, `VERSION`, `PREFIX`, `JOBS`), then user-defined facts, then the
/// recipe's own environment variables (which may themselves reference
/// facts/built-ins). Later layers overwrite earlier ones on key collision.
fn expand_variables(recipe: &mut YamlRecipe) {
    // Built-in variables.
    let mut context: HashMap<String, String> = HashMap::from([
        ("NAME".to_string(), recipe.metadata.name.clone()),
        ("VERSION".to_string(), recipe.metadata.version.clone()),
        (
            "PREFIX".to_string(),
            sps2_config::fixed_paths::LIVE_DIR.to_string(),
        ),
        ("JOBS".to_string(), num_cpus::get().to_string()),
    ]);

    // User-defined facts.
    context.extend(recipe.facts.iter().map(|(k, v)| (k.clone(), v.clone())));

    // Environment variables are expanded against the context built so far
    // (so they can reference facts), then published into the context.
    let expanded_env: HashMap<String, String> = recipe
        .environment
        .variables
        .iter()
        .map(|(key, value)| (key.clone(), expand_string(value, &context)))
        .collect();
    recipe.environment.variables = expanded_env;
    context.extend(
        recipe
            .environment
            .variables
            .iter()
            .map(|(k, v)| (k.clone(), v.clone())),
    );

    // Build stage arguments / steps.
    match &mut recipe.build {
        Build::System { system: _, args } => {
            for arg in args.iter_mut() {
                *arg = expand_string(arg, &context);
            }
        }
        Build::Steps { steps } => {
            for step in steps.iter_mut() {
                expand_build_step(step, &context);
            }
        }
    }

    // Post-processing commands.
    for cmd in &mut recipe.post.commands {
        match cmd {
            PostCommand::Simple(s) => *s = expand_string(s, &context),
            PostCommand::Shell { shell } => *shell = expand_string(shell, &context),
        }
    }

    // Post-processing permission paths.
    if let PostOption::Paths(paths) = &mut recipe.post.fix_permissions {
        for path in paths.iter_mut() {
            *path = expand_string(path, &context);
        }
    }
}
/// Expand variables in a single string.
///
/// Supports both `${VAR}` and `$VAR` forms. Keys are substituted
/// longest-first so that a key that is a prefix of another (e.g. `NAME` vs
/// `NAMESPACE`) can never clobber the longer key's expansion — previously
/// the outcome depended on `HashMap` iteration order, which is unspecified,
/// making expansion nondeterministic for overlapping keys. Ties are broken
/// lexicographically so the result is fully deterministic.
fn expand_string(input: &str, context: &HashMap<String, String>) -> String {
    // Longest key first; lexicographic tie-break for determinism.
    let mut pairs: Vec<(&String, &String)> = context.iter().collect();
    pairs.sort_by(|(a, _), (b, _)| b.len().cmp(&a.len()).then_with(|| a.cmp(b)));

    let mut result = input.to_string();
    for (key, value) in pairs {
        // ${VAR} style variables.
        result = result.replace(&format!("${{{key}}}"), value);
        // $VAR style variables. NOTE: like the previous implementation this
        // is plain substring replacement, not word-boundary aware; a regex
        // would be stricter, but this preserves existing recipe-facing
        // behavior for all non-overlapping keys.
        result = result.replace(&format!("${key}"), value);
    }
    result
}
/// Expand variables in a single build step.
///
/// The two scalar variants (`Command`, `Shell`) are expanded directly; all
/// remaining variants carry an argument vector, so the match collapses them
/// into one borrowed `Vec` that is expanded element-wise below.
fn expand_build_step(step: &mut ParsedStep, context: &HashMap<String, String>) {
    let args: &mut Vec<String> = match step {
        ParsedStep::Command { command } => {
            *command = expand_string(command, context);
            return;
        }
        ParsedStep::Shell { shell } => {
            *shell = expand_string(shell, context);
            return;
        }
        ParsedStep::Make { make } => make,
        ParsedStep::Configure { configure } => configure,
        ParsedStep::Cmake { cmake } => cmake,
        ParsedStep::Meson { meson } => meson,
        ParsedStep::Cargo { cargo } => cargo,
        ParsedStep::Go { go } => go,
        ParsedStep::Python { python } => python,
        ParsedStep::Nodejs { nodejs } => nodejs,
    };

    for arg in args.iter_mut() {
        *arg = expand_string(arg, context);
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/recipe/executor.rs | crates/builder/src/recipe/executor.rs | //! YAML recipe execution
use crate::yaml::RecipeMetadata;
use crate::{BuildConfig, BuildContext, BuildEnvironment};
use sps2_errors::Error;
use sps2_types::package::PackageSpec;
/// Execute the YAML recipe and return dependencies, metadata, install request status, and `qa_pipeline`
///
/// Thin delegation to the staged executor; the actual stage logic lives in
/// `crate::utils::executor::execute_staged_build`.
///
/// The returned tuple holds, in order: a `Vec<String>` and a
/// `Vec<PackageSpec>` of dependencies (presumably runtime names and build
/// specs respectively — confirm against `execute_staged_build`), the recipe
/// metadata, the install-request flag, and the QA pipeline override.
///
/// # Errors
///
/// Propagates any error produced by the staged executor.
pub async fn execute_recipe(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &mut BuildEnvironment,
) -> Result<
    (
        Vec<String>,
        Vec<PackageSpec>,
        RecipeMetadata,
        bool,
        sps2_types::QaPipelineOverride,
    ),
    Error,
> {
    // Execute YAML recipe using staged execution
    crate::utils::executor::execute_staged_build(config, context, environment).await
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/recipe/model.rs | crates/builder/src/recipe/model.rs | //! YAML recipe format for sps2
//!
//! This module provides a declarative YAML-based recipe format that replaces
//! the Starlark-based system with proper staged execution.
use crate::environment::IsolationLevel;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Complete YAML recipe structure
///
/// Top-level shape of a recipe file. Only `metadata`, `source`, and `build`
/// are required; every other stage falls back to its `Default` when omitted.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct YamlRecipe {
    /// Package metadata (required)
    pub metadata: Metadata,
    /// Dynamic facts/variables (optional)
    #[serde(default)]
    pub facts: HashMap<String, String>,
    /// Environment setup stage (optional)
    #[serde(default)]
    pub environment: Environment,
    /// Source acquisition stage (required)
    pub source: Source,
    /// Build stage (required)
    pub build: Build,
    /// Post-processing stage (optional)
    #[serde(default)]
    pub post: Post,
    /// Installation behavior (optional)
    #[serde(default)]
    pub install: Install,
}
/// Package metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metadata {
    /// Package name; validated to be non-empty at parse time
    pub name: String,
    /// Package version string; validated to be non-empty at parse time
    pub version: String,
    /// Human-readable package description
    pub description: String,
    /// License string (e.g. `Zlib`, `GPL-3.0-or-later`)
    pub license: String,
    /// Optional project homepage URL
    #[serde(default)]
    pub homepage: Option<String>,
    /// Runtime and build dependencies
    #[serde(default)]
    pub dependencies: Dependencies,
}
/// Dependencies specification
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Dependencies {
    /// Packages required at runtime
    #[serde(default)]
    pub runtime: Vec<String>,
    /// Packages required only while building
    #[serde(default)]
    pub build: Vec<String>,
}
/// Environment setup stage
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Environment {
    /// Isolation level: none (0), standard (1), enhanced (2), hermetic (3)
    #[serde(default = "default_isolation")]
    pub isolation: IsolationLevel,
    /// Apply optimized compiler flags
    #[serde(default)]
    pub defaults: bool,
    /// Allow network during build
    #[serde(default)]
    pub network: bool,
    /// Environment variables
    #[serde(default)]
    pub variables: HashMap<String, String>,
}
// Serde's `default = "..."` attribute requires a function path; this
// supplies the isolation level used when `environment.isolation` is omitted.
fn default_isolation() -> IsolationLevel {
    IsolationLevel::Default
}
// Hand-written so the struct default and the serde field default above both
// route through `default_isolation()` and cannot drift apart.
impl Default for Environment {
    fn default() -> Self {
        Self {
            isolation: default_isolation(),
            defaults: false,
            network: false,
            variables: HashMap::new(),
        }
    }
}
/// Source acquisition stage
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Source {
    /// Source method (single source for backward compatibility)
    #[serde(flatten)]
    pub method: Option<SourceMethod>,
    /// Multiple sources (new multi-source support)
    #[serde(default)]
    pub sources: Vec<NamedSource>,
    /// Patches to apply after extraction
    #[serde(default)]
    pub patches: Vec<String>,
}
/// Named source with optional extract location
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NamedSource {
    /// Source method
    #[serde(flatten)]
    pub method: SourceMethod,
    /// Where to extract relative to build directory (optional)
    #[serde(default)]
    pub extract_to: Option<String>,
}
/// Source acquisition methods
///
/// `untagged`: the variant is picked by which key (`git`, `fetch`, `local`)
/// is present in the YAML mapping.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum SourceMethod {
    Git { git: GitSource },
    Fetch { fetch: FetchSource },
    Local { local: LocalSource },
}
/// Git source specification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitSource {
    /// Repository URL
    pub url: String,
    // Serialized as `ref`, which is a Rust keyword and so needs the rename.
    #[serde(rename = "ref")]
    pub git_ref: String,
}
/// Fetch source specification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FetchSource {
    /// Download URL
    pub url: String,
    /// Optional checksum used to verify the download
    #[serde(default)]
    pub checksum: Option<Checksum>,
    /// Where to extract relative to build directory (optional)
    #[serde(default)]
    pub extract_to: Option<String>,
}
/// Checksum specification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Checksum {
    #[serde(flatten)]
    pub algorithm: ChecksumAlgorithm,
}
// Untagged: the algorithm is picked by which key (blake3/sha256/md5) appears.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ChecksumAlgorithm {
    Blake3 { blake3: String },
    Sha256 { sha256: String },
    Md5 { md5: String },
}
/// Local source specification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalSource {
    /// Path to the local source tree (may be relative to the recipe)
    pub path: String,
}
/// Build stage
///
/// `untagged`: a mapping with a `system` key deserializes as `System`, one
/// with a `steps` key as `Steps`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Build {
    /// Simple build system invocation
    System {
        system: BuildSystem,
        #[serde(default)]
        args: Vec<String>,
    },
    /// Complex build with custom steps
    Steps { steps: Vec<ParsedStep> },
}
/// Supported build systems
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum BuildSystem {
    Autotools,
    Cmake,
    Meson,
    Cargo,
    Make,
    Go,
    Python,
    Nodejs,
}
/// Parsed build step from YAML recipe
///
/// `untagged`: the variant is picked by which single key (`command`,
/// `shell`, `make`, …) the step mapping contains.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ParsedStep {
    /// Simple command (splits by whitespace, no shell features)
    Command { command: String },
    /// Shell command (passed to sh -c, supports pipes/redirects/etc)
    Shell { shell: String },
    Make { make: Vec<String> },
    Configure { configure: Vec<String> },
    Cmake { cmake: Vec<String> },
    Meson { meson: Vec<String> },
    Cargo { cargo: Vec<String> },
    Go { go: Vec<String> },
    Python { python: Vec<String> },
    Nodejs { nodejs: Vec<String> },
}
/// Post-processing stage
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Post {
    /// Rpath patching strategy
    #[serde(default)]
    pub patch_rpaths: RpathPatchOption,
    /// Fix executable permissions
    #[serde(default)]
    pub fix_permissions: PostOption,
    /// QA pipeline override (auto, rust, c, go, python, skip)
    #[serde(default)]
    pub qa_pipeline: sps2_types::QaPipelineOverride,
    /// Custom post-processing commands
    #[serde(default)]
    pub commands: Vec<PostCommand>,
}
/// Post-processing command
///
/// `untagged`: a bare YAML string becomes `Simple`; a mapping with a
/// `shell` key becomes `Shell`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PostCommand {
    /// Simple command string
    Simple(String),
    /// Shell command (passed to sh -c, supports pipes/redirects/etc)
    Shell { shell: String },
}
/// Post-processing option (bool or list of paths)
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PostOption {
    /// Blanket on/off switch
    Enabled(bool),
    /// Apply only to these specific paths
    Paths(Vec<String>),
}
// Manual impl because `Default` cannot be derived onto a tuple variant's
// payload: the option defaults to "disabled".
impl Default for PostOption {
    fn default() -> Self {
        PostOption::Enabled(false)
    }
}
/// Rpath patching strategy
///
/// `Serialize` is derived, but `Deserialize` is hand-written below so that
/// string values are matched case-insensitively and with surrounding
/// whitespace trimmed.
#[derive(Debug, Clone, Default, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum RpathPatchOption {
    /// Modern style: Keep @rpath references (relocatable binaries)
    #[default]
    Default,
    /// Absolute style: Convert @rpath to absolute paths
    Absolute,
    /// Skip rpath patching entirely
    Skip,
}
impl<'de> serde::Deserialize<'de> for RpathPatchOption {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct RpathPatchOptionVisitor;
        impl serde::de::Visitor<'_> for RpathPatchOptionVisitor {
            type Value = RpathPatchOption;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str("an rpath patch option (default, absolute, or skip)")
            }
            // Accepts "default"/"absolute"/"skip" in any letter case, with
            // leading/trailing whitespace ignored.
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                match value.trim().to_ascii_lowercase().as_str() {
                    "default" => Ok(RpathPatchOption::Default),
                    "absolute" => Ok(RpathPatchOption::Absolute),
                    "skip" => Ok(RpathPatchOption::Skip),
                    other => Err(serde::de::Error::unknown_variant(
                        other,
                        &["default", "absolute", "skip"],
                    )),
                }
            }
            fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                self.visit_str(&value)
            }
        }
        // `deserialize_any` relies on the input format being self-describing
        // (true for YAML); only string inputs reach the visitor's handlers,
        // everything else surfaces a type error.
        deserializer.deserialize_any(RpathPatchOptionVisitor)
    }
}
/// Installation behavior
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Install {
    /// Auto-install after building
    #[serde(default)]
    pub auto: bool,
}
// Round-trip parsing tests that exercise the serde model directly; recipe
// validation and variable expansion are covered in `parser.rs`, not here.
#[cfg(test)]
mod tests {
    use super::*;
    /// Minimal recipe: metadata + fetch source + system build.
    #[test]
    fn test_parse_simple_recipe() {
        let yaml = r"
metadata:
  name: zlib
  version: 1.3.1
  description: General-purpose lossless data compression library
  license: Zlib
source:
  fetch:
    url: https://github.com/madler/zlib/releases/download/v1.3.1/zlib-1.3.1.tar.gz
build:
  system: cmake
  args:
    - -DCMAKE_BUILD_TYPE=Release
";
        let recipe: YamlRecipe = serde_yaml2::from_str(yaml).unwrap();
        assert_eq!(recipe.metadata.name, "zlib");
        assert_eq!(recipe.metadata.version, "1.3.1");
    }
    /// Full-featured recipe: facts, environment variables, local source with
    /// patches, step-based build, and post-processing options.
    #[test]
    fn test_parse_complex_recipe() {
        let yaml = r#"
metadata:
  name: gcc
  version: 15.1.0
  description: GNU Compiler Collection
  license: GPL-3.0-or-later
  dependencies:
    build:
      - gmp
      - mpfr
facts:
  build_triple: aarch64-apple-darwin24
environment:
  isolation: default
  defaults: true
  variables:
    LDFLAGS: "-L${PREFIX}/lib"
source:
  local:
    path: ./src
  patches:
    - gcc-darwin.patch
build:
  steps:
    - command: mkdir -p build
    - command: cd build && ../configure --build=${build_triple}
post:
  fix_permissions: true
"#;
        let recipe: YamlRecipe = serde_yaml2::from_str(yaml).unwrap();
        assert_eq!(recipe.metadata.name, "gcc");
        assert_eq!(
            recipe.facts.get("build_triple").unwrap(),
            "aarch64-apple-darwin24"
        );
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/recipe/mod.rs | crates/builder/src/recipe/mod.rs | //! Recipe parsing and execution module
pub mod executor;
pub mod model;
pub mod parser;
// Re-export commonly used items
pub use executor::execute_recipe;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/packaging/archive.rs | crates/builder/src/packaging/archive.rs | //! Deterministic TAR archive creation for reproducible builds
use sps2_errors::{BuildError, Error};
use std::path::{Path, PathBuf};
use tokio::fs::File;
/// Default deterministic timestamp (Unix epoch) for reproducible builds
const DETERMINISTIC_TIMESTAMP: u64 = 0;
/// Environment variable for `SOURCE_DATE_EPOCH` (standard for reproducible builds)
const SOURCE_DATE_EPOCH_VAR: &str = "SOURCE_DATE_EPOCH";
/// Create a deterministic tar archive from a source directory using the
/// `tar` crate, so that identical input always produces identical output
/// (reproducible builds).
///
/// The entry timestamp comes from [`get_deterministic_timestamp`]
/// (`SOURCE_DATE_EPOCH` when set, otherwise the Unix epoch).
///
/// # Errors
///
/// Returns an error if file I/O operations fail or tar creation fails.
pub async fn create_deterministic_tar_archive(
    source_dir: &Path,
    tar_path: &Path,
) -> Result<(), Error> {
    // Use the global deterministic timestamp
    let deterministic_timestamp = get_deterministic_timestamp();
    create_deterministic_tar_archive_with_timestamp(source_dir, tar_path, deterministic_timestamp)
        .await
}
/// Create a deterministic tar archive with an explicit entry timestamp
/// (useful for testing). Identical input and timestamp always produce
/// identical output for reproducible builds.
///
/// The tar crate does blocking I/O, so the archive is built inside
/// `spawn_blocking` to keep the async runtime responsive.
///
/// # Errors
///
/// Returns an error if the output file cannot be created, the blocking
/// task fails to complete, or any archive-building step fails.
pub async fn create_deterministic_tar_archive_with_timestamp(
    source_dir: &Path,
    tar_path: &Path,
    timestamp: u64,
) -> Result<(), Error> {
    use tar::Builder;
    let file = File::create(tar_path).await?;
    // Hand the tokio File over to std, since the tar builder is blocking.
    let file = file.into_std().await;
    let source_dir = source_dir.to_path_buf(); // Clone to move into closure
    // Create deterministic tar using the tar crate
    tokio::task::spawn_blocking(move || -> Result<(), Error> {
        let mut tar_builder = Builder::new(file);
        // Set deterministic behavior
        tar_builder.follow_symlinks(false);
        add_directory_to_tar_with_timestamp(&mut tar_builder, &source_dir, "".as_ref(), timestamp)?;
        tar_builder.finish()?;
        Ok(())
    })
    .await
    .map_err(|e| BuildError::Failed {
        message: format!("tar creation task failed: {e}"),
    })??; // outer `?`: join error; inner `?`: archive-building error
    Ok(())
}
/// Recursively add directory contents to tar archive with deterministic ordering
/// This is the enhanced deterministic version with improved file ordering and metadata normalization
/// for reproducible builds
fn add_directory_to_tar_with_timestamp(
tar_builder: &mut tar::Builder<std::fs::File>,
dir_path: &Path,
tar_path: &Path,
deterministic_timestamp: u64,
) -> Result<(), Error> {
let mut entries = std::fs::read_dir(dir_path)?.collect::<Result<Vec<_>, _>>()?;
// Enhanced deterministic sorting for optimal compression:
// 1. Sort all entries lexicographically by filename (case-sensitive, locale-independent)
// 2. This ensures consistent ordering across different filesystems and locales
entries.sort_by(|a, b| {
// Use OS string comparison for consistent, locale-independent ordering
a.file_name().cmp(&b.file_name())
});
for entry in entries {
let file_path = entry.path();
let file_name = entry.file_name();
// Skip the package.tar file if it exists to avoid recursion
if file_name == "package.tar" {
continue;
}
// Construct tar entry path - avoid leading separators for root entries
let tar_entry_path = if tar_path.as_os_str().is_empty() {
PathBuf::from(&file_name)
} else {
tar_path.join(&file_name)
};
let metadata = entry.metadata()?;
if metadata.is_dir() {
// Add directory entry with fully normalized metadata
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Directory);
header.set_size(0);
header.set_mode(normalize_file_permissions(&metadata));
header.set_mtime(deterministic_timestamp);
header.set_uid(0); // Normalized ownership
header.set_gid(0); // Normalized ownership
header.set_username("root")?; // Consistent username
header.set_groupname("root")?; // Consistent group name
header.set_device_major(0)?; // Clear device numbers
header.set_device_minor(0)?; // Clear device numbers
header.set_cksum();
let tar_dir_path = format!("{}/", tar_entry_path.display());
tar_builder.append_data(&mut header, &tar_dir_path, std::io::empty())?;
// Recursively add directory contents
add_directory_to_tar_with_timestamp(
tar_builder,
&file_path,
&tar_entry_path,
deterministic_timestamp,
)?;
} else if metadata.is_file() {
// Add file entry with fully normalized metadata
let mut file = std::fs::File::open(&file_path)?;
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Regular);
header.set_size(metadata.len());
header.set_mode(normalize_file_permissions(&metadata));
header.set_mtime(deterministic_timestamp);
header.set_uid(0); // Normalized ownership
header.set_gid(0); // Normalized ownership
header.set_username("root")?; // Consistent username
header.set_groupname("root")?; // Consistent group name
header.set_device_major(0)?; // Clear device numbers
header.set_device_minor(0)?; // Clear device numbers
header.set_cksum();
tar_builder.append_data(
&mut header,
tar_entry_path.display().to_string(),
&mut file,
)?;
} else if metadata.is_symlink() {
// Handle symlinks deterministically
let target = std::fs::read_link(&file_path)?;
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Symlink);
header.set_size(0);
header.set_mode(0o777); // Standard symlink permissions
header.set_mtime(deterministic_timestamp);
header.set_uid(0); // Normalized ownership
header.set_gid(0); // Normalized ownership
header.set_username("root")?; // Consistent username
header.set_groupname("root")?; // Consistent group name
header.set_link_name(&target)?;
header.set_device_major(0)?; // Clear device numbers
header.set_device_minor(0)?; // Clear device numbers
header.set_cksum();
tar_builder.append_data(
&mut header,
tar_entry_path.display().to_string(),
std::io::empty(),
)?;
}
// Skip other special files (device nodes, fifos, etc.) for security and consistency
}
Ok(())
}
/// Resolve the timestamp used for reproducible archive entries.
///
/// Honors the `SOURCE_DATE_EPOCH` convention when the variable is set to a
/// valid `u64`; in every other case (unset, non-UTF-8, or unparseable) it
/// falls back to the Unix epoch default.
#[must_use]
pub fn get_deterministic_timestamp() -> u64 {
    match std::env::var(SOURCE_DATE_EPOCH_VAR) {
        Ok(raw) => raw.parse::<u64>().unwrap_or(DETERMINISTIC_TIMESTAMP),
        Err(_) => DETERMINISTIC_TIMESTAMP,
    }
}
/// Normalize file permissions for deterministic output.
///
/// Collapses whatever the filesystem and umask produced into exactly two
/// modes — `0o755` for directories and executables, `0o644` for everything
/// else — so archives are bit-identical across machines.
fn normalize_file_permissions(metadata: &std::fs::Metadata) -> u32 {
    use std::os::unix::fs::PermissionsExt;

    if metadata.is_dir() {
        // Directories: rwxr-xr-x
        return 0o755;
    }
    if !metadata.is_file() {
        // Default for other file types
        return 0o644;
    }
    // Regular files keep (normalized) executability: any exec bit promotes
    // the file to rwxr-xr-x, otherwise rw-r--r--.
    if metadata.permissions().mode() & 0o111 == 0 {
        0o644
    } else {
        0o755
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/packaging/manifest.rs | crates/builder/src/packaging/manifest.rs | //! Package manifest and SBOM coordination
//! NOTE: SBOM generation is currently disabled by callers (soft-disabled).
// use crate::utils::events::send_event;
use crate::yaml::RecipeMetadata;
use crate::{BuildContext, BuildEnvironment};
// use sps2_errors::Error;
use sps2_types::Manifest;
/// Create the package manifest from the build context, resolved runtime
/// dependencies, recipe metadata, and the build environment.
///
/// Python packages (detected via the environment's used build systems) get
/// a `python` metadata section; build dependencies are intentionally left
/// out of the final manifest.
#[must_use]
pub fn create_manifest(
    context: &BuildContext,
    runtime_deps: Vec<String>,
    recipe_metadata: &RecipeMetadata,
    environment: &BuildEnvironment,
) -> Manifest {
    use sps2_types::{ManifestDependencies as Dependencies, ManifestPackageInfo as PackageInfo};
    // Generate Python metadata if this is a Python package
    let python_metadata = if environment.used_build_systems().contains("python") {
        Some(create_python_metadata_from_env(environment))
    } else {
        None
    };
    Manifest {
        format_version: sps2_types::PackageFormatVersion::CURRENT,
        package: PackageInfo {
            name: context.name.clone(),
            version: context.version.to_string(),
            revision: context.revision,
            arch: context.arch.clone(),
            description: recipe_metadata.description.clone(),
            homepage: recipe_metadata.homepage.clone(),
            license: recipe_metadata.license.clone(),
            legacy_compression: None,
        },
        dependencies: Dependencies {
            runtime: runtime_deps,
            build: Vec::new(), // Build deps not included in final manifest
        },
        python: python_metadata,
    }
}
/// Build Python package metadata from values the build stage exported into
/// the environment.
///
/// `wheel_file` and `requirements_file` stay empty: in the builder-centric
/// flow the package has already been installed into staging, so those
/// fields are unused downstream.
fn create_python_metadata_from_env(
    environment: &BuildEnvironment,
) -> sps2_types::PythonPackageMetadata {
    use std::collections::HashMap;

    // Python version requirement, with a permissive fallback.
    let requires_python = match environment.get_extra_env("PYTHON_REQUIRES_VERSION") {
        Some(version) => version,
        None => ">=3.8".to_string(),
    };

    // Entry points arrive as a JSON object; any parse failure silently
    // yields an empty map, matching best-effort extraction.
    let executables: HashMap<String, String> = environment
        .get_extra_env("PYTHON_ENTRY_POINTS")
        .and_then(|raw| serde_json::from_str(&raw).ok())
        .unwrap_or_default();

    sps2_types::PythonPackageMetadata {
        requires_python,
        wheel_file: String::new(),
        requirements_file: String::new(),
        executables,
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/packaging/mod.rs | crates/builder/src/packaging/mod.rs | //! Packaging module for archive, compression, manifest, and signing
//! SBOM support removed from packaging.
pub mod archive;
pub mod compression;
pub mod manifest;
pub mod signing;
use self::archive::create_deterministic_tar_archive;
use self::compression::compress_with_zstd;
use self::signing::PackageSigner;
use crate::utils::events::send_event;
use crate::utils::fileops::copy_directory_strip_live_prefix;
use crate::{BuildConfig, BuildContext, BuildEnvironment};
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, GeneralEvent};
use sps2_types::Manifest;
use sps2_types::PythonPackageMetadata;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
/// Create the package archive, emit progress events, and sign the result.
///
/// # Errors
///
/// Returns an error if:
/// - Package creation fails
/// - Package signing fails (when enabled)
pub async fn create_and_sign_package(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &BuildEnvironment,
    manifest: Manifest,
) -> Result<PathBuf, Error> {
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationStarted {
            operation: "Creating package archive".to_string(),
        }),
    );

    let package_path = create_package(config, context, environment, manifest).await?;

    let completed = GeneralEvent::OperationCompleted {
        operation: format!("Package created: {}", package_path.display()),
        success: true,
    };
    send_event(context, AppEvent::General(completed));

    // Sign the package if configured (see `sign_package`).
    sign_package(config, context, &package_path).await?;

    Ok(package_path)
}
/// Assemble the final `.sp` package from the staging directory.
///
/// # Errors
///
/// Returns an error if:
/// - Python package structure creation fails (for Python packages)
/// - Manifest serialization to TOML fails
/// - SP package archive creation fails
pub async fn create_package(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &BuildEnvironment,
    mut manifest: Manifest,
) -> Result<PathBuf, Error> {
    let package_path = context.output_path();

    // Python packages carry extra metadata assembled from the staging layout.
    if environment.is_python_package() {
        let python_metadata = create_python_package_structure(environment, &manifest).await?;
        manifest.python = Some(python_metadata);
    }

    // Serialize the real manifest data to TOML for inclusion in the archive.
    let manifest_string = match toml::to_string(&manifest) {
        Ok(serialized) => serialized,
        Err(e) => {
            return Err(BuildError::Failed {
                message: format!("failed to serialize manifest: {e}"),
            }
            .into())
        }
    };

    // Build the actual .sp archive around the staging tree and manifest.
    create_sp_package(
        config,
        context,
        environment.staging_dir(),
        &package_path,
        &manifest_string,
    )
    .await?;

    Ok(package_path)
}
/// Create a .sp package archive with manifest and tar+zstd compression
///
/// Layout: `manifest.toml` at the archive root plus the staged files (with
/// the `opt/pm/live` prefix stripped), tarred deterministically and then
/// compressed with zstd into `output_path`.
///
/// # Errors
///
/// Returns an error if:
/// - Directory creation fails
/// - File I/O operations fail (writing manifest, copying files)
/// - Tar archive creation fails or times out
/// - Zstd compression fails
/// - Cleanup operations fail
pub async fn create_sp_package(
    _config: &BuildConfig,
    context: &BuildContext,
    staging_dir: &Path,
    output_path: &Path,
    manifest_content: &str,
) -> Result<(), Error> {
    // Assemble the package layout in a sibling temp dir next to staging.
    let package_dir = staging_dir.parent().ok_or_else(|| BuildError::Failed {
        message: "Invalid staging directory path".to_string(),
    })?;
    let package_temp_dir = package_dir.join("package_temp");
    fs::create_dir_all(&package_temp_dir).await?;

    // Step 1: manifest.toml in the package root.
    let manifest_path = package_temp_dir.join("manifest.toml");
    fs::write(&manifest_path, manifest_content).await?;

    // SBOM files removed: no SBOM files are copied into the package

    // Step 2: copy staged files, stripping the opt/pm/live prefix.
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationStarted {
            operation: "Copying package files".to_string(),
        }),
    );
    if staging_dir.exists() {
        copy_directory_strip_live_prefix(staging_dir, &package_temp_dir).await?;
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationCompleted {
            operation: "Package files copied".to_string(),
            success: true,
        }),
    );

    // Step 3: deterministic tar archive, bounded by a 30s timeout so a
    // wedged tar task cannot hang the whole build.
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationStarted {
            operation: "Creating tar archive".to_string(),
        }),
    );
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug(format!(
            "Creating tar from: {}",
            package_temp_dir.display()
        ))),
    );
    // NOTE(review): package.tar lives inside the directory being archived;
    // create_deterministic_tar_archive presumably excludes it — confirm.
    let tar_path = package_temp_dir.join("package.tar");
    let tar_result = tokio::time::timeout(
        std::time::Duration::from_secs(30),
        create_deterministic_tar_archive(&package_temp_dir, &tar_path),
    )
    .await;
    match tar_result {
        Ok(result) => result?,
        Err(_) => {
            // Fix: previously the temp dir was leaked on timeout; clean it
            // up best-effort before surfacing the error.
            let _ = fs::remove_dir_all(&package_temp_dir).await;
            return Err(BuildError::Failed {
                message: "Tar archive creation timed out after 30 seconds".to_string(),
            }
            .into());
        }
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationCompleted {
            operation: "Tar archive created".to_string(),
            success: true,
        }),
    );

    // Step 4: compress with zstd into the final output path.
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationStarted {
            operation: "Compressing package with zstd".to_string(),
        }),
    );
    compress_with_zstd(&tar_path, output_path).await?;
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationCompleted {
            operation: "Package compression completed".to_string(),
            success: true,
        }),
    );

    // Step 5: drop the temp tree now that the .sp archive exists.
    fs::remove_dir_all(&package_temp_dir).await?;
    Ok(())
}
/// Sign the package when signing is enabled in the config.
///
/// Emits start/completion events; the completion message reflects whether
/// the signer actually produced a signature or declined.
///
/// # Errors
///
/// Returns an error if:
/// - Package signing fails (when signing is enabled)
/// - Cryptographic operations fail during signing
pub async fn sign_package(
    config: &BuildConfig,
    context: &BuildContext,
    package_path: &Path,
) -> Result<(), Error> {
    let packaging = config.packaging_settings();
    if !packaging.signing.enabled {
        return Ok(());
    }

    let file_label = package_path
        .file_name()
        .unwrap_or_default()
        .to_string_lossy();
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationStarted {
            operation: format!("Signing package {}", file_label),
        }),
    );

    let signer = PackageSigner::new(packaging.signing.clone());
    let completion = if let Some(sig_path) = signer.sign_package(package_path).await? {
        format!("Package signed: {}", sig_path.display())
    } else {
        // The signer itself can still decline (signing disabled at its level).
        "Package signing skipped (disabled)".to_string()
    };
    send_event(
        context,
        AppEvent::General(GeneralEvent::OperationCompleted {
            operation: completion,
            success: true,
        }),
    );
    Ok(())
}
/// Create Python package structure and metadata
///
/// Reads the wheel/lockfile/entry-point data the Python build steps stored
/// in the environment, stages them under `<package>/python/`, and returns
/// the metadata to embed in the manifest.
///
/// # Errors
///
/// Returns an error if:
/// - Required Python environment variables are missing
/// - Directory creation fails
/// - File copying operations fail (wheel, lockfile)
/// - Path operations fail
async fn create_python_package_structure(
    environment: &BuildEnvironment,
    manifest: &Manifest,
) -> Result<PythonPackageMetadata, Error> {
    // Get Python metadata recorded earlier by the build environment.
    let wheel_path = environment
        .get_extra_env("PYTHON_WHEEL_PATH")
        .ok_or_else(|| BuildError::Failed {
            message: "Python wheel path not found in build environment".to_string(),
        })?;
    let lockfile_path = environment
        .get_extra_env("PYTHON_LOCKFILE_PATH")
        .ok_or_else(|| BuildError::Failed {
            message: "Python lockfile path not found in build environment".to_string(),
        })?;
    let entry_points_json = environment
        .get_extra_env("PYTHON_ENTRY_POINTS")
        .unwrap_or_else(|| "{}".to_string());
    let requires_python = environment
        .get_extra_env("PYTHON_REQUIRES_VERSION")
        .unwrap_or_else(|| ">=3.8".to_string());
    // Malformed entry-point JSON degrades to "no executables" by design.
    let executables: HashMap<String, String> =
        serde_json::from_str(&entry_points_json).unwrap_or_default();
    // Create python/ subdirectory in staging.
    // Fix: strip only the trailing ".sp" extension; `replace(".sp", "")`
    // would also mangle any ".sp" occurring elsewhere in the file name.
    let filename = manifest.filename();
    let package_stem = filename.strip_suffix(".sp").unwrap_or(&filename);
    let python_dir = environment
        .staging_dir()
        .join(package_stem)
        .join("python");
    fs::create_dir_all(&python_dir).await?;
    // Copy wheel file, keeping its original file name.
    let wheel_src = PathBuf::from(&wheel_path);
    let wheel_filename = wheel_src.file_name().ok_or_else(|| BuildError::Failed {
        message: "Invalid wheel path".to_string(),
    })?;
    let wheel_dst = python_dir.join(wheel_filename);
    fs::copy(&wheel_src, &wheel_dst).await?;
    // Copy lockfile under a fixed, predictable name.
    let lockfile_src = PathBuf::from(&lockfile_path);
    let lockfile_dst = python_dir.join("requirements.lock.txt");
    fs::copy(&lockfile_src, &lockfile_dst).await?;
    // Paths recorded here are relative to the package root.
    Ok(PythonPackageMetadata {
        requires_python,
        wheel_file: format!("python/{}", wheel_filename.to_string_lossy()),
        requirements_file: "python/requirements.lock.txt".to_string(),
        executables,
    })
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/packaging/signing.rs | crates/builder/src/packaging/signing.rs | //! Package signing with Minisign
use minisign::{sign, KeyPair, PublicKey, SecretKey, SecretKeyBox, SignatureBox};
use sps2_config::builder::SigningSettings;
use sps2_errors::{BuildError, Error};
use std::io::Cursor;
use std::path::{Path, PathBuf};
use tokio::fs;
/// Package signer using Minisign
///
/// Wraps [`SigningSettings`] and produces detached `.minisig` signatures
/// next to package files.
pub struct PackageSigner {
    // Signing configuration: enabled flag, key location, and related paths.
    settings: SigningSettings,
}
impl PackageSigner {
    /// Create new package signer
    #[must_use]
    pub fn new(settings: SigningSettings) -> Self {
        Self { settings }
    }
    /// Sign a package file, creating a detached .minisig signature
    ///
    /// Returns `Ok(None)` when signing is disabled in the settings,
    /// otherwise the path of the written `<pkg>.sp.minisig` file.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The private key path is not configured or doesn't exist
    /// - The package file doesn't exist
    /// - Key decryption or signature creation fails
    /// - Writing the signature file fails
    pub async fn sign_package(&self, package_path: &Path) -> Result<Option<PathBuf>, Error> {
        if !self.settings.enabled {
            return Ok(None);
        }
        // `identity` holds the filesystem path of the minisign secret key.
        let private_key_path = PathBuf::from(self.settings.identity.as_ref().ok_or_else(|| {
            BuildError::SigningError {
                message: "No private key path configured".to_string(),
            }
        })?);
        if !private_key_path.exists() {
            return Err(BuildError::SigningError {
                message: format!("Private key file not found: {}", private_key_path.display()),
            }
            .into());
        }
        if !package_path.exists() {
            return Err(BuildError::SigningError {
                message: format!("Package file not found: {}", package_path.display()),
            }
            .into());
        }
        // Read the private key
        let key_data = fs::read(private_key_path)
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to read private key: {e}"),
            })?;
        // Parse secret key from its textual box representation
        let sk_box_str = String::from_utf8(key_data).map_err(|e| BuildError::SigningError {
            message: format!("Invalid UTF-8 in private key file: {e}"),
        })?;
        let sk_box =
            SecretKeyBox::from_string(&sk_box_str).map_err(|e| BuildError::SigningError {
                message: format!("Failed to parse private key: {e}"),
            })?;
        // NOTE(review): `keychain_path` is passed where minisign expects the
        // key password — presumably it carries the passphrase source; confirm.
        let secret_key = sk_box
            .into_secret_key(
                self.settings
                    .keychain_path
                    .as_deref()
                    .map(|p| p.to_string_lossy().to_string()),
            )
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to decrypt private key: {e}"),
            })?;
        // Read the package file to sign
        let package_data = fs::read(package_path)
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to read package file: {e}"),
            })?;
        // Create signature. The trusted comment is covered by the signature;
        // the untrusted comment is informational only.
        let trusted_comment = match self.settings.entitlements_file.as_deref() {
            Some(path) => path.to_string_lossy(),
            None => "sps2 package signature".into(),
        };
        let untrusted_comment = format!(
            "signature from sps2 for {}",
            package_path
                .file_name()
                .unwrap_or_default()
                .to_string_lossy()
        );
        let package_reader = Cursor::new(&package_data);
        let signature = sign(
            None, // No additional public key validation
            &secret_key,
            package_reader,
            Some(trusted_comment.as_ref()),
            Some(&untrusted_comment),
        )
        .map_err(|e| BuildError::SigningError {
            message: format!("Failed to create signature: {e}"),
        })?;
        // Write signature to a sibling .minisig file
        // ("pkg.sp" -> "pkg.sp.minisig").
        let sig_path = package_path.with_extension("sp.minisig");
        fs::write(&sig_path, signature.into_string())
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to write signature file: {e}"),
            })?;
        Ok(Some(sig_path))
    }
    /// Verify a package signature (for testing)
    ///
    /// Returns `Ok(false)` when no sibling `.minisig` file exists or when
    /// the signature does not validate against `public_key`.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The package or signature files cannot be read
    /// - The signature cannot be parsed
    pub async fn verify_package(
        &self,
        package_path: &Path,
        public_key: &PublicKey,
    ) -> Result<bool, Error> {
        // Signature is expected next to the package file.
        let sig_path = package_path.with_extension("sp.minisig");
        if !sig_path.exists() {
            return Ok(false);
        }
        // Read package data and signature
        let package_data = fs::read(package_path)
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to read package file: {e}"),
            })?;
        let sig_data =
            fs::read_to_string(&sig_path)
                .await
                .map_err(|e| BuildError::SigningError {
                    message: format!("Failed to read signature file: {e}"),
                })?;
        // Parse and verify signature; verification failure maps to Ok(false),
        // not an error.
        let signature_box =
            SignatureBox::from_string(&sig_data).map_err(|e| BuildError::SigningError {
                message: format!("Failed to parse signature: {e}"),
            })?;
        let package_reader = Cursor::new(&package_data);
        let is_valid = minisign::verify(
            public_key,
            &signature_box,
            package_reader,
            true,
            false,
            false,
        )
        .is_ok();
        Ok(is_valid)
    }
    /// Generate a new key pair for signing (development/testing only)
    ///
    /// # Errors
    ///
    /// Returns an error if key pair generation fails.
    pub fn generate_keypair() -> Result<(SecretKey, PublicKey), Error> {
        // Use unencrypted keypair for testing to avoid interactive prompts
        let KeyPair { pk, sk } =
            KeyPair::generate_unencrypted_keypair().map_err(|e| BuildError::SigningError {
                message: format!("Failed to generate key pair: {e}"),
            })?;
        Ok((sk, pk))
    }
    /// Generate a new key pair with encryption for signing
    ///
    /// The secret key is returned in boxed form, encrypted with `password`
    /// (or unencrypted when `password` is `None`).
    ///
    /// # Errors
    ///
    /// Returns an error if key pair generation fails.
    pub fn generate_encrypted_keypair(
        password: Option<&str>,
    ) -> Result<(SecretKeyBox, PublicKey), Error> {
        let KeyPair { pk, sk } =
            KeyPair::generate_unencrypted_keypair().map_err(|e| BuildError::SigningError {
                message: format!("Failed to generate key pair: {e}"),
            })?;
        // Serialize (and optionally encrypt) the secret key into box form.
        let sk_box = sk.to_box(password).map_err(|e| BuildError::SigningError {
            message: format!("Failed to encrypt secret key: {e}"),
        })?;
        Ok((sk_box, pk))
    }
    /// Save a secret key to file
    ///
    /// # Errors
    ///
    /// Returns an error if key serialization or file writing fails.
    pub async fn save_secret_key(
        secret_key: &SecretKey,
        path: &Path,
        password: Option<&str>,
    ) -> Result<(), Error> {
        let sk_box = secret_key
            .to_box(password)
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to serialize secret key: {e}"),
            })?;
        fs::write(path, sk_box.to_string())
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to write secret key: {e}"),
            })?;
        Ok(())
    }
    /// Save a secret key box to file
    ///
    /// # Errors
    ///
    /// Returns an error if file writing fails.
    pub async fn save_secret_key_box(sk_box: &SecretKeyBox, path: &Path) -> Result<(), Error> {
        fs::write(path, sk_box.to_string())
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to write secret key: {e}"),
            })?;
        Ok(())
    }
    /// Save a public key to file
    ///
    /// # Errors
    ///
    /// Returns an error if key serialization or file writing fails.
    pub async fn save_public_key(public_key: &PublicKey, path: &Path) -> Result<(), Error> {
        let pk_box = public_key.to_box().map_err(|e| BuildError::SigningError {
            message: format!("Failed to serialize public key: {e}"),
        })?;
        fs::write(path, pk_box.to_string())
            .await
            .map_err(|e| BuildError::SigningError {
                message: format!("Failed to write public key: {e}"),
            })?;
        Ok(())
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/packaging/compression.rs | crates/builder/src/packaging/compression.rs | //! Zstandard compression for sps2 packages
//!
//! This module applies a fixed Zstandard compression level when creating
//! package archives, ensuring consistent output across builds.
use sps2_errors::Error;
use std::path::Path;
/// Fixed Zstandard level applied to every package archive.
const DEFAULT_LEVEL: i32 = 9;

/// Compress a tar archive into `output_path` using Zstandard.
///
/// Uses the module-wide fixed level ([`DEFAULT_LEVEL`]) so package output
/// is consistent across builds.
///
/// # Errors
///
/// Returns an error if file I/O operations fail or compression fails.
pub async fn compress_with_zstd(tar_path: &Path, output_path: &Path) -> Result<(), Error> {
    use async_compression::tokio::write::ZstdEncoder;
    use async_compression::Level;
    use tokio::fs::File;
    use tokio::io::{AsyncWriteExt, BufReader};

    let source = File::open(tar_path).await?;
    let sink = File::create(output_path).await?;

    // Stream the tar file through the encoder into the output file.
    let mut encoder = ZstdEncoder::with_quality(sink, Level::Precise(DEFAULT_LEVEL));
    let mut buffered = BufReader::new(source);
    tokio::io::copy(&mut buffered, &mut encoder).await?;

    // Flush the encoder's internal buffers before returning.
    encoder.shutdown().await?;
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/source.rs | crates/builder/src/stages/source.rs | //! Source stage types and operations
use serde::{Deserialize, Serialize};
/// Source operations that can be executed
///
/// Produced by recipe parsing and executed in order during the source
/// stage (see `execute_source_step`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SourceStep {
    /// Clean the source directory
    Cleanup,
    /// Fetch file from URL (no checksum verification)
    Fetch {
        /// Download URL.
        url: String,
        /// Optional destination forwarded to the extractor.
        extract_to: Option<String>,
    },
    /// Fetch with MD5 verification
    FetchMd5 {
        /// Download URL.
        url: String,
        /// Expected MD5 digest.
        md5: String,
        /// Optional destination forwarded to the extractor.
        extract_to: Option<String>,
    },
    /// Fetch with SHA256 verification
    FetchSha256 {
        /// Download URL.
        url: String,
        /// Expected SHA-256 digest.
        sha256: String,
        /// Optional destination forwarded to the extractor.
        extract_to: Option<String>,
    },
    /// Fetch with BLAKE3 verification
    FetchBlake3 {
        /// Download URL.
        url: String,
        /// Expected BLAKE3 digest.
        blake3: String,
        /// Optional destination forwarded to the extractor.
        extract_to: Option<String>,
    },
    /// Extract downloaded archives
    Extract {
        /// Optional destination forwarded to the extractor.
        extract_to: Option<String>,
    },
    /// Clone from git
    Git {
        /// Repository URL.
        url: String,
        /// Git reference to check out (forwarded to the clone helper).
        ref_: String,
    },
    /// Copy local files
    Copy {
        /// Optional source path forwarded to the copy helper.
        src_path: Option<String>,
    },
    /// Apply a patch
    ApplyPatch {
        /// Patch file path, resolved relative to the build's src/ directory.
        path: String,
    },
}
// Note: ParsedSource is recipe::model::Source
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/environment.rs | crates/builder/src/stages/environment.rs | //! Environment stage types and operations
use crate::environment::IsolationLevel;
use serde::{Deserialize, Serialize};
/// Environment setup operations
///
/// Applied before source/build steps to configure the build sandbox.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum EnvironmentStep {
    /// Set isolation level
    SetIsolation {
        /// Target isolation level for the build.
        level: IsolationLevel,
    },
    /// Apply compiler defaults
    WithDefaults,
    /// Allow network access
    AllowNetwork {
        /// `true` to permit network access during the build.
        enabled: bool,
    },
    /// Set environment variable
    SetEnv {
        /// Variable name.
        key: String,
        /// Variable value.
        value: String,
    },
}
// Note: ParsedEnvironment is recipe::model::Environment
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/executors.rs | crates/builder/src/stages/executors.rs | //! Stage-specific execution functions
use crate::security::SecurityContext;
use crate::stages::{BuildCommand, EnvironmentStep, PostStep, SourceStep};
use crate::utils::events::send_event;
use crate::{BuildCommandResult, BuildContext, BuildEnvironment, BuilderApi};
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
use std::path::Path;
use tokio::fs;
/// Check if a file is an archive that should be extracted
///
/// Known archive extensions are trusted without opening the file; files
/// without any extension (e.g. GitHub API downloads) are sniffed by magic
/// number. Files with an unrecognized extension are treated as non-archives.
fn is_archive(path: &Path) -> bool {
    if let Some(ext) = path.extension().and_then(|e| e.to_str()) {
        matches!(ext, "gz" | "tgz" | "bz2" | "xz" | "zip")
    } else {
        // For files without extensions, check the file content.
        use std::fs::File;
        use std::io::Read;
        if let Ok(mut file) = File::open(path) {
            let mut magic = [0u8; 4];
            if file.read_exact(&mut magic).is_ok() {
                // gzip magic number (1f 8b)
                if magic[0] == 0x1f && magic[1] == 0x8b {
                    return true;
                }
                // ZIP magic number (50 4b)
                if magic[0] == 0x50 && magic[1] == 0x4b {
                    return true;
                }
                // bzip2 magic number (42 5a)
                if magic[0] == 0x42 && magic[1] == 0x5a {
                    return true;
                }
                // Fix: xz magic number (fd 37 7a 58). The extension list
                // above accepts "xz", so extension-less xz downloads must
                // sniff as archives too.
                if magic[0] == 0xfd && magic[1] == 0x37 && magic[2] == 0x7a && magic[3] == 0x58 {
                    return true;
                }
            }
        }
        false
    }
}
/// Execute a source step
pub async fn execute_source_step(
step: &SourceStep,
api: &mut BuilderApi,
environment: &mut BuildEnvironment,
) -> Result<(), Error> {
match step {
SourceStep::Cleanup => {
cleanup_directories(api, environment).await?;
}
SourceStep::Fetch { url, extract_to } => {
let download_path = api.fetch(url).await?;
// Extract immediately after download
if is_archive(&download_path) {
api.extract_single_download(&download_path, extract_to.as_deref())
.await?;
}
}
SourceStep::FetchMd5 {
url,
md5,
extract_to,
} => {
let download_path = api.fetch_md5(url, md5).await?;
// Extract immediately after download and verification
if is_archive(&download_path) {
api.extract_single_download(&download_path, extract_to.as_deref())
.await?;
}
}
SourceStep::FetchSha256 {
url,
sha256,
extract_to,
} => {
let download_path = api.fetch_sha256(url, sha256).await?;
// Extract immediately after download and verification
if is_archive(&download_path) {
api.extract_single_download(&download_path, extract_to.as_deref())
.await?;
}
}
SourceStep::FetchBlake3 {
url,
blake3,
extract_to,
} => {
let download_path = api.fetch_blake3(url, blake3).await?;
// Extract immediately after download and verification
if is_archive(&download_path) {
api.extract_single_download(&download_path, extract_to.as_deref())
.await?;
}
}
SourceStep::Extract { extract_to } => {
api.extract_downloads_to(extract_to.as_deref()).await?;
}
SourceStep::Git { url, ref_ } => {
api.git(url, ref_).await?;
}
SourceStep::Copy { src_path } => {
api.copy(src_path.as_deref(), &environment.context).await?;
}
SourceStep::ApplyPatch { path } => {
let patch_path = environment.build_prefix().join("src").join(path);
api.apply_patch(&patch_path, environment).await?;
}
}
Ok(())
}
/// Execute a build command
///
/// Dispatches each [`BuildCommand`] variant to the matching build-system
/// helper on [`BuilderApi`]; `Command` runs an arbitrary program via
/// `execute_command`.
///
/// # Errors
///
/// Returns an error if the underlying build-system invocation fails.
pub async fn execute_build_command(
    command: &BuildCommand,
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
) -> Result<(), Error> {
    match command {
        BuildCommand::Configure { args } => {
            api.configure(args, environment).await?;
        }
        BuildCommand::Make { args } => {
            api.make(args, environment).await?;
        }
        BuildCommand::Autotools { args } => {
            api.autotools(args, environment).await?;
        }
        BuildCommand::Cmake { args } => {
            api.cmake(args, environment).await?;
        }
        BuildCommand::Meson { args } => {
            api.meson(args, environment).await?;
        }
        BuildCommand::Cargo { args } => {
            api.cargo(args, environment).await?;
        }
        BuildCommand::Go { args } => {
            api.go(args, environment).await?;
        }
        BuildCommand::Python { args } => {
            api.python(args, environment).await?;
        }
        BuildCommand::NodeJs { args } => {
            api.nodejs(args, environment).await?;
        }
        BuildCommand::Command { program, args } => {
            execute_command(program, args, api, environment).await?;
        }
    }
    Ok(())
}
/// Execute a build command with security context
///
/// `Command` invocations (both `sh -c …` shell commands and direct program
/// calls) are validated by the [`SecurityContext`] — and, when provided,
/// against the sps2 config's command allow-list — before running. All other
/// variants are forwarded to [`execute_build_command`] unchanged, since the
/// build-system helpers are already sandboxed.
///
/// # Errors
///
/// Returns an error if security validation rejects the command, the
/// command is disallowed by config, or execution fails.
pub async fn execute_build_command_with_security(
    command: &BuildCommand,
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
    security_context: &mut SecurityContext,
    sps2_config: Option<&sps2_config::Config>,
) -> Result<(), Error> {
    match command {
        BuildCommand::Command { program, args } => {
            // For shell commands, validate through security context
            if program == "sh" && args.len() >= 2 && args[0] == "-c" {
                // This is a shell command; the payload is the `-c` argument.
                let shell_cmd = &args[1];
                // NOTE(review): despite its name, `execute_command` on the
                // security context appears to validate/parse only; the
                // actual run happens below — confirm.
                let execution = security_context.execute_command(shell_cmd)?;
                // Additional config-based validation of every parsed command token
                if let Some(config) = sps2_config {
                    for token in &execution.parsed.tokens {
                        if let crate::validation::parser::Token::Command(cmd) = token {
                            if !config.is_command_allowed(cmd) {
                                return Err(sps2_errors::BuildError::DisallowedCommand {
                                    command: cmd.clone(),
                                }
                                .into());
                            }
                        }
                    }
                }
                // Execute the validated command
                execute_command(program, args, api, environment).await?;
            } else {
                // For direct commands, validate the reconstructed command
                // line, then execute.
                let full_cmd = format!("{} {}", program, args.join(" "));
                security_context.execute_command(&full_cmd)?;
                execute_command(program, args, api, environment).await?;
            }
        }
        // For build system commands, pass through normally (they're already sandboxed)
        _ => execute_build_command(command, api, environment).await?,
    }
    Ok(())
}
/// Execute a post-processing step
///
/// # Errors
///
/// Returns an error if rpath patching, permission fixing, or command
/// execution fails.
pub async fn execute_post_step(
    step: &PostStep,
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
) -> Result<(), Error> {
    match step {
        PostStep::PatchRpaths { style, paths } => {
            api.patch_rpaths(*style, paths, environment).await?;
        }
        PostStep::FixPermissions { paths } => {
            // Synchronous helper — no await.
            api.fix_permissions(paths, environment)?;
        }
        PostStep::Command { program, args } => {
            execute_command(program, args, api, environment).await?;
        }
    }
    Ok(())
}
/// Execute a post-processing step with security context
///
/// Mirrors the validation logic of `execute_build_command_with_security`
/// for `Command` steps; other post steps need no validation and are
/// forwarded to [`execute_post_step`].
///
/// # Errors
///
/// Returns an error if:
/// - Security validation fails for command execution
/// - Command is disallowed by `sps2_config`
/// - Post-processing operation fails (patch rpaths, fix permissions, etc.)
/// - Command execution fails
pub async fn execute_post_step_with_security(
    step: &PostStep,
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
    security_context: &mut SecurityContext,
    sps2_config: Option<&sps2_config::Config>,
) -> Result<(), Error> {
    match step {
        PostStep::Command { program, args } => {
            // Validate command through security context
            if program == "sh" && args.len() >= 2 && args[0] == "-c" {
                let shell_cmd = &args[1];
                // Validate through security context
                let execution = security_context.execute_command(shell_cmd)?;
                // Additional config-based validation of parsed command tokens
                if let Some(config) = sps2_config {
                    for token in &execution.parsed.tokens {
                        if let crate::validation::parser::Token::Command(cmd) = token {
                            if !config.is_command_allowed(cmd) {
                                return Err(sps2_errors::BuildError::DisallowedCommand {
                                    command: cmd.clone(),
                                }
                                .into());
                            }
                        }
                    }
                }
                execute_command(program, args, api, environment).await?;
            } else {
                // Direct command: validate the reconstructed command line.
                let full_cmd = format!("{} {}", program, args.join(" "));
                security_context.execute_command(&full_cmd)?;
                execute_command(program, args, api, environment).await?;
            }
        }
        // Other post steps don't need security validation
        _ => execute_post_step(step, api, environment).await?,
    }
    Ok(())
}
/// Execute an environment step
///
/// # Errors
///
/// Returns an error if setting an environment variable fails; the other
/// variants cannot fail here.
#[allow(dead_code)] // Public API for environment step execution
pub fn execute_environment_step(
    step: &EnvironmentStep,
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
) -> Result<(), Error> {
    match step {
        EnvironmentStep::SetIsolation { level } => {
            api.set_isolation(*level);
        }
        EnvironmentStep::WithDefaults => {
            environment.apply_default_compiler_flags();
        }
        EnvironmentStep::AllowNetwork { enabled } => {
            // NOTE(review): the result of allow_network is deliberately
            // discarded — presumably best-effort; confirm this is intended.
            let _ = api.allow_network(*enabled);
        }
        EnvironmentStep::SetEnv { key, value } => {
            environment.set_env_var(key.clone(), value.clone())?;
        }
    }
    Ok(())
}
/// Run an arbitrary program inside the build environment.
///
/// `make` is routed through the builder API so it gets the usual
/// make-specific handling; everything else is executed directly in the
/// API's working directory.
async fn execute_command(
    program: &str,
    args: &[String],
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
    if program == "make" {
        return api.make(args, environment).await;
    }
    let borrowed_args: Vec<&str> = args.iter().map(String::as_str).collect();
    environment
        .execute_command(program, &borrowed_args, Some(&api.working_dir))
        .await
}
/// Reset the staging and source directories to an empty state.
///
/// Each directory is removed (if present) and recreated. The debug event
/// is emitted only after the corresponding reset has succeeded, so the log
/// cannot claim a cleanup that never happened.
///
/// # Errors
///
/// Returns an error if removing or recreating either directory fails.
async fn cleanup_directories(
    api: &BuilderApi,
    environment: &BuildEnvironment,
) -> Result<(), Error> {
    let staging_dir = environment.staging_dir();
    if staging_dir.exists() {
        fs::remove_dir_all(&staging_dir).await?;
    }
    fs::create_dir_all(&staging_dir).await?;
    // Fix: previously this was reported *before* the reset ran.
    send_event(
        &environment.context,
        AppEvent::General(GeneralEvent::debug(format!(
            "Cleaned staging directory: {}",
            staging_dir.display()
        ))),
    );

    let source_dir = &api.working_dir;
    if source_dir.exists() {
        fs::remove_dir_all(source_dir).await?;
    }
    fs::create_dir_all(source_dir).await?;
    send_event(
        &environment.context,
        AppEvent::General(GeneralEvent::debug(format!(
            "Cleaned source directory: {}",
            source_dir.display()
        ))),
    );
    Ok(())
}
// Note: prefer this security-validating variant over calling
// execute_build_command directly.
/// Execute a list of build commands with security context
///
/// Commands run sequentially; the first failure aborts the remainder.
///
/// # Errors
///
/// Returns an error if any command fails validation or execution.
pub async fn execute_build_commands_list_with_security(
    _context: &BuildContext,
    build_commands: &[BuildCommand],
    api: &mut BuilderApi,
    environment: &mut BuildEnvironment,
    security_context: &mut SecurityContext,
    sps2_config: Option<&sps2_config::Config>,
) -> Result<(), Error> {
    for command in build_commands {
        execute_build_command_with_security(
            command,
            api,
            environment,
            security_context,
            sps2_config,
        )
        .await?;
        // Command completed - duration tracking removed as per architectural decision
    }
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/build.rs | crates/builder/src/stages/build.rs | //! Build stage types and operations
use serde::{Deserialize, Serialize};
/// Build commands that can be executed
///
/// Each variant carries a raw argument list forwarded verbatim to the
/// corresponding build-system helper.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BuildCommand {
    /// Run configure script
    Configure { args: Vec<String> },
    /// Run make
    Make { args: Vec<String> },
    /// Run autotools build
    Autotools { args: Vec<String> },
    /// Run `CMake` build
    Cmake { args: Vec<String> },
    /// Run Meson build
    Meson { args: Vec<String> },
    /// Run Cargo build
    Cargo { args: Vec<String> },
    /// Run Go build
    Go { args: Vec<String> },
    /// Run Python build
    Python { args: Vec<String> },
    /// Run Node.js build
    NodeJs { args: Vec<String> },
    /// Run arbitrary command (subject to security validation at execution)
    Command { program: String, args: Vec<String> },
}
// Note: ParsedBuild is recipe::model::Build
// Note: ParsedStep is recipe::model::ParsedStep
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/mod.rs | crates/builder/src/stages/mod.rs | //! Stage-specific types for the builder
//!
//! This module provides types for each stage of the build process,
//! maintaining a clear separation between parsing and execution.
pub mod build;
pub mod environment;
pub mod executors;
pub mod post;
pub mod source;
// Re-export execution types
pub use build::BuildCommand;
pub use environment::EnvironmentStep;
pub use post::PostStep;
pub use source::SourceStep;
// The executors are used internally by utils/executor.rs
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/stages/post.rs | crates/builder/src/stages/post.rs | //! Post-processing stage types and operations
use serde::{Deserialize, Serialize};
use sps2_types::RpathStyle;
/// Post-processing operations
///
/// Applied to staged build output after the build stage completes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PostStep {
    /// Patch rpaths in binaries
    PatchRpaths {
        /// Rewriting style to apply.
        style: RpathStyle,
        /// Paths to patch.
        paths: Vec<String>,
    },
    /// Fix executable permissions
    FixPermissions {
        /// Paths whose permissions are corrected.
        paths: Vec<String>,
    },
    /// Run arbitrary command in post stage
    Command { program: String, args: Vec<String> },
}
// Note: ParsedPost is recipe::model::Post
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/timeout.rs | crates/builder/src/utils/timeout.rs | //! Timeout utilities for build operations
use sps2_errors::{BuildError, Error};
use std::future::Future;
use std::time::Duration;
/// Execute a future, failing with `BuildError::BuildTimeout` when it does
/// not complete within `timeout_seconds`.
pub async fn with_timeout<T, F>(
    future: F,
    timeout_seconds: u64,
    package_name: &str,
) -> Result<T, Error>
where
    F: Future<Output = Result<T, Error>>,
{
    let limit = Duration::from_secs(timeout_seconds);
    match tokio::time::timeout(limit, future).await {
        // The inner future finished in time; propagate its own result.
        Ok(outcome) => outcome,
        // The deadline elapsed first.
        Err(_elapsed) => Err(BuildError::BuildTimeout {
            package: package_name.to_string(),
            timeout_seconds,
        }
        .into()),
    }
}
/// Execute a future, applying a timeout only when one is configured.
pub async fn with_optional_timeout<T, F>(
    future: F,
    timeout_seconds: Option<u64>,
    package_name: &str,
) -> Result<T, Error>
where
    F: Future<Output = Result<T, Error>>,
{
    match timeout_seconds {
        Some(limit) => with_timeout(future, limit, package_name).await,
        None => future.await,
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/executor.rs | crates/builder/src/utils/executor.rs | //! Staged execution implementation for proper build ordering
use crate::build_plan::{BuildPlan, EnvironmentConfig};
use crate::environment::BuildEnvironment;
use crate::recipe::parser::parse_yaml_recipe;
use crate::security::SecurityContext;
use crate::stages::executors::{
execute_build_commands_list_with_security, execute_post_step_with_security, execute_source_step,
};
use crate::utils::events::send_event;
use crate::yaml::RecipeMetadata;
use crate::{BuildConfig, BuildContext, BuilderApi};
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
use std::collections::HashMap;
use tokio::fs;
/// Execute a build using staged execution model
///
/// Runs the stages in order: recipe parse/plan, environment configuration,
/// source acquisition, build (security-validated), and post-processing
/// (security-validated). Returns the runtime deps, parsed build deps, the
/// recipe metadata, the auto-install flag, and the QA pipeline override.
///
/// # Errors
///
/// Returns an error if recipe parsing, planning, any stage execution, or
/// build-dependency spec parsing fails.
pub async fn execute_staged_build(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &mut BuildEnvironment,
) -> Result<
    (
        Vec<String>,
        Vec<sps2_types::package::PackageSpec>,
        RecipeMetadata,
        bool,
        sps2_types::QaPipelineOverride,
    ),
    Error,
> {
    // Stage 0: Parse and analyze recipe
    let yaml_recipe = parse_yaml_recipe(&context.recipe_path).await?;
    let build_plan = BuildPlan::from_yaml(
        &yaml_recipe,
        &context.recipe_path,
        config.sps2_config.as_ref(),
    )?;
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Recipe analysis completed")),
    );
    // Create security context for the build; build and post stages validate
    // recipe commands against it before execution.
    let build_root = environment.build_prefix().to_path_buf();
    let mut initial_vars = HashMap::new();
    // Add build-specific variables
    initial_vars.insert("NAME".to_string(), context.name.clone());
    initial_vars.insert("VERSION".to_string(), context.version.to_string());
    let mut security_context = SecurityContext::new(build_root, initial_vars);
    // Stage 1: Apply environment configuration
    apply_environment_config(context, environment, &build_plan.environment).await?;
    // Stage 2: Execute source operations
    execute_source_stage(config, context, environment, &build_plan).await?;
    // Stage 3: Execute build operations (with security context)
    execute_build_stage_with_security(
        config,
        context,
        environment,
        &build_plan,
        &mut security_context,
    )
    .await?;
    // Stage 4: Execute post-processing operations (with security context)
    execute_post_stage_with_security(
        config,
        context,
        environment,
        &build_plan,
        &mut security_context,
    )
    .await?;
    // Extract dependencies; build deps are parsed into structured specs,
    // failing the build on the first malformed spec.
    let runtime_deps = build_plan.metadata.runtime_deps.clone();
    let build_deps: Vec<sps2_types::package::PackageSpec> = build_plan
        .metadata
        .build_deps
        .iter()
        .map(|dep| sps2_types::package::PackageSpec::parse(dep))
        .collect::<Result<Vec<_>, _>>()?;
    Ok((
        runtime_deps,
        build_deps,
        build_plan.metadata,
        build_plan.auto_install,
        build_plan.qa_pipeline,
    ))
}
/// Apply environment configuration before any build steps
///
/// Applies, in order: recipe-requested isolation level (only when it differs
/// from the current one), optional compiler defaults, then recipe-provided
/// environment variables.
///
/// # Errors
///
/// Propagates failures from isolation application/verification or from
/// setting environment variables.
async fn apply_environment_config(
    context: &BuildContext,
    environment: &mut BuildEnvironment,
    config: &EnvironmentConfig,
) -> Result<(), Error> {
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Configuring build environment")),
    );
    // Only touch isolation when the recipe asks for something different.
    if config.isolation != environment.isolation_level() {
        send_event(
            context,
            AppEvent::General(GeneralEvent::debug(format!(
                "Applying isolation level {} from recipe",
                config.isolation
            ))),
        );
        environment.set_isolation_level_from_recipe(config.isolation);
        environment
            .apply_isolation_level(config.isolation, config.network, context.event_sender.as_ref())
            .await?;
        // Verification is skipped when isolation is fully disabled.
        if !matches!(config.isolation, crate::environment::IsolationLevel::None) {
            environment.verify_isolation()?;
        }
    }
    // Optional compiler defaults requested by the recipe.
    if config.defaults {
        send_event(
            context,
            AppEvent::General(GeneralEvent::debug("Applying compiler defaults")),
        );
        environment.apply_default_compiler_flags();
    }
    // Recipe-provided environment variables, applied last so they win.
    for (key, value) in &config.variables {
        environment.set_env_var(key.clone(), value.clone())?;
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Environment configuration complete")),
    );
    Ok(())
}
/// Execute source acquisition stage
///
/// Creates `<build-prefix>/src`, runs an implicit staging cleanup, then
/// executes every source step from the plan in order. A recipe with no
/// source steps is a no-op.
///
/// # Errors
///
/// Propagates directory-creation, API-construction, and step-execution
/// failures; execution stops at the first error.
async fn execute_source_stage(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &mut BuildEnvironment,
    build_plan: &BuildPlan,
) -> Result<(), Error> {
    // Nothing to do when the recipe declares no source steps.
    if build_plan.source_steps.is_empty() {
        return Ok(());
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Acquiring sources")),
    );
    // All source work happens under <build-prefix>/src.
    let src_dir = environment.build_prefix().join("src");
    fs::create_dir_all(&src_dir).await?;
    let mut api = BuilderApi::new(src_dir, config.resources.clone())?;
    // Fetching sources always requires network access.
    let _ = api.allow_network(true);
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Cleaning staging area")),
    );
    // Staging cleanup runs as an implicit first source step.
    execute_source_step(&crate::stages::SourceStep::Cleanup, &mut api, environment).await?;
    for step in &build_plan.source_steps {
        execute_source_step(step, &mut api, environment).await?;
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Source acquisition completed")),
    );
    Ok(())
}
/// Execute build stage with security context
///
/// Runs the plan's build steps inside `<build-prefix>/src`, bounded by the
/// configured build timeout, then copies any build-system metadata from the
/// API back into the environment. A plan with no build steps is a no-op.
///
/// # Errors
///
/// Propagates API-construction failures, step errors, and timeout expiry.
async fn execute_build_stage_with_security(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &mut BuildEnvironment,
    build_plan: &BuildPlan,
    security_context: &mut SecurityContext,
) -> Result<(), Error> {
    if build_plan.build_steps.is_empty() {
        return Ok(());
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Building package")),
    );
    // Builds run in the same src directory the source stage populated.
    let src_dir = environment.build_prefix().join("src");
    security_context.set_current_dir(src_dir.clone());
    let mut api = BuilderApi::new(src_dir, config.resources.clone())?;
    // Network availability during the build comes from the recipe's
    // environment block.
    let _ = api.allow_network(build_plan.environment.network);
    // Run the whole command list under an optional wall-clock timeout.
    crate::utils::timeout::with_optional_timeout(
        execute_build_commands_list_with_security(
            context,
            &build_plan.build_steps,
            &mut api,
            environment,
            security_context,
            config.sps2_config.as_ref(),
        ),
        config.max_build_time(),
        &context.name,
    )
    .await?;
    // Propagate metadata recorded by build systems (e.g. Python wheel path).
    for (key, value) in api.build_metadata() {
        environment.set_build_metadata(key.clone(), value.clone());
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Package build completed")),
    );
    Ok(())
}
/// Execute post-processing stage with security context
///
/// Runs the plan's post steps sequentially inside `<build-prefix>/src`.
/// A plan with no post steps is a no-op.
///
/// # Errors
///
/// Propagates API-construction and step-execution failures; execution stops
/// at the first error.
async fn execute_post_stage_with_security(
    config: &BuildConfig,
    context: &BuildContext,
    environment: &mut BuildEnvironment,
    build_plan: &BuildPlan,
    security_context: &mut SecurityContext,
) -> Result<(), Error> {
    if build_plan.post_steps.is_empty() {
        return Ok(());
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Post-processing pipeline")),
    );
    // Post-processing operates on the same src tree as the build stage.
    let src_dir = environment.build_prefix().join("src");
    security_context.set_current_dir(src_dir.clone());
    let mut api = BuilderApi::new(src_dir, config.resources.clone())?;
    for step in &build_plan.post_steps {
        execute_post_step_with_security(
            step,
            &mut api,
            environment,
            security_context,
            config.sps2_config.as_ref(),
        )
        .await?;
    }
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Post-processing completed")),
    );
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/mod.rs | crates/builder/src/utils/mod.rs | //! Utility modules for the builder crate
pub mod events;
pub mod executor;
pub mod fileops;
pub mod format;
pub mod timeout;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/format.rs | crates/builder/src/utils/format.rs | /// File size and formatting utilities
use sps2_errors::{BuildError, Error};
use std::path::Path;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
/// Information about detected compression format
///
/// Produced by [`detect_compression_format`].
#[derive(Clone, Debug, PartialEq)]
pub struct CompressionFormatInfo {
    /// Estimated total compressed size
    /// (currently the whole file size as reported by filesystem metadata)
    pub compressed_size: u64,
}
/// zstd magic number (4 bytes): 0x28B52FFD
/// (on-disk byte order of the zstd frame magic 0xFD2FB528, stored little-endian)
const ZSTD_MAGIC: [u8; 4] = [0x28, 0xB5, 0x2F, 0xFD];
/// Detect the compression format of a .sp package file
///
/// # Errors
///
/// Returns an error if:
/// - The file cannot be opened or read
/// - The file is not a valid zstd-compressed package
/// - I/O operations fail during scanning
pub async fn detect_compression_format(file_path: &Path) -> Result<CompressionFormatInfo, Error> {
    let mut file = File::open(file_path).await?;
    let compressed_size = file.metadata().await?.len();
    // A valid package must begin with the 4-byte zstd frame magic.
    let mut magic_bytes = [0u8; 4];
    file.read_exact(&mut magic_bytes).await?;
    if magic_bytes == ZSTD_MAGIC {
        Ok(CompressionFormatInfo { compressed_size })
    } else {
        Err(BuildError::Failed {
            message: format!(
                "Invalid package format: expected zstd magic bytes, got {magic_bytes:?}"
            ),
        }
        .into())
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/events.rs | crates/builder/src/utils/events.rs | //! Event emission utilities for build operations
use crate::BuildContext;
use sps2_events::{AppEvent, EventEmitter};
/// Send event if context has event sender
///
/// Thin wrapper over [`EventEmitter::emit`] on the context; presumably a
/// context without a sender drops the event silently — confirm in
/// `sps2_events`.
pub fn send_event(context: &BuildContext, event: AppEvent) {
    context.emit(event);
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/utils/fileops.rs | crates/builder/src/utils/fileops.rs | //! File system operations for build processes
use sps2_errors::Error;
use std::path::Path;
use tokio::fs;
/// Recursively copy directory contents
///
/// Creates `dst` (including missing parents), then walks `src` one level at a
/// time, recursing into sub-directories and copying everything else with
/// `tokio::fs::copy`.
///
/// # Errors
///
/// Returns an error if a directory cannot be read/created or a copy fails;
/// the walk stops at the first error.
pub fn copy_directory_recursive<'a>(
    src: &'a Path,
    dst: &'a Path,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<(), Error>> + Send + 'a>> {
    // Boxed future: async recursion needs an indirection point.
    Box::pin(async move {
        fs::create_dir_all(dst).await?;
        let mut entries = fs::read_dir(src).await?;
        while let Some(entry) = entries.next_entry().await? {
            let entry_path = entry.path();
            let dst_path = dst.join(entry.file_name());
            // Fix: use async `fs::metadata` instead of the blocking
            // `Path::is_dir()`, which performed a synchronous stat() on the
            // async executor thread. `metadata` follows symlinks, matching
            // the previous `is_dir` semantics.
            if fs::metadata(&entry_path).await?.is_dir() {
                copy_directory_recursive(&entry_path, &dst_path).await?;
            } else {
                fs::copy(&entry_path, &dst_path).await?;
            }
        }
        Ok(())
    })
}
/// Recursively copy directory contents while stripping the opt/pm/live prefix
///
/// If `src` contains an `opt/pm/live` directory its contents are copied
/// directly into `dst` (prefix stripped); otherwise `src` is copied as-is.
///
/// # Errors
///
/// Returns an error if directory creation or the recursive copy fails.
pub fn copy_directory_strip_live_prefix<'a>(
    src: &'a Path,
    dst: &'a Path,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<(), Error>> + Send + 'a>> {
    Box::pin(async move {
        fs::create_dir_all(dst).await?;
        // Look for opt/pm/live subdirectory in the staging directory
        let live_prefix_path = src.join("opt").join("pm").join("live");
        // Fix: async `fs::metadata` replaces the blocking `exists()` +
        // `is_dir()` pair (two synchronous stat() calls on the executor
        // thread). Any metadata error (e.g. path missing) selects the
        // fallback branch, matching the old `exists() && is_dir()` result.
        let has_live_prefix = fs::metadata(&live_prefix_path)
            .await
            .map(|m| m.is_dir())
            .unwrap_or(false);
        if has_live_prefix {
            // Copy contents of opt/pm/live directly to dst, stripping the prefix
            copy_directory_recursive(&live_prefix_path, dst).await?;
        } else {
            // Fallback: copy everything as-is if no opt/pm/live structure found
            copy_directory_recursive(src, dst).await?;
        }
        Ok(())
    })
}
/// Copy source files from recipe directory to working directory (excluding .star files)
///
/// Directories are copied recursively; regular files are copied unless their
/// extension is `star`. A debug event is emitted for each item copied.
///
/// # Errors
///
/// Returns an error if the recipe directory cannot be read or any copy fails.
pub async fn copy_source_files(
    recipe_dir: &Path,
    working_dir: &Path,
    context: &crate::BuildContext,
) -> Result<(), Error> {
    use crate::utils::events::send_event;
    use sps2_events::{AppEvent, GeneralEvent};
    // Fix: the previous message ("Cleaning up temporary files") described a
    // different operation entirely; this function copies recipe sources.
    send_event(
        context,
        AppEvent::General(GeneralEvent::debug("Copying source files")),
    );
    let mut entries = fs::read_dir(recipe_dir).await?;
    while let Some(entry) = entries.next_entry().await? {
        let entry_path = entry.path();
        let file_name = entry.file_name();
        let dest_path = working_dir.join(&file_name);
        // Fix: async `fs::metadata` replaces the blocking `Path::is_dir()`
        // stat on the executor thread; `metadata` follows symlinks like
        // `is_dir` did.
        if fs::metadata(&entry_path).await?.is_dir() {
            // Recursively copy directories
            copy_directory_recursive(&entry_path, &dest_path).await?;
            send_event(
                context,
                AppEvent::General(GeneralEvent::debug(format!(
                    "Copied directory {} to {}",
                    file_name.to_string_lossy(),
                    dest_path.display()
                ))),
            );
        } else if entry_path.extension().is_none_or(|ext| ext != "star") {
            // Copy files except .star files (recipe scripts are not sources)
            fs::copy(&entry_path, &dest_path).await?;
            send_event(
                context,
                AppEvent::General(GeneralEvent::debug(format!(
                    "Copied {} to {}",
                    file_name.to_string_lossy(),
                    dest_path.display()
                ))),
            );
        }
    }
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/yaml/recipe.rs | crates/builder/src/yaml/recipe.rs | //! Recipe data structures
use serde::{Deserialize, Serialize};
use sps2_types::RpathStyle;
/// Recipe metadata collected from `metadata()` function
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct RecipeMetadata {
    /// Package name
    pub name: String,
    /// Package version string
    pub version: String,
    /// Optional human-readable description
    pub description: Option<String>,
    /// Optional upstream homepage URL
    pub homepage: Option<String>,
    /// Optional license identifier
    pub license: Option<String>,
    /// Runtime dependency specifications (parsed downstream into `PackageSpec`s)
    pub runtime_deps: Vec<String>,
    /// Build-time dependency specifications (parsed downstream into `PackageSpec`s)
    pub build_deps: Vec<String>,
}
/// A build step from the `build()` function
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BuildStep {
    /// Fetch a source from `url` (no checksum field).
    Fetch {
        url: String,
    },
    /// Fetch from `url`; carries an MD5 digest string (presumably used for
    /// verification — confirm in the step executor).
    FetchMd5 {
        url: String,
        md5: String,
    },
    /// Fetch from `url`; carries a SHA-256 digest string.
    FetchSha256 {
        url: String,
        sha256: String,
    },
    /// Fetch from `url`; carries a BLAKE3 digest string.
    FetchBlake3 {
        url: String,
        blake3: String,
    },
    /// Extraction step with no parameters (archive location is implicit).
    Extract,
    /// Git checkout of `url` at reference `ref_`.
    Git {
        url: String,
        ref_: String,
    },
    /// Apply the patch file at `path`.
    ApplyPatch {
        path: String,
    },
    /// Toggle network access for subsequent steps.
    AllowNetwork {
        enabled: bool,
    },
    /// `configure`-style step with the given arguments.
    Configure {
        args: Vec<String>,
    },
    /// `make`-style step with the given arguments.
    Make {
        args: Vec<String>,
    },
    /// Autotools build-system step with the given arguments.
    Autotools {
        args: Vec<String>,
    },
    /// CMake build-system step with the given arguments.
    Cmake {
        args: Vec<String>,
    },
    /// Meson build-system step with the given arguments.
    Meson {
        args: Vec<String>,
    },
    /// Cargo (Rust) build-system step with the given arguments.
    Cargo {
        args: Vec<String>,
    },
    /// Go build-system step with the given arguments.
    Go {
        args: Vec<String>,
    },
    /// Python build-system step with the given arguments.
    Python {
        args: Vec<String>,
    },
    /// Node.js build-system step with the given arguments.
    NodeJs {
        args: Vec<String>,
    },
    /// Run an arbitrary `program` with `args`.
    Command {
        program: String,
        args: Vec<String>,
    },
    /// Set a build environment variable.
    SetEnv {
        key: String,
        value: String,
    },
    /// Apply default settings (see `apply_default_compiler_flags` — confirm
    /// mapping in the executor).
    WithDefaults,
    /// Installation step with no parameters.
    Install,
    /// Cleanup staging directory
    Cleanup,
    /// Copy source files
    Copy {
        src_path: Option<String>,
    },
    /// Apply rpath patching to binaries and libraries
    PatchRpaths {
        style: RpathStyle,
        paths: Vec<String>,
    },
    /// Fix executable permissions on binaries
    FixPermissions {
        paths: Vec<String>,
    },
    /// Set build isolation level
    SetIsolation {
        level: u8,
    },
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/yaml/mod.rs | crates/builder/src/yaml/mod.rs | //! YAML recipe handling
//!
//! This module provides YAML-based recipe format for build recipes,
//! using a declarative, staged approach for package building.
mod recipe;
pub use recipe::{BuildStep, RecipeMetadata};
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/variables.rs | crates/builder/src/environment/variables.rs | //! Environment variable setup and isolation
use super::core::BuildEnvironment;
use std::collections::HashMap;
impl BuildEnvironment {
    /// Get a summary of the build environment for debugging
    ///
    /// Returns the build/staging paths, package identity, and a fixed set of
    /// important environment variables (PATH, PKG_CONFIG_PATH,
    /// CMAKE_PREFIX_PATH, CFLAGS, LDFLAGS) when they are present.
    #[must_use]
    pub fn environment_summary(&self) -> HashMap<String, String> {
        let mut summary = HashMap::new();
        summary.insert(
            "build_prefix".to_string(),
            self.build_prefix.display().to_string(),
        );
        summary.insert(
            "staging_dir".to_string(),
            self.staging_dir.display().to_string(),
        );
        summary.insert("package_name".to_string(), self.context.name.clone());
        summary.insert(
            "package_version".to_string(),
            self.context.version.to_string(),
        );
        // Add key environment variables
        for key in &[
            "PATH",
            "PKG_CONFIG_PATH",
            "CMAKE_PREFIX_PATH",
            "CFLAGS",
            "LDFLAGS",
        ] {
            if let Some(value) = self.env_vars.get(*key) {
                summary.insert((*key).to_string(), value.clone());
            }
        }
        summary
    }
    /// Setup base environment variables for isolated build
    ///
    /// Order matters: the environment is scrubbed first, then the build
    /// variables (DESTDIR, PREFIX, JOBS, compiler flags, …) are layered on
    /// top of the clean slate.
    pub(crate) fn setup_environment(&mut self) {
        // Clear potentially harmful environment variables for clean build
        self.setup_clean_environment();
        // Add staging dir to environment (standard autotools DESTDIR)
        self.env_vars.insert(
            "DESTDIR".to_string(),
            self.staging_dir.display().to_string(),
        );
        // Set build prefix to final installation location (not staging dir)
        self.env_vars.insert(
            "PREFIX".to_string(),
            sps2_config::fixed_paths::LIVE_DIR.to_string(),
        );
        // Set BUILD_PREFIX to package-specific prefix (e.g. /hello-1.0.0)
        // This is used for staging directory structure, not for build system --prefix arguments
        // Build systems now use LIVE_PREFIX for --prefix arguments
        self.env_vars.insert(
            "BUILD_PREFIX".to_string(),
            format!("/{}-{}", self.context.name, self.context.version),
        );
        // Number of parallel jobs
        self.env_vars
            .insert("JOBS".to_string(), Self::cpu_count().to_string());
        self.env_vars
            .insert("MAKEFLAGS".to_string(), format!("-j{}", Self::cpu_count()));
        // Compiler flags pointing to /opt/pm/live where dependencies are installed
        let live_include = &format!("{}/include", sps2_config::fixed_paths::LIVE_DIR);
        let live_lib = &format!("{}/lib", sps2_config::fixed_paths::LIVE_DIR);
        self.env_vars
            .insert("CFLAGS".to_string(), format!("-I{live_include}"));
        self.env_vars
            .insert("CPPFLAGS".to_string(), format!("-I{live_include}"));
        // Base LDFLAGS with headerpad for macOS
        let mut ldflags = format!("-L{live_lib}");
        if cfg!(target_os = "macos") {
            ldflags.push_str(" -headerpad_max_install_names");
        }
        self.env_vars.insert("LDFLAGS".to_string(), ldflags);
        // Prevent system library contamination
        // LIBRARY_PATH is used by compiler/linker at build time
        self.env_vars
            .insert("LIBRARY_PATH".to_string(), live_lib.to_string());
        // Note: We don't set LD_LIBRARY_PATH or DYLD_LIBRARY_PATH as they're
        // considered dangerous for isolation and are runtime variables, not build-time
        // macOS specific settings - targeting Apple Silicon Macs (macOS 12.0+)
        self.env_vars
            .insert("MACOSX_DEPLOYMENT_TARGET".to_string(), "12.0".to_string());
    }
    /// Setup a clean environment by removing potentially harmful variables
    ///
    /// PATH is rebuilt from scratch (system dirs + sps2 bin dir); the other
    /// essential variables are copied from the host environment verbatim.
    fn setup_clean_environment(&mut self) {
        // Keep only essential environment variables
        let essential_vars = vec![
            "PATH", "HOME", "USER", "SHELL", "TERM", "LANG", "LC_ALL", "TMPDIR", "TMP", "TEMP",
        ];
        // Start with a minimal PATH containing only system essentials
        // Then add /opt/pm/live/bin for sps2-installed tools
        let path_components = [
            "/usr/bin",
            "/bin",
            "/usr/sbin",
            "/sbin",
            sps2_config::fixed_paths::BIN_DIR,
        ];
        self.env_vars
            .insert("PATH".to_string(), path_components.join(":"));
        // Copy essential variables from host environment (except PATH)
        for var in essential_vars {
            if var != "PATH" {
                if let Ok(value) = std::env::var(var) {
                    self.env_vars.insert(var.to_string(), value);
                }
            }
        }
        // Clear potentially problematic variables
        self.env_vars.remove("CFLAGS");
        self.env_vars.remove("CPPFLAGS");
        self.env_vars.remove("LDFLAGS");
        self.env_vars.remove("PKG_CONFIG_PATH");
        self.env_vars.remove("LIBRARY_PATH");
        self.env_vars.remove("LD_LIBRARY_PATH");
        self.env_vars.remove("DYLD_LIBRARY_PATH");
        self.env_vars.remove("CMAKE_PREFIX_PATH");
        self.env_vars.remove("ACLOCAL_PATH");
    }
    /// Setup environment for build dependencies
    pub(crate) fn setup_build_deps_environment(&mut self) {
        // Since dependencies are installed in /opt/pm/live, we just need to ensure
        // the paths are set correctly. PATH already includes /opt/pm/live/bin
        // PKG_CONFIG_PATH for dependency discovery
        self.env_vars.insert(
            "PKG_CONFIG_PATH".to_string(),
            format!("{}/lib/pkgconfig", sps2_config::fixed_paths::LIVE_DIR),
        );
        // CMAKE_PREFIX_PATH for CMake-based builds
        self.env_vars.insert(
            "CMAKE_PREFIX_PATH".to_string(),
            sps2_config::fixed_paths::LIVE_DIR.to_string(),
        );
        // Autotools-specific paths
        self.env_vars.insert(
            "ACLOCAL_PATH".to_string(),
            format!("{}/share/aclocal", sps2_config::fixed_paths::LIVE_DIR),
        );
        // CFLAGS, LDFLAGS, and LIBRARY_PATH are already set to /opt/pm/live in setup_environment()
    }
    /// Apply default compiler flags for optimization and security
    ///
    /// This method sets recommended compiler flags for macOS ARM64 builds.
    /// It preserves existing flags while adding optimizations.
    /// Does NOT modify dependency paths - those are handled separately.
    ///
    /// NOTE(review): `-mtune=native` ties output to the build machine's CPU,
    /// which may hurt build reproducibility — confirm this is intentional.
    pub fn apply_default_compiler_flags(&mut self) {
        // Mark that with_defaults() was called
        self.with_defaults_called = true;
        // Detect target architecture
        let arch = std::env::consts::ARCH;
        let is_arm64 = arch == "aarch64";
        let is_macos = cfg!(target_os = "macos");
        // Base C/C++ optimization flags
        let mut base_cflags = vec![
            "-O2",                      // Standard optimization level
            "-pipe",                    // Use pipes instead of temp files
            "-fstack-protector-strong", // Stack protection for security
        ];
        // Architecture-specific optimizations for Apple Silicon
        if is_arm64 && is_macos {
            // Use apple-m1 as a baseline for all Apple Silicon
            // This is compatible with M1, M2, M3, and newer
            base_cflags.extend(&[
                "-mcpu=apple-m1", // Target Apple Silicon baseline
                "-mtune=native",  // Tune for the build machine
            ]);
        }
        // Merge C flags with existing ones
        self.merge_compiler_flags("CFLAGS", &base_cflags);
        self.merge_compiler_flags("CXXFLAGS", &base_cflags);
        // Linker flags for macOS
        if is_macos {
            let linker_flags = vec![
                "-Wl,-dead_strip",              // Remove unused code
                "-headerpad_max_install_names", // Reserve space for install name changes
            ];
            self.merge_compiler_flags("LDFLAGS", &linker_flags);
        }
        // Rust-specific optimizations
        if is_arm64 && is_macos {
            // Set RUSTFLAGS for cargo builds; new flags are prepended so an
            // existing user value can still override them.
            let rust_flags = ["-C", "target-cpu=apple-m1", "-C", "opt-level=2"];
            let rust_flags_str = rust_flags.join(" ");
            if let Some(existing) = self.env_vars.get("RUSTFLAGS") {
                if existing.is_empty() {
                    self.env_vars
                        .insert("RUSTFLAGS".to_string(), rust_flags_str);
                } else {
                    self.env_vars.insert(
                        "RUSTFLAGS".to_string(),
                        format!("{rust_flags_str} {existing}"),
                    );
                }
            } else {
                self.env_vars
                    .insert("RUSTFLAGS".to_string(), rust_flags_str);
            }
        }
        // Go-specific optimizations
        if is_arm64 && is_macos {
            // CGO flags inherit from CFLAGS/LDFLAGS automatically
            // but we can set explicit Go flags
            self.env_vars
                .insert("GOFLAGS".to_string(), "-buildmode=pie".to_string());
        }
        // Python-specific architecture flag
        if is_arm64 && is_macos {
            self.env_vars
                .insert("ARCHFLAGS".to_string(), "-arch arm64".to_string());
        }
        // CMake-specific variables (will be picked up by CMake build system)
        if is_arm64 && is_macos {
            self.env_vars
                .insert("CMAKE_OSX_ARCHITECTURES".to_string(), "arm64".to_string());
        }
        // Note: CMAKE_INSTALL_NAME_DIR is now handled by the CMake build system
        // as a command-line argument when with_defaults() is used
    }
    /// Helper to merge compiler flags without duplicating
    ///
    /// New flags are prepended so flags already present (user-supplied) take
    /// precedence, since later flags generally win for C compilers.
    fn merge_compiler_flags(&mut self, var_name: &str, new_flags: &[&str]) {
        let existing = self.env_vars.get(var_name).cloned().unwrap_or_default();
        // Convert new flags to string
        let new_flags_str = new_flags.join(" ");
        // Merge with existing flags
        let merged = if existing.is_empty() {
            new_flags_str
        } else {
            // Prepend optimization flags so user flags can override
            format!("{new_flags_str} {existing}")
        };
        self.env_vars.insert(var_name.to_string(), merged);
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/core.rs | crates/builder/src/environment/core.rs | //! Core `BuildEnvironment` struct and construction
use crate::BuildContext;
use sps2_errors::Error;
use sps2_events::{AppEvent, EventEmitter, EventSender, GeneralEvent};
use sps2_install::Installer;
use sps2_net::NetClient;
use sps2_resolver::Resolver;
use sps2_store::PackageStore;
use sps2_types::Version;
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
/// Live prefix where packages are installed at runtime
/// (alias for `sps2_config::fixed_paths::LIVE_DIR`)
pub const LIVE_PREFIX: &str = sps2_config::fixed_paths::LIVE_DIR;
/// Build environment for isolated package building
///
/// Holds the build/staging paths, the accumulated environment variables,
/// optional service handles (resolver/store/installer/net), and bookkeeping
/// collected during the build. `Clone` is derived, so snapshots are possible.
#[derive(Clone, Debug)]
pub struct BuildEnvironment {
    /// Build context
    pub(crate) context: BuildContext,
    /// Build prefix directory
    pub(crate) build_prefix: PathBuf,
    /// Staging directory for installation
    pub(crate) staging_dir: PathBuf,
    /// Environment variables
    pub(crate) env_vars: HashMap<String, String>,
    /// Build metadata from build systems (e.g. Python wheel path)
    pub(crate) build_metadata: HashMap<String, String>,
    /// Resolver for dependencies
    pub(crate) resolver: Option<Resolver>,
    /// Package store for build dependencies
    pub(crate) store: Option<PackageStore>,
    /// Installer for build dependencies
    pub(crate) installer: Option<Installer>,
    /// Network client for downloads
    pub(crate) net: Option<NetClient>,
    /// Whether `with_defaults()` was called (for optimized builds)
    pub(crate) with_defaults_called: bool,
    /// Build systems used during the build process
    pub(crate) used_build_systems: HashSet<String>,
    /// Fix permissions requests (None if not requested, Some(paths) if requested)
    pub(crate) fix_permissions_request: Option<Vec<String>>,
    /// Current isolation level
    pub(crate) isolation_level: crate::environment::IsolationLevel,
}
// Events raised by the environment are routed through the build context.
impl EventEmitter for BuildEnvironment {
    /// Delegate to the build context's event sender (may be `None`).
    fn event_sender(&self) -> Option<&EventSender> {
        self.context.event_sender()
    }
}
impl BuildEnvironment {
    /// Create new build environment
    ///
    /// Derives the per-package build prefix under `build_root`, sets the
    /// staging dir to `<prefix>/stage`, and seeds PREFIX/DESTDIR/JOBS.
    ///
    /// # Errors
    ///
    /// Returns an error if the build environment cannot be initialized.
    pub fn new(context: BuildContext, build_root: &Path) -> Result<Self, Error> {
        let build_prefix = Self::get_build_prefix_path(build_root, &context.name, &context.version);
        let staging_dir = build_prefix.join("stage");
        let mut env_vars = HashMap::new();
        env_vars.insert(
            "PREFIX".to_string(),
            sps2_config::fixed_paths::LIVE_DIR.to_string(),
        );
        env_vars.insert("DESTDIR".to_string(), staging_dir.display().to_string());
        env_vars.insert("JOBS".to_string(), Self::cpu_count().to_string());
        Ok(Self {
            context,
            build_prefix,
            staging_dir,
            env_vars,
            build_metadata: HashMap::new(),
            resolver: None,
            store: None,
            installer: None,
            net: None,
            with_defaults_called: false,
            used_build_systems: HashSet::new(),
            fix_permissions_request: None,
            isolation_level: crate::environment::IsolationLevel::default(),
        })
    }
    /// Set resolver for dependency management
    #[must_use]
    pub fn with_resolver(mut self, resolver: Resolver) -> Self {
        self.resolver = Some(resolver);
        self
    }
    /// Set package store for build dependencies
    #[must_use]
    pub fn with_store(mut self, store: PackageStore) -> Self {
        self.store = Some(store);
        self
    }
    /// Set installer for build dependencies
    #[must_use]
    pub fn with_installer(mut self, installer: Installer) -> Self {
        self.installer = Some(installer);
        self
    }
    /// Set network client for downloads
    #[must_use]
    pub fn with_net(mut self, net: NetClient) -> Self {
        self.net = Some(net);
        self
    }
    /// Get staging directory
    #[must_use]
    pub fn staging_dir(&self) -> &Path {
        &self.staging_dir
    }
    /// Set staging directory
    pub fn set_staging_dir(&mut self, path: PathBuf) {
        self.staging_dir = path;
    }
    /// Get build context
    #[must_use]
    pub fn context(&self) -> &BuildContext {
        &self.context
    }
    /// Get build prefix
    #[must_use]
    pub fn build_prefix(&self) -> &Path {
        &self.build_prefix
    }
    /// Get `BUILD_PREFIX` environment variable value (package-specific prefix)
    #[must_use]
    pub fn get_build_prefix(&self) -> String {
        format!("/{}-{}", self.context.name, self.context.version)
    }
    /// Get the live prefix where packages are installed at runtime
    #[must_use]
    pub fn get_live_prefix(&self) -> &'static str {
        LIVE_PREFIX
    }
    /// Get environment variables
    #[must_use]
    pub fn env_vars(&self) -> &HashMap<String, String> {
        &self.env_vars
    }
    /// Set environment variable
    ///
    /// # Errors
    ///
    /// Currently infallible, but returns Result for future compatibility.
    pub fn set_env_var(&mut self, key: String, value: String) -> Result<(), Error> {
        self.env_vars.insert(key, value);
        Ok(())
    }
    /// Get the package path from the build context
    #[must_use]
    pub fn package_path(&self) -> Option<&Path> {
        self.context.package_path.as_deref()
    }
    /// Get the output path where the package will be created
    #[must_use]
    pub fn package_output_path(&self) -> PathBuf {
        self.context.output_path()
    }
    /// Check if this is a Python package based on build metadata
    #[must_use]
    pub fn is_python_package(&self) -> bool {
        self.build_metadata.contains_key("PYTHON_WHEEL_PATH")
            || self.build_metadata.contains_key("PYTHON_BUILD_BACKEND")
    }
    /// Get extra environment variable (checks `build_metadata` first, then `env_vars`)
    #[must_use]
    pub fn get_extra_env(&self, key: &str) -> Option<String> {
        self.build_metadata
            .get(key)
            .cloned()
            .or_else(|| self.env_vars.get(key).cloned())
    }
    /// Set build metadata
    pub fn set_build_metadata(&mut self, key: String, value: String) {
        self.build_metadata.insert(key, value);
    }
    /// Get all build metadata
    #[must_use]
    pub fn build_metadata(&self) -> &HashMap<String, String> {
        &self.build_metadata
    }
    /// Record that a build system was used during the build
    pub fn record_build_system(&mut self, build_system: &str) {
        self.used_build_systems.insert(build_system.to_string());
    }
    /// Get all build systems used during the build
    #[must_use]
    pub fn used_build_systems(&self) -> &HashSet<String> {
        &self.used_build_systems
    }
    /// Get package name
    #[must_use]
    pub fn package_name(&self) -> &str {
        &self.context.name
    }
    /// Record that `fix_permissions` was requested
    pub fn record_fix_permissions_request(&mut self, paths: Vec<String>) {
        // If already requested, merge the paths
        if let Some(existing_paths) = &mut self.fix_permissions_request {
            existing_paths.extend(paths);
        } else {
            self.fix_permissions_request = Some(paths);
        }
    }
    /// Set isolation level from recipe
    pub fn set_isolation_level_from_recipe(&mut self, level: crate::environment::IsolationLevel) {
        self.isolation_level = level;
    }
    /// Get current isolation level
    #[must_use]
    pub fn isolation_level(&self) -> crate::environment::IsolationLevel {
        self.isolation_level
    }
    /// Get build prefix path for package: `<build_root>/<name>/<version>`
    #[must_use]
    pub(crate) fn get_build_prefix_path(
        build_root: &Path,
        name: &str,
        version: &Version,
    ) -> PathBuf {
        build_root.join(name).join(version.to_string())
    }
    /// Get CPU count for parallel builds
    #[must_use]
    pub(crate) fn cpu_count() -> usize {
        // Use 75% of available cores (rounded up, minimum 1) per specification
        let cores = num_cpus::get();
        let target = cores.saturating_mul(3).saturating_add(3) / 4; // ceil(0.75 * cores)
        std::cmp::max(1, target)
    }
    /// Apply isolation level to the environment
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Failed to apply network isolation settings
    /// - Failed to configure hermetic environment
    /// - Environment configuration is invalid
    pub async fn apply_isolation_level(
        &mut self,
        level: crate::environment::IsolationLevel,
        allow_network: bool,
        event_sender: Option<&sps2_events::EventSender>,
    ) -> Result<(), Error> {
        use super::hermetic::{self, HermeticConfig};
        use crate::environment::IsolationLevel;
        // Update current isolation level
        self.isolation_level = level;
        match level {
            IsolationLevel::None => {
                // No isolation - warn the user
                if let Some(sender) = event_sender {
                    sender.emit(AppEvent::General(GeneralEvent::warning(
                        "[WARNING] BUILD ISOLATION DISABLED! This may lead to non-reproducible builds and potential security risks."
                    )));
                }
                // Use host environment as-is
                self.env_vars = std::env::vars().collect();
                // But still set critical build variables
                self.env_vars.insert(
                    "DESTDIR".to_string(),
                    self.staging_dir().display().to_string(),
                );
                self.env_vars.insert(
                    "PREFIX".to_string(),
                    sps2_config::fixed_paths::LIVE_DIR.to_string(),
                );
                self.env_vars
                    .insert("JOBS".to_string(), Self::cpu_count().to_string());
                self.env_vars
                    .insert("MAKEFLAGS".to_string(), format!("-j{}", Self::cpu_count()));
            }
            IsolationLevel::Default => {
                // Standard isolation is already configured in initialize();
                // here we only enforce the network policy.
                // Fix: the previous code stripped proxy variables only when
                // lowercase `http_proxy` was present, letting uppercase-only
                // proxy settings leak through. `remove` on an absent key is a
                // no-op, so no guard is needed.
                if !allow_network {
                    for var in [
                        "http_proxy",
                        "https_proxy",
                        "ftp_proxy",
                        "all_proxy",
                        "HTTP_PROXY",
                        "HTTPS_PROXY",
                        "FTP_PROXY",
                        "ALL_PROXY",
                    ] {
                        self.env_vars.remove(var);
                    }
                }
            }
            IsolationLevel::Enhanced => {
                // Standard + private HOME/TMPDIR
                // First ensure standard isolation is applied
                // (already done in initialize())
                // Setup private HOME
                let private_home = hermetic::setup_private_home(&self.build_prefix).await?;
                self.env_vars
                    .insert("HOME".to_string(), private_home.display().to_string());
                // Setup private TMPDIR (TMPDIR/TEMP/TMP all point at it)
                let private_tmp = hermetic::setup_private_tmpdir(&self.build_prefix).await?;
                self.env_vars
                    .insert("TMPDIR".to_string(), private_tmp.display().to_string());
                self.env_vars
                    .insert("TEMP".to_string(), private_tmp.display().to_string());
                self.env_vars
                    .insert("TMP".to_string(), private_tmp.display().to_string());
                // Apply network restrictions if needed
                if !allow_network {
                    self.apply_network_isolation_vars();
                }
            }
            IsolationLevel::Hermetic => {
                // Full hermetic isolation
                let config = HermeticConfig {
                    allow_network,
                    ..Default::default()
                };
                self.apply_hermetic_isolation(&config, event_sender).await?;
            }
        }
        Ok(())
    }
    /// Apply network isolation environment variables
    ///
    /// Points every common proxy variable at an unroutable local port so
    /// proxy-aware tools fail fast, and clears the `no_proxy` escape hatches.
    fn apply_network_isolation_vars(&mut self) {
        const BLACKHOLE: &str = "http://127.0.0.1:1";
        for var in [
            "http_proxy",
            "https_proxy",
            "ftp_proxy",
            "all_proxy",
            "HTTP_PROXY",
            "HTTPS_PROXY",
            "FTP_PROXY",
            "ALL_PROXY",
        ] {
            self.env_vars.insert(var.to_string(), BLACKHOLE.to_string());
        }
        self.env_vars.insert("no_proxy".to_string(), String::new());
        self.env_vars.insert("NO_PROXY".to_string(), String::new());
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/hermetic.rs | crates/builder/src/environment/hermetic.rs | // Crate-level pedantic settings apply
#![allow(clippy::module_name_repetitions)]
//! Hermetic build environment isolation
//!
//! This module provides comprehensive hermetic isolation features for build environments,
//! ensuring builds are reproducible and isolated from the host system.
use super::core::BuildEnvironment;
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, EventEmitter, EventSender, GeneralEvent};
use std::collections::{HashMap, HashSet};
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use tokio::fs;
/// Hermetic isolation configuration
///
/// Controls how far [`BuildEnvironment::apply_hermetic_isolation`] locks the
/// build down: which host environment variables survive, whether the network
/// is reachable, where the throwaway HOME lives, and whether device nodes
/// are created.
#[derive(Debug, Clone)]
pub struct HermeticConfig {
    /// Environment variables to preserve (whitelist)
    pub allowed_env_vars: HashSet<String>,
    /// Paths that should be accessible read-only
    // NOTE(review): not enforced anywhere in this module's visible code —
    // confirm the consumer before relying on it.
    pub allowed_read_paths: Vec<PathBuf>,
    /// Paths that should be accessible read-write
    // NOTE(review): likewise not enforced in the visible code.
    pub allowed_write_paths: Vec<PathBuf>,
    /// Whether to allow network access
    pub allow_network: bool,
    /// Temporary home directory path (defaults to `<build_prefix>/home`
    /// when `None`)
    pub temp_home: Option<PathBuf>,
    /// Whether to create minimal device nodes
    pub create_devices: bool,
}
impl Default for HermeticConfig {
    /// Conservative defaults: a minimal env-var whitelist, read access to the
    /// standard macOS toolchain locations, no write paths, no network, and no
    /// device nodes.
    fn default() -> Self {
        // Minimal set of environment variables needed for builds.
        let allowed_env_vars: HashSet<String> = [
            "PATH", "HOME", "TMPDIR", "TEMP", "TMP", "USER", "SHELL", "TERM",
        ]
        .iter()
        .map(|name| (*name).to_string())
        .collect();
        Self {
            allowed_env_vars,
            allowed_read_paths: vec![
                PathBuf::from("/usr/bin"),
                PathBuf::from("/usr/lib"),
                PathBuf::from("/usr/include"),
                // macOS system libraries
                PathBuf::from("/System"),
                // Xcode command-line tools
                PathBuf::from("/Library/Developer/CommandLineTools"),
            ],
            allowed_write_paths: Vec::new(),
            allow_network: false,
            temp_home: None,
            // Device nodes are kernel-managed on macOS, so none are needed.
            create_devices: false,
        }
    }
}
impl BuildEnvironment {
    /// Apply hermetic isolation to the build environment
    ///
    /// Orchestrates the full isolation sequence: clears environment
    /// variables down to the whitelist in `config`, redirects `HOME` and the
    /// three conventional temp-dir variables into directories under the
    /// build prefix, optionally poisons proxy variables to block network
    /// access, and emits debug events around the whole process.
    ///
    /// # Errors
    ///
    /// Returns an error if isolation setup fails.
    pub async fn apply_hermetic_isolation(
        &mut self,
        config: &HermeticConfig,
        event_sender: Option<&EventSender>,
    ) -> Result<(), Error> {
        // Send event for isolation start
        if let Some(sender) = event_sender {
            sender.emit(AppEvent::General(GeneralEvent::debug("Build step started")));
        }
        // Clear environment variables down to the whitelist, then re-seed the
        // critical build variables.
        self.clear_environment_vars(config);
        // Setup temporary home directory so tools never read the real $HOME
        let temp_home = self.setup_temp_home(config).await?;
        self.env_vars
            .insert("HOME".to_string(), temp_home.display().to_string());
        // Setup private temporary directory; all three conventional variables
        // point at it so every tool picks it up
        let private_tmp = self.setup_private_tmp().await?;
        self.env_vars
            .insert("TMPDIR".to_string(), private_tmp.display().to_string());
        self.env_vars
            .insert("TEMP".to_string(), private_tmp.display().to_string());
        self.env_vars
            .insert("TMP".to_string(), private_tmp.display().to_string());
        // Apply network isolation if configured
        if !config.allow_network {
            self.apply_network_isolation()?;
        }
        // Setup minimal device nodes if needed (mostly a no-op on macOS)
        if config.create_devices {
            Self::setup_minimal_devices();
        }
        // Send completion event
        if let Some(sender) = event_sender {
            sender.emit(AppEvent::General(GeneralEvent::debug(
                "Build step completed",
            )));
        }
        Ok(())
    }
/// Clear all environment variables except those whitelisted
fn clear_environment_vars(&mut self, config: &HermeticConfig) {
// Get current environment
let current_env: HashMap<String, String> = std::env::vars().collect();
// Start with a clean slate for the build environment
self.env_vars.clear();
// Only copy over whitelisted variables
for (key, value) in current_env {
if config.allowed_env_vars.contains(&key) {
self.env_vars.insert(key, value);
}
}
// Ensure critical build variables are set
self.setup_clean_build_environment();
}
/// Setup clean build environment variables
fn setup_clean_build_environment(&mut self) {
// Set clean PATH with only necessary directories
// System paths must come first for security
let clean_path = [
"/usr/bin".to_string(),
"/bin".to_string(),
"/usr/sbin".to_string(),
"/sbin".to_string(),
sps2_config::fixed_paths::BIN_DIR.to_string(),
]
.join(":");
self.env_vars.insert("PATH".to_string(), clean_path);
// Set build-specific variables
self.env_vars
.insert("PREFIX".to_string(), self.staging_dir.display().to_string());
self.env_vars.insert(
"DESTDIR".to_string(),
self.staging_dir.display().to_string(),
);
self.env_vars
.insert("JOBS".to_string(), Self::cpu_count().to_string());
// Clean compiler/linker flags (point to live prefix)
let live_include = format!("{}/include", sps2_config::fixed_paths::LIVE_DIR);
let live_lib = format!("{}/lib", sps2_config::fixed_paths::LIVE_DIR);
self.env_vars
.insert("CFLAGS".to_string(), format!("-I{live_include}"));
self.env_vars
.insert("CXXFLAGS".to_string(), format!("-I{live_include}"));
// LDFLAGS with headerpad for macOS
let mut ldflags = format!("-L{live_lib}");
if cfg!(target_os = "macos") {
ldflags.push_str(" -headerpad_max_install_names");
}
self.env_vars.insert("LDFLAGS".to_string(), ldflags);
// Remove potentially harmful variables
self.env_vars.remove("LD_LIBRARY_PATH");
self.env_vars.remove("DYLD_LIBRARY_PATH"); // macOS specific
self.env_vars.remove("DYLD_FALLBACK_LIBRARY_PATH"); // macOS specific
self.env_vars.remove("PKG_CONFIG_PATH"); // Will be set when build deps are installed
// Set locale to ensure consistent behavior
self.env_vars
.insert("LANG".to_string(), "C.UTF-8".to_string());
self.env_vars.insert("LC_ALL".to_string(), "C".to_string());
}
/// Setup temporary home directory
async fn setup_temp_home(&self, config: &HermeticConfig) -> Result<PathBuf, Error> {
let temp_home = if let Some(ref home) = config.temp_home {
home.clone()
} else {
self.build_prefix.join("home")
};
// Create the directory
tokio::fs::create_dir_all(&temp_home)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to create temp home: {e}"),
})?;
// Set restrictive permissions
let metadata = tokio::fs::metadata(&temp_home)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to get temp home metadata: {e}"),
})?;
let mut perms = metadata.permissions();
perms.set_mode(0o700); // Owner read/write/execute only
tokio::fs::set_permissions(&temp_home, perms)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to set temp home permissions: {e}"),
})?;
// Create minimal dot files to prevent tools from accessing real home
self.create_minimal_dotfiles(&temp_home).await?;
Ok(temp_home)
}
/// Create minimal dotfiles in temp home
async fn create_minimal_dotfiles(&self, temp_home: &Path) -> Result<(), Error> {
// Create empty .bashrc to prevent loading user's bashrc
let bashrc = temp_home.join(".bashrc");
tokio::fs::write(&bashrc, "# Hermetic build environment\n")
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to create .bashrc: {e}"),
})?;
// Create minimal .gitconfig to prevent git from accessing user config
let gitconfig = temp_home.join(".gitconfig");
let git_content = "[user]\n name = sps2-builder\n email = builder@sps2.local\n";
tokio::fs::write(&gitconfig, git_content)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to create .gitconfig: {e}"),
})?;
// Create .config directory for tools that use XDG config
let config_dir = temp_home.join(".config");
tokio::fs::create_dir_all(&config_dir)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to create .config: {e}"),
})?;
Ok(())
}
/// Setup private temporary directory
async fn setup_private_tmp(&self) -> Result<PathBuf, Error> {
let private_tmp = self.build_prefix.join("tmp");
// Create the directory
tokio::fs::create_dir_all(&private_tmp)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to create private tmp: {e}"),
})?;
// Set sticky bit and appropriate permissions
let metadata = tokio::fs::metadata(&private_tmp)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to get private tmp metadata: {e}"),
})?;
let mut perms = metadata.permissions();
perms.set_mode(0o1777); // Sticky bit + world writable
tokio::fs::set_permissions(&private_tmp, perms)
.await
.map_err(|e| BuildError::Failed {
message: format!("Failed to set private tmp permissions: {e}"),
})?;
Ok(private_tmp)
}
/// Apply network isolation
///
/// # Errors
///
/// Currently this function never returns an error, but returns `Result` for future extensibility
pub fn apply_network_isolation(&mut self) -> Result<(), Error> {
// Set environment variables to disable network access
self.env_vars
.insert("http_proxy".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("https_proxy".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("ftp_proxy".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("all_proxy".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("HTTP_PROXY".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("HTTPS_PROXY".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("FTP_PROXY".to_string(), "http://127.0.0.1:1".to_string());
self.env_vars
.insert("ALL_PROXY".to_string(), "http://127.0.0.1:1".to_string());
// Set no_proxy for localhost
self.env_vars.insert(
"no_proxy".to_string(),
"localhost,127.0.0.1,::1".to_string(),
);
self.env_vars.insert(
"NO_PROXY".to_string(),
"localhost,127.0.0.1,::1".to_string(),
);
Ok(())
}
/// Setup minimal device nodes (mostly no-op on macOS)
fn setup_minimal_devices() {
// On macOS, we don't need to create device nodes as they're managed by the kernel
// This is kept for API compatibility and future expansion
}
/// Verify hermetic isolation is properly applied
///
/// # Errors
///
/// Returns an error if:
/// - Forbidden environment variables are present
/// - Required isolation settings are not applied
/// - Home directory points outside the build environment
pub fn verify_hermetic_isolation(&self, config: &HermeticConfig) -> Result<(), Error> {
// Check that only allowed environment variables are set
for key in self.env_vars.keys() {
// Build-specific variables are always allowed
let build_vars = [
"PREFIX",
"DESTDIR",
"JOBS",
"CFLAGS",
"CXXFLAGS",
"LDFLAGS",
"PKG_CONFIG_PATH",
"LANG",
"LC_ALL",
];
if !config.allowed_env_vars.contains(key) && !build_vars.contains(&key.as_str()) {
return Err(BuildError::SandboxViolation {
message: format!("Unexpected environment variable: {key}"),
}
.into());
}
}
// Verify HOME is set to temp location
if let Some(home) = self.env_vars.get("HOME") {
let home_path = Path::new(home);
if !home_path.starts_with(&self.build_prefix) {
return Err(BuildError::SandboxViolation {
message: "HOME not pointing to isolated directory".to_string(),
}
.into());
}
} else {
return Err(BuildError::SandboxViolation {
message: "HOME environment variable not set".to_string(),
}
.into());
}
// Verify TMPDIR is set to private location
if let Some(tmpdir) = self.env_vars.get("TMPDIR") {
let tmp_path = Path::new(tmpdir);
if !tmp_path.starts_with(&self.build_prefix) {
return Err(BuildError::SandboxViolation {
message: "TMPDIR not pointing to isolated directory".to_string(),
}
.into());
}
} else {
return Err(BuildError::SandboxViolation {
message: "TMPDIR environment variable not set".to_string(),
}
.into());
}
// Verify network isolation if configured
if !config.allow_network {
let proxy_vars = ["http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"];
for var in &proxy_vars {
if let Some(value) = self.env_vars.get(*var) {
if !value.contains("127.0.0.1:1") {
return Err(BuildError::SandboxViolation {
message: format!("Network isolation not properly configured: {var}"),
}
.into());
}
} else {
return Err(BuildError::SandboxViolation {
message: format!("Network isolation variable not set: {var}"),
}
.into());
}
}
}
Ok(())
}
}
/// Setup a private home directory for enhanced isolation
///
/// Creates `<build_prefix>/home`, restricts it to the owner only (0o700),
/// and seeds it with minimal dotfiles.
///
/// # Errors
///
/// Returns an error if the directory cannot be created, inspected, or have
/// its permissions adjusted.
pub async fn setup_private_home(build_prefix: &Path) -> Result<PathBuf, Error> {
    let home = build_prefix.join("home");
    fs::create_dir_all(&home)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to create temp home: {e}"),
        })?;
    // Owner-only access.
    let mut perms = fs::metadata(&home)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to get temp home metadata: {e}"),
        })?
        .permissions();
    perms.set_mode(0o700);
    fs::set_permissions(&home, perms)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to set temp home permissions: {e}"),
        })?;
    // Seed dotfiles so tools do not read the user's real configuration.
    create_minimal_dotfiles_public(&home).await?;
    Ok(home)
}
/// Setup a private temporary directory for enhanced isolation
///
/// Creates `<build_prefix>/tmp` with conventional `/tmp`-style permissions
/// (sticky bit + world-writable, 0o1777).
///
/// # Errors
///
/// Returns an error if the directory cannot be created, inspected, or have
/// its permissions adjusted.
pub async fn setup_private_tmpdir(build_prefix: &Path) -> Result<PathBuf, Error> {
    let tmp = build_prefix.join("tmp");
    fs::create_dir_all(&tmp)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to create private tmp: {e}"),
        })?;
    let mut perms = fs::metadata(&tmp)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to get private tmp metadata: {e}"),
        })?
        .permissions();
    // Sticky bit + world writable, like the system /tmp.
    perms.set_mode(0o1777);
    fs::set_permissions(&tmp, perms)
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to set private tmp permissions: {e}"),
        })?;
    Ok(tmp)
}
/// Seed a directory with the minimal dotfiles (shell, git, XDG config) that
/// stop common tools from reading the user's real configuration.
async fn create_minimal_dotfiles_public(temp_home: &Path) -> Result<(), Error> {
    // Near-empty .bashrc so shells load nothing from the real user.
    fs::write(
        temp_home.join(".bashrc"),
        "# Hermetic build environment\n",
    )
    .await
    .map_err(|e| BuildError::Failed {
        message: format!("Failed to create .bashrc: {e}"),
    })?;
    // Fixed git identity so git never consults the user's ~/.gitconfig.
    fs::write(
        temp_home.join(".gitconfig"),
        "[user]\n    name = sps2-builder\n    email = builder@sps2.local\n",
    )
    .await
    .map_err(|e| BuildError::Failed {
        message: format!("Failed to create .gitconfig: {e}"),
    })?;
    // Pre-create .config for XDG-based tools.
    fs::create_dir_all(temp_home.join(".config"))
        .await
        .map_err(|e| BuildError::Failed {
            message: format!("Failed to create .config: {e}"),
        })?;
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/directories.rs | crates/builder/src/environment/directories.rs | //! Build directory structure management
use super::core::BuildEnvironment;
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, EventEmitter};
use tokio::fs;
impl BuildEnvironment {
/// Initialize the build environment
///
/// # Errors
///
/// Returns an error if directories cannot be created or environment setup fails.
pub async fn initialize(&mut self) -> Result<(), Error> {
self.emit_operation_started(format!(
"Building {} {}",
self.context.name, self.context.version
));
// Create build directories with better error reporting
fs::create_dir_all(&self.build_prefix)
.await
.map_err(|e| BuildError::Failed {
message: format!(
"Failed to create build prefix {}: {}",
self.build_prefix.display(),
e
),
})?;
fs::create_dir_all(&self.staging_dir)
.await
.map_err(|e| BuildError::Failed {
message: format!(
"Failed to create staging dir {}: {}",
self.staging_dir.display(),
e
),
})?;
// Set up environment variables
self.setup_environment();
Ok(())
}
/// Clean up build environment thoroughly
///
/// # Errors
///
/// Returns an error if directories cannot be removed during cleanup.
pub async fn cleanup(&self) -> Result<(), Error> {
// Remove any temporary build files in the build prefix
let temp_dirs = vec!["src", "build", "tmp"];
for dir in temp_dirs {
let temp_path = self.build_prefix.join(dir);
if temp_path.exists() {
fs::remove_dir_all(&temp_path).await?;
}
}
self.emit_operation_completed(
format!("Cleaned build environment for {}", self.context.name),
true,
);
Ok(())
}
    /// Send event if sender is available
    ///
    /// Silently drops the event when no event sender is configured, so
    /// callers never need to handle the "no subscriber" case themselves.
    pub(crate) fn send_event(&self, event: AppEvent) {
        if let Some(sender) = self.event_sender() {
            sender.emit(event);
        }
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/types.rs | crates/builder/src/environment/types.rs | //! Types and result structures for build environment
use std::fmt;
use serde::de::{self, IgnoredAny, MapAccess, Unexpected, Visitor};
use std::path::PathBuf;
/// Result of executing a build command
///
/// Captures the full outcome of a single command invocation: overall
/// success, the raw exit code (when one was reported), and both captured
/// output streams.
#[derive(Debug)]
pub struct BuildCommandResult {
    /// Whether the command succeeded
    pub success: bool,
    /// Exit code (`None` when the command did not report one)
    pub exit_code: Option<i32>,
    /// Standard output
    pub stdout: String,
    /// Standard error
    pub stderr: String,
}
/// Result of the build process
///
/// Produced once a package has been built; carries the artifact location
/// plus the auxiliary outputs gathered along the way (SBOM paths, the build
/// log, and whether the recipe asked for a post-build install).
#[derive(Debug, Clone)]
pub struct BuildResult {
    /// Path to the generated package file
    pub package_path: PathBuf,
    /// SBOM files generated (SBOM disabled)
    pub sbom_files: Vec<PathBuf>,
    /// Build log
    pub build_log: String,
    /// Whether the recipe requested the package be installed after building
    pub install_requested: bool,
}
impl BuildResult {
    /// Create new build result
    ///
    /// All auxiliary fields start empty; use the mutators below to fill
    /// them in.
    #[must_use]
    pub fn new(package_path: PathBuf) -> Self {
        Self {
            package_path,
            sbom_files: vec![],
            build_log: String::default(),
            install_requested: false,
        }
    }
    /// Add SBOM file
    pub fn add_sbom_file(&mut self, path: PathBuf) {
        self.sbom_files.push(path);
    }
    /// Set build log
    pub fn set_build_log(&mut self, log: String) {
        self.build_log = log;
    }
    /// Set install requested flag
    #[must_use]
    pub fn with_install_requested(self, install_requested: bool) -> Self {
        Self {
            install_requested,
            ..self
        }
    }
}
/// Build isolation level
///
/// Ordered from weakest to strongest. The explicit discriminants (0-3) are
/// the same values accepted by `from_u8` and produced by `as_u8`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize)]
#[serde(rename_all = "lowercase")]
pub enum IsolationLevel {
    /// No isolation - uses host environment as-is (shows warning)
    None = 0,
    /// Default isolation - clean environment, controlled paths (default)
    Default = 1,
    /// Enhanced isolation - default + private HOME/TMPDIR
    Enhanced = 2,
    /// Hermetic isolation - full whitelist approach, network blocking
    Hermetic = 3,
}
impl Default for IsolationLevel {
fn default() -> Self {
Self::Default
}
}
// Custom deserializer: accepts the level as a string name ("none",
// "default", "enhanced", "hermetic" — any case, surrounding whitespace
// ignored), as an integer 0-3, or as a map with a single entry whose key is
// the level name (the entry's value is ignored).
impl<'de> serde::Deserialize<'de> for IsolationLevel {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct IsolationLevelVisitor;
        impl<'de> Visitor<'de> for IsolationLevelVisitor {
            type Value = IsolationLevel;
            fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("an isolation level (none, default, enhanced, hermetic, or 0-3)")
            }
            // Borrowed and owned strings share the same parsing path.
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                parse_from_str(value)
            }
            fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                parse_from_str(&value)
            }
            // Unsigned integers must fit in a u8 before the 0-3 range check.
            fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                if value > u64::from(u8::MAX) {
                    return Err(de::Error::invalid_value(
                        Unexpected::Unsigned(value),
                        &"number between 0 and 3",
                    ));
                }
                let byte = u8::try_from(value).map_err(|_| {
                    de::Error::invalid_value(Unexpected::Unsigned(value), &"number between 0 and 3")
                })?;
                parse_from_u8(byte)
            }
            // Negative values are rejected outright; everything else is
            // funneled through the unsigned path above.
            fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                if value < 0 {
                    return Err(de::Error::invalid_value(
                        Unexpected::Signed(value),
                        &"number between 0 and 3",
                    ));
                }
                let unsigned = u64::try_from(value).map_err(|_| {
                    de::Error::invalid_value(Unexpected::Signed(value), &"number between 0 and 3")
                })?;
                self.visit_u64(unsigned)
            }
            // Map form: `{ "<level name>": <ignored> }` with exactly one entry.
            fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
            where
                M: MapAccess<'de>,
            {
                if let Some((key, _value)) = map.next_entry::<String, IgnoredAny>()? {
                    // Ensure no additional entries are present
                    if map.next_entry::<IgnoredAny, IgnoredAny>()?.is_some() {
                        return Err(de::Error::custom(
                            "isolation level map must contain a single entry",
                        ));
                    }
                    parse_from_str(&key)
                } else {
                    Err(de::Error::custom(
                        "expected isolation level map with a single entry",
                    ))
                }
            }
        }
        deserializer.deserialize_any(IsolationLevelVisitor)
    }
}
fn parse_from_str<E>(value: &str) -> Result<IsolationLevel, E>
where
E: de::Error,
{
let normalized = value.trim().to_ascii_lowercase();
match normalized.as_str() {
"none" => Ok(IsolationLevel::None),
"default" => Ok(IsolationLevel::Default),
"enhanced" => Ok(IsolationLevel::Enhanced),
"hermetic" => Ok(IsolationLevel::Hermetic),
other => Err(de::Error::unknown_variant(
other,
&["none", "default", "enhanced", "hermetic"],
)),
}
}
fn parse_from_u8<E>(value: u8) -> Result<IsolationLevel, E>
where
E: de::Error,
{
IsolationLevel::from_u8(value).ok_or_else(|| {
de::Error::invalid_value(
Unexpected::Unsigned(u64::from(value)),
&"number between 0 and 3",
)
})
}
impl IsolationLevel {
    /// Convert from u8
    ///
    /// Returns `None` for any value outside `0..=3`. Marked `#[must_use]`
    /// for consistency with the crate's other pure conversions.
    #[must_use]
    pub fn from_u8(value: u8) -> Option<Self> {
        match value {
            0 => Some(Self::None),
            1 => Some(Self::Default),
            2 => Some(Self::Enhanced),
            3 => Some(Self::Hermetic),
            _ => None,
        }
    }
    /// Convert to u8
    ///
    /// The mapping is the inverse of [`IsolationLevel::from_u8`].
    #[must_use]
    pub fn as_u8(self) -> u8 {
        self as u8
    }
}
impl fmt::Display for IsolationLevel {
    /// Render the canonical lowercase name (the same spelling accepted by
    /// `parse_from_str`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::None => "none",
            Self::Default => "default",
            Self::Enhanced => "enhanced",
            Self::Hermetic => "hermetic",
        };
        f.write_str(name)
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/mod.rs | crates/builder/src/environment/mod.rs | //! Build environment management
//!
//! This module provides isolated build environments for package building.
//! It manages directory structure, environment variables, dependency installation,
//! command execution, and environment isolation verification.
mod core;
mod dependencies;
mod directories;
mod execution;
mod hermetic;
mod isolation;
mod types;
mod variables;
// Re-export public API
pub use core::BuildEnvironment;
pub use types::{BuildCommandResult, BuildResult, IsolationLevel};
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/dependencies.rs | crates/builder/src/environment/dependencies.rs | //! Build dependency installation
use super::core::BuildEnvironment;
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, FailureContext, GeneralEvent, LifecycleEvent};
use sps2_resolver::{InstalledPackage, ResolutionContext};
use sps2_state::StateManager;
use sps2_types::package::PackageSpec;
use sps2_types::Version;
use std::convert::TryFrom;
use std::time::Instant;
impl BuildEnvironment {
    /// Setup build dependencies
    ///
    /// Resolves the requested build dependencies (consulting the installed
    /// set first), emits resolver lifecycle events, installs each resolved
    /// node, and finally refreshes the environment variables that expose the
    /// build deps to the build.
    ///
    /// # Errors
    ///
    /// Returns an error if dependency resolution fails or build dependencies cannot be installed.
    pub async fn setup_dependencies(&mut self, build_deps: Vec<PackageSpec>) -> Result<(), Error> {
        // Nothing requested: nothing to resolve or install.
        if build_deps.is_empty() {
            return Ok(());
        }
        let Some(resolver) = &self.resolver else {
            return Err(BuildError::MissingBuildDep {
                name: "resolver configuration".to_string(),
            }
            .into());
        };
        let build_target_count = build_deps.len();
        // Get installed packages to check before resolving from repository.
        // A failure to read state is treated as "nothing installed".
        let installed_packages = Self::get_installed_packages().await.unwrap_or_default();
        // Resolve build dependencies
        let mut resolution_context = ResolutionContext::new();
        for dep in build_deps {
            resolution_context = resolution_context.add_build_dep(dep);
        }
        let local_target_count = installed_packages.len();
        // Include installed packages to check before repository resolution
        resolution_context = resolution_context.with_installed_packages(installed_packages.clone());
        let resolve_start = Instant::now();
        self.send_event(AppEvent::Lifecycle(LifecycleEvent::resolver_started(
            0,
            build_target_count,
            local_target_count,
        )));
        let resolution = match resolver.resolve_with_sat(resolution_context).await {
            Ok(resolution) => resolution,
            Err(error) => {
                // Surface the failure on the event stream before propagating.
                self.send_event(AppEvent::Lifecycle(LifecycleEvent::resolver_failed(
                    FailureContext::from_error(&error),
                    Vec::new(),
                )));
                return Err(error);
            }
        };
        let duration_ms = resolve_start.elapsed().as_millis();
        // Clamp the u128 millisecond count into the event payload's u64.
        let duration_ms = u64::try_from(duration_ms).unwrap_or(u64::MAX);
        // Tally how many nodes need downloading versus local reuse, for the
        // completion event.
        let mut downloaded_packages = 0usize;
        let mut reused_packages = 0usize;
        for node in resolution.nodes.values() {
            match node.action {
                sps2_resolver::NodeAction::Download => downloaded_packages += 1,
                sps2_resolver::NodeAction::Local => reused_packages += 1,
            }
        }
        self.send_event(AppEvent::Lifecycle(LifecycleEvent::resolver_completed(
            resolution.nodes.len(),
            downloaded_packages,
            reused_packages,
            duration_ms,
        )));
        // Install build dependencies to deps prefix
        for node in resolution.packages_in_order() {
            // Install all resolved build dependencies to the isolated deps prefix
            self.install_build_dependency(node).await?;
        }
        // Update environment for build deps
        self.setup_build_deps_environment();
        Ok(())
    }
/// Install a build dependency to isolated prefix
///
/// # Errors
///
/// Returns an error if the installer or store is not configured, or if installation fails.
async fn install_build_dependency(
&self,
node: &sps2_resolver::ResolvedNode,
) -> Result<(), Error> {
// Check if this is an already-installed package (marked by resolver with Local action and empty path)
if matches!(&node.action, sps2_resolver::NodeAction::Local) {
let is_empty_or_none = match &node.path {
None => true,
Some(path) => path.as_os_str().is_empty(),
};
if is_empty_or_none {
// Already installed - just verify it exists
self.send_event(AppEvent::General(GeneralEvent::debug(format!(
"{} {} is already installed in {}",
node.name,
node.version,
sps2_config::fixed_paths::LIVE_DIR
))));
// Verify the package is installed
self.verify_installed_package(&node.name, &node.version)
.await?;
// Package verification completed - metrics removed as per architectural decision
return Ok(());
}
}
let Some(_installer) = &self.installer else {
return Err(BuildError::MissingBuildDep {
name: "installer not configured".to_string(),
}
.into());
};
let Some(_store) = &self.store else {
return Err(BuildError::MissingBuildDep {
name: "package store not configured".to_string(),
}
.into());
};
let Some(net_client) = &self.net else {
return Err(BuildError::MissingBuildDep {
name: "network client not configured".to_string(),
}
.into());
};
self.send_event(AppEvent::Lifecycle(LifecycleEvent::install_started(
node.name.clone(),
node.version.clone(),
)));
// Install the build dependency to the isolated deps prefix
// This extracts the package contents to the build environment
match &node.action {
sps2_resolver::NodeAction::Download => {
if let Some(url) = &node.url {
self.send_event(AppEvent::Lifecycle(LifecycleEvent::download_started(
url.clone(),
Some(format!("{}:{}", node.name, node.version)),
None,
)));
// Download the .sp file to a temporary location
let temp_dir = std::env::temp_dir();
let sp_filename = format!("{}-{}.sp", node.name, node.version);
let temp_sp_path = temp_dir.join(&sp_filename);
// Use NetClient to download the file with consistent retry logic
let default_tx = {
let (tx, _) = sps2_events::channel();
tx
};
let event_sender = self.context.event_sender.as_ref().unwrap_or(&default_tx);
let bytes = sps2_net::fetch_bytes(net_client, url, event_sender)
.await
.map_err(|_e| BuildError::FetchFailed { url: url.clone() })?;
tokio::fs::write(&temp_sp_path, &bytes).await?;
// Clean up temporary file
if temp_sp_path.exists() {
let _ = tokio::fs::remove_file(&temp_sp_path).await;
}
// We don't extract to deps anymore - package should be installed to /opt/pm/live
return Err(BuildError::MissingBuildDep {
name: format!(
"{} {} needs to be installed via 'sps2 install'",
node.name, node.version
),
}
.into());
}
}
sps2_resolver::NodeAction::Local => {
if let Some(_path) = &node.path {
// We don't extract to deps anymore - package should be installed to /opt/pm/live
return Err(BuildError::MissingBuildDep {
name: format!(
"{} {} needs to be installed via 'sps2 install'",
node.name, node.version
),
}
.into());
}
}
}
Ok(())
}
/// Get currently installed packages from system state
async fn get_installed_packages() -> Result<Vec<InstalledPackage>, Error> {
// Create a minimal state manager to check installed packages
let base_path = std::path::Path::new(sps2_config::fixed_paths::PREFIX);
let state = StateManager::new(base_path).await?;
let packages = state.get_installed_packages().await?;
let mut installed = Vec::new();
for pkg in packages {
let version = Version::parse(&pkg.version)?;
installed.push(InstalledPackage::new(pkg.name, version));
}
Ok(installed)
}
/// Verify an already-installed package exists
///
/// # Errors
///
/// Returns an error if the package is not installed.
async fn verify_installed_package(&self, name: &str, version: &Version) -> Result<(), Error> {
// Check if package is installed using state manager
let base_path = std::path::Path::new(sps2_config::fixed_paths::PREFIX);
let state = StateManager::new(base_path).await?;
// Get all installed packages
let installed = state.get_installed_packages().await?;
// Check if our package is in the list
let is_installed = installed
.iter()
.any(|pkg| pkg.name == name && pkg.version == version.to_string());
if !is_installed {
return Err(BuildError::MissingBuildDep {
name: format!("{name} {version} is not installed"),
}
.into());
}
Ok(())
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/isolation.rs | crates/builder/src/environment/isolation.rs | //! Environment verification and coordination
//!
//! This module provides comprehensive isolation verification for build environments,
//! including environment variable sanitization, path isolation checks, and
//! network isolation verification.
use super::core::BuildEnvironment;
use sps2_errors::{BuildError, Error};
use std::path::PathBuf;
impl BuildEnvironment {
    /// Verify build environment isolation is properly set up
    ///
    /// This performs comprehensive checks to ensure the build environment is
    /// properly isolated from the host system. Each stage is delegated to a
    /// dedicated helper; the first failing check aborts verification via `?`.
    ///
    /// # Errors
    ///
    /// Returns an error if the build environment is not properly isolated or directories are missing.
    pub fn verify_isolation(&self) -> Result<(), Error> {
        // Perform basic isolation checks (directories + core variables)
        self.verify_basic_isolation()?;
        // Verify environment variables are sanitized
        self.verify_environment_sanitization()?;
        // Verify path isolation
        self.verify_path_isolation()?;
        // Verify no access to system paths
        self.verify_system_path_isolation()?;
        Ok(())
    }
/// Verify basic isolation requirements
fn verify_basic_isolation(&self) -> Result<(), Error> {
// Check that critical directories exist
if !self.build_prefix.exists() {
return Err(BuildError::Failed {
message: format!(
"Build prefix does not exist: {}",
self.build_prefix.display()
),
}
.into());
}
if !self.staging_dir.exists() {
return Err(BuildError::Failed {
message: format!(
"Staging directory does not exist: {}",
self.staging_dir.display()
),
}
.into());
}
// Verify environment variables are set correctly
let required_vars = vec!["PREFIX", "DESTDIR", "JOBS"];
for var in required_vars {
if !self.env_vars.contains_key(var) {
return Err(BuildError::Failed {
message: format!("Required environment variable {var} not set"),
}
.into());
}
}
// PATH will be updated when build dependencies are installed
// So we just check it exists for now
if !self.env_vars.contains_key("PATH") {
return Err(BuildError::Failed {
message: "PATH environment variable not set".to_string(),
}
.into());
}
Ok(())
}
/// Verify environment variables are properly sanitized
fn verify_environment_sanitization(&self) -> Result<(), Error> {
// List of potentially dangerous environment variables that should not be set
let dangerous_vars = vec![
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH", // macOS specific
"DYLD_FALLBACK_LIBRARY_PATH", // macOS specific
"DYLD_INSERT_LIBRARIES", // macOS specific - can inject code
"LD_PRELOAD", // Linux equivalent of DYLD_INSERT_LIBRARIES
"PYTHONPATH", // Could interfere with Python builds
"PERL5LIB", // Could interfere with Perl builds
"RUBYLIB", // Could interfere with Ruby builds
"NODE_PATH", // Could interfere with Node.js builds
"GOPATH", // Could interfere with Go builds
"CARGO_HOME", // Could interfere with Rust builds
];
for var in dangerous_vars {
if self.env_vars.contains_key(var) {
return Err(BuildError::SandboxViolation {
message: format!("Dangerous environment variable {var} is set"),
}
.into());
}
}
// Verify compiler/linker flags are clean
self.verify_compiler_flags()?;
Ok(())
}
/// Verify compiler and linker flags are properly isolated
fn verify_compiler_flags(&self) -> Result<(), Error> {
// Check CFLAGS
if let Some(cflags) = self.env_vars.get("CFLAGS") {
// Check that CFLAGS contains /opt/pm/live/include
if !cflags.contains(&format!("{}/include", sps2_config::fixed_paths::LIVE_DIR)) {
return Err(BuildError::SandboxViolation {
message: "CFLAGS not properly configured for isolation".to_string(),
}
.into());
}
// Ensure no system paths are referenced
if cflags.contains("/usr/local") || cflags.contains("/opt/homebrew") {
return Err(BuildError::SandboxViolation {
message: "CFLAGS contains system paths".to_string(),
}
.into());
}
}
// Check LDFLAGS
if let Some(ldflags) = self.env_vars.get("LDFLAGS") {
// Check that LDFLAGS contains /opt/pm/live/lib
if !ldflags.contains(&format!("{}/lib", sps2_config::fixed_paths::LIVE_DIR)) {
return Err(BuildError::SandboxViolation {
message: "LDFLAGS not properly configured for isolation".to_string(),
}
.into());
}
// Ensure no system paths are referenced
if ldflags.contains("/usr/local") || ldflags.contains("/opt/homebrew") {
return Err(BuildError::SandboxViolation {
message: "LDFLAGS contains system paths".to_string(),
}
.into());
}
}
Ok(())
}
/// Verify PATH isolation
fn verify_path_isolation(&self) -> Result<(), Error> {
if let Some(path) = self.env_vars.get("PATH") {
let path_components: Vec<&str> = path.split(':').collect();
// Verify system paths come first
if path_components.is_empty() || !path_components[0].starts_with("/usr/bin") {
return Err(BuildError::SandboxViolation {
message: "System paths not first in PATH".to_string(),
}
.into());
}
// Verify /opt/pm/live/bin is in PATH but after system paths
let mut found_system = false;
for component in &path_components {
if component.starts_with("/usr/")
|| component.starts_with("/bin")
|| component.starts_with("/sbin")
{
found_system = true;
} else if *component == sps2_config::fixed_paths::BIN_DIR {
if !found_system {
return Err(BuildError::SandboxViolation {
message: "/opt/pm/live/bin appears before system paths in PATH"
.to_string(),
}
.into());
}
// /opt/pm/live/bin found in correct position
break;
}
}
// PATH is strictly controlled to only include system essentials and sps2 paths
// No external package managers (Homebrew, MacPorts, etc.) are allowed
} else {
return Err(BuildError::SandboxViolation {
message: "PATH not set".to_string(),
}
.into());
}
Ok(())
}
/// Verify no access to system paths outside allowed directories
fn verify_system_path_isolation(&self) -> Result<(), Error> {
// Check that build directories are within allowed paths
let allowed_prefixes = vec![
self.build_prefix.clone(),
PathBuf::from(sps2_config::fixed_paths::PREFIX), // sps2 system directory
];
// Verify staging directory is isolated
let mut staging_allowed = false;
for prefix in &allowed_prefixes {
if self.staging_dir.starts_with(prefix) {
staging_allowed = true;
break;
}
}
if !staging_allowed {
return Err(BuildError::SandboxViolation {
message: format!(
"Staging directory {} is outside allowed paths",
self.staging_dir.display()
),
}
.into());
}
Ok(())
}
/// Check if network isolation is properly configured
///
/// # Errors
///
/// Currently this function never returns an error, but returns `Result` for future extensibility
pub fn verify_network_isolation(&self) -> Result<bool, Error> {
// Check if proxy environment variables are set for isolation
let proxy_vars = ["http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"];
let mut isolated = true;
for var in &proxy_vars {
if let Some(value) = self.env_vars.get(*var) {
// Check if pointing to invalid proxy (network isolation)
if !value.contains("127.0.0.1:1") {
isolated = false;
break;
}
} else {
// No proxy set means network is not isolated
isolated = false;
break;
}
}
Ok(isolated)
}
/// Get a summary of isolation status
///
/// Returns a map of isolation checks and their results. This should be used for debugging
/// and verifying that the build environment is properly isolated.
#[must_use]
pub fn isolation_summary(&self) -> std::collections::HashMap<String, String> {
let mut summary = std::collections::HashMap::new();
// Check basic isolation
summary.insert(
"basic_isolation".to_string(),
self.verify_basic_isolation()
.map_or_else(|e| format!("FAILED: {e}"), |()| "OK".to_string()),
);
// Check environment sanitization
summary.insert(
"env_sanitization".to_string(),
self.verify_environment_sanitization()
.map_or_else(|e| format!("FAILED: {e}"), |()| "OK".to_string()),
);
// Check path isolation
summary.insert(
"path_isolation".to_string(),
self.verify_path_isolation()
.map_or_else(|e| format!("FAILED: {e}"), |()| "OK".to_string()),
);
// Check network isolation
summary.insert(
"network_isolation".to_string(),
self.verify_network_isolation().map_or_else(
|e| format!("ERROR: {e}"),
|isolated| {
if isolated {
"ENABLED".to_string()
} else {
"DISABLED".to_string()
}
},
),
);
// Add key paths
summary.insert(
"build_prefix".to_string(),
self.build_prefix.display().to_string(),
);
summary.insert(
"staging_dir".to_string(),
self.staging_dir.display().to_string(),
);
summary
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/environment/execution.rs | crates/builder/src/environment/execution.rs | //! Command execution in isolated environment
use super::{core::BuildEnvironment, types::BuildCommandResult};
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, BuildDiagnostic, BuildEvent, EventEmitter, LogStream};
use sps2_platform::{PlatformContext, PlatformManager};
use std::collections::HashMap;
use std::path::Path;
use uuid::Uuid;
impl BuildEnvironment {
    /// Convert command arguments to strings (no placeholder replacement needed)
    fn convert_args_to_strings(args: &[&str]) -> Vec<String> {
        args.iter().map(|arg| (*arg).to_string()).collect()
    }

    /// Get environment variables for execution (no placeholder replacement needed)
    fn get_execution_env(&self) -> HashMap<String, String> {
        self.env_vars.clone()
    }

    /// Execute a command in the build environment using the environment stored on the struct.
    ///
    /// Convenience wrapper over [`Self::execute_command_with_env`] with the
    /// struct's own env vars and strict (non-zero exit = error) handling.
    ///
    /// # Errors
    ///
    /// Returns an error if the command fails to spawn or exits with non-zero status.
    pub async fn execute_command(
        &self,
        program: &str,
        args: &[&str],
        working_dir: Option<&Path>,
    ) -> Result<BuildCommandResult, Error> {
        // Delegate to unified executor with the current environment and strict failure handling
        let env = self.get_execution_env();
        self.execute_command_with_env(program, args, working_dir, &env, false)
            .await
    }

    /// Execute a command with an explicit environment and optional allow-failure behavior.
    /// If `allow_failure` is true, this returns Ok(BuildCommandResult) even on non-zero exit codes.
    ///
    /// Captured stdout/stderr are re-emitted as `LogChunk` diagnostic events
    /// (stderr shares the stdout's command id when both are present), and
    /// any "libtool --finish" reminders in the output are acted upon via
    /// [`Self::handle_libtool_finish`] before returning.
    ///
    /// # Errors
    ///
    /// Returns an error if the command fails to spawn. When `allow_failure` is false,
    /// returns an error for non-zero exit status as well.
    pub async fn execute_command_with_env(
        &self,
        program: &str,
        args: &[&str],
        working_dir: Option<&Path>,
        env: &HashMap<String, String>,
        allow_failure: bool,
    ) -> Result<BuildCommandResult, Error> {
        // Use platform abstraction for process execution
        let platform = PlatformManager::instance().platform();
        let context = PlatformContext::new(self.context.event_sender.clone());
        let mut cmd = platform.process().create_command(program);
        // Convert argument slices to owned strings for the command builder
        let converted_args = Self::convert_args_to_strings(args);
        cmd.args(&converted_args);
        // Apply explicit environment
        cmd.envs(env);
        // Default to the build prefix when no working directory is given
        if let Some(dir) = working_dir {
            cmd.current_dir(dir);
        } else {
            cmd.current_dir(&self.build_prefix);
        }
        // Send command info event to show what's running
        self.emit_debug_with_context(
            format!("Executing: {program} {}", converted_args.join(" ")),
            std::collections::HashMap::from([(
                "working_dir".to_string(),
                working_dir.map_or_else(
                    || self.build_prefix.display().to_string(),
                    |p| p.display().to_string(),
                ),
            )]),
        );
        let output = platform
            .process()
            .execute_command(&context, cmd)
            .await
            .map_err(|e| BuildError::CompileFailed {
                message: format!("{program}: {e}"),
            })?;
        // Decode captured output lossily (build tools may emit non-UTF-8 bytes)
        let stdout_lines: Vec<String> = String::from_utf8_lossy(&output.stdout)
            .lines()
            .map(std::string::ToString::to_string)
            .collect();
        let stderr_lines: Vec<String> = String::from_utf8_lossy(&output.stderr)
            .lines()
            .map(std::string::ToString::to_string)
            .collect();
        let stdout_text = stdout_lines.join("\n");
        let stderr_text = stderr_lines.join("\n");
        // Forward output as diagnostic log events; when both streams have
        // content they share one command id so consumers can correlate them.
        if !stdout_text.is_empty() {
            let session_id = self.context.session_id();
            let command_id = Uuid::new_v4().to_string();
            self.emit(AppEvent::Build(BuildEvent::Diagnostic(
                BuildDiagnostic::LogChunk {
                    session_id: session_id.clone(),
                    command_id: Some(command_id.clone()),
                    stream: LogStream::Stdout,
                    text: stdout_text.clone(),
                },
            )));
            if !stderr_text.is_empty() {
                self.emit(AppEvent::Build(BuildEvent::Diagnostic(
                    BuildDiagnostic::LogChunk {
                        session_id,
                        command_id: Some(command_id),
                        stream: LogStream::Stderr,
                        text: stderr_text.clone(),
                    },
                )));
            }
        } else if !stderr_text.is_empty() {
            self.emit(AppEvent::Build(BuildEvent::Diagnostic(
                BuildDiagnostic::LogChunk {
                    session_id: self.context.session_id(),
                    command_id: Some(Uuid::new_v4().to_string()),
                    stream: LogStream::Stderr,
                    text: stderr_text.clone(),
                },
            )));
        }
        let result = BuildCommandResult {
            success: output.status.success(),
            exit_code: output.status.code(),
            stdout: stdout_text,
            stderr: stderr_text,
        };
        if !result.success && !allow_failure {
            return Err(BuildError::CompileFailed {
                message: format!(
                    "{program} {} failed with exit code {:?}: {}",
                    args.join(" "),
                    result.exit_code,
                    result.stderr
                ),
            }
            .into());
        }
        // Handle any libtool --finish requirements
        self.handle_libtool_finish(&result).await?;
        Ok(result)
    }

    /// Check if libtool --finish needs to be run based on command output
    ///
    /// Scans combined stdout/stderr for libtool's
    /// "remember to run `libtool --finish DIR'" warning and returns the
    /// deduplicated set of directories it mentions.
    fn check_libtool_finish_needed(result: &BuildCommandResult) -> Vec<String> {
        use std::collections::HashSet;
        let mut dirs = HashSet::new();
        // Check both stdout and stderr for the libtool warning
        let combined_output = format!("{}\n{}", result.stdout, result.stderr);
        // Look for the pattern: "remember to run `libtool --finish /path/to/lib'"
        for line in combined_output.lines() {
            if line.contains("remember to run") && line.contains("libtool --finish") {
                // Extract the directory path from the message
                // Pattern: "warning: remember to run `libtool --finish /opt/pm/live/lib'"
                if let Some(start) = line.find("libtool --finish") {
                    let remainder = &line[start + "libtool --finish".len()..];
                    // Find the directory path (everything up to the closing quote or end of line)
                    let dir_end = remainder
                        .find('\'')
                        .or_else(|| remainder.find('"'))
                        .unwrap_or(remainder.len());
                    let dir_path = remainder[..dir_end].trim();
                    if !dir_path.is_empty() {
                        dirs.insert(dir_path.to_string());
                    }
                }
            }
        }
        dirs.into_iter().collect()
    }

    /// Execute libtool --finish for the given directory
    ///
    /// Prefers the libtool binary in the fixed sps2 bin dir, falling back
    /// to whatever `libtool` resolves to on PATH.
    async fn execute_libtool_finish(&self, dir: &str) -> Result<BuildCommandResult, Error> {
        // Use GNU libtool from fixed bin dir if it exists, otherwise try system libtool
        let libtool_candidate =
            std::path::Path::new(sps2_config::fixed_paths::BIN_DIR).join("libtool");
        let libtool_path = if libtool_candidate.exists() {
            libtool_candidate.display().to_string()
        } else {
            "libtool".to_string()
        };
        // Use platform abstraction for process execution
        let platform = PlatformManager::instance().platform();
        let context = PlatformContext::new(self.context.event_sender.clone());
        let mut cmd = platform.process().create_command(&libtool_path);
        cmd.args(["--finish", dir]);
        cmd.envs(&self.env_vars);
        cmd.current_dir(&self.build_prefix);
        let output = platform
            .process()
            .execute_command(&context, cmd)
            .await
            .map_err(|e| BuildError::CompileFailed {
                message: format!("Failed to run libtool --finish: {e}"),
            })?;
        let stdout_lines: Vec<String> = String::from_utf8_lossy(&output.stdout)
            .lines()
            .map(std::string::ToString::to_string)
            .collect();
        let stderr_lines: Vec<String> = String::from_utf8_lossy(&output.stderr)
            .lines()
            .map(std::string::ToString::to_string)
            .collect();
        Ok(BuildCommandResult {
            success: output.status.success(),
            exit_code: output.status.code(),
            stdout: stdout_lines.join("\n"),
            stderr: stderr_lines.join("\n"),
        })
    }

    /// Handle libtool --finish requirements from command output
    ///
    /// Best-effort: a failing `libtool --finish` run only emits a warning
    /// event instead of aborting the build.
    async fn handle_libtool_finish(&self, result: &BuildCommandResult) -> Result<(), Error> {
        let libtool_dirs = Self::check_libtool_finish_needed(result);
        if !libtool_dirs.is_empty() {
            for dir in &libtool_dirs {
                self.emit_debug(format!("Running libtool --finish {dir}"));
                // Run libtool --finish for this directory
                let finish_result = self.execute_libtool_finish(dir).await?;
                if !finish_result.success {
                    self.emit_warning_with_context(
                        format!("libtool --finish {dir} failed"),
                        finish_result.stderr,
                    );
                }
            }
        }
        Ok(())
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/security/path_resolver.rs | crates/builder/src/security/path_resolver.rs | //! Path resolution and normalization for security validation
use sps2_errors::{BuildError, Error};
use std::collections::{HashMap, HashSet};
use std::path::{Component, Path, PathBuf};
/// Normalize a path by resolving .., ., and symlinks
pub fn normalize_path(
    path: &Path,
    cache: &HashMap<PathBuf, PathBuf>,
    build_root: &Path,
    current_dir: &Path,
) -> Result<PathBuf, Error> {
    // Fast path: serve previously normalized paths from the cache.
    if let Some(hit) = cache.get(path) {
        return Ok(hit.clone());
    }

    // Anchor relative paths at the caller's current directory.
    let anchored = if path.is_absolute() {
        path.to_path_buf()
    } else {
        current_dir.join(path)
    };

    // Collapse `.` and `..` lexically, tracking how many normal
    // components remain poppable before `..` would escape the root.
    let mut out = PathBuf::new();
    let mut poppable = 0usize;
    for part in anchored.components() {
        match part {
            Component::Prefix(_) | Component::RootDir => {
                out.push(part);
                poppable = 0;
            }
            Component::CurDir => {
                // `.` contributes nothing.
            }
            Component::ParentDir => {
                if poppable > 0 {
                    out.pop();
                    poppable -= 1;
                } else if !out.as_os_str().is_empty() {
                    // `..` with nothing left to pop: escape attempt.
                    return Err(BuildError::PathTraversalAttempt {
                        path: path.display().to_string(),
                        reason: "Too many .. components".to_string(),
                    }
                    .into());
                }
            }
            Component::Normal(seg) => {
                out.push(seg);
                poppable += 1;
            }
        }
    }

    // Finally chase any symlinks, with loop detection.
    resolve_symlinks_safe(&out, build_root)
}
/// Safely resolve symlinks with loop detection
///
/// Repeatedly follows `path` while it names a symlink, giving up after
/// `MAX_SYMLINK_DEPTH` hops. Each resolved target is lexically normalized
/// and checked against `build_root` so a link chain cannot escape the
/// sandbox. Paths that do not exist yet are returned unchanged — they may
/// legitimately be created later by a write operation.
fn resolve_symlinks_safe(path: &Path, build_root: &Path) -> Result<PathBuf, Error> {
    const MAX_SYMLINK_DEPTH: usize = 10;
    let mut visited = HashSet::new();
    let mut current = path.to_path_buf();
    let mut iterations = 0;
    while iterations < MAX_SYMLINK_DEPTH {
        // Revisiting a path means the symlink chain cycles.
        if !visited.insert(current.clone()) {
            return Err(BuildError::SymlinkLoop {
                path: current.display().to_string(),
            }
            .into());
        }
        // Only resolve symlinks if the path exists
        if current.exists() {
            match std::fs::read_link(&current) {
                Ok(target) => {
                    // Relative link targets resolve against the link's parent
                    // directory; absolute targets replace the path outright.
                    current = if target.is_absolute() {
                        target
                    } else {
                        current
                            .parent()
                            .ok_or_else(|| BuildError::InvalidPath {
                                path: current.display().to_string(),
                                reason: "No parent directory".to_string(),
                            })?
                            .join(target)
                    };
                    // Normalize the new path
                    current = simple_normalize(&current)?;
                    // Check if symlink is trying to escape build root
                    if !is_path_safe(&current, build_root) {
                        return Err(BuildError::PathEscapeAttempt {
                            path: path.display().to_string(),
                            resolved: current.display().to_string(),
                            build_root: build_root.display().to_string(),
                        }
                        .into());
                    }
                    iterations += 1;
                }
                Err(_) => {
                    // Not a symlink, that's fine
                    break;
                }
            }
        } else {
            // Path doesn't exist yet, that's OK for write operations
            break;
        }
    }
    // Exhausting the hop budget without settling is treated as malicious.
    if iterations >= MAX_SYMLINK_DEPTH {
        return Err(BuildError::TooManySymlinks {
            path: path.display().to_string(),
        }
        .into());
    }
    Ok(current)
}
/// Simple path normalization without symlink resolution
fn simple_normalize(path: &Path) -> Result<PathBuf, Error> {
    let mut result = PathBuf::new();
    // Number of Normal components currently in `result` that `..` may remove.
    let mut removable = 0usize;

    for comp in path.components() {
        match comp {
            Component::Prefix(_) | Component::RootDir => {
                result.push(comp);
                removable = 0;
            }
            Component::CurDir => {
                // `.` is a no-op.
            }
            Component::ParentDir => {
                if removable == 0 {
                    // `..` at (or above) the root: the path escapes upward.
                    return Err(BuildError::PathTraversalAttempt {
                        path: path.display().to_string(),
                        reason: "Path goes above root".to_string(),
                    }
                    .into());
                }
                result.pop();
                removable -= 1;
            }
            Component::Normal(seg) => {
                result.push(seg);
                removable += 1;
            }
        }
    }

    Ok(result)
}
/// Check if a path is safe (doesn't escape build environment)
fn is_path_safe(path: &Path, build_root: &Path) -> bool {
    // Read-only system locations any build may reference.
    const SAFE_SYSTEM_PREFIXES: &[&str] = &[
        "/usr/include",
        "/usr/lib",
        "/usr/local/include",
        "/usr/local/lib",
        "/usr/bin",
        "/usr/local/bin",
        "/bin",
        "/opt/pm/live",
    ];

    // Anything under the build root is allowed outright; otherwise the
    // path must live under one of the whitelisted system prefixes.
    path.starts_with(build_root)
        || SAFE_SYSTEM_PREFIXES
            .iter()
            .any(|prefix| path.starts_with(prefix))
}
#[cfg(test)]
mod tests {
    use super::*;

    // Lexical normalization of `.`/`..` for absolute in-tree paths.
    #[test]
    fn test_normalize_simple_paths() {
        let cache = HashMap::new();
        let build_root = Path::new("/opt/pm/build/test");
        let current_dir = Path::new("/opt/pm/build/test/src");
        // Simple absolute path
        let result = normalize_path(
            Path::new("/opt/pm/build/test/src"),
            &cache,
            build_root,
            current_dir,
        );
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), PathBuf::from("/opt/pm/build/test/src"));
        // Path with .
        let result = normalize_path(
            Path::new("/opt/pm/build/test/./src"),
            &cache,
            build_root,
            current_dir,
        );
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), PathBuf::from("/opt/pm/build/test/src"));
        // Path with ..
        let result = normalize_path(
            Path::new("/opt/pm/build/test/src/../lib"),
            &cache,
            build_root,
            current_dir,
        );
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), PathBuf::from("/opt/pm/build/test/lib"));
    }

    // `..` runs that stay within the filesystem root normalize cleanly;
    // escape *detection* against the build root happens elsewhere
    // (symlink resolution / is_path_safe), not in normalize_path itself.
    #[test]
    fn test_path_traversal_detection() {
        let cache = HashMap::new();
        let build_root = Path::new("/opt/pm/build/test");
        let current_dir = Path::new("/opt/pm/build/test/src");
        // Relative path resolving outside build root should normalize (not above filesystem root)
        let result = normalize_path(
            Path::new("../../../../etc/passwd"),
            &cache,
            build_root,
            current_dir,
        );
        assert!(result.is_ok());
    }

    // Whitelisted system prefixes are readable; everything else outside
    // the build root is rejected.
    #[test]
    fn test_safe_system_paths() {
        let build_root = Path::new("/opt/pm/build/test");
        // Safe system paths
        assert!(is_path_safe(Path::new("/usr/include/stdio.h"), build_root));
        assert!(is_path_safe(Path::new("/usr/lib/libc.so"), build_root));
        assert!(is_path_safe(Path::new("/opt/pm/live/bin/gcc"), build_root));
        // Unsafe system paths
        assert!(!is_path_safe(Path::new("/etc/passwd"), build_root));
        assert!(!is_path_safe(
            Path::new("/home/user/.ssh/id_rsa"),
            build_root
        ));
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/security/parser.rs | crates/builder/src/security/parser.rs | //! Command parser integration with security context
use super::context::{
CommandEffect, ParsedCommand, PathAccessType, SecurityContext, ValidatedExecution,
};
use crate::validation::parser::{tokenize_shell, Token};
use sps2_errors::{BuildError, Error};
use std::path::PathBuf;
/// Parse command and determine its effects and path accesses
pub fn parse_command_with_context(
    command: &str,
    context: &SecurityContext,
) -> Result<ValidatedExecution, Error> {
    // Expand variable references before any analysis.
    let expanded = context.expand_variables(command);
    // Tokenize the expanded command line.
    let tokens = tokenize_shell(&expanded);

    let mut effect = CommandEffect::None;
    let mut accessed_paths = Vec::new();

    // Walk the token stream, analyzing one command at a time.
    let mut cursor = 0;
    while cursor < tokens.len() {
        let Token::Command(cmd) = &tokens[cursor] else {
            // Operators and stray tokens between commands are skipped.
            cursor += 1;
            continue;
        };

        // A command's tokens run until the next shell operator (or the end).
        let end = tokens[cursor + 1..]
            .iter()
            .position(|t| matches!(t, Token::Operator(_)))
            .map_or(tokens.len(), |offset| cursor + 1 + offset);

        // Analyze this command in isolation.
        let mut cmd_effect = CommandEffect::None;
        let mut cmd_paths = Vec::new();
        process_command(
            cmd,
            &tokens[cursor..end],
            context,
            &mut cmd_effect,
            &mut cmd_paths,
        )?;

        // Non-trivial effects override earlier ones (last `cd` wins, etc.);
        // path accesses accumulate across all commands.
        if !matches!(&cmd_effect, CommandEffect::None) {
            effect = cmd_effect;
        }
        accessed_paths.extend(cmd_paths);

        cursor = end;
    }

    Ok(ValidatedExecution {
        original: command.to_string(),
        expanded,
        parsed: ParsedCommand { tokens },
        effect,
        accessed_paths,
    })
}
/// Process a specific command and determine its effects
///
/// Dispatches on the command name: shell built-ins (`cd`, `export`, …)
/// produce a `CommandEffect`, file-manipulating tools have their operands
/// validated and recorded in `accessed_paths`, and unknown commands fall
/// back to a conservative scan of path-like arguments.
fn process_command(
    cmd: &str,
    tokens: &[Token],
    context: &SecurityContext,
    effect: &mut CommandEffect,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    match cmd {
        // Directory change commands
        "cd" => {
            if let Some(Token::Argument(path)) = tokens.get(1) {
                let resolved = context.validate_path_access(path, PathAccessType::Read)?;
                *effect = CommandEffect::ChangeDirectory(resolved.clone());
                accessed_paths.push((resolved, PathAccessType::Read));
            } else {
                // cd with no args goes to home - block this in build context
                return Err(BuildError::DangerousCommand {
                    command: "cd".to_string(),
                    reason: "cd without arguments not allowed in build context".to_string(),
                }
                .into());
            }
        }
        "pushd" => {
            if let Some(Token::Argument(path)) = tokens.get(1) {
                let resolved = context.validate_path_access(path, PathAccessType::Read)?;
                *effect = CommandEffect::PushDirectory(resolved.clone());
                accessed_paths.push((resolved, PathAccessType::Read));
            } else {
                return Err(BuildError::DangerousCommand {
                    command: "pushd".to_string(),
                    reason: "pushd without arguments not allowed".to_string(),
                }
                .into());
            }
        }
        "popd" => {
            *effect = CommandEffect::PopDirectory;
        }
        // Variable manipulation
        "export" => {
            // Only `export NAME=value` is recognized; a bare `export NAME`
            // (no '=') produces no effect.
            if let Some(Token::Argument(assignment)) = tokens.get(1) {
                if let Some((name, value)) = assignment.split_once('=') {
                    // Check for dangerous variables
                    if is_dangerous_variable(name) {
                        return Err(BuildError::DangerousCommand {
                            command: format!("export {name}"),
                            reason: format!("Setting {name} is not allowed"),
                        }
                        .into());
                    }
                    *effect = CommandEffect::SetVariable(name.to_string(), value.to_string());
                }
            }
        }
        "unset" => {
            if let Some(Token::Argument(name)) = tokens.get(1) {
                *effect = CommandEffect::UnsetVariable(name.to_string());
            }
        }
        // File operations - validate all paths
        "cp" | "mv" => {
            process_source_dest_command(tokens, context, accessed_paths)?;
        }
        "rm" | "rmdir" => {
            process_delete_command(tokens, context, accessed_paths)?;
        }
        "mkdir" | "touch" => {
            process_create_command(tokens, context, accessed_paths)?;
        }
        "cat" | "head" | "tail" | "less" | "more" | "grep" | "sed" | "awk" => {
            // Read-only tools: failures to validate are tolerated, so this
            // helper is infallible.
            process_read_command(tokens, context, accessed_paths);
        }
        "chmod" | "chown" | "chgrp" => {
            process_metadata_command(tokens, context, accessed_paths)?;
        }
        "ln" => {
            process_link_command(tokens, context, accessed_paths)?;
        }
        "find" => {
            process_find_command(tokens, context, accessed_paths)?;
        }
        // Archive operations
        "tar" | "zip" | "unzip" => {
            process_archive_command(cmd, tokens, context, accessed_paths)?;
        }
        // Build tools - generally safe but check paths
        "make" | "cmake" | "gcc" | "clang" | "cc" | "c++" => {
            process_build_tool_command(tokens, context, accessed_paths)?;
        }
        // Install command
        "install" => {
            process_install_command(tokens, context, accessed_paths)?;
        }
        // Direct execution of a script/binary by path requires execute access
        _ if cmd.starts_with("./") || cmd.starts_with('/') => {
            let resolved = context.validate_path_access(cmd, PathAccessType::Execute)?;
            accessed_paths.push((resolved, PathAccessType::Execute));
            // Also check arguments for paths
            check_arguments_for_paths(tokens, context, accessed_paths);
        }
        // Other commands - scan arguments for paths
        _ => {
            check_arguments_for_paths(tokens, context, accessed_paths);
        }
    }
    Ok(())
}
/// Process commands that have source and destination paths
fn process_source_dest_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // Collect positional operands (everything that is not a `-flag`).
    let operands: Vec<&str> = tokens
        .iter()
        .skip(1)
        .filter_map(|t| match t {
            Token::Argument(arg) if !arg.starts_with('-') => Some(arg.as_str()),
            _ => None,
        })
        .collect();

    // For `cp`/`mv` the final operand is the destination (write access);
    // all earlier operands are sources (read access).
    let count = operands.len();
    for (idx, operand) in operands.into_iter().enumerate() {
        let access = if idx + 1 == count {
            PathAccessType::Write
        } else {
            PathAccessType::Read
        };
        let resolved = context.validate_path_access(operand, access)?;
        accessed_paths.push((resolved, access));
    }
    Ok(())
}
/// Process deletion commands
fn process_delete_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // Each non-flag operand of `rm`/`rmdir` names a path being removed,
    // which requires (and records) write access.
    for token in tokens.iter().skip(1) {
        let Token::Argument(arg) = token else { continue };
        if arg.starts_with('-') {
            continue;
        }
        let resolved = context.validate_path_access(arg, PathAccessType::Write)?;
        accessed_paths.push((resolved, PathAccessType::Write));
    }
    Ok(())
}
/// Process creation commands
fn process_create_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // `mkdir`/`touch` create their operands, so every non-flag argument
    // must pass write validation.
    let targets = tokens.iter().skip(1).filter_map(|t| match t {
        Token::Argument(arg) if !arg.starts_with('-') => Some(arg),
        _ => None,
    });
    for target in targets {
        let resolved = context.validate_path_access(target, PathAccessType::Write)?;
        accessed_paths.push((resolved, PathAccessType::Write));
    }
    Ok(())
}
/// Process read-only commands
fn process_read_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) {
    // Read-only tools (`cat`, `grep`, …): record read access for each
    // argument that plausibly names a path. Validation failures are
    // silently skipped because these commands cannot modify anything.
    let candidates = tokens.iter().skip(1).filter_map(|t| match t {
        Token::Argument(arg) if !arg.starts_with('-') && looks_like_path(arg) => Some(arg),
        _ => None,
    });
    for candidate in candidates {
        if let Ok(resolved) = context.validate_path_access(candidate, PathAccessType::Read) {
            accessed_paths.push((resolved, PathAccessType::Read));
        }
    }
}
/// Process metadata modification commands
///
/// Handles `chmod`/`chown`/`chgrp`: every operand that is not a flag,
/// a numeric mode, or a `user:group` spec is validated for write access
/// (metadata changes count as writes).
fn process_metadata_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    let mut skip_next = false;
    for token in tokens.iter().skip(1) {
        if skip_next {
            skip_next = false;
            continue;
        }
        if let Token::Argument(arg) = token {
            if arg.starts_with('-') {
                // Some options take a value
                // NOTE(review): this treats every flag except `-R`/`-h` as
                // consuming the following token; a value-less flag such as
                // `-v` would cause the next path operand to be skipped —
                // confirm against the flag set actually supported.
                if arg == "-R" || arg == "-h" {
                    continue;
                }
                skip_next = true;
            } else if !arg.chars().all(|c| c.is_numeric() || c == ':') {
                // Not a permission mode or user:group spec
                // NOTE(review): symbolic chmod modes like `u+x` are neither
                // numeric nor `:`-separated, so they would be validated as
                // paths here — verify whether symbolic modes are expected.
                let resolved = context.validate_path_access(arg, PathAccessType::Write)?;
                accessed_paths.push((resolved, PathAccessType::Write));
            }
        }
    }
    Ok(())
}
/// Process ln command
fn process_link_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // Positional operands of `ln`, with flags stripped.
    let mut operands = tokens.iter().skip(1).filter_map(|t| match t {
        Token::Argument(arg) if !arg.starts_with('-') => Some(arg.as_str()),
        _ => None,
    });

    // `ln [-s] TARGET LINK_NAME`: the target is read, the link written.
    // Fewer than two operands: nothing to validate.
    if let (Some(target), Some(link_name)) = (operands.next(), operands.next()) {
        let source = context.validate_path_access(target, PathAccessType::Read)?;
        accessed_paths.push((source, PathAccessType::Read));
        let dest = context.validate_path_access(link_name, PathAccessType::Write)?;
        accessed_paths.push((dest, PathAccessType::Write));
    }
    Ok(())
}
/// Process find command
fn process_find_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // Only the first non-flag argument of `find` is treated as the search
    // root; subsequent expression arguments are not validated as paths.
    let search_root = tokens.iter().skip(1).find_map(|t| match t {
        Token::Argument(arg) if !arg.starts_with('-') => Some(arg),
        _ => None,
    });
    if let Some(root) = search_root {
        let resolved = context.validate_path_access(root, PathAccessType::Read)?;
        accessed_paths.push((resolved, PathAccessType::Read));
    }
    Ok(())
}
/// Process archive commands
///
/// For `tar`, determines whether the invocation creates or reads an
/// archive, locates the archive file (after an `f` flag, `--file`, or
/// `--file=PATH`) and validates it with the matching access type; all
/// other non-flag operands get read validation. Other archive tools fall
/// back to a conservative scan of path-like arguments.
fn process_archive_command(
    cmd: &str,
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    match cmd {
        "tar" => {
            // Collect just the arguments (skip the command token itself).
            let args: Vec<&str> = tokens
                .iter()
                .skip(1)
                .filter_map(|t| match t {
                    Token::Argument(a) => Some(a.as_str()),
                    _ => None,
                })
                .collect();

            // An argument is a short-option cluster when it starts with a
            // single `-` (e.g. `-czf`) or, old tar style, when it is the
            // first operand with no dash at all (e.g. `tar xzf …`).
            // Checking single letters only inside clusters fixes the old
            // behavior of `contains('c')`/`contains('f')` on *every*
            // argument, which misread `--extract` as creating (it contains
            // a `c`) and a file operand like `config.h` as an `f` flag,
            // silently skipping validation of the following argument.
            let is_cluster = |idx: usize, a: &str| {
                (a.starts_with('-') && !a.starts_with("--")) || (idx == 0 && !a.starts_with('-'))
            };

            let is_creating = args
                .iter()
                .enumerate()
                .any(|(i, a)| *a == "--create" || (is_cluster(i, a) && a.contains('c')));
            let archive_access = if is_creating {
                PathAccessType::Write
            } else {
                PathAccessType::Read
            };

            let mut next_is_archive = false;
            for (i, arg) in args.iter().enumerate() {
                if next_is_archive {
                    // Value of a preceding `f` flag: the archive itself.
                    let resolved = context.validate_path_access(arg, archive_access)?;
                    accessed_paths.push((resolved, archive_access));
                    next_is_archive = false;
                } else if let Some(path) = arg.strip_prefix("--file=") {
                    // `--file=archive.tar` carries the archive inline.
                    let resolved = context.validate_path_access(path, archive_access)?;
                    accessed_paths.push((resolved, archive_access));
                } else if *arg == "--file" || (is_cluster(i, arg) && arg.contains('f')) {
                    // The next argument names the archive file.
                    next_is_archive = true;
                } else if !arg.starts_with('-') && i != 0 {
                    // Remaining operands are members being archived/extracted.
                    let resolved = context.validate_path_access(arg, PathAccessType::Read)?;
                    accessed_paths.push((resolved, PathAccessType::Read));
                }
            }
        }
        _ => {
            // For other archive tools, check all path-like arguments.
            check_arguments_for_paths(tokens, context, accessed_paths);
        }
    }
    Ok(())
}
/// Process build tool commands
///
/// Records the compiler output target (`-ofile` or `-o file`) as a write
/// access and any argument that looks like a source file as a read access.
/// Unresolvable source paths are skipped — the tool itself will report
/// missing files.
fn process_build_tool_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // Set when a bare `-o` was seen, meaning the *next* token is the output.
    let mut output_next = false;
    for token in tokens.iter().skip(1) {
        let Token::Argument(arg) = token else { continue };

        if output_next {
            // Value of a separated `-o <file>`: the compiler writes here.
            output_next = false;
            let resolved = context.validate_path_access(arg, PathAccessType::Write)?;
            accessed_paths.push((resolved, PathAccessType::Write));
        } else if let Some(inline) = arg.strip_prefix("-o") {
            if inline.is_empty() {
                // `-o file` form: remember to treat the next argument as output.
                // (The previous implementation did `tokens.get(tokens.len())`,
                // which is always `None`, so this form was silently ignored.)
                output_next = true;
            } else {
                // `-ofile` form: the output path is attached to the flag.
                let resolved = context.validate_path_access(inline, PathAccessType::Write)?;
                accessed_paths.push((resolved, PathAccessType::Write));
            }
        } else if looks_like_source_file(arg) {
            if let Ok(resolved) = context.validate_path_access(arg, PathAccessType::Read) {
                accessed_paths.push((resolved, PathAccessType::Read));
            }
        }
    }
    Ok(())
}
/// Process install command
///
/// `install -d DIR...` creates directories (all paths are writes); the copy
/// form `install [-m MODE] SRC... DEST` records sources as reads and the
/// final path as a write.
///
/// # Errors
///
/// Returns an error if path validation fails for any accessed path.
fn process_install_command(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) -> Result<(), Error> {
    // `-d` anywhere switches install into directory-creation mode.
    let dir_mode = tokens
        .iter()
        .any(|t| matches!(t, Token::Argument(arg) if arg == "-d"));
    // Collect positional paths, dropping option flags and the value that
    // follows a `-m MODE` option.
    let mut paths: Vec<&str> = Vec::new();
    let mut mode_next = false;
    for token in tokens.iter().skip(1) {
        if let Token::Argument(arg) = token {
            if arg == "-m" {
                mode_next = true;
            } else if arg.starts_with('-') {
                // Other flags carry no path and do not consume mode_next.
            } else if mode_next {
                // This token is the mode value ("755"), not a path.
                mode_next = false;
            } else {
                paths.push(arg.as_str());
            }
        }
    }
    if dir_mode {
        // All arguments are directories to create.
        for path in paths {
            let resolved = context.validate_path_access(path, PathAccessType::Write)?;
            accessed_paths.push((resolved, PathAccessType::Write));
        }
    } else if let Some((dest, sources)) = paths.split_last() {
        // Need at least one source plus the destination.
        if !sources.is_empty() {
            for src in sources {
                let resolved = context.validate_path_access(src, PathAccessType::Read)?;
                accessed_paths.push((resolved, PathAccessType::Read));
            }
            let resolved = context.validate_path_access(dest, PathAccessType::Write)?;
            accessed_paths.push((resolved, PathAccessType::Write));
        }
    }
    Ok(())
}
/// Check arguments for paths and validate them
///
/// Generic fallback for commands without a dedicated handler: every
/// path-looking argument is recorded with conservative read access, and
/// validation failures are silently skipped.
fn check_arguments_for_paths(
    tokens: &[Token],
    context: &SecurityContext,
    accessed_paths: &mut Vec<(PathBuf, PathAccessType)>,
) {
    // Skip the command itself; inspect the remaining argument tokens.
    let args = tokens.iter().skip(1).filter_map(|token| match token {
        Token::Argument(arg) => Some(arg),
        _ => None,
    });
    for arg in args {
        if !looks_like_path(arg) {
            continue;
        }
        // Conservative: assume read access for unknown commands.
        if let Ok(resolved) = context.validate_path_access(arg, PathAccessType::Read) {
            accessed_paths.push((resolved, PathAccessType::Read));
        }
    }
}
/// Heuristic to detect if an argument looks like a path
///
/// Anything containing a slash (absolute, `./`, `../`, or nested) counts,
/// as do the bare directory references `.` and `..`.
fn looks_like_path(arg: &str) -> bool {
    // starts_with('/') / "./" / "../" all imply contains('/'), so a single
    // containment check plus the two bare dot forms covers every case.
    matches!(arg, "." | "..") || arg.contains('/')
}
/// Check if argument looks like a source file
///
/// Matches C/C++ sources and headers, assembly, and compiled artifacts
/// (objects, static/shared libraries) by file extension.
fn looks_like_source_file(arg: &str) -> bool {
    const SOURCE_EXTENSIONS: &[&str] = &[
        ".c", ".cc", ".cpp", ".cxx", ".h", ".hpp", ".hxx", ".s", ".S", ".asm", ".o", ".a", ".so",
        ".dylib",
    ];
    for ext in SOURCE_EXTENSIONS {
        if arg.ends_with(ext) {
            return true;
        }
    }
    false
}
/// Check if a variable name is dangerous to set
///
/// Covers loader-injection vectors (`LD_PRELOAD`, `DYLD_INSERT_LIBRARIES`,
/// library search paths) and identity/shell variables whose mutation could
/// redirect command resolution. Matching is exact and case-sensitive.
fn is_dangerous_variable(name: &str) -> bool {
    matches!(
        name,
        "PATH"
            | "LD_LIBRARY_PATH"
            | "DYLD_LIBRARY_PATH"
            | "DYLD_INSERT_LIBRARIES"
            | "LD_PRELOAD"
            | "HOME"
            | "USER"
            | "SHELL"
    )
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/security/mod.rs | crates/builder/src/security/mod.rs | //! Build security context and validation
//!
//! This module provides a comprehensive security framework for tracking and
//! validating all file system operations and command executions during builds.
mod context;
mod parser;
mod path_resolver;
pub use context::SecurityContext;
pub use parser::parse_command_with_context;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/security/context.rs | crates/builder/src/security/context.rs | //! Build security context that tracks execution state
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::fmt::Write;
use std::path::{Path, PathBuf};
/// Build security context that tracks execution state
///
/// Tracks the working directory, a pushd/popd directory stack, environment
/// variables, executed-command history, and cached path resolutions so that
/// every command a recipe runs can be validated against the build root.
#[derive(Debug, Clone)]
pub struct SecurityContext {
    /// Current working directory (absolute path)
    current_dir: PathBuf,
    /// Stack of directories for pushd/popd
    dir_stack: Vec<PathBuf>,
    /// Build root directory (e.g., /opt/pm/build/package-1.0)
    build_root: PathBuf,
    /// Environment variables including build variables
    environment: HashMap<String, String>,
    /// Command execution history for detecting patterns
    command_history: Vec<String>,
    /// Path resolution cache to detect symlink attacks
    resolved_paths: HashMap<PathBuf, PathBuf>,
}
impl SecurityContext {
/// Create a new security context for a build
#[must_use]
pub fn new(build_root: PathBuf, initial_vars: HashMap<String, String>) -> Self {
let mut environment = HashMap::new();
// Standard build variables
environment.insert("BUILD_ROOT".to_string(), build_root.display().to_string());
environment.insert("BUILD_DIR".to_string(), build_root.display().to_string());
environment.insert(
"DESTDIR".to_string(),
build_root.join("destdir").display().to_string(),
);
environment.insert(
"PREFIX".to_string(),
sps2_config::fixed_paths::LIVE_DIR.to_string(),
);
// Merge with provided variables
environment.extend(initial_vars);
Self {
current_dir: build_root.clone(),
dir_stack: Vec::new(),
build_root,
environment,
command_history: Vec::new(),
resolved_paths: HashMap::new(),
}
}
/// Execute a command with security validation
///
/// # Errors
///
/// Returns an error if:
/// - Command parsing fails
/// - Path validation fails for command arguments
/// - Security constraints are violated
pub fn execute_command(&mut self, command: &str) -> Result<ValidatedExecution, Error> {
// Record in history
self.command_history.push(command.to_string());
// Parse and validate
let parsed = super::parse_command_with_context(command, self)?;
// Update context based on command effects
self.apply_command_effects(&parsed);
Ok(parsed)
}
/// Validate a path access in the current context
///
/// # Errors
///
/// Returns an error if:
/// - Path resolution fails
/// - Path escapes build root (for unauthorized access)
/// - Write access attempted outside build root
/// - Execute access attempted on unsafe executable
pub fn validate_path_access(
&self,
path: &str,
access_type: PathAccessType,
) -> Result<PathBuf, Error> {
// Expand variables first
let expanded = self.expand_variables(path);
// Resolve to absolute path
let absolute = self.resolve_path(&expanded)?;
// Additional checks based on access type
match access_type {
PathAccessType::Read => {
// Check if within build root first
if self.is_within_build_root(&absolute)? {
return Ok(absolute);
}
// Some system paths are OK to read even outside build root
if Self::is_safe_system_read(&absolute) {
return Ok(absolute);
}
// Otherwise, it's a path escape attempt
Err(BuildError::PathEscapeAttempt {
path: path.to_string(),
resolved: absolute.display().to_string(),
build_root: self.build_root.display().to_string(),
}
.into())
}
PathAccessType::Write => {
// Only allow writes within build root
if self.is_within_build_root(&absolute)? {
return Ok(absolute);
}
Err(BuildError::DangerousWrite {
path: absolute.display().to_string(),
}
.into())
}
PathAccessType::Execute => {
// Check if within build root first
if self.is_within_build_root(&absolute)? {
return Ok(absolute);
}
// Allow execution of safe system tools
if self.is_safe_executable(&absolute) {
return Ok(absolute);
}
Err(BuildError::DangerousExecution {
path: absolute.display().to_string(),
}
.into())
}
}
}
    /// Expand all variables in a string
    ///
    /// Supports both `${VAR}` and `$VAR` forms, looking names up in this
    /// context's environment map. Unknown variables and malformed
    /// references are reproduced verbatim in the output rather than being
    /// dropped, so the caller sees exactly what could not be expanded.
    ///
    /// # Panics
    ///
    /// Panics if the internal `write!` macro fails when formatting variable references.
    /// This should not happen in practice as we're writing to a `String`.
    #[must_use]
    pub fn expand_variables(&self, input: &str) -> String {
        // Handle ${VAR} style - manually parse to avoid regex dependency
        let mut expanded = String::new();
        let mut chars = input.chars().peekable();
        while let Some(ch) = chars.next() {
            if ch == '$' && chars.peek() == Some(&'{') {
                chars.next(); // consume '{'
                // Accumulate everything up to the matching '}' as the name.
                let mut var_name = String::new();
                let mut found_closing = false;
                for ch in chars.by_ref() {
                    if ch == '}' {
                        found_closing = true;
                        break;
                    }
                    var_name.push(ch);
                }
                if found_closing {
                    if let Some(value) = self.environment.get(&var_name) {
                        expanded.push_str(value);
                    } else {
                        // Keep original if variable not found
                        write!(expanded, "${{{var_name}}}").unwrap();
                    }
                } else {
                    // Malformed variable: no closing '}' — emit the literal
                    // "${name" prefix that was consumed.
                    write!(expanded, "${{{var_name}").unwrap();
                }
            } else if ch == '$' && chars.peek().is_some_and(|c| c.is_alphabetic() || *c == '_') {
                // Handle $VAR style: the name must start with a letter or
                // '_' and may continue with alphanumerics and '_'.
                let mut var_name = String::new();
                while let Some(&next_ch) = chars.peek() {
                    if next_ch.is_alphanumeric() || next_ch == '_' {
                        var_name.push(chars.next().unwrap());
                    } else {
                        break;
                    }
                }
                if let Some(value) = self.environment.get(&var_name) {
                    expanded.push_str(value);
                } else {
                    // Keep original if variable not found
                    expanded.push('$');
                    expanded.push_str(&var_name);
                }
            } else {
                // Ordinary character (including a lone '$'): copy through.
                expanded.push(ch);
            }
        }
        expanded
    }
/// Resolve a path to absolute form in current context
///
/// # Errors
///
/// Returns an error if:
/// - Path normalization fails
/// - Symlink resolution fails
/// - Path contains invalid components
pub fn resolve_path(&self, path: &str) -> Result<PathBuf, Error> {
let path = Path::new(path);
let absolute = if path.is_absolute() {
path.to_path_buf()
} else {
self.current_dir.join(path)
};
// Normalize the path (resolve .., ., etc)
super::path_resolver::normalize_path(
&absolute,
&self.resolved_paths,
&self.build_root,
&self.current_dir,
)
}
/// Apply command side effects to context
fn apply_command_effects(&mut self, exec: &ValidatedExecution) {
match &exec.effect {
CommandEffect::ChangeDirectory(new_dir) => {
self.current_dir.clone_from(new_dir);
}
CommandEffect::PushDirectory(new_dir) => {
self.dir_stack.push(self.current_dir.clone());
self.current_dir.clone_from(new_dir);
}
CommandEffect::PopDirectory => {
if let Some(prev_dir) = self.dir_stack.pop() {
self.current_dir = prev_dir;
}
}
CommandEffect::SetVariable(name, value) => {
self.environment.insert(name.clone(), value.clone());
}
CommandEffect::UnsetVariable(name) => {
self.environment.remove(name);
}
CommandEffect::None => {}
}
// No error can occur here
}
/// Check if a path is within the build root
pub(crate) fn is_within_build_root(&self, path: &Path) -> Result<bool, Error> {
// Normalize both paths for comparison
let normalized_path = super::path_resolver::normalize_path(
path,
&self.resolved_paths,
&self.build_root,
&self.current_dir,
)?;
// Check if path starts with build root
Ok(normalized_path.starts_with(&self.build_root))
}
/// Check if a system path is safe to read
fn is_safe_system_read(path: &Path) -> bool {
// Allow reading from these system locations
const SAFE_READ_PREFIXES: &[&str] = &[
"/usr/include",
"/usr/lib",
"/usr/local/include",
"/usr/local/lib",
sps2_config::fixed_paths::LIVE_DIR, // When accessing installed packages
];
SAFE_READ_PREFIXES
.iter()
.any(|prefix| path.starts_with(prefix))
}
/// Check if a path is safe to execute
fn is_safe_executable(&self, path: &Path) -> bool {
const SAFE_EXEC_PATHS: &[&str] = &[
"/usr/bin",
"/usr/local/bin",
"/bin",
sps2_config::fixed_paths::BIN_DIR,
];
// Allow execution of:
// 1. Anything within build root
if path.starts_with(&self.build_root) {
return true;
}
// 2. Standard system tools
SAFE_EXEC_PATHS
.iter()
.any(|prefix| path.starts_with(prefix))
}
    /// Get current directory
    ///
    /// Reflects any `cd`/`pushd`/`popd` effects applied so far.
    #[must_use]
    pub fn current_dir(&self) -> &Path {
        &self.current_dir
    }
    /// Get build root
    #[must_use]
    pub fn build_root(&self) -> &Path {
        &self.build_root
    }
    /// Update the current working directory (no validation is performed here)
    pub fn set_current_dir(&mut self, new_dir: PathBuf) {
        self.current_dir = new_dir;
    }
}
/// Types of path access to validate
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PathAccessType {
    /// Reading an existing file or directory
    Read,
    /// Creating or modifying a file or directory
    Write,
    /// Running a file as a program
    Execute,
}
/// Result of validated command execution
///
/// Produced by `SecurityContext::execute_command` once parsing and path
/// validation succeed.
#[derive(Debug, Clone)]
pub struct ValidatedExecution {
    /// Original command
    #[allow(dead_code)] // Available for security auditing and logging
    pub original: String,
    /// Expanded command with variables resolved
    #[allow(dead_code)] // Available for security auditing and debugging
    pub expanded: String,
    /// Parsed command structure
    pub parsed: ParsedCommand,
    /// Side effects on context (applied by `apply_command_effects`)
    pub effect: CommandEffect,
    /// Validated paths accessed by this command
    #[allow(dead_code)] // Available for security analysis and auditing
    pub accessed_paths: Vec<(PathBuf, PathAccessType)>,
}
/// Parsed command structure
///
/// Thin wrapper around the token stream produced by the validation parser.
#[derive(Debug, Clone)]
pub struct ParsedCommand {
    /// Tokenized command
    pub tokens: Vec<crate::validation::parser::Token>,
}
/// Side effects a command has on the context
#[derive(Debug, Clone)]
pub enum CommandEffect {
    /// No state change
    None,
    /// Replace the current working directory
    ChangeDirectory(PathBuf),
    /// Save the current directory on the stack and switch to a new one
    PushDirectory(PathBuf),
    /// Return to the most recently pushed directory (no-op on empty stack)
    PopDirectory,
    /// Set an environment variable (name, value)
    SetVariable(String, String),
    /// Remove an environment variable
    UnsetVariable(String),
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/core/builder.rs | crates/builder/src/core/builder.rs | //! High-level build orchestration and workflow management
use super::context::BuildContext;
use crate::artifact_qa::run_quality_pipeline;
use crate::config::BuildConfig;
use crate::packaging::create_and_sign_package;
use crate::packaging::manifest::create_manifest;
use crate::recipe::execute_recipe;
use crate::utils::events::send_event;
use crate::{BuildEnvironment, BuildResult};
use sps2_errors::Error;
use sps2_events::{AppEvent, GeneralEvent};
use sps2_net::NetClient;
use sps2_resolver::Resolver;
use sps2_store::PackageStore;
use std::path::Path;
use sps2_config::ResourceManager;
use std::sync::Arc;
/// Package builder
///
/// Cloneable handle bundling the build configuration with optional
/// collaborators (dependency resolver, package store, network client) and
/// a shared resource manager (used e.g. for download permits).
#[derive(Clone)]
pub struct Builder {
    /// Build configuration
    config: BuildConfig,
    /// Resolver for dependencies
    resolver: Option<Resolver>,
    /// Package store for output
    store: Option<PackageStore>,
    /// Network client for downloads
    net: Option<NetClient>,
    /// Resource manager
    resources: Arc<ResourceManager>,
}
impl Builder {
/// Create new builder
#[must_use]
pub fn new() -> Self {
Self {
config: BuildConfig::default(),
resolver: None,
store: None,
net: None,
resources: Arc::new(ResourceManager::default()),
}
}
/// Create builder with configuration
#[must_use]
pub fn with_config(config: BuildConfig) -> Self {
Self {
config,
resolver: None,
store: None,
net: None,
resources: Arc::new(ResourceManager::default()),
}
}
/// Set resolver
#[must_use]
pub fn with_resolver(mut self, resolver: Resolver) -> Self {
self.resolver = Some(resolver);
self
}
/// Set package store
#[must_use]
pub fn with_store(mut self, store: PackageStore) -> Self {
self.store = Some(store);
self
}
/// Set network client
#[must_use]
pub fn with_net(mut self, net: NetClient) -> Self {
self.net = Some(net);
self
}
    /// Build a package from a Starlark recipe
    ///
    /// Pipeline: isolate environment -> execute recipe -> run QA pipeline ->
    /// optional final permissions fix -> create manifest -> create and sign
    /// the package.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The recipe file cannot be read or parsed
    /// - Build dependencies cannot be resolved or installed
    /// - The build process fails or times out
    /// - Package creation or signing fails
    /// - Environment setup or cleanup fails
    pub async fn build(&self, context: BuildContext) -> Result<BuildResult, Error> {
        send_event(
            &context,
            AppEvent::General(GeneralEvent::OperationStarted {
                operation: format!("Building {} {}", context.name, context.version),
            }),
        );
        // Setup build environment
        let mut environment = self.setup_build_environment(&context).await?;
        // Execute recipe and setup dependencies
        let (runtime_deps, recipe_metadata, install_requested, qa_pipeline) = self
            .execute_recipe_and_setup_deps(&context, &mut environment)
            .await?;
        // Run quality checks
        run_quality_pipeline(&context, &environment, Some(qa_pipeline)).await?;
        // If fix_permissions was requested in the recipe, run it now as final step
        // (after the QA pipeline has finished touching staged files)
        if let Some(paths) = &environment.fix_permissions_request {
            send_event(
                &context,
                AppEvent::General(GeneralEvent::OperationStarted {
                    operation: "Final permissions fix".into(),
                }),
            );
            // Create a BuilderApi instance to call do_fix_permissions
            let api = crate::core::api::BuilderApi::new(
                environment.staging_dir().to_path_buf(),
                self.resources.clone(),
            )?;
            let result = api.do_fix_permissions(paths, &environment)?;
            send_event(
                &context,
                AppEvent::General(GeneralEvent::OperationCompleted {
                    operation: "Final permissions fix".into(),
                    success: result.success,
                }),
            );
            // Log the result
            if !result.stdout.is_empty() {
                send_event(
                    &context,
                    AppEvent::General(GeneralEvent::debug(result.stdout)),
                );
            }
        }
        // Create manifest (SBOM soft-disabled here)
        let manifest = create_manifest(&context, runtime_deps, &recipe_metadata, &environment);
        // Create and sign package
        let package_path =
            create_and_sign_package(&self.config, &context, &environment, manifest).await?;
        // Update context with the generated package path
        let mut updated_context = context.clone();
        updated_context.package_path = Some(package_path.clone());
        // Cleanup and finalize
        Self::cleanup_and_finalize(&updated_context, &environment, &package_path);
        Ok(BuildResult::new(package_path).with_install_requested(install_requested))
    }
/// Setup build environment with full isolation
async fn setup_build_environment(
&self,
context: &BuildContext,
) -> Result<BuildEnvironment, Error> {
// Create build environment with full isolation setup
// Use the configured build_root from BuildConfig (defaults to /opt/pm/build)
let build_root = self.config.build_root();
let mut environment = BuildEnvironment::new(context.clone(), build_root)?;
// Configure environment with resolver, store, and net client if available
if let Some(resolver) = &self.resolver {
environment = environment.with_resolver(resolver.clone());
}
if let Some(store) = &self.store {
environment = environment.with_store(store.clone());
}
if let Some(net) = &self.net {
environment = environment.with_net(net.clone());
}
// Initialize isolated environment
environment.initialize().await?;
// Note: Isolation level and network access are applied later in
// apply_environment_config() based on recipe settings with config defaults
send_event(
context,
AppEvent::General(GeneralEvent::OperationStarted {
operation: format!(
"Build environment isolated for {} {}",
context.name, context.version
),
}),
);
Ok(environment)
}
    /// Execute recipe and setup build dependencies
    ///
    /// Parses the YAML recipe metadata, installs the declared build
    /// dependencies into the environment *before* any build step runs, then
    /// executes the recipe. Returns
    /// `(runtime_deps, recipe_metadata, install_requested, qa_pipeline)`.
    async fn execute_recipe_and_setup_deps(
        &self,
        context: &BuildContext,
        environment: &mut BuildEnvironment,
    ) -> Result<
        (
            Vec<String>,
            crate::yaml::RecipeMetadata,
            bool,
            sps2_types::QaPipelineOverride,
        ),
        Error,
    > {
        // Parse YAML recipe for metadata
        let yaml_recipe = crate::recipe::parser::parse_yaml_recipe(&context.recipe_path).await?;
        let recipe_metadata = crate::yaml::RecipeMetadata {
            name: yaml_recipe.metadata.name.clone(),
            version: yaml_recipe.metadata.version.clone(),
            description: yaml_recipe.metadata.description.clone().into(),
            homepage: yaml_recipe.metadata.homepage.clone(),
            license: Some(yaml_recipe.metadata.license.clone()),
            runtime_deps: yaml_recipe.metadata.dependencies.runtime.clone(),
            build_deps: yaml_recipe.metadata.dependencies.build.clone(),
        };
        // Extract build dependencies as PackageSpec; any unparsable spec
        // aborts the build here.
        let build_deps: Vec<sps2_types::package::PackageSpec> = recipe_metadata
            .build_deps
            .iter()
            .map(|dep| sps2_types::package::PackageSpec::parse(dep))
            .collect::<Result<Vec<_>, _>>()?;
        // Setup build dependencies BEFORE executing build steps
        if !build_deps.is_empty() {
            send_event(
                context,
                AppEvent::General(GeneralEvent::OperationStarted {
                    operation: format!("Setting up {} build dependencies", build_deps.len()),
                }),
            );
            environment.setup_dependencies(build_deps).await?;
            // Log environment summary for debugging
            let env_summary = environment.environment_summary();
            send_event(
                context,
                AppEvent::General(GeneralEvent::debug_with_context(
                    "Build environment configured",
                    env_summary,
                )),
            );
        }
        // Use the build config as-is (it already has sps2_config from ops/build.rs)
        let build_config = self.config.clone();
        // Now execute the recipe with build dependencies already set up
        let (runtime_deps, _build_deps, _metadata, install_requested, qa_pipeline) =
            execute_recipe(&build_config, context, environment).await?;
        // Note: YAML recipes using staged execution have isolation already applied
        // during the environment configuration stage in staged_executor.rs.
        Ok((
            runtime_deps,
            recipe_metadata,
            install_requested,
            qa_pipeline,
        ))
    }
/// Cleanup build environment and finalize
fn cleanup_and_finalize(
context: &BuildContext,
environment: &BuildEnvironment,
_package_path: &Path,
) {
// Cleanup - skip for debugging
// environment.cleanup().await?;
send_event(
context,
AppEvent::General(GeneralEvent::debug(format!(
"Skipping cleanup for debugging - check {}",
environment.build_prefix().display()
))),
);
send_event(
context,
AppEvent::General(GeneralEvent::OperationCompleted {
operation: format!("Built {} {}", context.name, context.version),
success: true,
}),
);
}
}
impl Default for Builder {
    /// Equivalent to [`Builder::new`]: default config, no services wired in.
    fn default() -> Self {
        Self::new()
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/core/api.rs | crates/builder/src/core/api.rs | //! Builder API for Starlark recipes
use crate::environment::IsolationLevel;
use crate::{BuildCommandResult, BuildEnvironment};
use md5::{Digest, Md5};
use sha2::{Digest as Sha2Digest, Sha256};
use sps2_errors::{BuildError, Error};
use sps2_hash::Hash;
use sps2_net::{NetClient, NetConfig};
use sps2_platform::{PlatformContext, PlatformManager};
use sps2_types::RpathStyle;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
use sps2_config::ResourceManager;
use sps2_events::{AppEvent, EventEmitter, GeneralEvent};
use std::sync::Arc;
/// Builder API exposed to Starlark recipes
///
/// Holds per-build state mutated by recipe directives: the working
/// directory, the per-URL download cache, network/SBOM/isolation settings,
/// and metadata collected as the build progresses.
#[derive(Clone)]
pub struct BuilderApi {
    /// Working directory for source extraction
    pub(crate) working_dir: PathBuf,
    /// Downloaded files, keyed by source URL
    downloads: HashMap<String, PathBuf>,
    /// Network client for downloads
    net_client: NetClient,
    /// Whether network access is allowed (for build steps; fetch/git always may)
    allow_network: bool,
    /// SBOM generation enabled
    auto_sbom: bool,
    /// SBOM exclusion patterns
    sbom_excludes: Vec<String>,
    /// Whether install was requested during recipe execution
    install_requested: bool,
    /// Build metadata collected during build (e.g., Python wheel path)
    build_metadata: HashMap<String, String>,
    /// Build isolation level (None if not explicitly set)
    explicit_isolation_level: Option<IsolationLevel>,
    /// Resource manager
    resources: Arc<ResourceManager>,
}
impl BuilderApi {
/// Create new builder API
///
/// # Errors
///
/// Returns an error if the network client cannot be created.
pub fn new(working_dir: PathBuf, resources: Arc<ResourceManager>) -> Result<Self, Error> {
Ok(Self {
working_dir,
downloads: HashMap::new(),
net_client: NetClient::new(NetConfig::default())?,
allow_network: false,
auto_sbom: true,
sbom_excludes: vec![
"./*.dSYM".to_string(),
"./*.pdb".to_string(),
"./*.a".to_string(),
"./*.la".to_string(),
],
install_requested: false,
build_metadata: HashMap::new(),
explicit_isolation_level: None,
resources,
})
}
    /// Allow network access during build
    ///
    /// The flag is later copied into each `BuildSystemContext`
    /// (`network_allowed`); returns `&mut Self` for call chaining.
    #[must_use]
    pub fn allow_network(&mut self, allow: bool) -> &mut Self {
        self.allow_network = allow;
        self
    }
    /// Update the working directory (used after git clone to point to the correct source)
    pub fn set_working_dir(&mut self, new_working_dir: PathBuf) {
        self.working_dir = new_working_dir;
    }
/// Download a file
///
/// # Errors
///
/// Returns an error if:
/// - Network access is disabled
/// - The URL is invalid
/// - The download fails
pub async fn fetch(&mut self, url: &str) -> Result<PathBuf, Error> {
// Fetch operations always have network access - they're source fetching, not build operations
// Acquire a download permit
let _permit = self.resources.acquire_download_permit().await?;
// Check if already downloaded
if let Some(path) = self.downloads.get(url) {
return Ok(path.clone());
}
// Extract filename from URL
let filename = url
.split('/')
.next_back()
.ok_or_else(|| BuildError::InvalidUrl {
url: url.to_string(),
})?;
let download_path = self.working_dir.join(filename);
// Download file using the download module
// For builder, we don't have an event sender, so we'll use the client directly
let response = self.net_client.get(url).await?;
let bytes = response
.bytes()
.await
.map_err(|_e| BuildError::FetchFailed {
url: url.to_string(),
})?;
fs::write(&download_path, &bytes).await?;
// No hash verification - files are downloaded without validation
self.downloads
.insert(url.to_string(), download_path.clone());
// Note: Extraction is handled separately by extract_downloads_to() method
Ok(download_path)
}
/// Download and verify a file with MD5 hash
///
/// # Errors
///
/// Returns an error if:
/// - The URL is invalid
/// - The download fails
/// - The file hash doesn't match the expected MD5 hash
pub async fn fetch_md5(&mut self, url: &str, expected_md5: &str) -> Result<PathBuf, Error> {
let download_path = self.fetch(url).await?;
// Verify MD5 hash
let bytes = tokio::fs::read(&download_path).await?;
let mut hasher = Md5::new();
hasher.update(&bytes);
let actual_md5 = format!("{:x}", hasher.finalize());
if actual_md5.to_lowercase() != expected_md5.to_lowercase() {
tokio::fs::remove_file(&download_path).await?;
let filename = download_path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("unknown");
return Err(BuildError::HashMismatch {
file: filename.to_string(),
expected: expected_md5.to_string(),
actual: actual_md5,
}
.into());
}
Ok(download_path)
}
/// Download and verify a file with SHA256 hash
///
/// # Errors
///
/// Returns an error if:
/// - The URL is invalid
/// - The download fails
/// - The file hash doesn't match the expected SHA256 hash
pub async fn fetch_sha256(
&mut self,
url: &str,
expected_sha256: &str,
) -> Result<PathBuf, Error> {
let download_path = self.fetch(url).await?;
// Verify SHA256 hash
let bytes = tokio::fs::read(&download_path).await?;
let mut hasher = Sha256::new();
Sha2Digest::update(&mut hasher, &bytes);
let actual_sha256 = format!("{:x}", hasher.finalize());
if actual_sha256.to_lowercase() != expected_sha256.to_lowercase() {
tokio::fs::remove_file(&download_path).await?;
let filename = download_path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("unknown");
return Err(BuildError::HashMismatch {
file: filename.to_string(),
expected: expected_sha256.to_string(),
actual: actual_sha256,
}
.into());
}
Ok(download_path)
}
/// Download and verify a file with BLAKE3 hash
///
/// # Errors
///
/// Returns an error if:
/// - The URL is invalid
/// - The download fails
/// - The file hash doesn't match the expected BLAKE3 hash
pub async fn fetch_blake3(
&mut self,
url: &str,
expected_blake3: &str,
) -> Result<PathBuf, Error> {
let download_path = self.fetch(url).await?;
// Verify BLAKE3 hash specifically for download verification
let actual_hash = Hash::blake3_hash_file(&download_path).await?;
let actual_blake3 = actual_hash.to_hex();
if actual_blake3.to_lowercase() != expected_blake3.to_lowercase() {
tokio::fs::remove_file(&download_path).await?;
let filename = download_path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("unknown");
return Err(BuildError::HashMismatch {
file: filename.to_string(),
expected: expected_blake3.to_string(),
actual: actual_blake3,
}
.into());
}
Ok(download_path)
}
/// Clone a git repository
///
/// # Errors
///
/// Returns an error if:
/// - Network access is disabled
/// - The URL is invalid
/// - The git clone fails
pub async fn git(&mut self, url: &str, ref_: &str) -> Result<PathBuf, Error> {
// Git operations always have network access - they're source fetching, not build operations
// Check if already cloned
if let Some(path) = self.downloads.get(url) {
return Ok(path.clone());
}
// Extract repository name from URL
let repo_name = url
.split('/')
.next_back()
.and_then(|s| s.strip_suffix(".git").or(Some(s)))
.ok_or_else(|| BuildError::InvalidUrl {
url: url.to_string(),
})?;
let clone_path = self.working_dir.join(repo_name);
// Use platform abstraction for process execution
let platform = PlatformManager::instance().platform();
let context = PlatformContext::new(None);
// Clone using git command (better compatibility than git2 crate)
let output = if ref_ == "HEAD" {
// For HEAD, don't use --branch flag
let mut cmd = platform.process().create_command("git");
cmd.args([
"clone",
"--depth",
"1",
url,
&clone_path.display().to_string(),
]);
cmd.current_dir(&self.working_dir);
platform.process().execute_command(&context, cmd).await?
} else {
// For specific branches/tags, use --branch
let mut cmd = platform.process().create_command("git");
cmd.args([
"clone",
"--depth",
"1",
"--branch",
ref_,
url,
&clone_path.display().to_string(),
]);
cmd.current_dir(&self.working_dir);
platform.process().execute_command(&context, cmd).await?
};
if !output.status.success() {
return Err(BuildError::GitCloneFailed {
message: format!(
"Failed to clone {}: {}",
url,
String::from_utf8_lossy(&output.stderr)
),
}
.into());
}
self.downloads.insert(url.to_string(), clone_path.clone());
// Update working directory to the cloned path so subsequent operations
// (like cargo build) work in the correct directory
self.set_working_dir(clone_path.clone());
Ok(clone_path)
}
/// Apply a patch file
///
/// # Errors
///
/// Returns an error if the patch command fails.
pub async fn apply_patch(
&self,
patch_path: &Path,
env: &BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
env.execute_command(
"patch",
&["-p1", "-i", &patch_path.display().to_string()],
Some(&self.working_dir),
)
.await
}
/// Configure with autotools
///
/// # Errors
///
/// Returns an error if the configure or make commands fail.
pub async fn autotools(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
use crate::build_systems::{AutotoolsBuildSystem, BuildSystem, BuildSystemContext};
// Record that we're using autotools build system
env.record_build_system("autotools");
// Extract source archive first if needed
self.extract_downloads().await?;
// Create build system context
let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
ctx.network_allowed = self.allow_network;
let autotools_system = AutotoolsBuildSystem::new();
// Configure
autotools_system.configure(&ctx, args).await?;
// Build
autotools_system.build(&ctx, &[]).await?;
// Install - this will also adjust staged files
autotools_system.install(&ctx).await?;
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "Autotools build completed successfully".to_string(),
stderr: String::new(),
})
}
/// Configure with `CMake`
///
/// # Errors
///
/// Returns an error if the cmake or make commands fail.
pub async fn cmake(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
use crate::build_systems::{BuildSystem, BuildSystemContext, CMakeBuildSystem};
// Record that we're using cmake build system
env.record_build_system("cmake");
// Extract source archive first if needed
self.extract_downloads().await?;
// Create build system context with out-of-source build directory
let build_dir = self.working_dir.join("build");
fs::create_dir_all(&build_dir).await?;
let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
ctx.build_dir = build_dir;
ctx.network_allowed = self.allow_network;
let cmake_system = CMakeBuildSystem::new();
// Configure
cmake_system.configure(&ctx, args).await?;
// Build
cmake_system.build(&ctx, &[]).await?;
// Install - this will also adjust staged files
cmake_system.install(&ctx).await?;
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "CMake build completed successfully".to_string(),
stderr: String::new(),
})
}
    /// Configure with Meson
    ///
    /// Records the build system, unpacks sources, then runs the Meson
    /// configure -> build -> install pipeline against an out-of-source
    /// `build/` directory.
    ///
    /// # Errors
    ///
    /// Returns an error if the meson commands fail.
    pub async fn meson(
        &self,
        args: &[String],
        env: &mut BuildEnvironment,
    ) -> Result<BuildCommandResult, Error> {
        use crate::build_systems::{BuildSystem, BuildSystemContext, MesonBuildSystem};
        // Record that we're using meson build system
        env.record_build_system("meson");
        // Extract source archive first if needed
        self.extract_downloads().await?;
        // Create build system context with out-of-source build directory
        // NOTE(review): unlike `cmake` above, the build dir is not created
        // here with create_dir_all — presumably `meson setup` creates it
        // itself; confirm against MesonBuildSystem::configure.
        let build_dir = self.working_dir.join("build");
        let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
        ctx.build_dir = build_dir;
        ctx.network_allowed = self.allow_network;
        let meson_system = MesonBuildSystem::new();
        // Configure
        meson_system.configure(&ctx, args).await?;
        // Build
        meson_system.build(&ctx, &[]).await?;
        // Install - this will also adjust staged files
        meson_system.install(&ctx).await?;
        Ok(BuildCommandResult {
            success: true,
            exit_code: Some(0),
            stdout: "Meson build completed successfully".to_string(),
            stderr: String::new(),
        })
    }
/// Build with Cargo
///
/// # Errors
///
/// Returns an error if the cargo command fails.
///
/// # Panics
///
/// Panics if the binary filename cannot be extracted from the path.
pub async fn cargo(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
use crate::build_systems::{BuildSystem, BuildSystemContext, CargoBuildSystem};
// Record that we're using cargo build system
env.record_build_system("cargo");
// Extract source archive first if needed
self.extract_downloads().await?;
// Create build system context
let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
ctx.network_allowed = self.allow_network;
let cargo_system = CargoBuildSystem::new();
// Configure (checks Cargo.toml, sets up environment)
cargo_system.configure(&ctx, args).await?;
// Build
cargo_system.build(&ctx, args).await?;
// Install - this will copy binaries to staging/bin
cargo_system.install(&ctx).await?;
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "Cargo build completed successfully".to_string(),
stderr: String::new(),
})
}
/// Build with Go
///
/// # Errors
///
/// Returns an error if the go command fails.
pub async fn go(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
use crate::build_systems::{BuildSystem, BuildSystemContext, GoBuildSystem};
// Record that we're using go build system
env.record_build_system("go");
// Extract source archive first if needed
self.extract_downloads().await?;
// Create build system context
let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
ctx.network_allowed = self.allow_network;
let go_system = GoBuildSystem::new();
// Configure if needed (this will handle go mod vendor, etc)
go_system.configure(&ctx, args).await?;
// Build the project - this will output to staging/bin automatically
go_system.build(&ctx, args).await?;
// Install (verifies binaries and sets permissions)
go_system.install(&ctx).await?;
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "Go build completed successfully".to_string(),
stderr: String::new(),
})
}
/// Build with Python
///
/// # Errors
///
/// Returns an error if the python3 command fails.
pub async fn python(
&mut self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
use crate::build_systems::{BuildSystem, BuildSystemContext, PythonBuildSystem};
// Record that we're using python build system
env.record_build_system("python");
// Extract source archive first if needed
self.extract_downloads().await?;
// Create build system context
let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
ctx.network_allowed = self.allow_network;
let python_system = PythonBuildSystem::new();
// Configure (detects build backend, sets up environment)
python_system.configure(&ctx, args).await?;
// Build (builds wheel or runs setup.py)
python_system.build(&ctx, args).await?;
// Install (installs to staging with BUILD_PREFIX)
python_system.install(&ctx).await?;
// Copy Python metadata from BuildSystemContext to BuilderApi
if let Ok(extra_env) = ctx.extra_env.read() {
for (key, value) in extra_env.iter() {
if key.starts_with("PYTHON_") {
self.build_metadata.insert(key.clone(), value.clone());
}
}
}
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "Python build completed successfully".to_string(),
stderr: String::new(),
})
}
    /// Build with Node.js
    ///
    /// Records the build system, unpacks sources, then runs the Node.js
    /// configure -> build -> install pipeline in the working directory.
    ///
    /// # Errors
    ///
    /// Returns an error if the node/npm command fails.
    pub async fn nodejs(
        &self,
        args: &[String],
        env: &mut BuildEnvironment,
    ) -> Result<BuildCommandResult, Error> {
        use crate::build_systems::{BuildSystem, BuildSystemContext, NodeJsBuildSystem};
        // Record that we're using nodejs build system
        env.record_build_system("nodejs");
        // Extract source archive first if needed
        self.extract_downloads().await?;
        // Create build system context (Node.js builds in-place, no build dir)
        let mut ctx = BuildSystemContext::new(env.clone(), self.working_dir.clone());
        ctx.network_allowed = self.allow_network;
        let nodejs_system = NodeJsBuildSystem::new();
        // Configure (detects package manager, sets up environment)
        nodejs_system.configure(&ctx, args).await?;
        // Build (installs dependencies if needed, runs build scripts)
        nodejs_system.build(&ctx, args).await?;
        // Install (copies built artifacts and bin entries to staging)
        nodejs_system.install(&ctx).await?;
        Ok(BuildCommandResult {
            success: true,
            exit_code: Some(0),
            stdout: "Node.js build completed successfully".to_string(),
            stderr: String::new(),
        })
    }
/// Run configure step only
///
/// # Errors
///
/// Returns an error if the configure command fails.
pub async fn configure(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
// Record that we're using configure (part of autotools)
env.record_build_system("configure");
// Extract source archive first if needed
self.extract_downloads().await?;
// Add prefix if not already specified
let mut configure_args = args.to_vec();
if !configure_args
.iter()
.any(|arg| arg.starts_with("--prefix="))
{
configure_args.insert(
0,
format!("--prefix={}", sps2_config::fixed_paths::LIVE_DIR),
);
}
env.execute_command(
"sh",
&["-c", &format!("./configure {}", configure_args.join(" "))],
Some(&self.working_dir),
)
.await
}
/// Run make step only
///
/// # Errors
///
/// Returns an error if the make command fails.
pub async fn make(
&self,
args: &[String],
env: &mut BuildEnvironment,
) -> Result<BuildCommandResult, Error> {
// Record that we're using make (generic build system)
env.record_build_system("make");
// Process arguments, replacing relative DESTDIR with absolute path
let processed_args: Vec<String> = args
.iter()
.map(|arg| {
if arg.starts_with("DESTDIR=") {
// Always use the absolute staging directory from environment
format!("DESTDIR={}", env.staging_dir().display())
} else {
arg.clone()
}
})
.collect();
let arg_strs: Vec<&str> = processed_args.iter().map(String::as_str).collect();
env.execute_command("make", &arg_strs, Some(&self.working_dir))
.await
}
/// Mark that installation is requested
///
/// This method does not actually perform installation during recipe execution.
/// Instead, it marks that the package should be installed after it's built.
/// The actual installation happens after the .sp package is created.
///
/// # Errors
///
/// This function currently never returns an error, but returns `Result` for API consistency
pub fn install(&mut self, _env: &BuildEnvironment) -> Result<BuildCommandResult, Error> {
// Mark that installation was requested
self.install_requested = true;
// Return success - the actual installation will happen later
Ok(BuildCommandResult {
success: true,
exit_code: Some(0),
stdout: "Installation request recorded".to_string(),
stderr: String::new(),
})
}
/// Set SBOM generation
#[must_use]
pub fn auto_sbom(&mut self, enable: bool) -> &mut Self {
self.auto_sbom = enable;
self
}
/// Set SBOM exclusion patterns
#[must_use]
pub fn sbom_excludes(&mut self, patterns: Vec<String>) -> &mut Self {
self.sbom_excludes = patterns;
self
}
/// Get SBOM configuration
#[must_use]
pub fn sbom_config(&self) -> (bool, &[String]) {
(self.auto_sbom, &self.sbom_excludes)
}
/// Check if installation was requested during recipe execution
#[must_use]
pub fn is_install_requested(&self) -> bool {
self.install_requested
}
/// Get build metadata collected during build
#[must_use]
pub fn build_metadata(&self) -> &HashMap<String, String> {
&self.build_metadata
}
/// Take build metadata (consumes the metadata)
pub fn take_build_metadata(&mut self) -> HashMap<String, String> {
std::mem::take(&mut self.build_metadata)
}
/// Set isolation level
pub fn set_isolation(&mut self, level: IsolationLevel) {
self.explicit_isolation_level = Some(level);
}
/// Get isolation level if explicitly set
#[must_use]
pub fn explicit_isolation_level(&self) -> Option<IsolationLevel> {
self.explicit_isolation_level
}
/// Check if isolation was explicitly set
#[must_use]
pub fn is_isolation_explicitly_set(&self) -> bool {
self.explicit_isolation_level.is_some()
}
/// Extract downloaded archives
///
/// # Errors
///
/// Returns an error if any archive extraction fails.
pub async fn extract_downloads(&self) -> Result<(), Error> {
for path in self.downloads.values() {
self.extract_single_download(path, None).await?;
}
Ok(())
}
/// Extract downloaded archives to a specific subdirectory
///
/// # Errors
///
/// Returns an error if any archive extraction fails.
pub async fn extract_downloads_to(&self, extract_to: Option<&str>) -> Result<(), Error> {
for path in self.downloads.values() {
self.extract_single_download(path, extract_to).await?;
}
Ok(())
}
/// Extract a single downloaded file
///
/// # Errors
///
/// Returns an error if archive extraction fails.
pub async fn extract_single_download(
&self,
path: &Path,
extract_to: Option<&str>,
) -> Result<(), Error> {
if let Some(ext) = path.extension().and_then(|e| e.to_str()) {
match ext {
"gz" | "tgz" => {
self.extract_tar_gz(path, extract_to).await?;
}
"bz2" => {
self.extract_tar_bz2(path, extract_to).await?;
}
"xz" => {
self.extract_tar_xz(path, extract_to).await?;
}
"zip" => {
self.extract_zip(path, extract_to).await?;
}
_ => {
// Unknown format, skip extraction
}
}
} else {
// For files without extensions (like GitHub API downloads), check magic numbers
let file_bytes = tokio::fs::read(path).await.unwrap_or_default();
if file_bytes.len() >= 4 {
let magic = &file_bytes[0..4];
// Check for gzip magic number (1f 8b)
if magic[0] == 0x1f && magic[1] == 0x8b {
self.extract_tar_gz(path, extract_to).await?;
}
// Check for ZIP magic number (50 4b)
else if magic[0] == 0x50 && magic[1] == 0x4b {
self.extract_zip(path, extract_to).await?;
}
// Check for bzip2 magic number (42 5a)
else if magic[0] == 0x42 && magic[1] == 0x5a {
self.extract_tar_bz2(path, extract_to).await?;
}
}
}
Ok(())
}
/// Extract tar.gz archive
///
/// # Errors
///
/// Returns an error if extraction fails.
async fn extract_tar_gz(&self, path: &Path, extract_to: Option<&str>) -> Result<(), Error> {
self.extract_compressed_tar(path, CompressionType::Gzip, extract_to)
.await
}
/// Extract tar.bz2 archive
///
/// # Errors
///
/// Returns an error if extraction fails.
async fn extract_tar_bz2(&self, path: &Path, extract_to: Option<&str>) -> Result<(), Error> {
self.extract_compressed_tar(path, CompressionType::Bzip2, extract_to)
.await
}
/// Extract tar.xz archive
///
/// # Errors
///
/// Returns an error if extraction fails.
async fn extract_tar_xz(&self, path: &Path, extract_to: Option<&str>) -> Result<(), Error> {
self.extract_compressed_tar(path, CompressionType::Xz, extract_to)
.await
}
    /// Extract zip archive
    ///
    /// Extracts into the working directory (or `extract_to`, resolved
    /// relative to the working directory's parent for multi-source builds).
    /// If the archive has a single top-level directory, that component is
    /// stripped so contents land directly in the target.
    ///
    /// # Errors
    ///
    /// Returns an error if extraction fails.
    async fn extract_zip(&self, path: &Path, extract_to: Option<&str>) -> Result<(), Error> {
        let base_dir = if let Some(extract_to) = extract_to {
            // For multi-source builds, extract_to should be relative to the parent of working_dir
            if let Some(parent) = self.working_dir.parent() {
                parent.join(extract_to)
            } else {
                self.working_dir.join(extract_to)
            }
        } else {
            self.working_dir.clone()
        };
        let path_buf = path.to_path_buf();
        // The `zip` crate is synchronous, so do the whole extraction on a
        // blocking worker thread instead of stalling the async executor.
        tokio::task::spawn_blocking(move || {
            use std::fs::File;
            use zip::ZipArchive;
            let file = File::open(&path_buf).map_err(|e| BuildError::ExtractionFailed {
                message: format!("Failed to open zip archive: {e}"),
            })?;
            let mut archive = ZipArchive::new(file).map_err(|e| BuildError::ExtractionFailed {
                message: format!("Failed to read zip archive: {e}"),
            })?;
            // Check if archive has a single top-level directory
            let strip_components = usize::from(should_strip_zip_components(&mut archive)?);
            for i in 0..archive.len() {
                let mut file = archive
                    .by_index(i)
                    .map_err(|e| BuildError::ExtractionFailed {
                        message: format!("Failed to read zip entry: {e}"),
                    })?;
                // `enclosed_name` rejects absolute paths and `..` traversal
                // (zip-slip); entries failing that check are skipped.
                let outpath = match file.enclosed_name() {
                    Some(path) => {
                        // Strip components if needed
                        let components: Vec<_> = path.components().collect();
                        if strip_components > 0 && components.len() > strip_components {
                            base_dir
                                .join(components[strip_components..].iter().collect::<PathBuf>())
                        } else if strip_components == 0 {
                            base_dir.join(path)
                        } else {
                            continue; // Skip files at the stripped level
                        }
                    }
                    None => continue,
                };
                if file.name().ends_with('/') {
                    std::fs::create_dir_all(&outpath).map_err(|e| {
                        BuildError::ExtractionFailed {
                            message: format!("Failed to create directory: {e}"),
                        }
                    })?;
                } else {
                    if let Some(p) = outpath.parent() {
                        if !p.exists() {
                            std::fs::create_dir_all(p).map_err(|e| {
                                BuildError::ExtractionFailed {
                                    message: format!("Failed to create parent directory: {e}"),
                                }
                            })?;
                        }
                    }
                    let mut outfile =
                        File::create(&outpath).map_err(|e| BuildError::ExtractionFailed {
                            message: format!("Failed to create file: {e}"),
                        })?;
                    std::io::copy(&mut file, &mut outfile).map_err(|e| {
                        BuildError::ExtractionFailed {
                            message: format!("Failed to extract file: {e}"),
                        }
                    })?;
                }
                // Set permissions on Unix (best-effort: errors are ignored)
                #[cfg(unix)]
                {
                    use std::os::unix::fs::PermissionsExt;
                    if let Some(mode) = file.unix_mode() {
                        std::fs::set_permissions(&outpath, std::fs::Permissions::from_mode(mode))
                            .ok();
                    }
                }
            }
            Ok::<(), Error>(())
        })
        .await
        .map_err(|e| BuildError::ExtractionFailed {
            message: format!("Task join error: {e}"),
        })?
    }
/// Extract compressed tar archive using async-compression
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | true |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/core/types.rs | crates/builder/src/core/types.rs | //! Core types for the builder module
//!
//! This module contains shared types used throughout the builder crate.
// Currently empty - will be populated as we refactor and identify common types
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/core/mod.rs | crates/builder/src/core/mod.rs | //! Core module containing main builder API and types
// Public submodules of the builder core.
pub mod api;
pub mod builder;
// `BuildContext`: per-package build metadata and event plumbing.
pub mod context;
// Shared builder types (currently a placeholder module).
pub mod types;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/core/context.rs | crates/builder/src/core/context.rs | //! Build context for package building
use sps2_events::{EventEmitter, EventSender};
use sps2_types::Version;
use std::path::PathBuf;
/// Build context for package building
///
/// Carries the identity of the package being built (name, version,
/// revision, arch), the recipe and output locations, and optional
/// event/session plumbing used for progress reporting.
#[derive(Clone, Debug)]
pub struct BuildContext {
    /// Package name
    pub name: String,
    /// Package version
    pub version: Version,
    /// Revision number
    pub revision: u32,
    /// Target architecture
    pub arch: String,
    /// Recipe file path
    pub recipe_path: PathBuf,
    /// Output directory for .sp files
    pub output_dir: PathBuf,
    /// Event sender for progress reporting
    pub event_sender: Option<EventSender>,
    /// Path to the generated .sp package (set after package creation)
    pub package_path: Option<PathBuf>,
    /// Optional session identifier used for correlating events.
    pub session_id: Option<String>,
}
impl EventEmitter for BuildContext {
    /// Expose the optional progress-event channel.
    fn event_sender(&self) -> Option<&EventSender> {
        match &self.event_sender {
            Some(sender) => Some(sender),
            None => None,
        }
    }
}
impl BuildContext {
    /// Construct a context with defaults: revision 1, arch "arm64",
    /// no event sender, no package path, no session id.
    #[must_use]
    pub fn new(name: String, version: Version, recipe_path: PathBuf, output_dir: PathBuf) -> Self {
        Self {
            name,
            version,
            revision: 1,
            arch: String::from("arm64"),
            recipe_path,
            output_dir,
            event_sender: None,
            package_path: None,
            session_id: None,
        }
    }
    /// Builder-style setter for the revision number.
    #[must_use]
    pub fn with_revision(mut self, revision: u32) -> Self {
        self.revision = revision;
        self
    }
    /// Builder-style setter for the target architecture.
    #[must_use]
    pub fn with_arch(mut self, arch: String) -> Self {
        self.arch = arch;
        self
    }
    /// Builder-style setter for the progress event channel.
    #[must_use]
    pub fn with_event_sender(mut self, event_sender: EventSender) -> Self {
        self.event_sender = Some(event_sender);
        self
    }
    /// Builder-style setter for the event-correlation session id.
    #[must_use]
    pub fn with_session_id(mut self, session_id: impl Into<String>) -> Self {
        self.session_id = Some(session_id.into());
        self
    }
    /// Session id, or a deterministic `build:<name>-<version>` fallback.
    #[must_use]
    pub fn session_id(&self) -> String {
        match &self.session_id {
            Some(id) => id.clone(),
            None => format!("build:{}-{}", self.name, self.version),
        }
    }
    /// Artifact file name: `<name>-<version>-<revision>.<arch>.sp`.
    #[must_use]
    pub fn package_filename(&self) -> String {
        format!(
            "{}-{}-{}.{}.sp",
            self.name, self.version, self.revision, self.arch
        )
    }
    /// Full artifact path inside the output directory.
    #[must_use]
    pub fn output_path(&self) -> PathBuf {
        self.output_dir.join(self.package_filename())
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/nodejs.rs | crates/builder/src/build_systems/nodejs.rs | //! Node.js build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::Path;
use tokio::fs;
/// Node.js build system supporting npm, yarn, and pnpm
pub struct NodeJsBuildSystem {
    // Static capability flags/defaults; handed out via `get_config_options`.
    config: BuildSystemConfig,
}
impl NodeJsBuildSystem {
/// Create a new Node.js build system instance
#[must_use]
pub fn new() -> Self {
Self {
config: BuildSystemConfig {
supports_out_of_source: false, // Node.js builds in-place
supports_parallel_builds: true,
supports_incremental_builds: true,
default_configure_args: vec![],
default_build_args: vec![],
env_prefix: Some("NODE_".to_string()),
watch_patterns: vec![
"package.json".to_string(),
"package-lock.json".to_string(),
"yarn.lock".to_string(),
"pnpm-lock.yaml".to_string(),
"**/*.js".to_string(),
"**/*.ts".to_string(),
"**/*.jsx".to_string(),
"**/*.tsx".to_string(),
],
},
}
}
/// Detect which package manager to use
async fn detect_package_manager(&self, source_dir: &Path) -> Result<PackageManager, Error> {
// Check for lock files first (most reliable)
if source_dir.join("pnpm-lock.yaml").exists() {
return Ok(PackageManager::Pnpm);
}
if source_dir.join("yarn.lock").exists() {
return Ok(PackageManager::Yarn);
}
if source_dir.join("package-lock.json").exists() {
return Ok(PackageManager::Npm);
}
// Check for packageManager field in package.json
let package_json = source_dir.join("package.json");
if package_json.exists() {
let content = fs::read_to_string(&package_json).await?;
if content.contains("\"packageManager\"") {
if content.contains("pnpm@") {
return Ok(PackageManager::Pnpm);
} else if content.contains("yarn@") {
return Ok(PackageManager::Yarn);
}
}
}
// Default to npm
Ok(PackageManager::Npm)
}
    /// Setup offline mode and vendoring
    ///
    /// When networking is disabled, writes a per-manager config file that
    /// forces offline installs. Also symlinks a `vendor/` directory to
    /// `node_modules` when the latter is missing (Unix only).
    async fn setup_offline_mode(
        &self,
        ctx: &BuildSystemContext,
        pm: &PackageManager,
    ) -> Result<(), Error> {
        if !ctx.network_allowed {
            match pm {
                PackageManager::Npm => {
                    // Create .npmrc for offline mode
                    let npmrc_content = "offline=true\n";
                    fs::write(ctx.source_dir.join(".npmrc"), npmrc_content).await?;
                }
                PackageManager::Yarn => {
                    // Yarn offline mode is handled via command line
                }
                PackageManager::Pnpm => {
                    // Create .pnpmrc for offline mode
                    // NOTE(review): pnpm normally reads its settings from
                    // .npmrc, not .pnpmrc — verify this file is honored.
                    let pnpmrc_content = "offline=true\n";
                    fs::write(ctx.source_dir.join(".pnpmrc"), pnpmrc_content).await?;
                }
            }
        }
        // Link a pre-populated vendor/ dir to node_modules when node_modules
        // does not already exist.
        let node_modules = ctx.source_dir.join("node_modules");
        if !node_modules.exists() && ctx.source_dir.join("vendor").exists() {
            // Link vendor to node_modules
            #[cfg(unix)]
            {
                use std::os::unix::fs::symlink;
                symlink(ctx.source_dir.join("vendor"), &node_modules)?;
            }
        }
        Ok(())
    }
/// Get install command for package manager
fn get_install_command(pm: &PackageManager, offline: bool, has_lock_file: bool) -> Vec<String> {
match pm {
PackageManager::Npm => {
// Use ci only if lock file exists, otherwise use install
let mut args = if has_lock_file {
vec!["ci".to_string()]
} else {
vec!["install".to_string()]
};
if offline {
args.push("--offline".to_string());
}
args.push("--no-audit".to_string());
args.push("--no-fund".to_string());
args
}
PackageManager::Yarn => {
let mut args = vec!["install".to_string()];
if offline {
args.push("--offline".to_string());
}
args.push("--frozen-lockfile".to_string());
args.push("--non-interactive".to_string());
args
}
PackageManager::Pnpm => {
let mut args = vec!["install".to_string()];
if offline {
args.push("--offline".to_string());
}
args.push("--frozen-lockfile".to_string());
args
}
}
}
/// Get build script name from package.json
async fn get_build_script(&self, source_dir: &Path) -> Result<Option<String>, Error> {
let package_json = source_dir.join("package.json");
if !package_json.exists() {
return Ok(None);
}
let content = fs::read_to_string(&package_json).await?;
// Simple parsing - look for build script
if content.contains("\"build\":") {
return Ok(Some("build".to_string()));
}
if content.contains("\"compile\":") {
return Ok(Some("compile".to_string()));
}
if content.contains("\"dist\":") {
return Ok(Some("dist".to_string()));
}
Ok(None)
}
    /// Parse test output from various test runners
    ///
    /// Heuristically recognizes Jest summaries, Mocha "passing/failing"
    /// lines, and TAP output, returning `(total, passed, failed, failures)`.
    /// The branches are mutually exclusive — the first format whose marker
    /// appears anywhere in the output wins.
    fn parse_test_output(output: &str) -> (usize, usize, usize, Vec<TestFailure>) {
        let mut total = 0;
        let mut passed = 0;
        let mut failed = 0;
        let mut failures = vec![];
        // Jest pattern: "Tests: 1 failed, 2 passed, 3 total"
        if output.contains("Tests:") {
            for line in output.lines() {
                if line.contains("Tests:") && line.contains("total") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    // Each number is classified by the word that follows it.
                    for (i, part) in parts.iter().enumerate() {
                        if let Ok(num) = part.parse::<usize>() {
                            if i + 1 < parts.len() {
                                if let Some(next_part) = parts.get(i + 1) {
                                    match *next_part {
                                        "passed" | "passed," => passed = num,
                                        "failed" | "failed," => failed = num,
                                        "total" => total = num,
                                        _ => {}
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        // Mocha pattern: " 2 passing"
        else if output.contains("passing") {
            for line in output.lines() {
                if line.contains("passing") {
                    if let Some(num_str) = line.split_whitespace().next() {
                        if let Ok(num) = num_str.parse::<usize>() {
                            passed = num;
                            total = num;
                        }
                    }
                }
                if line.contains("failing") {
                    if let Some(num_str) = line.split_whitespace().next() {
                        if let Ok(num) = num_str.parse::<usize>() {
                            failed = num;
                            total += num;
                        }
                    }
                }
            }
        }
        // TAP format: "ok 1 - test description"
        else if output.contains("TAP version") || output.contains("ok 1") {
            for line in output.lines() {
                if line.starts_with("ok ") || line.starts_with("not ok ") {
                    total += 1;
                    if line.starts_with("ok ") {
                        passed += 1;
                    } else {
                        failed += 1;
                        // Record the description after " - " as a failure.
                        if let Some(desc) = line.split(" - ").nth(1) {
                            failures.push(TestFailure {
                                name: desc.to_string(),
                                message: line.to_string(),
                                details: None,
                            });
                        }
                    }
                }
            }
        }
        // If no pattern matched but tests ran, report a single synthetic
        // pass/fail based on whether the output mentions errors.
        if total == 0 && (output.contains("test") || output.contains("spec")) {
            if output.contains("failed") || output.contains("error") {
                total = 1;
                failed = 1;
            } else {
                total = 1;
                passed = 1;
            }
        }
        (total, passed, failed, failures)
    }
    /// Find and copy built artifacts
    ///
    /// Stages three kinds of outputs under the live prefix inside the
    /// staging directory: common output directories (dist/build/out/lib),
    /// `bin` entries declared in package.json, and executables from
    /// node_modules/.bin.
    async fn copy_built_artifacts(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        let staging_dir = ctx.env.staging_dir();
        let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
        // Common output directories
        let possible_dirs = vec!["dist", "build", "out", "lib"];
        for dir_name in possible_dirs {
            let output_dir = ctx.source_dir.join(dir_name);
            if output_dir.exists() {
                // Copy to staging with BUILD_PREFIX structure
                let dest = prefix_path.join(dir_name);
                fs::create_dir_all(&dest).await?;
                copy_dir_recursive(&output_dir, &dest).await?;
            }
        }
        // Handle bin entries from package.json
        let package_json_path = ctx.source_dir.join("package.json");
        if package_json_path.exists() {
            let content = fs::read_to_string(&package_json_path).await?;
            // Malformed JSON is silently ignored here (best-effort staging).
            if let Ok(json) = serde_json::from_str::<serde_json::Value>(&content) {
                if let Some(bin) = json.get("bin") {
                    let dest_bin = prefix_path.join("bin");
                    fs::create_dir_all(&dest_bin).await?;
                    match bin {
                        serde_json::Value::String(script) => {
                            // Single bin entry: named after the package.
                            let script_path = ctx.source_dir.join(script);
                            if script_path.exists() {
                                let bin_name =
                                    json.get("name").and_then(|n| n.as_str()).unwrap_or("bin");
                                let dest = dest_bin.join(bin_name);
                                fs::copy(&script_path, &dest).await?;
                                #[cfg(unix)]
                                {
                                    use std::os::unix::fs::PermissionsExt;
                                    let mut perms = fs::metadata(&dest).await?.permissions();
                                    perms.set_mode(0o755);
                                    fs::set_permissions(&dest, perms).await?;
                                }
                            }
                        }
                        serde_json::Value::Object(bins) => {
                            // Multiple bin entries: map of name -> script path.
                            for (name, script) in bins {
                                if let Some(script_str) = script.as_str() {
                                    let script_path = ctx.source_dir.join(script_str);
                                    if script_path.exists() {
                                        let dest = dest_bin.join(name);
                                        fs::copy(&script_path, &dest).await?;
                                        #[cfg(unix)]
                                        {
                                            use std::os::unix::fs::PermissionsExt;
                                            let mut perms =
                                                fs::metadata(&dest).await?.permissions();
                                            perms.set_mode(0o755);
                                            fs::set_permissions(&dest, perms).await?;
                                        }
                                    }
                                }
                            }
                        }
                        _ => {}
                    }
                }
            }
        }
        // Copy binaries from node_modules/.bin if they exist
        let bin_dir = ctx.source_dir.join("node_modules/.bin");
        if bin_dir.exists() {
            let dest_bin = prefix_path.join("bin");
            fs::create_dir_all(&dest_bin).await?;
            let mut entries = fs::read_dir(&bin_dir).await?;
            while let Some(entry) = entries.next_entry().await? {
                let path = entry.path();
                if path.is_file() {
                    // file_name() is Some here: read_dir entries never end in "..".
                    let filename = path.file_name().unwrap();
                    let dest = dest_bin.join(filename);
                    fs::copy(&path, &dest).await?;
                    // Make executable
                    #[cfg(unix)]
                    {
                        use std::os::unix::fs::PermissionsExt;
                        let mut perms = fs::metadata(&dest).await?.permissions();
                        perms.set_mode(0o755);
                        fs::set_permissions(&dest, perms).await?;
                    }
                }
            }
        }
        Ok(())
    }
}
/// Node.js package managers
#[derive(Debug, Clone)]
enum PackageManager {
    /// npm — the default fallback when nothing else is detected.
    Npm,
    /// Yarn — detected via yarn.lock or a `yarn@` packageManager field.
    Yarn,
    /// pnpm — detected via pnpm-lock.yaml or a `pnpm@` packageManager field.
    Pnpm,
}
impl PackageManager {
fn command(&self) -> &str {
match self {
Self::Npm => "npm",
Self::Yarn => "yarn",
Self::Pnpm => "pnpm",
}
}
}
impl Default for NodeJsBuildSystem {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl BuildSystem for NodeJsBuildSystem {
async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
Ok(source_dir.join("package.json").exists())
}
fn get_config_options(&self) -> BuildSystemConfig {
self.config.clone()
}
    /// Detect the package manager, verify it is runnable, set up offline
    /// mode, and stash the detection result for the build/test phases.
    async fn configure(&self, ctx: &BuildSystemContext, _args: &[String]) -> Result<(), Error> {
        // Detect package manager
        let pm = self.detect_package_manager(&ctx.source_dir).await?;
        // Verify the package manager is on PATH using the merged build env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(pm.command(), &["--version"], None, &merged_env, false)
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: format!("{} not found in PATH", pm.command()),
            }
            .into());
        }
        // Setup offline mode if needed
        self.setup_offline_mode(ctx, &pm).await?;
        // Store package manager for later use, as its Debug rendering
        // (e.g. "Npm"); `build`/`test` parse this string back out.
        if let Ok(mut extra_env) = ctx.extra_env.write() {
            extra_env.insert("NODE_PACKAGE_MANAGER".to_string(), format!("{pm:?}"));
        }
        Ok(())
    }
    /// Install dependencies (when any are declared) and run the project's
    /// build script, using the package manager detected in `configure`.
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        // Get package manager from configure phase
        let pm_str = if let Ok(extra_env) = ctx.extra_env.read() {
            extra_env
                .get("NODE_PACKAGE_MANAGER")
                .cloned()
                .ok_or_else(|| BuildError::ConfigureFailed {
                    message: "Package manager not detected".to_string(),
                })?
        } else {
            return Err(BuildError::ConfigureFailed {
                message: "Cannot access extra environment".to_string(),
            }
            .into());
        };
        // Round-trip of the Debug string written by `configure`.
        let pm = match pm_str.as_str() {
            "Yarn" => PackageManager::Yarn,
            "Pnpm" => PackageManager::Pnpm,
            _ => PackageManager::Npm, // Default to Npm (includes "Npm")
        };
        // Check if lock file exists
        let has_lock_file = match &pm {
            PackageManager::Npm => ctx.source_dir.join("package-lock.json").exists(),
            PackageManager::Yarn => ctx.source_dir.join("yarn.lock").exists(),
            PackageManager::Pnpm => ctx.source_dir.join("pnpm-lock.yaml").exists(),
        };
        // Check if package.json has any dependencies (substring heuristic,
        // not a JSON parse).
        let package_json = ctx.source_dir.join("package.json");
        let has_dependencies = if package_json.exists() {
            let content = fs::read_to_string(&package_json).await?;
            content.contains("\"dependencies\"") || content.contains("\"devDependencies\"")
        } else {
            false
        };
        // Only run install if there are dependencies or a lock file
        if has_dependencies || has_lock_file {
            let install_args = Self::get_install_command(&pm, !ctx.network_allowed, has_lock_file);
            let arg_refs: Vec<&str> = install_args.iter().map(String::as_str).collect();
            let mut merged_env = ctx.get_all_env_vars();
            merged_env.extend(self.get_env_vars(ctx));
            let result = ctx
                .env
                .execute_command_with_env(
                    pm.command(),
                    &arg_refs,
                    Some(&ctx.source_dir),
                    &merged_env,
                    false,
                )
                .await?;
            if !result.success {
                return Err(BuildError::CompilationFailed {
                    message: format!("{} install failed: {}", pm.command(), result.stderr),
                }
                .into());
            }
        }
        // Run build script if it exists
        if let Some(build_script) = self.get_build_script(&ctx.source_dir).await? {
            let mut run_args = vec!["run", &build_script];
            // Add user arguments after "--" so the PM forwards them.
            if !args.is_empty() {
                run_args.push("--");
                run_args.extend(args.iter().map(String::as_str));
            }
            let mut merged_env = ctx.get_all_env_vars();
            merged_env.extend(self.get_env_vars(ctx));
            let result = ctx
                .env
                .execute_command_with_env(
                    pm.command(),
                    &run_args,
                    Some(&ctx.source_dir),
                    &merged_env,
                    false,
                )
                .await?;
            if !result.success {
                return Err(BuildError::CompilationFailed {
                    message: format!("Build script failed: {}", result.stderr),
                }
                .into());
            }
        }
        Ok(())
    }
    /// Run the project's `test` script (if declared) and parse the output
    /// into structured results. A missing test script yields empty results.
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
        let start = std::time::Instant::now();
        // Get package manager (fall back to npm if configure didn't record one)
        let pm_str = if let Ok(extra_env) = ctx.extra_env.read() {
            extra_env
                .get("NODE_PACKAGE_MANAGER")
                .cloned()
                .unwrap_or_else(|| "Npm".to_string())
        } else {
            "Npm".to_string()
        };
        let pm = match pm_str.as_str() {
            "Yarn" => PackageManager::Yarn,
            "Pnpm" => PackageManager::Pnpm,
            _ => PackageManager::Npm, // Default to Npm (includes "Npm")
        };
        // Check if test script exists (substring heuristic, not a JSON parse)
        let package_json = ctx.source_dir.join("package.json");
        let has_test_script = if package_json.exists() {
            let content = fs::read_to_string(&package_json).await?;
            content.contains("\"test\":")
        } else {
            false
        };
        if !has_test_script {
            // No tests defined
            return Ok(TestResults {
                total: 0,
                passed: 0,
                failed: 0,
                skipped: 0,
                duration: 0.0,
                output: "No test script defined in package.json".to_string(),
                failures: vec![],
            });
        }
        // Run tests with the merged env; the final `true` lets the command
        // fail without aborting so we can parse failures from the output.
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                pm.command(),
                &["test"],
                Some(&ctx.source_dir),
                &merged_env,
                true,
            )
            .await?;
        let duration = start.elapsed().as_secs_f64();
        let output = format!("{}\n{}", result.stdout, result.stderr);
        // Parse test results
        let (total, passed, failed, failures) = Self::parse_test_output(&output);
        Ok(TestResults {
            total,
            passed,
            failed,
            skipped: total.saturating_sub(passed + failed),
            duration,
            output,
            failures,
        })
    }
async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
// Copy built artifacts to staging
self.copy_built_artifacts(ctx).await?;
// Also copy package.json for metadata with LIVE_PREFIX structure
let package_json_src = ctx.source_dir.join("package.json");
if package_json_src.exists() {
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
let package_json_dest = prefix_path.join("package.json");
fs::copy(&package_json_src, &package_json_dest).await?;
}
Ok(())
}
fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
let mut vars = HashMap::new();
// Set NODE_ENV to production for builds
vars.insert("NODE_ENV".to_string(), "production".to_string());
// Disable telemetry and update checks
vars.insert("DISABLE_OPENCOLLECTIVE".to_string(), "1".to_string());
vars.insert("ADBLOCK".to_string(), "1".to_string());
vars.insert("DISABLE_TELEMETRY".to_string(), "1".to_string());
vars.insert("NO_UPDATE_NOTIFIER".to_string(), "1".to_string());
// Set npm configuration
vars.insert("NPM_CONFIG_LOGLEVEL".to_string(), "warn".to_string());
vars.insert("NPM_CONFIG_FUND".to_string(), "false".to_string());
vars.insert("NPM_CONFIG_AUDIT".to_string(), "false".to_string());
// Set cache directories
if let Some(cache_config) = &ctx.cache_config {
vars.insert(
"NPM_CONFIG_CACHE".to_string(),
cache_config.cache_dir.join("npm").display().to_string(),
);
vars.insert(
"YARN_CACHE_FOLDER".to_string(),
cache_config.cache_dir.join("yarn").display().to_string(),
);
vars.insert(
"PNPM_HOME".to_string(),
cache_config.cache_dir.join("pnpm").display().to_string(),
);
}
vars
}
    /// Stable identifier for this build system (used in logs and selection).
    fn name(&self) -> &'static str {
        "nodejs"
    }
}
/// Recursively copy the contents of `src` into `dst`.
///
/// Creates `dst` (and parents) when missing; directories are descended into,
/// everything else is copied with [`fs::copy`]. `fs::metadata` follows
/// symlinks, preserving the traversal behaviour of the previous
/// `Path::is_dir` check (NOTE: a broken symlink now errors at the metadata
/// call instead of at `fs::copy` — same outcome, slightly different message).
///
/// # Errors
///
/// Returns an error if any directory cannot be created or read, metadata
/// cannot be fetched, or a file copy fails.
async fn copy_dir_recursive(src: &Path, dst: &Path) -> Result<(), Error> {
    fs::create_dir_all(dst).await?;
    let mut entries = fs::read_dir(src).await?;
    while let Some(entry) = entries.next_entry().await? {
        let src_path = entry.path();
        let dst_path = dst.join(entry.file_name());
        // Use the async metadata call instead of the blocking `Path::is_dir`
        // so the executor thread is never stalled on a synchronous stat.
        if fs::metadata(&src_path).await?.is_dir() {
            // Box::pin breaks the infinitely-sized future that direct async
            // recursion would otherwise produce.
            Box::pin(copy_dir_recursive(&src_path, &dst_path)).await?;
        } else {
            fs::copy(&src_path, &dst_path).await?;
        }
    }
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/go.rs | crates/builder/src/build_systems/go.rs | //! Go build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use sps2_events::{AppEvent, EventEmitter, GeneralEvent};
use std::collections::HashMap;
use std::path::Path;
use tokio::fs;
/// Go build system
pub struct GoBuildSystem {
    // Capability flags and defaults handed out via `get_config_options`.
    config: BuildSystemConfig,
}
impl GoBuildSystem {
    /// Create a new Go build system instance
    ///
    /// Capability flags: in-source builds only, parallel and incremental
    /// builds supported. Watch patterns cover module metadata, every `.go`
    /// source, and the vendor tree.
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: false,
                supports_parallel_builds: true,
                supports_incremental_builds: true,
                default_configure_args: vec![],
                default_build_args: vec![],
                env_prefix: Some("GO".to_string()),
                watch_patterns: vec![
                    "go.mod".to_string(),
                    "go.sum".to_string(),
                    "**/*.go".to_string(),
                    "vendor/**".to_string(),
                ],
            },
        }
    }
/// Setup Go module vendoring for offline builds
async fn setup_vendoring(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
// Check if vendor directory exists
let vendor_dir = ctx.source_dir.join("vendor");
if !vendor_dir.exists() && ctx.network_allowed {
// Download dependencies and create vendor directory
let result = ctx
.execute("go", &["mod", "vendor"], Some(&ctx.source_dir))
.await?;
if !result.success {
return Err(BuildError::ConfigureFailed {
message: format!("go mod vendor failed: {}", result.stderr),
}
.into());
}
}
Ok(())
}
/// Check if this is a Go module project
fn is_go_module(source_dir: &Path) -> bool {
source_dir.join("go.mod").exists()
}
/// Get build arguments for go build
fn get_build_args(ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
let mut args = Vec::new();
// Check if user already provided a command (build, test, mod, etc.)
let has_command = !user_args.is_empty() && !user_args[0].starts_with('-');
// If user provided a command, add it first
if has_command {
args.push(user_args[0].clone());
} else {
// If no command provided, default to "build"
args.push("build".to_string());
}
// Only add build-specific flags if this is a build command
let is_build_command = args[0] == "build";
// Add -mod=vendor if vendor directory exists
let vendor_dir = ctx.source_dir.join("vendor");
if vendor_dir.exists() && !user_args.iter().any(|arg| arg.starts_with("-mod=")) {
args.push("-mod=vendor".to_string());
}
if is_build_command {
// Build flags for release builds
if !user_args.iter().any(|arg| arg.starts_with("-ldflags")) {
args.push("-ldflags=-s -w".to_string()); // Strip debug info
}
// Add parallel compilation
if ctx.jobs > 1 && !user_args.iter().any(|arg| arg.starts_with("-p=")) {
args.push(format!("-p={}", ctx.jobs));
}
}
// macOS ARM only - no cross-compilation support
// Add remaining user arguments (skip the command if it was provided)
let start_idx = usize::from(has_command);
args.extend(user_args.iter().skip(start_idx).cloned());
// Only add output path if this is a build command and user hasn't specified -o
if is_build_command && !args.iter().any(|arg| arg == "-o") {
// Determine output binary name from build context
let binary_name = ctx.env.package_name();
// Add output file path with LIVE_PREFIX structure
args.push("-o".to_string());
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
let output_path = prefix_path.join("bin").join(binary_name);
args.push(output_path.display().to_string());
}
// Add build target (current directory by default) only for build command
if is_build_command
&& !user_args.iter().any(|arg| {
std::path::Path::new(arg)
.extension()
.is_some_and(|ext| ext.eq_ignore_ascii_case("go"))
|| arg.contains('/')
|| arg == "."
|| arg == "./..."
})
{
args.push(".".to_string()); // Build current package
}
args
}
/// Parse go test output
fn parse_test_output(output: &str) -> (usize, usize, usize, Vec<TestFailure>) {
let mut total = 0;
let mut passed = 0;
let mut failed = 0;
let mut failures = vec![];
let mut current_package = String::new();
for line in output.lines() {
if line.starts_with("=== RUN") {
total += 1;
} else if line.starts_with("--- PASS:") {
passed += 1;
} else if line.starts_with("--- FAIL:") {
failed += 1;
if let Some(test_name) = line
.strip_prefix("--- FAIL: ")
.and_then(|s| s.split_whitespace().next())
{
failures.push(TestFailure {
name: format!("{current_package}/{test_name}"),
message: line.to_string(),
details: None,
});
}
} else if line.starts_with("--- SKIP:") {
// Skipped tests don't count toward total in our model
} else if line.starts_with("FAIL\t") || line.starts_with("ok \t") {
// Package result line
if let Some(pkg) = line.split('\t').nth(1) {
current_package = pkg.to_string();
}
}
}
// If we didn't find individual test results, check for summary
if total == 0 && output.contains("PASS") {
// Assume at least one test passed
total = 1;
passed = 1;
} else if total == 0 && output.contains("FAIL") {
// Assume at least one test failed
total = 1;
failed = 1;
}
(total, passed, failed, failures)
}
}
impl Default for GoBuildSystem {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl BuildSystem for GoBuildSystem {
    /// Detect a Go project: either a `go.mod` manifest or any `*.go` file
    /// directly inside `source_dir`.
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
        // Modern module-based projects declare themselves via go.mod.
        if Self::is_go_module(source_dir) {
            return Ok(true);
        }
        // Legacy GOPATH-style layout: look for a top-level .go source file.
        let mut entries = fs::read_dir(source_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            if entry.path().extension().is_some_and(|ext| ext == "go") {
                return Ok(true);
            }
        }
        Ok(false)
    }
    /// Return a copy of the capability flags set up in `GoBuildSystem::new`.
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
async fn configure(&self, ctx: &BuildSystemContext, _args: &[String]) -> Result<(), Error> {
// Go doesn't have a configure step, but we can prepare the environment
// Check Go version
let result = ctx.execute("go", &["version"], None).await?;
if !result.success {
return Err(BuildError::ConfigureFailed {
message: "go not found in PATH".to_string(),
}
.into());
}
// Setup vendoring if needed
if Self::is_go_module(&ctx.source_dir) {
self.setup_vendoring(ctx).await?;
}
// Initialize go.mod if it doesn't exist but we have .go files
let go_mod = ctx.source_dir.join("go.mod");
if !go_mod.exists() {
let module_name = ctx
.source_dir
.file_name()
.and_then(|s| s.to_str())
.unwrap_or("main");
let result = ctx
.execute("go", &["mod", "init", module_name], Some(&ctx.source_dir))
.await?;
if !result.success {
// Non-fatal: old-style GOPATH project
ctx.env.emit(AppEvent::General(GeneralEvent::warning(
"go mod init failed, continuing with GOPATH mode",
)));
}
}
Ok(())
}
async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
// Create output directory with LIVE_PREFIX structure
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
let output_dir = prefix_path.join("bin");
fs::create_dir_all(&output_dir).await?;
// Get build arguments
let build_args = Self::get_build_args(ctx, args);
let arg_refs: Vec<&str> = build_args.iter().map(String::as_str).collect();
// Run go build with merged env
let mut merged_env = ctx.get_all_env_vars();
merged_env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env("go", &arg_refs, Some(&ctx.source_dir), &merged_env, false)
.await?;
if !result.success {
return Err(BuildError::CompilationFailed {
message: format!("go build failed: {}", result.stderr),
}
.into());
}
Ok(())
}
async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
let start = std::time::Instant::now();
let mut test_args = vec!["test"];
// Add vendoring flag if vendor exists
let vendor_dir = ctx.source_dir.join("vendor");
if vendor_dir.exists() {
test_args.push("-mod=vendor");
}
// Add verbose flag to get detailed output
test_args.push("-v");
// Add parallel test execution
let jobs_str;
if ctx.jobs > 1 {
jobs_str = ctx.jobs.to_string();
test_args.push("-parallel");
test_args.push(&jobs_str);
}
// Test all packages
test_args.push("./...");
// Run go test (allow failure) with merged env
let mut merged_env = ctx.get_all_env_vars();
merged_env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env("go", &test_args, Some(&ctx.source_dir), &merged_env, true)
.await?;
let duration = start.elapsed().as_secs_f64();
let output = format!("{}\n{}", result.stdout, result.stderr);
// Parse test results
let (total, passed, failed, failures) = Self::parse_test_output(&output);
Ok(TestResults {
total,
passed,
failed,
skipped: 0, // Go doesn't report skipped in the same way
duration,
output,
failures,
})
}
async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
// Go build already outputs to the staging directory with LIVE_PREFIX
// Just verify the binaries exist
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
let bin_dir = prefix_path.join("bin");
if !bin_dir.exists() {
return Err(BuildError::InstallFailed {
message: "No binaries found in staging/bin".to_string(),
}
.into());
}
// Make sure binaries are executable
let mut entries = fs::read_dir(&bin_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.is_file() {
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let metadata = fs::metadata(&path).await?;
let mut perms = metadata.permissions();
perms.set_mode(0o755);
fs::set_permissions(&path, perms).await?;
}
}
}
Ok(())
}
fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
let mut vars = HashMap::new();
// Set GOPATH to build directory
vars.insert(
"GOPATH".to_string(),
ctx.build_dir.join("go").display().to_string(),
);
// Disable CGO by default for static binaries
let has_cgo_enabled = if let Ok(extra_env) = ctx.extra_env.read() {
extra_env.contains_key("CGO_ENABLED")
} else {
false
};
if !has_cgo_enabled {
vars.insert("CGO_ENABLED".to_string(), "0".to_string());
}
// Set module proxy for offline builds
if !ctx.network_allowed {
vars.insert("GOPROXY".to_string(), "off".to_string());
vars.insert("GONOPROXY".to_string(), "none".to_string());
vars.insert("GONOSUMDB".to_string(), "*".to_string());
vars.insert("GOPRIVATE".to_string(), "*".to_string());
}
// macOS ARM only - no cross-compilation support
// Set GOCACHE for build caching
if let Some(cache_config) = &ctx.cache_config {
vars.insert(
"GOCACHE".to_string(),
cache_config
.cache_dir
.join("go-build")
.display()
.to_string(),
);
}
vars
}
    /// Stable identifier for this build system (used in logs and selection).
    fn name(&self) -> &'static str {
        "go"
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/core.rs | crates/builder/src/build_systems/core.rs | //! Core types and utilities for build systems
use crate::BuildEnvironment;
use sps2_errors::Error;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
/// Build system context containing all necessary information for building
///
/// `extra_env` sits behind `Arc<RwLock<..>>`, so clones of a context share
/// it and observe later additions.
pub struct BuildSystemContext {
    /// Build environment
    pub env: BuildEnvironment,
    /// Source directory
    pub source_dir: PathBuf,
    /// Build directory (may be same as source for in-source builds)
    pub build_dir: PathBuf,
    /// Installation prefix
    pub prefix: PathBuf,
    /// Number of parallel jobs
    pub jobs: usize,
    /// Additional environment variables (shared across clones)
    pub extra_env: Arc<RwLock<HashMap<String, String>>>,
    /// Whether network access is allowed
    pub network_allowed: bool,
    /// Cache configuration
    pub cache_config: Option<CacheConfig>,
}
impl BuildSystemContext {
    /// Create a new build context
    ///
    /// Defaults: in-source build (`build_dir == source_dir`), the fixed live
    /// prefix, job count taken from the environment's `JOBS` variable
    /// (falling back to 1), empty extra env, no network, no cache.
    #[must_use]
    pub fn new(env: BuildEnvironment, source_dir: PathBuf) -> Self {
        let jobs = env
            .env_vars()
            .get("JOBS")
            .and_then(|j| j.parse().ok())
            .unwrap_or(1);
        Self {
            env,
            build_dir: source_dir.clone(),
            source_dir,
            prefix: PathBuf::from(sps2_config::fixed_paths::LIVE_DIR),
            jobs,
            extra_env: Arc::new(RwLock::new(HashMap::new())),
            network_allowed: false,
            cache_config: None,
        }
    }
/// Set build directory for out-of-source builds
#[must_use]
pub fn with_build_dir(mut self, build_dir: PathBuf) -> Self {
self.build_dir = build_dir;
self
}
/// Add extra environment variables
#[must_use]
pub fn with_extra_env(mut self, env: HashMap<String, String>) -> Self {
self.extra_env = Arc::new(RwLock::new(env));
self
}
/// Set network access permission
#[must_use]
pub fn with_network_allowed(mut self, allowed: bool) -> Self {
self.network_allowed = allowed;
self
}
/// Set cache configuration
#[must_use]
pub fn with_cache_config(mut self, config: CacheConfig) -> Self {
self.cache_config = Some(config);
self
}
/// Get all environment variables for the build
///
/// Returns a combined map of base environment variables and any extra variables added.
/// This map should be used when executing build commands.
#[must_use]
pub fn get_all_env_vars(&self) -> HashMap<String, String> {
let mut vars = self.env.env_vars().clone();
if let Ok(extra) = self.extra_env.read() {
vars.extend(extra.clone());
}
vars
}
    /// Execute a command in the build context
    ///
    /// Thin delegation to `BuildEnvironment::execute_command`; output capture
    /// and success handling live there.
    ///
    /// # Errors
    ///
    /// Returns an error if command execution fails
    pub async fn execute(
        &self,
        program: &str,
        args: &[&str],
        working_dir: Option<&std::path::Path>,
    ) -> Result<crate::BuildCommandResult, Error> {
        self.env.execute_command(program, args, working_dir).await
    }
}
impl Clone for BuildSystemContext {
    // Clones share `extra_env` via Arc::clone, so they observe later
    // additions to the extra environment.
    // NOTE(review): every field here is itself Clone, so a derive would
    // behave identically — confirm before simplifying.
    fn clone(&self) -> Self {
        Self {
            env: self.env.clone(),
            source_dir: self.source_dir.clone(),
            build_dir: self.build_dir.clone(),
            prefix: self.prefix.clone(),
            jobs: self.jobs,
            extra_env: Arc::clone(&self.extra_env),
            network_allowed: self.network_allowed,
            cache_config: self.cache_config.clone(),
        }
    }
}
// NOTE(review): mirrors what derive(Debug) would print; kept manual,
// presumably for control over future field formatting — confirm before
// replacing with a derive.
impl std::fmt::Debug for BuildSystemContext {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("BuildSystemContext")
            .field("env", &self.env)
            .field("source_dir", &self.source_dir)
            .field("build_dir", &self.build_dir)
            .field("prefix", &self.prefix)
            .field("jobs", &self.jobs)
            .field("extra_env", &self.extra_env)
            .field("network_allowed", &self.network_allowed)
            .field("cache_config", &self.cache_config)
            .finish()
    }
}
/// Build system configuration
///
/// Capability flags and defaults a build system reports via
/// `BuildSystem::get_config_options`.
#[derive(Clone, Debug, Default)]
pub struct BuildSystemConfig {
    /// Whether out-of-source builds are supported
    pub supports_out_of_source: bool,
    /// Whether parallel builds are supported
    pub supports_parallel_builds: bool,
    /// Whether incremental builds are supported
    pub supports_incremental_builds: bool,
    /// Default configure arguments
    pub default_configure_args: Vec<String>,
    /// Default build arguments
    pub default_build_args: Vec<String>,
    /// Environment variable prefix for options
    pub env_prefix: Option<String>,
    /// File patterns to watch for changes
    pub watch_patterns: Vec<String>,
}
/// Test results from running the test suite
#[derive(Clone, Debug)]
pub struct TestResults {
    /// Total number of tests
    pub total: usize,
    /// Number of passed tests
    pub passed: usize,
    /// Number of failed tests
    pub failed: usize,
    /// Number of skipped tests
    pub skipped: usize,
    /// Test duration in seconds (wall clock)
    pub duration: f64,
    /// Detailed test output (combined stdout/stderr)
    pub output: String,
    /// Test failures with details
    pub failures: Vec<TestFailure>,
}
impl TestResults {
    /// Check if all tests passed
    #[must_use]
    pub fn all_passed(&self) -> bool {
        self.failed == 0
    }
    /// Get pass rate as percentage
    ///
    /// An empty run (zero tests) counts as a 100% pass rate.
    #[must_use]
    pub fn pass_rate(&self) -> f64 {
        if self.total == 0 {
            return 100.0;
        }
        // Allow precision loss: f64 has 52-bit mantissa, but we won't have 2^52 tests
        #[allow(clippy::cast_precision_loss)]
        {
            (self.passed as f64 / self.total as f64) * 100.0
        }
    }
}
/// Details about a test failure
#[derive(Clone, Debug)]
pub struct TestFailure {
    /// Test name (build systems may qualify it with the package/module)
    pub name: String,
    /// Failure message (typically the raw output line)
    pub message: String,
    /// Stack trace or additional details
    pub details: Option<String>,
}
/// Cache configuration for builds
#[derive(Clone, Debug)]
pub struct CacheConfig {
    /// Whether to use ccache/sccache
    pub use_compiler_cache: bool,
    /// Compiler cache type
    pub compiler_cache_type: CompilerCacheType,
    /// Cache directory (also used for per-language caches, e.g. GOCACHE/npm)
    pub cache_dir: PathBuf,
    /// Maximum cache size in bytes
    pub max_size: u64,
    /// Whether to use distributed cache
    pub distributed: bool,
}
/// Type of compiler cache to use
#[derive(Clone, Debug)]
pub enum CompilerCacheType {
    /// Use ccache
    CCache,
    /// Use sccache
    SCCache,
    /// Use distcc
    DistCC,
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/python.rs | crates/builder/src/build_systems/python.rs | //! Python build system implementation with PEP 517/518 support
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
/// Python build system with PEP 517/518 compliance
pub struct PythonBuildSystem {
    // Capability flags and watch patterns configured in `new`.
    config: BuildSystemConfig,
}
impl PythonBuildSystem {
    /// Create a new Python build system instance
    ///
    /// Capability flags: out-of-source builds supported, parallel builds
    /// disabled, incremental builds supported. Watch patterns cover the
    /// packaging manifests and all Python sources.
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: true,
                supports_parallel_builds: false, // Most Python builds are sequential
                supports_incremental_builds: true,
                default_configure_args: vec![],
                default_build_args: vec![],
                env_prefix: Some("PYTHON_".to_string()),
                watch_patterns: vec![
                    "setup.py".to_string(),
                    "setup.cfg".to_string(),
                    "pyproject.toml".to_string(),
                    "requirements.txt".to_string(),
                    "**/*.py".to_string(),
                ],
            },
        }
    }
/// Detect build backend from pyproject.toml
async fn detect_build_backend(&self, source_dir: &Path) -> Result<BuildBackend, Error> {
let pyproject_path = source_dir.join("pyproject.toml");
if pyproject_path.exists() {
// Read pyproject.toml to detect build backend
let content = fs::read_to_string(&pyproject_path).await?;
// Simple parsing - in production would use toml crate
if content.contains("[build-system]") {
if content.contains("setuptools") {
return Ok(BuildBackend::Setuptools);
} else if content.contains("poetry") {
return Ok(BuildBackend::Poetry);
} else if content.contains("flit") {
return Ok(BuildBackend::Flit);
} else if content.contains("hatchling") || content.contains("hatch") {
return Ok(BuildBackend::Hatch);
} else if content.contains("pdm") {
return Ok(BuildBackend::Pdm);
} else if content.contains("maturin") {
return Ok(BuildBackend::Maturin);
}
// Generic PEP 517 backend
return Ok(BuildBackend::Pep517);
}
}
// Fall back to setup.py
if source_dir.join("setup.py").exists() {
return Ok(BuildBackend::SetupPy);
}
Err(BuildError::ConfigureFailed {
message: "No Python build configuration found".to_string(),
}
.into())
}
/// Check if uv is available
async fn check_uv_available(&self, ctx: &BuildSystemContext) -> Result<bool, Error> {
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env("uv", &["--version"], None, &env, true)
.await;
Ok(result.map(|r| r.success).unwrap_or(false))
}
/// Create virtual environment for isolated builds
async fn create_venv(&self, ctx: &BuildSystemContext) -> Result<PathBuf, Error> {
let venv_path = ctx.build_dir.join("venv");
let use_uv = self.check_uv_available(ctx).await?;
if use_uv {
// Try uv for faster venv creation
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"uv",
&["venv", "--seed", &venv_path.display().to_string()],
Some(&ctx.source_dir),
&env,
false,
)
.await?;
if result.success {
return Ok(venv_path);
}
// If uv failed, fall back to standard venv
}
// Fall back to standard venv (either uv not available or failed)
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"python3",
&["-m", "venv", &venv_path.display().to_string()],
Some(&ctx.source_dir),
&env,
false,
)
.await?;
if !result.success {
return Err(BuildError::ConfigureFailed {
message: format!("Failed to create virtual environment: {}", result.stderr),
}
.into());
}
Ok(venv_path)
}
/// Get pip install arguments
fn get_pip_args(ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
let mut args = vec!["install".to_string()];
// Add non-interactive flags to prevent prompting
args.push("--no-input".to_string());
args.push("--disable-pip-version-check".to_string());
// Add offline mode if network is disabled
if !ctx.network_allowed {
args.push("--no-index".to_string());
args.push("--find-links".to_string());
args.push(ctx.source_dir.join("vendor").display().to_string());
}
// Add prefix for installation - use clean python/[package] structure
args.push("--prefix".to_string());
let staging_dir = ctx.env.staging_dir();
let live_prefix = ctx.env.get_live_prefix().trim_start_matches('/');
let package_name = ctx.env.package_name();
let package_specific_prefix = staging_dir
.join(live_prefix)
.join("python")
.join(package_name);
args.push(package_specific_prefix.display().to_string());
// Install with dependencies for self-contained packages
// Add user arguments
args.extend(user_args.iter().cloned());
args
}
/// Build wheel using uv
async fn build_wheel_uv(&self, ctx: &BuildSystemContext) -> Result<PathBuf, Error> {
let wheel_dir = ctx.build_dir.join("dist");
fs::create_dir_all(&wheel_dir).await?;
// Build wheel using uv build command
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"uv",
&[
"build",
"--wheel",
"--out-dir",
&wheel_dir.display().to_string(),
],
Some(&ctx.source_dir),
&env,
false,
)
.await?;
if !result.success {
return Err(BuildError::CompilationFailed {
message: format!("Failed to build wheel with uv: {}", result.stderr),
}
.into());
}
// Find the built wheel
self.find_wheel_in_dir(&wheel_dir).await
}
    /// Build wheel using PEP 517
    ///
    /// Steps: (1) resolve the venv path recorded in `PYTHON_VENV_PATH`
    /// (defaulting to `<build_dir>/venv`), (2) install/upgrade the build
    /// toolchain (pip, setuptools, wheel, build) with the venv's pip,
    /// (3) run `python -m build --wheel` — with `--no-isolation` when the
    /// network is disabled, since isolated builds would need to download
    /// backend requirements — and (4) return the produced wheel's path.
    ///
    /// # Errors
    ///
    /// Fails when toolchain installation or the wheel build itself fails,
    /// or when no `.whl` appears in the output directory.
    async fn build_wheel_pep517(&self, ctx: &BuildSystemContext) -> Result<PathBuf, Error> {
        let wheel_dir = ctx.build_dir.join("dist");
        fs::create_dir_all(&wheel_dir).await?;
        // Get venv path from environment (set earlier by create_venv's caller).
        let venv_path = if let Ok(extra_env) = ctx.extra_env.read() {
            extra_env
                .get("PYTHON_VENV_PATH")
                .map_or_else(|| ctx.build_dir.join("venv"), PathBuf::from)
        } else {
            ctx.build_dir.join("venv")
        };
        // Use venv's pip
        let pip_path = venv_path.join("bin/pip");
        // Install build dependencies
        let mut env = ctx.get_all_env_vars();
        env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                &pip_path.display().to_string(),
                &[
                    "install",
                    "--upgrade",
                    "--no-input",
                    "pip",
                    "setuptools",
                    "wheel",
                    "build",
                ],
                Some(&ctx.source_dir),
                &env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: format!("Failed to install build tools: {}", result.stderr),
            }
            .into());
        }
        // Use venv's python
        let python_path = venv_path.join("bin/python3");
        // Build wheel using python-build
        let wheel_dir_str = wheel_dir.display().to_string();
        let mut build_args = vec!["-m", "build", "--wheel", "--outdir", &wheel_dir_str];
        if !ctx.network_allowed {
            build_args.push("--no-isolation");
        }
        let mut env = ctx.get_all_env_vars();
        env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                &python_path.display().to_string(),
                &build_args,
                Some(&ctx.source_dir),
                &env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("Failed to build wheel: {}", result.stderr),
            }
            .into());
        }
        // Find the built wheel
        self.find_wheel_in_dir(&wheel_dir).await
    }
/// Find wheel file in directory
async fn find_wheel_in_dir(&self, dir: &Path) -> Result<PathBuf, Error> {
let mut entries = fs::read_dir(dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension().and_then(|s| s.to_str()) == Some("whl") {
return Ok(path);
}
}
Err(BuildError::CompilationFailed {
message: "No wheel file found after build".to_string(),
}
.into())
}
/// Parse pytest output
fn parse_pytest_output(output: &str) -> (usize, usize, usize, Vec<TestFailure>) {
let mut passed = 0;
let mut failed = 0;
let mut skipped = 0;
let mut failures = vec![];
for line in output.lines() {
// Look for summary line like "====== 5 passed, 2 failed, 1 skipped in 1.23s ======"
if line.contains("passed") || line.contains("failed") || line.contains("skipped") {
let parts: Vec<&str> = line.split_whitespace().collect();
for (i, part) in parts.iter().enumerate() {
if let Ok(num) = part.parse::<usize>() {
if i + 1 < parts.len() {
if let Some(next_part) = parts.get(i + 1) {
match *next_part {
"passed" | "passed," => passed = num,
"failed" | "failed," => failed = num,
"skipped" | "skipped," => skipped = num,
_ => {}
}
}
}
}
}
}
// Capture individual test failures
else if line.starts_with("FAILED ") {
if let Some(test_info) = line.strip_prefix("FAILED ") {
let test_name = test_info.split(" - ").next().unwrap_or(test_info);
failures.push(TestFailure {
name: test_name.to_string(),
message: line.to_string(),
details: None,
});
}
}
}
let total = passed + failed + skipped;
(total, passed, failed, failures)
}
}
/// Python build backend types
///
/// Detected by `detect_build_backend` from `pyproject.toml` / `setup.py`.
#[derive(Debug, Clone)]
enum BuildBackend {
    SetupPy,    // Legacy setup.py
    Setuptools, // Modern setuptools with pyproject.toml
    Poetry,     // Poetry build system
    Flit,       // Flit build system
    Hatch,      // Hatch/Hatchling build system
    Pdm,        // PDM build system
    Maturin,    // Maturin for Rust extensions
    Pep517,     // Generic PEP 517 backend
}
impl PythonBuildSystem {
    /// Generate a pinned lockfile, preferring `uv` and falling back to
    /// `pip-compile` when uv is missing or errors out.
    ///
    /// The lockfile is written to `<build_dir>/requirements.lock.txt`.
    async fn generate_lockfile_with_fallback(
        &self,
        ctx: &BuildSystemContext,
    ) -> Result<PathBuf, Error> {
        let lockfile_path = ctx.build_dir.join("requirements.lock.txt");
        let uv_ready = self.check_uv_available(ctx).await?;
        if uv_ready {
            let via_uv = self.generate_lockfile_uv(ctx, &lockfile_path).await;
            if let Ok(path) = via_uv {
                return Ok(path);
            }
            // uv could not produce a lockfile; try pip-compile below.
        }
        self.generate_lockfile_pip_compile(ctx, &lockfile_path)
            .await
    }
/// Generate lockfile using uv
async fn generate_lockfile_uv(
&self,
ctx: &BuildSystemContext,
lockfile_path: &std::path::Path,
) -> Result<PathBuf, Error> {
// Try pyproject.toml first
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"uv",
&[
"pip",
"compile",
"--output-file",
&lockfile_path.display().to_string(),
"pyproject.toml",
],
Some(&ctx.source_dir),
&env,
false,
)
.await?;
if result.success {
return Ok(lockfile_path.to_path_buf());
}
// Try requirements.txt if pyproject.toml fails
let req_txt = ctx.source_dir.join("requirements.txt");
if req_txt.exists() {
let mut env = ctx.get_all_env_vars();
env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"uv",
&[
"pip",
"compile",
"--output-file",
&lockfile_path.display().to_string(),
"requirements.txt",
],
Some(&ctx.source_dir),
&env,
false,
)
.await?;
if result.success {
return Ok(lockfile_path.to_path_buf());
}
}
Err(BuildError::CompilationFailed {
message: "Failed to generate lockfile with uv".to_string(),
}
.into())
}
    /// Generate lockfile using pip-compile
    ///
    /// Fallback path used when `uv` is unavailable: installs `pip-tools`
    /// into the build venv (path taken from `PYTHON_VENV_PATH`, defaulting
    /// to `<build_dir>/venv`) and runs its `pip-compile` against
    /// `pyproject.toml`.
    ///
    /// NOTE(review): the pip-tools install uses `let _ = ...await?` — spawn
    /// errors still propagate through `?`, but a non-zero exit status is
    /// ignored, so a failed install only surfaces when `pip-compile` itself
    /// fails afterwards. Confirm this best-effort behaviour is intended.
    async fn generate_lockfile_pip_compile(
        &self,
        ctx: &BuildSystemContext,
        lockfile_path: &std::path::Path,
    ) -> Result<PathBuf, Error> {
        let venv_path = if let Ok(extra_env) = ctx.extra_env.read() {
            extra_env
                .get("PYTHON_VENV_PATH")
                .map_or_else(|| ctx.build_dir.join("venv"), std::path::PathBuf::from)
        } else {
            ctx.build_dir.join("venv")
        };
        let pip_path = venv_path.join("bin/pip");
        // Install pip-tools
        let mut env = ctx.get_all_env_vars();
        env.extend(self.get_env_vars(ctx));
        let _ = ctx
            .env
            .execute_command_with_env(
                &pip_path.display().to_string(),
                &["install", "pip-tools"],
                Some(&ctx.source_dir),
                &env,
                false,
            )
            .await?;
        // Use pip-compile
        let pip_compile = venv_path.join("bin/pip-compile");
        let mut env = ctx.get_all_env_vars();
        env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                &pip_compile.display().to_string(),
                &[
                    "--output-file",
                    &lockfile_path.display().to_string(),
                    "pyproject.toml",
                ],
                Some(&ctx.source_dir),
                &env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("Failed to generate lockfile: {}", result.stderr),
            }
            .into());
        }
        Ok(lockfile_path.to_path_buf())
    }
    /// Extract entry points from wheel
    ///
    /// Opens the wheel (a zip archive) synchronously, finds the first
    /// `*.dist-info/entry_points.txt`, and returns its `[console_scripts]`
    /// entries as a name -> "module:function" map. A wheel with no
    /// entry-points file yields an empty map.
    ///
    /// # Errors
    ///
    /// Fails when the wheel cannot be opened or read as a zip archive, or
    /// when the entry_points.txt payload is not valid UTF-8.
    fn extract_entry_points(wheel_path: &Path) -> Result<HashMap<String, String>, Error> {
        use std::io::Read;
        use zip::ZipArchive;
        let mut executables = HashMap::new();
        let file = std::fs::File::open(wheel_path).map_err(|e| BuildError::CompilationFailed {
            message: format!("Failed to open wheel file: {e}"),
        })?;
        let mut archive = ZipArchive::new(file).map_err(|e| BuildError::CompilationFailed {
            message: format!("Failed to read wheel archive: {e}"),
        })?;
        // Find .dist-info/entry_points.txt (only the first match is used)
        for i in 0..archive.len() {
            let mut file = archive
                .by_index(i)
                .map_err(|e| BuildError::CompilationFailed {
                    message: format!("Failed to read wheel entry: {e}"),
                })?;
            if file.name().ends_with(".dist-info/entry_points.txt") {
                let mut contents = String::new();
                file.read_to_string(&mut contents)
                    .map_err(|e| BuildError::CompilationFailed {
                        message: format!("Failed to read entry_points.txt: {e}"),
                    })?;
                // Parse the INI-style sections; only [console_scripts] matters.
                let mut in_console_scripts = false;
                for line in contents.lines() {
                    let trimmed = line.trim();
                    if trimmed == "[console_scripts]" {
                        in_console_scripts = true;
                        continue;
                    }
                    if trimmed.starts_with('[') {
                        in_console_scripts = false;
                        continue;
                    }
                    if in_console_scripts && trimmed.contains('=') {
                        // splitn(2, ..) keeps any '=' inside the value intact.
                        let parts: Vec<&str> = trimmed.splitn(2, '=').collect();
                        if parts.len() == 2 {
                            executables
                                .insert(parts[0].trim().to_string(), parts[1].trim().to_string());
                        }
                    }
                }
                break;
            }
        }
        Ok(executables)
    }
/// Extract Python version requirement from pyproject.toml
async fn extract_requires_python(&self, source_dir: &Path) -> Result<String, Error> {
let pyproject_path = source_dir.join("pyproject.toml");
if pyproject_path.exists() {
let content = fs::read_to_string(&pyproject_path).await?;
// Simple parsing - look for requires-python
for line in content.lines() {
if line.contains("requires-python") {
if let Some(value) = line.split('=').nth(1) {
// Remove quotes and whitespace
let cleaned = value.trim().trim_matches('"').trim_matches('\'');
return Ok(cleaned.to_string());
}
}
}
}
// Default to current Python 3 requirement
Ok(">=3.8".to_string())
}
/// Remove `direct_url.json` files that contain hardcoded paths
async fn remove_direct_url_files(&self, prefix_path: &Path) -> Result<(), Error> {
let lib_dir = prefix_path.join("lib");
if !lib_dir.exists() {
return Ok(());
}
let mut stack = vec![lib_dir];
while let Some(dir) = stack.pop() {
let mut entries = fs::read_dir(&dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.is_dir() {
stack.push(path.clone());
// Check for dist-info directories
if path
.file_name()
.and_then(|n| n.to_str())
.is_some_and(|n| n.ends_with(".dist-info"))
{
let direct_url = path.join("direct_url.json");
if direct_url.exists() {
fs::remove_file(&direct_url).await?;
}
}
}
}
}
Ok(())
}
}
impl Default for PythonBuildSystem {
    /// Equivalent to [`PythonBuildSystem::new`].
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl BuildSystem for PythonBuildSystem {
    /// A Python project is detected by the presence of `pyproject.toml`
    /// (PEP 517/518), a legacy `setup.py`, or a `setup.cfg`.
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
        // Check for pyproject.toml (PEP 517/518)
        if source_dir.join("pyproject.toml").exists() {
            return Ok(true);
        }
        // Check for setup.py (legacy)
        if source_dir.join("setup.py").exists() {
            return Ok(true);
        }
        // Check for setup.cfg
        if source_dir.join("setup.cfg").exists() {
            return Ok(true);
        }
        Ok(false)
    }
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
    /// Configure phase: verify a Python interpreter exists, detect the
    /// PEP 517 build backend, and create an isolated virtualenv.
    ///
    /// The detected backend and venv path are stashed in `ctx.extra_env`
    /// (`PYTHON_BUILD_BACKEND`, `PYTHON_VENV_PATH`) for later phases.
    async fn configure(&self, ctx: &BuildSystemContext, _args: &[String]) -> Result<(), Error> {
        // Detect build backend
        let backend = self.detect_build_backend(&ctx.source_dir).await?;
        // Verify Python is available (try python3 then python)
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env("python3", &["--version"], None, &merged_env, true)
            .await?;
        if !result.success {
            let fallback = ctx
                .env
                .execute_command_with_env("python", &["--version"], None, &merged_env, false)
                .await?;
            if !fallback.success {
                return Err(BuildError::ConfigureFailed {
                    message: "python3/python not found in PATH".to_string(),
                }
                .into());
            }
        }
        // Always create virtual environment for isolation
        let venv_path = self.create_venv(ctx).await?;
        // Store detected backend and venv path in environment for later use
        if let Ok(mut extra_env) = ctx.extra_env.write() {
            extra_env.insert("PYTHON_BUILD_BACKEND".to_string(), format!("{backend:?}"));
            extra_env.insert(
                "PYTHON_VENV_PATH".to_string(),
                venv_path.display().to_string(),
            );
        }
        Ok(())
    }
    /// Build phase: produce a wheel (preferring `uv`, falling back to
    /// PEP 517), generate a lockfile, and record wheel path, lockfile
    /// path, console entry points, and the `requires-python` constraint
    /// in `ctx.extra_env` for the install phase.
    async fn build(&self, ctx: &BuildSystemContext, _args: &[String]) -> Result<(), Error> {
        // Try uv first if available, with graceful fallback to PEP 517
        let wheel_path = if self.check_uv_available(ctx).await? {
            match self.build_wheel_uv(ctx).await {
                Ok(path) => path,
                Err(_) => {
                    // If uv build failed, fall back to PEP 517
                    self.build_wheel_pep517(ctx).await?
                }
            }
        } else {
            self.build_wheel_pep517(ctx).await?
        };
        // Generate lockfile with graceful fallback
        let lockfile_path = self.generate_lockfile_with_fallback(ctx).await?;
        // Extract entry points from wheel
        let entry_points = Self::extract_entry_points(&wheel_path)?;
        // Extract Python version requirement
        let requires_python = self.extract_requires_python(&ctx.source_dir).await?;
        // Store all metadata for install phase
        if let Ok(mut extra_env) = ctx.extra_env.write() {
            extra_env.insert(
                "PYTHON_WHEEL_PATH".to_string(),
                wheel_path.display().to_string(),
            );
            extra_env.insert(
                "PYTHON_LOCKFILE_PATH".to_string(),
                lockfile_path.display().to_string(),
            );
            extra_env.insert(
                "PYTHON_ENTRY_POINTS".to_string(),
                serde_json::to_string(&entry_points).unwrap_or_else(|_| "{}".to_string()),
            );
            extra_env.insert("PYTHON_REQUIRES_VERSION".to_string(), requires_python);
        }
        Ok(())
    }
    /// Test phase: prefer `pytest` (verbose, short tracebacks, `-n <jobs>`
    /// parallelism when jobs > 1), falling back to stdlib
    /// `unittest discover`.
    ///
    /// The test command is allowed to fail so its output can still be
    /// parsed into structured [`TestResults`].
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
        let start = std::time::Instant::now();
        // Try pytest first
        let mut test_cmd = "pytest";
        let test_args;
        // Check if pytest is available
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let pytest_check = ctx
            .env
            .execute_command_with_env("pytest", &["--version"], None, &merged_env, true)
            .await;
        if pytest_check.map(|r| r.success).unwrap_or(false) {
            // Use pytest with verbose output
            let mut args = vec!["-v".to_string(), "--tb=short".to_string()];
            // Add parallel execution if supported
            if ctx.jobs > 1 {
                args.push("-n".to_string());
                args.push(ctx.jobs.to_string());
            }
            test_args = args;
        } else {
            // Fall back to unittest
            test_cmd = "python3";
            test_args = vec![
                "-m".to_string(),
                "unittest".to_string(),
                "discover".to_string(),
            ];
        }
        // Run tests
        let test_arg_refs: Vec<&str> = test_args.iter().map(String::as_str).collect();
        // Allow failure to parse results
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                test_cmd,
                &test_arg_refs,
                Some(&ctx.source_dir),
                &merged_env,
                true,
            )
            .await?;
        let duration = start.elapsed().as_secs_f64();
        let output = format!("{}\n{}", result.stdout, result.stderr);
        // Parse test results
        let (total, passed, failed, failures) = if test_cmd == "pytest" {
            Self::parse_pytest_output(&output)
        } else {
            // Simple parsing for unittest: report a single aggregate
            // pass/fail since unittest output is not parsed in detail.
            if result.success {
                (1, 1, 0, vec![])
            } else {
                (
                    1,
                    0,
                    1,
                    vec![TestFailure {
                        name: "unittest".to_string(),
                        message: "Tests failed".to_string(),
                        details: Some(output.clone()),
                    }],
                )
            }
        };
        Ok(TestResults {
            total,
            passed,
            failed,
            skipped: 0,
            duration,
            output,
            failures,
        })
    }
    /// Install phase: pip-install the wheel built earlier into the
    /// staging prefix, fix script shebangs to the packaged interpreter,
    /// and strip `direct_url.json` files (which embed build-time paths).
    async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        // Get wheel path and venv path from build phase
        let (wheel_path, venv_path) = if let Ok(extra_env) = ctx.extra_env.read() {
            let wheel = extra_env.get("PYTHON_WHEEL_PATH").cloned().ok_or_else(|| {
                BuildError::InstallFailed {
                    message: "No wheel found from build phase".to_string(),
                }
            })?;
            let venv = extra_env
                .get("PYTHON_VENV_PATH")
                .map_or_else(|| ctx.build_dir.join("venv"), PathBuf::from);
            (wheel, venv)
        } else {
            return Err(BuildError::InstallFailed {
                message: "Cannot access extra environment".to_string(),
            }
            .into());
        };
        // Use venv's pip
        let pip_path = venv_path.join("bin/pip");
        // Install wheel using pip
        let pip_args = Self::get_pip_args(ctx, std::slice::from_ref(&wheel_path));
        let arg_refs: Vec<&str> = pip_args.iter().map(String::as_str).collect();
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                &pip_path.display().to_string(),
                &arg_refs,
                Some(&ctx.source_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::InstallFailed {
                message: format!("pip install failed: {}", result.stderr),
            }
            .into());
        }
        // Fix shebangs to point to the correct packaged Python version
        let staging_dir = ctx.env.staging_dir();
        let live_prefix = ctx.env.get_live_prefix().trim_start_matches('/');
        let package_name = ctx.env.package_name();
        let package_specific_prefix = staging_dir
            .join(live_prefix)
            .join("python")
            .join(package_name);
        let scripts_dir = package_specific_prefix.join("bin");
        if scripts_dir.exists() {
            self.fix_shebangs(&scripts_dir, &package_specific_prefix, ctx)
                .await?;
        }
        // Remove direct_url.json files which contain hardcoded paths
        self.remove_direct_url_files(&package_specific_prefix)
            .await?;
        Ok(())
    }
    /// Build system specific environment variables.
    ///
    /// Sets `PYTHONPATH` into the staging prefix, disables user
    /// site-packages, quiets pip, and — when the build virtualenv exists —
    /// activates it by exporting `VIRTUAL_ENV` and prepending its `bin`
    /// directory to `PATH`.
    fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
        let mut vars = HashMap::new();
        // Set PYTHONPATH to include staging directory with LIVE_PREFIX
        let staging_dir = ctx.env.staging_dir();
        let prefix_in_staging = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
        // NOTE(review): this path contains a literal `python*` glob; Python
        // does not expand globs in PYTHONPATH — confirm a later stage
        // expands it before use.
        let site_packages = prefix_in_staging.join("lib/python*/site-packages");
        vars.insert(
            "PYTHONPATH".to_string(),
            site_packages.display().to_string(),
        );
        // Disable user site packages for isolation
        vars.insert("PYTHONNOUSERSITE".to_string(), "1".to_string());
        // Set pip configuration
        vars.insert("PIP_DISABLE_PIP_VERSION_CHECK".to_string(), "1".to_string());
        vars.insert("PIP_NO_WARN_SCRIPT_LOCATION".to_string(), "1".to_string());
        // Use virtual environment if created
        let venv_path = ctx.build_dir.join("venv");
        if venv_path.exists() {
            let venv_bin = venv_path.join("bin");
            // Prepend the venv's bin directory to the *caller's* PATH.
            // (The previous code looked up "PATH" in the freshly built
            // local `vars` map, which never contains it, so the venv was
            // never put on PATH at all.)
            let new_path = match ctx.get_all_env_vars().get("PATH") {
                Some(path) => format!("{}:{}", venv_bin.display(), path),
                None => venv_bin.display().to_string(),
            };
            vars.insert("PATH".to_string(), new_path);
            vars.insert("VIRTUAL_ENV".to_string(), venv_path.display().to_string());
        }
        vars
    }
    fn name(&self) -> &'static str {
        "python"
    }
}
impl PythonBuildSystem {
/// Detect Python version from site-packages directory structure
async fn detect_python_version(&self, prefix_in_staging: &Path) -> Result<String, Error> {
let lib_dir = prefix_in_staging.join("lib");
if !lib_dir.exists() {
return Err(BuildError::InstallFailed {
message: "No lib directory found in staging".to_string(),
}
.into());
}
let mut entries = fs::read_dir(&lib_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.is_dir() {
let dir_name = path.file_name().unwrap().to_string_lossy();
if dir_name.starts_with("python3.") {
// Extract version from directory name like "python3.11"
return Ok(dir_name.to_string());
}
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | true |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/mod.rs | crates/builder/src/build_systems/mod.rs | //! Build system abstraction and implementations
//!
//! This module provides a trait-based abstraction for different build systems
//! (autotools, cmake, meson, cargo, etc.) with automatic detection and
//! sophisticated configuration handling.
use async_trait::async_trait;
use sps2_errors::Error;
use std::collections::HashMap;
use std::path::Path;
mod autotools;
mod cargo;
mod cmake;
mod core;
mod go;
mod meson;
mod nodejs;
mod python;
pub use autotools::AutotoolsBuildSystem;
pub use cargo::CargoBuildSystem;
pub use cmake::CMakeBuildSystem;
pub use core::{BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
pub use go::GoBuildSystem;
pub use meson::MesonBuildSystem;
pub use nodejs::NodeJsBuildSystem;
pub use python::PythonBuildSystem;
/// Trait for build system implementations
///
/// Implementations drive the package lifecycle
/// (configure → build → test → install) for one build tool and are
/// auto-selected via [`BuildSystem::detect`].
#[async_trait]
pub trait BuildSystem: Send + Sync {
    /// Detect if this build system applies to the source directory
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error>;
    /// Get configuration options specific to this build system
    fn get_config_options(&self) -> BuildSystemConfig;
    /// Configure phase
    async fn configure(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error>;
    /// Build phase
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error>;
    /// Test phase
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error>;
    /// Install phase
    async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error>;
    /// Get build system specific environment variables
    ///
    /// Callers merge these *over* the context's base environment.
    fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String>;
    /// Get build system name
    ///
    /// Lower-case identifier (e.g. "cmake") used for registry lookup.
    fn name(&self) -> &'static str;
    /// Check if out-of-source build is preferred
    ///
    /// Defaults to `false`; systems like CMake override this.
    fn prefers_out_of_source_build(&self) -> bool {
        false
    }
    /// Get build directory name for out-of-source builds
    fn build_directory_name(&self) -> &'static str {
        "build"
    }
}
/// Registry of available build systems
///
/// Holds one boxed instance of every supported build system; detection
/// is attempted in the order the systems were registered.
pub struct BuildSystemRegistry {
    /// Registered systems, in detection-priority order.
    systems: Vec<Box<dyn BuildSystem>>,
}
impl BuildSystemRegistry {
    /// Create a new registry with all supported build systems
    ///
    /// The order below is the detection-priority order.
    #[must_use]
    pub fn new() -> Self {
        let systems: Vec<Box<dyn BuildSystem>> = vec![
            Box::new(AutotoolsBuildSystem::new()),
            Box::new(CMakeBuildSystem::new()),
            Box::new(MesonBuildSystem::new()),
            Box::new(CargoBuildSystem::new()),
            Box::new(GoBuildSystem::new()),
            Box::new(PythonBuildSystem::new()),
            Box::new(NodeJsBuildSystem::new()),
        ];
        Self { systems }
    }
    /// Detect which build system to use for a source directory
    ///
    /// # Errors
    ///
    /// Returns an error if detection fails or no suitable build system is found
    pub async fn detect(&self, source_dir: &Path) -> Result<&dyn BuildSystem, Error> {
        for candidate in &self.systems {
            if candidate.detect(source_dir).await? {
                return Ok(candidate.as_ref());
            }
        }
        Err(sps2_errors::BuildError::NoBuildSystemDetected {
            path: source_dir.display().to_string(),
        }
        .into())
    }
    /// Get a specific build system by name (case-insensitive)
    pub fn get(&self, name: &str) -> Option<&dyn BuildSystem> {
        for system in &self.systems {
            if system.name().eq_ignore_ascii_case(name) {
                return Some(system.as_ref());
            }
        }
        None
    }
}
impl Default for BuildSystemRegistry {
    /// Equivalent to [`BuildSystemRegistry::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Automatically detect and return the appropriate build system
///
/// # Errors
///
/// Returns an error if no suitable build system can be detected
pub async fn detect_build_system(source_dir: &Path) -> Result<Box<dyn BuildSystem>, Error> {
    let registry = BuildSystemRegistry::new();
    let detected_name = registry.detect(source_dir).await?.name();
    // Construct a fresh boxed instance of the detected system (the
    // registry only hands out borrows).
    let boxed: Box<dyn BuildSystem> = match detected_name {
        "autotools" => Box::new(AutotoolsBuildSystem::new()),
        "cmake" => Box::new(CMakeBuildSystem::new()),
        "meson" => Box::new(MesonBuildSystem::new()),
        "cargo" => Box::new(CargoBuildSystem::new()),
        "go" => Box::new(GoBuildSystem::new()),
        "python" => Box::new(PythonBuildSystem::new()),
        "nodejs" => Box::new(NodeJsBuildSystem::new()),
        _ => unreachable!("Unknown build system"),
    };
    Ok(boxed)
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/cmake.rs | crates/builder/src/build_systems/cmake.rs | //! `CMake` build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::Path;
use tokio::fs;
/// `CMake` build system
pub struct CMakeBuildSystem {
    /// Static capabilities and defaults reported via `get_config_options`.
    config: BuildSystemConfig,
}
impl CMakeBuildSystem {
    /// Create a new `CMake` build system instance
    ///
    /// Defaults to out-of-source, parallel, incremental Release builds.
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: true,
                supports_parallel_builds: true,
                supports_incremental_builds: true,
                default_configure_args: vec![
                    "-DCMAKE_BUILD_TYPE=Release".to_string(),
                    "-DCMAKE_COLOR_MAKEFILE=ON".to_string(),
                ],
                default_build_args: vec![],
                env_prefix: Some("CMAKE_".to_string()),
                watch_patterns: vec![
                    "CMakeLists.txt".to_string(),
                    "*.cmake".to_string(),
                    "cmake/*.cmake".to_string(),
                ],
            },
        }
    }
    /// Add macOS-specific RPATH and install name arguments
    ///
    /// Each flag is appended only when the user did not already supply
    /// the same `-D` key in `user_args`.
    fn add_macos_rpath_args(
        args: &mut Vec<String>,
        ctx: &BuildSystemContext,
        user_args: &[String],
    ) {
        // Set install RPATH to where libraries will be installed
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_INSTALL_RPATH="))
        {
            args.push(format!(
                "-DCMAKE_INSTALL_RPATH={}/lib",
                ctx.env.get_live_prefix()
            ));
        }
        // Enable macOS RPATH support
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_MACOSX_RPATH="))
        {
            args.push("-DCMAKE_MACOSX_RPATH=ON".to_string());
        }
        // Don't use install RPATH during build (use build RPATH)
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_BUILD_WITH_INSTALL_RPATH="))
        {
            args.push("-DCMAKE_BUILD_WITH_INSTALL_RPATH=OFF".to_string());
        }
        // Add RPATH entries for all linked library directories
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_INSTALL_RPATH_USE_LINK_PATH="))
        {
            args.push("-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON".to_string());
        }
        // If with_defaults() was called, set CMAKE_INSTALL_NAME_DIR to prevent self-referencing install names
        if ctx.env.with_defaults_called
            && !user_args
                .iter()
                .any(|arg| arg.starts_with("-DCMAKE_INSTALL_NAME_DIR="))
        {
            args.push(format!(
                "-DCMAKE_INSTALL_NAME_DIR={}/lib",
                sps2_config::fixed_paths::LIVE_DIR
            ));
        }
    }
    /// Get `CMake` configuration arguments
    ///
    /// Assembles, in order: the source dir, the install prefix, the
    /// registry defaults, macOS RPATH flags, prefix paths derived from
    /// `PKG_CONFIG_PATH`, config-mode find_package preference, and
    /// finally the user's own arguments (so user flags come last).
    fn get_cmake_args(&self, ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
        let mut args = vec![];
        // Always specify source directory
        args.push(ctx.source_dir.display().to_string());
        // Add install prefix - use LIVE_PREFIX for runtime installation location
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_INSTALL_PREFIX="))
        {
            args.push(format!(
                "-DCMAKE_INSTALL_PREFIX={}",
                ctx.env.get_live_prefix()
            ));
        }
        // Add default arguments, skipping any whose -D key the user set.
        // NOTE(review): this is a prefix match on the key, so a user arg
        // whose key merely *starts with* a default's key also suppresses
        // that default — confirm this is intended.
        for default_arg in &self.config.default_configure_args {
            if !user_args
                .iter()
                .any(|arg| arg.starts_with(default_arg.split('=').next().unwrap_or("")))
            {
                args.push(default_arg.clone());
            }
        }
        // Set RPATH for macOS to ensure binaries can find their libraries
        if cfg!(target_os = "macos") {
            Self::add_macos_rpath_args(&mut args, ctx, user_args);
        }
        // macOS ARM only - no cross-compilation support
        // Add CMAKE_PREFIX_PATH from build dependencies: each entry of
        // PKG_CONFIG_PATH is <prefix>/lib/pkgconfig, so two parent() hops
        // recover the dependency prefix.
        if let Some(pkg_config_path) = ctx.get_all_env_vars().get("PKG_CONFIG_PATH") {
            let prefix_paths: Vec<String> = pkg_config_path
                .split(':')
                .filter_map(|p| {
                    Path::new(p)
                        .parent()
                        .and_then(|p| p.parent())
                        .map(|p| p.display().to_string())
                })
                .collect();
            if !prefix_paths.is_empty() {
                args.push(format!("-DCMAKE_PREFIX_PATH={}", prefix_paths.join(";")));
            }
        }
        // Add find_package hints
        if !user_args
            .iter()
            .any(|arg| arg.starts_with("-DCMAKE_FIND_PACKAGE_PREFER_CONFIG="))
        {
            args.push("-DCMAKE_FIND_PACKAGE_PREFER_CONFIG=ON".to_string());
        }
        // Add user arguments
        args.extend(user_args.iter().cloned());
        args
    }
}
impl Default for CMakeBuildSystem {
    /// Equivalent to [`CMakeBuildSystem::new`].
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl BuildSystem for CMakeBuildSystem {
    /// A CMake project is detected by a top-level `CMakeLists.txt`.
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
        Ok(source_dir.join("CMakeLists.txt").exists())
    }
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
    /// Configure phase: create the out-of-source build directory (when
    /// distinct from the source dir) and run `cmake` with the assembled
    /// arguments and merged environment.
    async fn configure(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        // Create build directory for out-of-source build
        if ctx.source_dir != ctx.build_dir {
            fs::create_dir_all(&ctx.build_dir).await?;
        }
        // Build CMake command
        let cmake_args = self.get_cmake_args(ctx, args);
        let arg_refs: Vec<&str> = cmake_args.iter().map(String::as_str).collect();
        // Prepare environment overlay
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        // Run cmake
        let result = ctx
            .env
            .execute_command_with_env("cmake", &arg_refs, Some(&ctx.build_dir), &merged_env, false)
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: format!("cmake configuration failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
    /// Build phase: `cmake --build .` with `--parallel <jobs>` when
    /// jobs > 1; extra user args are forwarded to the native tool after
    /// `--`.
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        let mut cmake_args = vec!["--build", "."];
        // Add parallel jobs
        let jobs_str;
        if ctx.jobs > 1 {
            jobs_str = ctx.jobs.to_string();
            cmake_args.push("--parallel");
            cmake_args.push(&jobs_str);
        }
        // Add user arguments (passed through to the underlying build tool)
        if !args.is_empty() {
            cmake_args.push("--");
            cmake_args.extend(args.iter().map(String::as_str));
        }
        // Run cmake build with merged env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "cmake",
                &cmake_args,
                Some(&ctx.build_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("cmake build failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
    /// Test phase: run `ctest` (failures allowed) and parse its summary
    /// line and per-test failure markers into [`TestResults`].
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
        let start = std::time::Instant::now();
        // Run ctest allowing failure to parse output
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "ctest",
                &["--output-on-failure", "--parallel", &ctx.jobs.to_string()],
                Some(&ctx.build_dir),
                &merged_env,
                true,
            )
            .await?;
        let duration = start.elapsed().as_secs_f64();
        let output = format!("{}\n{}", result.stdout, result.stderr);
        // Parse CTest output
        let mut total = 0;
        let mut passed = 0;
        let mut failed = 0;
        let mut failures = vec![];
        for line in output.lines() {
            // Look for test summary line: "X% tests passed, Y tests failed out of Z"
            if line.contains("% tests passed") {
                if let Some(summary) = parse_ctest_summary(line) {
                    total = summary.0;
                    passed = summary.1;
                    failed = summary.2;
                }
            }
            // Capture test failures
            else if line.contains("***Failed") || line.contains("***Timeout") {
                if let Some(test_name) = line.split_whitespace().nth(1) {
                    failures.push(TestFailure {
                        name: test_name.to_string(),
                        message: line.to_string(),
                        details: None,
                    });
                }
            }
        }
        // If no summary found but command failed, assume all tests failed
        if total == 0 && !result.success {
            total = 1;
            failed = 1;
        }
        Ok(TestResults {
            total,
            passed,
            failed,
            skipped: 0,
            duration,
            output,
            failures,
        })
    }
    /// Install phase: staged install via `DESTDIR` using
    /// `cmake --install .`, falling back to `make install` for older
    /// CMake versions (or when `cmake --install` fails).
    async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        // When using DESTDIR, we need to adjust the install behavior
        // DESTDIR is prepended to the install prefix, so if CMAKE_INSTALL_PREFIX is /opt/pm/live
        // and DESTDIR is /path/to/stage, files go to /path/to/stage/opt/pm/live
        // To get files in /path/to/stage directly, we need to strip the prefix during packaging
        // Prepare environment with DESTDIR for staged install
        let staging_dir = ctx.env.staging_dir().display().to_string();
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.insert("DESTDIR".to_string(), staging_dir.clone());
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "cmake",
                &["--install", "."],
                Some(&ctx.build_dir),
                &merged_env,
                true,
            )
            .await;
        match result {
            Ok(res) if res.success => {
                // No need to adjust staged files since we're using BUILD_PREFIX now
                Ok(())
            }
            _ => {
                // Fallback to make install for older CMake versions or if cmake --install fails
                let mut env_for_make = ctx.get_all_env_vars();
                env_for_make.insert("DESTDIR".to_string(), staging_dir.clone());
                env_for_make.extend(self.get_env_vars(ctx));
                let make_result = ctx
                    .env
                    .execute_command_with_env(
                        "make",
                        &["install"],
                        Some(&ctx.build_dir),
                        &env_for_make,
                        false,
                    )
                    .await?;
                if !make_result.success {
                    return Err(BuildError::InstallFailed {
                        message: format!("cmake install failed: {}", make_result.stderr),
                    }
                    .into());
                }
                // No need to adjust staged files since we're using BUILD_PREFIX now
                Ok(())
            }
        }
    }
    /// CMake-specific environment overlay: `DESTDIR` for staged installs
    /// plus compiler-launcher variables when a compiler cache (ccache /
    /// sccache) is configured.
    fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
        let mut vars = HashMap::new();
        // Set DESTDIR for install phase
        vars.insert(
            "DESTDIR".to_string(),
            ctx.env.staging_dir().display().to_string(),
        );
        // CMake-specific environment variables
        if let Some(cache_config) = &ctx.cache_config {
            if cache_config.use_compiler_cache {
                match cache_config.compiler_cache_type {
                    super::core::CompilerCacheType::CCache => {
                        vars.insert(
                            "CMAKE_CXX_COMPILER_LAUNCHER".to_string(),
                            "ccache".to_string(),
                        );
                        vars.insert(
                            "CMAKE_C_COMPILER_LAUNCHER".to_string(),
                            "ccache".to_string(),
                        );
                    }
                    super::core::CompilerCacheType::SCCache => {
                        vars.insert(
                            "CMAKE_CXX_COMPILER_LAUNCHER".to_string(),
                            "sccache".to_string(),
                        );
                        vars.insert(
                            "CMAKE_C_COMPILER_LAUNCHER".to_string(),
                            "sccache".to_string(),
                        );
                    }
                    // distcc wraps the compiler itself, not via launcher vars.
                    super::core::CompilerCacheType::DistCC => {}
                }
            }
        }
        vars
    }
    fn name(&self) -> &'static str {
        "cmake"
    }
    fn prefers_out_of_source_build(&self) -> bool {
        true
    }
}
/// Parse `CTest` summary line
///
/// Expected shape: `"X% tests passed, Y tests failed out of Z"`.
/// Returns `(total, passed, failed)`, or `None` when the line does not
/// contain a parseable summary. Uses checked arithmetic so malformed
/// lines (e.g. "failed" as the first token, or failed > total) yield
/// `None` instead of an integer-underflow panic.
fn parse_ctest_summary(line: &str) -> Option<(usize, usize, usize)> {
    // Parse "X% tests passed, Y tests failed out of Z"
    let parts: Vec<&str> = line.split_whitespace().collect();
    // Find "failed" and "of" positions
    let failed_pos = parts.iter().position(|&s| s == "failed")?;
    let out_of_pos = parts.iter().position(|&s| s == "of")?;
    // Look for the number before "tests failed"
    // The pattern is "N tests failed", so we need to go back 2 positions from "failed"
    let failed: usize = if failed_pos >= 2 && parts.get(failed_pos - 1) == Some(&"tests") {
        parts.get(failed_pos - 2)?.parse().ok()?
    } else {
        // checked_sub guards against "failed" being the first token.
        parts.get(failed_pos.checked_sub(1)?)?.parse().ok()?
    };
    let total: usize = parts.get(out_of_pos + 1)?.parse().ok()?;
    // A failed count larger than total means the line was malformed.
    let passed = total.checked_sub(failed)?;
    Some((total, passed, failed))
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/cargo.rs | crates/builder/src/build_systems/cargo.rs | //! Cargo (Rust) build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tokio::fs;
/// Cargo build system for Rust projects
pub struct CargoBuildSystem {
    /// Static capabilities and defaults reported via `get_config_options`.
    config: BuildSystemConfig,
}
impl CargoBuildSystem {
    /// Create a new Cargo build system instance
    ///
    /// Defaults to in-source (`target/`), parallel, incremental release
    /// builds.
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: false, // Cargo manages its own target directory
                supports_parallel_builds: true,
                supports_incremental_builds: true,
                default_configure_args: vec![],
                default_build_args: vec!["--release".to_string()],
                env_prefix: Some("CARGO_".to_string()),
                watch_patterns: vec![
                    "Cargo.toml".to_string(),
                    "Cargo.lock".to_string(),
                    "src/**/*.rs".to_string(),
                    "build.rs".to_string(),
                ],
            },
        }
    }
    /// Setup vendored dependencies for offline builds
    ///
    /// When `.cargo/vendor` exists, writes a `.cargo/config.toml` that
    /// redirects crates.io to the vendored sources and forces offline
    /// mode. Otherwise this is a no-op (offline enforcement is handled
    /// via `--offline` in `get_build_args`).
    async fn setup_vendoring(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        // Check if .cargo/vendor directory exists
        let vendor_dir = ctx.source_dir.join(".cargo/vendor");
        if vendor_dir.exists() {
            // Create .cargo/config.toml for vendored dependencies
            let cargo_dir = ctx.source_dir.join(".cargo");
            fs::create_dir_all(&cargo_dir).await?;
            let config_content = r#"[source.crates-io]
replace-with = "vendored-sources"
[source.vendored-sources]
directory = "vendor"
[net]
offline = true
"#;
            let config_path = cargo_dir.join("config.toml");
            fs::write(&config_path, config_content).await?;
        } else if !ctx.network_allowed {
            // Ensure offline mode
            return Ok(());
        }
        Ok(())
    }
    /// Get cargo build arguments
    ///
    /// Adds `--release` (unless the user opted out), `-jN`, `--offline`
    /// when networking is disabled, and merges `--features=` lists into
    /// a single `--features` flag.
    fn get_build_args(&self, ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
        let mut args = vec!["build".to_string()];
        // Add default arguments
        for default_arg in &self.config.default_build_args {
            if !user_args.contains(default_arg) && !user_args.contains(&"--debug".to_string()) {
                args.push(default_arg.clone());
            }
        }
        // Add parallel jobs
        if ctx.jobs > 1 && !user_args.iter().any(|arg| arg.starts_with("-j")) {
            args.push(format!("-j{}", ctx.jobs));
        }
        // Add offline mode if network is disabled
        if !ctx.network_allowed && !user_args.contains(&"--offline".to_string()) {
            args.push("--offline".to_string());
        }
        // macOS ARM only - no cross-compilation support
        // Handle features
        let features = Self::extract_features(user_args);
        if !features.is_empty() {
            args.push("--features".to_string());
            args.push(features.join(","));
        }
        // Add user arguments (except features which we handled above)
        args.extend(
            user_args
                .iter()
                .filter(|arg| !arg.starts_with("--features="))
                .cloned(),
        );
        args
    }
    /// Extract feature flags from arguments
    ///
    /// Collects every comma-separated feature from all `--features=a,b`
    /// style arguments into one flat list.
    fn extract_features(args: &[String]) -> Vec<String> {
        args.iter()
            .filter_map(|arg| {
                arg.strip_prefix("--features=")
                    .map(|features| features.split(',').map(String::from).collect::<Vec<_>>())
            })
            .flatten()
            .collect()
    }
    /// Find built binaries in target directory
    ///
    /// Scans `target/release` for executable files, skipping `.d`,
    /// `.rlib`, `.rmeta` artifacts.
    /// NOTE(review): the `!name_str.contains("deps")` filter matches the
    /// file *name*, so any binary whose name contains "deps" would be
    /// skipped — confirm this is intended.
    async fn find_built_binaries(&self, ctx: &BuildSystemContext) -> Result<Vec<PathBuf>, Error> {
        let mut binaries = vec![];
        // Determine target directory
        let target_base = ctx.source_dir.join("target");
        let target_dir = target_base.join("release");
        // Read Cargo.toml to find binary targets
        let cargo_toml = ctx.source_dir.join("Cargo.toml");
        let cargo_content = fs::read_to_string(&cargo_toml).await?;
        // Simple parsing - in production would use toml crate
        if cargo_content.contains("[[bin]]")
            || cargo_content.contains("[package]")
            || cargo_content.contains("[workspace]")
        {
            // Look for executables in target/release
            if target_dir.exists() {
                let mut entries = fs::read_dir(&target_dir).await?;
                while let Some(entry) = entries.next_entry().await? {
                    let path = entry.path();
                    if path.is_file() {
                        // Check if it's executable
                        let metadata = fs::metadata(&path).await?;
                        #[cfg(unix)]
                        {
                            use std::os::unix::fs::PermissionsExt;
                            // Any execute bit (owner/group/other) counts.
                            if metadata.permissions().mode() & 0o111 != 0 {
                                // Skip build artifacts
                                if let Some(name) = path.file_name() {
                                    let name_str = name.to_string_lossy();
                                    if !name_str.ends_with(".d")
                                        && !name_str.ends_with(".rlib")
                                        && !name_str.ends_with(".rmeta")
                                        && !name_str.contains("deps")
                                    {
                                        binaries.push(path);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        Ok(binaries)
    }
    /// Check if this is a workspace project
    async fn is_workspace(&self, ctx: &BuildSystemContext) -> Result<bool, Error> {
        let cargo_toml = ctx.source_dir.join("Cargo.toml");
        let cargo_content = fs::read_to_string(&cargo_toml).await?;
        Ok(cargo_content.contains("[workspace]"))
    }
    /// Parse cargo test output
    ///
    /// Returns `(total, passed, failed, failures)` extracted from the
    /// harness's "test result:" summary and the `---- <name> stdout ----`
    /// failure sections.
    /// NOTE(review): a pending failure is only flushed when a
    /// `failures:` line follows its section; a trailing section with no
    /// such line is dropped — confirm against real cargo output.
    fn parse_test_output(output: &str) -> (usize, usize, usize, Vec<TestFailure>) {
        let mut total = 0;
        let mut passed = 0;
        let mut failed = 0;
        let mut failures = vec![];
        let mut current_failure: Option<TestFailure> = None;
        for line in output.lines() {
            // Look for test result lines
            if line.contains("test result:") {
                // Format: "test result: ok. X passed; Y failed; Z ignored; W measured; A filtered out"
                if let Some(counts) = parse_cargo_test_summary(line) {
                    total = counts.0;
                    passed = counts.1;
                    failed = counts.2;
                }
            }
            // Capture test failures
            else if line.contains("---- ") && line.contains(" stdout ----") {
                if let Some(test_name) = line
                    .strip_prefix("---- ")
                    .and_then(|s| s.strip_suffix(" stdout ----"))
                {
                    current_failure = Some(TestFailure {
                        name: test_name.to_string(),
                        message: String::new(),
                        details: Some(String::new()),
                    });
                }
            }
            // Collect failure details
            else if let Some(failure) = &mut current_failure {
                if line == "failures:" {
                    failures.push(failure.clone());
                    current_failure = None;
                } else if let Some(details) = &mut failure.details {
                    details.push_str(line);
                    details.push('\n');
                }
            }
        }
        (total, passed, failed, failures)
    }
}
impl Default for CargoBuildSystem {
    /// Equivalent to [`CargoBuildSystem::new`].
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl BuildSystem for CargoBuildSystem {
async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
Ok(source_dir.join("Cargo.toml").exists())
}
    /// Return a copy of the static Cargo capability/default config.
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
    /// Configure phase: Cargo has no real configure step, so this sets
    /// up vendoring (when `.cargo/vendor` exists) and sanity-checks that
    /// `cargo` is on PATH and `Cargo.toml` is present.
    async fn configure(&self, ctx: &BuildSystemContext, _args: &[String]) -> Result<(), Error> {
        // Cargo doesn't have a separate configure step
        // But we can set up vendoring and check dependencies
        // Setup vendoring if needed
        self.setup_vendoring(ctx).await?;
        // Verify cargo is available
        // Execute with merged env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env("cargo", &["--version"], None, &merged_env, false)
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: "cargo not found in PATH".to_string(),
            }
            .into());
        }
        // Check if we can read Cargo.toml
        let cargo_toml = ctx.source_dir.join("Cargo.toml");
        if !cargo_toml.exists() {
            return Err(BuildError::ConfigureFailed {
                message: "Cargo.toml not found".to_string(),
            }
            .into());
        }
        Ok(())
    }
    /// Build phase: run `cargo build` with the assembled arguments
    /// (release by default, `-jN`, `--offline` when networking is off)
    /// and the merged environment.
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        let build_args = self.get_build_args(ctx, args);
        let arg_refs: Vec<&str> = build_args.iter().map(String::as_str).collect();
        // Run cargo build
        // Run cargo build with merged env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "cargo",
                &arg_refs,
                Some(&ctx.source_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("cargo build failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
let start = std::time::Instant::now();
let mut test_args = vec!["test"];
// Add release flag if we built in release mode
if let Ok(extra_env) = ctx.extra_env.read() {
if extra_env.get("PROFILE").map(String::as_str) == Some("release") {
test_args.push("--release");
}
}
// Add offline mode if needed
if !ctx.network_allowed {
test_args.push("--offline");
}
// Add parallel jobs
let jobs_str;
if ctx.jobs > 1 {
jobs_str = ctx.jobs.to_string();
test_args.push("--");
test_args.push("--test-threads");
test_args.push(&jobs_str);
}
// Run cargo test (allow failure)
let mut merged_env = ctx.get_all_env_vars();
merged_env.extend(self.get_env_vars(ctx));
let result = ctx
.env
.execute_command_with_env(
"cargo",
&test_args,
Some(&ctx.source_dir),
&merged_env,
true,
)
.await?;
let duration = start.elapsed().as_secs_f64();
let output = format!("{}\n{}", result.stdout, result.stderr);
// Parse test results
let (total, passed, failed, failures) = Self::parse_test_output(&output);
Ok(TestResults {
total,
passed,
failed,
skipped: total.saturating_sub(passed + failed),
duration,
output,
failures,
})
}
async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
// Find built binaries
let binaries = self.find_built_binaries(ctx).await?;
if binaries.is_empty() {
// Check if this is a workspace project
if self.is_workspace(ctx).await? {
return Err(BuildError::InstallFailed {
message: "No binaries found for workspace project. The build may have failed or the workspace may not contain any binary targets.".to_string(),
}
.into());
}
// Try cargo install as fallback for single-crate projects
// Since cargo install --root expects an actual directory path, we need to install
// to a temp location and then move files to the staging dir with BUILD_PREFIX structure
let temp_install_dir = ctx.build_dir.join("cargo_install_temp");
fs::create_dir_all(&temp_install_dir).await?;
let temp_install_str = temp_install_dir.display().to_string();
let install_args = vec![
"install",
"--path",
".",
"--root",
&temp_install_str,
"--offline",
];
let result = ctx
.execute("cargo", &install_args, Some(&ctx.source_dir))
.await?;
if !result.success {
return Err(BuildError::InstallFailed {
message: format!("cargo install failed: {}", result.stderr),
}
.into());
}
// Move files from temp install to staging with LIVE_PREFIX structure
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
// Move bin directory
let temp_bin = temp_install_dir.join("bin");
if temp_bin.exists() {
let dest_bin = prefix_path.join("bin");
fs::create_dir_all(&dest_bin).await?;
let mut entries = fs::read_dir(&temp_bin).await?;
while let Some(entry) = entries.next_entry().await? {
let src = entry.path();
if let Some(filename) = src.file_name() {
let dest = dest_bin.join(filename);
fs::rename(&src, &dest).await?;
}
}
}
// Clean up temp directory
let _ = fs::remove_dir_all(&temp_install_dir).await;
} else {
// Manually copy binaries to staging with LIVE_PREFIX structure
let staging_dir = ctx.env.staging_dir();
let prefix_path = staging_dir.join(ctx.env.get_live_prefix().trim_start_matches('/'));
let staging_bin = prefix_path.join("bin");
fs::create_dir_all(&staging_bin).await?;
for binary in binaries {
if let Some(filename) = binary.file_name() {
let dest = staging_bin.join(filename);
fs::copy(&binary, &dest).await?;
// Preserve executable permissions
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let metadata = fs::metadata(&binary).await?;
let mut perms = metadata.permissions();
perms.set_mode(0o755);
fs::set_permissions(&dest, perms).await?;
}
}
}
}
Ok(())
}
fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
let mut vars = HashMap::new();
// Cargo-specific environment variables
vars.insert(
"CARGO_TARGET_DIR".to_string(),
ctx.build_dir.join("target").display().to_string(),
);
// Set CARGO_HOME to build directory for isolation
vars.insert(
"CARGO_HOME".to_string(),
ctx.build_dir.join(".cargo").display().to_string(),
);
// Enable colored output
vars.insert("CARGO_TERM_COLOR".to_string(), "always".to_string());
// Set profile
vars.insert("PROFILE".to_string(), "release".to_string());
// Compiler cache support
if let Some(cache_config) = &ctx.cache_config {
if cache_config.use_compiler_cache {
if let super::core::CompilerCacheType::SCCache = &cache_config.compiler_cache_type {
vars.insert("RUSTC_WRAPPER".to_string(), "sccache".to_string());
}
}
}
// macOS ARM only - no cross-compilation support
vars
}
fn name(&self) -> &'static str {
"cargo"
}
fn prefers_out_of_source_build(&self) -> bool {
// Cargo manages its own target directory
false
}
}
/// Parse the summary line that cargo prints after a test run.
///
/// Expected shape:
/// `test result: ok. X passed; Y failed; Z ignored; W measured; A filtered out`
///
/// Returns `(total, passed, failed)` with `total = passed + failed + ignored`,
/// or `None` when no non-zero counter could be extracted from the line.
fn parse_cargo_test_summary(line: &str) -> Option<(usize, usize, usize)> {
    // Drop everything up to and including the first ". " (after "ok."/"FAILED.").
    let stats = line.find(". ").map_or(line, |pos| &line[pos + 2..]);
    let (mut passed, mut failed, mut ignored) = (0usize, 0usize, 0usize);
    // Each ';'-separated entry looks like "<count> <label>"; entries that do
    // not match that shape (or whose count is not a number) are skipped.
    for (value, label) in stats
        .split(';')
        .filter_map(|entry| entry.trim().split_once(' '))
        .filter_map(|(num, label)| num.parse::<usize>().ok().map(|v| (v, label)))
    {
        if label.starts_with("passed") {
            passed = value;
        } else if label.starts_with("failed") {
            failed = value;
        } else if label.starts_with("ignored") {
            ignored = value;
        }
    }
    if passed == 0 && failed == 0 && ignored == 0 {
        None
    } else {
        Some((passed + failed + ignored, passed, failed))
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/autotools.rs | crates/builder/src/build_systems/autotools.rs | //! GNU Autotools build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::Path;
use tokio::fs;
/// GNU Autotools build system
pub struct AutotoolsBuildSystem {
    // Capability flags and watch patterns, populated by `new()` and
    // handed out via `get_config_options()`.
    config: BuildSystemConfig,
}
impl AutotoolsBuildSystem {
    /// Create a new Autotools build system instance
    ///
    /// The instance must be used with a build context to configure and build projects.
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: true,
                supports_parallel_builds: true,
                supports_incremental_builds: true,
                default_configure_args: vec![],
                default_build_args: vec![],
                env_prefix: None,
                watch_patterns: vec![
                    "configure".to_string(),
                    "configure.ac".to_string(),
                    "configure.in".to_string(),
                    "Makefile.am".to_string(),
                    "Makefile.in".to_string(),
                ],
            },
        }
    }
    /// Check if autoreconf is needed
    ///
    /// Returns `true` when no `configure` script exists yet, when
    /// `configure.ac` is newer than `configure`, or — as a fallback when
    /// modification times cannot be read — whenever `configure.ac` or
    /// `configure.in` is present.
    async fn needs_autoreconf(&self, source_dir: &Path) -> Result<bool, Error> {
        // If configure exists and is newer than configure.ac, no need for autoreconf
        let configure_path = source_dir.join("configure");
        if !configure_path.exists() {
            return Ok(true);
        }
        // Check for configure.ac or configure.in
        let configure_ac = source_dir.join("configure.ac");
        let configure_in = source_dir.join("configure.in");
        if configure_ac.exists() {
            let configure_meta = fs::metadata(&configure_path).await?;
            let configure_ac_meta = fs::metadata(&configure_ac).await?;
            // If configure.ac is newer than configure, run autoreconf
            if let (Ok(configure_time), Ok(ac_time)) =
                (configure_meta.modified(), configure_ac_meta.modified())
            {
                return Ok(ac_time > configure_time);
            }
        }
        Ok(configure_ac.exists() || configure_in.exists())
    }
    /// Run autoreconf if needed
    ///
    /// # Errors
    /// Returns `BuildError::ConfigureFailed` when `autoreconf -fiv` exits
    /// non-zero.
    async fn run_autoreconf(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        if self.needs_autoreconf(&ctx.source_dir).await? {
            let result = ctx
                .execute("autoreconf", &["-fiv"], Some(&ctx.source_dir))
                .await?;
            if !result.success {
                return Err(BuildError::ConfigureFailed {
                    message: format!("autoreconf failed: {}", result.stderr),
                }
                .into());
            }
        }
        Ok(())
    }
    /// Handle config.cache for faster reconfiguration
    ///
    /// Copies a previously saved `config.cache` from the cache directory into
    /// the build directory, when caching is configured and the file exists.
    async fn handle_config_cache(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        if let Some(cache_config) = &ctx.cache_config {
            let cache_file = cache_config.cache_dir.join("config.cache");
            if cache_file.exists() {
                // Copy config.cache to build directory
                let dest = ctx.build_dir.join("config.cache");
                fs::copy(&cache_file, &dest).await?;
            }
        }
        Ok(())
    }
    /// Get configure arguments including cross-compilation
    ///
    /// Assembles the `./configure` argument list: `--prefix` (unless the
    /// caller already supplied one), user arguments, then CFLAGS/CXXFLAGS and
    /// LDFLAGS (with an rpath to `<live prefix>/lib` appended on macOS)
    /// passed as `VAR=value` assignments.
    fn get_configure_args(ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
        let mut args = vec![];
        // Add prefix - use LIVE_PREFIX for runtime installation location
        if !user_args.iter().any(|arg| arg.starts_with("--prefix=")) {
            args.push(format!("--prefix={}", ctx.env.get_live_prefix()));
        }
        // macOS ARM only - no cross-compilation support
        // Add user arguments
        args.extend(user_args.iter().cloned());
        // Add compiler flags from environment
        if let Some(cflags) = ctx.get_all_env_vars().get("CFLAGS") {
            args.push(format!("CFLAGS={cflags}"));
        }
        if let Some(cxxflags) = ctx.get_all_env_vars().get("CXXFLAGS") {
            args.push(format!("CXXFLAGS={cxxflags}"));
        }
        // Handle LDFLAGS with RPATH for macOS
        let mut ldflags = ctx
            .get_all_env_vars()
            .get("LDFLAGS")
            .cloned()
            .unwrap_or_default();
        if cfg!(target_os = "macos") {
            // Add RPATH to the library directory for runtime linking
            let rpath_flag = format!("-Wl,-rpath,{}/lib", ctx.env.get_live_prefix());
            if !ldflags.contains(&rpath_flag) {
                if !ldflags.is_empty() {
                    ldflags.push(' ');
                }
                ldflags.push_str(&rpath_flag);
            }
        }
        if !ldflags.is_empty() {
            args.push(format!("LDFLAGS={ldflags}"));
        }
        args
    }
}
impl Default for AutotoolsBuildSystem {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl BuildSystem for AutotoolsBuildSystem {
    /// Detect an autotools project by its generated or generator files.
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
        // Check for configure script
        if source_dir.join("configure").exists() {
            return Ok(true);
        }
        // Check for configure.ac or configure.in (needs autoreconf)
        if source_dir.join("configure.ac").exists() || source_dir.join("configure.in").exists() {
            return Ok(true);
        }
        // Check for Makefile.am (automake project)
        if source_dir.join("Makefile.am").exists() {
            return Ok(true);
        }
        Ok(false)
    }
    /// Static description of this build system's capabilities.
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
    /// Run the configure phase: autoreconf (if required), restore
    /// `config.cache`, execute `./configure` through `sh -c`, then save the
    /// resulting `config.cache` back to the cache directory.
    ///
    /// # Errors
    /// Returns `BuildError::ConfigureFailed` when autoreconf or configure
    /// exits non-zero.
    async fn configure(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        // Run autoreconf if needed
        self.run_autoreconf(ctx).await?;
        // Handle config cache
        self.handle_config_cache(ctx).await?;
        // Create build directory if out-of-source build
        if ctx.source_dir != ctx.build_dir {
            fs::create_dir_all(&ctx.build_dir).await?;
        }
        // Get configure script path
        let configure_path = if ctx.source_dir == ctx.build_dir {
            "./configure".to_string()
        } else {
            ctx.source_dir.join("configure").display().to_string()
        };
        // Build configure command
        let configure_args = Self::get_configure_args(ctx, args);
        let mut cmd_args = vec![configure_path];
        cmd_args.extend(configure_args);
        // Run configure - properly quote environment variables with spaces
        // (the whole command line goes through `sh -c`, so an argument like
        // `CFLAGS=-O2 -g` must be rendered as `CFLAGS="-O2 -g"`)
        let cmd_str = cmd_args
            .into_iter()
            .map(|arg| {
                // Quote environment variable assignments that contain spaces
                if arg.contains('=') && arg.contains(' ') {
                    let parts: Vec<&str> = arg.splitn(2, '=').collect();
                    if parts.len() == 2 {
                        format!("{}=\"{}\"", parts[0], parts[1])
                    } else {
                        arg
                    }
                } else {
                    arg
                }
            })
            .collect::<Vec<_>>()
            .join(" ");
        // Run configure with merged env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "sh",
                &["-c", &cmd_str],
                Some(&ctx.build_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: format!("configure failed: {}", result.stderr),
            }
            .into());
        }
        // Save config.cache if caching is enabled
        if let Some(cache_config) = &ctx.cache_config {
            let config_cache = ctx.build_dir.join("config.cache");
            if config_cache.exists() {
                let dest = cache_config.cache_dir.join("config.cache");
                fs::create_dir_all(&cache_config.cache_dir).await?;
                fs::copy(&config_cache, &dest).await?;
            }
        }
        Ok(())
    }
    /// Run `make` (with `-jN` when parallelism is enabled) in the build dir.
    ///
    /// # Errors
    /// Returns `BuildError::CompilationFailed` when make exits non-zero.
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        let mut make_args = vec![];
        // Add parallel jobs
        if ctx.jobs > 1 {
            make_args.push(format!("-j{}", ctx.jobs));
        }
        // Add user arguments
        make_args.extend(args.iter().cloned());
        // Convert to string slices
        let arg_refs: Vec<&str> = make_args.iter().map(String::as_str).collect();
        // Run make with merged env
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env("make", &arg_refs, Some(&ctx.build_dir), &merged_env, false)
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("make failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
    /// Run the test suite via `make check`, falling back to `make test`.
    ///
    /// Both invocations use the allow-failure flag so a failing suite is
    /// reported inside `TestResults` rather than as an `Err`.
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
        // Run make check or make test
        let start = std::time::Instant::now();
        // Try "make check" first (allow failure to parse)
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env("make", &["check"], Some(&ctx.build_dir), &merged_env, true)
            .await?;
        let success = if result.success {
            true
        } else {
            // Fallback to "make test"
            // NOTE(review): only the fallback's success flag is used — its
            // stdout/stderr are discarded and `output` below still reflects
            // the failed "make check" run; confirm this is intended
            let test_result = ctx
                .env
                .execute_command_with_env(
                    "make",
                    &["test"],
                    Some(&ctx.build_dir),
                    &merged_env,
                    true,
                )
                .await?;
            test_result.success
        };
        let duration = start.elapsed().as_secs_f64();
        // Parse test results from output
        // This is a simple implementation; real implementation would parse test suite output
        let output = format!("{}\n{}", result.stdout, result.stderr);
        let (total, passed, failed, skipped) = if success {
            // If make check succeeded, assume all tests passed
            // Real implementation would parse TESTS output
            (1, 1, 0, 0)
        } else {
            (1, 0, 1, 0)
        };
        Ok(TestResults {
            total,
            passed,
            failed,
            skipped,
            duration,
            output,
            failures: vec![],
        })
    }
    /// Stage the install via `make install DESTDIR=<staging dir>`.
    ///
    /// # Errors
    /// Returns `BuildError::InstallFailed` when make exits non-zero.
    async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        // Run make install with DESTDIR
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "make",
                &[
                    "install",
                    &format!("DESTDIR={}", ctx.env.staging_dir().display()),
                ],
                Some(&ctx.build_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::InstallFailed {
                message: format!("make install failed: {}", result.stderr),
            }
            .into());
        }
        // No need to adjust staged files since we're using BUILD_PREFIX now
        // which already includes the package-name-version structure
        Ok(())
    }
    /// Autotools-specific environment: PREFIX/DESTDIR plus optional compiler
    /// cache wrappers (ccache rewrites CC/CXX; sccache sets RUSTC_WRAPPER).
    fn get_env_vars(&self, ctx: &BuildSystemContext) -> HashMap<String, String> {
        let mut vars = HashMap::new();
        // Standard autotools environment variables
        vars.insert("PREFIX".to_string(), ctx.prefix.display().to_string());
        vars.insert(
            "DESTDIR".to_string(),
            ctx.env.staging_dir().display().to_string(),
        );
        // Compiler cache setup
        if let Some(cache_config) = &ctx.cache_config {
            if cache_config.use_compiler_cache {
                match cache_config.compiler_cache_type {
                    super::core::CompilerCacheType::CCache => {
                        vars.insert("CC".to_string(), "ccache gcc".to_string());
                        vars.insert("CXX".to_string(), "ccache g++".to_string());
                    }
                    super::core::CompilerCacheType::SCCache => {
                        vars.insert("RUSTC_WRAPPER".to_string(), "sccache".to_string());
                    }
                    super::core::CompilerCacheType::DistCC => {}
                }
            }
        }
        vars
    }
    fn name(&self) -> &'static str {
        "autotools"
    }
    fn prefers_out_of_source_build(&self) -> bool {
        // Autotools supports both, but in-source is traditional
        false
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/builder/src/build_systems/meson.rs | crates/builder/src/build_systems/meson.rs | //! Meson build system implementation
use super::{BuildSystem, BuildSystemConfig, BuildSystemContext, TestFailure, TestResults};
use async_trait::async_trait;
use sps2_errors::{BuildError, Error};
use std::collections::HashMap;
use std::path::Path;
/// Meson build system
pub struct MesonBuildSystem {
    // Capability flags, default setup arguments, and watch patterns,
    // populated by `new()` and handed out via `get_config_options()`.
    config: BuildSystemConfig,
}
impl MesonBuildSystem {
    /// Create a new Meson build system instance
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: BuildSystemConfig {
                supports_out_of_source: true,
                supports_parallel_builds: true,
                supports_incremental_builds: true,
                default_configure_args: vec![
                    "--buildtype=release".to_string(),
                    "--optimization=2".to_string(),
                    "--strip".to_string(),
                ],
                default_build_args: vec![],
                env_prefix: Some("MESON_".to_string()),
                watch_patterns: vec![
                    "meson.build".to_string(),
                    "meson_options.txt".to_string(),
                    "*/meson.build".to_string(),
                ],
            },
        }
    }
    /// Check if wrap handling should be disabled
    ///
    /// Wrap (subproject) downloads are disabled whenever network access is
    /// not allowed for this build.
    fn should_disable_wrap(ctx: &BuildSystemContext) -> bool {
        // Always disable wrap downloads to ensure reproducible builds
        !ctx.network_allowed
    }
    /// Get Meson setup arguments
    ///
    /// Assembles the `meson setup` command line: build dir, source dir,
    /// `--prefix` (unless user-supplied), defaults from the config (skipped
    /// when a user arg already starts with the same option name), wrap mode,
    /// `--pkg-config-path`, and finally the raw user arguments.
    fn get_setup_args(&self, ctx: &BuildSystemContext, user_args: &[String]) -> Vec<String> {
        let mut args = vec!["setup".to_string()];
        // Build directory
        args.push(ctx.build_dir.display().to_string());
        // Source directory (if different from current)
        if ctx.source_dir != std::env::current_dir().unwrap_or_default() {
            args.push(ctx.source_dir.display().to_string());
        }
        // Add prefix - use LIVE_PREFIX for runtime installation location
        if !user_args.iter().any(|arg| arg.starts_with("--prefix=")) {
            args.push(format!("--prefix={}", ctx.env.get_live_prefix()));
        }
        // Add default arguments
        // (a default is dropped when any user arg starts with its option
        // name, i.e. the part before the first '=')
        for default_arg in &self.config.default_configure_args {
            if !user_args
                .iter()
                .any(|arg| arg.starts_with(default_arg.split('=').next().unwrap_or("")))
            {
                args.push(default_arg.clone());
            }
        }
        // Handle wrap mode
        if Self::should_disable_wrap(ctx)
            && !user_args.iter().any(|arg| arg.starts_with("--wrap-mode="))
        {
            args.push("--wrap-mode=nodownload".to_string());
        }
        // macOS ARM only - no cross-compilation support
        // Add PKG_CONFIG_PATH
        if let Some(pkg_config_path) = ctx.get_all_env_vars().get("PKG_CONFIG_PATH") {
            args.push(format!("--pkg-config-path={pkg_config_path}"));
        }
        // Add user arguments
        args.extend(user_args.iter().cloned());
        args
    }
    /// Parse Meson test output
    ///
    /// Scans for per-test lines of the form `1/4 test_name OK 0.12s` and
    /// returns `(total, passed, failed, failures)`. `total` is the highest
    /// test index seen; statuses other than OK/EXPECTEDFAIL/FAIL/TIMEOUT
    /// (e.g. SKIP) count toward neither passed nor failed.
    fn parse_test_output(output: &str) -> (usize, usize, usize, Vec<TestFailure>) {
        let mut total = 0;
        let mut passed = 0;
        let mut failed = 0;
        let mut failures = vec![];
        for line in output.lines() {
            // Meson test output format: "1/4 test_name OK 0.12s"
            if let Some((test_num, test_name, status)) = parse_meson_test_line(line) {
                total = total.max(test_num);
                match status {
                    "OK" | "EXPECTEDFAIL" => passed += 1,
                    "FAIL" | "TIMEOUT" => {
                        failed += 1;
                        failures.push(TestFailure {
                            name: test_name.to_string(),
                            message: format!("Test {test_name} failed with status: {status}"),
                            details: None,
                        });
                    }
                    _ => {} // Don't count as passed or failed (includes SKIP)
                }
            }
        }
        (total, passed, failed, failures)
    }
}
impl Default for MesonBuildSystem {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl BuildSystem for MesonBuildSystem {
    /// A directory is a Meson project when it contains a `meson.build`.
    async fn detect(&self, source_dir: &Path) -> Result<bool, Error> {
        Ok(source_dir.join("meson.build").exists())
    }
    /// Static description of this build system's capabilities.
    fn get_config_options(&self) -> BuildSystemConfig {
        self.config.clone()
    }
    /// Run `meson setup` with the assembled arguments and merged environment.
    ///
    /// # Errors
    /// Returns `BuildError::ConfigureFailed` when meson exits non-zero.
    async fn configure(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        // Get setup arguments
        let setup_args = self.get_setup_args(ctx, args);
        let arg_refs: Vec<&str> = setup_args.iter().map(String::as_str).collect();
        // Merge environment
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        // Run meson setup
        let result = ctx
            .env
            .execute_command_with_env(
                "meson",
                &arg_refs,
                Some(&ctx.source_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::ConfigureFailed {
                message: format!("meson setup failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
    /// Run `meson compile -C <builddir>` (with `-j N` when parallel).
    ///
    /// # Errors
    /// Returns `BuildError::CompilationFailed` when meson exits non-zero.
    async fn build(&self, ctx: &BuildSystemContext, args: &[String]) -> Result<(), Error> {
        let mut compile_args = vec!["compile"];
        // Add parallel jobs
        // jobs_str is declared outside the `if` so the &str pushed into
        // compile_args borrows something that lives long enough
        let jobs_str;
        if ctx.jobs > 1 {
            jobs_str = ctx.jobs.to_string();
            compile_args.push("-j");
            compile_args.push(&jobs_str);
        }
        // Add build directory
        let build_dir_str = ctx.build_dir.display().to_string();
        compile_args.push("-C");
        compile_args.push(&build_dir_str);
        // Add user arguments
        let user_arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
        compile_args.extend(user_arg_refs);
        // Run meson compile
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "meson",
                &compile_args,
                Some(&ctx.source_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::CompilationFailed {
                message: format!("meson compile failed: {}", result.stderr),
            }
            .into());
        }
        Ok(())
    }
    /// Run `meson test` and parse its per-test lines into [`TestResults`].
    ///
    /// Executed with the allow-failure flag so a failing suite is parsed
    /// instead of aborting with an error.
    async fn test(&self, ctx: &BuildSystemContext) -> Result<TestResults, Error> {
        let start = std::time::Instant::now();
        // Run meson test (allow failure to parse)
        let build_dir_str = ctx.build_dir.display().to_string();
        let jobs_str = ctx.jobs.to_string();
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "meson",
                &[
                    "test",
                    "-C",
                    &build_dir_str,
                    "--print-errorlogs",
                    "--num-processes",
                    &jobs_str,
                ],
                Some(&ctx.source_dir),
                &merged_env,
                true,
            )
            .await?;
        let duration = start.elapsed().as_secs_f64();
        let output = format!("{}\n{}", result.stdout, result.stderr);
        // Parse test results
        let (total, passed, failed, failures) = Self::parse_test_output(&output);
        // Tests counted in total but neither passed nor failed (e.g. SKIP)
        // are reported as skipped
        let skipped = total.saturating_sub(passed + failed);
        Ok(TestResults {
            total,
            passed,
            failed,
            skipped,
            duration,
            output,
            failures,
        })
    }
    /// Stage the install via `meson install -C <builddir>` with DESTDIR
    /// pointed at the staging directory.
    ///
    /// # Errors
    /// Returns `BuildError::InstallFailed` when meson exits non-zero.
    async fn install(&self, ctx: &BuildSystemContext) -> Result<(), Error> {
        // Run meson install with DESTDIR in env
        let build_dir_str = ctx.build_dir.display().to_string();
        let mut merged_env = ctx.get_all_env_vars();
        merged_env.insert(
            "DESTDIR".to_string(),
            ctx.env.staging_dir().display().to_string(),
        );
        merged_env.extend(self.get_env_vars(ctx));
        let result = ctx
            .env
            .execute_command_with_env(
                "meson",
                &["install", "-C", &build_dir_str],
                Some(&ctx.source_dir),
                &merged_env,
                false,
            )
            .await?;
        if !result.success {
            return Err(BuildError::InstallFailed {
                message: format!("meson install failed: {}", result.stderr),
            }
            .into());
        }
        // No need to adjust staged files since we're using BUILD_PREFIX now
        Ok(())
    }
    /// Meson-specific environment (DESTDIR is injected in `install` instead).
    fn get_env_vars(&self, _ctx: &BuildSystemContext) -> HashMap<String, String> {
        let mut vars = HashMap::new();
        // Meson uses DESTDIR environment variable for staged installs
        // This is set in the install() method rather than globally
        // Meson-specific environment variables
        vars.insert("MESON_FORCE_COLOR".to_string(), "1".to_string());
        vars
    }
    fn name(&self) -> &'static str {
        "meson"
    }
    fn prefers_out_of_source_build(&self) -> bool {
        // Meson requires out-of-source builds
        true
    }
    fn build_directory_name(&self) -> &'static str {
        "builddir"
    }
}
/// Parse a single line of `meson test` output.
///
/// Lines look like `1/4 test_name OK 0.12s`; returns the running test
/// number, the test name, and the status token, or `None` for any line
/// that does not match that shape.
fn parse_meson_test_line(line: &str) -> Option<(usize, &str, &str)> {
    let mut fields = line.split_whitespace();
    let counter = fields.next()?;
    let name = fields.next()?;
    let status = fields.next()?;
    // "N/M" -> take N, the 1-based index of this test run.
    let (index, _) = counter.split_once('/')?;
    index.parse().ok().map(|num| (num, name, status))
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/config.rs | crates/errors/src/config.rs | //! Configuration error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Errors produced while locating, reading, validating, serializing, or
/// writing sps2 configuration.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum ConfigError {
    /// No configuration file exists at `path`.
    #[error("config file not found: {path}")]
    NotFound { path: String },
    /// The configuration content is invalid.
    #[error("invalid config: {message}")]
    Invalid { message: String },
    /// The configuration file could not be parsed.
    #[error("parse error: {message}")]
    ParseError { message: String },
    /// A required field is absent from the configuration.
    #[error("missing required field: {field}")]
    MissingField { field: String },
    /// A field is present but holds an unacceptable value.
    #[error("invalid value for {field}: {value}")]
    InvalidValue { field: String, value: String },
    /// A referenced environment variable is not set.
    #[error("environment variable not found: {var}")]
    EnvVarNotFound { var: String },
    /// Writing the configuration file to disk failed.
    #[error("failed to write config to {path}: {error}")]
    WriteError { path: String, error: String },
    /// Serializing the in-memory configuration failed.
    #[error("failed to serialize config: {error}")]
    SerializeError { error: String },
}
impl UserFacingError for ConfigError {
    /// The `Display` rendering is already phrased for end users.
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    /// Actionable follow-up advice for each error class, if any.
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::NotFound { .. } => {
                "Provide a configuration file or run `sps2 setup` to create one."
            }
            Self::MissingField { field } if field.as_str() == "store" => {
                "Set the store path in the configuration file or via CLI flags."
            }
            Self::MissingField { .. } => {
                "Add the missing configuration field noted in the error message."
            }
            Self::Invalid { .. } | Self::ParseError { .. } | Self::InvalidValue { .. } => {
                "Fix the configuration value and retry the command."
            }
            Self::EnvVarNotFound { .. } => {
                "Export the environment variable or move the value into the config file."
            }
            Self::WriteError { .. } => "Ensure the config path is writable and retry.",
            _ => return None,
        };
        Some(hint)
    }
    /// Configuration problems are deterministic; retrying never helps.
    fn is_retryable(&self) -> bool {
        false
    }
    /// Stable, machine-readable identifier for each variant.
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::NotFound { .. } => "config.not_found",
            Self::Invalid { .. } => "config.invalid",
            Self::ParseError { .. } => "config.parse_error",
            Self::MissingField { .. } => "config.missing_field",
            Self::InvalidValue { .. } => "config.invalid_value",
            Self::EnvVarNotFound { .. } => "config.env_var_not_found",
            Self::WriteError { .. } => "config.write_error",
            Self::SerializeError { .. } => "config.serialize_error",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/guard.rs | crates/errors/src/guard.rs | //! Guard-specific error types for state verification and healing operations
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Severity levels for guard-related errors and discrepancies.
///
/// Note: the derived `PartialOrd`/`Ord` follow declaration order, so
/// `Critical` compares *less than* `Low` — a smaller value means a more
/// severe discrepancy.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DiscrepancySeverity {
    /// Critical - system unusable, immediate action required
    Critical,
    /// High - major functionality affected, action recommended
    High,
    /// Medium - minor issues, action optional
    Medium,
    /// Low - informational only
    Low,
}
impl DiscrepancySeverity {
    /// Human-readable label for this severity level.
    #[must_use]
    pub fn description(self) -> &'static str {
        match self {
            Self::Critical => "Critical",
            Self::High => "High",
            Self::Medium => "Medium",
            Self::Low => "Low",
        }
    }
    /// Whether this severity is serious enough to demand immediate action.
    ///
    /// Exhaustive on purpose: adding a new severity level forces a decision
    /// here at compile time.
    #[must_use]
    pub fn requires_immediate_action(self) -> bool {
        match self {
            Self::Critical | Self::High => true,
            Self::Medium | Self::Low => false,
        }
    }
}
/// Errors emitted by the guard subsystem.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum GuardError {
    /// Verification operation failed.
    #[error("verification failed during {operation}: {details}")]
    VerificationFailed {
        operation: String,
        details: String,
        /// Number of discrepancies detected during the run.
        discrepancies_count: usize,
        /// Identifier of the state that was being verified.
        state_id: String,
        /// Wall-clock duration of the verification run, in milliseconds.
        duration_ms: u64,
    },
    /// Healing operation failed for a specific discrepancy.
    #[error("healing failed for {discrepancy_type} at {file_path}: {reason}")]
    HealingFailed {
        discrepancy_type: String,
        file_path: String,
        reason: String,
        /// Whether retrying the heal may succeed (consulted by
        /// `UserFacingError::is_retryable`).
        recoverable: bool,
    },
    /// Cache operation failed.
    #[error("cache operation failed: {operation} - {reason}")]
    CacheError { operation: String, reason: String },
    /// Invalid guard configuration.
    #[error("invalid guard configuration for {field}: {reason}")]
    ConfigurationError {
        field: String,
        reason: String,
        /// Optional remediation to surface to the user.
        suggested_fix: Option<String>,
    },
    /// Permission denied for guard operation.
    #[error("permission denied for {operation} on {path}")]
    PermissionError {
        operation: String,
        path: String,
        /// Permissions the operation would have needed.
        required_permissions: String,
    },
    /// Scope validation failed.
    #[error("invalid verification scope: {scope_type} - {reason}")]
    ScopeError {
        scope_type: String,
        reason: String,
        /// Optional alternative scope to suggest to the user.
        suggested_scope: Option<String>,
    },
    /// Guard operation timed out.
    #[error("guard operation timed out: {operation} after {duration_ms}ms")]
    TimeoutError {
        operation: String,
        /// How long the operation actually ran before timing out.
        duration_ms: u64,
        /// The configured timeout that was exceeded.
        timeout_limit_ms: u64,
    },
    /// Resource exhaustion during guard operation.
    #[error("insufficient resources for {operation}: {resource_type}")]
    ResourceExhausted {
        operation: String,
        resource_type: String,
        current_usage: Option<String>,
        limit: Option<String>,
    },
    /// Integrity check failed.
    #[error("integrity check failed for {component}: {details}")]
    IntegrityError {
        component: String,
        details: String,
        /// How serious the detected integrity problem is.
        severity: DiscrepancySeverity,
    },
    /// Guard state inconsistency detected.
    #[error("guard state inconsistency: {description}")]
    StateInconsistency {
        description: String,
        current_state: Option<String>,
        expected_state: Option<String>,
        /// Whether automatic recovery from this inconsistency is possible.
        recovery_possible: bool,
    },
}
impl UserFacingError for GuardError {
    /// The `Display` rendering is already phrased for end users.
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    /// Actionable follow-up advice for each error class, if any.
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::VerificationFailed { .. } => {
                "Inspect the reported discrepancies and rerun `sps2 verify --heal`."
            }
            Self::HealingFailed { .. } => {
                "Review the affected file and resolve the discrepancy before retrying."
            }
            Self::ConfigurationError { .. } => {
                "Fix the guard configuration option noted in the error message."
            }
            Self::PermissionError { .. } => {
                "Adjust filesystem permissions or rerun with elevated privileges."
            }
            Self::ScopeError { .. } => "Adjust the verification scope to a supported value.",
            Self::TimeoutError { .. } => {
                "Increase the guard timeout or retry when the system is less busy."
            }
            Self::ResourceExhausted { .. } => {
                "Free up the constrained resource and retry the verification."
            }
            _ => return None,
        };
        Some(hint)
    }
    /// Healing failures carry their own recoverability flag; cache, timeout,
    /// and resource errors are transient and therefore retryable.
    fn is_retryable(&self) -> bool {
        if let Self::HealingFailed { recoverable, .. } = self {
            return *recoverable;
        }
        matches!(
            self,
            Self::CacheError { .. } | Self::TimeoutError { .. } | Self::ResourceExhausted { .. }
        )
    }
    /// Stable, machine-readable identifier for each variant.
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::VerificationFailed { .. } => "guard.verification_failed",
            Self::HealingFailed { .. } => "guard.healing_failed",
            Self::CacheError { .. } => "guard.cache_error",
            Self::ConfigurationError { .. } => "guard.configuration_error",
            Self::PermissionError { .. } => "guard.permission_error",
            Self::ScopeError { .. } => "guard.scope_error",
            Self::TimeoutError { .. } => "guard.timeout",
            Self::ResourceExhausted { .. } => "guard.resource_exhausted",
            Self::IntegrityError { .. } => "guard.integrity_error",
            Self::StateInconsistency { .. } => "guard.state_inconsistency",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/lib.rs | crates/errors/src/lib.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
#![allow(clippy::module_name_repetitions)]
//! Error types for the sps2 package manager
//!
//! This crate provides fine-grained error types organized by domain.
//! All error types implement Clone where possible for easier handling.
use std::borrow::Cow;
use thiserror::Error;
pub mod build;
pub mod config;
pub mod guard;
pub mod install;
pub mod network;
pub mod ops;
pub mod package;
pub mod platform;
pub mod signing;
pub mod state;
pub mod storage;
pub mod version;
// Re-export all error types at the root
pub use build::BuildError;
pub use config::ConfigError;
pub use guard::{DiscrepancySeverity, GuardError};
pub use install::InstallError;
pub use network::NetworkError;
pub use ops::OpsError;
pub use package::PackageError;
pub use platform::PlatformError;
pub use signing::SigningError;
pub use state::StateError;
pub use storage::StorageError;
pub use version::VersionError;
/// Generic error type for cross-crate boundaries
///
/// Wraps every domain-specific error so crates can exchange a single
/// `Error` type; the `#[from]` derives make `?` conversions automatic.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Error {
    // One variant per error domain, each delegating Display to the inner type.
    #[error("network error: {0}")]
    Network(#[from] NetworkError),
    #[error("storage error: {0}")]
    Storage(#[from] StorageError),
    #[error("state error: {0}")]
    State(#[from] StateError),
    #[error("package error: {0}")]
    Package(#[from] PackageError),
    #[error("version error: {0}")]
    Version(#[from] VersionError),
    #[error("config error: {0}")]
    Config(#[from] ConfigError),
    #[error("build error: {0}")]
    Build(#[from] BuildError),
    #[error("install error: {0}")]
    Install(#[from] InstallError),
    #[error("ops error: {0}")]
    Ops(#[from] OpsError),
    #[error("guard error: {0}")]
    Guard(#[from] GuardError),
    #[error("platform error: {0}")]
    Platform(#[from] PlatformError),
    #[error("signing error: {0}")]
    Signing(#[from] SigningError),
    /// Catch-all for failures that have no dedicated domain.
    #[error("internal error: {0}")]
    Internal(String),
    #[error("operation cancelled")]
    Cancelled,
    /// I/O failure that preserves the `ErrorKind` and an optional path.
    #[error("I/O error: {message}")]
    Io {
        // `ErrorKind` is not serde-serializable; stored via its Debug string.
        #[cfg_attr(feature = "serde", serde(with = "io_kind_as_str"))]
        kind: std::io::ErrorKind,
        message: String,
        #[cfg_attr(feature = "serde", serde(with = "opt_path_buf"))]
        path: Option<std::path::PathBuf>,
    },
}
impl Error {
    /// Create an internal error with a message
    pub fn internal(msg: impl Into<String>) -> Self {
        let message = msg.into();
        Self::Internal(message)
    }
    /// Create an Io error with an associated path
    pub fn io_with_path(err: &std::io::Error, path: impl Into<std::path::PathBuf>) -> Self {
        let kind = err.kind();
        let message = err.to_string();
        Self::Io {
            kind,
            message,
            path: Some(path.into()),
        }
    }
}
impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        // No path context is available at this boundary; callers that know
        // the path should use `Error::io_with_path` instead.
        let kind = err.kind();
        let message = err.to_string();
        Self::Io {
            kind,
            message,
            path: None,
        }
    }
}
// Map semver parse failures into the version error domain.
impl From<semver::Error> for Error {
    fn from(err: semver::Error) -> Self {
        Self::Version(VersionError::ParseError {
            message: err.to_string(),
        })
    }
}
// Map sqlx database failures into the state error domain.
impl From<sqlx::Error> for Error {
    fn from(err: sqlx::Error) -> Self {
        Self::State(StateError::DatabaseError {
            message: err.to_string(),
        })
    }
}
// JSON (de)serialization failures have no dedicated domain; treat as internal.
impl From<serde_json::Error> for Error {
    fn from(err: serde_json::Error) -> Self {
        Self::Internal(format!("JSON error: {err}"))
    }
}
// Minisign signature verification failures map onto the signing domain.
impl From<minisign_verify::Error> for Error {
    fn from(err: minisign_verify::Error) -> Self {
        Self::Signing(SigningError::VerificationFailed {
            reason: err.to_string(),
        })
    }
}
/// Result type alias for sps2 operations
pub type Result<T> = std::result::Result<T, Error>;
/// Minimal interface for rendering user-facing error information without
/// requiring heavyweight envelopes.
pub trait UserFacingError {
    /// Short message suitable for CLI output.
    fn user_message(&self) -> Cow<'_, str>;
    /// Optional remediation hint.
    ///
    /// Defaults to `None`; implementors override for actionable variants.
    fn user_hint(&self) -> Option<&'static str> {
        None
    }
    /// Whether retrying the same operation is likely to succeed.
    ///
    /// Defaults to `false` (pessimistic).
    fn is_retryable(&self) -> bool {
        false
    }
    /// Stable error code for analytics / structured reporting.
    ///
    /// Defaults to `None` when no stable code exists.
    fn user_code(&self) -> Option<&'static str> {
        None
    }
}
impl UserFacingError for Error {
fn user_message(&self) -> Cow<'_, str> {
match self {
Error::Network(err) => err.user_message(),
Error::Install(err) => err.user_message(),
Error::Ops(err) => err.user_message(),
Error::Io { message, .. } => Cow::Owned(message.clone()),
_ => Cow::Owned(self.to_string()),
}
}
fn user_hint(&self) -> Option<&'static str> {
match self {
Error::Network(err) => err.user_hint(),
Error::Install(err) => err.user_hint(),
Error::Ops(err) => err.user_hint(),
Error::Config(_) => Some("Check your sps2 configuration file."),
_ => None,
}
}
fn is_retryable(&self) -> bool {
match self {
Error::Network(err) => err.is_retryable(),
Error::Install(err) => err.is_retryable(),
Error::Ops(err) => err.is_retryable(),
Error::Io { .. } => true,
_ => false,
}
}
fn user_code(&self) -> Option<&'static str> {
match self {
Error::Network(err) => err.user_code(),
Error::Storage(err) => err.user_code(),
Error::State(err) => err.user_code(),
Error::Package(err) => err.user_code(),
Error::Version(err) => err.user_code(),
Error::Config(err) => err.user_code(),
Error::Build(err) => err.user_code(),
Error::Install(err) => err.user_code(),
Error::Ops(err) => err.user_code(),
Error::Guard(err) => err.user_code(),
Error::Platform(err) => err.user_code(),
Error::Signing(err) => err.user_code(),
Error::Internal(_) => Some("error.internal"),
Error::Cancelled => Some("error.cancelled"),
Error::Io { .. } => Some("error.io"),
}
}
}
// Serde helper modules for optional path and io::ErrorKind as string
#[cfg(feature = "serde")]
mod io_kind_as_str {
    use serde::{Deserialize, Deserializer, Serializer};

    /// Serialize the kind via its `Debug` rendering (e.g. "NotFound").
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn serialize<S>(kind: &std::io::ErrorKind, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let rendered = format!("{kind:?}");
        s.serialize_str(&rendered)
    }

    /// Deserialize a `Debug`-style name back into an `ErrorKind`.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<std::io::ErrorKind, D::Error>
    where
        D: Deserializer<'de>,
    {
        let name = String::deserialize(deserializer)?;
        Ok(kind_from_name(&name))
    }

    /// Best-effort reverse of the `Debug` rendering used by `serialize`;
    /// unrecognized names default to `Other`.
    fn kind_from_name(name: &str) -> std::io::ErrorKind {
        use std::io::ErrorKind as K;
        match name {
            "NotFound" => K::NotFound,
            "PermissionDenied" => K::PermissionDenied,
            "ConnectionRefused" => K::ConnectionRefused,
            "ConnectionReset" => K::ConnectionReset,
            "ConnectionAborted" => K::ConnectionAborted,
            "NotConnected" => K::NotConnected,
            "AddrInUse" => K::AddrInUse,
            "AddrNotAvailable" => K::AddrNotAvailable,
            "BrokenPipe" => K::BrokenPipe,
            "AlreadyExists" => K::AlreadyExists,
            "WouldBlock" => K::WouldBlock,
            "InvalidInput" => K::InvalidInput,
            "InvalidData" => K::InvalidData,
            "TimedOut" => K::TimedOut,
            "WriteZero" => K::WriteZero,
            "Interrupted" => K::Interrupted,
            "Unsupported" => K::Unsupported,
            "UnexpectedEof" => K::UnexpectedEof,
            _ => K::Other,
        }
    }
}
#[cfg(feature = "serde")]
mod opt_path_buf {
    use serde::{Deserialize, Deserializer, Serializer};

    /// Serialize the optional path via its `Display` rendering.
    #[allow(clippy::ref_option)]
    pub fn serialize<S>(path: &Option<std::path::PathBuf>, s: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if let Some(pb) = path {
            s.serialize_some(&pb.display().to_string())
        } else {
            s.serialize_none()
        }
    }

    /// Deserialize an optional string back into an optional `PathBuf`.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<std::path::PathBuf>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let text = Option::<String>::deserialize(deserializer)?;
        Ok(text.map(std::path::PathBuf::from))
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/version.rs | crates/errors/src/version.rs | //! Version and constraint parsing error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Errors raised while parsing or resolving versions and constraints.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum VersionError {
    #[error("invalid version: {input}")]
    InvalidVersion { input: String },
    #[error("invalid version constraint: {input}")]
    InvalidConstraint { input: String },
    // A concrete version exists but does not match the requested constraint.
    #[error("incompatible version: {version} does not satisfy {constraint}")]
    IncompatibleVersion { version: String, constraint: String },
    // No available version matches the combined constraint set.
    #[error("no version satisfies constraints: {constraints}")]
    NoSatisfyingVersion { constraints: String },
    #[error("version parse error: {message}")]
    ParseError { message: String },
}
impl UserFacingError for VersionError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    /// Hints grouped by failure class: malformed input, malformed
    /// constraint, or an unsatisfiable requirement. Every variant
    /// has a hint, so the match is exhaustive.
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::InvalidVersion { .. } | Self::ParseError { .. } => {
                "Use semantic-version strings like 1.2.3 or consult the package's available versions."
            }
            Self::InvalidConstraint { .. } => "Use caret (`^`), tilde (`~`), or equality constraints accepted by sps2.",
            Self::IncompatibleVersion { .. } | Self::NoSatisfyingVersion { .. } => {
                "Relax the version requirement or select a different package build."
            }
        };
        Some(hint)
    }
    // Version problems are deterministic; retrying never helps.
    fn is_retryable(&self) -> bool {
        false
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::InvalidVersion { .. } => "version.invalid_version",
            Self::InvalidConstraint { .. } => "version.invalid_constraint",
            Self::IncompatibleVersion { .. } => "version.incompatible_version",
            Self::NoSatisfyingVersion { .. } => "version.no_satisfying_version",
            Self::ParseError { .. } => "version.parse_error",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/network.rs | crates/errors/src/network.rs | //! Network-related error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
// Hints shared by several variants below.
const HINT_CHECK_CONNECTION: &str = "Check your network connection and retry.";
const HINT_RETRY_LATER: &str = "Retry the operation; the service may recover shortly.";
/// Errors raised by downloads, HTTP requests, and transfer validation.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum NetworkError {
    #[error("connection timeout to {url}")]
    Timeout { url: String },
    #[error("download failed: {0}")]
    DownloadFailed(String),
    #[error("connection refused: {0}")]
    ConnectionRefused(String),
    #[error("invalid URL: {0}")]
    InvalidUrl(String),
    #[error("HTTP error {status}: {message}")]
    HttpError { status: u16, message: String },
    // Downloaded artifact does not match its expected digest.
    #[error("checksum mismatch: expected {expected}, got {actual}")]
    ChecksumMismatch { expected: String, actual: String },
    #[error("SSL/TLS error: {0}")]
    TlsError(String),
    #[error("network unavailable")]
    NetworkUnavailable,
    #[error("rate limited: retry after {seconds} seconds")]
    RateLimited { seconds: u64 },
    // Resumable-download specific failures.
    #[error("partial content not supported for resumable download")]
    PartialContentNotSupported,
    #[error("content length mismatch: expected {expected}, got {actual}")]
    ContentLengthMismatch { expected: u64, actual: u64 },
    #[error("range request failed: {message}")]
    RangeRequestFailed { message: String },
    #[error("file size exceeds limit: {size} bytes > {limit} bytes")]
    FileSizeExceeded { size: u64, limit: u64 },
    #[error("stream interrupted after {bytes} bytes")]
    StreamInterrupted { bytes: u64 },
    #[error("unsupported protocol: {protocol}")]
    UnsupportedProtocol { protocol: String },
}
impl UserFacingError for NetworkError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    /// Remediation hints for the failures a user can act on; the arm
    /// patterns are disjoint, so ordering is not significant.
    fn user_hint(&self) -> Option<&'static str> {
        match self {
            Self::ChecksumMismatch { .. } => {
                Some("Retry with `--no-cache` or verify the artifact.")
            }
            Self::RateLimited { .. } => Some("Wait for the rate limit window to expire."),
            Self::Timeout { .. } | Self::NetworkUnavailable => Some(HINT_CHECK_CONNECTION),
            Self::PartialContentNotSupported | Self::RangeRequestFailed { .. } => {
                Some("Retry without resume or select a different mirror.")
            }
            Self::StreamInterrupted { .. } => Some(HINT_RETRY_LATER),
            _ => None,
        }
    }
    /// Transient transport failures are retryable; malformed URLs,
    /// TLS problems, and size-limit violations are not.
    fn is_retryable(&self) -> bool {
        matches!(
            self,
            Self::Timeout { .. }
                | Self::DownloadFailed(_)
                | Self::ConnectionRefused(_)
                | Self::NetworkUnavailable
                | Self::RateLimited { .. }
                | Self::PartialContentNotSupported
                | Self::ContentLengthMismatch { .. }
                | Self::StreamInterrupted { .. }
        )
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::Timeout { .. } => "network.timeout",
            Self::DownloadFailed(_) => "network.download_failed",
            Self::ConnectionRefused(_) => "network.connection_refused",
            Self::InvalidUrl(_) => "network.invalid_url",
            Self::HttpError { .. } => "network.http_error",
            Self::ChecksumMismatch { .. } => "network.checksum_mismatch",
            Self::TlsError(_) => "network.tls_error",
            Self::NetworkUnavailable => "network.unavailable",
            Self::RateLimited { .. } => "network.rate_limited",
            Self::PartialContentNotSupported => "network.partial_content_not_supported",
            Self::ContentLengthMismatch { .. } => "network.content_length_mismatch",
            Self::RangeRequestFailed { .. } => "network.range_request_failed",
            Self::FileSizeExceeded { .. } => "network.file_size_exceeded",
            Self::StreamInterrupted { .. } => "network.stream_interrupted",
            Self::UnsupportedProtocol { .. } => "network.unsupported_protocol",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/platform.rs | crates/errors/src/platform.rs | //! Platform-specific operation errors
use std::borrow::Cow;
use crate::{BuildError, StorageError, UserFacingError};
use thiserror::Error;
/// Errors that can occur during platform-specific operations
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum PlatformError {
    // Failures while manipulating a binary (e.g. patching or signing steps).
    #[error("binary operation failed: {operation} on {binary_path} - {message}")]
    BinaryOperationFailed {
        operation: String,
        binary_path: String,
        message: String,
    },
    #[error("filesystem operation failed: {operation} - {message}")]
    FilesystemOperationFailed { operation: String, message: String },
    #[error("process execution failed: {command} - {message}")]
    ProcessExecutionFailed { command: String, message: String },
    #[error("platform capability not available: {capability}")]
    CapabilityUnavailable { capability: String },
    #[error("command not found: {command}")]
    CommandNotFound { command: String },
    #[error("invalid binary format: {path} - {message}")]
    InvalidBinaryFormat { path: String, message: String },
    #[error("signing operation failed: {binary_path} - {message}")]
    SigningFailed {
        binary_path: String,
        message: String,
    },
    #[error("permission denied: {operation} - {message}")]
    PermissionDenied { operation: String, message: String },
    // Records the locations searched so callers can surface diagnostics.
    #[error("tool '{tool}' not found. {suggestion}")]
    ToolNotFound {
        tool: String,
        suggestion: String,
        searched_paths: Vec<std::path::PathBuf>,
    },
    #[error("multiple tools not found: {}", .tools.join(", "))]
    MultipleToolsNotFound {
        tools: Vec<String>,
        suggestions: Vec<String>,
    },
    #[error("command failed: {command} - {error}")]
    CommandFailed { command: String, error: String },
    #[error("configuration error: {message}")]
    ConfigError { message: String },
}
// Lowers platform failures into the build error domain. Arm ORDER matters:
// the guarded arms must be tried before the catch-all `_` fallback.
impl From<PlatformError> for BuildError {
    fn from(err: PlatformError) -> Self {
        match err {
            PlatformError::SigningFailed { message, .. } => BuildError::SigningError { message },
            // Binary ops whose name mentions "sign" are also signing failures.
            PlatformError::BinaryOperationFailed {
                operation, message, ..
            } if operation.contains("sign") => BuildError::SigningError { message },
            // Classify process failures by the command text (best effort).
            PlatformError::ProcessExecutionFailed { command, message }
                if command.contains("git") =>
            {
                BuildError::Failed {
                    message: format!("git operation failed: {message}"),
                }
            }
            PlatformError::ProcessExecutionFailed { command, message }
                if command.contains("tar") || command.contains("zstd") =>
            {
                BuildError::ExtractionFailed { message }
            }
            PlatformError::FilesystemOperationFailed { message, .. } => BuildError::Failed {
                message: format!("filesystem operation failed: {message}"),
            },
            // Everything else becomes a generic build failure with the
            // original Display text preserved.
            _ => BuildError::Failed {
                message: err.to_string(),
            },
        }
    }
}
impl UserFacingError for PlatformError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::CommandNotFound { .. }
            | Self::ToolNotFound { .. }
            | Self::MultipleToolsNotFound { .. } => {
                "Install the required tool or adjust your PATH, then retry."
            }
            Self::CapabilityUnavailable { .. } => {
                "Enable the required platform capability or use an alternative workflow."
            }
            Self::PermissionDenied { .. } => {
                "Adjust filesystem permissions or rerun the command with elevated privileges."
            }
            _ => return None,
        };
        Some(hint)
    }
    /// Operational failures are considered retryable once the user has
    /// acted on the hint (installed the tool, fixed permissions, ...).
    fn is_retryable(&self) -> bool {
        matches!(
            self,
            Self::BinaryOperationFailed { .. }
                | Self::FilesystemOperationFailed { .. }
                | Self::ProcessExecutionFailed { .. }
                | Self::CommandFailed { .. }
                | Self::CommandNotFound { .. }
                | Self::ToolNotFound { .. }
                | Self::MultipleToolsNotFound { .. }
                | Self::PermissionDenied { .. }
        )
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::BinaryOperationFailed { .. } => "platform.binary_operation_failed",
            Self::FilesystemOperationFailed { .. } => "platform.filesystem_operation_failed",
            Self::ProcessExecutionFailed { .. } => "platform.process_execution_failed",
            Self::CapabilityUnavailable { .. } => "platform.capability_unavailable",
            Self::CommandNotFound { .. } => "platform.command_not_found",
            Self::InvalidBinaryFormat { .. } => "platform.invalid_binary_format",
            Self::SigningFailed { .. } => "platform.signing_failed",
            Self::PermissionDenied { .. } => "platform.permission_denied",
            Self::ToolNotFound { .. } => "platform.tool_not_found",
            Self::MultipleToolsNotFound { .. } => "platform.multiple_tools_not_found",
            Self::CommandFailed { .. } => "platform.command_failed",
            Self::ConfigError { .. } => "platform.config_error",
        })
    }
}
// Lowers platform failures into the storage error domain. Dispatch is by
// substring of the free-form `operation` text, so check order matters.
impl From<PlatformError> for StorageError {
    fn from(err: PlatformError) -> Self {
        match err {
            PlatformError::FilesystemOperationFailed { operation, message } => {
                if operation.contains("clone") || operation.contains("apfs") {
                    StorageError::ApfsCloneFailed { message }
                } else if operation.contains("rename") || operation.contains("atomic") {
                    StorageError::AtomicRenameFailed { message }
                } else {
                    StorageError::IoError { message }
                }
            }
            // NOTE(review): the platform message (not a path) is stored in
            // `path` here — confirm callers expect that.
            PlatformError::PermissionDenied { message, .. } => {
                StorageError::PermissionDenied { path: message }
            }
            _ => StorageError::IoError {
                message: err.to_string(),
            },
        }
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/state.rs | crates/errors/src/state.rs | //! State management error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Errors raised by state-database and state-transition handling.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum StateError {
    #[error("invalid state transition from {from} to {to}")]
    InvalidTransition { from: String, to: String },
    // Concurrent operation touched the same state.
    #[error("state conflict: {message}")]
    Conflict { message: String },
    #[error("state not found: {id}")]
    StateNotFound { id: String },
    #[error("database error: {message}")]
    DatabaseError { message: String },
    #[error("transaction failed: {message}")]
    TransactionFailed { message: String },
    #[error("state corrupted: {message}")]
    StateCorrupted { message: String },
    #[error("rollback failed: {message}")]
    RollbackFailed { message: String },
    // No state is currently marked active.
    #[error("active state missing")]
    ActiveStateMissing,
    #[error("migration failed: {message}")]
    MigrationFailed { message: String },
}
impl UserFacingError for StateError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::Conflict { .. } => "Retry once the concurrent operation has completed.",
            Self::StateNotFound { .. } => "List available states with `sps2 history --all`.",
            Self::ActiveStateMissing => "Run `sps2 check-health` to rebuild the active state.",
            Self::MigrationFailed { .. } => {
                "Review the migration logs and rerun `sps2 check-health`."
            }
            _ => return None,
        };
        Some(hint)
    }
    // Only contention-style failures are worth retrying unchanged.
    fn is_retryable(&self) -> bool {
        matches!(self, Self::Conflict { .. } | Self::TransactionFailed { .. })
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::InvalidTransition { .. } => "state.invalid_transition",
            Self::Conflict { .. } => "state.conflict",
            Self::StateNotFound { .. } => "state.state_not_found",
            Self::DatabaseError { .. } => "state.database_error",
            Self::TransactionFailed { .. } => "state.transaction_failed",
            Self::StateCorrupted { .. } => "state.state_corrupted",
            Self::RollbackFailed { .. } => "state.rollback_failed",
            Self::ActiveStateMissing => "state.active_state_missing",
            Self::MigrationFailed { .. } => "state.migration_failed",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/storage.rs | crates/errors/src/storage.rs | //! Storage and filesystem-related error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Errors raised by filesystem and content-store operations.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum StorageError {
    #[error("disk full: {path}")]
    DiskFull { path: String },
    #[error("permission denied: {path}")]
    PermissionDenied { path: String },
    #[error("path not found: {path}")]
    PathNotFound { path: String },
    #[error("directory not found: {path}")]
    DirectoryNotFound { path: std::path::PathBuf },
    #[error("already exists: {path}")]
    AlreadyExists { path: String },
    #[error("IO error: {message}")]
    IoError { message: String },
    #[error("corrupted data: {message}")]
    CorruptedData { message: String },
    #[error("invalid path: {path}")]
    InvalidPath { path: String },
    #[error("lock acquisition failed: {path}")]
    LockFailed { path: String },
    // macOS-specific copy-on-write clone failure.
    #[error("APFS clone failed: {message}")]
    ApfsCloneFailed { message: String },
    #[error("atomic rename failed: {message}")]
    AtomicRenameFailed { message: String },
    // Content-addressed lookup miss: keyed by hash, not path.
    #[error("package not found: {hash}")]
    PackageNotFound { hash: String },
}
impl From<std::io::Error> for StorageError {
    fn from(err: std::io::Error) -> Self {
        // Without a known path, avoid inventing placeholders; preserve only
        // the message (use `from_io_with_path` when a path is available).
        let message = err.to_string();
        Self::IoError { message }
    }
}
impl StorageError {
    /// Convert an `io::Error` into a `StorageError` with an associated path
    #[must_use]
    pub fn from_io_with_path(err: &std::io::Error, path: &std::path::Path) -> Self {
        use std::io::ErrorKind;
        // Lazily render the path only for the variants that store it.
        let shown = || path.display().to_string();
        match err.kind() {
            ErrorKind::PermissionDenied => Self::PermissionDenied { path: shown() },
            ErrorKind::NotFound => Self::PathNotFound { path: shown() },
            ErrorKind::AlreadyExists => Self::AlreadyExists { path: shown() },
            // All other kinds collapse into a generic I/O error that still
            // records the path in the message.
            _ => Self::IoError {
                message: format!("{}: {err}", path.display()),
            },
        }
    }
}
impl UserFacingError for StorageError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        let hint = match self {
            Self::DiskFull { .. } => "Free up disk space under /opt/pm and retry.",
            Self::PermissionDenied { .. } => {
                "Adjust filesystem permissions or retry with elevated privileges."
            }
            Self::LockFailed { .. } => {
                "Wait for other package-manager operations to finish, then retry."
            }
            _ => return None,
        };
        Some(hint)
    }
    // Lock contention and generic I/O hiccups may clear on retry.
    fn is_retryable(&self) -> bool {
        matches!(self, Self::LockFailed { .. } | Self::IoError { .. })
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::DiskFull { .. } => "storage.disk_full",
            Self::PermissionDenied { .. } => "storage.permission_denied",
            Self::PathNotFound { .. } => "storage.path_not_found",
            Self::DirectoryNotFound { .. } => "storage.directory_not_found",
            Self::AlreadyExists { .. } => "storage.already_exists",
            Self::IoError { .. } => "storage.io_error",
            Self::CorruptedData { .. } => "storage.corrupted_data",
            Self::InvalidPath { .. } => "storage.invalid_path",
            Self::LockFailed { .. } => "storage.lock_failed",
            Self::ApfsCloneFailed { .. } => "storage.apfs_clone_failed",
            Self::AtomicRenameFailed { .. } => "storage.atomic_rename_failed",
            Self::PackageNotFound { .. } => "storage.package_not_found",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/install.rs | crates/errors/src/install.rs | //! Installation system error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
// Hints shared by several variants below.
const HINT_WAIT_AND_RETRY: &str = "Wait for pending operations to finish, then retry.";
const HINT_RETRY_LATER: &str = "Retry the operation; the service may recover shortly.";
const HINT_DOWNLOAD_TIMEOUT: &str =
    "Retry the download or increase the timeout with --download-timeout.";
/// Errors raised by the package installation pipeline.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum InstallError {
    #[error("package not found: {package}")]
    PackageNotFound { package: String },
    #[error("extraction failed: {message}")]
    ExtractionFailed { message: String },
    #[error("atomic operation failed: {message}")]
    AtomicOperationFailed { message: String },
    #[error("filesystem operation failed: {operation} on {path}: {message}")]
    FilesystemError {
        operation: String,
        path: String,
        message: String,
    },
    #[error("state not found: {state_id}")]
    StateNotFound { state_id: String },
    // Uninstall refused because other packages depend on this one.
    #[error("package has dependents: {package}")]
    PackageHasDependents { package: String },
    #[error("no packages specified")]
    NoPackagesSpecified,
    #[error("local package not found: {path}")]
    LocalPackageNotFound { path: String },
    #[error("invalid package file {path}: {message}")]
    InvalidPackageFile { path: String, message: String },
    #[error("task execution failed: {message}")]
    TaskError { message: String },
    #[error("package not installed: {package}")]
    PackageNotInstalled { package: String },
    #[error("concurrency error: {message}")]
    ConcurrencyError { message: String },
    #[error("download timeout: {package} from {url} after {timeout_seconds}s")]
    DownloadTimeout {
        package: String,
        url: String,
        timeout_seconds: u64,
    },
    #[error("missing download URL for package: {package}")]
    MissingDownloadUrl { package: String },
    #[error("missing local path for package: {package}")]
    MissingLocalPath { package: String },
    #[error("temporary file error: {message}")]
    TempFileError { message: String },
    #[error("operation timeout: {message}")]
    OperationTimeout { message: String },
    #[error("no progress detected: {message}")]
    NoProgress { message: String },
}
impl UserFacingError for InstallError {
    fn user_message(&self) -> Cow<'_, str> {
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        match self {
            Self::DownloadTimeout { .. } => Some(HINT_DOWNLOAD_TIMEOUT),
            Self::ConcurrencyError { .. } => Some(HINT_WAIT_AND_RETRY),
            Self::OperationTimeout { .. } | Self::NoProgress { .. } => Some(HINT_RETRY_LATER),
            Self::MissingDownloadUrl { .. } | Self::MissingLocalPath { .. } => {
                Some("Ensure the package manifest includes a valid source.")
            }
            _ => None,
        }
    }
    /// Time-based and contention failures may succeed on a second attempt.
    fn is_retryable(&self) -> bool {
        matches!(
            self,
            Self::ConcurrencyError { .. }
                | Self::OperationTimeout { .. }
                | Self::NoProgress { .. }
                | Self::DownloadTimeout { .. }
        )
    }
    fn user_code(&self) -> Option<&'static str> {
        Some(match self {
            Self::PackageNotFound { .. } => "install.package_not_found",
            Self::ExtractionFailed { .. } => "install.extraction_failed",
            Self::AtomicOperationFailed { .. } => "install.atomic_operation_failed",
            Self::FilesystemError { .. } => "install.filesystem_error",
            Self::StateNotFound { .. } => "install.state_not_found",
            Self::PackageHasDependents { .. } => "install.package_has_dependents",
            Self::NoPackagesSpecified => "install.no_packages_specified",
            Self::LocalPackageNotFound { .. } => "install.local_package_not_found",
            Self::InvalidPackageFile { .. } => "install.invalid_package_file",
            Self::TaskError { .. } => "install.task_error",
            Self::PackageNotInstalled { .. } => "install.package_not_installed",
            Self::ConcurrencyError { .. } => "install.concurrency_error",
            Self::DownloadTimeout { .. } => "install.download_timeout",
            Self::MissingDownloadUrl { .. } => "install.missing_download_url",
            Self::MissingLocalPath { .. } => "install.missing_local_path",
            Self::TempFileError { .. } => "install.temp_file_error",
            Self::OperationTimeout { .. } => "install.operation_timeout",
            Self::NoProgress { .. } => "install.no_progress",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/build.rs | crates/errors/src/build.rs | //! Build system error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Errors raised while building packages from recipes.
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum BuildError {
    // --- Core build pipeline failures ---
    #[error("build failed: {message}")]
    Failed { message: String },
    #[error("recipe error: {message}")]
    RecipeError { message: String },
    #[error("missing build dependency: {name}")]
    MissingBuildDep { name: String },
    #[error("fetch failed: {url}")]
    FetchFailed { url: String },
    #[error("patch failed: {patch}")]
    PatchFailed { patch: String },
    #[error("configure failed: {message}")]
    ConfigureFailed { message: String },
    #[error("compile failed: {message}")]
    CompileFailed { message: String },
    #[error("install failed: {message}")]
    InstallFailed { message: String },
    // --- Sandbox / network policy ---
    #[error("sandbox violation: {message}")]
    SandboxViolation { message: String },
    #[error("network access denied")]
    NetworkAccessDenied,
    #[error("build timeout after {seconds} seconds")]
    Timeout { seconds: u64 },
    #[error("hash mismatch for {file}: expected {expected}, got {actual}")]
    HashMismatch {
        file: String,
        expected: String,
        actual: String,
    },
    #[error("SBOM error: {message}")]
    SbomError { message: String },
    // Per-package timeout (distinct from the global `Timeout` above).
    #[error("build timeout for {package} after {timeout_seconds} seconds")]
    BuildTimeout {
        package: String,
        timeout_seconds: u64,
    },
    #[error("extraction failed: {message}")]
    ExtractionFailed { message: String },
    #[error("network access disabled for {url}")]
    NetworkDisabled { url: String },
    #[error("invalid URL: {url}")]
    InvalidUrl { url: String },
    #[error("signing error: {message}")]
    SigningError { message: String },
    #[error("no build system detected in {path}")]
    NoBuildSystemDetected { path: String },
    #[error("dependency conflict: {message}")]
    DependencyConflict { message: String },
    #[error("compilation failed: {message}")]
    CompilationFailed { message: String },
    #[error("tests failed: {passed}/{total} tests passed")]
    TestsFailed { passed: usize, total: usize },
    // --- Quality assurance / policy ---
    #[error("quality assurance failed: {message}")]
    QualityAssuranceFailed { message: String },
    #[error("linter error: {linter} - {message}")]
    LinterError { linter: String, message: String },
    #[error("security vulnerability found: {scanner} - {message}")]
    SecurityVulnerability { scanner: String, message: String },
    #[error("policy violation: {rule} - {message}")]
    PolicyViolation { rule: String, message: String },
    #[error("license compliance error: {message}")]
    LicenseComplianceError { message: String },
    // --- Recipe drafting ---
    #[error("draft metadata extraction failed: {message}")]
    DraftMetadataFailed { message: String },
    #[error("draft template rendering failed: {message}")]
    DraftTemplateFailed { message: String },
    #[error("draft source preparation failed: {message}")]
    DraftSourceFailed { message: String },
    #[error("unsupported archive format: {format}")]
    UnsupportedArchiveFormat { format: String },
    #[error("git clone failed: {message}")]
    GitCloneFailed { message: String },
    // --- Validation / security of recipe-driven commands and paths ---
    #[error("validation failed: {message}")]
    ValidationFailed { message: String },
    #[error("dangerous command blocked: {command} - {reason}")]
    DangerousCommand { command: String, reason: String },
    #[error("invalid path: {path} - {reason}")]
    InvalidPath { path: String, reason: String },
    #[error("invalid URL: {url} - {reason}")]
    InvalidUrlValidation { url: String, reason: String },
    #[error("command parsing failed: {command} - {reason}")]
    CommandParseError { command: String, reason: String },
    #[error("path escape attempt: {path} resolves to {resolved} outside build root {build_root}")]
    PathEscapeAttempt {
        path: String,
        resolved: String,
        build_root: String,
    },
    #[error("dangerous write operation to {path}")]
    DangerousWrite { path: String },
    #[error("dangerous execution of {path}")]
    DangerousExecution { path: String },
    #[error("symlink loop detected at {path}")]
    SymlinkLoop { path: String },
    #[error("too many symlinks while resolving {path}")]
    TooManySymlinks { path: String },
    #[error("path traversal attempt: {path} - {reason}")]
    PathTraversalAttempt { path: String, reason: String },
    #[error("disallowed command: {command}")]
    DisallowedCommand { command: String },
}
impl UserFacingError for BuildError {
    fn user_message(&self) -> Cow<'_, str> {
        // The thiserror-generated `Display` text doubles as the user-facing message.
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        // Hints are offered only where the user has a concrete next action.
        match self {
            Self::MissingBuildDep { .. } => {
                Some("Install the missing build dependency or declare it in the recipe.")
            }
            Self::FetchFailed { .. } | Self::InvalidUrl { .. } | Self::NetworkDisabled { .. } => {
                Some("Check network access or provide local source artifacts for the build.")
            }
            Self::NetworkAccessDenied => {
                Some("Allow network access for the build or supply pre-fetched sources.")
            }
            Self::PatchFailed { .. } => {
                Some("Update the patch so it applies cleanly to the current sources.")
            }
            Self::Timeout { .. } | Self::BuildTimeout { .. } => {
                Some("Increase the build timeout or reduce parallelism, then retry.")
            }
            Self::SigningError { .. } => {
                Some("Verify signing configuration and ensure the required keys are available.")
            }
            Self::RecipeError { .. }
            | Self::InvalidPath { .. }
            | Self::InvalidUrlValidation { .. } => {
                Some("Correct the recipe definition before retrying the build.")
            }
            _ => None,
        }
    }
    fn is_retryable(&self) -> bool {
        // Only transient conditions (network fetches and timeouts) are worth
        // retrying without any change on the user's side.
        match self {
            Self::FetchFailed { .. } | Self::Timeout { .. } | Self::BuildTimeout { .. } => true,
            _ => false,
        }
    }
    fn user_code(&self) -> Option<&'static str> {
        // Stable machine-readable identifiers, one per variant, in the `build.` namespace.
        Some(match self {
            Self::Failed { .. } => "build.failed",
            Self::RecipeError { .. } => "build.recipe_error",
            Self::MissingBuildDep { .. } => "build.missing_build_dep",
            Self::FetchFailed { .. } => "build.fetch_failed",
            Self::PatchFailed { .. } => "build.patch_failed",
            Self::ConfigureFailed { .. } => "build.configure_failed",
            Self::CompileFailed { .. } => "build.compile_failed",
            Self::InstallFailed { .. } => "build.install_failed",
            Self::SandboxViolation { .. } => "build.sandbox_violation",
            Self::NetworkAccessDenied => "build.network_access_denied",
            Self::Timeout { .. } => "build.timeout",
            Self::HashMismatch { .. } => "build.hash_mismatch",
            Self::SbomError { .. } => "build.sbom_error",
            Self::BuildTimeout { .. } => "build.build_timeout",
            Self::ExtractionFailed { .. } => "build.extraction_failed",
            Self::NetworkDisabled { .. } => "build.network_disabled",
            Self::InvalidUrl { .. } => "build.invalid_url",
            Self::SigningError { .. } => "build.signing_error",
            Self::NoBuildSystemDetected { .. } => "build.no_build_system_detected",
            Self::DependencyConflict { .. } => "build.dependency_conflict",
            Self::CompilationFailed { .. } => "build.compilation_failed",
            Self::TestsFailed { .. } => "build.tests_failed",
            Self::QualityAssuranceFailed { .. } => "build.quality_assurance_failed",
            Self::LinterError { .. } => "build.linter_error",
            Self::SecurityVulnerability { .. } => "build.security_vulnerability",
            Self::PolicyViolation { .. } => "build.policy_violation",
            Self::LicenseComplianceError { .. } => "build.license_compliance_error",
            Self::DraftMetadataFailed { .. } => "build.draft_metadata_failed",
            Self::DraftTemplateFailed { .. } => "build.draft_template_failed",
            Self::DraftSourceFailed { .. } => "build.draft_source_failed",
            Self::UnsupportedArchiveFormat { .. } => "build.unsupported_archive_format",
            Self::GitCloneFailed { .. } => "build.git_clone_failed",
            Self::ValidationFailed { .. } => "build.validation_failed",
            Self::DangerousCommand { .. } => "build.dangerous_command",
            Self::InvalidPath { .. } => "build.invalid_path",
            Self::InvalidUrlValidation { .. } => "build.invalid_url_validation",
            Self::CommandParseError { .. } => "build.command_parse_error",
            Self::PathEscapeAttempt { .. } => "build.path_escape_attempt",
            Self::DangerousWrite { .. } => "build.dangerous_write",
            Self::DangerousExecution { .. } => "build.dangerous_execution",
            Self::SymlinkLoop { .. } => "build.symlink_loop",
            Self::TooManySymlinks { .. } => "build.too_many_symlinks",
            Self::PathTraversalAttempt { .. } => "build.path_traversal_attempt",
            Self::DisallowedCommand { .. } => "build.disallowed_command",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/signing.rs | crates/errors/src/signing.rs | #![deny(clippy::pedantic, unsafe_code)]
//! Signing error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum SigningError {
    /// Cryptographic verification of a signature did not succeed.
    #[error("signature verification failed: {reason}")]
    VerificationFailed { reason: String },
    /// A signature was present but its key id is not in the trusted key set.
    #[error("no trusted key found for signature with key id: {key_id}")]
    NoTrustedKeyFound { key_id: String },
    /// The signature bytes could not be parsed into a known format.
    #[error("invalid signature format: {0}")]
    InvalidSignatureFormat(String),
    /// The public key bytes could not be parsed into a known format.
    #[error("invalid public key format: {0}")]
    InvalidPublicKey(String),
}
impl UserFacingError for SigningError {
    fn user_message(&self) -> Cow<'_, str> {
        // Reuse the `Display` rendering as the user-facing text.
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        // Every signing failure has an actionable hint, so the match is total.
        let hint = match self {
            Self::VerificationFailed { .. } => {
                "Ensure you have the correct public key and the artifact has not been tampered with."
            }
            Self::NoTrustedKeyFound { .. } => {
                "Import the missing public key (`sps2 keys import`) and retry."
            }
            Self::InvalidSignatureFormat { .. } | Self::InvalidPublicKey { .. } => {
                "Check the signature and key files for corruption or unsupported formats."
            }
        };
        Some(hint)
    }
    fn is_retryable(&self) -> bool {
        // A missing trusted key can be fixed by importing it and retrying.
        match self {
            Self::NoTrustedKeyFound { .. } => true,
            _ => false,
        }
    }
    fn user_code(&self) -> Option<&'static str> {
        // Stable machine-readable identifiers in the `signing.` namespace.
        Some(match self {
            Self::VerificationFailed { .. } => "signing.verification_failed",
            Self::NoTrustedKeyFound { .. } => "signing.no_trusted_key",
            Self::InvalidSignatureFormat { .. } => "signing.invalid_signature_format",
            Self::InvalidPublicKey { .. } => "signing.invalid_public_key",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/package.rs | crates/errors/src/package.rs | //! Package-related error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum PackageError {
    /// The named package does not exist in any known index.
    #[error("package not found: {name}")]
    NotFound { name: String },
    /// Package contents failed an integrity check.
    #[error("package corrupted: {message}")]
    Corrupted { message: String },
    /// A required dependency (name + version spec) could not be satisfied.
    #[error("missing dependency: {name} {spec}")]
    MissingDependency { name: String, spec: String },
    /// Two or more requirements on the same package cannot be satisfied at once.
    #[error("dependency conflict: {message}")]
    DependencyConflict { message: String },
    /// A dependency chain loops back on itself.
    #[error("circular dependency: {packages}")]
    CircularDependency { packages: String },
    /// manifest.toml was unreadable or failed to parse/validate.
    #[error("invalid manifest: {message}")]
    InvalidManifest { message: String },
    /// The package signature did not verify against available keys.
    #[error("signature verification failed: {message}")]
    SignatureVerificationFailed { message: String },
    /// The package carried no signature at all.
    #[error("unsigned package")]
    UnsignedPackage,
    /// The .sp archive did not match the expected package layout.
    #[error("invalid package format: {message}")]
    InvalidFormat { message: String },
    /// The SBOM document was missing or could not be parsed.
    #[error("SBOM missing or invalid: {message}")]
    SbomError { message: String },
    /// The exact name/version pair is already installed.
    #[error("already installed: {name} {version}")]
    AlreadyInstalled { name: String, version: String },
    /// A cycle was detected while walking the dependency graph.
    #[error("dependency cycle detected: {package}")]
    DependencyCycle { package: String },
    /// The package uses a format version this build cannot handle.
    #[error("incompatible package format version {version}: {reason}")]
    IncompatibleFormat { version: String, reason: String },
    /// Dependency resolution exceeded its time budget.
    #[error("resolution timeout: {message}")]
    ResolutionTimeout { message: String },
    /// No source (repository or local artifact) could provide the package.
    #[error("source not available: {package}")]
    SourceNotAvailable { package: String },
}
impl UserFacingError for PackageError {
    fn user_message(&self) -> Cow<'_, str> {
        // The `Display` rendering is already written for end users.
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        // Only variants with a concrete user action carry a hint.
        match self {
            Self::NotFound { .. } => Some("Run `sps2 reposync` or check the package name."),
            Self::MissingDependency { .. } => {
                Some("Add the missing dependency to your install request or build recipe.")
            }
            Self::DependencyConflict { .. } | Self::DependencyCycle { .. } => {
                Some("Adjust your requested package versions to resolve the dependency conflict.")
            }
            Self::SignatureVerificationFailed { .. } | Self::UnsignedPackage => {
                Some("Verify the package signature or supply trusted keys before proceeding.")
            }
            Self::AlreadyInstalled { .. } => {
                Some("Use `sps2 update` or `sps2 upgrade` if you want a newer version.")
            }
            Self::ResolutionTimeout { .. } => {
                Some("Retry the operation with fewer packages or increase the resolver timeout.")
            }
            Self::SourceNotAvailable { .. } => {
                Some("Ensure the source repository is reachable or configured.")
            }
            _ => None,
        }
    }
    fn is_retryable(&self) -> bool {
        // Timeouts and unreachable sources are transient; everything else needs
        // user intervention first.
        match self {
            Self::ResolutionTimeout { .. } | Self::SourceNotAvailable { .. } => true,
            _ => false,
        }
    }
    fn user_code(&self) -> Option<&'static str> {
        // Stable machine-readable identifiers in the `package.` namespace.
        Some(match self {
            Self::NotFound { .. } => "package.not_found",
            Self::Corrupted { .. } => "package.corrupted",
            Self::MissingDependency { .. } => "package.missing_dependency",
            Self::DependencyConflict { .. } => "package.dependency_conflict",
            Self::CircularDependency { .. } => "package.circular_dependency",
            Self::InvalidManifest { .. } => "package.invalid_manifest",
            Self::SignatureVerificationFailed { .. } => "package.signature_verification_failed",
            Self::UnsignedPackage => "package.unsigned",
            Self::InvalidFormat { .. } => "package.invalid_format",
            Self::SbomError { .. } => "package.sbom_error",
            Self::AlreadyInstalled { .. } => "package.already_installed",
            Self::DependencyCycle { .. } => "package.dependency_cycle",
            Self::IncompatibleFormat { .. } => "package.incompatible_format",
            Self::ResolutionTimeout { .. } => "package.resolution_timeout",
            Self::SourceNotAvailable { .. } => "package.source_not_available",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/errors/src/ops.rs | crates/errors/src/ops.rs | //! Operation orchestration error types
use std::borrow::Cow;
use crate::UserFacingError;
use thiserror::Error;
/// Hint shown when a command that requires package arguments was invoked with none.
const HINT_PROVIDE_PACKAGE: &str =
    "Provide at least one package spec (e.g. `sps2 install ripgrep`).";
#[derive(Debug, Clone, Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum OpsError {
    /// Catch-all for an orchestrated operation that failed.
    #[error("operation failed: {message}")]
    OperationFailed { message: String },
    /// A required runtime component was not available.
    #[error("component not found: {component}")]
    MissingComponent { component: String },
    /// The requested operation is not valid in the current context.
    #[error("invalid operation: {operation}")]
    InvalidOperation { operation: String },
    /// Serializing or deserializing operation data failed.
    #[error("serialization error: {message}")]
    SerializationError { message: String },
    /// A package-taking command was invoked with no package specs.
    #[error("no packages specified")]
    NoPackagesSpecified,
    /// The build recipe file does not exist at the given path.
    #[error("recipe not found: {path}")]
    RecipeNotFound { path: String },
    /// The recipe exists but failed validation.
    #[error("invalid recipe {path}: {reason}")]
    InvalidRecipe { path: String, reason: String },
    /// The named package could not be located for this operation.
    #[error("package not found: {package}")]
    PackageNotFound { package: String },
    /// Rollback was requested but no earlier state exists.
    #[error("no previous state")]
    NoPreviousState,
    /// The referenced state id does not exist.
    #[error("state not found: {state_id}")]
    StateNotFound { state_id: uuid::Uuid },
    /// Synchronizing the package repository index failed.
    #[error("repository sync failed: {message}")]
    RepoSyncFailed { message: String },
    /// Updating sps2 itself failed.
    #[error("self-update failed: {message}")]
    SelfUpdateFailed { message: String },
    /// State verification found one or more discrepancies on disk.
    #[error("state verification failed: {discrepancies} discrepancies found in state {state_id}")]
    VerificationFailed {
        discrepancies: usize,
        state_id: String,
    },
    /// The staging directory for a package build output is missing.
    #[error("staging directory not found: {path} (for package {package})")]
    StagingDirectoryNotFound { path: String, package: String },
    /// The staging directory exists but has an invalid layout.
    #[error("invalid staging directory {path}: {reason}")]
    InvalidStagingDirectory { path: String, reason: String },
}
impl UserFacingError for OpsError {
    fn user_message(&self) -> Cow<'_, str> {
        // The `Display` text is already phrased for end users.
        Cow::Owned(self.to_string())
    }
    fn user_hint(&self) -> Option<&'static str> {
        match self {
            Self::NoPackagesSpecified => Some(HINT_PROVIDE_PACKAGE),
            Self::NoPreviousState => Some("Create a state snapshot before attempting rollback."),
            _ => None,
        }
    }
    fn is_retryable(&self) -> bool {
        // NOTE(review): these two are user-input errors rather than transient
        // failures; other error types mark only timeouts/network as retryable.
        // Preserving existing behavior — confirm the intended semantics.
        match self {
            Self::NoPackagesSpecified | Self::NoPreviousState => true,
            _ => false,
        }
    }
    fn user_code(&self) -> Option<&'static str> {
        // Stable machine-readable identifiers in the `ops.` namespace.
        Some(match self {
            Self::OperationFailed { .. } => "ops.operation_failed",
            Self::MissingComponent { .. } => "ops.missing_component",
            Self::InvalidOperation { .. } => "ops.invalid_operation",
            Self::SerializationError { .. } => "ops.serialization_error",
            Self::NoPackagesSpecified => "ops.no_packages_specified",
            Self::RecipeNotFound { .. } => "ops.recipe_not_found",
            Self::InvalidRecipe { .. } => "ops.invalid_recipe",
            Self::PackageNotFound { .. } => "ops.package_not_found",
            Self::NoPreviousState => "ops.no_previous_state",
            Self::StateNotFound { .. } => "ops.state_not_found",
            Self::RepoSyncFailed { .. } => "ops.repo_sync_failed",
            Self::SelfUpdateFailed { .. } => "ops.self_update_failed",
            Self::VerificationFailed { .. } => "ops.verification_failed",
            Self::StagingDirectoryNotFound { .. } => "ops.staging_directory_not_found",
            Self::InvalidStagingDirectory { .. } => "ops.invalid_staging_directory",
        })
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/manifest_io.rs | crates/store/src/manifest_io.rs | #![deny(clippy::pedantic, unsafe_code)]
#![allow(clippy::module_name_repetitions)]
//! Manifest I/O helpers colocated with the store.
use sps2_errors::{Error, PackageError};
use sps2_types::Manifest;
use std::path::Path;
/// Read and parse a `manifest.toml` file at `path`.
///
/// # Errors
/// Returns an error if the file cannot be read or the TOML fails to parse.
pub async fn read_manifest(path: &Path) -> Result<Manifest, Error> {
    // Surface read failures as manifest errors so callers get package context.
    let raw = match tokio::fs::read_to_string(path).await {
        Ok(text) => text,
        Err(e) => {
            return Err(PackageError::InvalidManifest {
                message: format!("failed to read manifest: {e}"),
            }
            .into())
        }
    };
    Manifest::from_toml(&raw)
}
/// Serialize a manifest and write it to `path` as TOML.
///
/// # Errors
/// Returns an error if TOML serialization or the filesystem write fails.
pub async fn write_manifest(path: &Path, manifest: &Manifest) -> Result<(), Error> {
    let rendered = manifest.to_toml()?;
    // Report write failures as manifest errors, mirroring `read_manifest`.
    tokio::fs::write(path, rendered)
        .await
        .map_err(|e| PackageError::InvalidManifest {
            message: format!("failed to write manifest: {e}"),
        })?;
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/lib.rs | crates/store/src/lib.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
//! Content-addressed storage for sps2
//!
//! This crate manages the `/opt/pm/store/` directory where packages
//! are stored by their content hash. Each package is immutable and
//! can be hard-linked into multiple state directories.
mod archive;
mod file_store;
mod format_detection;
pub mod manifest_io;
mod package;
pub use archive::{
create_package, extract_package, extract_package_with_events, list_package_contents,
};
pub use file_store::{FileStore, FileVerificationResult};
pub use format_detection::{PackageFormatDetector, PackageFormatInfo, StoreFormatValidator};
pub use package::StoredPackage;
use sps2_errors::{Error, StorageError};
use sps2_hash::Hash;
use sps2_platform::filesystem_helpers::set_compression;
use sps2_platform::PlatformManager;
use std::path::{Path, PathBuf};
/// Store manager for content-addressed packages
#[derive(Clone, Debug)]
pub struct PackageStore {
    /// Root of the store; package metadata lives under `<base_path>/packages/<hash>`.
    base_path: PathBuf,
    /// Validates `.sp` format compatibility before packages are stored.
    format_validator: StoreFormatValidator,
    /// File-level content-addressed store used for per-file deduplication.
    file_store: FileStore,
}
impl PackageStore {
/// Create a new store instance
#[must_use]
pub fn new(base_path: PathBuf) -> Self {
let file_store = FileStore::new(&base_path);
Self {
base_path,
format_validator: StoreFormatValidator::new(),
file_store,
}
}
/// Create a new store instance that allows incompatible package formats
///
/// This is useful for migration tools that need to work with older package formats
#[must_use]
pub fn new_with_migration_support(base_path: PathBuf) -> Self {
let file_store = FileStore::new(&base_path);
Self {
base_path,
format_validator: StoreFormatValidator::allow_incompatible(),
file_store,
}
}
    /// Create a platform context for filesystem operations
    ///
    /// Returns the process-wide platform singleton plus a fresh context
    /// created without an event sender (`None`), so these filesystem calls
    /// emit no progress events.
    fn create_platform_context() -> (
        &'static sps2_platform::Platform,
        sps2_platform::core::PlatformContext,
    ) {
        let platform = PlatformManager::instance().platform();
        let context = platform.create_context(None);
        (platform, context)
    }
    /// Get the path for a package hash
    ///
    /// Package metadata directories live under `<base_path>/packages/<hex hash>`.
    #[must_use]
    pub fn package_path(&self, hash: &Hash) -> PathBuf {
        self.base_path.join("packages").join(hash.to_hex())
    }
    /// Get the file store for file-level operations
    #[must_use]
    pub fn file_store(&self) -> &FileStore {
        &self.file_store
    }
    /// Get the path to a file in the file store by its hash
    ///
    /// Delegates layout decisions to the underlying [`FileStore`].
    #[must_use]
    pub fn file_path(&self, hash: &Hash) -> PathBuf {
        self.file_store.file_path(hash)
    }
/// Check if a package exists in the store
pub async fn has_package(&self, hash: &Hash) -> bool {
let path = self.package_path(hash);
let (platform, ctx) = Self::create_platform_context();
platform.filesystem().exists(&ctx, &path).await
}
/// Load a stored package if it exists in the store
///
/// # Errors
///
/// Returns an error if the package metadata cannot be loaded from disk.
pub async fn load_package_if_exists(
&self,
hash: &Hash,
) -> Result<Option<StoredPackage>, Error> {
if self.has_package(hash).await {
let package_path = self.package_path(hash);
let package = StoredPackage::load(&package_path).await?;
Ok(Some(package))
} else {
Ok(None)
}
}
    /// Add a package to the store from a .sp file
    ///
    /// This extracts the package, hashes individual files for deduplication,
    /// and stores the package metadata with file references.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - File I/O operations fail
    /// - Package extraction fails
    /// - Package hash computation fails
    /// - Directory creation fails
    /// - Package format is incompatible
    pub async fn add_package(&self, sp_file: &Path) -> Result<StoredPackage, Error> {
        // Validate package format before processing (no direct printing here)
        self.format_validator
            .validate_before_storage(sp_file)
            .await?;
        // Extract to temporary directory first; `temp_dir` is removed on drop,
        // so the extracted tree only lives for the duration of this call.
        let temp_dir = tempfile::tempdir().map_err(|e| StorageError::IoError {
            message: e.to_string(),
        })?;
        extract_package(sp_file, temp_dir.path()).await?;
        // Compute hash of the extracted contents for package identity
        let package_hash = sps2_hash::Hash::hash_directory(temp_dir.path()).await?;
        // Check if package already exists — content addressing makes this an
        // idempotent fast path.
        let package_path = self.base_path.join("packages").join(package_hash.to_hex());
        let (platform, ctx) = Self::create_platform_context();
        if platform.filesystem().exists(&ctx, &package_path).await {
            // Package already stored, just return it
            return StoredPackage::load(&package_path).await;
        }
        // Initialize file store if needed
        self.file_store.initialize().await?;
        // Hash and store all individual files (deduplicated by content hash)
        let file_results = self.file_store.store_directory(temp_dir.path()).await?;
        // Create package directory
        platform
            .filesystem()
            .create_dir_all(&ctx, &package_path)
            .await?;
        // Copy manifest to package directory
        let manifest_src = temp_dir.path().join("manifest.toml");
        let manifest_dest = package_path.join("manifest.toml");
        tokio::fs::copy(&manifest_src, &manifest_dest).await?;
        // Copy SBOM if it exists (either SPDX or CycloneDX form)
        for sbom_name in &["sbom.spdx.json", "sbom.cdx.json"] {
            let sbom_src = temp_dir.path().join(sbom_name);
            if platform.filesystem().exists(&ctx, &sbom_src).await {
                let sbom_dest = package_path.join(sbom_name);
                tokio::fs::copy(&sbom_src, &sbom_dest).await?;
            }
        }
        // Create files.json with all file references
        let files_json =
            serde_json::to_string_pretty(&file_results).map_err(|e| StorageError::IoError {
                message: format!("failed to serialize file results: {e}"),
            })?;
        let files_path = package_path.join("files.json");
        tokio::fs::write(&files_path, files_json).await?;
        // Set compression on macOS
        set_compression(&package_path)?;
        StoredPackage::load(&package_path).await
    }
/// Remove a package from the store
///
/// # Errors
///
/// Returns an error if directory removal fails
pub async fn remove_package(&self, hash: &Hash) -> Result<(), Error> {
let path = self.package_path(hash);
let (platform, ctx) = Self::create_platform_context();
if platform.filesystem().exists(&ctx, &path).await {
platform.filesystem().remove_dir_all(&ctx, &path).await?;
}
Ok(())
}
/// Get the size of a stored package
///
/// # Errors
///
/// Returns an error if the package path doesn't exist or size calculation fails
pub async fn package_size(&self, hash: &Hash) -> Result<u64, Error> {
let path = self.package_path(hash);
let (platform, ctx) = Self::create_platform_context();
platform.filesystem().size(&ctx, &path).await.map_err(|e| {
StorageError::IoError {
message: e.to_string(),
}
.into()
})
}
/// Link package contents into a destination
///
/// # Errors
///
/// Returns an error if:
/// - Package loading fails
/// - Linking operation fails
pub async fn link_package(&self, hash: &Hash, dest_root: &Path) -> Result<(), Error> {
let pkg = StoredPackage::load(&self.package_path(hash)).await?;
pkg.link_to(dest_root).await
}
/// Get SBOM data for a package
///
/// # Errors
///
/// Returns an error if:
/// - Package cannot be found by hash
/// - SBOM file cannot be read
pub async fn get_package_sbom(&self, hash: &Hash) -> Result<Vec<u8>, Error> {
// Get the package path using the hash
let package_path = self.package_path(hash);
let (platform, ctx) = Self::create_platform_context();
// Try to read SPDX SBOM first
let spdx_path = package_path.join("sbom.spdx.json");
if platform.filesystem().exists(&ctx, &spdx_path).await {
return tokio::fs::read(&spdx_path).await.map_err(|e| {
StorageError::IoError {
message: format!("failed to read SBOM file: {e}"),
}
.into()
});
}
// Fall back to CycloneDX SBOM
let cdx_path = package_path.join("sbom.cdx.json");
if platform.filesystem().exists(&ctx, &cdx_path).await {
return tokio::fs::read(&cdx_path).await.map_err(|e| {
StorageError::IoError {
message: format!("failed to read SBOM file: {e}"),
}
.into()
});
}
// No SBOM found
Err(StorageError::IoError {
message: format!(
"SBOM file not found for package with hash {}",
hash.to_hex()
),
}
.into())
}
/// Add a local package file to the store
///
/// # Errors
///
/// Returns an error if package addition fails
pub async fn add_local_package(
&self,
local_path: &std::path::Path,
) -> Result<std::path::PathBuf, Error> {
let stored_package = self.add_package(local_path).await?;
Ok(stored_package.path().to_path_buf())
}
/// List all packages in the store
///
/// # Errors
///
/// Returns an error if directory traversal fails or I/O operations fail
pub async fn list_packages(&self) -> Result<Vec<Hash>, Error> {
let mut packages = Vec::new();
let mut entries = tokio::fs::read_dir(&self.base_path).await?;
while let Some(entry) = entries.next_entry().await? {
if !entry.file_type().await?.is_dir() {
continue;
}
let name = entry.file_name();
if let Some(hash_str) = name.to_str() {
// Each directory name should be a complete hash (flat structure)
if let Ok(hash) = Hash::from_hex(hash_str) {
packages.push(hash);
}
}
}
Ok(packages)
}
/// Clean up the store (remove empty directories)
///
/// # Errors
///
/// Returns an error if directory cleanup operations fail
pub async fn cleanup(&self) -> Result<(), Error> {
// Walk the store and remove empty directories
self.cleanup_dir(&self.base_path).await?;
Ok(())
}
    /// Recursively remove empty subdirectories of `dir`.
    ///
    /// Returns `true` if `dir` itself is empty after cleanup, so the caller
    /// one level up can decide whether to remove it. Recursion goes through
    /// `Box::pin` because async fns cannot recurse with an unboxed future.
    async fn cleanup_dir(&self, dir: &Path) -> Result<bool, Error> {
        let mut is_empty = true;
        let mut entries = tokio::fs::read_dir(dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if entry.file_type().await?.is_dir() {
                if Box::pin(self.cleanup_dir(&path)).await? {
                    // Remove empty directory; failure is deliberately ignored
                    // (best-effort cleanup — a racing writer may repopulate it).
                    let _ = tokio::fs::remove_dir(&path).await;
                } else {
                    is_empty = false;
                }
            } else {
                // Any file keeps this directory (and its ancestors) alive.
                is_empty = false;
            }
        }
        Ok(is_empty)
    }
/// Verify store integrity
///
/// # Errors
///
/// Returns an error if package listing fails or verification operations fail
pub async fn verify(&self) -> Result<Vec<(Hash, String)>, Error> {
let mut errors = Vec::new();
let (platform, ctx) = Self::create_platform_context();
for hash in self.list_packages().await? {
let path = self.package_path(&hash);
// Check manifest exists
let manifest_path = path.join("manifest.toml");
if !platform.filesystem().exists(&ctx, &manifest_path).await {
errors.push((hash, "missing manifest.toml".to_string()));
}
// Could add more verification here (file checksums, etc.)
}
Ok(errors)
}
    /// Garbage collect unreferenced packages
    ///
    /// Currently a stub that removes nothing and reports `0`; a real
    /// implementation needs the state manager to know which packages are
    /// still referenced.
    ///
    /// # Errors
    ///
    /// Currently returns success, but may return errors in future implementations
    /// when state manager integration is added
    pub fn garbage_collect(&self) -> Result<usize, Error> {
        // This would need to integrate with state manager to find unreferenced packages
        // For now, return 0 packages removed
        Ok(0)
    }
/// Verify store integrity
///
/// # Errors
///
/// Returns an error if the base path doesn't exist or is not accessible
pub fn verify_integrity(&self) -> Result<(), Error> {
// Basic verification - check if base path exists and is accessible
if !self.base_path.exists() {
return Err(sps2_errors::StorageError::DirectoryNotFound {
path: self.base_path.clone(),
}
.into());
}
Ok(())
}
/// Get package format information from a .sp file
///
/// # Errors
///
/// Returns an error if format detection fails
pub async fn get_package_format_info(
&self,
sp_file: &Path,
) -> Result<PackageFormatInfo, Error> {
let detector = PackageFormatDetector::new();
detector.detect_format(sp_file).await
}
/// Get package format information from a stored package
///
/// # Errors
///
/// Returns an error if the package is not found or format detection fails
pub async fn get_stored_package_format_info(
&self,
hash: &Hash,
) -> Result<PackageFormatInfo, Error> {
let package_path = self.package_path(hash);
let (platform, ctx) = Self::create_platform_context();
if !platform.filesystem().exists(&ctx, &package_path).await {
return Err(StorageError::PackageNotFound {
hash: hash.to_hex(),
}
.into());
}
self.format_validator
.validate_stored_package(&package_path)
.await
}
/// Check if a stored package is compatible with the current format version
///
/// # Errors
///
/// Returns an error if the package is not found or format detection fails
pub async fn is_package_compatible(&self, hash: &Hash) -> Result<bool, Error> {
let format_info = self.get_stored_package_format_info(hash).await?;
Ok(format_info.is_compatible)
}
    /// Add package from file with specific name and version
    ///
    /// The name and version parameters are currently unused (hence the
    /// underscore names); identity comes from the content hash computed by
    /// `add_package`.
    ///
    /// # Errors
    ///
    /// Returns an error if package addition fails
    pub async fn add_package_from_file(
        &self,
        file_path: &std::path::Path,
        _package_name: &str,
        _package_version: &sps2_types::Version,
    ) -> Result<StoredPackage, Error> {
        // For now, just delegate to add_package
        self.add_package(file_path).await
    }
    /// Add package from staging directory
    ///
    /// This computes the package hash, stores individual files,
    /// and creates the package metadata directory.
    ///
    /// NOTE(review): the storage steps largely duplicate `add_package`;
    /// consider extracting a shared helper.
    ///
    /// # Errors
    ///
    /// Returns an error if staging directory processing fails
    pub async fn add_package_from_staging(
        &self,
        staging_path: &std::path::Path,
        _package_id: &sps2_resolver::PackageId,
    ) -> Result<StoredPackage, Error> {
        // Read manifest to get package info — the contents are unused here;
        // the read only confirms the staging tree carries a readable manifest.
        let manifest_path = staging_path.join("manifest.toml");
        let _manifest_content = tokio::fs::read_to_string(&manifest_path)
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to read manifest from staging: {e}"),
            })?;
        // Compute hash of staging directory contents for package identity
        let package_hash = self.compute_staging_hash(staging_path).await?;
        // Check if package already exists (idempotent content-addressed path)
        let package_path = self.base_path.join("packages").join(package_hash.to_hex());
        let (platform, ctx) = Self::create_platform_context();
        if platform.filesystem().exists(&ctx, &package_path).await {
            return StoredPackage::load(&package_path).await;
        }
        // Initialize file store if needed
        self.file_store.initialize().await?;
        // Hash and store all individual files
        let file_results = self.file_store.store_directory(staging_path).await?;
        // Create package directory
        platform
            .filesystem()
            .create_dir_all(&ctx, &package_path)
            .await?;
        // Copy manifest to package directory
        let manifest_dest = package_path.join("manifest.toml");
        tokio::fs::copy(&manifest_path, &manifest_dest).await?;
        // Copy SBOM if it exists
        for sbom_name in &["sbom.spdx.json", "sbom.cdx.json"] {
            let sbom_src = staging_path.join(sbom_name);
            if platform.filesystem().exists(&ctx, &sbom_src).await {
                let sbom_dest = package_path.join(sbom_name);
                tokio::fs::copy(&sbom_src, &sbom_dest).await?;
            }
        }
        // Create files.json with all file references
        let files_json =
            serde_json::to_string_pretty(&file_results).map_err(|e| StorageError::IoError {
                message: format!("failed to serialize file results: {e}"),
            })?;
        let files_path = package_path.join("files.json");
        tokio::fs::write(&files_path, files_json).await?;
        // Set compression on macOS
        set_compression(&package_path)?;
        StoredPackage::load(&package_path).await
    }
    /// Compute hash of staging directory contents
    ///
    /// Thin wrapper over [`sps2_hash::Hash::hash_directory`] kept as a method
    /// so the hashing strategy can change in one place.
    async fn compute_staging_hash(&self, staging_path: &Path) -> Result<Hash, Error> {
        // Hash the entire staging directory for true content-addressable storage
        sps2_hash::Hash::hash_directory(staging_path).await
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/archive.rs | crates/store/src/archive.rs | //! Package archive handling (.sp files)
//!
//! This module provides support for .sp package archives using zstd compression.
use async_compression::tokio::bufread::ZstdDecoder as AsyncZstdReader;
use sps2_errors::{Error, PackageError, StorageError};
use sps2_events::{AppEvent, EventEmitter, EventSender, GeneralEvent};
use sps2_platform::core::PlatformContext;
use sps2_platform::PlatformManager;
use std::path::Path;
use tar::Archive;
use tokio::io::{AsyncWriteExt, BufReader};
/// Create a platform context for filesystem operations
///
/// Returns the process-wide platform singleton plus a context with no event
/// sender. NOTE(review): duplicates the private helper in the store module —
/// consider sharing one definition.
fn create_platform_context() -> (&'static sps2_platform::Platform, PlatformContext) {
    let platform = PlatformManager::instance().platform();
    let context = platform.create_context(None);
    (platform, context)
}
/// Extract a .sp package file to a directory
///
/// Convenience wrapper over [`extract_package_with_events`] with event
/// reporting disabled.
///
/// # Errors
///
/// Returns an error if:
/// - Tar extraction fails
/// - The extracted package is missing manifest.toml
/// - I/O operations fail
pub async fn extract_package(sp_file: &Path, dest: &Path) -> Result<(), Error> {
    extract_package_with_events(sp_file, dest, None).await
}
/// Extract a .sp package file to a directory with optional event reporting
///
/// # Errors
///
/// Returns an error if:
/// - Tar extraction fails
/// - The extracted package is missing manifest.toml
/// - I/O operations fail
pub async fn extract_package_with_events(
    sp_file: &Path,
    dest: &Path,
    event_sender: Option<&EventSender>,
) -> Result<(), Error> {
    // Try the zstd-compressed path first; on any failure assume the archive
    // is a plain (uncompressed) tar and retry with that reader.
    if extract_zstd_tar_file(sp_file, dest, event_sender)
        .await
        .is_err()
    {
        extract_plain_tar_file(sp_file, dest, event_sender).await?;
    }
    // A valid package must ship a manifest at its root.
    let (platform, ctx) = create_platform_context();
    let manifest_path = dest.join("manifest.toml");
    if platform.filesystem().exists(&ctx, &manifest_path).await {
        Ok(())
    } else {
        Err(PackageError::InvalidFormat {
            message: "missing manifest.toml in package".to_string(),
        }
        .into())
    }
}
/// List the contents of a .sp package without extracting
///
/// # Errors
///
/// Returns an error if:
/// - Archive reading fails
/// - I/O operations fail
pub async fn list_package_contents(sp_file: &Path) -> Result<Vec<String>, Error> {
    // zstd-compressed listing first; any failure falls back to plain tar.
    if let Ok(contents) = list_zstd_tar_contents(sp_file).await {
        Ok(contents)
    } else {
        list_plain_tar_contents(sp_file).await
    }
}
/// Create a .sp package file from a directory
///
/// NOTE(review): the archive is written as an uncompressed tar even though
/// extraction tries zstd first — confirm this asymmetry is intended.
///
/// # Errors
///
/// Returns an error if:
/// - Source directory is missing manifest.toml
/// - Archive creation fails
/// - I/O operations fail
/// - Directory creation fails
pub async fn create_package(src: &Path, sp_file: &Path) -> Result<(), Error> {
    // Verify source has required structure
    let manifest_path = src.join("manifest.toml");
    let (platform, ctx) = create_platform_context();
    if !platform.filesystem().exists(&ctx, &manifest_path).await {
        return Err(PackageError::InvalidFormat {
            message: "source directory missing manifest.toml".to_string(),
        }
        .into());
    }
    // Create parent directory if needed
    if let Some(parent) = sp_file.parent() {
        platform.filesystem().create_dir_all(&ctx, parent).await?;
    }
    // Create archive using blocking operations — tar writing is synchronous,
    // so it runs on the blocking thread pool with owned copies of both paths.
    let src = src.to_path_buf();
    let sp_file = sp_file.to_path_buf();
    tokio::task::spawn_blocking(move || {
        use std::fs::OpenOptions;
        use std::io::Write;
        // Open file with create and write permissions
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&sp_file)
            .map_err(|e| StorageError::IoError {
                message: format!("failed to create file: {e}"),
            })?;
        let mut builder = tar::Builder::new(file);
        // Set options for deterministic output (stable timestamps/ownership)
        builder.mode(tar::HeaderMode::Deterministic);
        builder.follow_symlinks(false);
        // Add all files from the source directory
        add_dir_to_tar(&mut builder, &src, Path::new(""))?;
        // Finish the archive - this writes the tar EOF blocks
        builder.finish()?;
        // Get the file back and ensure it's synced to disk before returning
        let mut file = builder.into_inner().map_err(|e| StorageError::IoError {
            message: format!("failed to get file from tar builder: {e}"),
        })?;
        file.flush().map_err(|e| StorageError::IoError {
            message: format!("failed to flush file: {e}"),
        })?;
        file.sync_all().map_err(|e| StorageError::IoError {
            message: format!("failed to sync file: {e}"),
        })?;
        Ok::<(), Error>(())
    })
    .await
    .map_err(|e| Error::internal(format!("create task failed: {e}")))??;
    Ok(())
}
/// Extract a zstd-compressed tar archive using temporary file
///
/// Decompresses `file_path` into a named temporary file, then unpacks that
/// tar archive into `dest` on the blocking thread pool. Completion events
/// are emitted on `event_sender` when provided.
async fn extract_zstd_tar_file(
    file_path: &Path,
    dest: &Path,
    event_sender: Option<&EventSender>,
) -> Result<(), Error> {
    // Create destination directory
    let (platform, ctx) = create_platform_context();
    platform.filesystem().create_dir_all(&ctx, dest).await?;
    // Create a temporary file to decompress to, then extract with tar.
    // NamedTempFile removes the file on drop, so it must stay alive until
    // the extraction task below has finished reading it.
    let temp_file = tempfile::NamedTempFile::new().map_err(|e| StorageError::IoError {
        message: format!("failed to create temp file: {e}"),
    })?;
    let temp_path = temp_file.path().to_path_buf();
    // Decompress the zstd file to temporary location
    {
        use tokio::fs::File;
        let input_file = File::open(file_path)
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to open compressed file: {e}"),
            })?;
        let mut output_file =
            File::create(&temp_path)
                .await
                .map_err(|e| StorageError::IoError {
                    message: format!("failed to create temp output file: {e}"),
                })?;
        let mut decoder = AsyncZstdReader::new(BufReader::new(input_file));
        tokio::io::copy(&mut decoder, &mut output_file)
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to decompress zstd file: {e}"),
            })?;
        // Flush so the blocking extraction below observes every byte.
        output_file
            .flush()
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to flush temp file: {e}"),
            })?;
    }
    // Now extract the decompressed tar file using blocking operations
    let temp_path_for_task = temp_path.clone();
    let dest = dest.to_path_buf();
    // Keep the temp_file alive until after the blocking operation completes
    tokio::task::spawn_blocking(move || {
        use std::fs::File;
        let file = File::open(&temp_path_for_task).map_err(|e| StorageError::IoError {
            message: format!("failed to open decompressed temp file: {e}"),
        })?;
        let mut archive = Archive::new(file);
        // Set options for security
        archive.set_preserve_permissions(true);
        archive.set_preserve_mtime(true);
        archive.set_unpack_xattrs(false); // Don't unpack extended attributes
        // Extract all entries with security checks
        extract_archive_entries(&mut archive, &dest)?;
        Ok::<(), Error>(())
    })
    .await
    .map_err(|e| Error::internal(format!("zstd extract task failed: {e}")))??;
    // Now we can safely drop the temp_file
    drop(temp_file);
    // Send decompression completed event
    if let Some(sender) = event_sender {
        sender.emit(AppEvent::General(GeneralEvent::OperationCompleted {
            operation: "Zstd decompression completed".to_string(),
            success: true,
        }));
    }
    // Send overall extraction completed event
    if let Some(sender) = event_sender {
        sender.emit(AppEvent::General(GeneralEvent::OperationCompleted {
            operation: format!("Package extraction completed: {}", file_path.display()),
            success: true,
        }));
    }
    Ok(())
}
/// Extract a plain (uncompressed) tar archive
async fn extract_plain_tar_file(
file_path: &Path,
dest: &Path,
event_sender: Option<&EventSender>,
) -> Result<(), Error> {
// Create destination directory
let (platform, ctx) = create_platform_context();
platform.filesystem().create_dir_all(&ctx, dest).await?;
let file_path = file_path.to_path_buf();
let dest = dest.to_path_buf();
tokio::task::spawn_blocking(move || {
use std::fs::File;
let file = File::open(&file_path)?;
let mut archive = Archive::new(file);
// Set options for security
archive.set_preserve_permissions(true);
archive.set_preserve_mtime(true);
archive.set_unpack_xattrs(false); // Don't unpack extended attributes
// Extract all entries with security checks
extract_archive_entries(&mut archive, &dest)?;
Ok::<(), Error>(())
})
.await
.map_err(|e| Error::internal(format!("plain tar extract task failed: {e}")))??;
// Send extraction completed event
if let Some(sender) = event_sender {
sender.emit(AppEvent::General(GeneralEvent::OperationCompleted {
operation: "Plain tar extraction completed".to_string(),
success: true,
}));
}
Ok(())
}
/// List contents of a zstd-compressed tar archive
///
/// Decompresses to a temporary file first, then iterates the tar entries on
/// the blocking thread pool. Returns the entry paths sorted.
async fn list_zstd_tar_contents(file_path: &Path) -> Result<Vec<String>, Error> {
    // Create a temporary file to decompress to, then list contents.
    // NamedTempFile deletes the file on drop, so it must outlive the
    // blocking listing task below.
    let temp_file = tempfile::NamedTempFile::new().map_err(|e| StorageError::IoError {
        message: format!("failed to create temp file: {e}"),
    })?;
    let temp_path = temp_file.path().to_path_buf();
    // Decompress the zstd file to temporary location
    {
        use tokio::fs::File;
        let input_file = File::open(file_path)
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to open compressed file: {e}"),
            })?;
        let mut output_file =
            File::create(&temp_path)
                .await
                .map_err(|e| StorageError::IoError {
                    message: format!("failed to create temp output file: {e}"),
                })?;
        let mut decoder = AsyncZstdReader::new(BufReader::new(input_file));
        tokio::io::copy(&mut decoder, &mut output_file)
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to decompress zstd file: {e}"),
            })?;
        // Flush so the blocking task sees the complete decompressed tar.
        output_file
            .flush()
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to flush temp file: {e}"),
            })?;
    }
    // Now list the decompressed tar file contents
    let temp_path_for_task = temp_path.clone();
    // Keep temp_file alive until after the blocking operation completes
    let result = tokio::task::spawn_blocking(move || -> Result<Vec<String>, Error> {
        use std::fs::File;
        let file = File::open(&temp_path_for_task)?;
        let mut archive = Archive::new(file);
        let mut files = Vec::new();
        for entry in archive.entries()? {
            let entry = entry?;
            let path = entry.path()?;
            files.push(path.to_string_lossy().to_string());
        }
        // Sorted output gives callers a stable, display-friendly order.
        files.sort();
        Ok(files)
    })
    .await
    .map_err(|e| Error::internal(format!("zstd list task failed: {e}")))?;
    // Now we can safely drop the temp_file
    drop(temp_file);
    result
}
/// List the entry paths of an uncompressed tar archive, sorted.
async fn list_plain_tar_contents(file_path: &Path) -> Result<Vec<String>, Error> {
    let archive_path = file_path.to_path_buf();
    // Archive iteration is blocking, so run it on the blocking pool.
    tokio::task::spawn_blocking(move || -> Result<Vec<String>, Error> {
        let reader = std::fs::File::open(&archive_path)?;
        let mut archive = Archive::new(reader);
        let mut names: Vec<String> = Vec::new();
        for entry in archive.entries()? {
            let entry = entry?;
            names.push(entry.path()?.to_string_lossy().to_string());
        }
        // Stable, display-friendly ordering for callers.
        names.sort();
        Ok(names)
    })
    .await
    .map_err(|e| Error::internal(format!("plain tar list task failed: {e}")))?
}
/// Unpack every entry of `archive` beneath `dest`, rejecting unsafe paths.
///
/// Security: entry paths containing a parent-directory (`..`) component, a
/// filesystem root, or a drive prefix are rejected before unpacking, since
/// any of them could place files outside `dest`. `unpack_in` performs its
/// own containment checks as a second layer of defense.
///
/// # Errors
///
/// Returns an error if archive iteration fails, an entry path is unsafe,
/// or unpacking an entry fails.
fn extract_archive_entries<R: std::io::Read>(
    archive: &mut Archive<R>,
    dest: &Path,
) -> Result<(), Error> {
    // Extract all entries
    for entry in archive.entries()? {
        let mut entry = entry?;
        // Get the path
        let path = entry.path()?;
        // Security check: ensure path cannot escape the destination. The
        // original check covered only `..`; absolute paths are equally unsafe.
        let has_unsafe_component = path.components().any(|c| {
            matches!(
                c,
                std::path::Component::ParentDir
                    | std::path::Component::RootDir
                    | std::path::Component::Prefix(_)
            )
        });
        if has_unsafe_component {
            return Err(PackageError::InvalidFormat {
                message: "archive contains path traversal".to_string(),
            }
            .into());
        }
        // Unpack the entry (unpack_in re-validates containment internally)
        entry.unpack_in(dest)?;
    }
    Ok(())
}
/// Recursively append the contents of `src` to the tar `builder`.
///
/// Entries are placed under `prefix` (archive-relative paths). Directory
/// entries are sorted by file name so archive contents are deterministic
/// regardless of filesystem iteration order — `std::fs::read_dir` gives no
/// ordering guarantee, which previously undermined the deterministic-header
/// mode set by `create_package`.
///
/// # Errors
///
/// Returns an error if directory reading, metadata access, or tar append
/// operations fail.
fn add_dir_to_tar<W: std::io::Write>(
    builder: &mut tar::Builder<W>,
    src: &Path,
    prefix: &Path,
) -> Result<(), Error> {
    // Collect the directory's entries, surfacing any iteration error.
    let mut entries: Vec<std::fs::DirEntry> = std::fs::read_dir(src)
        .map_err(|e| StorageError::IoError {
            message: e.to_string(),
        })?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| StorageError::IoError {
            message: e.to_string(),
        })?;
    // read_dir order is platform-dependent; sort for reproducible archives.
    entries.sort_by_key(|entry| entry.file_name());
    for entry in entries {
        let path = entry.path();
        let tar_path = prefix.join(entry.file_name());
        let metadata = entry.metadata().map_err(|e| StorageError::IoError {
            message: e.to_string(),
        })?;
        if metadata.is_dir() {
            // Add directory, then recurse into it.
            builder
                .append_dir(&tar_path, &path)
                .map_err(|e| StorageError::IoError {
                    message: e.to_string(),
                })?;
            add_dir_to_tar(builder, &path, &tar_path)?;
        } else if metadata.is_file() {
            // Add regular file contents.
            let mut file = std::fs::File::open(&path).map_err(|e| StorageError::IoError {
                message: e.to_string(),
            })?;
            builder
                .append_file(&tar_path, &mut file)
                .map_err(|e| StorageError::IoError {
                    message: e.to_string(),
                })?;
        } else if metadata.is_symlink() {
            // Record the symlink itself (targets are not followed).
            let target = std::fs::read_link(&path).map_err(|e| StorageError::IoError {
                message: e.to_string(),
            })?;
            let mut header = tar::Header::new_gnu();
            header.set_metadata(&metadata);
            header.set_entry_type(tar::EntryType::Symlink);
            builder
                .append_link(&mut header, &tar_path, &target)
                .map_err(|e| StorageError::IoError {
                    message: e.to_string(),
                })?;
        }
    }
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/format_detection.rs | crates/store/src/format_detection.rs | //! Package format version detection and validation for store operations
//!
//! This module provides fast format version detection without full package parsing,
//! enabling compatibility checking and migration support.
use sps2_errors::{Error, PackageError, StorageError};
use sps2_types::{PackageFormatChecker, PackageFormatValidationResult, PackageFormatVersion};
use std::path::Path;
use tokio::{fs::File, io::AsyncReadExt};
/// Package format detection result
#[derive(Debug, Clone)]
pub struct PackageFormatInfo {
    /// Detected format version
    pub version: PackageFormatVersion,
    /// Whether fast header detection was used (false = parsed from the manifest)
    pub from_header: bool,
    /// Whether format is compatible with current version
    pub is_compatible: bool,
    /// Validation result with details
    pub validation: PackageFormatValidationResult,
}
/// Package format detector for .sp files
#[derive(Clone, Debug)]
pub struct PackageFormatDetector {
    // Compatibility checker used to validate detected versions.
    checker: PackageFormatChecker,
}
impl PackageFormatDetector {
    /// Create a new format detector with the default compatibility checker.
    #[must_use]
    pub fn new() -> Self {
        Self {
            checker: PackageFormatChecker::new(),
        }
    }
/// Detect package format version from .sp file
///
/// Fast header detection is attempted first; if no version header is found
/// in the file prefix, the package is extracted and its manifest parsed.
///
/// # Errors
///
/// Returns an error if:
/// - File cannot be read
/// - Package format is invalid or corrupted
/// - I/O operations fail
pub async fn detect_format(&self, sp_file: &Path) -> Result<PackageFormatInfo, Error> {
    // Fast path: scan the file header; slow path: parse the manifest.
    // (The original duplicated the validation logic across both paths.)
    let (version, from_header) = match self.detect_from_header(sp_file).await {
        Ok(version) => (version, true),
        Err(_) => (self.detect_from_manifest(sp_file).await?, false),
    };
    let validation = self.checker.validate_version(&version);
    // Compatible and backwards-compatible versions are both processable.
    let is_compatible = matches!(
        validation,
        PackageFormatValidationResult::Compatible
            | PackageFormatValidationResult::BackwardsCompatible { .. }
    );
    Ok(PackageFormatInfo {
        version,
        from_header,
        is_compatible,
        validation,
    })
}
/// Fast format version detection from package header
///
/// Reads at most the first 64 bytes of the package and scans them for the
/// version header, without decompressing or parsing the entire package.
///
/// # Errors
///
/// Returns an error if the header cannot be read or no version header is
/// found in the prefix bytes
pub async fn detect_from_header(&self, sp_file: &Path) -> Result<PackageFormatVersion, Error> {
    let mut file = File::open(sp_file)
        .await
        .map_err(|e| StorageError::IoError {
            message: format!("failed to open package for header detection: {e}"),
        })?;
    // Read up to 64 bytes. A single `read` may legally return fewer bytes
    // than requested even before EOF, which previously could make header
    // detection fail spuriously; loop until the buffer is full or EOF.
    let mut header_buffer = vec![0u8; 64];
    let mut filled = 0;
    while filled < header_buffer.len() {
        let n = file
            .read(&mut header_buffer[filled..])
            .await
            .map_err(|e| StorageError::IoError {
                message: format!("failed to read package header: {e}"),
            })?;
        if n == 0 {
            break; // EOF reached before the buffer filled
        }
        filled += n;
    }
    header_buffer.truncate(filled);
    // Look for version header pattern after zstd header
    Self::find_version_header_in_buffer(&header_buffer)
}
/// Detect format version from the manifest inside the package.
///
/// Extracts the package into a temporary directory and reads the format
/// version from its manifest.toml. Slower than header detection but more
/// reliable.
///
/// # Errors
///
/// Returns an error if manifest extraction or parsing fails
pub async fn detect_from_manifest(
    &self,
    sp_file: &Path,
) -> Result<PackageFormatVersion, Error> {
    // Temporary workspace, removed automatically when dropped.
    let temp_dir = tempfile::tempdir().map_err(|e| StorageError::IoError {
        message: format!("failed to create temp dir for manifest extraction: {e}"),
    })?;
    // Full extraction (partial extraction support was removed).
    crate::archive::extract_package(sp_file, temp_dir.path()).await?;
    // Parse the manifest and return its declared format version.
    let manifest =
        crate::manifest_io::read_manifest(&temp_dir.path().join("manifest.toml")).await?;
    Ok(manifest.format_version().clone())
}
/// Find version header pattern in a buffer
///
/// Scans every 12-byte window for the version header magic bytes (`SPV1`)
/// and, on a match, parses the version information out of that window.
///
/// # Errors
///
/// Returns an error if no version header is present or the header bytes
/// fail to parse.
fn find_version_header_in_buffer(buffer: &[u8]) -> Result<PackageFormatVersion, Error> {
    const VERSION_MAGIC: [u8; 4] = [0x53, 0x50, 0x56, 0x31]; // "SPV1"
    // `windows(12)` yields every 12-byte window, including the final one.
    // (The previous `0..len - 12` loop excluded the last valid start
    // position — an off-by-one that missed a header ending exactly at the
    // buffer's end.)
    if let Some(window) = buffer.windows(12).find(|w| w[0..4] == VERSION_MAGIC) {
        return PackageFormatVersion::from_header_bytes(window).map_err(|e| {
            PackageError::InvalidFormat {
                message: format!("failed to parse version header: {e}"),
            }
            .into()
        });
    }
    Err(PackageError::InvalidFormat {
        message: "No version header found in package".to_string(),
    }
    .into())
}
/// Validate package format compatibility.
///
/// Checks whether a package with the given format version can be processed
/// by the current version of sps2.
///
/// # Errors
///
/// Returns an error if the package requires migration or is incompatible
pub fn validate_compatibility(&self, format_info: &PackageFormatInfo) -> Result<(), Error> {
    match &format_info.validation {
        // Fully compatible or older-but-readable: allow processing.
        // Callers may emit their own events for the backwards case.
        PackageFormatValidationResult::Compatible
        | PackageFormatValidationResult::BackwardsCompatible { .. } => Ok(()),
        PackageFormatValidationResult::RequiresMigration { .. } => {
            Err(PackageError::IncompatibleFormat {
                version: format_info.version.to_string(),
                reason: "Package requires migration to current format".to_string(),
            }
            .into())
        }
        PackageFormatValidationResult::Incompatible { reason, suggestion } => {
            Err(PackageError::IncompatibleFormat {
                version: format_info.version.to_string(),
                reason: format!("{reason}. {suggestion}"),
            }
            .into())
        }
    }
}
/// Check if a package supports a specific feature based on its format version
#[must_use]
pub fn supports_feature(&self, version: &PackageFormatVersion, feature: &str) -> bool {
    // Seekable compression is reported for every version; signature support
    // comes from the version's compatibility info.
    match feature {
        "signatures" => version.compatibility_info().supports_signatures,
        "seekable_compression" => true,
        _ => false,
    }
}
}
impl Default for PackageFormatDetector {
    /// Equivalent to [`PackageFormatDetector::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Store-level format validation for package operations
#[derive(Clone, Debug)]
pub struct StoreFormatValidator {
    // Underlying detector used to read format versions.
    detector: PackageFormatDetector,
    // When true, incompatible packages are rejected with an error.
    require_compatibility: bool,
}
impl StoreFormatValidator {
    /// Create a new store format validator
    ///
    /// The returned validator rejects packages whose format is incompatible.
    #[must_use]
    pub fn new() -> Self {
        Self {
            detector: PackageFormatDetector::new(),
            require_compatibility: true,
        }
    }
    /// Create a validator that allows incompatible packages (for migration tools)
    ///
    /// Detection still runs, but incompatible formats are not treated as errors.
    #[must_use]
    pub fn allow_incompatible() -> Self {
        Self {
            detector: PackageFormatDetector::new(),
            require_compatibility: false,
        }
    }
/// Validate package format before store operations
///
/// # Errors
///
/// Returns an error if detection fails, or if the format is incompatible
/// and this validator requires compatibility
pub async fn validate_before_storage(
    &self,
    sp_file: &Path,
) -> Result<PackageFormatInfo, Error> {
    let info = self.detector.detect_format(sp_file).await?;
    // Compatibility enforcement is optional (see `allow_incompatible`).
    if self.require_compatibility {
        self.detector.validate_compatibility(&info)?;
    }
    Ok(info)
}
/// Validate package format after loading from store
///
/// # Errors
///
/// Returns an error if manifest parsing fails or format is incompatible
pub async fn validate_stored_package(
    &self,
    package_path: &Path,
) -> Result<PackageFormatInfo, Error> {
    // Stored packages keep their manifest on disk; read it directly rather
    // than scanning archive headers.
    let manifest =
        crate::manifest_io::read_manifest(&package_path.join("manifest.toml")).await?;
    let version = manifest.format_version().clone();
    let validation = self.detector.checker.validate_version(&version);
    let is_compatible = matches!(
        validation,
        PackageFormatValidationResult::Compatible
            | PackageFormatValidationResult::BackwardsCompatible { .. }
    );
    let format_info = PackageFormatInfo {
        version,
        from_header: false, // read from the manifest, not the header fast path
        is_compatible,
        validation,
    };
    if self.require_compatibility {
        self.detector.validate_compatibility(&format_info)?;
    }
    Ok(format_info)
}
}
impl Default for StoreFormatValidator {
    /// Equivalent to [`StoreFormatValidator::new`] (compatibility required).
    fn default() -> Self {
        Self::new()
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/package.rs | crates/store/src/package.rs | //! Stored package representation and operations
use sps2_errors::{Error, PackageError, StorageError};
use sps2_hash::FileHashResult;
use sps2_platform::core::PlatformContext;
use sps2_platform::PlatformManager;
use sps2_types::Manifest;
use std::path::{Path, PathBuf};
use tokio::fs;
/// A package stored in the content-addressed store
pub struct StoredPackage {
    // Root directory of this package inside the store.
    path: PathBuf,
    // Parsed manifest.toml of the package.
    manifest: Manifest,
    /// File hash results if available (for new file-level packages)
    file_hashes: Option<Vec<FileHashResult>>,
}
impl StoredPackage {
    /// Load a stored package
    ///
    /// Reads `manifest.toml` (required) and, when present, `files.json`
    /// containing per-file hash results.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The manifest file cannot be found or read
    /// - The manifest file is invalid
    pub async fn load(path: &Path) -> Result<Self, Error> {
        let manifest_path = path.join("manifest.toml");
        let manifest = crate::manifest_io::read_manifest(&manifest_path).await?;
        // Try to load file hashes if available
        let files_json_path = path.join("files.json");
        let platform = PlatformManager::instance().platform();
        let ctx = platform.create_context(None);
        let file_hashes = if platform.filesystem().exists(&ctx, &files_json_path).await {
            let content = fs::read_to_string(&files_json_path).await?;
            // Best-effort parse: a malformed files.json yields `None` rather
            // than failing the whole load.
            serde_json::from_str(&content).ok()
        } else {
            None
        };
        Ok(Self {
            path: path.to_path_buf(),
            manifest,
            file_hashes,
        })
    }
/// Build a platform handle plus a fresh operation context for filesystem calls.
fn create_platform_context() -> (&'static sps2_platform::Platform, PlatformContext) {
    let platform = PlatformManager::instance().platform();
    (platform, platform.create_context(None))
}
/// Get the package manifest
#[must_use]
pub fn manifest(&self) -> &Manifest {
    &self.manifest
}
/// Get the package path (root directory inside the store)
#[must_use]
pub fn path(&self) -> &Path {
    &self.path
}
/// Get the package hash from the path
///
/// Returns `None` if the final path component is not a valid hex hash.
#[must_use]
pub fn hash(&self) -> Option<sps2_hash::Hash> {
    // The hash is the last component of the path
    self.path
        .file_name()
        .and_then(|name| name.to_str())
        .and_then(|hash_str| sps2_hash::Hash::from_hex(hash_str).ok())
}
/// Check if this package has file-level hashes
#[must_use]
pub fn has_file_hashes(&self) -> bool {
    self.file_hashes.is_some()
}
/// Get the file hashes if available
#[must_use]
pub fn file_hashes(&self) -> Option<&[FileHashResult]> {
    self.file_hashes.as_deref()
}
/// Get the files directory
///
/// Checks the current layout first, then two legacy layouts in turn.
#[must_use]
pub fn files_path(&self) -> PathBuf {
    // New structure: files are under opt/pm/live
    let live_path = self.path.join("opt/pm/live");
    if live_path.exists() {
        return live_path; // Return the live path directly
    }
    // Legacy: Check for package-version directory
    let package_name = &self.manifest.package.name;
    let package_version = &self.manifest.package.version;
    let versioned_path = self.path.join(format!("{package_name}-{package_version}"));
    if versioned_path.exists() {
        return versioned_path;
    }
    // Fallback to package name without version
    self.path.join(package_name)
}
/// Get the blobs directory
#[must_use]
pub fn blobs_path(&self) -> PathBuf {
    self.path.join("blobs")
}
/// Link package contents to a destination.
///
/// # Errors
///
/// Returns an error if file linking operations fail or the package lacks
/// file-level hashes (legacy packages are no longer supported).
pub async fn link_to(&self, dest_root: &Path) -> Result<(), Error> {
    // Legacy packages without per-file hashes can no longer be linked.
    let file_hashes = match self.file_hashes.as_ref() {
        Some(hashes) => hashes,
        None => {
            return Err(PackageError::Corrupted {
                message: "package is missing file hashes and is no longer supported"
                    .to_string(),
            }
            .into())
        }
    };
    // The store base directory is two levels above this package directory.
    let store_base = self
        .path
        .parent()
        .and_then(std::path::Path::parent)
        .ok_or_else(|| StorageError::InvalidPath {
            path: self.path.display().to_string(),
        })?;
    // Link every file from the content-addressed file store.
    crate::FileStore::new(store_base)
        .link_files(file_hashes, &PathBuf::new(), dest_root)
        .await?;
    Ok(())
}
/// Calculate total size of the package
///
/// # Errors
///
/// Returns an error if size calculation fails due to I/O issues
pub async fn size(&self) -> Result<u64, Error> {
    let (platform, ctx) = Self::create_platform_context();
    match platform.filesystem().size(&ctx, &self.path).await {
        Ok(bytes) => Ok(bytes),
        Err(e) => Err(StorageError::IoError {
            message: e.to_string(),
        }
        .into()),
    }
}
/// List all files in the package, as paths relative to the files directory.
///
/// # Errors
///
/// Returns an error if directory traversal fails or I/O operations fail
pub async fn list_files(&self) -> Result<Vec<PathBuf>, Error> {
    let files_dir = self.files_path();
    let (platform, ctx) = Self::create_platform_context();
    // A package without a files directory simply has no files.
    if !platform.filesystem().exists(&ctx, &files_dir).await {
        return Ok(Vec::new());
    }
    let mut collected = Vec::new();
    self.collect_files(&files_dir, &files_dir, &mut collected)
        .await?;
    Ok(collected)
}
/// Recursively gather file paths under `dir`, pushing paths relative to
/// `base` into `files`. Directories themselves are not recorded.
async fn collect_files(
    &self,
    base: &Path,
    dir: &Path,
    files: &mut Vec<PathBuf>,
) -> Result<(), Error> {
    let mut entries = fs::read_dir(dir).await?;
    while let Some(entry) = entries.next_entry().await? {
        let path = entry.path();
        let metadata = entry.metadata().await?;
        if metadata.is_dir() {
            // Recursive async call needs a boxed future (Box::pin).
            Box::pin(self.collect_files(base, &path, files)).await?;
        } else {
            // Store relative path
            if let Ok(rel_path) = path.strip_prefix(base) {
                files.push(rel_path.to_path_buf());
            }
        }
    }
    Ok(())
}
/// Verify package integrity
///
/// # Errors
///
/// Returns an error if:
/// - Required directories are missing
/// - Manifest validation fails
/// - Package structure is corrupted
pub async fn verify(&self) -> Result<(), Error> {
    let (platform, ctx) = Self::create_platform_context();
    // The files directory must exist for the package to be usable.
    let files_dir = self.files_path();
    if !platform.filesystem().exists(&ctx, &files_dir).await {
        return Err(PackageError::Corrupted {
            message: "missing files directory".to_string(),
        }
        .into());
    }
    // Ensure the manifest itself is well-formed.
    self.manifest.validate()?;
    // Could add more verification here (file checksums, etc.)
    Ok(())
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/store/src/file_store.rs | crates/store/src/file_store.rs | //! File-level content-addressed storage operations
//!
//! This module provides functionality for storing individual files
//! by their content hash, enabling deduplication across packages.
use sps2_errors::{Error, StorageError};
use sps2_hash::{calculate_file_storage_path, FileHashResult, FileHasher, FileHasherConfig, Hash};
use sps2_platform::core::PlatformContext;
use sps2_platform::PlatformManager;
use std::path::{Path, PathBuf};
use tokio::fs;
use uuid::Uuid;
/// Result of file verification operation
#[derive(Debug, Clone, PartialEq)]
pub enum FileVerificationResult {
    /// File is valid and matches expected hash
    Valid,
    /// File is missing from store
    Missing,
    /// File exists but hash doesn't match (carries both hashes for reporting)
    HashMismatch { expected: Hash, actual: Hash },
    /// Verification failed due to error
    Error { message: String },
}
/// File store for content-addressed file storage
///
/// Individual files are stored by content hash, enabling deduplication
/// across packages.
#[derive(Clone, Debug)]
pub struct FileStore {
    /// Base path for file objects (/opt/pm/store/objects)
    objects_path: PathBuf,
    /// File hasher for computing file hashes
    file_hasher: FileHasher,
}
impl FileStore {
    /// Create a new file store instance rooted at `store_base_path`.
    ///
    /// Objects live under `<store_base_path>/objects`.
    #[must_use]
    pub fn new(store_base_path: &Path) -> Self {
        let objects_path = store_base_path.join("objects");
        let file_hasher = FileHasher::new(FileHasherConfig::default());
        Self {
            objects_path,
            file_hasher,
        }
    }
/// Build a platform handle plus a fresh operation context for filesystem calls.
fn create_platform_context() -> (&'static sps2_platform::Platform, PlatformContext) {
    let platform = PlatformManager::instance().platform();
    (platform, platform.create_context(None))
}
/// Initialize the file store directory structure
///
/// Creates the objects root plus the complete two-level fan-out of hex
/// prefix directories (`objects/00/00` through `objects/ff/ff`).
///
/// # Errors
/// Returns an error if directory creation fails
pub async fn initialize(&self) -> Result<(), Error> {
    let (platform, ctx) = Self::create_platform_context();
    platform
        .filesystem()
        .create_dir_all(&ctx, &self.objects_path)
        .await?;
    // Pre-create every prefix directory (256 first-level, 256 each below).
    for first in 0..256 {
        let first_level = self.objects_path.join(format!("{first:02x}"));
        platform
            .filesystem()
            .create_dir_all(&ctx, &first_level)
            .await?;
        for second in 0..256 {
            let second_level = first_level.join(format!("{second:02x}"));
            platform
                .filesystem()
                .create_dir_all(&ctx, &second_level)
                .await?;
        }
    }
    Ok(())
}
/// Get the storage path for a file hash
#[must_use]
pub fn file_path(&self, hash: &Hash) -> PathBuf {
    // Layout: objects/<two-level prefix>/<full hash>.
    let (prefix, full_hash) = calculate_file_storage_path(hash);
    let mut path = self.objects_path.join(prefix);
    path.push(full_hash);
    path
}
/// Check if a file exists in the store
pub async fn has_file(&self, hash: &Hash) -> bool {
    let (platform, ctx) = Self::create_platform_context();
    platform
        .filesystem()
        .exists(&ctx, &self.file_path(hash))
        .await
}
/// Store a file by its content hash
///
/// Returns true if the file was newly stored, false if it already existed
///
/// Uses copy-to-temp followed by an atomic rename, so concurrent writers of
/// the same hash never observe a partially written object.
///
/// # Errors
/// Returns an error if file operations fail
pub async fn store_file(&self, source_path: &Path, hash: &Hash) -> Result<bool, Error> {
    let dest_path = self.file_path(hash);
    let (platform, ctx) = Self::create_platform_context();
    // Fast path: object already present, nothing to do.
    if platform.filesystem().exists(&ctx, &dest_path).await {
        return Ok(false);
    }
    // Ensure parent directory exists
    let parent_dir = dest_path.parent().ok_or_else(|| StorageError::IoError {
        message: "failed to get parent directory".to_string(),
    })?;
    platform
        .filesystem()
        .create_dir_all(&ctx, parent_dir)
        .await?;
    // Create a unique temporary file path (UUID avoids collisions between
    // concurrent writers of the same object).
    let temp_file_name = format!("{}.tmp", Uuid::new_v4());
    let temp_path = parent_dir.join(temp_file_name);
    // Copy to temporary file
    if let Err(e) = fs::copy(source_path, &temp_path).await {
        // Clean up temp file on failure
        let _ = platform.filesystem().remove_file(&ctx, &temp_path).await;
        return Err(StorageError::IoError {
            message: format!("failed to copy file to temp: {e}"),
        }
        .into());
    }
    // Attempt to atomically rename (move) the file
    match fs::rename(&temp_path, &dest_path).await {
        Ok(()) => {
            // Make file read-only after successful move
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                let metadata = fs::metadata(&dest_path).await?;
                let mut perms = metadata.permissions();
                let mode = perms.mode() & 0o555; // Remove write permissions
                perms.set_mode(mode);
                fs::set_permissions(&dest_path, perms).await?;
            }
            Ok(true)
        }
        Err(e) => {
            // Clean up the temporary file
            let _ = platform.filesystem().remove_file(&ctx, &temp_path).await;
            // If the error is because the file exists, another process/thread beat us to it.
            // This is not an error condition for us.
            if e.kind() == std::io::ErrorKind::AlreadyExists {
                Ok(false)
            } else {
                Err(StorageError::IoError {
                    message: format!("failed to move temp file to store: {e}"),
                }
                .into())
            }
        }
    }
}
/// Store a file and compute its hash
///
/// Returns the computed hash and whether the file was newly stored.
///
/// # Errors
/// Returns an error if file operations fail
pub async fn store_file_with_hash(&self, source_path: &Path) -> Result<(Hash, bool), Error> {
    // Hash first, then store under that content address.
    let hash = Hash::hash_file(source_path).await?;
    let inserted = self.store_file(source_path, &hash).await?;
    Ok((hash, inserted))
}
/// Link a stored file to a destination
///
/// Idempotent: if the destination already contains a file with the expected
/// hash, nothing is done. Otherwise any existing destination file is
/// replaced with a clone of the stored object.
///
/// # Errors
/// Returns an error if the file doesn't exist or linking fails
pub async fn link_file(&self, hash: &Hash, dest_path: &Path) -> Result<(), Error> {
    let source_path = self.file_path(hash);
    let (platform, ctx) = Self::create_platform_context();
    if !platform.filesystem().exists(&ctx, &source_path).await {
        return Err(StorageError::PathNotFound {
            path: source_path.display().to_string(),
        }
        .into());
    }
    // Ensure parent directory exists
    if let Some(parent) = dest_path.parent() {
        platform.filesystem().create_dir_all(&ctx, parent).await?;
    }
    // Idempotency check: if the correct file already exists, do nothing.
    if platform.filesystem().exists(&ctx, dest_path).await {
        if let Ok(actual_hash) =
            Hash::hash_file_with_algorithm(dest_path, hash.algorithm()).await
        {
            if actual_hash == *hash {
                return Ok(()); // File is already correct, skip.
            }
        }
        // File exists but is wrong, or we couldn't hash it; proceed to remove.
        platform.filesystem().remove_file(&ctx, dest_path).await?;
    }
    // Use APFS clonefile on macOS for copy-on-write semantics
    // This prevents corruption of the store when files are modified in place
    platform
        .filesystem()
        .clone_file(&ctx, &source_path, dest_path)
        .await?;
    Ok(())
}
/// Store all files from a directory
///
/// Returns a list of file hash results
///
/// # Errors
/// Returns an error if directory traversal or file operations fail
pub async fn store_directory(&self, dir_path: &Path) -> Result<Vec<FileHashResult>, Error> {
    // Hash every entry under the directory first.
    let hash_results = self.file_hasher.hash_directory(dir_path).await?;
    let mut kept = Vec::new();
    for result in hash_results {
        // Metadata files belong only in package metadata, never the store.
        let is_metadata = result.relative_path == "manifest.toml"
            || result.relative_path == "sbom.spdx.json"
            || result.relative_path == "sbom.cdx.json";
        // The opt/pm/live scaffolding directories are not stored either.
        let is_scaffold = result.relative_path == "opt"
            || result.relative_path == "opt/pm"
            || result.relative_path == "opt/pm/live";
        if is_metadata || is_scaffold {
            continue;
        }
        // Only regular files carry content to store; directories and
        // symlinks are represented by their hash results alone.
        if !result.is_directory && !result.is_symlink {
            let file_path = dir_path.join(&result.relative_path);
            self.store_file(&file_path, &result.hash).await?;
        }
        kept.push(result);
    }
    Ok(kept)
}
/// Link files from hash results to a destination directory
///
/// Recreates the tree under `dest_base`: directories are created, symlinks
/// are re-created from their targets read under `source_base`, and regular
/// files are linked out of the content store via `link_file`.
///
/// # Errors
/// Returns an error if linking operations fail
pub async fn link_files(
    &self,
    hash_results: &[FileHashResult],
    source_base: &Path,
    dest_base: &Path,
) -> Result<(), Error> {
    let (platform, ctx) = Self::create_platform_context();
    for result in hash_results {
        // Skip manifest.toml and sbom files - they should only exist in store
        if result.relative_path == "manifest.toml"
            || result.relative_path == "sbom.spdx.json"
            || result.relative_path == "sbom.cdx.json"
        {
            continue;
        }
        let dest_path = dest_base.join(&result.relative_path);
        if result.is_directory {
            // Create directory
            platform
                .filesystem()
                .create_dir_all(&ctx, &dest_path)
                .await?;
        } else if result.is_symlink {
            // Recreate symlink by reading its target from the source tree.
            // NOTE(review): if read_link fails here the symlink is silently
            // skipped — presumably intentional best-effort; confirm.
            let source_path = source_base.join(&result.relative_path);
            if let Ok(target) = fs::read_link(&source_path).await {
                // Ensure parent directory exists
                if let Some(parent) = dest_path.parent() {
                    platform.filesystem().create_dir_all(&ctx, parent).await?;
                }
                // Idempotency check: if correct symlink exists, do nothing.
                if let Ok(existing_target) = fs::read_link(&dest_path).await {
                    if existing_target == target {
                        continue; // Symlink is already correct, skip.
                    }
                }
                // If we're here, the path is either not a symlink, or the wrong one.
                if platform.filesystem().exists(&ctx, &dest_path).await {
                    // Use remove_dir_all in case it's a directory.
                    if platform.filesystem().is_dir(&ctx, &dest_path).await {
                        platform
                            .filesystem()
                            .remove_dir_all(&ctx, &dest_path)
                            .await?;
                    } else {
                        platform.filesystem().remove_file(&ctx, &dest_path).await?;
                    }
                }
                // Create symlink.
                // NOTE(review): std::os::unix::fs::symlink is a blocking call
                // inside an async fn — likely fine for a single symlink; confirm.
                #[cfg(unix)]
                {
                    use std::os::unix::fs::symlink;
                    symlink(&target, &dest_path)?;
                }
            }
        } else {
            // Link regular file out of the content store.
            self.link_file(&result.hash, &dest_path).await?;
        }
    }
    Ok(())
}
/// Remove a file from the store
///
/// Removing an object that is not present is a no-op, not an error.
///
/// # Errors
/// Returns an error if file removal fails
pub async fn remove_file(&self, hash: &Hash) -> Result<(), Error> {
    let object = self.file_path(hash);
    let (platform, ctx) = Self::create_platform_context();
    // Only attempt deletion when the object actually exists.
    if platform.filesystem().exists(&ctx, &object).await {
        platform.filesystem().remove_file(&ctx, &object).await?;
    }
    Ok(())
}
/// Get the size of a stored file
///
/// # Errors
/// Returns an error if the file doesn't exist or metadata cannot be read
pub async fn file_size(&self, hash: &Hash) -> Result<u64, Error> {
    let path = self.file_path(hash);
    let (platform, ctx) = Self::create_platform_context();
    // NOTE(review): every size() failure (including e.g. permission errors)
    // is collapsed into PathNotFound here, so callers cannot distinguish a
    // missing object from an unreadable one — confirm this is intended.
    platform.filesystem().size(&ctx, &path).await.map_err(|_| {
        StorageError::PathNotFound {
            path: path.display().to_string(),
        }
        .into()
    })
}
/// Verify that a stored file matches its expected hash
///
/// # Errors
/// Returns an error if the file doesn't exist or hashing fails
pub async fn verify_file(&self, hash: &Hash) -> Result<bool, Error> {
    let path = self.file_path(hash);
    let (platform, ctx) = Self::create_platform_context();
    // A missing object can never match.
    if !platform.filesystem().exists(&ctx, &path).await {
        return Ok(false);
    }
    // Re-hash with the same algorithm the expected hash was produced with.
    let recomputed = Hash::hash_file_with_algorithm(&path, hash.algorithm()).await?;
    Ok(&recomputed == hash)
}
/// Verify a stored file and return detailed result
///
/// This function performs verification and returns a detailed result
/// that can be used by higher-level verification systems.
///
/// # Errors
/// Returns an error if verification fails due to I/O issues
pub async fn verify_file_detailed(&self, hash: &Hash) -> Result<FileVerificationResult, Error> {
let path = self.file_path(hash);
let (platform, ctx) = Self::create_platform_context();
if !platform.filesystem().exists(&ctx, &path).await {
return Ok(FileVerificationResult::Missing);
}
// Use the same algorithm as the expected hash for verification
match Hash::hash_file_with_algorithm(&path, hash.algorithm()).await {
Ok(actual_hash) => {
if actual_hash == *hash {
Ok(FileVerificationResult::Valid)
} else {
Ok(FileVerificationResult::HashMismatch {
expected: hash.clone(),
actual: actual_hash,
})
}
}
Err(e) => Ok(FileVerificationResult::Error {
message: e.to_string(),
}),
}
}
/// Clean up empty prefix directories
///
/// # Errors
/// Returns an error if directory operations fail
pub async fn cleanup(&self) -> Result<(), Error> {
    let mut top_level = fs::read_dir(&self.objects_path).await?;
    while let Some(entry) = top_level.next_entry().await? {
        if !entry.file_type().await?.is_dir() {
            continue;
        }
        let prefix_path = entry.path();
        // An empty prefix directory yields no first entry at all.
        let is_empty = fs::read_dir(&prefix_path)
            .await?
            .next_entry()
            .await?
            .is_none();
        if is_empty {
            // Best-effort removal; a race with a concurrent writer is harmless.
            let _ = fs::remove_dir(&prefix_path).await;
        }
    }
    Ok(())
}
}
#[cfg(test)]
mod tests {
    // Integration-style tests exercising the store against a temp directory.
    use super::*;
    use tempfile::TempDir;
    #[tokio::test]
    async fn test_file_store_operations() {
        let temp_dir = TempDir::new().unwrap();
        let store = FileStore::new(temp_dir.path());
        // Initialize store
        store.initialize().await.unwrap();
        // Create a test file
        let test_file = temp_dir.path().join("test.txt");
        fs::write(&test_file, b"Hello, world!").await.unwrap();
        // Store file; first insertion must report "newly stored".
        let (hash, newly_stored) = store.store_file_with_hash(&test_file).await.unwrap();
        assert!(newly_stored);
        // Check file exists
        assert!(store.has_file(&hash).await);
        // Store same file again; deduplication means it is not stored twice.
        let (_, newly_stored) = store.store_file_with_hash(&test_file).await.unwrap();
        assert!(!newly_stored); // Should already exist
        // Link file to new location
        let link_dest = temp_dir.path().join("linked.txt");
        store.link_file(&hash, &link_dest).await.unwrap();
        // Verify linked file content
        let content = fs::read(&link_dest).await.unwrap();
        assert_eq!(content, b"Hello, world!");
        // Verify file integrity
        assert!(store.verify_file(&hash).await.unwrap());
    }
    #[tokio::test]
    async fn test_directory_storage() {
        let temp_dir = TempDir::new().unwrap();
        let store = FileStore::new(temp_dir.path());
        store.initialize().await.unwrap();
        // Create test directory structure: two files, one in a subdirectory.
        let test_dir = temp_dir.path().join("test_pkg");
        fs::create_dir(&test_dir).await.unwrap();
        fs::write(test_dir.join("file1.txt"), b"content1")
            .await
            .unwrap();
        fs::create_dir(test_dir.join("subdir")).await.unwrap();
        fs::write(test_dir.join("subdir/file2.txt"), b"content2")
            .await
            .unwrap();
        // Store directory
        let results = store.store_directory(&test_dir).await.unwrap();
        // Should have entries for files and directories
        assert!(results.len() >= 2); // At least the two files
        // Link files to new location
        let dest_dir = temp_dir.path().join("linked_pkg");
        store
            .link_files(&results, &test_dir, &dest_dir)
            .await
            .unwrap();
        // Verify linked files landed at the expected relative paths.
        assert!(dest_dir.join("file1.txt").exists());
        assert!(dest_dir.join("subdir/file2.txt").exists());
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/lib.rs | crates/net/src/lib.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
#![allow(clippy::module_name_repetitions)]
//! Network operations for sps2
//!
//! This crate handles all HTTP operations including package downloads,
//! index fetching, and connection pooling with retry logic.
mod client;
mod download;
pub mod signing;
pub use client::{NetClient, NetConfig};
pub use download::{
DownloadResult, PackageDownloadConfig, PackageDownloadRequest, PackageDownloadResult,
PackageDownloader, RetryConfig,
};
pub use signing::{
verify_minisign_bytes_with_keys, verify_minisign_file_with_keys, Algorithm, PublicKeyRef,
};
use sps2_errors::{Error, NetworkError};
use sps2_events::{AppEvent, EventEmitter, EventSender, GeneralEvent};
use sps2_hash::Hash;
use std::path::Path;
use url::Url;
/// Download a file with progress reporting
///
/// Convenience wrapper that builds a `PackageDownloader` with default
/// settings and performs a single resumable download to `dest`.
///
/// NOTE(review): the `_client` parameter is unused — the downloader builds
/// its own HTTP stack internally; kept for signature stability. Confirm
/// whether it should be threaded through.
///
/// # Errors
///
/// Returns an error if the URL is invalid, the download fails, or there are
/// I/O errors while writing the file.
pub async fn download_file(
    _client: &NetClient,
    url: &str,
    dest: &Path,
    expected_hash: Option<&Hash>,
    tx: &EventSender,
) -> Result<(Hash, u64), Error> {
    let downloader = PackageDownloader::with_defaults(sps2_events::ProgressManager::new())?;
    let result = downloader
        .download_with_resume(
            url,
            dest,
            expected_hash,
            "simple_download".to_string(),
            None,
            None,
            tx.clone(),
        )
        .await?;
    Ok((result.hash, result.size))
}
/// Fetch text content from a URL
///
/// # Errors
///
/// Returns an error if the HTTP request fails, the server returns an error status,
/// or the response body cannot be decoded as text.
pub async fn fetch_text(client: &NetClient, url: &str, tx: &EventSender) -> Result<String, Error> {
    tx.emit(AppEvent::General(GeneralEvent::debug(format!(
        "Fetching text from {url}"
    ))));
    let response = client.get(url).await?;
    let status = response.status();
    // Treat any non-2xx status as a hard failure.
    if !status.is_success() {
        return Err(NetworkError::HttpError {
            status: status.as_u16(),
            message: status.to_string(),
        }
        .into());
    }
    match response.text().await {
        Ok(body) => Ok(body),
        Err(e) => Err(NetworkError::DownloadFailed(e.to_string()).into()),
    }
}
/// Conditionally fetch text content from a URL with `ETag` support
///
/// # Errors
///
/// Returns an error if the HTTP request fails, the server returns an error status,
/// or the response body cannot be decoded as text.
///
/// # Returns
///
/// Returns `Ok(None)` if the server responds with 304 Not Modified,
/// `Ok(Some((content, etag)))` if new content is available.
pub async fn fetch_text_conditional(
    client: &NetClient,
    url: &str,
    etag: Option<&str>,
    tx: &EventSender,
) -> Result<Option<(String, Option<String>)>, Error> {
    tx.emit(AppEvent::General(GeneralEvent::debug(format!(
        "Fetching text from {url} with conditional request"
    ))));
    // Send If-None-Match only when we actually hold a cached ETag.
    let headers: Vec<(&str, &str)> = match etag {
        Some(value) => vec![("If-None-Match", value)],
        None => Vec::new(),
    };
    let response = client.get_with_headers(url, &headers).await?;
    // 304 means the cached copy is still current.
    if response.status() == reqwest::StatusCode::NOT_MODIFIED {
        tx.emit(AppEvent::General(GeneralEvent::debug(
            "Server returned 304 Not Modified - using cached content",
        )));
        return Ok(None);
    }
    let status = response.status();
    if !status.is_success() {
        return Err(NetworkError::HttpError {
            status: status.as_u16(),
            message: status.to_string(),
        }
        .into());
    }
    // Capture the replacement ETag (if any) before consuming the body.
    let new_etag = response
        .headers()
        .get("etag")
        .and_then(|v| v.to_str().ok())
        .map(String::from);
    let body = response
        .text()
        .await
        .map_err(|e| NetworkError::DownloadFailed(e.to_string()))?;
    Ok(Some((body, new_etag)))
}
/// Fetch binary content from a URL
///
/// # Errors
///
/// Returns an error if the HTTP request fails, the server returns an error status,
/// or the response body cannot be read as bytes.
pub async fn fetch_bytes(
    client: &NetClient,
    url: &str,
    tx: &EventSender,
) -> Result<Vec<u8>, Error> {
    tx.emit(AppEvent::General(GeneralEvent::debug(format!(
        "Fetching bytes from {url}"
    ))));
    let response = client.get(url).await?;
    let status = response.status();
    // Treat any non-2xx status as a hard failure.
    if !status.is_success() {
        return Err(NetworkError::HttpError {
            status: status.as_u16(),
            message: status.to_string(),
        }
        .into());
    }
    match response.bytes().await {
        Ok(bytes) => Ok(bytes.to_vec()),
        Err(e) => Err(NetworkError::DownloadFailed(e.to_string()).into()),
    }
}
/// Check if a URL is accessible
///
/// # Errors
///
/// Returns an error if there are network issues preventing the HEAD request.
/// Note: This function returns `Ok(false)` for inaccessible URLs rather than errors.
pub async fn check_url(client: &NetClient, url: &str) -> Result<bool, Error> {
    // Any request failure is reported as "not accessible" rather than an error.
    Ok(client
        .head(url)
        .await
        .map(|response| response.status().is_success())
        .unwrap_or(false))
}
/// Parse and validate a URL
///
/// # Errors
///
/// Returns an error if the URL string is malformed or invalid according to RFC 3986.
pub fn parse_url(url: &str) -> Result<Url, Error> {
    match Url::parse(url) {
        Ok(parsed) => Ok(parsed),
        Err(e) => Err(NetworkError::InvalidUrl(e.to_string()).into()),
    }
}
/// Fetch and deserialize JSON content from a URL
///
/// # Errors
///
/// Returns an error if the HTTP request fails, the server returns an error status,
/// or the response body cannot be deserialized from JSON.
pub async fn fetch_json<T: serde::de::DeserializeOwned>(
    client: &NetClient,
    url: &str,
    tx: &EventSender,
) -> Result<T, Error> {
    // Fetch as text first, then decode; JSON errors map to SerializationError.
    let body = fetch_text(client, url, tx).await?;
    match serde_json::from_str(&body) {
        Ok(value) => Ok(value),
        Err(e) => Err(sps2_errors::OpsError::SerializationError {
            message: e.to_string(),
        }
        .into()),
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/client.rs | crates/net/src/client.rs | //! HTTP client with connection pooling and retry logic
use futures::StreamExt;
use reqwest::{Client, Response, StatusCode};
use sps2_errors::{Error, NetworkError};
use std::time::Duration;
/// Download progress information
#[derive(Debug, Clone)]
pub struct DownloadProgress {
    /// Bytes received so far.
    pub downloaded: u64,
    /// Total expected size in bytes (0 when the server sent no Content-Length).
    pub total: u64,
}
/// Network client configuration
#[derive(Debug, Clone)]
pub struct NetConfig {
    /// Overall request timeout.
    pub timeout: Duration,
    /// TCP connect timeout.
    pub connect_timeout: Duration,
    /// How long idle pooled connections are kept alive.
    pub pool_idle_timeout: Duration,
    /// Maximum number of idle connections kept per host.
    pub pool_max_idle_per_host: usize,
    /// Number of retry attempts after the initial request.
    pub retry_count: u32,
    /// Base delay between retries (scaled by the attempt number).
    pub retry_delay: Duration,
    /// Value sent as the User-Agent header.
    pub user_agent: String,
}
impl Default for NetConfig {
fn default() -> Self {
Self {
timeout: Duration::from_secs(300), // 5 minutes for large downloads
connect_timeout: Duration::from_secs(30),
pool_idle_timeout: Duration::from_secs(90),
pool_max_idle_per_host: 10,
retry_count: 3,
retry_delay: Duration::from_secs(1),
user_agent: format!("sps2/{}", env!("CARGO_PKG_VERSION")),
}
}
}
/// HTTP client wrapper with retry logic
#[derive(Clone)]
pub struct NetClient {
    /// Underlying reqwest client (pooling configured at construction).
    client: Client,
    /// Configuration used to build the client; also drives retry behavior.
    config: NetConfig,
}
impl std::fmt::Debug for NetClient {
    // Manual impl: only the config is printed; the client field is elided
    // via finish_non_exhaustive.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("NetClient");
        dbg.field("config", &self.config);
        dbg.finish_non_exhaustive()
    }
}
impl NetClient {
/// Create a new network client that does not consult system proxy configuration.
///
/// # Errors
///
/// Returns an error if the HTTP client cannot be created.
pub fn new_without_proxies(config: NetConfig) -> Result<Self, Error> {
let client = Client::builder()
.timeout(config.timeout)
.connect_timeout(config.connect_timeout)
.pool_idle_timeout(config.pool_idle_timeout)
.pool_max_idle_per_host(config.pool_max_idle_per_host)
.no_proxy()
.user_agent(&config.user_agent)
.build()
.map_err(|e| NetworkError::ConnectionRefused(e.to_string()))?;
Ok(Self { client, config })
}
/// Create a new network client
///
/// # Errors
///
/// Returns an error if the HTTP client cannot be created due to invalid configuration
/// or if the underlying reqwest client fails to initialize.
pub fn new(config: NetConfig) -> Result<Self, Error> {
let client = Client::builder()
.timeout(config.timeout)
.connect_timeout(config.connect_timeout)
.pool_idle_timeout(config.pool_idle_timeout)
.pool_max_idle_per_host(config.pool_max_idle_per_host)
.user_agent(&config.user_agent)
.build()
.map_err(|e| NetworkError::ConnectionRefused(e.to_string()))?;
Ok(Self { client, config })
}
/// Create with default configuration
///
/// # Errors
///
/// Returns an error if the HTTP client cannot be created with default settings.
pub fn with_defaults() -> Result<Self, Error> {
Self::new(NetConfig::default())
}
/// Execute a GET request with retries
///
/// # Errors
///
/// Returns an error if the request fails after all retry attempts, including
/// network timeouts, connection failures, or server errors.
pub async fn get(&self, url: &str) -> Result<Response, Error> {
self.retry_request(|| self.client.get(url).send()).await
}
/// Execute a GET request with custom headers and retries
///
/// # Errors
///
/// Returns an error if the request fails after all retry attempts, including
/// network timeouts, connection failures, or server errors.
pub async fn get_with_headers(
&self,
url: &str,
headers: &[(&str, &str)],
) -> Result<Response, Error> {
self.retry_request(|| {
let mut request = self.client.get(url);
for (key, value) in headers {
request = request.header(*key, *value);
}
request.send()
})
.await
}
/// Execute a GET request with Range header for partial content
///
/// # Errors
///
/// Returns an error if the request fails after all retry attempts, including
/// network timeouts, connection failures, or server errors.
pub async fn get_range(
&self,
url: &str,
start_byte: u64,
end_byte: Option<u64>,
) -> Result<Response, Error> {
let range_value = match end_byte {
Some(end) => format!("bytes={start_byte}-{end}"),
None => format!("bytes={start_byte}-"),
};
self.get_with_headers(url, &[("Range", &range_value)]).await
}
/// Check if server supports range requests
///
/// # Errors
///
/// Returns an error if the HEAD request fails.
pub async fn supports_range_requests(&self, url: &str) -> Result<bool, Error> {
let response = self.head(url).await?;
Ok(response
.headers()
.get("accept-ranges")
.and_then(|v| v.to_str().ok())
== Some("bytes"))
}
/// Execute a HEAD request with retries
///
/// # Errors
///
/// Returns an error if the request fails after all retry attempts, including
/// network timeouts, connection failures, or server errors.
pub async fn head(&self, url: &str) -> Result<Response, Error> {
self.retry_request(|| self.client.head(url).send()).await
}
/// Download file with progress callback
///
/// # Errors
///
/// Returns an error if the download fails, the file cannot be created,
/// or if there are I/O errors while writing the downloaded content.
pub async fn download_file_with_progress<F>(
&self,
url: &str,
dest: &std::path::Path,
progress_callback: F,
) -> Result<(), Error>
where
F: Fn(DownloadProgress),
{
let response = self.get(url).await?;
let total_size = response.content_length().unwrap_or(0);
let mut file = tokio::fs::File::create(dest).await?;
let mut stream = response.bytes_stream();
let mut downloaded = 0u64;
while let Some(chunk) = stream.next().await {
let chunk =
chunk.map_err(|e| sps2_errors::NetworkError::DownloadFailed(e.to_string()))?;
tokio::io::AsyncWriteExt::write_all(&mut file, &chunk).await?;
downloaded += chunk.len() as u64;
progress_callback(DownloadProgress {
downloaded,
total: total_size,
});
}
Ok(())
}
/// Execute a request with retries
async fn retry_request<F, Fut>(&self, mut f: F) -> Result<Response, Error>
where
F: FnMut() -> Fut,
Fut: std::future::Future<Output = Result<Response, reqwest::Error>>,
{
let mut last_error = None;
for attempt in 0..=self.config.retry_count {
if attempt > 0 {
tokio::time::sleep(self.config.retry_delay * attempt).await;
}
match f().await {
Ok(response) => {
// Check for rate limiting
if response.status() == StatusCode::TOO_MANY_REQUESTS {
if let Some(retry_after) = response
.headers()
.get("retry-after")
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok())
{
return Err(NetworkError::RateLimited {
seconds: retry_after,
}
.into());
}
}
return Ok(response);
}
Err(e) => {
last_error = Some(e);
// Don't retry on certain errors
if !Self::should_retry(last_error.as_ref().unwrap()) {
break;
}
}
}
}
// Convert the last error
match last_error {
Some(e) if e.is_timeout() => Err(NetworkError::Timeout {
url: e
.url()
.map(std::string::ToString::to_string)
.unwrap_or_default(),
}
.into()),
Some(e) if e.is_connect() => Err(NetworkError::ConnectionRefused(e.to_string()).into()),
Some(e) => Err(NetworkError::DownloadFailed(e.to_string()).into()),
None => Err(NetworkError::DownloadFailed("Unknown error".to_string()).into()),
}
}
/// Determine if an error should be retried
fn should_retry(error: &reqwest::Error) -> bool {
// Retry on timeout, connection errors, and server errors
error.is_timeout()
|| error.is_connect()
|| error.status().is_none_or(|s| s.is_server_error())
}
/// Get the underlying reqwest client for advanced usage
#[must_use]
pub fn inner(&self) -> &Client {
&self.client
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/signing.rs | crates/net/src/signing.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
use base64::{engine::general_purpose, Engine as _};
use minisign::{sign, SecretKeyBox};
use minisign_verify::{PublicKey, Signature};
use serde::{Deserialize, Serialize};
use sps2_errors::{Error, SigningError};
use std::fs;
use std::path::Path;
/// Signature algorithm identifier (serialized in lowercase).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Algorithm {
    /// Minisign signatures.
    Minisign,
}
/// Reference to a trusted public key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PublicKeyRef {
    /// Key identifier; compared against the hex-encoded key ID embedded in
    /// signatures (see `extract_key_id_from_sig_str`).
    pub id: String,
    /// Algorithm this key belongs to.
    pub algo: Algorithm,
    /// Base64-encoded public key material.
    pub data: String,
}
/// A hack to extract the key ID from a raw signature string because the `minisign_verify`
/// crate doesn't expose it publicly.
fn extract_key_id_from_sig_str(signature_str: &str) -> Result<String, SigningError> {
    // Line 1 is the untrusted comment; line 2 holds the base64 signature blob.
    let sig_line = signature_str.lines().nth(1).ok_or_else(|| {
        SigningError::InvalidSignatureFormat("Missing signature line".to_string())
    })?;
    let raw = general_purpose::STANDARD.decode(sig_line).map_err(|e| {
        SigningError::InvalidSignatureFormat(format!("Failed to decode signature line: {e}"))
    })?;
    // Bytes 2..10 of the decoded blob carry the 8-byte key ID; a shorter
    // blob is malformed.
    match raw.get(2..10) {
        Some(key_id) => Ok(hex::encode(key_id)),
        None => Err(SigningError::InvalidSignatureFormat(
            "Signature line is too short".to_string(),
        )),
    }
}
/// Verify content at `content_path` against a minisign signature string using any of the provided trusted keys.
///
/// # Errors
///
/// Returns an error if:
/// - The content file cannot be read
/// - The signature verification fails
/// - No matching trusted key is found
pub fn verify_minisign_file_with_keys(
content_path: &Path,
signature_str: &str,
trusted_keys: &[PublicKeyRef],
) -> Result<String, Error> {
let content = fs::read(content_path).map_err(|e| {
Error::internal(format!(
"Failed to read content for signature verification: {e}"
))
})?;
Ok(verify_minisign_bytes_with_keys(
&content,
signature_str,
trusted_keys,
)?)
}
/// Verify raw bytes against a minisign signature string using any of the provided trusted keys.
///
/// # Errors
///
/// Returns an error if:
/// - The signature string cannot be decoded
/// - The public key cannot be decoded from base64
/// - The signature verification fails
/// - No matching trusted key is found
pub fn verify_minisign_bytes_with_keys(
    content: &[u8],
    signature_str: &str,
    trusted_keys: &[PublicKeyRef],
) -> Result<String, SigningError> {
    let key_id_from_sig = extract_key_id_from_sig_str(signature_str)?;
    let sig = Signature::decode(signature_str)
        .map_err(|e| SigningError::InvalidSignatureFormat(e.to_string()))?;
    // Pick the first trusted minisign key whose ID matches the signature's.
    let matching_key = trusted_keys
        .iter()
        .find(|key| key.algo == Algorithm::Minisign && key.id == key_id_from_sig);
    let Some(key) = matching_key else {
        return Err(SigningError::NoTrustedKeyFound {
            key_id: key_id_from_sig,
        });
    };
    let pk = PublicKey::from_base64(&key.data)
        .map_err(|e| SigningError::InvalidPublicKey(e.to_string()))?;
    // On success, report which key ID verified the content.
    pk.verify(content, &sig, false)
        .map(|()| key.id.clone())
        .map_err(|e| SigningError::VerificationFailed {
            reason: e.to_string(),
        })
}
/// Sign raw bytes with a Minisign secret key file and return the signature string.
///
/// The secret key file is expected to be in Minisign "secret key box" format.
/// If the key is encrypted, provide the `passphrase_or_keychain` string as required by
/// the underlying minisign crate (for macOS keychain integration or passphrase).
///
/// # Errors
///
/// Returns an error if the key cannot be read/parsed, the secret key cannot be decrypted,
/// or the signing operation fails.
pub fn minisign_sign_bytes(
    bytes: &[u8],
    secret_key_path: &std::path::Path,
    passphrase_or_keychain: Option<&str>,
    trusted_comment: Option<&str>,
    untrusted_comment: Option<&str>,
) -> Result<String, Error> {
    use std::io::Cursor;
    // Load and parse the secret-key box from disk.
    let sk_box_str = std::fs::read_to_string(secret_key_path)
        .map_err(|e| Error::internal(format!("Failed to read secret key file: {e}")))?;
    let sk_box = SecretKeyBox::from_string(&sk_box_str)
        .map_err(|e| Error::internal(format!("Failed to parse secret key: {e}")))?;
    // Decrypt using the supplied passphrase / keychain handle, if any.
    let passphrase = passphrase_or_keychain.map(str::to_string);
    let secret_key = sk_box
        .into_secret_key(passphrase)
        .map_err(|e| Error::internal(format!("Failed to decrypt secret key: {e}")))?;
    let signature = sign(
        None,
        &secret_key,
        Cursor::new(bytes),
        trusted_comment,
        untrusted_comment,
    )
    .map_err(|e| Error::internal(format!("Failed to sign bytes: {e}")))?;
    Ok(signature.into_string())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/config.rs | crates/net/src/download/config.rs | //! Configuration structures for package downloads
use sps2_hash::Hash;
use sps2_types::Version;
use std::path::PathBuf;
use std::time::Duration;
use sps2_config::ResourceManager;
use std::sync::Arc;
/// Configuration for package downloads
///
/// Defaults (see the `Default` impl) are tuned for large artifacts over
/// typical connections.
#[derive(Clone, Debug)]
pub struct PackageDownloadConfig {
    /// Maximum file size allowed (default: 2GB)
    pub max_file_size: u64,
    /// Buffer size for streaming (default: 128KB)
    pub buffer_size: usize,
    /// Maximum number of concurrent downloads (default: 4)
    pub max_concurrent: usize,
    /// Retry configuration
    pub retry_config: RetryConfig,
    /// Timeout for individual chunks (default: 30s)
    pub chunk_timeout: Duration,
    /// Minimum chunk size for resumable downloads (default: 1MB)
    pub min_chunk_size: u64,
    /// Resource manager
    pub resources: Arc<ResourceManager>,
}
impl Default for PackageDownloadConfig {
fn default() -> Self {
Self {
max_file_size: 2 * 1024 * 1024 * 1024, // 2GB
buffer_size: 128 * 1024, // 128KB
max_concurrent: 4,
retry_config: RetryConfig::default(),
chunk_timeout: Duration::from_secs(30),
min_chunk_size: 1024 * 1024, // 1MB
resources: Arc::new(ResourceManager::default()),
}
}
}
/// Retry configuration for downloads
#[derive(Debug, Clone)]
pub struct RetryConfig {
    /// Maximum number of retries
    pub max_retries: u32,
    /// Initial backoff delay
    pub initial_delay: Duration,
    /// Maximum backoff delay (caps the exponential growth)
    pub max_delay: Duration,
    /// Backoff multiplier applied per attempt
    pub backoff_multiplier: f64,
    /// Jitter factor (0.0 to 1.0) applied to the computed delay
    pub jitter_factor: f64,
}
impl Default for RetryConfig {
fn default() -> Self {
Self {
max_retries: 3,
initial_delay: Duration::from_millis(500),
max_delay: Duration::from_secs(30),
backoff_multiplier: 2.0,
jitter_factor: 0.1,
}
}
}
/// Request for downloading a package
#[derive(Debug, Clone)]
pub struct PackageDownloadRequest {
    /// Package name.
    pub name: String,
    /// Package version to fetch.
    pub version: Version,
    /// URL of the package artifact.
    pub package_url: String,
    /// Optional URL of a detached signature for the package.
    pub signature_url: Option<String>,
    /// Expected content hash, if known.
    pub expected_hash: Option<Hash>,
}
/// Result of a package download operation
#[derive(Debug)]
pub struct PackageDownloadResult {
    /// Where the downloaded package was written.
    pub package_path: PathBuf,
    /// Where the signature file was written, when one was downloaded.
    pub signature_path: Option<PathBuf>,
    /// Content hash of the downloaded package.
    pub hash: Hash,
    /// Size of the download in bytes.
    pub size: u64,
    /// Wall-clock time the download took.
    pub download_time: Duration,
    /// Whether the signature was verified.
    pub signature_verified: bool,
}
/// Parameters for streaming download with unified progress tracking
///
/// Bundles the per-download context passed into the streaming layer.
pub(super) struct StreamParams<'a> {
    /// Expected total size in bytes, used for progress reporting.
    pub total_size: u64,
    /// Hash to verify the finished download against, if known.
    pub expected_hash: Option<&'a Hash>,
    /// Channel progress/debug events are emitted on.
    pub event_sender: &'a sps2_events::EventSender,
    /// URL being downloaded - used for timeout error reporting
    pub url: &'a str,
    /// Identifier of the progress tracker to update.
    pub progress_tracker_id: String,
    /// Optional parent progress ID - reserved for future parent-child coordination features
    #[allow(dead_code)]
    pub parent_progress_id: Option<String>,
    /// Progress manager, when progress reporting is enabled.
    pub progress_manager: Option<&'a sps2_events::ProgressManager>,
}
/// Result of a download operation
#[derive(Debug)]
pub struct DownloadResult {
    /// Hash of the downloaded content (BLAKE3; computed during streaming).
    pub hash: Hash,
    /// Number of bytes in the file (including any resumed prefix).
    pub size: u64,
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/stream.rs | crates/net/src/download/stream.rs | //! Low-level streaming download mechanics
use super::config::{DownloadResult, StreamParams};
use super::resume::calculate_existing_file_hash;
use futures::StreamExt;
use sps2_errors::{Error, NetworkError};
use sps2_hash::Hash;
use std::path::Path;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::fs::{self as tokio_fs, File, OpenOptions};
use tokio::io::{AsyncSeekExt, AsyncWriteExt, SeekFrom};
/// RAII guard for download lock file - ensures cleanup on drop
struct LockGuard {
    /// Path of the `.lock` file; removed (best effort) in `Drop`.
    path: std::path::PathBuf,
    /// Keeps the lock file handle alive for the guard's lifetime.
    _file: File,
}
impl LockGuard {
    /// Atomically create the lock file; fails if another download holds it.
    async fn new(lock_path: std::path::PathBuf) -> Result<Self, Error> {
        let open_result = OpenOptions::new()
            .write(true)
            .create_new(true) // create_new is atomic: errors if the file exists
            .open(&lock_path)
            .await;
        match open_result {
            Ok(file) => Ok(Self {
                path: lock_path,
                _file: file,
            }),
            // A pre-existing lock file means someone else owns the download.
            Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
                Err(NetworkError::DownloadFailed(format!(
                    "File {} is already being downloaded by another process",
                    lock_path.display()
                ))
                .into())
            }
            Err(e) => Err(NetworkError::DownloadFailed(format!(
                "Failed to create lock file {}: {e}",
                lock_path.display()
            ))
            .into()),
        }
    }
}
impl Drop for LockGuard {
    fn drop(&mut self) {
        // Best-effort removal of the lock file; failures are deliberately ignored
        // because Drop cannot report errors.
        let _ignored = std::fs::remove_file(&self.path);
    }
}
/// Prepare file and hasher for download
///
/// Opens the destination (append-style when resuming, truncating create
/// otherwise) and returns a hasher pre-seeded with any already-present bytes.
async fn prepare_download(
    config: &super::config::PackageDownloadConfig,
    dest_path: &Path,
    resume_offset: u64,
) -> Result<(File, blake3::Hasher), Error> {
    let resuming = resume_offset > 0;
    let mut file = if resuming {
        // Keep existing bytes so we can append after them.
        OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(false)
            .open(dest_path)
            .await?
    } else {
        tokio_fs::File::create(dest_path).await?
    };
    let hasher = if resuming {
        // Position at the end and seed the hasher with the bytes on disk.
        file.seek(SeekFrom::End(0)).await?;
        calculate_existing_file_hash(config, dest_path, resume_offset).await?
    } else {
        blake3::Hasher::new()
    };
    Ok((file, hasher))
}
/// Decide whether to emit a progress update: always for the first chunk,
/// otherwise at most once every 50ms.
fn should_report_progress(first_chunk: bool, last_update: &Instant) -> bool {
    if first_chunk {
        return true;
    }
    last_update.elapsed() >= Duration::from_millis(50)
}
/// Report progress update
fn report_progress(params: &StreamParams<'_>, current_downloaded: u64) {
if let Some(progress_manager) = params.progress_manager {
progress_manager.update_progress(
¶ms.progress_tracker_id,
current_downloaded,
Some(params.total_size),
params.event_sender,
);
}
}
/// Verify the computed hash against the expected one, deleting the
/// destination file on mismatch.
fn verify_hash(
    final_hash: &Hash,
    expected_hash: Option<&Hash>,
    dest_path: &Path,
) -> Result<(), Error> {
    let Some(expected) = expected_hash else {
        return Ok(()); // nothing to check against
    };
    if final_hash == expected {
        return Ok(());
    }
    // Corrupt download: remove the file (best effort) and report the mismatch.
    let _ = std::fs::remove_file(dest_path);
    Err(NetworkError::ChecksumMismatch {
        expected: expected.to_hex(),
        actual: final_hash.to_hex(),
    }
    .into())
}
/// Stream download with progress reporting and hash calculation
///
/// Writes the response body to `dest_path` (appending when resuming),
/// hashes the content as it streams, throttles progress events, and
/// verifies the final hash against `params.expected_hash`.
///
/// A `.lock` file next to the destination guards against concurrent
/// downloads of the same file; its guard cleans up automatically on drop.
pub(super) async fn stream_download(
    config: &super::config::PackageDownloadConfig,
    response: reqwest::Response,
    dest_path: &Path,
    resume_offset: u64,
    params: &StreamParams<'_>,
) -> Result<DownloadResult, Error> {
    // Create lock file atomically to prevent concurrent downloads.
    // Lock guard will automatically clean up on drop (including panics/errors)
    let lock_path = dest_path.with_extension("lock");
    let _lock_guard = LockGuard::new(lock_path).await?;
    let (mut file, mut hasher) = prepare_download(config, dest_path, resume_offset).await?;
    // Byte counter for progress. This loop is the only writer, so a plain
    // integer suffices — the previous Arc<AtomicU64> was never shared with
    // any other task or thread.
    let mut downloaded: u64 = resume_offset;
    let mut last_progress_update = Instant::now();
    let mut first_chunk = true;
    // Stream the response
    let mut stream = response.bytes_stream();
    let chunk_timeout = config.chunk_timeout;
    loop {
        // Bound the wait for each chunk so a stalled connection fails fast.
        let chunk_result = tokio::time::timeout(chunk_timeout, stream.next()).await;
        match chunk_result {
            Ok(Some(chunk_result)) => {
                let chunk =
                    chunk_result.map_err(|e| NetworkError::DownloadFailed(e.to_string()))?;
                hasher.update(&chunk);
                file.write_all(&chunk).await?;
                downloaded += chunk.len() as u64;
                // Throttled progress: first chunk immediately, then every 50ms.
                if should_report_progress(first_chunk, &last_progress_update) {
                    report_progress(params, downloaded);
                    last_progress_update = Instant::now();
                    first_chunk = false;
                }
            }
            Ok(None) => break, // stream exhausted — download complete
            Err(_) => {
                return Err(NetworkError::Timeout {
                    url: params.url.to_string(),
                }
                .into());
            }
        }
    }
    file.flush().await?;
    drop(file);
    // Final progress update with the true byte count.
    report_progress(params, downloaded);
    let final_hash = Hash::from_blake3_bytes(*hasher.finalize().as_bytes());
    verify_hash(&final_hash, params.expected_hash, dest_path)?;
    // Lock guard automatically cleaned up on drop
    Ok(DownloadResult {
        hash: final_hash,
        size: downloaded,
    })
}
/// Download a small file in a single request (used for signatures).
///
/// The whole body is buffered in memory before being written, so this
/// is only suitable for small payloads such as `.minisig` files.
pub(super) async fn download_file_simple(
    client: &crate::client::NetClient,
    url: &str,
    dest_path: &Path,
    _tx: &sps2_events::EventSender,
) -> Result<(), Error> {
    let response = client.get(url).await?;
    let status = response.status();
    if !status.is_success() {
        return Err(NetworkError::HttpError {
            status: status.as_u16(),
            message: status.to_string(),
        }
        .into());
    }
    let body = response
        .bytes()
        .await
        .map_err(|e| NetworkError::DownloadFailed(e.to_string()))?;
    tokio_fs::write(dest_path, body).await?;
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/validation.rs | crates/net/src/download/validation.rs | //! URL validation and HTTP response validation for downloads
use sps2_errors::{Error, NetworkError};
use url::Url;
/// Parse `url` and ensure its scheme is one we can download from.
///
/// Accepted schemes are `http`, `https`, and `file`; anything else is
/// rejected with `UnsupportedProtocol`.
pub(super) fn validate_url(url: &str) -> Result<String, Error> {
    let parsed = Url::parse(url).map_err(|e| NetworkError::InvalidUrl(e.to_string()))?;
    let scheme = parsed.scheme();
    if matches!(scheme, "http" | "https" | "file") {
        Ok(url.to_string())
    } else {
        Err(NetworkError::UnsupportedProtocol {
            protocol: scheme.to_string(),
        }
        .into())
    }
}
/// Check the HTTP status of a download response.
///
/// Resumed downloads must answer exactly `206 Partial Content`; fresh
/// downloads only need any 2xx status.
pub(super) fn validate_response(
    response: &reqwest::Response,
    is_resume: bool,
) -> Result<(), Error> {
    let status = response.status();
    if is_resume && status != reqwest::StatusCode::PARTIAL_CONTENT {
        return Err(NetworkError::PartialContentNotSupported.into());
    }
    if !is_resume && !status.is_success() {
        return Err(NetworkError::HttpError {
            status: status.as_u16(),
            message: status.to_string(),
        }
        .into());
    }
    Ok(())
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/core.rs | crates/net/src/download/core.rs | //! Main downloader orchestration and `PackageDownloader` implementation
use super::config::{
DownloadResult, PackageDownloadConfig, PackageDownloadRequest, PackageDownloadResult,
StreamParams,
};
use super::resume::get_resume_offset;
use super::retry::calculate_backoff_delay;
use super::stream::{download_file_simple, stream_download};
use super::validation::{validate_response, validate_url};
use crate::client::{NetClient, NetConfig};
use sps2_errors::{Error, NetworkError, SigningError};
use sps2_events::{
AppEvent, EventEmitter, EventSender, FailureContext, GeneralEvent, LifecycleEvent,
};
use sps2_hash::Hash;
use sps2_types::Version;
use std::path::Path;
use std::time::{Duration, Instant};
use tokio::fs as tokio_fs;
/// A streaming package downloader with resumable capabilities
pub struct PackageDownloader {
    /// Download tuning: retry policy, size limits, buffers, timeouts.
    config: PackageDownloadConfig,
    /// Shared HTTP client used for all requests.
    client: NetClient,
    /// Creates and updates progress trackers for downloads.
    progress_manager: sps2_events::ProgressManager,
}
impl PackageDownloader {
/// Create a new package downloader
///
/// The HTTP client is derived from the download config, with a long
/// overall timeout since package archives can be large.
///
/// # Errors
///
/// Returns an error if the HTTP client cannot be initialized.
pub fn new(
    config: PackageDownloadConfig,
    progress_manager: sps2_events::ProgressManager,
) -> Result<Self, Error> {
    let net_config = NetConfig {
        timeout: Duration::from_secs(600), // 10 minutes for large files
        connect_timeout: Duration::from_secs(30),
        retry_count: config.retry_config.max_retries,
        retry_delay: config.retry_config.initial_delay,
        ..NetConfig::default()
    };
    NetClient::new(net_config).map(|client| Self {
        config,
        client,
        progress_manager,
    })
}
/// Create with default configuration
///
/// # Errors
///
/// Returns an error if the HTTP client cannot be initialized.
pub fn with_defaults(progress_manager: sps2_events::ProgressManager) -> Result<Self, Error> {
    let config = PackageDownloadConfig::default();
    Self::new(config, progress_manager)
}
/// Download a .sp package file with concurrent signature download
///
/// The package is downloaded with resume support while the signature
/// (if a URL is given) is fetched concurrently; either failure aborts
/// both. `signature_verified` in the result is `false` when no
/// signature URL was given, the signature file is absent, or
/// verification could not be attempted — a hard verification failure
/// returns an error instead.
///
/// # Errors
///
/// Returns an error if the download fails, hash verification fails,
/// or file I/O operations fail.
#[allow(clippy::too_many_arguments)] // Core download function requires all parameters for operation
pub async fn download_package(
    &self,
    package_name: &str,
    version: &Version,
    package_url: &str,
    signature_url: Option<&str>,
    dest_dir: &Path,
    expected_hash: Option<&Hash>,
    progress_tracker_id: String,
    parent_progress_id: Option<String>,
    tx: &EventSender,
) -> Result<PackageDownloadResult, Error> {
    let start_time = Instant::now();
    // Create destination paths
    // Extract filename from URL instead of constructing it
    let package_filename = package_url
        .split('/')
        .next_back()
        .unwrap_or(&format!("{package_name}-{version}.sp"))
        .to_string();
    let package_path = dest_dir.join(&package_filename);
    // Signature (if any) lives next to the package as `<file>.minisig`.
    let signature_path =
        signature_url.map(|_| dest_dir.join(format!("{package_filename}.minisig")));
    // Ensure destination directory exists
    tokio_fs::create_dir_all(dest_dir).await?;
    // Download package and signature concurrently
    // Create progress tracker if not provided
    let tracker_id = if progress_tracker_id.is_empty() {
        let config = sps2_events::patterns::DownloadProgressConfig {
            operation_name: format!("Downloading {package_name}"),
            total_bytes: None,
            package_name: Some(package_name.to_string()),
            url: package_url.to_string(),
        };
        self.progress_manager.create_download_tracker(&config)
    } else {
        progress_tracker_id
    };
    let package_fut = self.download_with_resume(
        package_url,
        &package_path,
        expected_hash,
        tracker_id,
        parent_progress_id.clone(),
        Some(package_name.to_string()),
        tx.clone(),
    );
    // Signature download is a no-op when there is no signature URL.
    let signature_fut = async {
        match (signature_url, &signature_path) {
            (Some(sig_url), Some(sig_path)) => {
                download_file_simple(&self.client, sig_url, sig_path, tx).await
            }
            _ => Ok(()),
        }
    };
    // Execute downloads concurrently; try_join! aborts on first error.
    let (package_result, _signature_result) = tokio::try_join!(package_fut, signature_fut)?;
    let download_time = start_time.elapsed();
    // Verify signature if available
    let signature_verified = if let Some(sig_path) = &signature_path {
        if sig_path.exists() {
            self.verify_package_signature(&package_path, sig_path)
                .await?
        } else {
            false
        }
    } else {
        false
    };
    Ok(PackageDownloadResult {
        package_path,
        signature_path,
        hash: package_result.hash,
        size: package_result.size,
        download_time,
        signature_verified,
    })
}
/// Verify the signature of a downloaded package
///
/// Returns `Ok(true)` when a trusted key verified the signature, and
/// `Ok(false)` when verification could not be attempted: missing
/// trusted-keys file, empty key set, or a signature keyed by an id we
/// do not trust. An actual verification failure is returned as an
/// error.
///
/// # Errors
///
/// Returns an error if signature file cannot be read, trusted keys cannot be loaded,
/// or if no trusted keys are available for verification.
async fn verify_package_signature(
    &self,
    package_path: &Path,
    signature_path: &Path,
) -> Result<bool, Error> {
    // Read signature file
    let sig_str = tokio::fs::read_to_string(signature_path)
        .await
        .map_err(|e| {
            NetworkError::DownloadFailed(format!(
                "Failed to read signature file {}: {e}",
                signature_path.display()
            ))
        })?;
    // Load trusted keys from the standard location
    let keys_dir = std::path::Path::new(sps2_config::fixed_paths::KEYS_DIR);
    let keys_file = keys_dir.join("trusted_keys.json");
    let mut allowed = Vec::new();
    // Try to read and parse trusted keys file
    match tokio::fs::read_to_string(&keys_file).await {
        Ok(content) => match serde_json::from_str::<serde_json::Value>(&content) {
            Ok(json) => {
                // Expected shape: { "<key_id>": { "public_key": "<b64>", ... }, ... }
                if let Some(obj) = json.as_object() {
                    for (key_id, entry) in obj {
                        if let Some(pk) = entry.get("public_key").and_then(|v| v.as_str()) {
                            allowed.push(crate::signing::PublicKeyRef {
                                id: key_id.clone(),
                                algo: crate::signing::Algorithm::Minisign,
                                data: pk.to_string(),
                            });
                        }
                    }
                }
            }
            Err(e) => {
                return Err(NetworkError::DownloadFailed(format!(
                    "Failed to parse trusted keys file {}: {e}",
                    keys_file.display()
                ))
                .into());
            }
        },
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            // No trusted keys file - this is a warning condition, not an error
            // Return false to indicate signature could not be verified
            return Ok(false);
        }
        Err(e) => {
            return Err(NetworkError::DownloadFailed(format!(
                "Failed to read trusted keys file {}: {e}",
                keys_file.display()
            ))
            .into());
        }
    }
    // If no keys were found in the file, we cannot verify
    if allowed.is_empty() {
        return Ok(false);
    }
    // Perform signature verification with the loaded keys
    match crate::signing::verify_minisign_file_with_keys(package_path, &sig_str, &allowed) {
        Ok(_key_id) => Ok(true),
        Err(Error::Signing(SigningError::NoTrustedKeyFound { .. })) => {
            // Key ID from signature doesn't match any of our trusted keys
            // This is not necessarily an error - return false to indicate unverified
            Ok(false)
        }
        Err(e) => {
            // Actual verification failure (signature mismatch, invalid format, etc.)
            // This is a security issue - fail the download
            Err(NetworkError::DownloadFailed(format!(
                "Signature verification failed for {}: {e}",
                package_path.display()
            ))
            .into())
        }
    }
}
/// Download multiple packages concurrently
///
/// Concurrency is bounded by the download-permit semaphore in
/// `config.resources`; each package also gets its own child progress
/// tracker, optionally registered under `batch_progress_id`.
///
/// # Errors
///
/// Returns an error if any download fails. Successful downloads are preserved.
///
/// # Panics
///
/// Panics if the semaphore is closed (which should not happen in normal operation).
pub async fn download_packages_batch(
    &self,
    packages: Vec<PackageDownloadRequest>,
    dest_dir: &Path,
    batch_progress_id: Option<String>,
    tx: &EventSender,
) -> Result<Vec<PackageDownloadResult>, Error> {
    use futures::stream::{FuturesUnordered, StreamExt};
    let mut futures = FuturesUnordered::new();
    let total_packages = packages.len();
    for request in packages {
        // Each task owns clones of everything it touches so the async
        // block can be 'static.
        let downloader = self.clone();
        let dest_dir = dest_dir.to_path_buf();
        let tx = tx.clone();
        let batch_progress_id_clone = batch_progress_id.clone();
        let fut = async move {
            // Acquire permit with proper RAII - will be released when dropped
            let _permit = downloader
                .config
                .resources
                .acquire_download_permit()
                .await?;
            // Create individual progress tracker for this download
            let child_config = sps2_events::patterns::DownloadProgressConfig {
                operation_name: format!("Downloading {}", request.name),
                total_bytes: None, // Will be determined during download
                package_name: Some(request.name.clone()),
                url: request.package_url.clone(),
            };
            let child_id = downloader
                .progress_manager
                .create_download_tracker(&child_config);
            // Register as child of batch operation if we have a parent
            if let Some(ref parent_id) = batch_progress_id_clone {
                #[allow(clippy::cast_precision_loss)]
                // Acceptable precision loss for progress weights
                let weight = 1.0 / total_packages as f64; // Equal weight for each package
                downloader.progress_manager.register_child_tracker(
                    parent_id,
                    &child_id,
                    format!("Downloading {}", request.name),
                    weight,
                    &tx,
                );
            }
            // Perform the download - permit will be held throughout
            let result = downloader
                .download_package(
                    &request.name,
                    &request.version,
                    &request.package_url,
                    request.signature_url.as_deref(),
                    &dest_dir,
                    request.expected_hash.as_ref(),
                    child_id.clone(), // Individual progress tracker ID
                    batch_progress_id_clone.clone(), // Parent progress ID
                    &tx,
                )
                .await;
            // Complete child tracker regardless of success/failure
            if let Some(ref parent_id) = batch_progress_id_clone {
                let success = result.is_ok();
                downloader
                    .progress_manager
                    .complete_child_tracker(parent_id, &child_id, success, &tx);
            }
            // Permit is automatically released here when _permit is dropped
            result
        };
        futures.push(fut);
    }
    // Drain completions; the first failure returns early, dropping any
    // still-pending futures.
    let mut results = Vec::new();
    while let Some(result) = futures.next().await {
        results.push(result?);
    }
    Ok(results)
}
/// Download a file with resumable capability
///
/// Wraps [`Self::try_download_with_resume`] in a retry loop with
/// exponential backoff and jitter; Paused/Resumed progress events
/// bracket each retry wait so UIs can preserve accumulated progress.
/// After the final failed attempt a `download_failed` lifecycle event
/// is emitted.
///
/// # Errors
///
/// Returns an error if the download fails, network issues occur,
/// hash verification fails, or file I/O operations fail.
#[allow(clippy::too_many_arguments)]
pub async fn download_with_resume(
    &self,
    url: &str,
    dest_path: &Path,
    expected_hash: Option<&Hash>,
    progress_tracker_id: String,
    parent_progress_id: Option<String>,
    package: Option<String>,
    tx: EventSender,
) -> Result<DownloadResult, Error> {
    let url = validate_url(url)?;
    let mut retry_count = 0;
    #[allow(unused_assignments)] // Used after retry loop for error reporting
    let mut last_error: Option<Error> = None;
    loop {
        match self
            .try_download_with_resume(
                &url,
                dest_path,
                expected_hash,
                progress_tracker_id.clone(),
                parent_progress_id.clone(),
                package.as_deref(),
                &tx,
            )
            .await
        {
            Ok(result) => return Ok(result),
            Err(e) => {
                last_error = Some(e);
                retry_count += 1;
                if retry_count > self.config.retry_config.max_retries {
                    break;
                }
                // Calculate backoff delay with jitter
                let delay = calculate_backoff_delay(&self.config.retry_config, retry_count);
                // Emit retry event with progress preservation
                {
                    // Get current progress from partial download
                    let accumulated_bytes =
                        if let Ok(metadata) = tokio::fs::metadata(dest_path).await {
                            metadata.len()
                        } else {
                            0
                        };
                    tx.emit(AppEvent::Progress(sps2_events::ProgressEvent::Paused {
                        id: progress_tracker_id.clone(),
                        reason: format!(
                            "Retry attempt {}/{}",
                            retry_count, self.config.retry_config.max_retries
                        ),
                        items_completed: accumulated_bytes,
                    }));
                }
                tx.emit(AppEvent::General(GeneralEvent::debug(format!(
                    "Download failed, retrying in {delay:?} (attempt {retry_count}/{})...",
                    self.config.retry_config.max_retries
                ))));
                tokio::time::sleep(delay).await;
                // Resume progress tracking
                tx.emit(AppEvent::Progress(sps2_events::ProgressEvent::Resumed {
                    id: progress_tracker_id.clone(),
                    pause_duration: delay,
                }));
            }
        }
    }
    // All retries exhausted: surface the last error (or a generic one).
    let final_error = last_error.unwrap_or_else(|| {
        NetworkError::DownloadFailed("Maximum retries exceeded".to_string()).into()
    });
    let failure = FailureContext::from_error(&final_error);
    tx.emit(AppEvent::Lifecycle(LifecycleEvent::download_failed(
        url.to_string(),
        package.clone(),
        failure,
    )));
    Err(final_error)
}
/// Attempt a single download with resume capability
///
/// Determines a validated resume offset, issues the request (with a
/// `Range` header when resuming), validates the response, enforces the
/// configured maximum file size, then streams the body to disk while
/// emitting download lifecycle events.
#[allow(clippy::too_many_arguments)]
async fn try_download_with_resume(
    &self,
    url: &str,
    dest_path: &Path,
    expected_hash: Option<&Hash>,
    progress_tracker_id: String,
    parent_progress_id: Option<String>,
    package: Option<&str>,
    tx: &EventSender,
) -> Result<DownloadResult, Error> {
    // Check if partial file exists and validate its integrity
    // If validation fails, get_resume_offset will return 0 automatically
    let resume_offset = get_resume_offset(&self.config, dest_path).await?;
    // Prepare request with range header if resuming
    let mut headers = Vec::new();
    if resume_offset > 0 {
        headers.push(("Range", format!("bytes={resume_offset}-")));
    }
    // Make HTTP request
    let response = if headers.is_empty() {
        self.client.get(url).await?
    } else {
        self.client
            .get_with_headers(
                url,
                &headers
                    .iter()
                    .map(|(k, v)| (*k, v.as_str()))
                    .collect::<Vec<_>>(),
            )
            .await?
    };
    // Validate response (resume requires 206 Partial Content)
    validate_response(&response, resume_offset > 0)?;
    // Get total size information
    let content_length = response.content_length().unwrap_or(0);
    let total_size =
        if resume_offset > 0 && response.status() == reqwest::StatusCode::PARTIAL_CONTENT {
            // For partial content, content-length is the remaining bytes
            resume_offset + content_length
        } else {
            content_length
        };
    // Validate file size limits
    if total_size > self.config.max_file_size {
        return Err(NetworkError::FileSizeExceeded {
            size: total_size,
            limit: self.config.max_file_size,
        }
        .into());
    }
    tx.emit(AppEvent::Lifecycle(LifecycleEvent::download_started(
        url.to_string(),
        package.map(str::to_string),
        Some(total_size),
    )));
    // Download with streaming and progress
    let params = StreamParams {
        total_size,
        expected_hash,
        event_sender: tx,
        url,
        progress_tracker_id,
        parent_progress_id,
        progress_manager: Some(&self.progress_manager),
    };
    let result =
        stream_download(&self.config, response, dest_path, resume_offset, &params).await?;
    tx.emit(AppEvent::Lifecycle(LifecycleEvent::download_completed(
        url.to_string(),
        package.map(str::to_string),
        result.size,
    )));
    Ok(result)
}
}
// Hand-written Clone: every field is itself Clone, so this is a plain
// field-by-field clone.
impl Clone for PackageDownloader {
    fn clone(&self) -> Self {
        let Self {
            config,
            client,
            progress_manager,
        } = self;
        Self {
            config: config.clone(),
            client: client.clone(),
            progress_manager: progress_manager.clone(),
        }
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/mod.rs | crates/net/src/download/mod.rs | //! Production-ready streaming download infrastructure for .sp files
//!
//! This module provides high-performance, resumable downloads with concurrent
//! signature verification and comprehensive error handling.
mod config;
mod core;
mod resume;
mod retry;
mod stream;
mod validation;
// Re-export public types and structs
pub use config::{
DownloadResult, PackageDownloadConfig, PackageDownloadRequest, PackageDownloadResult,
RetryConfig,
};
pub use core::PackageDownloader;
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/resume.rs | crates/net/src/download/resume.rs | //! Resumable download logic for package downloads
use super::config::PackageDownloadConfig;
use sps2_errors::Error;
use std::path::Path;
use tokio::fs as tokio_fs;
use tokio::io::AsyncReadExt;
/// Get the offset for resuming a download
///
/// This function checks if a partial download exists, is large enough to resume,
/// and validates its integrity before allowing resumption. An unusable
/// partial file is deleted (best-effort) and `0` is returned.
///
/// # Errors
///
/// Returns an error if file operations fail.
pub(super) async fn get_resume_offset(
    config: &PackageDownloadConfig,
    dest_path: &Path,
) -> Result<u64, Error> {
    // No partial file at all: start from the beginning.
    let Ok(metadata) = tokio_fs::metadata(dest_path).await else {
        return Ok(0);
    };
    let size = metadata.len();
    if size < config.min_chunk_size {
        // Too small to be worth resuming; discard and restart.
        let _ = tokio_fs::remove_file(dest_path).await;
        return Ok(0);
    }
    // Hash the existing bytes to confirm the partial file is readable;
    // a failure here means it is likely corrupted, so restart.
    match calculate_existing_file_hash(config, dest_path, size).await {
        Ok(_hasher) => Ok(size),
        Err(_e) => {
            let _ = tokio_fs::remove_file(dest_path).await;
            Ok(0)
        }
    }
}
/// Calculate the BLAKE3 hash of the first `bytes` bytes of an existing
/// file, reading in `config.buffer_size` chunks. Used to seed the
/// hasher when resuming a partial download.
pub(super) async fn calculate_existing_file_hash(
    config: &PackageDownloadConfig,
    dest_path: &Path,
    bytes: u64,
) -> Result<blake3::Hasher, Error> {
    let mut file = tokio_fs::File::open(dest_path).await?;
    let mut hasher = blake3::Hasher::new();
    let mut buffer = vec![0; config.buffer_size];
    let mut remaining = bytes;
    while remaining > 0 {
        let want = std::cmp::min(buffer.len() as u64, remaining);
        let to_read = usize::try_from(want).unwrap_or(buffer.len());
        let read = file.read(&mut buffer[..to_read]).await?;
        if read == 0 {
            break; // file shorter than expected: hash what we have
        }
        hasher.update(&buffer[..read]);
        remaining -= read as u64;
    }
    Ok(hasher)
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/net/src/download/retry.rs | crates/net/src/download/retry.rs | //! Retry logic and backoff calculations for downloads
use super::config::RetryConfig;
use std::time::Duration;
/// Calculate exponential backoff delay with jitter
///
/// This function implements exponential backoff with proper overflow protection
/// and jitter to prevent thundering herd problems.
///
/// The pre-jitter delay is clamped to `retry_config.max_delay`, so the
/// final result is bounded by `max_delay * (1 + jitter_factor / 2)`.
pub(super) fn calculate_backoff_delay(retry_config: &RetryConfig, attempt: u32) -> Duration {
    // Cap attempt at a reasonable value to prevent overflow (2^30 is already huge)
    // (attempt is 1-based, so subtract 1 first)
    let attempt = attempt.saturating_sub(1).min(30);
    // Calculate exponential backoff: base_delay * multiplier^attempt
    // Precision loss is acceptable for backoff calculations
    #[allow(clippy::cast_precision_loss)]
    let base_ms = retry_config.initial_delay.as_millis() as f64;
    #[allow(clippy::cast_precision_loss)]
    let max_ms = retry_config.max_delay.as_millis() as f64;
    let multiplier = retry_config.backoff_multiplier;
    // Use floating point for exponential calculation, clamped to max_delay
    // Cast is safe: attempt is capped at 30, which fits in i32
    #[allow(clippy::cast_possible_wrap)]
    let delay_ms = (base_ms * multiplier.powi(attempt as i32))
        .min(max_ms)
        .max(0.0);
    // Add jitter: random value in range [-jitter_factor/2, +jitter_factor/2]
    // This prevents thundering herd when multiple clients retry simultaneously
    let jitter_factor = retry_config.jitter_factor.clamp(0.0, 1.0);
    let jitter_ms = delay_ms * jitter_factor * (rand::random::<f64>() - 0.5);
    let final_delay_ms = (delay_ms + jitter_ms).max(0.0);
    // Convert to Duration, clamping at u64::MAX milliseconds
    #[allow(clippy::cast_precision_loss)]
    let final_delay_ms = if final_delay_ms > u64::MAX as f64 {
        u64::MAX
    } else {
        // Safe: value is positive (max'd with 0) and already range-checked
        #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
        {
            final_delay_ms as u64
        }
    };
    Duration::from_millis(final_delay_ms)
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/repository/src/lib.rs | crates/repository/src/lib.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
#![allow(clippy::module_name_repetitions)]
use base64::Engine as _;
use chrono::Utc;
use regex::Regex;
use sps2_errors::{Error, StorageError};
use sps2_hash::Hash;
use sps2_index::{DependencyInfo, Index, VersionEntry};
use std::path::{Path, PathBuf};
use tokio::fs;
/// Metadata for a single `.sp` package file, parsed from its filename
/// (`name-version-revision.arch.sp`) plus its BLAKE3 digest.
#[derive(Debug, Clone)]
pub struct PackageArtifact {
    /// Package name component of the filename.
    pub name: String,
    /// Package version string.
    pub version: String,
    /// Package revision number.
    pub revision: u32,
    /// Target architecture component of the filename.
    pub arch: String,
    /// Hex-encoded BLAKE3 hash of the file contents.
    pub blake3: String,
    /// Original on-disk filename.
    pub filename: String,
}
/// Abstraction over a blob store used to publish repository artifacts
/// (index files, signatures).
#[async_trait::async_trait]
pub trait ObjectStore: Send + Sync {
    /// Store `bytes` under `key`.
    async fn put_object(&self, key: &str, bytes: &[u8]) -> Result<(), Error>;
    /// Fetch the object stored under `key`.
    async fn get_object(&self, key: &str) -> Result<Vec<u8>, Error>;
    /// List object names under `prefix`.
    async fn list_prefix(&self, prefix: &str) -> Result<Vec<String>, Error>;
}
/// Local filesystem-backed object store for development
#[derive(Debug, Clone)]
pub struct LocalStore {
    // Root directory under which all object keys are resolved.
    base: PathBuf,
}
impl LocalStore {
    /// Create a store rooted at `base`.
    #[must_use]
    pub fn new<P: Into<PathBuf>>(base: P) -> Self {
        let base = base.into();
        Self { base }
    }

    /// Resolve an object key to its path under the store root.
    fn path_for(&self, key: &str) -> PathBuf {
        let mut path = self.base.clone();
        path.push(key);
        path
    }
}
#[async_trait::async_trait]
impl ObjectStore for LocalStore {
    async fn put_object(&self, key: &str, bytes: &[u8]) -> Result<(), Error> {
        let path = self.path_for(key);
        // Make sure the parent directory chain exists before writing.
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).await?;
        }
        fs::write(&path, bytes).await?;
        Ok(())
    }

    async fn get_object(&self, key: &str) -> Result<Vec<u8>, Error> {
        let bytes = fs::read(&self.path_for(key)).await?;
        Ok(bytes)
    }

    async fn list_prefix(&self, prefix: &str) -> Result<Vec<String>, Error> {
        // Fall back to listing the store root when the prefix is not a
        // directory.
        let candidate = self.base.join(prefix);
        let dir = if candidate.is_dir() {
            candidate
        } else {
            self.base.clone()
        };
        let mut rd = fs::read_dir(&dir)
            .await
            .map_err(|e| StorageError::IoError {
                message: e.to_string(),
            })?;
        // Collect the names of regular files only (directories skipped).
        let mut results = Vec::new();
        while let Some(entry) = rd.next_entry().await? {
            let path = entry.path();
            if !path.is_file() {
                continue;
            }
            if let Some(name) = path.file_name().and_then(|s| s.to_str()) {
                results.push(name.to_string());
            }
        }
        Ok(results)
    }
}
/// Publisher builds and signs an index from objects in a store
#[derive(Debug, Clone)]
pub struct Publisher<S: ObjectStore> {
    /// Object store that receives the published index files.
    pub store: S,
    /// Base URL prepended to package filenames when building
    /// download/signature URLs.
    pub base_url: String,
}
impl<S: ObjectStore> Publisher<S> {
/// Create a publisher over `store`; `base_url` is used to build the
/// download and signature URLs embedded in the index.
#[must_use]
pub fn new(store: S, base_url: String) -> Self {
    Self { store, base_url }
}
/// Scan a directory for `.sp` files and return artifacts.
///
/// Only filenames matching `name-version-revision.arch.sp` are picked
/// up; everything else is silently skipped. Each match is hashed with
/// BLAKE3.
///
/// # Errors
///
/// Returns an error if directory entries cannot be read, or if hashing any
/// matched package file fails.
pub async fn scan_packages_local_dir(&self, dir: &Path) -> Result<Vec<PackageArtifact>, Error> {
    let mut artifacts = Vec::new();
    let mut rd = fs::read_dir(dir).await?;
    // Capture groups: 1 = name (lazy, may contain '-'), 2 = version
    // (no '-'), 3 = revision digits, 4 = arch (no '.').
    let re = Regex::new(r"^(.+?)-([^-]+)-(\d+)\.([^.]+)\.sp$")
        .map_err(|e| Error::internal(e.to_string()))?;
    while let Some(entry) = rd.next_entry().await? {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }
        if path.extension().and_then(|s| s.to_str()) != Some("sp") {
            continue;
        }
        let filename = path
            .file_name()
            .and_then(|s| s.to_str())
            .ok_or_else(|| Error::internal("invalid filename"))?
            .to_string();
        if let Some(caps) = re.captures(&filename) {
            // Be defensive and skip if any capture group is missing
            let Some(g1) = caps.get(1) else { continue };
            let Some(g2) = caps.get(2) else { continue };
            let Some(g3) = caps.get(3) else { continue };
            let Some(g4) = caps.get(4) else { continue };
            let name = g1.as_str().to_string();
            let version = g2.as_str().to_string();
            // Revision falls back to 1 on parse failure.
            let revision: u32 = g3.as_str().parse().unwrap_or(1);
            let arch = g4.as_str().to_string();
            // Compute BLAKE3 hash
            let hash = Hash::blake3_hash_file(&path).await?.to_hex();
            artifacts.push(PackageArtifact {
                name,
                version,
                revision,
                arch,
                blake3: hash,
                filename,
            });
        }
    }
    Ok(artifacts)
}
/// Build an Index from artifacts
///
/// Each artifact becomes a version entry whose download and `.minisig`
/// URLs are formed from the publisher's base URL.
#[must_use]
pub fn build_index(&self, artifacts: &[PackageArtifact]) -> Index {
    // Normalize once so we never emit a double slash.
    let base = self.base_url.trim_end_matches('/');
    let mut index = Index::new();
    for artifact in artifacts {
        let download_url = format!("{base}/{}", artifact.filename);
        let minisig_url = format!("{base}/{}.minisig", artifact.filename);
        let entry = VersionEntry {
            revision: artifact.revision,
            arch: artifact.arch.clone(),
            blake3: artifact.blake3.clone(),
            download_url,
            minisig_url,
            dependencies: DependencyInfo::default(),
            sbom: None,
            description: None,
            homepage: None,
            license: None,
        };
        index.add_version(artifact.name.clone(), artifact.version.clone(), entry);
    }
    index
}
/// Serialize and sign index, then publish `index.json` and `index.json.minisig` to store.
///
/// The index JSON is signed with minisign using the key at
/// `secret_key_path`; `passphrase_or_keychain` unlocks the key.
///
/// # Errors
///
/// Returns an error if index serialization fails, minisign signing fails,
/// or writing to the object store fails.
pub async fn publish_index(
    &self,
    index: &Index,
    secret_key_path: &Path,
    passphrase_or_keychain: Option<&str>,
) -> Result<(), Error> {
    let json = index.to_json()?;
    // Sign the exact bytes we are about to publish.
    let sig = sps2_net::signing::minisign_sign_bytes(
        json.as_bytes(),
        secret_key_path,
        passphrase_or_keychain,
        Some("sps2 repository index"),
        Some("index.json"),
    )?;
    self.store.put_object("index.json", json.as_bytes()).await?;
    self.store
        .put_object("index.json.minisig", sig.as_bytes())
        .await?;
    Ok(())
}
}
/// Keys.json model and helpers
pub mod keys {
use super::*;
use serde::{Deserialize, Serialize};
/// A public key that clients trust for signature verification.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrustedKey {
    /// Minisign key id, hex-encoded.
    pub key_id: String,
    pub public_key: String, // base64
    /// Optional human-readable comment describing the key.
    pub comment: Option<String>,
    /// Unix timestamp at which the key became trusted.
    pub trusted_since: i64,
    /// Optional Unix timestamp after which the key is no longer valid.
    pub expires_at: Option<i64>,
}
/// Record of a rotation from a previous key to a new one.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KeyRotation {
    /// Key id of the key being replaced.
    pub previous_key_id: String,
    /// The replacement key.
    pub new_key: TrustedKey,
    /// Signature authorizing the rotation.
    pub rotation_signature: String,
    /// Unix timestamp of the rotation.
    pub timestamp: i64,
}
/// Top-level `keys.json` document: trusted keys plus rotation history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RepositoryKeys {
    pub keys: Vec<TrustedKey>,
    pub rotations: Vec<KeyRotation>,
    /// Optional maximum allowed signature age — presumably seconds;
    /// TODO confirm units against the consumer.
    #[serde(default)]
    pub max_signature_age: Option<u64>,
}
/// Derive minisign `key_id` from public key base64 (bytes[2..10]).
///
/// # Errors
///
/// Returns an error if the base64 payload cannot be decoded or is too short.
pub fn key_id_from_public_base64(b64: &str) -> Result<String, Error> {
let decoded = base64::engine::general_purpose::STANDARD
.decode(b64)
.map_err(|e| Error::internal(format!("invalid minisign public key: {e}")))?;
if decoded.len() < 10 {
return Err(Error::internal("minisign public key too short"));
}
Ok(hex::encode(&decoded[2..10]))
}
/// Extract base64 from a minisign public key box or return input if it's already base64
#[must_use]
pub fn extract_base64(pk_input: &str) -> String {
    let trimmed = pk_input.trim();
    // A bare base64 payload: a single line containing no spaces.
    let is_single_line = trimmed.lines().count() <= 1;
    if is_single_line && !trimmed.contains(' ') {
        return trimmed.to_string();
    }
    // Key-box format: the first line is a comment, the payload is the
    // next non-empty line.
    for line in trimmed.lines().skip(1) {
        let candidate = line.trim();
        if !candidate.is_empty() {
            return candidate.to_string();
        }
    }
    // Nothing usable after the comment line; fall back to the input.
    trimmed.to_string()
}
/// Write `keys.json` to the repository directory.
///
/// # Errors
///
/// Returns an error if serialization or writing to disk fails.
pub async fn write_keys_json(dir: &Path, repo_keys: &RepositoryKeys) -> Result<(), Error> {
    let content = serde_json::to_string_pretty(repo_keys)
        .map_err(|e| Error::internal(format!("serialize keys.json: {e}")))?;
    fs::write(dir.join("keys.json"), content).await?;
    Ok(())
}
/// Create a `RepositoryKeys` with a single trusted key and no rotations.
///
/// The key becomes trusted as of the current timestamp and never
/// expires.
///
/// # Errors
///
/// Returns an error if deriving the minisign key id from the provided
/// base64 public key fails.
pub fn make_single_key(
    pk_base64: String,
    comment: Option<String>,
) -> Result<RepositoryKeys, Error> {
    let key_id = key_id_from_public_base64(&pk_base64)?;
    Ok(RepositoryKeys {
        keys: vec![TrustedKey {
            key_id,
            public_key: pk_base64,
            comment,
            trusted_since: Utc::now().timestamp(),
            expires_at: None,
        }],
        rotations: Vec::new(),
        max_signature_age: None,
    })
}
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/resolver/src/lib.rs | crates/resolver/src/lib.rs | #![warn(mismatched_lifetime_syntaxes)]
#![deny(clippy::pedantic, unsafe_code)]
#![allow(clippy::module_name_repetitions)]
//! Dependency resolution for sps2
//!
//! This crate provides deterministic, parallel dependency resolution
//! for both installation and building operations. It implements a
//! topological sort with concurrent execution.
mod execution;
mod graph;
mod resolver;
mod sat;
pub use execution::ExecutionPlan;
pub use graph::{DepEdge, DepKind, DependencyGraph, NodeAction, PackageId, ResolvedNode};
pub use resolver::Resolver;
pub use sat::{solve_dependencies, DependencyProblem, DependencySolution};
use sps2_types::package::PackageSpec;
use sps2_types::Version;
use std::collections::HashMap;
use std::path::PathBuf;
/// Simple representation of an installed package
///
/// Carried in [`ResolutionContext::installed_packages`] so that
/// existing installs can satisfy dependency requests.
#[derive(Clone, Debug)]
pub struct InstalledPackage {
    /// Package name
    pub name: String,
    /// Package version
    pub version: Version,
}
impl InstalledPackage {
    /// Create new installed package from its name and version.
    #[must_use]
    pub fn new(name: String, version: Version) -> Self {
        Self { name, version }
    }
}
/// Resolution context for packages
///
/// Collects everything the resolver needs as input; build with the
/// chainable `add_*`/`with_*` methods.
#[derive(Clone, Debug)]
pub struct ResolutionContext {
    /// Runtime dependencies to resolve
    pub runtime_deps: Vec<PackageSpec>,
    /// Build dependencies to resolve (only for build operations)
    pub build_deps: Vec<PackageSpec>,
    /// Local package files to include
    pub local_files: Vec<PathBuf>,
    /// Already installed packages that can satisfy dependencies
    pub installed_packages: Vec<InstalledPackage>,
}
impl ResolutionContext {
    /// Create new resolution context
    ///
    /// All lists start empty; populate them with the builder-style
    /// methods below.
    #[must_use]
    pub fn new() -> Self {
        Self {
            runtime_deps: Vec::new(),
            build_deps: Vec::new(),
            local_files: Vec::new(),
            installed_packages: Vec::new(),
        }
    }
    /// Add runtime dependency (chainable)
    #[must_use]
    pub fn add_runtime_dep(mut self, spec: PackageSpec) -> Self {
        self.runtime_deps.push(spec);
        self
    }
    /// Add build dependency (chainable)
    #[must_use]
    pub fn add_build_dep(mut self, spec: PackageSpec) -> Self {
        self.build_deps.push(spec);
        self
    }
    /// Add local package file (chainable)
    #[must_use]
    pub fn add_local_file(mut self, path: PathBuf) -> Self {
        self.local_files.push(path);
        self
    }
    /// Add installed packages, replacing any previously set list
    #[must_use]
    pub fn with_installed_packages(mut self, packages: Vec<InstalledPackage>) -> Self {
        self.installed_packages = packages;
        self
    }
}
impl Default for ResolutionContext {
fn default() -> Self {
Self::new()
}
}
/// Result of dependency resolution
///
/// Pairs the fully resolved node set with an execution plan whose
/// batches respect dependency order.
#[derive(Clone, Debug)]
pub struct ResolutionResult {
    /// Resolved dependency graph
    pub nodes: HashMap<PackageId, ResolvedNode>,
    /// Execution plan with topological order
    pub execution_plan: ExecutionPlan,
}
impl ResolutionResult {
    /// Return every resolved node in the order the execution plan
    /// schedules it (dependencies before dependents).
    #[must_use]
    pub fn packages_in_order(&self) -> Vec<&ResolvedNode> {
        let mut ordered = Vec::new();
        for batch in self.execution_plan.batches() {
            for id in batch {
                if let Some(node) = self.nodes.get(id) {
                    ordered.push(node);
                }
            }
        }
        ordered
    }

    /// Return the nodes that declare at least one outgoing dependency edge
    /// of the given kind.
    ///
    /// NOTE(review): this filters by a node's own dependency edges, not by
    /// how the node itself was requested — confirm that is the intent.
    #[must_use]
    pub fn packages_by_kind(&self, kind: &DepKind) -> Vec<&ResolvedNode> {
        let mut matching = Vec::new();
        for node in self.nodes.values() {
            if node.deps.iter().any(|edge| &edge.kind == kind) {
                matching.push(node);
            }
        }
        matching
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
alexykn/sps2 | https://github.com/alexykn/sps2/blob/a357a9ae7317314ef1605ce29b66f064bd6eb510/crates/resolver/src/resolver.rs | crates/resolver/src/resolver.rs | //! Main dependency resolver implementation
use crate::graph::DependencyGraph;
use crate::sat::{Clause, DependencyProblem, Literal, PackageVersion};
use crate::{
DepEdge, DepKind, ExecutionPlan, PackageId, ResolutionContext, ResolutionResult, ResolvedNode,
};
use semver::Version;
use sps2_errors::{Error, PackageError};
use sps2_events::{EventEmitter, EventSender};
use sps2_index::{IndexManager, VersionEntry};
use sps2_platform::{PlatformContext, PlatformManager};
use sps2_types::package::PackageSpec;
use sps2_types::version::VersionConstraint;
use sps2_types::Manifest;
use std::collections::{HashMap, HashSet};
use std::path::Path;
/// Type alias for version entries map to reduce complexity
///
/// Maps `(package name, version)` to the matching index entry plus the
/// dependency kind under which that version was requested.
type VersionEntriesMap<'a> = HashMap<(String, Version), (&'a VersionEntry, DepKind)>;
/// Parameters for processing a single transitive dependency
struct TransitiveDependencyParams<'a> {
    /// Name of the package that declares this dependency
    parent_name: &'a str,
    /// Version of the declaring package
    parent_version: &'a Version,
    /// Specification of the dependency being processed
    dep_spec: &'a PackageSpec,
    /// Whether the dependency is needed at runtime or build time
    dep_kind: DepKind,
}
/// Dependency resolver
///
/// Wraps the package index and an optional event channel used to report
/// progress while solving.
#[derive(Clone, Debug)]
pub struct Resolver {
    /// Package index manager
    index: IndexManager,
    /// Event sender for progress and status updates
    event_sender: Option<EventSender>,
}
impl Resolver {
/// Create new resolver with index manager
#[must_use]
pub fn new(index: IndexManager) -> Self {
Self {
index,
event_sender: None,
}
}
/// Create new resolver with index manager and event sender
#[must_use]
pub fn with_events(index: IndexManager, event_sender: EventSender) -> Self {
Self {
index,
event_sender: Some(event_sender),
}
}
/// Resolve dependencies using SAT solver for more accurate resolution
///
/// This method converts the dependency problem to a SAT problem and uses
/// a DPLL-based solver with conflict-driven clause learning to find
/// an optimal solution.
///
/// # Errors
///
/// Returns an error if:
/// - A package is not found in the index
/// - No valid solution exists (conflicting constraints)
/// - Version parsing fails
/// - Resolution does not finish within the 2-minute timeout
pub async fn resolve_with_sat(
    &self,
    context: ResolutionContext,
) -> Result<ResolutionResult, Error> {
    use tokio::time::{timeout, Duration};
    // Hard upper bound on the whole resolution so a pathological SAT
    // instance cannot hang the caller indefinitely.
    let resolution_timeout = Duration::from_secs(120);
    timeout(resolution_timeout, async {
        let mut graph = DependencyGraph::new();
        // Packages satisfied by what is already installed; consulted below
        // to avoid adding duplicate nodes for build deps.
        let mut already_satisfied = HashSet::new();
        // First, check installed packages for each dependency
        let mut remaining_package_deps: HashMap<String, Vec<(PackageSpec, DepKind)>> =
            HashMap::new();
        // Check runtime dependencies against installed packages
        for spec in &context.runtime_deps {
            if let Some(installed) = context
                .installed_packages
                .iter()
                .find(|pkg| pkg.name == spec.name && spec.version_spec.matches(&pkg.version))
            {
                // Package is already installed and satisfies spec
                let package_id =
                    PackageId::new(installed.name.clone(), installed.version.clone());
                already_satisfied.insert(package_id.clone());
                let node = ResolvedNode::local(
                    installed.name.clone(),
                    installed.version.clone(),
                    std::path::PathBuf::new(), // Empty path for installed packages
                    Vec::new(), // No dependencies to resolve for already installed packages
                );
                graph.add_node(node);
            } else {
                // Need to resolve from repository
                remaining_package_deps
                    .entry(spec.name.clone())
                    .or_default()
                    .push((spec.clone(), DepKind::Runtime));
            }
        }
        // Check build dependencies against installed packages
        for spec in &context.build_deps {
            if let Some(installed) = context
                .installed_packages
                .iter()
                .find(|pkg| pkg.name == spec.name && spec.version_spec.matches(&pkg.version))
            {
                // Package is already installed and satisfies spec
                let package_id =
                    PackageId::new(installed.name.clone(), installed.version.clone());
                // Skip if a runtime dep already added this exact node.
                if !already_satisfied.contains(&package_id) {
                    already_satisfied.insert(package_id);
                    let node = ResolvedNode::local(
                        installed.name.clone(),
                        installed.version.clone(),
                        std::path::PathBuf::new(), // Empty path for installed packages
                        Vec::new(), // No dependencies to resolve for already installed packages
                    );
                    graph.add_node(node);
                }
            } else {
                // Need to resolve from repository
                remaining_package_deps
                    .entry(spec.name.clone())
                    .or_default()
                    .push((spec.clone(), DepKind::Build));
            }
        }
        // If we have remaining dependencies to resolve, use SAT solver
        if !remaining_package_deps.is_empty() {
            // Create SAT problem for remaining dependencies
            let (mut problem, package_deps) =
                Self::create_sat_problem_from_deps(&remaining_package_deps);
            // Add available versions and constraints
            let mut version_entries =
                self.add_package_versions_to_problem(&mut problem, &package_deps);
            // Process transitive dependencies
            self.process_transitive_dependencies(&mut problem, &mut version_entries);
            // Solve and convert to dependency graph
            let solution = crate::sat::solve_dependencies(problem, self.event_sender()).await?;
            let sat_graph =
                Self::create_dependency_graph_from_solution(&solution, &version_entries)?;
            // Merge SAT results into main graph (SAT entries win on key clash)
            for (id, node) in sat_graph.nodes {
                graph.nodes.insert(id.clone(), node);
            }
            for (from, tos) in sat_graph.edges {
                graph.edges.insert(from, tos);
            }
        }
        // Handle local files
        for path in &context.local_files {
            Self::resolve_local_file(path, &mut graph).await?;
        }
        // Create execution plan
        let sorted = graph.topological_sort()?;
        let execution_plan = ExecutionPlan::from_sorted_packages(&sorted, &graph);
        Ok(ResolutionResult {
            nodes: graph.nodes,
            execution_plan,
        })
    })
    .await
    .map_err(|_| PackageError::ResolutionTimeout {
        message: "SAT-based dependency resolution timed out after 2 minutes".to_string(),
    })?
}
/// Build the initial SAT problem from the dependencies that still need
/// resolution, returning the problem together with a copy of the
/// dependency map for the caller's later use.
fn create_sat_problem_from_deps(
    package_deps: &HashMap<String, Vec<(PackageSpec, DepKind)>>,
) -> (
    DependencyProblem,
    HashMap<String, Vec<(PackageSpec, DepKind)>>,
) {
    let mut problem = DependencyProblem::new();
    // Every package that still has an unsatisfied spec must be selected.
    package_deps
        .keys()
        .for_each(|name| problem.require_package(name.clone()));
    (problem, package_deps.clone())
}
/// Add available package versions to the SAT problem
///
/// For every requested package, scans the index for versions satisfying at
/// least one of its specs, registers them as SAT variables, and records
/// cardinality constraints (exactly one version per required package).
/// Returns the accepted `(name, version)` pairs mapped to their index
/// entries and dependency kinds.
fn add_package_versions_to_problem(
    &self,
    problem: &mut DependencyProblem,
    package_deps: &HashMap<String, Vec<(PackageSpec, DepKind)>>,
) -> VersionEntriesMap<'_> {
    let mut version_entries: HashMap<(String, Version), (&VersionEntry, DepKind)> =
        HashMap::new();
    for (package_name, specs) in package_deps {
        if let Some(index) = self.index.index() {
            if let Some(package_info) = index.packages.get(package_name) {
                for (version_str, version_entry) in &package_info.versions {
                    // Index entries with unparseable versions are skipped silently.
                    if let Ok(version) = Version::parse(version_str) {
                        // Check if this version satisfies any of the specs
                        let mut satisfies_any = false;
                        let mut dep_kind = DepKind::Runtime;
                        for (spec, kind) in specs {
                            if spec.version_spec.matches(&version) {
                                satisfies_any = true;
                                // The kind of the first matching spec wins.
                                dep_kind = *kind;
                                break;
                            }
                        }
                        if satisfies_any {
                            let pv = PackageVersion::new(package_name.clone(), version.clone());
                            problem.add_package_version(pv);
                            version_entries.insert(
                                (package_name.clone(), version),
                                (version_entry, dep_kind),
                            );
                        }
                    }
                }
            }
        }
        // Add constraints for each package specification
        // At most one version can be selected
        problem.add_at_most_one_constraint(package_name);
        // At least one version must be selected (for required packages)
        problem.add_at_least_one_constraint(package_name);
        // Add version constraints as clauses
        for (spec, _kind) in specs {
            Self::add_version_constraints(problem, spec);
        }
    }
    version_entries
}
/// Process transitive dependencies
///
/// Worklist-driven walk over every `(package, version)` candidate already
/// in `version_entries`, pulling in the dependencies those candidates
/// declare and adding the corresponding SAT variables/clauses.
/// Build-time dependencies are only followed from build-dep parents.
fn process_transitive_dependencies<'a>(
    &'a self,
    problem: &mut DependencyProblem,
    version_entries: &mut VersionEntriesMap<'a>,
) {
    // Guards against cycles and duplicate expansion of the same version.
    let mut processed = HashSet::new();
    let mut to_process: Vec<(String, Version, DepKind)> = Vec::new();
    // Initialize with direct dependencies
    for ((name, version), (_entry, kind)) in &*version_entries {
        to_process.push((name.clone(), version.clone(), *kind));
    }
    while let Some((pkg_name, pkg_version, parent_kind)) = to_process.pop() {
        let key = (pkg_name.clone(), pkg_version.clone());
        if processed.contains(&key) {
            continue;
        }
        processed.insert(key.clone());
        // Clone the dependencies we need to process
        let deps_to_process = if let Some((version_entry, _)) = version_entries.get(&key) {
            let mut deps = Vec::new();
            // Collect runtime dependencies
            for dep_str in &version_entry.dependencies.runtime {
                // Malformed dependency strings are skipped silently.
                if let Ok(dep_spec) = PackageSpec::parse(dep_str) {
                    deps.push((dep_spec, DepKind::Runtime));
                }
            }
            // Collect build dependencies if this is a build dependency
            if parent_kind == DepKind::Build {
                for dep_str in &version_entry.dependencies.build {
                    if let Ok(dep_spec) = PackageSpec::parse(dep_str) {
                        deps.push((dep_spec, DepKind::Build));
                    }
                }
            }
            deps
        } else {
            Vec::new()
        };
        // Now process the dependencies - process each one separately to avoid borrow issues
        for (dep_spec, dep_kind) in deps_to_process {
            let params = TransitiveDependencyParams {
                parent_name: &pkg_name,
                parent_version: &pkg_version,
                dep_spec: &dep_spec,
                dep_kind,
            };
            self.process_single_transitive_dependency(
                problem,
                &mut to_process,
                version_entries,
                &params,
            );
        }
    }
}
/// Create dependency graph from SAT solution
///
/// Turns each selected `(name, version)` into a download node (carrying
/// its signature URL and expected hash from the index) and wires edges
/// from each resolved dependency to its dependent.
///
/// # Errors
///
/// Returns an error if a selected version's download URL fails validation.
fn create_dependency_graph_from_solution(
    solution: &crate::sat::DependencySolution,
    version_entries: &VersionEntriesMap<'_>,
) -> Result<DependencyGraph, Error> {
    let mut graph = DependencyGraph::new();
    let mut resolved_nodes = HashMap::new();
    // Create nodes for selected packages
    for (name, version) in &solution.selected {
        let key = (name.clone(), version.clone());
        if let Some((version_entry, _kind)) = version_entries.get(&key) {
            let package_id = PackageId::new(name.clone(), version.clone());
            // Create dependency edges
            let mut deps = Vec::new();
            for dep_str in &version_entry.dependencies.runtime {
                if let Ok(dep_spec) = PackageSpec::parse(dep_str) {
                    deps.push(DepEdge::new(
                        dep_spec.name.clone(),
                        dep_spec.version_spec,
                        DepKind::Runtime,
                    ));
                }
            }
            let mut node = ResolvedNode::download(
                name.clone(),
                version.clone(),
                Self::resolve_download_url(&version_entry.download_url)?,
                deps,
            );
            // Propagate signature URL and expected hash from index
            node.signature_url = Some(version_entry.minisig_url.clone());
            // An unparseable hash leaves `expected_hash` unset rather than failing.
            if let Ok(hash) = sps2_hash::Hash::from_hex(&version_entry.blake3) {
                node.expected_hash = Some(hash);
            }
            resolved_nodes.insert(package_id.clone(), node.clone());
            graph.add_node(node);
        }
    }
    // Add edges to graph
    for (package_id, node) in &resolved_nodes {
        for edge in &node.deps {
            // Find the resolved version of the dependency
            if let Some(dep_version) = solution.selected.get(&edge.name) {
                let dep_id = PackageId::new(edge.name.clone(), dep_version.clone());
                graph.add_edge(&dep_id, package_id);
            }
        }
    }
    Ok(graph)
}
/// Translate a package spec's exact/not-equal constraints into unit
/// clauses that forbid the disallowed candidate versions.
fn add_version_constraints(problem: &mut DependencyProblem, spec: &PackageSpec) {
    // Snapshot the candidate versions up front: adding clauses below needs
    // `&mut problem`, so an immutable borrow cannot stay alive.
    let versions: Vec<_> = problem
        .get_package_versions(&spec.name)
        .into_iter()
        .cloned()
        .collect();
    for constraint in spec.version_spec.constraints() {
        match constraint {
            // Exact pin: forbid every candidate except the pinned version.
            VersionConstraint::Exact(v) => {
                for pv in versions.iter().filter(|pv| &pv.version != v) {
                    if let Some(var) = problem.variables.get_variable(pv) {
                        problem.add_clause(Clause::unit(Literal::negative(var)));
                    }
                }
            }
            // Exclusion: forbid exactly the named version.
            VersionConstraint::NotEqual(v) => {
                for pv in versions.iter().filter(|pv| &pv.version == v) {
                    if let Some(var) = problem.variables.get_variable(pv) {
                        problem.add_clause(Clause::unit(Literal::negative(var)));
                    }
                }
            }
            _ => {
                // Range constraints are enforced by filtering candidate
                // versions during problem setup, so no clauses are needed.
            }
        }
    }
}
/// Process a single transitive dependency
///
/// Registers every index version matching the dependency spec as a SAT
/// variable, queues those versions for further transitive expansion, and
/// adds the implication clause `parent => (dep_v1 OR dep_v2 OR ...)`.
fn process_single_transitive_dependency<'a>(
    &'a self,
    problem: &mut DependencyProblem,
    to_process: &mut Vec<(String, Version, DepKind)>,
    version_entries: &mut VersionEntriesMap<'a>,
    params: &TransitiveDependencyParams<'_>,
) {
    let parent_pv = PackageVersion::new(
        params.parent_name.to_string(),
        params.parent_version.clone(),
    );
    if let Some(index) = self.index.index() {
        if let Some(package_info) = index.packages.get(&params.dep_spec.name) {
            let mut valid_versions = Vec::new();
            for (version_str, version_entry) in &package_info.versions {
                if let Ok(version) = Version::parse(version_str) {
                    if params.dep_spec.version_spec.matches(&version) {
                        let dep_pv =
                            PackageVersion::new(params.dep_spec.name.clone(), version.clone());
                        let dep_var = problem.add_package_version(dep_pv);
                        valid_versions.push(dep_var);
                        // Add to version entries
                        version_entries.insert(
                            (params.dep_spec.name.clone(), version.clone()),
                            (version_entry, params.dep_kind),
                        );
                        // Add to processing queue
                        to_process.push((
                            params.dep_spec.name.clone(),
                            version,
                            params.dep_kind,
                        ));
                    }
                }
            }
            // If no version matched, no clause is added — the parent stays
            // selectable; unsatisfiability surfaces later if required.
            if !valid_versions.is_empty() {
                // Add implication: parent => (dep1 OR dep2 OR ...)
                // Which is equivalent to: !parent OR dep1 OR dep2 OR ...
                if let Some(parent_var) = problem.variables.get_variable(&parent_pv) {
                    let mut clause_lits = vec![Literal::negative(parent_var)];
                    clause_lits.extend(valid_versions.into_iter().map(Literal::positive));
                    problem.add_clause(Clause::new(clause_lits));
                }
                // Ensure at most one version of the dependency
                problem.add_at_most_one_constraint(&params.dep_spec.name);
            }
        }
    }
}
/// Resolve a local package file
///
/// Reads the manifest out of a local `.sp` archive and adds a local node
/// (with its runtime dependency edges) to the graph.
///
/// # Errors
///
/// Returns an error if the manifest cannot be extracted or parsed, or if
/// the package version or a dependency spec is invalid.
async fn resolve_local_file(path: &Path, graph: &mut DependencyGraph) -> Result<(), Error> {
    // Load manifest from local .sp file
    let manifest = Self::load_local_manifest(path).await?;
    let version = Version::parse(&manifest.package.version)?;
    let _package_id = PackageId::new(manifest.package.name.clone(), version.clone());
    // Create dependency edges from manifest
    let mut deps = Vec::new();
    for dep in &manifest.dependencies.runtime {
        // Parse dependency spec
        let dep_spec = PackageSpec::parse(dep)?;
        let edge = DepEdge::new(
            dep_spec.name.clone(),
            dep_spec.version_spec,
            DepKind::Runtime,
        );
        deps.push(edge);
    }
    // Create resolved node for local file
    let node = ResolvedNode::local(manifest.package.name, version, path.to_path_buf(), deps);
    graph.add_node(node);
    Ok(())
}
/// Load manifest from local .sp file
///
/// A `.sp` package is a zstd-compressed tar archive; this decompresses it
/// into a unique temporary directory, extracts `manifest.toml`, and parses
/// it. The temp directory is removed when the guard drops, including on
/// the error paths.
///
/// # Errors
///
/// Returns an error if decompression, extraction, or manifest parsing fails.
async fn load_local_manifest(path: &Path) -> Result<Manifest, Error> {
    use tokio::fs;
    // Create temporary directory for extraction
    let temp_dir =
        std::env::temp_dir().join(format!("sps2_manifest_{}", uuid::Uuid::new_v4().simple()));
    fs::create_dir_all(&temp_dir).await?;
    // Ensure cleanup on error
    let _cleanup_guard = scopeguard::guard(&temp_dir, |temp_dir| {
        if temp_dir.exists() {
            let _ = std::fs::remove_dir_all(temp_dir);
        }
    });
    // Step 1: Decompress .sp file with zstd to get tar file
    let tar_path = temp_dir.join("package.tar");
    // Use platform abstraction for process execution
    let platform = PlatformManager::instance().platform();
    let context = PlatformContext::new(None);
    let mut zstd_cmd = platform.process().create_command("zstd");
    zstd_cmd.args([
        "--decompress",
        "-o",
        &tar_path.display().to_string(),
        &path.display().to_string(),
    ]);
    let zstd_output = platform
        .process()
        .execute_command(&context, zstd_cmd)
        .await?;
    if !zstd_output.status.success() {
        return Err(PackageError::InvalidFormat {
            message: format!(
                "failed to decompress .sp file: {}",
                String::from_utf8_lossy(&zstd_output.stderr)
            ),
        }
        .into());
    }
    // Step 2: Extract only manifest.toml from tar archive
    let manifest_content = Self::extract_manifest_from_tar(&tar_path).await?;
    // Step 3: Parse the manifest
    let manifest = Manifest::from_toml(&manifest_content)?;
    Ok(manifest)
}
/// Extract manifest.toml content from tar archive
///
/// Streams just `manifest.toml` to stdout via the system `tar` binary and
/// validates that the result is non-empty UTF-8.
///
/// # Errors
///
/// Returns an error if `tar` fails, or if the manifest is missing, empty,
/// or not valid UTF-8.
async fn extract_manifest_from_tar(tar_path: &Path) -> Result<String, Error> {
    // Use platform abstraction for process execution
    let platform = PlatformManager::instance().platform();
    let context = PlatformContext::new(None);
    // Use tar to extract just the manifest.toml file and output to stdout
    let mut tar_cmd = platform.process().create_command("tar");
    tar_cmd.args([
        "--extract",
        "--file",
        &tar_path.display().to_string(),
        "--to-stdout",
        "manifest.toml",
    ]);
    let tar_output = platform
        .process()
        .execute_command(&context, tar_cmd)
        .await?;
    if !tar_output.status.success() {
        return Err(PackageError::InvalidFormat {
            message: format!(
                "failed to extract manifest from tar: {}",
                String::from_utf8_lossy(&tar_output.stderr)
            ),
        }
        .into());
    }
    let manifest_content =
        String::from_utf8(tar_output.stdout).map_err(|_| PackageError::InvalidFormat {
            message: "manifest.toml contains invalid UTF-8".to_string(),
        })?;
    if manifest_content.trim().is_empty() {
        return Err(PackageError::InvalidFormat {
            message: "manifest.toml is empty or missing".to_string(),
        }
        .into());
    }
    Ok(manifest_content)
}
/// Get available versions for a package
///
/// Delegates to the underlying index; `None` means the package is unknown.
#[must_use]
pub fn get_package_versions(&self, name: &str) -> Option<Vec<&VersionEntry>> {
    self.index.get_package_versions(name)
}
/// Search for packages
///
/// Delegates to the index's search and returns the matching names.
#[must_use]
pub fn search_packages(&self, query: &str) -> Vec<&str> {
    self.index.search(query)
}
/// Check if a package exists
///
/// A package "exists" when the index has at least a version list for it.
#[must_use]
pub fn package_exists(&self, name: &str) -> bool {
    self.index.get_package_versions(name).is_some()
}
/// Find best version for a package spec
///
/// Delegates to the index's best-match lookup for the given spec.
#[must_use]
pub fn find_best_version(&self, spec: &PackageSpec) -> Option<&VersionEntry> {
    self.index.find_best_version(spec)
}
/// Resolve download URL with repository integration
///
/// This is currently a pass-through but will be enhanced for:
/// - Mirror failover
/// - CDN optimization
/// - Repository URL resolution
///
/// Unless `SPS2_ALLOW_HTTP` is set in the environment (e.g. for tests),
/// an `http://` scheme is upgraded to `https://`.
///
/// # Errors
///
/// Returns an error if the URL is empty.
fn resolve_download_url(url: &str) -> Result<String, Error> {
    // Future enhancements:
    // - Check for repository URL patterns and resolve to CDN
    // - Support mirror failover
    // - Handle repository index entries
    // Basic URL validation
    if url.is_empty() {
        return Err(PackageError::InvalidFormat {
            message: "empty download URL".to_string(),
        }
        .into());
    }
    // Ensure HTTPS for security (skip in test mode or when explicitly disabled)
    let allow_http = std::env::var("SPS2_ALLOW_HTTP").is_ok();
    if !allow_http {
        // Upgrade only the scheme prefix. The previous `str::replace` call
        // rewrote EVERY occurrence of "http://" in the URL, which would
        // corrupt URLs embedding "http://" later (e.g. in a query string).
        if let Some(rest) = url.strip_prefix("http://") {
            return Ok(format!("https://{rest}"));
        }
    }
    Ok(url.to_string())
}
}
impl EventEmitter for Resolver {
    /// Expose the optional event channel so the shared `EventEmitter`
    /// helpers can emit progress events when a sender is attached.
    fn event_sender(&self) -> Option<&EventSender> {
        self.event_sender.as_ref()
    }
}
| rust | BSD-3-Clause | a357a9ae7317314ef1605ce29b66f064bd6eb510 | 2026-01-04T20:17:02.345249Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.