repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache/src/by_timestamp.rs | crates/uv-cache/src/by_timestamp.rs | use serde::{Deserialize, Serialize};
use uv_cache_info::Timestamp;
/// A cached payload paired with the [`Timestamp`] at which it was captured,
/// so readers can invalidate `data` when the underlying source is newer.
#[derive(Deserialize, Serialize)]
pub struct CachedByTimestamp<Data> {
    /// The timestamp at which the cached data was captured.
    pub timestamp: Timestamp,
    /// The cached payload itself.
    pub data: Data,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache/src/archive.rs | crates/uv-cache/src/archive.rs | use std::path::Path;
use std::str::FromStr;
/// A unique identifier for an archive (unzipped wheel) in the cache.
///
/// Wraps an opaque string (generated via `nanoid` in [`ArchiveId::new`]), so it
/// can be used directly as a path segment within the cache.
#[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
pub struct ArchiveId(String);
impl Default for ArchiveId {
fn default() -> Self {
Self::new()
}
}
impl ArchiveId {
/// Generate a new unique identifier for an archive.
pub fn new() -> Self {
Self(nanoid::nanoid!())
}
}
/// Allow an [`ArchiveId`] to be used directly as a path segment.
impl AsRef<Path> for ArchiveId {
    fn as_ref(&self) -> &Path {
        Path::new(&self.0)
    }
}
/// Render the identifier as its underlying string.
impl std::fmt::Display for ArchiveId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}
impl FromStr for ArchiveId {
type Err = <String as FromStr>::Err;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(s.to_string()))
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache/src/cli.rs | crates/uv-cache/src/cli.rs | use std::io;
use std::path::{Path, PathBuf};
use uv_static::EnvVars;
use crate::Cache;
use clap::{Parser, ValueHint};
use tracing::{debug, warn};
/// CLI arguments controlling the uv cache, shared across subcommands (all flags are `global`).
#[derive(Parser, Debug, Clone)]
#[command(next_help_heading = "Cache options")]
pub struct CacheArgs {
    /// Avoid reading from or writing to the cache, instead using a temporary directory for the
    /// duration of the operation.
    #[arg(
        global = true,
        long,
        short,
        alias = "no-cache-dir",
        env = EnvVars::UV_NO_CACHE,
        // Accept truthy/falsey strings (e.g. `1`, `true`, `off`) from the environment variable.
        value_parser = clap::builder::BoolishValueParser::new(),
    )]
    pub no_cache: bool,
    /// Path to the cache directory.
    ///
    /// Defaults to `$XDG_CACHE_HOME/uv` or `$HOME/.cache/uv` on macOS and Linux, and
    /// `%LOCALAPPDATA%\uv\cache` on Windows.
    ///
    /// To view the location of the cache directory, run `uv cache dir`.
    #[arg(global = true, long, env = EnvVars::UV_CACHE_DIR, value_hint = ValueHint::DirPath)]
    pub cache_dir: Option<PathBuf>,
}
impl Cache {
    /// Prefer, in order:
    ///
    /// 1. A temporary cache directory, if the user requested `--no-cache`.
    /// 2. The specific cache directory specified by the user via `--cache-dir` or `UV_CACHE_DIR`.
    /// 3. The system-appropriate cache directory.
    /// 4. A `.uv_cache` directory in the current working directory.
    ///
    /// Returns an absolute cache dir.
    pub fn from_settings(no_cache: bool, cache_dir: Option<PathBuf>) -> Result<Self, io::Error> {
        if no_cache {
            Self::temp()
        } else if let Some(cache_dir) = cache_dir {
            Ok(Self::from_path(cache_dir))
        } else if let Some(cache_dir) = uv_dirs::legacy_user_cache_dir().filter(|dir| dir.exists())
        {
            // If the user has an existing directory at (e.g.) `/Users/user/Library/Caches/uv`,
            // respect it for backwards compatibility. Otherwise, prefer the XDG strategy, even on
            // macOS.
            Ok(Self::from_path(cache_dir))
        } else if let Some(cache_dir) = uv_dirs::user_cache_dir() {
            if cfg!(windows) {
                // On Windows, we append `cache` to the LocalAppData directory, i.e., prefer
                // `C:\Users\User\AppData\Local\uv\cache` over `C:\Users\User\AppData\Local\uv`.
                //
                // Unfortunately, v0.3.0 and v0.3.1 used the latter, so we need to migrate the cache
                // for those users.
                let destination = cache_dir.join("cache");
                let source = cache_dir;
                // Migration is best-effort: on failure, warn and proceed with the new location.
                if let Err(err) = migrate_windows_cache(&source, &destination) {
                    warn!(
                        "Failed to migrate cache from `{}` to `{}`: {err}",
                        source.display(),
                        destination.display()
                    );
                }
                Ok(Self::from_path(destination))
            } else {
                Ok(Self::from_path(cache_dir))
            }
        } else {
            // No user-level directory could be resolved (e.g., `$HOME` unset); fall back to a
            // project-local cache.
            Ok(Self::from_path(".uv_cache"))
        }
    }
}
impl TryFrom<CacheArgs> for Cache {
type Error = io::Error;
fn try_from(value: CacheArgs) -> Result<Self, Self::Error> {
Self::from_settings(value.no_cache, value.cache_dir)
}
}
/// Migrate the Windows cache from `C:\Users\User\AppData\Local\uv` to `C:\Users\User\AppData\Local\uv\cache`.
///
/// Moves each known v0.3.0 cache bucket and cache file from `source` into
/// `destination`, skipping entries that don't exist. Returns the first I/O
/// error encountered, if any.
fn migrate_windows_cache(source: &Path, destination: &Path) -> Result<(), io::Error> {
    // The list of expected cache buckets in v0.3.0.
    for directory in [
        "built-wheels-v3",
        "flat-index-v0",
        "git-v0",
        "interpreter-v2",
        "simple-v12",
        "wheels-v1",
        "archive-v0",
        "builds-v0",
        "environments-v1",
    ] {
        // Migrate the cache bucket.
        migrate_cache_entry(source, destination, directory, "bucket")?;
    }
    // The list of expected cache files in v0.3.0.
    for file in [".gitignore", "CACHEDIR.TAG"] {
        // Migrate the cache file.
        migrate_cache_entry(source, destination, file, "file")?;
    }
    Ok(())
}

/// Move a single cache entry named `name` from `source` to `destination`, creating the
/// destination's parent directory if necessary. Missing entries are a no-op.
///
/// `kind` describes the entry (`"bucket"` or `"file"`) and is used only for logging.
fn migrate_cache_entry(
    source: &Path,
    destination: &Path,
    name: &str,
    kind: &str,
) -> Result<(), io::Error> {
    let source = source.join(name);
    let destination = destination.join(name);
    if source.exists() {
        debug!(
            "Migrating cache {} from {} to {}",
            kind,
            source.display(),
            destination.display()
        );
        if let Some(parent) = destination.parent() {
            fs_err::create_dir_all(parent)?;
        }
        // `rename` is atomic on the same volume, which is always the case here.
        fs_err::rename(&source, &destination)?;
    }
    Ok(())
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache/src/wheel.rs | crates/uv-cache/src/wheel.rs | use std::path::{Path, PathBuf};
use uv_cache_key::{CanonicalUrl, cache_digest};
use uv_distribution_types::IndexUrl;
use uv_redacted::DisplaySafeUrl;
/// Cache wheels and their metadata, both from remote wheels and built from source distributions.
///
/// Each variant determines how the cache entry is keyed (see [`WheelCache::root`]).
#[derive(Debug, Clone)]
pub enum WheelCache<'a> {
    /// Either PyPI or an alternative index, which we key by index URL.
    Index(&'a IndexUrl),
    /// A direct URL dependency, which we key by URL.
    Url(&'a DisplaySafeUrl),
    /// A path dependency, which we key by URL.
    Path(&'a DisplaySafeUrl),
    /// An editable dependency, which we key by URL.
    Editable(&'a DisplaySafeUrl),
    /// A Git dependency, which we key by URL (including LFS state), SHA.
    ///
    /// Note that this variant only exists for source distributions; wheels can't be delivered
    /// through Git.
    Git(&'a DisplaySafeUrl, &'a str),
}
impl WheelCache<'_> {
    /// The root directory for a cache bucket.
    ///
    /// Non-PyPI variants are keyed by a digest of the canonicalized URL, so that
    /// equivalent URLs share a cache entry; Git entries additionally nest by SHA.
    pub fn root(&self) -> PathBuf {
        match self {
            // PyPI gets a fixed, digest-free location.
            Self::Index(IndexUrl::Pypi(_)) => WheelCacheKind::Pypi.root(),
            Self::Index(url) => WheelCacheKind::Index
                .root()
                .join(cache_digest(&CanonicalUrl::new(url.url()))),
            Self::Url(url) => WheelCacheKind::Url
                .root()
                .join(cache_digest(&CanonicalUrl::new(url))),
            Self::Path(url) => WheelCacheKind::Path
                .root()
                .join(cache_digest(&CanonicalUrl::new(url))),
            Self::Editable(url) => WheelCacheKind::Editable
                .root()
                .join(cache_digest(&CanonicalUrl::new(url))),
            Self::Git(url, sha) => WheelCacheKind::Git
                .root()
                .join(cache_digest(&CanonicalUrl::new(url)))
                .join(sha),
        }
    }

    /// A subdirectory in a bucket for wheels for a specific package.
    pub fn wheel_dir(&self, package_name: impl AsRef<Path>) -> PathBuf {
        self.root().join(package_name)
    }
}
/// The kind of wheel cache bucket; determines the top-level directory name
/// (see [`WheelCacheKind::to_str`]).
#[derive(Debug, Clone, Copy)]
pub(crate) enum WheelCacheKind {
    /// A cache of data from PyPI.
    Pypi,
    /// A cache of data from an alternative index.
    Index,
    /// A cache of data from an arbitrary URL.
    Url,
    /// A cache of data from a local path.
    Path,
    /// A cache of data from an editable URL.
    Editable,
    /// A cache of data from a Git repository.
    Git,
}
impl WheelCacheKind {
pub(crate) fn to_str(self) -> &'static str {
match self {
Self::Pypi => "pypi",
Self::Index => "index",
Self::Url => "url",
Self::Path => "path",
Self::Editable => "editable",
Self::Git => "git",
}
}
pub(crate) fn root(self) -> PathBuf {
Path::new(self.to_str()).to_path_buf()
}
}
/// Allow a [`WheelCacheKind`] to be used directly as a path segment.
impl AsRef<Path> for WheelCacheKind {
    fn as_ref(&self) -> &Path {
        Path::new(self.to_str())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache/src/removal.rs | crates/uv-cache/src/removal.rs | //! Derived from Cargo's `clean` implementation.
//! Cargo is dual-licensed under either Apache 2.0 or MIT, at the user's choice.
//! Source: <https://github.com/rust-lang/cargo/blob/e1ebce1035f9b53bb46a55bd4b0ecf51e24c6458/src/cargo/ops/cargo_clean.rs#L324>
use std::io;
use std::path::Path;
use crate::CleanReporter;
/// Remove a file or directory and all its contents, returning a [`Removal`] with
/// the number of files and directories removed, along with a total byte count.
pub fn rm_rf(path: impl AsRef<Path>) -> io::Result<Removal> {
    let remover = Remover::default();
    remover.rm_rf(path, false)
}
/// Removes files and directories, optionally reporting progress to a [`CleanReporter`].
#[derive(Default)]
pub(crate) struct Remover {
    reporter: Option<Box<dyn CleanReporter>>,
}

impl Remover {
    /// Create a new [`Remover`] with the given reporter.
    pub(crate) fn new(reporter: Box<dyn CleanReporter>) -> Self {
        Self {
            reporter: Some(reporter),
        }
    }

    /// Remove a file or directory and all its contents, returning a [`Removal`] with
    /// the number of files and directories removed, along with a total byte count.
    pub(crate) fn rm_rf(
        &self,
        path: impl AsRef<Path>,
        skip_locked_file: bool,
    ) -> io::Result<Removal> {
        let mut stats = Removal::default();
        stats.rm_rf(path.as_ref(), self.reporter.as_deref(), skip_locked_file)?;
        Ok(stats)
    }
}
/// A removal operation with statistics on the number of files and directories removed.
#[derive(Debug, Default)]
pub struct Removal {
    /// The number of files removed.
    pub num_files: u64,
    /// The number of directories removed.
    pub num_dirs: u64,
    /// The total number of bytes removed.
    ///
    /// Note: this will both over-count bytes removed for hard-linked files, and under-count
    /// bytes in general since it's a measure of the exact byte size (as opposed to the block size).
    pub total_bytes: u64,
}
impl Removal {
    /// Recursively remove a file or directory and all its contents.
    ///
    /// Accumulates file/directory counts and byte totals into `self`, invoking the
    /// optional `reporter` after each removal. If `skip_locked_file` is set, a
    /// top-level `.lock` file and the root directory itself are left in place so
    /// the caller can release the lock afterwards.
    fn rm_rf(
        &mut self,
        path: &Path,
        reporter: Option<&dyn CleanReporter>,
        skip_locked_file: bool,
    ) -> io::Result<()> {
        // Use `symlink_metadata` so symlinks are inspected, not followed.
        let metadata = match fs_err::symlink_metadata(path) {
            Ok(metadata) => metadata,
            // Already gone: nothing to do.
            Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(()),
            Err(err) => return Err(err),
        };
        if !metadata.is_dir() {
            self.num_files += 1;
            // Remove the file.
            self.total_bytes += metadata.len();
            if metadata.is_symlink() {
                #[cfg(windows)]
                {
                    use std::os::windows::fs::FileTypeExt;
                    // On Windows, a symlink to a directory must be removed with `remove_dir`.
                    if metadata.file_type().is_symlink_dir() {
                        remove_dir(path)?;
                    } else {
                        remove_file(path)?;
                    }
                }
                #[cfg(not(windows))]
                {
                    remove_file(path)?;
                }
            } else {
                remove_file(path)?;
            }
            reporter.map(CleanReporter::on_clean);
            return Ok(());
        }
        // `contents_first` yields children before their parent directory, so each
        // directory should be empty by the time it is visited.
        for entry in walkdir::WalkDir::new(path).contents_first(true) {
            // If we hit a directory that lacks read permissions, try to make it readable.
            if let Err(ref err) = entry {
                if err
                    .io_error()
                    .is_some_and(|err| err.kind() == io::ErrorKind::PermissionDenied)
                {
                    if let Some(dir) = err.path() {
                        if set_readable(dir).unwrap_or(false) {
                            // Retry the operation; if we _just_ `self.rm_rf(dir)` and continue,
                            // `walkdir` may give us duplicate entries for the directory.
                            return self.rm_rf(path, reporter, skip_locked_file);
                        }
                    }
                }
            }
            let entry = entry?;
            // Remove the exclusive lock last.
            if skip_locked_file
                && entry.file_name() == ".lock"
                && entry
                    .path()
                    .strip_prefix(path)
                    .is_ok_and(|suffix| suffix == Path::new(".lock"))
            {
                continue;
            }
            if entry.file_type().is_symlink() && {
                // Directory symlinks on Windows need `remove_dir`; everywhere else,
                // symlinks are treated as files.
                #[cfg(windows)]
                {
                    use std::os::windows::fs::FileTypeExt;
                    entry.file_type().is_symlink_dir()
                }
                #[cfg(not(windows))]
                {
                    false
                }
            } {
                self.num_files += 1;
                remove_dir(entry.path())?;
            } else if entry.file_type().is_dir() {
                // Remove the directory with the exclusive lock last.
                if skip_locked_file && entry.path() == path {
                    continue;
                }
                self.num_dirs += 1;
                // The contents should have been removed by now, but sometimes a race condition is
                // hit where other files have been added by the OS. Fall back to `remove_dir_all`,
                // which will remove the directory robustly across platforms.
                remove_dir_all(entry.path())?;
            } else {
                self.num_files += 1;
                // Remove the file.
                if let Ok(meta) = entry.metadata() {
                    self.total_bytes += meta.len();
                }
                remove_file(entry.path())?;
            }
            reporter.map(CleanReporter::on_clean);
        }
        reporter.map(CleanReporter::on_complete);
        Ok(())
    }
}
/// Accumulate the statistics from another [`Removal`] into this one.
impl std::ops::AddAssign for Removal {
    fn add_assign(&mut self, other: Self) {
        let Self {
            num_files,
            num_dirs,
            total_bytes,
        } = other;
        self.num_files += num_files;
        self.num_dirs += num_dirs;
        self.total_bytes += total_bytes;
    }
}
/// If the directory isn't readable by the current user, change the permissions to make it readable.
///
/// Returns `true` if the permissions were changed. On Windows, this is a no-op.
#[cfg_attr(windows, allow(unused_variables, clippy::unnecessary_wraps))]
fn set_readable(path: &Path) -> io::Result<bool> {
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut permissions = fs_err::metadata(path)?.permissions();
        let mode = permissions.mode();
        // If neither the owner-read nor owner-execute bit is set, add both so the
        // directory can be traversed and its entries listed.
        if mode & 0o500 == 0 {
            permissions.set_mode(mode | 0o500);
            fs_err::set_permissions(path, permissions)?;
            return Ok(true);
        }
    }
    Ok(false)
}
/// If the file is readonly, change the permissions to make it _not_ readonly.
///
/// Returns `true` if the permissions were changed.
fn set_not_readonly(path: &Path) -> io::Result<bool> {
    let mut permissions = fs_err::metadata(path)?.permissions();
    if permissions.readonly() {
        // We're about to delete the file, so it's fine to set the permissions to world-writable.
        #[allow(clippy::permissions_set_readonly_false)]
        permissions.set_readonly(false);
        fs_err::set_permissions(path, permissions)?;
        Ok(true)
    } else {
        Ok(false)
    }
}
/// Like [`fs_err::remove_file`], but attempts to change the permissions to force the file to be
/// deleted (if it is readonly).
fn remove_file(path: &Path) -> io::Result<()> {
    match fs_err::remove_file(path) {
        // On permission errors, clear the readonly flag and retry once.
        Err(err)
            if err.kind() == io::ErrorKind::PermissionDenied
                && set_not_readonly(path).unwrap_or(false) =>
        {
            fs_err::remove_file(path)
        }
        result => result,
    }
}
/// Like [`fs_err::remove_dir`], but attempts to change the permissions to force the directory to
/// be deleted (if it is readonly).
fn remove_dir(path: &Path) -> io::Result<()> {
    match fs_err::remove_dir(path) {
        // On permission errors, make the directory readable and retry once.
        Err(err)
            if err.kind() == io::ErrorKind::PermissionDenied
                && set_readable(path).unwrap_or(false) =>
        {
            fs_err::remove_dir(path)
        }
        result => result,
    }
}
/// Like [`fs_err::remove_dir_all`], but attempts to change the permissions to force the directory
/// to be deleted (if it is readonly).
fn remove_dir_all(path: &Path) -> io::Result<()> {
    match fs_err::remove_dir_all(path) {
        // On permission errors, make the directory readable and retry once.
        Err(err)
            if err.kind() == io::ErrorKind::PermissionDenied
                && set_readable(path).unwrap_or(false) =>
        {
            fs_err::remove_dir_all(path)
        }
        result => result,
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-dirs/src/lib.rs | crates/uv-dirs/src/lib.rs | use std::{
env,
ffi::OsString,
path::{Path, PathBuf},
};
use etcetera::BaseStrategy;
use uv_static::EnvVars;
/// Returns an appropriate user-level directory for storing executables.
///
/// This follows, in order:
///
/// - `$OVERRIDE_VARIABLE` (if provided)
/// - `$XDG_BIN_HOME`
/// - `$XDG_DATA_HOME/../bin`
/// - `$HOME/.local/bin`
///
/// On all platforms.
///
/// Returns `None` if a directory cannot be found, i.e., if `$HOME` cannot be resolved. Does not
/// check if the directory exists.
pub fn user_executable_directory(override_variable: Option<&'static str>) -> Option<PathBuf> {
    // Only absolute paths are accepted from environment variables (via `parse_path`).
    if let Some(dir) = override_variable
        .and_then(std::env::var_os)
        .and_then(parse_path)
    {
        return Some(dir);
    }
    if let Some(dir) = std::env::var_os(EnvVars::XDG_BIN_HOME).and_then(parse_path) {
        return Some(dir);
    }
    if let Some(data_home) = std::env::var_os(EnvVars::XDG_DATA_HOME).and_then(parse_path) {
        return Some(data_home.join("../bin"));
    }
    let home_dir = etcetera::home_dir().ok();
    home_dir.map(|home| home.join(".local").join("bin"))
}
/// Returns an appropriate user-level directory for storing the cache.
///
/// Corresponds to `$XDG_CACHE_HOME/uv` on Unix.
pub fn user_cache_dir() -> Option<PathBuf> {
    let dirs = etcetera::base_strategy::choose_base_strategy().ok()?;
    Some(dirs.cache_dir().join("uv"))
}
/// Returns the legacy cache directory path.
///
/// Uses `/Users/user/Library/Application Support/uv` on macOS, in contrast to the new preference
/// for using the XDG directories on all Unix platforms.
pub fn legacy_user_cache_dir() -> Option<PathBuf> {
    let dirs = etcetera::base_strategy::choose_native_strategy().ok()?;
    let dir = dirs.cache_dir().join("uv");
    // On Windows, the legacy layout nested the cache under a `cache` subdirectory.
    if cfg!(windows) {
        Some(dir.join("cache"))
    } else {
        Some(dir)
    }
}
/// Returns an appropriate user-level directory for storing application state.
///
/// Corresponds to `$XDG_DATA_HOME/uv` on Unix.
pub fn user_state_dir() -> Option<PathBuf> {
    let dirs = etcetera::base_strategy::choose_base_strategy().ok()?;
    Some(dirs.data_dir().join("uv"))
}
/// Returns the legacy state directory path.
///
/// Uses `/Users/user/Library/Application Support/uv` on macOS, in contrast to the new preference
/// for using the XDG directories on all Unix platforms.
pub fn legacy_user_state_dir() -> Option<PathBuf> {
    let dirs = etcetera::base_strategy::choose_native_strategy().ok()?;
    let dir = dirs.data_dir().join("uv");
    // On Windows, the legacy layout nested the state under a `data` subdirectory.
    if cfg!(windows) {
        Some(dir.join("data"))
    } else {
        Some(dir)
    }
}
/// Return a [`PathBuf`] if the given [`OsString`] is an absolute path; relative
/// paths are rejected with `None`.
fn parse_path(path: OsString) -> Option<PathBuf> {
    let path = PathBuf::from(path);
    path.is_absolute().then_some(path)
}
/// Returns the path to the user configuration directory.
///
/// On Windows, use, e.g., C:\Users\Alice\AppData\Roaming
/// On Linux and macOS, use `XDG_CONFIG_HOME` or $HOME/.config, e.g., /home/alice/.config.
pub fn user_config_dir() -> Option<PathBuf> {
    let dirs = etcetera::choose_base_strategy().ok()?;
    Some(dirs.config_dir())
}
/// Returns the `uv` subdirectory of the user configuration directory, if resolvable.
pub fn user_uv_config_dir() -> Option<PathBuf> {
    user_config_dir().map(|dir| dir.join("uv"))
}
#[cfg(not(windows))]
fn locate_system_config_xdg(value: Option<&str>) -> Option<PathBuf> {
    // On Linux and macOS, read the `XDG_CONFIG_DIRS` environment variable.
    use std::path::Path;

    // An unset or empty value falls back to the XDG default search path.
    let config_dirs = match value {
        Some(value) if !value.is_empty() => value,
        _ => "/etc/xdg",
    };
    // Stop at the first empty segment; return the first directory containing `uv/uv.toml`.
    config_dirs
        .split(':')
        .take_while(|segment| !segment.is_empty())
        .map(|segment| Path::new(segment).join("uv").join("uv.toml"))
        .find(|candidate| candidate.is_file())
}
#[cfg(windows)]
fn locate_system_config_windows(system_drive: impl AsRef<Path>) -> Option<PathBuf> {
    // On Windows, use `%SYSTEMDRIVE%\ProgramData\uv\uv.toml` (e.g., `C:\ProgramData`).
    let candidate = system_drive
        .as_ref()
        .join("ProgramData")
        .join("uv")
        .join("uv.toml");
    if candidate.is_file() {
        Some(candidate)
    } else {
        None
    }
}
/// Returns the path to the system configuration file.
///
/// On Unix-like systems, uses the `XDG_CONFIG_DIRS` environment variable (falling back to
/// `/etc/xdg/uv/uv.toml` if unset or empty) and then `/etc/uv/uv.toml`
///
/// On Windows, uses `%SYSTEMDRIVE%\ProgramData\uv\uv.toml`.
pub fn system_config_file() -> Option<PathBuf> {
    #[cfg(windows)]
    {
        // e.g., `C:` -> `C:\ProgramData\uv\uv.toml`; trailing separator required for `join`.
        env::var(EnvVars::SYSTEMDRIVE)
            .ok()
            .and_then(|system_drive| locate_system_config_windows(format!("{system_drive}\\")))
    }
    #[cfg(not(windows))]
    {
        if let Some(path) =
            locate_system_config_xdg(env::var(EnvVars::XDG_CONFIG_DIRS).ok().as_deref())
        {
            return Some(path);
        }
        // Fallback to `/etc/uv/uv.toml` if `XDG_CONFIG_DIRS` is not set or no valid
        // path is found.
        let candidate = Path::new("/etc/uv/uv.toml");
        match candidate.try_exists() {
            Ok(true) => Some(candidate.to_path_buf()),
            Ok(false) => None,
            // Treat unreadable filesystem state as "no system config", but log it.
            Err(err) => {
                tracing::warn!("Failed to query system configuration file: {err}");
                None
            }
        }
    }
}
#[cfg(test)]
mod test {
    #[cfg(windows)]
    use crate::locate_system_config_windows;
    #[cfg(not(windows))]
    use crate::locate_system_config_xdg;
    use assert_fs::fixture::FixtureError;
    use assert_fs::prelude::*;
    use indoc::indoc;

    // Exercises the `XDG_CONFIG_DIRS`-style search: empty and degenerate inputs
    // yield `None`, and earlier entries in the colon-separated list win.
    #[test]
    #[cfg(not(windows))]
    fn test_locate_system_config_xdg() -> Result<(), FixtureError> {
        // Write a `uv.toml` to a temporary directory.
        let context = assert_fs::TempDir::new()?;
        context.child("uv").child("uv.toml").write_str(indoc! {
            r#"
            [pip]
            index-url = "https://test.pypi.org/simple"
            "#,
        })?;
        // None
        assert_eq!(locate_system_config_xdg(None), None);
        // Empty string
        assert_eq!(locate_system_config_xdg(Some("")), None);
        // Single colon
        assert_eq!(locate_system_config_xdg(Some(":")), None);
        // Assert that the `system_config_file` function returns the correct path.
        assert_eq!(
            locate_system_config_xdg(Some(context.to_str().unwrap())).unwrap(),
            context.child("uv").child("uv.toml").path()
        );
        // Write a separate `uv.toml` to a different directory.
        let first = context.child("first");
        let first_config = first.child("uv").child("uv.toml");
        first_config.write_str("")?;
        // The first matching entry in the search path takes precedence.
        assert_eq!(
            locate_system_config_xdg(Some(
                format!("{}:{}", first.to_string_lossy(), context.to_string_lossy()).as_str()
            ))
            .unwrap(),
            first_config.path()
        );
        Ok(())
    }

    // An unreadable config directory should be treated as missing, not as an error.
    #[test]
    #[cfg(unix)]
    fn test_locate_system_config_xdg_unix_permissions() -> Result<(), FixtureError> {
        let context = assert_fs::TempDir::new()?;
        let config = context.child("uv").child("uv.toml");
        config.write_str("")?;
        // Strip all permissions so the directory cannot be read.
        fs_err::set_permissions(
            &context,
            std::os::unix::fs::PermissionsExt::from_mode(0o000),
        )
        .unwrap();
        assert_eq!(
            locate_system_config_xdg(Some(context.to_str().unwrap())),
            None
        );
        Ok(())
    }

    // Exercises the `%SYSTEMDRIVE%\ProgramData\uv\uv.toml` lookup on Windows.
    #[test]
    #[cfg(windows)]
    fn test_windows_config() -> Result<(), FixtureError> {
        // Write a `uv.toml` to a temporary directory.
        let context = assert_fs::TempDir::new()?;
        context
            .child("ProgramData")
            .child("uv")
            .child("uv.toml")
            .write_str(indoc! { r#"
            [pip]
            index-url = "https://test.pypi.org/simple"
            "#})?;
        // This is typically only a drive (that is, letter and colon) but we
        // allow anything, including a path to the test fixtures...
        assert_eq!(
            locate_system_config_windows(context.path()).unwrap(),
            context
                .child("ProgramData")
                .child("uv")
                .child("uv.toml")
                .path()
        );
        // This does not have a `ProgramData` child, so contains no config.
        let context = assert_fs::TempDir::new()?;
        assert_eq!(locate_system_config_windows(context.path()), None);
        Ok(())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-macros/src/lib.rs | crates/uv-macros/src/lib.rs | mod options_metadata;
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use syn::spanned::Spanned;
use syn::{Attribute, DeriveInput, ImplItem, ItemImpl, LitStr, parse_macro_input};
/// Derive `OptionsMetadata` for a struct with named, documented option fields.
#[proc_macro_derive(OptionsMetadata, attributes(option, doc, option_group))]
pub fn derive_options_metadata(input: TokenStream) -> TokenStream {
    let ast = parse_macro_input!(input as DeriveInput);
    // Surface derive errors as `compile_error!` invocations rather than panicking.
    match options_metadata::derive_impl(ast) {
        Ok(tokens) => tokens.into(),
        Err(err) => err.into_compile_error().into(),
    }
}
/// Derive a `Combine` implementation that merges each named field pairwise.
#[proc_macro_derive(CombineOptions)]
pub fn derive_combine(input: TokenStream) -> TokenStream {
    let ast = parse_macro_input!(input as DeriveInput);
    impl_combine(&ast)
}
/// Generate a `crate::Combine` impl that combines each named field with `combine`.
///
/// Panics (via `unimplemented!`) for anything other than a struct with named fields.
fn impl_combine(ast: &DeriveInput) -> TokenStream {
    let name = &ast.ident;
    let syn::Data::Struct(syn::DataStruct {
        fields: syn::Fields::Named(ref fields),
        ..
    }) = ast.data
    else {
        unimplemented!();
    };
    // One `field: self.field.combine(other.field)` initializer per field.
    let combines = fields.named.iter().map(|field| {
        let field_name = &field.ident;
        quote! {
            #field_name: self.#field_name.combine(other.#field_name)
        }
    });
    quote! {
        impl crate::Combine for #name {
            fn combine(self, other: #name) -> #name {
                #name {
                    #(#combines),*
                }
            }
        }
    }
    .into()
}
/// Collect the `#[doc = "..."]` attributes into a single newline-joined string,
/// trimming each line.
fn get_doc_comment(attrs: &[Attribute]) -> String {
    let mut lines = Vec::new();
    for attr in attrs {
        if !attr.path().is_ident("doc") {
            continue;
        }
        let syn::Meta::NameValue(meta) = &attr.meta else {
            continue;
        };
        let syn::Expr::Lit(expr) = &meta.value else {
            continue;
        };
        if let syn::Lit::Str(lit) = &expr.lit {
            lines.push(lit.value().trim().to_string());
        }
    }
    lines.join("\n")
}
/// Extract the string argument of an `#[attr_env_var_pattern("...")]` attribute, if present.
fn get_env_var_pattern_from_attr(attrs: &[Attribute]) -> Option<String> {
    let attr = attrs
        .iter()
        .find(|attr| attr.path().is_ident("attr_env_var_pattern"))?;
    let lit = attr.parse_args::<LitStr>().ok()?;
    Some(lit.value())
}
/// Extract the string argument of an `#[attr_added_in("...")]` attribute, if present.
fn get_added_in(attrs: &[Attribute]) -> Option<String> {
    let attr = attrs
        .iter()
        .find(|attr| attr.path().is_ident("attr_added_in"))?;
    let lit = attr.parse_args::<LitStr>().ok()?;
    Some(lit.value())
}
/// Returns `true` if the item carries an `#[attr_hidden]` attribute, excluding it
/// from generated metadata.
fn is_hidden(attrs: &[Attribute]) -> bool {
    attrs.iter().any(|attr| attr.path().is_ident("attr_hidden"))
}
/// This attribute is used to generate environment variables metadata for [`uv_static::EnvVars`].
///
/// Re-emits the annotated impl block unchanged and appends a `metadata()` method
/// returning `(name, doc, added_in)` triples for every non-hidden constant or
/// pattern-annotated function. Missing `#[attr_added_in]` annotations become
/// `compile_error!` invocations at the offending item's span.
#[proc_macro_attribute]
pub fn attribute_env_vars_metadata(_attr: TokenStream, input: TokenStream) -> TokenStream {
    let ast = parse_macro_input!(input as ItemImpl);

    // Collect `(name, doc, added_in, span)` for every eligible item in the impl block.
    let constants: Vec<_> = ast
        .items
        .iter()
        .filter_map(|item| match item {
            ImplItem::Const(item) if !is_hidden(&item.attrs) => {
                let doc = get_doc_comment(&item.attrs);
                let added_in = get_added_in(&item.attrs);
                // Only string-literal constants are supported; skip anything else.
                let syn::Expr::Lit(syn::ExprLit {
                    lit: syn::Lit::Str(lit),
                    ..
                }) = &item.expr
                else {
                    return None;
                };
                let name = lit.value();
                Some((name, doc, added_in, item.ident.span()))
            }
            ImplItem::Fn(item) if !is_hidden(&item.attrs) => {
                // Extract the environment variable patterns.
                if let Some(pattern) = get_env_var_pattern_from_attr(&item.attrs) {
                    let doc = get_doc_comment(&item.attrs);
                    let added_in = get_added_in(&item.attrs);
                    Some((pattern, doc, added_in, item.sig.span()))
                } else {
                    None // Skip if pattern extraction fails.
                }
            }
            _ => None,
        })
        .collect();

    // Look for missing attr_added_in and issue a compiler error if any are found.
    let added_in_errors: Vec<_> = constants
        .iter()
        .filter_map(|(name, _, added_in, span)| {
            added_in.is_none().then_some({
                let msg = format!(
                    "missing #[attr_added_in(\"x.y.z\")] on `{name}`\nnote: env vars for an upcoming release should be annotated with `#[attr_added_in(\"next release\")]`"
                );
                quote_spanned! {*span => compile_error!(#msg); }
            })
        })
        .collect();
    if !added_in_errors.is_empty() {
        // Re-emit the original impl so unrelated diagnostics still surface, followed
        // by the `compile_error!` invocations at the offending spans.
        return quote! { #ast #(#added_in_errors)* }.into();
    }

    let struct_name = &ast.self_ty;
    let pairs = constants.iter().map(|(name, doc, added_in, _span)| {
        if let Some(added_in) = added_in {
            quote! { (#name, #doc, Some(#added_in)) }
        } else {
            quote! { (#name, #doc, None) }
        }
    });
    let expanded = quote! {
        #ast
        impl #struct_name {
            /// Returns a list of pairs of env var and their documentation defined in this impl block.
            pub fn metadata<'a>() -> &'a [(&'static str, &'static str, Option<&'static str>)] {
                &[#(#pairs),*]
            }
        }
    };
    expanded.into()
}
/// Marker attribute consumed by [`attribute_env_vars_metadata`]; a no-op passthrough on its own.
#[proc_macro_attribute]
pub fn attr_hidden(_attr: TokenStream, item: TokenStream) -> TokenStream {
    item
}
/// Marker attribute consumed by [`attribute_env_vars_metadata`]; a no-op passthrough on its own.
#[proc_macro_attribute]
pub fn attr_env_var_pattern(_attr: TokenStream, item: TokenStream) -> TokenStream {
    item
}
/// Marker attribute consumed by [`attribute_env_vars_metadata`]; a no-op passthrough on its own.
#[proc_macro_attribute]
pub fn attr_added_in(_attr: TokenStream, item: TokenStream) -> TokenStream {
    item
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-macros/src/options_metadata.rs | crates/uv-macros/src/options_metadata.rs | //! Taken directly from Ruff.
//!
//! See: <https://github.com/astral-sh/ruff/blob/dc8db1afb08704ad6a788c497068b01edf8b460d/crates/ruff_macros/src/config.rs>
use proc_macro2::{TokenStream, TokenTree};
use quote::{quote, quote_spanned};
use syn::meta::ParseNestedMeta;
use syn::spanned::Spanned;
use syn::{
AngleBracketedGenericArguments, Attribute, Data, DataStruct, DeriveInput, ExprLit, Field,
Fields, GenericArgument, Lit, LitStr, Meta, Path, PathArguments, PathSegment, Type, TypePath,
};
use textwrap::dedent;
/// Generate the `OptionsMetadata` impl for a struct with named fields.
///
/// Each field contributes to the generated `record` body: `#[option]` fields are
/// recorded individually, `#[option_group]` fields delegate to the nested options
/// struct, and `#[serde(flatten)]` fields are flattened into the parent. The
/// struct's doc comment (if any) becomes the `documentation()` override.
pub(crate) fn derive_impl(input: DeriveInput) -> syn::Result<TokenStream> {
    let DeriveInput {
        ident,
        data,
        attrs: struct_attributes,
        ..
    } = input;
    match data {
        Data::Struct(DataStruct {
            fields: Fields::Named(fields),
            ..
        }) => {
            let mut output = vec![];
            for field in &fields.named {
                if let Some(attr) = field
                    .attrs
                    .iter()
                    .find(|attr| attr.path().is_ident("option"))
                {
                    output.push(handle_option(field, attr)?);
                } else if field
                    .attrs
                    .iter()
                    .any(|attr| attr.path().is_ident("option_group"))
                {
                    output.push(handle_option_group(field)?);
                } else if let Some(serde) = field
                    .attrs
                    .iter()
                    .find(|attr| attr.path().is_ident("serde"))
                {
                    // If a field has the `serde(flatten)` attribute, flatten the options into the parent
                    // by calling `Type::record` instead of `visitor.visit_set`
                    if let (Type::Path(ty), Meta::List(list)) = (&field.ty, &serde.meta) {
                        // Scan the raw `serde(...)` tokens for a `flatten` identifier.
                        for token in list.tokens.clone() {
                            if let TokenTree::Ident(ident) = token {
                                if ident == "flatten" {
                                    output.push(quote_spanned!(
                                        ty.span() => (<#ty>::record(visit))
                                    ));
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            let docs: Vec<&Attribute> = struct_attributes
                .iter()
                .filter(|attr| attr.path().is_ident("doc"))
                .collect();
            // Convert the list of `doc` attributes into a single string.
            let doc = dedent(
                &docs
                    .into_iter()
                    .map(parse_doc)
                    .collect::<syn::Result<Vec<_>>>()?
                    .join("\n"),
            )
            .trim_matches('\n')
            .to_string();
            // Only emit `documentation()` when the struct actually has a doc comment.
            let documentation = if doc.is_empty() {
                None
            } else {
                Some(quote!(
                    fn documentation() -> Option<&'static str> {
                        Some(&#doc)
                    }
                ))
            };
            Ok(quote! {
                #[automatically_derived]
                impl uv_options_metadata::OptionsMetadata for #ident {
                    fn record(visit: &mut dyn uv_options_metadata::Visit) {
                        #(#output);*
                    }
                    #documentation
                }
            })
        }
        _ => Err(syn::Error::new(
            ident.span(),
            "Can only derive ConfigurationOptions from structs with named fields.",
        )),
    }
}
/// For a field with type `Option<Foobar>` where `Foobar` itself is a struct
/// deriving `ConfigurationOptions`, create code that calls retrieves options
/// from that group: `Foobar::get_available_options()`
///
/// Errors unless the field's type is literally spelled `Option<_>`.
fn handle_option_group(field: &Field) -> syn::Result<TokenStream> {
    let ident = field
        .ident
        .as_ref()
        .expect("Expected to handle named fields");
    match &field.ty {
        Type::Path(TypePath {
            path: Path { segments, .. },
            ..
        }) => match segments.first() {
            Some(PathSegment {
                ident: type_ident,
                arguments:
                    PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. }),
            }) if type_ident == "Option" => {
                // The single generic argument is the nested options struct.
                let path = &args[0];
                // Option keys are kebab-case versions of the field name.
                let kebab_name = LitStr::new(&ident.to_string().replace('_', "-"), ident.span());
                Ok(quote_spanned!(
                    ident.span() => (visit.record_set(#kebab_name, uv_options_metadata::OptionSet::of::<#path>()))
                ))
            }
            _ => Err(syn::Error::new(
                ident.span(),
                "Expected `Option<_>` as type.",
            )),
        },
        _ => Err(syn::Error::new(ident.span(), "Expected type.")),
    }
}
/// Parse a `doc` attribute into it a string literal.
fn parse_doc(doc: &Attribute) -> syn::Result<String> {
match &doc.meta {
Meta::NameValue(syn::MetaNameValue {
value:
syn::Expr::Lit(ExprLit {
lit: Lit::Str(lit_str),
..
}),
..
}) => Ok(lit_str.value()),
_ => Err(syn::Error::new(doc.span(), "Expected doc attribute.")),
}
}
/// Parse an `#[option(doc="...", default="...", value_type="...",
/// example="...")]` attribute and return data in the form of an `OptionField`.
///
/// The field must carry a doc comment, which becomes the option's documentation.
/// Also honors an optional `#[deprecated(...)]` attribute and, when
/// `possible_values` is set, enumerates the clap `ValueEnum` variants.
fn handle_option(field: &Field, attr: &Attribute) -> syn::Result<TokenStream> {
    let docs: Vec<&Attribute> = field
        .attrs
        .iter()
        .filter(|attr| attr.path().is_ident("doc"))
        .collect();
    if docs.is_empty() {
        return Err(syn::Error::new(
            field.span(),
            "Missing documentation for field",
        ));
    }

    // Convert the list of `doc` attributes into a single string.
    let doc = dedent(
        &docs
            .into_iter()
            .map(parse_doc)
            .collect::<syn::Result<Vec<_>>>()?
            .join("\n"),
    )
    .trim_matches('\n')
    .to_string();

    let ident = field
        .ident
        .as_ref()
        .expect("Expected to handle named fields");
    let FieldAttributes {
        default,
        value_type,
        example,
        scope,
        possible_values,
    } = parse_field_attributes(attr)?;

    // Option keys are kebab-case versions of the field name.
    let kebab_name = LitStr::new(&ident.to_string().replace('_', "-"), ident.span());
    let scope = if let Some(scope) = scope {
        quote!(Some(#scope))
    } else {
        quote!(None)
    };

    // Translate an optional `#[deprecated(since = "...", note = "...")]` attribute
    // into an `Option<Deprecated>` expression.
    let deprecated = if let Some(deprecated) = field
        .attrs
        .iter()
        .find(|attr| attr.path().is_ident("deprecated"))
    {
        // Render an `Option<String>` as an `Option<&str>` token stream.
        fn quote_option(option: Option<String>) -> TokenStream {
            match option {
                None => quote!(None),
                Some(value) => quote!(Some(#value)),
            }
        }
        let deprecated = parse_deprecated_attribute(deprecated)?;
        let note = quote_option(deprecated.note);
        let since = quote_option(deprecated.since);
        quote!(Some(uv_options_metadata::Deprecated { since: #since, message: #note }))
    } else {
        quote!(None)
    };

    // When requested, enumerate the clap `ValueEnum` variants of the (unwrapped) field type.
    let possible_values = if possible_values == Some(true) {
        let inner_type = get_inner_type_if_option(&field.ty).unwrap_or(&field.ty);
        let inner_type = quote!(#inner_type);
        quote!(
            Some(
                <#inner_type as clap::ValueEnum>::value_variants()
                    .iter()
                    .filter_map(clap::ValueEnum::to_possible_value)
                    .map(|value| uv_options_metadata::PossibleValue {
                        name: value.get_name().to_string(),
                        help: value.get_help().map(ToString::to_string),
                    })
                    .collect()
            )
        )
    } else {
        quote!(None)
    };

    Ok(quote_spanned!(
        ident.span() => {
            visit.record_field(#kebab_name, uv_options_metadata::OptionField{
                doc: &#doc,
                default: &#default,
                value_type: &#value_type,
                example: &#example,
                scope: #scope,
                deprecated: #deprecated,
                possible_values: #possible_values,
            })
        }
    ))
}
/// The parsed contents of a single `#[option(...)]` attribute.
#[derive(Debug)]
struct FieldAttributes {
    // Rendered default value shown in documentation (mandatory).
    default: String,
    // Human-readable type description, e.g. `"str"` (mandatory).
    value_type: String,
    // Usage example, dedented and trimmed (mandatory).
    example: String,
    // Optional sub-scope under which the option is grouped.
    scope: Option<String>,
    // When `Some(true)`, enumerate clap `ValueEnum` variants in the docs.
    possible_values: Option<bool>,
}
/// Parse the `#[option(default = "..", value_type = "..", example = "..")]`
/// attribute into a [`FieldAttributes`].
///
/// `default`, `value_type`, and `example` are mandatory; `scope` and
/// `possible_values` are optional. Unknown keys are rejected.
fn parse_field_attributes(attribute: &Attribute) -> syn::Result<FieldAttributes> {
    let mut default = None;
    let mut value_type = None;
    let mut example = None;
    let mut scope = None;
    let mut possible_values = None;
    attribute.parse_nested_meta(|meta| {
        if meta.path.is_ident("default") {
            default = Some(get_string_literal(&meta, "default", "option")?.value());
        } else if meta.path.is_ident("value_type") {
            value_type = Some(get_string_literal(&meta, "value_type", "option")?.value());
        } else if meta.path.is_ident("scope") {
            scope = Some(get_string_literal(&meta, "scope", "option")?.value());
        } else if meta.path.is_ident("example") {
            // Fix: error messages for a malformed `example` previously said
            // `value_type = "..."` (copy-paste of the wrong meta name).
            let example_text = get_string_literal(&meta, "example", "option")?.value();
            // Examples are written as indented snippets; normalize them.
            example = Some(dedent(&example_text).trim_matches('\n').to_string());
        } else if meta.path.is_ident("possible_values") {
            possible_values = get_bool_literal(&meta, "possible_values", "option")?;
        } else {
            // Fix: this message previously read "Deprecated meta ...", a
            // copy-paste from `parse_deprecated_attribute`; this branch
            // handles unknown keys in `#[option]`, not `#[deprecated]`.
            return Err(syn::Error::new(
                meta.path.span(),
                format!(
                    "Unknown meta {:?} in `#[option]` attribute.",
                    meta.path.get_ident()
                ),
            ));
        }
        Ok(())
    })?;
    let Some(default) = default else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `default` field is missing in `#[option]` attribute. Specify the default using `#[option(default=\"..\")]`.",
        ));
    };
    let Some(value_type) = value_type else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `value_type` field is missing in `#[option]` attribute. Specify the value type using `#[option(value_type=\"..\")]`.",
        ));
    };
    let Some(example) = example else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `example` field is missing in `#[option]` attribute. Add an example using `#[option(example=\"..\")]`.",
        ));
    };
    Ok(FieldAttributes {
        default,
        value_type,
        example,
        scope,
        possible_values,
    })
}
fn parse_deprecated_attribute(attribute: &Attribute) -> syn::Result<DeprecatedAttribute> {
let mut deprecated = DeprecatedAttribute::default();
attribute.parse_nested_meta(|meta| {
if meta.path.is_ident("note") {
deprecated.note = Some(get_string_literal(&meta, "note", "deprecated")?.value());
} else if meta.path.is_ident("since") {
deprecated.since = Some(get_string_literal(&meta, "since", "deprecated")?.value());
} else {
return Err(syn::Error::new(
meta.path.span(),
format!(
"Deprecated meta {:?} is not supported by uv's option macro.",
meta.path.get_ident()
),
));
}
Ok(())
})?;
Ok(deprecated)
}
/// If `ty` is spelled `Option<T>` (a single-segment path with exactly one
/// generic type argument), return `T`; otherwise return `None`.
fn get_inner_type_if_option(ty: &Type) -> Option<&Type> {
    let Type::Path(type_path) = ty else {
        return None;
    };
    let segment = type_path.path.segments.first()?;
    if type_path.path.segments.len() != 1 || segment.ident != "Option" {
        return None;
    }
    let PathArguments::AngleBracketed(angle_bracketed_args) = &segment.arguments else {
        return None;
    };
    if angle_bracketed_args.args.len() != 1 {
        return None;
    }
    match &angle_bracketed_args.args[0] {
        GenericArgument::Type(inner_type) => Some(inner_type),
        _ => None,
    }
}
fn get_string_literal(
meta: &ParseNestedMeta,
meta_name: &str,
attribute_name: &str,
) -> syn::Result<syn::LitStr> {
let expr: syn::Expr = meta.value()?.parse()?;
let mut value = &expr;
while let syn::Expr::Group(e) = value {
value = &e.expr;
}
if let syn::Expr::Lit(ExprLit {
lit: Lit::Str(lit), ..
}) = value
{
let suffix = lit.suffix();
if !suffix.is_empty() {
return Err(syn::Error::new(
lit.span(),
format!("unexpected suffix `{suffix}` on string literal"),
));
}
Ok(lit.clone())
} else {
Err(syn::Error::new(
expr.span(),
format!("expected {attribute_name} attribute to be a string: `{meta_name} = \"...\"`"),
))
}
}
fn get_bool_literal(
meta: &ParseNestedMeta,
meta_name: &str,
attribute_name: &str,
) -> syn::Result<Option<bool>> {
let expr: syn::Expr = meta.value()?.parse()?;
let mut value = &expr;
while let syn::Expr::Group(e) = value {
value = &e.expr;
}
if let syn::Expr::Lit(ExprLit {
lit: Lit::Bool(lit),
..
}) = value
{
Ok(Some(lit.value))
} else {
Err(syn::Error::new(
expr.span(),
format!("expected {attribute_name} attribute to be a boolean: `{meta_name} = true`"),
))
}
}
/// The parsed contents of a `#[deprecated(...)]` attribute on an option field.
#[derive(Default, Debug)]
struct DeprecatedAttribute {
    // Version in which the option was deprecated, from `since = "..."`.
    since: Option<String>,
    // Deprecation message, from `note = "..."`.
    note: Option<String>,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-state/src/lib.rs | crates/uv-state/src/lib.rs | use std::{
io::{self, Write},
path::{Path, PathBuf},
sync::Arc,
};
use fs_err as fs;
use tempfile::{TempDir, tempdir};
/// The main state storage abstraction.
///
/// This is appropriate for storing persistent data that is not user-facing, such as managed Python
/// installations or tool environments.
#[derive(Debug, Clone)]
pub struct StateStore {
    /// The state storage.
    root: PathBuf,
    /// A temporary state storage.
    ///
    /// Included to ensure that the temporary store exists for the length of the operation, but
    /// is dropped at the end as appropriate.
    ///
    /// Wrapped in an [`Arc`] so cloned stores share (and jointly extend) the
    /// temporary directory's lifetime.
    _temp_dir_drop: Option<Arc<TempDir>>,
}
impl StateStore {
    /// A persistent state store at `root`.
    pub fn from_path(root: impl Into<PathBuf>) -> Result<Self, io::Error> {
        let root = root.into();
        Ok(Self {
            root,
            _temp_dir_drop: None,
        })
    }

    /// Create a temporary state store.
    pub fn temp() -> Result<Self, io::Error> {
        let temp_dir = tempdir()?;
        let root = temp_dir.path().to_path_buf();
        Ok(Self {
            root,
            // Keep the directory alive for as long as this store (or a clone
            // of it) exists.
            _temp_dir_drop: Some(Arc::new(temp_dir)),
        })
    }

    /// Return the root of the state store.
    pub fn root(&self) -> &Path {
        self.root.as_path()
    }

    /// Initialize the state store.
    ///
    /// Creates the root directory (if missing), seeds a `.gitignore`, and
    /// canonicalizes the stored root path.
    pub fn init(self) -> Result<Self, io::Error> {
        // Create the state store directory, if it doesn't exist.
        fs::create_dir_all(&self.root)?;
        // Add a .gitignore, but never clobber an existing one.
        let gitignore = self.root.join(".gitignore");
        match fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(gitignore)
        {
            Ok(mut file) => file.write_all(b"*")?,
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => {}
            Err(err) => return Err(err),
        }
        let root = fs::canonicalize(&self.root)?;
        Ok(Self { root, ..self })
    }

    /// The folder for a specific cache bucket
    pub fn bucket(&self, state_bucket: StateBucket) -> PathBuf {
        self.root.join(state_bucket.to_str())
    }

    /// Prefer, in order:
    ///
    /// 1. The specific state directory specified by the user.
    /// 2. The system-appropriate user-level data directory.
    /// 3. A `.uv` directory in the current working directory.
    ///
    /// Returns an absolute cache dir.
    pub fn from_settings(state_dir: Option<PathBuf>) -> Result<Self, io::Error> {
        if let Some(state_dir) = state_dir {
            return Self::from_path(state_dir);
        }
        // If the user has an existing directory at (e.g.) `/Users/user/Library/Application Support/uv`,
        // respect it for backwards compatibility. Otherwise, prefer the XDG strategy, even on
        // macOS.
        if let Some(data_dir) = uv_dirs::legacy_user_state_dir().filter(|dir| dir.exists()) {
            return Self::from_path(data_dir);
        }
        match uv_dirs::user_state_dir() {
            Some(data_dir) => Self::from_path(data_dir),
            None => Self::from_path(".uv"),
        }
    }
}
/// The different kinds of data in the state store are stored in different bucket, which in our case
/// are subdirectories of the state store root.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum StateBucket {
    /// Managed Python installations
    ManagedPython,
    /// Installed tools.
    Tools,
    /// Credentials.
    Credentials,
}
impl StateBucket {
    /// Return the subdirectory name for this bucket within the state store root.
    ///
    /// These names are on-disk paths; changing them would orphan existing state.
    fn to_str(self) -> &'static str {
        match self {
            Self::ManagedPython => "python",
            Self::Tools => "tools",
            Self::Credentials => "credentials",
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/dry_run.rs | crates/uv-configuration/src/dry_run.rs | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum DryRun {
    /// The operation should execute in dry run mode.
    Enabled,
    /// The operation should execute in dry run mode and check if the current environment is
    /// synced.
    ///
    /// `Check` implies dry-run semantics (see `DryRun::enabled`).
    Check,
    /// The operation should execute in normal mode.
    #[default]
    Disabled,
}
impl DryRun {
    /// Determine the [`DryRun`] setting based on the command-line arguments.
    ///
    /// Note: `Check` is never produced here; it is selected elsewhere.
    pub fn from_args(dry_run: bool) -> Self {
        if dry_run {
            Self::Enabled
        } else {
            Self::Disabled
        }
    }

    /// Returns `true` if dry run mode is enabled (including check mode).
    pub const fn enabled(&self) -> bool {
        // A single `matches!` with an or-pattern replaces the two chained
        // `matches!` calls — same result, one idiomatic test.
        matches!(self, Self::Enabled | Self::Check)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/target_triple.rs | crates/uv-configuration/src/target_triple.rs | use tracing::debug;
use uv_pep508::MarkerEnvironment;
use uv_platform_tags::{Arch, Os, Platform};
use uv_static::EnvVars;
/// The supported target triples. Each triple consists of an architecture, vendor, and operating
/// system.
///
/// See: <https://doc.rust-lang.org/nightly/rustc/platform-support.html>
///
/// The `serde(alias = "...")` attributes preserve compatibility with older
/// spellings of the triple names (e.g. `x8664-...` without the underscore).
#[derive(Debug, Clone, Copy, Eq, PartialEq, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum TargetTriple {
    /// An alias for `x86_64-pc-windows-msvc`, the default target for Windows.
    Windows,
    /// An alias for `x86_64-unknown-linux-gnu`, the default target for Linux.
    Linux,
    /// An alias for `aarch64-apple-darwin`, the default target for macOS.
    Macos,
    /// A 64-bit x86 Windows target.
    #[cfg_attr(feature = "clap", value(name = "x86_64-pc-windows-msvc"))]
    #[serde(rename = "x86_64-pc-windows-msvc")]
    #[serde(alias = "x8664-pc-windows-msvc")]
    X8664PcWindowsMsvc,
    /// An ARM64 Windows target.
    #[cfg_attr(feature = "clap", value(name = "aarch64-pc-windows-msvc"))]
    #[serde(rename = "aarch64-pc-windows-msvc")]
    #[serde(alias = "arm64-pc-windows-msvc")]
    Aarch64PcWindowsMsvc,
    /// A 32-bit x86 Windows target.
    #[cfg_attr(feature = "clap", value(name = "i686-pc-windows-msvc"))]
    #[serde(rename = "i686-pc-windows-msvc")]
    I686PcWindowsMsvc,
    /// An x86 Linux target. Equivalent to `x86_64-manylinux_2_28`.
    #[cfg_attr(feature = "clap", value(name = "x86_64-unknown-linux-gnu"))]
    #[serde(rename = "x86_64-unknown-linux-gnu")]
    #[serde(alias = "x8664-unknown-linux-gnu")]
    X8664UnknownLinuxGnu,
    /// An ARM-based macOS target, as seen on Apple Silicon devices
    ///
    /// By default, assumes the least-recent, non-EOL macOS version (13.0), but respects
    /// the `MACOSX_DEPLOYMENT_TARGET` environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "aarch64-apple-darwin"))]
    #[serde(rename = "aarch64-apple-darwin")]
    Aarch64AppleDarwin,
    /// An x86 macOS target.
    ///
    /// By default, assumes the least-recent, non-EOL macOS version (13.0), but respects
    /// the `MACOSX_DEPLOYMENT_TARGET` environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "x86_64-apple-darwin"))]
    #[serde(rename = "x86_64-apple-darwin")]
    #[serde(alias = "x8664-apple-darwin")]
    X8664AppleDarwin,
    /// An ARM64 Linux target. Equivalent to `aarch64-manylinux_2_28`.
    #[cfg_attr(feature = "clap", value(name = "aarch64-unknown-linux-gnu"))]
    #[serde(rename = "aarch64-unknown-linux-gnu")]
    Aarch64UnknownLinuxGnu,
    /// An ARM64 Linux target.
    #[cfg_attr(feature = "clap", value(name = "aarch64-unknown-linux-musl"))]
    #[serde(rename = "aarch64-unknown-linux-musl")]
    Aarch64UnknownLinuxMusl,
    /// An `x86_64` Linux target.
    #[cfg_attr(feature = "clap", value(name = "x86_64-unknown-linux-musl"))]
    #[serde(rename = "x86_64-unknown-linux-musl")]
    #[serde(alias = "x8664-unknown-linux-musl")]
    X8664UnknownLinuxMusl,
    /// A RISCV64 Linux target.
    // NOTE(review): the public name omits the `-gnu` suffix, unlike the other
    // gnu targets — presumably intentional; confirm before renaming.
    #[cfg_attr(feature = "clap", value(name = "riscv64-unknown-linux"))]
    #[serde(rename = "riscv64-unknown-linux")]
    Riscv64UnknownLinuxGnu,
    // `x86_64` manylinux targets, one per supported glibc minor version.
    /// An `x86_64` target for the `manylinux2014` platform. Equivalent to `x86_64-manylinux_2_17`.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux2014"))]
    #[serde(rename = "x86_64-manylinux2014")]
    #[serde(alias = "x8664-manylinux2014")]
    X8664Manylinux2014,
    /// An `x86_64` target for the `manylinux_2_17` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_17"))]
    #[serde(rename = "x86_64-manylinux_2_17")]
    #[serde(alias = "x8664-manylinux217")]
    X8664Manylinux217,
    /// An `x86_64` target for the `manylinux_2_28` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_28"))]
    #[serde(rename = "x86_64-manylinux_2_28")]
    #[serde(alias = "x8664-manylinux228")]
    X8664Manylinux228,
    /// An `x86_64` target for the `manylinux_2_31` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_31"))]
    #[serde(rename = "x86_64-manylinux_2_31")]
    #[serde(alias = "x8664-manylinux231")]
    X8664Manylinux231,
    /// An `x86_64` target for the `manylinux_2_32` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_32"))]
    #[serde(rename = "x86_64-manylinux_2_32")]
    #[serde(alias = "x8664-manylinux232")]
    X8664Manylinux232,
    /// An `x86_64` target for the `manylinux_2_33` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_33"))]
    #[serde(rename = "x86_64-manylinux_2_33")]
    #[serde(alias = "x8664-manylinux233")]
    X8664Manylinux233,
    /// An `x86_64` target for the `manylinux_2_34` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_34"))]
    #[serde(rename = "x86_64-manylinux_2_34")]
    #[serde(alias = "x8664-manylinux234")]
    X8664Manylinux234,
    /// An `x86_64` target for the `manylinux_2_35` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_35"))]
    #[serde(rename = "x86_64-manylinux_2_35")]
    #[serde(alias = "x8664-manylinux235")]
    X8664Manylinux235,
    /// An `x86_64` target for the `manylinux_2_36` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_36"))]
    #[serde(rename = "x86_64-manylinux_2_36")]
    #[serde(alias = "x8664-manylinux236")]
    X8664Manylinux236,
    /// An `x86_64` target for the `manylinux_2_37` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_37"))]
    #[serde(rename = "x86_64-manylinux_2_37")]
    #[serde(alias = "x8664-manylinux237")]
    X8664Manylinux237,
    /// An `x86_64` target for the `manylinux_2_38` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_38"))]
    #[serde(rename = "x86_64-manylinux_2_38")]
    #[serde(alias = "x8664-manylinux238")]
    X8664Manylinux238,
    /// An `x86_64` target for the `manylinux_2_39` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_39"))]
    #[serde(rename = "x86_64-manylinux_2_39")]
    #[serde(alias = "x8664-manylinux239")]
    X8664Manylinux239,
    /// An `x86_64` target for the `manylinux_2_40` platform.
    #[cfg_attr(feature = "clap", value(name = "x86_64-manylinux_2_40"))]
    #[serde(rename = "x86_64-manylinux_2_40")]
    #[serde(alias = "x8664-manylinux240")]
    X8664Manylinux240,
    // ARM64 manylinux targets, mirroring the `x86_64` tiers above.
    /// An ARM64 target for the `manylinux2014` platform. Equivalent to `aarch64-manylinux_2_17`.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux2014"))]
    #[serde(rename = "aarch64-manylinux2014")]
    Aarch64Manylinux2014,
    /// An ARM64 target for the `manylinux_2_17` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_17"))]
    #[serde(rename = "aarch64-manylinux_2_17")]
    #[serde(alias = "aarch64-manylinux217")]
    Aarch64Manylinux217,
    /// An ARM64 target for the `manylinux_2_28` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_28"))]
    #[serde(rename = "aarch64-manylinux_2_28")]
    #[serde(alias = "aarch64-manylinux228")]
    Aarch64Manylinux228,
    /// An ARM64 target for the `manylinux_2_31` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_31"))]
    #[serde(rename = "aarch64-manylinux_2_31")]
    #[serde(alias = "aarch64-manylinux231")]
    Aarch64Manylinux231,
    /// An ARM64 target for the `manylinux_2_32` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_32"))]
    #[serde(rename = "aarch64-manylinux_2_32")]
    #[serde(alias = "aarch64-manylinux232")]
    Aarch64Manylinux232,
    /// An ARM64 target for the `manylinux_2_33` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_33"))]
    #[serde(rename = "aarch64-manylinux_2_33")]
    #[serde(alias = "aarch64-manylinux233")]
    Aarch64Manylinux233,
    /// An ARM64 target for the `manylinux_2_34` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_34"))]
    #[serde(rename = "aarch64-manylinux_2_34")]
    #[serde(alias = "aarch64-manylinux234")]
    Aarch64Manylinux234,
    /// An ARM64 target for the `manylinux_2_35` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_35"))]
    #[serde(rename = "aarch64-manylinux_2_35")]
    #[serde(alias = "aarch64-manylinux235")]
    Aarch64Manylinux235,
    /// An ARM64 target for the `manylinux_2_36` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_36"))]
    #[serde(rename = "aarch64-manylinux_2_36")]
    #[serde(alias = "aarch64-manylinux236")]
    Aarch64Manylinux236,
    /// An ARM64 target for the `manylinux_2_37` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_37"))]
    #[serde(rename = "aarch64-manylinux_2_37")]
    #[serde(alias = "aarch64-manylinux237")]
    Aarch64Manylinux237,
    /// An ARM64 target for the `manylinux_2_38` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_38"))]
    #[serde(rename = "aarch64-manylinux_2_38")]
    #[serde(alias = "aarch64-manylinux238")]
    Aarch64Manylinux238,
    /// An ARM64 target for the `manylinux_2_39` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_39"))]
    #[serde(rename = "aarch64-manylinux_2_39")]
    #[serde(alias = "aarch64-manylinux239")]
    Aarch64Manylinux239,
    /// An ARM64 target for the `manylinux_2_40` platform.
    #[cfg_attr(feature = "clap", value(name = "aarch64-manylinux_2_40"))]
    #[serde(rename = "aarch64-manylinux_2_40")]
    #[serde(alias = "aarch64-manylinux240")]
    Aarch64Manylinux240,
    /// An ARM64 Android target.
    ///
    /// By default uses Android API level 24, but respects
    /// the `ANDROID_API_LEVEL` environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "aarch64-linux-android"))]
    #[serde(rename = "aarch64-linux-android")]
    Aarch64LinuxAndroid,
    /// An `x86_64` Android target.
    ///
    /// By default uses Android API level 24, but respects
    /// the `ANDROID_API_LEVEL` environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "x86_64-linux-android"))]
    #[serde(rename = "x86_64-linux-android")]
    X8664LinuxAndroid,
    /// A wasm32 target using the Pyodide 2024 platform. Meant for use with Python 3.12.
    // No explicit serde rename needed: `rename_all = "kebab-case"` already
    // yields "wasm32-pyodide2024".
    #[cfg_attr(feature = "clap", value(name = "wasm32-pyodide2024"))]
    Wasm32Pyodide2024,
    /// An ARM64 target for iOS device
    ///
    /// By default, iOS 13.0 is used, but respects the `IPHONEOS_DEPLOYMENT_TARGET`
    /// environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "arm64-apple-ios"))]
    #[serde(rename = "arm64-apple-ios")]
    Arm64Ios,
    /// An ARM64 target for iOS simulator
    ///
    /// By default, iOS 13.0 is used, but respects the `IPHONEOS_DEPLOYMENT_TARGET`
    /// environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "arm64-apple-ios-simulator"))]
    #[serde(rename = "arm64-apple-ios-simulator")]
    Arm64IosSimulator,
    /// An `x86_64` target for iOS simulator
    ///
    /// By default, iOS 13.0 is used, but respects the `IPHONEOS_DEPLOYMENT_TARGET`
    /// environment variable if set.
    #[cfg_attr(feature = "clap", value(name = "x86_64-apple-ios-simulator"))]
    #[serde(rename = "x86_64-apple-ios-simulator")]
    X8664IosSimulator,
}
impl TargetTriple {
/// Return the [`Platform`] for the target.
pub fn platform(self) -> Platform {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => Platform::new(Os::Windows, Arch::X86_64),
Self::Aarch64PcWindowsMsvc => Platform::new(Os::Windows, Arch::Aarch64),
Self::Linux | Self::X8664UnknownLinuxGnu => Platform::new(
Os::Manylinux {
major: 2,
minor: 28,
},
Arch::X86_64,
),
Self::Macos | Self::Aarch64AppleDarwin => {
let (major, minor) = macos_deployment_target().map_or((13, 0), |(major, minor)| {
debug!("Found macOS deployment target: {}.{}", major, minor);
(major, minor)
});
Platform::new(Os::Macos { major, minor }, Arch::Aarch64)
}
Self::I686PcWindowsMsvc => Platform::new(Os::Windows, Arch::X86),
Self::X8664AppleDarwin => {
let (major, minor) = macos_deployment_target().map_or((13, 0), |(major, minor)| {
debug!("Found macOS deployment target: {}.{}", major, minor);
(major, minor)
});
Platform::new(Os::Macos { major, minor }, Arch::X86_64)
}
Self::Aarch64UnknownLinuxGnu => Platform::new(
Os::Manylinux {
major: 2,
minor: 28,
},
Arch::Aarch64,
),
Self::Riscv64UnknownLinuxGnu => Platform::new(
Os::Manylinux {
major: 2,
minor: 39,
},
Arch::Riscv64,
),
Self::Aarch64UnknownLinuxMusl => {
Platform::new(Os::Musllinux { major: 1, minor: 2 }, Arch::Aarch64)
}
Self::X8664UnknownLinuxMusl => {
Platform::new(Os::Musllinux { major: 1, minor: 2 }, Arch::X86_64)
}
Self::X8664Manylinux2014 => Platform::new(
Os::Manylinux {
major: 2,
minor: 17,
},
Arch::X86_64,
),
Self::X8664Manylinux217 => Platform::new(
Os::Manylinux {
major: 2,
minor: 17,
},
Arch::X86_64,
),
Self::X8664Manylinux228 => Platform::new(
Os::Manylinux {
major: 2,
minor: 28,
},
Arch::X86_64,
),
Self::X8664Manylinux231 => Platform::new(
Os::Manylinux {
major: 2,
minor: 31,
},
Arch::X86_64,
),
Self::X8664Manylinux232 => Platform::new(
Os::Manylinux {
major: 2,
minor: 32,
},
Arch::X86_64,
),
Self::X8664Manylinux233 => Platform::new(
Os::Manylinux {
major: 2,
minor: 33,
},
Arch::X86_64,
),
Self::X8664Manylinux234 => Platform::new(
Os::Manylinux {
major: 2,
minor: 34,
},
Arch::X86_64,
),
Self::X8664Manylinux235 => Platform::new(
Os::Manylinux {
major: 2,
minor: 35,
},
Arch::X86_64,
),
Self::X8664Manylinux236 => Platform::new(
Os::Manylinux {
major: 2,
minor: 36,
},
Arch::X86_64,
),
Self::X8664Manylinux237 => Platform::new(
Os::Manylinux {
major: 2,
minor: 37,
},
Arch::X86_64,
),
Self::X8664Manylinux238 => Platform::new(
Os::Manylinux {
major: 2,
minor: 38,
},
Arch::X86_64,
),
Self::X8664Manylinux239 => Platform::new(
Os::Manylinux {
major: 2,
minor: 39,
},
Arch::X86_64,
),
Self::X8664Manylinux240 => Platform::new(
Os::Manylinux {
major: 2,
minor: 40,
},
Arch::X86_64,
),
Self::Aarch64Manylinux2014 => Platform::new(
Os::Manylinux {
major: 2,
minor: 17,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux217 => Platform::new(
Os::Manylinux {
major: 2,
minor: 17,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux228 => Platform::new(
Os::Manylinux {
major: 2,
minor: 28,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux231 => Platform::new(
Os::Manylinux {
major: 2,
minor: 31,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux232 => Platform::new(
Os::Manylinux {
major: 2,
minor: 32,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux233 => Platform::new(
Os::Manylinux {
major: 2,
minor: 33,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux234 => Platform::new(
Os::Manylinux {
major: 2,
minor: 34,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux235 => Platform::new(
Os::Manylinux {
major: 2,
minor: 35,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux236 => Platform::new(
Os::Manylinux {
major: 2,
minor: 36,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux237 => Platform::new(
Os::Manylinux {
major: 2,
minor: 37,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux238 => Platform::new(
Os::Manylinux {
major: 2,
minor: 38,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux239 => Platform::new(
Os::Manylinux {
major: 2,
minor: 39,
},
Arch::Aarch64,
),
Self::Aarch64Manylinux240 => Platform::new(
Os::Manylinux {
major: 2,
minor: 40,
},
Arch::Aarch64,
),
Self::Wasm32Pyodide2024 => Platform::new(
Os::Pyodide {
major: 2024,
minor: 0,
},
Arch::Wasm32,
),
Self::Aarch64LinuxAndroid => {
let api_level = android_api_level().map_or(24, |api_level| {
debug!("Found Android API level: {}", api_level);
api_level
});
Platform::new(Os::Android { api_level }, Arch::Aarch64)
}
Self::X8664LinuxAndroid => {
let api_level = android_api_level().map_or(24, |api_level| {
debug!("Found Android API level: {}", api_level);
api_level
});
Platform::new(Os::Android { api_level }, Arch::X86_64)
}
Self::Arm64Ios => {
let (major, minor) = ios_deployment_target().map_or((13, 0), |(major, minor)| {
debug!("Found iOS deployment target: {}.{}", major, minor);
(major, minor)
});
Platform::new(
Os::Ios {
major,
minor,
simulator: false,
},
Arch::Aarch64,
)
}
Self::Arm64IosSimulator => {
let (major, minor) = ios_deployment_target().map_or((13, 0), |(major, minor)| {
debug!("Found iOS deployment target: {}.{}", major, minor);
(major, minor)
});
Platform::new(
Os::Ios {
major,
minor,
simulator: true,
},
Arch::Aarch64,
)
}
Self::X8664IosSimulator => {
let (major, minor) = ios_deployment_target().map_or((13, 0), |(major, minor)| {
debug!("Found iOS deployment target: {}.{}", major, minor);
(major, minor)
});
Platform::new(
Os::Ios {
major,
minor,
simulator: true,
},
Arch::X86_64,
)
}
}
}
/// Return the `platform_machine` value for the target.
pub fn platform_machine(self) -> &'static str {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => "x86_64",
Self::Aarch64PcWindowsMsvc => "ARM64",
Self::Linux | Self::X8664UnknownLinuxGnu => "x86_64",
Self::Macos | Self::Aarch64AppleDarwin => "arm64",
Self::I686PcWindowsMsvc => "x86",
Self::X8664AppleDarwin => "x86_64",
Self::Aarch64UnknownLinuxGnu => "aarch64",
Self::Aarch64UnknownLinuxMusl => "aarch64",
Self::X8664UnknownLinuxMusl => "x86_64",
Self::Riscv64UnknownLinuxGnu => "riscv64",
Self::X8664Manylinux2014 => "x86_64",
Self::X8664Manylinux217 => "x86_64",
Self::X8664Manylinux228 => "x86_64",
Self::X8664Manylinux231 => "x86_64",
Self::X8664Manylinux232 => "x86_64",
Self::X8664Manylinux233 => "x86_64",
Self::X8664Manylinux234 => "x86_64",
Self::X8664Manylinux235 => "x86_64",
Self::X8664Manylinux236 => "x86_64",
Self::X8664Manylinux237 => "x86_64",
Self::X8664Manylinux238 => "x86_64",
Self::X8664Manylinux239 => "x86_64",
Self::X8664Manylinux240 => "x86_64",
Self::Aarch64Manylinux2014 => "aarch64",
Self::Aarch64Manylinux217 => "aarch64",
Self::Aarch64Manylinux228 => "aarch64",
Self::Aarch64Manylinux231 => "aarch64",
Self::Aarch64Manylinux232 => "aarch64",
Self::Aarch64Manylinux233 => "aarch64",
Self::Aarch64Manylinux234 => "aarch64",
Self::Aarch64Manylinux235 => "aarch64",
Self::Aarch64Manylinux236 => "aarch64",
Self::Aarch64Manylinux237 => "aarch64",
Self::Aarch64Manylinux238 => "aarch64",
Self::Aarch64Manylinux239 => "aarch64",
Self::Aarch64Manylinux240 => "aarch64",
Self::Aarch64LinuxAndroid => "aarch64",
Self::X8664LinuxAndroid => "x86_64",
Self::Wasm32Pyodide2024 => "wasm32",
Self::Arm64Ios => "arm64",
Self::Arm64IosSimulator => "arm64",
Self::X8664IosSimulator => "x86_64",
}
}
/// Return the `platform_system` value for the target.
pub fn platform_system(self) -> &'static str {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => "Windows",
Self::Aarch64PcWindowsMsvc => "Windows",
Self::Linux | Self::X8664UnknownLinuxGnu => "Linux",
Self::Macos | Self::Aarch64AppleDarwin => "Darwin",
Self::I686PcWindowsMsvc => "Windows",
Self::X8664AppleDarwin => "Darwin",
Self::Aarch64UnknownLinuxGnu => "Linux",
Self::Aarch64UnknownLinuxMusl => "Linux",
Self::X8664UnknownLinuxMusl => "Linux",
Self::Riscv64UnknownLinuxGnu => "Linux",
Self::X8664Manylinux2014 => "Linux",
Self::X8664Manylinux217 => "Linux",
Self::X8664Manylinux228 => "Linux",
Self::X8664Manylinux231 => "Linux",
Self::X8664Manylinux232 => "Linux",
Self::X8664Manylinux233 => "Linux",
Self::X8664Manylinux234 => "Linux",
Self::X8664Manylinux235 => "Linux",
Self::X8664Manylinux236 => "Linux",
Self::X8664Manylinux237 => "Linux",
Self::X8664Manylinux238 => "Linux",
Self::X8664Manylinux239 => "Linux",
Self::X8664Manylinux240 => "Linux",
Self::Aarch64Manylinux2014 => "Linux",
Self::Aarch64Manylinux217 => "Linux",
Self::Aarch64Manylinux228 => "Linux",
Self::Aarch64Manylinux231 => "Linux",
Self::Aarch64Manylinux232 => "Linux",
Self::Aarch64Manylinux233 => "Linux",
Self::Aarch64Manylinux234 => "Linux",
Self::Aarch64Manylinux235 => "Linux",
Self::Aarch64Manylinux236 => "Linux",
Self::Aarch64Manylinux237 => "Linux",
Self::Aarch64Manylinux238 => "Linux",
Self::Aarch64Manylinux239 => "Linux",
Self::Aarch64Manylinux240 => "Linux",
Self::Aarch64LinuxAndroid => "Android",
Self::X8664LinuxAndroid => "Android",
Self::Wasm32Pyodide2024 => "Emscripten",
Self::Arm64Ios => "iOS",
Self::Arm64IosSimulator => "iOS",
Self::X8664IosSimulator => "iOS",
}
}
/// Return the `platform_version` value for the target.
pub fn platform_version(self) -> &'static str {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => "",
Self::Aarch64PcWindowsMsvc => "",
Self::Linux | Self::X8664UnknownLinuxGnu => "",
Self::Macos | Self::Aarch64AppleDarwin => "",
Self::I686PcWindowsMsvc => "",
Self::X8664AppleDarwin => "",
Self::Aarch64UnknownLinuxGnu => "",
Self::Aarch64UnknownLinuxMusl => "",
Self::X8664UnknownLinuxMusl => "",
Self::Riscv64UnknownLinuxGnu => "",
Self::X8664Manylinux2014 => "",
Self::X8664Manylinux217 => "",
Self::X8664Manylinux228 => "",
Self::X8664Manylinux231 => "",
Self::X8664Manylinux232 => "",
Self::X8664Manylinux233 => "",
Self::X8664Manylinux234 => "",
Self::X8664Manylinux235 => "",
Self::X8664Manylinux236 => "",
Self::X8664Manylinux237 => "",
Self::X8664Manylinux238 => "",
Self::X8664Manylinux239 => "",
Self::X8664Manylinux240 => "",
Self::Aarch64Manylinux2014 => "",
Self::Aarch64Manylinux217 => "",
Self::Aarch64Manylinux228 => "",
Self::Aarch64Manylinux231 => "",
Self::Aarch64Manylinux232 => "",
Self::Aarch64Manylinux233 => "",
Self::Aarch64Manylinux234 => "",
Self::Aarch64Manylinux235 => "",
Self::Aarch64Manylinux236 => "",
Self::Aarch64Manylinux237 => "",
Self::Aarch64Manylinux238 => "",
Self::Aarch64Manylinux239 => "",
Self::Aarch64Manylinux240 => "",
Self::Aarch64LinuxAndroid => "",
Self::X8664LinuxAndroid => "",
// This is the value Emscripten gives for its version:
// https://github.com/emscripten-core/emscripten/blob/4.0.8/system/lib/libc/emscripten_syscall_stubs.c#L63
// It doesn't really seem to mean anything? But for completeness we include it here.
Self::Wasm32Pyodide2024 => "#1",
Self::Arm64Ios => "",
Self::Arm64IosSimulator => "",
Self::X8664IosSimulator => "",
}
}
/// Return the `platform_release` value for the target.
pub fn platform_release(self) -> &'static str {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => "",
Self::Aarch64PcWindowsMsvc => "",
Self::Linux | Self::X8664UnknownLinuxGnu => "",
Self::Macos | Self::Aarch64AppleDarwin => "",
Self::I686PcWindowsMsvc => "",
Self::X8664AppleDarwin => "",
Self::Aarch64UnknownLinuxGnu => "",
Self::Aarch64UnknownLinuxMusl => "",
Self::X8664UnknownLinuxMusl => "",
Self::Riscv64UnknownLinuxGnu => "",
Self::X8664Manylinux2014 => "",
Self::X8664Manylinux217 => "",
Self::X8664Manylinux228 => "",
Self::X8664Manylinux231 => "",
Self::X8664Manylinux232 => "",
Self::X8664Manylinux233 => "",
Self::X8664Manylinux234 => "",
Self::X8664Manylinux235 => "",
Self::X8664Manylinux236 => "",
Self::X8664Manylinux237 => "",
Self::X8664Manylinux238 => "",
Self::X8664Manylinux239 => "",
Self::X8664Manylinux240 => "",
Self::Aarch64Manylinux2014 => "",
Self::Aarch64Manylinux217 => "",
Self::Aarch64Manylinux228 => "",
Self::Aarch64Manylinux231 => "",
Self::Aarch64Manylinux232 => "",
Self::Aarch64Manylinux233 => "",
Self::Aarch64Manylinux234 => "",
Self::Aarch64Manylinux235 => "",
Self::Aarch64Manylinux236 => "",
Self::Aarch64Manylinux237 => "",
Self::Aarch64Manylinux238 => "",
Self::Aarch64Manylinux239 => "",
Self::Aarch64Manylinux240 => "",
Self::Aarch64LinuxAndroid => "",
Self::X8664LinuxAndroid => "",
// This is the Emscripten compiler version for Pyodide 2024.
// See https://pyodide.org/en/stable/development/abi.html#pyodide-2024-0
Self::Wasm32Pyodide2024 => "3.1.58",
Self::Arm64Ios => "",
Self::Arm64IosSimulator => "",
Self::X8664IosSimulator => "",
}
}
/// Return the `os_name` value for the target.
pub fn os_name(self) -> &'static str {
match self {
Self::Windows | Self::X8664PcWindowsMsvc => "nt",
Self::Aarch64PcWindowsMsvc => "nt",
Self::Linux | Self::X8664UnknownLinuxGnu => "posix",
Self::Macos | Self::Aarch64AppleDarwin => "posix",
Self::I686PcWindowsMsvc => "nt",
Self::X8664AppleDarwin => "posix",
Self::Aarch64UnknownLinuxGnu => "posix",
Self::Aarch64UnknownLinuxMusl => "posix",
Self::X8664UnknownLinuxMusl => "posix",
Self::Riscv64UnknownLinuxGnu => "posix",
Self::X8664Manylinux2014 => "posix",
Self::X8664Manylinux217 => "posix",
Self::X8664Manylinux228 => "posix",
Self::X8664Manylinux231 => "posix",
Self::X8664Manylinux232 => "posix",
Self::X8664Manylinux233 => "posix",
Self::X8664Manylinux234 => "posix",
Self::X8664Manylinux235 => "posix",
Self::X8664Manylinux236 => "posix",
Self::X8664Manylinux237 => "posix",
Self::X8664Manylinux238 => "posix",
Self::X8664Manylinux239 => "posix",
Self::X8664Manylinux240 => "posix",
Self::Aarch64Manylinux2014 => "posix",
Self::Aarch64Manylinux217 => "posix",
Self::Aarch64Manylinux228 => "posix",
Self::Aarch64Manylinux231 => "posix",
Self::Aarch64Manylinux232 => "posix",
Self::Aarch64Manylinux233 => "posix",
Self::Aarch64Manylinux234 => "posix",
Self::Aarch64Manylinux235 => "posix",
Self::Aarch64Manylinux236 => "posix",
Self::Aarch64Manylinux237 => "posix",
Self::Aarch64Manylinux238 => "posix",
Self::Aarch64Manylinux239 => "posix",
Self::Aarch64Manylinux240 => "posix",
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/package_options.rs | crates/uv-configuration/src/package_options.rs | use std::path::Path;
use either::Either;
use rustc_hash::FxHashMap;
use uv_cache::Refresh;
use uv_cache_info::Timestamp;
use uv_distribution_types::Requirement;
use uv_normalize::PackageName;
/// Whether to reinstall packages.
#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub enum Reinstall {
    /// Don't reinstall any packages; respect the existing installation.
    #[default]
    None,
    /// Reinstall all packages in the plan.
    All,
    /// Reinstall only the specified packages.
    ///
    /// The first list holds package names; the second holds filesystem paths,
    /// which are matched by file identity (see `contains_path`).
    Packages(Vec<PackageName>, Vec<Box<Path>>),
}
impl Reinstall {
/// Determine the reinstall strategy to use.
pub fn from_args(reinstall: Option<bool>, reinstall_package: Vec<PackageName>) -> Option<Self> {
match reinstall {
Some(true) => Some(Self::All),
Some(false) => Some(Self::None),
None if reinstall_package.is_empty() => None,
None => Some(Self::Packages(reinstall_package, Vec::new())),
}
}
/// Returns `true` if no packages should be reinstalled.
pub fn is_none(&self) -> bool {
matches!(self, Self::None)
}
/// Returns `true` if all packages should be reinstalled.
pub fn is_all(&self) -> bool {
matches!(self, Self::All)
}
/// Returns `true` if the specified package should be reinstalled.
pub fn contains_package(&self, package_name: &PackageName) -> bool {
match self {
Self::None => false,
Self::All => true,
Self::Packages(packages, ..) => packages.contains(package_name),
}
}
/// Returns `true` if the specified path should be reinstalled.
pub fn contains_path(&self, path: &Path) -> bool {
match self {
Self::None => false,
Self::All => true,
Self::Packages(.., paths) => paths
.iter()
.any(|target| same_file::is_same_file(path, target).unwrap_or(false)),
}
}
/// Combine a set of [`Reinstall`] values.
#[must_use]
pub fn combine(self, other: Self) -> Self {
match self {
// Setting `--reinstall` or `--no-reinstall` should clear previous `--reinstall-package` selections.
Self::All | Self::None => self,
Self::Packages(self_packages, self_paths) => match other {
// If `--reinstall` was enabled previously, `--reinstall-package` is subsumed by reinstalling all packages.
Self::All => other,
// If `--no-reinstall` was enabled previously, then `--reinstall-package` enables an explicit reinstall of those packages.
Self::None => Self::Packages(self_packages, self_paths),
// If `--reinstall-package` was included twice, combine the requirements.
Self::Packages(other_packages, other_paths) => {
let mut combined_packages = self_packages;
combined_packages.extend(other_packages);
let mut combined_paths = self_paths;
combined_paths.extend(other_paths);
Self::Packages(combined_packages, combined_paths)
}
},
}
}
/// Add a [`Box<Path>`] to the [`Reinstall`] policy.
#[must_use]
pub fn with_path(self, path: Box<Path>) -> Self {
match self {
Self::None => Self::Packages(vec![], vec![path]),
Self::All => Self::All,
Self::Packages(packages, mut paths) => {
paths.push(path);
Self::Packages(packages, paths)
}
}
}
/// Add a [`Package`] to the [`Reinstall`] policy.
#[must_use]
pub fn with_package(self, package_name: PackageName) -> Self {
match self {
Self::None => Self::Packages(vec![package_name], vec![]),
Self::All => Self::All,
Self::Packages(mut packages, paths) => {
packages.push(package_name);
Self::Packages(packages, paths)
}
}
}
/// Create a [`Reinstall`] strategy to reinstall a single package.
pub fn package(package_name: PackageName) -> Self {
Self::Packages(vec![package_name], vec![])
}
}
/// Create a [`Refresh`] policy by integrating the [`Reinstall`] policy.
impl From<Reinstall> for Refresh {
    fn from(value: Reinstall) -> Self {
        // A single timestamp is taken regardless of the variant.
        let now = Timestamp::now();
        match value {
            Reinstall::None => Self::None(now),
            Reinstall::All => Self::All(now),
            Reinstall::Packages(packages, paths) => Self::Packages(packages, paths, now),
        }
    }
}
/// Whether to allow package upgrades.
#[derive(Debug, Default, Clone)]
pub enum Upgrade {
    /// Prefer pinned versions from the existing lockfile, if possible.
    #[default]
    None,
    /// Allow package upgrades for all packages, ignoring the existing lockfile.
    All,
    /// Allow package upgrades, but only for the specified packages.
    ///
    /// Each package maps to the constraints under which it may be upgraded
    /// (e.g., `--upgrade-package flask<3`).
    Packages(FxHashMap<PackageName, Vec<Requirement>>),
}
impl Upgrade {
/// Determine the upgrade selection strategy from the command-line arguments.
pub fn from_args(upgrade: Option<bool>, upgrade_package: Vec<Requirement>) -> Option<Self> {
match upgrade {
Some(true) => Some(Self::All),
// TODO(charlie): `--no-upgrade` with `--upgrade-package` should allow the specified
// packages to be upgraded. Right now, `--upgrade-package` is silently ignored.
Some(false) => Some(Self::None),
None if upgrade_package.is_empty() => None,
None => Some(Self::Packages(upgrade_package.into_iter().fold(
FxHashMap::default(),
|mut map, requirement| {
map.entry(requirement.name.clone())
.or_default()
.push(requirement);
map
},
))),
}
}
/// Create an [`Upgrade`] strategy to upgrade a single package.
pub fn package(package_name: PackageName) -> Self {
Self::Packages({
let mut map = FxHashMap::default();
map.insert(package_name, vec![]);
map
})
}
/// Returns `true` if no packages should be upgraded.
pub fn is_none(&self) -> bool {
matches!(self, Self::None)
}
/// Returns `true` if all packages should be upgraded.
pub fn is_all(&self) -> bool {
matches!(self, Self::All)
}
/// Returns `true` if the specified package should be upgraded.
pub fn contains(&self, package_name: &PackageName) -> bool {
match self {
Self::None => false,
Self::All => true,
Self::Packages(packages) => packages.contains_key(package_name),
}
}
/// Returns an iterator over the constraints.
///
/// When upgrading, users can provide bounds on the upgrade (e.g., `--upgrade-package flask<3`).
pub fn constraints(&self) -> impl Iterator<Item = &Requirement> {
if let Self::Packages(packages) = self {
Either::Right(
packages
.values()
.flat_map(|requirements| requirements.iter()),
)
} else {
Either::Left(std::iter::empty())
}
}
/// Combine a set of [`Upgrade`] values.
#[must_use]
pub fn combine(self, other: Self) -> Self {
match self {
// Setting `--upgrade` or `--no-upgrade` should clear previous `--upgrade-package` selections.
Self::All | Self::None => self,
Self::Packages(self_packages) => match other {
// If `--upgrade` was enabled previously, `--upgrade-package` is subsumed by upgrading all packages.
Self::All => other,
// If `--no-upgrade` was enabled previously, then `--upgrade-package` enables an explicit upgrade of those packages.
Self::None => Self::Packages(self_packages),
// If `--upgrade-package` was included twice, combine the requirements.
Self::Packages(other_packages) => {
let mut combined = self_packages;
for (package, requirements) in other_packages {
combined.entry(package).or_default().extend(requirements);
}
Self::Packages(combined)
}
},
}
}
}
/// Create a [`Refresh`] policy by integrating the [`Upgrade`] policy.
impl From<Upgrade> for Refresh {
fn from(value: Upgrade) -> Self {
match value {
Upgrade::None => Self::None(Timestamp::now()),
Upgrade::All => Self::All(Timestamp::now()),
Upgrade::Packages(packages) => Self::Packages(
packages.into_keys().collect::<Vec<_>>(),
Vec::new(),
Timestamp::now(),
),
}
}
}
/// Whether to isolate builds.
// Serialized in kebab-case; unknown fields are rejected (`deny_unknown_fields`).
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum BuildIsolation {
    /// Isolate all builds.
    #[default]
    Isolate,
    /// Do not isolate any builds.
    Shared,
    /// Do not isolate builds for the specified packages.
    SharedPackage(Vec<PackageName>),
}
impl BuildIsolation {
    /// Determine the build isolation strategy from the command-line arguments.
    pub fn from_args(
        no_build_isolation: Option<bool>,
        no_build_isolation_package: Vec<PackageName>,
    ) -> Option<Self> {
        match no_build_isolation {
            Some(true) => Some(Self::Shared),
            Some(false) => Some(Self::Isolate),
            // With no explicit flag, only a non-empty package list produces a policy.
            None => (!no_build_isolation_package.is_empty())
                .then(|| Self::SharedPackage(no_build_isolation_package)),
        }
    }
    /// Combine a set of [`BuildIsolation`] values.
    #[must_use]
    pub fn combine(self, other: Self) -> Self {
        match (self, other) {
            // Setting `--build-isolation` or `--no-build-isolation` should clear previous `--no-build-isolation-package` selections.
            (this @ (Self::Isolate | Self::Shared), _) => this,
            // If `--no-build-isolation` was enabled previously, `--no-build-isolation-package` is subsumed by sharing all builds.
            (Self::SharedPackage(..), Self::Shared) => Self::Shared,
            // If `--build-isolation` was enabled previously, then `--no-build-isolation-package` enables specific packages to be shared.
            (this @ Self::SharedPackage(..), Self::Isolate) => this,
            // If `--no-build-isolation-package` was included twice, combine the packages.
            (Self::SharedPackage(mut combined), Self::SharedPackage(other_packages)) => {
                combined.extend(other_packages);
                Self::SharedPackage(combined)
            }
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/threading.rs | crates/uv-configuration/src/threading.rs | //! Configure rayon and determine thread stack sizes.
use std::sync::LazyLock;
use std::sync::atomic::{AtomicUsize, Ordering};
use uv_static::EnvVars;
/// The default minimum stack size for uv threads.
pub const UV_DEFAULT_STACK_SIZE: usize = 4 * 1024 * 1024;
/// We don't allow setting a smaller stack size than 1MB.
pub const UV_MIN_STACK_SIZE: usize = 1024 * 1024;
/// Running out of stack has been an issue for us. We box types and futures in various places
/// to mitigate this.
///
/// Main thread stack-size has a BIG variety here across platforms and it's harder to control
/// (which is why Rust doesn't by default). Notably on macOS and Linux you will typically get 8MB
/// main thread, while on Windows you will typically get 1MB, which is *tiny*:
/// <https://learn.microsoft.com/en-us/cpp/build/reference/stack-stack-allocations?view=msvc-170>
///
/// To normalize this we just spawn a new thread called main2 with a size we can set
/// ourselves. 2MB is typically too small (especially for our debug builds), while 4MB
/// seems fine. This value can be changed with `UV_STACK_SIZE`, with a fallback to reading
/// `RUST_MIN_STACK`, to allow checking a larger or smaller stack size. There is a hardcoded stack
/// size minimum of 1MB, which is the lowest platform default we observed.
///
/// Non-main threads should all have 2MB, as Rust forces platform consistency there,
/// but even then stack overflows can occur in release mode
/// (<https://github.com/astral-sh/uv/issues/12769>), so rayon and tokio get the same stack size,
/// with the 4MB default.
pub fn min_stack_size() -> usize {
    // `UV_STACK_SIZE` takes precedence over `RUST_MIN_STACK`; unparseable or
    // unset values fall through to the default.
    let configured = std::env::var(EnvVars::UV_STACK_SIZE)
        .ok()
        .and_then(|var| var.parse::<usize>().ok())
        .or_else(|| {
            std::env::var(EnvVars::RUST_MIN_STACK)
                .ok()
                .and_then(|var| var.parse::<usize>().ok())
        })
        .unwrap_or(UV_DEFAULT_STACK_SIZE);
    // Values below the hardcoded minimum fall back to the default, rather than
    // being clamped to the minimum.
    if configured < UV_MIN_STACK_SIZE {
        UV_DEFAULT_STACK_SIZE
    } else {
        configured
    }
}
/// The number of threads for the rayon threadpool.
///
/// The default of 0 makes rayon use its default.
pub static RAYON_PARALLELISM: AtomicUsize = AtomicUsize::new(0);
/// Initialize the threadpool lazily. Always call before using rayon the potentially first time.
///
/// The `uv` crate sets [`RAYON_PARALLELISM`] from the user settings, and the extract and install
/// code initialize the threadpool lazily only if they are actually used by calling
/// `LazyLock::force(&RAYON_INITIALIZE)`.
pub static RAYON_INITIALIZE: LazyLock<()> = LazyLock::new(|| {
    rayon::ThreadPoolBuilder::new()
        // NOTE(review): `Relaxed` assumes `RAYON_PARALLELISM` is written before this
        // lock is first forced; no ordering is enforced here — confirm callers do so.
        .num_threads(RAYON_PARALLELISM.load(Ordering::Relaxed))
        // Use the same (possibly user-configured) stack size as the main2 thread.
        .stack_size(min_stack_size())
        .build_global()
        .expect("failed to initialize global rayon pool");
});
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/env_file.rs | crates/uv-configuration/src/env_file.rs | use std::path::PathBuf;
/// A collection of `.env` file paths.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct EnvFile(Vec<PathBuf>);

impl EnvFile {
    /// Parse the env file paths from command-line arguments.
    ///
    /// Each argument may contain several whitespace-separated paths, where a
    /// backslash escapes the next character (so `a\ b` is one path with a space).
    /// Returns the empty collection when `no_env_file` is set or no arguments
    /// were given.
    pub fn from_args(env_file: Vec<String>, no_env_file: bool) -> Self {
        if no_env_file || env_file.is_empty() {
            return Self::default();
        }
        let mut paths = Vec::new();
        for arg in env_file {
            let mut current = String::new();
            let mut chars = arg.chars();
            while let Some(c) = chars.next() {
                match c {
                    // Escape: take the following character literally, if any.
                    '\\' => {
                        if let Some(next) = chars.next() {
                            current.push(next);
                        }
                    }
                    // Unescaped whitespace terminates the current path.
                    c if c.is_whitespace() => {
                        if !current.is_empty() {
                            paths.push(PathBuf::from(std::mem::take(&mut current)));
                        }
                    }
                    c => current.push(c),
                }
            }
            if !current.is_empty() {
                paths.push(PathBuf::from(current));
            }
        }
        Self(paths)
    }

    /// Iterate over the paths in the env file.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &PathBuf> {
        self.0.iter()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Parsing behavior under test: `no_env_file` wins over any paths, whitespace
    // separates paths, and a backslash escapes the following character.
    #[test]
    fn test_from_args_default() {
        let env_file = EnvFile::from_args(vec![], false);
        assert_eq!(env_file, EnvFile::default());
    }
    #[test]
    fn test_from_args_no_env_file() {
        let env_file = EnvFile::from_args(vec!["path1 path2".to_string()], true);
        assert_eq!(env_file, EnvFile::default());
    }
    #[test]
    fn test_from_args_empty_string() {
        let env_file = EnvFile::from_args(vec![String::new()], false);
        assert_eq!(env_file, EnvFile::default());
    }
    #[test]
    fn test_from_args_whitespace_only() {
        let env_file = EnvFile::from_args(vec![" ".to_string()], false);
        assert_eq!(env_file, EnvFile::default());
    }
    #[test]
    fn test_from_args_single_path() {
        let env_file = EnvFile::from_args(vec!["path1".to_string()], false);
        assert_eq!(env_file.0, vec![PathBuf::from("path1")]);
    }
    #[test]
    fn test_from_args_multiple_paths() {
        let env_file = EnvFile::from_args(vec!["path1 path2 path3".to_string()], false);
        assert_eq!(
            env_file.0,
            vec![
                PathBuf::from("path1"),
                PathBuf::from("path2"),
                PathBuf::from("path3")
            ]
        );
    }
    #[test]
    fn test_from_args_escaped_spaces() {
        let env_file = EnvFile::from_args(vec![r"path\ with\ spaces".to_string()], false);
        assert_eq!(env_file.0, vec![PathBuf::from("path with spaces")]);
    }
    #[test]
    fn test_from_args_mixed_escaped_and_normal() {
        let env_file =
            EnvFile::from_args(vec![r"path1 path\ with\ spaces path2".to_string()], false);
        assert_eq!(
            env_file.0,
            vec![
                PathBuf::from("path1"),
                PathBuf::from("path with spaces"),
                PathBuf::from("path2")
            ]
        );
    }
    #[test]
    fn test_from_args_escaped_backslash() {
        // `\\` collapses to a single literal backslash.
        let env_file = EnvFile::from_args(vec![r"path\\with\\backslashes".to_string()], false);
        assert_eq!(env_file.0, vec![PathBuf::from(r"path\with\backslashes")]);
    }
    #[test]
    fn test_iter() {
        let env_file = EnvFile(vec![PathBuf::from("path1"), PathBuf::from("path2")]);
        let paths: Vec<_> = env_file.iter().collect();
        assert_eq!(
            paths,
            vec![&PathBuf::from("path1"), &PathBuf::from("path2")]
        );
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/trusted_publishing.rs | crates/uv-configuration/src/trusted_publishing.rs | use serde::{Deserialize, Serialize};
#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, Eq, Serialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum TrustedPublishing {
    /// Attempt trusted publishing when we're in a supported environment, continue if that fails.
    ///
    /// Supported environments include GitHub Actions and GitLab CI/CD.
    #[default]
    Automatic,
    /// Force trusted publishing.
    Always,
    /// Never try to get a trusted publishing token.
    Never,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/lib.rs | crates/uv-configuration/src/lib.rs | pub use authentication::*;
pub use build_options::*;
pub use concurrency::*;
pub use constraints::*;
pub use dependency_groups::*;
pub use dry_run::*;
pub use editable::*;
pub use env_file::*;
pub use excludes::*;
pub use export_format::*;
pub use extras::*;
pub use hash::*;
pub use install_options::*;
pub use name_specifiers::*;
pub use overrides::*;
pub use package_options::*;
pub use project_build_backend::*;
pub use required_version::*;
pub use sources::*;
pub use target_triple::*;
pub use threading::*;
pub use trusted_host::*;
pub use trusted_publishing::*;
pub use vcs::*;
mod authentication;
mod build_options;
mod concurrency;
mod constraints;
mod dependency_groups;
mod dry_run;
mod editable;
mod env_file;
mod excludes;
mod export_format;
mod extras;
mod hash;
mod install_options;
mod name_specifiers;
mod overrides;
mod package_options;
mod project_build_backend;
mod required_version;
mod sources;
mod target_triple;
mod threading;
mod trusted_host;
mod trusted_publishing;
mod vcs;
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/trusted_host.rs | crates/uv-configuration/src/trusted_host.rs | use serde::{Deserialize, Deserializer};
#[cfg(feature = "schemars")]
use std::borrow::Cow;
use std::str::FromStr;
use url::Url;
/// A host specification (wildcard, or host, with optional scheme and/or port) for which
/// certificates are not verified when making HTTPS requests.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TrustedHost {
    /// Trust every host (written as `*`).
    Wildcard,
    /// Trust a single host, optionally constrained by scheme and/or port.
    Host {
        /// URL scheme to match; `None` matches any scheme.
        scheme: Option<String>,
        host: String,
        /// Port to match; `None` matches any port.
        port: Option<u16>,
    },
}
impl TrustedHost {
    /// Returns `true` if the [`Url`] matches this trusted host.
    ///
    /// A `None` scheme or port acts as a wildcard for that component; the host
    /// itself must always match exactly.
    pub fn matches(&self, url: &Url) -> bool {
        match self {
            Self::Wildcard => true,
            Self::Host { scheme, host, port } => {
                let scheme_ok = match scheme {
                    Some(scheme) => scheme == url.scheme(),
                    None => true,
                };
                let port_ok = match port {
                    Some(port) => url.port() == Some(*port),
                    None => true,
                };
                let host_ok = url.host_str() == Some(host.as_str());
                scheme_ok && port_ok && host_ok
            }
        }
    }
}
impl<'de> Deserialize<'de> for TrustedHost {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirror of the `TrustedHost::Host` variant fields, used for the map form.
        #[derive(Deserialize)]
        struct Inner {
            scheme: Option<String>,
            host: String,
            port: Option<u16>,
        }
        // Accept either a string (parsed via `FromStr`, e.g. `"https://example.com:8080"`)
        // or a map with `scheme`/`host`/`port` keys.
        serde_untagged::UntaggedEnumVisitor::new()
            .string(|string| Self::from_str(string).map_err(serde::de::Error::custom))
            .map(|map| {
                map.deserialize::<Inner>().map(|inner| Self::Host {
                    scheme: inner.scheme,
                    host: inner.host,
                    port: inner.port,
                })
            })
            .deserialize(deserializer)
    }
}
impl serde::Serialize for TrustedHost {
    /// Serialize as the string form (the inverse of the string branch of
    /// `Deserialize`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // `collect_str` serializes via the `Display` impl without allocating an
        // intermediate `String`, unlike `serialize_str(&self.to_string())`.
        serializer.collect_str(self)
    }
}
/// Errors raised when parsing a [`TrustedHost`] from a string.
#[derive(Debug, thiserror::Error)]
pub enum TrustedHostError {
    #[error("missing host for `--trusted-host`: `{0}`")]
    MissingHost(String),
    #[error("invalid port for `--trusted-host`: `{0}`")]
    InvalidPort(String),
}
impl FromStr for TrustedHost {
    type Err = TrustedHostError;

    /// Parse a trusted host from `[scheme://]host[:port][/path]`, or `*` for
    /// the wildcard. Any path component is ignored.
    ///
    /// # Errors
    ///
    /// Returns [`TrustedHostError::InvalidPort`] if the text after `:` is not a
    /// valid port, and [`TrustedHostError::MissingHost`] if the host is empty
    /// (e.g. `https://`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "*" {
            return Ok(Self::Wildcard);
        }
        // Detect scheme.
        let (scheme, s) = if let Some(rest) = s.strip_prefix("https://") {
            (Some("https".to_string()), rest)
        } else if let Some(rest) = s.strip_prefix("http://") {
            (Some("http".to_string()), rest)
        } else {
            (None, s)
        };
        // Discard any path component first (e.g., `example.com:8080/path`), so
        // that a colon in the path isn't mistaken for a port separator and a
        // port followed by a path parses correctly.
        let authority = s.split('/').next().unwrap_or(s);
        // Detect host and port.
        let (host, port) = match authority.split_once(':') {
            Some((host, port)) => {
                let port = port
                    .parse::<u16>()
                    .map_err(|_| TrustedHostError::InvalidPort(s.to_string()))?;
                (host, Some(port))
            }
            None => (authority, None),
        };
        if host.is_empty() {
            return Err(TrustedHostError::MissingHost(s.to_string()));
        }
        Ok(Self::Host {
            scheme,
            host: host.to_string(),
            port,
        })
    }
}
impl std::fmt::Display for TrustedHost {
    /// Render as `[scheme://]host[:port]`, or `*` for the wildcard.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Self::Wildcard => f.write_str("*"),
            Self::Host { scheme, host, port } => {
                if let Some(scheme) = scheme {
                    write!(f, "{scheme}://")?;
                }
                f.write_str(host)?;
                if let Some(port) = port {
                    write!(f, ":{port}")?;
                }
                Ok(())
            }
        }
    }
}
#[cfg(feature = "schemars")]
impl schemars::JsonSchema for TrustedHost {
    fn schema_name() -> Cow<'static, str> {
        Cow::Borrowed("TrustedHost")
    }
    fn json_schema(_generator: &mut schemars::generate::SchemaGenerator) -> schemars::Schema {
        // Only the string form is described; the map form accepted by
        // `Deserialize` is not reflected in the schema.
        schemars::json_schema!({
            "type": "string",
            "description": "A host or host-port pair."
        })
    }
}
#[cfg(test)]
mod tests {
    // Round-trip coverage for `TrustedHost::from_str`: wildcard, bare host,
    // host with port, host with scheme, and host with a trailing path.
    #[test]
    fn parse() {
        assert_eq!(
            "*".parse::<super::TrustedHost>().unwrap(),
            super::TrustedHost::Wildcard
        );
        assert_eq!(
            "example.com".parse::<super::TrustedHost>().unwrap(),
            super::TrustedHost::Host {
                scheme: None,
                host: "example.com".to_string(),
                port: None
            }
        );
        assert_eq!(
            "example.com:8080".parse::<super::TrustedHost>().unwrap(),
            super::TrustedHost::Host {
                scheme: None,
                host: "example.com".to_string(),
                port: Some(8080)
            }
        );
        assert_eq!(
            "https://example.com".parse::<super::TrustedHost>().unwrap(),
            super::TrustedHost::Host {
                scheme: Some("https".to_string()),
                host: "example.com".to_string(),
                port: None
            }
        );
        assert_eq!(
            "https://example.com/hello/world"
                .parse::<super::TrustedHost>()
                .unwrap(),
            super::TrustedHost::Host {
                scheme: Some("https".to_string()),
                host: "example.com".to_string(),
                port: None
            }
        );
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/install_options.rs | crates/uv-configuration/src/install_options.rs | use std::collections::BTreeSet;
use tracing::debug;
use uv_normalize::PackageName;
/// Minimal view of a package used to apply install filters.
///
/// See [`InstallOptions::include_package`].
#[derive(Debug, Clone, Copy)]
pub struct InstallTarget<'a> {
    /// The package name.
    pub name: &'a PackageName,
    /// Whether the package refers to a local source (path, directory, editable, etc.).
    pub is_local: bool,
}
/// Filters controlling which resolved packages are actually installed.
///
/// The `only_*` fields are allow-lists and the `no_*` fields are deny-lists;
/// see [`InstallOptions::include_package`] for how they interact.
#[derive(Debug, Clone, Default)]
pub struct InstallOptions {
    /// Omit the project itself from the resolution.
    pub no_install_project: bool,
    /// Include only the project itself in the resolution.
    pub only_install_project: bool,
    /// Omit all workspace members (including the project itself) from the resolution.
    pub no_install_workspace: bool,
    /// Include only workspace members (including the project itself) in the resolution.
    pub only_install_workspace: bool,
    /// Omit all local packages from the resolution.
    pub no_install_local: bool,
    /// Include only local packages in the resolution.
    pub only_install_local: bool,
    /// Omit the specified packages from the resolution.
    pub no_install_package: Vec<PackageName>,
    /// Include only the specified packages in the resolution.
    pub only_install_package: Vec<PackageName>,
}
impl InstallOptions {
    /// Create an [`InstallOptions`] from the individual filter flags.
    #[allow(clippy::fn_params_excessive_bools)]
    pub fn new(
        no_install_project: bool,
        only_install_project: bool,
        no_install_workspace: bool,
        only_install_workspace: bool,
        no_install_local: bool,
        only_install_local: bool,
        no_install_package: Vec<PackageName>,
        only_install_package: Vec<PackageName>,
    ) -> Self {
        Self {
            no_install_project,
            only_install_project,
            no_install_workspace,
            only_install_workspace,
            no_install_local,
            only_install_local,
            no_install_package,
            only_install_package,
        }
    }
    /// Returns `true` if a package passes the install filters.
    ///
    /// The `--only-install-*` filters are checked first, in precedence order
    /// (`package`, `local`, `workspace`, `project`); the first one that is set
    /// decides the outcome on its own. Otherwise, each `--no-install-*` filter
    /// may independently exclude the package.
    pub fn include_package(
        &self,
        target: InstallTarget<'_>,
        project_name: Option<&PackageName>,
        members: &BTreeSet<PackageName>,
    ) -> bool {
        let package_name = target.name;
        // If `--only-install-package` is set, only include specified packages.
        if !self.only_install_package.is_empty() {
            if self.only_install_package.contains(package_name) {
                return true;
            }
            debug!("Omitting `{package_name}` from resolution due to `--only-install-package`");
            return false;
        }
        // If `--only-install-local` is set, only include local packages.
        if self.only_install_local {
            if target.is_local {
                return true;
            }
            debug!("Omitting `{package_name}` from resolution due to `--only-install-local`");
            return false;
        }
        // If `--only-install-workspace` is set, only include the project and workspace members.
        if self.only_install_workspace {
            // Check if it's the project itself
            if let Some(project_name) = project_name {
                if package_name == project_name {
                    return true;
                }
            }
            // Check if it's a workspace member
            if members.contains(package_name) {
                return true;
            }
            // Otherwise, exclude it
            debug!("Omitting `{package_name}` from resolution due to `--only-install-workspace`");
            return false;
        }
        // If `--only-install-project` is set, only include the project itself.
        if self.only_install_project {
            if let Some(project_name) = project_name {
                if package_name == project_name {
                    return true;
                }
            }
            debug!("Omitting `{package_name}` from resolution due to `--only-install-project`");
            return false;
        }
        // If `--no-install-project` is set, remove the project itself.
        if self.no_install_project {
            if let Some(project_name) = project_name {
                if package_name == project_name {
                    debug!(
                        "Omitting `{package_name}` from resolution due to `--no-install-project`"
                    );
                    return false;
                }
            }
        }
        // If `--no-install-workspace` is set, remove the project and any workspace members.
        if self.no_install_workspace {
            // In some cases, the project root might be omitted from the list of workspace members
            // encoded in the lockfile. (But we already checked this above if `--no-install-project`
            // is set.)
            if !self.no_install_project {
                if let Some(project_name) = project_name {
                    if package_name == project_name {
                        debug!(
                            "Omitting `{package_name}` from resolution due to `--no-install-workspace`"
                        );
                        return false;
                    }
                }
            }
            if members.contains(package_name) {
                debug!("Omitting `{package_name}` from resolution due to `--no-install-workspace`");
                return false;
            }
        }
        // If `--no-install-local` is set, remove local packages.
        if self.no_install_local {
            if target.is_local {
                debug!("Omitting `{package_name}` from resolution due to `--no-install-local`");
                return false;
            }
        }
        // If `--no-install-package` is provided, remove the requested packages.
        if self.no_install_package.contains(package_name) {
            debug!("Omitting `{package_name}` from resolution due to `--no-install-package`");
            return false;
        }
        true
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/name_specifiers.rs | crates/uv-configuration/src/name_specifiers.rs | #[cfg(feature = "schemars")]
use std::borrow::Cow;
use std::str::FromStr;
use uv_normalize::PackageName;
/// A specifier used for (e.g.) pip's `--no-binary` flag.
///
/// This is a superset of the package name format, allowing for special values `:all:` and `:none:`.
#[derive(Debug, Clone)]
pub enum PackageNameSpecifier {
    /// The special value `:all:` (select all packages).
    All,
    /// The special value `:none:` (select no packages).
    None,
    /// A single, named package.
    Package(PackageName),
}
impl FromStr for PackageNameSpecifier {
    type Err = uv_normalize::InvalidNameError;

    /// Parse a specifier: the special values `:all:` and `:none:` always
    /// succeed; anything else must be a valid package name.
    fn from_str(name: &str) -> Result<Self, Self::Err> {
        if name == ":all:" {
            Ok(Self::All)
        } else if name == ":none:" {
            Ok(Self::None)
        } else {
            PackageName::from_str(name).map(Self::Package)
        }
    }
}
impl<'de> serde::Deserialize<'de> for PackageNameSpecifier {
    /// Deserialize a specifier from a string, accepting the same syntax as the
    /// [`FromStr`] implementation (`:all:`, `:none:`, or a package name).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct Visitor;
        impl serde::de::Visitor<'_> for Visitor {
            type Value = PackageNameSpecifier;
            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("a package name or `:all:` or `:none:`")
            }
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // Delegate to `FromStr` (which already handles `:all:` and
                // `:none:`), so the two parsing entry points can't drift apart.
                PackageNameSpecifier::from_str(value).map_err(E::custom)
            }
        }
        deserializer.deserialize_str(Visitor)
    }
}
#[cfg(feature = "schemars")]
impl schemars::JsonSchema for PackageNameSpecifier {
    fn schema_name() -> Cow<'static, str> {
        Cow::Borrowed("PackageNameSpecifier")
    }
    fn json_schema(_gen: &mut schemars::generate::SchemaGenerator) -> schemars::Schema {
        // The pattern mirrors the `FromStr` grammar: `:all:`, `:none:`, or a
        // normalized package name.
        schemars::json_schema!({
            "type": "string",
            "pattern": r"^(:none:|:all:|([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]))$",
            "description": "The name of a package, or `:all:` or `:none:` to select or omit all packages, respectively.",
        })
    }
}
/// A repeated specifier used for (e.g.) pip's `--no-binary` flag.
///
/// This is a superset of the package name format, allowing for special values `:all:` and `:none:`.
#[derive(Debug, Clone)]
pub enum PackageNameSpecifiers {
    /// All packages were selected (via `:all:`).
    All,
    /// No packages were selected.
    None,
    /// A specific set of package names was selected.
    Packages(Vec<PackageName>),
}
impl PackageNameSpecifiers {
    /// Collapse a sequence of individual specifiers into a single value.
    ///
    /// A `:none:` resets everything seen so far (both the accumulated package
    /// list and a prior `:all:`); an `:all:` after the last `:none:` selects
    /// everything, regardless of any individually named packages.
    pub(crate) fn from_iter(specifiers: impl Iterator<Item = PackageNameSpecifier>) -> Self {
        let mut packages = Vec::new();
        let mut all: bool = false;
        for specifier in specifiers {
            match specifier {
                PackageNameSpecifier::None => {
                    // `:none:` discards all prior selections.
                    packages.clear();
                    all = false;
                }
                PackageNameSpecifier::All => {
                    all = true;
                }
                PackageNameSpecifier::Package(name) => {
                    packages.push(name);
                }
            }
        }
        if all {
            Self::All
        } else if packages.is_empty() {
            Self::None
        } else {
            Self::Packages(packages)
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/extras.rs | crates/uv-configuration/src/extras.rs | use std::{borrow::Cow, sync::Arc};
use uv_normalize::{DefaultExtras, ExtraName};
/// Manager of all extra decisions and settings history.
///
/// This is an Arc mostly just to avoid size bloat on things that contain these.
/// Cloning is correspondingly cheap (a reference-count bump).
#[derive(Debug, Default, Clone)]
pub struct ExtrasSpecification(Arc<ExtrasSpecificationInner>);
/// Shared state backing [`ExtrasSpecification`]: all extra decisions and settings history.
#[derive(Debug, Default, Clone)]
pub struct ExtrasSpecificationInner {
    /// Extras to include.
    include: IncludeExtras,
    /// Extras to exclude (always wins over include).
    exclude: Vec<ExtraName>,
    /// Whether an `--only` flag was passed.
    ///
    /// If true, users of this API should refrain from looking at packages
    /// that *aren't* specified by the extras. This is exposed
    /// via [`ExtrasSpecificationInner::prod`][].
    only_extras: bool,
    /// The "raw" flags/settings we were passed for diagnostics.
    history: ExtrasSpecificationHistory,
}
impl ExtrasSpecification {
    /// Create from history.
    ///
    /// This is the "real" constructor, it's basically taking raw CLI flags but in
    /// a way that's a bit nicer for other constructors to use.
    fn from_history(history: ExtrasSpecificationHistory) -> Self {
        // Destructure a clone; the untouched `history` is stored for diagnostics below.
        let ExtrasSpecificationHistory {
            mut extra,
            mut only_extra,
            no_extra,
            all_extras,
            no_default_extras,
            mut defaults,
        } = history.clone();
        // `extra` and `only_extra` actually have the same meanings: packages to include.
        // But if `only_extra` is non-empty then *other* packages should be excluded.
        // So we just record whether it was and then treat the two lists as equivalent.
        let only_extras = !only_extra.is_empty();
        // --only flags imply --no-default-extras
        let default_extras = !no_default_extras && !only_extras;
        let include = if all_extras {
            // If this is set we can ignore extra/only_extra/defaults as irrelevant.
            IncludeExtras::All
        } else {
            // Merge all these lists, they're equivalent now
            extra.append(&mut only_extra);
            // Resolve default extras potentially also setting All
            if default_extras {
                match &mut defaults {
                    DefaultExtras::All => IncludeExtras::All,
                    DefaultExtras::List(defaults) => {
                        extra.append(defaults);
                        IncludeExtras::Some(extra)
                    }
                }
            } else {
                IncludeExtras::Some(extra)
            }
        };
        Self(Arc::new(ExtrasSpecificationInner {
            include,
            exclude: no_extra,
            only_extras,
            history,
        }))
    }
    /// Create from raw CLI args
    #[allow(clippy::fn_params_excessive_bools)]
    pub fn from_args(
        extra: Vec<ExtraName>,
        no_extra: Vec<ExtraName>,
        no_default_extras: bool,
        only_extra: Vec<ExtraName>,
        all_extras: bool,
    ) -> Self {
        Self::from_history(ExtrasSpecificationHistory {
            extra,
            only_extra,
            no_extra,
            all_extras,
            no_default_extras,
            // This is unknown at CLI-time, use `.with_defaults(...)` to apply this later!
            defaults: DefaultExtras::default(),
        })
    }
    /// Helper to make a spec from just a --extra
    pub fn from_extra(extra: Vec<ExtraName>) -> Self {
        Self::from_history(ExtrasSpecificationHistory {
            extra,
            ..Default::default()
        })
    }
    /// Helper to make a spec from just --all-extras
    pub fn from_all_extras() -> Self {
        Self::from_history(ExtrasSpecificationHistory {
            all_extras: true,
            ..Default::default()
        })
    }
    /// Apply defaults to a base [`ExtrasSpecification`].
    ///
    /// The result keeps both the new semantics and the pre-default ones, so
    /// callers can ask whether an extra was enabled *only* by a default.
    pub fn with_defaults(&self, defaults: DefaultExtras) -> ExtrasSpecificationWithDefaults {
        // Explicitly clone the inner history and set the defaults, then remake the result.
        let mut history = self.0.history.clone();
        history.defaults = defaults;
        ExtrasSpecificationWithDefaults {
            cur: Self::from_history(history),
            prev: self.clone(),
        }
    }
}
// Delegate all read-only accessors to the shared inner value.
impl std::ops::Deref for ExtrasSpecification {
    type Target = ExtrasSpecificationInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl ExtrasSpecificationInner {
    /// Returns `true` if packages other than the ones referenced by these
    /// extras should be considered.
    ///
    /// That is, if I tell you to install a project and this is false,
    /// you should ignore the project itself and all its dependencies,
    /// and instead just install the extras.
    ///
    /// (This is really just asking if an --only flag was passed.)
    pub fn prod(&self) -> bool {
        !self.only_extras
    }
    /// Returns `true` if the specification includes the given extra.
    pub fn contains(&self, extra: &ExtraName) -> bool {
        // exclude always trumps include
        !self.exclude.contains(extra) && self.include.contains(extra)
    }
    /// Iterate over all extras that we think should exist.
    ///
    /// Note this includes excluded (`--no-extra`) names as well: those names
    /// are still expected to exist on the project.
    pub fn desugarred_names(&self) -> impl Iterator<Item = &ExtraName> {
        self.include.names().chain(&self.exclude)
    }
    /// Returns an iterator over all extras that are included in the specification,
    /// assuming `all_names` is an iterator over all extras.
    pub fn extra_names<'a, Names>(
        &'a self,
        all_names: Names,
    ) -> impl Iterator<Item = &'a ExtraName> + 'a
    where
        Names: Iterator<Item = &'a ExtraName> + 'a,
    {
        all_names.filter(move |name| self.contains(name))
    }
    /// Iterate over all extras the user explicitly asked for on the CLI
    pub fn explicit_names(&self) -> impl Iterator<Item = &ExtraName> {
        let ExtrasSpecificationHistory {
            extra,
            only_extra,
            no_extra,
            // These reference no extras explicitly
            all_extras: _,
            no_default_extras: _,
            defaults: _,
        } = self.history();
        extra.iter().chain(no_extra).chain(only_extra)
    }
    /// Returns `true` if the specification will have no effect.
    pub fn is_empty(&self) -> bool {
        self.prod() && self.exclude.is_empty() && self.include.is_empty()
    }
    /// Get the raw history for diagnostics
    pub fn history(&self) -> &ExtrasSpecificationHistory {
        &self.history
    }
}
/// Context about a [`ExtrasSpecification`][] that we've preserved for diagnostics
#[derive(Debug, Default, Clone)]
pub struct ExtrasSpecificationHistory {
    /// Extras named by `--extra` flags.
    pub extra: Vec<ExtraName>,
    /// Extras named by `--only-extra` flags.
    pub only_extra: Vec<ExtraName>,
    /// Extras named by `--no-extra` flags.
    pub no_extra: Vec<ExtraName>,
    /// Whether `--all-extras` was passed.
    pub all_extras: bool,
    /// Whether `--no-default-extras` was passed.
    pub no_default_extras: bool,
    /// Default extras from configuration (not itself a CLI flag).
    pub defaults: DefaultExtras,
}
impl ExtrasSpecificationHistory {
    /// Returns all the CLI flags that this represents.
    ///
    /// If a flag was provided multiple times (e.g. `--extra A --extra B`) this will
    /// elide the arguments and just show the flag once (e.g. just yield "--extra").
    ///
    /// Conceptually this being an empty list should be equivalent to
    /// [`ExtrasSpecification::is_empty`][] when there aren't any defaults set.
    /// When there are defaults the two will disagree, and rightfully so!
    pub fn as_flags_pretty(&self) -> Vec<Cow<'_, str>> {
        let Self {
            extra,
            no_extra,
            all_extras,
            only_extra,
            no_default_extras,
            // defaults aren't CLI flags!
            defaults: _,
        } = self;
        let mut flags = vec![];
        if *all_extras {
            flags.push(Cow::Borrowed("--all-extras"));
        }
        if *no_default_extras {
            flags.push(Cow::Borrowed("--no-default-extras"));
        }
        // One occurrence: show the flag with its argument; several: elide the arguments.
        match &**extra {
            [] => {}
            [extra] => flags.push(Cow::Owned(format!("--extra {extra}"))),
            [..] => flags.push(Cow::Borrowed("--extra")),
        }
        match &**only_extra {
            [] => {}
            [extra] => flags.push(Cow::Owned(format!("--only-extra {extra}"))),
            [..] => flags.push(Cow::Borrowed("--only-extra")),
        }
        match &**no_extra {
            [] => {}
            [extra] => flags.push(Cow::Owned(format!("--no-extra {extra}"))),
            [..] => flags.push(Cow::Borrowed("--no-extra")),
        }
        flags
    }
}
/// A trivial newtype wrapped around [`ExtrasSpecification`][] that signifies "defaults applied"
///
/// It includes a copy of the previous semantics to provide info on if
/// the group being a default actually affected it being enabled, because it's obviously "correct".
/// (These are Arcs so it's ~free to hold onto the previous semantics)
#[derive(Debug, Clone)]
pub struct ExtrasSpecificationWithDefaults {
    /// The active semantics
    cur: ExtrasSpecification,
    /// The semantics before defaults were applied
    prev: ExtrasSpecification,
}
impl ExtrasSpecificationWithDefaults {
    /// Do not enable any extras
    ///
    /// Many places in the code need to know what extras are active,
    /// but various commands or subsystems never enable any extras,
    /// in which case they want this.
    pub fn none() -> Self {
        // An empty spec plus empty defaults enables nothing.
        ExtrasSpecification::default().with_defaults(DefaultExtras::default())
    }
    /// Returns `true` if the specification was enabled, and *only* because it was a default
    pub fn contains_because_default(&self, extra: &ExtraName) -> bool {
        self.cur.contains(extra) && !self.prev.contains(extra)
    }
}
// Expose the defaults-applied (current) semantics by default.
impl std::ops::Deref for ExtrasSpecificationWithDefaults {
    type Target = ExtrasSpecification;
    fn deref(&self) -> &Self::Target {
        &self.cur
    }
}
#[derive(Debug, Clone)]
pub enum IncludeExtras {
    /// Include dependencies from the specified extras.
    Some(Vec<ExtraName>),
    /// A marker indicates including dependencies from all extras.
    All,
}

impl IncludeExtras {
    /// Returns `true` if the specification includes the given extra.
    pub fn contains(&self, extra: &ExtraName) -> bool {
        match self {
            Self::All => true,
            Self::Some(extras) => extras.contains(extra),
        }
    }

    /// Returns `true` if the specification will have no effect.
    pub fn is_empty(&self) -> bool {
        match self {
            // Although technically this is a noop if they have no extras,
            // conceptually they're *trying* to have an effect, so treat it as one.
            Self::All => false,
            Self::Some(extras) => extras.is_empty(),
        }
    }

    /// Iterate over all extras referenced in the [`IncludeExtras`].
    ///
    /// Note that `All` yields nothing: it names no extras explicitly.
    pub fn names(&self) -> std::slice::Iter<'_, ExtraName> {
        let extras: &[ExtraName] = match self {
            Self::Some(extras) => extras,
            Self::All => &[],
        };
        extras.iter()
    }
}

impl Default for IncludeExtras {
    /// Default to an empty, explicit list of extras.
    fn default() -> Self {
        Self::Some(Vec::new())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/build_options.rs | crates/uv-configuration/src/build_options.rs | use std::fmt::{Display, Formatter};
use uv_normalize::PackageName;
use crate::{PackageNameSpecifier, PackageNameSpecifiers};
/// The kind of distribution artifact a build produces.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub enum BuildKind {
    /// A PEP 517 wheel build.
    #[default]
    Wheel,
    /// A PEP 517 source distribution build.
    Sdist,
    /// A PEP 660 editable installation wheel build.
    Editable,
}

impl Display for BuildKind {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Wheel => "wheel",
            Self::Sdist => "sdist",
            Self::Editable => "editable",
        };
        f.write_str(label)
    }
}
/// Destination for output produced by a build backend.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BuildOutput {
    /// Send the build backend output to `stderr`.
    Stderr,
    /// Send the build backend output to `tracing`.
    Debug,
    /// Do not display the build backend output.
    Quiet,
}
/// Combined `--no-binary` / `--no-build` settings, deciding whether wheels may
/// be installed and whether source distributions may be built, per package.
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub struct BuildOptions {
    no_binary: NoBinary,
    no_build: NoBuild,
}
impl BuildOptions {
    /// Create build options from the two strategies.
    pub fn new(no_binary: NoBinary, no_build: NoBuild) -> Self {
        Self {
            no_binary,
            no_build,
        }
    }
    /// Merge additional strategies into these options, combining each side.
    #[must_use]
    pub fn combine(self, no_binary: NoBinary, no_build: NoBuild) -> Self {
        Self {
            no_binary: self.no_binary.combine(no_binary),
            no_build: self.no_build.combine(no_build),
        }
    }
    /// Returns `true` if wheel installation is disallowed for the given package.
    pub fn no_binary_package(&self, package_name: &PackageName) -> bool {
        match &self.no_binary {
            NoBinary::None => false,
            NoBinary::All => match &self.no_build {
                // Allow `all` to be overridden by specific build exclusions
                NoBuild::Packages(packages) => !packages.contains(package_name),
                _ => true,
            },
            NoBinary::Packages(packages) => packages.contains(package_name),
        }
    }
    /// Returns `true` if building from source is disallowed for the given package.
    pub fn no_build_package(&self, package_name: &PackageName) -> bool {
        match &self.no_build {
            NoBuild::All => match &self.no_binary {
                // Allow `all` to be overridden by specific binary exclusions
                NoBinary::Packages(packages) => !packages.contains(package_name),
                _ => true,
            },
            NoBuild::None => false,
            NoBuild::Packages(packages) => packages.contains(package_name),
        }
    }
    /// Like [`Self::no_build_package`], but falls back to the global setting for
    /// unnamed requirements (e.g., direct URLs without a known package name).
    pub fn no_build_requirement(&self, package_name: Option<&PackageName>) -> bool {
        match package_name {
            Some(name) => self.no_build_package(name),
            None => self.no_build_all(),
        }
    }
    /// Like [`Self::no_binary_package`], but falls back to the global setting for
    /// unnamed requirements.
    pub fn no_binary_requirement(&self, package_name: Option<&PackageName>) -> bool {
        match package_name {
            Some(name) => self.no_binary_package(name),
            None => self.no_binary_all(),
        }
    }
    /// Returns `true` if building is disallowed for all packages.
    pub fn no_build_all(&self) -> bool {
        matches!(self.no_build, NoBuild::All)
    }
    /// Returns `true` if wheel installation is disallowed for all packages.
    pub fn no_binary_all(&self) -> bool {
        matches!(self.no_binary, NoBinary::All)
    }
    /// Return the [`NoBuild`] strategy to use.
    pub fn no_build(&self) -> &NoBuild {
        &self.no_build
    }
    /// Return the [`NoBinary`] strategy to use.
    pub fn no_binary(&self) -> &NoBinary {
        &self.no_binary
    }
}
/// Strategy for deciding whether pre-built wheels may be installed.
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub enum NoBinary {
    /// Allow installation of any wheel.
    #[default]
    None,
    /// Do not allow installation from any wheels.
    All,
    /// Do not allow installation from the specific wheels.
    Packages(Vec<PackageName>),
}
impl NoBinary {
    /// Determine the binary installation strategy to use for the given arguments.
    ///
    /// An explicit flag wins over the per-package list: `Some(false)` yields
    /// [`NoBinary::None`] even when `no_binary_package` is non-empty.
    pub fn from_args(no_binary: Option<bool>, no_binary_package: Vec<PackageName>) -> Self {
        match no_binary {
            Some(true) => Self::All,
            Some(false) => Self::None,
            None => {
                if no_binary_package.is_empty() {
                    Self::None
                } else {
                    Self::Packages(no_binary_package)
                }
            }
        }
    }
    /// Determine the binary installation strategy to use for the given arguments from the pip CLI.
    pub fn from_pip_args(no_binary: Vec<PackageNameSpecifier>) -> Self {
        let combined = PackageNameSpecifiers::from_iter(no_binary.into_iter());
        match combined {
            PackageNameSpecifiers::All => Self::All,
            PackageNameSpecifiers::None => Self::None,
            PackageNameSpecifiers::Packages(packages) => Self::Packages(packages),
        }
    }
    /// Determine the binary installation strategy to use for the given argument from the pip CLI.
    pub fn from_pip_arg(no_binary: PackageNameSpecifier) -> Self {
        Self::from_pip_args(vec![no_binary])
    }
    /// Combine a set of [`NoBinary`] values.
    ///
    /// `All` dominates; two `Packages` lists are concatenated (duplicates kept).
    #[must_use]
    pub fn combine(self, other: Self) -> Self {
        match (self, other) {
            // If both are `None`, the result is `None`.
            (Self::None, Self::None) => Self::None,
            // If either is `All`, the result is `All`.
            (Self::All, _) | (_, Self::All) => Self::All,
            // If one is `None`, the result is the other.
            (Self::Packages(a), Self::None) => Self::Packages(a),
            (Self::None, Self::Packages(b)) => Self::Packages(b),
            // If both are `Packages`, the result is the union of the two.
            (Self::Packages(mut a), Self::Packages(b)) => {
                a.extend(b);
                Self::Packages(a)
            }
        }
    }
    /// Extend a [`NoBinary`] value with another, in place.
    pub fn extend(&mut self, other: Self) {
        match (&mut *self, other) {
            // If either is `All`, the result is `All`.
            (Self::All, _) | (_, Self::All) => *self = Self::All,
            // If both are `None`, the result is `None`.
            (Self::None, Self::None) => {
                // Nothing to do.
            }
            // If one is `None`, the result is the other.
            (Self::Packages(_), Self::None) => {
                // Nothing to do.
            }
            (Self::None, Self::Packages(b)) => {
                // Take ownership of `b`.
                *self = Self::Packages(b);
            }
            // If both are `Packages`, the result is the union of the two.
            (Self::Packages(a), Self::Packages(b)) => {
                a.extend(b);
            }
        }
    }
}
impl NoBinary {
    /// Returns `true` if all wheels are allowed.
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }
}
/// Strategy for deciding whether wheels may be built from source distributions.
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub enum NoBuild {
    /// Allow building wheels from any source distribution.
    #[default]
    None,
    /// Do not allow building wheels from any source distribution.
    All,
    /// Do not allow building wheels from the given package's source distributions.
    Packages(Vec<PackageName>),
}
impl NoBuild {
    /// Determine the build strategy to use for the given arguments.
    ///
    /// An explicit flag wins over the per-package list: `Some(false)` yields
    /// [`NoBuild::None`] even when `no_build_package` is non-empty.
    pub fn from_args(no_build: Option<bool>, no_build_package: Vec<PackageName>) -> Self {
        match no_build {
            Some(true) => Self::All,
            Some(false) => Self::None,
            None => {
                if no_build_package.is_empty() {
                    Self::None
                } else {
                    Self::Packages(no_build_package)
                }
            }
        }
    }
    /// Determine the build strategy to use for the given arguments from the pip CLI.
    ///
    /// The `no_build` flag wins over any `only_binary` specifiers, including `:none:`.
    pub fn from_pip_args(only_binary: Vec<PackageNameSpecifier>, no_build: bool) -> Self {
        if no_build {
            Self::All
        } else {
            let combined = PackageNameSpecifiers::from_iter(only_binary.into_iter());
            match combined {
                PackageNameSpecifiers::All => Self::All,
                PackageNameSpecifiers::None => Self::None,
                PackageNameSpecifiers::Packages(packages) => Self::Packages(packages),
            }
        }
    }
    /// Determine the build strategy to use for the given argument from the pip CLI.
    pub fn from_pip_arg(no_build: PackageNameSpecifier) -> Self {
        Self::from_pip_args(vec![no_build], false)
    }
    /// Combine a set of [`NoBuild`] values.
    ///
    /// `All` dominates; two `Packages` lists are concatenated (duplicates kept).
    #[must_use]
    pub fn combine(self, other: Self) -> Self {
        match (self, other) {
            // If both are `None`, the result is `None`.
            (Self::None, Self::None) => Self::None,
            // If either is `All`, the result is `All`.
            (Self::All, _) | (_, Self::All) => Self::All,
            // If one is `None`, the result is the other.
            (Self::Packages(a), Self::None) => Self::Packages(a),
            (Self::None, Self::Packages(b)) => Self::Packages(b),
            // If both are `Packages`, the result is the union of the two.
            (Self::Packages(mut a), Self::Packages(b)) => {
                a.extend(b);
                Self::Packages(a)
            }
        }
    }
    /// Extend a [`NoBuild`] value with another, in place.
    pub fn extend(&mut self, other: Self) {
        match (&mut *self, other) {
            // If either is `All`, the result is `All`.
            (Self::All, _) | (_, Self::All) => *self = Self::All,
            // If both are `None`, the result is `None`.
            (Self::None, Self::None) => {
                // Nothing to do.
            }
            // If one is `None`, the result is the other.
            (Self::Packages(_), Self::None) => {
                // Nothing to do.
            }
            (Self::None, Self::Packages(b)) => {
                // Take ownership of `b`.
                *self = Self::Packages(b);
            }
            // If both are `Packages`, the result is the union of the two.
            (Self::Packages(a), Self::Packages(b)) => {
                a.extend(b);
            }
        }
    }
}
impl NoBuild {
    /// Returns `true` if all builds are allowed.
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }
}
/// Strategy for resolving a package across multiple configured indexes.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum IndexStrategy {
    /// Only use results from the first index that returns a match for a given package name.
    ///
    /// While this differs from pip's behavior, it's the default index strategy as it's the most
    /// secure.
    // `first-match` is accepted as an alias on the CLI.
    #[default]
    #[cfg_attr(feature = "clap", clap(alias = "first-match"))]
    FirstIndex,
    /// Search for every package name across all indexes, exhausting the versions from the first
    /// index before moving on to the next.
    ///
    /// In this strategy, we look for every package across all indexes. When resolving, we attempt
    /// to use versions from the indexes in order, such that we exhaust all available versions from
    /// the first index before moving on to the next. Further, if a version is found to be
    /// incompatible in the first index, we do not reconsider that version in subsequent indexes,
    /// even if the secondary index might contain compatible versions (e.g., variants of the same
    /// versions with different ABI tags or Python version constraints).
    ///
    /// See: <https://peps.python.org/pep-0708/>
    // `unsafe-any-match` is accepted as an alias (CLI and serde).
    #[cfg_attr(feature = "clap", clap(alias = "unsafe-any-match"))]
    #[serde(alias = "unsafe-any-match")]
    UnsafeFirstMatch,
    /// Search for every package name across all indexes, preferring the "best" version found. If a
    /// package version is in multiple indexes, only look at the entry for the first index.
    ///
    /// In this strategy, we look for every package across all indexes. When resolving, we consider
    /// all versions from all indexes, choosing the "best" version found (typically, the highest
    /// compatible version).
    ///
    /// This most closely matches pip's behavior, but exposes the resolver to "dependency confusion"
    /// attacks whereby malicious actors can publish packages to public indexes with the same name
    /// as internal packages, causing the resolver to install the malicious package in lieu of
    /// the intended internal package.
    ///
    /// See: <https://peps.python.org/pep-0708/>
    UnsafeBestMatch,
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;
    use anyhow::Error;
    use super::*;
    #[test]
    fn no_build_from_args() -> Result<(), Error> {
        // `:all:` forbids all builds, with or without the `--no-build` flag.
        assert_eq!(
            NoBuild::from_pip_args(vec![PackageNameSpecifier::from_str(":all:")?], false),
            NoBuild::All,
        );
        assert_eq!(
            NoBuild::from_pip_args(vec![PackageNameSpecifier::from_str(":all:")?], true),
            NoBuild::All,
        );
        // The `--no-build` flag wins over `:none:`.
        assert_eq!(
            NoBuild::from_pip_args(vec![PackageNameSpecifier::from_str(":none:")?], true),
            NoBuild::All,
        );
        assert_eq!(
            NoBuild::from_pip_args(vec![PackageNameSpecifier::from_str(":none:")?], false),
            NoBuild::None,
        );
        // Named packages accumulate.
        assert_eq!(
            NoBuild::from_pip_args(
                vec![
                    PackageNameSpecifier::from_str("foo")?,
                    PackageNameSpecifier::from_str("bar")?
                ],
                false
            ),
            NoBuild::Packages(vec![
                PackageName::from_str("foo")?,
                PackageName::from_str("bar")?
            ]),
        );
        // `:all:` dominates any named packages.
        assert_eq!(
            NoBuild::from_pip_args(
                vec![
                    PackageNameSpecifier::from_str("test")?,
                    PackageNameSpecifier::All
                ],
                false
            ),
            NoBuild::All,
        );
        // A later `:none:` resets previously named packages.
        assert_eq!(
            NoBuild::from_pip_args(
                vec![
                    PackageNameSpecifier::from_str("foo")?,
                    PackageNameSpecifier::from_str(":none:")?,
                    PackageNameSpecifier::from_str("bar")?
                ],
                false
            ),
            NoBuild::Packages(vec![PackageName::from_str("bar")?]),
        );
        Ok(())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/overrides.rs | crates/uv-configuration/src/overrides.rs | use std::borrow::Cow;
use either::Either;
use rustc_hash::{FxBuildHasher, FxHashMap};
use uv_distribution_types::Requirement;
use uv_normalize::PackageName;
use uv_pep508::MarkerTree;
/// A set of overrides for a set of requirements.
///
/// Overrides are keyed by package name; a single name may carry multiple
/// override requirements (e.g., with distinct markers).
#[derive(Debug, Default, Clone)]
pub struct Overrides(FxHashMap<PackageName, Vec<Requirement>>);
impl Overrides {
    /// Create a new set of overrides from a set of requirements.
    pub fn from_requirements(requirements: Vec<Requirement>) -> Self {
        let mut overrides: FxHashMap<PackageName, Vec<Requirement>> =
            FxHashMap::with_capacity_and_hasher(requirements.len(), FxBuildHasher);
        for requirement in requirements {
            overrides
                .entry(requirement.name.clone())
                .or_default()
                .push(requirement);
        }
        Self(overrides)
    }
    /// Return an iterator over all [`Requirement`]s in the override set.
    pub fn requirements(&self) -> impl Iterator<Item = &Requirement> {
        self.0.values().flat_map(|requirements| requirements.iter())
    }
    /// Get the overrides for a package.
    pub fn get(&self, name: &PackageName) -> Option<&Vec<Requirement>> {
        self.0.get(name)
    }
    /// Apply the overrides to a set of requirements.
    ///
    /// Requirements without a registered override pass through unchanged; a
    /// requirement that has overrides is replaced by the full override set.
    ///
    /// NB: Change this method together with [`Constraints::apply`].
    pub fn apply<'a>(
        &'a self,
        requirements: impl IntoIterator<Item = &'a Requirement>,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        if self.0.is_empty() {
            // Fast path: There are no overrides.
            return Either::Left(requirements.into_iter().map(Cow::Borrowed));
        }
        Either::Right(requirements.into_iter().flat_map(|requirement| {
            let Some(overrides) = self.get(&requirement.name) else {
                // Case 1: No override(s).
                return Either::Left(std::iter::once(Cow::Borrowed(requirement)));
            };
            // ASSUMPTION: There is one `extra = "..."`, and it's either the only marker or part
            // of the main conjunction.
            let Some(extra_expression) = requirement.marker.top_level_extra() else {
                // Case 2: A non-optional dependency with override(s).
                return Either::Right(Either::Right(overrides.iter().map(Cow::Borrowed)));
            };
            // Case 3: An optional dependency with override(s).
            //
            // When the original requirement is an optional dependency, the override(s) need to
            // be optional for the same extra, otherwise we activate extras that should be inactive.
            Either::Right(Either::Left(overrides.iter().map(
                move |override_requirement| {
                    // Add the extra to the override marker.
                    let mut joint_marker = MarkerTree::expression(extra_expression.clone());
                    joint_marker.and(override_requirement.marker);
                    Cow::Owned(Requirement {
                        marker: joint_marker,
                        ..override_requirement.clone()
                    })
                },
            )))
        }))
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/vcs.rs | crates/uv-configuration/src/vcs.rs | use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use serde::Deserialize;
use uv_git::GIT;
/// Errors that can arise while initializing version control for a project.
#[derive(Debug, thiserror::Error)]
pub enum VersionControlError {
    #[error("Attempted to initialize a Git repository, but `git` was not found in PATH")]
    GitNotInstalled,
    #[error("Failed to initialize Git repository at `{0}`\nstdout: {1}\nstderr: {2}")]
    GitInit(PathBuf, String, String),
    #[error("`git` command failed")]
    GitCommand(#[source] std::io::Error),
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
/// The version control system to use.
#[derive(Clone, Copy, Debug, PartialEq, Default, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum VersionControlSystem {
    /// Use Git for version control.
    #[default]
    Git,
    /// Do not use any version control system.
    None,
}
impl VersionControlSystem {
    /// Initializes the VCS system based on the provided path.
    ///
    /// For `Git`, runs `git init` in `path` and writes a default `.gitignore`
    /// if one does not already exist. For `None`, this is a no-op.
    pub fn init(&self, path: &Path) -> Result<(), VersionControlError> {
        match self {
            Self::Git => {
                // `GIT` locates the `git` executable; error out if it's not on `PATH`.
                let Ok(git) = GIT.as_ref() else {
                    return Err(VersionControlError::GitNotInstalled);
                };
                let output = Command::new(git)
                    .arg("init")
                    .current_dir(path)
                    .stdout(Stdio::piped())
                    .stderr(Stdio::piped())
                    .output()
                    .map_err(VersionControlError::GitCommand)?;
                if !output.status.success() {
                    // Surface both streams in the error for easier debugging.
                    let stdout = String::from_utf8_lossy(&output.stdout);
                    let stderr = String::from_utf8_lossy(&output.stderr);
                    return Err(VersionControlError::GitInit(
                        path.to_path_buf(),
                        stdout.to_string(),
                        stderr.to_string(),
                    ));
                }
                // Create the `.gitignore`, if it doesn't exist.
                // `create_new` fails with `AlreadyExists` rather than clobbering a
                // user-provided `.gitignore`.
                match fs_err::OpenOptions::new()
                    .write(true)
                    .create_new(true)
                    .open(path.join(".gitignore"))
                {
                    Ok(mut file) => file.write_all(GITIGNORE.as_bytes())?,
                    Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => (),
                    Err(err) => return Err(err.into()),
                }
                Ok(())
            }
            Self::None => Ok(()),
        }
    }
}
impl std::fmt::Display for VersionControlSystem {
    /// Render the kebab-case name used on the CLI.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Git => "git",
            Self::None => "none",
        })
    }
}
/// Default `.gitignore` contents written into freshly initialized repositories.
const GITIGNORE: &str = "# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info
# Virtual environments
.venv
";
/// Setting for Git LFS (Large File Storage) support.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum GitLfsSetting {
    /// Git LFS is disabled (default).
    #[default]
    Disabled,
    /// Git LFS is enabled. Tracks whether it came from an environment variable.
    Enabled { from_env: bool },
}

impl GitLfsSetting {
    /// Resolve the setting from a CLI argument and an environment variable.
    ///
    /// The CLI argument takes precedence over the environment variable.
    pub fn new(from_arg: Option<bool>, from_env: Option<bool>) -> Self {
        if from_arg == Some(true) {
            Self::Enabled { from_env: false }
        } else if from_env == Some(true) {
            Self::Enabled { from_env: true }
        } else {
            Self::Disabled
        }
    }
}

impl From<GitLfsSetting> for Option<bool> {
    /// Collapse to `Some(true)` when enabled (regardless of origin), `None` otherwise.
    fn from(setting: GitLfsSetting) -> Self {
        if matches!(setting, GitLfsSetting::Enabled { .. }) {
            Some(true)
        } else {
            None
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/export_format.rs | crates/uv-configuration/src/export_format.rs | /// The format to use when exporting a `uv.lock` file.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
pub enum ExportFormat {
    /// Export in `requirements.txt` format.
    // Canonical name is `requirements.txt`; `requirements-txt` is accepted as an alias.
    #[default]
    #[serde(rename = "requirements.txt", alias = "requirements-txt")]
    #[cfg_attr(
        feature = "clap",
        clap(name = "requirements.txt", alias = "requirements-txt")
    )]
    RequirementsTxt,
    /// Export in `pylock.toml` format.
    // Canonical name is `pylock.toml`; `pylock-toml` is accepted as an alias.
    #[serde(rename = "pylock.toml", alias = "pylock-toml")]
    #[cfg_attr(feature = "clap", clap(name = "pylock.toml", alias = "pylock-toml"))]
    PylockToml,
    /// Export in `CycloneDX` v1.5 JSON format.
    // The CLI also accepts an explicit `+json` suffix.
    #[serde(rename = "cyclonedx1.5")]
    #[cfg_attr(
        feature = "clap",
        clap(name = "cyclonedx1.5", alias = "cyclonedx1.5+json")
    )]
    CycloneDX1_5,
}
/// The output format to use in `uv pip compile`.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
pub enum PipCompileFormat {
    /// Export in `requirements.txt` format.
    // Canonical name is `requirements.txt`; `requirements-txt` is accepted as an alias.
    #[default]
    #[serde(rename = "requirements.txt", alias = "requirements-txt")]
    #[cfg_attr(
        feature = "clap",
        clap(name = "requirements.txt", alias = "requirements-txt")
    )]
    RequirementsTxt,
    /// Export in `pylock.toml` format.
    // Canonical name is `pylock.toml`; `pylock-toml` is accepted as an alias.
    #[serde(rename = "pylock.toml", alias = "pylock-toml")]
    #[cfg_attr(feature = "clap", clap(name = "pylock.toml", alias = "pylock-toml"))]
    PylockToml,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/concurrency.rs | crates/uv-configuration/src/concurrency.rs | use std::num::NonZeroUsize;
/// Concurrency limit settings.
#[derive(Copy, Clone, Debug)]
pub struct Concurrency {
    /// The maximum number of concurrent downloads.
    ///
    /// Note this value must be non-zero.
    pub downloads: usize,
    /// The maximum number of concurrent builds.
    ///
    /// Note this value must be non-zero.
    pub builds: usize,
    /// The maximum number of concurrent installs.
    ///
    /// Note this value must be non-zero.
    pub installs: usize,
}

impl Default for Concurrency {
    /// Default to [`Concurrency::DEFAULT_DOWNLOADS`] downloads and one build and
    /// install slot per available CPU thread.
    fn default() -> Self {
        let parallelism = Self::threads();
        Self {
            downloads: Self::DEFAULT_DOWNLOADS,
            builds: parallelism,
            installs: parallelism,
        }
    }
}

impl Concurrency {
    /// The default concurrent downloads limit.
    pub const DEFAULT_DOWNLOADS: usize = 50;

    /// The default concurrent builds and install limit: the number of available
    /// CPU threads, falling back to one when parallelism cannot be determined.
    pub fn threads() -> usize {
        std::thread::available_parallelism().map_or(1, NonZeroUsize::get)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/hash.rs | crates/uv-configuration/src/hash.rs | #[derive(Debug, Copy, Clone)]
pub enum HashCheckingMode {
    /// Hashes should be validated against a pre-defined list of hashes. Every requirement must
    /// itself be hashable (e.g., Git dependencies are forbidden) _and_ have a hash in the lockfile.
    Require,
    /// Hashes should be validated, if present, but ignored if absent.
    Verify,
}

impl HashCheckingMode {
    /// Return the [`HashCheckingMode`] from the command-line arguments, if any.
    ///
    /// By default, the hash checking mode is [`HashCheckingMode::Verify`]. If `--require-hashes` is
    /// passed, the hash checking mode is [`HashCheckingMode::Require`]. If `--no-verify-hashes` is
    /// passed, then no hash checking is performed.
    pub fn from_args(require_hashes: Option<bool>, verify_hashes: Option<bool>) -> Option<Self> {
        match (require_hashes, verify_hashes) {
            // `--require-hashes` always wins, regardless of any other flags.
            (Some(true), _) => Some(Self::Require),
            // `--verify-hashes` (without `--require-hashes`) forces verification.
            (_, Some(true)) => Some(Self::Verify),
            // `--no-verify-hashes` (without `--require-hashes`) disables checking.
            (_, Some(false)) => None,
            // `--no-require-hashes` (without a verify flag) disables checking.
            (Some(false), None) => None,
            // By default, verify hashes.
            (None, None) => Some(Self::Verify),
        }
    }

    /// Returns `true` if the hash checking mode is `Require`.
    pub fn is_require(&self) -> bool {
        matches!(self, Self::Require)
    }
}

impl std::fmt::Display for HashCheckingMode {
    /// Render the CLI flag that selects this mode.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Require => "--require-hashes",
            Self::Verify => "--verify-hashes",
        })
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/authentication.rs | crates/uv-configuration/src/authentication.rs | use uv_auth::{self, KeyringProvider};
/// Keyring provider type to use for credential lookup.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum KeyringProviderType {
/// Do not use keyring for credential lookup.
#[default]
Disabled,
/// Use the `keyring` command for credential lookup.
Subprocess,
// /// Not yet implemented
// Auto,
// /// Not implemented yet. Maybe use <https://docs.rs/keyring/latest/keyring/> for this?
// Import,
}
// See <https://pip.pypa.io/en/stable/topics/authentication/#keyring-support> for details.
impl KeyringProviderType {
pub fn to_provider(&self) -> Option<KeyringProvider> {
match self {
Self::Disabled => None,
Self::Subprocess => Some(KeyringProvider::subprocess()),
}
}
}
impl std::fmt::Display for KeyringProviderType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Disabled => write!(f, "disabled"),
Self::Subprocess => write!(f, "subprocess"),
}
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/dependency_groups.rs | crates/uv-configuration/src/dependency_groups.rs | use std::{borrow::Cow, sync::Arc};
use uv_normalize::{DEV_DEPENDENCIES, DefaultGroups, GroupName};
/// Manager of all dependency-group decisions and settings history.
///
/// This is an Arc mostly just to avoid size bloat on things that contain these.
#[derive(Debug, Default, Clone)]
pub struct DependencyGroups(Arc<DependencyGroupsInner>);
/// Manager of all dependency-group decisions and settings history.
#[derive(Debug, Default, Clone)]
pub struct DependencyGroupsInner {
/// Groups to include.
include: IncludeGroups,
/// Groups to exclude (always wins over include).
exclude: Vec<GroupName>,
/// Whether an `--only` flag was passed.
///
/// If true, users of this API should refrain from looking at packages
/// that *aren't* specified by the dependency-groups. This is exposed
/// via [`DependencyGroupsInner::prod`][].
only_groups: bool,
/// The "raw" flags/settings we were passed for diagnostics.
history: DependencyGroupsHistory,
}
impl DependencyGroups {
/// Create from history.
///
/// This is the "real" constructor, it's basically taking raw CLI flags but in
/// a way that's a bit nicer for other constructors to use.
fn from_history(history: DependencyGroupsHistory) -> Self {
let DependencyGroupsHistory {
dev_mode,
mut group,
mut only_group,
mut no_group,
all_groups,
no_default_groups,
mut defaults,
} = history.clone();
// First desugar --dev flags
match dev_mode {
Some(DevMode::Include) => group.push(DEV_DEPENDENCIES.clone()),
Some(DevMode::Only) => only_group.push(DEV_DEPENDENCIES.clone()),
Some(DevMode::Exclude) => no_group.push(DEV_DEPENDENCIES.clone()),
None => {}
}
// `group` and `only_group` actually have the same meanings: packages to include.
// But if `only_group` is non-empty then *other* packages should be excluded.
// So we just record whether it was and then treat the two lists as equivalent.
let only_groups = !only_group.is_empty();
// --only flags imply --no-default-groups
let default_groups = !no_default_groups && !only_groups;
let include = if all_groups {
// If this is set we can ignore group/only_group/defaults as irrelevant
// (`--all-groups --only-*` is rejected at the CLI level, don't worry about it).
IncludeGroups::All
} else {
// Merge all these lists, they're equivalent now
group.append(&mut only_group);
// Resolve default groups potentially also setting All
if default_groups {
match &mut defaults {
DefaultGroups::All => IncludeGroups::All,
DefaultGroups::List(defaults) => {
group.append(defaults);
IncludeGroups::Some(group)
}
}
} else {
IncludeGroups::Some(group)
}
};
Self(Arc::new(DependencyGroupsInner {
include,
exclude: no_group,
only_groups,
history,
}))
}
/// Create from raw CLI args
#[allow(clippy::fn_params_excessive_bools)]
pub fn from_args(
dev: bool,
no_dev: bool,
only_dev: bool,
group: Vec<GroupName>,
no_group: Vec<GroupName>,
no_default_groups: bool,
only_group: Vec<GroupName>,
all_groups: bool,
) -> Self {
// Lower the --dev flags into a single dev mode.
//
// In theory only one of these 3 flags should be set (enforced by CLI),
// but we explicitly allow `--dev` and `--only-dev` to both be set,
// and "saturate" that to `--only-dev`.
let dev_mode = if only_dev {
Some(DevMode::Only)
} else if no_dev {
Some(DevMode::Exclude)
} else if dev {
Some(DevMode::Include)
} else {
None
};
Self::from_history(DependencyGroupsHistory {
dev_mode,
group,
only_group,
no_group,
all_groups,
no_default_groups,
// This is unknown at CLI-time, use `.with_defaults(...)` to apply this later!
defaults: DefaultGroups::default(),
})
}
/// Helper to make a spec from just a --dev flag
pub fn from_dev_mode(dev_mode: DevMode) -> Self {
Self::from_history(DependencyGroupsHistory {
dev_mode: Some(dev_mode),
..Default::default()
})
}
/// Helper to make a spec from just a --group
pub fn from_group(group: GroupName) -> Self {
Self::from_history(DependencyGroupsHistory {
group: vec![group],
..Default::default()
})
}
/// Apply defaults to a base [`DependencyGroups`].
///
/// This is appropriate in projects, where the `dev` group is synced by default.
pub fn with_defaults(&self, defaults: DefaultGroups) -> DependencyGroupsWithDefaults {
// Explicitly clone the inner history and set the defaults, then remake the result.
let mut history = self.0.history.clone();
history.defaults = defaults;
DependencyGroupsWithDefaults {
cur: Self::from_history(history),
prev: self.clone(),
}
}
}
impl std::ops::Deref for DependencyGroups {
type Target = DependencyGroupsInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DependencyGroupsInner {
/// Returns `true` if packages other than the ones referenced by these
/// dependency-groups should be considered.
///
/// That is, if I tell you to install a project and this is false,
/// you should ignore the project itself and all its dependencies,
/// and instead just install the dependency-groups.
///
/// (This is really just asking if an --only flag was passed.)
pub fn prod(&self) -> bool {
!self.only_groups
}
/// Returns `true` if the specification includes the given group.
pub fn contains(&self, group: &GroupName) -> bool {
// exclude always trumps include
!self.exclude.contains(group) && self.include.contains(group)
}
/// Iterate over all groups that we think should exist.
pub fn desugarred_names(&self) -> impl Iterator<Item = &GroupName> {
self.include.names().chain(&self.exclude)
}
/// Returns an iterator over all groups that are included in the specification,
/// assuming `all_names` is an iterator over all groups.
pub fn group_names<'a, Names>(
&'a self,
all_names: Names,
) -> impl Iterator<Item = &'a GroupName> + 'a
where
Names: Iterator<Item = &'a GroupName> + 'a,
{
all_names.filter(move |name| self.contains(name))
}
/// Iterate over all groups the user explicitly asked for on the CLI
pub fn explicit_names(&self) -> impl Iterator<Item = &GroupName> {
let DependencyGroupsHistory {
// Strictly speaking this is an explicit reference to "dev"
// but we're currently tolerant of dev not existing when referenced with
// these flags, since it kinda implicitly always exists even if
// it's not properly defined in a config file.
dev_mode: _,
group,
only_group,
no_group,
// These reference no groups explicitly
all_groups: _,
no_default_groups: _,
// This doesn't include defaults because the `dev` group may not be defined
// but gets implicitly added as a default sometimes!
defaults: _,
} = self.history();
group.iter().chain(no_group).chain(only_group)
}
/// Returns `true` if the specification will have no effect.
pub fn is_empty(&self) -> bool {
self.prod() && self.exclude.is_empty() && self.include.is_empty()
}
/// Get the raw history for diagnostics
pub fn history(&self) -> &DependencyGroupsHistory {
&self.history
}
}
/// Context about a [`DependencyGroups`][] that we've preserved for diagnostics
#[derive(Debug, Default, Clone)]
pub struct DependencyGroupsHistory {
pub dev_mode: Option<DevMode>,
pub group: Vec<GroupName>,
pub only_group: Vec<GroupName>,
pub no_group: Vec<GroupName>,
pub all_groups: bool,
pub no_default_groups: bool,
pub defaults: DefaultGroups,
}
impl DependencyGroupsHistory {
/// Returns all the CLI flags that this represents.
///
/// If a flag was provided multiple times (e.g. `--group A --group B`) this will
/// elide the arguments and just show the flag once (e.g. just yield "--group").
///
/// Conceptually this being an empty list should be equivalent to
/// [`DependencyGroups::is_empty`][] when there aren't any defaults set.
/// When there are defaults the two will disagree, and rightfully so!
pub fn as_flags_pretty(&self) -> Vec<Cow<'_, str>> {
let Self {
dev_mode,
group,
only_group,
no_group,
all_groups,
no_default_groups,
// defaults aren't CLI flags!
defaults: _,
} = self;
let mut flags = vec![];
if *all_groups {
flags.push(Cow::Borrowed("--all-groups"));
}
if *no_default_groups {
flags.push(Cow::Borrowed("--no-default-groups"));
}
if let Some(dev_mode) = dev_mode {
flags.push(Cow::Borrowed(dev_mode.as_flag()));
}
match &**group {
[] => {}
[group] => flags.push(Cow::Owned(format!("--group {group}"))),
[..] => flags.push(Cow::Borrowed("--group")),
}
match &**only_group {
[] => {}
[group] => flags.push(Cow::Owned(format!("--only-group {group}"))),
[..] => flags.push(Cow::Borrowed("--only-group")),
}
match &**no_group {
[] => {}
[group] => flags.push(Cow::Owned(format!("--no-group {group}"))),
[..] => flags.push(Cow::Borrowed("--no-group")),
}
flags
}
}
/// A trivial newtype wrapped around [`DependencyGroups`][] that signifies "defaults applied"
///
/// It includes a copy of the previous semantics to provide info on if
/// the group being a default actually affected it being enabled, because it's obviously "correct".
/// (These are Arcs so it's ~free to hold onto the previous semantics)
#[derive(Debug, Clone)]
pub struct DependencyGroupsWithDefaults {
/// The active semantics
cur: DependencyGroups,
/// The semantics before defaults were applied
prev: DependencyGroups,
}
impl DependencyGroupsWithDefaults {
/// Do not enable any groups
///
/// Many places in the code need to know what dependency-groups are active,
/// but various commands or subsystems never enable any dependency-groups,
/// in which case they want this.
pub fn none() -> Self {
DependencyGroups::default().with_defaults(DefaultGroups::default())
}
/// Returns `true` if the specification was enabled, and *only* because it was a default
pub fn contains_because_default(&self, group: &GroupName) -> bool {
self.cur.contains(group) && !self.prev.contains(group)
}
}
impl std::ops::Deref for DependencyGroupsWithDefaults {
type Target = DependencyGroups;
fn deref(&self) -> &Self::Target {
&self.cur
}
}
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum DevMode {
/// Include development dependencies.
#[default]
Include,
/// Exclude development dependencies.
Exclude,
/// Only include development dependencies, excluding all other dependencies.
Only,
}
impl DevMode {
/// Returns the flag that was used to request development dependencies.
pub fn as_flag(&self) -> &'static str {
match self {
Self::Exclude => "--no-dev",
Self::Include => "--dev",
Self::Only => "--only-dev",
}
}
}
#[derive(Debug, Clone)]
pub enum IncludeGroups {
/// Include dependencies from the specified groups.
Some(Vec<GroupName>),
/// A marker indicates including dependencies from all groups.
All,
}
impl IncludeGroups {
/// Returns `true` if the specification includes the given group.
pub fn contains(&self, group: &GroupName) -> bool {
match self {
Self::Some(groups) => groups.contains(group),
Self::All => true,
}
}
/// Returns `true` if the specification will have no effect.
pub fn is_empty(&self) -> bool {
match self {
Self::Some(groups) => groups.is_empty(),
// Although technically this is a noop if they have no groups,
// conceptually they're *trying* to have an effect, so treat it as one.
Self::All => false,
}
}
/// Iterate over all groups referenced in the [`IncludeGroups`].
pub fn names(&self) -> std::slice::Iter<'_, GroupName> {
match self {
Self::Some(groups) => groups.iter(),
Self::All => [].iter(),
}
}
}
impl Default for IncludeGroups {
fn default() -> Self {
Self::Some(Vec::new())
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/constraints.rs | crates/uv-configuration/src/constraints.rs | use std::borrow::Cow;
use either::Either;
use rustc_hash::FxHashMap;
use uv_distribution_types::{Requirement, RequirementSource};
use uv_normalize::PackageName;
use uv_pep508::MarkerTree;
/// A set of constraints for a set of requirements.
#[derive(Debug, Default, Clone)]
pub struct Constraints(FxHashMap<PackageName, Vec<Requirement>>);
impl Constraints {
/// Create a new set of constraints from a set of requirements.
pub fn from_requirements(requirements: impl Iterator<Item = Requirement>) -> Self {
let mut constraints: FxHashMap<PackageName, Vec<Requirement>> = FxHashMap::default();
for requirement in requirements {
// Skip empty constraints.
if let RequirementSource::Registry { specifier, .. } = &requirement.source {
if specifier.is_empty() {
continue;
}
}
constraints
.entry(requirement.name.clone())
.or_default()
.push(Requirement {
// We add and apply constraints independent of their extras.
extras: Box::new([]),
..requirement
});
}
Self(constraints)
}
/// Return an iterator over all [`Requirement`]s in the constraint set.
pub fn requirements(&self) -> impl Iterator<Item = &Requirement> {
self.0.values().flat_map(|requirements| requirements.iter())
}
/// Get the constraints for a package.
pub fn get(&self, name: &PackageName) -> Option<&Vec<Requirement>> {
self.0.get(name)
}
/// Apply the constraints to a set of requirements.
///
/// NB: Change this method together with [`Overrides::apply`].
pub fn apply<'a>(
&'a self,
requirements: impl IntoIterator<Item = Cow<'a, Requirement>>,
) -> impl Iterator<Item = Cow<'a, Requirement>> {
requirements.into_iter().flat_map(|requirement| {
let Some(constraints) = self.get(&requirement.name) else {
// Case 1: No constraint(s).
return Either::Left(std::iter::once(requirement));
};
// ASSUMPTION: There is one `extra = "..."`, and it's either the only marker or part
// of the main conjunction.
let Some(extra_expression) = requirement.marker.top_level_extra() else {
// Case 2: A non-optional dependency with constraint(s).
return Either::Right(Either::Right(
std::iter::once(requirement).chain(constraints.iter().map(Cow::Borrowed)),
));
};
// Case 3: An optional dependency with constraint(s).
//
// When the original requirement is an optional dependency, the constraint(s) need to
// be optional for the same extra, otherwise we activate extras that should be inactive.
Either::Right(Either::Left(std::iter::once(requirement).chain(
constraints.iter().cloned().map(move |constraint| {
// Add the extra to the override marker.
let mut joint_marker = MarkerTree::expression(extra_expression.clone());
joint_marker.and(constraint.marker);
Cow::Owned(Requirement {
marker: joint_marker,
..constraint
})
}),
)))
})
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/excludes.rs | crates/uv-configuration/src/excludes.rs | use rustc_hash::FxHashSet;
use uv_normalize::PackageName;
/// A set of packages to exclude from resolution.
#[derive(Debug, Default, Clone)]
pub struct Excludes(FxHashSet<PackageName>);
impl Excludes {
/// Return an iterator over all package names in the exclusion set.
pub fn iter(&self) -> impl Iterator<Item = &PackageName> {
self.0.iter()
}
/// Check if a package is excluded.
pub fn contains(&self, name: &PackageName) -> bool {
self.0.contains(name)
}
}
impl FromIterator<PackageName> for Excludes {
fn from_iter<I: IntoIterator<Item = PackageName>>(iter: I) -> Self {
Self(iter.into_iter().collect())
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/project_build_backend.rs | crates/uv-configuration/src/project_build_backend.rs | /// Available project build backends for use in `pyproject.toml`.
#[derive(Clone, Copy, Debug, PartialEq, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum ProjectBuildBackend {
#[cfg_attr(feature = "clap", value(alias = "uv-build", alias = "uv_build"))]
/// Use uv as the project build backend.
Uv,
#[serde(alias = "hatchling")]
#[cfg_attr(feature = "clap", value(alias = "hatchling"))]
/// Use [hatchling](https://pypi.org/project/hatchling) as the project build backend.
Hatch,
/// Use [flit-core](https://pypi.org/project/flit-core) as the project build backend.
#[serde(alias = "flit-core")]
#[cfg_attr(feature = "clap", value(alias = "flit-core"))]
Flit,
/// Use [pdm-backend](https://pypi.org/project/pdm-backend) as the project build backend.
#[serde(alias = "pdm-backend")]
#[cfg_attr(feature = "clap", value(alias = "pdm-backend"))]
PDM,
/// Use [poetry-core](https://pypi.org/project/poetry-core) as the project build backend.
#[serde(alias = "poetry-core")]
#[cfg_attr(feature = "clap", value(alias = "poetry-core", alias = "poetry_core"))]
Poetry,
/// Use [setuptools](https://pypi.org/project/setuptools) as the project build backend.
Setuptools,
/// Use [maturin](https://pypi.org/project/maturin) as the project build backend.
Maturin,
/// Use [scikit-build-core](https://pypi.org/project/scikit-build-core) as the project build backend.
#[serde(alias = "scikit-build-core")]
#[cfg_attr(feature = "clap", value(alias = "scikit-build-core"))]
Scikit,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/editable.rs | crates/uv-configuration/src/editable.rs | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum EditableMode {
#[default]
Editable,
NonEditable,
}
impl From<bool> for EditableMode {
fn from(value: bool) -> Self {
if value {
Self::Editable
} else {
Self::NonEditable
}
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/required_version.rs | crates/uv-configuration/src/required_version.rs | #[cfg(feature = "schemars")]
use std::borrow::Cow;
use std::{fmt::Formatter, str::FromStr};
use uv_pep440::{Version, VersionSpecifier, VersionSpecifiers, VersionSpecifiersParseError};
/// A required version of uv, represented as a version specifier (e.g. `>=0.5.0`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RequiredVersion(VersionSpecifiers);
impl RequiredVersion {
/// Return `true` if the given version is required.
pub fn contains(&self, version: &Version) -> bool {
self.0.contains(version)
}
/// Returns the underlying [`VersionSpecifiers`].
pub fn specifiers(&self) -> &VersionSpecifiers {
&self.0
}
}
impl FromStr for RequiredVersion {
type Err = VersionSpecifiersParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Treat `0.5.0` as `==0.5.0`, for backwards compatibility.
if let Ok(version) = Version::from_str(s) {
Ok(Self(VersionSpecifiers::from(
VersionSpecifier::equals_version(version),
)))
} else {
Ok(Self(VersionSpecifiers::from_str(s)?))
}
}
}
#[cfg(feature = "schemars")]
impl schemars::JsonSchema for RequiredVersion {
fn schema_name() -> Cow<'static, str> {
Cow::Borrowed("RequiredVersion")
}
fn json_schema(_generator: &mut schemars::generate::SchemaGenerator) -> schemars::Schema {
schemars::json_schema!({
"type": "string",
"description": "A version specifier, e.g. `>=0.5.0` or `==0.5.0`."
})
}
}
impl<'de> serde::Deserialize<'de> for RequiredVersion {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
struct Visitor;
impl serde::de::Visitor<'_> for Visitor {
type Value = RequiredVersion;
fn expecting(&self, f: &mut Formatter) -> std::fmt::Result {
f.write_str("a string")
}
fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
RequiredVersion::from_str(v).map_err(serde::de::Error::custom)
}
}
deserializer.deserialize_str(Visitor)
}
}
impl std::fmt::Display for RequiredVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-configuration/src/sources.rs | crates/uv-configuration/src/sources.rs | #[derive(
Debug, Default, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize,
)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub enum SourceStrategy {
/// Use `tool.uv.sources` when resolving dependencies.
#[default]
Enabled,
/// Ignore `tool.uv.sources` when resolving dependencies.
Disabled,
}
impl SourceStrategy {
/// Return the [`SourceStrategy`] from the command-line arguments, if any.
pub fn from_args(no_sources: bool) -> Self {
if no_sources {
Self::Disabled
} else {
Self::Enabled
}
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-globfilter/src/lib.rs | crates/uv-globfilter/src/lib.rs | //! Implementation of PEP 639 cross-language restricted globs and a related directory traversal
//! prefilter.
//!
//! The goal is globs that are portable between languages and operating systems.
mod glob_dir_filter;
mod portable_glob;
pub use glob_dir_filter::GlobDirFilter;
pub use portable_glob::{PortableGlobError, PortableGlobParser};
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-globfilter/src/glob_dir_filter.rs | crates/uv-globfilter/src/glob_dir_filter.rs | use globset::{Glob, GlobSet, GlobSetBuilder};
use regex_automata::dfa;
use regex_automata::dfa::Automaton;
use std::path::{MAIN_SEPARATOR, MAIN_SEPARATOR_STR, Path};
use tracing::warn;
/// Chosen at a whim -Konsti
const DFA_SIZE_LIMIT: usize = 1_000_000;
/// Filter a directory tree traversal (walkdir) by whether any paths of a directory can be included
/// at all.
///
/// Internally, the globs are converted to a regex and then to a DFA, which unlike the globs and the
/// regex allows to check for prefix matches.
pub struct GlobDirFilter {
glob_set: GlobSet,
dfa: Option<dfa::dense::DFA<Vec<u32>>>,
}
impl GlobDirFilter {
/// The filter matches if any of the globs matches.
///
/// See <https://github.com/BurntSushi/ripgrep/discussions/2927> for the error returned.
pub fn from_globs(globs: &[Glob]) -> Result<Self, globset::Error> {
let mut glob_set_builder = GlobSetBuilder::new();
for glob in globs {
glob_set_builder.add(glob.clone());
}
let glob_set = glob_set_builder.build()?;
let regexes: Vec<_> = globs
.iter()
.map(|glob| {
let main_separator = regex::escape(MAIN_SEPARATOR_STR);
glob.regex()
// We are using a custom DFA builder
.strip_prefix("(?-u)")
.expect("a glob is a non-unicode byte regex")
// Match windows paths if applicable
.replace('/', &main_separator)
})
.collect();
let dfa_builder = dfa::dense::Builder::new()
.syntax(
// The glob regex is a byte matcher
regex_automata::util::syntax::Config::new()
.unicode(false)
.utf8(false),
)
.configure(
dfa::dense::Config::new()
.start_kind(dfa::StartKind::Anchored)
// DFA can grow exponentially, in which case we bail out
.dfa_size_limit(Some(DFA_SIZE_LIMIT))
.determinize_size_limit(Some(DFA_SIZE_LIMIT)),
)
.build_many(®exes);
let dfa = if let Ok(dfa) = dfa_builder {
Some(dfa)
} else {
// TODO(konsti): `regex_automata::dfa::dense::BuildError` should allow asking whether
// is a size error
warn!(
"Glob expressions regex is larger than {DFA_SIZE_LIMIT} bytes, \
falling back to full directory traversal!"
);
None
};
Ok(Self { glob_set, dfa })
}
/// Whether the path (file or directory) matches any of the globs.
///
/// We include a directory if we are potentially including files it contains.
pub fn match_path(&self, path: &Path) -> bool {
self.match_directory(path) || self.glob_set.is_match(path)
}
/// Check whether a directory or any of its children can be matched by any of the globs.
///
/// This option never returns false if any child matches, but it may return true even if we
/// don't end up including any child.
pub fn match_directory(&self, path: &Path) -> bool {
let Some(dfa) = &self.dfa else {
return true;
};
// Allow the root path
if path == Path::new("") {
return true;
}
let config_anchored =
regex_automata::util::start::Config::new().anchored(regex_automata::Anchored::Yes);
let mut state = dfa.start_state(&config_anchored).unwrap();
// Paths aren't necessarily UTF-8, which we can gloss over since the globs match bytes only
// anyway.
let byte_path = path.as_os_str().as_encoded_bytes();
for b in byte_path {
state = dfa.next_state(state, *b);
}
// Say we're looking at a directory `foo/bar`. We want to continue if either `foo/bar` is
// a match, e.g., from `foo/*`, or a path below it can match, e.g., from `foo/bar/*`.
let eoi_state = dfa.next_eoi_state(state);
// We must not call `next_eoi_state` on the slash state, we want to only check if more
// characters (path components) are allowed, not if we're matching the `$` anchor at the
// end.
let slash_state = dfa.next_state(state, u8::try_from(MAIN_SEPARATOR).unwrap());
debug_assert!(
!dfa.is_quit_state(eoi_state) && !dfa.is_quit_state(slash_state),
"matcher is in quit state"
);
dfa.is_match_state(eoi_state) || !dfa.is_dead_state(slash_state)
}
}
#[cfg(test)]
mod tests {
use crate::PortableGlobParser;
use crate::glob_dir_filter::GlobDirFilter;
use std::path::{MAIN_SEPARATOR, Path};
use tempfile::tempdir;
use walkdir::WalkDir;
const FILES: [&str; 5] = [
"path1/dir1/subdir/a.txt",
"path2/dir2/subdir/a.txt",
"path3/dir3/subdir/a.txt",
"path4/dir4/subdir/a.txt",
"path5/dir5/subdir/a.txt",
];
const PATTERNS: [&str; 5] = [
// Only sufficient for descending one level
"path1/*",
// Only sufficient for descending one level
"path2/dir2",
// Sufficient for descending
"path3/dir3/subdir/a.txt",
// Sufficient for descending
"path4/**/*",
// Not sufficient for descending
"path5",
];
#[test]
fn match_directory() {
let patterns = PATTERNS.map(|pattern| PortableGlobParser::Pep639.parse(pattern).unwrap());
let matcher = GlobDirFilter::from_globs(&patterns).unwrap();
assert!(matcher.match_directory(&Path::new("path1").join("dir1")));
assert!(matcher.match_directory(&Path::new("path2").join("dir2")));
assert!(matcher.match_directory(&Path::new("path3").join("dir3")));
assert!(matcher.match_directory(&Path::new("path4").join("dir4")));
assert!(!matcher.match_directory(&Path::new("path5").join("dir5")));
}
/// Check that we skip directories that can never match.
#[test]
fn prefilter() {
let dir = tempdir().unwrap();
for file in FILES {
let file = dir.path().join(file);
fs_err::create_dir_all(file.parent().unwrap()).unwrap();
fs_err::File::create(file).unwrap();
}
let patterns = PATTERNS.map(|pattern| PortableGlobParser::Pep639.parse(pattern).unwrap());
let matcher = GlobDirFilter::from_globs(&patterns).unwrap();
// Test the prefix filtering
let visited: Vec<_> = WalkDir::new(dir.path())
.sort_by_file_name()
.into_iter()
.filter_entry(|entry| {
let relative = entry
.path()
.strip_prefix(dir.path())
.expect("walkdir starts with root");
matcher.match_directory(relative)
})
.map(|entry| {
let entry = entry.unwrap();
let relative = entry
.path()
.strip_prefix(dir.path())
.expect("walkdir starts with root")
.to_str()
.unwrap()
.to_string();
// Translate windows paths back to the unix fixture
relative.replace(MAIN_SEPARATOR, "/")
})
.collect();
assert_eq!(
visited,
[
"",
"path1",
"path1/dir1",
"path2",
"path2/dir2",
"path3",
"path3/dir3",
"path3/dir3/subdir",
"path3/dir3/subdir/a.txt",
"path4",
"path4/dir4",
"path4/dir4/subdir",
"path4/dir4/subdir/a.txt",
"path5"
]
);
}
/// Check that the walkdir yield the correct set of files.
#[test]
fn walk_dir() {
    let tmp = tempdir().unwrap();
    // Materialize the fixture tree on disk.
    for fixture in FILES {
        let path = tmp.path().join(fixture);
        fs_err::create_dir_all(path.parent().unwrap()).unwrap();
        fs_err::File::create(path).unwrap();
    }
    let globs = PATTERNS.map(|pattern| PortableGlobParser::Pep639.parse(pattern).unwrap());
    let include_matcher = GlobDirFilter::from_globs(&globs).unwrap();
    let root = tmp.path();
    // Walk the tree, pruning directories that can't contain matches, and
    // collect every path the matcher accepts.
    let mut collected = Vec::new();
    let walker = WalkDir::new(root)
        .sort_by_file_name()
        .into_iter()
        .filter_entry(|entry| {
            let relative = entry
                .path()
                .strip_prefix(root)
                .expect("walkdir starts with root");
            include_matcher.match_directory(relative)
        });
    for entry in walker {
        let entry = entry.unwrap();
        let relative = entry
            .path()
            .strip_prefix(root)
            .expect("walkdir starts with root");
        if !include_matcher.match_path(relative) {
            continue;
        }
        // Translate windows paths back to the unix fixture
        collected.push(relative.to_str().unwrap().replace(MAIN_SEPARATOR, "/"));
    }
    collected.sort();
    assert_eq!(
        collected,
        [
            "",
            "path1",
            "path1/dir1",
            "path2",
            "path2/dir2",
            "path3",
            "path3/dir3",
            "path3/dir3/subdir",
            "path3/dir3/subdir/a.txt",
            "path4",
            "path4/dir4",
            "path4/dir4/subdir",
            "path4/dir4/subdir/a.txt",
            "path5"
        ]
    );
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-globfilter/src/main.rs | crates/uv-globfilter/src/main.rs | #![allow(clippy::print_stdout)]
use globset::GlobSetBuilder;
use std::env::args;
use tracing::trace;
use uv_globfilter::{GlobDirFilter, PortableGlobParser};
use walkdir::WalkDir;
/// Demo binary: walk the directory given as the first command-line argument
/// and print every path that matches the include globs and is not excluded.
fn main() {
    let includes = ["src/**", "pyproject.toml"];
    let excludes = ["__pycache__", "*.pyc", "*.pyo"];

    // Includes are anchored; compile them into a filter that can also prune
    // whole directories during the walk.
    let mut include_globs = Vec::new();
    for include in includes {
        let glob = PortableGlobParser::Pep639.parse(include).unwrap();
        include_globs.push(glob);
    }
    let include_matcher = GlobDirFilter::from_globs(&include_globs).unwrap();

    let mut exclude_builder = GlobSetBuilder::new();
    for exclude in excludes {
        // Excludes are unanchored unless they start with `/`.
        let exclude = if let Some(exclude) = exclude.strip_prefix("/") {
            exclude.to_string()
        } else {
            format!("**/{exclude}")
        };
        let glob = PortableGlobParser::Pep639.parse(&exclude).unwrap();
        exclude_builder.add(glob);
    }
    // https://github.com/BurntSushi/ripgrep/discussions/2927
    let exclude_matcher = exclude_builder.build().unwrap();

    // BUGFIX: `args().next()` returns the executable path (argv[0]); the root
    // to walk is the first real argument, i.e. `nth(1)`.
    let walkdir_root = args()
        .nth(1)
        .expect("usage: <program> <directory-to-walk>");
    for entry in WalkDir::new(&walkdir_root)
        .sort_by_file_name()
        .into_iter()
        .filter_entry(|entry| {
            // Prune directories that can't contain matches and excluded ones.
            let relative = entry
                .path()
                .strip_prefix(&walkdir_root)
                .expect("walkdir starts with root")
                .to_path_buf();
            include_matcher.match_directory(&relative) && !exclude_matcher.is_match(&relative)
        })
    {
        let entry = entry.unwrap();
        let relative = entry
            .path()
            .strip_prefix(&walkdir_root)
            .expect("walkdir starts with root")
            .to_path_buf();
        if !include_matcher.match_path(&relative) || exclude_matcher.is_match(&relative) {
            trace!("Excluding: `{}`", relative.display());
            continue;
        }
        println!("{}", relative.display());
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-globfilter/src/portable_glob.rs | crates/uv-globfilter/src/portable_glob.rs | //! Cross-language glob syntax from
//! [PEP 639](https://packaging.python.org/en/latest/specifications/glob-patterns/).
use globset::{Glob, GlobBuilder};
use owo_colors::OwoColorize;
use thiserror::Error;
/// Errors returned when validating a glob against the portable (PEP 639) rules.
///
/// Each variant carries the offending glob (and usually the byte position of
/// the problem) so the message can point at the exact failure site.
#[derive(Debug, Error)]
pub enum PortableGlobError {
    /// Shows the failing glob in the error message.
    #[error(transparent)]
    GlobError(#[from] globset::Error),
    /// A `..` segment was found; parent-directory traversal is forbidden.
    #[error(
        "The parent directory operator (`..`) at position {pos} is not allowed in glob: `{glob}`"
    )]
    ParentDirectory { glob: String, pos: usize },
    /// A character outside the portable set, reported in strict PEP 639 mode.
    #[error("Invalid character `{invalid}` at position {pos} in glob: `{glob}`")]
    InvalidCharacter {
        glob: String,
        pos: usize,
        invalid: char,
    },
    /// Same as `InvalidCharacter`, but in uv mode, where a backslash escape is
    /// suggested as a fix.
    #[error(
        "Invalid character `{invalid}` at position {pos} in glob: `{glob}`. {}{} Characters can be escaped with a backslash",
        "hint".bold().cyan(),
        ":".bold()
    )]
    InvalidCharacterUv {
        glob: String,
        pos: usize,
        invalid: char,
    },
    /// A literal backslash appeared in strict PEP 639 mode (only `/` may
    /// separate path segments).
    #[error(
        "Only forward slashes are allowed as path separator, invalid character at position {pos} in glob: `{glob}`"
    )]
    InvalidBackslash { glob: String, pos: usize },
    /// In uv mode, a backslash attempted to escape a path separator.
    #[error(
        "Path separators can't be escaped, invalid character at position {pos} in glob: `{glob}`"
    )]
    InvalidEscapee { glob: String, pos: usize },
    /// A character class (`[...]`) contained a character outside the allowed set.
    #[error("Invalid character `{invalid}` in range at position {pos} in glob: `{glob}`")]
    InvalidCharacterRange {
        glob: String,
        pos: usize,
        invalid: char,
    },
    /// Three or more consecutive `*`, or `**` not immediately followed by `/`.
    // NOTE(review): the message reads oddly ("Too many at stars"); kept
    // verbatim because snapshot tests pin this exact text.
    #[error("Too many at stars at position {pos} in glob: `{glob}`")]
    TooManyStars { glob: String, pos: usize },
    /// The glob ended with a bare `\` in uv mode.
    #[error("Trailing backslash at position {pos} in glob: `{glob}`")]
    TrailingEscape { glob: String, pos: usize },
}
/// Cross-language glob syntax from
/// [PEP 639](https://packaging.python.org/en/latest/specifications/glob-patterns/).
///
/// The variant determines whether the parser strictly adheres to PEP 639 rules or allows extensions
/// such as backslash escapes.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum PortableGlobParser {
    /// Follow the PEP 639 rules strictly.
    Pep639,
    /// In addition to the PEP 639 syntax, allow escaping characters with backslashes.
    ///
    /// For cross-platform compatibility, escaping path separators is not allowed, i.e., forward
    /// slashes and backslashes can't be escaped.
    Uv,
}

impl PortableGlobParser {
    /// Whether `\` acts as an escape character in this mode (uv-only extension).
    fn backslash_escape(self) -> bool {
        match self {
            Self::Pep639 => false,
            Self::Uv => true,
        }
    }

    /// Parse cross-language glob syntax based on [PEP 639](https://packaging.python.org/en/latest/specifications/glob-patterns/):
    ///
    /// - Alphanumeric characters, underscores (`_`), hyphens (`-`) and dots (`.`) are matched verbatim.
    /// - The special glob characters are:
    ///   - `*`: Matches any number of characters except path separators
    ///   - `?`: Matches a single character except the path separator
    ///   - `**`: Matches any number of characters including path separators
    ///   - `[]`, containing only the verbatim matched characters: Matches a single of the characters contained. Within
    ///     `[...]`, the hyphen indicates a locale-agnostic range (e.g. `a-z`, order based on Unicode code points). Hyphens at
    ///     the start or end are matched literally.
    ///   - `\`: Disallowed in PEP 639 mode. In uv mode, it escapes the following character to be matched verbatim.
    /// - The path separator is the forward slash character (`/`). Patterns are relative to the given directory, a leading slash
    ///   character for absolute paths is not supported.
    /// - Parent directory indicators (`..`) are not allowed.
    ///
    /// These rules mean that matching the backslash (`\`) is forbidden, which avoids collisions with the windows path separator.
    pub fn parse(&self, glob: &str) -> Result<Glob, PortableGlobError> {
        // Validate against the portable rules first, then hand the (verbatim)
        // pattern to `globset` for compilation.
        self.check(glob)?;
        Ok(GlobBuilder::new(glob)
            .literal_separator(true)
            // No need to support Windows-style paths, so the backslash can be used as an escape.
            .backslash_escape(self.backslash_escape())
            .build()?)
    }

    /// Validate `glob` against the portable rules without compiling it.
    ///
    /// See [`PortableGlobParser::parse`] for the full syntax description.
    pub fn check(&self, glob: &str) -> Result<(), PortableGlobError> {
        let mut chars = glob.chars().enumerate().peekable();
        // A `..` is on a parent directory indicator at the start of the string or after a directory
        // separator.
        let mut start_or_slash = true;
        while let Some((pos, c)) = chars.next() {
            // `***` or `**literals` can be correctly represented with less stars. They are banned by
            // `glob`, they are allowed by `globset` and PEP 639 is ambiguous, so we're filtering them
            // out.
            if c == '*' {
                // Collapse the run of consecutive stars and validate its length.
                let mut star_run = 1;
                while let Some((_, c)) = chars.peek() {
                    if *c == '*' {
                        star_run += 1;
                        chars.next();
                    } else {
                        break;
                    }
                }
                if star_run >= 3 {
                    return Err(PortableGlobError::TooManyStars {
                        glob: glob.to_string(),
                        // We don't update pos for the stars.
                        pos,
                    });
                } else if star_run == 2 {
                    // `**` must be a whole segment, i.e. followed by `/` or the end.
                    if chars.peek().is_some_and(|(_, c)| *c != '/') {
                        return Err(PortableGlobError::TooManyStars {
                            glob: glob.to_string(),
                            // We don't update pos for the stars.
                            pos,
                        });
                    }
                }
                start_or_slash = false;
            } else if c.is_alphanumeric() || matches!(c, '_' | '-' | '?') {
                start_or_slash = false;
            } else if c == '.' {
                // Reject `..` only where it would be a parent-directory segment.
                if start_or_slash && matches!(chars.peek(), Some((_, '.'))) {
                    return Err(PortableGlobError::ParentDirectory {
                        pos,
                        glob: glob.to_string(),
                    });
                }
                start_or_slash = false;
            } else if c == '/' {
                start_or_slash = true;
            } else if c == '[' {
                // Consume the character class; only verbatim characters are allowed inside.
                for (pos, c) in chars.by_ref() {
                    if c.is_alphanumeric() || matches!(c, '_' | '-' | '.') {
                        // Allowed.
                    } else if c == ']' {
                        break;
                    } else {
                        return Err(PortableGlobError::InvalidCharacterRange {
                            glob: glob.to_string(),
                            pos,
                            invalid: c,
                        });
                    }
                }
                start_or_slash = false;
            } else if c == '\\' {
                match self {
                    Self::Pep639 => {
                        // Strict mode: backslashes are never allowed.
                        return Err(PortableGlobError::InvalidBackslash {
                            glob: glob.to_string(),
                            pos,
                        });
                    }
                    Self::Uv => {
                        // uv mode: `\` escapes the next character, except path separators.
                        match chars.next() {
                            Some((pos, '/' | '\\')) => {
                                // For cross-platform compatibility, we don't allow forward slashes or
                                // backslashes to be escaped.
                                return Err(PortableGlobError::InvalidEscapee {
                                    glob: glob.to_string(),
                                    pos,
                                });
                            }
                            Some(_) => {
                                // Escaped character
                            }
                            None => {
                                return Err(PortableGlobError::TrailingEscape {
                                    glob: glob.to_string(),
                                    pos,
                                });
                            }
                        }
                    }
                }
            } else {
                // Any other character is outside the portable set; uv mode adds
                // a hint about backslash escaping to the message.
                let err = match self {
                    Self::Pep639 => PortableGlobError::InvalidCharacter {
                        glob: glob.to_string(),
                        pos,
                        invalid: c,
                    },
                    Self::Uv => PortableGlobError::InvalidCharacterUv {
                        glob: glob.to_string(),
                        pos,
                        invalid: c,
                    },
                };
                return Err(err);
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use insta::assert_snapshot;

    /// Rejected globs: every error carries the offending position and glob,
    /// pinned via inline insta snapshots.
    #[test]
    fn test_error() {
        // Strip ANSI styling so the snapshots stay plain text.
        let parse_err = |glob| {
            let error = PortableGlobParser::Pep639.parse(glob).unwrap_err();
            anstream::adapter::strip_str(&error.to_string()).to_string()
        };
        assert_snapshot!(
            parse_err(".."),
            @"The parent directory operator (`..`) at position 0 is not allowed in glob: `..`"
        );
        assert_snapshot!(
            parse_err("licenses/.."),
            @"The parent directory operator (`..`) at position 9 is not allowed in glob: `licenses/..`"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN!E.txt"),
            @"Invalid character `!` at position 14 in glob: `licenses/LICEN!E.txt`"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN[!C]E.txt"),
            @"Invalid character `!` in range at position 15 in glob: `licenses/LICEN[!C]E.txt`"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN[C?]E.txt"),
            @"Invalid character `?` in range at position 16 in glob: `licenses/LICEN[C?]E.txt`"
        );
        assert_snapshot!(
            parse_err("******"),
            @"Too many at stars at position 0 in glob: `******`"
        );
        assert_snapshot!(
            parse_err("licenses/**license"),
            @"Too many at stars at position 9 in glob: `licenses/**license`"
        );
        assert_snapshot!(
            parse_err("licenses/***/licenses.csv"),
            @"Too many at stars at position 9 in glob: `licenses/***/licenses.csv`"
        );
        assert_snapshot!(
            parse_err(r"licenses\eula.txt"),
            @r"Only forward slashes are allowed as path separator, invalid character at position 8 in glob: `licenses\eula.txt`"
        );
        assert_snapshot!(
            parse_err(r"**/@test"),
            @"Invalid character `@` at position 3 in glob: `**/@test`"
        );
        // Escapes are not allowed in strict PEP 639 mode
        assert_snapshot!(
            parse_err(r"public domain/Gulliver\\’s Travels.txt"),
            @r"Invalid character ` ` at position 6 in glob: `public domain/Gulliver\\’s Travels.txt`"
        );
        // The uv-mode variants of the same failures add an escaping hint.
        let parse_err_uv = |glob| {
            let error = PortableGlobParser::Uv.parse(glob).unwrap_err();
            anstream::adapter::strip_str(&error.to_string()).to_string()
        };
        assert_snapshot!(
            parse_err_uv(r"**/@test"),
            @"Invalid character `@` at position 3 in glob: `**/@test`. hint: Characters can be escaped with a backslash"
        );
        // Escaping slashes is not allowed.
        assert_snapshot!(
            parse_err_uv(r"licenses\\MIT.txt"),
            @r"Path separators can't be escaped, invalid character at position 9 in glob: `licenses\\MIT.txt`"
        );
        assert_snapshot!(
            parse_err_uv(r"licenses\/MIT.txt"),
            @r"Path separators can't be escaped, invalid character at position 9 in glob: `licenses\/MIT.txt`"
        );
    }

    /// Accepted globs: strict PEP 639 cases, plus uv-only escape cases.
    #[test]
    fn test_valid() {
        let cases = [
            r"licenses/*.txt",
            r"licenses/**/*.txt",
            r"LICEN[CS]E.txt",
            r"LICEN?E.txt",
            r"[a-z].txt",
            r"[a-z._-].txt",
            r"*/**",
            r"LICENSE..txt",
            r"LICENSE_file-1.txt",
            // Non-ASCII filenames (via Google Translate).
            r"licenses/라이센스*.txt",
            r"licenses/ライセンス*.txt",
            r"licenses/执照*.txt",
            r"src/**",
        ];
        // Only valid with backslash escapes enabled (uv mode).
        let cases_uv = [
            r"public-domain/Gulliver\’s\ Travels.txt",
            // https://github.com/astral-sh/uv/issues/13280
            r"**/\@test",
        ];
        for case in cases {
            PortableGlobParser::Pep639.parse(case).unwrap();
        }
        // Everything the strict parser accepts must also pass in uv mode.
        for case in cases.iter().chain(cases_uv.iter()) {
            PortableGlobParser::Uv.parse(case).unwrap();
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache-key/src/lib.rs | crates/uv-cache-key/src/lib.rs | pub use cache_key::{CacheKey, CacheKeyHasher};
pub use canonical_url::{CanonicalUrl, RepositoryUrl};
pub use digest::{cache_digest, hash_digest};
mod cache_key;
mod canonical_url;
mod digest;
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache-key/src/digest.rs | crates/uv-cache-key/src/digest.rs | use std::hash::{Hash, Hasher};
use seahash::SeaHasher;
use crate::cache_key::{CacheKey, CacheKeyHasher};
/// Compute a hex string hash of a `CacheKey` object.
///
/// The value returned by [`cache_digest`] should be stable across releases and platforms.
pub fn cache_digest<H: CacheKey>(hashable: &H) -> String {
    // Feed the value into the stable cache-key hasher and render the 64-bit
    // result as a hex string.
    let mut hasher = CacheKeyHasher::new();
    hashable.cache_key(&mut hasher);
    to_hex(hasher.finish())
}
/// Compute a hex string hash of a hashable object.
pub fn hash_digest<H: Hash>(hashable: &H) -> String {
    // Hash with SeaHasher and render the 64-bit result as a hex string.
    let mut hasher = SeaHasher::new();
    hashable.hash(&mut hasher);
    to_hex(hasher.finish())
}
/// Convert a u64 to a hex string.
///
/// Renders the eight little-endian bytes of `num` as sixteen lowercase hex
/// digits (the same output as `hex::encode(num.to_le_bytes())`).
fn to_hex(num: u64) -> String {
    // Byte-swapping first makes the big-endian `{:x}` formatter emit the
    // bytes in little-endian order.
    format!("{:016x}", num.swap_bytes())
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache-key/src/cache_key.rs | crates/uv-cache-key/src/cache_key.rs | use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet};
use std::hash::{Hash, Hasher};
use std::num::{
NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroU8, NonZeroU16, NonZeroU32,
NonZeroU64, NonZeroU128,
};
use std::path::{Path, PathBuf};
use seahash::SeaHasher;
use url::Url;
/// A trait for types that can be hashed in a stable way across versions and platforms. Equivalent
/// to Ruff's [`CacheKey`] trait.
pub trait CacheKey {
    /// Feed this value into the stable cache-key hasher.
    fn cache_key(&self, state: &mut CacheKeyHasher);

    /// Hash every element of `data` in order, without a length prefix
    /// (callers such as the slice and `Vec` impls write the length themselves).
    fn cache_key_slice(data: &[Self], state: &mut CacheKeyHasher)
    where
        Self: Sized,
    {
        for piece in data {
            piece.cache_key(state);
        }
    }
}
// Primitive impls: each forwards to the corresponding fixed-width
// `CacheKeyHasher::write_*` method so the bytes fed to the hasher are
// well-defined.
//
// NOTE(review): `usize`/`isize` widths are platform-dependent; cross-platform
// stability for these relies on how the inner hasher serializes them — verify.
impl CacheKey for bool {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u8(u8::from(*self));
    }
}

impl CacheKey for char {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // A `char` is hashed as its Unicode scalar value.
        state.write_u32(*self as u32);
    }
}

impl CacheKey for usize {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_usize(*self);
    }
}

impl CacheKey for u128 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u128(*self);
    }
}

impl CacheKey for u64 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u64(*self);
    }
}

impl CacheKey for u32 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u32(*self);
    }
}

impl CacheKey for u16 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u16(*self);
    }
}

impl CacheKey for u8 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_u8(*self);
    }
}

impl CacheKey for isize {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_isize(*self);
    }
}

impl CacheKey for i128 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_i128(*self);
    }
}

impl CacheKey for i64 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_i64(*self);
    }
}

impl CacheKey for i32 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_i32(*self);
    }
}

impl CacheKey for i16 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_i16(*self);
    }
}

impl CacheKey for i8 {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        state.write_i8(*self);
    }
}
/// Implements [`CacheKey`] for a `NonZero*` integer by hashing the underlying
/// primitive value (via `.get()`), so `NonZeroU32` hashes like the equal `u32`.
macro_rules! impl_cache_key_non_zero {
    ($name:ident) => {
        impl CacheKey for $name {
            #[inline]
            fn cache_key(&self, state: &mut CacheKeyHasher) {
                self.get().cache_key(state)
            }
        }
    };
}

impl_cache_key_non_zero!(NonZeroU8);
impl_cache_key_non_zero!(NonZeroU16);
impl_cache_key_non_zero!(NonZeroU32);
impl_cache_key_non_zero!(NonZeroU64);
impl_cache_key_non_zero!(NonZeroU128);
impl_cache_key_non_zero!(NonZeroI8);
impl_cache_key_non_zero!(NonZeroI16);
impl_cache_key_non_zero!(NonZeroI32);
impl_cache_key_non_zero!(NonZeroI64);
impl_cache_key_non_zero!(NonZeroI128);
/// Implements [`CacheKey`] for tuples up to arity 12 by hashing each element
/// in order. The `?Sized` bound on the last element (computed via
/// `last_type!`) mirrors std's `Hash` tuple impls.
macro_rules! impl_cache_key_tuple {
    () => (
        impl CacheKey for () {
            #[inline]
            fn cache_key(&self, _state: &mut CacheKeyHasher) {}
        }
    );

    ( $($name:ident)+) => (
        impl<$($name: CacheKey),+> CacheKey for ($($name,)+) where last_type!($($name,)+): ?Sized {
            #[allow(non_snake_case)]
            #[inline]
            fn cache_key(&self, state: &mut CacheKeyHasher) {
                let ($(ref $name,)+) = *self;
                $($name.cache_key(state);)+
            }
        }
    );
}

/// Expands to the last identifier in a comma-separated list (helper for the
/// tuple macro above).
macro_rules! last_type {
    ($a:ident,) => { $a };
    ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
}

impl_cache_key_tuple! {}
impl_cache_key_tuple! { T }
impl_cache_key_tuple! { T B }
impl_cache_key_tuple! { T B C }
impl_cache_key_tuple! { T B C D }
impl_cache_key_tuple! { T B C D E }
impl_cache_key_tuple! { T B C D E F }
impl_cache_key_tuple! { T B C D E F G }
impl_cache_key_tuple! { T B C D E F G H }
impl_cache_key_tuple! { T B C D E F G H I }
impl_cache_key_tuple! { T B C D E F G H I J }
impl_cache_key_tuple! { T B C D E F G H I J K }
impl_cache_key_tuple! { T B C D E F G H I J K L }
// String- and path-like types delegate to their std `Hash` impls, but driven
// through the `CacheKeyHasher`.
// NOTE(review): this assumes the std `Hash` impls for `str`/`Path` stay stable
// across Rust releases, which the `CacheKey` contract requires — verify.
impl CacheKey for str {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        self.hash(&mut *state);
    }
}

impl CacheKey for String {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        self.hash(&mut *state);
    }
}

impl CacheKey for Path {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        self.hash(&mut *state);
    }
}

impl CacheKey for PathBuf {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        self.as_path().cache_key(state);
    }
}

impl CacheKey for Url {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // Hash the serialized URL string rather than relying on `Url`'s own hashing.
        self.as_str().cache_key(state);
    }
}

impl<T: CacheKey> CacheKey for Option<T> {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // Write a 0/1 discriminant first so `None` and `Some(default)` differ.
        match self {
            None => state.write_usize(0),
            Some(value) => {
                state.write_usize(1);
                value.cache_key(state);
            }
        }
    }
}

impl<T: CacheKey> CacheKey for [T] {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // Length prefix, then each element in order.
        state.write_usize(self.len());
        CacheKey::cache_key_slice(self, state);
    }
}

// References hash like the value they point to.
impl<T: ?Sized + CacheKey> CacheKey for &T {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        (**self).cache_key(state);
    }
}

impl<T: ?Sized + CacheKey> CacheKey for &mut T {
    #[inline]
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        (**self).cache_key(state);
    }
}

impl<T> CacheKey for Vec<T>
where
    T: CacheKey,
{
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // Same scheme as slices: length prefix, then elements in order.
        state.write_usize(self.len());
        CacheKey::cache_key_slice(self, state);
    }
}

impl<V: CacheKey> CacheKey for BTreeSet<V> {
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // BTree iteration order is sorted, so this is deterministic.
        state.write_usize(self.len());
        for item in self {
            item.cache_key(state);
        }
    }
}

impl<K: CacheKey + Ord, V: CacheKey> CacheKey for BTreeMap<K, V> {
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // BTree iteration order is sorted by key, so this is deterministic.
        state.write_usize(self.len());
        for (key, value) in self {
            key.cache_key(state);
            value.cache_key(state);
        }
    }
}

impl<V: ?Sized> CacheKey for Cow<'_, V>
where
    V: CacheKey + ToOwned,
{
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // Borrowed and owned variants hash identically.
        (**self).cache_key(state);
    }
}
/// The hasher used to implement the stable hashing contract of [`CacheKey`];
/// a thin wrapper around [`SeaHasher`].
#[derive(Clone, Default)]
pub struct CacheKeyHasher {
    inner: SeaHasher,
}

impl CacheKeyHasher {
    /// Create a fresh hasher in its initial state.
    pub fn new() -> Self {
        Self {
            inner: SeaHasher::new(),
        }
    }
}
// Pure delegation: every `Hasher` method forwards to the inner `SeaHasher`,
// including the fixed-width integer writes.
impl Hasher for CacheKeyHasher {
    #[inline]
    fn finish(&self) -> u64 {
        self.inner.finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.inner.write(bytes);
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.inner.write_u8(i);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.inner.write_u16(i);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.inner.write_u32(i);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.inner.write_u64(i);
    }

    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.inner.write_u128(i);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.inner.write_usize(i);
    }

    #[inline]
    fn write_i8(&mut self, i: i8) {
        self.inner.write_i8(i);
    }

    #[inline]
    fn write_i16(&mut self, i: i16) {
        self.inner.write_i16(i);
    }

    #[inline]
    fn write_i32(&mut self, i: i32) {
        self.inner.write_i32(i);
    }

    #[inline]
    fn write_i64(&mut self, i: i64) {
        self.inner.write_i64(i);
    }

    #[inline]
    fn write_i128(&mut self, i: i128) {
        self.inner.write_i128(i);
    }

    #[inline]
    fn write_isize(&mut self, i: isize) {
        self.inner.write_isize(i);
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-cache-key/src/canonical_url.rs | crates/uv-cache-key/src/canonical_url.rs | use std::borrow::Cow;
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use url::Url;
use uv_redacted::{DisplaySafeUrl, DisplaySafeUrlError};
use crate::cache_key::{CacheKey, CacheKeyHasher};
/// A wrapper around `Url` which represents a "canonical" version of an original URL.
///
/// A "canonical" url is only intended for internal comparison purposes. It's to help paper over
/// mistakes such as depending on `github.com/foo/bar` vs. `github.com/foo/bar.git`.
///
/// This is **only** for internal purposes and provides no means to actually read the underlying
/// string value of the `Url` it contains. This is intentional, because all fetching should still
/// happen within the context of the original URL.
///
/// Construct via [`CanonicalUrl::new`] or [`CanonicalUrl::parse`].
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct CanonicalUrl(DisplaySafeUrl);
impl CanonicalUrl {
    /// Canonicalize `url` for comparison purposes.
    ///
    /// Applied normalizations, in order: strip credentials, drop a trailing
    /// slash, lower-case `github.com` URLs, drop a `.git` suffix from the
    /// repository path (before or after an `@ref`), and percent-decode path
    /// segments.
    pub fn new(url: &DisplaySafeUrl) -> Self {
        let mut url = url.clone();

        // If the URL cannot be a base, then it's not a valid URL anyway.
        if url.cannot_be_a_base() {
            return Self(url);
        }

        // Strip credentials.
        let _ = url.set_password(None);
        let _ = url.set_username("");

        // Strip a trailing slash.
        if url.path().ends_with('/') {
            url.path_segments_mut().unwrap().pop_if_empty();
        }

        // For GitHub URLs specifically, just lower-case everything. GitHub
        // treats both the same, but they hash differently, and we're gonna be
        // hashing them. This wants a more general solution, and also we're
        // almost certainly not using the same case conversion rules that GitHub
        // does. (See issue #84)
        if url.host_str() == Some("github.com") {
            let scheme = url.scheme().to_lowercase();
            url.set_scheme(&scheme).unwrap();
            let path = url.path().to_lowercase();
            url.set_path(&path);
        }

        // Repos can generally be accessed with or without `.git` extension.
        if let Some((prefix, suffix)) = url.path().rsplit_once('@') {
            // Ex) `git+https://github.com/pypa/sample-namespace-packages.git@2.0.0`
            let needs_chopping = std::path::Path::new(prefix)
                .extension()
                .is_some_and(|ext| ext.eq_ignore_ascii_case("git"));
            if needs_chopping {
                // `.git` is ASCII, so slicing off the last four bytes is safe.
                let prefix = &prefix[..prefix.len() - 4];
                let path = format!("{prefix}@{suffix}");
                url.set_path(&path);
            }
        } else {
            // Ex) `git+https://github.com/pypa/sample-namespace-packages.git`
            let needs_chopping = std::path::Path::new(url.path())
                .extension()
                .is_some_and(|ext| ext.eq_ignore_ascii_case("git"));
            if needs_chopping {
                let last = {
                    // Unwrap safety: We checked `url.cannot_be_a_base()`, and `url.path()` having
                    // an extension implies at least one segment.
                    let last = url.path_segments().unwrap().next_back().unwrap();
                    last[..last.len() - 4].to_owned()
                };
                url.path_segments_mut().unwrap().pop().push(&last);
            }
        }

        // Decode any percent-encoded characters in the path.
        if memchr::memchr(b'%', url.path().as_bytes()).is_some() {
            // Unwrap safety: We checked `url.cannot_be_a_base()`.
            let decoded = url
                .path_segments()
                .unwrap()
                .map(|segment| {
                    // If decoding yields invalid UTF-8, keep the raw segment.
                    percent_encoding::percent_decode_str(segment)
                        .decode_utf8()
                        .unwrap_or(Cow::Borrowed(segment))
                        .into_owned()
                })
                .collect::<Vec<_>>();
            let mut path_segments = url.path_segments_mut().unwrap();
            path_segments.clear();
            path_segments.extend(decoded);
        }

        Self(url)
    }

    /// Parse a URL from a string and canonicalize it.
    pub fn parse(url: &str) -> Result<Self, DisplaySafeUrlError> {
        Ok(Self::new(&DisplaySafeUrl::parse(url)?))
    }
}
impl CacheKey for CanonicalUrl {
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // `as_str` gives the serialisation of a url (which has a spec) and so insulates against
        // possible changes in how the URL crate does hashing.
        self.0.as_str().cache_key(state);
    }
}

impl Hash for CanonicalUrl {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // `as_str` gives the serialisation of a url (which has a spec) and so insulates against
        // possible changes in how the URL crate does hashing.
        self.0.as_str().hash(state);
    }
}

// Recover the underlying (canonicalized) URL.
impl From<CanonicalUrl> for DisplaySafeUrl {
    fn from(value: CanonicalUrl) -> Self {
        value.0
    }
}

impl std::fmt::Display for CanonicalUrl {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}
/// Like [`CanonicalUrl`], but attempts to represent an underlying source repository, abstracting
/// away details like the specific commit or branch, or the subdirectory to build within the
/// repository.
///
/// For example, `https://github.com/pypa/package.git#subdirectory=pkg_a` and
/// `https://github.com/pypa/package.git#subdirectory=pkg_b` would map to different
/// [`CanonicalUrl`] values, but the same [`RepositoryUrl`], since they map to the same
/// resource.
///
/// The additional information it holds should only be used to discriminate between
/// sources that hold the exact same commit in their canonical representation,
/// but may differ in the contents such as when Git LFS is enabled.
///
/// A different cache key will be computed when Git LFS is enabled.
/// When Git LFS is `false` or `None`, the cache key remains unchanged.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct RepositoryUrl {
    /// The canonicalized URL with Git refs, fragments, and queries stripped.
    repo_url: DisplaySafeUrl,
    /// Whether Git LFS is enabled; only `Some(true)` affects hashing.
    with_lfs: Option<bool>,
}
impl RepositoryUrl {
    /// Reduce `url` to the underlying repository: canonicalize it, strip any
    /// Git reference (`@branch`/`@tag`/`@commit`), and drop fragments and
    /// query parameters (e.g. `#subdirectory=...`).
    pub fn new(url: &DisplaySafeUrl) -> Self {
        let mut url = CanonicalUrl::new(url).0;

        // If a Git URL ends in a reference (like a branch, tag, or commit), remove it.
        if url.scheme().starts_with("git+") {
            if let Some(prefix) = url
                .path()
                .rsplit_once('@')
                .map(|(prefix, _suffix)| prefix.to_string())
            {
                url.set_path(&prefix);
            }
        }

        // Drop any fragments and query parameters.
        url.set_fragment(None);
        url.set_query(None);

        Self {
            repo_url: url,
            with_lfs: None,
        }
    }

    /// Parse a repository URL from a string.
    pub fn parse(url: &str) -> Result<Self, DisplaySafeUrlError> {
        Ok(Self::new(&DisplaySafeUrl::parse(url)?))
    }

    /// Record whether Git LFS is enabled for this repository.
    ///
    /// Only `Some(true)` changes the cache key; `Some(false)` and `None` hash
    /// identically (see the type-level docs).
    #[must_use]
    pub fn with_lfs(mut self, lfs: Option<bool>) -> Self {
        self.with_lfs = lfs;
        self
    }
}
impl CacheKey for RepositoryUrl {
    fn cache_key(&self, state: &mut CacheKeyHasher) {
        // `as_str` gives the serialisation of a url (which has a spec) and so insulates against
        // possible changes in how the URL crate does hashing.
        self.repo_url.as_str().cache_key(state);
        // Only `Some(true)` perturbs the key, so `Some(false)` and `None`
        // hash identically (see the type-level docs).
        if let Some(true) = self.with_lfs {
            1u8.cache_key(state);
        }
    }
}

impl Hash for RepositoryUrl {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // `as_str` gives the serialisation of a url (which has a spec) and so insulates against
        // possible changes in how the URL crate does hashing.
        self.repo_url.as_str().hash(state);
        // Mirror the `CacheKey` impl: only `Some(true)` perturbs the hash.
        if let Some(true) = self.with_lfs {
            1u8.hash(state);
        }
    }
}

// Allow read-only access to the underlying `Url` API.
impl Deref for RepositoryUrl {
    type Target = Url;

    fn deref(&self) -> &Self::Target {
        &self.repo_url
    }
}

impl std::fmt::Display for RepositoryUrl {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.repo_url, f)
    }
}
#[cfg(test)]
mod tests {
use super::*;
// URL credentials must not affect the cache key: canonicalization strips the
// username and password before hashing.
#[test]
fn user_credential_does_not_affect_cache_key() -> Result<(), DisplaySafeUrlError> {
    // Baseline: no credentials at all.
    let mut hasher = CacheKeyHasher::new();
    CanonicalUrl::parse("https://example.com/pypa/sample-namespace-packages.git@2.0.0")?
        .cache_key(&mut hasher);
    let hash_without_creds = hasher.finish();

    let mut hasher = CacheKeyHasher::new();
    CanonicalUrl::parse(
        "https://user:foo@example.com/pypa/sample-namespace-packages.git@2.0.0",
    )?
    .cache_key(&mut hasher);
    let hash_with_creds = hasher.finish();
    assert_eq!(
        hash_without_creds, hash_with_creds,
        "URLs with no user credentials should hash the same as URLs with different user credentials",
    );

    let mut hasher = CacheKeyHasher::new();
    CanonicalUrl::parse(
        "https://user:bar@example.com/pypa/sample-namespace-packages.git@2.0.0",
    )?
    .cache_key(&mut hasher);
    let hash_with_creds = hasher.finish();
    assert_eq!(
        hash_without_creds, hash_with_creds,
        "URLs with different user credentials should hash the same",
    );

    // Password only, no username.
    let mut hasher = CacheKeyHasher::new();
    CanonicalUrl::parse("https://:bar@example.com/pypa/sample-namespace-packages.git@2.0.0")?
        .cache_key(&mut hasher);
    let hash_with_creds = hasher.finish();
    assert_eq!(
        hash_without_creds, hash_with_creds,
        "URLs with no username, though with a password, should hash the same as URLs with different user credentials",
    );

    // Username only, no password.
    let mut hasher = CacheKeyHasher::new();
    CanonicalUrl::parse("https://user:@example.com/pypa/sample-namespace-packages.git@2.0.0")?
        .cache_key(&mut hasher);
    let hash_with_creds = hasher.finish();
    assert_eq!(
        hash_without_creds, hash_with_creds,
        "URLs with no password, though with a username, should hash the same as URLs with different user credentials",
    );

    Ok(())
}
// Equality semantics of `CanonicalUrl`: `.git` suffixes and percent-encoding
// are normalized away, while refs, subdirectories, LFS flags, and encoded
// slashes remain significant.
#[test]
fn canonical_url() -> Result<(), DisplaySafeUrlError> {
    // Two URLs should be considered equal regardless of the `.git` suffix.
    assert_eq!(
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages")?,
    );

    // Two URLs should be considered equal regardless of the `.git` suffix.
    assert_eq!(
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git@2.0.0")?,
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages@2.0.0")?,
    );

    // Two URLs should be _not_ considered equal if they point to different repositories.
    assert_ne!(
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
        CanonicalUrl::parse("git+https://github.com/pypa/sample-packages.git")?,
    );

    // Two URLs should _not_ be considered equal if they request different subdirectories.
    assert_ne!(
        CanonicalUrl::parse(
            "git+https://github.com/pypa/sample-namespace-packages.git#subdirectory=pkg_resources/pkg_a"
        )?,
        CanonicalUrl::parse(
            "git+https://github.com/pypa/sample-namespace-packages.git#subdirectory=pkg_resources/pkg_b"
        )?,
    );

    // Two URLs should _not_ be considered equal if they differ in Git LFS enablement.
    assert_ne!(
        CanonicalUrl::parse(
            "git+https://github.com/pypa/sample-namespace-packages.git#lfs=true"
        )?,
        CanonicalUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
    );

    // Two URLs should _not_ be considered equal if they request different commit tags.
    assert_ne!(
        CanonicalUrl::parse(
            "git+https://github.com/pypa/sample-namespace-packages.git@v1.0.0"
        )?,
        CanonicalUrl::parse(
            "git+https://github.com/pypa/sample-namespace-packages.git@v2.0.0"
        )?,
    );

    // Two URLs that cannot be a base should be considered equal.
    assert_eq!(
        CanonicalUrl::parse("git+https:://github.com/pypa/sample-namespace-packages.git")?,
        CanonicalUrl::parse("git+https:://github.com/pypa/sample-namespace-packages.git")?,
    );

    // Two URLs should _not_ be considered equal based on percent-decoding slashes.
    assert_ne!(
        CanonicalUrl::parse("https://github.com/pypa/sample%2Fnamespace%2Fpackages")?,
        CanonicalUrl::parse("https://github.com/pypa/sample/namespace/packages")?,
    );

    // Two URLs should be considered equal regardless of percent-encoding.
    assert_eq!(
        CanonicalUrl::parse("https://github.com/pypa/sample%2Bnamespace%2Bpackages")?,
        CanonicalUrl::parse("https://github.com/pypa/sample+namespace+packages")?,
    );

    // Two URLs should _not_ be considered equal based on percent-decoding slashes.
    assert_ne!(
        CanonicalUrl::parse(
            "file:///home/ferris/my_project%2Fmy_project-0.1.0-py3-none-any.whl"
        )?,
        CanonicalUrl::parse(
            "file:///home/ferris/my_project/my_project-0.1.0-py3-none-any.whl"
        )?,
    );

    // Two URLs should be considered equal regardless of percent-encoding.
    assert_eq!(
        CanonicalUrl::parse(
            "file:///home/ferris/my_project/my_project-0.1.0+foo-py3-none-any.whl"
        )?,
        CanonicalUrl::parse(
            "file:///home/ferris/my_project/my_project-0.1.0%2Bfoo-py3-none-any.whl"
        )?,
    );

    Ok(())
}
#[test]
fn repository_url() -> Result<(), DisplaySafeUrlError> {
// Two URLs should be considered equal regardless of the `.git` suffix.
assert_eq!(
RepositoryUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
RepositoryUrl::parse("git+https://github.com/pypa/sample-namespace-packages")?,
);
// Two URLs should be considered equal regardless of the `.git` suffix.
assert_eq!(
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git@2.0.0"
)?,
RepositoryUrl::parse("git+https://github.com/pypa/sample-namespace-packages@2.0.0")?,
);
// Two URLs should be _not_ considered equal if they point to different repositories.
assert_ne!(
RepositoryUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
RepositoryUrl::parse("git+https://github.com/pypa/sample-packages.git")?,
);
// Two URLs should be considered equal if they map to the same repository, even if they
// request different subdirectories.
assert_eq!(
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git#subdirectory=pkg_resources/pkg_a"
)?,
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git#subdirectory=pkg_resources/pkg_b"
)?,
);
// Two URLs should be considered equal if they map to the same repository, even if they
// request different commit tags.
assert_eq!(
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git@v1.0.0"
)?,
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git@v2.0.0"
)?,
);
// Two URLs should be considered equal if they map to the same repository, even if they
// differ in Git LFS enablement.
assert_eq!(
RepositoryUrl::parse(
"git+https://github.com/pypa/sample-namespace-packages.git#lfs=true"
)?,
RepositoryUrl::parse("git+https://github.com/pypa/sample-namespace-packages.git")?,
);
Ok(())
}
#[test]
fn repository_url_with_lfs() -> Result<(), DisplaySafeUrlError> {
let mut hasher = CacheKeyHasher::new();
RepositoryUrl::parse("https://example.com/pypa/sample-namespace-packages.git@2.0.0")?
.cache_key(&mut hasher);
let repo_url_basic = hasher.finish();
let mut hasher = CacheKeyHasher::new();
RepositoryUrl::parse(
"https://user:foo@example.com/pypa/sample-namespace-packages.git@2.0.0#foo=bar",
)?
.cache_key(&mut hasher);
let repo_url_with_fragments = hasher.finish();
assert_eq!(
repo_url_basic, repo_url_with_fragments,
"repository urls should have the exact cache keys as fragments are removed",
);
let mut hasher = CacheKeyHasher::new();
RepositoryUrl::parse(
"https://user:foo@example.com/pypa/sample-namespace-packages.git@2.0.0#foo=bar",
)?
.with_lfs(None)
.cache_key(&mut hasher);
let git_url_with_fragments = hasher.finish();
assert_eq!(
repo_url_with_fragments, git_url_with_fragments,
"both structs should have the exact cache keys as fragments are still removed",
);
let mut hasher = CacheKeyHasher::new();
RepositoryUrl::parse(
"https://user:foo@example.com/pypa/sample-namespace-packages.git@2.0.0#foo=bar",
)?
.with_lfs(Some(false))
.cache_key(&mut hasher);
let git_url_with_fragments_and_lfs_false = hasher.finish();
assert_eq!(
git_url_with_fragments, git_url_with_fragments_and_lfs_false,
"both structs should have the exact cache keys as lfs false should not influence them",
);
let mut hasher = CacheKeyHasher::new();
RepositoryUrl::parse(
"https://user:foo@example.com/pypa/sample-namespace-packages.git@2.0.0#foo=bar",
)?
.with_lfs(Some(true))
.cache_key(&mut hasher);
let git_url_with_fragments_and_lfs_true = hasher.finish();
assert_ne!(
git_url_with_fragments, git_url_with_fragments_and_lfs_true,
"both structs should have different cache keys as one has Git LFS enabled",
);
Ok(())
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-virtualenv/src/lib.rs | crates/uv-virtualenv/src/lib.rs | use std::io;
use std::path::Path;
use thiserror::Error;
use uv_preview::Preview;
use uv_python::{Interpreter, PythonEnvironment};
pub use virtualenv::{OnExisting, RemovalReason, remove_virtualenv};
mod virtualenv;
#[derive(Debug, Error)]
pub enum Error {
    /// An underlying I/O failure while creating or removing the environment.
    #[error(transparent)]
    Io(#[from] io::Error),
    /// No usable base Python executable could be derived from the interpreter.
    #[error(
        "Could not find a suitable Python executable for the virtual environment based on the interpreter: {0}"
    )]
    NotFound(String),
    /// An error from the managed-Python machinery (e.g., creating launcher links).
    #[error(transparent)]
    Python(#[from] uv_python::managed::Error),
}
/// The value to use for the shell prompt when inside a virtual environment.
///
/// Derived from the command line via [`Prompt::from_args`].
#[derive(Debug)]
pub enum Prompt {
    /// Use the current directory name as the prompt.
    CurrentDirectoryName,
    /// Use the fixed string as the prompt.
    Static(String),
    /// Default to no prompt. The prompt is then set by the activator script
    /// to the virtual environment's directory name.
    None,
}
impl Prompt {
    /// Determine the prompt value to be used from the command line arguments.
    ///
    /// A literal `"."` selects the current directory name; any other string is
    /// used verbatim; no argument means no explicit prompt.
    pub fn from_args(prompt: Option<String>) -> Self {
        let Some(value) = prompt else {
            return Self::None;
        };
        if value == "." {
            Self::CurrentDirectoryName
        } else {
            Self::Static(value)
        }
    }
}
/// Create a virtualenv.
///
/// Builds the virtual environment on disk at `location` using `interpreter` as
/// the base Python, then returns a [`PythonEnvironment`] wrapping the new
/// environment.
///
/// * `prompt` — the shell prompt to embed in the activation scripts.
/// * `system_site_packages` — recorded as `include-system-site-packages` in `pyvenv.cfg`.
/// * `on_existing` — how to handle a pre-existing directory at `location`.
/// * `relocatable` — whether to generate relocatable activation scripts.
/// * `seed` — recorded as `seed = true` in `pyvenv.cfg`.
/// * `upgradeable` — whether to link through a minor-version directory so a
///   managed interpreter can be patch-upgraded transparently.
#[allow(clippy::fn_params_excessive_bools)]
pub fn create_venv(
    location: &Path,
    interpreter: Interpreter,
    prompt: Prompt,
    system_site_packages: bool,
    on_existing: OnExisting,
    relocatable: bool,
    seed: bool,
    upgradeable: bool,
    preview: Preview,
) -> Result<PythonEnvironment, Error> {
    // Create the virtualenv at the given location.
    let virtualenv = virtualenv::create(
        location,
        &interpreter,
        prompt,
        system_site_packages,
        on_existing,
        relocatable,
        seed,
        upgradeable,
        preview,
    )?;
    // Create the corresponding `PythonEnvironment`.
    let interpreter = interpreter.with_virtualenv(virtualenv);
    Ok(PythonEnvironment::from_interpreter(interpreter))
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-virtualenv/src/virtualenv.rs | crates/uv-virtualenv/src/virtualenv.rs | //! Create a virtual environment.
use std::env::consts::EXE_SUFFIX;
use std::io;
use std::io::{BufWriter, Write};
use std::path::Path;
use console::Term;
use fs_err::File;
use itertools::Itertools;
use owo_colors::OwoColorize;
use tracing::{debug, trace};
use uv_fs::{CWD, Simplified, cachedir};
use uv_preview::Preview;
use uv_pypi_types::Scheme;
use uv_python::managed::{PythonMinorVersionLink, create_link_to_executable};
use uv_python::{Interpreter, VirtualEnvironment};
use uv_shell::escape_posix_for_single_quotes;
use uv_version::version;
use uv_warnings::warn_user_once;
use crate::{Error, Prompt};
/// Activation scripts for the environment, with dependent paths templated out.
///
/// Each entry is `(file name in the scripts directory, template contents)`;
/// the `{{ ... }}` placeholders are substituted when the environment is
/// created.
const ACTIVATE_TEMPLATES: &[(&str, &str)] = &[
    ("activate", include_str!("activator/activate")),
    ("activate.csh", include_str!("activator/activate.csh")),
    ("activate.fish", include_str!("activator/activate.fish")),
    ("activate.nu", include_str!("activator/activate.nu")),
    ("activate.ps1", include_str!("activator/activate.ps1")),
    ("activate.bat", include_str!("activator/activate.bat")),
    ("deactivate.bat", include_str!("activator/deactivate.bat")),
    ("pydoc.bat", include_str!("activator/pydoc.bat")),
    (
        "activate_this.py",
        include_str!("activator/activate_this.py"),
    ),
];

/// Contents of the `_virtualenv.py` patch installed into `site-packages`.
const VIRTUALENV_PATCH: &str = include_str!("_virtualenv.py");
/// Minimal `.cfg`-style serializer: writes one `key = value` line per entry,
/// in the order given.
fn write_cfg(f: &mut impl Write, data: &[(String, String)]) -> io::Result<()> {
    data.iter()
        .try_for_each(|(key, value)| writeln!(f, "{key} = {value}"))
}
/// Create a [`VirtualEnvironment`] at the given location.
///
/// The construction proceeds in phases: validate (and, depending on
/// `on_existing`, clear) the target directory; link or copy the Python
/// executables into the scripts directory; render the activation scripts;
/// write `pyvenv.cfg`; and create `site-packages` with the `_virtualenv`
/// patch installed.
#[allow(clippy::fn_params_excessive_bools)]
pub(crate) fn create(
    location: &Path,
    interpreter: &Interpreter,
    prompt: Prompt,
    system_site_packages: bool,
    on_existing: OnExisting,
    relocatable: bool,
    seed: bool,
    upgradeable: bool,
    preview: Preview,
) -> Result<VirtualEnvironment, Error> {
    // Determine the base Python executable; that is, the Python executable that should be
    // considered the "base" for the virtual environment.
    //
    // For consistency with the standard library, rely on `sys._base_executable`, _unless_ we're
    // using a uv-managed Python (in which case, we can do better for symlinked executables).
    let base_python = if cfg!(unix) && interpreter.is_standalone() {
        interpreter.find_base_python()?
    } else {
        interpreter.to_base_python()?
    };
    debug!(
        "Using base executable for virtual environment: {}",
        base_python.display()
    );
    // Extract the prompt and compute the absolute path prior to validating the location; otherwise,
    // we risk deleting (and recreating) the current working directory, which would cause the `CWD`
    // queries to fail.
    let prompt = match prompt {
        Prompt::CurrentDirectoryName => CWD
            .file_name()
            .map(|name| name.to_string_lossy().to_string()),
        Prompt::Static(value) => Some(value),
        Prompt::None => None,
    };
    let absolute = std::path::absolute(location)?;
    // Validate the existing location.
    match location.metadata() {
        Ok(metadata) if metadata.is_file() => {
            return Err(Error::Io(io::Error::new(
                io::ErrorKind::AlreadyExists,
                format!("File exists at `{}`", location.user_display()),
            )));
        }
        Ok(metadata)
            if metadata.is_dir()
                && location
                    .read_dir()
                    .is_ok_and(|mut dir| dir.next().is_none()) =>
        {
            // If it's an empty directory, we can proceed
            trace!(
                "Using empty directory at `{}` for virtual environment",
                location.user_display()
            );
        }
        Ok(metadata) if metadata.is_dir() => {
            // Non-empty directory: behavior depends on `on_existing` and on whether the
            // directory already looks like a virtual environment.
            let is_virtualenv = uv_fs::is_virtualenv_base(location);
            let name = if is_virtualenv {
                "virtual environment"
            } else {
                "directory"
            };
            let hint = format!(
                "Use the `{}` flag or set `{}` to replace the existing {name}",
                "--clear".green(),
                "UV_VENV_CLEAR=1".green()
            );
            // TODO(zanieb): We may want to consider omitting the hint in some of these cases, e.g.,
            // when `--no-clear` is used do we want to suggest `--clear`?
            let err = Err(Error::Io(io::Error::new(
                io::ErrorKind::AlreadyExists,
                format!(
                    "A {name} already exists at: {}\n\n{}{} {hint}",
                    location.user_display(),
                    "hint".bold().cyan(),
                    ":".bold(),
                ),
            )));
            match on_existing {
                OnExisting::Allow => {
                    debug!("Allowing existing {name} due to `--allow-existing`");
                }
                OnExisting::Remove(reason) => {
                    debug!("Removing existing {name} ({reason})");
                    // Before removing the virtual environment, we need to canonicalize the path
                    // because `Path::metadata` will follow the symlink but we're still operating on
                    // the unresolved path and will remove the symlink itself.
                    let location = location
                        .canonicalize()
                        .unwrap_or_else(|_| location.to_path_buf());
                    remove_virtualenv(&location)?;
                    fs_err::create_dir_all(&location)?;
                }
                OnExisting::Fail => return err,
                // If not a virtual environment, fail without prompting.
                OnExisting::Prompt if !is_virtualenv => return err,
                OnExisting::Prompt => {
                    match confirm_clear(location, name)? {
                        Some(true) => {
                            debug!("Removing existing {name} due to confirmation");
                            // Before removing the virtual environment, we need to canonicalize the
                            // path because `Path::metadata` will follow the symlink but we're still
                            // operating on the unresolved path and will remove the symlink itself.
                            let location = location
                                .canonicalize()
                                .unwrap_or_else(|_| location.to_path_buf());
                            remove_virtualenv(&location)?;
                            fs_err::create_dir_all(&location)?;
                        }
                        Some(false) => return err,
                        // When we don't have a TTY, warn that the behavior will change in the future
                        None => {
                            warn_user_once!(
                                "A {name} already exists at `{}`. In the future, uv will require `{}` to replace it",
                                location.user_display(),
                                "--clear".green(),
                            );
                        }
                    }
                }
            }
        }
        Ok(_) => {
            // It's not a file or a directory
            return Err(Error::Io(io::Error::new(
                io::ErrorKind::AlreadyExists,
                format!("Object already exists at `{}`", location.user_display()),
            )));
        }
        Err(err) if err.kind() == io::ErrorKind::NotFound => {
            fs_err::create_dir_all(location)?;
        }
        Err(err) => return Err(Error::Io(err)),
    }
    // Use the absolute path for all further operations.
    let location = absolute;
    let bin_name = if cfg!(unix) {
        "bin"
    } else if cfg!(windows) {
        "Scripts"
    } else {
        unimplemented!("Only Windows and Unix are supported")
    };
    let scripts = location.join(&interpreter.virtualenv().scripts);
    // Add the CACHEDIR.TAG.
    cachedir::ensure_tag(&location)?;
    // Create a `.gitignore` file to ignore all files in the venv.
    fs_err::write(location.join(".gitignore"), "*")?;
    // Decide which executable the venv's `python` should point at. For upgradeable,
    // standalone interpreters, prefer the minor-version link so the environment
    // tracks transparent patch upgrades.
    let mut using_minor_version_link = false;
    let executable_target = if upgradeable && interpreter.is_standalone() {
        if let Some(minor_version_link) = PythonMinorVersionLink::from_executable(
            base_python.as_path(),
            &interpreter.key(),
            preview,
        ) {
            if !minor_version_link.exists() {
                base_python.clone()
            } else {
                let debug_symlink_term = if cfg!(windows) {
                    "junction"
                } else {
                    "symlink directory"
                };
                debug!(
                    "Using {} {} instead of base Python path: {}",
                    debug_symlink_term,
                    &minor_version_link.symlink_directory.display(),
                    &base_python.display()
                );
                using_minor_version_link = true;
                minor_version_link.symlink_executable.clone()
            }
        } else {
            base_python.clone()
        }
    } else {
        base_python.clone()
    };
    // Per PEP 405, the Python `home` is the parent directory of the interpreter.
    // In preview mode, for standalone interpreters, this `home` value will include a
    // symlink directory on Unix or junction on Windows to enable transparent Python patch
    // upgrades.
    let python_home = executable_target
        .parent()
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::NotFound,
                "The Python interpreter needs to have a parent directory",
            )
        })?
        .to_path_buf();
    let python_home = python_home.as_path();
    // Different names for the python interpreter
    fs_err::create_dir_all(&scripts)?;
    let executable = scripts.join(format!("python{EXE_SUFFIX}"));
    // On Unix, the versioned names are relative symlinks to `python`, which itself
    // points at the executable target.
    #[cfg(unix)]
    {
        uv_fs::replace_symlink(&executable_target, &executable)?;
        uv_fs::replace_symlink(
            "python",
            scripts.join(format!("python{}", interpreter.python_major())),
        )?;
        uv_fs::replace_symlink(
            "python",
            scripts.join(format!(
                "python{}.{}",
                interpreter.python_major(),
                interpreter.python_minor(),
            )),
        )?;
        if interpreter.gil_disabled() {
            uv_fs::replace_symlink(
                "python",
                scripts.join(format!(
                    "python{}.{}t",
                    interpreter.python_major(),
                    interpreter.python_minor(),
                )),
            )?;
        }
        if interpreter.markers().implementation_name() == "pypy" {
            uv_fs::replace_symlink(
                "python",
                scripts.join(format!("pypy{}", interpreter.python_major())),
            )?;
            uv_fs::replace_symlink("python", scripts.join("pypy"))?;
        }
        if interpreter.markers().implementation_name() == "graalpy" {
            uv_fs::replace_symlink("python", scripts.join("graalpy"))?;
        }
    }
    // On Windows, we use trampolines that point to an executable target. For standalone
    // interpreters, this target path includes a minor version junction to enable
    // transparent upgrades.
    if cfg!(windows) {
        if using_minor_version_link {
            let target = scripts.join(WindowsExecutable::Python.exe(interpreter));
            create_link_to_executable(target.as_path(), &executable_target)
                .map_err(Error::Python)?;
            let targetw = scripts.join(WindowsExecutable::Pythonw.exe(interpreter));
            create_link_to_executable(targetw.as_path(), &executable_target)
                .map_err(Error::Python)?;
            if interpreter.gil_disabled() {
                let targett = scripts.join(WindowsExecutable::PythonMajorMinort.exe(interpreter));
                create_link_to_executable(targett.as_path(), &executable_target)
                    .map_err(Error::Python)?;
                let targetwt = scripts.join(WindowsExecutable::PythonwMajorMinort.exe(interpreter));
                create_link_to_executable(targetwt.as_path(), &executable_target)
                    .map_err(Error::Python)?;
            }
        } else {
            // Always copy `python.exe`.
            copy_launcher_windows(
                WindowsExecutable::Python,
                interpreter,
                &base_python,
                &scripts,
                python_home,
            )?;
            match interpreter.implementation_name() {
                "graalpy" => {
                    // For GraalPy, copy `graalpy.exe` and `python3.exe`.
                    copy_launcher_windows(
                        WindowsExecutable::GraalPy,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PythonMajor,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                }
                "pypy" => {
                    // For PyPy, copy all versioned executables and all PyPy-specific executables.
                    copy_launcher_windows(
                        WindowsExecutable::PythonMajor,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PythonMajorMinor,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::Pythonw,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PyPy,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PyPyMajor,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PyPyMajorMinor,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PyPyw,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    copy_launcher_windows(
                        WindowsExecutable::PyPyMajorMinorw,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                }
                _ => {
                    // For all other interpreters, copy `pythonw.exe`.
                    copy_launcher_windows(
                        WindowsExecutable::Pythonw,
                        interpreter,
                        &base_python,
                        &scripts,
                        python_home,
                    )?;
                    // If the GIL is disabled, copy `venvlaunchert.exe` and `venvwlaunchert.exe`.
                    if interpreter.gil_disabled() {
                        copy_launcher_windows(
                            WindowsExecutable::PythonMajorMinort,
                            interpreter,
                            &base_python,
                            &scripts,
                            python_home,
                        )?;
                        copy_launcher_windows(
                            WindowsExecutable::PythonwMajorMinort,
                            interpreter,
                            &base_python,
                            &scripts,
                            python_home,
                        )?;
                    }
                }
            }
        }
    }
    #[cfg(not(any(unix, windows)))]
    {
        compile_error!("Only Windows and Unix are supported")
    }
    // Add all the activate scripts for different shells
    for (name, template) in ACTIVATE_TEMPLATES {
        let path_sep = if cfg!(windows) { ";" } else { ":" };
        let relative_site_packages = [
            interpreter.virtualenv().purelib.as_path(),
            interpreter.virtualenv().platlib.as_path(),
        ]
        .iter()
        .dedup()
        .map(|path| {
            pathdiff::diff_paths(path, &interpreter.virtualenv().scripts)
                .expect("Failed to calculate relative path to site-packages")
        })
        .map(|path| path.simplified().to_str().unwrap().replace('\\', "\\\\"))
        .join(path_sep);
        // The VIRTUAL_ENV value differs per shell and depends on whether the
        // script must locate the environment relative to itself (relocatable).
        let virtual_env_dir = match (relocatable, name.to_owned()) {
            (true, "activate") => {
                r#"'"$(dirname -- "$(dirname -- "$(realpath -- "$SCRIPT_PATH")")")"'"#.to_string()
            }
            (true, "activate.bat") => r"%~dp0..".to_string(),
            (true, "activate.fish") => {
                r#"'"$(dirname -- "$(cd "$(dirname -- "$(status -f)")"; and pwd)")"'"#.to_string()
            }
            (true, "activate.nu") => r"(path self | path dirname | path dirname)".to_string(),
            (false, "activate.nu") => {
                format!(
                    "'{}'",
                    escape_posix_for_single_quotes(location.simplified().to_str().unwrap())
                )
            }
            // Note:
            // * relocatable activate scripts appear not to be possible in csh.
            // * `activate.ps1` is already relocatable by default.
            _ => escape_posix_for_single_quotes(location.simplified().to_str().unwrap()),
        };
        let activator = template
            .replace("{{ VIRTUAL_ENV_DIR }}", &virtual_env_dir)
            .replace("{{ BIN_NAME }}", bin_name)
            .replace(
                "{{ VIRTUAL_PROMPT }}",
                prompt.as_deref().unwrap_or_default(),
            )
            .replace("{{ PATH_SEP }}", path_sep)
            .replace("{{ RELATIVE_SITE_PACKAGES }}", &relative_site_packages);
        fs_err::write(scripts.join(name), activator)?;
    }
    // Assemble the key/value pairs written to `pyvenv.cfg`.
    let mut pyvenv_cfg_data: Vec<(String, String)> = vec![
        (
            "home".to_string(),
            python_home.simplified_display().to_string(),
        ),
        (
            "implementation".to_string(),
            interpreter
                .markers()
                .platform_python_implementation()
                .to_string(),
        ),
        ("uv".to_string(), version().to_string()),
        (
            "version_info".to_string(),
            interpreter.markers().python_full_version().string.clone(),
        ),
        (
            "include-system-site-packages".to_string(),
            if system_site_packages {
                "true".to_string()
            } else {
                "false".to_string()
            },
        ),
    ];
    if relocatable {
        pyvenv_cfg_data.push(("relocatable".to_string(), "true".to_string()));
    }
    if seed {
        pyvenv_cfg_data.push(("seed".to_string(), "true".to_string()));
    }
    if let Some(prompt) = prompt {
        pyvenv_cfg_data.push(("prompt".to_string(), prompt));
    }
    if cfg!(windows) && interpreter.markers().implementation_name() == "graalpy" {
        pyvenv_cfg_data.push((
            "venvlauncher_command".to_string(),
            python_home
                .join("graalpy.exe")
                .simplified_display()
                .to_string(),
        ));
    }
    let mut pyvenv_cfg = BufWriter::new(File::create(location.join("pyvenv.cfg"))?);
    write_cfg(&mut pyvenv_cfg, &pyvenv_cfg_data)?;
    drop(pyvenv_cfg);
    // Construct the path to the `site-packages` directory.
    let site_packages = location.join(&interpreter.virtualenv().purelib);
    fs_err::create_dir_all(&site_packages)?;
    // If necessary, create a symlink from `lib64` to `lib`.
    // See: https://github.com/python/cpython/blob/b228655c227b2ca298a8ffac44d14ce3d22f6faa/Lib/venv/__init__.py#L135C11-L135C16
    #[cfg(unix)]
    if interpreter.pointer_size().is_64()
        && interpreter.markers().os_name() == "posix"
        && interpreter.markers().sys_platform() != "darwin"
    {
        match fs_err::os::unix::fs::symlink("lib", location.join("lib64")) {
            Ok(()) => {}
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => {}
            Err(err) => {
                return Err(err.into());
            }
        }
    }
    // Populate `site-packages` with a `_virtualenv.py` file.
    fs_err::write(site_packages.join("_virtualenv.py"), VIRTUALENV_PATCH)?;
    fs_err::write(site_packages.join("_virtualenv.pth"), "import _virtualenv")?;
    Ok(VirtualEnvironment {
        scheme: Scheme {
            purelib: location.join(&interpreter.virtualenv().purelib),
            platlib: location.join(&interpreter.virtualenv().platlib),
            scripts: location.join(&interpreter.virtualenv().scripts),
            data: location.join(&interpreter.virtualenv().data),
            include: location.join(&interpreter.virtualenv().include),
        },
        root: location,
        executable,
        base_executable: base_python,
    })
}
/// Prompt a confirmation that the virtual environment should be cleared.
///
/// If not a TTY, returns `None`.
fn confirm_clear(location: &Path, name: &'static str) -> Result<Option<bool>, io::Error> {
    let term = Term::stderr();
    // Without an interactive terminal, there is nobody to ask.
    if !term.is_term() {
        return Ok(None);
    }
    let prompt = format!(
        "A {name} already exists at `{}`. Do you want to replace it?",
        location.user_display(),
    );
    let hint = format!(
        "Use the `{}` flag or set `{}` to skip this prompt",
        "--clear".green(),
        "UV_VENV_CLEAR=1".green()
    );
    let confirmed = uv_console::confirm_with_hint(&prompt, &hint, &term, true)?;
    Ok(Some(confirmed))
}
/// Perform a safe removal of a virtual environment.
///
/// Contents are removed before the `pyvenv.cfg` marker so that a partially
/// removed directory can still be identified as a virtual environment; a
/// missing or busy directory is tolerated rather than treated as fatal.
pub fn remove_virtualenv(location: &Path) -> Result<(), Error> {
    // On Windows, if the current executable is in the directory, defer self-deletion since Windows
    // won't let you unlink a running executable.
    #[cfg(windows)]
    if let Ok(itself) = std::env::current_exe() {
        let target = std::path::absolute(location)?;
        if itself.starts_with(&target) {
            debug!("Detected self-delete of executable: {}", itself.display());
            self_replace::self_delete_outside_path(location)?;
        }
    }
    // We defer removal of the `pyvenv.cfg` until the end, so if we fail to remove the environment,
    // uv can still identify it as a Python virtual environment that can be deleted.
    for entry in fs_err::read_dir(location)? {
        let entry = entry?;
        let path = entry.path();
        if path == location.join("pyvenv.cfg") {
            continue;
        }
        if path.is_dir() {
            fs_err::remove_dir_all(&path)?;
        } else {
            fs_err::remove_file(&path)?;
        }
    }
    // Remove the marker file last; tolerate it being already gone.
    match fs_err::remove_file(location.join("pyvenv.cfg")) {
        Ok(()) => {}
        Err(err) if err.kind() == io::ErrorKind::NotFound => {}
        Err(err) => return Err(err.into()),
    }
    // Remove the virtual environment directory itself
    match fs_err::remove_dir_all(location) {
        Ok(()) => {}
        Err(err) if err.kind() == io::ErrorKind::NotFound => {}
        // If the virtual environment is a mounted file system, e.g., in a Docker container, we
        // cannot delete it — but that doesn't need to be a fatal error
        Err(err) if err.kind() == io::ErrorKind::ResourceBusy => {
            debug!(
                "Skipping removal of `{}` directory due to {err}",
                location.display(),
            );
        }
        Err(err) => return Err(err.into()),
    }
    Ok(())
}
/// Why an existing virtual environment is being removed.
///
/// Rendered via `Display` when logging the removal.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RemovalReason {
    /// The removal was explicitly requested, i.e., with `--clear`.
    UserRequest,
    /// The environment can be removed because it is considered temporary, e.g., a build
    /// environment.
    TemporaryEnvironment,
    /// The environment can be removed because it is managed by uv, e.g., a project or tool
    /// environment.
    ManagedEnvironment,
}
impl std::fmt::Display for RemovalReason {
    /// Render the reason as a short, human-readable phrase for log output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            Self::UserRequest => "requested with `--clear`",
            Self::ManagedEnvironment => "environment is managed by uv",
            Self::TemporaryEnvironment => "environment is temporary",
        };
        f.write_str(message)
    }
}
/// Strategy for handling a pre-existing, non-empty directory at the target location.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub enum OnExisting {
    /// Prompt before removing an existing directory.
    ///
    /// If a TTY is not available, fail.
    #[default]
    Prompt,
    /// Fail if the directory already exists and is non-empty.
    Fail,
    /// Allow an existing directory, overwriting virtual environment files while retaining other
    /// files in the directory.
    Allow,
    /// Remove an existing directory.
    Remove(RemovalReason),
}
impl OnExisting {
    /// Derive the strategy from the CLI flags; `allow_existing` takes priority
    /// over `clear`, which takes priority over `no_clear`.
    pub fn from_args(allow_existing: bool, clear: bool, no_clear: bool) -> Self {
        match (allow_existing, clear, no_clear) {
            (true, _, _) => Self::Allow,
            (false, true, _) => Self::Remove(RemovalReason::UserRequest),
            (false, false, true) => Self::Fail,
            (false, false, false) => Self::Prompt,
        }
    }
}
/// The Windows executables (or launcher shims) installed into a virtual
/// environment's scripts directory.
#[derive(Debug, Copy, Clone)]
enum WindowsExecutable {
    /// The `python.exe` executable (or `venvlauncher.exe` launcher shim).
    Python,
    /// The `python3.exe` executable (or `venvlauncher.exe` launcher shim).
    PythonMajor,
    /// The `python3.<minor>.exe` executable (or `venvlauncher.exe` launcher shim).
    PythonMajorMinor,
    /// The `python3.<minor>t.exe` executable (or `venvlaunchert.exe` launcher shim).
    PythonMajorMinort,
    /// The `pythonw.exe` executable (or `venvwlauncher.exe` launcher shim).
    Pythonw,
    /// The `pythonw3.<minor>t.exe` executable (or `venvwlaunchert.exe` launcher shim).
    PythonwMajorMinort,
    /// The `pypy.exe` executable.
    PyPy,
    /// The `pypy3.exe` executable.
    PyPyMajor,
    /// The `pypy3.<minor>.exe` executable.
    PyPyMajorMinor,
    /// The `pypyw.exe` executable.
    PyPyw,
    /// The `pypy3.<minor>w.exe` executable.
    PyPyMajorMinorw,
    /// The `graalpy.exe` executable.
    GraalPy,
}
impl WindowsExecutable {
    /// The name of the Python executable.
    fn exe(self, interpreter: &Interpreter) -> String {
        let major = interpreter.python_major();
        let minor = interpreter.python_minor();
        match self {
            Self::Python => String::from("python.exe"),
            Self::PythonMajor => format!("python{major}.exe"),
            Self::PythonMajorMinor => format!("python{major}.{minor}.exe"),
            Self::PythonMajorMinort => format!("python{major}.{minor}t.exe"),
            Self::Pythonw => String::from("pythonw.exe"),
            Self::PythonwMajorMinort => format!("pythonw{major}.{minor}t.exe"),
            Self::PyPy => String::from("pypy.exe"),
            Self::PyPyMajor => format!("pypy{major}.exe"),
            Self::PyPyMajorMinor => format!("pypy{major}.{minor}.exe"),
            Self::PyPyw => String::from("pypyw.exe"),
            Self::PyPyMajorMinorw => format!("pypy{major}.{minor}w.exe"),
            Self::GraalPy => String::from("graalpy.exe"),
        }
    }

    /// The name of the launcher shim.
    fn launcher(self, interpreter: &Interpreter) -> &'static str {
        let freethreaded = interpreter.gil_disabled();
        match self {
            Self::Python | Self::PythonMajor | Self::PythonMajorMinor => {
                if freethreaded {
                    "venvlaunchert.exe"
                } else {
                    "venvlauncher.exe"
                }
            }
            Self::Pythonw => {
                if freethreaded {
                    "venvwlaunchert.exe"
                } else {
                    "venvwlauncher.exe"
                }
            }
            Self::PythonMajorMinort => "venvlaunchert.exe",
            Self::PythonwMajorMinort => "venvwlaunchert.exe",
            // From 3.13 on these should replace the `python.exe` and `pythonw.exe` shims.
            // These are not relevant as of now for PyPy as it doesn't yet support Python 3.13.
            Self::PyPy | Self::PyPyMajor | Self::PyPyMajorMinor | Self::GraalPy => {
                "venvlauncher.exe"
            }
            Self::PyPyw | Self::PyPyMajorMinorw => "venvwlauncher.exe",
        }
    }
}
/// <https://github.com/python/cpython/blob/d457345bbc6414db0443819290b04a9a4333313d/Lib/venv/__init__.py#L261-L267>
/// <https://github.com/pypa/virtualenv/blob/d9fdf48d69f0d0ca56140cf0381edbb5d6fe09f5/src/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py#L78-L83>
///
/// There's two kinds of applications on windows: Those that allocate a console (python.exe)
/// and those that don't because they use window(s) (pythonw.exe).
fn copy_launcher_windows(
executable: WindowsExecutable,
interpreter: &Interpreter,
base_python: &Path,
scripts: &Path,
python_home: &Path,
) -> Result<(), Error> {
// First priority: the `python.exe` and `pythonw.exe` shims.
let shim = interpreter
.stdlib()
.join("venv")
.join("scripts")
.join("nt")
.join(executable.exe(interpreter));
match fs_err::copy(shim, scripts.join(executable.exe(interpreter))) {
Ok(_) => return Ok(()),
Err(err) if err.kind() == io::ErrorKind::NotFound => {}
Err(err) => {
return Err(err.into());
}
}
// Second priority: the `venvlauncher.exe` and `venvwlauncher.exe` shims.
// These are equivalent to the `python.exe` and `pythonw.exe` shims, which were
// renamed in Python 3.13.
let shim = interpreter
.stdlib()
.join("venv")
.join("scripts")
.join("nt")
.join(executable.launcher(interpreter));
match fs_err::copy(shim, scripts.join(executable.exe(interpreter))) {
Ok(_) => return Ok(()),
Err(err) if err.kind() == io::ErrorKind::NotFound => {}
Err(err) => {
return Err(err.into());
}
}
// Third priority: on Conda at least, we can look for the launcher shim next to
// the Python executable itself.
let shim = base_python.with_file_name(executable.launcher(interpreter));
match fs_err::copy(shim, scripts.join(executable.exe(interpreter))) {
Ok(_) => return Ok(()),
Err(err) if err.kind() == io::ErrorKind::NotFound => {}
Err(err) => {
return Err(err.into());
}
}
// Fourth priority: if the launcher shim doesn't exist, assume this is
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-logging/src/lib.rs | crates/uv-logging/src/lib.rs | use std::fmt;
use jiff::Timestamp;
use owo_colors::OwoColorize;
use tracing::{Event, Subscriber};
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields};
use tracing_subscriber::registry::LookupSpan;
/// The style of a uv logging line.
pub struct UvFormat {
    /// Whether to prefix each line with the current timestamp.
    pub display_timestamp: bool,
    /// Whether to prefix each line with the tracing level.
    pub display_level: bool,
    /// Whether to prefix each line with the names of the enclosing spans.
    pub show_spans: bool,
}
impl Default for UvFormat {
    /// The default format: no timestamp, no span names; only the level prefixes
    /// the message.
    fn default() -> Self {
        Self {
            show_spans: false,
            display_timestamp: false,
            display_level: true,
        }
    }
}
/// See <https://docs.rs/tracing-subscriber/0.3.18/src/tracing_subscriber/fmt/format/mod.rs.html#1026-1156>
impl<S, N> FormatEvent<S, N> for UvFormat
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> FormatFields<'a> + 'static,
{
    /// Write one log line: optional timestamp, optional level, optional span
    /// chain, then the event's fields, followed by a newline.
    fn format_event(
        &self,
        ctx: &FmtContext<'_, S, N>,
        mut writer: Writer<'_>,
        event: &Event<'_>,
    ) -> fmt::Result {
        let meta = event.metadata();
        // Whether the writer supports ANSI escapes (enables coloring).
        let ansi = writer.has_ansi_escapes();
        if self.display_timestamp {
            if ansi {
                write!(writer, "{} ", Timestamp::now().dimmed())?;
            } else {
                write!(writer, "{} ", Timestamp::now())?;
            }
        }
        if self.display_level {
            let level = meta.level();
            // Same colors as tracing
            if ansi {
                match *level {
                    tracing::Level::TRACE => write!(writer, "{} ", level.purple())?,
                    tracing::Level::DEBUG => write!(writer, "{} ", level.blue())?,
                    tracing::Level::INFO => write!(writer, "{} ", level.green())?,
                    tracing::Level::WARN => write!(writer, "{} ", level.yellow())?,
                    tracing::Level::ERROR => write!(writer, "{} ", level.red())?,
                }
            } else {
                write!(writer, "{level} ")?;
            }
        }
        if self.show_spans {
            // Prefer the event's explicit parent span; otherwise fall back to the
            // contextual current span.
            let span = event.parent();
            let mut seen = false;
            let span = span
                .and_then(|id| ctx.span(id))
                .or_else(|| ctx.lookup_current());
            // Print `name:` for each enclosing span, from the root down.
            let scope = span.into_iter().flat_map(|span| span.scope().from_root());
            for span in scope {
                seen = true;
                if ansi {
                    write!(writer, "{}:", span.metadata().name().bold())?;
                } else {
                    write!(writer, "{}:", span.metadata().name())?;
                }
            }
            if seen {
                writer.write_char(' ')?;
            }
        }
        ctx.field_format().format_fields(writer.by_ref(), event)?;
        writeln!(writer)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/universal_marker.rs | crates/uv-resolver/src/universal_marker.rs | use std::borrow::Borrow;
use std::collections::BTreeSet;
use std::str::FromStr;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep508::{ExtraOperator, MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree};
use uv_pypi_types::{ConflictItem, ConflictKind, Conflicts, Inference};
use crate::ResolveError;
/// A representation of a marker for use in universal resolution.
///
/// (This degrades gracefully to a standard PEP 508 marker in the case of
/// non-universal resolution.)
///
/// This universal marker is meant to combine both a PEP 508 marker and a
/// marker for conflicting extras/groups. The latter specifically expresses
/// whether a particular edge in a dependency graph should be followed
/// depending on the activated extras and groups.
///
/// A universal marker evaluates to true only when *both* its PEP 508 marker
/// and its conflict marker evaluate to true.
#[derive(Default, Copy, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct UniversalMarker {
    /// The full combined PEP 508 and "conflict" marker.
    ///
    /// In the original design, the PEP 508 marker was kept separate
    /// from the conflict marker, since the conflict marker is not really
    /// specified by PEP 508. However, this approach turned out to be
    /// bunk because the conflict marker can vary depending on which part
    /// of the PEP 508 marker is true. For example, you might have a different
    /// conflict marker for one platform versus the other. The only way to
    /// resolve this is to combine them both into one marker.
    ///
    /// The downside of this is that since conflict markers aren't part of
    /// PEP 508, combining them is pretty weird. We could combine them into
    /// a new type of marker that isn't PEP 508. But it's not clear what the
    /// best design for that is, and at the time of writing, it would have
    /// been a lot of additional work. (Our PEP 508 marker implementation is
    /// rather sophisticated given its boolean simplification capabilities.
    /// So leveraging all that work is a huge shortcut.) So to accomplish
    /// this, we technically preserve PEP 508 compatibility but abuse the
    /// `extra` attribute to encode conflicts.
    ///
    /// So for example, if a particular dependency should only be activated
    /// on `Darwin` and when the extra `x1` for package `foo` is enabled,
    /// then its "universal" marker looks like this:
    ///
    /// ```text
    /// sys_platform == 'Darwin' and extra == 'extra-3-foo-x1'
    /// ```
    ///
    /// Then, when `uv sync --extra x1` is called, we encode that as
    /// `extra-3-foo-x1` and pass it as-needed when evaluating this marker.
    ///
    /// Why `extra-3-foo-x1`?
    ///
    /// * The `extra` prefix is there to distinguish it from `group`.
    /// * The `3` is there to indicate the length of the package name,
    ///   in bytes. This isn't strictly necessary for encoding, but
    ///   is required if we were ever to need to decode a package and
    ///   extra/group name from a conflict marker.
    /// * The `foo` package name ensures we namespace the extra/group name,
    ///   since multiple packages can have the same extra/group name.
    ///
    /// We only use alphanumeric characters and hyphens in order to limit
    /// ourselves to valid extra names. (If we could use other characters then
    /// that would avoid the need to encode the length of the package name.)
    ///
    /// So while the above marker is still technically valid from a PEP 508
    /// stand-point, evaluating it requires uv's custom encoding of extras (and
    /// groups).
    marker: MarkerTree,
    /// The strictly PEP 508 version of `marker`. Basically, `marker`, but
    /// without any extras in it. This could be computed on demand (and
    /// that's what we used to do), but we do it enough that it was causing a
    /// regression in some cases.
    ///
    /// Invariant: this is always equal to `marker.without_extras()`.
    pep508: MarkerTree,
}
impl UniversalMarker {
    /// A constant universal marker that always evaluates to `true`.
    pub(crate) const TRUE: Self = Self {
        marker: MarkerTree::TRUE,
        pep508: MarkerTree::TRUE,
    };

    /// A constant universal marker that always evaluates to `false`.
    pub(crate) const FALSE: Self = Self {
        marker: MarkerTree::FALSE,
        pep508: MarkerTree::FALSE,
    };

    /// Creates a new universal marker from its constituent pieces.
    pub(crate) fn new(mut pep508_marker: MarkerTree, conflict_marker: ConflictMarker) -> Self {
        pep508_marker.and(conflict_marker.marker);
        Self::from_combined(pep508_marker)
    }

    /// Creates a new universal marker from a marker that has already been
    /// combined from a PEP 508 and conflict marker.
    pub(crate) fn from_combined(marker: MarkerTree) -> Self {
        Self {
            marker,
            pep508: marker.without_extras(),
        }
    }

    /// Re-derives the cached `pep508` projection from the combined marker.
    ///
    /// Every mutation of `self.marker` must either be followed by a call to
    /// this method or update `self.pep508` directly, to maintain the
    /// invariant that `pep508 == marker.without_extras()`.
    fn resync_pep508(&mut self) {
        self.pep508 = self.marker.without_extras();
    }

    /// Combine this universal marker with the one given in a way that unions
    /// them. That is, the updated marker will evaluate to `true` if `self` or
    /// `other` evaluate to `true`.
    pub(crate) fn or(&mut self, other: Self) {
        self.marker.or(other.marker);
        self.pep508.or(other.pep508);
    }

    /// Combine this universal marker with the one given in a way that
    /// intersects them. That is, the updated marker will evaluate to `true` if
    /// `self` and `other` evaluate to `true`.
    pub(crate) fn and(&mut self, other: Self) {
        self.marker.and(other.marker);
        self.pep508.and(other.pep508);
    }

    /// Imbibes the world knowledge expressed by `conflicts` into this marker.
    ///
    /// This will effectively simplify the conflict marker in this universal
    /// marker. In particular, it enables simplifying based on the fact that no
    /// two items from the same set in the given conflicts can be active at a
    /// given time.
    pub(crate) fn imbibe(&mut self, conflicts: ConflictMarker) {
        let self_marker = self.marker;
        self.marker = conflicts.marker;
        self.marker.implies(self_marker);
        self.resync_pep508();
    }

    /// If all inference sets reduce to the same marker, simplify the marker using that knowledge.
    pub(crate) fn unify_inference_sets(&mut self, conflict_sets: &[BTreeSet<Inference>]) {
        let mut previous_marker = None;
        for conflict_set in conflict_sets {
            let mut marker = self.marker;
            for inference in conflict_set {
                let extra = encode_conflict_item(&inference.item);
                marker = if inference.included {
                    marker.simplify_extras_with(|candidate| *candidate == extra)
                } else {
                    marker.simplify_not_extras_with(|candidate| *candidate == extra)
                };
            }
            if let Some(previous_marker) = &previous_marker {
                if previous_marker != &marker {
                    // Two inference sets disagree, so no single simplified
                    // marker applies; leave `self` untouched.
                    return;
                }
            } else {
                previous_marker = Some(marker);
            }
        }
        if let Some(all_branches_marker) = previous_marker {
            self.marker = all_branches_marker;
            self.resync_pep508();
        }
    }

    /// Assumes that a given extra/group for the given package is activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    pub(crate) fn assume_conflict_item(&mut self, item: &ConflictItem) {
        // Each delegated method below already re-derives `pep508`.
        match *item.kind() {
            ConflictKind::Extra(ref extra) => self.assume_extra(item.package(), extra),
            ConflictKind::Group(ref group) => self.assume_group(item.package(), group),
            ConflictKind::Project => self.assume_project(item.package()),
        }
    }

    /// Assumes that a given extra/group for the given package is not
    /// activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    pub(crate) fn assume_not_conflict_item(&mut self, item: &ConflictItem) {
        // Each delegated method below already re-derives `pep508`.
        match *item.kind() {
            ConflictKind::Extra(ref extra) => self.assume_not_extra(item.package(), extra),
            ConflictKind::Group(ref group) => self.assume_not_group(item.package(), group),
            ConflictKind::Project => self.assume_not_project(item.package()),
        }
    }

    /// Assumes that the "production" dependencies for the given project are
    /// activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_project(&mut self, package: &PackageName) {
        let extra = encode_project(package);
        self.marker = self
            .marker
            .simplify_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Assumes that the "production" dependencies for the given project are
    /// not activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_not_project(&mut self, package: &PackageName) {
        let extra = encode_project(package);
        self.marker = self
            .marker
            .simplify_not_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Assumes that a given extra for the given package is activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_extra(&mut self, package: &PackageName, extra: &ExtraName) {
        let extra = encode_package_extra(package, extra);
        self.marker = self
            .marker
            .simplify_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Assumes that a given extra for the given package is not activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_not_extra(&mut self, package: &PackageName, extra: &ExtraName) {
        let extra = encode_package_extra(package, extra);
        self.marker = self
            .marker
            .simplify_not_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Assumes that a given group for the given package is activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_group(&mut self, package: &PackageName, group: &GroupName) {
        let extra = encode_package_group(package, group);
        self.marker = self
            .marker
            .simplify_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Assumes that a given group for the given package is not activated.
    ///
    /// This may simplify the conflicting marker component of this universal
    /// marker.
    fn assume_not_group(&mut self, package: &PackageName, group: &GroupName) {
        let extra = encode_package_group(package, group);
        self.marker = self
            .marker
            .simplify_not_extras_with(|candidate| *candidate == extra);
        self.resync_pep508();
    }

    /// Returns true if this universal marker will always evaluate to `true`.
    pub(crate) fn is_true(self) -> bool {
        self.marker.is_true()
    }

    /// Returns true if this universal marker will always evaluate to `false`.
    pub(crate) fn is_false(self) -> bool {
        self.marker.is_false()
    }

    /// Returns true if this universal marker is disjoint with the one given.
    ///
    /// Two universal markers are disjoint when it is impossible for them both
    /// to evaluate to `true` simultaneously.
    pub(crate) fn is_disjoint(self, other: Self) -> bool {
        self.marker.is_disjoint(other.marker)
    }

    /// Returns true if this universal marker is satisfied by the given marker
    /// environment.
    ///
    /// This should only be used when evaluating a marker that is known not to
    /// have any extras. For example, the PEP 508 markers on a fork.
    pub(crate) fn evaluate_no_extras(self, env: &MarkerEnvironment) -> bool {
        self.marker.evaluate(env, &[])
    }

    /// Returns true if this universal marker is satisfied by the given marker
    /// environment and list of activated extras and groups.
    ///
    /// The activated extras and groups should be the complete set activated
    /// for a particular context. And each extra and group must be scoped to
    /// the particular package that it's enabled for.
    pub(crate) fn evaluate<P, E, G>(
        self,
        env: &MarkerEnvironment,
        projects: impl Iterator<Item = P>,
        extras: impl Iterator<Item = (P, E)>,
        groups: impl Iterator<Item = (P, G)>,
    ) -> bool
    where
        P: Borrow<PackageName>,
        E: Borrow<ExtraName>,
        G: Borrow<GroupName>,
    {
        // Encode everything into the same `extra` namespace used by the
        // combined marker before evaluating.
        let projects = projects.map(|package| encode_project(package.borrow()));
        let extras =
            extras.map(|(package, extra)| encode_package_extra(package.borrow(), extra.borrow()));
        let groups =
            groups.map(|(package, group)| encode_package_group(package.borrow(), group.borrow()));
        self.marker.evaluate(
            env,
            &projects
                .chain(extras)
                .chain(groups)
                .collect::<Vec<ExtraName>>(),
        )
    }

    /// Returns true if the marker always evaluates to true if the given set of extras is activated.
    pub(crate) fn evaluate_only_extras<P, E, G>(self, extras: &[(P, E)], groups: &[(P, G)]) -> bool
    where
        P: Borrow<PackageName>,
        E: Borrow<ExtraName>,
        G: Borrow<GroupName>,
    {
        let extras = extras
            .iter()
            .map(|(package, extra)| encode_package_extra(package.borrow(), extra.borrow()));
        let groups = groups
            .iter()
            .map(|(package, group)| encode_package_group(package.borrow(), group.borrow()));
        self.marker
            .evaluate_only_extras(&extras.chain(groups).collect::<Vec<ExtraName>>())
    }

    /// Returns the internal marker that combines both the PEP 508
    /// and conflict marker.
    pub fn combined(self) -> MarkerTree {
        self.marker
    }

    /// Returns the PEP 508 marker for this universal marker.
    ///
    /// One should be cautious using this. Generally speaking, it should only
    /// be used when one knows universal resolution isn't in effect. When
    /// universal resolution is enabled (i.e., there may be multiple forks
    /// producing different versions of the same package), then one should
    /// always use a universal marker since it accounts for all possible ways
    /// for a package to be installed.
    pub fn pep508(self) -> MarkerTree {
        self.pep508
    }

    /// Returns the non-PEP 508 marker expression that represents conflicting
    /// extras/groups.
    ///
    /// Like with `UniversalMarker::pep508`, one should be cautious when using
    /// this. It is generally always wrong to consider conflicts in isolation
    /// from PEP 508 markers. But this can be useful for detecting failure
    /// cases. For example, the code for emitting a `ResolverOutput` (even a
    /// universal one) in a `requirements.txt` format checks for the existence
    /// of non-trivial conflict markers and fails if any are found. (Because
    /// conflict markers cannot be represented in the `requirements.txt`
    /// format.)
    pub(crate) fn conflict(self) -> ConflictMarker {
        ConflictMarker {
            marker: self.marker.only_extras(),
        }
    }
}
impl std::fmt::Debug for UniversalMarker {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Debug::fmt(&self.marker, f)
}
}
/// A marker that is only for representing conflicting extras/groups.
///
/// This encapsulates the encoding of extras and groups into PEP 508
/// markers.
#[derive(Default, Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct ConflictMarker {
    // The underlying marker, consisting solely of (encoded) `extra`
    // expressions.
    marker: MarkerTree,
}
impl ConflictMarker {
/// A constant conflict marker that always evaluates to `true`.
pub const TRUE: Self = Self {
marker: MarkerTree::TRUE,
};
/// A constant conflict marker that always evaluates to `false`.
pub const FALSE: Self = Self {
marker: MarkerTree::FALSE,
};
/// Creates a new conflict marker from the declared conflicts provided.
pub fn from_conflicts(conflicts: &Conflicts) -> Self {
if conflicts.is_empty() {
return Self::TRUE;
}
let mut marker = Self::TRUE;
for set in conflicts.iter() {
for (item1, item2) in set.iter().tuple_combinations() {
let pair = Self::from_conflict_item(item1)
.negate()
.or(Self::from_conflict_item(item2).negate());
marker = marker.and(pair);
}
}
marker
}
/// Create a conflict marker that is true only when the given extra or
/// group (for a specific package) is activated.
pub fn from_conflict_item(item: &ConflictItem) -> Self {
match *item.kind() {
ConflictKind::Extra(ref extra) => Self::extra(item.package(), extra),
ConflictKind::Group(ref group) => Self::group(item.package(), group),
ConflictKind::Project => Self::project(item.package()),
}
}
/// Create a conflict marker that is true only when the production
/// dependencies for the given package are activated.
pub fn project(package: &PackageName) -> Self {
let operator = uv_pep508::ExtraOperator::Equal;
let name = uv_pep508::MarkerValueExtra::Extra(encode_project(package));
let expr = uv_pep508::MarkerExpression::Extra { operator, name };
let marker = MarkerTree::expression(expr);
Self { marker }
}
/// Create a conflict marker that is true only when the given extra for the
/// given package is activated.
pub fn extra(package: &PackageName, extra: &ExtraName) -> Self {
let operator = uv_pep508::ExtraOperator::Equal;
let name = uv_pep508::MarkerValueExtra::Extra(encode_package_extra(package, extra));
let expr = uv_pep508::MarkerExpression::Extra { operator, name };
let marker = MarkerTree::expression(expr);
Self { marker }
}
/// Create a conflict marker that is true only when the given group for the
/// given package is activated.
pub fn group(package: &PackageName, group: &GroupName) -> Self {
let operator = uv_pep508::ExtraOperator::Equal;
let name = uv_pep508::MarkerValueExtra::Extra(encode_package_group(package, group));
let expr = uv_pep508::MarkerExpression::Extra { operator, name };
let marker = MarkerTree::expression(expr);
Self { marker }
}
/// Returns a new conflict marker that is the negation of this one.
#[must_use]
pub fn negate(self) -> Self {
Self {
marker: self.marker.negate(),
}
}
/// Returns a new conflict marker corresponding to the union of `self` and
/// `other`.
#[must_use]
pub fn or(self, other: Self) -> Self {
let mut marker = self.marker;
marker.or(other.marker);
Self { marker }
}
/// Returns a new conflict marker corresponding to the intersection of
/// `self` and `other`.
#[must_use]
pub fn and(self, other: Self) -> Self {
let mut marker = self.marker;
marker.and(other.marker);
Self { marker }
}
/// Returns a new conflict marker corresponding to the logical implication
/// of `self` and the given consequent.
///
/// If the conflict marker returned is always `true`, then it can be said
/// that `self` implies `consequent`.
#[must_use]
pub fn implies(self, other: Self) -> Self {
let mut marker = self.marker;
marker.implies(other.marker);
Self { marker }
}
/// Returns true if this conflict marker will always evaluate to `true`.
pub fn is_true(self) -> bool {
self.marker.is_true()
}
/// Returns true if this conflict marker will always evaluate to `false`.
pub fn is_false(self) -> bool {
self.marker.is_false()
}
/// Returns inclusion and exclusion (respectively) conflict items parsed
/// from this conflict marker.
///
/// This returns an error if any `extra` could not be parsed as a valid
/// encoded conflict extra.
pub(crate) fn filter_rules(
self,
) -> Result<(Vec<ConflictItem>, Vec<ConflictItem>), ResolveError> {
let (mut raw_include, mut raw_exclude) = (vec![], vec![]);
self.marker.visit_extras(|op, extra| {
match op {
MarkerOperator::Equal => raw_include.push(extra.to_owned()),
MarkerOperator::NotEqual => raw_exclude.push(extra.to_owned()),
// OK by the contract of `MarkerTree::visit_extras`.
_ => unreachable!(),
}
});
let include = raw_include
.into_iter()
.map(|extra| ParsedRawExtra::parse(&extra).and_then(|parsed| parsed.to_conflict_item()))
.collect::<Result<Vec<_>, _>>()?;
let exclude = raw_exclude
.into_iter()
.map(|extra| ParsedRawExtra::parse(&extra).and_then(|parsed| parsed.to_conflict_item()))
.collect::<Result<Vec<_>, _>>()?;
Ok((include, exclude))
}
}
impl std::fmt::Debug for ConflictMarker {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // A little more succinct than the derived representation.
        let Self { marker } = self;
        write!(f, "ConflictMarker({marker:?})")
    }
}
/// Encodes the given conflict into a valid `extra` value in a PEP 508 marker.
fn encode_conflict_item(conflict: &ConflictItem) -> ExtraName {
    let package = conflict.package();
    match conflict.kind() {
        ConflictKind::Project => encode_project(package),
        ConflictKind::Extra(extra) => encode_package_extra(package, extra),
        ConflictKind::Group(group) => encode_package_group(package, group),
    }
}
/// Encodes the given package name and its corresponding extra into a valid
/// `extra` value in a PEP 508 marker.
fn encode_package_extra(package: &PackageName, extra: &ExtraName) -> ExtraName {
    // Combining a `PackageName` and an `ExtraName` this way always yields a
    // valid name, since both types share the same validation rules.
    //
    // The length of the package name (in bytes) is embedded so that the
    // package and extra names can be decoded unambiguously later: our `-`
    // delimiter is itself a legal character in both `package` and `extra`
    // values, but a known package length removes the ambiguity.
    let encoded = format!("extra-{len}-{package}-{extra}", len = package.as_str().len());
    ExtraName::from_owned(encoded).unwrap()
}
/// Encodes the given package name and its corresponding group into a valid
/// `extra` value in a PEP 508 marker.
fn encode_package_group(package: &PackageName, group: &GroupName) -> ExtraName {
    // See `encode_package_extra`; the same length-prefix considerations
    // apply here.
    let encoded = format!("group-{len}-{package}-{group}", len = package.as_str().len());
    ExtraName::from_owned(encoded).unwrap()
}
/// Encodes the given project package name into a valid `extra` value in a PEP
/// 508 marker.
fn encode_project(package: &PackageName) -> ExtraName {
    // See `encode_package_extra`; the same length-prefix considerations
    // apply here.
    let encoded = format!("project-{len}-{package}", len = package.as_str().len());
    ExtraName::from_owned(encoded).unwrap()
}
/// The borrowed, decoded components of an encoded conflict `extra` name.
///
/// This reverses the encodings produced by `encode_project`,
/// `encode_package_extra` and `encode_package_group`.
#[derive(Debug)]
enum ParsedRawExtra<'a> {
    /// A `project-{len}-{package}` encoding.
    Project { package: &'a str },
    /// An `extra-{len}-{package}-{extra}` encoding.
    Extra { package: &'a str, extra: &'a str },
    /// A `group-{len}-{package}-{group}` encoding.
    Group { package: &'a str, group: &'a str },
}
impl<'a> ParsedRawExtra<'a> {
    /// Parses an encoded conflict `extra` (e.g., `extra-3-foo-x1`,
    /// `group-3-foo-dev` or `project-3-foo`) back into its components.
    ///
    /// Returns an error if the value does not follow the encoding produced by
    /// `encode_project`, `encode_package_extra` or `encode_package_group`.
    ///
    /// (Fixed: the error messages previously referred to a non-existent
    /// `package` kind and omitted `project` from the set of valid kinds; the
    /// valid leading kinds are `project`, `extra` and `group`, matching the
    /// `match` below.)
    fn parse(raw_extra: &'a ExtraName) -> Result<Self, ResolveError> {
        fn mkerr(raw_extra: &ExtraName, reason: impl Into<String>) -> ResolveError {
            let raw_extra = raw_extra.to_owned();
            let reason = reason.into();
            ResolveError::InvalidExtraInConflictMarker { reason, raw_extra }
        }

        let raw = raw_extra.as_str();
        // The leading segment identifies the kind of conflict item.
        let Some((kind, tail)) = raw.split_once('-') else {
            return Err(mkerr(
                raw_extra,
                "expected to find leading `project-`, `extra-` or `group-`",
            ));
        };
        // The next segment is the package-name length, in bytes. This is what
        // makes decoding unambiguous, since `-` is also a valid character
        // inside package and extra/group names.
        let Some((len, tail)) = tail.split_once('-') else {
            return Err(mkerr(
                raw_extra,
                "expected to find `{number}-` after leading `project-`, `extra-` or `group-`",
            ));
        };
        let len = len.parse::<usize>().map_err(|_| {
            mkerr(
                raw_extra,
                format!("found package length number `{len}`, but could not parse into integer"),
            )
        })?;
        let Some((package, tail)) = tail.split_at_checked(len) else {
            return Err(mkerr(
                raw_extra,
                format!(
                    "expected at least {len} bytes for package name, but found {found}",
                    found = tail.len()
                ),
            ));
        };
        match kind {
            "project" => Ok(ParsedRawExtra::Project { package }),
            "extra" | "group" => {
                // For extras and groups, the package name is followed by a
                // `-`-delimited extra/group name.
                if !tail.starts_with('-') {
                    return Err(mkerr(
                        raw_extra,
                        format!("expected `-` after package name `{package}`"),
                    ));
                }
                let tail = &tail[1..];
                if kind == "extra" {
                    Ok(ParsedRawExtra::Extra {
                        package,
                        extra: tail,
                    })
                } else {
                    Ok(ParsedRawExtra::Group {
                        package,
                        group: tail,
                    })
                }
            }
            _ => Err(mkerr(
                raw_extra,
                format!("unrecognized kind `{kind}` (must be `project`, `extra` or `group`)"),
            )),
        }
    }

    /// Converts the parsed components into a validated [`ConflictItem`].
    ///
    /// Returns an error if any component is not a valid package, extra or
    /// group name.
    fn to_conflict_item(&self) -> Result<ConflictItem, ResolveError> {
        let package = PackageName::from_str(self.package()).map_err(|name_error| {
            ResolveError::InvalidValueInConflictMarker {
                kind: "package",
                name_error,
            }
        })?;
        match self {
            Self::Project { .. } => Ok(ConflictItem::from(package)),
            Self::Extra { extra, .. } => {
                let extra = ExtraName::from_str(extra).map_err(|name_error| {
                    ResolveError::InvalidValueInConflictMarker {
                        kind: "extra",
                        name_error,
                    }
                })?;
                Ok(ConflictItem::from((package, extra)))
            }
            Self::Group { group, .. } => {
                let group = GroupName::from_str(group).map_err(|name_error| {
                    ResolveError::InvalidValueInConflictMarker {
                        kind: "group",
                        name_error,
                    }
                })?;
                Ok(ConflictItem::from((package, group)))
            }
        }
    }

    /// Returns the package-name segment, regardless of kind.
    fn package(&self) -> &'a str {
        match self {
            Self::Project { package, .. } => package,
            Self::Extra { package, .. } => package,
            Self::Group { package, .. } => package,
        }
    }
}
/// Resolve the conflict markers in a [`MarkerTree`] based on the conditions under which each
/// conflict item is known to be true.
///
/// For example, if the `cpu` extra is known to be enabled when `sys_platform == 'darwin'`, then
/// given the combined marker `python_version >= '3.8' and extra == 'extra-7-project-cpu'`, this
/// method would return `python_version >= '3.8' and sys_platform == 'darwin'`.
///
/// If a conflict item isn't present in the map of known conflicts, it's assumed to be false in all
/// environments.
pub(crate) fn resolve_conflicts(
marker: MarkerTree,
known_conflicts: &FxHashMap<ConflictItem, MarkerTree>,
) -> MarkerTree {
if marker.is_true() || marker.is_false() {
return marker;
}
let mut transformed = MarkerTree::FALSE;
// Convert the marker to DNF, then re-build it.
for dnf in marker.to_dnf() {
let mut or = MarkerTree::TRUE;
for marker in dnf {
let MarkerExpression::Extra {
ref operator,
ref name,
} = marker
else {
or.and(MarkerTree::expression(marker));
continue;
};
let Some(name) = name.as_extra() else {
or.and(MarkerTree::expression(marker));
continue;
};
// Given an extra marker (like `extra == 'extra-7-project-cpu'`), search for the
// corresponding conflict; once found, inline the marker of conditions under which the
// conflict is known to be true.
let mut found = false;
for (conflict_item, conflict_marker) in known_conflicts {
// Search for the conflict item as an extra.
if let Some(extra) = conflict_item.extra() {
let package = conflict_item.package();
let encoded = encode_package_extra(package, extra);
if encoded == *name {
match operator {
ExtraOperator::Equal => {
or.and(*conflict_marker);
found = true;
break;
}
ExtraOperator::NotEqual => {
or.and(conflict_marker.negate());
found = true;
break;
}
}
}
}
// Search for the conflict item as a group.
if let Some(group) = conflict_item.group() {
let package = conflict_item.package();
let encoded = encode_package_group(package, group);
if encoded == *name {
match operator {
ExtraOperator::Equal => {
or.and(*conflict_marker);
found = true;
break;
}
ExtraOperator::NotEqual => {
or.and(conflict_marker.negate());
found = true;
break;
}
}
}
}
// Search for the conflict item as a project.
if conflict_item.extra().is_none() && conflict_item.group().is_none() {
let package = conflict_item.package();
let encoded = encode_project(package);
if encoded == *name {
match operator {
ExtraOperator::Equal => {
or.and(*conflict_marker);
found = true;
break;
}
ExtraOperator::NotEqual => {
or.and(conflict_marker.negate());
found = true;
break;
}
}
}
}
}
// If we didn't find the marker in the list of known conflicts, assume it's always
// false.
if !found {
match operator {
ExtraOperator::Equal => {
or.and(MarkerTree::FALSE);
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/prerelease.rs | crates/uv-resolver/src/prerelease.rs | use uv_distribution_types::RequirementSource;
use uv_normalize::PackageName;
use uv_pep440::Operator;
use crate::resolver::ForkSet;
use crate::{DependencyMode, Manifest, ResolverEnvironment};
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum PrereleaseMode {
    /// Disallow all pre-release versions.
    Disallow,
    /// Allow all pre-release versions.
    Allow,
    /// Allow pre-release versions if all versions of a package are pre-release.
    IfNecessary,
    /// Allow pre-release versions for first-party packages with explicit pre-release markers in
    /// their version requirements.
    Explicit,
    /// Allow pre-release versions if all versions of a package are pre-release, or if the package
    /// has an explicit pre-release marker in its version requirements.
    ///
    /// This is the default mode.
    #[default]
    IfNecessaryOrExplicit,
}
impl std::fmt::Display for PrereleaseMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Mirrors the kebab-case names used for serde (and `clap`).
        f.write_str(match self {
            Self::Disallow => "disallow",
            Self::Allow => "allow",
            Self::IfNecessary => "if-necessary",
            Self::Explicit => "explicit",
            Self::IfNecessaryOrExplicit => "if-necessary-or-explicit",
        })
    }
}
/// Like [`PrereleaseMode`], but with any additional information required to select a candidate,
/// like the set of direct dependencies.
#[derive(Debug, Clone)]
pub(crate) enum PrereleaseStrategy {
    /// Disallow all pre-release versions.
    Disallow,
    /// Allow all pre-release versions.
    Allow,
    /// Allow pre-release versions if all versions of a package are pre-release.
    IfNecessary,
    /// Allow pre-release versions for first-party packages with explicit pre-release markers in
    /// their version requirements.
    ///
    /// The [`ForkSet`] holds the packages with explicit pre-release specifiers.
    Explicit(ForkSet),
    /// Allow pre-release versions if all versions of a package are pre-release, or if the package
    /// has an explicit pre-release marker in its version requirements.
    ///
    /// The [`ForkSet`] holds the packages with explicit pre-release specifiers.
    IfNecessaryOrExplicit(ForkSet),
}
impl PrereleaseStrategy {
    /// Builds a strategy from the user-facing [`PrereleaseMode`], collecting the set of packages
    /// with explicit pre-release specifiers when the mode requires it.
    pub(crate) fn from_mode(
        mode: PrereleaseMode,
        manifest: &Manifest,
        env: &ResolverEnvironment,
        dependencies: DependencyMode,
    ) -> Self {
        let mut packages = ForkSet::default();

        match mode {
            PrereleaseMode::Disallow => Self::Disallow,
            PrereleaseMode::Allow => Self::Allow,
            PrereleaseMode::IfNecessary => Self::IfNecessary,
            // `Explicit` and `IfNecessaryOrExplicit`: scan the manifest for
            // requirements with explicit pre-release specifiers.
            _ => {
                for requirement in manifest.requirements(env, dependencies) {
                    // Only registry requirements carry version specifiers.
                    let RequirementSource::Registry { specifier, .. } = &requirement.source else {
                        continue;
                    };
                    // A requirement counts as "explicit" if any of its
                    // non-exclusion specifiers mentions a pre-release version.
                    if specifier
                        .iter()
                        .filter(|spec| {
                            !matches!(spec.operator(), Operator::NotEqual | Operator::NotEqualStar)
                        })
                        .any(uv_pep440::VersionSpecifier::any_prerelease)
                    {
                        packages.add(&requirement, ());
                    }
                }
                match mode {
                    PrereleaseMode::Explicit => Self::Explicit(packages),
                    PrereleaseMode::IfNecessaryOrExplicit => Self::IfNecessaryOrExplicit(packages),
                    // All other modes were handled by the outer `match`.
                    _ => unreachable!(),
                }
            }
        }
    }

    /// Returns `true` if a [`PackageName`] is allowed to have pre-release versions.
    pub(crate) fn allows(
        &self,
        package_name: &PackageName,
        env: &ResolverEnvironment,
    ) -> AllowPrerelease {
        match self {
            Self::Disallow => AllowPrerelease::No,
            Self::Allow => AllowPrerelease::Yes,
            Self::IfNecessary => AllowPrerelease::IfNecessary,
            // Packages with explicit pre-release specifiers always allow
            // pre-releases; others fall back to the mode's default.
            Self::Explicit(packages) => {
                if packages.contains(package_name, env) {
                    AllowPrerelease::Yes
                } else {
                    AllowPrerelease::No
                }
            }
            Self::IfNecessaryOrExplicit(packages) => {
                if packages.contains(package_name, env) {
                    AllowPrerelease::Yes
                } else {
                    AllowPrerelease::IfNecessary
                }
            }
        }
    }
}
/// The pre-release strategy for a given package.
///
/// This is the per-package answer produced by [`PrereleaseStrategy::allows`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum AllowPrerelease {
    /// Allow all pre-release versions.
    Yes,
    /// Disallow all pre-release versions.
    No,
    /// Allow pre-release versions if all versions of this package are pre-release.
    IfNecessary,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lib.rs | crates/uv-resolver/src/lib.rs | pub use dependency_mode::DependencyMode;
pub use error::{ErrorTree, NoSolutionError, NoSolutionHeader, ResolveError, SentinelRange};
pub use exclude_newer::{
ExcludeNewer, ExcludeNewerChange, ExcludeNewerPackage, ExcludeNewerPackageChange,
ExcludeNewerPackageEntry, ExcludeNewerValue, ExcludeNewerValueChange,
};
pub use exclusions::Exclusions;
pub use flat_index::{FlatDistributions, FlatIndex};
pub use fork_strategy::ForkStrategy;
pub use lock::{
Installable, Lock, LockError, LockVersion, Package, PackageMap, PylockToml,
PylockTomlErrorKind, RequirementsTxtExport, ResolverManifest, SatisfiesResult, TreeDisplay,
VERSION, cyclonedx_json,
};
pub use manifest::Manifest;
pub use options::{Flexibility, Options, OptionsBuilder};
pub use preferences::{Preference, PreferenceError, Preferences};
pub use prerelease::PrereleaseMode;
pub use python_requirement::PythonRequirement;
pub use resolution::{
AnnotationStyle, ConflictingDistributionError, DisplayResolutionGraph, ResolverOutput,
};
pub use resolution_mode::ResolutionMode;
pub use resolver::{
BuildId, DefaultResolverProvider, DerivationChainBuilder, InMemoryIndex, MetadataResponse,
PackageVersionsResult, Reporter as ResolverReporter, Resolver, ResolverEnvironment,
ResolverProvider, VersionsResponse, WheelMetadataResult,
};
pub use universal_marker::{ConflictMarker, UniversalMarker};
pub use version_map::VersionMap;
pub use yanks::AllowedYanks;
/// A custom `HashSet` using `hashbrown`.
///
/// We use `hashbrown` instead of `std` to get access to its `Equivalent`
/// trait. This lets us store things like `ConflictItem`, but refer to it via
/// `ConflictItemRef`, i.e., we can avoid allocs on lookups.
type FxHashbrownSet<T> = hashbrown::HashSet<T, rustc_hash::FxBuildHasher>;
/// A custom `HashMap` using `hashbrown`, for the same reason as [`FxHashbrownSet`].
type FxHashbrownMap<K, V> = hashbrown::HashMap<K, V, rustc_hash::FxBuildHasher>;
mod candidate_selector;
mod dependency_mode;
mod dependency_provider;
mod error;
mod exclude_newer;
mod exclusions;
mod flat_index;
mod fork_indexes;
mod fork_strategy;
mod fork_urls;
mod graph_ops;
mod lock;
mod manifest;
mod marker;
mod options;
mod pins;
mod preferences;
mod prerelease;
pub mod pubgrub;
mod python_requirement;
mod redirect;
mod resolution;
mod resolution_mode;
mod resolver;
mod universal_marker;
mod version_map;
mod yanks;
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/preferences.rs | crates/uv-resolver/src/preferences.rs | use std::path::Path;
use std::str::FromStr;
use rustc_hash::FxHashMap;
use tracing::trace;
use uv_distribution_types::{IndexUrl, InstalledDist, InstalledDistKind};
use uv_normalize::PackageName;
use uv_pep440::{Operator, Version};
use uv_pep508::{MarkerTree, VerbatimUrl, VersionOrUrl};
use uv_pypi_types::{HashDigest, HashDigests, HashError};
use uv_requirements_txt::{RequirementEntry, RequirementsTxtRequirement};
use crate::lock::PylockTomlPackage;
use crate::universal_marker::UniversalMarker;
use crate::{LockError, ResolverEnvironment};
/// An error that can occur when constructing a [`Preference`].
#[derive(thiserror::Error, Debug)]
pub enum PreferenceError {
    /// A hash digest attached to a `requirements.txt` entry failed to parse.
    #[error(transparent)]
    Hash(#[from] HashError),
}
/// A pinned requirement from a prior resolution, e.g., as extracted from a `requirements.txt`
/// file, a lockfile, or an installed environment (see the constructors on the `impl`).
#[derive(Clone, Debug)]
pub struct Preference {
    /// The name of the package.
    name: PackageName,
    /// The pinned version of the package.
    version: Version,
    /// The markers on the requirement itself (those after the semicolon).
    marker: MarkerTree,
    /// The index URL of the package, if any.
    index: PreferenceIndex,
    /// If coming from a package with diverging versions, the markers of the forks this preference
    /// is part of, otherwise empty.
    fork_markers: Vec<UniversalMarker>,
    /// The hash digests attached to the preference, if any.
    hashes: HashDigests,
    /// The source of the preference.
    source: PreferenceSource,
}
impl Preference {
    /// Create a [`Preference`] from a [`RequirementEntry`].
    ///
    /// Returns `Ok(None)` for entries that can't act as preferences: unnamed requirements, URL
    /// requirements, and requirements not pinned to a single exact (`==`) version.
    pub fn from_entry(entry: RequirementEntry) -> Result<Option<Self>, PreferenceError> {
        let RequirementsTxtRequirement::Named(requirement) = entry.requirement else {
            return Ok(None);
        };
        let Some(VersionOrUrl::VersionSpecifier(specifier)) = requirement.version_or_url.as_ref()
        else {
            trace!("Excluding {requirement} from preferences due to non-version specifier");
            return Ok(None);
        };
        let [specifier] = specifier.as_ref() else {
            trace!("Excluding {requirement} from preferences due to multiple version specifiers");
            return Ok(None);
        };
        if *specifier.operator() != Operator::Equal {
            trace!("Excluding {requirement} from preferences due to inexact version specifier");
            return Ok(None);
        }
        Ok(Some(Self {
            name: requirement.name,
            version: specifier.version().clone(),
            marker: requirement.marker,
            // `requirements.txt` doesn't have fork annotations.
            fork_markers: vec![],
            // `requirements.txt` doesn't allow a requirement to specify an explicit index.
            index: PreferenceIndex::Any,
            hashes: entry
                .hashes
                .iter()
                .map(String::as_str)
                .map(HashDigest::from_str)
                .collect::<Result<_, _>>()?,
            source: PreferenceSource::RequirementsTxt,
        }))
    }
    /// Create a [`Preference`] from a locked distribution.
    ///
    /// Returns `Ok(None)` for packages without a version.
    pub fn from_lock(
        package: &crate::lock::Package,
        install_path: &Path,
    ) -> Result<Option<Self>, LockError> {
        let Some(version) = package.version() else {
            return Ok(None);
        };
        Ok(Some(Self {
            name: package.id.name.clone(),
            version: version.clone(),
            marker: MarkerTree::TRUE,
            index: PreferenceIndex::from(package.index(install_path)?),
            fork_markers: package.fork_markers().to_vec(),
            hashes: HashDigests::empty(),
            source: PreferenceSource::Lock,
        }))
    }
    /// Create a [`Preference`] from a `pylock.toml` package.
    ///
    /// Returns `Ok(None)` for packages without a version.
    pub fn from_pylock_toml(package: &PylockTomlPackage) -> Result<Option<Self>, LockError> {
        let Some(version) = package.version.as_ref() else {
            return Ok(None);
        };
        Ok(Some(Self {
            name: package.name.clone(),
            version: version.clone(),
            marker: MarkerTree::TRUE,
            index: PreferenceIndex::from(
                package
                    .index
                    .as_ref()
                    .map(|index| IndexUrl::from(VerbatimUrl::from(index.clone()))),
            ),
            // `pylock.toml` doesn't have fork annotations.
            fork_markers: vec![],
            hashes: HashDigests::empty(),
            source: PreferenceSource::Lock,
        }))
    }
    /// Create a [`Preference`] from an installed distribution.
    ///
    /// Returns `None` for distributions that weren't installed from a registry.
    pub fn from_installed(dist: &InstalledDist) -> Option<Self> {
        let InstalledDistKind::Registry(dist) = &dist.kind else {
            return None;
        };
        Some(Self {
            name: dist.name.clone(),
            version: dist.version.clone(),
            marker: MarkerTree::TRUE,
            index: PreferenceIndex::Any,
            fork_markers: vec![],
            hashes: HashDigests::empty(),
            source: PreferenceSource::Environment,
        })
    }
    /// Return the [`PackageName`] of the package for this [`Preference`].
    pub fn name(&self) -> &PackageName {
        &self.name
    }
    /// Return the [`Version`] of the package for this [`Preference`].
    pub fn version(&self) -> &Version {
        &self.version
    }
}
/// The index (if any) to which a [`Preference`] is pinned.
#[derive(Debug, Clone)]
pub enum PreferenceIndex {
    /// The preference should match to any index.
    Any,
    /// The preference should match to an implicit index.
    Implicit,
    /// The preference should match to a specific index.
    Explicit(IndexUrl),
}
impl PreferenceIndex {
    /// Returns `true` if the preference matches the given explicit [`IndexUrl`].
    pub(crate) fn matches(&self, index: &IndexUrl) -> bool {
        let Self::Explicit(preference) = self else {
            // `Any` matches every index; `Implicit` never matches an explicit one.
            return matches!(self, Self::Any);
        };
        // Preferences are stored in the lockfile without credentials, while the index URL
        // in locations such as `pyproject.toml` may contain credentials; strip them before
        // comparing so such URLs still match.
        *preference.url() == *index.without_credentials()
    }
}
impl From<Option<IndexUrl>> for PreferenceIndex {
    /// Convert an optional explicit index into a [`PreferenceIndex`], treating a missing index
    /// as implicit.
    fn from(index: Option<IndexUrl>) -> Self {
        index.map_or(Self::Implicit, Self::Explicit)
    }
}
/// The provenance of a [`Preference`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum PreferenceSource {
    /// The preference is from an installed package in the environment.
    Environment,
    /// The preference is from a lockfile (a `uv.lock` or `pylock.toml` file).
    Lock,
    /// The preference is from a `requirements.txt` file.
    RequirementsTxt,
    /// The preference is from the current solve.
    Resolver,
}
/// A single preference record for a package: a pinned version scoped to a fork marker, an index,
/// and a source.
#[derive(Debug, Clone)]
pub(crate) struct Entry {
    /// The fork marker under which this entry applies.
    marker: UniversalMarker,
    /// The index to which this entry is pinned, if any.
    index: PreferenceIndex,
    /// The pinned version and hashes.
    pin: Pin,
    /// Where this entry came from (lockfile, environment, etc.).
    source: PreferenceSource,
}
impl Entry {
    /// Return the [`UniversalMarker`] associated with the entry.
    pub(crate) fn marker(&self) -> &UniversalMarker {
        &self.marker
    }
    /// Return the [`PreferenceIndex`] associated with the entry, if any.
    pub(crate) fn index(&self) -> &PreferenceIndex {
        &self.index
    }
    /// Return the pinned data associated with the entry.
    pub(crate) fn pin(&self) -> &Pin {
        &self.pin
    }
    /// Return the source of the entry.
    pub(crate) fn source(&self) -> PreferenceSource {
        self.source
    }
}
/// A set of pinned packages that should be preserved during resolution, if possible.
///
/// The marker is the marker of the fork that resolved to the pin, if any.
///
/// Preferences should be prioritized first by whether their marker matches and then by the order
/// they are stored, so that a lockfile has higher precedence than sibling forks.
#[derive(Debug, Clone, Default)]
pub struct Preferences(FxHashMap<PackageName, Vec<Entry>>);
impl Preferences {
    /// Create a map of pinned packages from an iterator of [`Preference`] entries.
    ///
    /// The provided [`ResolverEnvironment`] will be used to filter the preferences
    /// to an applicable subset.
    pub fn from_iter(
        preferences: impl IntoIterator<Item = Preference>,
        env: &ResolverEnvironment,
    ) -> Self {
        let mut map = FxHashMap::<PackageName, Vec<_>>::default();
        for preference in preferences {
            // Filter non-matching preferences when resolving for an environment.
            if let Some(markers) = env.marker_environment() {
                // The preference's own marker (the part after the semicolon) must match.
                if !preference.marker.evaluate(markers, &[]) {
                    trace!("Excluding {preference} from preferences due to unmatched markers");
                    continue;
                }
                // If the preference carries fork markers, at least one fork must match.
                if !preference.fork_markers.is_empty() {
                    if !preference
                        .fork_markers
                        .iter()
                        .any(|marker| marker.evaluate_no_extras(markers))
                    {
                        trace!(
                            "Excluding {preference} from preferences due to unmatched fork markers"
                        );
                        continue;
                    }
                }
            }
            // Flatten the list of markers into individual entries.
            if preference.fork_markers.is_empty() {
                map.entry(preference.name).or_default().push(Entry {
                    marker: UniversalMarker::TRUE,
                    index: preference.index,
                    pin: Pin {
                        version: preference.version,
                        hashes: preference.hashes,
                    },
                    source: preference.source,
                });
            } else {
                // One entry per fork marker, so lookups can select the entry whose fork the
                // current resolution falls into.
                for fork_marker in preference.fork_markers {
                    map.entry(preference.name.clone()).or_default().push(Entry {
                        marker: fork_marker,
                        index: preference.index.clone(),
                        pin: Pin {
                            version: preference.version.clone(),
                            hashes: preference.hashes.clone(),
                        },
                        source: preference.source,
                    });
                }
            }
        }
        Self(map)
    }
    /// Insert a preference at the back (lowest precedence) of a package's entry list.
    pub(crate) fn insert(
        &mut self,
        package_name: PackageName,
        index: Option<IndexUrl>,
        markers: UniversalMarker,
        pin: impl Into<Pin>,
        source: PreferenceSource,
    ) {
        self.0.entry(package_name).or_default().push(Entry {
            marker: markers,
            index: PreferenceIndex::from(index),
            pin: pin.into(),
            source,
        });
    }
    /// Returns an iterator over the preferences, as `(name, (marker, index, version))` groups.
    pub fn iter(
        &self,
    ) -> impl Iterator<
        Item = (
            &PackageName,
            impl Iterator<Item = (&UniversalMarker, &PreferenceIndex, &Version)>,
        ),
    > {
        self.0.iter().map(|(name, preferences)| {
            (
                name,
                preferences
                    .iter()
                    .map(|entry| (&entry.marker, &entry.index, entry.pin.version())),
            )
        })
    }
    /// Return the preference entries for a package, or an empty slice if there are none.
    pub(crate) fn get(&self, package_name: &PackageName) -> &[Entry] {
        self.0
            .get(package_name)
            .map(Vec::as_slice)
            .unwrap_or_default()
    }
    /// Return the hashes for a package, if the version matches that of the pin.
    ///
    /// Takes the first entry whose pinned version matches, in stored (precedence) order.
    pub(crate) fn match_hashes(
        &self,
        package_name: &PackageName,
        version: &Version,
    ) -> Option<&[HashDigest]> {
        self.0
            .get(package_name)
            .into_iter()
            .flatten()
            .find(|entry| entry.pin.version() == version)
            .map(|entry| entry.pin.hashes())
    }
}
impl std::fmt::Display for Preference {
    /// Render the preference as a `name==version` pin.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { name, version, .. } = self;
        write!(f, "{}=={}", name, version)
    }
}
/// The pinned data associated with a package in a locked `requirements.txt` file (e.g., `flask==1.2.3`).
#[derive(Debug, Clone)]
pub(crate) struct Pin {
    /// The pinned version.
    version: Version,
    /// The hash digests associated with the pinned distribution, if any.
    hashes: HashDigests,
}
impl Pin {
    /// Return the version of the pinned package.
    pub(crate) fn version(&self) -> &Version {
        &self.version
    }
    /// Return the hashes of the pinned package.
    pub(crate) fn hashes(&self) -> &[HashDigest] {
        self.hashes.as_slice()
    }
}
impl From<Version> for Pin {
    /// Create a [`Pin`] from a bare version, with no associated hashes.
    fn from(version: Version) -> Self {
        Self {
            version,
            hashes: HashDigests::empty(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    /// Test that [`PreferenceIndex::matches`] correctly ignores credentials when comparing URLs.
    ///
    /// This is relevant for matching lockfile preferences (stored without credentials)
    /// against index URLs from pyproject.toml (which may include usernames for auth).
    #[test]
    fn test_preference_index_matches_ignores_credentials() {
        // URL without credentials (as stored in lockfile). Previously spelled with a single
        // slash (`https:/...`), which only worked because the URL parser normalizes it.
        let index_without_creds = IndexUrl::from_str("https://pypi_index.com/simple").unwrap();
        // URL with username (as specified in pyproject.toml)
        let index_with_username =
            IndexUrl::from_str("https://username@pypi_index.com/simple").unwrap();
        let preference = PreferenceIndex::Explicit(index_without_creds);
        assert!(
            preference.matches(&index_with_username),
            "PreferenceIndex should match URLs that differ only in username"
        );
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/version_map.rs | crates/uv-resolver/src/version_map.rs | use std::collections::Bound;
use std::collections::btree_map::{BTreeMap, Entry};
use std::ops::RangeBounds;
use std::sync::OnceLock;
use pubgrub::Ranges;
use rustc_hash::FxHashMap;
use tracing::instrument;
use uv_client::{FlatIndexEntry, OwnedArchive, SimpleDetailMetadata, VersionFiles};
use uv_configuration::BuildOptions;
use uv_distribution_filename::{DistFilename, WheelFilename};
use uv_distribution_types::{
HashComparison, IncompatibleSource, IncompatibleWheel, IndexUrl, PrioritizedDist,
RegistryBuiltWheel, RegistrySourceDist, RequiresPython, SourceDistCompatibility,
WheelCompatibility,
};
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_platform_tags::{IncompatibleTag, TagCompatibility, Tags};
use uv_pypi_types::{HashDigest, ResolutionMetadata, Yanked};
use uv_types::HashStrategy;
use uv_warnings::warn_user_once;
use crate::flat_index::FlatDistributions;
use crate::{ExcludeNewer, ExcludeNewerValue, yanks::AllowedYanks};
/// A map from versions to distributions.
#[derive(Debug)]
pub struct VersionMap {
    /// The inner representation of the version map: either fully materialized ("eager") or
    /// materialized on demand ("lazy").
    inner: VersionMapInner,
}
impl VersionMap {
    /// Initialize a [`VersionMap`] from the given metadata.
    ///
    /// Note it is possible for files to have a different yank status per PEP 592 but in the official
    /// PyPI warehouse this cannot happen.
    ///
    /// Here, we track if each file is yanked separately. If a release is partially yanked, the
    /// unyanked distributions _can_ be used.
    ///
    /// PEP 592: <https://peps.python.org/pep-0592/#warehouse-pypi-implementation-notes>
    #[instrument(skip_all, fields(package_name))]
    pub(crate) fn from_simple_metadata(
        simple_metadata: OwnedArchive<SimpleDetailMetadata>,
        package_name: &PackageName,
        index: &IndexUrl,
        tags: Option<&Tags>,
        requires_python: &RequiresPython,
        allowed_yanks: &AllowedYanks,
        hasher: &HashStrategy,
        exclude_newer: Option<&ExcludeNewer>,
        flat_index: Option<FlatDistributions>,
        build_options: &BuildOptions,
    ) -> Self {
        let mut stable = false;
        let mut local = false;
        let mut map = BTreeMap::new();
        let mut core_metadata = FxHashMap::default();
        // Create stubs for each entry in simple metadata. The full conversion
        // from a `VersionFiles` to a PrioritizedDist for each version
        // isn't done until that specific version is requested.
        for (datum_index, datum) in simple_metadata.iter().enumerate() {
            // Deserialize the version.
            let version = rkyv::deserialize::<Version, rkyv::rancor::Error>(&datum.version)
                .expect("archived version always deserializes");
            // Deserialize the metadata.
            let core_metadatum =
                rkyv::deserialize::<Option<ResolutionMetadata>, rkyv::rancor::Error>(
                    &datum.metadata,
                )
                .expect("archived metadata always deserializes");
            if let Some(core_metadatum) = core_metadatum {
                core_metadata.insert(version.clone(), core_metadatum);
            }
            stable |= version.is_stable();
            local |= version.is_local();
            map.insert(
                version,
                LazyPrioritizedDist::OnlySimple(SimplePrioritizedDist {
                    datum_index,
                    dist: OnceLock::new(),
                }),
            );
        }
        // If a set of flat distributions have been given, we need to add those
        // to our map of entries as well.
        //
        // NOTE(review): this loop updates `stable` but not `local`, unlike the loop above and
        // `from_flat_metadata` — confirm whether flat-index-only local versions should set it.
        for (version, prioritized_dist) in flat_index.into_iter().flatten() {
            stable |= version.is_stable();
            match map.entry(version) {
                Entry::Vacant(e) => {
                    e.insert(LazyPrioritizedDist::OnlyFlat(prioritized_dist));
                }
                // When there is both a `VersionFiles` (from the "simple"
                // metadata) and a flat distribution for the same version of
                // a package, we store both and "merge" them into a single
                // `PrioritizedDist` upon access later.
                Entry::Occupied(e) => match e.remove_entry() {
                    (version, LazyPrioritizedDist::OnlySimple(simple_dist)) => {
                        map.insert(
                            version,
                            LazyPrioritizedDist::Both {
                                flat: prioritized_dist,
                                simple: simple_dist,
                            },
                        );
                    }
                    _ => unreachable!(),
                },
            }
        }
        Self {
            inner: VersionMapInner::Lazy(VersionMapLazy {
                map,
                stable,
                local,
                core_metadata,
                simple_metadata,
                no_binary: build_options.no_binary_package(package_name),
                no_build: build_options.no_build_package(package_name),
                index: index.clone(),
                tags: tags.cloned(),
                allowed_yanks: allowed_yanks.clone(),
                hasher: hasher.clone(),
                requires_python: requires_python.clone(),
                exclude_newer: exclude_newer.and_then(|en| en.exclude_newer_package(package_name)),
            }),
        }
    }
    /// Initialize an eager [`VersionMap`] from a set of flat-index entries.
    #[instrument(skip_all, fields(package_name))]
    pub(crate) fn from_flat_metadata(
        flat_metadata: Vec<FlatIndexEntry>,
        tags: Option<&Tags>,
        hasher: &HashStrategy,
        build_options: &BuildOptions,
    ) -> Self {
        let mut stable = false;
        let mut local = false;
        let mut map = BTreeMap::new();
        for (version, prioritized_dist) in
            FlatDistributions::from_entries(flat_metadata, tags, hasher, build_options)
        {
            stable |= version.is_stable();
            local |= version.is_local();
            map.insert(version, prioritized_dist);
        }
        Self {
            inner: VersionMapInner::Eager(VersionMapEager { map, stable, local }),
        }
    }
    /// Return the [`ResolutionMetadata`] for the given version, if any.
    ///
    /// Only lazy maps (built from simple metadata) carry core metadata.
    pub fn get_metadata(&self, version: &Version) -> Option<&ResolutionMetadata> {
        match self.inner {
            VersionMapInner::Eager(_) => None,
            VersionMapInner::Lazy(ref lazy) => lazy.core_metadata.get(version),
        }
    }
    /// Return the [`PrioritizedDist`] for the given version, if any.
    pub(crate) fn get(&self, version: &Version) -> Option<&PrioritizedDist> {
        match self.inner {
            VersionMapInner::Eager(ref eager) => eager.map.get(version),
            VersionMapInner::Lazy(ref lazy) => lazy.get(version),
        }
    }
    /// Return an iterator over the versions in this map, in ascending order.
    pub(crate) fn versions(&self) -> impl DoubleEndedIterator<Item = &Version> {
        match &self.inner {
            VersionMapInner::Eager(eager) => either::Either::Left(eager.map.keys()),
            VersionMapInner::Lazy(lazy) => either::Either::Right(lazy.map.keys()),
        }
    }
    /// Return the index URL where this package came from.
    ///
    /// Only lazy maps record their index of origin.
    pub(crate) fn index(&self) -> Option<&IndexUrl> {
        match &self.inner {
            VersionMapInner::Eager(_) => None,
            VersionMapInner::Lazy(lazy) => Some(&lazy.index),
        }
    }
    /// Return an iterator over the versions and distributions.
    ///
    /// Note that the value returned in this iterator is a [`VersionMapDist`],
    /// which can be used to lazily request a [`CompatibleDist`]. This is
    /// useful in cases where one can skip materializing a full distribution
    /// for each version.
    pub(crate) fn iter(
        &self,
        range: &Ranges<Version>,
    ) -> impl DoubleEndedIterator<Item = (&Version, VersionMapDistHandle<'_>)> {
        // Performance optimization: If we only have a single version, return that version directly.
        if let Some(version) = range.as_singleton() {
            either::Either::Left(match self.inner {
                VersionMapInner::Eager(ref eager) => {
                    either::Either::Left(eager.map.get_key_value(version).into_iter().map(
                        move |(version, dist)| {
                            let version_map_dist = VersionMapDistHandle {
                                inner: VersionMapDistHandleInner::Eager(dist),
                            };
                            (version, version_map_dist)
                        },
                    ))
                }
                VersionMapInner::Lazy(ref lazy) => {
                    either::Either::Right(lazy.map.get_key_value(version).into_iter().map(
                        move |(version, dist)| {
                            let version_map_dist = VersionMapDistHandle {
                                inner: VersionMapDistHandleInner::Lazy { lazy, dist },
                            };
                            (version, version_map_dist)
                        },
                    ))
                }
            })
        } else {
            // Otherwise, restrict the B-tree walk to the bounds implied by the range.
            either::Either::Right(match self.inner {
                VersionMapInner::Eager(ref eager) => {
                    either::Either::Left(eager.map.range(BoundingRange::from(range)).map(
                        |(version, dist)| {
                            let version_map_dist = VersionMapDistHandle {
                                inner: VersionMapDistHandleInner::Eager(dist),
                            };
                            (version, version_map_dist)
                        },
                    ))
                }
                VersionMapInner::Lazy(ref lazy) => {
                    either::Either::Right(lazy.map.range(BoundingRange::from(range)).map(
                        |(version, dist)| {
                            let version_map_dist = VersionMapDistHandle {
                                inner: VersionMapDistHandleInner::Lazy { lazy, dist },
                            };
                            (version, version_map_dist)
                        },
                    ))
                }
            })
        }
    }
    /// Return the [`Hashes`] for the given version, if any.
    pub(crate) fn hashes(&self, version: &Version) -> Option<&[HashDigest]> {
        match self.inner {
            VersionMapInner::Eager(ref eager) => {
                eager.map.get(version).map(PrioritizedDist::hashes)
            }
            VersionMapInner::Lazy(ref lazy) => lazy.get(version).map(PrioritizedDist::hashes),
        }
    }
    /// Returns the total number of distinct versions in this map.
    ///
    /// Note that this may include versions of distributions that are not
    /// usable in the current environment.
    pub(crate) fn len(&self) -> usize {
        match self.inner {
            VersionMapInner::Eager(VersionMapEager { ref map, .. }) => map.len(),
            VersionMapInner::Lazy(VersionMapLazy { ref map, .. }) => map.len(),
        }
    }
    /// Returns `true` if the map contains at least one stable (non-pre-release) version.
    pub(crate) fn stable(&self) -> bool {
        match self.inner {
            VersionMapInner::Eager(ref map) => map.stable,
            VersionMapInner::Lazy(ref map) => map.stable,
        }
    }
    /// Returns `true` if the map contains at least one local version (e.g., `2.6.0+cpu`).
    pub(crate) fn local(&self) -> bool {
        match self.inner {
            VersionMapInner::Eager(ref map) => map.local,
            VersionMapInner::Lazy(ref map) => map.local,
        }
    }
}
impl From<FlatDistributions> for VersionMap {
    /// Build an eager [`VersionMap`] from a set of flat distributions.
    fn from(flat_index: FlatDistributions) -> Self {
        // Record whether any stable or local versions are present before consuming the index.
        let mut stable = false;
        let mut local = false;
        for (version, _) in flat_index.iter() {
            stable = stable || version.is_stable();
            local = local || version.is_local();
        }
        let eager = VersionMapEager {
            map: flat_index.into(),
            stable,
            local,
        };
        Self {
            inner: VersionMapInner::Eager(eager),
        }
    }
}
/// A lazily initialized distribution.
///
/// This permits access to a handle that can be turned into a resolvable
/// distribution when desired. This is coupled with a `Version` in
/// [`VersionMap::iter`] to permit iteration over all items in a map without
/// necessarily constructing a distribution for every version if it isn't
/// needed.
///
/// Note that because of laziness, not all such items can be turned into
/// a valid distribution. For example, if in the process of building a
/// distribution no compatible wheel or source distribution could be found,
/// then building a `CompatibleDist` will fail.
pub(crate) struct VersionMapDistHandle<'a> {
    /// The eager or lazy representation backing this handle.
    inner: VersionMapDistHandleInner<'a>,
}
/// The backing representation of a [`VersionMapDistHandle`]: either an already-materialized
/// distribution, or a lazy entry paired with the map needed to materialize it.
enum VersionMapDistHandleInner<'a> {
    /// A fully materialized distribution.
    Eager(&'a PrioritizedDist),
    /// A lazily materialized distribution, resolved on demand via the owning map.
    Lazy {
        lazy: &'a VersionMapLazy,
        dist: &'a LazyPrioritizedDist,
    },
}
impl<'a> VersionMapDistHandle<'a> {
    /// Returns a prioritized distribution from this handle.
    ///
    /// Returns `None` if lazily materializing the distribution yields no usable files.
    pub(crate) fn prioritized_dist(&self) -> Option<&'a PrioritizedDist> {
        match self.inner {
            VersionMapDistHandleInner::Eager(dist) => Some(dist),
            // `get_lazy` already returns an `Option`, so forward it directly rather than
            // re-wrapping via `Some(…?)`.
            VersionMapDistHandleInner::Lazy { lazy, dist } => lazy.get_lazy(dist),
        }
    }
}
/// The kind of internal version map we have.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum VersionMapInner {
    /// All distributions are fully materialized in memory.
    ///
    /// This usually happens when one needs a `VersionMap` from a
    /// `FlatDistributions`.
    Eager(VersionMapEager),
    /// Some distributions might be fully materialized (i.e., by initializing
    /// a `VersionMap` with a `FlatDistributions`), but some distributions
    /// might still be in their "raw" `SimpleMetadata` format. In this case, a
    /// `PrioritizedDist` isn't actually created in memory until the
    /// specific version has been requested.
    Lazy(VersionMapLazy),
}
/// A map from versions to distributions that are fully materialized in memory.
#[derive(Debug)]
struct VersionMapEager {
    /// A map from version to distribution.
    map: BTreeMap<Version, PrioritizedDist>,
    /// Whether the version map contains at least one stable (non-pre-release) version.
    stable: bool,
    /// Whether the version map contains at least one local version.
    local: bool,
}
/// A map that lazily materializes some prioritized distributions upon access.
///
/// The idea here is that some packages have a lot of versions published, and
/// needing to materialize a full `VersionMap` with all corresponding metadata
/// for every version in memory is expensive. Since a `SimpleMetadata` can be
/// materialized with very little cost (via `rkyv` in the warm cached case),
/// avoiding another conversion step into a fully filled out `VersionMap` can
/// provide substantial savings in some cases.
#[derive(Debug)]
struct VersionMapLazy {
    /// A map from version to possibly-initialized distribution.
    map: BTreeMap<Version, LazyPrioritizedDist>,
    /// Whether the version map contains at least one stable (non-pre-release) version.
    stable: bool,
    /// Whether the version map contains at least one local version.
    local: bool,
    /// The pre-populated metadata for each version.
    core_metadata: FxHashMap<Version, ResolutionMetadata>,
    /// The raw simple metadata from which `PrioritizedDist`s should
    /// be constructed.
    simple_metadata: OwnedArchive<SimpleDetailMetadata>,
    /// When true, wheels aren't allowed.
    no_binary: bool,
    /// When true, source dists aren't allowed.
    no_build: bool,
    /// The URL of the index where this package came from.
    index: IndexUrl,
    /// The set of compatibility tags that determines whether a wheel is usable
    /// in the current environment.
    tags: Option<Tags>,
    /// Whether files newer than this timestamp should be excluded or not.
    exclude_newer: Option<ExcludeNewerValue>,
    /// Which yanked versions are allowed
    allowed_yanks: AllowedYanks,
    /// The hashes of allowed distributions.
    hasher: HashStrategy,
    /// The `requires-python` constraint for the resolution.
    requires_python: RequiresPython,
}
impl VersionMapLazy {
    /// Returns the distribution for the given version, if it exists.
    fn get(&self, version: &Version) -> Option<&PrioritizedDist> {
        let lazy_dist = self.map.get(version)?;
        let priority_dist = self.get_lazy(lazy_dist)?;
        Some(priority_dist)
    }
    /// Given a reference to a possibly-initialized distribution that is in
    /// this lazy map, return the corresponding distribution.
    ///
    /// When both a flat and simple distribution are present internally, they
    /// are merged automatically.
    fn get_lazy<'p>(&'p self, lazy_dist: &'p LazyPrioritizedDist) -> Option<&'p PrioritizedDist> {
        match *lazy_dist {
            LazyPrioritizedDist::OnlyFlat(ref dist) => Some(dist),
            LazyPrioritizedDist::OnlySimple(ref dist) => self.get_simple(None, dist),
            LazyPrioritizedDist::Both {
                ref flat,
                ref simple,
            } => self.get_simple(Some(flat), simple),
        }
    }
    /// Given an optional starting point, return the final form of the
    /// given simple distribution. If it wasn't initialized yet, then this
    /// initializes it. If the distribution would otherwise be empty, this
    /// returns `None`.
    fn get_simple<'p>(
        &'p self,
        init: Option<&'p PrioritizedDist>,
        simple: &'p SimplePrioritizedDist,
    ) -> Option<&'p PrioritizedDist> {
        // Deferred initializer: runs at most once per entry (guarded by the `OnceLock` below).
        let get_or_init = || {
            let files = rkyv::deserialize::<VersionFiles, rkyv::rancor::Error>(
                &self
                    .simple_metadata
                    .datum(simple.datum_index)
                    .expect("index to lazy dist is correct")
                    .files,
            )
            .expect("archived version files always deserializes");
            // Start from the flat distribution (if any) and fold in the simple-metadata files.
            let mut priority_dist = init.cloned().unwrap_or_default();
            for (filename, file) in files.all() {
                // Support resolving as if it were an earlier timestamp, at least as long files have
                // upload time information.
                let (excluded, upload_time) = if let Some(exclude_newer) = &self.exclude_newer {
                    match file.upload_time_utc_ms.as_ref() {
                        Some(&upload_time) if upload_time >= exclude_newer.timestamp_millis() => {
                            (true, Some(upload_time))
                        }
                        None => {
                            // Files without an upload date are conservatively excluded when an
                            // `exclude-newer` cutoff is in effect.
                            warn_user_once!(
                                "{} is missing an upload date, but user provided: {exclude_newer}",
                                file.filename,
                            );
                            (true, None)
                        }
                        _ => (false, None),
                    }
                } else {
                    (false, None)
                };
                // Prioritize amongst all available files.
                let yanked = file.yanked.as_deref();
                let hashes = file.hashes.clone();
                match filename {
                    DistFilename::WheelFilename(filename) => {
                        let compatibility = self.wheel_compatibility(
                            &filename,
                            &filename.name,
                            &filename.version,
                            hashes.as_slice(),
                            yanked,
                            excluded,
                            upload_time,
                        );
                        let dist = RegistryBuiltWheel {
                            filename,
                            file: Box::new(file),
                            index: self.index.clone(),
                        };
                        priority_dist.insert_built(dist, hashes, compatibility);
                    }
                    DistFilename::SourceDistFilename(filename) => {
                        let compatibility = self.source_dist_compatibility(
                            &filename.name,
                            &filename.version,
                            hashes.as_slice(),
                            yanked,
                            excluded,
                            upload_time,
                        );
                        let dist = RegistrySourceDist {
                            name: filename.name.clone(),
                            version: filename.version.clone(),
                            ext: filename.extension,
                            file: Box::new(file),
                            index: self.index.clone(),
                            wheels: vec![],
                        };
                        priority_dist.insert_source(dist, hashes, compatibility);
                    }
                }
            }
            if priority_dist.is_empty() {
                None
            } else {
                Some(priority_dist)
            }
        };
        simple.dist.get_or_init(get_or_init).as_ref()
    }
    /// Determine the [`SourceDistCompatibility`] of a source distribution file, checking build
    /// options, the `exclude-newer` cutoff, yank status, and hash requirements (in that order).
    fn source_dist_compatibility(
        &self,
        name: &PackageName,
        version: &Version,
        hashes: &[HashDigest],
        yanked: Option<&Yanked>,
        excluded: bool,
        upload_time: Option<i64>,
    ) -> SourceDistCompatibility {
        // Check if builds are disabled
        if self.no_build {
            return SourceDistCompatibility::Incompatible(IncompatibleSource::NoBuild);
        }
        // Check if after upload time cutoff
        if excluded {
            return SourceDistCompatibility::Incompatible(IncompatibleSource::ExcludeNewer(
                upload_time,
            ));
        }
        // Check if yanked
        if let Some(yanked) = yanked {
            if yanked.is_yanked() && !self.allowed_yanks.contains(name, version) {
                return SourceDistCompatibility::Incompatible(IncompatibleSource::Yanked(
                    yanked.clone(),
                ));
            }
        }
        // Check if hashes line up. If hashes aren't required, they're considered matching.
        let hash_policy = self.hasher.get_package(name, version);
        let required_hashes = hash_policy.digests();
        let hash = if required_hashes.is_empty() {
            HashComparison::Matched
        } else {
            if hashes.is_empty() {
                HashComparison::Missing
            } else if hashes.iter().any(|hash| required_hashes.contains(hash)) {
                HashComparison::Matched
            } else {
                HashComparison::Mismatched
            }
        };
        SourceDistCompatibility::Compatible(hash)
    }
    /// Determine the [`WheelCompatibility`] of a wheel file, checking build options, the
    /// `exclude-newer` cutoff, yank status, platform tags, and hash requirements (in that order).
    fn wheel_compatibility(
        &self,
        filename: &WheelFilename,
        name: &PackageName,
        version: &Version,
        hashes: &[HashDigest],
        yanked: Option<&Yanked>,
        excluded: bool,
        upload_time: Option<i64>,
    ) -> WheelCompatibility {
        // Check if binaries are disabled
        if self.no_binary {
            return WheelCompatibility::Incompatible(IncompatibleWheel::NoBinary);
        }
        // Check if after upload time cutoff
        if excluded {
            return WheelCompatibility::Incompatible(IncompatibleWheel::ExcludeNewer(upload_time));
        }
        // Check if yanked
        if let Some(yanked) = yanked {
            if yanked.is_yanked() && !self.allowed_yanks.contains(name, version) {
                return WheelCompatibility::Incompatible(IncompatibleWheel::Yanked(yanked.clone()));
            }
        }
        // Determine a compatibility for the wheel based on tags.
        let priority = if let Some(tags) = &self.tags {
            match filename.compatibility(tags) {
                TagCompatibility::Incompatible(tag) => {
                    return WheelCompatibility::Incompatible(IncompatibleWheel::Tag(tag));
                }
                TagCompatibility::Compatible(priority) => Some(priority),
            }
        } else {
            // Check if the wheel is compatible with the `requires-python` (i.e., the Python
            // ABI tag is not less than the `requires-python` minimum version).
            if !self.requires_python.matches_wheel_tag(filename) {
                return WheelCompatibility::Incompatible(IncompatibleWheel::Tag(
                    IncompatibleTag::AbiPythonVersion,
                ));
            }
            None
        };
        // Check if hashes line up. If hashes aren't required, they're considered matching.
        let hash_policy = self.hasher.get_package(name, version);
        let required_hashes = hash_policy.digests();
        let hash = if required_hashes.is_empty() {
            HashComparison::Matched
        } else {
            if hashes.is_empty() {
                HashComparison::Missing
            } else if hashes.iter().any(|hash| required_hashes.contains(hash)) {
                HashComparison::Matched
            } else {
                HashComparison::Mismatched
            }
        };
        // Break ties with the build tag.
        let build_tag = filename.build_tag().cloned();
        WheelCompatibility::Compatible(hash, priority, build_tag)
    }
}
/// Represents a possibly initialized [`PrioritizedDist`] for
/// a single version of a package.
#[derive(Debug)]
enum LazyPrioritizedDist {
/// Represents an eagerly constructed distribution from a
/// `FlatDistributions`.
OnlyFlat(PrioritizedDist),
/// Represents a lazily constructed distribution from an index into a
/// `VersionFiles` from `SimpleMetadata`.
OnlySimple(SimplePrioritizedDist),
/// Combines the above. This occurs when we have data from both a flat
/// distribution and a simple distribution.
Both {
flat: PrioritizedDist,
simple: SimplePrioritizedDist,
},
}
/// Represents a lazily initialized `PrioritizedDist`.
#[derive(Debug)]
struct SimplePrioritizedDist {
/// An offset into `SimpleMetadata` corresponding to a `SimpleMetadatum`.
/// This provides access to a `VersionFiles` that is used to construct a
/// `PrioritizedDist`.
datum_index: usize,
/// A lazily initialized distribution.
///
/// Note that the `Option` does not represent the initialization state.
/// The `Option` can be `None` even after initialization, for example,
/// if initialization could not find any usable files from which to
/// construct a distribution. (One easy way to effect this, at the time
/// of writing, is to use `--exclude-newer 1900-01-01`.)
dist: OnceLock<Option<PrioritizedDist>>,
}
/// A range that can be used to iterate over a subset of a [`BTreeMap`].
#[derive(Debug)]
struct BoundingRange<'a> {
min: Bound<&'a Version>,
max: Bound<&'a Version>,
}
impl<'a> From<&'a Ranges<Version>> for BoundingRange<'a> {
fn from(value: &'a Ranges<Version>) -> Self {
let (min, max) = value
.bounding_range()
.unwrap_or((Bound::Unbounded, Bound::Unbounded));
Self { min, max }
}
}
impl<'a> RangeBounds<Version> for BoundingRange<'a> {
fn start_bound(&self) -> Bound<&'a Version> {
self.min
}
fn end_bound(&self) -> Bound<&'a Version> {
self.max
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/manifest.rs | crates/uv-resolver/src/manifest.rs | use std::borrow::Cow;
use std::collections::BTreeSet;
use either::Either;
use uv_configuration::{Constraints, Excludes, Overrides};
use uv_distribution_types::Requirement;
use uv_normalize::PackageName;
use uv_types::RequestedRequirements;
use crate::preferences::Preferences;
use crate::{DependencyMode, Exclusions, ResolverEnvironment};
/// A manifest of requirements, constraints, and preferences.
#[derive(Clone, Debug)]
pub struct Manifest {
/// The direct requirements for the project.
pub(crate) requirements: Vec<Requirement>,
/// The constraints for the project.
pub(crate) constraints: Constraints,
/// The overrides for the project.
pub(crate) overrides: Overrides,
/// The dependency excludes for the project.
pub(crate) excludes: Excludes,
/// The preferences for the project.
///
/// These represent "preferred" versions of a given package. For example, they may be the
/// versions that are already installed in the environment, or already pinned in an existing
/// lockfile.
pub(crate) preferences: Preferences,
/// The name of the project.
pub(crate) project: Option<PackageName>,
/// Members of the project's workspace.
pub(crate) workspace_members: BTreeSet<PackageName>,
/// The installed packages to exclude from consideration during resolution.
///
/// These typically represent packages that are being upgraded or reinstalled
/// and should be pulled from a remote source like a package index.
pub(crate) exclusions: Exclusions,
/// The lookahead requirements for the project.
///
/// These represent transitive dependencies that should be incorporated when making
/// determinations around "allowed" versions (for example, "allowed" URLs or "allowed"
/// pre-release versions).
pub(crate) lookaheads: Vec<RequestedRequirements>,
}
impl Manifest {
pub fn new(
requirements: Vec<Requirement>,
constraints: Constraints,
overrides: Overrides,
excludes: Excludes,
preferences: Preferences,
project: Option<PackageName>,
workspace_members: BTreeSet<PackageName>,
exclusions: Exclusions,
lookaheads: Vec<RequestedRequirements>,
) -> Self {
Self {
requirements,
constraints,
overrides,
excludes,
preferences,
project,
workspace_members,
exclusions,
lookaheads,
}
}
pub fn simple(requirements: Vec<Requirement>) -> Self {
Self {
requirements,
constraints: Constraints::default(),
overrides: Overrides::default(),
excludes: Excludes::default(),
preferences: Preferences::default(),
project: None,
exclusions: Exclusions::default(),
workspace_members: BTreeSet::new(),
lookaheads: Vec::new(),
}
}
#[must_use]
pub fn with_constraints(mut self, constraints: Constraints) -> Self {
self.constraints = constraints;
self
}
/// Return an iterator over all requirements, constraints, and overrides, in priority order,
/// such that requirements come first, followed by constraints, followed by overrides.
///
/// At time of writing, this is used for:
/// - Determining which requirements should allow yanked versions.
/// - Determining which requirements should allow pre-release versions (e.g., `torch>=2.2.0a1`).
/// - Determining which requirements should allow direct URLs (e.g., `torch @ https://...`).
pub fn requirements<'a>(
&'a self,
env: &'a ResolverEnvironment,
mode: DependencyMode,
) -> impl Iterator<Item = Cow<'a, Requirement>> + 'a {
self.requirements_no_overrides(env, mode)
.chain(self.overrides(env, mode))
}
/// Like [`Self::requirements`], but without the overrides.
pub fn requirements_no_overrides<'a>(
&'a self,
env: &'a ResolverEnvironment,
mode: DependencyMode,
) -> impl Iterator<Item = Cow<'a, Requirement>> + 'a {
match mode {
// Include all direct and transitive requirements, with constraints and overrides applied.
DependencyMode::Transitive => Either::Left(
self.lookaheads
.iter()
.flat_map(move |lookahead| {
self.overrides
.apply(lookahead.requirements())
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement
.evaluate_markers(env.marker_environment(), lookahead.extras())
})
})
.chain(
self.overrides
.apply(&self.requirements)
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
}),
)
.chain(
self.constraints
.requirements()
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
})
.map(Cow::Borrowed),
),
),
// Include direct requirements, with constraints and overrides applied.
DependencyMode::Direct => Either::Right(
self.overrides
.apply(&self.requirements)
.chain(self.constraints.requirements().map(Cow::Borrowed))
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
}),
),
}
}
/// Only the overrides from [`Self::requirements`].
pub fn overrides<'a>(
&'a self,
env: &'a ResolverEnvironment,
mode: DependencyMode,
) -> impl Iterator<Item = Cow<'a, Requirement>> + 'a {
match mode {
// Include all direct and transitive requirements, with constraints and overrides applied.
DependencyMode::Transitive => Either::Left(
self.overrides
.requirements()
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
})
.map(Cow::Borrowed),
),
// Include direct requirements, with constraints and overrides applied.
DependencyMode::Direct => Either::Right(
self.overrides
.requirements()
.filter(|requirement| !self.excludes.contains(&requirement.name))
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
})
.map(Cow::Borrowed),
),
}
}
/// Return an iterator over the names of all user-provided requirements.
///
/// This includes:
/// - Direct requirements
/// - Dependencies of editable requirements
/// - Transitive dependencies of local package requirements
///
/// At time of writing, this is used for:
/// - Determining which packages should use the "lowest-compatible version" of a package, when
/// the `lowest-direct` strategy is in use.
pub fn user_requirements<'a>(
&'a self,
env: &'a ResolverEnvironment,
mode: DependencyMode,
) -> impl Iterator<Item = Cow<'a, Requirement>> + 'a {
match mode {
// Include direct requirements, dependencies of editables, and transitive dependencies
// of local packages.
DependencyMode::Transitive => Either::Left(
self.lookaheads
.iter()
.filter(|lookahead| lookahead.direct())
.flat_map(move |lookahead| {
self.overrides
.apply(lookahead.requirements())
.filter(move |requirement| {
requirement
.evaluate_markers(env.marker_environment(), lookahead.extras())
})
})
.chain(
self.overrides
.apply(&self.requirements)
.filter(move |requirement| {
requirement.evaluate_markers(env.marker_environment(), &[])
}),
),
),
// Restrict to the direct requirements.
DependencyMode::Direct => {
Either::Right(self.overrides.apply(self.requirements.iter()).filter(
move |requirement| requirement.evaluate_markers(env.marker_environment(), &[]),
))
}
}
}
/// Returns an iterator over the direct requirements, with overrides applied.
///
/// At time of writing, this is used for:
/// - Determining which packages should have development dependencies included in the
/// resolution (assuming the user enabled development dependencies).
pub fn direct_requirements<'a>(
&'a self,
env: &'a ResolverEnvironment,
) -> impl Iterator<Item = Cow<'a, Requirement>> + 'a {
self.overrides
.apply(self.requirements.iter())
.filter(move |requirement| requirement.evaluate_markers(env.marker_environment(), &[]))
}
/// Apply the overrides and constraints to a set of requirements.
///
/// Constraints are always applied _on top_ of overrides, such that constraints are applied
/// even if a requirement is overridden.
pub fn apply<'a>(
&'a self,
requirements: impl IntoIterator<Item = &'a Requirement>,
) -> impl Iterator<Item = Cow<'a, Requirement>> {
self.constraints.apply(self.overrides.apply(requirements))
}
/// Returns the number of input requirements.
pub fn num_requirements(&self) -> usize {
self.requirements.len()
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/redirect.rs | crates/uv-resolver/src/redirect.rs | use uv_git::GitResolver;
use uv_pep508::VerbatimUrl;
use uv_pypi_types::{ParsedGitUrl, ParsedUrl, VerbatimParsedUrl};
use uv_redacted::DisplaySafeUrl;
/// Map a URL to a precise URL, if possible.
pub(crate) fn url_to_precise(url: VerbatimParsedUrl, git: &GitResolver) -> VerbatimParsedUrl {
let ParsedUrl::Git(ParsedGitUrl {
url: git_url,
subdirectory,
}) = &url.parsed_url
else {
return url;
};
let Some(new_git_url) = git.precise(git_url.clone()) else {
if cfg!(debug_assertions) {
panic!("Unresolved Git URL: {}, {git_url:?}", url.verbatim);
} else {
return url;
}
};
let new_parsed_url = ParsedGitUrl {
url: new_git_url,
subdirectory: subdirectory.clone(),
};
let new_url = DisplaySafeUrl::from(new_parsed_url.clone());
let new_verbatim_url = apply_redirect(&url.verbatim, new_url);
VerbatimParsedUrl {
parsed_url: ParsedUrl::Git(new_parsed_url),
verbatim: new_verbatim_url,
}
}
/// Given a [`VerbatimUrl`] and a redirect, apply the redirect to the URL while preserving as much
/// of the verbatim representation as possible.
fn apply_redirect(url: &VerbatimUrl, redirect: DisplaySafeUrl) -> VerbatimUrl {
let redirect = VerbatimUrl::from_url(redirect);
// The redirect should be the "same" URL, but with a specific commit hash added after the `@`.
// We take advantage of this to preserve as much of the verbatim representation as possible.
if let Some(given) = url.given() {
let (given, fragment) = given
.split_once('#')
.map_or((given, None), |(prefix, suffix)| (prefix, Some(suffix)));
if let Some(precise_suffix) = redirect
.raw()
.path()
.rsplit_once('@')
.map(|(_, suffix)| suffix.to_owned())
{
// If there was an `@` in the original representation...
if let Some((.., parsed_suffix)) = url.raw().path().rsplit_once('@') {
if let Some((given_prefix, given_suffix)) = given.rsplit_once('@') {
// And the portion after the `@` is stable between the parsed and given representations...
if given_suffix == parsed_suffix {
// Preserve everything that precedes the `@` in the precise representation.
let given = format!("{given_prefix}@{precise_suffix}");
let given = if let Some(fragment) = fragment {
format!("{given}#{fragment}")
} else {
given
};
return redirect.with_given(given);
}
}
} else {
// If there was no `@` in the original representation, we can just append the
// precise suffix to the given representation.
let given = format!("{given}@{precise_suffix}");
let given = if let Some(fragment) = fragment {
format!("{given}#{fragment}")
} else {
given
};
return redirect.with_given(given);
}
}
}
redirect
}
#[cfg(test)]
mod tests {
use uv_pep508::{VerbatimUrl, VerbatimUrlError};
use uv_redacted::DisplaySafeUrl;
use crate::redirect::apply_redirect;
#[test]
fn test_apply_redirect() -> Result<(), VerbatimUrlError> {
// If there's no `@` in the original representation, we can just append the precise suffix
// to the given representation.
let verbatim = VerbatimUrl::parse_url("https://github.com/flask.git")?
.with_given("git+https://github.com/flask.git");
let redirect = DisplaySafeUrl::parse(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?;
let expected = VerbatimUrl::parse_url(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?
.with_given("https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe");
assert_eq!(apply_redirect(&verbatim, redirect), expected);
// If there's an `@` in the original representation, and it's stable between the parsed and
// given representations, we preserve everything that precedes the `@` in the precise
// representation.
let verbatim = VerbatimUrl::parse_url("https://github.com/flask.git@main")?
.with_given("git+https://${DOMAIN}.com/flask.git@main");
let redirect = DisplaySafeUrl::parse(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?;
let expected = VerbatimUrl::parse_url(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?
.with_given("https://${DOMAIN}.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe");
assert_eq!(apply_redirect(&verbatim, redirect), expected);
// If there's a conflict after the `@`, discard the original representation.
let verbatim = VerbatimUrl::parse_url("https://github.com/flask.git@main")?
.with_given("git+https://github.com/flask.git@${TAG}");
let redirect = DisplaySafeUrl::parse(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?;
let expected = VerbatimUrl::parse_url(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe",
)?;
assert_eq!(apply_redirect(&verbatim, redirect), expected);
// We should preserve subdirectory fragments.
let verbatim = VerbatimUrl::parse_url("https://github.com/flask.git#subdirectory=src")?
.with_given("git+https://github.com/flask.git#subdirectory=src");
let redirect = DisplaySafeUrl::parse(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe#subdirectory=src",
)?;
let expected = VerbatimUrl::parse_url(
"https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe#subdirectory=src",
)?.with_given("git+https://github.com/flask.git@b90a4f1f4a370e92054b9cc9db0efcb864f87ebe#subdirectory=src");
assert_eq!(apply_redirect(&verbatim, redirect), expected);
Ok(())
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolution_mode.rs | crates/uv-resolver/src/resolution_mode.rs | use crate::resolver::{ForkMap, ForkSet};
use crate::{DependencyMode, Manifest, ResolverEnvironment};
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum ResolutionMode {
/// Resolve the highest compatible version of each package.
#[default]
Highest,
/// Resolve the lowest compatible version of each package.
Lowest,
/// Resolve the lowest compatible version of any direct dependencies, and the highest
/// compatible version of any transitive dependencies.
LowestDirect,
}
impl std::fmt::Display for ResolutionMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Highest => write!(f, "highest"),
Self::Lowest => write!(f, "lowest"),
Self::LowestDirect => write!(f, "lowest-direct"),
}
}
}
/// Like [`ResolutionMode`], but with any additional information required to select a candidate,
/// like the set of direct dependencies.
#[derive(Debug, Clone)]
pub(crate) enum ResolutionStrategy {
/// Resolve the highest compatible version of each package.
Highest,
/// Resolve the lowest compatible version of each package.
Lowest,
/// Resolve the lowest compatible version of any direct dependencies, and the highest
/// compatible version of any transitive dependencies.
LowestDirect(ForkSet),
}
impl ResolutionStrategy {
pub(crate) fn from_mode(
mode: ResolutionMode,
manifest: &Manifest,
env: &ResolverEnvironment,
dependencies: DependencyMode,
) -> Self {
match mode {
ResolutionMode::Highest => Self::Highest,
ResolutionMode::Lowest => Self::Lowest,
ResolutionMode::LowestDirect => {
let mut first_party = ForkMap::default();
for requirement in manifest.user_requirements(env, dependencies) {
first_party.add(&requirement, ());
}
Self::LowestDirect(first_party)
}
}
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/exclusions.rs | crates/uv-resolver/src/exclusions.rs | use uv_configuration::{Reinstall, Upgrade};
use uv_normalize::PackageName;
/// Tracks locally installed packages that should not be selected during resolution.
#[derive(Debug, Default, Clone)]
pub struct Exclusions {
reinstall: Reinstall,
upgrade: Upgrade,
}
impl Exclusions {
pub fn new(reinstall: Reinstall, upgrade: Upgrade) -> Self {
Self { reinstall, upgrade }
}
pub fn reinstall(&self, package: &PackageName) -> bool {
self.reinstall.contains_package(package)
}
pub fn upgrade(&self, package: &PackageName) -> bool {
self.upgrade.contains(package)
}
pub fn contains(&self, package: &PackageName) -> bool {
self.reinstall(package) || self.upgrade(package)
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/fork_urls.rs | crates/uv-resolver/src/fork_urls.rs | use std::collections::hash_map::Entry;
use rustc_hash::FxHashMap;
use uv_normalize::PackageName;
use uv_pypi_types::VerbatimParsedUrl;
use crate::ResolveError;
use crate::resolver::ResolverEnvironment;
/// See [`crate::resolver::ForkState`].
#[derive(Default, Debug, Clone)]
pub(crate) struct ForkUrls(FxHashMap<PackageName, VerbatimParsedUrl>);
impl ForkUrls {
/// Get the URL previously used for a package in this fork.
pub(crate) fn get(&self, package_name: &PackageName) -> Option<&VerbatimParsedUrl> {
self.0.get(package_name)
}
/// Whether we use a URL for this package.
pub(crate) fn contains_key(&self, package_name: &PackageName) -> bool {
self.0.contains_key(package_name)
}
/// Check that this is the only URL used for this package in this fork.
pub(crate) fn insert(
&mut self,
package_name: &PackageName,
url: &VerbatimParsedUrl,
env: &ResolverEnvironment,
) -> Result<(), ResolveError> {
match self.0.entry(package_name.clone()) {
Entry::Occupied(previous) => {
if previous.get() != url {
let mut conflicting_url =
vec![previous.get().parsed_url.clone(), url.parsed_url.clone()];
conflicting_url.sort();
return Err(ResolveError::ConflictingUrls {
package_name: package_name.clone(),
urls: conflicting_url,
env: env.clone(),
});
}
}
Entry::Vacant(vacant) => {
vacant.insert(url.clone());
}
}
Ok(())
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/python_requirement.rs | crates/uv-resolver/src/python_requirement.rs | use std::collections::Bound;
use uv_distribution_types::{RequiresPython, RequiresPythonRange};
use uv_pep440::Version;
use uv_pep508::{MarkerEnvironment, MarkerTree};
use uv_python::{Interpreter, PythonVersion};
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct PythonRequirement {
source: PythonRequirementSource,
/// The exact installed version of Python.
exact: Version,
/// The installed version of Python.
installed: RequiresPython,
/// The target version of Python; that is, the version of Python for which we are resolving
/// dependencies. This is typically the same as the installed version, but may be different
/// when specifying an alternate Python version for the resolution.
target: RequiresPython,
}
impl PythonRequirement {
/// Create a [`PythonRequirement`] to resolve against both an [`Interpreter`] and a
/// [`PythonVersion`].
pub fn from_python_version(interpreter: &Interpreter, python_version: &PythonVersion) -> Self {
let exact = interpreter.python_full_version().version.clone();
let installed = interpreter
.python_full_version()
.version
.only_release()
.without_trailing_zeros();
let target = python_version
.python_full_version()
.only_release()
.without_trailing_zeros();
Self {
exact,
installed: RequiresPython::greater_than_equal_version(&installed),
target: RequiresPython::greater_than_equal_version(&target),
source: PythonRequirementSource::PythonVersion,
}
}
/// Create a [`PythonRequirement`] to resolve against both an [`Interpreter`] and a
/// [`MarkerEnvironment`].
pub fn from_requires_python(
interpreter: &Interpreter,
requires_python: RequiresPython,
) -> Self {
Self::from_marker_environment(interpreter.markers(), requires_python)
}
/// Create a [`PythonRequirement`] to resolve against an [`Interpreter`].
pub fn from_interpreter(interpreter: &Interpreter) -> Self {
let exact = interpreter
.python_full_version()
.version
.clone()
.without_trailing_zeros();
let installed = interpreter
.python_full_version()
.version
.only_release()
.without_trailing_zeros();
Self {
exact,
installed: RequiresPython::greater_than_equal_version(&installed),
target: RequiresPython::greater_than_equal_version(&installed),
source: PythonRequirementSource::Interpreter,
}
}
/// Create a [`PythonRequirement`] from a [`MarkerEnvironment`] and a
/// specific `Requires-Python` directive.
///
/// This has the same "source" as
/// [`PythonRequirement::from_requires_python`], but is useful for
/// constructing a `PythonRequirement` without an [`Interpreter`].
pub fn from_marker_environment(
marker_env: &MarkerEnvironment,
requires_python: RequiresPython,
) -> Self {
let exact = marker_env
.python_full_version()
.version
.clone()
.without_trailing_zeros();
let installed = marker_env
.python_full_version()
.version
.only_release()
.without_trailing_zeros();
Self {
exact,
installed: RequiresPython::greater_than_equal_version(&installed),
target: requires_python,
source: PythonRequirementSource::RequiresPython,
}
}
/// Narrow the [`PythonRequirement`] to the given version, if it's stricter (i.e., greater)
/// than the current `Requires-Python` minimum.
///
/// Returns `None` if the given range is not narrower than the current range.
pub fn narrow(&self, target: &RequiresPythonRange) -> Option<Self> {
Some(Self {
exact: self.exact.clone(),
installed: self.installed.clone(),
target: self.target.narrow(target)?,
source: self.source,
})
}
/// Split the [`PythonRequirement`] at the given version.
///
/// For example, if the current requirement is `>=3.10`, and the split point is `3.11`, then
/// the result will be `>=3.10 and <3.11` and `>=3.11`.
pub fn split(&self, at: Bound<Version>) -> Option<(Self, Self)> {
let (lower, upper) = self.target.split(at)?;
Some((
Self {
exact: self.exact.clone(),
installed: self.installed.clone(),
target: lower,
source: self.source,
},
Self {
exact: self.exact.clone(),
installed: self.installed.clone(),
target: upper,
source: self.source,
},
))
}
/// Returns `true` if the minimum version of Python required by the target is greater than the
/// installed version.
pub fn raises(&self, target: &RequiresPythonRange) -> bool {
target.lower() > self.target.range().lower()
}
/// Return the exact version of Python.
pub fn exact(&self) -> &Version {
&self.exact
}
/// Return the installed version of Python.
pub fn installed(&self) -> &RequiresPython {
&self.installed
}
/// Return the target version of Python.
pub fn target(&self) -> &RequiresPython {
&self.target
}
/// Return the source of the [`PythonRequirement`].
pub fn source(&self) -> PythonRequirementSource {
self.source
}
/// A wrapper around `RequiresPython::simplify_markers`. See its docs for
/// more info.
///
/// When this `PythonRequirement` isn't `RequiresPython`, the given markers
/// are returned unchanged.
pub(crate) fn simplify_markers(&self, marker: MarkerTree) -> MarkerTree {
self.target.simplify_markers(marker)
}
/// Return a [`MarkerTree`] representing the Python requirement.
///
/// See: [`RequiresPython::to_marker_tree`]
pub fn to_marker_tree(&self) -> MarkerTree {
self.target.to_marker_tree()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Hash, Ord)]
pub enum PythonRequirementSource {
/// `--python-version`
PythonVersion,
/// `Requires-Python`
RequiresPython,
/// The discovered Python interpreter.
Interpreter,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/error.rs | crates/uv-resolver/src/error.rs | use std::collections::{BTreeMap, BTreeSet, Bound};
use std::fmt::Formatter;
use std::sync::Arc;
use indexmap::IndexSet;
use itertools::Itertools;
use owo_colors::OwoColorize;
use pubgrub::{
DefaultStringReporter, DerivationTree, Derived, External, Range, Ranges, Reporter, Term,
};
use rustc_hash::FxHashMap;
use tracing::trace;
use uv_distribution_types::{
DerivationChain, DistErrorKind, IndexCapabilities, IndexLocations, IndexUrl, RequestedDist,
};
use uv_normalize::{ExtraName, InvalidNameError, PackageName};
use uv_pep440::{LocalVersionSlice, LowerBound, Version, VersionSpecifier};
use uv_pep508::{MarkerEnvironment, MarkerExpression, MarkerTree, MarkerValueVersion};
use uv_platform_tags::Tags;
use uv_pypi_types::ParsedUrl;
use uv_redacted::DisplaySafeUrl;
use uv_static::EnvVars;
use crate::candidate_selector::CandidateSelector;
use crate::dependency_provider::UvDependencyProvider;
use crate::fork_indexes::ForkIndexes;
use crate::fork_urls::ForkUrls;
use crate::prerelease::AllowPrerelease;
use crate::pubgrub::{PubGrubPackage, PubGrubPackageInner, PubGrubReportFormatter};
use crate::python_requirement::PythonRequirement;
use crate::resolution::ConflictingDistributionError;
use crate::resolver::{
MetadataUnavailable, ResolverEnvironment, UnavailablePackage, UnavailableReason,
};
use crate::{InMemoryIndex, Options};
#[derive(Debug, thiserror::Error)]
pub enum ResolveError {
#[error("Failed to resolve dependencies for package `{1}=={2}`")]
Dependencies(
#[source] Box<ResolveError>,
PackageName,
Version,
DerivationChain,
),
#[error(transparent)]
Client(#[from] uv_client::Error),
#[error(transparent)]
Distribution(#[from] uv_distribution::Error),
#[error("The channel closed unexpectedly")]
ChannelClosed,
#[error(transparent)]
Join(#[from] tokio::task::JoinError),
#[error("Attempted to wait on an unregistered task: `{_0}`")]
UnregisteredTask(String),
#[error(
"Requirements contain conflicting URLs for package `{package_name}`{}:\n- {}",
if env.marker_environment().is_some() {
String::new()
} else {
format!(" in {env}")
},
urls.iter()
.map(|url| format!("{}{}", DisplaySafeUrl::from(url.clone()), if url.is_editable() { " (editable)" } else { "" }))
.collect::<Vec<_>>()
.join("\n- ")
)]
ConflictingUrls {
package_name: PackageName,
urls: Vec<ParsedUrl>,
env: ResolverEnvironment,
},
#[error(
"Requirements contain conflicting indexes for package `{package_name}`{}:\n- {}",
if env.marker_environment().is_some() {
String::new()
} else {
format!(" in {env}")
},
indexes.iter()
.map(std::string::ToString::to_string)
.collect::<Vec<_>>()
.join("\n- ")
)]
ConflictingIndexesForEnvironment {
package_name: PackageName,
indexes: Vec<IndexUrl>,
env: ResolverEnvironment,
},
#[error("Requirements contain conflicting indexes for package `{0}`: `{1}` vs. `{2}`")]
ConflictingIndexes(PackageName, String, String),
#[error(
"Package `{name}` was included as a URL dependency. URL dependencies must be expressed as direct requirements or constraints. Consider adding `{requirement}` to your dependencies or constraints file.",
name = name.cyan(),
requirement = format!("{name} @ {url}").cyan(),
)]
DisallowedUrl { name: PackageName, url: String },
#[error(transparent)]
DistributionType(#[from] uv_distribution_types::Error),
#[error(transparent)]
ParsedUrl(#[from] uv_pypi_types::ParsedUrlError),
#[error("{0} `{1}`")]
Dist(
DistErrorKind,
Box<RequestedDist>,
DerivationChain,
#[source] Arc<uv_distribution::Error>,
),
#[error(transparent)]
NoSolution(#[from] Box<NoSolutionError>),
#[error("Attempted to construct an invalid version specifier")]
InvalidVersion(#[from] uv_pep440::VersionSpecifierBuildError),
#[error(
"In `--require-hashes` mode, all requirements must be pinned upfront with `==`, but found: `{0}`"
)]
UnhashedPackage(PackageName),
#[error("found conflicting distribution in resolution: {0}")]
ConflictingDistribution(ConflictingDistributionError),
#[error("Package `{0}` is unavailable")]
PackageUnavailable(PackageName),
#[error("Invalid extra value in conflict marker: {reason}: {raw_extra}")]
InvalidExtraInConflictMarker {
reason: String,
raw_extra: ExtraName,
},
#[error("Invalid {kind} value in conflict marker: {name_error}")]
InvalidValueInConflictMarker {
kind: &'static str,
#[source]
name_error: InvalidNameError,
},
#[error(
"The index returned metadata for the wrong package: expected {request} for {expected}, got {request} for {actual}"
)]
MismatchedPackageName {
request: &'static str,
expected: PackageName,
actual: PackageName,
},
}
// Map a failed send on the resolver's internal mpsc channel to `ResolveError::ChannelClosed`.
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for ResolveError {
    /// Drop the value we want to send to not leak the private type we're sending.
    /// The tokio error only says "channel closed", so we don't lose information.
    fn from(_value: tokio::sync::mpsc::error::SendError<T>) -> Self {
        Self::ChannelClosed
    }
}
/// A pubgrub derivation tree over uv's package, version-range, and unavailability-reason types.
pub type ErrorTree = DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>;
/// A wrapper around [`pubgrub::error::NoSolutionError`] that displays a resolution failure report.
pub struct NoSolutionError {
    /// The raw derivation tree produced by pubgrub for the failed resolution.
    error: pubgrub::NoSolutionError<UvDependencyProvider>,
    /// In-memory distribution metadata, consulted when generating hints.
    index: InMemoryIndex,
    /// The versions seen for each package; used to simplify ranges in the rendered report.
    available_versions: FxHashMap<PackageName, BTreeSet<Version>>,
    /// The indexes on which each package was found; used when generating hints.
    available_indexes: FxHashMap<PackageName, BTreeSet<IndexUrl>>,
    /// The candidate selector; used both for range simplification and hint generation.
    selector: CandidateSelector,
    /// The target Python requirement; used by the report formatter and marker simplification.
    python_requirement: PythonRequirement,
    /// The configured index locations; passed to hint generation.
    index_locations: IndexLocations,
    /// Capabilities discovered per index; passed to hint generation.
    index_capabilities: IndexCapabilities,
    /// Packages that were wholly unavailable; passed to hint generation.
    unavailable_packages: FxHashMap<PackageName, UnavailablePackage>,
    /// Versions whose metadata could not be obtained; passed to hint generation.
    incomplete_packages: FxHashMap<PackageName, BTreeMap<Version, MetadataUnavailable>>,
    /// Per-fork URL pins; passed to hint generation.
    fork_urls: ForkUrls,
    /// Per-fork index pins; passed to hint generation.
    fork_indexes: ForkIndexes,
    /// The resolver environment (fork) in which resolution failed.
    env: ResolverEnvironment,
    /// The marker environment of the running interpreter; used to suggest narrowing
    /// `requires-python` or `tool.uv.environments` when the failure is for another target.
    current_environment: MarkerEnvironment,
    /// The platform tags, if resolving for a specific platform.
    tags: Option<Tags>,
    /// The names of all workspace members; used to tailor the report.
    workspace_members: BTreeSet<PackageName>,
    /// The resolver options; passed to hint generation.
    options: Options,
}
impl NoSolutionError {
    /// Create a new [`NoSolutionError`] from a [`pubgrub::NoSolutionError`].
    ///
    /// All of the auxiliary state is captured up front so that the `Display` impl can
    /// render the report (and hints) without further access to the resolver.
    pub(crate) fn new(
        error: pubgrub::NoSolutionError<UvDependencyProvider>,
        index: InMemoryIndex,
        available_versions: FxHashMap<PackageName, BTreeSet<Version>>,
        available_indexes: FxHashMap<PackageName, BTreeSet<IndexUrl>>,
        selector: CandidateSelector,
        python_requirement: PythonRequirement,
        index_locations: IndexLocations,
        index_capabilities: IndexCapabilities,
        unavailable_packages: FxHashMap<PackageName, UnavailablePackage>,
        incomplete_packages: FxHashMap<PackageName, BTreeMap<Version, MetadataUnavailable>>,
        fork_urls: ForkUrls,
        fork_indexes: ForkIndexes,
        env: ResolverEnvironment,
        current_environment: MarkerEnvironment,
        tags: Option<Tags>,
        workspace_members: BTreeSet<PackageName>,
        options: Options,
    ) -> Self {
        Self {
            error,
            index,
            available_versions,
            available_indexes,
            selector,
            python_requirement,
            index_locations,
            index_capabilities,
            unavailable_packages,
            incomplete_packages,
            fork_urls,
            fork_indexes,
            env,
            current_environment,
            tags,
            workspace_members,
            options,
        }
    }

    /// Given a [`DerivationTree`], collapse any [`External::FromDependencyOf`] incompatibilities
    /// that wrap a proxy package (e.g., a [`PubGrubPackageInner::Extra`] package).
    pub(crate) fn collapse_proxies(derivation_tree: ErrorTree) -> ErrorTree {
        // Returns `None` when an entire subtree consists of proxy incompatibilities and
        // should be dropped.
        fn collapse(derivation_tree: ErrorTree) -> Option<ErrorTree> {
            match derivation_tree {
                DerivationTree::Derived(derived) => {
                    match (&*derived.cause1, &*derived.cause2) {
                        // Both causes are proxy dependencies: drop this node entirely.
                        (
                            DerivationTree::External(External::FromDependencyOf(package1, ..)),
                            DerivationTree::External(External::FromDependencyOf(package2, ..)),
                        ) if package1.is_proxy() && package2.is_proxy() => None,
                        // Exactly one cause is a proxy dependency: replace this node with
                        // the (collapsed) other cause.
                        (
                            DerivationTree::External(External::FromDependencyOf(package, ..)),
                            cause,
                        ) if package.is_proxy() => collapse(cause.clone()),
                        (
                            cause,
                            DerivationTree::External(External::FromDependencyOf(package, ..)),
                        ) if package.is_proxy() => collapse(cause.clone()),
                        // Otherwise, collapse both causes and rebuild from whatever remains.
                        (cause1, cause2) => {
                            let cause1 = collapse(cause1.clone());
                            let cause2 = collapse(cause2.clone());
                            match (cause1, cause2) {
                                (Some(cause1), Some(cause2)) => {
                                    Some(DerivationTree::Derived(Derived {
                                        cause1: Arc::new(cause1),
                                        cause2: Arc::new(cause2),
                                        ..derived
                                    }))
                                }
                                (Some(cause), None) | (None, Some(cause)) => Some(cause),
                                _ => None,
                            }
                        }
                    }
                }
                // Leaves are never proxies by themselves; keep them.
                DerivationTree::External(_) => Some(derivation_tree),
            }
        }

        collapse(derivation_tree)
            .expect("derivation tree should contain at least one external term")
    }

    /// Simplifies the version ranges on any incompatibilities to remove the `[max]` sentinel.
    ///
    /// The `[max]` sentinel is used to represent the maximum local version of a package, to
    /// implement PEP 440 semantics for local version equality. For example, `1.0.0+foo` needs to
    /// satisfy `==1.0.0`.
    pub(crate) fn collapse_local_version_segments(derivation_tree: ErrorTree) -> ErrorTree {
        // Returns `None` when a node becomes vacuous after stripping the sentinel.
        fn strip(derivation_tree: ErrorTree) -> Option<ErrorTree> {
            match derivation_tree {
                DerivationTree::External(External::NotRoot(_, _)) => Some(derivation_tree),
                DerivationTree::External(External::NoVersions(package, versions)) => {
                    // A range that is the complement of a sentinel range carries no
                    // user-meaningful information; drop the node.
                    if SentinelRange::from(&versions).is_complement() {
                        return None;
                    }

                    let versions = SentinelRange::from(&versions).strip();
                    Some(DerivationTree::External(External::NoVersions(
                        package, versions,
                    )))
                }
                DerivationTree::External(External::FromDependencyOf(
                    package1,
                    versions1,
                    package2,
                    versions2,
                )) => {
                    let versions1 = SentinelRange::from(&versions1).strip();
                    let versions2 = SentinelRange::from(&versions2).strip();
                    Some(DerivationTree::External(External::FromDependencyOf(
                        package1, versions1, package2, versions2,
                    )))
                }
                DerivationTree::External(External::Custom(package, versions, reason)) => {
                    let versions = SentinelRange::from(&versions).strip();
                    Some(DerivationTree::External(External::Custom(
                        package, versions, reason,
                    )))
                }
                DerivationTree::Derived(mut derived) => {
                    let cause1 = strip((*derived.cause1).clone());
                    let cause2 = strip((*derived.cause2).clone());
                    match (cause1, cause2) {
                        // Both children survive: rebuild the node, stripping the sentinel
                        // from the conclusion terms as well.
                        (Some(cause1), Some(cause2)) => Some(DerivationTree::Derived(Derived {
                            cause1: Arc::new(cause1),
                            cause2: Arc::new(cause2),
                            terms: std::mem::take(&mut derived.terms)
                                .into_iter()
                                .map(|(pkg, term)| {
                                    let term = match term {
                                        Term::Positive(versions) => {
                                            Term::Positive(SentinelRange::from(&versions).strip())
                                        }
                                        Term::Negative(versions) => {
                                            Term::Negative(SentinelRange::from(&versions).strip())
                                        }
                                    };
                                    (pkg, term)
                                })
                                .collect(),
                            shared_id: derived.shared_id,
                        })),
                        // Only one child survives: promote it in place of this node.
                        (Some(cause), None) | (None, Some(cause)) => Some(cause),
                        _ => None,
                    }
                }
            }
        }

        strip(derivation_tree).expect("derivation tree should contain at least one term")
    }

    /// Given a [`DerivationTree`], identify the largest required Python version that is missing.
    pub fn find_requires_python(&self) -> LowerBound {
        fn find(derivation_tree: &ErrorTree, minimum: &mut LowerBound) {
            match derivation_tree {
                DerivationTree::Derived(derived) => {
                    find(derived.cause1.as_ref(), minimum);
                    find(derived.cause2.as_ref(), minimum);
                }
                DerivationTree::External(External::FromDependencyOf(.., package, version)) => {
                    // Only dependencies on the synthetic Python package constrain the
                    // Python version.
                    if let PubGrubPackageInner::Python(_) = &**package {
                        if let Some((lower, ..)) = version.bounding_range() {
                            let lower = LowerBound::new(lower.cloned());
                            // Track the maximum of all lower bounds seen.
                            if lower > *minimum {
                                *minimum = lower;
                            }
                        }
                    }
                }
                DerivationTree::External(_) => {}
            }
        }

        let mut minimum = LowerBound::default();
        find(&self.error, &mut minimum);
        minimum
    }

    /// Initialize a [`NoSolutionHeader`] for this error.
    pub fn header(&self) -> NoSolutionHeader {
        NoSolutionHeader::new(self.env.clone())
    }

    /// Get the conflict derivation tree for external analysis
    pub fn derivation_tree(&self) -> &ErrorTree {
        &self.error
    }

    /// Hint at limiting the resolver environment if universal resolution failed for a target
    /// that is not the current platform or not the current Python version.
    fn hint_disjoint_targets(&self, f: &mut Formatter) -> std::fmt::Result {
        // Only applicable to universal resolution.
        let Some(markers) = self.env.fork_markers() else {
            return Ok(());
        };

        // TODO(konsti): This is a crude approximation to telling the user the difference
        // between their Python version and the relevant Python version range from the marker.
        let current_python_version = self.current_environment.python_version().version.clone();
        let current_python_marker = MarkerTree::expression(MarkerExpression::Version {
            key: MarkerValueVersion::PythonVersion,
            specifier: VersionSpecifier::equals_version(current_python_version.clone()),
        });
        if markers.is_disjoint(current_python_marker) {
            // The failing fork cannot apply to the active Python version at all.
            write!(
                f,
                "\n\n{}{} While the active Python version is {}, \
                the resolution failed for other Python versions supported by your \
                project. Consider limiting your project's supported Python versions \
                using `requires-python`.",
                "hint".bold().cyan(),
                ":".bold(),
                current_python_version,
            )?;
        } else if !markers.evaluate(&self.current_environment, &[]) {
            // The failing fork does not match the current environment's markers.
            write!(
                f,
                "\n\n{}{} The resolution failed for an environment that is not the current one, \
                consider limiting the environments with `tool.uv.environments`.",
                "hint".bold().cyan(),
                ":".bold(),
            )?;
        }
        Ok(())
    }

    /// Get the packages that are involved in this error.
    pub fn packages(&self) -> impl Iterator<Item = &PackageName> {
        self.error
            .packages()
            .into_iter()
            .filter_map(|p| p.name())
            .unique()
    }
}
impl std::fmt::Debug for NoSolutionError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Include every field except `index`, which doesn't implement `Debug`.
        //
        // The exhaustive destructuring (no `..`) means adding a field to the struct
        // forces this impl to be revisited.
        let Self {
            error,
            index: _,
            available_versions,
            available_indexes,
            selector,
            python_requirement,
            index_locations,
            index_capabilities,
            unavailable_packages,
            incomplete_packages,
            fork_urls,
            fork_indexes,
            env,
            current_environment,
            tags,
            workspace_members,
            options,
        } = self;
        f.debug_struct("NoSolutionError")
            .field("error", error)
            .field("available_versions", available_versions)
            .field("available_indexes", available_indexes)
            .field("selector", selector)
            .field("python_requirement", python_requirement)
            .field("index_locations", index_locations)
            .field("index_capabilities", index_capabilities)
            .field("unavailable_packages", unavailable_packages)
            .field("incomplete_packages", incomplete_packages)
            .field("fork_urls", fork_urls)
            .field("fork_indexes", fork_indexes)
            .field("env", env)
            .field("current_environment", current_environment)
            .field("tags", tags)
            .field("workspace_members", workspace_members)
            .field("options", options)
            .finish()
    }
}
// Marker impl: there is no underlying `source()`; the report is rendered via `Display`.
impl std::error::Error for NoSolutionError {}
impl std::fmt::Display for NoSolutionError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Write the derivation report.
        let formatter = PubGrubReportFormatter {
            available_versions: &self.available_versions,
            python_requirement: &self.python_requirement,
            workspace_members: &self.workspace_members,
            tags: self.tags.as_ref(),
        };

        // Transform the error tree for reporting. The reductions below are applied in a
        // deliberate order; each pass assumes the shape left behind by the previous ones.
        let mut tree = self.error.clone();
        simplify_derivation_tree_markers(&self.python_requirement, &mut tree);

        let should_display_tree = std::env::var_os(EnvVars::UV_INTERNAL__SHOW_DERIVATION_TREE)
            .is_some()
            || tracing::enabled!(tracing::Level::TRACE);

        if should_display_tree {
            display_tree(&tree, "Resolver derivation tree before reduction");
        }

        collapse_no_versions_of_workspace_members(&mut tree, &self.workspace_members);

        // With a single workspace member, drop the root's dependency edge on that project
        // to avoid redundant "your project depends on itself"-style framing.
        if self.workspace_members.len() == 1 {
            let project = self.workspace_members.iter().next().unwrap();
            drop_root_dependency_on_project(&mut tree, project);
        }

        collapse_unavailable_versions(&mut tree);
        collapse_redundant_depends_on_no_versions(&mut tree);

        simplify_derivation_tree_ranges(
            &mut tree,
            &self.available_versions,
            &self.selector,
            &self.env,
        );

        // This needs to be applied _after_ simplification of the ranges
        collapse_redundant_no_versions(&mut tree);
        while collapse_redundant_no_versions_tree(&mut tree) {
            // Continue collapsing until no more redundant nodes are found
        }

        if should_display_tree {
            display_tree(&tree, "Resolver derivation tree after reduction");
        }

        let report = DefaultStringReporter::report_with_formatter(&tree, &formatter);
        write!(f, "{report}")?;

        // Include any additional hints.
        let mut additional_hints = IndexSet::default();
        formatter.generate_hints(
            &tree,
            &self.index,
            &self.selector,
            &self.index_locations,
            &self.index_capabilities,
            &self.available_indexes,
            &self.unavailable_packages,
            &self.incomplete_packages,
            &self.fork_urls,
            &self.fork_indexes,
            &self.env,
            self.tags.as_ref(),
            &self.workspace_members,
            &self.options,
            &mut additional_hints,
        );
        for hint in additional_hints {
            write!(f, "\n\n{hint}")?;
        }

        self.hint_disjoint_targets(f)?;

        Ok(())
    }
}
#[allow(clippy::print_stderr)]
fn display_tree(
error: &DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
name: &str,
) {
let mut lines = Vec::new();
display_tree_inner(error, &mut lines, 0);
lines.reverse();
if std::env::var_os(EnvVars::UV_INTERNAL__SHOW_DERIVATION_TREE).is_some() {
eprintln!("{name}\n{}", lines.join("\n"));
} else {
trace!("{name}\n{}", lines.join("\n"));
}
}
/// Append one line per node of the derivation tree to `lines`, indented by `depth`.
///
/// Children are appended before their parent's terms; the caller reverses the buffer
/// so that the root appears first.
fn display_tree_inner(
    error: &DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
    lines: &mut Vec<String>,
    depth: usize,
) {
    let prefix = " ".repeat(depth);
    match error {
        DerivationTree::Derived(derived) => {
            display_tree_inner(&derived.cause1, lines, depth + 1);
            display_tree_inner(&derived.cause2, lines, depth + 1);
            for (package, term) in &derived.terms {
                let rendered = match term {
                    Term::Positive(versions) => {
                        format!("{prefix}term {package}{versions}")
                    }
                    Term::Negative(versions) => {
                        format!("{prefix}term not {package}{versions}")
                    }
                };
                lines.push(rendered);
            }
        }
        DerivationTree::External(External::FromDependencyOf(
            package,
            version,
            dependency,
            dependency_version,
        )) => {
            lines.push(format!(
                "{prefix}{package}{version} depends on {dependency}{dependency_version}"
            ));
        }
        DerivationTree::External(External::Custom(package, versions, reason)) => match reason {
            UnavailableReason::Package(_) => {
                lines.push(format!("{prefix}{package} {reason}"));
            }
            UnavailableReason::Version(_) => {
                lines.push(format!("{prefix}{package}{versions} {reason}"));
            }
        },
        DerivationTree::External(External::NoVersions(package, versions)) => {
            lines.push(format!("{prefix}no versions of {package}{versions}"));
        }
        DerivationTree::External(External::NotRoot(package, versions)) => {
            lines.push(format!("{prefix}not root {package}{versions}"));
        }
    }
}
/// Drop `NoVersions` nodes whose range is already implied by the surrounding
/// conclusion (terms), replacing the parent node with its other child.
fn collapse_redundant_no_versions(
    tree: &mut DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
) {
    match tree {
        DerivationTree::External(_) => {}
        DerivationTree::Derived(derived) => {
            match (
                Arc::make_mut(&mut derived.cause1),
                Arc::make_mut(&mut derived.cause2),
            ) {
                // If we have a node for a package with no versions...
                (
                    DerivationTree::External(External::NoVersions(package, versions)),
                    ref mut other,
                )
                | (
                    ref mut other,
                    DerivationTree::External(External::NoVersions(package, versions)),
                ) => {
                    // First, always recursively visit the other side of the tree
                    collapse_redundant_no_versions(other);

                    // Retrieve the nearest terms, either alongside this node or from the parent.
                    let package_terms = if let DerivationTree::Derived(derived) = other {
                        derived.terms.get(package)
                    } else {
                        derived.terms.get(package)
                    };

                    let Some(Term::Positive(term)) = package_terms else {
                        return;
                    };

                    let versions = versions.complement();

                    // If we're disqualifying a single version, this is important to retain, e.g,
                    // for `only foo==1.0.0 is available`
                    if versions.as_singleton().is_some() {
                        return;
                    }

                    // If the range in the conclusion (terms) matches the range of no versions,
                    // then we'll drop this node. If the range is "all versions", then there's
                    // also no need to enumerate the available versions.
                    if *term != Range::full() && *term != versions {
                        return;
                    }

                    // Replace this parent node with the surviving child.
                    *tree = other.clone();
                }
                // If not, just recurse
                _ => {
                    collapse_redundant_no_versions(Arc::make_mut(&mut derived.cause1));
                    collapse_redundant_no_versions(Arc::make_mut(&mut derived.cause2));
                }
            }
        }
    }
}
/// Given a [`DerivationTree`], collapse any derived trees with two `NoVersions` nodes for the same
/// package. For example, if we have a tree like:
///
/// ```text
/// term Python>=3.7.9
///   no versions of Python>=3.7.9, <3.8
///   no versions of Python>=3.8
/// ```
///
/// We can simplify this to:
///
/// ```text
/// no versions of Python>=3.7.9
/// ```
///
/// This function returns a `bool` indicating if a change was made. This allows for repeated calls,
/// e.g., the following tree contains nested redundant trees:
///
/// ```text
/// term Python>=3.10
///   no versions of Python>=3.11, <3.12
///   term Python>=3.10, <3.11 | >=3.12
///     no versions of Python>=3.12
///     no versions of Python>=3.10, <3.11
/// ```
///
/// We can simplify this to:
///
/// ```text
/// no versions of Python>=3.10
/// ```
///
/// This appears to be common with the way the resolver currently models Python version
/// incompatibilities.
fn collapse_redundant_no_versions_tree(
    tree: &mut DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
) -> bool {
    match tree {
        DerivationTree::External(_) => false,
        DerivationTree::Derived(derived) => {
            match (
                Arc::make_mut(&mut derived.cause1),
                Arc::make_mut(&mut derived.cause2),
            ) {
                // If we have a tree with two `NoVersions` nodes for the same package...
                (
                    DerivationTree::External(External::NoVersions(package, versions)),
                    DerivationTree::External(External::NoVersions(other_package, other_versions)),
                ) if package == other_package => {
                    // Retrieve the terms from the parent.
                    let Some(Term::Positive(term)) = derived.terms.get(package) else {
                        return false;
                    };

                    // If they're both subsets of the term, then drop this node in favor of the term
                    if versions.subset_of(term) && other_versions.subset_of(term) {
                        *tree = DerivationTree::External(External::NoVersions(
                            package.clone(),
                            term.clone(),
                        ));
                        return true;
                    }

                    false
                }
                // If not, just recurse; `||` short-circuits, so at most one subtree is
                // rewritten per call — hence the caller loops until no change is reported.
                _ => {
                    collapse_redundant_no_versions_tree(Arc::make_mut(&mut derived.cause1))
                        || collapse_redundant_no_versions_tree(Arc::make_mut(&mut derived.cause2))
                }
            }
        }
    }
}
/// Given a [`DerivationTree`], collapse any `NoVersion` incompatibilities for workspace members
/// to avoid saying things like "only <workspace-member>==0.1.0 is available".
fn collapse_no_versions_of_workspace_members(
    tree: &mut DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
    workspace_members: &BTreeSet<PackageName>,
) {
    match tree {
        DerivationTree::External(_) => {}
        DerivationTree::Derived(derived) => {
            match (
                Arc::make_mut(&mut derived.cause1),
                Arc::make_mut(&mut derived.cause2),
            ) {
                // If we have a node for a package with no versions...
                (DerivationTree::External(External::NoVersions(package, _)), ref mut other)
                | (ref mut other, DerivationTree::External(External::NoVersions(package, _))) => {
                    // First, always recursively visit the other side of the tree
                    collapse_no_versions_of_workspace_members(other, workspace_members);

                    // Then, if the package is a workspace member...
                    // (extras and dependency groups of a member count as the member itself)
                    let (PubGrubPackageInner::Package { name, .. }
                    | PubGrubPackageInner::Extra { name, .. }
                    | PubGrubPackageInner::Group { name, .. }) = &**package
                    else {
                        return;
                    };
                    if !workspace_members.contains(name) {
                        return;
                    }

                    // Replace this node with the other tree
                    *tree = other.clone();
                }
                // If not, just recurse
                _ => {
                    collapse_no_versions_of_workspace_members(
                        Arc::make_mut(&mut derived.cause1),
                        workspace_members,
                    );
                    collapse_no_versions_of_workspace_members(
                        Arc::make_mut(&mut derived.cause2),
                        workspace_members,
                    );
                }
            }
        }
    }
}
/// Given a [`DerivationTree`], collapse `NoVersions` incompatibilities that are redundant children
/// of a dependency. For example, if we have a tree like:
///
/// ```text
/// A>=1,<2 depends on B
/// A has no versions >1,<2
/// C depends on A>=1,<2
/// ```
///
/// We can simplify this to `C depends A>=1 and A>=1 depends on B so C depends on B` without
/// explaining that there are no other versions of A. This is dependent on range of A in "A depends
/// on" being a subset of range of A in "depends on A". For example, in a tree like:
///
/// ```text
/// A>=1,<3 depends on B
/// A has no versions >2,<3
/// C depends on A>=2,<3
/// ```
///
/// We cannot say `C depends on A>=2 and A>=1 depends on B so C depends on B` because there is a
/// hole in the range — `A>=1,<3` is not a subset of `A>=2,<3`.
fn collapse_redundant_depends_on_no_versions(
tree: &mut DerivationTree<PubGrubPackage, Range<Version>, UnavailableReason>,
) {
match tree {
DerivationTree::External(_) => {}
DerivationTree::Derived(derived) => {
// If one node is a dependency incompatibility...
match (
Arc::make_mut(&mut derived.cause1),
Arc::make_mut(&mut derived.cause2),
) {
(
DerivationTree::External(External::FromDependencyOf(package, versions, _, _)),
ref mut other,
)
| (
ref mut other,
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/flat_index.rs | crates/uv-resolver/src/flat_index.rs | use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use rustc_hash::FxHashMap;
use tracing::instrument;
use uv_client::{FlatIndexEntries, FlatIndexEntry};
use uv_configuration::BuildOptions;
use uv_distribution_filename::{DistFilename, SourceDistFilename, WheelFilename};
use uv_distribution_types::{
File, HashComparison, HashPolicy, IncompatibleSource, IncompatibleWheel, IndexUrl,
PrioritizedDist, RegistryBuiltWheel, RegistrySourceDist, SourceDistCompatibility,
WheelCompatibility,
};
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_platform_tags::{TagCompatibility, Tags};
use uv_pypi_types::HashDigest;
use uv_types::HashStrategy;
/// A set of [`PrioritizedDist`] from a `--find-links` entry, indexed by [`PackageName`]
/// and [`Version`].
///
/// Constructed via [`FlatIndex::from_entries`].
#[derive(Debug, Clone, Default)]
pub struct FlatIndex {
    /// The list of [`FlatDistributions`] from the `--find-links` entries, indexed by package name.
    index: FxHashMap<PackageName, FlatDistributions>,
    /// Whether any `--find-links` entries could not be resolved due to a lack of network
    /// connectivity.
    offline: bool,
}
impl FlatIndex {
    /// Collect all files from a `--find-links` target into a [`FlatIndex`].
    #[instrument(skip_all)]
    pub fn from_entries(
        entries: FlatIndexEntries,
        tags: Option<&Tags>,
        hasher: &HashStrategy,
        build_options: &BuildOptions,
    ) -> Self {
        // Record whether any entries were unreachable before consuming the list.
        let offline = entries.offline;

        // Group the files by package name, filing each file under its version.
        let mut index = FxHashMap::<PackageName, FlatDistributions>::default();
        for entry in entries.entries {
            index
                .entry(entry.filename.name().clone())
                .or_default()
                .add_file(
                    entry.file,
                    entry.filename,
                    tags,
                    hasher,
                    build_options,
                    entry.index,
                );
        }

        Self { index, offline }
    }

    /// Get the [`FlatDistributions`] for the given package name.
    pub fn get(&self, package_name: &PackageName) -> Option<&FlatDistributions> {
        self.index.get(package_name)
    }

    /// Whether any `--find-links` entries could not be resolved due to a lack of network
    /// connectivity.
    pub fn offline(&self) -> bool {
        self.offline
    }
}
/// A set of [`PrioritizedDist`] from a `--find-links` entry for a single package, indexed
/// by [`Version`].
///
/// Each entry aggregates the wheels and source distributions available for one version.
#[derive(Debug, Clone, Default)]
pub struct FlatDistributions(BTreeMap<Version, PrioritizedDist>);
impl FlatDistributions {
/// Collect all files from a `--find-links` target into a [`FlatIndex`].
#[instrument(skip_all)]
pub fn from_entries(
entries: Vec<FlatIndexEntry>,
tags: Option<&Tags>,
hasher: &HashStrategy,
build_options: &BuildOptions,
) -> Self {
let mut distributions = Self::default();
for entry in entries {
distributions.add_file(
entry.file,
entry.filename,
tags,
hasher,
build_options,
entry.index,
);
}
distributions
}
/// Returns an [`Iterator`] over the distributions.
pub fn iter(&self) -> impl Iterator<Item = (&Version, &PrioritizedDist)> {
self.0.iter()
}
/// Removes the [`PrioritizedDist`] for the given version.
pub fn remove(&mut self, version: &Version) -> Option<PrioritizedDist> {
self.0.remove(version)
}
/// Add the given [`File`] to the [`FlatDistributions`] for the given package.
fn add_file(
&mut self,
file: File,
filename: DistFilename,
tags: Option<&Tags>,
hasher: &HashStrategy,
build_options: &BuildOptions,
index: IndexUrl,
) {
// No `requires-python` here: for source distributions, we don't have that information;
// for wheels, we read it lazily only when selected.
match filename {
DistFilename::WheelFilename(filename) => {
let version = filename.version.clone();
let compatibility = Self::wheel_compatibility(
&filename,
file.hashes.as_slice(),
tags,
hasher,
build_options,
);
let dist = RegistryBuiltWheel {
filename,
file: Box::new(file),
index,
};
match self.0.entry(version) {
Entry::Occupied(mut entry) => {
entry.get_mut().insert_built(dist, vec![], compatibility);
}
Entry::Vacant(entry) => {
entry.insert(PrioritizedDist::from_built(dist, vec![], compatibility));
}
}
}
DistFilename::SourceDistFilename(filename) => {
let compatibility = Self::source_dist_compatibility(
&filename,
file.hashes.as_slice(),
hasher,
build_options,
);
let dist = RegistrySourceDist {
name: filename.name.clone(),
version: filename.version.clone(),
ext: filename.extension,
file: Box::new(file),
index,
wheels: vec![],
};
match self.0.entry(filename.version) {
Entry::Occupied(mut entry) => {
entry.get_mut().insert_source(dist, vec![], compatibility);
}
Entry::Vacant(entry) => {
entry.insert(PrioritizedDist::from_source(dist, vec![], compatibility));
}
}
}
}
}
fn source_dist_compatibility(
filename: &SourceDistFilename,
hashes: &[HashDigest],
hasher: &HashStrategy,
build_options: &BuildOptions,
) -> SourceDistCompatibility {
// Check if source distributions are allowed for this package.
if build_options.no_build_package(&filename.name) {
return SourceDistCompatibility::Incompatible(IncompatibleSource::NoBuild);
}
// Check if hashes line up
let hash = if let HashPolicy::Validate(required) =
hasher.get_package(&filename.name, &filename.version)
{
if hashes.is_empty() {
HashComparison::Missing
} else if required.iter().any(|hash| hashes.contains(hash)) {
HashComparison::Matched
} else {
HashComparison::Mismatched
}
} else {
HashComparison::Matched
};
SourceDistCompatibility::Compatible(hash)
}
fn wheel_compatibility(
filename: &WheelFilename,
hashes: &[HashDigest],
tags: Option<&Tags>,
hasher: &HashStrategy,
build_options: &BuildOptions,
) -> WheelCompatibility {
// Check if binaries are allowed for this package.
if build_options.no_binary_package(&filename.name) {
return WheelCompatibility::Incompatible(IncompatibleWheel::NoBinary);
}
// Determine a compatibility for the wheel based on tags.
let priority = match tags {
Some(tags) => match filename.compatibility(tags) {
TagCompatibility::Incompatible(tag) => {
return WheelCompatibility::Incompatible(IncompatibleWheel::Tag(tag));
}
TagCompatibility::Compatible(priority) => Some(priority),
},
None => None,
};
// Check if hashes line up.
let hash = if let HashPolicy::Validate(required) =
hasher.get_package(&filename.name, &filename.version)
{
if hashes.is_empty() {
HashComparison::Missing
} else if required.iter().any(|hash| hashes.contains(hash)) {
HashComparison::Matched
} else {
HashComparison::Mismatched
}
} else {
HashComparison::Matched
};
// Break ties with the build tag.
let build_tag = filename.build_tag().cloned();
WheelCompatibility::Compatible(hash, priority, build_tag)
}
}
// Consume the collection, yielding `(Version, PrioritizedDist)` pairs in version order.
impl IntoIterator for FlatDistributions {
    type Item = (Version, PrioritizedDist);
    type IntoIter = std::collections::btree_map::IntoIter<Version, PrioritizedDist>;

    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl From<FlatDistributions> for BTreeMap<Version, PrioritizedDist> {
    /// Unwrap the underlying version-to-distribution map.
    fn from(distributions: FlatDistributions) -> Self {
        distributions.0
    }
}
/// For external users: construct a [`FlatDistributions`] from a pre-built map of
/// versions to distributions.
impl From<BTreeMap<Version, PrioritizedDist>> for FlatDistributions {
    fn from(distributions: BTreeMap<Version, PrioritizedDist>) -> Self {
        Self(distributions)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/candidate_selector.rs | crates/uv-resolver/src/candidate_selector.rs | use std::fmt::{Display, Formatter};
use either::Either;
use itertools::Itertools;
use pubgrub::Range;
use smallvec::SmallVec;
use tracing::{debug, trace};
use uv_configuration::IndexStrategy;
use uv_distribution_types::{CompatibleDist, IncompatibleDist, IncompatibleSource, IndexUrl};
use uv_distribution_types::{DistributionMetadata, IncompatibleWheel, Name, PrioritizedDist};
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_platform_tags::Tags;
use uv_types::InstalledPackagesProvider;
use crate::preferences::{Entry, PreferenceSource, Preferences};
use crate::prerelease::{AllowPrerelease, PrereleaseStrategy};
use crate::resolution_mode::ResolutionStrategy;
use crate::version_map::{VersionMap, VersionMapDistHandle};
use crate::{Exclusions, Manifest, Options, ResolverEnvironment};
/// Selects a [`Candidate`] for a package from preferences, installed distributions, and
/// the available version maps, applying the configured strategies.
#[derive(Debug, Clone)]
#[allow(clippy::struct_field_names)]
pub(crate) struct CandidateSelector {
    /// Whether the highest or lowest matching version is preferred (consulted via
    /// `use_highest_version` during selection).
    resolution_strategy: ResolutionStrategy,
    /// When pre-release versions may be selected — TODO confirm exact semantics against
    /// `PrereleaseStrategy`.
    prerelease_strategy: PrereleaseStrategy,
    /// How multiple indexes are consulted during selection.
    index_strategy: IndexStrategy,
}
impl CandidateSelector {
/// Return a [`CandidateSelector`] for the given [`Manifest`].
pub(crate) fn for_resolution(
    options: &Options,
    manifest: &Manifest,
    env: &ResolverEnvironment,
) -> Self {
    // Derive each strategy from the resolver options and manifest up front, then
    // assemble the selector.
    let resolution_strategy = ResolutionStrategy::from_mode(
        options.resolution_mode,
        manifest,
        env,
        options.dependency_mode,
    );
    let prerelease_strategy = PrereleaseStrategy::from_mode(
        options.prerelease_mode,
        manifest,
        env,
        options.dependency_mode,
    );
    Self {
        resolution_strategy,
        prerelease_strategy,
        index_strategy: options.index_strategy,
    }
}
/// Returns the configured [`ResolutionStrategy`].
#[inline]
#[allow(dead_code)]
pub(crate) fn resolution_strategy(&self) -> &ResolutionStrategy {
    &self.resolution_strategy
}

/// Returns the configured [`PrereleaseStrategy`].
#[inline]
#[allow(dead_code)]
pub(crate) fn prerelease_strategy(&self) -> &PrereleaseStrategy {
    &self.prerelease_strategy
}

/// Returns the configured [`IndexStrategy`].
#[inline]
#[allow(dead_code)]
pub(crate) fn index_strategy(&self) -> &IndexStrategy {
    &self.index_strategy
}
/// Select a [`Candidate`] from a set of candidate versions and files.
///
/// Unless present in the provided [`Exclusions`], local distributions from the
/// [`InstalledPackagesProvider`] are preferred over remote distributions in
/// the [`VersionMap`].
///
/// The selection order is: (1) a matching preference (e.g., from a lockfile),
/// (2) an already-installed distribution (unless upgrading), (3) the best remote
/// candidate, cross-checked against the installed version.
pub(crate) fn select<'a, InstalledPackages: InstalledPackagesProvider>(
    &'a self,
    package_name: &'a PackageName,
    range: &Range<Version>,
    version_maps: &'a [VersionMap],
    preferences: &'a Preferences,
    installed_packages: &'a InstalledPackages,
    exclusions: &'a Exclusions,
    index: Option<&'a IndexUrl>,
    env: &ResolverEnvironment,
    tags: Option<&'a Tags>,
) -> Option<Candidate<'a>> {
    let reinstall = exclusions.reinstall(package_name);
    let upgrade = exclusions.upgrade(package_name);

    // If we have a preference (e.g., from a lockfile), search for a version matching that
    // preference.
    //
    // If `--reinstall` is provided, we should omit any already-installed packages from here,
    // since we can't reinstall already-installed packages.
    //
    // If `--upgrade` is provided, we should still search for a matching preference. In
    // practice, preferences should be empty if `--upgrade` is provided, but it's the caller's
    // responsibility to ensure that.
    if let Some(preferred) = self.get_preferred(
        package_name,
        range,
        version_maps,
        preferences,
        installed_packages,
        reinstall,
        index,
        env,
        tags,
    ) {
        trace!("Using preference {} {}", preferred.name, preferred.version);
        return Some(preferred);
    }

    // If we don't have a preference, find an already-installed distribution that satisfies the
    // range.
    let installed = if reinstall {
        None
    } else {
        Self::get_installed(package_name, range, installed_packages, tags)
    };

    // If we're not upgrading, we should prefer the already-installed distribution.
    if !upgrade {
        if let Some(installed) = installed {
            trace!(
                "Using installed {} {} that satisfies {range}",
                installed.name, installed.version
            );
            return Some(installed);
        }
    }

    // Otherwise, find the best candidate from the version maps.
    let compatible = self.select_no_preference(package_name, range, version_maps, env);

    // Cross-reference against the already-installed distribution.
    //
    // If the already-installed version is _more_ compatible than the best candidate
    // from the version maps, use the installed version.
    if let Some(installed) = installed {
        if compatible.as_ref().is_none_or(|compatible| {
            // "More compatible" depends on the direction of the resolution strategy.
            let highest = self.use_highest_version(package_name, env);
            if highest {
                installed.version() >= compatible.version()
            } else {
                installed.version() <= compatible.version()
            }
        }) {
            trace!(
                "Using installed {} {} that satisfies {range}",
                installed.name, installed.version
            );
            return Some(installed);
        }
    }

    compatible
}
/// If the package has a preference, an existing version from an existing lockfile or a version
/// from a sibling fork, and the preference satisfies the current range, use that.
///
/// We try to find a resolution that, depending on the input, does not diverge from the
/// lockfile or matches a sibling fork. We try an exact match for the current markers (fork
/// or specific) first, to ensure stability with repeated locking. If that doesn't work, we
/// fall back to preferences that don't match in hopes of still resolving different forks into
/// the same version; A solution with less different versions is more desirable than one where
/// we may have more recent version in some cases, but overall more versions.
fn get_preferred<'a, InstalledPackages: InstalledPackagesProvider>(
    &'a self,
    package_name: &'a PackageName,
    range: &Range<Version>,
    version_maps: &'a [VersionMap],
    preferences: &'a Preferences,
    installed_packages: &'a InstalledPackages,
    reinstall: bool,
    index: Option<&'a IndexUrl>,
    env: &ResolverEnvironment,
    tags: Option<&'a Tags>,
) -> Option<Candidate<'a>> {
    let preferences = preferences.get(package_name);
    // If there are multiple preferences for the same package, we need to sort them by priority.
    let preferences = match preferences {
        [] => return None,
        [entry] => {
            // Filter out preferences that map to a conflicting index.
            if index.is_some_and(|index| !entry.index().matches(index)) {
                return None;
            }
            // `Either` lets the single-entry fast path avoid collecting/sorting.
            Either::Left(std::iter::once((entry.pin().version(), entry.source())))
        }
        [..] => {
            // Inline storage for the common case of only a handful of preferences.
            type Entries<'a> = SmallVec<[&'a Entry; 3]>;
            let mut preferences = preferences.iter().collect::<Entries>();
            // Filter out preferences that map to a conflicting index.
            preferences.retain(|entry| index.is_none_or(|index| entry.index().matches(index)));
            // Sort the preferences by priority.
            let highest = self.use_highest_version(package_name, env);
            preferences.sort_by_key(|entry| {
                let marker = entry.marker();
                // Prefer preferences that match the current environment.
                let matches_env = env.included_by_marker(marker.pep508());
                // Prefer the latest (or earliest) version.
                let version = if highest {
                    Either::Left(entry.pin().version())
                } else {
                    Either::Right(std::cmp::Reverse(entry.pin().version()))
                };
                // Outer `Reverse` so that env-matching entries and higher-priority
                // versions sort to the front.
                std::cmp::Reverse((matches_env, version))
            });
            Either::Right(
                preferences
                    .into_iter()
                    .map(|entry| (entry.pin().version(), entry.source())),
            )
        }
    };

    self.get_preferred_from_iter(
        preferences,
        package_name,
        range,
        version_maps,
        installed_packages,
        reinstall,
        env,
        tags,
    )
}
/// Return the first preference that satisfies the current range and is allowed.
///
/// `preferences` must already be ordered by priority (see `get_preferred`); the first entry
/// that passes the range, pre-release, and availability checks wins.
fn get_preferred_from_iter<'a, InstalledPackages: InstalledPackagesProvider>(
    &'a self,
    preferences: impl Iterator<Item = (&'a Version, PreferenceSource)>,
    package_name: &'a PackageName,
    range: &Range<Version>,
    version_maps: &'a [VersionMap],
    installed_packages: &'a InstalledPackages,
    reinstall: bool,
    env: &ResolverEnvironment,
    tags: Option<&Tags>,
) -> Option<Candidate<'a>> {
    for (version, source) in preferences {
        // Respect the version range for this requirement.
        if !range.contains(version) {
            continue;
        }

        // Check for a locally installed distribution that matches the preferred version, unless
        // we have to reinstall, in which case we can't reuse an already-installed distribution.
        if !reinstall {
            let installed_dists = installed_packages.get_packages(package_name);
            match installed_dists.as_slice() {
                [] => {}
                [dist] => {
                    if dist.version() == version {
                        debug!(
                            "Found installed version of {dist} that satisfies preference in {range}"
                        );

                        // Verify that the installed distribution is compatible with the environment.
                        if tags.is_some_and(|tags| {
                            // Missing or unreadable wheel tags are treated as compatible.
                            let Ok(Some(wheel_tags)) = dist.read_tags() else {
                                return false;
                            };
                            !wheel_tags.is_compatible(tags)
                        }) {
                            debug!("Platform tags mismatch for installed {dist}");
                            continue;
                        }

                        return Some(Candidate {
                            name: package_name,
                            version,
                            dist: CandidateDist::Compatible(CompatibleDist::InstalledDist(
                                dist,
                            )),
                            choice_kind: VersionChoiceKind::Preference,
                        });
                    }
                }
                // We do not consider installed distributions with multiple versions because
                // during installation these must be reinstalled from the remote
                _ => {
                    debug!(
                        "Ignoring installed versions of {package_name}: multiple distributions found"
                    );
                }
            }
        }

        // Respect the pre-release strategy for this fork.
        if version.any_prerelease() {
            let allow = match self.prerelease_strategy.allows(package_name, env) {
                AllowPrerelease::Yes => true,
                AllowPrerelease::No => false,
                // If the pre-release was provided via an existing file, rather than from the
                // current solve, accept it unless pre-releases are completely banned.
                AllowPrerelease::IfNecessary => match source {
                    PreferenceSource::Resolver => false,
                    PreferenceSource::Lock
                    | PreferenceSource::Environment
                    | PreferenceSource::RequirementsTxt => true,
                },
            };
            if !allow {
                continue;
            }
        }

        // Check for a remote distribution that matches the preferred version
        if let Some((version_map, file)) = version_maps
            .iter()
            .find_map(|version_map| version_map.get(version).map(|dist| (version_map, dist)))
        {
            // If the preferred version has a local variant, prefer that.
            if version_map.local() {
                // Scan (in descending order) the versions strictly greater than the preferred
                // version, keeping only in-range local variants of the same base version.
                for local in version_map
                    .versions()
                    .rev()
                    .take_while(|local| *local > version)
                {
                    if !local.is_local() {
                        continue;
                    }
                    if local.clone().without_local() != *version {
                        continue;
                    }
                    if !range.contains(local) {
                        continue;
                    }
                    if let Some(dist) = version_map.get(local) {
                        debug!("Preferring local version `{package_name}` (v{local})");
                        return Some(Candidate::new(
                            package_name,
                            local,
                            dist,
                            VersionChoiceKind::Preference,
                        ));
                    }
                }
            }
            return Some(Candidate::new(
                package_name,
                version,
                file,
                VersionChoiceKind::Preference,
            ));
        }
    }
    None
}
/// Check for an installed distribution that satisfies the current range and is allowed.
///
/// Returns `None` when nothing (or more than one distribution) is installed, when the
/// installed version falls outside `range`, or when the installed wheel's platform tags
/// are incompatible with `tags`.
fn get_installed<'a, InstalledPackages: InstalledPackagesProvider>(
    package_name: &'a PackageName,
    range: &Range<Version>,
    installed_packages: &'a InstalledPackages,
    tags: Option<&'a Tags>,
) -> Option<Candidate<'a>> {
    let installed_dists = installed_packages.get_packages(package_name);

    // Only a single installed distribution is usable: with multiple versions installed,
    // installation must reinstall from the remote anyway.
    let [dist] = installed_dists.as_slice() else {
        if installed_dists.len() > 1 {
            debug!(
                "Ignoring installed versions of {package_name}: multiple distributions found"
            );
        }
        return None;
    };

    // Respect the version range for this requirement.
    let version = dist.version();
    if !range.contains(version) {
        return None;
    }

    // Verify that the installed distribution is compatible with the environment. Missing
    // or unreadable wheel tags are treated as compatible.
    let tags_mismatch = tags.is_some_and(|tags| match dist.read_tags() {
        Ok(Some(wheel_tags)) => !wheel_tags.is_compatible(tags),
        _ => false,
    });
    if tags_mismatch {
        debug!("Platform tags mismatch for installed {dist}");
        return None;
    }

    Some(Candidate {
        name: package_name,
        version,
        dist: CandidateDist::Compatible(CompatibleDist::InstalledDist(dist)),
        choice_kind: VersionChoiceKind::Installed,
    })
}
/// Select a [`Candidate`] without checking for version preference such as an existing
/// lockfile.
pub(crate) fn select_no_preference<'a>(
    &'a self,
    package_name: &'a PackageName,
    range: &Range<Version>,
    version_maps: &'a [VersionMap],
    env: &ResolverEnvironment,
) -> Option<Candidate<'a>> {
    trace!(
        "Selecting candidate for {package_name} with range {range} with {} remote versions",
        version_maps.iter().map(VersionMap::len).sum::<usize>(),
    );
    let highest = self.use_highest_version(package_name, env);

    let allow_prerelease = match self.prerelease_strategy.allows(package_name, env) {
        AllowPrerelease::Yes => true,
        AllowPrerelease::No => false,
        // Allow pre-releases if there are no stable versions available.
        AllowPrerelease::IfNecessary => !version_maps.iter().any(VersionMap::stable),
    };

    if self.index_strategy == IndexStrategy::UnsafeBestMatch {
        // With `unsafe-best-match`, merge the candidates from all indexes into a single
        // ordered stream instead of stopping at the first index that knows the package.
        if highest {
            Self::select_candidate(
                version_maps
                    .iter()
                    .enumerate()
                    .map(|(map_index, version_map)| {
                        version_map
                            .iter(range)
                            .rev()
                            .map(move |item| (map_index, item))
                    })
                    // The `kmerge_by` predicate answers "should the left element come
                    // first?": versions descend, ties prefer the earlier index.
                    .kmerge_by(
                        |(index1, (version1, _)), (index2, (version2, _))| match version1
                            .cmp(version2)
                        {
                            std::cmp::Ordering::Equal => index1 < index2,
                            std::cmp::Ordering::Less => false,
                            std::cmp::Ordering::Greater => true,
                        },
                    )
                    .map(|(_, item)| item),
                package_name,
                range,
                allow_prerelease,
            )
        } else {
            Self::select_candidate(
                version_maps
                    .iter()
                    .enumerate()
                    .map(|(map_index, version_map)| {
                        version_map.iter(range).map(move |item| (map_index, item))
                    })
                    // As above, but ascending: lower versions first, ties prefer the
                    // earlier index.
                    .kmerge_by(
                        |(index1, (version1, _)), (index2, (version2, _))| match version1
                            .cmp(version2)
                        {
                            std::cmp::Ordering::Equal => index1 < index2,
                            std::cmp::Ordering::Less => true,
                            std::cmp::Ordering::Greater => false,
                        },
                    )
                    .map(|(_, item)| item),
                package_name,
                range,
                allow_prerelease,
            )
        }
    } else {
        // Otherwise, stop at the first index that yields a candidate.
        if highest {
            version_maps.iter().find_map(|version_map| {
                Self::select_candidate(
                    version_map.iter(range).rev(),
                    package_name,
                    range,
                    allow_prerelease,
                )
            })
        } else {
            version_maps.iter().find_map(|version_map| {
                Self::select_candidate(
                    version_map.iter(range),
                    package_name,
                    range,
                    allow_prerelease,
                )
            })
        }
    }
}
/// By default, we select the latest version, but we also allow using the lowest version instead
/// to check the lower bounds.
pub(crate) fn use_highest_version(
    &self,
    package_name: &PackageName,
    env: &ResolverEnvironment,
) -> bool {
    // `LowestDirect` only forces the lower bound for direct dependencies; all other
    // packages still resolve to the highest compatible version.
    if let ResolutionStrategy::LowestDirect(direct_dependencies) = &self.resolution_strategy {
        !direct_dependencies.contains(package_name, env)
    } else {
        matches!(&self.resolution_strategy, ResolutionStrategy::Highest)
    }
}
/// Select the first-matching [`Candidate`] from a set of candidate versions and files,
/// preferring wheels to source distributions.
///
/// The returned [`Candidate`] _may not_ be compatible with the current platform; in such
/// cases, the resolver is responsible for tracking the incompatibility and re-running the
/// selection process with additional constraints.
fn select_candidate<'a>(
    versions: impl Iterator<Item = (&'a Version, VersionMapDistHandle<'a>)>,
    package_name: &'a PackageName,
    range: &Range<Version>,
    allow_prerelease: bool,
) -> Option<Candidate<'a>> {
    // `steps` is only used for trace-level diagnostics.
    let mut steps = 0usize;
    let mut incompatible: Option<Candidate> = None;
    for (version, maybe_dist) in versions {
        steps += 1;

        // If we have an incompatible candidate, and we've progressed past it, return it.
        if incompatible
            .as_ref()
            .is_some_and(|incompatible| version != incompatible.version)
        {
            trace!(
                "Returning incompatible candidate for package {package_name} with range {range} after {steps} steps",
            );
            return incompatible;
        }

        let candidate = {
            if version.any_prerelease() && !allow_prerelease {
                continue;
            }
            if !range.contains(version) {
                continue;
            }
            let Some(dist) = maybe_dist.prioritized_dist() else {
                continue;
            };
            trace!(
                "Found candidate for package {package_name} with range {range} after {steps} steps: {version} version"
            );
            Candidate::new(package_name, version, dist, VersionChoiceKind::Compatible)
        };

        // If candidate is not compatible due to exclude newer, continue searching.
        // This is a special case — we pretend versions with exclude newer incompatibilities
        // do not exist so that they are not present in error messages in our test suite.
        // TODO(zanieb): Now that `--exclude-newer` is user facing we may want to consider
        // flagging this behavior such that we _will_ report filtered distributions due to
        // exclude-newer in our error messages.
        if matches!(
            candidate.dist(),
            CandidateDist::Incompatible {
                incompatible_dist: IncompatibleDist::Source(IncompatibleSource::ExcludeNewer(
                    _
                )) | IncompatibleDist::Wheel(
                    IncompatibleWheel::ExcludeNewer(_)
                ),
                ..
            }
        ) {
            continue;
        }

        // If the candidate isn't compatible, we store it as incompatible and continue
        // searching. Typically, we want to return incompatible candidates so that PubGrub can
        // track them (then continue searching, with additional constraints). However, we may
        // see multiple entries for the same version (e.g., if the same version exists on
        // multiple indexes and `--index-strategy unsafe-best-match` is enabled), and it's
        // possible that one of them is compatible while the other is not.
        //
        // See, e.g., <https://github.com/astral-sh/uv/issues/8922>. At time of writing,
        // markupsafe==3.0.2 exists on the PyTorch index, but there's only a single wheel:
        //
        //   MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
        //
        // Meanwhile, there are a large number of wheels on PyPI for the same version. If the
        // user is on Python 3.12, and we return the incompatible PyTorch wheel without
        // considering the PyPI wheels, PubGrub will mark 3.0.2 as an incompatible version,
        // even though there are compatible wheels on PyPI. Thus, we need to ensure that we
        // return the first _compatible_ candidate across all indexes, if such a candidate
        // exists.
        if matches!(candidate.dist(), CandidateDist::Incompatible { .. }) {
            // Only remember the first incompatible entry for this version.
            if incompatible.is_none() {
                incompatible = Some(candidate);
            }
            continue;
        }

        trace!(
            "Returning candidate for package {package_name} with range {range} after {steps} steps",
        );
        return Some(candidate);
    }

    // The iterator ended on (an) incompatible entr(ies) for the final version.
    if incompatible.is_some() {
        trace!(
            "Returning incompatible candidate for package {package_name} with range {range} after {steps} steps",
        );
        return incompatible;
    }

    trace!(
        "Exhausted all candidates for package {package_name} with range {range} after {steps} steps"
    );
    None
}
}
/// The distribution backing a [`Candidate`]: either a usable distribution, or a record of
/// why no distribution for the version is usable.
#[derive(Debug, Clone)]
pub(crate) enum CandidateDist<'a> {
    Compatible(CompatibleDist<'a>),
    Incompatible {
        /// The reason the prioritized distribution is incompatible.
        incompatible_dist: IncompatibleDist,
        /// The prioritized distribution that had no compatible wheel or sdist.
        prioritized_dist: &'a PrioritizedDist,
    },
}
impl CandidateDist<'_> {
    /// For an installable dist, return the prioritized distribution.
    fn prioritized(&self) -> Option<&PrioritizedDist> {
        match self {
            Self::Compatible(dist) => dist.prioritized(),
            Self::Incompatible {
                prioritized_dist, ..
            } => Some(prioritized_dist),
        }
    }
}
impl<'a> From<&'a PrioritizedDist> for CandidateDist<'a> {
    fn from(value: &'a PrioritizedDist) -> Self {
        match value.get() {
            Some(dist) => CandidateDist::Compatible(dist),
            None => {
                // TODO(zanieb): We report the source-distribution incompatibility (if any)
                // over the wheel incompatibility, but in the future we may want to surface
                // both so the resolver can explain why neither distribution kind can be
                // used.
                let incompatible_dist = value
                    .incompatible_source()
                    .cloned()
                    .map(IncompatibleDist::Source)
                    .or_else(|| {
                        value
                            .incompatible_wheel()
                            .cloned()
                            .map(IncompatibleDist::Wheel)
                    })
                    .unwrap_or(IncompatibleDist::Unavailable);
                CandidateDist::Incompatible {
                    incompatible_dist,
                    prioritized_dist: value,
                }
            }
        }
    }
}
/// The reason why we selected the version of the candidate version, either a preference or being
/// compatible.
#[derive(Debug, Clone, Copy)]
pub(crate) enum VersionChoiceKind {
    /// A preference from an output file such as `-o requirements.txt` or `uv.lock`.
    Preference,
    /// A preference from an installed version.
    Installed,
    /// The next compatible version in a version map.
    Compatible,
}
impl Display for VersionChoiceKind {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Map each kind to its lowercase, user-facing label.
        let label = match self {
            Self::Preference => "preference",
            Self::Installed => "installed",
            Self::Compatible => "compatible",
        };
        f.write_str(label)
    }
}
/// A package-version pairing selected by the resolver, together with the distribution that
/// backs it.
#[derive(Debug, Clone)]
pub(crate) struct Candidate<'a> {
    /// The name of the package.
    name: &'a PackageName,
    /// The version of the package.
    version: &'a Version,
    /// The distributions to use for resolving and installing the package.
    dist: CandidateDist<'a>,
    /// How this version was chosen (preference, installed, or compatible).
    choice_kind: VersionChoiceKind,
}
impl<'a> Candidate<'a> {
    /// Construct a candidate from a prioritized distribution.
    fn new(
        name: &'a PackageName,
        version: &'a Version,
        dist: &'a PrioritizedDist,
        choice_kind: VersionChoiceKind,
    ) -> Self {
        let dist = CandidateDist::from(dist);
        Self {
            name,
            version,
            dist,
            choice_kind,
        }
    }

    /// Return the name of the package.
    pub(crate) fn name(&self) -> &PackageName {
        self.name
    }

    /// Return the version of the package.
    pub(crate) fn version(&self) -> &Version {
        self.version
    }

    /// Return the distribution for the package, if compatible.
    pub(crate) fn compatible(&self) -> Option<&CompatibleDist<'a>> {
        match &self.dist {
            CandidateDist::Compatible(dist) => Some(dist),
            CandidateDist::Incompatible { .. } => None,
        }
    }

    /// Return how this candidate's version was chosen (e.g., from a preference).
    pub(crate) fn choice_kind(&self) -> VersionChoiceKind {
        self.choice_kind
    }

    /// Return the distribution for the candidate.
    pub(crate) fn dist(&self) -> &CandidateDist<'a> {
        &self.dist
    }

    /// Return the prioritized distribution for the candidate.
    pub(crate) fn prioritized(&self) -> Option<&PrioritizedDist> {
        self.dist.prioritized()
    }
}
impl Name for Candidate<'_> {
    /// Return the candidate's package name.
    fn name(&self) -> &PackageName {
        self.name
    }
}
impl DistributionMetadata for Candidate<'_> {
    /// A candidate is always identified by a version, never by a URL.
    fn version_or_url(&self) -> uv_distribution_types::VersionOrUrlRef<'_> {
        uv_distribution_types::VersionOrUrlRef::Version(self.version)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/options.rs | crates/uv-resolver/src/options.rs | use uv_configuration::{BuildOptions, IndexStrategy};
use uv_pypi_types::SupportedEnvironments;
use uv_torch::TorchStrategy;
use crate::fork_strategy::ForkStrategy;
use crate::{DependencyMode, ExcludeNewer, PrereleaseMode, ResolutionMode};
/// Options for resolving a manifest.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct Options {
    /// The [`ResolutionMode`] to apply (see [`OptionsBuilder::resolution_mode`]).
    pub resolution_mode: ResolutionMode,
    /// The [`PrereleaseMode`] to apply.
    pub prerelease_mode: PrereleaseMode,
    /// Whether to resolve transitive dependencies or only direct ones.
    pub dependency_mode: DependencyMode,
    /// The multi-version (fork) strategy.
    pub fork_strategy: ForkStrategy,
    /// The exclusion date: distributions newer than this are ignored.
    pub exclude_newer: ExcludeNewer,
    /// How to choose between multiple configured indexes.
    pub index_strategy: IndexStrategy,
    /// The required platforms/environments for the resolution.
    pub required_environments: SupportedEnvironments,
    /// Whether these options are configurable or fixed (see [`Flexibility`]).
    pub flexibility: Flexibility,
    /// The [`BuildOptions`] to apply.
    pub build_options: BuildOptions,
    /// The [`TorchStrategy`] to apply, if any.
    pub torch_backend: Option<TorchStrategy>,
}
/// Builder for [`Options`].
///
/// Mirrors the fields of [`Options`]; see [`Options`] for field semantics.
#[derive(Debug, Default, Clone)]
pub struct OptionsBuilder {
    resolution_mode: ResolutionMode,
    prerelease_mode: PrereleaseMode,
    dependency_mode: DependencyMode,
    fork_strategy: ForkStrategy,
    exclude_newer: ExcludeNewer,
    index_strategy: IndexStrategy,
    required_environments: SupportedEnvironments,
    flexibility: Flexibility,
    build_options: BuildOptions,
    torch_backend: Option<TorchStrategy>,
}
impl OptionsBuilder {
    /// Creates a new builder, with all fields at their defaults.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the [`ResolutionMode`].
    #[must_use]
    pub fn resolution_mode(self, resolution_mode: ResolutionMode) -> Self {
        Self {
            resolution_mode,
            ..self
        }
    }

    /// Sets the [`PrereleaseMode`].
    #[must_use]
    pub fn prerelease_mode(self, prerelease_mode: PrereleaseMode) -> Self {
        Self {
            prerelease_mode,
            ..self
        }
    }

    /// Sets the dependency mode.
    #[must_use]
    pub fn dependency_mode(self, dependency_mode: DependencyMode) -> Self {
        Self {
            dependency_mode,
            ..self
        }
    }

    /// Sets the multi-version mode.
    #[must_use]
    pub fn fork_strategy(self, fork_strategy: ForkStrategy) -> Self {
        Self {
            fork_strategy,
            ..self
        }
    }

    /// Sets the exclusion date.
    #[must_use]
    pub fn exclude_newer(self, exclude_newer: ExcludeNewer) -> Self {
        Self {
            exclude_newer,
            ..self
        }
    }

    /// Sets the index strategy.
    #[must_use]
    pub fn index_strategy(self, index_strategy: IndexStrategy) -> Self {
        Self {
            index_strategy,
            ..self
        }
    }

    /// Sets the required platforms.
    #[must_use]
    pub fn required_environments(self, required_environments: SupportedEnvironments) -> Self {
        Self {
            required_environments,
            ..self
        }
    }

    /// Sets the [`Flexibility`].
    #[must_use]
    pub fn flexibility(self, flexibility: Flexibility) -> Self {
        Self { flexibility, ..self }
    }

    /// Sets the [`BuildOptions`].
    #[must_use]
    pub fn build_options(self, build_options: BuildOptions) -> Self {
        Self {
            build_options,
            ..self
        }
    }

    /// Sets the [`TorchStrategy`].
    #[must_use]
    pub fn torch_backend(self, torch_backend: Option<TorchStrategy>) -> Self {
        Self {
            torch_backend,
            ..self
        }
    }

    /// Builds the options.
    pub fn build(self) -> Options {
        let Self {
            resolution_mode,
            prerelease_mode,
            dependency_mode,
            fork_strategy,
            exclude_newer,
            index_strategy,
            required_environments,
            flexibility,
            build_options,
            torch_backend,
        } = self;
        Options {
            resolution_mode,
            prerelease_mode,
            dependency_mode,
            fork_strategy,
            exclude_newer,
            index_strategy,
            required_environments,
            flexibility,
            build_options,
            torch_backend,
        }
    }
}
/// Whether the [`Options`] are configurable or fixed.
///
/// Applies to the [`ResolutionMode`], [`PrereleaseMode`], and [`DependencyMode`] fields.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum Flexibility {
    /// The setting is configurable. This is the default.
    #[default]
    Configurable,
    /// The setting is fixed.
    Fixed,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pins.rs | crates/uv-resolver/src/pins.rs | use rustc_hash::FxHashMap;
use uv_distribution_types::{CompatibleDist, ResolvedDist};
use uv_normalize::PackageName;
use crate::candidate_selector::Candidate;
/// A set of package versions pinned to specific files.
///
/// For example, given `Flask==3.0.0`, the [`FilePins`] would contain a mapping from `Flask` to
/// `3.0.0` to the specific wheel or source distribution archive that was pinned for that version.
///
/// Keys are `(package name, version)` pairs; values are the resolved distributions.
#[derive(Clone, Debug, Default)]
pub(crate) struct FilePins(FxHashMap<(PackageName, uv_pep440::Version), ResolvedDist>);
// Inserts are common (every time we select a version) while reads are rare (converting the
// final resolution).
impl FilePins {
    /// Pin a candidate package.
    pub(crate) fn insert(&mut self, candidate: &Candidate, dist: &CompatibleDist) {
        let key = (candidate.name().clone(), candidate.version().clone());
        // Avoid the expensive clone when a version is selected again.
        self.0
            .entry(key)
            .or_insert_with(|| dist.for_installation().to_owned());
    }

    /// Return the pinned file for the given package name and version, if it exists.
    pub(crate) fn get(
        &self,
        name: &PackageName,
        version: &uv_pep440::Version,
    ) -> Option<&ResolvedDist> {
        let key = (name.clone(), version.clone());
        self.0.get(&key)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/dependency_provider.rs | crates/uv-resolver/src/dependency_provider.rs | use std::convert::Infallible;
use pubgrub::{Dependencies, DependencyProvider, PackageResolutionStatistics, Range};
use uv_pep440::Version;
use crate::pubgrub::{PubGrubPackage, PubGrubPriority, PubGrubTiebreaker};
use crate::resolver::UnavailableReason;
/// We don't use a dependency provider, we interact with state directly, but we still need this
/// one for its associated types (pubgrub's solver is generic over a `DependencyProvider`).
#[derive(Clone)]
pub(crate) struct UvDependencyProvider;
impl DependencyProvider for UvDependencyProvider {
    type P = PubGrubPackage;
    type V = Version;
    type VS = Range<Version>;
    type M = UnavailableReason;
    /// Main priority and tiebreak for virtual packages.
    type Priority = (PubGrubPriority, PubGrubTiebreaker);
    type Err = Infallible;

    /// Never called: the resolver drives pubgrub's state machine directly.
    fn prioritize(
        &self,
        _package: &Self::P,
        _range: &Self::VS,
        _stats: &PackageResolutionStatistics,
    ) -> Self::Priority {
        unimplemented!()
    }

    /// Never called: the resolver drives pubgrub's state machine directly.
    fn choose_version(
        &self,
        _package: &Self::P,
        _range: &Self::VS,
    ) -> Result<Option<Self::V>, Self::Err> {
        unimplemented!()
    }

    /// Never called: the resolver drives pubgrub's state machine directly.
    fn get_dependencies(
        &self,
        _package: &Self::P,
        _version: &Self::V,
    ) -> Result<Dependencies<Self::P, Self::VS, Self::M>, Self::Err> {
        unimplemented!()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Guard against accidental size regressions of the priority tuple.
    #[test]
    fn priority_size() {
        assert_eq!(
            size_of::<<UvDependencyProvider as DependencyProvider>::Priority>(),
            24
        );
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/dependency_mode.rs | crates/uv-resolver/src/dependency_mode.rs | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize)]
/// Whether to consider transitive dependencies during resolution.
pub enum DependencyMode {
    /// Include all dependencies, whether direct or transitive.
    #[default]
    Transitive,
    /// Exclude transitive dependencies, only resolving the root package's immediate dependencies.
    Direct,
}
impl DependencyMode {
    /// Returns `true` if transitive dependencies should be included.
    pub fn is_transitive(self) -> bool {
        matches!(self, Self::Transitive)
    }

    /// Returns `true` if transitive dependencies should be excluded, resolving only the root
    /// package's direct dependencies.
    pub fn is_direct(self) -> bool {
        matches!(self, Self::Direct)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/graph_ops.rs | crates/uv-resolver/src/graph_ops.rs | use std::collections::BTreeSet;
use std::collections::hash_map::Entry;
use petgraph::graph::{EdgeIndex, NodeIndex};
use petgraph::visit::EdgeRef;
use petgraph::{Direction, Graph};
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use uv_pep508::MarkerTree;
use uv_pypi_types::{ConflictItem, Conflicts, Inference};
use crate::resolution::ResolutionGraphNode;
use crate::universal_marker::UniversalMarker;
/// Determine the markers under which a package is reachable in the dependency tree.
///
/// The algorithm is a variant of Dijkstra's algorithm for not totally ordered distances:
/// Whenever we find a shorter distance to a node (a marker that is not a subset of the existing
/// marker), we re-queue the node and update all its children. This implicitly handles cycles,
/// whenever we re-reach a node through a cycle the marker we have is a more
/// specific marker/longer path, so we don't update the node and don't re-queue it.
pub(crate) fn marker_reachability<
    Marker: Boolean + Copy + PartialEq,
    Node,
    Edge: Reachable<Marker>,
>(
    graph: &Graph<Node, Edge>,
    fork_markers: &[Edge],
) -> FxHashMap<NodeIndex, Marker> {
    // Note that we build including the virtual packages due to how we propagate markers through
    // the graph, even though we then only read the markers for base packages.
    let mut reachability = FxHashMap::with_capacity_and_hasher(graph.node_count(), FxBuildHasher);

    // Collect the root nodes.
    //
    // Besides the actual virtual root node, virtual dev dependencies packages are also root
    // nodes since the edges don't cover dev dependencies.
    let mut queue: Vec<_> = graph
        .node_indices()
        .filter(|node_index| {
            graph
                .edges_directed(*node_index, Direction::Incoming)
                .next()
                .is_none()
        })
        .collect();

    // The root nodes are always applicable, unless the user has restricted resolver
    // environments with `tool.uv.environments`.
    let root_markers = if fork_markers.is_empty() {
        Edge::true_marker()
    } else {
        // Union of all fork markers: a root is reachable under any declared environment.
        fork_markers
            .iter()
            .fold(Edge::false_marker(), |mut acc, edge| {
                acc.or(edge.marker());
                acc
            })
    };
    for root_index in &queue {
        reachability.insert(*root_index, root_markers);
    }

    // Propagate all markers through the graph, so that the eventual marker for each node is the
    // union of the markers of each path we can reach the node by.
    while let Some(parent_index) = queue.pop() {
        // Invariant: `reachability[parent]` is the union of all paths discovered so far.
        let marker = reachability[&parent_index];
        for child_edge in graph.edges_directed(parent_index, Direction::Outgoing) {
            // The marker for all paths to the child through the parent.
            let mut child_marker = child_edge.weight().marker();
            child_marker.and(marker);
            match reachability.entry(child_edge.target()) {
                Entry::Occupied(mut existing) => {
                    // If the marker is a subset of the existing marker (A ⊆ B exactly if
                    // A ∪ B = A), updating the child wouldn't change child's marker.
                    child_marker.or(*existing.get());
                    if &child_marker != existing.get() {
                        existing.insert(child_marker);
                        queue.push(child_edge.target());
                    }
                }
                Entry::Vacant(vacant) => {
                    vacant.insert(child_marker);
                    queue.push(child_edge.target());
                }
            }
        }
    }

    reachability
}
/// Traverse the given dependency graph and propagate activated markers.
///
/// For example, given an edge like `foo[x1] -> bar`, then it is known that
/// `x1` is activated. This in turn can be used to simplify any downstream
/// conflict markers with `extra == "x1"` in them (by replacing `extra == "x1"`
/// with `true`).
pub(crate) fn simplify_conflict_markers(
    conflicts: &Conflicts,
    graph: &mut Graph<ResolutionGraphNode, UniversalMarker>,
) {
    // Do nothing if there are no declared conflicts. Without any declared
    // conflicts, we know we have no conflict markers and thus nothing to
    // simplify by determining which extras are activated at different points
    // in the dependency graph.
    if conflicts.is_empty() {
        return;
    }

    // The set of activated extras and groups for each node. The ROOT nodes
    // don't have any extras/groups activated. Each inner set represents one
    // distinct path by which the node was reached.
    let mut activated: FxHashMap<NodeIndex, Vec<FxHashSet<ConflictItem>>> = FxHashMap::default();

    // Collect the root nodes.
    //
    // Besides the actual virtual root node, virtual dev dependencies packages are also root
    // nodes since the edges don't cover dev dependencies.
    let mut queue: Vec<_> = graph
        .node_indices()
        .filter(|node_index| {
            graph
                .edges_directed(*node_index, Direction::Incoming)
                .next()
                .is_none()
        })
        .collect();

    let mut seen: FxHashSet<NodeIndex> = FxHashSet::default();
    while let Some(parent_index) = queue.pop() {
        // An extra/group node activates its own conflict item on every path through it.
        if let Some((package, extra)) = graph[parent_index].package_extra_names() {
            for set in activated
                .entry(parent_index)
                .or_insert_with(|| vec![FxHashSet::default()])
            {
                set.insert(ConflictItem::from((package.clone(), extra.clone())));
            }
        }
        if let Some((package, group)) = graph[parent_index].package_group_names() {
            for set in activated
                .entry(parent_index)
                .or_insert_with(|| vec![FxHashSet::default()])
            {
                set.insert(ConflictItem::from((package.clone(), group.clone())));
            }
        }
        let sets = activated
            .get(&parent_index)
            .cloned()
            .unwrap_or_else(|| vec![FxHashSet::default()]);
        for child_edge in graph.edges_directed(parent_index, Direction::Outgoing) {
            let mut change = false;
            for set in sets.clone() {
                let existing = activated.entry(child_edge.target()).or_default();
                // This is doing a linear scan for testing membership, which
                // is non-ideal. But it's not actually clear that there's a
                // strictly better alternative without a real workload being
                // slow because of this. Namely, we are checking whether the
                // _set_ being inserted is equivalent to an existing set. So
                // instead of, say, `Vec<FxHashSet<ConflictItem>>`, we could
                // have `BTreeSet<BTreeSet<ConflictItem>>`. But this in turn
                // makes mutating the elements in each set (done above) more
                // difficult and likely require more allocations.
                //
                // So if this does result in a perf slowdown on some real
                // work-load, I think the first step would be to re-examine
                // whether we're doing more work than we need to be doing. If
                // we aren't, then we might want a more purpose-built data
                // structure for this.
                if !existing.contains(&set) {
                    existing.push(set);
                    change = true;
                }
            }
            // Re-queue the child if it's new, or if its activation sets grew.
            if seen.insert(child_edge.target()) || change {
                queue.push(child_edge.target());
            }
        }
    }

    // Convert each activated set into inferences: the activated item itself is included,
    // and every item it conflicts with (per the declared conflict sets) is excluded.
    let mut inferences: FxHashMap<NodeIndex, Vec<BTreeSet<Inference>>> = FxHashMap::default();
    for (node_id, sets) in activated {
        let mut new_sets = Vec::with_capacity(sets.len());
        for set in sets {
            let mut new_set = BTreeSet::default();
            for item in set {
                for conflict_set in conflicts.iter() {
                    if !conflict_set.contains(item.package(), item.as_ref().kind()) {
                        continue;
                    }
                    for conflict_item in conflict_set.iter() {
                        if conflict_item == &item {
                            continue;
                        }
                        new_set.insert(Inference {
                            item: conflict_item.clone(),
                            included: false,
                        });
                    }
                }
                new_set.insert(Inference {
                    item,
                    included: true,
                });
            }
            new_sets.push(new_set);
        }
        inferences.insert(node_id, new_sets);
    }

    for edge_index in (0..graph.edge_count()).map(EdgeIndex::new) {
        let (from_index, to_index) = graph.edge_endpoints(edge_index).unwrap();
        // If there are ambiguous edges (i.e., two or more edges
        // with the same package name), then we specifically skip
        // conflict marker simplification. It seems that in some
        // cases, the logic encoded in `inferences` isn't quite enough
        // to perfectly disambiguate between them. It's plausible we
        // could do better here, but it requires smarter simplification
        // logic. ---AG
        let ambiguous_edges = graph
            .edges_directed(from_index, Direction::Outgoing)
            .filter(|edge| graph[to_index].package_name() == graph[edge.target()].package_name())
            .count();
        if ambiguous_edges > 1 {
            continue;
        }

        let Some(inference_sets) = inferences.get(&from_index) else {
            continue;
        };
        // If not all possible paths (represented by our inferences)
        // satisfy the conflict marker on this edge, then we can't make any
        // simplifications. Namely, because it follows that out inferences
        // aren't always true. Some of them may sometimes be false.
        let all_paths_satisfied = inference_sets.iter().all(|set| {
            let extras = set
                .iter()
                .filter_map(|inf| {
                    if !inf.included {
                        return None;
                    }
                    Some((inf.item.package(), inf.item.extra()?))
                })
                .collect::<Vec<_>>();
            let groups = set
                .iter()
                .filter_map(|inf| {
                    if !inf.included {
                        return None;
                    }
                    Some((inf.item.package(), inf.item.group()?))
                })
                .collect::<Vec<_>>();
            // Notably, the marker must be possible to satisfy with the extras and groups alone.
            // For example, when `a` and `b` conflict, this marker does not simplify:
            // ```
            // (platform_machine == 'x86_64' and extra == 'extra-5-foo-b') or extra == 'extra-5-foo-a'
            // ````
            graph[edge_index].evaluate_only_extras(&extras, &groups)
        });
        if all_paths_satisfied {
            for set in inference_sets {
                for inf in set {
                    // TODO(konsti): Now that `Inference` is public, move more `included` handling
                    // to `UniversalMarker`.
                    if inf.included {
                        graph[edge_index].assume_conflict_item(&inf.item);
                    } else {
                        graph[edge_index].assume_not_conflict_item(&inf.item);
                    }
                }
            }
        } else {
            graph[edge_index].unify_inference_sets(inference_sets);
        }
    }
}
/// An edge weight from which a boolean marker can be read.
///
/// Abstracts over the marker types used as graph edge weights (implemented below for
/// [`MarkerTree`] and [`UniversalMarker`]) so reachability computations can be shared.
pub(crate) trait Reachable<T> {
    /// The marker representing the "true" value.
    fn true_marker() -> T;
    /// The marker representing the "false" value.
    fn false_marker() -> T;
    /// The marker attached to the edge.
    fn marker(&self) -> T;
}
impl Reachable<Self> for MarkerTree {
    /// The always-true PEP 508 marker.
    fn true_marker() -> Self {
        Self::TRUE
    }
    /// The always-false PEP 508 marker.
    fn false_marker() -> Self {
        Self::FALSE
    }
    /// The edge weight is the marker itself (copied; `*self` requires `MarkerTree: Copy`).
    fn marker(&self) -> Self {
        *self
    }
}
impl Reachable<Self> for UniversalMarker {
    /// The always-true universal marker.
    fn true_marker() -> Self {
        Self::TRUE
    }
    /// The always-false universal marker.
    fn false_marker() -> Self {
        Self::FALSE
    }
    /// The edge weight is the marker itself (copied; `*self` requires `UniversalMarker: Copy`).
    fn marker(&self) -> Self {
        *self
    }
}
/// A trait for types that can be used as markers in the dependency graph.
///
/// Provides in-place boolean algebra over edge markers; implemented below for
/// [`UniversalMarker`] and [`MarkerTree`].
pub(crate) trait Boolean {
    /// Perform a logical AND operation with another marker.
    fn and(&mut self, other: Self);
    /// Perform a logical OR operation with another marker.
    fn or(&mut self, other: Self);
}
impl Boolean for UniversalMarker {
    fn and(&mut self, other: Self) {
        // NOTE: this resolves to the *inherent* `UniversalMarker::and` (inherent methods take
        // precedence over trait methods in Rust's method resolution), so this is delegation,
        // not recursion — without such an inherent method this would recurse infinitely.
        // The same applies to `or` below.
        self.and(other);
    }
    fn or(&mut self, other: Self) {
        self.or(other);
    }
}
impl Boolean for MarkerTree {
    fn and(&mut self, other: Self) {
        // Delegates to the inherent `MarkerTree::and` (inherent methods shadow trait methods),
        // so this is not self-recursion. Same for `or` below.
        self.and(other);
    }
    fn or(&mut self, other: Self) {
        self.or(other);
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/yanks.rs | crates/uv-resolver/src/yanks.rs | use std::sync::Arc;
use rustc_hash::{FxHashMap, FxHashSet};
use uv_distribution_types::RequirementSource;
use uv_normalize::PackageName;
use uv_pep440::Version;
use crate::{DependencyMode, Manifest, ResolverEnvironment};
/// A set of package versions that are permitted, even if they're marked as yanked by the
/// relevant index.
///
/// The map is wrapped in an [`Arc`] so cloning the whole set is cheap.
#[derive(Debug, Default, Clone)]
pub struct AllowedYanks(Arc<FxHashMap<PackageName, FxHashSet<Version>>>);
impl AllowedYanks {
    /// Build the set of allowed yanked versions from the resolver inputs.
    ///
    /// A yanked version is allowed when it is pinned exactly (`==` or `===`) in the input
    /// requirements, or when it is already pinned in the lockfile preferences.
    pub fn from_manifest(
        manifest: &Manifest,
        env: &ResolverEnvironment,
        dependencies: DependencyMode,
    ) -> Self {
        let mut allowed_yanks = FxHashMap::<PackageName, FxHashSet<Version>>::default();
        // Allow yanks for any pinned input requirements.
        for requirement in manifest.requirements(env, dependencies) {
            let RequirementSource::Registry { specifier, .. } = &requirement.source else {
                continue;
            };
            // Only consider requirements with exactly one specifier (e.g., `foo==1.0`).
            let [specifier] = specifier.as_ref() else {
                continue;
            };
            if matches!(
                specifier.operator(),
                uv_pep440::Operator::Equal | uv_pep440::Operator::ExactEqual
            ) {
                allowed_yanks
                    .entry(requirement.name.clone())
                    .or_default()
                    .insert(specifier.version().clone());
            }
        }
        // Allow yanks for any packages that are already pinned in the lockfile.
        for (name, preferences) in manifest.preferences.iter() {
            allowed_yanks
                .entry(name.clone())
                .or_default()
                .extend(preferences.map(|(.., version)| version.clone()));
        }
        Self(Arc::new(allowed_yanks))
    }
    /// Returns `true` if the package-version is allowed, even if it's marked as yanked.
    pub fn contains(&self, package_name: &PackageName, version: &Version) -> bool {
        self.0
            .get(package_name)
            .is_some_and(|versions| versions.contains(version))
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/exclude_newer.rs | crates/uv-resolver/src/exclude_newer.rs | #[cfg(feature = "schemars")]
use std::borrow::Cow;
use std::{
ops::{Deref, DerefMut},
str::FromStr,
};
use jiff::{Span, Timestamp, ToSpan, Unit, tz::TimeZone};
use rustc_hash::FxHashMap;
use uv_normalize::PackageName;
/// A single observed difference between two [`ExcludeNewerValue`]s.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExcludeNewerValueChange {
    /// A relative span changed to a new value
    SpanChanged(ExcludeNewerSpan, ExcludeNewerSpan),
    /// A relative span was added
    SpanAdded(ExcludeNewerSpan),
    /// A relative span was removed
    SpanRemoved,
    /// A relative span is present and the timestamp changed
    RelativeTimestampChanged(Timestamp, Timestamp, ExcludeNewerSpan),
    /// The timestamp changed and a relative span is not present
    AbsoluteTimestampChanged(Timestamp, Timestamp),
}
impl ExcludeNewerValueChange {
    /// Whether this change is a timestamp drift under an unchanged relative span.
    pub fn is_relative_timestamp_change(&self) -> bool {
        matches!(self, Self::RelativeTimestampChanged(_, _, _))
    }
}
impl std::fmt::Display for ExcludeNewerValueChange {
    /// Render the change as a human-readable English phrase.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::SpanChanged(old, new) => {
                write!(f, "change of exclude newer span from `{old}` to `{new}`")
            }
            Self::SpanAdded(span) => {
                write!(f, "addition of exclude newer span `{span}`")
            }
            Self::SpanRemoved => {
                write!(f, "removal of exclude newer span")
            }
            Self::RelativeTimestampChanged(old, new, span) => {
                write!(
                    f,
                    "change of calculated ({span}) exclude newer timestamp from `{old}` to `{new}`"
                )
            }
            Self::AbsoluteTimestampChanged(old, new) => {
                write!(
                    f,
                    "change of exclude newer timestamp from `{old}` to `{new}`"
                )
            }
        }
    }
}
/// A single observed difference between two [`ExcludeNewer`] configurations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExcludeNewerChange {
    /// The global value changed.
    GlobalChanged(ExcludeNewerValueChange),
    /// A global value was added.
    GlobalAdded(ExcludeNewerValue),
    /// The global value was removed.
    GlobalRemoved,
    /// A package-specific value was added, removed, or changed.
    Package(ExcludeNewerPackageChange),
}
impl ExcludeNewerChange {
    /// Whether the change is due to a change in a relative timestamp.
    ///
    /// Delegates to the inner change for the `GlobalChanged`/`Package` variants.
    pub fn is_relative_timestamp_change(&self) -> bool {
        match self {
            Self::GlobalChanged(change) => change.is_relative_timestamp_change(),
            Self::GlobalAdded(_) | Self::GlobalRemoved => false,
            Self::Package(change) => change.is_relative_timestamp_change(),
        }
    }
}
impl std::fmt::Display for ExcludeNewerChange {
    /// Render the change as a human-readable English phrase, delegating to the inner
    /// change types where present.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::GlobalChanged(change) => {
                write!(f, "{change}")
            }
            Self::GlobalAdded(value) => {
                write!(f, "addition of global exclude newer {value}")
            }
            Self::GlobalRemoved => write!(f, "removal of global exclude newer"),
            Self::Package(change) => {
                write!(f, "{change}")
            }
        }
    }
}
/// A difference in the package-specific exclude-newer values between two configurations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExcludeNewerPackageChange {
    /// An exclude-newer value was added for the package.
    PackageAdded(PackageName, ExcludeNewerValue),
    /// The exclude-newer value for the package was removed.
    PackageRemoved(PackageName),
    /// The exclude-newer value for the package changed.
    PackageChanged(PackageName, ExcludeNewerValueChange),
}
impl ExcludeNewerPackageChange {
    /// Whether the change is due to a change in a relative timestamp.
    pub fn is_relative_timestamp_change(&self) -> bool {
        match self {
            Self::PackageAdded(_, _) | Self::PackageRemoved(_) => false,
            Self::PackageChanged(_, change) => change.is_relative_timestamp_change(),
        }
    }
}
impl std::fmt::Display for ExcludeNewerPackageChange {
    /// Render the change as a human-readable English phrase, naming the affected package.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::PackageAdded(name, value) => {
                write!(
                    f,
                    "addition of exclude newer `{value}` for package `{name}`"
                )
            }
            Self::PackageRemoved(name) => {
                write!(f, "removal of exclude newer for package `{name}`")
            }
            Self::PackageChanged(name, change) => {
                write!(f, "{change} for package `{name}`")
            }
        }
    }
}
/// A timestamp that excludes files newer than it.
///
/// May be given as an absolute timestamp or derived from a relative duration (see the
/// [`FromStr`] impl), in which case the originating span is retained alongside the
/// resolved timestamp.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExcludeNewerValue {
    /// The resolved timestamp.
    timestamp: Timestamp,
    /// The span used to derive the [`Timestamp`], if any.
    span: Option<ExcludeNewerSpan>,
}
impl ExcludeNewerValue {
    /// Decompose into the resolved timestamp and the span it was derived from, if any.
    pub fn into_parts(self) -> (Timestamp, Option<ExcludeNewerSpan>) {
        (self.timestamp, self.span)
    }
    /// Compare `self` (the old value) against `other` (the new value), returning the most
    /// significant difference, if any.
    ///
    /// Span differences (added, removed, or changed) take precedence over timestamp
    /// differences; `None` means the values are equivalent.
    pub fn compare(&self, other: &Self) -> Option<ExcludeNewerValueChange> {
        match (&self.span, &other.span) {
            (None, Some(span)) => Some(ExcludeNewerValueChange::SpanAdded(*span)),
            (Some(_), None) => Some(ExcludeNewerValueChange::SpanRemoved),
            (Some(self_span), Some(other_span)) if self_span != other_span => Some(
                ExcludeNewerValueChange::SpanChanged(*self_span, *other_span),
            ),
            // The spans are equal here, so any timestamp difference stems from the relative
            // span having been re-resolved at a different point in time.
            (Some(_), Some(span)) if self.timestamp != other.timestamp => {
                Some(ExcludeNewerValueChange::RelativeTimestampChanged(
                    self.timestamp,
                    other.timestamp,
                    *span,
                ))
            }
            (None, None) if self.timestamp != other.timestamp => Some(
                ExcludeNewerValueChange::AbsoluteTimestampChanged(self.timestamp, other.timestamp),
            ),
            (Some(_), Some(_)) | (None, None) => None,
        }
    }
}
/// A relative duration (e.g., `P2W` or `2 weeks`) from which an exclude-newer timestamp
/// was derived.
#[derive(Debug, Copy, Clone)]
pub struct ExcludeNewerSpan(Span);
impl std::fmt::Display for ExcludeNewerSpan {
    /// Delegate to `jiff::Span`'s `Display` (ISO 8601-style duration output).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}
impl PartialEq for ExcludeNewerSpan {
    /// Compare spans field-by-field via `Span::fieldwise` (`jiff::Span` does not implement
    /// structural equality itself); e.g., `1 day` and `24 hours` compare as *unequal*.
    fn eq(&self, other: &Self) -> bool {
        self.0.fieldwise() == other.0.fieldwise()
    }
}
impl Eq for ExcludeNewerSpan {}
impl serde::Serialize for ExcludeNewerSpan {
    /// Serialize to an ISO 8601 duration string.
    ///
    /// We use ISO 8601 format for serialization (rather than the "friendly" format).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // `Span`'s `Display` emits the ISO 8601 form (e.g., `P2W`).
        serializer.serialize_str(&self.0.to_string())
    }
}
impl<'de> serde::Deserialize<'de> for ExcludeNewerSpan {
    /// Deserialize from a duration string; accepts whatever `jiff::Span`'s `FromStr`
    /// accepts (ISO 8601 durations and, per jiff, the "friendly" format).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        let span: Span = s.parse().map_err(serde::de::Error::custom)?;
        Ok(Self(span))
    }
}
impl serde::Serialize for ExcludeNewerValue {
    /// Serialize as the resolved timestamp only; the originating span, if any, is dropped.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.timestamp.serialize(serializer)
    }
}
impl<'de> serde::Deserialize<'de> for ExcludeNewerValue {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Support both a simple string ("2024-03-11T00:00:00Z") and a table
        // ({ timestamp = "2024-03-11T00:00:00Z", span = "P2W" })
        #[derive(serde::Deserialize)]
        struct TableForm {
            timestamp: Timestamp,
            span: Option<ExcludeNewerSpan>,
        }
        #[derive(serde::Deserialize)]
        #[serde(untagged)]
        enum Helper {
            String(String),
            // Boxed to keep the untagged enum small.
            Table(Box<TableForm>),
        }
        match Helper::deserialize(deserializer)? {
            // The string form goes through `FromStr`, so it also accepts dates and durations.
            Helper::String(s) => Self::from_str(&s).map_err(serde::de::Error::custom),
            Helper::Table(table) => Ok(Self::new(table.timestamp, table.span)),
        }
    }
}
impl ExcludeNewerValue {
    /// Return the [`Timestamp`] in milliseconds.
    pub fn timestamp_millis(&self) -> i64 {
        self.timestamp.as_millisecond()
    }
    /// Return the [`Timestamp`].
    pub fn timestamp(&self) -> Timestamp {
        self.timestamp
    }
    /// Return the [`ExcludeNewerSpan`] used to construct the [`Timestamp`], if any.
    pub fn span(&self) -> Option<&ExcludeNewerSpan> {
        self.span.as_ref()
    }
    /// Create a new [`ExcludeNewerValue`] from a resolved timestamp and, optionally, the
    /// span it was derived from.
    pub fn new(timestamp: Timestamp, span: Option<ExcludeNewerSpan>) -> Self {
        Self { timestamp, span }
    }
}
impl From<Timestamp> for ExcludeNewerValue {
    /// Wrap a bare timestamp with no originating span.
    fn from(timestamp: Timestamp) -> Self {
        Self::new(timestamp, None)
    }
}
/// Determine what format the user likely intended and return an appropriate error message.
///
/// Heuristics, checked in order: ISO 8601 duration (`P…`), "friendly" duration
/// (`2 weeks`), date/timestamp (`2024-01-01…`), then a generic fallback.
fn format_exclude_newer_error(
    input: &str,
    date_err: &jiff::Error,
    span_err: &jiff::Error,
) -> String {
    let trimmed = input.trim();
    // All three formats allow an optional leading sign.
    let after_sign = trimmed.trim_start_matches(['+', '-']);

    // ISO 8601 durations start with `P`/`p`, e.g., "P2W", "+P1D", "-P30D".
    if matches!(after_sign.chars().next(), Some('P' | 'p')) {
        return format!("`{input}` could not be parsed as an ISO 8601 duration: {span_err}");
    }

    // Friendly durations look like digits, optional whitespace, then a unit letter,
    // e.g., "2 weeks", "-30 days", "1hour".
    let after_sign_trimmed = after_sign.trim_start();
    let unit_part = after_sign_trimmed.trim_start_matches(|c: char| c.is_ascii_digit());
    let has_leading_digits = unit_part.len() < after_sign_trimmed.len();
    if has_leading_digits
        && unit_part
            .trim_start()
            .chars()
            .next()
            .is_some_and(|c| c.is_ascii_alphabetic())
    {
        return format!("`{input}` could not be parsed as a duration: {span_err}");
    }

    // Dates and timestamps start with a four-digit year and a dash, e.g., "2024-01-01"
    // or "2024-01-01T00:00:00Z". ASCII digits are single bytes, so a byte check suffices.
    let bytes = after_sign.as_bytes();
    let looks_like_date =
        bytes.len() >= 5 && bytes[..4].iter().all(u8::is_ascii_digit) && bytes[4] == b'-';
    if looks_like_date {
        return format!("`{input}` could not be parsed as a valid date: {date_err}");
    }

    // If we can't tell, return a generic error message
    format!(
        "`{input}` could not be parsed as a valid exclude-newer value (expected a date like `2024-01-01`, a timestamp like `2024-01-01T00:00:00Z`, or a duration like `3 days` or `P3D`)"
    )
}
impl FromStr for ExcludeNewerValue {
    type Err = String;
    /// Parse an [`ExcludeNewerValue`] from a string.
    ///
    /// Accepts RFC 3339 timestamps (e.g., `2006-12-02T02:07:43Z`), local dates in the same format
    /// (e.g., `2006-12-02`), "friendly" durations (e.g., `1 week`, `30 days`), and ISO 8601
    /// durations (e.g., `PT24H`, `P7D`, `P30D`). Year and month units are rejected, since their
    /// length is not fixed.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        // Try parsing as a timestamp first
        if let Ok(timestamp) = input.parse::<Timestamp>() {
            return Ok(Self::new(timestamp, None));
        }
        // Try parsing as a date
        // In Jiff, if an RFC 3339 timestamp could be parsed, then it must necessarily be the case
        // that a date can also be parsed. So we can collapse the error cases here. That is, if we
        // fail to parse a timestamp and a date, then it should be sufficient to just report the
        // error from parsing the date. If someone tried to write a timestamp but committed an error
        // in the non-date portion, the date parsing below will still report a holistic error that
        // will make sense to the user. (I added a snapshot test for that case.)
        let date_err = match input.parse::<jiff::civil::Date>() {
            Ok(date) => {
                // A bare date is resolved to midnight of the *following* day in the system
                // time zone, i.e., the end of the given day.
                let timestamp = date
                    .checked_add(1.day())
                    .and_then(|date| date.to_zoned(TimeZone::system()))
                    .map(|zdt| zdt.timestamp())
                    .map_err(|err| {
                        format!(
                            "`{input}` parsed to date `{date}`, but could not \
                            be converted to a timestamp: {err}",
                        )
                    })?;
                return Ok(Self::new(timestamp, None));
            }
            Err(err) => err,
        };
        // Try parsing as a span
        let span_err = match input.parse::<Span>() {
            Ok(span) => {
                // Allow overriding the current time in tests for deterministic snapshots
                let now = if let Ok(test_time) = std::env::var("UV_TEST_CURRENT_TIMESTAMP") {
                    test_time
                        .parse::<Timestamp>()
                        .expect("UV_TEST_CURRENT_TIMESTAMP must be a valid RFC 3339 timestamp")
                        .to_zoned(TimeZone::UTC)
                } else {
                    Timestamp::now().to_zoned(TimeZone::UTC)
                };
                // We do not allow years and months as units, as the amount of time they represent
                // is not fixed and can differ depending on the local time zone. We could allow this
                // via the CLI in the future, but shouldn't allow it via persistent configuration.
                if span.get_years() != 0 {
                    // Suggest an approximate day count (365 days/year) in the error message.
                    let years = span
                        .total((Unit::Year, &now))
                        .map(f64::ceil)
                        .unwrap_or(1.0)
                        .abs();
                    let days = years * 365.0;
                    return Err(format!(
                        "Duration `{input}` uses unit 'years' which is not allowed; use days instead, e.g., `{days:.0} days`.",
                    ));
                }
                if span.get_months() != 0 {
                    // Suggest an approximate day count (30 days/month) in the error message.
                    let months = span
                        .total((Unit::Month, &now))
                        .map(f64::ceil)
                        .unwrap_or(1.0)
                        .abs();
                    let days = months * 30.0;
                    return Err(format!(
                        "Duration `{input}` uses 'months' which is not allowed; use days instead, e.g., `{days:.0} days`."
                    ));
                }
                // We're using a UTC timezone so there are no transitions (e.g., DST) and days are
                // always 24 hours. This means that we can also allow weeks as a unit.
                //
                // Note we use `span.abs()` so `1 day ago` has the same effect as `1 day` instead
                // of resulting in a future date.
                let cutoff = now.checked_sub(span.abs()).map_err(|err| {
                    format!("Duration `{input}` is too large to subtract from current time: {err}")
                })?;
                return Ok(Self::new(cutoff.into(), Some(ExcludeNewerSpan(span))));
            }
            Err(err) => err,
        };
        // Return a targeted error message based on heuristics about what the user likely intended
        Err(format_exclude_newer_error(input, &date_err, &span_err))
    }
}
impl std::fmt::Display for ExcludeNewerValue {
    /// Display as the resolved timestamp; the originating span, if any, is not shown.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.timestamp.fmt(f)
    }
}
/// A package-specific exclude-newer entry.
///
/// Parsed from the CLI in `PACKAGE=DATE` form (see the [`FromStr`] impl).
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ExcludeNewerPackageEntry {
    // The package the exclude-newer value applies to.
    pub package: PackageName,
    // The exclude-newer value for that package.
    pub timestamp: ExcludeNewerValue,
}
impl FromStr for ExcludeNewerPackageEntry {
    type Err = String;
    /// Parses a [`ExcludeNewerPackageEntry`] from a string in the format `PACKAGE=DATE`.
    ///
    /// The `DATE` part accepts everything the [`ExcludeNewerValue`] parser accepts
    /// (timestamps, dates, and durations).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let Some((package, date)) = s.split_once('=') else {
            return Err(format!(
                "Invalid `exclude-newer-package` value `{s}`: expected format `PACKAGE=DATE`"
            ));
        };
        let package = PackageName::from_str(package).map_err(|err| {
            format!("Invalid `exclude-newer-package` package name `{package}`: {err}")
        })?;
        let timestamp = ExcludeNewerValue::from_str(date)
            .map_err(|err| format!("Invalid `exclude-newer-package` timestamp `{date}`: {err}"))?;
        Ok(Self { package, timestamp })
    }
}
impl From<(PackageName, ExcludeNewerValue)> for ExcludeNewerPackageEntry {
    /// Build an entry from a `(package, value)` pair.
    fn from((package, timestamp): (PackageName, ExcludeNewerValue)) -> Self {
        Self { package, timestamp }
    }
}
/// A map from package name to its package-specific exclude-newer value.
#[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ExcludeNewerPackage(FxHashMap<PackageName, ExcludeNewerValue>);
impl Deref for ExcludeNewerPackage {
    type Target = FxHashMap<PackageName, ExcludeNewerValue>;
    /// Expose the underlying map's read-only API directly on the newtype.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for ExcludeNewerPackage {
    /// Expose the underlying map's mutating API directly on the newtype.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl FromIterator<ExcludeNewerPackageEntry> for ExcludeNewerPackage {
    /// Build the package map from individual entries; for duplicate package names, the
    /// later entry wins (standard hash-map insertion semantics).
    fn from_iter<T: IntoIterator<Item = ExcludeNewerPackageEntry>>(iter: T) -> Self {
        let mut this = Self::default();
        for entry in iter {
            this.insert(entry.package, entry.timestamp);
        }
        this
    }
}
impl IntoIterator for ExcludeNewerPackage {
    type Item = (PackageName, ExcludeNewerValue);
    type IntoIter = std::collections::hash_map::IntoIter<PackageName, ExcludeNewerValue>;
    /// Iterate over owned `(package, value)` pairs in unspecified order.
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl<'a> IntoIterator for &'a ExcludeNewerPackage {
    type Item = (&'a PackageName, &'a ExcludeNewerValue);
    type IntoIter = std::collections::hash_map::Iter<'a, PackageName, ExcludeNewerValue>;
    /// Iterate over borrowed `(package, value)` pairs in unspecified order.
    fn into_iter(self) -> Self::IntoIter {
        self.0.iter()
    }
}
impl ExcludeNewerPackage {
    /// Convert to the inner `HashMap`.
    pub fn into_inner(self) -> FxHashMap<PackageName, ExcludeNewerValue> {
        self.0
    }
    /// Compare `self` (the old map) against `other` (the new map), returning the first
    /// difference found: removals/changes are checked before additions.
    ///
    /// Note that the underlying hash map's iteration order is unspecified, so *which*
    /// difference is reported first is arbitrary when there are several.
    pub fn compare(&self, other: &Self) -> Option<ExcludeNewerPackageChange> {
        for (package, timestamp) in self {
            let Some(other_timestamp) = other.get(package) else {
                return Some(ExcludeNewerPackageChange::PackageRemoved(package.clone()));
            };
            if let Some(change) = timestamp.compare(other_timestamp) {
                return Some(ExcludeNewerPackageChange::PackageChanged(
                    package.clone(),
                    change,
                ));
            }
        }
        for (package, value) in other {
            if !self.contains_key(package) {
                return Some(ExcludeNewerPackageChange::PackageAdded(
                    package.clone(),
                    value.clone(),
                ));
            }
        }
        None
    }
}
/// A setting that excludes files newer than a timestamp, at a global level or per-package.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ExcludeNewer {
    /// Global timestamp that applies to all packages if no package-specific timestamp is set.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub global: Option<ExcludeNewerValue>,
    /// Per-package timestamps that override the global timestamp.
    // `FxHashMap::is_empty` is callable here through `ExcludeNewerPackage`'s `Deref` impl.
    #[serde(default, skip_serializing_if = "FxHashMap::is_empty")]
    pub package: ExcludeNewerPackage,
}
impl ExcludeNewer {
    /// Create a new exclude newer configuration with just a global timestamp.
    pub fn global(global: ExcludeNewerValue) -> Self {
        Self {
            global: Some(global),
            package: ExcludeNewerPackage::default(),
        }
    }
    /// Create a new exclude newer configuration.
    pub fn new(global: Option<ExcludeNewerValue>, package: ExcludeNewerPackage) -> Self {
        Self { global, package }
    }
    /// Create from CLI arguments.
    pub fn from_args(
        global: Option<ExcludeNewerValue>,
        package: Vec<ExcludeNewerPackageEntry>,
    ) -> Self {
        let package: ExcludeNewerPackage = package.into_iter().collect();
        Self { global, package }
    }
    /// Returns the timestamp for a specific package, falling back to the global timestamp if set.
    pub fn exclude_newer_package(&self, package_name: &PackageName) -> Option<ExcludeNewerValue> {
        self.package
            .get(package_name)
            .cloned()
            // `or_else` (rather than `or`) avoids cloning the global value when a
            // package-specific override exists.
            .or_else(|| self.global.clone())
    }
    /// Returns `true` if there is no configuration at all (neither global nor per-package).
    pub fn is_empty(&self) -> bool {
        self.global.is_none() && self.package.is_empty()
    }
    /// Compare `self` (the old configuration) against `other` (the new one), returning the
    /// most significant difference, if any; global changes take precedence over per-package
    /// changes.
    pub fn compare(&self, other: &Self) -> Option<ExcludeNewerChange> {
        match (&self.global, &other.global) {
            (Some(self_global), Some(other_global)) => {
                if let Some(change) = self_global.compare(other_global) {
                    return Some(ExcludeNewerChange::GlobalChanged(change));
                }
            }
            (None, Some(global)) => {
                return Some(ExcludeNewerChange::GlobalAdded(global.clone()));
            }
            (Some(_), None) => return Some(ExcludeNewerChange::GlobalRemoved),
            (None, None) => (),
        }
        self.package
            .compare(&other.package)
            .map(ExcludeNewerChange::Package)
    }
}
impl std::fmt::Display for ExcludeNewer {
    /// Render as a comma-separated list: the global value (if any) first, then each
    /// per-package entry as `name: timestamp`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Track the separator instead of a "first" flag: it's empty before the first
        // component and ", " thereafter.
        let mut sep = "";
        if let Some(global) = &self.global {
            write!(f, "global: {global}")?;
            sep = ", ";
        }
        for (name, timestamp) in &self.package {
            write!(f, "{sep}{name}: {timestamp}")?;
            sep = ", ";
        }
        Ok(())
    }
}
#[cfg(feature = "schemars")]
impl schemars::JsonSchema for ExcludeNewerValue {
fn schema_name() -> Cow<'static, str> {
Cow::Borrowed("ExcludeNewerTimestamp")
}
fn json_schema(_generator: &mut schemars::generate::SchemaGenerator) -> schemars::Schema {
schemars::json_schema!({
"type": "string",
"description": "Exclude distributions uploaded after the given timestamp.\n\nAccepts both RFC 3339 timestamps (e.g., `2006-12-02T02:07:43Z`) and local dates in the same format (e.g., `2006-12-02`), as well as relative durations (e.g., `1 week`, `30 days`, `6 months`). Relative durations are resolved to a timestamp at lock time.",
})
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/marker.rs | crates/uv-resolver/src/marker.rs | use pubgrub::Ranges;
use smallvec::SmallVec;
use std::ops::Bound;
use uv_pep440::{LowerBound, UpperBound, Version};
use uv_pep508::{CanonicalMarkerValueVersion, MarkerTree, MarkerTreeKind};
use uv_distribution_types::RequiresPythonRange;
/// Returns the bounding Python versions that can satisfy the [`MarkerTree`], if it's constrained.
pub(crate) fn requires_python(tree: MarkerTree) -> Option<RequiresPythonRange> {
    /// A small vector of Python version markers.
    type Markers = SmallVec<[Ranges<Version>; 3]>;
    /// Collect the Python version markers from the tree.
    ///
    /// Specifically, performs a DFS to collect all Python requirements on the path to every
    /// `MarkerTreeKind::True` node.
    fn collect_python_markers(tree: MarkerTree, markers: &mut Markers, range: &Ranges<Version>) {
        match tree.kind() {
            MarkerTreeKind::True => {
                // Reached a satisfiable leaf: record the Python range accumulated on this path.
                markers.push(range.clone());
            }
            // Unsatisfiable leaf: contributes no range.
            MarkerTreeKind::False => {}
            MarkerTreeKind::Version(marker) => match marker.key() {
                CanonicalMarkerValueVersion::PythonFullVersion => {
                    // Each edge further constrains the Python version on its subtree.
                    for (range, tree) in marker.edges() {
                        collect_python_markers(tree, markers, range);
                    }
                }
                CanonicalMarkerValueVersion::ImplementationVersion => {
                    // `implementation_version` says nothing about the Python version itself;
                    // recurse without narrowing the accumulated range.
                    for (_, tree) in marker.edges() {
                        collect_python_markers(tree, markers, range);
                    }
                }
            },
            // All non-version marker kinds below are transparent with respect to the Python
            // version: recurse into every child without narrowing the range.
            MarkerTreeKind::String(marker) => {
                for (_, tree) in marker.children() {
                    collect_python_markers(tree, markers, range);
                }
            }
            MarkerTreeKind::In(marker) => {
                for (_, tree) in marker.children() {
                    collect_python_markers(tree, markers, range);
                }
            }
            MarkerTreeKind::Contains(marker) => {
                for (_, tree) in marker.children() {
                    collect_python_markers(tree, markers, range);
                }
            }
            MarkerTreeKind::Extra(marker) => {
                for (_, tree) in marker.children() {
                    collect_python_markers(tree, markers, range);
                }
            }
            MarkerTreeKind::List(marker) => {
                for (_, tree) in marker.children() {
                    collect_python_markers(tree, markers, range);
                }
            }
        }
    }
    // Trivially true/false markers impose no Python bound.
    if tree.is_true() || tree.is_false() {
        return None;
    }
    let mut markers = Markers::new();
    collect_python_markers(tree, &mut markers, &Ranges::full());
    // If there are no Python version markers, return `None`.
    if markers.iter().all(|range| {
        let Some((lower, upper)) = range.bounding_range() else {
            return true;
        };
        matches!((lower, upper), (Bound::Unbounded, Bound::Unbounded))
    }) {
        return None;
    }
    // Take the union of the intersections of the Python version markers.
    let range = markers
        .into_iter()
        .fold(Ranges::empty(), |acc: Ranges<Version>, range| {
            acc.union(&range)
        });
    let (lower, upper) = range.bounding_range()?;
    Some(RequiresPythonRange::new(
        LowerBound::new(lower.cloned()),
        UpperBound::new(upper.cloned()),
    ))
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::ops::Bound;
    use std::str::FromStr;
    use uv_pep440::UpperBound;
    /// End-to-end checks of `requires_python` over representative marker expressions.
    #[test]
    fn test_requires_python() {
        // An exact version match.
        let tree = MarkerTree::from_str("python_full_version == '3.8.*'").unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(
            *range.lower(),
            LowerBound::new(Bound::Included(Version::from_str("3.8").unwrap()))
        );
        assert_eq!(
            *range.upper(),
            UpperBound::new(Bound::Excluded(Version::from_str("3.9").unwrap()))
        );
        // A version range with exclusive bounds.
        let tree =
            MarkerTree::from_str("python_full_version > '3.8' and python_full_version < '3.9'")
                .unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(
            *range.lower(),
            LowerBound::new(Bound::Excluded(Version::from_str("3.8").unwrap()))
        );
        assert_eq!(
            *range.upper(),
            UpperBound::new(Bound::Excluded(Version::from_str("3.9").unwrap()))
        );
        // A version range with inclusive bounds.
        let tree =
            MarkerTree::from_str("python_full_version >= '3.8' and python_full_version <= '3.9'")
                .unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(
            *range.lower(),
            LowerBound::new(Bound::Included(Version::from_str("3.8").unwrap()))
        );
        assert_eq!(
            *range.upper(),
            UpperBound::new(Bound::Included(Version::from_str("3.9").unwrap()))
        );
        // A version with a lower bound.
        let tree = MarkerTree::from_str("python_full_version >= '3.8'").unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(
            *range.lower(),
            LowerBound::new(Bound::Included(Version::from_str("3.8").unwrap()))
        );
        assert_eq!(*range.upper(), UpperBound::new(Bound::Unbounded));
        // A version with an upper bound.
        let tree = MarkerTree::from_str("python_full_version < '3.9'").unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(*range.lower(), LowerBound::new(Bound::Unbounded));
        assert_eq!(
            *range.upper(),
            UpperBound::new(Bound::Excluded(Version::from_str("3.9").unwrap()))
        );
        // A disjunction with a non-Python marker (i.e., an unbounded range).
        let tree =
            MarkerTree::from_str("python_full_version > '3.8' or sys_platform == 'win32'").unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(*range.lower(), LowerBound::new(Bound::Unbounded));
        assert_eq!(*range.upper(), UpperBound::new(Bound::Unbounded));
        // A complex mix of conjunctions and disjunctions.
        let tree = MarkerTree::from_str("(python_full_version >= '3.8' and python_full_version < '3.9') or (python_full_version >= '3.10' and python_full_version < '3.11')").unwrap();
        let range = requires_python(tree).unwrap();
        assert_eq!(
            *range.lower(),
            LowerBound::new(Bound::Included(Version::from_str("3.8").unwrap()))
        );
        assert_eq!(
            *range.upper(),
            UpperBound::new(Bound::Excluded(Version::from_str("3.11").unwrap()))
        );
        // An unbounded range across two specifiers.
        let tree =
            MarkerTree::from_str("python_full_version > '3.8' or python_full_version <= '3.8'")
                .unwrap();
        assert_eq!(requires_python(tree), None);
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/fork_strategy.rs | crates/uv-resolver/src/fork_strategy.rs | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
/// The strategy used when selecting multiple versions of a package across resolver forks.
pub enum ForkStrategy {
    /// Optimize for selecting the fewest number of versions for each package. Older versions may
    /// be preferred if they are compatible with a wider range of supported Python versions or
    /// platforms.
    Fewest,
    /// Optimize for selecting latest supported version of each package, for each supported Python
    /// version.
    #[default]
    RequiresPython,
}
impl std::fmt::Display for ForkStrategy {
    /// Render the strategy in its kebab-case CLI/configuration spelling.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let repr = match self {
            Self::Fewest => "fewest",
            Self::RequiresPython => "requires-python",
        };
        f.write_str(repr)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/fork_indexes.rs | crates/uv-resolver/src/fork_indexes.rs | use rustc_hash::FxHashMap;
use uv_distribution_types::IndexMetadata;
use uv_normalize::PackageName;
use crate::ResolveError;
use crate::resolver::ResolverEnvironment;
/// The index pinned for each package within a single resolver fork.
///
/// See [`crate::resolver::ForkState`].
#[derive(Default, Debug, Clone)]
pub(crate) struct ForkIndexes(FxHashMap<PackageName, IndexMetadata>);
impl ForkIndexes {
    /// Get the [`IndexMetadata`] previously used for a package in this fork.
    pub(crate) fn get(&self, package_name: &PackageName) -> Option<&IndexMetadata> {
        self.0.get(package_name)
    }
    /// Record the [`IndexMetadata`] used for a package in this fork, checking that it is the
    /// only index used for this package.
    ///
    /// # Errors
    ///
    /// Returns [`ResolveError::ConflictingIndexesForEnvironment`] if a *different* index was
    /// already registered for this package in this fork.
    pub(crate) fn insert(
        &mut self,
        package_name: &PackageName,
        index: &IndexMetadata,
        env: &ResolverEnvironment,
    ) -> Result<(), ResolveError> {
        // Fast path: the same index is already recorded for this package. This avoids
        // cloning the package name and index metadata on every repeated insertion, which
        // the previous unconditional-insert implementation paid on each call.
        if self.0.get(package_name) == Some(index) {
            return Ok(());
        }
        if let Some(previous) = self.0.insert(package_name.clone(), index.clone()) {
            // The early return above guarantees `previous != *index` here.
            let mut conflicts = vec![previous.url, index.url.clone()];
            conflicts.sort();
            return Err(ResolveError::ConflictingIndexesForEnvironment {
                package_name: package_name.clone(),
                indexes: conflicts,
                env: env.clone(),
            });
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/report.rs | crates/uv-resolver/src/pubgrub/report.rs | use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::ops::Bound;
use indexmap::IndexSet;
use itertools::Itertools;
use owo_colors::OwoColorize;
use pubgrub::{DerivationTree, Derived, External, Map, Range, ReportFormatter, Term};
use rustc_hash::FxHashMap;
use uv_configuration::{IndexStrategy, NoBinary, NoBuild};
use uv_distribution_types::{
IncompatibleDist, IncompatibleSource, IncompatibleWheel, Index, IndexCapabilities,
IndexLocations, IndexMetadata, IndexUrl, RequiresPython,
};
use uv_normalize::PackageName;
use uv_pep440::{Version, VersionSpecifiers};
use uv_platform_tags::{AbiTag, IncompatibleTag, LanguageTag, PlatformTag, Tags};
use crate::candidate_selector::CandidateSelector;
use crate::error::{ErrorTree, PrefixMatch};
use crate::fork_indexes::ForkIndexes;
use crate::fork_urls::ForkUrls;
use crate::prerelease::AllowPrerelease;
use crate::pubgrub::{PubGrubPackage, PubGrubPackageInner, PubGrubPython};
use crate::python_requirement::{PythonRequirement, PythonRequirementSource};
use crate::resolver::{
MetadataUnavailable, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
UnavailableVersion,
};
use crate::{Flexibility, InMemoryIndex, Options, ResolverEnvironment, VersionsResponse};
/// Formats PubGrub incompatibilities and derivation trees into human-readable
/// resolution error messages.
#[derive(Debug)]
pub(crate) struct PubGrubReportFormatter<'a> {
    /// The versions that were available for each package.
    pub(crate) available_versions: &'a FxHashMap<PackageName, BTreeSet<Version>>,
    /// The Python requirement imposed on the resolution.
    pub(crate) python_requirement: &'a PythonRequirement,
    /// The members of the workspace.
    pub(crate) workspace_members: &'a BTreeSet<PackageName>,
    /// The compatible tags for the resolution.
    pub(crate) tags: Option<&'a Tags>,
}
impl ReportFormatter<PubGrubPackage, Range<Version>, UnavailableReason>
    for PubGrubReportFormatter<'_>
{
    type Output = String;

    /// Render a single "external" (leaf) incompatibility as a human-readable phrase.
    fn format_external(
        &self,
        external: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
    ) -> Self::Output {
        match external {
            External::NotRoot(package, version) => {
                format!("we are solving dependencies of {package} {version}")
            }
            External::NoVersions(package, set) => {
                // Special-case the virtual Python packages: report the requested or
                // installed interpreter version rather than "there are no versions".
                if matches!(
                    &**package,
                    PubGrubPackageInner::Python(PubGrubPython::Target)
                ) {
                    let target = self.python_requirement.target();
                    return format!(
                        "the requested {package} version ({target}) does not satisfy {}",
                        self.compatible_range(package, set)
                    );
                }
                if matches!(
                    &**package,
                    PubGrubPackageInner::Python(PubGrubPython::Installed)
                ) {
                    let installed = self.python_requirement.exact();
                    return format!(
                        "the current {package} version ({installed}) does not satisfy {}",
                        self.compatible_range(package, set)
                    );
                }
                if set == &Range::full() {
                    format!("there are no versions of {package}")
                } else if set.as_singleton().is_some() {
                    format!("there is no version of {package}{set}")
                } else {
                    // Report which versions *are* available by complementing the
                    // unsatisfiable set and simplifying against known versions.
                    let complement = set.complement();
                    let range =
                        // Note that sometimes we do not have a range of available versions, e.g.,
                        // when a package is from a non-registry source. In that case, we cannot
                        // perform further simplification of the range.
                        if let Some(available_versions) = package.name().and_then(|name| self.available_versions.get(name)) {
                            update_availability_range(&complement, available_versions)
                        } else {
                            complement
                        };
                    if range.is_empty() {
                        return format!("there are no versions of {package}");
                    }
                    if range.iter().count() == 1 {
                        format!(
                            "only {} is available",
                            self.availability_range(package, &range)
                        )
                    } else {
                        format!(
                            "only the following versions of {} {}",
                            package,
                            self.availability_range(package, &range)
                        )
                    }
                }
            }
            External::Custom(package, set, reason) => {
                if let Some(root) = self.format_root(package) {
                    format!("{root} cannot be used because {reason}")
                } else {
                    match reason {
                        UnavailableReason::Package(reason) => {
                            let message = reason.singular_message();
                            format!("{}{}", package, Padded::new(" ", &message, ""))
                        }
                        UnavailableReason::Version(reason) => {
                            // Pick singular/plural phrasing based on whether the
                            // range covers more than one version.
                            let range = self.compatible_range(package, set);
                            let message = if range.plural() {
                                reason.plural_message()
                            } else {
                                reason.singular_message()
                            };
                            let context = reason.context_message(
                                self.tags,
                                self.python_requirement.target().abi_tag(),
                            );
                            if let Some(context) = context {
                                format!("{}{}{}", range, Padded::new(" ", &message, " "), context)
                            } else {
                                format!("{}{}", range, Padded::new(" ", &message, ""))
                            }
                        }
                    }
                }
            }
            External::FromDependencyOf(package, package_set, dependency, dependency_set) => {
                // A workspace member depending on itself at an incompatible version.
                if package.name_no_root() == dependency.name_no_root() {
                    if let Some(member) = self.format_workspace_member(package) {
                        return format!(
                            "{member} depends on itself at an incompatible version ({})",
                            PackageRange::dependency(dependency, dependency_set, None)
                        );
                    }
                }
                if let Some(root) = self.format_root_requires(package) {
                    return format!(
                        "{root} {}",
                        self.dependency_range(dependency, dependency_set)
                    );
                }
                format!(
                    "{}",
                    self.compatible_range(package, package_set)
                        .depends_on(dependency, dependency_set),
                )
            }
        }
    }

    /// Try to print terms of an incompatibility in a human-readable way.
    fn format_terms(&self, terms: &Map<PubGrubPackage, Term<Range<Version>>>) -> String {
        let mut terms_vec: Vec<_> = terms.iter().collect();
        // We avoid relying on hashmap iteration order here by always sorting
        // by package first.
        terms_vec.sort_by(|&(pkg1, _), &(pkg2, _)| pkg1.cmp(pkg2));
        match terms_vec.as_slice() {
            [] => "the requirements are unsatisfiable".into(),
            [(root, _)] if matches!(&**(*root), PubGrubPackageInner::Root(_)) => {
                let root = self.format_root(root).unwrap();
                format!("{root} are unsatisfiable")
            }
            [(package, Term::Positive(range))]
                if matches!(&**(*package), PubGrubPackageInner::Package { .. }) =>
            {
                if let Some(member) = self.format_workspace_member(package) {
                    format!("{member}'s requirements are unsatisfiable")
                } else {
                    format!("{} cannot be used", self.compatible_range(package, range))
                }
            }
            [(package, Term::Negative(range))]
                if matches!(&**(*package), PubGrubPackageInner::Package { .. }) =>
            {
                format!("{} must be used", self.compatible_range(package, range))
            }
            // A pair of one positive and one negative term is exactly a dependency
            // relation; reuse the dependency phrasing.
            [(p1, Term::Positive(r1)), (p2, Term::Negative(r2))] => self.format_external(
                &External::FromDependencyOf((*p1).clone(), r1.clone(), (*p2).clone(), r2.clone()),
            ),
            [(p1, Term::Negative(r1)), (p2, Term::Positive(r2))] => self.format_external(
                &External::FromDependencyOf((*p2).clone(), r2.clone(), (*p1).clone(), r1.clone()),
            ),
            slice => {
                // General case: join all terms with English list punctuation
                // ("A and B" for two, "A, B, C" for more).
                let mut result = String::new();
                let str_terms: Vec<_> = slice
                    .iter()
                    .map(|(p, t)| format!("{}", PackageTerm::new(p, t, self)))
                    .collect();
                for (index, term) in str_terms.iter().enumerate() {
                    result.push_str(term);
                    match str_terms.len().cmp(&2) {
                        Ordering::Equal if index == 0 => {
                            result.push_str(" and ");
                        }
                        Ordering::Greater if index + 1 < str_terms.len() => {
                            result.push_str(", ");
                        }
                        _ => (),
                    }
                }
                if slice.len() == 1 {
                    result.push_str(" cannot be used");
                } else {
                    result.push_str(" are incompatible");
                }
                result
            }
        }
    }

    /// Simplest case, we just combine two external incompatibilities.
    fn explain_both_external(
        &self,
        external1: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        external2: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        let external = self.format_both_external(external1, external2);
        let terms = self.format_terms(current_terms);
        format!(
            "Because {}we can conclude that {}",
            Padded::from_string("", &external, ", "),
            Padded::from_string("", &terms, "."),
        )
    }

    /// Both causes have already been explained so we use their refs.
    fn explain_both_ref(
        &self,
        ref_id1: usize,
        derived1: &Derived<PubGrubPackage, Range<Version>, UnavailableReason>,
        ref_id2: usize,
        derived2: &Derived<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        // TODO: order should be chosen to make it more logical.
        let derived1_terms = self.format_terms(&derived1.terms);
        let derived2_terms = self.format_terms(&derived2.terms);
        let current_terms = self.format_terms(current_terms);
        format!(
            "Because we know from ({}) that {}and we know from ({}) that {}{}",
            ref_id1,
            Padded::new("", &derived1_terms, " "),
            ref_id2,
            Padded::new("", &derived2_terms, ", "),
            Padded::new("", &current_terms, "."),
        )
    }

    /// One cause is derived (already explained so one-line),
    /// the other is a one-line external cause,
    /// and finally we conclude with the current incompatibility.
    fn explain_ref_and_external(
        &self,
        ref_id: usize,
        derived: &Derived<PubGrubPackage, Range<Version>, UnavailableReason>,
        external: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        // TODO: order should be chosen to make it more logical.
        let derived_terms = self.format_terms(&derived.terms);
        let external = self.format_external(external);
        let current_terms = self.format_terms(current_terms);
        format!(
            "Because we know from ({}) that {}and {}we can conclude that {}",
            ref_id,
            Padded::new("", &derived_terms, " "),
            Padded::new("", &external, ", "),
            Padded::new("", &current_terms, "."),
        )
    }

    /// Add an external cause to the chain of explanations.
    fn and_explain_external(
        &self,
        external: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        let external = self.format_external(external);
        let terms = self.format_terms(current_terms);
        format!(
            "And because {}we can conclude that {}",
            Padded::from_string("", &external, ", "),
            Padded::from_string("", &terms, "."),
        )
    }

    /// Add an already explained incompat to the chain of explanations.
    fn and_explain_ref(
        &self,
        ref_id: usize,
        derived: &Derived<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        let derived = self.format_terms(&derived.terms);
        let current = self.format_terms(current_terms);
        format!(
            "And because we know from ({}) that {}we can conclude that {}",
            ref_id,
            Padded::from_string("", &derived, ", "),
            Padded::from_string("", &current, "."),
        )
    }

    /// Add an already explained incompat to the chain of explanations.
    fn and_explain_prior_and_external(
        &self,
        prior_external: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        external: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
        current_terms: &Map<PubGrubPackage, Term<Range<Version>>>,
    ) -> String {
        let external = self.format_both_external(prior_external, external);
        let terms = self.format_terms(current_terms);
        format!(
            "And because {}we can conclude that {}",
            Padded::from_string("", &external, ", "),
            Padded::from_string("", &terms, "."),
        )
    }
}
impl PubGrubReportFormatter<'_> {
/// Return the formatting for "the root package requires", if the given
/// package is the root package.
///
/// If not given the root package, returns `None`.
fn format_root_requires(&self, package: &PubGrubPackage) -> Option<String> {
if self.is_workspace() {
if matches!(&**package, PubGrubPackageInner::Root(_)) {
if self.is_single_project_workspace() {
return Some("your project requires".to_string());
}
return Some("your workspace requires".to_string());
}
}
match &**package {
PubGrubPackageInner::Root(Some(name)) => Some(format!("{name} depends on")),
PubGrubPackageInner::Root(None) => Some("you require".to_string()),
_ => None,
}
}
/// Return the formatting for "the root package", if the given
/// package is the root package.
///
/// If not given the root package, returns `None`.
fn format_root(&self, package: &PubGrubPackage) -> Option<String> {
if self.is_workspace() {
if matches!(&**package, PubGrubPackageInner::Root(_)) {
if self.is_single_project_workspace() {
return Some("your project's requirements".to_string());
}
return Some("your workspace's requirements".to_string());
}
}
match &**package {
PubGrubPackageInner::Root(Some(_)) => Some("your requirements".to_string()),
PubGrubPackageInner::Root(None) => Some("your requirements".to_string()),
_ => None,
}
}
/// Whether the resolution error is for a workspace.
fn is_workspace(&self) -> bool {
!self.workspace_members.is_empty()
}
/// Whether the resolution error is for a workspace with a exactly one project.
fn is_single_project_workspace(&self) -> bool {
self.workspace_members.len() == 1
}
/// Return a display name for the package if it is a workspace member.
fn format_workspace_member(&self, package: &PubGrubPackage) -> Option<String> {
match &**package {
// TODO(zanieb): Improve handling of dev and extra for single-project workspaces
PubGrubPackageInner::Package {
name, extra, group, ..
} if self.workspace_members.contains(name) => {
if self.is_single_project_workspace() && extra.is_none() && group.is_none() {
Some("your project".to_string())
} else {
Some(format!("{package}"))
}
}
PubGrubPackageInner::Extra { name, .. } if self.workspace_members.contains(name) => {
Some(format!("{package}"))
}
PubGrubPackageInner::Group { name, .. } if self.workspace_members.contains(name) => {
Some(format!("{package}"))
}
_ => None,
}
}
/// Return whether the given package is the root package.
fn is_root(package: &PubGrubPackage) -> bool {
matches!(&**package, PubGrubPackageInner::Root(_))
}
/// Return whether the given package is a workspace member.
fn is_single_project_workspace_member(&self, package: &PubGrubPackage) -> bool {
match &**package {
// TODO(zanieb): Improve handling of dev and extra for single-project workspaces
PubGrubPackageInner::Package {
name, extra, group, ..
} if self.workspace_members.contains(name) => {
self.is_single_project_workspace() && extra.is_none() && group.is_none()
}
_ => false,
}
}
/// Create a [`PackageRange::compatibility`] display with this formatter attached.
fn compatible_range<'a>(
&'a self,
package: &'a PubGrubPackage,
range: &'a Range<Version>,
) -> PackageRange<'a> {
PackageRange::compatibility(package, range, Some(self))
}
/// Create a [`PackageRange::dependency`] display with this formatter attached.
fn dependency_range<'a>(
&'a self,
package: &'a PubGrubPackage,
range: &'a Range<Version>,
) -> PackageRange<'a> {
PackageRange::dependency(package, range, Some(self))
}
/// Create a [`PackageRange::availability`] display with this formatter attached.
fn availability_range<'a>(
&'a self,
package: &'a PubGrubPackage,
range: &'a Range<Version>,
) -> PackageRange<'a> {
PackageRange::availability(package, range, Some(self))
}
/// Format two external incompatibilities, combining them if possible.
fn format_both_external(
&self,
external1: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
external2: &External<PubGrubPackage, Range<Version>, UnavailableReason>,
) -> String {
match (external1, external2) {
(
External::FromDependencyOf(package1, package_set1, dependency1, dependency_set1),
External::FromDependencyOf(package2, _, dependency2, dependency_set2),
) if package1 == package2 => {
let dependency1 = self.dependency_range(dependency1, dependency_set1);
let dependency2 = self.dependency_range(dependency2, dependency_set2);
if let Some(root) = self.format_root_requires(package1) {
return format!(
"{root} {}and {}",
Padded::new("", &dependency1, " "),
dependency2,
);
}
format!(
"{}",
self.compatible_range(package1, package_set1)
.depends_on(dependency1.package, dependency_set1)
.and(dependency2.package, dependency_set2),
)
}
(.., External::FromDependencyOf(package, _, dependency, _))
if Self::is_root(package)
&& self.is_single_project_workspace_member(dependency) =>
{
self.format_external(external1)
}
(External::FromDependencyOf(package, _, dependency, _), ..)
if Self::is_root(package)
&& self.is_single_project_workspace_member(dependency) =>
{
self.format_external(external2)
}
_ => {
let external1 = self.format_external(external1);
let external2 = self.format_external(external2);
format!(
"{}and {}",
Padded::from_string("", &external1, " "),
&external2,
)
}
}
}
/// Generate the [`PubGrubHints`] for a derivation tree.
///
/// The [`PubGrubHints`] help users resolve errors by providing additional context or modifying
/// their requirements.
pub(crate) fn generate_hints(
&self,
derivation_tree: &ErrorTree,
index: &InMemoryIndex,
selector: &CandidateSelector,
index_locations: &IndexLocations,
index_capabilities: &IndexCapabilities,
available_indexes: &FxHashMap<PackageName, BTreeSet<IndexUrl>>,
unavailable_packages: &FxHashMap<PackageName, UnavailablePackage>,
incomplete_packages: &FxHashMap<PackageName, BTreeMap<Version, MetadataUnavailable>>,
fork_urls: &ForkUrls,
fork_indexes: &ForkIndexes,
env: &ResolverEnvironment,
tags: Option<&Tags>,
workspace_members: &BTreeSet<PackageName>,
options: &Options,
output_hints: &mut IndexSet<PubGrubHint>,
) {
match derivation_tree {
DerivationTree::External(External::Custom(package, set, reason)) => {
if let Some(name) = package.name_no_root() {
// Check for no versions due to pre-release options.
if !fork_urls.contains_key(name) {
self.prerelease_hint(name, set, selector, env, options, output_hints);
}
// Check for no versions due to no `--find-links` flat index.
Self::index_hints(
name,
set,
selector,
index_locations,
index_capabilities,
available_indexes,
unavailable_packages,
incomplete_packages,
output_hints,
);
if let UnavailableReason::Version(UnavailableVersion::IncompatibleDist(
incompatibility,
)) = reason
{
match incompatibility {
// Check for unavailable versions due to `--no-build` or `--no-binary`.
IncompatibleDist::Wheel(IncompatibleWheel::NoBinary) => {
output_hints.insert(PubGrubHint::NoBinary {
package: name.clone(),
option: options.build_options.no_binary().clone(),
});
}
IncompatibleDist::Source(IncompatibleSource::NoBuild) => {
output_hints.insert(PubGrubHint::NoBuild {
package: name.clone(),
option: options.build_options.no_build().clone(),
});
}
// Check for unavailable versions due to incompatible tags.
IncompatibleDist::Wheel(IncompatibleWheel::Tag(tag)) => {
if let Some(hint) = self.tag_hint(
name,
set,
*tag,
index,
selector,
fork_indexes,
env,
tags,
) {
output_hints.insert(hint);
}
}
_ => {}
}
}
}
}
DerivationTree::External(External::NoVersions(package, set)) => {
if let Some(name) = package.name_no_root() {
// Check for no versions due to pre-release options.
if !fork_urls.contains_key(name) {
self.prerelease_hint(name, set, selector, env, options, output_hints);
}
// Check for no versions due to no `--find-links` flat index.
Self::index_hints(
name,
set,
selector,
index_locations,
index_capabilities,
available_indexes,
unavailable_packages,
incomplete_packages,
output_hints,
);
}
}
DerivationTree::External(External::FromDependencyOf(
package,
package_set,
dependency,
dependency_set,
)) => {
// Check for a dependency on a workspace package by a non-workspace package.
// Generally, this indicates that the workspace package is shadowing a transitive
// dependency name.
if let (Some(package_name), Some(dependency_name)) =
(package.name(), dependency.name())
{
if workspace_members.contains(dependency_name)
&& !workspace_members.contains(package_name)
{
output_hints.insert(PubGrubHint::DependsOnWorkspacePackage {
package: package_name.clone(),
dependency: dependency_name.clone(),
workspace: self.is_workspace() && !self.is_single_project_workspace(),
});
}
if package_name == dependency_name
&& (dependency.extra().is_none() || package.extra() == dependency.extra())
&& (dependency.group().is_none() || dependency.group() == package.group())
&& workspace_members.contains(package_name)
{
output_hints.insert(PubGrubHint::DependsOnItself {
package: package_name.clone(),
workspace: self.is_workspace() && !self.is_single_project_workspace(),
});
}
}
// Check for no versions due to `Requires-Python`.
if matches!(
&**dependency,
PubGrubPackageInner::Python(PubGrubPython::Target)
) {
if let Some(name) = package.name() {
output_hints.insert(PubGrubHint::RequiresPython {
source: self.python_requirement.source(),
requires_python: self.python_requirement.target().clone(),
name: name.clone(),
package_set: package_set.clone(),
package_requires_python: dependency_set.clone(),
});
}
}
}
DerivationTree::External(External::NotRoot(..)) => {}
DerivationTree::Derived(derived) => {
self.generate_hints(
&derived.cause1,
index,
selector,
index_locations,
index_capabilities,
available_indexes,
unavailable_packages,
incomplete_packages,
fork_urls,
fork_indexes,
env,
tags,
workspace_members,
options,
output_hints,
);
self.generate_hints(
&derived.cause2,
index,
selector,
index_locations,
index_capabilities,
available_indexes,
unavailable_packages,
incomplete_packages,
fork_urls,
fork_indexes,
env,
tags,
workspace_members,
options,
output_hints,
);
}
}
}
/// Generate a [`PubGrubHint`] for a package that doesn't have any wheels matching the current
/// Python version, ABI, or platform.
fn tag_hint(
&self,
name: &PackageName,
set: &Range<Version>,
tag: IncompatibleTag,
index: &InMemoryIndex,
selector: &CandidateSelector,
fork_indexes: &ForkIndexes,
env: &ResolverEnvironment,
tags: Option<&Tags>,
) -> Option<PubGrubHint> {
let response = if let Some(url) = fork_indexes.get(name).map(IndexMetadata::url) {
index.explicit().get(&(name.clone(), url.clone()))
} else {
index.implicit().get(name)
}?;
let VersionsResponse::Found(version_maps) = &*response else {
return None;
};
let candidate = selector.select_no_preference(name, set, version_maps, env)?;
let prioritized = candidate.prioritized()?;
match tag {
IncompatibleTag::Invalid => None,
IncompatibleTag::Python => {
let best = tags.and_then(Tags::python_tag);
let tags = prioritized.python_tags().collect::<BTreeSet<_>>();
if tags.is_empty() {
None
} else {
Some(PubGrubHint::LanguageTags {
package: name.clone(),
version: candidate.version().clone(),
tags,
best,
})
}
}
IncompatibleTag::Abi | IncompatibleTag::AbiPythonVersion => {
let best = tags.and_then(Tags::abi_tag);
let tags = prioritized
.abi_tags()
// Ignore `none`, which is universally compatible.
//
// As an example, `none` can appear here if we're solving for Python 3.13, and
// the distribution includes a wheel for `cp312-none-macosx_11_0_arm64`.
//
// In that case, the wheel isn't compatible, but when solving for Python 3.13,
// the `cp312` Python tag _can_ be compatible (e.g., for `cp312-abi3-macosx_11_0_arm64.whl`),
// so this is considered an ABI incompatibility rather than Python incompatibility.
.filter(|tag| *tag != AbiTag::None)
.collect::<BTreeSet<_>>();
if tags.is_empty() {
None
} else {
Some(PubGrubHint::AbiTags {
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/distribution.rs | crates/uv-resolver/src/pubgrub/distribution.rs | use uv_distribution_types::{DistributionMetadata, Name, VersionOrUrlRef};
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_pypi_types::VerbatimParsedUrl;
/// A distribution referenced during resolution, identified either by a registry
/// (name, version) pair or by a direct URL.
#[derive(Debug)]
pub(crate) enum PubGrubDistribution<'a> {
    /// A distribution from a registry, identified by name and version.
    Registry(&'a PackageName, &'a Version),
    /// A distribution from a direct URL, identified by name and parsed URL.
    Url(&'a PackageName, &'a VerbatimParsedUrl),
}
impl<'a> PubGrubDistribution<'a> {
pub(crate) fn from_registry(name: &'a PackageName, version: &'a Version) -> Self {
Self::Registry(name, version)
}
pub(crate) fn from_url(name: &'a PackageName, url: &'a VerbatimParsedUrl) -> Self {
Self::Url(name, url)
}
}
impl Name for PubGrubDistribution<'_> {
    /// Both variants carry the package name in their first field, so a single
    /// or-pattern suffices.
    fn name(&self) -> &PackageName {
        let (Self::Registry(name, _) | Self::Url(name, _)) = self;
        name
    }
}
impl DistributionMetadata for PubGrubDistribution<'_> {
    /// Report either the verbatim URL (for URL distributions) or the pinned
    /// version (for registry distributions).
    fn version_or_url(&self) -> VersionOrUrlRef<'_> {
        match self {
            Self::Url(_, url) => VersionOrUrlRef::Url(&url.verbatim),
            Self::Registry(_, version) => VersionOrUrlRef::Version(version),
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/mod.rs | crates/uv-resolver/src/pubgrub/mod.rs | pub(crate) use crate::pubgrub::dependencies::PubGrubDependency;
pub(crate) use crate::pubgrub::distribution::PubGrubDistribution;
pub use crate::pubgrub::package::{PubGrubPackage, PubGrubPackageInner, PubGrubPython};
pub(crate) use crate::pubgrub::priority::{PubGrubPriorities, PubGrubPriority, PubGrubTiebreaker};
pub(crate) use crate::pubgrub::report::PubGrubReportFormatter;
mod dependencies;
mod distribution;
mod package;
mod priority;
mod report;
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/priority.rs | crates/uv-resolver/src/pubgrub/priority.rs | use std::cmp::Reverse;
use hashbrown::hash_map::{EntryRef, OccupiedEntry};
use pubgrub::{DependencyProvider, Range};
use rustc_hash::FxBuildHasher;
use uv_normalize::PackageName;
use uv_pep440::Version;
use crate::dependency_provider::UvDependencyProvider;
use crate::fork_urls::ForkUrls;
use crate::pubgrub::{PubGrubPackage, PubGrubPackageInner, PubGrubPython};
use crate::{FxHashbrownMap, SentinelRange};
/// A prioritization map to guide the PubGrub resolution process.
///
/// During resolution, PubGrub needs to decide which package to consider next. The priorities
/// encoded here are used to guide that decision.
///
/// Like `pip`, we prefer packages that are pinned to direct URLs over packages pinned to a single
/// version over packages that are constrained in some way over packages that are unconstrained.
///
/// See: <https://github.com/pypa/pip/blob/ef78c129b1a966dbbbdb8ebfffc43723e89110d1/src/pip/_internal/resolution/resolvelib/provider.py#L120>
///
/// Our main priority is the package name, the earlier we encounter a package, the higher its
/// priority. This way, all virtual packages of the same name will be applied in a batch. To ensure
/// determinism, we also track the discovery order of virtual packages as secondary order.
#[derive(Clone, Debug, Default)]
pub(crate) struct PubGrubPriorities {
    /// The priority of each package name, shared across all of its virtual packages.
    package_priority: FxHashbrownMap<PackageName, PubGrubPriority>,
    /// The discovery order of each (virtual) package, used as a deterministic tiebreaker.
    virtual_package_tiebreaker: FxHashbrownMap<PubGrubPackage, PubGrubTiebreaker>,
}
impl PubGrubPriorities {
    /// Add a [`PubGrubPackage`] to the priority map.
    pub(crate) fn insert(
        &mut self,
        package: &PubGrubPackage,
        version: &Range<Version>,
        urls: &ForkUrls,
    ) {
        // Register the virtual package's discovery order as its tiebreaker.
        let len = self.virtual_package_tiebreaker.len();
        self.virtual_package_tiebreaker
            .entry_ref(package)
            .or_insert_with(|| {
                PubGrubTiebreaker::from(u32::try_from(len).expect("Less than 2**32 packages"))
            });
        // The root package and Python constraints have no explicit priority, the root package is
        // always first and the Python version (range) is fixed.
        let Some(name) = package.name_no_root() else {
            return;
        };
        let len = self.package_priority.len();
        match self.package_priority.entry_ref(name) {
            EntryRef::Occupied(mut entry) => {
                // Preserve the original index.
                let index = Self::get_index(&entry).unwrap_or(len);
                // Compute the priority.
                let priority = if urls.get(name).is_some() {
                    PubGrubPriority::DirectUrl(Reverse(index))
                } else if version.as_singleton().is_some()
                    || SentinelRange::from(version).is_sentinel()
                {
                    PubGrubPriority::Singleton(Reverse(index))
                } else {
                    // Keep the conflict-causing packages to avoid loops where we seesaw between
                    // `Unspecified` and `Conflict*`.
                    if matches!(
                        entry.get(),
                        PubGrubPriority::ConflictEarly(_) | PubGrubPriority::ConflictLate(_)
                    ) {
                        return;
                    }
                    PubGrubPriority::Unspecified(Reverse(index))
                };
                // Take the maximum of the new and existing priorities.
                if priority > *entry.get() {
                    entry.insert(priority);
                }
            }
            EntryRef::Vacant(entry) => {
                // Compute the priority.
                let priority = if urls.get(name).is_some() {
                    PubGrubPriority::DirectUrl(Reverse(len))
                } else if version.as_singleton().is_some()
                    || SentinelRange::from(version).is_sentinel()
                {
                    PubGrubPriority::Singleton(Reverse(len))
                } else {
                    PubGrubPriority::Unspecified(Reverse(len))
                };
                // Insert the priority.
                entry.insert(priority);
            }
        }
    }

    /// Extract the discovery index stored in an entry's priority, if any.
    /// (`Root` carries no index.)
    fn get_index(
        entry: &OccupiedEntry<'_, PackageName, PubGrubPriority, FxBuildHasher>,
    ) -> Option<usize> {
        match entry.get() {
            PubGrubPriority::ConflictLate(Reverse(index))
            | PubGrubPriority::Unspecified(Reverse(index))
            | PubGrubPriority::ConflictEarly(Reverse(index))
            | PubGrubPriority::Singleton(Reverse(index))
            | PubGrubPriority::DirectUrl(Reverse(index)) => Some(*index),
            PubGrubPriority::Root => None,
        }
    }

    /// Return the [`PubGrubPriority`] of the given package, if it exists.
    pub(crate) fn get(
        &self,
        package: &PubGrubPackage,
    ) -> <UvDependencyProvider as DependencyProvider>::Priority {
        match &**package {
            // There is only a single root package despite the value. The priorities on root don't
            // matter for the resolution output, since the Pythons don't have dependencies
            // themselves and are only used when the package is incompatible.
            PubGrubPackageInner::Root(_) => (PubGrubPriority::Root, PubGrubTiebreaker::from(0)),
            PubGrubPackageInner::Python(PubGrubPython::Installed) => {
                (PubGrubPriority::Root, PubGrubTiebreaker::from(1))
            }
            PubGrubPackageInner::Python(PubGrubPython::Target) => {
                (PubGrubPriority::Root, PubGrubTiebreaker::from(2))
            }
            PubGrubPackageInner::System(_) => (PubGrubPriority::Root, PubGrubTiebreaker::from(3)),
            PubGrubPackageInner::Marker { name, .. }
            | PubGrubPackageInner::Extra { name, .. }
            | PubGrubPackageInner::Group { name, .. }
            | PubGrubPackageInner::Package { name, .. } => {
                // To ensure deterministic resolution, each (virtual) package needs to be registered
                // on discovery (as dependency of another package), before we query it for
                // prioritization.
                let package_priority = match self.package_priority.get(name) {
                    Some(priority) => *priority,
                    None => {
                        if cfg!(debug_assertions) {
                            panic!("Package not known: `{name}` from `{package}`")
                        } else {
                            // In release builds, degrade gracefully to the lowest priority.
                            PubGrubPriority::Unspecified(Reverse(usize::MAX))
                        }
                    }
                };
                let package_tiebreaker = match self.virtual_package_tiebreaker.get(package) {
                    Some(tiebreaker) => *tiebreaker,
                    None => {
                        if cfg!(debug_assertions) {
                            panic!("Package not registered in prioritization: `{package:?}`")
                        } else {
                            // In release builds, degrade gracefully to the lowest tiebreaker.
                            PubGrubTiebreaker(Reverse(u32::MAX))
                        }
                    }
                };
                (package_priority, package_tiebreaker)
            }
        }
    }

    /// Mark a package as prioritized by setting it to [`PubGrubPriority::ConflictEarly`], if it
    /// doesn't have a higher priority already.
    ///
    /// Returns whether the priority was changed, i.e., it's the first time we hit this condition
    /// for the package.
    pub(crate) fn mark_conflict_early(&mut self, package: &PubGrubPackage) -> bool {
        let Some(name) = package.name_no_root() else {
            // Not a correctness bug
            if cfg!(debug_assertions) {
                panic!("URL packages must not be involved in conflict handling")
            } else {
                return false;
            }
        };
        let len = self.package_priority.len();
        match self.package_priority.entry_ref(name) {
            EntryRef::Vacant(entry) => {
                entry.insert(PubGrubPriority::ConflictEarly(Reverse(len)));
                true
            }
            EntryRef::Occupied(mut entry) => {
                if matches!(entry.get(), PubGrubPriority::ConflictEarly(_)) {
                    // Already in the right category
                    return false;
                }
                let index = Self::get_index(&entry).unwrap_or(len);
                entry.insert(PubGrubPriority::ConflictEarly(Reverse(index)));
                true
            }
        }
    }

    /// Mark a package as prioritized by setting it to [`PubGrubPriority::ConflictLate`], if it
    /// doesn't have a higher priority already.
    ///
    /// Returns whether the priority was changed, i.e., it's the first time this package was
    /// marked as conflicting above the threshold.
    pub(crate) fn mark_conflict_late(&mut self, package: &PubGrubPackage) -> bool {
        let Some(name) = package.name_no_root() else {
            // Not a correctness bug
            if cfg!(debug_assertions) {
                panic!("URL packages must not be involved in conflict handling")
            } else {
                return false;
            }
        };
        let len = self.package_priority.len();
        match self.package_priority.entry_ref(name) {
            EntryRef::Vacant(entry) => {
                entry.insert(PubGrubPriority::ConflictLate(Reverse(len)));
                true
            }
            EntryRef::Occupied(mut entry) => {
                // The ConflictEarly` match avoids infinite loops.
                if matches!(
                    entry.get(),
                    PubGrubPriority::ConflictLate(_) | PubGrubPriority::ConflictEarly(_)
                ) {
                    // Already in the right category
                    return false;
                }
                let index = Self::get_index(&entry).unwrap_or(len);
                entry.insert(PubGrubPriority::ConflictLate(Reverse(index)));
                true
            }
        }
    }
}
/// The priority category of a package, ordered from lowest (`Unspecified`) to
/// highest (`Root`); the derived `Ord` relies on variant declaration order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum PubGrubPriority {
    /// The package has no specific priority.
    ///
    /// As such, its priority is based on the order in which the packages were added (FIFO), such
    /// that the first package we visit is prioritized over subsequent packages.
    ///
    /// TODO(charlie): Prefer constrained over unconstrained packages, if they're at the same depth
    /// in the dependency graph.
    Unspecified(Reverse<usize>),
    /// Selected versions of this package were often the culprit of rejecting another package, so
    /// it's deprioritized behind `ConflictEarly`. It's still higher than `Unspecified` to
    /// conflict before selecting unrelated packages.
    ConflictLate(Reverse<usize>),
    /// Selected versions of this package were often rejected, so it's prioritized over
    /// `ConflictLate`.
    ConflictEarly(Reverse<usize>),
    /// The version range is constrained to a single version (e.g., with the `==` operator).
    Singleton(Reverse<usize>),
    /// The package was specified via a direct URL.
    ///
    /// N.B.: URLs need to have priority over registry distributions for correctly matching registry
    /// distributions to URLs, see [`PubGrubPackage::from_package`] and
    /// [`ForkUrls`].
    DirectUrl(Reverse<usize>),
    /// The package is the root package.
    Root,
}
/// A deterministic tie-breaker between packages that otherwise compare equal.
///
/// The wrapped [`Reverse`] inverts the ordering: a *smaller* input value
/// produces a tie-breaker that compares *greater*.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct PubGrubTiebreaker(Reverse<u32>);

impl From<u32> for PubGrubTiebreaker {
    fn from(value: u32) -> Self {
        PubGrubTiebreaker(Reverse(value))
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/package.rs | crates/uv-resolver/src/pubgrub/package.rs | use std::ops::Deref;
use std::sync::Arc;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep508::MarkerTree;
use uv_pypi_types::ConflictItemRef;
use crate::python_requirement::PythonRequirement;
/// [`Arc`] wrapper around [`PubGrubPackageInner`] to make cloning (inside PubGrub) cheap.
///
/// Cloning only bumps the [`Arc`] reference count; the inner value is shared.
#[derive(Debug, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct PubGrubPackage(Arc<PubGrubPackageInner>);
// Allow a `PubGrubPackage` to be used wherever a `&PubGrubPackageInner` is expected.
impl Deref for PubGrubPackage {
    type Target = PubGrubPackageInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Delegate display formatting to the inner representation. The fully-qualified
// call avoids ambiguity between `Display::fmt` and `Debug::fmt`.
impl std::fmt::Display for PubGrubPackage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}
// Wrap an inner package in a fresh `Arc`.
impl From<PubGrubPackageInner> for PubGrubPackage {
    fn from(package: PubGrubPackageInner) -> Self {
        Self(Arc::new(package))
    }
}
/// A PubGrub-compatible wrapper around a "Python package", with two notable characteristics:
///
/// 1. Includes a [`PubGrubPackage::Root`] variant, to satisfy PubGrub's requirement that a
///    resolution starts from a single root.
/// 2. Uses the same strategy as pip and posy to handle extras: for each extra, we create a virtual
///    package (e.g., `black[colorama]`), and mark it as a dependency of the real package (e.g.,
///    `black`). We then discard the virtual packages at the end of the resolution process.
// Note: the variant order also defines the derived `Ord`; don't reorder casually.
#[derive(Debug, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub enum PubGrubPackageInner {
    /// The root package, which is used to start the resolution process.
    Root(Option<PackageName>),
    /// A Python version.
    Python(PubGrubPython),
    /// A system package, which is used to represent a non-Python package.
    System(PackageName),
    /// A Python package.
    ///
    /// Note that it is guaranteed that `extra` and `dev` are never both
    /// `Some`. That is, if one is `Some` then the other must be `None`.
    Package {
        name: PackageName,
        extra: Option<ExtraName>,
        group: Option<GroupName>,
        marker: MarkerTree,
    },
    /// A proxy package to represent a dependency with an extra (e.g., `black[colorama]`).
    ///
    /// For a given package `black`, and an extra `colorama`, we create a virtual package
    /// with exactly two dependencies: `PubGrubPackage::Package("black", None)` and
    /// `PubGrubPackage::Package("black", Some("colorama")`. Both dependencies are pinned to the
    /// same version, and the virtual package is discarded at the end of the resolution process.
    ///
    /// The benefit of the proxy package (versus `PubGrubPackage::Package("black", Some("colorama")`
    /// on its own) is that it enables us to avoid attempting to retrieve metadata for irrelevant
    /// versions the extra variants by making it clear to PubGrub that the extra variant must match
    /// the exact same version of the base variant. Without the proxy package, then when provided
    /// requirements like `black==23.0.1` and `black[colorama]`, PubGrub may attempt to retrieve
    /// metadata for `black[colorama]` versions other than `23.0.1`.
    Extra {
        name: PackageName,
        extra: ExtraName,
        marker: MarkerTree,
    },
    /// A proxy package to represent an enabled dependency group.
    ///
    /// This is similar in spirit to [PEP 735](https://peps.python.org/pep-0735/) and similar in
    /// implementation to the `Extra` variant. The main difference is that we treat groups as
    /// enabled globally, rather than on a per-requirement basis.
    Group {
        name: PackageName,
        group: GroupName,
        marker: MarkerTree,
    },
    /// A proxy package for a base package with a marker (e.g., `black; python_version >= "3.6"`).
    ///
    /// If a requirement has an extra _and_ a marker, it will be represented via the `Extra` variant
    /// rather than the `Marker` variant.
    Marker {
        name: PackageName,
        /// The marker associated with this proxy package.
        marker: MarkerTree,
    },
}
impl PubGrubPackage {
    /// Create a [`PubGrubPackage`] from a package name and extra.
    ///
    /// Variant precedence: `Extra` if an extra is given, else `Group` if a group is given,
    /// else `Marker` for a non-trivial marker, else a plain `Package`.
    pub(crate) fn from_package(
        name: PackageName,
        extra: Option<ExtraName>,
        group: Option<GroupName>,
        marker: MarkerTree,
    ) -> Self {
        // Remove all extra expressions from the marker, since we track extras
        // separately. This also avoids an issue where packages added via
        // extras end up having two distinct marker expressions, which in turn
        // makes them two distinct packages. This results in PubGrub being
        // unable to unify version constraints across such packages.
        let marker = marker.simplify_extras_with(|_| true);
        if let Some(extra) = extra {
            Self(Arc::new(PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            }))
        } else if let Some(group) = group {
            Self(Arc::new(PubGrubPackageInner::Group {
                name,
                group,
                marker,
            }))
        } else if !marker.is_true() {
            Self(Arc::new(PubGrubPackageInner::Marker { name, marker }))
        } else {
            // `extra` is necessarily `None` on this branch.
            Self(Arc::new(PubGrubPackageInner::Package {
                name,
                extra,
                group: None,
                marker,
            }))
        }
    }
    /// If this package is a proxy package, return the base package it depends on.
    ///
    /// While dependency groups may be attached to a package, we don't consider them here as
    /// there is no (mandatory) dependency from a dependency group to the package.
    pub(crate) fn base_package(&self) -> Option<Self> {
        match &**self {
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_)
            | PubGrubPackageInner::Package { .. } => None,
            PubGrubPackageInner::Group { .. } => {
                // The dependency groups of a package do not by themselves require the package
                // itself.
                None
            }
            PubGrubPackageInner::Extra { name, .. } | PubGrubPackageInner::Marker { name, .. } => {
                Some(Self::from_package(
                    name.clone(),
                    None,
                    None,
                    MarkerTree::TRUE,
                ))
            }
        }
    }
    /// Returns the name of this PubGrub package, if it has one.
    pub(crate) fn name(&self) -> Option<&PackageName> {
        match &**self {
            // A root can never be a dependency of another package, and a `Python` pubgrub
            // package is never returned by `get_dependencies`. So these cases never occur.
            PubGrubPackageInner::Root(None) | PubGrubPackageInner::Python(_) => None,
            PubGrubPackageInner::Root(Some(name))
            | PubGrubPackageInner::System(name)
            | PubGrubPackageInner::Package { name, .. }
            | PubGrubPackageInner::Extra { name, .. }
            | PubGrubPackageInner::Group { name, .. }
            | PubGrubPackageInner::Marker { name, .. } => Some(name),
        }
    }
    /// Returns the name of this PubGrub package, if it is not the root package, a Python version
    /// constraint, or a system package.
    pub(crate) fn name_no_root(&self) -> Option<&PackageName> {
        match &**self {
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_) => None,
            PubGrubPackageInner::Package { name, .. }
            | PubGrubPackageInner::Extra { name, .. }
            | PubGrubPackageInner::Group { name, .. }
            | PubGrubPackageInner::Marker { name, .. } => Some(name),
        }
    }
    /// Returns the marker expression associated with this PubGrub package, if
    /// it has one.
    pub(crate) fn marker(&self) -> MarkerTree {
        match &**self {
            // A root can never be a dependency of another package, and a `Python` pubgrub
            // package is never returned by `get_dependencies`. So these cases never occur.
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_) => MarkerTree::TRUE,
            PubGrubPackageInner::Package { marker, .. }
            | PubGrubPackageInner::Extra { marker, .. }
            | PubGrubPackageInner::Group { marker, .. } => *marker,
            PubGrubPackageInner::Marker { marker, .. } => *marker,
        }
    }
    /// Returns the extra name associated with this PubGrub package, if it has
    /// one.
    ///
    /// Note that if this returns `Some`, then `dev` must return `None`.
    pub(crate) fn extra(&self) -> Option<&ExtraName> {
        match &**self {
            // A root can never be a dependency of another package, and a `Python` pubgrub
            // package is never returned by `get_dependencies`. So these cases never occur.
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_)
            | PubGrubPackageInner::Package { extra: None, .. }
            | PubGrubPackageInner::Group { .. }
            | PubGrubPackageInner::Marker { .. } => None,
            PubGrubPackageInner::Package {
                extra: Some(extra), ..
            }
            | PubGrubPackageInner::Extra { extra, .. } => Some(extra),
        }
    }
    /// Returns the dependency group name associated with this PubGrub
    /// package, if it has one.
    ///
    /// Note that if this returns `Some`, then `extra` must return `None`.
    pub(crate) fn group(&self) -> Option<&GroupName> {
        match &**self {
            // A root can never be a dependency of another package, and a `Python` pubgrub
            // package is never returned by `get_dependencies`. So these cases never occur.
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_)
            | PubGrubPackageInner::Package { group: None, .. }
            | PubGrubPackageInner::Extra { .. }
            | PubGrubPackageInner::Marker { .. } => None,
            PubGrubPackageInner::Package {
                group: Some(group), ..
            }
            | PubGrubPackageInner::Group { group, .. } => Some(group),
        }
    }
    /// Extracts a possible conflicting item from this package.
    ///
    /// If this package can't possibly be classified as conflicting, then
    /// this returns `None`.
    pub(crate) fn conflicting_item(&self) -> Option<ConflictItemRef<'_>> {
        let package = self.name_no_root()?;
        match (self.extra(), self.group()) {
            (None, None) => Some(ConflictItemRef::from(package)),
            (Some(extra), None) => Some(ConflictItemRef::from((package, extra))),
            (None, Some(group)) => Some(ConflictItemRef::from((package, group))),
            (Some(extra), Some(group)) => {
                // `extra` and `group` are mutually exclusive by construction; see the
                // `Package` variant docs.
                unreachable!(
                    "PubGrub package cannot have both an extra and a group, \
                     but found extra=`{extra}` and group=`{group}` for \
                     package `{package}`",
                )
            }
        }
    }
    /// Returns `true` if this PubGrub package is the root package.
    pub(crate) fn is_root(&self) -> bool {
        matches!(&**self, PubGrubPackageInner::Root(_))
    }
    /// Returns `true` if this PubGrub package is a proxy package.
    pub(crate) fn is_proxy(&self) -> bool {
        matches!(
            &**self,
            PubGrubPackageInner::Extra { .. }
                | PubGrubPackageInner::Group { .. }
                | PubGrubPackageInner::Marker { .. }
        )
    }
    /// This simplifies the markers on this package (if any exist) using the
    /// given Python requirement as assumed context.
    ///
    /// See `RequiresPython::simplify_markers` for more details.
    ///
    /// NOTE: This routine is kind of weird, because this should only really be
    /// applied in contexts where the `PubGrubPackage` is printed as output.
    /// So in theory, this should be a transformation into a new type with a
    /// "printable" `PubGrubPackage` coupled with a `Requires-Python`. But at
    /// time of writing, this was a larger refactor, particularly in the error
    /// reporting where this routine is used.
    pub(crate) fn simplify_markers(&mut self, python_requirement: &PythonRequirement) {
        // `Arc::make_mut` clones the inner value if it's shared, so this mutation
        // never affects other `PubGrubPackage` handles.
        match *Arc::make_mut(&mut self.0) {
            PubGrubPackageInner::Root(_)
            | PubGrubPackageInner::Python(_)
            | PubGrubPackageInner::System(_) => {}
            PubGrubPackageInner::Package { ref mut marker, .. }
            | PubGrubPackageInner::Extra { ref mut marker, .. }
            | PubGrubPackageInner::Group { ref mut marker, .. }
            | PubGrubPackageInner::Marker { ref mut marker, .. } => {
                *marker = python_requirement.simplify_markers(*marker);
            }
        }
    }
    /// This isn't actually used anywhere, but can be useful for printf-debugging.
    #[allow(dead_code)]
    pub(crate) fn kind(&self) -> &'static str {
        match &**self {
            PubGrubPackageInner::Root(_) => "root",
            PubGrubPackageInner::Python(_) => "python",
            PubGrubPackageInner::System(_) => "system",
            PubGrubPackageInner::Package { .. } => "package",
            PubGrubPackageInner::Extra { .. } => "extra",
            PubGrubPackageInner::Group { .. } => "group",
            PubGrubPackageInner::Marker { .. } => "marker",
        }
    }
    /// Returns a new [`PubGrubPackage`] representing the base package with the given name.
    pub(crate) fn base(name: &PackageName) -> Self {
        Self::from_package(name.clone(), None, None, MarkerTree::TRUE)
    }
}
/// Which Python interpreter a version constraint refers to.
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Hash, Ord)]
pub enum PubGrubPython {
    /// The Python version installed in the current environment.
    Installed,
    /// The Python version for which dependencies are being resolved.
    Target,
}
// Renders packages in the compact notation used throughout resolver output:
// `name`, `name[extra]`, `name:group`, each optionally suffixed with `{marker}`.
impl std::fmt::Display for PubGrubPackageInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Root(name) => {
                if let Some(name) = name {
                    write!(f, "{}", name.as_ref())
                } else {
                    write!(f, "root")
                }
            }
            Self::Python(_) => write!(f, "Python"),
            Self::System(name) => write!(f, "system:{name}"),
            Self::Package {
                name,
                extra: None,
                marker,
                group: None,
            } => {
                if let Some(marker) = marker.contents() {
                    write!(f, "{name}{{{marker}}}")
                } else {
                    write!(f, "{name}")
                }
            }
            Self::Package {
                name,
                extra: Some(extra),
                marker,
                group: None,
            } => {
                if let Some(marker) = marker.contents() {
                    write!(f, "{name}[{extra}]{{{marker}}}")
                } else {
                    write!(f, "{name}[{extra}]")
                }
            }
            Self::Package {
                name,
                extra: None,
                marker,
                group: Some(dev),
            } => {
                if let Some(marker) = marker.contents() {
                    write!(f, "{name}:{dev}{{{marker}}}")
                } else {
                    write!(f, "{name}:{dev}")
                }
            }
            Self::Marker { name, marker, .. } => {
                if let Some(marker) = marker.contents() {
                    write!(f, "{name}{{{marker}}}")
                } else {
                    write!(f, "{name}")
                }
            }
            Self::Extra { name, extra, .. } => write!(f, "{name}[{extra}]"),
            Self::Group {
                name, group: dev, ..
            } => write!(f, "{name}:{dev}"),
            // It is guaranteed that `extra` and `dev` are never set at the same time.
            Self::Package {
                name: _,
                extra: Some(_),
                marker: _,
                group: Some(_),
            } => unreachable!(),
        }
    }
}
// Cheap clone (an `Arc` refcount bump) for call sites holding a reference.
impl From<&Self> for PubGrubPackage {
    fn from(package: &Self) -> Self {
        package.clone()
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/pubgrub/dependencies.rs | crates/uv-resolver/src/pubgrub/dependencies.rs | use std::borrow::Cow;
use std::iter;
use either::Either;
use pubgrub::Ranges;
use uv_distribution_types::{Requirement, RequirementSource};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{Version, VersionSpecifiers};
use uv_pypi_types::{
ConflictItemRef, Conflicts, ParsedArchiveUrl, ParsedDirectoryUrl, ParsedGitUrl, ParsedPathUrl,
ParsedUrl, VerbatimParsedUrl,
};
use crate::pubgrub::{PubGrubPackage, PubGrubPackageInner};
/// A single edge in the dependency graph handed to PubGrub: a package plus the
/// version range and (optional) URL that a requirement allows for it.
#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) struct PubGrubDependency {
    /// The package this dependency refers to.
    pub(crate) package: PubGrubPackage,
    /// The version range permitted for this dependency.
    pub(crate) version: Ranges<Version>,
    /// When the parent that created this dependency is a "normal" package
    /// (non-extra non-group), this corresponds to its name.
    ///
    /// This is used to create project-level `ConflictItemRef` for a specific
    /// package. In effect, this lets us "delay" filtering of project
    /// dependencies when a conflict is declared between the project and a
    /// group.
    ///
    /// The main problem with dealing with project level conflicts is that if you
    /// declare a conflict between a package and a group, we represent that
    /// group as a dependency of that package. So if you filter out the package
    /// in a fork due to a conflict, you also filter out the group. Therefore,
    /// we introduce this parent field to enable "delayed" filtering.
    pub(crate) parent: Option<PackageName>,
    /// This field is set if the [`Requirement`] had a URL. We still use a URL from [`Urls`]
    /// even if this field is None where there is an override with a URL or there is a different
    /// requirement or constraint for the same package that has a URL.
    pub(crate) url: Option<VerbatimParsedUrl>,
}
impl PubGrubDependency {
    /// Expand a single [`Requirement`] into the PubGrub dependencies it induces.
    ///
    /// A requirement with extras or groups expands to one proxy package per extra/group,
    /// plus (when a declared conflict is involved) the base package itself.
    pub(crate) fn from_requirement<'a>(
        conflicts: &Conflicts,
        requirement: Cow<'a, Requirement>,
        group_name: Option<&'a GroupName>,
        parent_package: Option<&'a PubGrubPackage>,
    ) -> impl Iterator<Item = Self> + 'a {
        let parent_name = parent_package.and_then(|package| package.name_no_root());
        // A "normal" parent is a plain package: neither an extra nor a group proxy.
        let is_normal_parent = parent_package
            .map(|pp| pp.extra().is_none() && pp.group().is_none())
            .unwrap_or(false);
        let iter = if !requirement.extras.is_empty() {
            // This is crazy subtle, but if any of the extras in the
            // requirement are part of a declared conflict, then we
            // specifically need (at time of writing) to include the
            // base package as a dependency. This results in both
            // the base package and the extra package being sibling
            // dependencies at the point in which forks are created
            // base on conflicting extras. If the base package isn't
            // present at that point, then it's impossible for the
            // fork that excludes all conflicting extras to reach
            // the non-extra dependency, which may be necessary for
            // correctness.
            //
            // But why do we not include the base package in the first
            // place? Well, that's part of an optimization[1].
            //
            // [1]: https://github.com/astral-sh/uv/pull/9540
            let base = if requirement
                .extras
                .iter()
                .any(|extra| conflicts.contains(&requirement.name, extra))
            {
                Either::Left(iter::once((None, None)))
            } else {
                Either::Right(iter::empty())
            };
            Either::Left(Either::Left(base.chain(
                Box::into_iter(requirement.extras.clone()).map(|extra| (Some(extra), None)),
            )))
        } else if !requirement.groups.is_empty() {
            // As with extras above: if any group participates in a declared
            // conflict, also depend on the base package.
            let base = if requirement
                .groups
                .iter()
                .any(|group| conflicts.contains(&requirement.name, group))
            {
                Either::Left(iter::once((None, None)))
            } else {
                Either::Right(iter::empty())
            };
            Either::Left(Either::Right(base.chain(
                Box::into_iter(requirement.groups.clone()).map(|group| (None, Some(group))),
            )))
        } else {
            Either::Right(iter::once((None, None)))
        };
        // Add the package, plus any extra variants.
        iter.map(move |(extra, group)| {
            let pubgrub_requirement =
                PubGrubRequirement::from_requirement(&requirement, extra, group);
            let PubGrubRequirement {
                package,
                version,
                url,
            } = pubgrub_requirement;
            match &*package {
                // `Package` and `Marker` are handled identically: record the
                // parent only when it is a normal (non-proxy) package, so that
                // project-level conflicts can be filtered lazily (see the
                // `parent` field docs).
                PubGrubPackageInner::Package { .. } | PubGrubPackageInner::Marker { .. } => {
                    Self {
                        package,
                        version,
                        parent: if is_normal_parent {
                            parent_name.cloned()
                        } else {
                            None
                        },
                        url,
                    }
                }
                PubGrubPackageInner::Extra { name, .. } => {
                    // Outside of dependency groups, a package should never
                    // depend on its own extra proxy (those are flattened).
                    if group_name.is_none() {
                        debug_assert!(
                            parent_name.is_none_or(|parent_name| parent_name != name),
                            "extras not flattened for {name}"
                        );
                    }
                    Self {
                        package,
                        version,
                        parent: None,
                        url,
                    }
                }
                PubGrubPackageInner::Group { name, .. } => {
                    if group_name.is_none() {
                        debug_assert!(
                            parent_name.is_none_or(|parent_name| parent_name != name),
                            "group not flattened for {name}"
                        );
                    }
                    Self {
                        package,
                        version,
                        parent: None,
                        url,
                    }
                }
                PubGrubPackageInner::Root(_) => unreachable!("Root package in dependencies"),
                PubGrubPackageInner::Python(_) => {
                    unreachable!("Python package in dependencies")
                }
                PubGrubPackageInner::System(_) => unreachable!("System package in dependencies"),
            }
        })
    }
    /// Extracts a possible conflicting item from this dependency.
    ///
    /// If this package can't possibly be classified as conflicting, then this
    /// returns `None`.
    pub(crate) fn conflicting_item(&self) -> Option<ConflictItemRef<'_>> {
        self.package.conflicting_item()
    }
}
/// A PubGrub-compatible package and version range.
#[derive(Debug, Clone)]
pub(crate) struct PubGrubRequirement {
    /// The package the requirement refers to.
    pub(crate) package: PubGrubPackage,
    /// The allowed version range (the full range for URL-based requirements).
    pub(crate) version: Ranges<Version>,
    /// The parsed URL, for URL-based requirements; `None` for registry requirements.
    pub(crate) url: Option<VerbatimParsedUrl>,
}
impl PubGrubRequirement {
    /// Convert a [`Requirement`] to a PubGrub-compatible package and range, while returning the URL
    /// on the [`Requirement`], if any.
    pub(crate) fn from_requirement(
        requirement: &Requirement,
        extra: Option<ExtraName>,
        group: Option<GroupName>,
    ) -> Self {
        // For each URL-based source, extract the verbatim URL alongside a structured
        // `ParsedUrl`; registry requirements are delegated below and carry no URL.
        let (verbatim_url, parsed_url) = match &requirement.source {
            RequirementSource::Registry { specifier, .. } => {
                return Self::from_registry_requirement(specifier, extra, group, requirement);
            }
            RequirementSource::Url {
                subdirectory,
                location,
                ext,
                url,
            } => {
                let parsed_url = ParsedUrl::Archive(ParsedArchiveUrl::from_source(
                    location.clone(),
                    subdirectory.clone(),
                    *ext,
                ));
                (url, parsed_url)
            }
            RequirementSource::Git {
                git,
                url,
                subdirectory,
            } => {
                let parsed_url =
                    ParsedUrl::Git(ParsedGitUrl::from_source(git.clone(), subdirectory.clone()));
                (url, parsed_url)
            }
            RequirementSource::Path {
                ext,
                url,
                install_path,
            } => {
                let parsed_url = ParsedUrl::Path(ParsedPathUrl::from_source(
                    install_path.clone(),
                    *ext,
                    url.to_url(),
                ));
                (url, parsed_url)
            }
            RequirementSource::Directory {
                editable,
                r#virtual,
                url,
                install_path,
            } => {
                let parsed_url = ParsedUrl::Directory(ParsedDirectoryUrl::from_source(
                    install_path.clone(),
                    *editable,
                    *r#virtual,
                    url.to_url(),
                ));
                (url, parsed_url)
            }
        };
        Self {
            package: PubGrubPackage::from_package(
                requirement.name.clone(),
                extra,
                group,
                requirement.marker,
            ),
            // URL requirements pin an exact artifact, so the version range is unconstrained.
            version: Ranges::full(),
            url: Some(VerbatimParsedUrl {
                parsed_url,
                verbatim: verbatim_url.clone(),
            }),
        }
    }
    /// Convert a registry-based (version-specifier) requirement. Registry requirements
    /// carry no URL; the range is derived from the specifier.
    fn from_registry_requirement(
        specifier: &VersionSpecifiers,
        extra: Option<ExtraName>,
        group: Option<GroupName>,
        requirement: &Requirement,
    ) -> Self {
        Self {
            package: PubGrubPackage::from_package(
                requirement.name.clone(),
                extra,
                group,
                requirement.marker,
            ),
            url: None,
            version: Ranges::from(specifier.clone()),
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/index.rs | crates/uv-resolver/src/resolver/index.rs | use std::hash::BuildHasherDefault;
use std::sync::Arc;
use rustc_hash::FxHasher;
use uv_distribution_types::{IndexUrl, VersionId};
use uv_normalize::PackageName;
use uv_once_map::OnceMap;
use crate::resolver::provider::{MetadataResponse, VersionsResponse};
/// In-memory index of package metadata.
///
/// Cloning is cheap: all state lives behind a shared [`Arc`].
#[derive(Default, Clone)]
pub struct InMemoryIndex(Arc<SharedInMemoryIndex>);
#[derive(Default)]
struct SharedInMemoryIndex {
    /// A map from package name to the metadata for that package and the index where the metadata
    /// came from.
    implicit: FxOnceMap<PackageName, Arc<VersionsResponse>>,
    /// Like `implicit`, but keyed by `(package name, index URL)`.
    explicit: FxOnceMap<(PackageName, IndexUrl), Arc<VersionsResponse>>,
    /// A map from version ID to metadata for that distribution.
    distributions: FxOnceMap<VersionId, Arc<MetadataResponse>>,
}
/// A [`OnceMap`] using the fast (non-DoS-resistant) `FxHasher`.
pub(crate) type FxOnceMap<K, V> = OnceMap<K, V, BuildHasherDefault<FxHasher>>;
impl InMemoryIndex {
    /// Returns a reference to the package metadata map.
    pub fn implicit(&self) -> &FxOnceMap<PackageName, Arc<VersionsResponse>> {
        &self.0.implicit
    }
    /// Returns a reference to the package metadata map keyed by `(package name, index URL)`.
    pub fn explicit(&self) -> &FxOnceMap<(PackageName, IndexUrl), Arc<VersionsResponse>> {
        &self.0.explicit
    }
    /// Returns a reference to the distribution metadata map.
    pub fn distributions(&self) -> &FxOnceMap<VersionId, Arc<MetadataResponse>> {
        &self.0.distributions
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/environment.rs | crates/uv-resolver/src/resolver/environment.rs | use std::collections::BTreeSet;
use std::sync::Arc;
use itertools::Itertools;
use tracing::trace;
use uv_distribution_types::{RequiresPython, RequiresPythonRange};
use uv_pep440::VersionSpecifiers;
use uv_pep508::{MarkerEnvironment, MarkerTree};
use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKind, ResolverMarkerEnvironment};
use crate::pubgrub::{PubGrubDependency, PubGrubPackage};
use crate::resolver::ForkState;
use crate::universal_marker::{ConflictMarker, UniversalMarker};
use crate::{PythonRequirement, ResolveError};
/// Represents one or more marker environments for a resolution.
///
/// Dependencies outside of the marker environments represented by this value
/// are ignored for that particular resolution.
///
/// In normal "pip"-style resolution, one resolver environment corresponds to
/// precisely one marker environment. In universal resolution, multiple marker
/// environments may be specified via a PEP 508 marker expression. In either
/// case, as mentioned above, dependencies not in these marker environments are
/// ignored for the corresponding resolution.
///
/// Callers must provide this to the resolver to indicate, broadly, what kind
/// of resolution it will produce. Generally speaking, callers should provide
/// a specific marker environment for `uv pip`-style resolutions and ask for a
/// universal resolution for uv's project based commands like `uv lock`.
///
/// Callers can rely on this type being reasonably cheap to clone.
///
/// # Internals
///
/// Inside the resolver, when doing a universal resolution, it may create
/// many "forking" states to deal with the fact that there may be multiple
/// incompatible dependency specifications. Specifically, in the Python world,
/// the main constraint is that for any one *specific* marker environment,
/// there must be only one version of a package in a corresponding resolution.
/// But when doing a universal resolution, we want to support many marker
/// environments, and in this context, the "universal" resolution may contain
/// multiple versions of the same package. This is allowed so long as, for
/// any marker environment supported by this resolution, an installation will
/// select at most one version of any given package.
///
/// During resolution, a `ResolverEnvironment` is attached to each internal
/// fork. For non-universal or "specific" resolution, there is only ever one
/// fork because a `ResolverEnvironment` corresponds to one and exactly one
/// marker environment. For universal resolution, the resolver may choose
/// to split its execution into multiple branches. Each of those branches
/// (also called "forks" or "splits") will get its own marker expression that
/// represents a set of marker environments that is guaranteed to be disjoint
/// with the marker environments described by the marker expressions of all
/// other branches.
///
/// Whether it's universal resolution or not, and whether it's one of many
/// forks or one fork, this type represents the set of possible dependency
/// specifications allowed in the resolution produced by a single fork.
///
/// An exception to this is `requires-python`. That is handled separately and
/// explicitly by the resolver. (Perhaps a future refactor can incorporate
/// `requires-python` into this type as well, but it's not totally clear at
/// time of writing if that's a good idea or not.)
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ResolverEnvironment {
    // Private on purpose: callers should go through the higher-level methods
    // rather than matching on the kind directly (see the `Kind` docs).
    kind: Kind,
}
/// The specific kind of resolver environment.
///
/// Note that it is explicitly intended that this type remain unexported from
/// this module. The motivation for this design is to discourage repeated case
/// analysis on this type, and instead try to encapsulate the case analysis via
/// higher level routines on `ResolverEnvironment` itself. (This goal may prove
/// intractable, so don't treat it like gospel.)
#[derive(Clone, Debug, Eq, PartialEq)]
enum Kind {
    /// We're solving for one specific marker environment only.
    ///
    /// Generally, this is what's done for `uv pip`. For the project based
    /// commands, like `uv lock`, we do universal resolution.
    Specific {
        /// The marker environment being resolved for.
        ///
        /// Any dependency specification that isn't satisfied by this marker
        /// environment is ignored.
        marker_env: ResolverMarkerEnvironment,
    },
    /// We're solving for all possible marker environments.
    Universal {
        /// The initial set of "fork preferences." These will come from the
        /// lock file when available, or the list of supported environments
        /// explicitly written into the `pyproject.toml`.
        ///
        /// Note that this may be empty, which means resolution should begin
        /// with no forks. Or equivalently, a single fork whose marker
        /// expression matches all marker environments.
        initial_forks: Arc<[MarkerTree]>,
        /// The markers associated with this resolver fork.
        markers: MarkerTree,
        /// Conflicting group inclusions.
        ///
        /// Note that inclusions don't play a role in predicates
        /// like `ResolverEnvironment::included_by_group`. Instead,
        /// only exclusions are considered.
        ///
        /// We record inclusions for two reasons. First is that if
        /// we somehow wind up with an inclusion and exclusion rule
        /// for the same conflict item, then we treat the resulting
        /// fork as impossible. (You cannot require that an extra is
        /// both included and excluded. Such a rule can never be
        /// satisfied.) Second is that we use the inclusion rules to
        /// write conflict markers after resolution is finished.
        include: Arc<crate::FxHashbrownSet<ConflictItem>>,
        /// Conflicting group exclusions.
        ///
        /// Consulted by [`ResolverEnvironment::included_by_group`].
        exclude: Arc<crate::FxHashbrownSet<ConflictItem>>,
    },
}
impl ResolverEnvironment {
/// Create a resolver environment that is fixed to one and only one marker
/// environment.
///
/// This enables `uv pip`-style resolutions. That is, the resolution
/// returned is only guaranteed to be installable for this specific marker
/// environment.
pub fn specific(marker_env: ResolverMarkerEnvironment) -> Self {
let kind = Kind::Specific { marker_env };
Self { kind }
}
    /// Create a resolver environment for producing a multi-platform
    /// resolution.
    ///
    /// The set of marker expressions given corresponds to an initial
    /// seeded set of resolver branches. This might come from a lock file
    /// corresponding to the set of forks produced by a previous resolution, or
    /// it might come from a human crafted set of marker expressions.
    ///
    /// The "normal" case is that the initial forks are empty. When empty,
    /// resolution will create forks as needed to deal with potentially
    /// conflicting dependency specifications across distinct marker
    /// environments.
    ///
    /// The order of the initial forks is significant, although we don't
    /// guarantee any specific treatment (similar to, at time of writing, how
    /// the order of dependencies specified is also significant but has no
    /// specific guarantees around it). Changing the ordering can help when our
    /// custom fork prioritization fails.
    pub fn universal(initial_forks: Vec<MarkerTree>) -> Self {
        let kind = Kind::Universal {
            initial_forks: initial_forks.into(),
            // Start with the universe of all marker environments; forks narrow
            // this via `narrow_environment`.
            markers: MarkerTree::TRUE,
            include: Arc::new(crate::FxHashbrownSet::default()),
            exclude: Arc::new(crate::FxHashbrownSet::default()),
        };
        Self { kind }
    }
    /// Returns the marker environment corresponding to this resolver
    /// environment.
    ///
    /// This only returns a marker environment when resolving for a specific
    /// marker environment. i.e., A non-universal or "pip"-style resolution.
    pub fn marker_environment(&self) -> Option<&MarkerEnvironment> {
        match self.kind {
            // The return type relies on `ResolverMarkerEnvironment` coercing
            // to `MarkerEnvironment`.
            Kind::Specific { ref marker_env } => Some(marker_env),
            Kind::Universal { .. } => None,
        }
    }
    /// Returns `false` only when this environment is a fork and it is disjoint
    /// with the given marker.
    pub(crate) fn included_by_marker(&self, marker: MarkerTree) -> bool {
        match self.kind {
            // A specific environment never filters by fork markers.
            Kind::Specific { .. } => true,
            Kind::Universal { ref markers, .. } => !markers.is_disjoint(marker),
        }
    }
    /// Returns whether the given conflict item may be included in this
    /// resolver environment.
    ///
    /// This returns `false` only for a universal fork that explicitly
    /// excludes the item; inclusions are not consulted here (see the
    /// `include` field on `Kind::Universal`).
    pub(crate) fn included_by_group(&self, group: ConflictItemRef<'_>) -> bool {
        match self.kind {
            Kind::Specific { .. } => true,
            Kind::Universal { ref exclude, .. } => !exclude.contains(&group),
        }
    }
/// Returns the bounding Python versions that can satisfy this
/// resolver environment's marker, if it's constrained.
pub(crate) fn requires_python(&self) -> Option<RequiresPythonRange> {
let Kind::Universal {
markers: pep508_marker,
..
} = self.kind
else {
return None;
};
crate::marker::requires_python(pep508_marker)
}
/// For a universal resolution, return the markers of the current fork.
pub(crate) fn fork_markers(&self) -> Option<MarkerTree> {
match self.kind {
Kind::Specific { .. } => None,
Kind::Universal { markers, .. } => Some(markers),
}
}
/// Narrow this environment given the forking markers.
///
/// This effectively intersects any markers in this environment with the
/// markers given, and returns the new resulting environment.
///
/// This is also useful in tests to generate a "forked" marker environment.
///
/// # Panics
///
/// This panics if the resolver environment corresponds to one and only one
/// specific marker environment. i.e., "pip"-style resolution.
fn narrow_environment(&self, rhs: MarkerTree) -> Self {
match self.kind {
Kind::Specific { .. } => {
unreachable!("environment narrowing only happens in universal resolution")
}
Kind::Universal {
ref initial_forks,
markers: ref lhs,
ref include,
ref exclude,
} => {
let mut markers = *lhs;
markers.and(rhs);
let kind = Kind::Universal {
initial_forks: Arc::clone(initial_forks),
markers,
include: Arc::clone(include),
exclude: Arc::clone(exclude),
};
Self { kind }
}
}
}
    /// Returns a new resolver environment with the given groups included or
    /// excluded from it. An `Ok` variant indicates an include rule while an
    /// `Err` variant indicates an exclude rule.
    ///
    /// When a group is excluded from a resolver environment,
    /// `ResolverEnvironment::included_by_group` will return false. The idea
    /// is that a dependency with a corresponding group should be excluded by
    /// forks in the resolver with this environment. (Include rules have no
    /// effect in `included_by_group` since, for the purposes of conflicts
    /// during resolution, we only care about what *isn't* allowed.)
    ///
    /// If calling this routine results in the same conflict item being both
    /// included and excluded, then this returns `None` (since it would
    /// otherwise result in a fork that can never be satisfied).
    ///
    /// # Panics
    ///
    /// This panics if the resolver environment corresponds to one and only one
    /// specific marker environment. i.e., "pip"-style resolution.
    pub(crate) fn filter_by_group(
        &self,
        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
    ) -> Option<Self> {
        match self.kind {
            Kind::Specific { .. } => {
                unreachable!("environment narrowing only happens in universal resolution")
            }
            Kind::Universal {
                ref initial_forks,
                ref markers,
                ref include,
                ref exclude,
            } => {
                // Copy-on-write: clone the shared sets so this fork gets its
                // own include/exclude state without mutating siblings.
                let mut include: crate::FxHashbrownSet<_> = (**include).clone();
                let mut exclude: crate::FxHashbrownSet<_> = (**exclude).clone();
                for rule in rules {
                    match rule {
                        // Include rule: contradictory with a prior exclusion
                        // of the same item, so the fork is unsatisfiable.
                        Ok(item) => {
                            if exclude.contains(&item) {
                                return None;
                            }
                            include.insert(item);
                        }
                        // Exclude rule: contradictory with a prior inclusion
                        // of the same item, so the fork is unsatisfiable.
                        Err(item) => {
                            if include.contains(&item) {
                                return None;
                            }
                            exclude.insert(item);
                        }
                    }
                }
                let kind = Kind::Universal {
                    initial_forks: Arc::clone(initial_forks),
                    markers: *markers,
                    include: Arc::new(include),
                    exclude: Arc::new(exclude),
                };
                Some(Self { kind })
            }
        }
    }
    /// Create an initial set of forked states based on this resolver
    /// environment configuration.
    ///
    /// In the "clean" universal case, this just returns a singleton `Vec` with
    /// the given fork state. But when the resolver is configured to start
    /// with an initial set of forked resolver states (e.g., those present in
    /// a lock file), then this creates the initial set of forks from that
    /// configuration.
    ///
    /// Returns an error if the conflict portion of any initial fork's marker
    /// yields invalid filter rules.
    pub(crate) fn initial_forked_states(
        &self,
        init: ForkState,
    ) -> Result<Vec<ForkState>, ResolveError> {
        // A specific ("pip"-style) resolution never forks.
        let Kind::Universal {
            ref initial_forks,
            markers: ref _markers,
            include: ref _include,
            exclude: ref _exclude,
        } = self.kind
        else {
            return Ok(vec![init]);
        };
        if initial_forks.is_empty() {
            return Ok(vec![init]);
        }
        initial_forks
            .iter()
            // NOTE(review): iteration is reversed — presumably to preserve
            // the fork ordering produced by previous resolutions; confirm
            // before changing.
            .rev()
            .filter_map(|&initial_fork| {
                // Each initial fork is a combined (PEP 508 + conflict)
                // marker; split it back into its two components.
                let combined = UniversalMarker::from_combined(initial_fork);
                let (include, exclude) = match combined.conflict().filter_rules() {
                    Ok(rules) => rules,
                    Err(err) => return Some(Err(err)),
                };
                // `filter_by_group` returns `None` for an unsatisfiable
                // fork; `filter_map` drops it from the initial set.
                let mut env = self.filter_by_group(
                    include
                        .into_iter()
                        .map(Ok)
                        .chain(exclude.into_iter().map(Err)),
                )?;
                env = env.narrow_environment(combined.pep508());
                Some(Ok(init.clone().with_env(env)))
            })
            .collect()
    }
/// Narrow the [`PythonRequirement`] if this resolver environment
/// corresponds to a more constraining fork.
///
/// For example, if this is a fork where `python_version >= '3.12'` is
/// always true, and if the given python requirement (perhaps derived from
/// `Requires-Python`) is `>=3.10`, then this will "narrow" the requirement
/// to `>=3.12`, corresponding to the marker expression describing this
/// fork.
///
/// If this environment is not a fork, then this returns `None`.
pub(crate) fn narrow_python_requirement(
&self,
python_requirement: &PythonRequirement,
) -> Option<PythonRequirement> {
python_requirement.narrow(&self.requires_python()?)
}
    /// Returns a message formatted for end users representing a fork in the
    /// resolver.
    ///
    /// If this resolver environment does not correspond to a particular fork,
    /// then `None` is returned.
    ///
    /// This is useful in contexts where one wants to display a message
    /// relating to a particular fork, but either no message or an entirely
    /// different message when this isn't a fork.
    pub(crate) fn end_user_fork_display(&self) -> Option<String> {
        match &self.kind {
            Kind::Specific { .. } => None,
            Kind::Universal {
                initial_forks: _,
                markers,
                include,
                exclude,
            } => {
                // Render a conflict item as `pkg`, `pkg[extra]`, or
                // `pkg[group:name]` depending on its kind.
                let format_conflict_item = |conflict_item: &ConflictItem| {
                    format!(
                        "{}{}",
                        conflict_item.package(),
                        match conflict_item.kind() {
                            ConflictKind::Extra(extra) => format!("[{extra}]"),
                            ConflictKind::Group(group) => {
                                format!("[group:{group}]")
                            }
                            ConflictKind::Project => String::new(),
                        }
                    )
                };
                // Trivial markers and empty include/exclude sets mean this
                // isn't actually a fork: nothing to describe.
                if markers.is_true() && include.is_empty() && exclude.is_empty() {
                    return None;
                }
                let mut descriptors = Vec::new();
                if !markers.is_true() {
                    descriptors.push(format!("markers: {markers:?}"));
                }
                if !include.is_empty() {
                    descriptors.push(format!(
                        "included: {}",
                        // Sort to ensure stable error messages
                        include
                            .iter()
                            .map(format_conflict_item)
                            .collect::<BTreeSet<_>>()
                            .into_iter()
                            .join(", "),
                    ));
                }
                if !exclude.is_empty() {
                    descriptors.push(format!(
                        "excluded: {}",
                        // Sort to ensure stable error messages
                        exclude
                            .iter()
                            .map(format_conflict_item)
                            .collect::<BTreeSet<_>>()
                            .into_iter()
                            .join(", "),
                    ));
                }
                Some(format!("split ({})", descriptors.join("; ")))
            }
        }
    }
/// Creates a universal marker expression corresponding to the fork that is
/// represented by this resolver environment. A universal marker includes
/// not just the standard PEP 508 marker, but also a marker based on
/// conflicting extras/groups.
///
/// This returns `None` when this does not correspond to a fork.
pub(crate) fn try_universal_markers(&self) -> Option<UniversalMarker> {
match self.kind {
Kind::Specific { .. } => None,
Kind::Universal {
ref markers,
ref include,
ref exclude,
..
} => {
let mut conflict_marker = ConflictMarker::TRUE;
for item in exclude.iter() {
conflict_marker =
conflict_marker.and(ConflictMarker::from_conflict_item(item).negate());
}
for item in include.iter() {
conflict_marker = conflict_marker.and(ConflictMarker::from_conflict_item(item));
}
Some(UniversalMarker::new(*markers, conflict_marker))
}
}
}
}
/// A user visible representation of a resolver environment.
///
/// This is most useful in error and log messages.
impl std::fmt::Display for ResolverEnvironment {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match &self.kind {
            Kind::Specific { .. } => write!(f, "marker environment"),
            Kind::Universal { markers, .. } if markers.is_true() => {
                write!(f, "all marker environments")
            }
            Kind::Universal { markers, .. } => write!(f, "split `{markers:?}`"),
        }
    }
}
/// The different forking possibilities.
///
/// Upon seeing a dependency, when determining whether to fork, three
/// different cases are possible:
///
/// 1. Forking cannot be ruled out.
/// 2. The dependency is excluded by the "parent" fork.
/// 3. The dependency is unconditional and thus cannot provoke new forks.
///
/// This enum encapsulates those possibilities. In the first case, a helper is
/// returned to help manage the nuts and bolts of forking.
#[derive(Debug)]
pub(crate) enum ForkingPossibility<'d> {
    /// Forking cannot be ruled out; the [`Forker`] drives the actual split.
    Possible(Forker<'d>),
    /// The dependency's marker is disjoint with the current fork, so the
    /// dependency can never apply here.
    DependencyAlwaysExcluded,
    /// The dependency's marker is always true, so it cannot provoke a fork.
    NoForkingPossible,
}
impl<'d> ForkingPossibility<'d> {
    /// Classify whether the given dependency could cause a fork in the
    /// given resolver environment, based solely on the dependency's marker.
    pub(crate) fn new(env: &ResolverEnvironment, dep: &'d PubGrubDependency) -> Self {
        let marker = dep.package.marker();
        // The marker can never be satisfied in this environment.
        if !env.included_by_marker(marker) {
            return ForkingPossibility::DependencyAlwaysExcluded;
        }
        // An unconditional dependency cannot provoke a fork.
        if marker.is_true() {
            return ForkingPossibility::NoForkingPossible;
        }
        ForkingPossibility::Possible(Forker {
            package: &dep.package,
            marker,
        })
    }
}
/// An encapsulation of forking based on a single dependency.
#[derive(Debug)]
pub(crate) struct Forker<'d> {
    /// The dependency's package, borrowed from the originating
    /// `PubGrubDependency`.
    package: &'d PubGrubPackage,
    /// The marker under which the dependency applies; `fork` splits an
    /// environment on this marker and its negation.
    marker: MarkerTree,
}
impl Forker<'_> {
    /// Attempt a fork based on the given resolver environment.
    ///
    /// If a fork is possible, then a new forker and at least one new
    /// resolver environment is returned. In some cases, it is possible for
    /// more resolver environments to be returned. (For example, when the
    /// negation of this forker's markers has overlap with the given resolver
    /// environment.)
    ///
    /// # Panics
    ///
    /// Panics if `env` is not a universal resolver environment.
    pub(crate) fn fork(
        &self,
        env: &ResolverEnvironment,
    ) -> Option<(Self, Vec<ResolverEnvironment>)> {
        // If this forker's marker can never apply in `env`, there is
        // nothing to split.
        if !env.included_by_marker(self.marker) {
            return None;
        }
        let Kind::Universal {
            markers: ref env_marker,
            ..
        } = env.kind
        else {
            panic!("resolver must be in universal mode for forking")
        };
        let mut envs = vec![];
        {
            // Fork for the complement of the marker, but only when that
            // complement actually overlaps the current environment.
            let not_marker = self.marker.negate();
            if !env_marker.is_disjoint(not_marker) {
                envs.push(env.narrow_environment(not_marker));
            }
        }
        // Note also that we push this one last for historical reasons.
        // Changing the order of forks can change the output in some
        // ways. While it's probably fine, we try to avoid changing the
        // output.
        envs.push(env.narrow_environment(self.marker));
        // The remaining forker covers the part of this marker that lies
        // outside the environment we just split.
        let mut remaining_marker = self.marker;
        remaining_marker.and(env_marker.negate());
        let remaining_forker = Forker {
            package: self.package,
            marker: remaining_marker,
        };
        Some((remaining_forker, envs))
    }
    /// Returns true if the dependency represented by this forker may be
    /// included in the given resolver environment.
    ///
    /// NOTE(review): this consults the package's own marker rather than
    /// `self.marker`, which `fork` may have narrowed — confirm that
    /// distinction is intentional before unifying the two.
    pub(crate) fn included(&self, env: &ResolverEnvironment) -> bool {
        let marker = self.package.marker();
        env.included_by_marker(marker)
    }
}
/// Fork the resolver based on a `Requires-Python` specifier.
///
/// Returns the narrowed environments for the lower and upper splits of the
/// current Python requirement, or an empty list when the requirement can't
/// be split at the specifier's lower bound.
///
/// # Panics
///
/// Panics if `env` is not a universal resolver environment.
pub(crate) fn fork_version_by_python_requirement(
    requires_python: &VersionSpecifiers,
    python_requirement: &PythonRequirement,
    env: &ResolverEnvironment,
) -> Vec<ResolverEnvironment> {
    let requires_python = RequiresPython::from_specifiers(requires_python);
    let lower = requires_python.range().lower().clone();
    // Attempt to split the current Python requirement based on the `requires-python` specifier.
    //
    // For example, if the current requirement is `>=3.10`, and the split point is `>=3.11`, then
    // the result will be `>=3.10 and <3.11` and `>=3.11`.
    //
    // However, if the current requirement is `>=3.10`, and the split point is `>=3.9`, then the
    // lower segment will be empty, so we should return an empty list.
    let Some((lower, upper)) = python_requirement.split(lower.into()) else {
        trace!(
            "Unable to split Python requirement `{}` via `Requires-Python` specifier `{}`",
            python_requirement.target(),
            requires_python,
        );
        return vec![];
    };
    let Kind::Universal {
        markers: ref env_marker,
        ..
    } = env.kind
    else {
        panic!("resolver must be in universal mode for forking")
    };
    let mut envs = vec![];
    // Only keep the splits that actually overlap the current fork's markers.
    if !env_marker.is_disjoint(lower.to_marker_tree()) {
        envs.push(env.narrow_environment(lower.to_marker_tree()));
    }
    if !env_marker.is_disjoint(upper.to_marker_tree()) {
        envs.push(env.narrow_environment(upper.to_marker_tree()));
    }
    debug_assert!(!envs.is_empty(), "at least one fork should be produced");
    envs
}
/// Fork the resolver based on a marker.
///
/// Splits `env` into the environment narrowed by `marker` and the
/// environment narrowed by its complement. For example, given
/// `python_version >= '3.10'` and the split marker `sys_platform == 'linux'`,
/// the two results are:
///
/// `python_version >= '3.10' and sys_platform == 'linux'`
/// `python_version >= '3.10' and sys_platform != 'linux'`
///
/// Returns `None` when either the marker or its complement is disjoint with
/// the current environment, since one side of the split would be
/// unsatisfiable. For example, given
/// `python_version >= '3.10' and sys_platform == 'linux'` and the split
/// marker `sys_platform == 'win32'`, there is no fork, since the following
/// isn't satisfiable:
///
/// python_version >= '3.10' and sys_platform == 'linux' and sys_platform == 'win32'
///
/// # Panics
///
/// Panics if `env` is not a universal resolver environment.
pub(crate) fn fork_version_by_marker(
    env: &ResolverEnvironment,
    marker: MarkerTree,
) -> Option<(ResolverEnvironment, ResolverEnvironment)> {
    let Kind::Universal {
        markers: ref env_marker,
        ..
    } = env.kind
    else {
        panic!("resolver must be in universal mode for forking")
    };
    // Both halves of the split must be satisfiable within the current
    // environment; otherwise there is nothing to fork.
    let complement = marker.negate();
    if env_marker.is_disjoint(marker) || env_marker.is_disjoint(complement) {
        return None;
    }
    let with_marker = env.narrow_environment(marker);
    let without_marker = env.narrow_environment(complement);
    Some((with_marker, without_marker))
}
#[cfg(test)]
mod tests {
    use std::ops::Bound;
    use std::sync::LazyLock;
    use uv_pep440::{LowerBound, UpperBound, Version};
    use uv_pep508::{MarkerEnvironment, MarkerEnvironmentBuilder};
    use uv_distribution_types::{RequiresPython, RequiresPythonRange};
    use super::*;
    /// A dummy marker environment used in tests below.
    ///
    /// It doesn't matter too much what we use here, and indeed, this one was
    /// copied from our uv microbenchmarks.
    static MARKER_ENV: LazyLock<MarkerEnvironment> = LazyLock::new(|| {
        MarkerEnvironment::try_from(MarkerEnvironmentBuilder {
            implementation_name: "cpython",
            implementation_version: "3.11.5",
            os_name: "posix",
            platform_machine: "arm64",
            platform_python_implementation: "CPython",
            platform_release: "21.6.0",
            platform_system: "Darwin",
            platform_version: "Darwin Kernel Version 21.6.0: Mon Aug 22 20:19:52 PDT 2022; root:xnu-8020.140.49~2/RELEASE_ARM64_T6000",
            python_full_version: "3.11.5",
            python_version: "3.11",
            sys_platform: "darwin",
        }).unwrap()
    });
    /// Shorthand for a `RequiresPython` with only a `>=` lower bound.
    fn requires_python_lower(lower_version_bound: &str) -> RequiresPython {
        RequiresPython::greater_than_equal_version(&version(lower_version_bound))
    }
    /// Shorthand for a `RequiresPythonRange` with an inclusive lower bound
    /// and an unbounded upper bound.
    fn requires_python_range_lower(lower_version_bound: &str) -> RequiresPythonRange {
        let lower = LowerBound::new(Bound::Included(version(lower_version_bound)));
        RequiresPythonRange::new(lower, UpperBound::default())
    }
    /// Parse a PEP 508 marker expression, panicking on invalid input.
    fn marker(marker: &str) -> MarkerTree {
        marker
            .parse::<MarkerTree>()
            .expect("valid pep508 marker expression")
    }
    /// Parse a PEP 440 version, panicking on invalid input.
    fn version(v: &str) -> Version {
        v.parse().expect("valid pep440 version string")
    }
    /// Build a `PythonRequirement` with the given `>=` target against the
    /// shared dummy marker environment.
    fn python_requirement(python_version_greater_than_equal: &str) -> PythonRequirement {
        let requires_python = requires_python_lower(python_version_greater_than_equal);
        PythonRequirement::from_marker_environment(&MARKER_ENV, requires_python)
    }
    /// Tests that narrowing a Python requirement when resolving for a
    /// specific marker environment never produces a more constrained Python
    /// requirement.
    #[test]
    fn narrow_python_requirement_specific() {
        let resolver_marker_env = ResolverMarkerEnvironment::from(MARKER_ENV.clone());
        let resolver_env = ResolverEnvironment::specific(resolver_marker_env);
        let pyreq = python_requirement("3.10");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
        let pyreq = python_requirement("3.11");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
        let pyreq = python_requirement("3.12");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
    }
    /// Tests that narrowing a Python requirement during a universal resolution
    /// *without* any forks will never produce a more constrained Python
    /// requirement.
    #[test]
    fn narrow_python_requirement_universal() {
        let resolver_env = ResolverEnvironment::universal(vec![]);
        let pyreq = python_requirement("3.10");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
        let pyreq = python_requirement("3.11");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
        let pyreq = python_requirement("3.12");
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
    }
    /// Inside a fork whose marker's Python requirement is equal
    /// to our Requires-Python means that narrowing does not produce
    /// a result.
    #[test]
    fn narrow_python_requirement_forking_no_op() {
        let pyreq = python_requirement("3.10");
        let resolver_env = ResolverEnvironment::universal(vec![])
            .narrow_environment(marker("python_version >= '3.10'"));
        assert_eq!(resolver_env.narrow_python_requirement(&pyreq), None);
    }
    /// In this test, we narrow a more relaxed requirement compared to the
    /// marker for the current fork. This in turn results in a stricter
    /// requirement corresponding to what's specified in the fork.
    #[test]
    fn narrow_python_requirement_forking_stricter() {
        let pyreq = python_requirement("3.10");
        let resolver_env = ResolverEnvironment::universal(vec![])
            .narrow_environment(marker("python_version >= '3.11'"));
        let expected = {
            let range = requires_python_range_lower("3.11");
            let requires_python = requires_python_lower("3.10").narrow(&range).unwrap();
            PythonRequirement::from_marker_environment(&MARKER_ENV, requires_python)
        };
        assert_eq!(
            resolver_env.narrow_python_requirement(&pyreq),
            Some(expected)
        );
    }
    /// In this test, we narrow a stricter requirement compared to the marker
    /// for the current fork. This in turn results in a requirement that
    /// remains unchanged.
    #[test]
    fn narrow_python_requirement_forking_relaxed() {
        let pyreq = python_requirement("3.11");
        let resolver_env = ResolverEnvironment::universal(vec![])
            .narrow_environment(marker("python_version >= '3.10'"));
        assert_eq!(
            resolver_env.narrow_python_requirement(&pyreq),
            Some(python_requirement("3.11")),
        );
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/indexes.rs | crates/uv-resolver/src/resolver/indexes.rs | use uv_distribution_types::{IndexMetadata, RequirementSource};
use uv_normalize::PackageName;
use uv_pypi_types::ConflictItem;
use crate::resolver::ForkMap;
use crate::{DependencyMode, Manifest, ResolverEnvironment};
/// A map of package names to their explicit index.
///
/// For example, given:
/// ```toml
/// [[tool.uv.index]]
/// name = "pytorch"
/// url = "https://download.pytorch.org/whl/cu121"
///
/// [tool.uv.sources]
/// torch = { index = "pytorch" }
/// ```
///
/// [`Indexes`] would contain a single entry mapping `torch` to `https://download.pytorch.org/whl/cu121`.
///
/// Entries are stored in a [`ForkMap`], so index assignments can vary by
/// resolver fork (see [`Indexes::get`]).
#[derive(Debug, Default, Clone)]
pub(crate) struct Indexes(ForkMap<Entry>);
/// A single explicit-index assignment for a package.
#[derive(Debug, Clone)]
struct Entry {
    /// The index from which the package should be resolved.
    index: IndexMetadata,
    /// If set, the conflict item (extra or group) that must not be excluded
    /// from the current fork for this assignment to apply (see `Indexes::get`).
    conflict: Option<ConflictItem>,
}
impl Indexes {
    /// Determine the set of explicit, pinned indexes in the [`Manifest`].
    pub(crate) fn from_manifest(
        manifest: &Manifest,
        env: &ResolverEnvironment,
        dependencies: DependencyMode,
    ) -> Self {
        let mut map = ForkMap::default();
        for requirement in manifest.requirements(env, dependencies) {
            // Only registry requirements that pin an explicit index apply.
            if let RequirementSource::Registry {
                index: Some(index),
                conflict,
                ..
            } = &requirement.source
            {
                let entry = Entry {
                    index: index.clone(),
                    conflict: conflict.clone(),
                };
                map.add(&requirement, entry);
            }
        }
        Self(map)
    }
    /// Returns `true` if the map contains any indexes for a package.
    pub(crate) fn contains_key(&self, name: &PackageName) -> bool {
        self.0.contains_key(name)
    }
    /// Return the explicit indexes used for a package in the given fork,
    /// skipping entries whose conflict item is excluded from the fork.
    pub(crate) fn get(&self, name: &PackageName, env: &ResolverEnvironment) -> Vec<&IndexMetadata> {
        let mut indexes = Vec::new();
        for entry in self.0.get(name, env) {
            let applies = match entry.conflict.as_ref() {
                None => true,
                Some(conflict) => env.included_by_group(conflict.as_ref()),
            };
            if applies {
                indexes.push(&entry.index);
            }
        }
        indexes
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/batch_prefetch.rs | crates/uv-resolver/src/resolver/batch_prefetch.rs | use std::cmp::min;
use std::sync::Arc;
use itertools::Itertools;
use pubgrub::{Range, Ranges, Term};
use rustc_hash::{FxHashMap, FxHashSet};
use tokio::sync::mpsc::Sender;
use tracing::{debug, trace};
use crate::candidate_selector::CandidateSelector;
use crate::pubgrub::{PubGrubPackage, PubGrubPackageInner};
use crate::resolver::Request;
use crate::{
InMemoryIndex, PythonRequirement, ResolveError, ResolverEnvironment, VersionsResponse,
};
use uv_distribution_types::{
CompatibleDist, DistributionMetadata, IndexCapabilities, IndexMetadata,
};
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_pep508::MarkerTree;
/// How the next batch of versions to prefetch is chosen.
enum BatchPrefetchStrategy {
    /// Go through the next versions assuming the existing selection and its constraints
    /// remain.
    Compatible {
        // The versions still considered compatible; shrinks as candidates are taken.
        compatible: Range<Version>,
        // The last version handed out under this strategy.
        previous: Version,
    },
    /// We encounter cases (botocore) where the above doesn't work: Say we previously selected
    /// a==x.y.z, which depends on b==x.y.z. a==x.y.z is incompatible, but we don't know that
    /// yet. We just selected b==x.y.z and want to prefetch, since for all versions of a we try,
    /// we have to wait for the matching version of b. The existing range gives us only one version
    /// of b, so the compatible strategy doesn't prefetch any version. Instead, we try the next
    /// heuristic where the next version of b will be x.y.(z-1) and so forth.
    InOrder { previous: Version },
}
/// Prefetch a large number of versions if we already unsuccessfully tried many versions.
///
/// This is an optimization specifically targeted at cold cache urllib3/boto3/botocore, where we
/// have to fetch the metadata for a lot of versions.
///
/// Note that these all heuristics that could totally prefetch lots of irrelevant versions.
#[derive(Clone)]
pub(crate) struct BatchPrefetcher {
    // Types to determine whether we need to prefetch.
    /// Every version tried so far, per package; its size drives the
    /// prefetch thresholds in `should_prefetch`.
    tried_versions: FxHashMap<PackageName, FxHashSet<Version>>,
    /// The tried-version count at which we last prefetched, per package.
    last_prefetch: FxHashMap<PackageName, usize>,
    // Types to execute the prefetch.
    prefetch_runner: BatchPrefetcherRunner,
}
/// The types that are needed for running the batch prefetching after we determined that we need to
/// prefetch.
///
/// These types are shared (e.g., `Arc`) so they can be cheaply cloned and moved between threads.
#[derive(Clone)]
pub(crate) struct BatchPrefetcherRunner {
    /// Registry capabilities, e.g., whether an index supports range requests.
    capabilities: IndexCapabilities,
    /// The shared in-memory index of version maps and distribution metadata.
    index: InMemoryIndex,
    /// Channel over which prefetch requests are sent to the fetch tasks.
    request_sink: Sender<Request>,
}
impl BatchPrefetcher {
    /// Create a prefetcher with empty bookkeeping and the given runner state.
    pub(crate) fn new(
        capabilities: IndexCapabilities,
        index: InMemoryIndex,
        request_sink: Sender<Request>,
    ) -> Self {
        Self {
            tried_versions: FxHashMap::default(),
            last_prefetch: FxHashMap::default(),
            prefetch_runner: BatchPrefetcherRunner {
                capabilities,
                index,
                request_sink,
            },
        }
    }
    /// Prefetch a large number of versions if we already unsuccessfully tried many versions.
    pub(crate) fn prefetch_batches(
        &mut self,
        next: &PubGrubPackage,
        index: Option<&IndexMetadata>,
        version: &Version,
        current_range: &Range<Version>,
        unchangeable_constraints: Option<&Term<Range<Version>>>,
        python_requirement: &PythonRequirement,
        selector: &CandidateSelector,
        env: &ResolverEnvironment,
    ) -> Result<(), ResolveError> {
        // Only prefetch for base packages: no extras, groups, or markers.
        let PubGrubPackageInner::Package {
            name,
            extra: None,
            group: None,
            marker: MarkerTree::TRUE,
        } = &**next
        else {
            return Ok(());
        };
        let (num_tried, do_prefetch) = self.should_prefetch(next);
        if !do_prefetch {
            return Ok(());
        }
        // Cap each batch at 50 versions.
        let total_prefetch = min(num_tried, 50);
        // This is immediate, we already fetched the version map.
        let versions_response = if let Some(index) = index {
            self.prefetch_runner
                .index
                .explicit()
                .wait_blocking(&(name.clone(), index.url().clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            self.prefetch_runner
                .index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        // Start in the `Compatible` phase; the runner falls back to
        // `InOrder` when the compatible range is exhausted.
        let phase = BatchPrefetchStrategy::Compatible {
            compatible: current_range.clone(),
            previous: version.clone(),
        };
        self.last_prefetch.insert(name.clone(), num_tried);
        self.prefetch_runner.send_prefetch(
            name,
            unchangeable_constraints,
            total_prefetch,
            &versions_response,
            phase,
            python_requirement,
            selector,
            env,
        )?;
        Ok(())
    }
    /// Each time we tried a version for a package, we register that here.
    pub(crate) fn version_tried(&mut self, package: &PubGrubPackage, version: &Version) {
        // Only track base packages, no virtual packages from extras.
        let PubGrubPackageInner::Package {
            name,
            extra: None,
            group: None,
            marker: MarkerTree::TRUE,
        } = &**package
        else {
            return;
        };
        self.tried_versions
            .entry(name.clone())
            .or_default()
            .insert(version.clone());
    }
    /// After 5, 10, 20, 40 tried versions, prefetch that many versions to start early but not
    /// too aggressive. Later we schedule the prefetch of 50 versions every 20 versions, this gives
    /// us a good buffer until we see prefetch again and is high enough to saturate the task pool.
    ///
    /// Returns the number of distinct versions tried so far and whether a
    /// prefetch should be triggered now.
    fn should_prefetch(&self, next: &PubGrubPackage) -> (usize, bool) {
        let PubGrubPackageInner::Package {
            name,
            extra: None,
            group: None,
            marker: MarkerTree::TRUE,
        } = &**next
        else {
            return (0, false);
        };
        let num_tried = self.tried_versions.get(name).map_or(0, FxHashSet::len);
        let previous_prefetch = self.last_prefetch.get(name).copied().unwrap_or_default();
        let do_prefetch = (num_tried >= 5 && previous_prefetch < 5)
            || (num_tried >= 10 && previous_prefetch < 10)
            || (num_tried >= 20 && previous_prefetch < 20)
            || (num_tried >= 20 && num_tried - previous_prefetch >= 20);
        (num_tried, do_prefetch)
    }
    /// Log stats about how many versions we tried.
    pub(crate) fn log_tried_versions(&self) {
        let total_versions: usize = self.tried_versions.values().map(FxHashSet::len).sum();
        let mut tried_versions: Vec<_> = self
            .tried_versions
            .iter()
            .map(|(name, versions)| (name, versions.len()))
            .collect();
        // Sort by count (descending), breaking ties by package name so the
        // log output is deterministic.
        tried_versions.sort_by(|(p1, c1), (p2, c2)| {
            c1.cmp(c2)
                .reverse()
                .then(p1.to_string().cmp(&p2.to_string()))
        });
        let counts = tried_versions
            .iter()
            .map(|(package, count)| format!("{package} {count}"))
            .join(", ");
        debug!("Tried {total_versions} versions: {counts}");
    }
}
impl BatchPrefetcherRunner {
    /// Given that the conditions for prefetching are met, find the versions to prefetch and
    /// send the prefetch requests.
    ///
    /// Walks up to `total_prefetch` candidates, first within the compatible
    /// range, then in version order, skipping distributions we won't fetch
    /// (source dists, registries without PEP 658/range support, versions
    /// outside the Python requirement).
    fn send_prefetch(
        &self,
        name: &PackageName,
        unchangeable_constraints: Option<&Term<Ranges<Version>>>,
        total_prefetch: usize,
        versions_response: &Arc<VersionsResponse>,
        mut phase: BatchPrefetchStrategy,
        python_requirement: &PythonRequirement,
        selector: &CandidateSelector,
        env: &ResolverEnvironment,
    ) -> Result<(), ResolveError> {
        let VersionsResponse::Found(version_map) = &**versions_response else {
            return Ok(());
        };
        let mut prefetch_count = 0;
        for _ in 0..total_prefetch {
            let candidate = match phase {
                BatchPrefetchStrategy::Compatible {
                    compatible,
                    previous,
                } => {
                    if let Some(candidate) =
                        selector.select_no_preference(name, &compatible, version_map, env)
                    {
                        // Remove the chosen version so the next iteration
                        // selects a different one.
                        let compatible = compatible.intersection(
                            &Range::singleton(candidate.version().clone()).complement(),
                        );
                        phase = BatchPrefetchStrategy::Compatible {
                            compatible,
                            previous: candidate.version().clone(),
                        };
                        candidate
                    } else {
                        // We exhausted the compatible version, switch to ignoring the existing
                        // constraints on the package and instead going through versions in order.
                        phase = BatchPrefetchStrategy::InOrder { previous };
                        continue;
                    }
                }
                BatchPrefetchStrategy::InOrder { previous } => {
                    // Walk away from `previous` in the direction the
                    // selector prefers (downward when picking highest).
                    let mut range = if selector.use_highest_version(name, env) {
                        Range::strictly_lower_than(previous)
                    } else {
                        Range::strictly_higher_than(previous)
                    };
                    // If we have constraints from root, don't go beyond those. Example: We are
                    // prefetching for foo 1.60 and have a dependency for `foo>=1.50`, so we should
                    // only prefetch 1.60 to 1.50, knowing 1.49 will always be rejected.
                    if let Some(unchangeable_constraints) = &unchangeable_constraints {
                        range = match unchangeable_constraints {
                            Term::Positive(constraints) => range.intersection(constraints),
                            Term::Negative(negative_constraints) => {
                                range.intersection(&negative_constraints.complement())
                            }
                        };
                    }
                    if let Some(candidate) =
                        selector.select_no_preference(name, &range, version_map, env)
                    {
                        phase = BatchPrefetchStrategy::InOrder {
                            previous: candidate.version().clone(),
                        };
                        candidate
                    } else {
                        // Both strategies exhausted their candidates.
                        break;
                    }
                }
            };
            let Some(dist) = candidate.compatible() else {
                continue;
            };
            // Avoid prefetching source distributions, which could be expensive.
            let Some(wheel) = dist.wheel() else {
                continue;
            };
            // Avoid prefetching built distributions that don't support _either_ PEP 658 (`.metadata`)
            // or range requests.
            if !(wheel.file.dist_info_metadata
                || self.capabilities.supports_range_requests(&wheel.index))
            {
                debug!("Abandoning prefetch for {wheel} due to missing registry capabilities");
                return Ok(());
            }
            // Avoid prefetching for distributions that don't satisfy the Python requirement.
            if !satisfies_python(dist, python_requirement) {
                continue;
            }
            let dist = dist.for_resolution();
            // Emit a request to fetch the metadata for this version.
            trace!(
                "Prefetching {prefetch_count} ({}) {}",
                match phase {
                    BatchPrefetchStrategy::Compatible { .. } => "compatible",
                    BatchPrefetchStrategy::InOrder { .. } => "in order",
                },
                dist
            );
            prefetch_count += 1;
            // Only send a request if nobody has registered this version yet.
            if self.index.distributions().register(candidate.version_id()) {
                let request = Request::from(dist);
                self.request_sink.blocking_send(request)?;
            }
        }
        match prefetch_count {
            0 => debug!("No `{name}` versions to prefetch"),
            1 => debug!("Prefetched 1 `{name}` version"),
            _ => debug!("Prefetched {prefetch_count} `{name}` versions"),
        }
        Ok(())
    }
}
/// Whether the distribution's `Requires-Python` (if any) is satisfied.
///
/// Source distributions must satisfy both the _target_ and the _installed_
/// Python version (the latter to build successfully); wheels only the
/// _target_ version. A missing `Requires-Python` always satisfies.
fn satisfies_python(dist: &CompatibleDist, python_requirement: &PythonRequirement) -> bool {
    match dist {
        // Already installed: nothing to check.
        CompatibleDist::InstalledDist(_) => true,
        CompatibleDist::SourceDist { sdist, .. }
        | CompatibleDist::IncompatibleWheel { sdist, .. } => sdist
            .file
            .requires_python
            .as_ref()
            .is_none_or(|requires_python| {
                python_requirement
                    .installed()
                    .is_contained_by(requires_python)
                    && python_requirement.target().is_contained_by(requires_python)
            }),
        CompatibleDist::CompatibleWheel { wheel, .. } => wheel
            .file
            .requires_python
            .as_ref()
            .is_none_or(|requires_python| {
                python_requirement.target().is_contained_by(requires_python)
            }),
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/availability.rs | crates/uv-resolver/src/resolver/availability.rs | use std::fmt::{Display, Formatter};
use std::iter;
use std::sync::Arc;
use uv_distribution_types::IncompatibleDist;
use uv_pep440::{Version, VersionSpecifiers};
use uv_platform_tags::{AbiTag, Tags};
use crate::resolver::{MetadataUnavailable, VersionFork};
/// The reason why a package or a version cannot be used.
///
/// Rendered for end users via the [`Display`] impl, which delegates to the
/// inner variant.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum UnavailableReason {
    /// The entire package cannot be used.
    Package(UnavailablePackage),
    /// A single version cannot be used.
    Version(UnavailableVersion),
}
/// Render the reason exactly as its inner variant renders itself.
impl Display for UnavailableReason {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Package(package) => Display::fmt(package, f),
            Self::Version(version) => Display::fmt(version, f),
        }
    }
}
/// The package version is unavailable and cannot be used. Unlike [`MetadataUnavailable`], this
/// applies to a single version of the package.
///
/// Most variants are from [`MetadataResponse`] without the error source, since we don't format
/// the source and we want to merge unavailable messages across versions.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum UnavailableVersion {
    /// Version is incompatible because it has no usable distributions.
    IncompatibleDist(IncompatibleDist),
    /// The wheel metadata was found, but could not be parsed.
    InvalidMetadata,
    /// The wheel metadata was found, but the metadata was inconsistent.
    InconsistentMetadata,
    /// The wheel has an invalid structure.
    InvalidStructure,
    /// The wheel metadata was not found in the cache and the network is not available.
    Offline,
    /// The source distribution has a `requires-python` requirement that is not met by the installed
    /// Python version (and static metadata is not available).
    RequiresPython(VersionSpecifiers),
}
impl UnavailableVersion {
    /// The reason rendered as a bare noun phrase (e.g., "invalid metadata").
    pub(crate) fn message(&self) -> String {
        match self {
            Self::IncompatibleDist(invalid_dist) => invalid_dist.to_string(),
            Self::InvalidMetadata => "invalid metadata".into(),
            Self::InconsistentMetadata => "inconsistent metadata".into(),
            Self::InvalidStructure => "an invalid package format".into(),
            Self::Offline => "to be downloaded from a registry".into(),
            Self::RequiresPython(requires_python) => format!("Python {requires_python}"),
        }
    }
    /// The reason phrased for a single version ("has …", "needs …", "requires …").
    pub(crate) fn singular_message(&self) -> String {
        match self {
            Self::IncompatibleDist(invalid_dist) => invalid_dist.singular_message(),
            Self::InvalidMetadata | Self::InconsistentMetadata | Self::InvalidStructure => {
                format!("has {self}")
            }
            Self::Offline => format!("needs {self}"),
            Self::RequiresPython(..) => format!("requires {self}"),
        }
    }
    /// The reason phrased for multiple versions ("have …", "need …", "require …").
    pub(crate) fn plural_message(&self) -> String {
        match self {
            Self::IncompatibleDist(invalid_dist) => invalid_dist.plural_message(),
            Self::InvalidMetadata | Self::InconsistentMetadata | Self::InvalidStructure => {
                format!("have {self}")
            }
            Self::Offline => format!("need {self}"),
            Self::RequiresPython(..) => format!("require {self}"),
        }
    }
    /// Optional extra context for the reason; only incompatible distributions
    /// carry any (derived from the tags and `requires-python` ABI).
    pub(crate) fn context_message(
        &self,
        tags: Option<&Tags>,
        requires_python: Option<AbiTag>,
    ) -> Option<String> {
        match self {
            Self::IncompatibleDist(invalid_dist) => {
                invalid_dist.context_message(tags, requires_python)
            }
            Self::InvalidMetadata
            | Self::InconsistentMetadata
            | Self::InvalidStructure
            | Self::Offline
            | Self::RequiresPython(..) => None,
        }
    }
}
/// Renders the version-level reason via [`UnavailableVersion::message`].
impl Display for UnavailableVersion {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.message())
    }
}
impl From<&MetadataUnavailable> for UnavailableVersion {
    /// Strip the error source: only the kind of unavailability is kept, so
    /// identical reasons can be merged across versions when reporting.
    fn from(reason: &MetadataUnavailable) -> Self {
        match reason {
            MetadataUnavailable::InvalidMetadata(_) => Self::InvalidMetadata,
            MetadataUnavailable::InconsistentMetadata(_) => Self::InconsistentMetadata,
            MetadataUnavailable::InvalidStructure(_) => Self::InvalidStructure,
            MetadataUnavailable::Offline => Self::Offline,
            // The `requires-python` specifiers are retained for display purposes.
            MetadataUnavailable::RequiresPython(requires_python, _python_version) => {
                Self::RequiresPython(requires_python.clone())
            }
        }
    }
}
/// Display the error chain for unavailable packages.
///
/// Wraps a shared error whose full `source()` chain is rendered line-by-line.
#[derive(Debug, Clone)]
pub struct UnavailableErrorChain(Arc<dyn std::error::Error + Send + Sync + 'static>);
impl Display for UnavailableErrorChain {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
for source in iter::successors(Some(&self.0 as &dyn std::error::Error), |&err| err.source())
{
writeln!(f, "Caused by: {}", source.to_string().trim())?;
}
Ok(())
}
}
impl PartialEq for UnavailableErrorChain {
    /// Whether we can collapse two reasons into one because they would be rendered the same.
    ///
    /// Compares the rendered chains, not the underlying error values; this
    /// allocates two strings per comparison.
    fn eq(&self, other: &Self) -> bool {
        self.to_string() == other.to_string()
    }
}
// String equality is reflexive, symmetric, and transitive, so `Eq` holds.
impl Eq for UnavailableErrorChain {}
/// The package is unavailable and cannot be used.
///
/// Unlike [`UnavailableVersion`], this rules out every version of the package.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum UnavailablePackage {
    /// Index lookups were disabled (i.e., `--no-index`) and the package was not found in a flat index (i.e. from `--find-links`).
    NoIndex,
    /// Network requests were disabled (i.e., `--offline`), and the package was not found in the cache.
    Offline,
    /// The package was not found in the registry.
    NotFound,
    /// The package metadata was found, but could not be parsed.
    InvalidMetadata(UnavailableErrorChain),
    /// The package has an invalid structure.
    InvalidStructure(UnavailableErrorChain),
}
impl UnavailablePackage {
    /// The reason rendered as a bare noun phrase (e.g., "not found in the cache").
    pub(crate) fn message(&self) -> &'static str {
        match self {
            Self::NoIndex => "not found in the provided package locations",
            Self::Offline => "not found in the cache",
            Self::NotFound => "not found in the package registry",
            Self::InvalidMetadata(_) => "invalid metadata",
            Self::InvalidStructure(_) => "an invalid package format",
        }
    }
    /// The reason phrased as a full predicate ("was …" / "has …").
    pub(crate) fn singular_message(&self) -> String {
        match self {
            // Lookup failures read as "was not found …".
            Self::NoIndex | Self::Offline | Self::NotFound => format!("was {self}"),
            // Content failures read as "has invalid …".
            Self::InvalidMetadata(_) | Self::InvalidStructure(_) => format!("has {self}"),
        }
    }
}
/// Renders the package-level reason via [`UnavailablePackage::message`].
impl Display for UnavailablePackage {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.message())
    }
}
impl From<&MetadataUnavailable> for UnavailablePackage {
    /// Lift a per-version metadata failure to a package-level reason, keeping
    /// the error chain for display.
    fn from(reason: &MetadataUnavailable) -> Self {
        match reason {
            MetadataUnavailable::Offline => Self::Offline,
            MetadataUnavailable::InvalidMetadata(err) => {
                Self::InvalidMetadata(UnavailableErrorChain(err.clone()))
            }
            // Inconsistent metadata is collapsed into the invalid-metadata
            // reason at the package level.
            MetadataUnavailable::InconsistentMetadata(err) => {
                Self::InvalidMetadata(UnavailableErrorChain(err.clone()))
            }
            MetadataUnavailable::InvalidStructure(err) => {
                Self::InvalidStructure(UnavailableErrorChain(err.clone()))
            }
            MetadataUnavailable::RequiresPython(..) => {
                unreachable!("`requires-python` is only known upfront for registry distributions")
            }
        }
    }
}
/// The outcome of selecting a version for a package during resolution.
#[derive(Debug, Clone)]
pub(crate) enum ResolverVersion {
    /// A version that is not usable for some reason.
    Unavailable(Version, UnavailableVersion),
    /// A usable version.
    Unforked(Version),
    /// A set of forks, optionally with resolved versions.
    Forked(Vec<VersionFork>),
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/system.rs | crates/uv-resolver/src/resolver/system.rs | use std::str::FromStr;
use pubgrub::Ranges;
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_redacted::DisplaySafeUrl;
use uv_torch::TorchBackend;
use crate::pubgrub::{PubGrubDependency, PubGrubPackage, PubGrubPackageInner};
/// A non-Python dependency implied by an index URL (e.g., a CUDA or ROCm
/// runtime implied by a PyTorch index).
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) struct SystemDependency {
    /// The name of the system dependency (e.g., `cuda`).
    name: PackageName,
    /// The version of the system dependency (e.g., `12.4`).
    version: Version,
}
impl SystemDependency {
    /// Extract a [`SystemDependency`] from an index URL.
    ///
    /// For example, given `https://download.pytorch.org/whl/cu124`, returns CUDA 12.4.
    /// Returns `None` when the URL is not a recognized Torch index, or the
    /// backend implies neither a CUDA nor a ROCm version.
    pub(super) fn from_index(index: &DisplaySafeUrl) -> Option<Self> {
        let backend = TorchBackend::from_index(index)?;
        // Prefer CUDA if the backend advertises one.
        if let Some(version) = backend.cuda_version() {
            return Some(Self {
                name: PackageName::from_str("cuda").unwrap(),
                version,
            });
        }
        // Otherwise, fall back to ROCm (or nothing at all).
        let version = backend.rocm_version()?;
        Some(Self {
            name: PackageName::from_str("rocm").unwrap(),
            version,
        })
    }
}
/// Renders as `<name>@<version>` (e.g., `cuda@12.4`).
impl std::fmt::Display for SystemDependency {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}@{}", self.name, self.version)
    }
}
impl From<SystemDependency> for PubGrubDependency {
    /// Convert a system dependency into a PubGrub dependency with no parent
    /// package or URL attached.
    fn from(value: SystemDependency) -> Self {
        Self {
            package: PubGrubPackage::from(PubGrubPackageInner::System(value.name)),
            // A system dependency has one concrete version, so the allowed
            // range is the singleton containing exactly that version.
            version: Ranges::singleton(value.version),
            parent: None,
            url: None,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;
    use uv_normalize::PackageName;
    use uv_pep440::Version;
    use uv_redacted::DisplaySafeUrl;
    use crate::resolver::system::SystemDependency;
    // A plain PyPI index implies no system dependency.
    #[test]
    fn pypi() {
        let url = DisplaySafeUrl::parse("https://pypi.org/simple").unwrap();
        assert_eq!(SystemDependency::from_index(&url), None);
    }
    // A CUDA-suffixed PyTorch index implies the matching CUDA version.
    #[test]
    fn pytorch_cuda_12_4() {
        let url = DisplaySafeUrl::parse("https://download.pytorch.org/whl/cu124").unwrap();
        assert_eq!(
            SystemDependency::from_index(&url),
            Some(SystemDependency {
                name: PackageName::from_str("cuda").unwrap(),
                version: Version::new([12, 4]),
            })
        );
    }
    // CPU and XPU backends imply neither CUDA nor ROCm.
    #[test]
    fn pytorch_cpu() {
        let url = DisplaySafeUrl::parse("https://download.pytorch.org/whl/cpu").unwrap();
        assert_eq!(SystemDependency::from_index(&url), None);
    }
    #[test]
    fn pytorch_xpu() {
        let url = DisplaySafeUrl::parse("https://download.pytorch.org/whl/xpu").unwrap();
        assert_eq!(SystemDependency::from_index(&url), None);
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/mod.rs | crates/uv-resolver/src/resolver/mod.rs | //! Given a set of requirements, find a set of compatible packages.
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::fmt::{Display, Formatter, Write};
use std::ops::Bound;
use std::sync::Arc;
use std::time::Instant;
use std::{iter, slice, thread};
use dashmap::DashMap;
use either::Either;
use futures::{FutureExt, StreamExt};
use itertools::Itertools;
use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
use rustc_hash::{FxHashMap, FxHashSet};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::oneshot;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{Level, debug, info, instrument, trace, warn};
use uv_configuration::{Constraints, Excludes, Overrides};
use uv_distribution::{ArchiveMetadata, DistributionDatabase};
use uv_distribution_types::{
BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, DistributionMetadata,
IncompatibleDist, IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations,
IndexMetadata, IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement,
ResolvedDist, ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
};
use uv_git::GitResolver;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
use uv_pep508::{
MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
};
use uv_platform_tags::{IncompatibleTag, Tags};
use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
use uv_torch::TorchStrategy;
use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
use uv_warnings::warn_user_once;
use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
use crate::dependency_provider::UvDependencyProvider;
use crate::error::{NoSolutionError, ResolveError};
use crate::fork_indexes::ForkIndexes;
use crate::fork_strategy::ForkStrategy;
use crate::fork_urls::ForkUrls;
use crate::manifest::Manifest;
use crate::pins::FilePins;
use crate::preferences::{PreferenceSource, Preferences};
use crate::pubgrub::{
PubGrubDependency, PubGrubDistribution, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
PubGrubPython,
};
use crate::python_requirement::PythonRequirement;
use crate::resolution::ResolverOutput;
use crate::resolution_mode::ResolutionStrategy;
pub(crate) use crate::resolver::availability::{
ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
UnavailableVersion,
};
use crate::resolver::batch_prefetch::BatchPrefetcher;
pub use crate::resolver::derivation::DerivationChainBuilder;
pub use crate::resolver::environment::ResolverEnvironment;
use crate::resolver::environment::{
ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
};
pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
pub use crate::resolver::index::InMemoryIndex;
use crate::resolver::indexes::Indexes;
pub use crate::resolver::provider::{
DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
VersionsResponse, WheelMetadataResult,
};
pub use crate::resolver::reporter::{BuildId, Reporter};
use crate::resolver::system::SystemDependency;
pub(crate) use crate::resolver::urls::Urls;
use crate::universal_marker::{ConflictMarker, UniversalMarker};
use crate::yanks::AllowedYanks;
use crate::{
DependencyMode, ExcludeNewer, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap,
marker,
};
pub(crate) use provider::MetadataUnavailable;
mod availability;
mod batch_prefetch;
mod derivation;
mod environment;
mod fork_map;
mod index;
mod indexes;
mod provider;
mod reporter;
mod system;
mod urls;
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
const CONFLICT_THRESHOLD: usize = 5;
/// The entry point for dependency resolution: pairs the shared solver state
/// with the provider used to fetch package and version metadata.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    // Shared between the solver thread and the metadata fetcher.
    state: ResolverState<InstalledPackages>,
    // Backend performing the actual metadata/version lookups.
    provider: Provider,
}
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    // The name of the root project, if any.
    project: Option<PackageName>,
    // Direct requirements, plus constraints/overrides/excludes applied to them.
    requirements: Vec<Requirement>,
    constraints: Constraints,
    overrides: Overrides,
    excludes: Excludes,
    // Preferred versions (e.g., from a lockfile); also updated as forks resolve.
    preferences: Preferences,
    git: GitResolver,
    capabilities: IndexCapabilities,
    locations: IndexLocations,
    exclusions: Exclusions,
    // Per-package URL and index pins derived from the manifest.
    urls: Urls,
    indexes: Indexes,
    dependency_mode: DependencyMode,
    hasher: HashStrategy,
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    tags: Option<Tags>,
    python_requirement: PythonRequirement,
    conflicts: Conflicts,
    workspace_members: BTreeSet<PackageName>,
    // Chooses candidate versions according to the configured resolution strategy.
    selector: CandidateSelector,
    index: InMemoryIndex,
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
{
    /// Initialize a new resolver using the default backend doing real requests.
    ///
    /// Reads the flat index entries.
    ///
    /// # Marker environment
    ///
    /// The marker environment is optional.
    ///
    /// When a marker environment is not provided, the resolver is said to be
    /// in "universal" mode. When in universal mode, the resolution produced
    /// may contain multiple versions of the same package. And thus, in order
    /// to use the resulting resolution, there must be a "universal"-aware
    /// reader of the resolution that knows to exclude distributions that can't
    /// be used in the current environment.
    ///
    /// When a marker environment is provided, the resolver is in
    /// "non-universal" mode, which corresponds to standard `pip` behavior that
    /// works only for a specific marker environment.
    pub fn new(
        manifest: Manifest,
        options: Options,
        python_requirement: &'a PythonRequirement,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        conflicts: Conflicts,
        tags: Option<&'a Tags>,
        flat_index: &'a FlatIndex,
        index: &'a InMemoryIndex,
        hasher: &'a HashStrategy,
        build_context: &'a Context,
        installed_packages: InstalledPackages,
        database: DistributionDatabase<'a, Context>,
    ) -> Result<Self, ResolveError> {
        // Construct the default provider, which performs the real network and
        // index I/O on behalf of the solver.
        let provider = DefaultResolverProvider::new(
            database,
            flat_index,
            tags,
            python_requirement.target(),
            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
            hasher,
            options.exclude_newer.clone(),
            build_context.build_options(),
            build_context.capabilities(),
        );
        // Delegate to the provider-generic constructor.
        Self::new_custom_io(
            manifest,
            options,
            hasher,
            env,
            current_environment,
            tags.cloned(),
            python_requirement,
            conflicts,
            index,
            build_context.git(),
            build_context.capabilities(),
            build_context.locations(),
            provider,
            installed_packages,
        )
    }
}
impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
    Resolver<Provider, InstalledPackages>
{
    /// Initialize a new resolver using a user provided backend.
    pub fn new_custom_io(
        manifest: Manifest,
        options: Options,
        hasher: &HashStrategy,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        tags: Option<Tags>,
        python_requirement: &PythonRequirement,
        conflicts: Conflicts,
        index: &InMemoryIndex,
        git: &GitResolver,
        capabilities: &IndexCapabilities,
        locations: &IndexLocations,
        provider: Provider,
        installed_packages: InstalledPackages,
    ) -> Result<Self, ResolveError> {
        // Derive all manifest-dependent state (candidate selection, URL and
        // index pins) up-front, before the solver starts.
        let state = ResolverState {
            index: index.clone(),
            git: git.clone(),
            capabilities: capabilities.clone(),
            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
            dependency_mode: options.dependency_mode,
            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
            project: manifest.project,
            workspace_members: manifest.workspace_members,
            requirements: manifest.requirements,
            constraints: manifest.constraints,
            overrides: manifest.overrides,
            excludes: manifest.excludes,
            preferences: manifest.preferences,
            exclusions: manifest.exclusions,
            hasher: hasher.clone(),
            locations: locations.clone(),
            env,
            current_environment: current_environment.clone(),
            tags,
            python_requirement: python_requirement.clone(),
            conflicts,
            installed_packages,
            unavailable_packages: DashMap::default(),
            incomplete_packages: DashMap::default(),
            options,
            reporter: None,
        };
        Ok(Self { state, provider })
    }
    /// Set the [`Reporter`] to use for this installer.
    #[must_use]
    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
        Self {
            state: ResolverState {
                reporter: Some(reporter.clone()),
                ..self.state
            },
            // The provider gets a facade that forwards its callbacks to the
            // same reporter.
            provider: self
                .provider
                .with_reporter(reporter.into_distribution_reporter()),
        }
    }
    /// Resolve a set of requirements into a set of pinned versions.
    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
        let state = Arc::new(self.state);
        let provider = Arc::new(self.provider);
        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
        // Channel size is set large to accommodate batch prefetching.
        let (request_sink, request_stream) = mpsc::channel(300);
        // Run the fetcher.
        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();
        // Spawn the PubGrub solver on a dedicated thread.
        let solver = state.clone();
        let (tx, rx) = oneshot::channel();
        thread::Builder::new()
            .name("uv-resolver".into())
            .spawn(move || {
                let result = solver.solve(&request_sink);
                // This may fail if the main thread returned early due to an error.
                let _ = tx.send(result);
            })
            .unwrap();
        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };
        // Wait for both to complete.
        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;
        state.on_complete();
        resolution
    }
}
impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
    /// Drive the PubGrub solver to completion across all forks.
    ///
    /// Pops one fork at a time from a work stack; a fork either resolves to a
    /// complete solution (pushed onto `resolutions`) or splits into further
    /// forks (pushed back onto the stack). Metadata requests are emitted on
    /// `request_sink` and fulfilled concurrently by the fetcher task.
    #[instrument(skip_all)]
    fn solve(
        self: Arc<Self>,
        request_sink: &Sender<Request>,
    ) -> Result<ResolverOutput, ResolveError> {
        debug!(
            "Solving with installed Python version: {}",
            self.python_requirement.exact()
        );
        debug!(
            "Solving with target Python version: {}",
            self.python_requirement.target()
        );
        let mut visited = FxHashSet::default();
        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
        let prefetcher = BatchPrefetcher::new(
            self.capabilities.clone(),
            self.index.clone(),
            request_sink.clone(),
        );
        let state = ForkState::new(
            pubgrub,
            self.env.clone(),
            self.python_requirement.clone(),
            prefetcher,
        );
        let mut preferences = self.preferences.clone();
        let mut forked_states = self.env.initial_forked_states(state)?;
        let mut resolutions = vec![];
        // Process one fork at a time; resolving a fork may push new forks.
        'FORK: while let Some(mut state) = forked_states.pop() {
            if let Some(split) = state.env.end_user_fork_display() {
                let requires_python = state.python_requirement.target();
                debug!("Solving {split} (requires-python: {requires_python:?})");
            }
            let start = Instant::now();
            loop {
                let highest_priority_pkg =
                    if let Some(initial) = state.initial_id.take() {
                        // If we just forked based on `requires-python`, we can skip unit
                        // propagation, since we already propagated the package that initiated
                        // the fork.
                        initial
                    } else {
                        // Run unit propagation.
                        let result = state.pubgrub.unit_propagation(state.next);
                        match result {
                            Err(err) => {
                                // If unit propagation failed, there is no solution.
                                return Err(self.convert_no_solution_err(
                                    err,
                                    state.fork_urls,
                                    state.fork_indexes,
                                    state.env,
                                    self.current_environment.clone(),
                                    Some(&self.options.exclude_newer),
                                    &visited,
                                ));
                            }
                            Ok(conflicts) => {
                                for (affected, incompatibility) in conflicts {
                                    // Conflict tracking: If there was a conflict, track affected and
                                    // culprit for all root cause incompatibilities
                                    state.record_conflict(affected, None, incompatibility);
                                }
                            }
                        }
                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
                        if self.dependency_mode.is_transitive() {
                            Self::pre_visit(
                                state
                                    .pubgrub
                                    .partial_solution
                                    .prioritized_packages()
                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
                                &self.urls,
                                &self.indexes,
                                &state.python_requirement,
                                request_sink,
                            )?;
                        }
                        Self::reprioritize_conflicts(&mut state);
                        trace!(
                            "Assigned packages: {}",
                            state
                                .pubgrub
                                .partial_solution
                                .extract_solution()
                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
                                .join(", ")
                        );
                        // Choose a package.
                        // We aren't allowed to use the term intersection as it would extend the
                        // mutable borrow of `state`.
                        let Some((highest_priority_pkg, _)) =
                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
                            )
                        else {
                            // All packages have been assigned, the fork has been successfully resolved
                            if tracing::enabled!(Level::DEBUG) {
                                state.prefetcher.log_tried_versions();
                            }
                            debug!(
                                "{} resolution took {:.3}s",
                                state.env,
                                start.elapsed().as_secs_f32()
                            );
                            let resolution = state.into_resolution();
                            // Walk over the selected versions, and mark them as preferences. We have to
                            // add forks back as to not override the preferences from the lockfile for
                            // the next fork
                            //
                            // If we're using a resolution mode that varies based on whether a dependency is
                            // direct or transitive, skip preferences, as we risk adding a preference from
                            // one fork (in which it's a transitive dependency) to another fork (in which
                            // it's direct).
                            if matches!(
                                self.options.resolution_mode,
                                ResolutionMode::Lowest | ResolutionMode::Highest
                            ) {
                                for (package, version) in &resolution.nodes {
                                    preferences.insert(
                                        package.name.clone(),
                                        package.index.clone(),
                                        resolution
                                            .env
                                            .try_universal_markers()
                                            .unwrap_or(UniversalMarker::TRUE),
                                        version.clone(),
                                        PreferenceSource::Resolver,
                                    );
                                }
                            }
                            resolutions.push(resolution);
                            continue 'FORK;
                        };
                        trace!(
                            "Chose package for decision: {}. remaining choices: {}",
                            state.pubgrub.package_store[highest_priority_pkg],
                            state
                                .pubgrub
                                .partial_solution
                                .undecided_packages()
                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
                                .join(", ")
                        );
                        highest_priority_pkg
                    };
                state.next = highest_priority_pkg;
                // TODO(charlie): Remove as many usages of `next_package` as we can.
                let next_id = state.next;
                let next_package = &state.pubgrub.package_store[state.next];
                let url = next_package
                    .name()
                    .and_then(|name| state.fork_urls.get(name));
                let index = next_package
                    .name()
                    .and_then(|name| state.fork_indexes.get(name));
                // Consider:
                // ```toml
                // dependencies = [
                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
                // ]
                // ```
                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
                // since we weren't sure whether it might also be a URL requirement when
                // transforming the requirements. For that case, we do another request here
                // (idempotent due to caching).
                self.request_package(next_package, url, index, request_sink)?;
                let version = if let Some(version) = state.initial_version.take() {
                    // If we just forked based on platform support, we can skip version selection,
                    // since the fork operation itself already selected the appropriate version for
                    // the platform.
                    version
                } else {
                    let term_intersection = state
                        .pubgrub
                        .partial_solution
                        .term_intersection_for_package(next_id)
                        .expect("a package was chosen but we don't have a term");
                    let decision = self.choose_version(
                        next_package,
                        next_id,
                        index.map(IndexMetadata::url),
                        term_intersection.unwrap_positive(),
                        &mut state.pins,
                        &preferences,
                        &state.fork_urls,
                        &state.env,
                        &state.python_requirement,
                        &state.pubgrub,
                        &mut visited,
                        request_sink,
                    )?;
                    // Pick the next compatible version.
                    let Some(version) = decision else {
                        debug!("No compatible version found for: {next_package}");
                        let term_intersection = state
                            .pubgrub
                            .partial_solution
                            .term_intersection_for_package(next_id)
                            .expect("a package was chosen but we don't have a term");
                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
                            // Check if the decision was due to the package being unavailable
                            if let Some(entry) = self.unavailable_packages.get(name) {
                                state
                                    .pubgrub
                                    .add_incompatibility(Incompatibility::custom_term(
                                        next_id,
                                        term_intersection.clone(),
                                        UnavailableReason::Package(entry.clone()),
                                    ));
                                continue;
                            }
                        }
                        state
                            .pubgrub
                            .add_incompatibility(Incompatibility::no_versions(
                                next_id,
                                term_intersection.clone(),
                            ));
                        continue;
                    };
                    let version = match version {
                        ResolverVersion::Unforked(version) => version,
                        ResolverVersion::Forked(forks) => {
                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
                            continue 'FORK;
                        }
                        ResolverVersion::Unavailable(version, reason) => {
                            state.add_unavailable_version(version, reason);
                            continue;
                        }
                    };
                    // Only consider registry packages for prefetch.
                    if url.is_none() {
                        state.prefetcher.prefetch_batches(
                            next_package,
                            index,
                            &version,
                            term_intersection.unwrap_positive(),
                            state
                                .pubgrub
                                .partial_solution
                                .unchanging_term_for_package(next_id),
                            &state.python_requirement,
                            &self.selector,
                            &state.env,
                        )?;
                    }
                    version
                };
                state.prefetcher.version_tried(next_package, &version);
                self.on_progress(next_package, &version);
                if !state
                    .added_dependencies
                    .entry(next_id)
                    .or_default()
                    .insert(version.clone())
                {
                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
                    // terms and can add the decision directly.
                    state
                        .pubgrub
                        .partial_solution
                        .add_decision(next_id, version);
                    continue;
                }
                // Retrieve that package dependencies.
                let forked_deps = self.get_dependencies_forking(
                    next_id,
                    next_package,
                    &version,
                    &state.pins,
                    &state.fork_urls,
                    &state.env,
                    &state.python_requirement,
                    &state.pubgrub,
                )?;
                match forked_deps {
                    ForkedDependencies::Unavailable(reason) => {
                        // Then here, if we get a reason that we consider unrecoverable, we should
                        // show the derivation chain.
                        state
                            .pubgrub
                            .add_incompatibility(Incompatibility::custom_version(
                                next_id,
                                version.clone(),
                                UnavailableReason::Version(reason),
                            ));
                    }
                    ForkedDependencies::Unforked(dependencies) => {
                        // Enrich the state with any URLs, etc.
                        state
                            .visit_package_version_dependencies(
                                next_id,
                                &version,
                                &self.urls,
                                &self.indexes,
                                &dependencies,
                                &self.git,
                                &self.workspace_members,
                                self.selector.resolution_strategy(),
                            )
                            .map_err(|err| {
                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
                            })?;
                        // Emit a request to fetch the metadata for each registry package.
                        self.visit_dependencies(&dependencies, &state, request_sink)
                            .map_err(|err| {
                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
                            })?;
                        // Add the dependencies to the state.
                        state.add_package_version_dependencies(next_id, &version, dependencies);
                    }
                    ForkedDependencies::Forked {
                        mut forks,
                        diverging_packages,
                    } => {
                        debug!(
                            "Pre-fork {} took {:.3}s",
                            state.env,
                            start.elapsed().as_secs_f32()
                        );
                        // Prioritize the forks.
                        match (self.options.fork_strategy, self.options.resolution_mode) {
                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
                                // Prefer solving forks with lower Python bounds, since they're more
                                // likely to produce solutions that work for forks with higher
                                // Python bounds (whereas the inverse is not true).
                                forks.sort_by(|a, b| {
                                    a.cmp_requires_python(b)
                                        .reverse()
                                        .then_with(|| a.cmp_upper_bounds(b))
                                });
                            }
                            (ForkStrategy::RequiresPython, _) => {
                                // Otherwise, prefer solving forks with higher Python bounds, since
                                // we want to prioritize choosing the latest-compatible package
                                // version for each Python version.
                                forks.sort_by(|a, b| {
                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
                                });
                            }
                        }
                        for new_fork_state in self.forks_to_fork_states(
                            state,
                            &version,
                            forks,
                            request_sink,
                            &diverging_packages,
                        ) {
                            forked_states.push(new_fork_state?);
                        }
                        continue 'FORK;
                    }
                }
            }
        }
        if resolutions.len() > 1 {
            info!(
                "Solved your requirements for {} environments",
                resolutions.len()
            );
        }
        if tracing::enabled!(Level::DEBUG) {
            for resolution in &resolutions {
                if let Some(env) = resolution.env.end_user_fork_display() {
                    let packages: FxHashSet<_> = resolution
                        .nodes
                        .keys()
                        .map(|package| &package.name)
                        .collect();
                    debug!(
                        "Distinct solution for {env} with {} package(s)",
                        packages.len()
                    );
                }
            }
        }
        for resolution in &resolutions {
            Self::trace_resolution(resolution);
        }
        // Merge the per-fork resolutions into a single output.
        ResolverOutput::from_state(
            &resolutions,
            &self.requirements,
            &self.constraints,
            &self.overrides,
            &self.preferences,
            &self.index,
            &self.git,
            &self.python_requirement,
            &self.conflicts,
            self.selector.resolution_strategy(),
            self.options.clone(),
        )
    }
/// Change the priority of often conflicting packages and backtrack.
///
/// To be called after unit propagation.
fn reprioritize_conflicts(state: &mut ForkState) {
for package in state.conflict_tracker.prioritize.drain(..) {
let changed = state
.priorities
.mark_conflict_early(&state.pubgrub.package_store[package]);
if changed {
debug!(
"Package {} has too many conflicts (affected), prioritizing",
&state.pubgrub.package_store[package]
);
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/reporter.rs | crates/uv-resolver/src/resolver/reporter.rs | use std::sync::Arc;
use uv_distribution_types::{BuildableSource, VersionOrUrlRef};
use uv_normalize::PackageName;
use uv_redacted::DisplaySafeUrl;
/// Identifier for a reported operation (the trait methods below currently use
/// `usize` directly rather than this alias).
pub type BuildId = usize;
/// Progress callbacks invoked by the resolver during resolution, builds,
/// downloads, and Git checkouts.
pub trait Reporter: Send + Sync {
    /// Callback to invoke when a dependency is resolved.
    fn on_progress(&self, name: &PackageName, version: &VersionOrUrlRef);
    /// Callback to invoke when the resolution is complete.
    fn on_complete(&self);
    /// Callback to invoke when a source distribution build is kicked off.
    /// Returns an id that is passed back to [`Reporter::on_build_complete`].
    fn on_build_start(&self, source: &BuildableSource) -> usize;
    /// Callback to invoke when a source distribution build is complete.
    fn on_build_complete(&self, source: &BuildableSource, id: usize);
    /// Callback to invoke when a download is kicked off.
    /// Returns an id that is passed to the progress and completion callbacks.
    fn on_download_start(&self, name: &PackageName, size: Option<u64>) -> usize;
    /// Callback to invoke when a download makes progress (i.e. some number of bytes are
    /// downloaded).
    fn on_download_progress(&self, id: usize, bytes: u64);
    /// Callback to invoke when a download is complete.
    fn on_download_complete(&self, name: &PackageName, id: usize);
    /// Callback to invoke when a repository checkout begins.
    /// Returns an id that is passed back to [`Reporter::on_checkout_complete`].
    fn on_checkout_start(&self, url: &DisplaySafeUrl, rev: &str) -> usize;
    /// Callback to invoke when a repository checkout completes.
    fn on_checkout_complete(&self, url: &DisplaySafeUrl, rev: &str, id: usize);
}
impl dyn Reporter {
    /// Converts this reporter to a [`uv_distribution::Reporter`].
    pub(crate) fn into_distribution_reporter(
        self: Arc<dyn Reporter>,
    ) -> Arc<dyn uv_distribution::Reporter> {
        // `self` is consumed by this method, so move the `Arc` into the facade directly.
        // The previous `self.clone()` bumped the reference count only to immediately
        // drop the original — a redundant atomic increment/decrement pair.
        Arc::new(Facade { reporter: self })
    }
}
/// A facade for converting from [`Reporter`] to [`uv_distribution::Reporter`].
struct Facade {
    /// The underlying resolver reporter to which every callback is forwarded.
    reporter: Arc<dyn Reporter>,
}
/// Forward each `uv_distribution` callback verbatim to the wrapped resolver [`Reporter`].
impl uv_distribution::Reporter for Facade {
    // Build lifecycle.
    fn on_build_start(&self, source: &BuildableSource) -> usize {
        self.reporter.on_build_start(source)
    }

    fn on_build_complete(&self, source: &BuildableSource, id: usize) {
        self.reporter.on_build_complete(source, id);
    }

    // Download lifecycle.
    fn on_download_start(&self, name: &PackageName, size: Option<u64>) -> usize {
        self.reporter.on_download_start(name, size)
    }

    fn on_download_progress(&self, id: usize, bytes: u64) {
        self.reporter.on_download_progress(id, bytes);
    }

    fn on_download_complete(&self, name: &PackageName, id: usize) {
        self.reporter.on_download_complete(name, id);
    }

    // Git checkout lifecycle.
    fn on_checkout_start(&self, url: &DisplaySafeUrl, rev: &str) -> usize {
        self.reporter.on_checkout_start(url, rev)
    }

    fn on_checkout_complete(&self, url: &DisplaySafeUrl, rev: &str, id: usize) {
        self.reporter.on_checkout_complete(url, rev, id);
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/derivation.rs | crates/uv-resolver/src/resolver/derivation.rs | use pubgrub::{Id, Kind, State};
use rustc_hash::FxHashMap;
use uv_distribution_types::{DerivationChain, DerivationStep};
use uv_pep440::Version;
use crate::dependency_provider::UvDependencyProvider;
use crate::pubgrub::PubGrubPackage;
/// Build a [`DerivationChain`] from the pubgrub state, which is available in `uv-resolver`, but not
/// in `uv-distribution-types`.
///
/// A unit struct: the chain is produced by the associated
/// [`DerivationChainBuilder::from_state`] function; no instance state is involved.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
pub struct DerivationChainBuilder;
impl DerivationChainBuilder {
    /// Compute a [`DerivationChain`] from the current PubGrub state.
    ///
    /// This is used to construct a derivation chain upon resolution failure.
    ///
    /// Returns `None` if no dependency path from the failing `id`/`version` back to the
    /// root package can be found in the recorded incompatibilities.
    pub(crate) fn from_state(
        id: Id<PubGrubPackage>,
        version: &Version,
        state: &State<UvDependencyProvider>,
    ) -> Option<DerivationChain> {
        /// Find a path from the current package to the root package.
        ///
        /// Depth-first search over `FromDependencyOf` incompatibilities, walking
        /// dependency edges *backwards* (from dependency to dependent). On success,
        /// `path` holds the steps in leaf-to-root order; the caller reverses it.
        fn find_path(
            id: Id<PubGrubPackage>,
            version: &Version,
            state: &State<UvDependencyProvider>,
            solution: &FxHashMap<Id<PubGrubPackage>, Version>,
            path: &mut Vec<DerivationStep>,
        ) -> bool {
            // Retrieve the incompatibilities for the current package.
            let Some(incompatibilities) = state.incompatibilities.get(&id) else {
                return false;
            };
            for index in incompatibilities {
                let incompat = &state.incompatibility_store[*index];
                // Find a dependency from a package to the current package.
                if let Kind::FromDependencyOf(id1, _, id2, v2) = &incompat.kind {
                    // Only follow edges whose target matches the package/version we're
                    // currently explaining.
                    if id == *id2 && v2.contains(version) {
                        // Only consider dependents that are part of the (partial) solution;
                        // `solution` maps each selected package to its chosen version.
                        if let Some(version) = solution.get(id1) {
                            let p1 = &state.package_store[*id1];
                            let p2 = &state.package_store[*id2];
                            if p1.name_no_root() == p2.name_no_root() {
                                // Skip proxied dependencies.
                                if find_path(*id1, version, state, solution, path) {
                                    return true;
                                }
                            } else if let Some(name) = p1.name_no_root() {
                                // Add to the current path.
                                path.push(DerivationStep::new(
                                    name.clone(),
                                    p1.extra().cloned(),
                                    p1.group().cloned(),
                                    Some(version.clone()),
                                    v2.clone(),
                                ));
                                // Recursively search the next package.
                                if find_path(*id1, version, state, solution, path) {
                                    return true;
                                }
                                // Backtrack if the path didn't lead to the root.
                                path.pop();
                            } else {
                                // If we've reached the root, return.
                                return true;
                            }
                        }
                    }
                }
            }
            false
        }
        let solution: FxHashMap<_, _> = state.partial_solution.extract_solution().collect();
        let path = {
            let mut path = vec![];
            if !find_path(id, version, state, &solution, &mut path) {
                return None;
            }
            // `find_path` pushes steps leaf-to-root; present the chain root-to-leaf.
            path.reverse();
            path
        };
        Some(path.into_iter().collect())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/fork_map.rs | crates/uv-resolver/src/resolver/fork_map.rs | use rustc_hash::FxHashMap;
use uv_distribution_types::Requirement;
use uv_normalize::PackageName;
use uv_pep508::MarkerTree;
use crate::ResolverEnvironment;
/// A set of package names associated with a given fork.
///
/// A [`ForkMap`] with unit values: only per-fork membership matters.
pub(crate) type ForkSet = ForkMap<()>;

/// A map from package names to their values for a given fork.
#[derive(Debug, Clone)]
pub(crate) struct ForkMap<T>(FxHashMap<PackageName, Vec<Entry<T>>>);

/// An entry in a [`ForkMap`].
#[derive(Debug, Clone)]
struct Entry<T> {
    // The value recorded for the requirement that produced this entry.
    value: T,
    // The marker of the originating requirement; consulted to decide whether the
    // entry applies to a given `ResolverEnvironment` fork.
    marker: MarkerTree,
}
// Implemented manually (rather than derived) so `ForkMap<T>: Default` does not
// require `T: Default` — the inner map starts empty regardless of `T`.
impl<T> Default for ForkMap<T> {
    fn default() -> Self {
        Self(FxHashMap::default())
    }
}
impl<T> ForkMap<T> {
    /// Associate a value with the [`Requirement`] in a given fork.
    ///
    /// The requirement's marker is recorded alongside the value so lookups can be
    /// scoped to a specific fork.
    pub(crate) fn add(&mut self, requirement: &Requirement, value: T) {
        self.0
            .entry(requirement.name.clone())
            .or_default()
            .push(Entry {
                value,
                marker: requirement.marker,
            });
    }

    /// Returns `true` if the map contains any values for a package that are compatible with the
    /// given fork.
    pub(crate) fn contains(&self, package_name: &PackageName, env: &ResolverEnvironment) -> bool {
        !self.get(package_name, env).is_empty()
    }

    /// Returns `true` if the map contains any values for a package.
    pub(crate) fn contains_key(&self, package_name: &PackageName) -> bool {
        self.0.contains_key(package_name)
    }

    /// Returns a list of values associated with a package that are compatible with the given fork.
    ///
    /// Compatibility implies that the markers on the requirement that contained this value
    /// are not disjoint with the given fork. Note that this does not imply that the requirement
    /// diverged in the given fork - values from overlapping forks may be combined.
    pub(crate) fn get(&self, package_name: &PackageName, env: &ResolverEnvironment) -> Vec<&T> {
        match self.0.get(package_name) {
            Some(entries) => entries
                .iter()
                .filter_map(|entry| {
                    env.included_by_marker(entry.marker).then_some(&entry.value)
                })
                .collect(),
            None => Vec::new(),
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/urls.rs | crates/uv-resolver/src/resolver/urls.rs | use either::Either;
use rustc_hash::FxHashMap;
use same_file::is_same_file;
use tracing::debug;
use uv_cache_key::CanonicalUrl;
use uv_git::GitResolver;
use uv_normalize::PackageName;
use uv_pep508::{MarkerTree, VerbatimUrl};
use uv_pypi_types::{ParsedDirectoryUrl, ParsedUrl, VerbatimParsedUrl};
use crate::{DependencyMode, Manifest, ResolveError, ResolverEnvironment};
/// The URLs that are allowed for packages.
///
/// These are the URLs used in the root package or by other URL dependencies (including path
/// dependencies). They take precedence over requirements by version (except for the special case
/// where we are in a fork that doesn't use any of the URL(s) used in other forks). Each fork may
/// only use a single URL.
///
/// This type contains all URLs without checking, the validation happens in
/// [`crate::fork_urls::ForkUrls`].
#[derive(Debug, Default)]
pub(crate) struct Urls {
    /// URL requirements in overrides. An override URL replaces all requirements and constraints
    /// URLs. There can be multiple URLs for the same package as long as they are in different
    /// forks. Each URL is stored with the marker of the override requirement, so lookups can
    /// be scoped to a fork.
    overrides: FxHashMap<PackageName, Vec<(MarkerTree, VerbatimParsedUrl)>>,
    /// URLs from regular requirements or from constraints. There can be multiple URLs for the same
    /// package as long as they are in different forks.
    regular: FxHashMap<PackageName, Vec<VerbatimParsedUrl>>,
}
impl Urls {
    /// Collect the allowed URLs from a [`Manifest`]: regular requirement/constraint URLs
    /// first, then override URLs (which evict any regular URLs for the same package).
    pub(crate) fn from_manifest(
        manifest: &Manifest,
        env: &ResolverEnvironment,
        git: &GitResolver,
        dependencies: DependencyMode,
    ) -> Self {
        let mut regular: FxHashMap<PackageName, Vec<VerbatimParsedUrl>> = FxHashMap::default();
        let mut overrides: FxHashMap<PackageName, Vec<(MarkerTree, VerbatimParsedUrl)>> =
            FxHashMap::default();
        // Add all direct regular requirements and constraints URL.
        for requirement in manifest.requirements_no_overrides(env, dependencies) {
            let Some(url) = requirement.source.to_verbatim_parsed_url() else {
                // Registry requirement
                continue;
            };
            let package_urls = regular.entry(requirement.name.clone()).or_default();
            if let Some(package_url) = package_urls
                .iter_mut()
                .find(|package_url| same_resource(&package_url.parsed_url, &url.parsed_url, git))
            {
                // The same resource was already recorded: keep a single entry, but
                // allow editables to override non-editables.
                let previous_editable = package_url.is_editable();
                *package_url = url;
                if previous_editable {
                    if let VerbatimParsedUrl {
                        parsed_url: ParsedUrl::Directory(ParsedDirectoryUrl { editable, .. }),
                        verbatim: _,
                    } = package_url
                    {
                        // Preserve editability from the earlier entry if the new one
                        // didn't specify it.
                        if editable.is_none() {
                            debug!("Allowing an editable variant of {}", &package_url.verbatim);
                            *editable = Some(true);
                        }
                    }
                }
            } else {
                package_urls.push(url);
            }
        }
        // Add all URLs from overrides. If there is an override URL, all other URLs from
        // requirements and constraints are moot and will be removed.
        for requirement in manifest.overrides(env, dependencies) {
            let Some(url) = requirement.source.to_verbatim_parsed_url() else {
                // Registry requirement
                continue;
            };
            // We only clear for non-URL overrides, since e.g. with an override `anyio==0.0.0` and
            // a requirements.txt entry `./anyio`, we still use the URL. See
            // `allow_recursive_url_local_path_override_constraint`.
            regular.remove(&requirement.name);
            overrides
                .entry(requirement.name.clone())
                .or_default()
                .push((requirement.marker, url));
        }
        Self { overrides, regular }
    }
    /// Return an iterator over the allowed URLs for the given package.
    ///
    /// If we have a URL override, apply it unconditionally for registry and URL requirements.
    /// Otherwise, there are two case: for a URL requirement (`url` isn't `None`), check that the
    /// URL is allowed and return its canonical form.
    ///
    /// For registry requirements, we return an empty iterator.
    pub(crate) fn get_url<'a>(
        &'a self,
        env: &'a ResolverEnvironment,
        name: &'a PackageName,
        url: Option<&'a VerbatimParsedUrl>,
        git: &'a GitResolver,
    ) -> Result<impl Iterator<Item = &'a VerbatimParsedUrl>, ResolveError> {
        if let Some(override_urls) = self.get_overrides(name) {
            // Overrides win: yield only the override URLs whose markers apply to this fork.
            Ok(Either::Left(Either::Left(override_urls.iter().filter_map(
                |(marker, url)| {
                    if env.included_by_marker(*marker) {
                        Some(url)
                    } else {
                        None
                    }
                },
            ))))
        } else if let Some(url) = url {
            // URL requirement: validate against the known regular URLs and canonicalize.
            let url =
                self.canonicalize_allowed_url(env, name, git, &url.verbatim, &url.parsed_url)?;
            Ok(Either::Left(Either::Right(std::iter::once(url))))
        } else {
            // Registry requirement: no URLs apply.
            Ok(Either::Right(std::iter::empty()))
        }
    }
    /// Return `true` if the package has any URL (from overrides or regular requirements).
    pub(crate) fn any_url(&self, name: &PackageName) -> bool {
        self.get_overrides(name).is_some() || self.get_regular(name).is_some()
    }
    /// Return the [`VerbatimUrl`] override for the given package, if any.
    fn get_overrides(&self, package: &PackageName) -> Option<&[(MarkerTree, VerbatimParsedUrl)]> {
        self.overrides.get(package).map(Vec::as_slice)
    }
    /// Return the allowed [`VerbatimUrl`]s for given package from regular requirements and
    /// constraints (but not overrides), if any.
    ///
    /// It's more than one more URL if they are in different forks (or conflict after forking).
    fn get_regular(&self, package: &PackageName) -> Option<&[VerbatimParsedUrl]> {
        self.regular.get(package).map(Vec::as_slice)
    }
    /// Check if a URL is allowed (known), and if so, return its canonical form.
    ///
    /// Errors with [`ResolveError::DisallowedUrl`] if the package has no known regular URLs,
    /// and with [`ResolveError::ConflictingUrls`] if zero or multiple known URLs match the
    /// given one (i.e., anything other than exactly one match).
    fn canonicalize_allowed_url<'a>(
        &'a self,
        env: &ResolverEnvironment,
        package_name: &'a PackageName,
        git: &'a GitResolver,
        verbatim_url: &'a VerbatimUrl,
        parsed_url: &'a ParsedUrl,
    ) -> Result<&'a VerbatimParsedUrl, ResolveError> {
        let Some(expected) = self.get_regular(package_name) else {
            return Err(ResolveError::DisallowedUrl {
                name: package_name.clone(),
                url: verbatim_url.to_string(),
            });
        };
        let matching_urls: Vec<_> = expected
            .iter()
            .filter(|requirement| same_resource(&requirement.parsed_url, parsed_url, git))
            .collect();
        let [allowed_url] = matching_urls.as_slice() else {
            // Not exactly one match: report all candidates (plus the requested URL) as
            // conflicting, in sorted order for deterministic output.
            let mut conflicting_urls: Vec<_> = matching_urls
                .into_iter()
                .map(|parsed_url| parsed_url.parsed_url.clone())
                .chain(std::iter::once(parsed_url.clone()))
                .collect();
            conflicting_urls.sort();
            return Err(ResolveError::ConflictingUrls {
                package_name: package_name.clone(),
                urls: conflicting_urls,
                env: env.clone(),
            });
        };
        Ok(*allowed_url)
    }
}
/// Returns `true` if the [`ParsedUrl`] instances point to the same resource.
fn same_resource(a: &ParsedUrl, b: &ParsedUrl, git: &GitResolver) -> bool {
    // Compare optional subdirectories after path normalization.
    fn same_subdirectory(
        a: Option<&std::path::Path>,
        b: Option<&std::path::Path>,
    ) -> bool {
        a.map(uv_fs::normalize_path) == b.map(uv_fs::normalize_path)
    }

    // Compare install paths, falling back to a filesystem identity check when the
    // lexical paths differ (errors from the check are treated as "not the same file").
    fn same_install_path(a: &std::path::Path, b: &std::path::Path) -> bool {
        a == b || is_same_file(a, b).unwrap_or(false)
    }

    match (a, b) {
        (ParsedUrl::Archive(a), ParsedUrl::Archive(b)) => {
            same_subdirectory(a.subdirectory.as_deref(), b.subdirectory.as_deref())
                && CanonicalUrl::new(&a.url) == CanonicalUrl::new(&b.url)
        }
        (ParsedUrl::Git(a), ParsedUrl::Git(b)) => {
            same_subdirectory(a.subdirectory.as_deref(), b.subdirectory.as_deref())
                && git.same_ref(&a.url, &b.url)
        }
        (ParsedUrl::Path(a), ParsedUrl::Path(b)) => {
            same_install_path(&a.install_path, &b.install_path)
        }
        (ParsedUrl::Directory(a), ParsedUrl::Directory(b)) => {
            // Editability only conflicts when both sides specify it and they disagree.
            same_install_path(&a.install_path, &b.install_path)
                && a.editable.is_none_or(|a| b.editable.is_none_or(|b| a == b))
        }
        // Different URL kinds never refer to the same resource.
        _ => false,
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolver/provider.rs | crates/uv-resolver/src/resolver/provider.rs | use std::future::Future;
use std::sync::Arc;
use uv_client::MetadataFormat;
use uv_configuration::BuildOptions;
use uv_distribution::{ArchiveMetadata, DistributionDatabase, Reporter};
use uv_distribution_types::{
Dist, IndexCapabilities, IndexMetadata, IndexMetadataRef, InstalledDist, RequestedDist,
RequiresPython,
};
use uv_normalize::PackageName;
use uv_pep440::{Version, VersionSpecifiers};
use uv_platform_tags::Tags;
use uv_types::{BuildContext, HashStrategy};
use crate::ExcludeNewer;
use crate::flat_index::FlatIndex;
use crate::version_map::VersionMap;
use crate::yanks::AllowedYanks;
pub type PackageVersionsResult = Result<VersionsResponse, uv_client::Error>;
pub type WheelMetadataResult = Result<MetadataResponse, uv_distribution::Error>;
/// The response when requesting versions for a package
#[derive(Debug)]
pub enum VersionsResponse {
    /// The package was found in the registry with the included versions
    ///
    /// One [`VersionMap`] per index that carried the package.
    Found(Vec<VersionMap>),
    /// The package was not found in the registry
    NotFound,
    /// The package was not found in the local registry
    ///
    /// NOTE(review): produced when the client reports `NoIndex` (no remote index
    /// available) and the package isn't in any `--find-links` entries — see
    /// `get_package_versions` below; confirm wording against `uv_client::ErrorKind`.
    NoIndex,
    /// The package was not found in the cache and the network is not available.
    Offline,
}
/// The response when requesting metadata for a distribution.
#[derive(Debug)]
pub enum MetadataResponse {
    /// The wheel metadata was found and parsed successfully.
    Found(ArchiveMetadata),
    /// A non-fatal error.
    Unavailable(MetadataUnavailable),
    /// The distribution could not be built or downloaded, a fatal error.
    ///
    /// Carries the requested distribution (for error reporting) and the underlying error.
    Error(Box<RequestedDist>, Arc<uv_distribution::Error>),
}
/// Non-fatal metadata fetching error.
///
/// This is also the unavailability reasons for a package, while version unavailability is separate
/// in [`UnavailableVersion`].
#[derive(Debug, Clone)]
pub enum MetadataUnavailable {
    /// The wheel metadata was not found in the cache and the network is not available.
    Offline,
    /// The wheel metadata was found, but could not be parsed.
    InvalidMetadata(Arc<uv_pypi_types::MetadataError>),
    /// The wheel metadata was found, but the metadata was inconsistent.
    InconsistentMetadata(Arc<uv_distribution::Error>),
    /// The wheel has an invalid structure.
    InvalidStructure(Arc<uv_metadata::Error>),
    /// The source distribution has a `requires-python` requirement that is not met by the installed
    /// Python version (and static metadata is not available).
    ///
    /// Carries the unmet `Requires-Python` specifiers and the Python version in use.
    RequiresPython(VersionSpecifiers, Version),
}
impl MetadataUnavailable {
    /// Like [`std::error::Error::source`], but we don't want to derive the std error since our
    /// formatting system is more custom.
    pub(crate) fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            // Variants that carry no underlying error.
            Self::Offline | Self::RequiresPython(_, _) => None,
            // Variants that wrap an underlying error (distinct payload types, so
            // the arms can't be merged).
            Self::InvalidMetadata(err) => Some(err),
            Self::InconsistentMetadata(err) => Some(err),
            Self::InvalidStructure(err) => Some(err),
        }
    }
}
/// Abstraction over the resolver's I/O: fetching available versions for a package and
/// fetching (or building) metadata for a distribution.
pub trait ResolverProvider {
    /// Get the version map for a package.
    ///
    /// If `index` is provided, the package is pinned to that index.
    fn get_package_versions<'io>(
        &'io self,
        package_name: &'io PackageName,
        index: Option<&'io IndexMetadata>,
    ) -> impl Future<Output = PackageVersionsResult> + 'io;
    /// Get the metadata for a distribution.
    ///
    /// For a wheel, this is done by querying it (remote) metadata. For a source distribution, we
    /// (fetch and) build the source distribution and return the metadata from the built
    /// distribution.
    fn get_or_build_wheel_metadata<'io>(
        &'io self,
        dist: &'io Dist,
    ) -> impl Future<Output = WheelMetadataResult> + 'io;
    /// Get the metadata for an installed distribution.
    fn get_installed_metadata<'io>(
        &'io self,
        dist: &'io InstalledDist,
    ) -> impl Future<Output = WheelMetadataResult> + 'io;
    /// Set the [`Reporter`] to use for this installer.
    #[must_use]
    fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self;
}
/// The main IO backend for the resolver, which makes cached network requests using the
/// [`RegistryClient`] and [`DistributionDatabase`].
pub struct DefaultResolverProvider<'a, Context: BuildContext> {
    /// The [`DistributionDatabase`] used to build source distributions.
    fetcher: DistributionDatabase<'a, Context>,
    /// These are the entries from `--find-links` that act as overrides for index responses.
    flat_index: FlatIndex,
    /// The platform tags passed to [`VersionMap`] construction, if available.
    tags: Option<Tags>,
    /// The supported Python range, passed to [`VersionMap`] construction.
    requires_python: RequiresPython,
    /// The set of packages for which yanked versions are allowed.
    allowed_yanks: AllowedYanks,
    /// The hash policy, used both for [`VersionMap`] construction and metadata fetching.
    hasher: HashStrategy,
    /// The exclusion cutoff passed to [`VersionMap`] construction.
    exclude_newer: ExcludeNewer,
    /// Build options, passed to [`VersionMap`] construction.
    build_options: &'a BuildOptions,
    /// Capabilities of the configured indexes, passed through to the client.
    capabilities: &'a IndexCapabilities,
}
impl<'a, Context: BuildContext> DefaultResolverProvider<'a, Context> {
    /// Reads the flat index entries and builds the provider.
    pub fn new(
        fetcher: DistributionDatabase<'a, Context>,
        flat_index: &'a FlatIndex,
        tags: Option<&'a Tags>,
        requires_python: &'a RequiresPython,
        allowed_yanks: AllowedYanks,
        hasher: &'a HashStrategy,
        exclude_newer: ExcludeNewer,
        build_options: &'a BuildOptions,
        capabilities: &'a IndexCapabilities,
    ) -> Self {
        // Take owned copies of the borrowed inputs that the provider stores by value.
        let flat_index = flat_index.clone();
        let tags = tags.cloned();
        let requires_python = requires_python.clone();
        let hasher = hasher.clone();
        Self {
            fetcher,
            flat_index,
            tags,
            requires_python,
            allowed_yanks,
            hasher,
            exclude_newer,
            build_options,
            capabilities,
        }
    }
}
impl<Context: BuildContext> ResolverProvider for DefaultResolverProvider<'_, Context> {
    /// Make a "Simple API" request for the package and convert the result to a [`VersionMap`].
    async fn get_package_versions<'io>(
        &'io self,
        package_name: &'io PackageName,
        index: Option<&'io IndexMetadata>,
    ) -> PackageVersionsResult {
        let result = self
            .fetcher
            .client()
            .manual(|client, semaphore| {
                client.simple_detail(
                    package_name,
                    index.map(IndexMetadataRef::from),
                    self.capabilities,
                    semaphore,
                )
            })
            .await;
        // If a package is pinned to an explicit index, ignore any `--find-links` entries.
        let flat_index = index.is_none().then_some(&self.flat_index);
        match result {
            // One response per index that carried the package; convert each into a
            // `VersionMap`, merging in any `--find-links` entries for the package.
            Ok(results) => Ok(VersionsResponse::Found(
                results
                    .into_iter()
                    .map(|(index, metadata)| match metadata {
                        MetadataFormat::Simple(metadata) => VersionMap::from_simple_metadata(
                            metadata,
                            package_name,
                            index,
                            self.tags.as_ref(),
                            &self.requires_python,
                            &self.allowed_yanks,
                            &self.hasher,
                            Some(&self.exclude_newer),
                            flat_index
                                .and_then(|flat_index| flat_index.get(package_name))
                                .cloned(),
                            self.build_options,
                        ),
                        MetadataFormat::Flat(metadata) => VersionMap::from_flat_metadata(
                            metadata,
                            self.tags.as_ref(),
                            &self.hasher,
                            self.build_options,
                        ),
                    })
                    .collect(),
            )),
            Err(err) => match err.kind() {
                // The registry responded but doesn't know the package: fall back to
                // `--find-links` entries before reporting "not found".
                uv_client::ErrorKind::RemotePackageNotFound(_) => {
                    if let Some(flat_index) = flat_index
                        .and_then(|flat_index| flat_index.get(package_name))
                        .cloned()
                    {
                        Ok(VersionsResponse::Found(vec![VersionMap::from(flat_index)]))
                    } else {
                        Ok(VersionsResponse::NotFound)
                    }
                }
                // No remote index available: use `--find-links` entries if present;
                // otherwise distinguish "offline flat index" from "no index at all".
                uv_client::ErrorKind::NoIndex(_) => {
                    if let Some(flat_index) = flat_index
                        .and_then(|flat_index| flat_index.get(package_name))
                        .cloned()
                    {
                        Ok(VersionsResponse::Found(vec![VersionMap::from(flat_index)]))
                    } else if flat_index.is_some_and(FlatIndex::offline) {
                        Ok(VersionsResponse::Offline)
                    } else {
                        Ok(VersionsResponse::NoIndex)
                    }
                }
                // Network unavailable: `--find-links` entries may still satisfy the
                // request; otherwise report offline.
                uv_client::ErrorKind::Offline(_) => {
                    if let Some(flat_index) = flat_index
                        .and_then(|flat_index| flat_index.get(package_name))
                        .cloned()
                    {
                        Ok(VersionsResponse::Found(vec![VersionMap::from(flat_index)]))
                    } else {
                        Ok(VersionsResponse::Offline)
                    }
                }
                // Any other client error is fatal.
                _ => Err(err),
            },
        }
    }
    /// Fetch the metadata for a distribution, building it if necessary.
    ///
    /// Known-recoverable failures are mapped to [`MetadataResponse::Unavailable`]; anything
    /// else becomes a [`MetadataResponse::Error`] carrying the requested distribution.
    async fn get_or_build_wheel_metadata<'io>(&'io self, dist: &'io Dist) -> WheelMetadataResult {
        match self
            .fetcher
            .get_or_build_wheel_metadata(dist, self.hasher.get(dist))
            .await
        {
            Ok(metadata) => Ok(MetadataResponse::Found(metadata)),
            Err(err) => match err {
                uv_distribution::Error::Client(client) => {
                    // Preserve the retry count when re-wrapping unhandled client errors.
                    let retries = client.retries();
                    match client.into_kind() {
                        uv_client::ErrorKind::Offline(_) => {
                            Ok(MetadataResponse::Unavailable(MetadataUnavailable::Offline))
                        }
                        uv_client::ErrorKind::MetadataParseError(_, _, err) => {
                            Ok(MetadataResponse::Unavailable(
                                MetadataUnavailable::InvalidMetadata(Arc::new(*err)),
                            ))
                        }
                        uv_client::ErrorKind::Metadata(_, err) => {
                            Ok(MetadataResponse::Unavailable(
                                MetadataUnavailable::InvalidStructure(Arc::new(err)),
                            ))
                        }
                        kind => Err(uv_client::Error::new(kind, retries).into()),
                    }
                }
                uv_distribution::Error::WheelMetadataVersionMismatch { .. } => {
                    Ok(MetadataResponse::Unavailable(
                        MetadataUnavailable::InconsistentMetadata(Arc::new(err)),
                    ))
                }
                uv_distribution::Error::WheelMetadataNameMismatch { .. } => {
                    Ok(MetadataResponse::Unavailable(
                        MetadataUnavailable::InconsistentMetadata(Arc::new(err)),
                    ))
                }
                uv_distribution::Error::Metadata(err) => Ok(MetadataResponse::Unavailable(
                    MetadataUnavailable::InvalidMetadata(Arc::new(err)),
                )),
                uv_distribution::Error::WheelMetadata(_, err) => Ok(MetadataResponse::Unavailable(
                    MetadataUnavailable::InvalidStructure(Arc::new(*err)),
                )),
                uv_distribution::Error::RequiresPython(requires_python, version) => {
                    Ok(MetadataResponse::Unavailable(
                        MetadataUnavailable::RequiresPython(requires_python, version),
                    ))
                }
                // All remaining errors are fatal for this distribution.
                err => Ok(MetadataResponse::Error(
                    Box::new(RequestedDist::Installable(dist.clone())),
                    Arc::new(err),
                )),
            },
        }
    }
    /// Return the metadata for an installed distribution.
    async fn get_installed_metadata<'io>(
        &'io self,
        dist: &'io InstalledDist,
    ) -> WheelMetadataResult {
        match self.fetcher.get_installed_metadata(dist).await {
            Ok(metadata) => Ok(MetadataResponse::Found(metadata)),
            Err(err) => Ok(MetadataResponse::Error(
                Box::new(RequestedDist::Installed(dist.clone())),
                Arc::new(err),
            )),
        }
    }
    /// Set the [`Reporter`] to use for this installer.
    fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
        Self {
            fetcher: self.fetcher.with_reporter(reporter),
            ..self
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/installable.rs | crates/uv-resolver/src/lock/installable.rs | use std::collections::BTreeSet;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::path::Path;
use std::sync::Arc;
use either::Either;
use itertools::Itertools;
use petgraph::Graph;
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use uv_configuration::ExtrasSpecificationWithDefaults;
use uv_configuration::{BuildOptions, DependencyGroupsWithDefaults, InstallOptions};
use uv_distribution_types::{Edge, Node, Resolution, ResolvedDist};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_platform_tags::Tags;
use uv_pypi_types::ResolverMarkerEnvironment;
use crate::lock::{HashedDist, LockErrorKind, Package, TagPolicy};
use crate::{Lock, LockError};
pub trait Installable<'lock> {
/// Return the root install path.
fn install_path(&self) -> &'lock Path;
/// Return the [`Lock`] to install.
fn lock(&self) -> &'lock Lock;
/// Return the [`PackageName`] of the root packages in the target.
fn roots(&self) -> impl Iterator<Item = &PackageName>;
/// Return the [`PackageName`] of the target, if available.
fn project_name(&self) -> Option<&PackageName>;
/// Convert the [`Lock`] to a [`Resolution`] using the given marker environment, tags, and root.
fn to_resolution(
&self,
marker_env: &ResolverMarkerEnvironment,
tags: &Tags,
extras: &ExtrasSpecificationWithDefaults,
groups: &DependencyGroupsWithDefaults,
build_options: &BuildOptions,
install_options: &InstallOptions,
) -> Result<Resolution, LockError> {
let size_guess = self.lock().packages.len();
let mut petgraph = Graph::with_capacity(size_guess, size_guess);
let mut inverse = FxHashMap::with_capacity_and_hasher(size_guess, FxBuildHasher);
let mut queue: VecDeque<(&Package, Option<&ExtraName>)> = VecDeque::new();
let mut seen = FxHashSet::default();
let mut activated_projects: Vec<&PackageName> = vec![];
let mut activated_extras: Vec<(&PackageName, &ExtraName)> = vec![];
let mut activated_groups: Vec<(&PackageName, &GroupName)> = vec![];
let root = petgraph.add_node(Node::Root);
// Determine the set of activated extras and groups, from the root.
//
// TODO(charlie): This isn't quite right. Below, when we add the dependency groups to the
// graph, we rely on the activated extras and dependency groups, to evaluate the conflict
// marker. But at that point, we don't know the full set of activated extras; this is only
// computed below. We somehow need to add the dependency groups _after_ we've computed all
// enabled extras, but the groups themselves could depend on the set of enabled extras.
if !self.lock().conflicts().is_empty() {
for root_name in self.roots() {
let dist = self
.lock()
.find_by_name(root_name)
.map_err(|_| LockErrorKind::MultipleRootPackages {
name: root_name.clone(),
})?
.ok_or_else(|| LockErrorKind::MissingRootPackage {
name: root_name.clone(),
})?;
// Track the activated extras.
if groups.prod() {
activated_projects.push(&dist.id.name);
for extra in extras.extra_names(dist.optional_dependencies.keys()) {
activated_extras.push((&dist.id.name, extra));
}
}
// Track the activated groups.
for group in dist
.dependency_groups
.keys()
.filter(|group| groups.contains(group))
{
activated_groups.push((&dist.id.name, group));
}
}
}
// Initialize the workspace roots.
let mut roots = vec![];
for root_name in self.roots() {
let dist = self
.lock()
.find_by_name(root_name)
.map_err(|_| LockErrorKind::MultipleRootPackages {
name: root_name.clone(),
})?
.ok_or_else(|| LockErrorKind::MissingRootPackage {
name: root_name.clone(),
})?;
// Add the workspace package to the graph.
let index = petgraph.add_node(if groups.prod() {
self.package_to_node(dist, tags, build_options, install_options, marker_env)?
} else {
self.non_installable_node(dist, tags, marker_env)?
});
inverse.insert(&dist.id, index);
// Add an edge from the root.
petgraph.add_edge(root, index, Edge::Prod);
// Push the package onto the queue.
roots.push((dist, index));
}
// Add the workspace dependencies to the queue.
for (dist, index) in roots {
if groups.prod() {
// Push its dependencies onto the queue.
queue.push_back((dist, None));
for extra in extras.extra_names(dist.optional_dependencies.keys()) {
queue.push_back((dist, Some(extra)));
}
}
// Add any dev dependencies.
for (group, dep) in dist
.dependency_groups
.iter()
.filter_map(|(group, deps)| {
if groups.contains(group) {
Some(deps.iter().map(move |dep| (group, dep)))
} else {
None
}
})
.flatten()
{
if !dep.complexified_marker.evaluate(
marker_env,
activated_projects.iter().copied(),
activated_extras.iter().copied(),
activated_groups.iter().copied(),
) {
continue;
}
let dep_dist = self.lock().find_by_id(&dep.package_id);
// Add the package to the graph.
let dep_index = match inverse.entry(&dep.package_id) {
Entry::Vacant(entry) => {
let index = petgraph.add_node(self.package_to_node(
dep_dist,
tags,
build_options,
install_options,
marker_env,
)?);
entry.insert(index);
index
}
Entry::Occupied(entry) => {
// Critically, if the package is already in the graph, then it's a workspace
// member. If it was omitted due to, e.g., `--only-dev`, but is itself
// referenced as a development dependency, then we need to re-enable it.
let index = *entry.get();
let node = &mut petgraph[index];
if !groups.prod() {
*node = self.package_to_node(
dep_dist,
tags,
build_options,
install_options,
marker_env,
)?;
}
index
}
};
petgraph.add_edge(
index,
dep_index,
// This is OK because we are resolving to a resolution for
// a specific marker environment and set of extras/groups.
// So at this point, we know the extras/groups have been
// satisfied, so we can safely drop the conflict marker.
Edge::Dev(group.clone()),
);
// Push its dependencies on the queue.
if seen.insert((&dep.package_id, None)) {
queue.push_back((dep_dist, None));
}
for extra in &dep.extra {
if seen.insert((&dep.package_id, Some(extra))) {
queue.push_back((dep_dist, Some(extra)));
}
}
}
}
// Add any requirements that are exclusive to the workspace root (e.g., dependencies in
// PEP 723 scripts).
for dependency in self.lock().requirements() {
if !dependency.marker.evaluate(marker_env, &[]) {
continue;
}
let root_name = &dependency.name;
let dist = self
.lock()
.find_by_markers(root_name, marker_env)
.map_err(|_| LockErrorKind::MultipleRootPackages {
name: root_name.clone(),
})?
.ok_or_else(|| LockErrorKind::MissingRootPackage {
name: root_name.clone(),
})?;
// Add the package to the graph.
let index = petgraph.add_node(if groups.prod() {
self.package_to_node(dist, tags, build_options, install_options, marker_env)?
} else {
self.non_installable_node(dist, tags, marker_env)?
});
inverse.insert(&dist.id, index);
// Add the edge.
petgraph.add_edge(root, index, Edge::Prod);
// Push its dependencies on the queue.
if seen.insert((&dist.id, None)) {
queue.push_back((dist, None));
}
for extra in &dependency.extras {
if seen.insert((&dist.id, Some(extra))) {
queue.push_back((dist, Some(extra)));
}
}
}
// Add any dependency groups that are exclusive to the workspace root (e.g., dev
// dependencies in (legacy) non-project workspace roots).
for (group, dependency) in self
.lock()
.dependency_groups()
.iter()
.filter_map(|(group, deps)| {
if groups.contains(group) {
Some(deps.iter().map(move |dep| (group, dep)))
} else {
None
}
})
.flatten()
{
if !dependency.marker.evaluate(marker_env, &[]) {
continue;
}
let root_name = &dependency.name;
let dist = self
.lock()
.find_by_markers(root_name, marker_env)
.map_err(|_| LockErrorKind::MultipleRootPackages {
name: root_name.clone(),
})?
.ok_or_else(|| LockErrorKind::MissingRootPackage {
name: root_name.clone(),
})?;
// Add the package to the graph.
let index = match inverse.entry(&dist.id) {
Entry::Vacant(entry) => {
let index = petgraph.add_node(self.package_to_node(
dist,
tags,
build_options,
install_options,
marker_env,
)?);
entry.insert(index);
index
}
Entry::Occupied(entry) => {
// Critically, if the package is already in the graph, then it's a workspace
// member. If it was omitted due to, e.g., `--only-dev`, but is itself
// referenced as a development dependency, then we need to re-enable it.
let index = *entry.get();
let node = &mut petgraph[index];
if !groups.prod() {
*node = self.package_to_node(
dist,
tags,
build_options,
install_options,
marker_env,
)?;
}
index
}
};
// Add the edge.
petgraph.add_edge(root, index, Edge::Dev(group.clone()));
// Push its dependencies on the queue.
if seen.insert((&dist.id, None)) {
queue.push_back((dist, None));
}
for extra in &dependency.extras {
if seen.insert((&dist.id, Some(extra))) {
queue.push_back((dist, Some(extra)));
}
}
}
// Below, we traverse the dependency graph in a breadth first manner
// twice. It's only in the second traversal that we actually build
// up our resolution graph. In the first traversal, we accumulate all
// activated extras. This includes the extras explicitly enabled on
// the CLI (which were gathered above) and the extras enabled via
// dependency specifications like `foo[extra]`. We need to do this
// to correctly support conflicting extras.
//
// In particular, the way conflicting extras works is by forking the
// resolver based on the extras that are declared as conflicting. But
// this forking needs to be made manifest somehow in the lock file to
// avoid multiple versions of the same package being installed into the
// environment. This is why "conflict markers" were invented. For
// example, you might have both `torch` and `torch+cpu` in your
// dependency graph, where the latter is only enabled when the `cpu`
// extra is enabled, and the former is specifically *not* enabled
// when the `cpu` extra is enabled.
//
// In order to evaluate these conflict markers correctly, we need to
// know whether the `cpu` extra is enabled when we visit the `torch`
// dependency. If we think it's disabled, then we'll erroneously
// include it if the extra is actually enabled. But in order to tell
// if it's enabled, we need to traverse the entire dependency graph
// first to inspect which extras are enabled!
//
// Of course, we don't need to do this at all if there aren't any
// conflicts. In which case, we skip all of this and just do the one
// traversal below.
if !self.lock().conflicts().is_empty() {
let mut activated_extras_set: BTreeSet<(&PackageName, &ExtraName)> =
activated_extras.iter().copied().collect();
let mut queue = queue.clone();
let mut seen = seen.clone();
while let Some((package, extra)) = queue.pop_front() {
let deps = if let Some(extra) = extra {
Either::Left(
package
.optional_dependencies
.get(extra)
.into_iter()
.flatten(),
)
} else {
Either::Right(package.dependencies.iter())
};
for dep in deps {
let mut additional_activated_extras = vec![];
for extra in &dep.extra {
let key = (&dep.package_id.name, extra);
if !activated_extras_set.contains(&key) {
additional_activated_extras.push(key);
}
}
if !dep.complexified_marker.evaluate(
marker_env,
activated_projects.iter().copied(),
activated_extras
.iter()
.chain(additional_activated_extras.iter())
.copied(),
activated_groups.iter().copied(),
) {
continue;
}
// It is, I believe, possible to be here for a dependency that
// will ultimately not be included in the final resolution.
// Specifically, carrying on from the example in the comments
// above, we might visit `torch` first and thus not know if
// the `cpu` feature is enabled or not, and thus, the marker
// evaluation above will pass.
//
// So is this a problem? Well, this is the main reason why we
// do two graph traversals. On the second traversal below, we
// will have seen all of the enabled extras, and so `torch`
// will be excluded.
//
// But could this lead to a bigger list of activated extras
// than we actually have? I believe that is indeed possible,
// but I think it is only a problem if it leads to extras that
// *conflict* with one another being simultaneously enabled.
// However, after this first traversal, we check our set of
// accumulated extras to ensure that there are no conflicts. If
// there are, we raise an error. ---AG
for key in additional_activated_extras {
activated_extras_set.insert(key);
activated_extras.push(key);
}
let dep_dist = self.lock().find_by_id(&dep.package_id);
// Push its dependencies on the queue.
if seen.insert((&dep.package_id, None)) {
queue.push_back((dep_dist, None));
}
for extra in &dep.extra {
if seen.insert((&dep.package_id, Some(extra))) {
queue.push_back((dep_dist, Some(extra)));
}
}
}
}
// At time of writing, it's somewhat expected that the set of
// conflicting extras is pretty small. With that said, the
// time complexity of the following routine is pretty gross.
// Namely, `set.contains` is linear in the size of the set,
// iteration over all conflicts is also obviously linear in
// the number of conflicting sets and then for each of those,
// we visit every possible pair of activated extra from above,
// which is quadratic in the total number of extras enabled. I
// believe the simplest improvement here, if it's necessary, is
// to adjust the `Conflicts` internals to own these sorts of
// checks. ---AG
for set in self.lock().conflicts().iter() {
for ((pkg1, extra1), (pkg2, extra2)) in
activated_extras_set.iter().tuple_combinations()
{
if set.contains(pkg1, *extra1) && set.contains(pkg2, *extra2) {
return Err(LockErrorKind::ConflictingExtra {
package1: (*pkg1).clone(),
extra1: (*extra1).clone(),
package2: (*pkg2).clone(),
extra2: (*extra2).clone(),
}
.into());
}
}
}
}
while let Some((package, extra)) = queue.pop_front() {
let deps = if let Some(extra) = extra {
Either::Left(
package
.optional_dependencies
.get(extra)
.into_iter()
.flatten(),
)
} else {
Either::Right(package.dependencies.iter())
};
for dep in deps {
if !dep.complexified_marker.evaluate(
marker_env,
activated_projects.iter().copied(),
activated_extras.iter().copied(),
activated_groups.iter().copied(),
) {
continue;
}
let dep_dist = self.lock().find_by_id(&dep.package_id);
// Add the dependency to the graph.
let dep_index = match inverse.entry(&dep.package_id) {
Entry::Vacant(entry) => {
let index = petgraph.add_node(self.package_to_node(
dep_dist,
tags,
build_options,
install_options,
marker_env,
)?);
entry.insert(index);
index
}
Entry::Occupied(entry) => *entry.get(),
};
// Add the edge.
let index = inverse[&package.id];
petgraph.add_edge(
index,
dep_index,
if let Some(extra) = extra {
Edge::Optional(extra.clone())
} else {
Edge::Prod
},
);
// Push its dependencies on the queue.
if seen.insert((&dep.package_id, None)) {
queue.push_back((dep_dist, None));
}
for extra in &dep.extra {
if seen.insert((&dep.package_id, Some(extra))) {
queue.push_back((dep_dist, Some(extra)));
}
}
}
}
Ok(Resolution::new(petgraph))
}
/// Build a [`Node`] for a [`Package`] that will actually be installed.
///
/// Wheel tags are strictly required here: an installable node must resolve to
/// an artifact compatible with the target platform. The hashes returned
/// correspond to the specific artifact that was selected.
fn installable_node(
    &self,
    package: &Package,
    tags: &Tags,
    marker_env: &ResolverMarkerEnvironment,
    build_options: &BuildOptions,
) -> Result<Node, LockError> {
    // Resolve the concrete distribution (and its hashes) for this package.
    let HashedDist { dist, hashes } = package.to_dist(
        self.install_path(),
        TagPolicy::Required(tags),
        build_options,
        marker_env,
    )?;
    let resolved = ResolvedDist::Installable {
        dist: Arc::new(dist),
        version: package.version().cloned(),
    };
    Ok(Node::Dist {
        dist: resolved,
        hashes,
        install: true,
    })
}
/// Build a [`Node`] for a [`Package`] that participates in the graph but will
/// not be installed (e.g., a package filtered out by the install options).
///
/// Tags are merely preferred (not required) and default build options are
/// used, since no artifact is ever materialized; the hashes come from the
/// package metadata rather than from a selected artifact.
fn non_installable_node(
    &self,
    package: &Package,
    tags: &Tags,
    marker_env: &ResolverMarkerEnvironment,
) -> Result<Node, LockError> {
    let tag_policy = TagPolicy::Preferred(tags);
    let build_options = BuildOptions::default();
    let HashedDist { dist, .. } =
        package.to_dist(self.install_path(), tag_policy, &build_options, marker_env)?;
    let resolved = ResolvedDist::Installable {
        dist: Arc::new(dist),
        version: package.version().cloned(),
    };
    Ok(Node::Dist {
        dist: resolved,
        hashes: package.hashes(),
        install: false,
    })
}
/// Convert a lockfile entry to a graph [`Node`], marking it as installable or
/// not according to the provided [`InstallOptions`].
fn package_to_node(
    &self,
    package: &Package,
    tags: &Tags,
    build_options: &BuildOptions,
    install_options: &InstallOptions,
    marker_env: &ResolverMarkerEnvironment,
) -> Result<Node, LockError> {
    // Decide whether this package should be installed at all.
    let include = install_options.include_package(
        package.as_install_target(),
        self.project_name(),
        self.lock().members(),
    );
    if include {
        self.installable_node(package, tags, marker_env, build_options)
    } else {
        self.non_installable_node(package, tags, marker_env)
    }
}
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/tree.rs | crates/uv-resolver/src/lock/tree.rs | use std::collections::{BTreeSet, VecDeque};
use std::fmt::Write;
use either::Either;
use itertools::Itertools;
use owo_colors::OwoColorize;
use petgraph::graph::{EdgeIndex, NodeIndex};
use petgraph::prelude::EdgeRef;
use petgraph::{Direction, Graph};
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use uv_configuration::DependencyGroupsWithDefaults;
use uv_console::human_readable_bytes;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::Version;
use uv_pep508::MarkerTree;
use uv_pypi_types::ResolverMarkerEnvironment;
use crate::lock::PackageId;
use crate::{Lock, PackageMap};
/// A renderable dependency tree derived from a [`Lock`].
#[derive(Debug)]
pub struct TreeDisplay<'env> {
    /// The constructed dependency graph.
    graph: petgraph::graph::Graph<Node<'env>, Edge<'env>, petgraph::Directed>,
    /// The packages considered as roots of the dependency tree.
    roots: Vec<NodeIndex>,
    /// The latest known version of each package.
    latest: &'env PackageMap<Version>,
    /// Maximum display depth of the dependency tree.
    depth: usize,
    /// Whether to de-duplicate the displayed dependencies.
    no_dedupe: bool,
    /// Reference to the lock to look up additional metadata (e.g., wheel sizes).
    lock: &'env Lock,
    /// Whether to show sizes in the rendered output.
    show_sizes: bool,
}
impl<'env> TreeDisplay<'env> {
/// Create a new [`TreeDisplay`] for the set of installed packages.
pub fn new(
    lock: &'env Lock,
    markers: Option<&'env ResolverMarkerEnvironment>,
    latest: &'env PackageMap<Version>,
    depth: usize,
    prune: &[PackageName],
    packages: &[PackageName],
    groups: &DependencyGroupsWithDefaults,
    no_dedupe: bool,
    invert: bool,
    show_sizes: bool,
) -> Self {
    // Identify any workspace members.
    //
    // These include:
    // - The members listed in the lockfile.
    // - The root package, if it's not in the list of members. (The root package is omitted from
    //   the list of workspace members for single-member workspaces with a `[project]` section,
    //   to avoid cluttering the lockfile.
    let members: BTreeSet<&PackageId> = if lock.members().is_empty() {
        lock.root().into_iter().map(|package| &package.id).collect()
    } else {
        lock.packages
            .iter()
            .filter_map(|package| {
                if lock.members().contains(&package.id.name) {
                    Some(&package.id)
                } else {
                    None
                }
            })
            .collect()
    };
    // Create a graph.
    let size_guess = lock.packages.len();
    let mut graph =
        Graph::<Node, Edge, petgraph::Directed>::with_capacity(size_guess, size_guess);
    let mut inverse = FxHashMap::with_capacity_and_hasher(size_guess, FxBuildHasher);
    // `queue` drives a breadth-first walk over `(package, extra)` pairs; `seen`
    // guards against enqueueing the same pair twice.
    let mut queue: VecDeque<(&PackageId, Option<&ExtraName>)> = VecDeque::new();
    let mut seen = FxHashSet::default();
    let root = graph.add_node(Node::Root);
    // Add the root packages to the graph.
    for id in members.iter().copied() {
        if prune.contains(&id.name) {
            continue;
        }
        let dist = lock.find_by_id(id);
        // Add the workspace package to the graph. Under `--only-group`, the workspace member
        // may not be installed, but it's still relevant for the dependency tree, since we want
        // to show the connection from the workspace package to the enabled dependency groups.
        let index = *inverse
            .entry(id)
            .or_insert_with(|| graph.add_node(Node::Package(id)));
        // Add an edge from the root.
        graph.add_edge(root, index, Edge::Prod(None));
        if groups.prod() {
            // Push its dependencies on the queue.
            if seen.insert((id, None)) {
                queue.push_back((id, None));
            }
            // Push any extras on the queue.
            for extra in dist.optional_dependencies.keys() {
                if seen.insert((id, Some(extra))) {
                    queue.push_back((id, Some(extra)));
                }
            }
        }
        // Add any development dependencies.
        for (group, dep) in dist
            .dependency_groups
            .iter()
            .filter_map(|(group, deps)| {
                if groups.contains(group) {
                    Some(deps.iter().map(move |dep| (group, dep)))
                } else {
                    None
                }
            })
            .flatten()
        {
            if prune.contains(&dep.package_id.name) {
                continue;
            }
            if markers
                .is_some_and(|markers| !dep.complexified_marker.evaluate_no_extras(markers))
            {
                continue;
            }
            // Add the dependency to the graph and get its index.
            let dep_index = *inverse
                .entry(&dep.package_id)
                .or_insert_with(|| graph.add_node(Node::Package(&dep.package_id)));
            // Add an edge from the workspace package.
            graph.add_edge(index, dep_index, Edge::Dev(group, Some(&dep.extra)));
            // Push its dependencies on the queue.
            if seen.insert((&dep.package_id, None)) {
                queue.push_back((&dep.package_id, None));
            }
            for extra in &dep.extra {
                if seen.insert((&dep.package_id, Some(extra))) {
                    queue.push_back((&dep.package_id, Some(extra)));
                }
            }
        }
    }
    // Identify any packages that are connected directly to the synthetic root node, i.e.,
    // requirements that are attached to the workspace itself.
    //
    // These include
    // - `[dependency-groups]` dependencies for workspaces whose roots do not include a
    //   `[project]` table, since those roots are not workspace members, but they _can_ define
    //   dependencies.
    // - `dependencies` in PEP 723 scripts.
    {
        // Index the lockfile by name.
        let by_name: FxHashMap<_, Vec<_>> = {
            lock.packages().iter().fold(
                FxHashMap::with_capacity_and_hasher(lock.len(), FxBuildHasher),
                |mut map, package| {
                    map.entry(&package.id.name).or_default().push(package);
                    map
                },
            )
        };
        // Identify any requirements attached to the workspace itself.
        for requirement in lock.requirements() {
            for package in by_name.get(&requirement.name).into_iter().flatten() {
                // Determine whether this entry is "relevant" for the requirement, by intersecting
                // the markers: a forked package only matches when its fork markers overlap the
                // requirement's marker.
                let marker = if package.fork_markers.is_empty() {
                    requirement.marker
                } else {
                    let mut combined = MarkerTree::FALSE;
                    for fork_marker in &package.fork_markers {
                        combined.or(fork_marker.pep508());
                    }
                    combined.and(requirement.marker);
                    combined
                };
                if marker.is_false() {
                    continue;
                }
                if markers.is_some_and(|markers| !marker.evaluate(markers, &[])) {
                    continue;
                }
                // Add the package to the graph.
                let index = inverse
                    .entry(&package.id)
                    .or_insert_with(|| graph.add_node(Node::Package(&package.id)));
                // Add an edge from the root.
                graph.add_edge(root, *index, Edge::Prod(None));
                // Push its dependencies on the queue.
                if seen.insert((&package.id, None)) {
                    queue.push_back((&package.id, None));
                }
            }
        }
        // Identify any dependency groups attached to the workspace itself.
        for (group, requirements) in lock.dependency_groups() {
            for requirement in requirements {
                for package in by_name.get(&requirement.name).into_iter().flatten() {
                    // Determine whether this entry is "relevant" for the requirement, by intersecting
                    // the markers (same logic as for the root requirements above).
                    let marker = if package.fork_markers.is_empty() {
                        requirement.marker
                    } else {
                        let mut combined = MarkerTree::FALSE;
                        for fork_marker in &package.fork_markers {
                            combined.or(fork_marker.pep508());
                        }
                        combined.and(requirement.marker);
                        combined
                    };
                    if marker.is_false() {
                        continue;
                    }
                    if markers.is_some_and(|markers| !marker.evaluate(markers, &[])) {
                        continue;
                    }
                    // Add the package to the graph.
                    let index = inverse
                        .entry(&package.id)
                        .or_insert_with(|| graph.add_node(Node::Package(&package.id)));
                    // Add an edge from the root.
                    graph.add_edge(root, *index, Edge::Dev(group, None));
                    // Push its dependencies on the queue.
                    if seen.insert((&package.id, None)) {
                        queue.push_back((&package.id, None));
                    }
                }
            }
        }
    }
    // Create all the relevant nodes by draining the BFS queue.
    while let Some((id, extra)) = queue.pop_front() {
        let index = inverse[&id];
        let package = lock.find_by_id(id);
        // With an extra, walk that extra's optional dependencies; otherwise,
        // walk the package's base dependencies.
        let deps = if let Some(extra) = extra {
            Either::Left(
                package
                    .optional_dependencies
                    .get(extra)
                    .into_iter()
                    .flatten(),
            )
        } else {
            Either::Right(package.dependencies.iter())
        };
        for dep in deps {
            if prune.contains(&dep.package_id.name) {
                continue;
            }
            if markers
                .is_some_and(|markers| !dep.complexified_marker.evaluate_no_extras(markers))
            {
                continue;
            }
            // Add the dependency to the graph.
            let dep_index = *inverse
                .entry(&dep.package_id)
                .or_insert_with(|| graph.add_node(Node::Package(&dep.package_id)));
            // Add an edge from the workspace package.
            graph.add_edge(
                index,
                dep_index,
                if let Some(extra) = extra {
                    Edge::Optional(extra, Some(&dep.extra))
                } else {
                    Edge::Prod(Some(&dep.extra))
                },
            );
            // Push its dependencies on the queue.
            if seen.insert((&dep.package_id, None)) {
                queue.push_back((&dep.package_id, None));
            }
            for extra in &dep.extra {
                if seen.insert((&dep.package_id, Some(extra))) {
                    queue.push_back((&dep.package_id, Some(extra)));
                }
            }
        }
    }
    // Filter the graph to remove any unreachable nodes (reachability is
    // computed from the workspace members and the synthetic root).
    {
        let mut reachable = graph
            .node_indices()
            .filter(|index| match graph[*index] {
                Node::Package(package_id) => members.contains(package_id),
                Node::Root => true,
            })
            .collect::<FxHashSet<_>>();
        let mut stack = reachable.iter().copied().collect::<VecDeque<_>>();
        while let Some(node) = stack.pop_front() {
            for edge in graph.edges_directed(node, Direction::Outgoing) {
                if reachable.insert(edge.target()) {
                    stack.push_back(edge.target());
                }
            }
        }
        // Remove the unreachable nodes from the graph.
        graph.retain_nodes(|_, index| reachable.contains(&index));
    }
    // Reverse the graph (for `--invert`, i.e., showing reverse dependencies).
    if invert {
        graph.reverse();
    }
    // Filter the graph to those nodes reachable from the target packages.
    if !packages.is_empty() {
        let mut reachable = graph
            .node_indices()
            .filter(|index| {
                let Node::Package(package_id) = graph[*index] else {
                    return false;
                };
                packages.contains(&package_id.name)
            })
            .collect::<FxHashSet<_>>();
        let mut stack = reachable.iter().copied().collect::<VecDeque<_>>();
        while let Some(node) = stack.pop_front() {
            for edge in graph.edges_directed(node, Direction::Outgoing) {
                if reachable.insert(edge.target()) {
                    stack.push_back(edge.target());
                }
            }
        }
        // Remove the unreachable nodes from the graph.
        graph.retain_nodes(|_, index| reachable.contains(&index));
    }
    // Compute the list of roots.
    let roots = {
        // If specific packages were requested, use them as roots.
        if !packages.is_empty() {
            let mut roots = graph
                .node_indices()
                .filter(|index| {
                    let Node::Package(package_id) = graph[*index] else {
                        return false;
                    };
                    packages.contains(&package_id.name)
                })
                .collect::<Vec<_>>();
            // Sort the roots.
            roots.sort_by_key(|index| &graph[*index]);
            roots
        } else {
            let mut edges = vec![];
            // Remove any cycles, temporarily, so that "no incoming edges" is a
            // meaningful definition of a root.
            let feedback_set: Vec<EdgeIndex> = petgraph::algo::greedy_feedback_arc_set(&graph)
                .map(|e| e.id())
                .collect();
            for edge_id in feedback_set {
                if let Some((source, target)) = graph.edge_endpoints(edge_id) {
                    if let Some(weight) = graph.remove_edge(edge_id) {
                        edges.push((source, target, weight));
                    }
                }
            }
            // Find the root nodes: nodes with no incoming edges, or only an edge from the proxy.
            let mut roots = graph
                .node_indices()
                .filter(|index| {
                    graph
                        .edges_directed(*index, Direction::Incoming)
                        .next()
                        .is_none()
                })
                .collect::<Vec<_>>();
            // Sort the roots.
            roots.sort_by_key(|index| &graph[*index]);
            // Re-add the removed edges.
            for (source, target, weight) in edges {
                graph.add_edge(source, target, weight);
            }
            roots
        }
    };
    Self {
        graph,
        roots,
        latest,
        depth,
        no_dedupe,
        lock,
        show_sizes,
    }
}
/// Perform a depth-first traversal of the given package and its dependencies,
/// returning the rendered lines for the subtree rooted at `cursor`.
///
/// `visited` maps each rendered package to the dependencies shown under it
/// (used for de-duplication); `path` is the current traversal path (used for
/// cycle detection).
fn visit(
    &'env self,
    cursor: Cursor,
    visited: &mut FxHashMap<&'env PackageId, Vec<&'env PackageId>>,
    path: &mut Vec<&'env PackageId>,
) -> Vec<String> {
    // Short-circuit if the current path is longer than the provided depth.
    if path.len() > self.depth {
        return Vec::new();
    }
    let Node::Package(package_id) = self.graph[cursor.node()] else {
        return Vec::new();
    };
    let edge = cursor.edge().map(|edge_id| &self.graph[edge_id]);
    // Render the line for this package: name, optional `[extras]`, version,
    // the extra/group annotation from the incoming edge, and (optionally) the
    // wheel size.
    let line = {
        let mut line = format!("{}", package_id.name);
        if let Some(extras) = edge.and_then(Edge::extras) {
            if !extras.is_empty() {
                line.push('[');
                line.push_str(extras.iter().join(", ").as_str());
                line.push(']');
            }
        }
        if let Some(version) = package_id.version.as_ref() {
            line.push(' ');
            line.push('v');
            let _ = write!(line, "{version}");
        }
        if let Some(edge) = edge {
            match edge {
                Edge::Prod(_) => {}
                Edge::Optional(extra, _) => {
                    let _ = write!(line, " (extra: {extra})");
                }
                Edge::Dev(group, _) => {
                    let _ = write!(line, " (group: {group})");
                }
            }
        }
        // Append compressed wheel size, if available in the lockfile.
        // Keep it simple: use the first wheel entry that includes a size.
        if self.show_sizes {
            let package = self.lock.find_by_id(package_id);
            if let Some(size_bytes) = package.wheels.iter().find_map(|wheel| wheel.size) {
                let (bytes, unit) = human_readable_bytes(size_bytes);
                line.push(' ');
                line.push_str(format!("{}", format!("({bytes:.1}{unit})").dimmed()).as_str());
            }
        }
        line
    };
    // Skip the traversal if:
    // 1. The package is in the current traversal path (i.e., a dependency cycle).
    // 2. The package has been visited and de-duplication is enabled (default).
    if let Some(requirements) = visited.get(package_id) {
        if !self.no_dedupe || path.contains(&package_id) {
            // `(*)` marks a subtree elided because it was already shown.
            return if requirements.is_empty() {
                vec![line]
            } else {
                vec![format!("{line} (*)")]
            };
        }
    }
    // Incorporate the latest version of the package, if known.
    let line = if let Some(version) = self.latest.get(package_id) {
        format!("{line} {}", format!("(latest: v{version})").bold().cyan())
    } else {
        line
    };
    let mut dependencies = self
        .graph
        .edges_directed(cursor.node(), Direction::Outgoing)
        .filter_map(|edge| match self.graph[edge.target()] {
            Node::Root => None,
            Node::Package(_) => Some(Cursor::new(edge.target(), edge.id())),
        })
        .collect::<Vec<_>>();
    // Sort siblings by edge kind first (prod, then extras, then groups), then
    // by package.
    dependencies.sort_by_key(|cursor| {
        let node = &self.graph[cursor.node()];
        let edge = cursor
            .edge()
            .map(|edge_id| &self.graph[edge_id])
            .map(Edge::kind);
        (edge, node)
    });
    let mut lines = vec![line];
    // Keep track of the dependency path to avoid cycles.
    visited.insert(
        package_id,
        dependencies
            .iter()
            .filter_map(|node| match self.graph[node.node()] {
                Node::Package(package_id) => Some(package_id),
                Node::Root => None,
            })
            .collect(),
    );
    path.push(package_id);
    for (index, dep) in dependencies.iter().enumerate() {
        // For sub-visited packages, add the prefix to make the tree display user-friendly.
        // The key observation here is you can group the tree as follows when you're at the
        // root of the tree:
        // root_package
        // ├── level_1_0          // Group 1
        // │   ├── level_2_0 ...
        // │   │   ├── level_3_0 ...
        // │   │   └── level_3_1 ...
        // │   └── level_2_1 ...
        // ├── level_1_1          // Group 2
        // │   ├── level_2_2 ...
        // │   └── level_2_3 ...
        // └── level_1_2          // Group 3
        //     └── level_2_4 ...
        //
        // The lines in Group 1 and 2 have `├── ` at the top and `│   ` at the rest while
        // those in Group 3 have `└── ` at the top and `    ` at the rest.
        // This observation is true recursively even when looking at the subtree rooted
        // at `level_1_0`.
        let (prefix_top, prefix_rest) = if dependencies.len() - 1 == index {
            ("└── ", "    ")
        } else {
            ("├── ", "│   ")
        };
        for (visited_index, visited_line) in self.visit(*dep, visited, path).iter().enumerate()
        {
            let prefix = if visited_index == 0 {
                prefix_top
            } else {
                prefix_rest
            };
            lines.push(format!("{prefix}{visited_line}"));
        }
    }
    path.pop();
    lines
}
/// Render the full tree by running a depth-first traversal from each root.
///
/// The synthetic root node is never rendered itself; instead, each of its
/// outgoing edges starts a separate top-level tree. The traversal path is
/// reset before each root, while the `visited` map is shared across roots so
/// de-duplication applies globally.
fn render(&self) -> Vec<String> {
    let node_count = self.graph.node_count();
    let mut output = Vec::with_capacity(node_count);
    let mut visited = FxHashMap::with_capacity_and_hasher(node_count, FxBuildHasher);
    let mut trail = Vec::new();
    for &root in &self.roots {
        if let Node::Root = self.graph[root] {
            // One traversal per edge leaving the synthetic root.
            for edge in self.graph.edges_directed(root, Direction::Outgoing) {
                trail.clear();
                let cursor = Cursor::new(edge.target(), edge.id());
                output.extend(self.visit(cursor, &mut visited, &mut trail));
            }
        } else {
            trail.clear();
            output.extend(self.visit(Cursor::root(root), &mut visited, &mut trail));
        }
    }
    output
}
}
/// A node in the dependency graph: either the synthetic root, or a package.
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
enum Node<'env> {
    /// The synthetic root node.
    Root,
    /// A package in the dependency graph.
    Package(&'env PackageId),
}
/// An edge in the dependency graph, carrying the dependency kind along with
/// the extras enabled on the target package (when known).
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
enum Edge<'env> {
    /// A production dependency.
    Prod(Option<&'env BTreeSet<ExtraName>>),
    /// An optional dependency, enabled via the named extra.
    Optional(&'env ExtraName, Option<&'env BTreeSet<ExtraName>>),
    /// A dependency enabled via the named dependency group.
    Dev(&'env GroupName, Option<&'env BTreeSet<ExtraName>>),
}
impl<'env> Edge<'env> {
    /// Return the extras enabled on the target package, if any.
    fn extras(&self) -> Option<&'env BTreeSet<ExtraName>> {
        // Every variant stores the extras in its final position.
        match *self {
            Self::Prod(extras) | Self::Optional(_, extras) | Self::Dev(_, extras) => extras,
        }
    }

    /// Return the kind of the edge, discarding the associated extras.
    fn kind(&self) -> EdgeKind<'env> {
        match *self {
            Self::Prod(_) => EdgeKind::Prod,
            Self::Optional(extra, _) => EdgeKind::Optional(extra),
            Self::Dev(group, _) => EdgeKind::Dev(group),
        }
    }
}
/// The kind of an [`Edge`], without the associated extras.
///
/// The derived ordering (prod, then optional, then dev) is used to sort
/// sibling dependencies when rendering.
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
enum EdgeKind<'env> {
    /// A production dependency.
    Prod,
    /// An optional dependency, keyed by extra name.
    Optional(&'env ExtraName),
    /// A group dependency, keyed by group name.
    Dev(&'env GroupName),
}
/// A node in the dependency graph along with the edge that led to it, or `None` for root nodes.
///
/// The edge is consulted during rendering to recover the dependency kind
/// (prod/extra/group) and the enabled extras for the node.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
struct Cursor(NodeIndex, Option<EdgeIndex>);
impl Cursor {
    /// Create a [`Cursor`] representing a node in the dependency tree.
    fn new(node: NodeIndex, edge: EdgeIndex) -> Self {
        Self(node, Some(edge))
    }
    /// Create a [`Cursor`] representing a root node in the dependency tree.
    fn root(node: NodeIndex) -> Self {
        Self(node, None)
    }
    /// Return the [`NodeIndex`] of the node.
    fn node(&self) -> NodeIndex {
        self.0
    }
    /// Return the [`EdgeIndex`] of the edge that led to the node, if any
    /// (`None` for roots).
    fn edge(&self) -> Option<EdgeIndex> {
        self.1
    }
}
impl std::fmt::Display for TreeDisplay<'_> {
    /// Write the rendered tree, followed by a legend whenever any `(*)`
    /// markers (de-duplicated or cyclic subtrees) appear in the output.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        use owo_colors::OwoColorize;
        let rendered = self.render();
        let has_marker = rendered.iter().any(|line| line.contains('*'));
        for line in &rendered {
            writeln!(f, "{line}")?;
        }
        if has_marker {
            let legend = if self.no_dedupe {
                "(*) Package tree is a cycle and cannot be shown"
            } else {
                "(*) Package tree already displayed"
            };
            writeln!(f, "{}", legend.italic())?;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/map.rs | crates/uv-resolver/src/lock/map.rs | use rustc_hash::FxHashMap;
use crate::lock::{Package, PackageId};
/// A map from package to values, indexed by [`PackageId`].
///
/// A thin newtype over [`FxHashMap`] so that callers insert by [`Package`]
/// while lookups are keyed by the package's id.
#[derive(Debug, Clone)]
pub struct PackageMap<T>(FxHashMap<PackageId, T>);
impl<T> Default for PackageMap<T> {
    /// Create an empty map.
    fn default() -> Self {
        Self(FxHashMap::default())
    }
}
impl<T> PackageMap<T> {
    /// Insert a value by [`PackageId`], returning the previous value for that
    /// id, if any.
    pub fn insert(&mut self, package: Package, value: T) -> Option<T> {
        self.0.insert(package.id, value)
    }
    /// Get a value by [`PackageId`].
    pub(crate) fn get(&self, package_id: &PackageId) -> Option<&T> {
        self.0.get(package_id)
    }
}
impl<T> FromIterator<(Package, T)> for PackageMap<T> {
    /// Build a map from `(Package, value)` pairs, keying each entry by the
    /// package's [`PackageId`].
    fn from_iter<I: IntoIterator<Item = (Package, T)>>(iter: I) -> Self {
        let map = iter
            .into_iter()
            .map(|(package, value)| (package.id, value))
            .collect();
        Self(map)
    }
}
impl<T> Extend<(Package, T)> for PackageMap<T> {
    /// Add `(Package, value)` pairs to the map, keyed by [`PackageId`].
    ///
    /// Later entries overwrite earlier ones with the same id, matching the
    /// semantics of extending the underlying hash map.
    fn extend<I: IntoIterator<Item = (Package, T)>>(&mut self, iter: I) {
        for (package, value) in iter {
            self.0.insert(package.id, value);
        }
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/mod.rs | crates/uv-resolver/src/lock/mod.rs | use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::error::Error;
use std::fmt::{Debug, Display, Formatter};
use std::io;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, LazyLock};
use itertools::Itertools;
use jiff::Timestamp;
use owo_colors::OwoColorize;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use rustc_hash::{FxHashMap, FxHashSet};
use serde::Serializer;
use toml_edit::{Array, ArrayOfTables, InlineTable, Item, Table, Value, value};
use tracing::debug;
use url::Url;
use uv_cache_key::RepositoryUrl;
use uv_configuration::{BuildOptions, Constraints, InstallTarget};
use uv_distribution::{DistributionDatabase, FlatRequiresDist};
use uv_distribution_filename::{
BuildTag, DistExtension, ExtensionError, SourceDistExtension, WheelFilename,
};
use uv_distribution_types::{
BuiltDist, DependencyMetadata, DirectUrlBuiltDist, DirectUrlSourceDist, DirectorySourceDist,
Dist, DistributionMetadata, FileLocation, GitSourceDist, IndexLocations, IndexMetadata,
IndexUrl, Name, PathBuiltDist, PathSourceDist, RegistryBuiltDist, RegistryBuiltWheel,
RegistrySourceDist, RemoteSource, Requirement, RequirementSource, RequiresPython, ResolvedDist,
SimplifiedMarkerTree, StaticMetadata, ToUrlError, UrlString,
};
use uv_fs::{PortablePath, PortablePathBuf, relative_to};
use uv_git::{RepositoryReference, ResolvedRepositoryReference};
use uv_git_types::{GitLfs, GitOid, GitReference, GitUrl, GitUrlParseError};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::Version;
use uv_pep508::{MarkerEnvironment, MarkerTree, VerbatimUrl, VerbatimUrlError, split_scheme};
use uv_platform_tags::{
AbiTag, IncompatibleTag, LanguageTag, PlatformTag, TagCompatibility, TagPriority, Tags,
};
use uv_pypi_types::{
ConflictKind, Conflicts, HashAlgorithm, HashDigest, HashDigests, Hashes, ParsedArchiveUrl,
ParsedGitUrl, PyProjectToml,
};
use uv_redacted::{DisplaySafeUrl, DisplaySafeUrlError};
use uv_small_str::SmallString;
use uv_types::{BuildContext, HashStrategy};
use uv_workspace::{Editability, WorkspaceMember};
use crate::exclude_newer::ExcludeNewerSpan;
use crate::fork_strategy::ForkStrategy;
pub(crate) use crate::lock::export::PylockTomlPackage;
pub use crate::lock::export::RequirementsTxtExport;
pub use crate::lock::export::{PylockToml, PylockTomlErrorKind, cyclonedx_json};
pub use crate::lock::installable::Installable;
pub use crate::lock::map::PackageMap;
pub use crate::lock::tree::TreeDisplay;
use crate::resolution::{AnnotatedDist, ResolutionGraphNode};
use crate::universal_marker::{ConflictMarker, UniversalMarker};
use crate::{
ExcludeNewer, ExcludeNewerPackage, ExcludeNewerValue, InMemoryIndex, MetadataResponse,
PrereleaseMode, ResolutionMode, ResolverOutput,
};
mod export;
mod installable;
mod map;
mod tree;
/// The current version of the lockfile format.
pub const VERSION: u32 = 1;
/// The current revision of the lockfile format.
// NOTE(review): presumably incremented for changes that remain compatible
// within the same `VERSION` — confirm against the lockfile parsing logic.
const REVISION: u32 = 3;
// Pre-computed universal markers describing common operating systems and CPU
// families. Each pairs a PEP 508 marker with a trivially-true conflict marker.
static LINUX_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 = MarkerTree::from_str("os_name == 'posix' and sys_platform == 'linux'").unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
static WINDOWS_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 = MarkerTree::from_str("os_name == 'nt' and sys_platform == 'win32'").unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
static MAC_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 = MarkerTree::from_str("os_name == 'posix' and sys_platform == 'darwin'").unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
static ANDROID_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 = MarkerTree::from_str("sys_platform == 'android'").unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
// Architecture markers match the spellings of `platform_machine` observed
// across platforms (e.g., `arm64` on macOS, `ARM64`/`AMD64` on Windows).
static ARM_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 =
        MarkerTree::from_str("platform_machine == 'aarch64' or platform_machine == 'arm64' or platform_machine == 'ARM64'")
            .unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
static X86_64_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 =
        MarkerTree::from_str("platform_machine == 'x86_64' or platform_machine == 'amd64' or platform_machine == 'AMD64'")
            .unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
static X86_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let pep508 = MarkerTree::from_str(
        "platform_machine == 'i686' or platform_machine == 'i386' or platform_machine == 'win32' or platform_machine == 'x86'",
    )
    .unwrap();
    UniversalMarker::new(pep508, ConflictMarker::TRUE)
});
// Pre-computed OS × architecture combinations, formed by intersecting
// (`and`ing) the base OS and architecture markers above.
static LINUX_ARM_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *LINUX_MARKERS;
    marker.and(*ARM_MARKERS);
    marker
});
static LINUX_X86_64_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *LINUX_MARKERS;
    marker.and(*X86_64_MARKERS);
    marker
});
static LINUX_X86_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *LINUX_MARKERS;
    marker.and(*X86_MARKERS);
    marker
});
static WINDOWS_ARM_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *WINDOWS_MARKERS;
    marker.and(*ARM_MARKERS);
    marker
});
static WINDOWS_X86_64_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *WINDOWS_MARKERS;
    marker.and(*X86_64_MARKERS);
    marker
});
static WINDOWS_X86_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *WINDOWS_MARKERS;
    marker.and(*X86_MARKERS);
    marker
});
static MAC_ARM_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *MAC_MARKERS;
    marker.and(*ARM_MARKERS);
    marker
});
static MAC_X86_64_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *MAC_MARKERS;
    marker.and(*X86_64_MARKERS);
    marker
});
static MAC_X86_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *MAC_MARKERS;
    marker.and(*X86_MARKERS);
    marker
});
static ANDROID_ARM_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *ANDROID_MARKERS;
    marker.and(*ARM_MARKERS);
    marker
});
static ANDROID_X86_64_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *ANDROID_MARKERS;
    marker.and(*X86_64_MARKERS);
    marker
});
static ANDROID_X86_MARKERS: LazyLock<UniversalMarker> = LazyLock::new(|| {
    let mut marker = *ANDROID_MARKERS;
    marker.and(*X86_MARKERS);
    marker
});
/// A distribution with its associated hash.
///
/// This pairs a [`Dist`] with the [`HashDigests`] for the specific wheel or
/// sdist that would be installed.
pub(crate) struct HashedDist {
    /// The wheel or source distribution.
    pub(crate) dist: Dist,
    /// The hash digests for that distribution.
    pub(crate) hashes: HashDigests,
}
/// A lockfile: the resolved set of packages, along with the inputs and options
/// that produced the resolution.
#[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)]
#[serde(try_from = "LockWire")]
pub struct Lock {
    /// The (major) version of the lockfile format.
    ///
    /// Changes to the major version indicate backwards- and forwards-incompatible changes to the
    /// lockfile format. A given uv version only supports a single major version of the lockfile
    /// format.
    ///
    /// In other words, a version of uv that supports version 2 of the lockfile format will not be
    /// able to read lockfiles generated under version 1 or 3.
    version: u32,
    /// The revision of the lockfile format.
    ///
    /// Changes to the revision indicate backwards-compatible changes to the lockfile format.
    /// In other words, versions of uv that only support revision 1 _will_ be able to read lockfiles
    /// with a revision greater than 1 (though they may ignore newer fields).
    revision: u32,
    /// If this lockfile was built from a forking resolution with non-identical forks, store the
    /// forks in the lockfile so we can recreate them in subsequent resolutions.
    fork_markers: Vec<UniversalMarker>,
    /// The conflicting groups/extras specified by the user.
    conflicts: Conflicts,
    /// The list of supported environments specified by the user.
    supported_environments: Vec<MarkerTree>,
    /// The list of required platforms specified by the user.
    required_environments: Vec<MarkerTree>,
    /// The range of supported Python versions.
    requires_python: RequiresPython,
    /// We discard the lockfile if these options don't match.
    options: ResolverOptions,
    /// The actual locked versions and their metadata.
    packages: Vec<Package>,
    /// A map from package ID to index in `packages`.
    ///
    /// This can be used to quickly lookup the full package for any ID
    /// in this lock. For example, the dependencies for each package are
    /// listed as package IDs. This map can be used to find the full
    /// package for each such dependency.
    ///
    /// It is guaranteed that every package in this lock has an entry in
    /// this map, and that every dependency for every package has an ID
    /// that exists in this map. That is, there are no dependencies that don't
    /// have a corresponding locked package entry in the same lockfile.
    by_id: FxHashMap<PackageId, usize>,
    /// The input requirements to the resolution.
    manifest: ResolverManifest,
}
impl Lock {
/// Initialize a [`Lock`] from a [`ResolverOutput`].
///
/// Walks the resolution graph three times: once to find packages locked at
/// multiple versions, once to lock every base package (and its dependencies),
/// and once to attach extra- and group-specific dependencies to the
/// already-locked base packages.
pub fn from_resolution(resolution: &ResolverOutput, root: &Path) -> Result<Self, LockError> {
    let mut packages = BTreeMap::new();
    let requires_python = resolution.requires_python.clone();
    // Determine the set of packages included at multiple versions.
    let mut seen = FxHashSet::default();
    let mut duplicates = FxHashSet::default();
    for node_index in resolution.graph.node_indices() {
        let ResolutionGraphNode::Dist(dist) = &resolution.graph[node_index] else {
            continue;
        };
        if !dist.is_base() {
            continue;
        }
        // A second sighting of the same name marks the package as duplicated.
        if !seen.insert(dist.name()) {
            duplicates.insert(dist.name());
        }
    }
    // Lock all base packages.
    for node_index in resolution.graph.node_indices() {
        let ResolutionGraphNode::Dist(dist) = &resolution.graph[node_index] else {
            continue;
        };
        if !dist.is_base() {
            continue;
        }
        // If there are multiple distributions for the same package, include the markers of all
        // forks that included the current distribution.
        let fork_markers = if duplicates.contains(dist.name()) {
            resolution
                .fork_markers
                .iter()
                .filter(|fork_markers| !fork_markers.is_disjoint(dist.marker))
                .copied()
                .collect()
        } else {
            vec![]
        };
        let mut package = Package::from_annotated_dist(dist, fork_markers, root)?;
        // Drop wheels that can never be installed under this node's markers.
        Self::remove_unreachable_wheels(resolution, &requires_python, node_index, &mut package);
        // Add all dependencies
        for edge in resolution.graph.edges(node_index) {
            let ResolutionGraphNode::Dist(dependency_dist) = &resolution.graph[edge.target()]
            else {
                continue;
            };
            let marker = *edge.weight();
            package.add_dependency(&requires_python, dependency_dist, marker, root)?;
        }
        let id = package.id.clone();
        if let Some(locked_dist) = packages.insert(id, package) {
            return Err(LockErrorKind::DuplicatePackage {
                id: locked_dist.id.clone(),
            }
            .into());
        }
    }
    // Lock all extras and development dependencies.
    for node_index in resolution.graph.node_indices() {
        let ResolutionGraphNode::Dist(dist) = &resolution.graph[node_index] else {
            continue;
        };
        if let Some(extra) = dist.extra.as_ref() {
            let id = PackageId::from_annotated_dist(dist, root)?;
            // The extra's base package must have been locked in the pass above.
            let Some(package) = packages.get_mut(&id) else {
                return Err(LockErrorKind::MissingExtraBase {
                    id,
                    extra: extra.clone(),
                }
                .into());
            };
            for edge in resolution.graph.edges(node_index) {
                let ResolutionGraphNode::Dist(dependency_dist) =
                    &resolution.graph[edge.target()]
                else {
                    continue;
                };
                let marker = *edge.weight();
                package.add_optional_dependency(
                    &requires_python,
                    extra.clone(),
                    dependency_dist,
                    marker,
                    root,
                )?;
            }
        }
        if let Some(group) = dist.group.as_ref() {
            let id = PackageId::from_annotated_dist(dist, root)?;
            // The group's base package must have been locked in the pass above.
            let Some(package) = packages.get_mut(&id) else {
                return Err(LockErrorKind::MissingDevBase {
                    id,
                    group: group.clone(),
                }
                .into());
            };
            for edge in resolution.graph.edges(node_index) {
                let ResolutionGraphNode::Dist(dependency_dist) =
                    &resolution.graph[edge.target()]
                else {
                    continue;
                };
                let marker = *edge.weight();
                package.add_group_dependency(
                    &requires_python,
                    group.clone(),
                    dependency_dist,
                    marker,
                    root,
                )?;
            }
        }
    }
    let packages = packages.into_values().collect();
    // Persist the resolver options so that a later run can detect a mismatch.
    let options = ResolverOptions {
        resolution_mode: resolution.options.resolution_mode,
        prerelease_mode: resolution.options.prerelease_mode,
        fork_strategy: resolution.options.fork_strategy,
        exclude_newer: resolution.options.exclude_newer.clone().into(),
    };
    let lock = Self::new(
        VERSION,
        REVISION,
        packages,
        requires_python,
        options,
        ResolverManifest::default(),
        Conflicts::empty(),
        vec![],
        vec![],
        resolution.fork_markers.clone(),
    )?;
    Ok(lock)
}
/// Remove wheels that can't be selected for installation due to environment markers.
///
/// For example, a package included under `sys_platform == 'win32'` does not need Linux
/// wheels.
fn remove_unreachable_wheels(
graph: &ResolverOutput,
requires_python: &RequiresPython,
node_index: NodeIndex,
locked_dist: &mut Package,
) {
// Remove wheels that don't match `requires-python` and can't be selected for installation.
locked_dist
.wheels
.retain(|wheel| requires_python.matches_wheel_tag(&wheel.filename));
// Filter by platform tags.
locked_dist.wheels.retain(|wheel| {
// Naively, we'd check whether `platform_system == 'Linux'` is disjoint, or
// `os_name == 'posix'` is disjoint, or `sys_platform == 'linux'` is disjoint (each on its
// own sufficient to exclude linux wheels), but due to
// `(A ∩ (B ∩ C) = ∅) => ((A ∩ B = ∅) or (A ∩ C = ∅))`
// a single disjointness check with the intersection is sufficient, so we have one
// constant per platform.
let platform_tags = wheel.filename.platform_tags();
if platform_tags.iter().all(PlatformTag::is_any) {
return true;
}
if platform_tags.iter().all(PlatformTag::is_linux) {
if platform_tags.iter().all(PlatformTag::is_arm) {
if graph.graph[node_index]
.marker()
.is_disjoint(*LINUX_ARM_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86_64) {
if graph.graph[node_index]
.marker()
.is_disjoint(*LINUX_X86_64_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86) {
if graph.graph[node_index]
.marker()
.is_disjoint(*LINUX_X86_MARKERS)
{
return false;
}
} else if graph.graph[node_index].marker().is_disjoint(*LINUX_MARKERS) {
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_windows) {
if platform_tags.iter().all(PlatformTag::is_arm) {
if graph.graph[node_index]
.marker()
.is_disjoint(*WINDOWS_ARM_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86_64) {
if graph.graph[node_index]
.marker()
.is_disjoint(*WINDOWS_X86_64_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86) {
if graph.graph[node_index]
.marker()
.is_disjoint(*WINDOWS_X86_MARKERS)
{
return false;
}
} else if graph.graph[node_index]
.marker()
.is_disjoint(*WINDOWS_MARKERS)
{
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_macos) {
if platform_tags.iter().all(PlatformTag::is_arm) {
if graph.graph[node_index]
.marker()
.is_disjoint(*MAC_ARM_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86_64) {
if graph.graph[node_index]
.marker()
.is_disjoint(*MAC_X86_64_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86) {
if graph.graph[node_index]
.marker()
.is_disjoint(*MAC_X86_MARKERS)
{
return false;
}
} else if graph.graph[node_index].marker().is_disjoint(*MAC_MARKERS) {
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_android) {
if platform_tags.iter().all(PlatformTag::is_arm) {
if graph.graph[node_index]
.marker()
.is_disjoint(*ANDROID_ARM_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86_64) {
if graph.graph[node_index]
.marker()
.is_disjoint(*ANDROID_X86_64_MARKERS)
{
return false;
}
} else if platform_tags.iter().all(PlatformTag::is_x86) {
if graph.graph[node_index]
.marker()
.is_disjoint(*ANDROID_X86_MARKERS)
{
return false;
}
} else if graph.graph[node_index]
.marker()
.is_disjoint(*ANDROID_MARKERS)
{
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_arm) {
if graph.graph[node_index].marker().is_disjoint(*ARM_MARKERS) {
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_x86_64) {
if graph.graph[node_index]
.marker()
.is_disjoint(*X86_64_MARKERS)
{
return false;
}
}
if platform_tags.iter().all(PlatformTag::is_x86) {
if graph.graph[node_index].marker().is_disjoint(*X86_MARKERS) {
return false;
}
}
true
});
}
/// Initialize a [`Lock`] from a list of [`Package`] entries.
///
/// Canonicalizes the ordering of packages and their dependencies, rejects
/// duplicate packages and dependencies, prunes references to non-existent
/// extras, and validates that every dependency refers to a locked package.
fn new(
    version: u32,
    revision: u32,
    mut packages: Vec<Package>,
    requires_python: RequiresPython,
    options: ResolverOptions,
    manifest: ResolverManifest,
    conflicts: Conflicts,
    supported_environments: Vec<MarkerTree>,
    required_environments: Vec<MarkerTree>,
    fork_markers: Vec<UniversalMarker>,
) -> Result<Self, LockError> {
    // Put all dependencies for each package in a canonical order and
    // check for duplicates.
    for package in &mut packages {
        package.dependencies.sort();
        // After sorting, any duplicates are necessarily adjacent.
        for windows in package.dependencies.windows(2) {
            let (dep1, dep2) = (&windows[0], &windows[1]);
            if dep1 == dep2 {
                return Err(LockErrorKind::DuplicateDependency {
                    id: package.id.clone(),
                    dependency: dep1.clone(),
                }
                .into());
            }
        }
        // Perform the same validation for optional dependencies.
        for (extra, dependencies) in &mut package.optional_dependencies {
            dependencies.sort();
            for windows in dependencies.windows(2) {
                let (dep1, dep2) = (&windows[0], &windows[1]);
                if dep1 == dep2 {
                    return Err(LockErrorKind::DuplicateOptionalDependency {
                        id: package.id.clone(),
                        extra: extra.clone(),
                        dependency: dep1.clone(),
                    }
                    .into());
                }
            }
        }
        // Perform the same validation for dev dependencies.
        for (group, dependencies) in &mut package.dependency_groups {
            dependencies.sort();
            for windows in dependencies.windows(2) {
                let (dep1, dep2) = (&windows[0], &windows[1]);
                if dep1 == dep2 {
                    return Err(LockErrorKind::DuplicateDevDependency {
                        id: package.id.clone(),
                        group: group.clone(),
                        dependency: dep1.clone(),
                    }
                    .into());
                }
            }
        }
    }
    packages.sort_by(|dist1, dist2| dist1.id.cmp(&dist2.id));
    // Check for duplicate package IDs and also build up the map for
    // packages keyed by their ID.
    let mut by_id = FxHashMap::default();
    for (i, dist) in packages.iter().enumerate() {
        if by_id.insert(dist.id.clone(), i).is_some() {
            return Err(LockErrorKind::DuplicatePackage {
                id: dist.id.clone(),
            }
            .into());
        }
    }
    // Build up a map from ID to extras.
    let mut extras_by_id = FxHashMap::default();
    for dist in &packages {
        for extra in dist.optional_dependencies.keys() {
            extras_by_id
                .entry(dist.id.clone())
                .or_insert_with(FxHashSet::default)
                .insert(extra.clone());
        }
    }
    // Remove any non-existent extras (e.g., extras that were requested but don't exist).
    for dist in &mut packages {
        for dep in dist
            .dependencies
            .iter_mut()
            .chain(dist.optional_dependencies.values_mut().flatten())
            .chain(dist.dependency_groups.values_mut().flatten())
        {
            dep.extra.retain(|extra| {
                extras_by_id
                    .get(&dep.package_id)
                    .is_some_and(|extras| extras.contains(extra))
            });
        }
    }
    // Check that every dependency has an entry in `by_id`. If any don't,
    // it implies we somehow have a dependency with no corresponding locked
    // package.
    for dist in &packages {
        for dep in &dist.dependencies {
            if !by_id.contains_key(&dep.package_id) {
                return Err(LockErrorKind::UnrecognizedDependency {
                    id: dist.id.clone(),
                    dependency: dep.clone(),
                }
                .into());
            }
        }
        // Perform the same validation for optional dependencies.
        for dependencies in dist.optional_dependencies.values() {
            for dep in dependencies {
                if !by_id.contains_key(&dep.package_id) {
                    return Err(LockErrorKind::UnrecognizedDependency {
                        id: dist.id.clone(),
                        dependency: dep.clone(),
                    }
                    .into());
                }
            }
        }
        // Perform the same validation for dev dependencies.
        for dependencies in dist.dependency_groups.values() {
            for dep in dependencies {
                if !by_id.contains_key(&dep.package_id) {
                    return Err(LockErrorKind::UnrecognizedDependency {
                        id: dist.id.clone(),
                        dependency: dep.clone(),
                    }
                    .into());
                }
            }
        }
        // Also check that our sources are consistent with whether we have
        // hashes or not.
        if let Some(requires_hash) = dist.id.source.requires_hash() {
            for wheel in &dist.wheels {
                if requires_hash != wheel.hash.is_some() {
                    return Err(LockErrorKind::Hash {
                        id: dist.id.clone(),
                        artifact_type: "wheel",
                        expected: requires_hash,
                    }
                    .into());
                }
            }
        }
    }
    let lock = Self {
        version,
        revision,
        fork_markers,
        conflicts,
        supported_environments,
        required_environments,
        requires_python,
        options,
        packages,
        by_id,
        manifest,
    };
    Ok(lock)
}
/// Record the requirements that were used to generate this lock.
#[must_use]
pub fn with_manifest(mut self, manifest: ResolverManifest) -> Self {
self.manifest = manifest;
self
}
/// Record the conflicting groups that were used to generate this lock.
#[must_use]
pub fn with_conflicts(mut self, conflicts: Conflicts) -> Self {
self.conflicts = conflicts;
self
}
/// Record the supported environments that were used to generate this lock.
#[must_use]
pub fn with_supported_environments(mut self, supported_environments: Vec<MarkerTree>) -> Self {
    // The environments may come straight from what's written in `pyproject.toml`,
    // where markers are assumed to be simplified (i.e., written under the assumption
    // that `requires-python` holds). A `Lock` always stores non-simplified markers
    // internally, so re-complexify them here.
    //
    // The nice thing about complexifying is that it's a no-op if the markers given
    // have already been complexified.
    let mut complexified = Vec::with_capacity(supported_environments.len());
    for marker in supported_environments {
        complexified.push(self.requires_python.complexify_markers(marker));
    }
    self.supported_environments = complexified;
    self
}
/// Record the required platforms that were used to generate this lock.
#[must_use]
pub fn with_required_environments(mut self, required_environments: Vec<MarkerTree>) -> Self {
    // As in `with_supported_environments`, re-complexify the markers (a no-op when
    // they are already complexified).
    let mut complexified = Vec::with_capacity(required_environments.len());
    for marker in required_environments {
        complexified.push(self.requires_python.complexify_markers(marker));
    }
    self.required_environments = complexified;
    self
}
/// Returns `true` if this [`Lock`] includes `provides-extra` metadata.
pub fn supports_provides_extra(&self) -> bool {
    // `provides-extra` was added in Version 1 Revision 1, i.e.,
    // `(version, revision) >= (1, 1)`.
    match self.version() {
        0 => false,
        1 => self.revision() >= 1,
        _ => true,
    }
}
/// Returns `true` if this [`Lock`] includes entries for empty `dependency-group` metadata.
pub fn includes_empty_groups(&self) -> bool {
    // Empty dependency groups are included as of https://github.com/astral-sh/uv/pull/8598;
    // Version 1 Revision 1 is the first revision published after that change, i.e.,
    // `(version, revision) >= (1, 1)`.
    match self.version() {
        0 => false,
        1 => self.revision() >= 1,
        _ => true,
    }
}
/// Returns the lockfile (major) version.
pub fn version(&self) -> u32 {
    self.version
}
/// Returns the lockfile revision (backwards-compatible within a major version).
pub fn revision(&self) -> u32 {
    self.revision
}
/// Returns the number of packages in the lockfile.
pub fn len(&self) -> usize {
    self.packages.len()
}
/// Returns `true` if the lockfile contains no packages.
pub fn is_empty(&self) -> bool {
    self.packages.is_empty()
}
/// Returns the [`Package`] entries in this lock.
pub fn packages(&self) -> &[Package] {
    &self.packages
}
/// Returns the supported Python version range for the lockfile.
pub fn requires_python(&self) -> &RequiresPython {
    &self.requires_python
}
/// Returns the resolution mode used to generate this lock.
pub fn resolution_mode(&self) -> ResolutionMode {
    self.options.resolution_mode
}
/// Returns the pre-release mode used to generate this lock.
pub fn prerelease_mode(&self) -> PrereleaseMode {
    self.options.prerelease_mode
}
/// Returns the multi-version mode used to generate this lock.
pub fn fork_strategy(&self) -> ForkStrategy {
    self.options.fork_strategy
}
/// Returns the exclude newer setting used to generate this lock.
pub fn exclude_newer(&self) -> ExcludeNewer {
    // TODO(zanieb): It'd be nice not to hide this clone here, but I am hesitant to introduce
    // a whole new `ExcludeNewerRef` type just for this
    self.options.exclude_newer.clone().into()
}
/// Returns the conflicting groups that were used to generate this lock.
pub fn conflicts(&self) -> &Conflicts {
    &self.conflicts
}
/// Returns the supported environments that were used to generate this lock.
pub fn supported_environments(&self) -> &[MarkerTree] {
    &self.supported_environments
}
/// Returns the required platforms that were used to generate this lock.
pub fn required_environments(&self) -> &[MarkerTree] {
    &self.required_environments
}
/// Returns the workspace members that were used to generate this lock.
pub fn members(&self) -> &BTreeSet<PackageName> {
    &self.manifest.members
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/export/pylock_toml.rs | crates/uv-resolver/src/lock/export/pylock_toml.rs | use std::borrow::Cow;
use std::ffi::OsStr;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use jiff::Timestamp;
use jiff::civil::{Date, DateTime, Time};
use jiff::tz::{Offset, TimeZone};
use serde::Deserialize;
use toml_edit::{Array, ArrayOfTables, Item, Table, value};
use url::Url;
use uv_cache_key::RepositoryUrl;
use uv_configuration::{
BuildOptions, DependencyGroupsWithDefaults, EditableMode, ExtrasSpecificationWithDefaults,
InstallOptions,
};
use uv_distribution_filename::{
BuildTag, DistExtension, ExtensionError, SourceDistExtension, SourceDistFilename,
SourceDistFilenameError, WheelFilename, WheelFilenameError,
};
use uv_distribution_types::{
BuiltDist, DirectUrlBuiltDist, DirectUrlSourceDist, DirectorySourceDist, Dist, Edge,
FileLocation, GitSourceDist, IndexUrl, Name, Node, PathBuiltDist, PathSourceDist,
RegistryBuiltDist, RegistryBuiltWheel, RegistrySourceDist, RemoteSource, RequiresPython,
Resolution, ResolvedDist, SourceDist, ToUrlError, UrlString,
};
use uv_fs::{PortablePathBuf, relative_to};
use uv_git::{RepositoryReference, ResolvedRepositoryReference};
use uv_git_types::{GitLfs, GitOid, GitReference, GitUrl, GitUrlParseError};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::Version;
use uv_pep508::{MarkerEnvironment, MarkerTree, VerbatimUrl};
use uv_platform_tags::{TagCompatibility, TagPriority, Tags};
use uv_pypi_types::{HashDigests, Hashes, ParsedGitUrl, VcsKind};
use uv_redacted::DisplaySafeUrl;
use uv_small_str::SmallString;
use crate::lock::export::ExportableRequirements;
use crate::lock::{Source, WheelTagHint, each_element_on_its_line_array};
use crate::resolution::ResolutionGraphNode;
use crate::{Installable, LockError, ResolverOutput};
/// The kinds of errors that can arise when reading, writing, or installing from a
/// `pylock.toml` file.
#[derive(Debug, thiserror::Error)]
pub enum PylockTomlErrorKind {
    // A package may carry at most one source; each pairing below is mutually exclusive.
    #[error(
        "Package `{0}` includes both a registry (`packages.wheels`) and a directory source (`packages.directory`)"
    )]
    WheelWithDirectory(PackageName),
    #[error(
        "Package `{0}` includes both a registry (`packages.wheels`) and a VCS source (`packages.vcs`)"
    )]
    WheelWithVcs(PackageName),
    #[error(
        "Package `{0}` includes both a registry (`packages.wheels`) and an archive source (`packages.archive`)"
    )]
    WheelWithArchive(PackageName),
    #[error(
        "Package `{0}` includes both a registry (`packages.sdist`) and a directory source (`packages.directory`)"
    )]
    SdistWithDirectory(PackageName),
    #[error(
        "Package `{0}` includes both a registry (`packages.sdist`) and a VCS source (`packages.vcs`)"
    )]
    SdistWithVcs(PackageName),
    #[error(
        "Package `{0}` includes both a registry (`packages.sdist`) and an archive source (`packages.archive`)"
    )]
    SdistWithArchive(PackageName),
    #[error(
        "Package `{0}` includes both a directory (`packages.directory`) and a VCS source (`packages.vcs`)"
    )]
    DirectoryWithVcs(PackageName),
    #[error(
        "Package `{0}` includes both a directory (`packages.directory`) and an archive source (`packages.archive`)"
    )]
    DirectoryWithArchive(PackageName),
    #[error(
        "Package `{0}` includes both a VCS (`packages.vcs`) and an archive source (`packages.archive`)"
    )]
    VcsWithArchive(PackageName),
    #[error(
        "Package `{0}` must include one of: `wheels`, `directory`, `archive`, `sdist`, or `vcs`"
    )]
    MissingSource(PackageName),
    #[error("Package `{0}` does not include a compatible wheel for the current platform")]
    MissingWheel(PackageName),
    // Malformed or incomplete source entries.
    #[error("`packages.wheel` entry for `{0}` must have a `path` or `url`")]
    WheelMissingPathUrl(PackageName),
    #[error("`packages.sdist` entry for `{0}` must have a `path` or `url`")]
    SdistMissingPathUrl(PackageName),
    #[error("`packages.archive` entry for `{0}` must have a `path` or `url`")]
    ArchiveMissingPathUrl(PackageName),
    #[error("`packages.vcs` entry for `{0}` must have a `url` or `path`")]
    VcsMissingPathUrl(PackageName),
    #[error("URL must end in a valid wheel filename: `{0}`")]
    UrlMissingFilename(DisplaySafeUrl),
    #[error("Path must end in a valid wheel filename: `{0}`")]
    PathMissingFilename(Box<Path>),
    #[error("Failed to convert path to URL")]
    PathToUrl,
    #[error("Failed to convert URL to path")]
    UrlToPath,
    // Installation failures driven by the build options and available artifacts.
    #[error(
        "Package `{0}` can't be installed because it doesn't have a source distribution or wheel for the current platform"
    )]
    NeitherSourceDistNorWheel(PackageName),
    #[error(
        "Package `{0}` can't be installed because it is marked as both `--no-binary` and `--no-build`"
    )]
    NoBinaryNoBuild(PackageName),
    #[error(
        "Package `{0}` can't be installed because it is marked as `--no-binary` but has no source distribution"
    )]
    NoBinary(PackageName),
    #[error(
        "Package `{0}` can't be installed because it is marked as `--no-build` but has no binary distribution"
    )]
    NoBuild(PackageName),
    #[error(
        "Package `{0}` can't be installed because the binary distribution is incompatible with the current platform"
    )]
    IncompatibleWheelOnly(PackageName),
    #[error(
        "Package `{0}` can't be installed because it is marked as `--no-binary` but is itself a binary distribution"
    )]
    NoBinaryWheelOnly(PackageName),
    // Lower-level errors, forwarded verbatim.
    #[error(transparent)]
    WheelFilename(#[from] WheelFilenameError),
    #[error(transparent)]
    SourceDistFilename(#[from] SourceDistFilenameError),
    #[error(transparent)]
    ToUrl(#[from] ToUrlError),
    #[error(transparent)]
    GitUrlParse(#[from] GitUrlParseError),
    #[error(transparent)]
    LockError(#[from] LockError),
    #[error(transparent)]
    Extension(#[from] ExtensionError),
    #[error(transparent)]
    Jiff(#[from] jiff::Error),
    #[error(transparent)]
    Io(#[from] std::io::Error),
    #[error(transparent)]
    Deserialize(#[from] toml::de::Error),
}
/// An error raised when reading, writing, or installing from a `pylock.toml` file.
///
/// Pairs a [`PylockTomlErrorKind`] with an optional hint that is appended to the
/// rendered message.
#[derive(Debug)]
pub struct PylockTomlError {
    // The underlying error kind (boxed).
    kind: Box<PylockTomlErrorKind>,
    // An optional wheel-tag hint, rendered after the main message.
    hint: Option<WheelTagHint>,
}
impl std::error::Error for PylockTomlError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Delegate to the boxed kind, which carries the actual error chain.
        self.kind.as_ref().source()
    }
}
impl std::fmt::Display for PylockTomlError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render the underlying kind, then the hint (if any), separated by a blank line.
        write!(f, "{}", self.kind)?;
        match &self.hint {
            Some(hint) => write!(f, "\n\n{hint}"),
            None => Ok(()),
        }
    }
}
impl<E> From<E> for PylockTomlError
where
    PylockTomlErrorKind: From<E>,
{
    /// Wrap anything convertible into a [`PylockTomlErrorKind`], with no hint attached.
    fn from(err: E) -> Self {
        let kind = PylockTomlErrorKind::from(err);
        Self {
            kind: Box::new(kind),
            hint: None,
        }
    }
}
/// The top-level `pylock.toml` document.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct PylockToml {
    /// The version of the `pylock.toml` format (serialized as `lock-version`).
    lock_version: Version,
    /// The tool that generated the lockfile (always `uv` when written by uv).
    created_by: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub requires_python: Option<RequiresPython>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub extras: Vec<ExtraName>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub dependency_groups: Vec<GroupName>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub default_groups: Vec<GroupName>,
    /// The locked packages.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub packages: Vec<PylockTomlPackage>,
    // Attestation identities; uv never emits these at time of writing.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    attestation_identities: Vec<PylockTomlAttestationIdentity>,
}
/// A single `[[packages]]` entry in a `pylock.toml` file.
///
/// Exactly one source (`wheels`, `sdist`, `directory`, `vcs`, or `archive`) is
/// expected; conflicting combinations are rejected (see [`PylockTomlErrorKind`]).
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct PylockTomlPackage {
    pub name: PackageName,
    // Omitted when it can't be guaranteed to match the code in use (e.g., source trees).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<Version>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub index: Option<DisplaySafeUrl>,
    // Environment marker under which the package applies.
    #[serde(
        skip_serializing_if = "uv_pep508::marker::ser::is_empty",
        serialize_with = "uv_pep508::marker::ser::serialize",
        default
    )]
    marker: MarkerTree,
    #[serde(skip_serializing_if = "Option::is_none")]
    requires_python: Option<RequiresPython>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    dependencies: Vec<PylockTomlDependency>,
    #[serde(skip_serializing_if = "Option::is_none")]
    vcs: Option<PylockTomlVcs>,
    #[serde(skip_serializing_if = "Option::is_none")]
    directory: Option<PylockTomlDirectory>,
    #[serde(skip_serializing_if = "Option::is_none")]
    archive: Option<PylockTomlArchive>,
    #[serde(skip_serializing_if = "Option::is_none")]
    sdist: Option<PylockTomlSdist>,
    #[serde(skip_serializing_if = "Option::is_none")]
    wheels: Option<Vec<PylockTomlWheel>>,
}
/// A `packages.dependencies` entry; currently carries no fields.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(clippy::empty_structs_with_brackets)]
struct PylockTomlDependency {}
/// A `packages.directory` source: a local source tree.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlDirectory {
    path: PortablePathBuf,
    #[serde(skip_serializing_if = "Option::is_none")]
    editable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    subdirectory: Option<PortablePathBuf>,
}
/// A `packages.vcs` source: a version-control checkout pinned to a commit.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlVcs {
    r#type: VcsKind,
    // Either `url` or `path` must be present (see `VcsMissingPathUrl`).
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<DisplaySafeUrl>,
    #[serde(skip_serializing_if = "Option::is_none")]
    path: Option<PortablePathBuf>,
    #[serde(skip_serializing_if = "Option::is_none")]
    requested_revision: Option<String>,
    commit_id: GitOid,
    #[serde(skip_serializing_if = "Option::is_none")]
    subdirectory: Option<PortablePathBuf>,
}
/// A `packages.archive` source: a wheel or sdist at an arbitrary URL or path.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlArchive {
    // Either `url` or `path` must be present (see `ArchiveMissingPathUrl`).
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<DisplaySafeUrl>,
    #[serde(skip_serializing_if = "Option::is_none")]
    path: Option<PortablePathBuf>,
    #[serde(skip_serializing_if = "Option::is_none")]
    size: Option<u64>,
    // Serialized as a TOML datetime via the `timestamp_*_toml_datetime` helpers.
    #[serde(
        skip_serializing_if = "Option::is_none",
        serialize_with = "timestamp_to_toml_datetime",
        deserialize_with = "timestamp_from_toml_datetime",
        default
    )]
    upload_time: Option<Timestamp>,
    #[serde(skip_serializing_if = "Option::is_none")]
    subdirectory: Option<PortablePathBuf>,
    hashes: Hashes,
}
/// A `packages.sdist` entry: a registry source distribution.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlSdist {
    #[serde(skip_serializing_if = "Option::is_none")]
    name: Option<SmallString>,
    // Either `url` or `path` must be present (see `SdistMissingPathUrl`).
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<DisplaySafeUrl>,
    #[serde(skip_serializing_if = "Option::is_none")]
    path: Option<PortablePathBuf>,
    // Serialized as a TOML datetime via the `timestamp_*_toml_datetime` helpers.
    #[serde(
        skip_serializing_if = "Option::is_none",
        serialize_with = "timestamp_to_toml_datetime",
        deserialize_with = "timestamp_from_toml_datetime",
        default
    )]
    upload_time: Option<Timestamp>,
    #[serde(skip_serializing_if = "Option::is_none")]
    size: Option<u64>,
    hashes: Hashes,
}
/// A `packages.wheels` entry: a registry wheel.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlWheel {
    #[serde(skip_serializing_if = "Option::is_none")]
    name: Option<WheelFilename>,
    // Either `url` or `path` must be present (see `WheelMissingPathUrl`).
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<DisplaySafeUrl>,
    #[serde(skip_serializing_if = "Option::is_none")]
    path: Option<PortablePathBuf>,
    // Serialized as a TOML datetime via the `timestamp_*_toml_datetime` helpers.
    #[serde(
        skip_serializing_if = "Option::is_none",
        serialize_with = "timestamp_to_toml_datetime",
        deserialize_with = "timestamp_from_toml_datetime",
        default
    )]
    upload_time: Option<Timestamp>,
    #[serde(skip_serializing_if = "Option::is_none")]
    size: Option<u64>,
    hashes: Hashes,
}
/// An `attestation-identities` entry.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PylockTomlAttestationIdentity {
    kind: String,
}
impl<'lock> PylockToml {
/// Construct a [`PylockToml`] from a [`ResolverOutput`].
///
/// If `tags` is provided, only wheels compatible with the given tags will be included.
/// If `build_options` is provided, packages marked as `--only-binary` will not include
/// source distributions.
pub fn from_resolution(
resolution: &ResolverOutput,
omit: &[PackageName],
install_path: &Path,
tags: Option<&Tags>,
build_options: &BuildOptions,
) -> Result<Self, PylockTomlErrorKind> {
// The lock version is always `1.0` at time of writing.
let lock_version = Version::new([1, 0]);
// The created by field is always `uv` at time of writing.
let created_by = "uv".to_string();
// Use the `requires-python` from the target lockfile.
let requires_python = resolution.requires_python.clone();
// We don't support locking for multiple extras at time of writing.
let extras = vec![];
// We don't support locking for multiple dependency groups at time of writing.
let dependency_groups = vec![];
// We don't support locking for multiple dependency groups at time of writing.
let default_groups = vec![];
// We don't support attestation identities at time of writing.
let attestation_identities = vec![];
// Convert each node to a `pylock.toml`-style package.
let mut packages = Vec::with_capacity(resolution.graph.node_count());
for node_index in resolution.graph.node_indices() {
let ResolutionGraphNode::Dist(node) = &resolution.graph[node_index] else {
continue;
};
if !node.is_base() {
continue;
}
let ResolvedDist::Installable { dist, version } = &node.dist else {
continue;
};
if omit.contains(dist.name()) {
continue;
}
// "The version MUST NOT be included when it cannot be guaranteed to be consistent with the code used (i.e. when a source tree is used)."
let version = version
.as_ref()
.filter(|_| !matches!(&**dist, Dist::Source(SourceDist::Directory(..))));
// Create a `pylock.toml`-style package.
let mut package = PylockTomlPackage {
name: dist.name().clone(),
version: version.cloned(),
marker: node.marker.pep508(),
requires_python: None,
dependencies: vec![],
index: None,
vcs: None,
directory: None,
archive: None,
sdist: None,
wheels: None,
};
match &**dist {
Dist::Built(BuiltDist::DirectUrl(dist)) => {
package.archive = Some(PylockTomlArchive {
url: Some((*dist.location).clone()),
path: None,
size: dist.size(),
upload_time: None,
subdirectory: None,
hashes: Hashes::from(node.hashes.clone()),
});
}
Dist::Built(BuiltDist::Path(dist)) => {
let path = relative_to(&dist.install_path, install_path)
.map(Box::<Path>::from)
.unwrap_or_else(|_| dist.install_path.clone());
package.archive = Some(PylockTomlArchive {
url: None,
path: Some(PortablePathBuf::from(path)),
size: dist.size(),
upload_time: None,
subdirectory: None,
hashes: Hashes::from(node.hashes.clone()),
});
}
Dist::Built(BuiltDist::Registry(dist)) => {
// Filter wheels based on build options (--no-binary).
let no_binary = build_options.no_binary_package(dist.name());
if !no_binary {
// Filter wheels based on tag compatibility.
let wheels: Vec<_> = dist
.wheels
.iter()
.filter(|wheel| {
tags.is_none_or(|tags| {
wheel.filename.compatibility(tags).is_compatible()
})
})
.collect();
if !wheels.is_empty() {
package.wheels = Some(
wheels
.into_iter()
.map(|wheel| {
let url = wheel
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?;
Ok(PylockTomlWheel {
// Optional "when the last component of path/ url would be the same value".
name: if url.filename().is_ok_and(|filename| {
filename == *wheel.file.filename
}) {
None
} else {
Some(wheel.filename.clone())
},
upload_time: wheel
.file
.upload_time_utc_ms
.map(Timestamp::from_millisecond)
.transpose()?,
url: Some(
wheel
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?,
),
path: None,
size: wheel.file.size,
hashes: Hashes::from(wheel.file.hashes.clone()),
})
})
.collect::<Result<Vec<_>, PylockTomlErrorKind>>()?,
);
}
}
// Filter sdist based on build options (--only-binary).
let no_build = build_options.no_build_package(dist.name());
if !no_build {
if let Some(sdist) = dist.sdist.as_ref() {
let url = sdist
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?;
package.sdist = Some(PylockTomlSdist {
// Optional "when the last component of path/ url would be the same value".
name: if url
.filename()
.is_ok_and(|filename| filename == *sdist.file.filename)
{
None
} else {
Some(sdist.file.filename.clone())
},
upload_time: sdist
.file
.upload_time_utc_ms
.map(Timestamp::from_millisecond)
.transpose()?,
url: Some(url),
path: None,
size: sdist.file.size,
hashes: Hashes::from(sdist.file.hashes.clone()),
});
}
}
}
Dist::Source(SourceDist::DirectUrl(dist)) => {
package.archive = Some(PylockTomlArchive {
url: Some((*dist.location).clone()),
path: None,
size: dist.size(),
upload_time: None,
subdirectory: dist.subdirectory.clone().map(PortablePathBuf::from),
hashes: Hashes::from(node.hashes.clone()),
});
}
Dist::Source(SourceDist::Directory(dist)) => {
let path = relative_to(&dist.install_path, install_path)
.map(Box::<Path>::from)
.unwrap_or_else(|_| dist.install_path.clone());
package.directory = Some(PylockTomlDirectory {
path: PortablePathBuf::from(path),
editable: dist.editable,
subdirectory: None,
});
}
Dist::Source(SourceDist::Git(dist)) => {
package.vcs = Some(PylockTomlVcs {
r#type: VcsKind::Git,
url: Some(dist.git.repository().clone()),
path: None,
requested_revision: dist.git.reference().as_str().map(ToString::to_string),
commit_id: dist.git.precise().unwrap_or_else(|| {
panic!("Git distribution is missing a precise hash: {dist}")
}),
subdirectory: dist.subdirectory.clone().map(PortablePathBuf::from),
});
}
Dist::Source(SourceDist::Path(dist)) => {
let path = relative_to(&dist.install_path, install_path)
.map(Box::<Path>::from)
.unwrap_or_else(|_| dist.install_path.clone());
package.archive = Some(PylockTomlArchive {
url: None,
path: Some(PortablePathBuf::from(path)),
size: dist.size(),
upload_time: None,
subdirectory: None,
hashes: Hashes::from(node.hashes.clone()),
});
}
Dist::Source(SourceDist::Registry(dist)) => {
// Filter wheels based on build options (--no-binary).
let no_binary = build_options.no_binary_package(&dist.name);
if !no_binary {
// Filter wheels based on tag compatibility.
let wheels: Vec<_> = dist
.wheels
.iter()
.filter(|wheel| {
tags.is_none_or(|tags| {
wheel.filename.compatibility(tags).is_compatible()
})
})
.collect();
if !wheels.is_empty() {
package.wheels = Some(
wheels
.into_iter()
.map(|wheel| {
let url = wheel
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?;
Ok(PylockTomlWheel {
// Optional "when the last component of path/ url would be the same value".
name: if url.filename().is_ok_and(|filename| {
filename == *wheel.file.filename
}) {
None
} else {
Some(wheel.filename.clone())
},
upload_time: wheel
.file
.upload_time_utc_ms
.map(Timestamp::from_millisecond)
.transpose()?,
url: Some(
wheel
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?,
),
path: None,
size: wheel.file.size,
hashes: Hashes::from(wheel.file.hashes.clone()),
})
})
.collect::<Result<Vec<_>, PylockTomlErrorKind>>()?,
);
}
}
// Filter sdist based on build options (--only-binary).
let no_build = build_options.no_build_package(&dist.name);
if !no_build {
let url = dist.file.url.to_url().map_err(PylockTomlErrorKind::ToUrl)?;
package.sdist = Some(PylockTomlSdist {
// Optional "when the last component of path/ url would be the same value".
name: if url
.filename()
.is_ok_and(|filename| filename == *dist.file.filename)
{
None
} else {
Some(dist.file.filename.clone())
},
upload_time: dist
.file
.upload_time_utc_ms
.map(Timestamp::from_millisecond)
.transpose()?,
url: Some(url),
path: None,
size: dist.file.size,
hashes: Hashes::from(dist.file.hashes.clone()),
});
}
}
}
// Add the package to the list of packages.
packages.push(package);
}
// Sort the packages by name, then version.
packages.sort_by(|a, b| a.name.cmp(&b.name).then(a.version.cmp(&b.version)));
// Return the constructed `pylock.toml`.
Ok(Self {
lock_version,
created_by,
requires_python: Some(requires_python),
extras,
dependency_groups,
default_groups,
packages,
attestation_identities,
})
}
/// Construct a [`PylockToml`] from a uv lockfile.
pub fn from_lock(
target: &impl Installable<'lock>,
prune: &[PackageName],
extras: &ExtrasSpecificationWithDefaults,
dev: &DependencyGroupsWithDefaults,
annotate: bool,
editable: Option<EditableMode>,
install_options: &'lock InstallOptions,
) -> Result<Self, PylockTomlErrorKind> {
// Extract the packages from the lock file.
let ExportableRequirements(mut nodes) = ExportableRequirements::from_lock(
target,
prune,
extras,
dev,
annotate,
install_options,
)?;
// Sort the nodes.
nodes.sort_unstable_by_key(|node| &node.package.id);
// The lock version is always `1.0` at time of writing.
let lock_version = Version::new([1, 0]);
// The created by field is always `uv` at time of writing.
let created_by = "uv".to_string();
// Use the `requires-python` from the target lockfile.
let requires_python = target.lock().requires_python.clone();
// We don't support locking for multiple extras at time of writing.
let extras = vec![];
// We don't support locking for multiple dependency groups at time of writing.
let dependency_groups = vec![];
// We don't support locking for multiple dependency groups at time of writing.
let default_groups = vec![];
// We don't support attestation identities at time of writing.
let attestation_identities = vec![];
// Convert each node to a `pylock.toml`-style package.
let mut packages = Vec::with_capacity(nodes.len());
for node in nodes {
let package = node.package;
// Extract the `packages.wheels` field.
//
// This field only includes wheels from a registry. Wheels included via direct URL or
// direct path instead map to the `packages.archive` field.
let wheels = match &package.id.source {
Source::Registry(source) => {
let wheels = package
.wheels
.iter()
.map(|wheel| wheel.to_registry_wheel(source, target.install_path()))
.collect::<Result<Vec<RegistryBuiltWheel>, LockError>>()?;
Some(
wheels
.into_iter()
.map(|wheel| {
let url = wheel
.file
.url
.to_url()
.map_err(PylockTomlErrorKind::ToUrl)?;
Ok(PylockTomlWheel {
// Optional "when the last component of path/ url would be the same value".
name: if url
.filename()
.is_ok_and(|filename| filename == *wheel.file.filename)
{
None
} else {
Some(wheel.filename.clone())
},
upload_time: wheel
.file
.upload_time_utc_ms
.map(Timestamp::from_millisecond)
.transpose()?,
url: Some(url),
path: None,
size: wheel.file.size,
hashes: Hashes::from(wheel.file.hashes),
})
})
.collect::<Result<Vec<_>, PylockTomlErrorKind>>()?,
)
}
Source::Path(..) => None,
Source::Git(..) => None,
Source::Direct(..) => None,
Source::Directory(..) => None,
Source::Editable(..) => None,
Source::Virtual(..) => {
// Omit virtual packages entirely; they shouldn't be installed.
continue;
}
};
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/export/cyclonedx_json.rs | crates/uv-resolver/src/lock/export/cyclonedx_json.rs | use std::collections::HashMap;
use std::path::Path;
use cyclonedx_bom::models::component::Classification;
use cyclonedx_bom::models::dependency::{Dependencies, Dependency};
use cyclonedx_bom::models::metadata::Metadata;
use cyclonedx_bom::models::property::{Properties, Property};
use cyclonedx_bom::models::tool::{Tool, Tools};
use cyclonedx_bom::prelude::{Bom, Component, Components, NormalizedString};
use itertools::Itertools;
use percent_encoding::{AsciiSet, CONTROLS, percent_encode};
use rustc_hash::FxHashSet;
use uv_configuration::{
DependencyGroupsWithDefaults, ExtrasSpecificationWithDefaults, InstallOptions,
};
use uv_fs::PortablePath;
use uv_normalize::PackageName;
use uv_pep508::MarkerTree;
use uv_preview::{Preview, PreviewFeatures};
use uv_warnings::warn_user;
use crate::lock::export::{ExportableRequirement, ExportableRequirements};
use crate::lock::{LockErrorKind, Package, PackageId, RegistrySource, Source};
use crate::{Installable, LockError};
/// Character set for percent-encoding PURL components, copied from packageurl.rs (<https://github.com/scm-rs/packageurl.rs/blob/a725aa0ab332934c350641508017eb09ddfa0813/src/purl.rs#L18>).
// NOTE(review): copied verbatim from the upstream crate; if purls produced here
// ever disagree with packageurl.rs, check this set against upstream first.
const PURL_ENCODE_SET: &AsciiSet = &CONTROLS
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'%')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}')
    .add(b';')
    .add(b'=')
    .add(b'+')
    .add(b'@')
    .add(b'\\')
    .add(b'[')
    .add(b']')
    .add(b'^')
    .add(b'|');
/// Creates `CycloneDX` components, registering them in a `HashMap` so that they can be retrieved by `PackageId`.
/// Also ensures uniqueness when generating bom-refs by using a numeric prefix which is incremented for each component.
#[derive(Default)]
struct ComponentBuilder<'a> {
    id_counter: usize, // Used as prefix in bom-ref generation, to ensure uniqueness
    // Maps each lockfile package ID to the component created for it, so that
    // dependency edges can later be expressed in terms of bom-refs.
    package_to_component_map: HashMap<&'a PackageId, Component>,
}
impl<'a> ComponentBuilder<'a> {
    /// Creates a bom-ref string in the format "{package_name}-{id}@{version}" or "{package_name}-{id}" if no version is provided.
    ///
    /// The monotonically incremented `id` guarantees uniqueness even when two
    /// components share a name and version.
    fn create_bom_ref(&mut self, name: &str, version: Option<&str>) -> String {
        self.id_counter += 1;
        let id = self.id_counter;
        if let Some(version) = version {
            format!("{name}-{id}@{version}")
        } else {
            format!("{name}-{id}")
        }
    }

    /// Extract version string from a package.
    fn get_version_string(package: &Package) -> Option<String> {
        package
            .id
            .version
            .as_ref()
            .map(std::string::ToString::to_string)
    }

    /// Extract package name string from a package.
    fn get_package_name(package: &Package) -> &str {
        package.id.name.as_str()
    }

    /// Generate a Package URL (purl) from a package. Returns `None` for local sources.
    fn create_purl(package: &Package) -> Option<String> {
        let name = percent_encode(Self::get_package_name(package).as_bytes(), PURL_ENCODE_SET);
        // Rendered as `@{version}` when known, or omitted entirely.
        let version = Self::get_version_string(package)
            .map(|v| format!("@{}", percent_encode(v.as_bytes(), PURL_ENCODE_SET)))
            .unwrap_or_default();
        let (purl_type, qualifiers) = match &package.id.source {
            // By convention all Python packages use the "pypi" purl type, regardless of their source. For packages
            // from non-default repositories, we add a qualifier to indicate their source explicitly.
            // See the specs at
            // https://github.com/package-url/purl-spec/blob/9041aa7/types/pypi-definition.json
            // and https://github.com/package-url/purl-spec/blob/9041aa7/purl-specification.md
            Source::Registry(registry_source) => {
                let qualifiers = match registry_source {
                    RegistrySource::Url(url) => {
                        // Only add repository_url qualifier for non-default registries
                        if !url.as_ref().starts_with("https://pypi.org/") {
                            vec![("repository_url", url.as_ref())]
                        } else {
                            vec![]
                        }
                    }
                    RegistrySource::Path(_) => vec![],
                };
                ("pypi", qualifiers)
            }
            Source::Git(url, _) => ("pypi", vec![("vcs_url", url.as_ref())]),
            Source::Direct(url, _) => ("pypi", vec![("download_url", url.as_ref())]),
            // No purl for local sources
            Source::Path(_) | Source::Directory(_) | Source::Editable(_) | Source::Virtual(_) => {
                return None;
            }
        };
        let qualifiers = if qualifiers.is_empty() {
            String::new()
        } else {
            Self::format_qualifiers(&qualifiers)
        };
        Some(format!("pkg:{purl_type}/{name}{version}{qualifiers}"))
    }

    /// Render qualifiers as a `?key=value&key=value…` query string, percent-encoding
    /// the values (keys here are known literals and need no encoding).
    fn format_qualifiers(qualifiers: &[(&str, &str)]) -> String {
        let joined_qualifiers = qualifiers
            .iter()
            .map(|(key, value)| {
                format!(
                    "{key}={}",
                    percent_encode(value.as_bytes(), PURL_ENCODE_SET)
                )
            })
            .join("&");
        format!("?{joined_qualifiers}")
    }

    /// Create a component for `package` and register it, so that dependency edges
    /// can later look it up by [`PackageId`] via [`Self::get_component`].
    fn create_component(
        &mut self,
        package: &'a Package,
        package_type: PackageType,
        marker: Option<&MarkerTree>,
    ) -> Component {
        let component = self.create_component_from_package(package, package_type, marker);
        self.package_to_component_map
            .insert(&package.id, component.clone());
        component
    }

    /// Create the synthetic root component used with `--all-packages`, named after
    /// the workspace root when one exists and "uv-workspace" otherwise.
    fn create_synthetic_root_component(&mut self, root: Option<&Package>) -> Component {
        let name = root.map(Self::get_package_name).unwrap_or("uv-workspace");
        let bom_ref = self.create_bom_ref(name, None);
        // No need to register as we manually add dependencies in `if all_packages` check in `from_lock`
        Component {
            component_type: Classification::Library,
            name: NormalizedString::new(name),
            version: None,
            bom_ref: Some(bom_ref),
            purl: None,
            mime_type: None,
            supplier: None,
            author: None,
            publisher: None,
            group: None,
            description: None,
            scope: None,
            hashes: None,
            licenses: None,
            copyright: None,
            cpe: None,
            swid: None,
            modified: None,
            pedigree: None,
            external_references: None,
            properties: None,
            components: None,
            evidence: None,
            signature: None,
            model_card: None,
            data: None,
        }
    }

    /// Build a [`Component`] for a package, attaching uv-specific properties for
    /// the workspace path (workspace members) and the environment marker (if any).
    fn create_component_from_package(
        &mut self,
        package: &Package,
        package_type: PackageType,
        marker: Option<&MarkerTree>,
    ) -> Component {
        let name = Self::get_package_name(package);
        let version = Self::get_version_string(package);
        let bom_ref = self.create_bom_ref(name, version.as_deref());
        // A purl may be absent (local sources return `None`) or fail to parse;
        // either way the component is emitted without one.
        let purl = Self::create_purl(package).and_then(|purl_string| purl_string.parse().ok());
        let mut properties = vec![];
        match package_type {
            PackageType::Workspace(path) => {
                properties.push(Property::new(
                    "uv:workspace:path",
                    &PortablePath::from(path).to_string(),
                ));
            }
            PackageType::Root | PackageType::Dependency => {}
        }
        if let Some(marker_contents) = marker.and_then(|marker| marker.contents()) {
            properties.push(Property::new(
                "uv:package:marker",
                &marker_contents.to_string(),
            ));
        }
        Component {
            component_type: Classification::Library,
            name: NormalizedString::new(name),
            version: version.as_deref().map(NormalizedString::new),
            bom_ref: Some(bom_ref),
            purl,
            mime_type: None,
            supplier: None,
            author: None,
            publisher: None,
            group: None,
            description: None,
            scope: None,
            hashes: None,
            licenses: None,
            copyright: None,
            cpe: None,
            swid: None,
            modified: None,
            pedigree: None,
            external_references: None,
            properties: if !properties.is_empty() {
                Some(Properties(properties))
            } else {
                None
            },
            components: None,
            evidence: None,
            signature: None,
            model_card: None,
            data: None,
        }
    }

    /// Look up the component previously registered for `id`, if any.
    fn get_component(&self, id: &PackageId) -> Option<&Component> {
        self.package_to_component_map.get(id)
    }
}
/// Construct a CycloneDX [`Bom`] from a uv lockfile.
///
/// The single root component is recorded in `metadata.component`; every other
/// exported package becomes an entry in `components`, and the package graph is
/// mirrored in `dependencies`. With `all_packages`, a synthetic root that depends
/// on all workspace members replaces the workspace root in the metadata.
///
/// # Errors
///
/// Returns an error if a workspace member has a non-local source, or if the
/// underlying requirement extraction fails.
pub fn from_lock<'lock>(
    target: &impl Installable<'lock>,
    prune: &[PackageName],
    extras: &ExtrasSpecificationWithDefaults,
    groups: &DependencyGroupsWithDefaults,
    annotate: bool,
    install_options: &'lock InstallOptions,
    preview: Preview,
    all_packages: bool,
) -> Result<Bom, LockError> {
    // SBOM export is still a preview feature; warn unless it's been opted into.
    if !preview.is_enabled(PreviewFeatures::SBOM_EXPORT) {
        warn_user!(
            "`uv export --format=cyclonedx1.5` is experimental and may change without warning. Pass `--preview-features {}` to disable this warning.",
            PreviewFeatures::SBOM_EXPORT
        );
    }

    // Extract the packages from the lock file.
    let ExportableRequirements(mut nodes) = ExportableRequirements::from_lock(
        target,
        prune,
        extras,
        groups,
        annotate,
        install_options,
    )?;

    // Sort the nodes by package ID for deterministic output.
    nodes.sort_unstable_by_key(|node| &node.package.id);

    // CycloneDX requires exactly one root component in `metadata.component`.
    let root = match target.roots().collect::<Vec<_>>().as_slice() {
        // Single root: use it directly
        [single_root] => nodes
            .iter()
            .find(|node| &node.package.id.name == *single_root)
            .map(|node| node.package),
        // Multiple roots or no roots: use fallback
        _ => None,
    }
    .or_else(|| target.lock().root()); // Fallback to project root

    let mut component_builder = ComponentBuilder::default();

    let mut metadata = Metadata {
        component: root
            .map(|package| component_builder.create_component(package, PackageType::Root, None)),
        timestamp: cyclonedx_bom::prelude::DateTime::now().ok(),
        tools: Some(Tools::List(vec![Tool {
            vendor: Some(NormalizedString::new("Astral Software Inc.")),
            name: Some(NormalizedString::new("uv")),
            version: Some(NormalizedString::new(uv_version::version())),
            hashes: None,
            external_references: None,
        }])),
        ..Metadata::default()
    };

    // Identify workspace members so they can be tagged with their path and, with
    // `--all-packages`, attached to the synthetic root.
    let workspace_member_ids = nodes
        .iter()
        .filter_map(|node| {
            if target.lock().members().contains(&node.package.id.name) {
                Some(&node.package.id)
            } else {
                None
            }
        })
        .collect::<FxHashSet<_>>();

    let mut components = nodes
        .iter()
        .filter(|node| root.is_none_or(|root_pkg| root_pkg.id != node.package.id)) // Filter out root package as this is included in `metadata`
        .map(|node| {
            let package_type = if workspace_member_ids.contains(&node.package.id) {
                let path = match &node.package.id.source {
                    Source::Path(path)
                    | Source::Directory(path)
                    | Source::Editable(path)
                    | Source::Virtual(path) => path,
                    Source::Registry(_) | Source::Git(_, _) | Source::Direct(_, _) => {
                        // Workspace packages should always be local dependencies
                        return Err(LockErrorKind::NonLocalWorkspaceMember {
                            id: node.package.id.clone(),
                        }
                        .into());
                    }
                };
                PackageType::Workspace(path)
            } else {
                PackageType::Dependency
            };
            Ok(component_builder.create_component(node.package, package_type, Some(&node.marker)))
        })
        .collect::<Result<Vec<_>, LockError>>()?;

    let mut dependencies = create_dependencies(&nodes, &component_builder);

    // With `--all-packages`, use synthetic root which depends on workspace root and all workspace members.
    // This ensures that we don't have any dangling components resulting from workspace packages not depended on by the workspace root.
    if all_packages {
        let synthetic_root = component_builder.create_synthetic_root_component(root);
        let synthetic_root_bom_ref = synthetic_root
            .bom_ref
            .clone()
            .expect("bom-ref should always exist");
        // The previous root (if any) is demoted to an ordinary component.
        let workspace_root = metadata.component.replace(synthetic_root);
        if let Some(workspace_root) = workspace_root {
            components.push(workspace_root);
        }
        dependencies.push(Dependency {
            dependency_ref: synthetic_root_bom_ref,
            dependencies: workspace_member_ids
                .iter()
                .filter_map(|c| component_builder.get_component(c))
                .map(|c| c.bom_ref.clone().expect("bom-ref should always exist"))
                .sorted_unstable()
                .collect(),
        });
    }

    let bom = Bom {
        metadata: Some(metadata),
        components: Some(Components(components)),
        dependencies: Some(Dependencies(dependencies)),
        ..Bom::default()
    };
    Ok(bom)
}
/// Build the CycloneDX `dependencies` section: one entry per exported package,
/// listing the bom-refs of its direct dependencies (base requirements, all
/// extras, and all dependency groups), sorted and deduplicated.
fn create_dependencies(
    nodes: &[ExportableRequirement<'_>],
    component_builder: &ComponentBuilder,
) -> Vec<Dependency> {
    let mut entries = Vec::with_capacity(nodes.len());
    for node in nodes {
        // Every node was registered during component creation, so the lookup
        // is infallible here.
        let subject = component_builder
            .get_component(&node.package.id)
            .expect("All nodes should have been added to map");

        // Gather every direct dependency of the package.
        let direct = node
            .package
            .dependencies
            .iter()
            .chain(node.package.optional_dependencies.values().flatten())
            .chain(node.package.dependency_groups.values().flatten());

        // Dependencies pruned from the export have no component and are skipped.
        let dependency_refs = direct
            .filter_map(|dep| component_builder.get_component(&dep.package_id))
            .map(|component| {
                component
                    .bom_ref
                    .clone()
                    .expect("bom-ref should always exist")
            })
            .sorted_unstable()
            .unique()
            .collect();

        entries.push(Dependency {
            dependency_ref: subject
                .bom_ref
                .clone()
                .expect("bom-ref should always exist"),
            dependencies: dependency_refs,
        });
    }
    entries
}
/// The role a package plays in the exported BOM, used to decide which
/// uv-specific properties to attach to its component.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum PackageType<'a> {
    /// The root component recorded in the BOM metadata.
    Root,
    /// A workspace member, carrying its source path as recorded in the lockfile.
    Workspace(&'a Path),
    /// A regular (non-workspace) locked dependency.
    Dependency,
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/export/mod.rs | crates/uv-resolver/src/lock/export/mod.rs | use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use either::Either;
use petgraph::graph::NodeIndex;
use petgraph::prelude::EdgeRef;
use petgraph::visit::IntoNodeReferences;
use petgraph::{Direction, Graph};
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use uv_configuration::{
DependencyGroupsWithDefaults, ExtrasSpecificationWithDefaults, InstallOptions,
};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep508::MarkerTree;
use uv_pypi_types::ConflictItem;
use crate::graph_ops::{Reachable, marker_reachability};
use crate::lock::LockErrorKind;
pub(crate) use crate::lock::export::pylock_toml::PylockTomlPackage;
pub use crate::lock::export::pylock_toml::{PylockToml, PylockTomlErrorKind};
pub use crate::lock::export::requirements_txt::RequirementsTxtExport;
use crate::universal_marker::resolve_conflicts;
use crate::{Installable, LockError, Package};
pub mod cyclonedx_json;
mod pylock_toml;
mod requirements_txt;
/// A flat requirement, with its associated marker.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ExportableRequirement<'lock> {
    /// The [`Package`] associated with the requirement.
    package: &'lock Package,
    /// The marker that must be satisfied to install the package.
    marker: MarkerTree,
    /// The list of packages that depend on this package.
    ///
    /// Only populated when annotations are requested; empty otherwise.
    dependents: Vec<&'lock Package>,
}
/// A set of flattened, exportable requirements, generated from a lockfile.
///
/// Produced by [`ExportableRequirements::from_lock`] and consumed by the
/// individual export formats in this module.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ExportableRequirements<'lock>(Vec<ExportableRequirement<'lock>>);
impl<'lock> ExportableRequirements<'lock> {
    /// Generate the set of exportable [`ExportableRequirement`] entries from the given lockfile.
    ///
    /// Builds a dependency graph rooted at the workspace packages (honoring
    /// `prune`, `extras`, and `groups`), then computes, for every reachable
    /// package, the marker under which it is installed.
    fn from_lock(
        target: &impl Installable<'lock>,
        prune: &[PackageName],
        extras: &ExtrasSpecificationWithDefaults,
        groups: &DependencyGroupsWithDefaults,
        annotate: bool,
        install_options: &'lock InstallOptions,
    ) -> Result<Self, LockError> {
        let size_guess = target.lock().packages.len();
        let mut graph = Graph::<Node<'lock>, Edge<'lock>>::with_capacity(size_guess, size_guess);
        // Maps each package ID to its node index, so each package gets one node.
        let mut inverse = FxHashMap::with_capacity_and_hasher(size_guess, FxBuildHasher);

        // BFS queue of (package, activated extra) pairs whose dependencies still
        // need to be expanded into the graph.
        let mut queue: VecDeque<(&Package, Option<&ExtraName>)> = VecDeque::new();
        let mut seen = FxHashSet::default();

        // Only track known conflicts when the lockfile declares any.
        let mut conflicts = if target.lock().conflicts.is_empty() {
            None
        } else {
            Some(FxHashMap::default())
        };

        let root = graph.add_node(Node::Root);

        // Add the workspace packages to the queue.
        for root_name in target.roots() {
            if prune.contains(root_name) {
                continue;
            }
            let dist = target
                .lock()
                .find_by_name(root_name)
                .map_err(|_| LockErrorKind::MultipleRootPackages {
                    name: root_name.clone(),
                })?
                .ok_or_else(|| LockErrorKind::MissingRootPackage {
                    name: root_name.clone(),
                })?;

            if groups.prod() {
                // Add the workspace package to the graph.
                let index = *inverse
                    .entry(&dist.id)
                    .or_insert_with(|| graph.add_node(Node::Package(dist)));
                graph.add_edge(root, index, Edge::Prod(MarkerTree::TRUE));

                // Push its dependencies on the queue.
                queue.push_back((dist, None));
                for extra in extras.extra_names(dist.optional_dependencies.keys()) {
                    queue.push_back((dist, Some(extra)));

                    // Track the activated extra in the list of known conflicts.
                    if let Some(conflicts) = conflicts.as_mut() {
                        conflicts.insert(
                            ConflictItem::from((dist.id.name.clone(), extra.clone())),
                            MarkerTree::TRUE,
                        );
                    }
                }
            }

            // Add any development dependencies.
            for (group, dep) in dist
                .dependency_groups
                .iter()
                .filter_map(|(group, deps)| {
                    if groups.contains(group) {
                        Some(deps.iter().map(move |dep| (group, dep)))
                    } else {
                        None
                    }
                })
                .flatten()
            {
                // Track the activated group in the list of known conflicts.
                if let Some(conflicts) = conflicts.as_mut() {
                    conflicts.insert(
                        ConflictItem::from((dist.id.name.clone(), group.clone())),
                        MarkerTree::TRUE,
                    );
                }

                if prune.contains(&dep.package_id.name) {
                    continue;
                }
                let dep_dist = target.lock().find_by_id(&dep.package_id);

                // Add the dependency to the graph.
                let dep_index = *inverse
                    .entry(&dep.package_id)
                    .or_insert_with(|| graph.add_node(Node::Package(dep_dist)));

                // Add an edge from the root. Development dependencies may be installed without
                // installing the workspace package itself (which can never have markers on it
                // anyway), so they're directly connected to the root.
                graph.add_edge(
                    root,
                    dep_index,
                    Edge::Dev(group, dep.simplified_marker.as_simplified_marker_tree()),
                );

                // Push its dependencies on the queue.
                if seen.insert((&dep.package_id, None)) {
                    queue.push_back((dep_dist, None));
                }
                for extra in &dep.extra {
                    if seen.insert((&dep.package_id, Some(extra))) {
                        queue.push_back((dep_dist, Some(extra)));
                    }
                }
            }
        }

        // Add requirements that are exclusive to the workspace root (e.g., dependency groups in
        // (legacy) non-project workspace roots).
        let root_requirements = target
            .lock()
            .requirements()
            .iter()
            .chain(
                target
                    .lock()
                    .dependency_groups()
                    .iter()
                    .filter_map(|(group, deps)| {
                        if groups.contains(group) {
                            Some(deps)
                        } else {
                            None
                        }
                    })
                    .flatten(),
            )
            .filter(|dep| !prune.contains(&dep.name))
            .collect::<Vec<_>>();

        // Index the lockfile by package name, to avoid making multiple passes over the lockfile.
        if !root_requirements.is_empty() {
            let by_name: FxHashMap<_, Vec<_>> = {
                let names = root_requirements
                    .iter()
                    .map(|dep| &dep.name)
                    .collect::<FxHashSet<_>>();
                target.lock().packages().iter().fold(
                    FxHashMap::with_capacity_and_hasher(size_guess, FxBuildHasher),
                    |mut map, package| {
                        if names.contains(&package.id.name) {
                            map.entry(&package.id.name).or_default().push(package);
                        }
                        map
                    },
                )
            };

            for requirement in root_requirements {
                for dist in by_name.get(&requirement.name).into_iter().flatten() {
                    // Determine whether this entry is "relevant" for the requirement, by intersecting
                    // the markers.
                    let marker = if dist.fork_markers.is_empty() {
                        requirement.marker
                    } else {
                        let mut combined = MarkerTree::FALSE;
                        for fork_marker in &dist.fork_markers {
                            combined.or(fork_marker.pep508());
                        }
                        combined.and(requirement.marker);
                        combined
                    };
                    // An entry whose fork markers are disjoint from the requirement
                    // marker can never be installed together with it; skip it.
                    if marker.is_false() {
                        continue;
                    }

                    // Simplify the marker.
                    let marker = target.lock().simplify_environment(marker);

                    // Add the dependency to the graph and get its index.
                    let dep_index = *inverse
                        .entry(&dist.id)
                        .or_insert_with(|| graph.add_node(Node::Package(dist)));

                    // Add an edge from the root.
                    graph.add_edge(root, dep_index, Edge::Prod(marker));

                    // Push its dependencies on the queue.
                    if seen.insert((&dist.id, None)) {
                        queue.push_back((dist, None));
                    }
                    for extra in &requirement.extras {
                        if seen.insert((&dist.id, Some(extra))) {
                            queue.push_back((dist, Some(extra)));
                        }
                    }
                }
            }
        }

        // Create all the relevant nodes.
        while let Some((package, extra)) = queue.pop_front() {
            let index = inverse[&package.id];
            // With an activated extra, only that extra's dependencies apply;
            // otherwise, the base dependencies apply.
            let deps = if let Some(extra) = extra {
                Either::Left(
                    package
                        .optional_dependencies
                        .get(extra)
                        .into_iter()
                        .flatten(),
                )
            } else {
                Either::Right(package.dependencies.iter())
            };
            for dep in deps {
                if prune.contains(&dep.package_id.name) {
                    continue;
                }

                // Evaluate the conflict marker.
                let dep_dist = target.lock().find_by_id(&dep.package_id);

                // Add the dependency to the graph.
                let dep_index = *inverse
                    .entry(&dep.package_id)
                    .or_insert_with(|| graph.add_node(Node::Package(dep_dist)));

                // Add an edge from the dependency.
                graph.add_edge(
                    index,
                    dep_index,
                    if let Some(extra) = extra {
                        Edge::Optional(extra, dep.simplified_marker.as_simplified_marker_tree())
                    } else {
                        Edge::Prod(dep.simplified_marker.as_simplified_marker_tree())
                    },
                );

                // Push its dependencies on the queue.
                if seen.insert((&dep.package_id, None)) {
                    queue.push_back((dep_dist, None));
                }
                for extra in &dep.extra {
                    if seen.insert((&dep.package_id, Some(extra))) {
                        queue.push_back((dep_dist, Some(extra)));
                    }
                }
            }
        }

        // Determine the reachability of each node in the graph. The conflict-aware
        // variant is only needed when the lockfile declares conflicts.
        let mut reachability = if let Some(conflicts) = conflicts.as_ref() {
            conflict_marker_reachability(&graph, &[], conflicts)
        } else {
            marker_reachability(&graph, &[])
        };

        // Collect all packages.
        let nodes = graph
            .node_references()
            .filter_map(|(index, node)| match node {
                Node::Root => None,
                Node::Package(package) => Some((index, package)),
            })
            .filter(|(_index, package)| {
                install_options.include_package(
                    package.as_install_target(),
                    target.project_name(),
                    target.lock().members(),
                )
            })
            .map(|(index, package)| ExportableRequirement {
                package,
                marker: reachability.remove(&index).unwrap_or_default(),
                dependents: if annotate {
                    // Collect, sort, and dedupe the direct dependents for the
                    // export annotations.
                    let mut dependents = graph
                        .edges_directed(index, Direction::Incoming)
                        .map(|edge| &graph[edge.source()])
                        .filter_map(|node| match node {
                            Node::Package(package) => Some(*package),
                            Node::Root => None,
                        })
                        .collect::<Vec<_>>();
                    dependents.sort_unstable_by_key(|package| package.name());
                    dependents.dedup_by_key(|package| package.name());
                    dependents
                } else {
                    Vec::new()
                },
            })
            .filter(|requirement| !requirement.marker.is_false())
            .collect::<Vec<_>>();

        Ok(Self(nodes))
    }
}
/// A node in the graph.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Node<'lock> {
    /// The synthetic root from which all workspace packages are reachable.
    Root,
    /// A locked package from the lockfile.
    Package(&'lock Package),
}
/// An edge in the resolution graph, along with the marker that must be satisfied to traverse it.
#[derive(Debug, Clone)]
enum Edge<'lock> {
    /// A production dependency.
    Prod(MarkerTree),
    /// A dependency introduced via the named extra.
    Optional(&'lock ExtraName, MarkerTree),
    /// A dependency introduced via the named dependency group.
    Dev(&'lock GroupName, MarkerTree),
}
impl Edge<'_> {
    /// Return the [`MarkerTree`] for this edge.
    ///
    /// Every variant carries a marker in its last position, so a single
    /// irrefutable or-pattern covers all cases.
    fn marker(&self) -> &MarkerTree {
        let (Self::Prod(marker) | Self::Optional(_, marker) | Self::Dev(_, marker)) = self;
        marker
    }
}
// Adapt `Edge` to the generic reachability traversal in `graph_ops`.
impl Reachable<MarkerTree> for Edge<'_> {
    // The always-satisfied marker.
    fn true_marker() -> MarkerTree {
        MarkerTree::TRUE
    }
    // The never-satisfied marker.
    fn false_marker() -> MarkerTree {
        MarkerTree::FALSE
    }
    // Delegates to the inherent `Edge::marker` accessor (inherent methods take
    // precedence over trait methods in resolution), copying out of the reference.
    fn marker(&self) -> MarkerTree {
        *self.marker()
    }
}
/// Determine the markers under which a package is reachable in the dependency tree, taking into
/// account conflicts.
///
/// This method is structurally similar to [`marker_reachability`], but it _also_ attempts to resolve
/// conflict markers. Specifically, in addition to tracking the reachability marker for each node,
/// we also track (for each node) the conditions under which each conflict item is `true`. Then,
/// when evaluating the marker for the node, we inline the conflict marker conditions, thus removing
/// all conflict items from the marker expression.
///
/// Returns, for each node index, the (conflict-resolved) marker under which it is reachable.
/// Nodes with no incoming edges are treated as roots and seeded with the union of the fork
/// markers (or `TRUE` when there are no forks).
fn conflict_marker_reachability<'lock>(
    graph: &Graph<Node<'lock>, Edge<'lock>>,
    fork_markers: &[Edge<'lock>],
    known_conflicts: &FxHashMap<ConflictItem, MarkerTree>,
) -> FxHashMap<NodeIndex, MarkerTree> {
    // For each node, track the conditions under which each conflict item is enabled.
    let mut conflict_maps =
        FxHashMap::<NodeIndex, FxHashMap<ConflictItem, MarkerTree>>::with_capacity_and_hasher(
            graph.node_count(),
            FxBuildHasher,
        );
    // Note that we build including the virtual packages due to how we propagate markers through
    // the graph, even though we then only read the markers for base packages.
    let mut reachability = FxHashMap::with_capacity_and_hasher(graph.node_count(), FxBuildHasher);
    // Collect the root nodes.
    //
    // Besides the actual virtual root node, virtual dev dependencies packages are also root
    // nodes since the edges don't cover dev dependencies.
    let mut queue: Vec<_> = graph
        .node_indices()
        .filter(|node_index| {
            graph
                .edges_directed(*node_index, Direction::Incoming)
                .next()
                .is_none()
        })
        .collect();
    // The root nodes are always applicable, unless the user has restricted resolver
    // environments with `tool.uv.environments`.
    let root_markers = if fork_markers.is_empty() {
        MarkerTree::TRUE
    } else {
        fork_markers
            .iter()
            .fold(MarkerTree::FALSE, |mut acc, edge| {
                acc.or(*edge.marker());
                acc
            })
    };
    for root_index in &queue {
        reachability.insert(*root_index, root_markers);
    }
    // Propagate all markers through the graph, so that the eventual marker for each node is the
    // union of the markers of each path we can reach the node by.
    while let Some(parent_index) = queue.pop() {
        // Resolve any conflicts in the parent marker.
        reachability.entry(parent_index).and_modify(|marker| {
            let conflict_map = conflict_maps.get(&parent_index).unwrap_or(known_conflicts);
            *marker = resolve_conflicts(*marker, conflict_map);
        });
        // When we see an edge like `parent [dotenv]> flask`, we should take the reachability
        // on `parent`, combine it with the marker on the edge, then add `flask[dotenv]` to
        // the inference map on the `flask` node.
        for child_edge in graph.edges_directed(parent_index, Direction::Outgoing) {
            let mut parent_marker = reachability[&parent_index];
            // The marker for all paths to the child through the parent.
            let mut parent_map = conflict_maps
                .get(&parent_index)
                .cloned()
                .unwrap_or_else(|| known_conflicts.clone());
            match child_edge.weight() {
                Edge::Prod(marker) => {
                    // Resolve any conflicts on the edge.
                    let marker = resolve_conflicts(*marker, &parent_map);
                    // Propagate the edge to the known conflicts.
                    for value in parent_map.values_mut() {
                        value.and(marker);
                    }
                    // Propagate the edge to the node itself.
                    parent_marker.and(marker);
                }
                Edge::Optional(extra, marker) => {
                    // Resolve any conflicts on the edge.
                    let marker = resolve_conflicts(*marker, &parent_map);
                    // Propagate the edge to the known conflicts.
                    for value in parent_map.values_mut() {
                        value.and(marker);
                    }
                    // Propagate the edge to the node itself.
                    parent_marker.and(marker);
                    // Add a known conflict item for the extra.
                    if let Node::Package(parent) = graph[parent_index] {
                        let item = ConflictItem::from((parent.name().clone(), (*extra).clone()));
                        parent_map.insert(item, parent_marker);
                    }
                }
                Edge::Dev(group, marker) => {
                    // Resolve any conflicts on the edge.
                    let marker = resolve_conflicts(*marker, &parent_map);
                    // Propagate the edge to the known conflicts.
                    for value in parent_map.values_mut() {
                        value.and(marker);
                    }
                    // Propagate the edge to the node itself.
                    parent_marker.and(marker);
                    // Add a known conflict item for the group.
                    if let Node::Package(parent) = graph[parent_index] {
                        let item = ConflictItem::from((parent.name().clone(), (*group).clone()));
                        parent_map.insert(item, parent_marker);
                    }
                }
            }
            // Combine the inferred conflicts with the existing conflicts on the node.
            match conflict_maps.entry(child_edge.target()) {
                Entry::Occupied(mut existing) => {
                    let child_map = existing.get_mut();
                    // Union each inferred condition into the child's map; a missing key
                    // starts at `FALSE`, the identity element for disjunction.
                    for (key, value) in parent_map {
                        child_map.entry(key).or_insert(MarkerTree::FALSE).or(value);
                    }
                }
                Entry::Vacant(vacant) => {
                    vacant.insert(parent_map);
                }
            }
            // Combine the inferred marker with the existing marker on the node.
            match reachability.entry(child_edge.target()) {
                Entry::Occupied(mut existing) => {
                    // If the marker is a subset of the existing marker (A ⊆ B exactly if
                    // A ∪ B = A), updating the child wouldn't change child's marker.
                    parent_marker.or(*existing.get());
                    if parent_marker != *existing.get() {
                        existing.insert(parent_marker);
                        queue.push(child_edge.target());
                    }
                }
                Entry::Vacant(vacant) => {
                    vacant.insert(parent_marker);
                    queue.push(child_edge.target());
                }
            }
        }
    }
    reachability
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/lock/export/requirements_txt.rs | crates/uv-resolver/src/lock/export/requirements_txt.rs | use std::borrow::Cow;
use std::fmt::Formatter;
use std::path::{Component, Path, PathBuf};
use owo_colors::OwoColorize;
use url::Url;
use uv_configuration::{
DependencyGroupsWithDefaults, EditableMode, ExtrasSpecificationWithDefaults, InstallOptions,
};
use uv_distribution_filename::{DistExtension, SourceDistExtension};
use uv_fs::Simplified;
use uv_git_types::GitReference;
use uv_normalize::PackageName;
use uv_pypi_types::{ParsedArchiveUrl, ParsedGitUrl};
use uv_redacted::DisplaySafeUrl;
use crate::lock::export::{ExportableRequirement, ExportableRequirements};
use crate::lock::{Package, PackageId, Source};
use crate::{Installable, LockError};
/// An export of a [`Lock`] that renders in `requirements.txt` format.
#[derive(Debug)]
pub struct RequirementsTxtExport<'lock> {
    /// The requirements to render, pre-sorted so editables and path requirements come first.
    nodes: Vec<ExportableRequirement<'lock>>,
    /// Whether to emit `--hash` continuation lines for each requirement.
    hashes: bool,
    /// How to render editable requirements (`-e <path>` vs. a plain path/URL).
    editable: Option<EditableMode>,
}
impl<'lock> RequirementsTxtExport<'lock> {
    /// Build a `requirements.txt` export from the given lockfile target.
    ///
    /// Filters the locked packages according to the requested extras, dependency
    /// groups, prune list, and install options, then orders them so that unnamed
    /// URLs (editables) appear at the top of the rendered output.
    pub fn from_lock(
        target: &impl Installable<'lock>,
        prune: &[PackageName],
        extras: &ExtrasSpecificationWithDefaults,
        dev: &DependencyGroupsWithDefaults,
        annotate: bool,
        editable: Option<EditableMode>,
        hashes: bool,
        install_options: &'lock InstallOptions,
    ) -> Result<Self, LockError> {
        // Extract the exportable packages from the lockfile.
        let ExportableRequirements(mut nodes) = ExportableRequirements::from_lock(
            target,
            prune,
            extras,
            dev,
            annotate,
            install_options,
        )?;
        // Order the requirements: editables first, then local paths, then everything else.
        nodes.sort_unstable_by(|left, right| {
            let lhs = RequirementComparator::from(left.package);
            let rhs = RequirementComparator::from(right.package);
            lhs.cmp(&rhs)
        });
        Ok(Self {
            nodes,
            hashes,
            editable,
        })
    }
}
impl std::fmt::Display for RequirementsTxtExport<'_> {
    /// Render the export in `requirements.txt` syntax: one requirement per line, with
    /// optional `--hash` continuation lines and green `# via` dependent annotations.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Write out each package.
        for ExportableRequirement {
            package,
            marker,
            dependents,
        } in &self.nodes
        {
            match &package.id.source {
                // Registry packages pin as `name==version`.
                Source::Registry(_) => {
                    let version = package
                        .id
                        .version
                        .as_ref()
                        .expect("registry package without version");
                    write!(f, "{}=={}", package.id.name, version)?;
                }
                // Git packages render as `name @ <reconstructed git URL>`.
                Source::Git(url, git) => {
                    // Remove the fragment and query from the URL; they're already present in the
                    // `GitSource`.
                    let mut url = url.to_url().map_err(|_| std::fmt::Error)?;
                    url.set_fragment(None);
                    url.set_query(None);
                    // Reconstruct the `GitUrl` from the `GitSource`.
                    let git_url = uv_git_types::GitUrl::from_commit(
                        url,
                        GitReference::from(git.kind.clone()),
                        git.precise,
                        git.lfs,
                    )
                    .expect("Internal Git URLs must have supported schemes");
                    // Reconstruct the PEP 508-compatible URL from the `GitSource`.
                    let url = DisplaySafeUrl::from(ParsedGitUrl {
                        url: git_url.clone(),
                        subdirectory: git.subdirectory.clone(),
                    });
                    write!(f, "{} @ {}", package.id.name, url)?;
                }
                // Direct archive URLs render as `name @ <url>`.
                Source::Direct(url, direct) => {
                    let url = DisplaySafeUrl::from(ParsedArchiveUrl {
                        url: url.to_url().map_err(|_| std::fmt::Error)?,
                        subdirectory: direct.subdirectory.clone(),
                        ext: DistExtension::Source(SourceDistExtension::TarGz),
                    });
                    write!(
                        f,
                        "{} @ {}",
                        package.id.name,
                        // TODO(zanieb): We should probably omit passwords here by default, but we
                        // should change it in a breaking release and allow opt-in to include them.
                        url.displayable_with_credentials()
                    )?;
                }
                // Local paths: absolute paths become `file://` URLs; relative paths are
                // anchored at the current directory (`./foo/bar`).
                Source::Path(path) | Source::Directory(path) => {
                    if path.is_absolute() {
                        write!(
                            f,
                            "{}",
                            Url::from_file_path(path).map_err(|()| std::fmt::Error)?
                        )?;
                    } else {
                        write!(f, "{}", anchor(path).portable_display())?;
                    }
                }
                // Editables honor the requested mode: `-e <path>` by default, or a plain
                // path/URL when non-editable export was requested.
                Source::Editable(path) => match self.editable {
                    None | Some(EditableMode::Editable) => {
                        write!(f, "-e {}", anchor(path).portable_display())?;
                    }
                    Some(EditableMode::NonEditable) => {
                        if path.is_absolute() {
                            write!(
                                f,
                                "{}",
                                Url::from_file_path(path).map_err(|()| std::fmt::Error)?
                            )?;
                        } else {
                            write!(f, "{}", anchor(path).portable_display())?;
                        }
                    }
                },
                // Virtual packages have no installable artifact; omit them entirely.
                Source::Virtual(_) => {
                    continue;
                }
            }
            // Append the environment marker, if any.
            if let Some(contents) = marker.contents() {
                write!(f, " ; {contents}")?;
            }
            // Append `--hash` continuation lines in sorted order.
            if self.hashes {
                let mut hashes = package.hashes();
                hashes.sort_unstable();
                if !hashes.is_empty() {
                    for hash in hashes.iter() {
                        writeln!(f, " \\")?;
                        write!(f, " --hash=")?;
                        write!(f, "{hash}")?;
                    }
                }
            }
            writeln!(f)?;
            // Add "via ..." comments for all dependents.
            match dependents.as_slice() {
                [] => {}
                [dependent] => {
                    writeln!(f, "{}", format!(" # via {}", dependent.id.name).green())?;
                }
                _ => {
                    writeln!(f, "{}", " # via".green())?;
                    for &dependent in dependents {
                        writeln!(f, "{}", format!(" # {}", dependent.id.name).green())?;
                    }
                }
            }
        }
        Ok(())
    }
}
/// A sort key for exported requirements.
///
/// The derived `Ord` relies on variant declaration order: editables sort first,
/// then local path requirements, then all other packages by their [`PackageId`].
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum RequirementComparator<'lock> {
    Editable(&'lock Path),
    Path(&'lock Path),
    Package(&'lock PackageId),
}
impl<'lock> From<&'lock Package> for RequirementComparator<'lock> {
    /// Derive the sort key from the package's source: editables and local paths
    /// sort ahead of registry, Git, and URL packages.
    fn from(package: &'lock Package) -> Self {
        match &package.id.source {
            Source::Editable(path) => Self::Editable(path),
            Source::Path(path) | Source::Directory(path) => Self::Path(path),
            _ => Self::Package(&package.id),
        }
    }
}
/// Modify a relative [`Path`] to anchor it at the current working directory.
///
/// For example, given `foo/bar`, returns `./foo/bar`.
fn anchor(path: &Path) -> Cow<'_, Path> {
    let Some(first) = path.components().next() else {
        // An empty path anchors to the current directory itself.
        return Cow::Owned(PathBuf::from("."));
    };
    if matches!(first, Component::CurDir | Component::ParentDir) {
        // Already anchored (starts with `./` or `../`): borrow as-is.
        Cow::Borrowed(path)
    } else {
        Cow::Owned(PathBuf::from("./").join(path))
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolution/display.rs | crates/uv-resolver/src/resolution/display.rs | use std::collections::BTreeSet;
use owo_colors::OwoColorize;
use petgraph::visit::EdgeRef;
use petgraph::{Directed, Direction, Graph};
use rustc_hash::{FxBuildHasher, FxHashMap};
use uv_distribution_types::{DistributionMetadata, Name, SourceAnnotation, SourceAnnotations};
use uv_normalize::PackageName;
use uv_pep508::MarkerTree;
use crate::resolution::{RequirementsTxtDist, ResolutionGraphNode};
use crate::{ResolverEnvironment, ResolverOutput};
/// A [`std::fmt::Display`] implementation for the resolution graph.
///
/// Constructed via [`DisplayResolutionGraph::new`]; rendering happens in the
/// `Display` implementation, producing pip-compatible `requirements.txt` output.
#[derive(Debug)]
pub struct DisplayResolutionGraph<'a> {
    /// The underlying graph.
    resolution: &'a ResolverOutput,
    /// The resolver marker environment, used to determine the markers that apply to each package.
    env: &'a ResolverEnvironment,
    /// The packages to exclude from the output.
    no_emit_packages: &'a [PackageName],
    /// Whether to include hashes in the output.
    show_hashes: bool,
    /// Whether to include extras in the output (e.g., `black[colorama]`).
    include_extras: bool,
    /// Whether to include environment markers in the output (e.g., `black ; sys_platform == "win32"`).
    include_markers: bool,
    /// Whether to include annotations in the output, to indicate which dependency or dependencies
    /// requested each package.
    include_annotations: bool,
    /// Whether to include indexes in the output, to indicate which index was used for each package.
    include_index_annotation: bool,
    /// The style of annotation comments, used to indicate the dependencies that requested each
    /// package.
    annotation_style: AnnotationStyle,
}
/// A node in the intermediate display graph: the synthetic root, or a
/// requirements.txt-renderable distribution.
#[derive(Debug)]
enum DisplayResolutionGraphNode<'dist> {
    Root,
    Dist(RequirementsTxtDist<'dist>),
}
impl<'a> DisplayResolutionGraph<'a> {
    /// Create a new [`DisplayResolutionGraph`] for the given graph.
    ///
    /// Note that this panics if any of the forks in the given resolver
    /// output contain non-empty conflicting groups. That is, when using `uv
    /// pip compile`, specifying conflicts is not supported because their
    /// conditional logic cannot be encoded into a `requirements.txt`.
    #[allow(clippy::fn_params_excessive_bools)]
    pub fn new(
        underlying: &'a ResolverOutput,
        env: &'a ResolverEnvironment,
        no_emit_packages: &'a [PackageName],
        show_hashes: bool,
        include_extras: bool,
        include_markers: bool,
        include_annotations: bool,
        include_index_annotation: bool,
        annotation_style: AnnotationStyle,
    ) -> Self {
        // `requirements.txt` cannot encode conflict-conditional logic, so reject any
        // fork whose conflict marker is non-trivial up front.
        for fork_marker in &underlying.fork_markers {
            if !fork_marker.conflict().is_true() {
                panic!(
                    "found fork marker {fork_marker:?} with non-trivial conflicting marker, \
                    cannot display resolver output with conflicts in requirements.txt format",
                );
            }
        }
        Self {
            resolution: underlying,
            env,
            no_emit_packages,
            show_hashes,
            include_extras,
            include_markers,
            include_annotations,
            include_index_annotation,
            annotation_style,
        }
    }
}
/// Write the graph in the `{name}=={version}` format of requirements.txt that pip uses.
impl std::fmt::Display for DisplayResolutionGraph<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Determine the annotation sources for each package: which requirements files,
        // constraints, and overrides (that apply to the current environment) mention it.
        let sources = if self.include_annotations {
            let mut sources = SourceAnnotations::default();
            for requirement in self.resolution.requirements.iter().filter(|requirement| {
                requirement.evaluate_markers(self.env.marker_environment(), &[])
            }) {
                if let Some(origin) = &requirement.origin {
                    sources.add(
                        &requirement.name,
                        SourceAnnotation::Requirement(origin.clone()),
                    );
                }
            }
            for requirement in self
                .resolution
                .constraints
                .requirements()
                .filter(|requirement| {
                    requirement.evaluate_markers(self.env.marker_environment(), &[])
                })
            {
                if let Some(origin) = &requirement.origin {
                    sources.add(
                        &requirement.name,
                        SourceAnnotation::Constraint(origin.clone()),
                    );
                }
            }
            for requirement in self
                .resolution
                .overrides
                .requirements()
                .filter(|requirement| {
                    requirement.evaluate_markers(self.env.marker_environment(), &[])
                })
            {
                if let Some(origin) = &requirement.origin {
                    sources.add(
                        &requirement.name,
                        SourceAnnotation::Override(origin.clone()),
                    );
                }
            }
            sources
        } else {
            SourceAnnotations::default()
        };
        // Convert a [`petgraph::graph::Graph`] based on [`ResolutionGraphNode`] to a graph based on
        // [`DisplayResolutionGraphNode`]. In other words: converts from [`AnnotatedDist`] to
        // [`RequirementsTxtDist`].
        //
        // We assign each package its propagated markers: In `requirements.txt`, we want a flat list
        // that for each package tells us if it should be installed on the current platform, without
        // looking at which packages depend on it.
        let graph = self.resolution.graph.map(
            |_index, node| match node {
                ResolutionGraphNode::Root => DisplayResolutionGraphNode::Root,
                ResolutionGraphNode::Dist(dist) => {
                    let dist = RequirementsTxtDist::from_annotated_dist(dist);
                    DisplayResolutionGraphNode::Dist(dist)
                }
            },
            // We can drop the edge markers, while retaining their existence and direction for the
            // annotations.
            |_index, _edge| (),
        );
        // Reduce the graph, removing or combining extras for a given package.
        let graph = if self.include_extras {
            combine_extras(&graph)
        } else {
            strip_extras(&graph)
        };
        // Collect all packages, skipping any the caller asked us not to emit.
        let mut nodes = graph
            .node_indices()
            .filter_map(|index| {
                let dist = &graph[index];
                let name = dist.name();
                if self.no_emit_packages.contains(name) {
                    return None;
                }
                Some((index, dist))
            })
            .collect::<Vec<_>>();
        // Sort the nodes by name, but with editable packages first.
        nodes.sort_unstable_by_key(|(index, node)| (node.to_comparator(), *index));
        // Print out the dependency graph.
        for (index, node) in nodes {
            // Display the node itself.
            let mut line = node
                .to_requirements_txt(&self.resolution.requires_python, self.include_markers)
                .to_string();
            // Display the distribution hashes, if any.
            let mut has_hashes = false;
            if self.show_hashes {
                for hash in node.hashes {
                    has_hashes = true;
                    line.push_str(" \\\n");
                    line.push_str(" --hash=");
                    line.push_str(&hash.to_string());
                }
            }
            // Determine the annotation comment and separator (between comment and requirement).
            let mut annotation = None;
            // If enabled, include annotations to indicate the dependencies that requested each
            // package (e.g., `# via mypy`).
            if self.include_annotations {
                // Display all dependents (i.e., all packages that depend on the current package).
                let dependents = {
                    let mut dependents = graph
                        .edges_directed(index, Direction::Incoming)
                        .map(|edge| &graph[edge.source()])
                        .map(uv_distribution_types::Name::name)
                        .collect::<Vec<_>>();
                    dependents.sort_unstable();
                    dependents.dedup();
                    dependents
                };
                // Include all external sources (e.g., requirements files).
                let default = BTreeSet::default();
                let source = sources.get(node.name()).unwrap_or(&default);
                match self.annotation_style {
                    AnnotationStyle::Line => match dependents.as_slice() {
                        [] if source.is_empty() => {}
                        [] if source.len() == 1 => {
                            let separator = if has_hashes { "\n " } else { " " };
                            let comment = format!("# via {}", source.iter().next().unwrap())
                                .green()
                                .to_string();
                            annotation = Some((separator, comment));
                        }
                        dependents => {
                            let separator = if has_hashes { "\n " } else { " " };
                            let dependents = dependents
                                .iter()
                                .map(ToString::to_string)
                                .chain(source.iter().map(ToString::to_string))
                                .collect::<Vec<_>>()
                                .join(", ");
                            let comment = format!("# via {dependents}").green().to_string();
                            annotation = Some((separator, comment));
                        }
                    },
                    AnnotationStyle::Split => match dependents.as_slice() {
                        [] if source.is_empty() => {}
                        [] if source.len() == 1 => {
                            let separator = "\n";
                            let comment = format!(" # via {}", source.iter().next().unwrap())
                                .green()
                                .to_string();
                            annotation = Some((separator, comment));
                        }
                        [dependent] if source.is_empty() => {
                            let separator = "\n";
                            let comment = format!(" # via {dependent}").green().to_string();
                            annotation = Some((separator, comment));
                        }
                        dependents => {
                            let separator = "\n";
                            let dependent = source
                                .iter()
                                .map(ToString::to_string)
                                .chain(dependents.iter().map(ToString::to_string))
                                .map(|name| format!(" # {name}"))
                                .collect::<Vec<_>>()
                                .join("\n");
                            let comment = format!(" # via\n{dependent}").green().to_string();
                            annotation = Some((separator, comment));
                        }
                    },
                }
            }
            if let Some((separator, comment)) = annotation {
                // Assemble the line with the annotations and remove trailing whitespaces.
                // (`{line:24}` pads the requirement so inline comments align in columns.)
                for line in format!("{line:24}{separator}{comment}").lines() {
                    let line = line.trim_end();
                    writeln!(f, "{line}")?;
                }
            } else {
                // Write the line as is.
                writeln!(f, "{line}")?;
            }
            // If enabled, include indexes to indicate which index was used for each package (e.g.,
            // `# from https://pypi.org/simple`).
            if self.include_index_annotation {
                if let Some(index) = node.dist.index() {
                    let url = index.without_credentials();
                    writeln!(f, "{}", format!(" # from {url}").green())?;
                }
            }
        }
        Ok(())
    }
}
/// Indicate the style of annotation comments, used to indicate the dependencies that requested each
/// package.
///
/// Deserialized from kebab-case (`line` / `split`) in configuration; also exposed as a CLI
/// value enum when the `clap` feature is enabled.
#[derive(Debug, Default, Copy, Clone, PartialEq, serde::Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum AnnotationStyle {
    /// Render the annotations on a single, comma-separated line.
    Line,
    /// Render each annotation on its own line.
    #[default]
    Split,
}
/// We don't need the edge markers anymore since we switched to propagated markers.
type IntermediatePetGraph<'dist> = Graph<DisplayResolutionGraphNode<'dist>, (), Directed>;
/// The final display graph: one [`RequirementsTxtDist`] per node, with unit edges retained
/// only so `# via` annotations can be derived from edge direction.
type RequirementsTxtGraph<'dist> = Graph<RequirementsTxtDist<'dist>, (), Directed>;
/// Reduce the graph, such that all nodes for a single package are combined, regardless of
/// the extras, as long as they have the same version and markers.
///
/// For example, `flask` and `flask[dotenv]` should be reduced into a single `flask[dotenv]`
/// node.
///
/// If the extras have different markers, they'll be treated as separate nodes. For example,
/// `flask[dotenv] ; sys_platform == "win32"` and `flask[async] ; sys_platform == "linux"`
/// would _not_ be combined.
///
/// We also remove the root node, to simplify the graph structure.
fn combine_extras<'dist>(graph: &IntermediatePetGraph<'dist>) -> RequirementsTxtGraph<'dist> {
    /// The deduplication key for a node: package name plus its propagated markers.
    fn key<'dist>(dist: &'dist RequirementsTxtDist) -> (&'dist PackageName, MarkerTree) {
        (dist.name(), dist.markers)
    }
    let mut reduced = RequirementsTxtGraph::with_capacity(graph.node_count(), graph.edge_count());
    let mut index_of = FxHashMap::with_capacity_and_hasher(graph.node_count(), FxBuildHasher);
    // Copy each distribution into the reduced graph, merging extras for nodes that
    // share a (name, markers) key. The root node is dropped here.
    for node in graph.node_indices() {
        let DisplayResolutionGraphNode::Dist(dist) = &graph[node] else {
            continue;
        };
        // In the `requirements.txt` output, we want a flat installation list, so we need to use
        // the reachability markers instead of the edge markers.
        if let Some(&existing) = index_of.get(&key(dist)) {
            // Already present: union in this node's extras, keeping them sorted and unique.
            let merged: &mut RequirementsTxtDist = &mut reduced[existing];
            merged.extras.extend(dist.extras.iter().cloned());
            merged.extras.sort_unstable();
            merged.extras.dedup();
        } else {
            index_of.insert(key(dist), reduced.add_node(dist.clone()));
        }
    }
    // Recreate every edge between the surviving (merged) nodes; `update_edge`
    // deduplicates parallel edges.
    for edge in graph.edge_indices() {
        let (source, target) = graph.edge_endpoints(edge).unwrap();
        let DisplayResolutionGraphNode::Dist(source_node) = &graph[source] else {
            continue;
        };
        let DisplayResolutionGraphNode::Dist(target_node) = &graph[target] else {
            continue;
        };
        reduced.update_edge(index_of[&key(source_node)], index_of[&key(target_node)], ());
    }
    reduced
}
/// Reduce the graph, such that all nodes for a single package are combined, with extras
/// removed.
///
/// For example, `flask`, `flask[async]`, and `flask[dotenv]` should be reduced into a single
/// `flask` node, with a conjunction of their markers.
///
/// We also remove the root node, to simplify the graph structure.
fn strip_extras<'dist>(graph: &IntermediatePetGraph<'dist>) -> RequirementsTxtGraph<'dist> {
    let mut reduced = RequirementsTxtGraph::with_capacity(graph.node_count(), graph.edge_count());
    let mut index_of = FxHashMap::with_capacity_and_hasher(graph.node_count(), FxBuildHasher);
    // Copy each distribution into the reduced graph, collapsing all extra-variants of a
    // version into one node. The root node is dropped here.
    for node in graph.node_indices() {
        let DisplayResolutionGraphNode::Dist(dist) = &graph[node] else {
            continue;
        };
        // In the `requirements.txt` output, we want a flat installation list, so we need to use
        // the reachability markers instead of the edge markers.
        if let Some(&existing) = index_of.get(&dist.version_id()) {
            let merged: &mut RequirementsTxtDist = &mut reduced[existing];
            merged.extras.clear();
            // Union the markers so the stripped package installs whenever *any* of its
            // extra-variants would have. Consider:
            // ```
            // foo[bar]==1.0.0; sys_platform == 'linux'
            // foo==1.0.0; sys_platform != 'linux'
            // ```
            // In this case, we want to write `foo==1.0.0; sys_platform == 'linux' or sys_platform == 'windows'`
            merged.markers.or(dist.markers);
        } else {
            index_of.insert(dist.version_id(), reduced.add_node(dist.clone()));
        }
    }
    // Recreate every edge between the surviving (collapsed) nodes; `update_edge`
    // deduplicates parallel edges.
    for edge in graph.edge_indices() {
        let (source, target) = graph.edge_endpoints(edge).unwrap();
        let DisplayResolutionGraphNode::Dist(source_node) = &graph[source] else {
            continue;
        };
        let DisplayResolutionGraphNode::Dist(target_node) = &graph[target] else {
            continue;
        };
        reduced.update_edge(
            index_of[&source_node.version_id()],
            index_of[&target_node.version_id()],
            (),
        );
    }
    reduced
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolution/mod.rs | crates/uv-resolver/src/resolution/mod.rs | use std::fmt::Display;
use uv_distribution::Metadata;
use uv_distribution_types::{
BuiltDist, Dist, DistributionMetadata, IndexUrl, Name, ResolvedDist, SourceDist,
VersionOrUrlRef,
};
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::Version;
use uv_pypi_types::HashDigests;
pub use crate::resolution::display::{AnnotationStyle, DisplayResolutionGraph};
pub(crate) use crate::resolution::output::ResolutionGraphNode;
pub use crate::resolution::output::{ConflictingDistributionError, ResolverOutput};
pub(crate) use crate::resolution::requirements_txt::RequirementsTxtDist;
use crate::universal_marker::UniversalMarker;
mod display;
mod output;
mod requirements_txt;
/// A pinned package with its resolved distribution and metadata. The [`ResolvedDist`] refers to a
/// specific distribution (e.g., a specific wheel), while the [`Metadata23`] refers to the metadata
/// for the package-version pair.
#[derive(Debug, Clone)]
pub(crate) struct AnnotatedDist {
    /// The resolved distribution (a specific wheel, sdist, or installed package).
    pub(crate) dist: ResolvedDist,
    /// The normalized package name.
    pub(crate) name: PackageName,
    /// The pinned version.
    pub(crate) version: Version,
    /// The extra this node represents, if it is a virtual extra node rather than the base package.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group this node represents, if it is a virtual group node.
    pub(crate) group: Option<GroupName>,
    /// The hashes recorded for the distribution's files.
    pub(crate) hashes: HashDigests,
    /// The package-version metadata, when available.
    pub(crate) metadata: Option<Metadata>,
    /// The "full" marker for this distribution. It precisely describes all
    /// marker environments for which this distribution _can_ be installed.
    /// That is, when doing a traversal over all of the distributions in a
    /// resolution, this marker corresponds to the disjunction of all paths to
    /// this distribution in the resolution graph.
    pub(crate) marker: UniversalMarker,
}
impl AnnotatedDist {
    /// Returns `true` if the [`AnnotatedDist`] is a base package (i.e., not an extra or a
    /// dependency group).
    pub(crate) fn is_base(&self) -> bool {
        matches!((&self.extra, &self.group), (None, None))
    }
    /// Returns the [`IndexUrl`] of the distribution, if it is from a registry.
    pub(crate) fn index(&self) -> Option<&IndexUrl> {
        // Already-installed distributions carry no registry index.
        let ResolvedDist::Installable { dist, .. } = &self.dist else {
            return None;
        };
        // Only registry wheels and registry source distributions have an index; all
        // URL-, Git-, and path-based sources do not. The variants are spelled out
        // exhaustively so that new ones trigger a compile error here.
        match dist.as_ref() {
            Dist::Built(BuiltDist::Registry(wheel)) => Some(&wheel.best_wheel().index),
            Dist::Source(SourceDist::Registry(sdist)) => Some(&sdist.index),
            Dist::Built(BuiltDist::DirectUrl(_) | BuiltDist::Path(_)) => None,
            Dist::Source(
                SourceDist::DirectUrl(_)
                | SourceDist::Git(_)
                | SourceDist::Path(_)
                | SourceDist::Directory(_),
            ) => None,
        }
    }
}
impl Name for AnnotatedDist {
    // Delegate to the resolved distribution's canonical name.
    fn name(&self) -> &PackageName {
        self.dist.name()
    }
}
impl DistributionMetadata for AnnotatedDist {
    // Delegate to the resolved distribution's version/URL identity.
    fn version_or_url(&self) -> VersionOrUrlRef<'_> {
        self.dist.version_or_url()
    }
}
impl Display for AnnotatedDist {
    // Render exactly as the underlying distribution renders.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.dist, f)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolution/output.rs | crates/uv-resolver/src/resolution/output.rs | use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::sync::Arc;
use indexmap::IndexSet;
use petgraph::{
Directed, Direction,
graph::{Graph, NodeIndex},
};
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use uv_configuration::{Constraints, Overrides};
use uv_distribution::Metadata;
use uv_distribution_types::{
Dist, DistributionMetadata, Edge, IndexUrl, Name, Node, Requirement, RequiresPython,
ResolutionDiagnostic, ResolvedDist, VersionId, VersionOrUrlRef,
};
use uv_git::GitResolver;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{Version, VersionSpecifier};
use uv_pep508::{MarkerEnvironment, MarkerTree, MarkerTreeKind};
use uv_pypi_types::{Conflicts, HashDigests, ParsedUrlError, VerbatimParsedUrl, Yanked};
use crate::graph_ops::{marker_reachability, simplify_conflict_markers};
use crate::pins::FilePins;
use crate::preferences::Preferences;
use crate::redirect::url_to_precise;
use crate::resolution::AnnotatedDist;
use crate::resolution_mode::ResolutionStrategy;
use crate::resolver::{Resolution, ResolutionDependencyEdge, ResolutionPackage};
use crate::universal_marker::{ConflictMarker, UniversalMarker};
use crate::{
InMemoryIndex, MetadataResponse, Options, PythonRequirement, ResolveError, VersionsResponse,
};
/// The output of a successful resolution.
///
/// Includes a complete resolution graph in which every node represents a pinned package and every
/// edge represents a dependency between two pinned packages.
#[derive(Debug)]
pub struct ResolverOutput {
    /// The underlying graph. Edge weights are the universal markers under which the
    /// dependency edge applies.
    pub(crate) graph: Graph<ResolutionGraphNode, UniversalMarker, Directed>,
    /// The range of supported Python versions.
    pub(crate) requires_python: RequiresPython,
    /// If the resolution had non-identical forks, store the forks in the lockfile so we can
    /// recreate them in subsequent resolutions.
    pub(crate) fork_markers: Vec<UniversalMarker>,
    /// Any diagnostics that were encountered while building the graph.
    pub(crate) diagnostics: Vec<ResolutionDiagnostic>,
    /// The requirements that were used to build the graph.
    pub(crate) requirements: Vec<Requirement>,
    /// The constraints that were used to build the graph.
    pub(crate) constraints: Constraints,
    /// The overrides that were used to build the graph.
    pub(crate) overrides: Overrides,
    /// The options that were used to build the graph.
    pub(crate) options: Options,
}
/// A node in the resolved dependency graph: either the synthetic root or a pinned,
/// annotated distribution.
#[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)]
pub(crate) enum ResolutionGraphNode {
    /// The synthetic root, from which all direct requirements hang.
    Root,
    /// A pinned distribution with its annotations (markers, hashes, metadata).
    Dist(AnnotatedDist),
}
impl ResolutionGraphNode {
    /// The universal marker under which this node is reachable; always true for the root.
    pub(crate) fn marker(&self) -> &UniversalMarker {
        if let Self::Dist(dist) = self {
            &dist.marker
        } else {
            &UniversalMarker::TRUE
        }
    }
    /// The `(package, extra)` pair, if this node is a distribution representing an extra.
    pub(crate) fn package_extra_names(&self) -> Option<(&PackageName, &ExtraName)> {
        let Self::Dist(dist) = self else {
            return None;
        };
        dist.extra.as_ref().map(|extra| (&dist.name, extra))
    }
    /// The `(package, group)` pair, if this node is a distribution representing a
    /// dependency group.
    pub(crate) fn package_group_names(&self) -> Option<(&PackageName, &GroupName)> {
        let Self::Dist(dist) = self else {
            return None;
        };
        dist.group.as_ref().map(|group| (&dist.name, group))
    }
    /// The package name, if this node is a distribution (the root has none).
    pub(crate) fn package_name(&self) -> Option<&PackageName> {
        if let Self::Dist(dist) = self {
            Some(&dist.name)
        } else {
            None
        }
    }
}
impl Display for ResolutionGraphNode {
    // The root renders as the literal "root"; distributions delegate to their own display.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Root => f.write_str("root"),
            Self::Dist(dist) => Display::fmt(dist, f),
        }
    }
}
/// A borrowed, hashable identity for a resolved package, used to deduplicate graph nodes:
/// name and version plus the distinguishing source (URL or index) and virtual-node variant
/// (extra or group).
#[derive(Debug, Eq, PartialEq, Hash)]
struct PackageRef<'a> {
    package_name: &'a PackageName,
    version: &'a Version,
    url: Option<&'a VerbatimParsedUrl>,
    index: Option<&'a IndexUrl>,
    extra: Option<&'a ExtraName>,
    group: Option<&'a GroupName>,
}
impl ResolverOutput {
    /// Create a new [`ResolverOutput`] from the resolved PubGrub state.
    ///
    /// Builds the resolution graph in three passes: (1) add every unique
    /// `(package, version)` node from all forks, (2) add every edge with the
    /// fork's marker propagated onto it, (3) compute marker reachability,
    /// fold in conflict knowledge, and prune unreachable nodes.
    pub(crate) fn from_state(
        resolutions: &[Resolution],
        requirements: &[Requirement],
        constraints: &Constraints,
        overrides: &Overrides,
        preferences: &Preferences,
        index: &InMemoryIndex,
        git: &GitResolver,
        python: &PythonRequirement,
        conflicts: &Conflicts,
        resolution_strategy: &ResolutionStrategy,
        options: Options,
    ) -> Result<Self, ResolveError> {
        // NOTE(review): assumes at least one resolution fork is present; this
        // indexing panics on an empty slice — confirm callers guarantee it.
        let size_guess = resolutions[0].nodes.len();
        let mut graph: Graph<ResolutionGraphNode, UniversalMarker, Directed> =
            Graph::with_capacity(size_guess, size_guess);
        let mut inverse: FxHashMap<PackageRef, NodeIndex<u32>> =
            FxHashMap::with_capacity_and_hasher(size_guess, FxBuildHasher);
        let mut diagnostics = Vec::new();
        // Add the root node.
        let root_index = graph.add_node(ResolutionGraphNode::Root);
        let mut seen = FxHashSet::default();
        for resolution in resolutions {
            // Add every package to the graph.
            for (package, version) in &resolution.nodes {
                if !seen.insert((package, version)) {
                    // Insert each node only once.
                    continue;
                }
                Self::add_version(
                    &mut graph,
                    &mut inverse,
                    &mut diagnostics,
                    preferences,
                    &resolution.pins,
                    index,
                    git,
                    package,
                    version,
                )?;
            }
        }
        let mut seen = FxHashSet::default();
        for resolution in resolutions {
            let marker = resolution.env.try_universal_markers().unwrap_or_default();
            // Add every edge to the graph, propagating the marker for the current fork, if
            // necessary.
            for edge in &resolution.edges {
                if !seen.insert((edge, marker)) {
                    // Insert each node only once.
                    continue;
                }
                Self::add_edge(&mut graph, &mut inverse, root_index, edge, marker);
            }
        }
        // Extract the `Requires-Python` range, if provided.
        let requires_python = python.target().clone();
        let fork_markers: Vec<UniversalMarker> = if let [resolution] = resolutions {
            // In the case of a singleton marker, we only include it if it's not
            // always true. Otherwise, we keep our `fork_markers` empty as there
            // are no forks.
            resolution
                .env
                .try_universal_markers()
                .into_iter()
                .filter(|marker| !marker.is_true())
                .collect()
        } else {
            resolutions
                .iter()
                .map(|resolution| resolution.env.try_universal_markers().unwrap_or_default())
                .collect()
        };
        // Compute and apply the marker reachability.
        let mut reachability = marker_reachability(&graph, &fork_markers);
        // Apply the reachability to the graph and imbibe world
        // knowledge about conflicts.
        let conflict_marker = ConflictMarker::from_conflicts(conflicts);
        for index in graph.node_indices() {
            if let ResolutionGraphNode::Dist(dist) = &mut graph[index] {
                dist.marker = reachability.remove(&index).unwrap_or_default();
                dist.marker.imbibe(conflict_marker);
            }
        }
        for weight in graph.edge_weights_mut() {
            weight.imbibe(conflict_marker);
        }
        simplify_conflict_markers(conflicts, &mut graph);
        // Discard any unreachable nodes.
        graph.retain_nodes(|graph, node| !graph[node].marker().is_false());
        // Under lowest-version resolution, missing lower bounds make the
        // result unstable; surface them as diagnostics.
        if matches!(resolution_strategy, ResolutionStrategy::Lowest) {
            report_missing_lower_bounds(&graph, &mut diagnostics, constraints, overrides);
        }
        let output = Self {
            graph,
            requires_python,
            diagnostics,
            requirements: requirements.to_vec(),
            constraints: constraints.clone(),
            overrides: overrides.clone(),
            options,
            fork_markers,
        };
        // We only do conflicting distribution detection when no
        // conflicting groups have been specified. The reason here
        // is that when there are conflicting groups, then from the
        // perspective of marker expressions only, it may look like
        // one can install different versions of the same package for
        // the same marker environment. However, the thing preventing
        // this is that the only way this should be possible is if
        // one tries to install two or more conflicting extras at
        // the same time. At which point, uv will report an error,
        // thereby sidestepping the possibility of installing different
        // versions of the same package into the same virtualenv. ---AG
        //
        // FIXME: When `UniversalMarker` supports extras/groups, we can
        // re-enable this.
        if conflicts.is_empty() {
            #[allow(unused_mut, reason = "Used in debug_assertions below")]
            let mut conflicting = output.find_conflicting_distributions();
            if !conflicting.is_empty() {
                tracing::warn!(
                    "found {} conflicting distributions in resolution, \
                    please report this as a bug at \
                    https://github.com/astral-sh/uv/issues/new",
                    conflicting.len()
                );
            }
            // When testing, we materialize any conflicting distributions as an
            // error to ensure any relevant tests fail. Otherwise, we just leave
            // it at the warning message above. The reason for not returning an
            // error "in production" is that an incorrect resolution may only be
            // incorrect in certain marker environments, but fine in most others.
            // Returning an error in that case would make `uv` unusable whenever
            // the bug occurs, but letting it through means `uv` *could* still be
            // usable.
            #[cfg(debug_assertions)]
            if let Some(err) = conflicting.pop() {
                return Err(ResolveError::ConflictingDistribution(err));
            }
        }
        Ok(output)
    }
    /// Insert (or merge) a dependency edge between two nodes in the graph.
    ///
    /// `edge.from == None` denotes an edge originating from the virtual root.
    /// The fork-level `marker` is AND-ed into the edge's own marker; if an
    /// edge between the two endpoints already exists, the two markers are
    /// OR-ed together instead of adding a parallel edge.
    ///
    /// Panics if either endpoint has not been registered in `inverse` via
    /// `add_version` beforehand (the `inverse[...]` index would fail).
    fn add_edge(
        graph: &mut Graph<ResolutionGraphNode, UniversalMarker>,
        inverse: &mut FxHashMap<PackageRef<'_>, NodeIndex>,
        root_index: NodeIndex,
        edge: &ResolutionDependencyEdge,
        marker: UniversalMarker,
    ) {
        let from_index = edge.from.as_ref().map_or(root_index, |from| {
            inverse[&PackageRef {
                package_name: from,
                version: &edge.from_version,
                url: edge.from_url.as_ref(),
                index: edge.from_index.as_ref(),
                extra: edge.from_extra.as_ref(),
                group: edge.from_group.as_ref(),
            }]
        });
        let to_index = inverse[&PackageRef {
            package_name: &edge.to,
            version: &edge.to_version,
            url: edge.to_url.as_ref(),
            index: edge.to_index.as_ref(),
            extra: edge.to_extra.as_ref(),
            group: edge.to_group.as_ref(),
        }];
        // Restrict the edge's own marker to the fork it was observed in.
        let edge_marker = {
            let mut edge_marker = edge.universal_marker();
            edge_marker.and(marker);
            edge_marker
        };
        if let Some(weight) = graph
            .find_edge(from_index, to_index)
            .and_then(|edge| graph.edge_weight_mut(edge))
        {
            // If either the existing marker or new marker is `true`, then the dependency is
            // included unconditionally, and so the combined marker is `true`.
            weight.or(edge_marker);
        } else {
            graph.update_edge(from_index, to_index, edge_marker);
        }
    }
    /// Add a resolved `(package, version)` as a node to the graph and record
    /// it in `inverse` so `add_edge` can later find it by [`PackageRef`].
    ///
    /// Also validates that any requested extra or dependency group actually
    /// exists in the distribution's metadata, pushing a diagnostic otherwise.
    fn add_version<'a>(
        graph: &mut Graph<ResolutionGraphNode, UniversalMarker>,
        inverse: &mut FxHashMap<PackageRef<'a>, NodeIndex>,
        diagnostics: &mut Vec<ResolutionDiagnostic>,
        preferences: &Preferences,
        pins: &FilePins,
        in_memory: &InMemoryIndex,
        git: &GitResolver,
        package: &'a ResolutionPackage,
        version: &'a Version,
    ) -> Result<(), ResolveError> {
        let ResolutionPackage {
            name,
            extra,
            dev: group,
            url,
            index,
        } = &package;
        // Map the package to a distribution.
        let (dist, hashes, metadata) = Self::parse_dist(
            name,
            index.as_ref(),
            url.as_ref(),
            version,
            pins,
            diagnostics,
            preferences,
            in_memory,
            git,
        )?;
        if let Some(metadata) = metadata.as_ref() {
            // Validate the extra.
            if let Some(extra) = extra {
                if !metadata.provides_extra.contains(extra) {
                    diagnostics.push(ResolutionDiagnostic::MissingExtra {
                        dist: dist.clone(),
                        extra: extra.clone(),
                    });
                }
            }
            // Validate the development dependency group.
            if let Some(dev) = group {
                if !metadata.dependency_groups.contains_key(dev) {
                    diagnostics.push(ResolutionDiagnostic::MissingGroup {
                        dist: dist.clone(),
                        group: dev.clone(),
                    });
                }
            }
        }
        // Add the distribution to the graph. The marker starts as `TRUE` and
        // is replaced by the computed reachability in `from_state`.
        let node = graph.add_node(ResolutionGraphNode::Dist(AnnotatedDist {
            dist,
            name: name.clone(),
            version: version.clone(),
            extra: extra.clone(),
            group: group.clone(),
            hashes,
            metadata,
            marker: UniversalMarker::TRUE,
        }));
        inverse.insert(
            PackageRef {
                package_name: name,
                version,
                url: url.as_ref(),
                index: index.as_ref(),
                extra: extra.as_ref(),
                group: group.as_ref(),
            },
            node,
        );
        Ok(())
    }
    /// Map a resolved package to its distribution, hashes, and (optional) metadata.
    ///
    /// URL distributions are constructed from the URL itself and must have
    /// metadata present in the in-memory index (panics otherwise, since that
    /// would be a resolver bug). Registry distributions are looked up from the
    /// pinned files; yanked versions are reported as diagnostics rather than
    /// errors.
    fn parse_dist(
        name: &PackageName,
        index: Option<&IndexUrl>,
        url: Option<&VerbatimParsedUrl>,
        version: &Version,
        pins: &FilePins,
        diagnostics: &mut Vec<ResolutionDiagnostic>,
        preferences: &Preferences,
        in_memory: &InMemoryIndex,
        git: &GitResolver,
    ) -> Result<(ResolvedDist, HashDigests, Option<Metadata>), ResolveError> {
        Ok(if let Some(url) = url {
            // Create the distribution.
            let dist = Dist::from_url(name.clone(), url_to_precise(url.clone(), git))?;
            let version_id = VersionId::from_url(&url.verbatim);
            // Extract the hashes.
            let hashes = Self::get_hashes(
                name,
                index,
                Some(url),
                &version_id,
                version,
                preferences,
                in_memory,
            );
            // Extract the metadata.
            let metadata = {
                let response = in_memory
                    .distributions()
                    .get(&version_id)
                    .unwrap_or_else(|| {
                        panic!("Every URL distribution should have metadata: {version_id:?}")
                    });
                let MetadataResponse::Found(archive) = &*response else {
                    panic!("Every URL distribution should have metadata: {version_id:?}")
                };
                archive.metadata.clone()
            };
            (
                ResolvedDist::Installable {
                    dist: Arc::new(dist),
                    version: Some(version.clone()),
                },
                hashes,
                Some(metadata),
            )
        } else {
            let dist = pins
                .get(name, version)
                .expect("Every package should be pinned")
                .clone();
            let version_id = dist.version_id();
            // Track yanks for any registry distributions.
            match dist.yanked() {
                None | Some(Yanked::Bool(false)) => {}
                Some(Yanked::Bool(true)) => {
                    diagnostics.push(ResolutionDiagnostic::YankedVersion {
                        dist: dist.clone(),
                        reason: None,
                    });
                }
                Some(Yanked::Reason(reason)) => {
                    diagnostics.push(ResolutionDiagnostic::YankedVersion {
                        dist: dist.clone(),
                        reason: Some(reason.to_string()),
                    });
                }
            }
            // Extract the hashes.
            let hashes = Self::get_hashes(
                name,
                index,
                None,
                &version_id,
                version,
                preferences,
                in_memory,
            );
            // Extract the metadata. Unlike the URL case, missing metadata is
            // tolerated here and yields `None`.
            let metadata = {
                in_memory
                    .distributions()
                    .get(&version_id)
                    .and_then(|response| {
                        if let MetadataResponse::Found(archive) = &*response {
                            Some(archive.metadata.clone())
                        } else {
                            None
                        }
                    })
            };
            (dist, hashes, metadata)
        })
    }
    /// Identify the hashes for the [`VersionId`], preserving any hashes that were provided by the
    /// lockfile.
    ///
    /// Precedence: (1) hashes pinned by the lockfile preferences, (2) hashes
    /// attached to the specific distribution's metadata, (3) package-level
    /// hashes served by the registry (implicit indexes first, then the
    /// explicit index, if any). Returns an empty digest set when none apply.
    fn get_hashes(
        name: &PackageName,
        index: Option<&IndexUrl>,
        url: Option<&VerbatimParsedUrl>,
        version_id: &VersionId,
        version: &Version,
        preferences: &Preferences,
        in_memory: &InMemoryIndex,
    ) -> HashDigests {
        // 1. Look for hashes from the lockfile.
        if let Some(digests) = preferences.match_hashes(name, version) {
            if !digests.is_empty() {
                return HashDigests::from(digests);
            }
        }
        // 2. Look for hashes for the distribution (i.e., the specific wheel or source distribution).
        if let Some(metadata_response) = in_memory.distributions().get(version_id) {
            if let MetadataResponse::Found(ref archive) = *metadata_response {
                let mut digests = archive.hashes.clone();
                digests.sort_unstable();
                if !digests.is_empty() {
                    return digests;
                }
            }
        }
        // 3. Look for hashes from the registry, which are served at the package level.
        // Only applicable to registry distributions (i.e., when there's no direct URL).
        if url.is_none() {
            // Query the implicit and explicit indexes (lazily) for the hashes.
            let implicit_response = in_memory.implicit().get(name);
            let mut explicit_response = None;
            // Search in the implicit indexes.
            let hashes = implicit_response
                .as_ref()
                .and_then(|response| {
                    if let VersionsResponse::Found(version_maps) = &**response {
                        Some(version_maps)
                    } else {
                        None
                    }
                })
                .into_iter()
                .flatten()
                .filter(|version_map| version_map.index() == index)
                .find_map(|version_map| version_map.hashes(version))
                .or_else(|| {
                    // Search in the explicit indexes.
                    explicit_response = index
                        .and_then(|index| in_memory.explicit().get(&(name.clone(), index.clone())));
                    explicit_response
                        .as_ref()
                        .and_then(|response| {
                            if let VersionsResponse::Found(version_maps) = &**response {
                                Some(version_maps)
                            } else {
                                None
                            }
                        })
                        .into_iter()
                        .flatten()
                        .filter(|version_map| version_map.index() == index)
                        .find_map(|version_map| version_map.hashes(version))
                });
            if let Some(hashes) = hashes {
                let mut digests = HashDigests::from(hashes);
                digests.sort_unstable();
                if !digests.is_empty() {
                    return digests;
                }
            }
        }
        HashDigests::empty()
    }
/// Returns an iterator over the distinct packages in the graph.
fn dists(&self) -> impl Iterator<Item = &AnnotatedDist> {
self.graph
.node_indices()
.filter_map(move |index| match &self.graph[index] {
ResolutionGraphNode::Root => None,
ResolutionGraphNode::Dist(dist) => Some(dist),
})
}
/// Return the number of distinct packages in the graph.
pub fn len(&self) -> usize {
self.dists().filter(|dist| dist.is_base()).count()
}
/// Return `true` if there are no packages in the graph.
pub fn is_empty(&self) -> bool {
self.dists().any(AnnotatedDist::is_base)
}
/// Returns `true` if the graph contains the given package.
pub fn contains(&self, name: &PackageName) -> bool {
self.dists().any(|dist| dist.name() == name)
}
/// Return the [`ResolutionDiagnostic`]s that were encountered while building the graph.
pub fn diagnostics(&self) -> &[ResolutionDiagnostic] {
&self.diagnostics
}
    /// Return the marker tree specific to this resolution.
    ///
    /// This accepts an in-memory-index and marker environment, all
    /// of which should be the same values given to the resolver that produced
    /// this graph.
    ///
    /// The marker tree returned corresponds to an expression that, when true,
    /// this resolution is guaranteed to be correct. Note though that it's
    /// possible for resolution to be correct even if the returned marker
    /// expression is false.
    ///
    /// For example, if the root package has a dependency `foo; sys_platform ==
    /// "macos"` and resolution was performed on Linux, then the marker tree
    /// returned will contain a `sys_platform == "linux"` expression. This
    /// means that whenever the marker expression evaluates to true (i.e., the
    /// current platform is Linux), then the resolution here is correct. But
    /// it is possible that the resolution is also correct on other platforms
    /// that aren't macOS, such as Windows. (It is unclear at time of writing
    /// whether this is fundamentally impossible to compute, or just impossible
    /// to compute in some cases.)
    pub fn marker_tree(
        &self,
        index: &InMemoryIndex,
        marker_env: &MarkerEnvironment,
    ) -> Result<MarkerTree, Box<ParsedUrlError>> {
        use uv_pep508::{
            CanonicalMarkerValueString, CanonicalMarkerValueVersion, MarkerExpression,
            MarkerOperator, MarkerTree,
        };
        /// A subset of the possible marker values.
        ///
        /// We only track the marker parameters that are referenced in a marker
        /// expression. We'll use references to the parameter later to generate
        /// values based on the current marker environment.
        #[derive(Debug, Eq, Hash, PartialEq)]
        enum MarkerParam {
            Version(CanonicalMarkerValueVersion),
            String(CanonicalMarkerValueString),
        }
        /// Add all marker parameters from the given tree to the given set.
        /// Recurses through every decision node in the marker tree.
        fn add_marker_params_from_tree(marker_tree: MarkerTree, set: &mut IndexSet<MarkerParam>) {
            match marker_tree.kind() {
                MarkerTreeKind::True => {}
                MarkerTreeKind::False => {}
                MarkerTreeKind::Version(marker) => {
                    set.insert(MarkerParam::Version(marker.key()));
                    for (_, tree) in marker.edges() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
                MarkerTreeKind::String(marker) => {
                    set.insert(MarkerParam::String(marker.key()));
                    for (_, tree) in marker.children() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
                MarkerTreeKind::In(marker) => {
                    set.insert(MarkerParam::String(marker.key()));
                    for (_, tree) in marker.children() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
                MarkerTreeKind::Contains(marker) => {
                    set.insert(MarkerParam::String(marker.key()));
                    for (_, tree) in marker.children() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
                // We specifically don't care about these for the
                // purposes of generating a marker string for a lock
                // file. Quoted strings are marker values given by the
                // user. We don't track those here, since we're only
                // interested in which markers are used.
                MarkerTreeKind::Extra(marker) => {
                    for (_, tree) in marker.children() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
                MarkerTreeKind::List(marker) => {
                    for (_, tree) in marker.children() {
                        add_marker_params_from_tree(tree, set);
                    }
                }
            }
        }
        // Collect every marker parameter referenced by any dependency of any
        // package in the resolution graph.
        let mut seen_marker_values = IndexSet::default();
        for i in self.graph.node_indices() {
            let ResolutionGraphNode::Dist(dist) = &self.graph[i] else {
                continue;
            };
            let version_id = match dist.version_or_url() {
                VersionOrUrlRef::Version(version) => {
                    VersionId::from_registry(dist.name().clone(), version.clone())
                }
                VersionOrUrlRef::Url(verbatim_url) => VersionId::from_url(verbatim_url.raw()),
            };
            let res = index
                .distributions()
                .get(&version_id)
                .expect("every package in resolution graph has metadata");
            let MetadataResponse::Found(archive, ..) = &*res else {
                panic!(
                    "Every package should have metadata: {:?}",
                    dist.version_id()
                )
            };
            for req in self
                .constraints
                .apply(self.overrides.apply(archive.metadata.requires_dist.iter()))
            {
                add_marker_params_from_tree(req.marker, &mut seen_marker_values);
            }
        }
        // Ensure that we consider markers from direct dependencies.
        for direct_req in self
            .constraints
            .apply(self.overrides.apply(self.requirements.iter()))
        {
            add_marker_params_from_tree(direct_req.marker, &mut seen_marker_values);
        }
        // Generate the final marker expression as a conjunction of
        // strict equality terms, pinning each referenced parameter to its
        // value in the given marker environment.
        let mut conjunction = MarkerTree::TRUE;
        for marker_param in seen_marker_values {
            let expr = match marker_param {
                MarkerParam::Version(value_version) => {
                    let from_env = marker_env.get_version(value_version);
                    MarkerExpression::Version {
                        key: value_version.into(),
                        specifier: VersionSpecifier::equals_version(from_env.clone()),
                    }
                }
                MarkerParam::String(value_string) => {
                    let from_env = marker_env.get_string(value_string);
                    MarkerExpression::String {
                        key: value_string.into(),
                        operator: MarkerOperator::Equal,
                        value: from_env.into(),
                    }
                }
            };
            conjunction.and(MarkerTree::expression(expr));
        }
        Ok(conjunction)
    }
    /// Returns a sequence of conflicting distribution errors from this
    /// resolution.
    ///
    /// Correct resolutions always return an empty sequence. A non-empty
    /// sequence implies there is a package with two distinct versions in the
    /// same marker environment in this resolution. This in turn implies that
    /// an installation in that marker environment could wind up trying to
    /// install different versions of the same package, which is not allowed.
    fn find_conflicting_distributions(&self) -> Vec<ConflictingDistributionError> {
        // Group every (version, marker) pair by package name.
        let mut name_to_markers: BTreeMap<&PackageName, Vec<(&Version, &UniversalMarker)>> =
            BTreeMap::new();
        for node in self.graph.node_weights() {
            let annotated_dist = match node {
                ResolutionGraphNode::Root => continue,
                ResolutionGraphNode::Dist(annotated_dist) => annotated_dist,
            };
            name_to_markers
                .entry(&annotated_dist.name)
                .or_default()
                .push((&annotated_dist.version, &annotated_dist.marker));
        }
        // Pairwise (O(n^2) per package) check: two *different* versions of the
        // same package must have disjoint markers.
        let mut dupes = vec![];
        for (name, marker_trees) in name_to_markers {
            for (i, (version1, marker1)) in marker_trees.iter().enumerate() {
                for (version2, marker2) in &marker_trees[i + 1..] {
                    if version1 == version2 {
                        continue;
                    }
                    if !marker1.is_disjoint(**marker2) {
                        dupes.push(ConflictingDistributionError {
                            name: name.clone(),
                            version1: (*version1).clone(),
                            version2: (*version2).clone(),
                            marker1: **marker1,
                            marker2: **marker2,
                        });
                    }
                }
            }
        }
        dupes
    }
}
/// An error that occurs for conflicting versions of the same package.
///
/// Specifically, this occurs when two distributions with the same package
/// name are found with distinct versions in at least one possible marker
/// environment. This error reflects an error that could occur when installing
/// the corresponding resolution into that marker environment.
#[derive(Debug)]
pub struct ConflictingDistributionError {
    name: PackageName,
    // The two distinct versions found for `name`.
    version1: Version,
    version2: Version,
    // The (non-disjoint) markers under which each version would be installed.
    marker1: UniversalMarker,
    marker2: UniversalMarker,
}
impl std::error::Error for ConflictingDistributionError {}
impl Display for ConflictingDistributionError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self {
            ref name,
            ref version1,
            ref version2,
            ref marker1,
            ref marker2,
        } = *self;
        // Note: the literal newline and indentation in this string are part of
        // the rendered message; the trailing `\` continuation is not.
        write!(
            f,
            "found conflicting versions for package `{name}`:
            `{marker1:?}` (for version `{version1}`) is not disjoint with \
            `{marker2:?}` (for version `{version2}`)",
        )
    }
}
/// Convert a [`ResolverOutput`] into a [`uv_distribution_types::Resolution`].
///
/// This involves converting [`ResolutionGraphNode`]s into [`Node`]s, which in turn involves
/// dropping any extras and dependency groups from the graph nodes. Instead, each package is
/// collapsed into a single node, with extras and dependency groups annotating the _edges_, rather
/// than being represented as separate nodes. This is a more natural representation, but a further
/// departure from the PubGrub model.
///
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | true |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-resolver/src/resolution/requirements_txt.rs | crates/uv-resolver/src/resolution/requirements_txt.rs | use std::borrow::Cow;
use std::fmt::Display;
use std::path::Path;
use itertools::Itertools;
use uv_distribution_types::{
DistributionMetadata, Name, RequiresPython, ResolvedDist, SimplifiedMarkerTree, Verbatim,
VersionOrUrlRef,
};
use uv_normalize::{ExtraName, PackageName};
use uv_pep440::Version;
use uv_pep508::{MarkerTree, Scheme, split_scheme};
use uv_pypi_types::HashDigest;
use crate::resolution::AnnotatedDist;
#[derive(Debug, Clone)]
/// A pinned package with its resolved distribution and all the extras that were pinned for it.
pub(crate) struct RequirementsTxtDist<'dist> {
    pub(crate) dist: &'dist ResolvedDist,
    pub(crate) version: &'dist Version,
    // Hashes to emit alongside the requirement (e.g. as `--hash=` entries).
    pub(crate) hashes: &'dist [HashDigest],
    // Combined marker under which this distribution is installed.
    pub(crate) markers: MarkerTree,
    // All extras pinned for this package; rendered as `name[extra1, extra2]`.
    pub(crate) extras: Vec<ExtraName>,
}
impl<'dist> RequirementsTxtDist<'dist> {
    /// Convert the [`RequirementsTxtDist`] to a requirement that adheres to the `requirements.txt`
    /// format.
    ///
    /// This typically results in a PEP 508 representation of the requirement, but will write an
    /// unnamed requirement for relative paths, which can't be represented with PEP 508 (but are
    /// supported in `requirements.txt`).
    pub(crate) fn to_requirements_txt(
        &self,
        requires_python: &RequiresPython,
        include_markers: bool,
    ) -> Cow<'_, str> {
        // If the URL is editable, write it as an editable requirement.
        if self.dist.is_editable() {
            if let VersionOrUrlRef::Url(url) = self.dist.version_or_url() {
                let given = url.verbatim();
                return Cow::Owned(format!("-e {given}"));
            }
        }
        // If the URL is not _definitively_ a `file://` URL, write it as a relative path.
        if self.dist.is_local() {
            if let VersionOrUrlRef::Url(url) = self.dist.version_or_url() {
                let given = url.verbatim();
                // `Some(path)` below means "emit as a bare (unnamed) path";
                // `None` means "fall through to the PEP 508 rendering".
                let given = match split_scheme(&given) {
                    Some((scheme, path)) => {
                        match Scheme::parse(scheme) {
                            Some(Scheme::File) => {
                                if path
                                    .strip_prefix("//localhost")
                                    .filter(|path| path.starts_with('/'))
                                    .is_some()
                                {
                                    // Always absolute; nothing to do.
                                    None
                                } else if let Some(path) = path.strip_prefix("//") {
                                    // Strip the prefix, to convert, e.g., `file://flask-3.0.3-py3-none-any.whl` to `flask-3.0.3-py3-none-any.whl`.
                                    //
                                    // However, we should allow any of the following:
                                    // - `file:///flask-3.0.3-py3-none-any.whl`
                                    // - `file://C:\Users\user\flask-3.0.3-py3-none-any.whl`
                                    // - `file:///C:\Users\user\flask-3.0.3-py3-none-any.whl`
                                    if !path.starts_with("${PROJECT_ROOT}")
                                        && !Path::new(path).has_root()
                                    {
                                        Some(Cow::Owned(path.to_string()))
                                    } else {
                                        // Ex) `file:///flask-3.0.3-py3-none-any.whl`
                                        None
                                    }
                                } else {
                                    // Ex) `file:./flask-3.0.3-py3-none-any.whl`
                                    None
                                }
                            }
                            Some(_) => None,
                            None => {
                                // Ex) `flask @ C:\Users\user\flask-3.0.3-py3-none-any.whl`
                                Some(given)
                            }
                        }
                    }
                    None => {
                        // Ex) `flask @ flask-3.0.3-py3-none-any.whl`
                        Some(given)
                    }
                };
                if let Some(given) = given {
                    return if let Some(markers) =
                        SimplifiedMarkerTree::new(requires_python, self.markers)
                            .try_to_string()
                            .filter(|_| include_markers)
                    {
                        Cow::Owned(format!("{given} ; {markers}"))
                    } else {
                        given
                    };
                }
            }
        }
        // Fall through: render a named PEP 508 requirement, with extras and
        // markers as applicable.
        if self.extras.is_empty() {
            if let Some(markers) = SimplifiedMarkerTree::new(requires_python, self.markers)
                .try_to_string()
                .filter(|_| include_markers)
            {
                Cow::Owned(format!("{} ; {}", self.dist.verbatim(), markers))
            } else {
                self.dist.verbatim()
            }
        } else {
            let mut extras = self.extras.clone();
            extras.sort_unstable();
            extras.dedup();
            if let Some(markers) = SimplifiedMarkerTree::new(requires_python, self.markers)
                .try_to_string()
                .filter(|_| include_markers)
            {
                Cow::Owned(format!(
                    "{}[{}]{} ; {}",
                    self.name(),
                    extras.into_iter().join(", "),
                    self.version_or_url().verbatim(),
                    markers,
                ))
            } else {
                Cow::Owned(format!(
                    "{}[{}]{}",
                    self.name(),
                    extras.into_iter().join(", "),
                    self.version_or_url().verbatim()
                ))
            }
        }
    }
/// Convert the [`RequirementsTxtDist`] to a comparator that can be used to sort the requirements
/// in a `requirements.txt` file.
pub(crate) fn to_comparator(&self) -> RequirementsTxtComparator<'_> {
if self.dist.is_editable() {
if let VersionOrUrlRef::Url(url) = self.dist.version_or_url() {
return RequirementsTxtComparator::Url(url.verbatim());
}
}
if let VersionOrUrlRef::Url(url) = self.version_or_url() {
RequirementsTxtComparator::Name {
name: self.name(),
version: self.version,
url: Some(url.verbatim()),
extras: &self.extras,
}
} else {
RequirementsTxtComparator::Name {
name: self.name(),
version: self.version,
url: None,
extras: &self.extras,
}
}
}
pub(crate) fn from_annotated_dist(annotated: &'dist AnnotatedDist) -> Self {
assert!(
annotated.marker.conflict().is_true(),
"found dist {annotated} with non-trivial conflicting marker {marker:?}, \
which cannot be represented in a `requirements.txt` format",
marker = annotated.marker,
);
Self {
dist: &annotated.dist,
version: &annotated.version,
hashes: annotated.hashes.as_slice(),
// OK because we've asserted above that this dist
// does not have a non-trivial conflicting marker
// that we would otherwise need to care about.
markers: annotated.marker.combined(),
extras: if let Some(extra) = annotated.extra.clone() {
vec![extra]
} else {
vec![]
},
}
}
}
/// A comparator for sorting requirements in a `requirements.txt` file.
///
/// Derived `Ord` sorts `Url` variants before `Name` variants, and `Name`
/// variants lexicographically by field order (name, version, url, extras).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) enum RequirementsTxtComparator<'a> {
    /// Sort by URL for editable requirements.
    Url(Cow<'a, str>),
    /// In universal mode, we can have multiple versions for a package, so we track the version and
    /// the URL (for non-index packages) to have a stable sort for those, too.
    Name {
        name: &'a PackageName,
        version: &'a Version,
        url: Option<Cow<'a, str>>,
        extras: &'a [ExtraName],
    },
}
impl Name for RequirementsTxtDist<'_> {
fn name(&self) -> &PackageName {
self.dist.name()
}
}
impl DistributionMetadata for RequirementsTxtDist<'_> {
fn version_or_url(&self) -> VersionOrUrlRef<'_> {
self.dist.version_or_url()
}
}
/// Renders exactly as the underlying resolved distribution renders.
impl Display for RequirementsTxtDist<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.dist)
    }
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
astral-sh/uv | https://github.com/astral-sh/uv/blob/2318e48e819080f37a002551035c2b1880a81a70/crates/uv-requirements/src/lib.rs | crates/uv-requirements/src/lib.rs | pub use crate::extras::*;
pub use crate::lookahead::*;
pub use crate::source_tree::*;
pub use crate::sources::*;
pub use crate::specification::*;
pub use crate::unnamed::*;
use uv_distribution_types::{
Dist, DistErrorKind, GitSourceDist, Requirement, RequirementSource, SourceDist,
};
mod extras;
mod lookahead;
mod source_tree;
mod sources;
mod specification;
mod unnamed;
pub mod upgrade;
/// Errors that can occur while building or inspecting requirements.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// A distribution-level failure, annotated with the kind of distribution
    /// operation that failed (see [`Error::from_dist`]).
    #[error("{0} `{1}`")]
    Dist(DistErrorKind, Box<Dist>, #[source] uv_distribution::Error),
    /// A distribution error without an associated `Dist`.
    #[error(transparent)]
    Distribution(#[from] uv_distribution::Error),
    #[error(transparent)]
    DistributionTypes(#[from] uv_distribution_types::Error),
    #[error(transparent)]
    WheelFilename(#[from] uv_distribution_filename::WheelFilenameError),
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
impl Error {
    /// Create an [`Error`] from a distribution error, classifying the failure
    /// kind from the distribution and the underlying error.
    pub(crate) fn from_dist(dist: Dist, err: uv_distribution::Error) -> Self {
        let kind = DistErrorKind::from_dist(&dist, &err);
        Self::Dist(kind, Box::new(dist), err)
    }
}
/// Convert a [`Requirement`] into a [`Dist`], if it is a direct URL.
///
/// Returns `Ok(None)` for registry requirements, which have no single
/// concrete distribution until resolution picks one.
pub(crate) fn required_dist(
    requirement: &Requirement,
) -> Result<Option<Dist>, uv_distribution_types::Error> {
    Ok(Some(match &requirement.source {
        // Registry requirements are resolved later; nothing to construct here.
        RequirementSource::Registry { .. } => return Ok(None),
        // Direct HTTP(S) URL to a wheel or source archive.
        RequirementSource::Url {
            subdirectory,
            location,
            ext,
            url,
        } => Dist::from_http_url(
            requirement.name.clone(),
            url.clone(),
            location.clone(),
            subdirectory.clone(),
            *ext,
        )?,
        // Git reference, optionally scoped to a subdirectory.
        RequirementSource::Git {
            git,
            subdirectory,
            url,
        } => Dist::Source(SourceDist::Git(GitSourceDist {
            name: requirement.name.clone(),
            git: Box::new(git.clone()),
            subdirectory: subdirectory.clone(),
            url: url.clone(),
        })),
        // Local file (wheel or source archive).
        RequirementSource::Path {
            install_path,
            ext,
            url,
        } => Dist::from_file_url(requirement.name.clone(), url.clone(), install_path, *ext)?,
        // Local directory (source tree), possibly editable or virtual.
        RequirementSource::Directory {
            install_path,
            r#virtual,
            url,
            editable,
        } => Dist::from_directory_url(
            requirement.name.clone(),
            url.clone(),
            install_path,
            *editable,
            *r#virtual,
        )?,
    }))
}
| rust | Apache-2.0 | 2318e48e819080f37a002551035c2b1880a81a70 | 2026-01-04T15:31:58.679374Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.