repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/session/capabilities.rs | crates/ruff_server/src/session/capabilities.rs | use lsp_types::ClientCapabilities;
use ruff_linter::display_settings;
/// The subset of the client's [`ClientCapabilities`] that this server actually
/// consults, resolved once (see [`ResolvedClientCapabilities::new`]) into plain flags.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[expect(clippy::struct_excessive_bools)]
pub(crate) struct ResolvedClientCapabilities {
    /// `true` when the client supports both code-action `data` and resolving the
    /// `edit` property, which together enable deferred code-action edit resolution.
    pub(crate) code_action_deferred_edit_resolution: bool,
    /// `true` when the client supports the `workspace/applyEdit` request.
    pub(crate) apply_edit: bool,
    /// `true` when the client supports `documentChanges` in workspace edits.
    pub(crate) document_changes: bool,
    /// `true` when the client supports the workspace diagnostics refresh request.
    pub(crate) workspace_refresh: bool,
    /// `true` when the client advertises text-document pull-diagnostics support.
    pub(crate) pull_diagnostics: bool,
}
impl ResolvedClientCapabilities {
    /// Resolves the capability flags the server cares about from the raw
    /// client-supplied [`ClientCapabilities`]. Missing capability sections are
    /// treated as unsupported (`false`).
    pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
        let text_document = client_capabilities.text_document.as_ref();
        let workspace = client_capabilities.workspace.as_ref();

        let code_action = text_document.and_then(|doc| doc.code_action.as_ref());
        // The client must opt into carrying `data` on code actions...
        let data_support = code_action
            .and_then(|settings| settings.data_support)
            .unwrap_or_default();
        // ...and into resolving the `edit` property lazily.
        let edit_resolution = code_action
            .and_then(|settings| settings.resolve_support.as_ref())
            .is_some_and(|support| support.properties.contains(&"edit".into()));

        let apply_edit = workspace
            .and_then(|workspace| workspace.apply_edit)
            .unwrap_or_default();
        let document_changes = workspace
            .and_then(|workspace| workspace.workspace_edit.as_ref())
            .and_then(|workspace_edit| workspace_edit.document_changes)
            .unwrap_or_default();
        let workspace_refresh = workspace
            .and_then(|workspace| workspace.diagnostics.as_ref())
            .and_then(|diagnostics| diagnostics.refresh_support)
            .unwrap_or_default();
        // Pull diagnostics only requires the capability section to be present.
        let pull_diagnostics = text_document
            .and_then(|doc| doc.diagnostic.as_ref())
            .is_some();

        Self {
            // Deferred edit resolution needs both halves of the code-action support.
            code_action_deferred_edit_resolution: data_support && edit_resolution,
            apply_edit,
            document_changes,
            workspace_refresh,
            pull_diagnostics,
        }
    }
}
impl std::fmt::Display for ResolvedClientCapabilities {
    /// Renders each resolved flag under the `capabilities.*` namespace using the
    /// shared `display_settings!` helper from `ruff_linter`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "capabilities",
            fields = [
                self.code_action_deferred_edit_resolution,
                self.apply_edit,
                self.document_changes,
                self.workspace_refresh,
                self.pull_diagnostics,
            ]
        };
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/session/request_queue.rs | crates/ruff_server/src/session/request_queue.rs | use crate::session::client::ClientResponseHandler;
use lsp_server::RequestId;
use rustc_hash::FxHashMap;
use std::cell::{Cell, OnceCell, RefCell};
use std::fmt::Formatter;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Instant;
/// Tracks the pending requests between client and server.
pub(crate) struct RequestQueue {
    /// Requests sent from the client to the server (client -> server).
    incoming: Incoming,
    /// Requests sent from the server to the client (server -> client).
    outgoing: Outgoing,
}
impl RequestQueue {
    /// Creates a queue with no pending requests in either direction.
    pub(super) fn new() -> Self {
        Self {
            incoming: Incoming::default(),
            outgoing: Outgoing::default(),
        }
    }

    /// Returns the client-to-server request queue.
    pub(crate) fn incoming(&self) -> &Incoming {
        &self.incoming
    }

    /// Returns the client-to-server request queue mutably.
    pub(crate) fn incoming_mut(&mut self) -> &mut Incoming {
        &mut self.incoming
    }

    /// Returns the server-to-client request queue.
    pub(crate) fn outgoing(&self) -> &Outgoing {
        &self.outgoing
    }

    /// Returns the server-to-client request queue mutably.
    pub(crate) fn outgoing_mut(&mut self) -> &mut Outgoing {
        &mut self.outgoing
    }
}
/// Requests from client -> server.
///
/// Tracks which requests are pending. Requests that aren't registered are considered completed.
///
/// A request is pending if:
///
/// * it has been registered
/// * it hasn't been cancelled
/// * it hasn't been completed
///
/// Tracking whether a request is pending is required to ensure that the server sends exactly
/// one response for every request as required by the LSP specification.
#[derive(Default, Debug)]
pub(crate) struct Incoming {
    /// Requests that have been registered but neither cancelled nor completed,
    /// keyed by their request id.
    pending: FxHashMap<RequestId, PendingRequest>,
}
impl Incoming {
    /// Starts tracking a client request as pending.
    pub(crate) fn register(&mut self, request_id: RequestId, method: String) {
        self.pending.insert(request_id, PendingRequest::new(method));
    }

    /// Cancels the pending request with the given id, signalling its cancellation
    /// token if one was handed out.
    ///
    /// Returns the method name if the request was still pending, `None` if it was already completed.
    pub(super) fn cancel(&mut self, request_id: &RequestId) -> Option<String> {
        let mut pending = self.pending.remove(request_id)?;
        if let Some(token) = pending.cancellation_token.take() {
            token.cancel();
        }
        Some(pending.method)
    }

    /// Returns `true` if the request with the given id is still pending.
    #[expect(dead_code)]
    pub(crate) fn is_pending(&self, request_id: &RequestId) -> bool {
        self.pending.contains_key(request_id)
    }

    /// Returns a (lazily initialized) cancellation token for the given request id,
    /// or `None` if the request is no longer pending.
    pub(crate) fn cancellation_token(
        &self,
        request_id: &RequestId,
    ) -> Option<RequestCancellationToken> {
        let pending = self.pending.get(request_id)?;
        let token = pending
            .cancellation_token
            .get_or_init(RequestCancellationToken::default);
        Some(RequestCancellationToken::clone(token))
    }

    /// Marks the request as completed.
    ///
    /// Returns the time when the request was registered and the request method name,
    /// or `None` if the request was not pending.
    pub(crate) fn complete(&mut self, request_id: &RequestId) -> Option<(Instant, String)> {
        let pending = self.pending.remove(request_id)?;
        Some((pending.start_time, pending.method))
    }
}
/// A request from the client to the server that hasn't been responded yet.
#[derive(Debug)]
struct PendingRequest {
    /// The time when the request was registered.
    ///
    /// This does not include the time the request was queued in the main loop before it was registered.
    start_time: Instant,
    /// The method name of the request.
    method: String,
    /// A cancellation token to cancel this request.
    ///
    /// This is only initialized for background requests. Local tasks don't support cancellation (unless retried)
    /// as they're processed immediately after receiving the request; Making it impossible for a
    /// cancellation message to be processed before the task is completed.
    // `OnceCell` so the token is allocated only when a background task asks for it.
    cancellation_token: OnceCell<RequestCancellationToken>,
}
impl PendingRequest {
    /// Creates a pending request for `method`, stamping it with the current time.
    fn new(method: String) -> Self {
        let start_time = Instant::now();
        Self {
            method,
            start_time,
            cancellation_token: OnceCell::new(),
        }
    }
}
/// Token to cancel a specific request.
///
/// Can be shared between threads to check for cancellation *after* a request has been scheduled.
// Backed by an `Arc<AtomicBool>`; cloning (via the private associated `clone`) shares the flag.
#[derive(Debug, Default)]
pub(crate) struct RequestCancellationToken(Arc<AtomicBool>);
impl RequestCancellationToken {
/// Returns true if the request was cancelled.
pub(crate) fn is_cancelled(&self) -> bool {
self.0.load(std::sync::atomic::Ordering::Relaxed)
}
/// Signals that the request should not be processed because it was cancelled.
fn cancel(&self) {
self.0.store(true, std::sync::atomic::Ordering::Relaxed);
}
fn clone(this: &Self) -> Self {
RequestCancellationToken(this.0.clone())
}
}
/// Requests from server -> client.
// Uses `Cell`/`RefCell` interior mutability so `register` can work through `&self`.
#[derive(Default)]
pub(crate) struct Outgoing {
    /// The id of the next request sent from the server to the client.
    next_request_id: Cell<i32>,
    /// A map of request ids to the handlers that process the client-response.
    response_handlers: RefCell<FxHashMap<RequestId, ClientResponseHandler>>,
}
impl Outgoing {
/// Registers a handler, returns the id for the request.
#[must_use]
pub(crate) fn register(&self, handler: ClientResponseHandler) -> RequestId {
let id = self.next_request_id.get();
self.next_request_id.set(id + 1);
self.response_handlers
.borrow_mut()
.insert(id.into(), handler);
id.into()
}
/// Marks the request with the given id as complete and returns the handler to process the response.
///
/// Returns `None` if the request was not found.
#[must_use]
pub(crate) fn complete(&mut self, request_id: &RequestId) -> Option<ClientResponseHandler> {
self.response_handlers.get_mut().remove(request_id)
}
}
impl std::fmt::Debug for Outgoing {
    // Hand-written because the handler map is rendered as an opaque placeholder
    // (presumably `ClientResponseHandler` does not implement `Debug` — confirm).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Outgoing")
            .field("next_request_id", &self.next_request_id)
            .field("response_handlers", &"<response handlers>")
            .finish()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/session/index/ruff_settings.rs | crates/ruff_server/src/session/index/ruff_settings.rs | use std::collections::BTreeMap;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use anyhow::Context;
use ignore::{WalkBuilder, WalkState};
use ruff_linter::settings::types::GlobPath;
use ruff_linter::{settings::types::FilePattern, settings::types::PreviewMode};
use ruff_workspace::Settings;
use ruff_workspace::pyproject::find_fallback_target_version;
use ruff_workspace::resolver::match_exclusion;
use ruff_workspace::{
configuration::{Configuration, FormatConfiguration, LintConfiguration, RuleSelection},
pyproject::{find_user_settings_toml, settings_toml},
resolver::ConfigurationTransformer,
};
use crate::session::Client;
use crate::session::options::ConfigurationPreference;
use crate::session::settings::{EditorSettings, ResolvedConfiguration};
/// Resolved Ruff settings together with the configuration file they came from (if any).
#[derive(Debug)]
pub struct RuffSettings {
    /// The path to this configuration file, used for debugging.
    /// The default fallback configuration does not have a file path.
    path: Option<PathBuf>,
    /// The resolved settings.
    settings: Settings,
}
impl RuffSettings {
pub(crate) fn path(&self) -> Option<&Path> {
self.path.as_deref()
}
}
// Lets callers use a `RuffSettings` wherever a `&Settings` is needed.
impl Deref for RuffSettings {
    type Target = Settings;

    fn deref(&self) -> &Settings {
        &self.settings
    }
}
pub(super) struct RuffSettingsIndex {
    /// Index from folder to the resolved ruff settings.
    index: BTreeMap<PathBuf, Arc<RuffSettings>>,
    /// Settings used for documents not covered by any indexed folder.
    fallback: Arc<RuffSettings>,
}
impl std::fmt::Display for RuffSettings {
    /// Delegates directly to the inner [`Settings`] display representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.settings)
    }
}
impl RuffSettings {
    /// Constructs [`RuffSettings`] by attempting to resolve settings from a user-provided
    /// configuration file, such as `pyproject.toml` or `ruff.toml`, within the
    /// user's workspace.
    ///
    /// In the absence of a valid configuration file, it gracefully falls back to
    /// editor-only settings.
    pub(crate) fn fallback(editor_settings: &EditorSettings, root: &Path) -> RuffSettings {
        // Wraps the editor transformer so that, additionally, `target-version` is
        // derived from a discovered `requires-python` before the editor settings apply.
        struct FallbackTransformer<'a> {
            inner: EditorConfigurationTransformer<'a>,
        }

        impl ConfigurationTransformer for FallbackTransformer<'_> {
            fn transform(&self, mut configuration: Configuration) -> Configuration {
                // `self.inner.1` is the workspace root the transformer was built with.
                let fallback = find_fallback_target_version(self.inner.1);
                if let Some(fallback) = fallback {
                    tracing::debug!(
                        "Derived `target-version` from found `requires-python`: {fallback:?}"
                    );
                    configuration.target_version = Some(fallback.into());
                }
                self.inner.transform(configuration)
            }
        }

        find_user_settings_toml()
            .and_then(|user_settings| {
                tracing::debug!(
                    "Loading settings from user configuration file: `{}`",
                    user_settings.display()
                );
                ruff_workspace::resolver::resolve_root_settings(
                    &user_settings,
                    &FallbackTransformer {
                        inner: EditorConfigurationTransformer(editor_settings, root),
                    },
                    ruff_workspace::resolver::ConfigurationOrigin::UserSettings,
                )
                // Errors resolving the user-level file are swallowed here; we fall
                // through to the editor-settings-only path below.
                .ok()
                .map(|settings| RuffSettings {
                    path: Some(user_settings),
                    settings,
                })
            })
            .unwrap_or_else(|| {
                // No usable user-level configuration: build defaults, still deriving
                // `target-version` from `requires-python` when present.
                let fallback = find_fallback_target_version(root);
                if let Some(fallback) = fallback {
                    tracing::debug!(
                        "Derived `target-version` from found `requires-python` for fallback configuration: {fallback:?}"
                    );
                }
                let configuration = Configuration {
                    target_version: fallback.map(Into::into),
                    ..Configuration::default()
                };
                Self::with_editor_settings(editor_settings, root, configuration).expect(
                    "editor configuration should merge successfully with default configuration",
                )
            })
    }

    /// Constructs [`RuffSettings`] by merging the editor-defined settings with the
    /// default configuration.
    fn editor_only(editor_settings: &EditorSettings, root: &Path) -> RuffSettings {
        Self::with_editor_settings(editor_settings, root, Configuration::default())
            .expect("editor configuration should merge successfully with default configuration")
    }

    /// Merges the `configuration` with the editor defined settings.
    fn with_editor_settings(
        editor_settings: &EditorSettings,
        root: &Path,
        configuration: Configuration,
    ) -> anyhow::Result<RuffSettings> {
        let settings = EditorConfigurationTransformer(editor_settings, root)
            .transform(configuration)
            .into_settings(root)?;
        Ok(RuffSettings {
            // No backing file: this configuration was synthesized, not loaded.
            path: None,
            settings,
        })
    }
}
impl RuffSettingsIndex {
    /// Create the settings index for the given workspace root.
    ///
    /// This will create the index in the following order:
    /// 1. Resolve any settings from above the workspace root
    /// 2. Resolve any settings from the workspace root itself
    /// 3. Resolve any settings from within the workspace directory tree
    ///
    /// If this is the default workspace i.e., the client did not specify any workspace and so the
    /// server will be running in a single file mode, then only (1) and (2) will be resolved,
    /// skipping (3).
    pub(super) fn new(
        client: &Client,
        root: &Path,
        editor_settings: &EditorSettings,
        is_default_workspace: bool,
    ) -> Self {
        // Editor-only preference: no filesystem configuration is consulted at all.
        if editor_settings.configuration_preference == ConfigurationPreference::EditorOnly {
            tracing::debug!(
                "Using editor-only settings for workspace: {} (skipped indexing)",
                root.display()
            );
            return RuffSettingsIndex {
                index: BTreeMap::default(),
                fallback: Arc::new(RuffSettings::editor_only(editor_settings, root)),
            };
        }

        tracing::debug!("Indexing settings for workspace: {}", root.display());

        let mut has_error = false;
        let mut respect_gitignore = None;
        let mut index = BTreeMap::default();

        // If this is *not* the default workspace, then we should skip the workspace root itself
        // because it will be resolved when walking the workspace directory tree. This is done by
        // the `WalkBuilder` below.
        let should_skip_workspace = usize::from(!is_default_workspace);

        // Add any settings from above the workspace root, skipping the workspace root itself if
        // this is *not* the default workspace.
        for directory in root.ancestors().skip(should_skip_workspace) {
            match settings_toml(directory) {
                Ok(Some(pyproject)) => {
                    match ruff_workspace::resolver::resolve_root_settings(
                        &pyproject,
                        &EditorConfigurationTransformer(editor_settings, root),
                        ruff_workspace::resolver::ConfigurationOrigin::Ancestor,
                    ) {
                        Ok(settings) => {
                            tracing::debug!("Loaded settings from: `{}`", pyproject.display());
                            respect_gitignore = Some(settings.file_resolver.respect_gitignore);
                            index.insert(
                                directory.to_path_buf(),
                                Arc::new(RuffSettings {
                                    path: Some(pyproject),
                                    settings,
                                }),
                            );
                            // The nearest ancestor configuration wins; stop climbing.
                            break;
                        }
                        error => {
                            tracing::error!(
                                "{:#}",
                                error
                                    .with_context(|| {
                                        format!(
                                            "Failed to resolve settings for {}",
                                            pyproject.display()
                                        )
                                    })
                                    .unwrap_err()
                            );
                            has_error = true;
                            continue;
                        }
                    }
                }
                Ok(None) => continue,
                Err(err) => {
                    tracing::error!("{err:#}");
                    has_error = true;
                    continue;
                }
            }
        }

        let fallback = Arc::new(RuffSettings::fallback(editor_settings, root));

        // If this is the default workspace, the server is running in single-file mode. What this
        // means is that the user opened a file directly (not the folder) in the editor and the
        // server didn't receive a workspace folder during initialization. In this case, we default
        // to the current working directory and skip walking the workspace directory tree for any
        // settings.
        //
        // Refer to https://github.com/astral-sh/ruff/pull/13770 to understand what this behavior
        // means for different editors.
        if is_default_workspace {
            if has_error {
                client.show_error_message(format!(
                    "Error while resolving settings from workspace {}. Please refer to the logs for more details.",
                    root.display()
                ));
            }
            return RuffSettingsIndex { index, fallback };
        }

        // Add any settings within the workspace itself
        let mut builder = WalkBuilder::new(root);
        if let Ok(cwd) = std::env::current_dir() {
            builder.current_dir(cwd);
        }
        builder.standard_filters(
            respect_gitignore.unwrap_or_else(|| fallback.file_resolver.respect_gitignore),
        );
        builder.hidden(false);
        // Cap the walker's parallelism at 12 threads.
        builder.threads(
            std::thread::available_parallelism()
                .map_or(1, std::num::NonZeroUsize::get)
                .min(12),
        );
        let walker = builder.build_parallel();

        // Shared state for the parallel walk below.
        let index = std::sync::RwLock::new(index);
        let has_error = AtomicBool::new(has_error);

        walker.run(|| {
            Box::new(|result| {
                let Ok(entry) = result else {
                    return WalkState::Continue;
                };

                // Skip non-directories.
                if !entry
                    .file_type()
                    .is_some_and(|file_type| file_type.is_dir())
                {
                    return WalkState::Continue;
                }

                let directory = entry.into_path();

                // If the directory is excluded from the workspace, skip it.
                if let Some(file_name) = directory.file_name() {
                    // Look up the settings of the nearest already-indexed ancestor:
                    // `range(..directory)` + `rfind` finds the longest matching prefix.
                    let settings = index
                        .read()
                        .unwrap()
                        .range(..directory.clone())
                        .rfind(|(path, _)| directory.starts_with(path))
                        .map(|(_, settings)| settings.clone())
                        .unwrap_or_else(|| fallback.clone());

                    if match_exclusion(&directory, file_name, &settings.file_resolver.exclude) {
                        tracing::debug!("Ignored path via `exclude`: {}", directory.display());
                        return WalkState::Skip;
                    } else if match_exclusion(
                        &directory,
                        file_name,
                        &settings.file_resolver.extend_exclude,
                    ) {
                        tracing::debug!(
                            "Ignored path via `extend-exclude`: {}",
                            directory.display()
                        );
                        return WalkState::Skip;
                    }
                }

                match settings_toml(&directory) {
                    Ok(Some(pyproject)) => {
                        match ruff_workspace::resolver::resolve_root_settings(
                            &pyproject,
                            &EditorConfigurationTransformer(editor_settings, root),
                            ruff_workspace::resolver::ConfigurationOrigin::Ancestor,
                        ) {
                            Ok(settings) => {
                                tracing::debug!(
                                    "Loaded settings from: `{}` for `{}`",
                                    pyproject.display(),
                                    directory.display()
                                );
                                index.write().unwrap().insert(
                                    directory,
                                    Arc::new(RuffSettings {
                                        path: Some(pyproject),
                                        settings,
                                    }),
                                );
                            }
                            error => {
                                tracing::error!(
                                    "{:#}",
                                    error
                                        .with_context(|| {
                                            format!(
                                                "Failed to resolve settings for {}",
                                                pyproject.display()
                                            )
                                        })
                                        .unwrap_err()
                                );
                                has_error.store(true, Ordering::Relaxed);
                            }
                        }
                    }
                    Ok(None) => {}
                    Err(err) => {
                        tracing::error!("{err:#}");
                        has_error.store(true, Ordering::Relaxed);
                    }
                }

                WalkState::Continue
            })
        });

        if has_error.load(Ordering::Relaxed) {
            client.show_error_message(format!(
                "Error while resolving settings from workspace {}. Please refer to the logs for more details.",
                root.display()
            ));
        }

        RuffSettingsIndex {
            index: index.into_inner().unwrap(),
            fallback,
        }
    }

    /// Returns the settings of the closest indexed ancestor folder of `document_path`,
    /// or the fallback settings if no indexed folder contains it.
    pub(super) fn get(&self, document_path: &Path) -> Arc<RuffSettings> {
        self.index
            .range(..document_path.to_path_buf())
            .rfind(|(path, _)| document_path.starts_with(path))
            .map(|(_, settings)| settings)
            .unwrap_or_else(|| &self.fallback)
            .clone()
    }

    /// Returns the fallback settings for documents outside every indexed folder.
    pub(super) fn fallback(&self) -> Arc<RuffSettings> {
        self.fallback.clone()
    }

    /// Returns an iterator over the paths to the configuration files in the index.
    pub(crate) fn config_file_paths(&self) -> impl Iterator<Item = &Path> {
        self.index
            .values()
            .filter_map(|settings| settings.path.as_deref())
    }
}
/// Transformer that layers the editor-provided settings (field `.0`) over or under a
/// filesystem configuration, resolving globs relative to the project root (field `.1`).
struct EditorConfigurationTransformer<'a>(&'a EditorSettings, &'a Path);
impl ConfigurationTransformer for EditorConfigurationTransformer<'_> {
    /// Combines the editor settings with `filesystem_configuration` according to the
    /// user's `configuration_preference` (editor-first, filesystem-first, or editor-only).
    fn transform(&self, filesystem_configuration: Configuration) -> Configuration {
        let EditorSettings {
            configuration,
            format_preview,
            lint_preview,
            format_backend: _,
            select,
            extend_select,
            ignore,
            exclude,
            line_length,
            configuration_preference,
        } = self.0.clone();

        let project_root = self.1;

        // Configuration assembled from the individual editor settings.
        let editor_configuration = Configuration {
            lint: LintConfiguration {
                preview: lint_preview.map(PreviewMode::from),
                rule_selections: vec![RuleSelection {
                    select,
                    extend_select: extend_select.unwrap_or_default(),
                    ignore: ignore.unwrap_or_default(),
                    ..RuleSelection::default()
                }],
                ..LintConfiguration::default()
            },
            format: FormatConfiguration {
                preview: format_preview.map(PreviewMode::from),
                ..FormatConfiguration::default()
            },
            exclude: exclude.map(|exclude| {
                exclude
                    .into_iter()
                    .map(|pattern| {
                        // Keep the user-written pattern alongside its absolutized form.
                        let absolute = GlobPath::normalize(&pattern, project_root);
                        FilePattern::User(pattern, absolute)
                    })
                    .collect()
            }),
            line_length,
            ..Configuration::default()
        };

        // Merge in the editor-specified configuration.
        let editor_configuration = if let Some(configuration) = configuration {
            match configuration {
                ResolvedConfiguration::FilePath(path) => {
                    tracing::debug!(
                        "Combining settings from editor-specified configuration file at: {}",
                        path.display()
                    );
                    match open_configuration_file(&path) {
                        Ok(config_from_file) => editor_configuration.combine(config_from_file),
                        // On failure the editor configuration is used unchanged.
                        err => {
                            tracing::error!(
                                "{:?}",
                                err.context("Unable to load editor-specified configuration file")
                                    .unwrap_err()
                            );
                            editor_configuration
                        }
                    }
                }
                ResolvedConfiguration::Inline(options) => {
                    tracing::debug!(
                        "Combining settings from editor-specified inline configuration"
                    );
                    match Configuration::from_options(*options, None, project_root) {
                        Ok(configuration) => editor_configuration.combine(configuration),
                        Err(err) => {
                            tracing::error!(
                                "Unable to load editor-specified inline configuration: {err:?}",
                            );
                            editor_configuration
                        }
                    }
                }
            }
        } else {
            editor_configuration
        };

        // `combine` gives precedence to the receiver, so ordering encodes priority.
        match configuration_preference {
            ConfigurationPreference::EditorFirst => {
                editor_configuration.combine(filesystem_configuration)
            }
            ConfigurationPreference::FilesystemFirst => {
                filesystem_configuration.combine(editor_configuration)
            }
            ConfigurationPreference::EditorOnly => editor_configuration,
        }
    }
}
/// Loads and resolves the configuration file at `config_path` without applying any
/// additional transformation.
fn open_configuration_file(config_path: &Path) -> crate::Result<Configuration> {
    let origin = ruff_workspace::resolver::ConfigurationOrigin::UserSpecified;
    ruff_workspace::resolver::resolve_configuration(config_path, &IdentityTransformer, origin)
}
/// No-op transformer: returns the configuration unchanged. Used when a configuration
/// file should be loaded verbatim.
struct IdentityTransformer;

impl ConfigurationTransformer for IdentityTransformer {
    fn transform(&self, config: Configuration) -> Configuration {
        config
    }
}
#[cfg(test)]
mod tests {
    use ruff_linter::line_width::LineLength;
    use ruff_workspace::options::Options;

    use super::*;

    /// This test ensures that the inline configuration is correctly applied to the configuration.
    #[test]
    fn inline_settings() {
        let editor_settings = EditorSettings {
            configuration: Some(ResolvedConfiguration::Inline(Box::new(Options {
                line_length: Some(LineLength::try_from(120).unwrap()),
                ..Default::default()
            }))),
            ..Default::default()
        };

        let config = EditorConfigurationTransformer(&editor_settings, Path::new("/src/project"))
            .transform(Configuration::default());
        assert_eq!(config.line_length.unwrap().value(), 120);
    }

    /// This test ensures that between the inline configuration and specific settings, the specific
    /// settings is prioritized.
    #[test]
    fn inline_and_specific_settings_resolution_order() {
        let editor_settings = EditorSettings {
            configuration: Some(ResolvedConfiguration::Inline(Box::new(Options {
                line_length: Some(LineLength::try_from(120).unwrap()),
                ..Default::default()
            }))),
            // The specific `line_length` setting should win over the inline one.
            line_length: Some(LineLength::try_from(100).unwrap()),
            ..Default::default()
        };

        let config = EditorConfigurationTransformer(&editor_settings, Path::new("/src/project"))
            .transform(Configuration::default());
        assert_eq!(config.line_length.unwrap().value(), 100);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/schedule.rs | crates/ruff_server/src/server/schedule.rs | use std::num::NonZeroUsize;
use crate::session::{Client, Session};
mod task;
mod thread;
pub(super) use task::{BackgroundSchedule, Task};
use self::{
task::{BackgroundTaskBuilder, SyncTask},
thread::ThreadPriority,
};
/// The event loop thread is actually a secondary thread that we spawn from the
/// _actual_ main thread. This secondary thread has a larger stack size
/// than some OS defaults (Windows, for example) and is also designated as
/// high-priority.
pub(crate) fn spawn_main_loop(
    func: impl FnOnce() -> crate::Result<()> + Send + 'static,
) -> crate::Result<thread::JoinHandle<crate::Result<()>>> {
    // Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
    const MAIN_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024;
    const MAIN_THREAD_NAME: &str = "ruff:main";

    let builder = thread::Builder::new(thread::ThreadPriority::LatencySensitive)
        .name(MAIN_THREAD_NAME.into())
        .stack_size(MAIN_THREAD_STACK_SIZE);
    let handle = builder.spawn(func)?;
    Ok(handle)
}
/// Dispatches tasks onto the server's worker thread pools.
pub(crate) struct Scheduler {
    /// Dedicated pool for formatting tasks (sized to a single thread in `new`).
    fmt_pool: thread::Pool,
    /// General-purpose pool for background tasks.
    background_pool: thread::Pool,
}
impl Scheduler {
    /// Creates a scheduler with `worker_threads` background workers and a dedicated
    /// single-threaded pool for formatting tasks.
    pub(super) fn new(worker_threads: NonZeroUsize) -> Self {
        const FMT_THREADS: usize = 1;
        Self {
            fmt_pool: thread::Pool::new(NonZeroUsize::try_from(FMT_THREADS).unwrap()),
            background_pool: thread::Pool::new(worker_threads),
        }
    }

    /// Dispatches a `task` by either running it as a blocking function or
    /// executing it on a background thread pool.
    pub(super) fn dispatch(&mut self, task: Task, session: &mut Session, client: Client) {
        match task {
            // Sync tasks run immediately on the current thread with exclusive
            // (mutable) access to the session.
            Task::Sync(SyncTask { func }) => {
                func(session, &client);
            }
            // Background tasks build in two phases: `func(session)` runs here to
            // capture whatever session state it needs, and the returned closure is
            // then scheduled on a pool.
            Task::Background(BackgroundTaskBuilder {
                schedule,
                builder: func,
            }) => {
                let static_func = func(session);
                let task = move || static_func(&client);
                match schedule {
                    BackgroundSchedule::Worker => {
                        self.background_pool.spawn(ThreadPriority::Worker, task);
                    }
                    BackgroundSchedule::LatencySensitive => self
                        .background_pool
                        .spawn(ThreadPriority::LatencySensitive, task),
                    // Formatting runs on its own pool (with latency-sensitive priority).
                    BackgroundSchedule::Fmt => {
                        self.fmt_pool.spawn(ThreadPriority::LatencySensitive, task);
                    }
                }
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/connection.rs | crates/ruff_server/src/server/connection.rs | use lsp_server as lsp;
pub type ConnectionSender = crossbeam::channel::Sender<lsp::Message>;
/// A builder for `Connection` that handles LSP initialization.
pub(crate) struct ConnectionInitializer {
    /// The underlying `lsp_server` connection being initialized.
    connection: lsp::Connection,
}
impl ConnectionInitializer {
    /// Create a new LSP server connection over stdin/stdout.
    pub(crate) fn stdio() -> (Self, lsp::IoThreads) {
        let (connection, threads) = lsp::Connection::stdio();
        (Self { connection }, threads)
    }

    /// Starts the initialization process with the client by listening for an initialization request.
    /// Returns a request ID that should be passed into `initialize_finish` later,
    /// along with the initialization parameters that were provided.
    pub(super) fn initialize_start(
        &self,
    ) -> crate::Result<(lsp::RequestId, lsp_types::InitializeParams)> {
        let (id, params) = self.connection.initialize_start()?;
        // The raw JSON params are deserialized into typed `InitializeParams` here.
        Ok((id, serde_json::from_value(params)?))
    }

    /// Finishes the initialization process with the client,
    /// returning an initialized `Connection`.
    pub(super) fn initialize_finish(
        self,
        id: lsp::RequestId,
        server_capabilities: &lsp_types::ServerCapabilities,
        name: &str,
        version: &str,
    ) -> crate::Result<lsp_server::Connection> {
        // Reply to the `initialize` request with our capabilities and server info.
        self.connection.initialize_finish(
            id,
            serde_json::json!({
                "capabilities": server_capabilities,
                "serverInfo": {
                    "name": name,
                    "version": version
                }
            }),
        )?;
        Ok(self.connection)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api.rs | crates/ruff_server/src/server/api.rs | use std::panic::UnwindSafe;
use anyhow::anyhow;
use lsp_server::{self as server, RequestId};
use lsp_types::{notification::Notification, request::Request};
use notifications as notification;
use requests as request;
use crate::{
server::{
api::traits::{
BackgroundDocumentNotificationHandler, BackgroundDocumentRequestHandler,
SyncNotificationHandler,
},
schedule::Task,
},
session::{Client, Session},
};
mod diagnostics;
mod notifications;
mod requests;
mod traits;
use self::traits::{NotificationHandler, RequestHandler};
use super::{Result, schedule::BackgroundSchedule};
/// Defines the `document_url` method for implementers of [`Notification`] and [`Request`], given
/// the request or notification parameter type.
///
/// This would only work if the parameter type has a `text_document` field with a `uri` field
/// that is of type [`lsp_types::Url`].
macro_rules! define_document_url {
    ($params:ident: &$p:ty) => {
        fn document_url($params: &$p) -> std::borrow::Cow<'_, lsp_types::Url> {
            std::borrow::Cow::Borrowed(&$params.text_document.uri)
        }
    };
}
// Re-export within this module so submodules can `use` the macro by path.
use define_document_url;
/// Processes a request from the client to the server.
///
/// The LSP specification requires that each request has exactly one response. Therefore,
/// it's crucial that all paths in this method call [`Client::respond`] exactly once.
/// The only exception to this is requests that were cancelled by the client. In this case,
/// the response was already sent by the [`notification::CancelNotificationHandler`].
pub(super) fn request(req: server::Request) -> Task {
    let id = req.id.clone();

    // Route each known method to a sync or background handler task.
    match req.method.as_str() {
        request::CodeActions::METHOD => {
            background_request_task::<request::CodeActions>(req, BackgroundSchedule::Worker)
        }
        request::CodeActionResolve::METHOD => {
            background_request_task::<request::CodeActionResolve>(req, BackgroundSchedule::Worker)
        }
        request::DocumentDiagnostic::METHOD => {
            background_request_task::<request::DocumentDiagnostic>(req, BackgroundSchedule::Worker)
        }
        request::ExecuteCommand::METHOD => sync_request_task::<request::ExecuteCommand>(req),
        request::Format::METHOD => {
            background_request_task::<request::Format>(req, BackgroundSchedule::Fmt)
        }
        request::FormatRange::METHOD => {
            background_request_task::<request::FormatRange>(req, BackgroundSchedule::Fmt)
        }
        request::Hover::METHOD => {
            background_request_task::<request::Hover>(req, BackgroundSchedule::Worker)
        }
        lsp_types::request::Shutdown::METHOD => sync_request_task::<requests::ShutdownHandler>(req),
        method => {
            // Unknown method: still respond (with `MethodNotFound`) to satisfy the
            // one-response-per-request contract.
            tracing::warn!("Received request {method} which does not have a handler");
            let result: Result<()> = Err(Error::new(
                anyhow!("Unknown request: {method}"),
                server::ErrorCode::MethodNotFound,
            ));
            return Task::immediate(id, result);
        }
    }
    .unwrap_or_else(|err| {
        // Routing failed (e.g. parameters couldn't be deserialized): notify the user
        // and send an error response for this request id.
        tracing::error!("Encountered error when routing request with ID {id}: {err}");
        Task::sync(move |_session, client| {
            client.show_error_message(
                "Ruff failed to handle a request from the editor. Check the logs for more details.",
            );
            respond_silent_error(
                id,
                client,
                lsp_server::ResponseError {
                    code: err.code as i32,
                    message: err.to_string(),
                    data: None,
                },
            );
        })
    })
}
/// Routes a client notification to its handler task.
///
/// Unknown or ignored notifications produce a no-op task; notifications never
/// receive responses.
pub(super) fn notification(notif: server::Notification) -> Task {
    match notif.method.as_str() {
        notification::DidChange::METHOD => {
            sync_notification_task::<notification::DidChange>(notif)
        }
        notification::DidChangeConfiguration::METHOD => {
            sync_notification_task::<notification::DidChangeConfiguration>(notif)
        }
        notification::DidChangeWatchedFiles::METHOD => {
            sync_notification_task::<notification::DidChangeWatchedFiles>(notif)
        }
        notification::DidChangeWorkspace::METHOD => {
            sync_notification_task::<notification::DidChangeWorkspace>(notif)
        }
        notification::DidClose::METHOD => sync_notification_task::<notification::DidClose>(notif),
        notification::DidOpen::METHOD => sync_notification_task::<notification::DidOpen>(notif),
        notification::DidOpenNotebook::METHOD => {
            sync_notification_task::<notification::DidOpenNotebook>(notif)
        }
        notification::DidChangeNotebook::METHOD => {
            sync_notification_task::<notification::DidChangeNotebook>(notif)
        }
        notification::DidCloseNotebook::METHOD => {
            sync_notification_task::<notification::DidCloseNotebook>(notif)
        }
        lsp_types::notification::Cancel::METHOD => {
            sync_notification_task::<notifications::CancelNotificationHandler>(notif)
        }
        lsp_types::notification::SetTrace::METHOD => {
            // Deliberately ignored: the server doesn't support `$/setTrace`.
            tracing::trace!("Ignoring `setTrace` notification");
            return Task::nothing();
        }
        method => {
            tracing::warn!("Received notification {method} which does not have a handler.");
            return Task::nothing();
        }
    }
    .unwrap_or_else(|err| {
        // Routing failed: surface the problem to the user; no response is owed.
        tracing::error!("Encountered error when routing notification: {err}");
        Task::sync(|_session, client| {
            client.show_error_message(
                "Ruff failed to handle a notification from the editor. Check the logs for more details."
            );
        })
    })
}
fn sync_request_task<R: traits::SyncRequestHandler>(req: server::Request) -> Result<Task>
where
<<R as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
let (id, params) = cast_request::<R>(req)?;
Ok(Task::sync(move |session, client: &Client| {
let _span = tracing::debug_span!("request", %id, method = R::METHOD).entered();
let result = R::run(session, client, params);
respond::<R>(&id, result, client);
}))
}
fn background_request_task<R: traits::BackgroundDocumentRequestHandler>(
req: server::Request,
schedule: BackgroundSchedule,
) -> Result<Task>
where
<<R as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
let (id, params) = cast_request::<R>(req)?;
Ok(Task::background(schedule, move |session: &Session| {
let cancellation_token = session
.request_queue()
.incoming()
.cancellation_token(&id)
.expect("request should have been tested for cancellation before scheduling");
let url = R::document_url(¶ms).into_owned();
let Some(snapshot) = session.take_snapshot(R::document_url(¶ms).into_owned()) else {
tracing::warn!("Ignoring request because snapshot for path `{url:?}` doesn't exist.");
return Box::new(|_| {});
};
Box::new(move |client| {
let _span = tracing::debug_span!("request", %id, method = R::METHOD).entered();
// Test again if the request was cancelled since it was scheduled on the background task
// and, if so, return early
if cancellation_token.is_cancelled() {
tracing::trace!(
"Ignoring request id={id} method={} because it was cancelled",
R::METHOD
);
// We don't need to send a response here because the `cancel` notification
// handler already responded with a message.
return;
}
let result =
std::panic::catch_unwind(|| R::run_with_snapshot(snapshot, client, params));
let response = request_result_to_response::<R>(result);
respond::<R>(&id, response, client);
})
}))
}
fn request_result_to_response<R>(
result: std::result::Result<
Result<<<R as RequestHandler>::RequestType as Request>::Result>,
Box<dyn std::any::Any + Send + 'static>,
>,
) -> Result<<<R as RequestHandler>::RequestType as Request>::Result>
where
R: BackgroundDocumentRequestHandler,
{
match result {
Ok(response) => response,
Err(error) => {
let message = if let Some(panic_message) = panic_message(&error) {
format!("Request handler failed with: {panic_message}")
} else {
"Request handler failed".into()
};
Err(Error {
code: lsp_server::ErrorCode::InternalError,
error: anyhow!(message),
})
}
}
}
fn sync_notification_task<N: SyncNotificationHandler>(notif: server::Notification) -> Result<Task> {
let (id, params) = cast_notification::<N>(notif)?;
Ok(Task::sync(move |session, client| {
let _span = tracing::debug_span!("notification", method = N::METHOD).entered();
if let Err(err) = N::run(session, client, params) {
tracing::error!("An error occurred while running {id}: {err}");
client
.show_error_message("Ruff encountered a problem. Check the logs for more details.");
}
}))
}
#[expect(dead_code)]
fn background_notification_thread<N>(
req: server::Notification,
schedule: BackgroundSchedule,
) -> Result<Task>
where
N: BackgroundDocumentNotificationHandler,
<<N as NotificationHandler>::NotificationType as Notification>::Params: UnwindSafe,
{
let (id, params) = cast_notification::<N>(req)?;
Ok(Task::background(schedule, move |session: &Session| {
let url = N::document_url(¶ms);
let Some(snapshot) = session.take_snapshot((*url).clone()) else {
tracing::debug!(
"Ignoring notification because snapshot for url `{url}` doesn't exist."
);
return Box::new(|_| {});
};
Box::new(move |client| {
let _span = tracing::debug_span!("notification", method = N::METHOD).entered();
let result =
match std::panic::catch_unwind(|| N::run_with_snapshot(snapshot, client, params)) {
Ok(result) => result,
Err(panic) => {
let message = if let Some(panic_message) = panic_message(&panic) {
format!("notification handler for {id} failed with: {panic_message}")
} else {
format!("notification handler for {id} failed")
};
tracing::error!(message);
client.show_error_message(
"Ruff encountered a panic. Check the logs for more details.",
);
return;
}
};
if let Err(err) = result {
tracing::error!("An error occurred while running {id}: {err}");
client.show_error_message(
"Ruff encountered a problem. Check the logs for more details.",
);
}
})
}))
}
/// Tries to cast a serialized request from the server into
/// a parameter type for a specific request handler.
/// It is *highly* recommended to not override this function in your
/// implementation.
fn cast_request<Req>(
request: server::Request,
) -> Result<(
RequestId,
<<Req as RequestHandler>::RequestType as Request>::Params,
)>
where
Req: RequestHandler,
<<Req as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
request
.extract(Req::METHOD)
.map_err(|err| match err {
json_err @ server::ExtractError::JsonError { .. } => {
anyhow::anyhow!("JSON parsing failure:\n{json_err}")
}
server::ExtractError::MethodMismatch(_) => {
unreachable!("A method mismatch should not be possible here unless you've used a different handler (`Req`) \
than the one whose method name was matched against earlier.")
}
})
.with_failure_code(server::ErrorCode::InternalError)
}
/// Sends back a response to the server, but only if the request wasn't cancelled.
fn respond<Req>(
id: &RequestId,
result: Result<<<Req as RequestHandler>::RequestType as Request>::Result>,
client: &Client,
) where
Req: RequestHandler,
{
if let Err(err) = &result {
tracing::error!("An error occurred with request ID {id}: {err}");
client.show_error_message("Ruff encountered a problem. Check the logs for more details.");
}
if let Err(err) = client.respond(id, result) {
tracing::error!("Failed to send response: {err}");
}
}
/// Sends back an error response to the server using a [`Client`] without showing a warning
/// to the user.
fn respond_silent_error(id: RequestId, client: &Client, error: lsp_server::ResponseError) {
if let Err(err) = client.respond_err(id, error) {
tracing::error!("Failed to send response: {err}");
}
}
/// Tries to cast a serialized request from the server into
/// a parameter type for a specific request handler.
fn cast_notification<N>(
notification: server::Notification,
) -> Result<(
&'static str,
<<N as NotificationHandler>::NotificationType as Notification>::Params,
)>
where
N: NotificationHandler,
{
Ok((
N::METHOD,
notification
.extract(N::METHOD)
.map_err(|err| match err {
json_err @ server::ExtractError::JsonError { .. } => {
anyhow::anyhow!("JSON parsing failure:\n{json_err}")
}
server::ExtractError::MethodMismatch(_) => {
unreachable!("A method mismatch should not be possible here unless you've used a different handler (`N`) \
than the one whose method name was matched against earlier.")
}
})
.with_failure_code(server::ErrorCode::InternalError)?,
))
}
pub(crate) struct Error {
pub(crate) code: server::ErrorCode,
pub(crate) error: anyhow::Error,
}
/// A trait to convert result types into the server result type, [`super::Result`].
trait LSPResult<T> {
fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T>;
}
impl<T, E: Into<anyhow::Error>> LSPResult<T> for core::result::Result<T, E> {
fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T> {
self.map_err(|err| Error::new(err.into(), code))
}
}
impl Error {
pub(crate) fn new(err: anyhow::Error, code: server::ErrorCode) -> Self {
Self { code, error: err }
}
}
// Right now, we treat the error code as invisible data that won't
// be printed.
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.error.fmt(f)
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.error.fmt(f)
}
}
fn panic_message<'a>(
err: &'a Box<dyn std::any::Any + Send + 'static>,
) -> Option<std::borrow::Cow<'a, str>> {
if let Some(s) = err.downcast_ref::<String>() {
Some(s.into())
} else if let Some(&s) = err.downcast_ref::<&str>() {
Some(s.into())
} else {
None
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/main_loop.rs | crates/ruff_server/src/server/main_loop.rs | use anyhow::anyhow;
use crossbeam::select;
use lsp_server::Message;
use lsp_types::{
self as types, DidChangeWatchedFilesRegistrationOptions, FileSystemWatcher,
notification::Notification as _,
};
use crate::{
Server,
server::{api, schedule},
session::Client,
};
pub type MainLoopSender = crossbeam::channel::Sender<Event>;
pub(crate) type MainLoopReceiver = crossbeam::channel::Receiver<Event>;
impl Server {
pub(super) fn main_loop(&mut self) -> crate::Result<()> {
self.initialize(&Client::new(
self.main_loop_sender.clone(),
self.connection.sender.clone(),
));
let mut scheduler = schedule::Scheduler::new(self.worker_threads);
while let Ok(next_event) = self.next_event() {
let Some(next_event) = next_event else {
anyhow::bail!("client exited without proper shutdown sequence");
};
match next_event {
Event::Message(msg) => {
let client = Client::new(
self.main_loop_sender.clone(),
self.connection.sender.clone(),
);
let task = match msg {
Message::Request(req) => {
self.session
.request_queue_mut()
.incoming_mut()
.register(req.id.clone(), req.method.clone());
if self.session.is_shutdown_requested() {
tracing::warn!(
"Received request after server shutdown was requested, discarding"
);
client.respond_err(
req.id,
lsp_server::ResponseError {
code: lsp_server::ErrorCode::InvalidRequest as i32,
message: "Shutdown already requested".to_owned(),
data: None,
},
)?;
continue;
}
api::request(req)
}
Message::Notification(notification) => {
if notification.method == lsp_types::notification::Exit::METHOD {
if !self.session.is_shutdown_requested() {
return Err(anyhow!(
"Received exit notification before a shutdown request"
));
}
tracing::debug!("Received exit notification, exiting");
return Ok(());
}
api::notification(notification)
}
// Handle the response from the client to a server request
Message::Response(response) => {
if let Some(handler) = self
.session
.request_queue_mut()
.outgoing_mut()
.complete(&response.id)
{
handler(&client, response);
} else {
tracing::error!(
"Received a response with ID {}, which was not expected",
response.id
);
}
continue;
}
};
scheduler.dispatch(task, &mut self.session, client);
}
Event::SendResponse(response) => {
// Filter out responses for already canceled requests.
if let Some((start_time, method)) = self
.session
.request_queue_mut()
.incoming_mut()
.complete(&response.id)
{
let duration = start_time.elapsed();
tracing::trace!(name: "message response", method, %response.id, duration = format_args!("{:0.2?}", duration));
self.connection.sender.send(Message::Response(response))?;
} else {
tracing::trace!(
"Ignoring response for canceled request id={}",
response.id
);
}
}
}
}
Ok(())
}
/// Waits for the next message from the client or action.
///
/// Returns `Ok(None)` if the client connection is closed.
fn next_event(&self) -> Result<Option<Event>, crossbeam::channel::RecvError> {
select!(
recv(self.connection.receiver) -> msg => {
// Ignore disconnect errors, they're handled by the main loop (it will exit).
Ok(msg.ok().map(Event::Message))
},
recv(self.main_loop_receiver) -> event => event.map(Some),
)
}
fn initialize(&mut self, client: &Client) {
let dynamic_registration = self
.client_capabilities
.workspace
.as_ref()
.and_then(|workspace| workspace.did_change_watched_files)
.and_then(|watched_files| watched_files.dynamic_registration)
.unwrap_or_default();
if dynamic_registration {
// Register all dynamic capabilities here
// `workspace/didChangeWatchedFiles`
// (this registers the configuration file watcher)
let params = lsp_types::RegistrationParams {
registrations: vec![lsp_types::Registration {
id: "ruff-server-watch".into(),
method: "workspace/didChangeWatchedFiles".into(),
register_options: Some(
serde_json::to_value(DidChangeWatchedFilesRegistrationOptions {
watchers: vec![
FileSystemWatcher {
glob_pattern: types::GlobPattern::String(
"**/.ruff.toml".into(),
),
kind: None,
},
FileSystemWatcher {
glob_pattern: types::GlobPattern::String("**/ruff.toml".into()),
kind: None,
},
FileSystemWatcher {
glob_pattern: types::GlobPattern::String(
"**/pyproject.toml".into(),
),
kind: None,
},
],
})
.unwrap(),
),
}],
};
let response_handler = |_: &Client, ()| {
tracing::info!("Configuration file watcher successfully registered");
};
if let Err(err) = client.send_request::<lsp_types::request::RegisterCapability>(
&self.session,
params,
response_handler,
) {
tracing::error!(
"An error occurred when trying to register the configuration file watcher: {err}"
);
}
} else {
tracing::warn!(
"LSP client does not support dynamic capability registration - automatic configuration reloading will not be available."
);
}
}
}
#[derive(Debug)]
pub enum Event {
/// An incoming message from the LSP client.
Message(lsp_server::Message),
/// Send a response to the client
SendResponse(lsp_server::Response),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/diagnostics.rs | crates/ruff_server/src/server/api/diagnostics.rs | use crate::{
lint::DiagnosticsMap,
session::{Client, DocumentQuery, DocumentSnapshot},
};
use super::LSPResult;
pub(super) fn generate_diagnostics(snapshot: &DocumentSnapshot) -> DiagnosticsMap {
if snapshot.client_settings().lint() {
let document = snapshot.query();
crate::lint::check(
document,
snapshot.encoding(),
snapshot.client_settings().show_syntax_errors(),
)
} else {
DiagnosticsMap::default()
}
}
pub(super) fn publish_diagnostics_for_document(
snapshot: &DocumentSnapshot,
client: &Client,
) -> crate::server::Result<()> {
for (uri, diagnostics) in generate_diagnostics(snapshot) {
client
.send_notification::<lsp_types::notification::PublishDiagnostics>(
lsp_types::PublishDiagnosticsParams {
uri,
diagnostics,
version: Some(snapshot.query().version()),
},
)
.with_failure_code(lsp_server::ErrorCode::InternalError)?;
}
Ok(())
}
pub(super) fn clear_diagnostics_for_document(
query: &DocumentQuery,
client: &Client,
) -> crate::server::Result<()> {
client
.send_notification::<lsp_types::notification::PublishDiagnostics>(
lsp_types::PublishDiagnosticsParams {
uri: query.make_key().into_url(),
diagnostics: vec![],
version: Some(query.version()),
},
)
.with_failure_code(lsp_server::ErrorCode::InternalError)?;
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/traits.rs | crates/ruff_server/src/server/api/traits.rs | //! Traits for handling requests and notifications from the LSP client.
//!
//! This module defines the trait abstractions used by the language server to handle incoming
//! requests and notifications from clients. It provides a type-safe way to implement LSP handlers
//! with different execution models (synchronous or asynchronous) and automatic retry capabilities.
//!
//! All request and notification handlers must implement the base traits [`RequestHandler`] and
//! [`NotificationHandler`], respectively, which associate them with specific LSP request or
//! notification types. These base traits are then extended by more specific traits that define
//! the execution model of the handler.
//!
//! The [`SyncRequestHandler`] and [`SyncNotificationHandler`] traits are for handlers that
//! executes synchronously on the main loop, providing mutable access to the [`Session`] that
//! contains the current state of the server. This is useful for handlers that need to modify
//! the server state such as when the content of a file changes.
//!
//! The [`BackgroundDocumentRequestHandler`] and [`BackgroundDocumentNotificationHandler`] traits
//! are for handlers that operate on a single document and can be executed on a background thread.
//! These handlers will have access to a snapshot of the document at the time of the request or
//! notification, allowing them to perform operations without blocking the main loop.
//!
//! The [`SyncNotificationHandler`] is the most common trait that would be used because most
//! notifications are specific to a single document and require updating the server state.
//! Similarly, the [`BackgroundDocumentRequestHandler`] is the most common request handler that
//! would be used as most requests are document-specific and can be executed in the background.
//!
//! See the `./requests` and `./notifications` directories for concrete implementations of these
//! traits in action.
use crate::session::{Client, DocumentSnapshot, Session};
use lsp_types::notification::Notification as LSPNotification;
use lsp_types::request::Request;
/// A supertrait for any server request handler.
pub(super) trait RequestHandler {
type RequestType: Request;
const METHOD: &'static str = <<Self as RequestHandler>::RequestType as Request>::METHOD;
}
/// A request handler that needs mutable access to the session.
///
/// This will block the main message receiver loop, meaning that no incoming requests or
/// notifications will be handled while `run` is executing. Try to avoid doing any I/O or
/// long-running computations.
pub(super) trait SyncRequestHandler: RequestHandler {
fn run(
session: &mut Session,
client: &Client,
params: <<Self as RequestHandler>::RequestType as Request>::Params,
) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
}
/// A request handler that can be run on a background thread.
///
/// This handler is specific to requests that operate on a single document.
pub(super) trait BackgroundDocumentRequestHandler: RequestHandler {
/// Returns the URL of the document that this request handler operates on.
///
/// This method can be implemented automatically using the [`define_document_url`] macro.
///
/// [`define_document_url`]: super::define_document_url
fn document_url(
params: &<<Self as RequestHandler>::RequestType as Request>::Params,
) -> std::borrow::Cow<'_, lsp_types::Url>;
fn run_with_snapshot(
snapshot: DocumentSnapshot,
client: &Client,
params: <<Self as RequestHandler>::RequestType as Request>::Params,
) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
}
/// A supertrait for any server notification handler.
pub(super) trait NotificationHandler {
type NotificationType: LSPNotification;
const METHOD: &'static str =
<<Self as NotificationHandler>::NotificationType as LSPNotification>::METHOD;
}
/// A notification handler that needs mutable access to the session.
///
/// This will block the main message receiver loop, meaning that no incoming requests or
/// notifications will be handled while `run` is executing. Try to avoid doing any I/O or
/// long-running computations.
pub(super) trait SyncNotificationHandler: NotificationHandler {
fn run(
session: &mut Session,
client: &Client,
params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
) -> super::Result<()>;
}
/// A notification handler that can be run on a background thread.
pub(super) trait BackgroundDocumentNotificationHandler: NotificationHandler {
/// Returns the URL of the document that this notification handler operates on.
///
/// This method can be implemented automatically using the [`define_document_url`] macro.
///
/// [`define_document_url`]: super::define_document_url
fn document_url(
params: &<<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
) -> std::borrow::Cow<'_, lsp_types::Url>;
fn run_with_snapshot(
snapshot: DocumentSnapshot,
client: &Client,
params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
) -> super::Result<()>;
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications.rs | crates/ruff_server/src/server/api/notifications.rs | mod cancel;
mod did_change;
mod did_change_configuration;
mod did_change_notebook;
mod did_change_watched_files;
mod did_change_workspace;
mod did_close;
mod did_close_notebook;
mod did_open;
mod did_open_notebook;
use super::traits::{NotificationHandler, SyncNotificationHandler};
pub(super) use cancel::CancelNotificationHandler;
pub(super) use did_change::DidChange;
pub(super) use did_change_configuration::DidChangeConfiguration;
pub(super) use did_change_notebook::DidChangeNotebook;
pub(super) use did_change_watched_files::DidChangeWatchedFiles;
pub(super) use did_change_workspace::DidChangeWorkspace;
pub(super) use did_close::DidClose;
pub(super) use did_close_notebook::DidCloseNotebook;
pub(super) use did_open::DidOpen;
pub(super) use did_open_notebook::DidOpenNotebook;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests.rs | crates/ruff_server/src/server/api/requests.rs | mod code_action;
mod code_action_resolve;
mod diagnostic;
mod execute_command;
mod format;
mod format_range;
mod hover;
mod shutdown;
use super::{
define_document_url,
traits::{BackgroundDocumentRequestHandler, RequestHandler, SyncRequestHandler},
};
pub(super) use code_action::CodeActions;
pub(super) use code_action_resolve::CodeActionResolve;
pub(super) use diagnostic::DocumentDiagnostic;
pub(super) use execute_command::ExecuteCommand;
pub(super) use format::Format;
pub(super) use format_range::FormatRange;
pub(super) use hover::Hover;
pub(super) use shutdown::ShutdownHandler;
type FormatResponse = Option<Vec<lsp_types::TextEdit>>;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/shutdown.rs | crates/ruff_server/src/server/api/requests/shutdown.rs | use crate::Session;
use crate::server::api::traits::{RequestHandler, SyncRequestHandler};
use crate::session::Client;
pub(crate) struct ShutdownHandler;
impl RequestHandler for ShutdownHandler {
type RequestType = lsp_types::request::Shutdown;
}
impl SyncRequestHandler for ShutdownHandler {
fn run(session: &mut Session, _client: &Client, _params: ()) -> crate::server::Result<()> {
tracing::debug!("Received shutdown request, waiting for shutdown notification");
session.set_shutdown_requested(true);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/code_action_resolve.rs | crates/ruff_server/src/server/api/requests/code_action_resolve.rs | use std::borrow::Cow;
use lsp_server::ErrorCode;
use lsp_types::{self as types, request as req};
use ruff_linter::codes::Rule;
use crate::PositionEncoding;
use crate::edit::WorkspaceEditTracker;
use crate::fix::Fixes;
use crate::server::Result;
use crate::server::SupportedCodeAction;
use crate::server::api::LSPResult;
use crate::session::Client;
use crate::session::{DocumentQuery, DocumentSnapshot, ResolvedClientCapabilities};
pub(crate) struct CodeActionResolve;
impl super::RequestHandler for CodeActionResolve {
type RequestType = req::CodeActionResolveRequest;
}
impl super::BackgroundDocumentRequestHandler for CodeActionResolve {
fn document_url(params: &types::CodeAction) -> Cow<'_, types::Url> {
let uri: lsp_types::Url = serde_json::from_value(params.data.clone().unwrap_or_default())
.expect("code actions should have a URI in their data fields");
Cow::Owned(uri)
}
fn run_with_snapshot(
snapshot: DocumentSnapshot,
_client: &Client,
mut action: types::CodeAction,
) -> Result<types::CodeAction> {
let query = snapshot.query();
let code_actions = SupportedCodeAction::from_kind(
action
.kind
.clone()
.ok_or(anyhow::anyhow!("No kind was given for code action"))
.with_failure_code(ErrorCode::InvalidParams)?,
)
.collect::<Vec<_>>();
// Ensure that the code action maps to _exactly one_ supported code action
let [action_kind] = code_actions.as_slice() else {
return Err(anyhow::anyhow!(
"Code action resolver did not expect code action kind {:?}",
action.kind.as_ref().unwrap()
))
.with_failure_code(ErrorCode::InvalidParams);
};
match action_kind {
SupportedCodeAction::SourceFixAll | SupportedCodeAction::SourceOrganizeImports
if snapshot.is_notebook_cell() =>
{
// This should never occur because we ignore generating these code actions for a
// notebook cell in the `textDocument/codeAction` request handler.
return Err(anyhow::anyhow!(
"Code action resolver cannot resolve {:?} for a notebook cell",
action_kind.to_kind().as_str()
))
.with_failure_code(ErrorCode::InvalidParams);
}
_ => {}
}
action.edit = match action_kind {
SupportedCodeAction::SourceFixAll | SupportedCodeAction::NotebookSourceFixAll => Some(
resolve_edit_for_fix_all(
query,
snapshot.resolved_client_capabilities(),
snapshot.encoding(),
)
.with_failure_code(ErrorCode::InternalError)?,
),
SupportedCodeAction::SourceOrganizeImports
| SupportedCodeAction::NotebookSourceOrganizeImports => Some(
resolve_edit_for_organize_imports(
query,
snapshot.resolved_client_capabilities(),
snapshot.encoding(),
)
.with_failure_code(ErrorCode::InternalError)?,
),
SupportedCodeAction::QuickFix => {
// The client may ask us to resolve a code action, as it has no way of knowing
// whether e.g. `command` field will be filled out by the resolution callback.
return Ok(action);
}
};
Ok(action)
}
}
pub(super) fn resolve_edit_for_fix_all(
query: &DocumentQuery,
client_capabilities: &ResolvedClientCapabilities,
encoding: PositionEncoding,
) -> crate::Result<types::WorkspaceEdit> {
let mut tracker = WorkspaceEditTracker::new(client_capabilities);
tracker.set_fixes_for_document(fix_all_edit(query, encoding)?, query.version())?;
Ok(tracker.into_workspace_edit())
}
pub(super) fn fix_all_edit(
query: &DocumentQuery,
encoding: PositionEncoding,
) -> crate::Result<Fixes> {
crate::fix::fix_all(query, &query.settings().linter, encoding)
}
pub(super) fn resolve_edit_for_organize_imports(
query: &DocumentQuery,
client_capabilities: &ResolvedClientCapabilities,
encoding: PositionEncoding,
) -> crate::Result<types::WorkspaceEdit> {
let mut tracker = WorkspaceEditTracker::new(client_capabilities);
tracker.set_fixes_for_document(organize_imports_edit(query, encoding)?, query.version())?;
Ok(tracker.into_workspace_edit())
}
pub(super) fn organize_imports_edit(
query: &DocumentQuery,
encoding: PositionEncoding,
) -> crate::Result<Fixes> {
let mut linter_settings = query.settings().linter.clone();
linter_settings.rules = [
Rule::UnsortedImports, // I001
Rule::MissingRequiredImport, // I002
]
.into_iter()
.collect();
crate::fix::fix_all(query, &linter_settings, encoding)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/hover.rs | crates/ruff_server/src/server/api/requests/hover.rs | use crate::server::Result;
use crate::session::{Client, DocumentSnapshot};
use anyhow::Context;
use lsp_types::{self as types, request as req};
use regex::Regex;
use ruff_linter::FixAvailability;
use ruff_linter::registry::{Linter, Rule, RuleNamespace};
use ruff_source_file::OneIndexed;
use std::fmt::Write;
pub(crate) struct Hover;
impl super::RequestHandler for Hover {
type RequestType = req::HoverRequest;
}
impl super::BackgroundDocumentRequestHandler for Hover {
fn document_url(params: &types::HoverParams) -> std::borrow::Cow<'_, lsp_types::Url> {
std::borrow::Cow::Borrowed(¶ms.text_document_position_params.text_document.uri)
}
fn run_with_snapshot(
snapshot: DocumentSnapshot,
_client: &Client,
params: types::HoverParams,
) -> Result<Option<types::Hover>> {
Ok(hover(&snapshot, ¶ms.text_document_position_params))
}
}
pub(crate) fn hover(
snapshot: &DocumentSnapshot,
position: &types::TextDocumentPositionParams,
) -> Option<types::Hover> {
// Hover only operates on text documents or notebook cells
let document = snapshot
.query()
.as_single_document()
.context("Failed to get text document for the hover request")
.unwrap();
let line_number: usize = position
.position
.line
.try_into()
.expect("line number should fit within a usize");
let line_range = document.index().line_range(
OneIndexed::from_zero_indexed(line_number),
document.contents(),
);
let line = &document.contents()[line_range];
// Get the list of codes.
let noqa_regex = Regex::new(r"(?i:# (?:(?:ruff|flake8): )?(?P<noqa>noqa))(?::\s?(?P<codes>([A-Z]+[0-9]+(?:[,\s]+)?)+))?").unwrap();
let noqa_captures = noqa_regex.captures(line)?;
let codes_match = noqa_captures.name("codes")?;
let codes_start = codes_match.start();
let code_regex = Regex::new(r"[A-Z]+[0-9]+").unwrap();
let cursor: usize = position
.position
.character
.try_into()
.expect("column number should fit within a usize");
let word = code_regex.find_iter(codes_match.as_str()).find(|code| {
cursor >= (code.start() + codes_start) && cursor < (code.end() + codes_start)
})?;
// Get rule for the code under the cursor.
let rule = Rule::from_code(word.as_str());
let output = if let Ok(rule) = rule {
format_rule_text(rule)
} else {
format!("{}: Rule not found", word.as_str())
};
let hover = types::Hover {
contents: types::HoverContents::Markup(types::MarkupContent {
kind: types::MarkupKind::Markdown,
value: output,
}),
range: None,
};
Some(hover)
}
fn format_rule_text(rule: Rule) -> String {
let mut output = String::new();
let _ = write!(&mut output, "# {} ({})", rule.name(), rule.noqa_code());
output.push('\n');
output.push('\n');
let (linter, _) = Linter::parse_code(&rule.noqa_code().to_string()).unwrap();
let _ = write!(
&mut output,
"Derived from the **{}** linter.",
linter.name()
);
output.push('\n');
output.push('\n');
let fix_availability = rule.fixable();
if matches!(
fix_availability,
FixAvailability::Always | FixAvailability::Sometimes
) {
output.push_str(&fix_availability.to_string());
output.push('\n');
output.push('\n');
}
if rule.is_preview() {
output.push_str(r"This rule is in preview and is not stable.");
output.push('\n');
output.push('\n');
}
if let Some(explanation) = rule.explanation() {
output.push_str(explanation.trim());
} else {
tracing::warn!("Rule {} does not have an explanation", rule.noqa_code());
output.push_str("An issue occurred: an explanation for this rule was not found.");
}
output
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/execute_command.rs | crates/ruff_server/src/server/api/requests/execute_command.rs | use std::fmt::Write;
use std::str::FromStr;
use crate::edit::WorkspaceEditTracker;
use crate::server::SupportedCommand;
use crate::server::api::LSPResult;
use crate::session::{Client, Session};
use crate::{DIAGNOSTIC_NAME, DocumentKey};
use crate::{edit::DocumentVersion, server};
use lsp_server::ErrorCode;
use lsp_types::{self as types, TextDocumentIdentifier, request as req};
use serde::Deserialize;
/// Handler for the `workspace/executeCommand` request.
pub(crate) struct ExecuteCommand;
/// A per-document argument for the fix-all / format / organize-imports commands:
/// the document's URI plus the version the client believes it has.
#[derive(Deserialize)]
struct Argument {
    /// URI of the document the command should operate on.
    uri: types::Url,
    /// Client-side document version, used to guard against applying stale edits.
    version: DocumentVersion,
}
/// The argument schema for the `ruff.printDebugInformation` command.
#[derive(Default, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DebugCommandArgument {
    /// The URI of the document to print debug information for.
    ///
    /// When provided, both document-specific debug information and global information are printed.
    /// If not provided ([None]), only global debug information is printed.
    text_document: Option<TextDocumentIdentifier>,
}
impl super::RequestHandler for ExecuteCommand {
    type RequestType = req::ExecuteCommand;
}
impl super::SyncRequestHandler for ExecuteCommand {
    /// Dispatches a supported `ruff.*` command.
    ///
    /// `ruff.printDebugInformation` is handled first and returns its report as a
    /// JSON string. All other commands compute per-document fixes and apply them
    /// via `workspace/applyEdit`, so the client must advertise `applyEdit` support.
    fn run(
        session: &mut Session,
        client: &Client,
        params: types::ExecuteCommandParams,
    ) -> server::Result<Option<serde_json::Value>> {
        let command = SupportedCommand::from_str(&params.command)
            .with_failure_code(ErrorCode::InvalidParams)?;
        if command == SupportedCommand::Debug {
            // TODO: Currently we only use the first argument i.e., the first document that's
            // provided but we could expand this to consider all *open* documents.
            let argument: DebugCommandArgument = params.arguments.into_iter().next().map_or_else(
                || Ok(DebugCommandArgument::default()),
                |value| serde_json::from_value(value).with_failure_code(ErrorCode::InvalidParams),
            )?;
            return Ok(Some(serde_json::Value::String(
                debug_information(session, argument.text_document)
                    .with_failure_code(ErrorCode::InternalError)?,
            )));
        }
        // check if we can apply a workspace edit
        if !session.resolved_client_capabilities().apply_edit {
            return Err(anyhow::anyhow!("Cannot execute the '{}' command: the client does not support `workspace/applyEdit`", command.label())).with_failure_code(ErrorCode::InternalError);
        }
        let mut arguments: Vec<Argument> = params
            .arguments
            .into_iter()
            .map(|value| serde_json::from_value(value).with_failure_code(ErrorCode::InvalidParams))
            .collect::<server::Result<_>>()?;
        // De-duplicate by URI so each document is edited at most once.
        arguments.sort_by(|a, b| a.uri.cmp(&b.uri));
        arguments.dedup_by(|a, b| a.uri == b.uri);
        let mut edit_tracker = WorkspaceEditTracker::new(session.resolved_client_capabilities());
        for Argument { uri, version } in arguments {
            let Some(snapshot) = session.take_snapshot(uri.clone()) else {
                tracing::error!("Document at {uri} could not be opened");
                client.show_error_message("Ruff does not recognize this file");
                return Ok(None);
            };
            match command {
                SupportedCommand::FixAll => {
                    let fixes = super::code_action_resolve::fix_all_edit(
                        snapshot.query(),
                        snapshot.encoding(),
                    )
                    .with_failure_code(ErrorCode::InternalError)?;
                    edit_tracker
                        .set_fixes_for_document(fixes, snapshot.query().version())
                        .with_failure_code(ErrorCode::InternalError)?;
                }
                SupportedCommand::Format => {
                    let fixes = super::format::format_full_document(&snapshot)?;
                    edit_tracker
                        .set_fixes_for_document(fixes, version)
                        .with_failure_code(ErrorCode::InternalError)?;
                }
                SupportedCommand::OrganizeImports => {
                    let fixes = super::code_action_resolve::organize_imports_edit(
                        snapshot.query(),
                        snapshot.encoding(),
                    )
                    .with_failure_code(ErrorCode::InternalError)?;
                    edit_tracker
                        .set_fixes_for_document(fixes, snapshot.query().version())
                        .with_failure_code(ErrorCode::InternalError)?;
                }
                SupportedCommand::Debug => {
                    unreachable!("The debug command should have already been handled")
                }
            }
        }
        // Only send an `applyEdit` request when at least one document produced edits.
        if !edit_tracker.is_empty() {
            apply_edit(
                session,
                client,
                command.label(),
                edit_tracker.into_workspace_edit(),
            )
            .with_failure_code(ErrorCode::InternalError)?;
        }
        Ok(None)
    }
}
/// Sends a `workspace/applyEdit` request to the client and logs (plus surfaces
/// to the user) any failure reported in the asynchronous response.
fn apply_edit(
    session: &mut Session,
    client: &Client,
    label: &str,
    edit: types::WorkspaceEdit,
) -> crate::Result<()> {
    let request_params = types::ApplyWorkspaceEditParams {
        label: Some(format!("{DIAGNOSTIC_NAME}: {label}")),
        edit,
    };
    client.send_request::<req::ApplyWorkspaceEdit>(session, request_params, move |client, response| {
        // Nothing to do when the client applied the edit successfully.
        if response.applied {
            return;
        }
        let reason = response
            .failure_reason
            .unwrap_or_else(|| String::from("unspecified reason"));
        tracing::error!("Failed to apply workspace edit: {reason}");
        client.show_error_message(format_args!("Ruff was unable to apply edits: {reason}"));
    })
}
/// Returns a string with debug information about the session and the document at the given URI.
fn debug_information(
    session: &Session,
    text_document: Option<TextDocumentIdentifier>,
) -> crate::Result<String> {
    // Path of the running server binary; best-effort only.
    let executable = std::env::current_exe()
        .map(|path| format!("{}", path.display()))
        .unwrap_or_else(|_| "<unavailable>".to_string())
    let mut buffer = String::new();
    // Global (session-wide) information is always included.
    writeln!(
        buffer,
        "Global:
executable = {executable}
version = {version}
position_encoding = {encoding:?}
workspace_root_folders = {workspace_folders:#?}
indexed_configuration_files = {config_files:#?}
open_documents_len = {open_documents_len}
client_capabilities = {client_capabilities:#?}
",
        version = crate::version(),
        encoding = session.encoding(),
        workspace_folders = session.workspace_root_folders().collect::<Vec<_>>(),
        config_files = session.config_file_paths().collect::<Vec<_>>(),
        open_documents_len = session.open_documents_len(),
        client_capabilities = session.resolved_client_capabilities(),
    )?;
    if let Some(TextDocumentIdentifier { uri }) = text_document {
        // Document-specific details are appended only when a URI was provided
        // and a snapshot can be taken for it.
        let Some(snapshot) = session.take_snapshot(uri.clone()) else {
            writeln!(buffer, "Unable to take a snapshot of the document at {uri}")?;
            return Ok(buffer);
        };
        let query = snapshot.query();
        writeln!(
            buffer,
            "Open document:
uri = {uri}
kind = {kind}
version = {version}
client_settings = {client_settings:#?}
config_path = {config_path:?}
{settings}
",
            uri = uri.clone(),
            kind = match session.key_from_url(uri) {
                DocumentKey::Notebook(_) => "Notebook",
                DocumentKey::NotebookCell(_) => "NotebookCell",
                DocumentKey::Text(_) => "Text",
            },
            version = query.version(),
            client_settings = snapshot.client_settings(),
            config_path = query.settings().path(),
            settings = query.settings(),
        )?;
    } else {
        // Without a document, fall back to printing the global client settings.
        writeln!(
            buffer,
            "global_client_settings = {:#?}",
            session.global_client_settings()
        )?;
    }
    Ok(buffer)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/format_range.rs | crates/ruff_server/src/server/api/requests/format_range.rs | use anyhow::Context;
use lsp_types::{self as types, Range, request as req};
use crate::edit::{RangeExt, ToRangeExt};
use crate::resolve::is_document_excluded_for_formatting;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::session::{Client, DocumentQuery, DocumentSnapshot};
use crate::{PositionEncoding, TextDocument};
/// Handler for the `textDocument/rangeFormatting` request.
pub(crate) struct FormatRange;
impl super::RequestHandler for FormatRange {
    type RequestType = req::RangeFormatting;
}
impl super::BackgroundDocumentRequestHandler for FormatRange {
    super::define_document_url!(params: &types::DocumentRangeFormattingParams);
    /// Formats only the requested range of the document.
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        _client: &Client,
        params: types::DocumentRangeFormattingParams,
    ) -> Result<super::FormatResponse> {
        format_document_range(&snapshot, params.range)
    }
}
/// Formats the specified [`Range`] in the [`DocumentSnapshot`].
fn format_document_range(
snapshot: &DocumentSnapshot,
range: Range,
) -> Result<super::FormatResponse> {
let text_document = snapshot
.query()
.as_single_document()
.context("Failed to get text document for the format range request")
.unwrap();
let query = snapshot.query();
let backend = snapshot
.client_settings()
.editor_settings()
.format_backend();
format_text_document_range(text_document, range, query, snapshot.encoding(), backend)
}
/// Formats the specified [`Range`] in the [`TextDocument`].
fn format_text_document_range(
    text_document: &TextDocument,
    range: Range,
    query: &DocumentQuery,
    encoding: PositionEncoding,
    backend: crate::format::FormatBackend,
) -> Result<super::FormatResponse> {
    let settings = query.settings();
    let file_path = query.virtual_file_path();
    // If the document is excluded, return early.
    if is_document_excluded_for_formatting(
        &file_path,
        &settings.file_resolver,
        &settings.formatter,
        text_document.language_id(),
    ) {
        return Ok(None);
    }
    let text = text_document.contents();
    let index = text_document.index();
    // Convert the client's (encoding-dependent) LSP range into a text range.
    let range = range.to_text_range(text, index, encoding);
    let formatted_range = crate::format::format_range(
        text_document,
        query.source_type(),
        &settings.formatter,
        range,
        &file_path,
        backend,
    )
    .with_failure_code(lsp_server::ErrorCode::InternalError)?;
    // `None` means the formatter produced no changes for this range.
    Ok(formatted_range.map(|formatted_range| {
        vec![types::TextEdit {
            range: formatted_range
                .source_range()
                .to_range(text, index, encoding),
            new_text: formatted_range.into_code(),
        }]
    }))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/format.rs | crates/ruff_server/src/server/api/requests/format.rs | use anyhow::Context;
use lsp_types::{self as types, request as req};
use types::TextEdit;
use ruff_source_file::LineIndex;
use crate::edit::{Replacement, ToRangeExt};
use crate::fix::Fixes;
use crate::resolve::is_document_excluded_for_formatting;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::session::{Client, DocumentQuery, DocumentSnapshot};
use crate::{PositionEncoding, TextDocument};
/// Handler for the `textDocument/formatting` request.
pub(crate) struct Format;
impl super::RequestHandler for Format {
    type RequestType = req::Formatting;
}
impl super::BackgroundDocumentRequestHandler for Format {
    super::define_document_url!(params: &types::DocumentFormattingParams);
    /// Formats the entire document (or the single selected notebook cell).
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        _client: &Client,
        _params: types::DocumentFormattingParams,
    ) -> Result<super::FormatResponse> {
        format_document(&snapshot)
    }
}
/// Formats either a full text document or each individual cell in a single notebook document.
pub(super) fn format_full_document(snapshot: &DocumentSnapshot) -> Result<Fixes> {
    let mut fixes = Fixes::default();
    let query = snapshot.query();
    let backend = snapshot
        .client_settings()
        .editor_settings()
        .format_backend();
    match snapshot.query() {
        DocumentQuery::Notebook { notebook, .. } => {
            // Format every cell; each cell contributes its own edits keyed by
            // the cell's URL.
            // NOTE(review): the `unwrap` assumes every URL from `notebook.urls()`
            // has a matching cell document — TODO confirm that invariant holds.
            for (url, text_document) in notebook
                .urls()
                .map(|url| (url.clone(), notebook.cell_document_by_uri(url).unwrap()))
            {
                if let Some(changes) =
                    format_text_document(text_document, query, snapshot.encoding(), true, backend)?
                {
                    fixes.insert(url, changes);
                }
            }
        }
        DocumentQuery::Text { document, .. } => {
            if let Some(changes) =
                format_text_document(document, query, snapshot.encoding(), false, backend)?
            {
                fixes.insert(snapshot.query().make_key().into_url(), changes);
            }
        }
    }
    Ok(fixes)
}
/// Formats either a full text document or an specific notebook cell. If the query within the snapshot is a notebook document
/// with no selected cell, this will throw an error.
pub(super) fn format_document(snapshot: &DocumentSnapshot) -> Result<super::FormatResponse> {
let text_document = snapshot
.query()
.as_single_document()
.context("Failed to get text document for the format request")
.unwrap();
let query = snapshot.query();
let backend = snapshot
.client_settings()
.editor_settings()
.format_backend();
format_text_document(
text_document,
query,
snapshot.encoding(),
query.as_notebook().is_some(),
backend,
)
}
/// Formats a single text document (or notebook cell) and converts the result
/// into a single minimal LSP text edit.
///
/// Returns `Ok(None)` when the document is excluded from formatting or the
/// formatter made no changes.
fn format_text_document(
    text_document: &TextDocument,
    query: &DocumentQuery,
    encoding: PositionEncoding,
    is_notebook: bool,
    backend: crate::format::FormatBackend,
) -> Result<super::FormatResponse> {
    let settings = query.settings();
    let file_path = query.virtual_file_path();
    // If the document is excluded, return early.
    if is_document_excluded_for_formatting(
        &file_path,
        &settings.file_resolver,
        &settings.formatter,
        text_document.language_id(),
    ) {
        return Ok(None);
    }
    let source = text_document.contents();
    let formatted = crate::format::format(
        text_document,
        query.source_type(),
        &settings.formatter,
        &file_path,
        backend,
    )
    .with_failure_code(lsp_server::ErrorCode::InternalError)?;
    let Some(mut formatted) = formatted else {
        return Ok(None);
    };
    // special case - avoid adding a newline to a notebook cell if it didn't already exist
    if is_notebook {
        let mut trimmed = formatted.as_str();
        if !source.ends_with("\r\n") {
            trimmed = trimmed.trim_end_matches("\r\n");
        }
        if !source.ends_with('\n') {
            trimmed = trimmed.trim_end_matches('\n');
        }
        if !source.ends_with('\r') {
            trimmed = trimmed.trim_end_matches('\r');
        }
        formatted = trimmed.to_string();
    }
    let formatted_index: LineIndex = LineIndex::from_source_text(&formatted);
    let unformatted_index = text_document.index();
    // Compute the smallest differing region between old and new text so the
    // client receives one minimal edit instead of a whole-document replacement.
    let Replacement {
        source_range,
        modified_range: formatted_range,
    } = Replacement::between(
        source,
        unformatted_index.line_starts(),
        &formatted,
        formatted_index.line_starts(),
    );
    Ok(Some(vec![TextEdit {
        range: source_range.to_range(source, unformatted_index, encoding),
        new_text: formatted[formatted_range].to_owned(),
    }]))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/code_action.rs | crates/ruff_server/src/server/api/requests/code_action.rs | use lsp_server::ErrorCode;
use lsp_types::{self as types, request as req};
use rustc_hash::FxHashSet;
use types::{CodeActionKind, CodeActionOrCommand};
use crate::DIAGNOSTIC_NAME;
use crate::edit::WorkspaceEditTracker;
use crate::lint::{DiagnosticFix, fixes_for_diagnostics};
use crate::server::Result;
use crate::server::SupportedCodeAction;
use crate::server::api::LSPResult;
use crate::session::{Client, DocumentSnapshot};
use super::code_action_resolve::{resolve_edit_for_fix_all, resolve_edit_for_organize_imports};
/// Handler for the `textDocument/codeAction` request.
pub(crate) struct CodeActions;
impl super::RequestHandler for CodeActions {
    type RequestType = req::CodeActionRequest;
}
impl super::BackgroundDocumentRequestHandler for CodeActions {
    super::define_document_url!(params: &types::CodeActionParams);
    /// Computes the code actions available for the request's document.
    ///
    /// Depending on client settings and the request's `only` filter, the
    /// response can include quick fixes, `noqa` suppression actions,
    /// `source.fixAll`, and `source.organizeImports` (plus notebook variants).
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        _client: &Client,
        params: types::CodeActionParams,
    ) -> Result<Option<types::CodeActionResponse>> {
        let mut response: types::CodeActionResponse = types::CodeActionResponse::default();
        let supported_code_actions = supported_code_actions(params.context.only.clone());
        let fixes = fixes_for_diagnostics(params.context.diagnostics)
            .with_failure_code(ErrorCode::InternalError)?;
        if snapshot.client_settings().fix_violation()
            && supported_code_actions.contains(&SupportedCodeAction::QuickFix)
        {
            response
                .extend(quick_fix(&snapshot, &fixes).with_failure_code(ErrorCode::InternalError)?);
        }
        if snapshot.client_settings().noqa_comments()
            && supported_code_actions.contains(&SupportedCodeAction::QuickFix)
        {
            response.extend(noqa_comments(&snapshot, &fixes));
        }
        if snapshot.client_settings().fix_all() {
            if supported_code_actions.contains(&SupportedCodeAction::SourceFixAll) {
                if snapshot.is_notebook_cell() {
                    // This is ignored here because the client requests this code action for each
                    // cell in parallel and the server would send a workspace edit with the same
                    // content which would result in applying the same edit multiple times
                    // resulting in (possibly) duplicate code.
                    tracing::debug!("Ignoring `source.fixAll` code action for a notebook cell");
                } else {
                    response.push(fix_all(&snapshot).with_failure_code(ErrorCode::InternalError)?);
                }
            } else if supported_code_actions.contains(&SupportedCodeAction::NotebookSourceFixAll) {
                response
                    .push(notebook_fix_all(&snapshot).with_failure_code(ErrorCode::InternalError)?);
            }
        }
        if snapshot.client_settings().organize_imports() {
            if supported_code_actions.contains(&SupportedCodeAction::SourceOrganizeImports) {
                if snapshot.is_notebook_cell() {
                    // This is ignored here because the client requests this code action for each
                    // cell in parallel and the server would send a workspace edit with the same
                    // content which would result in applying the same edit multiple times
                    // resulting in (possibly) duplicate code.
                    tracing::debug!(
                        "Ignoring `source.organizeImports` code action for a notebook cell"
                    );
                } else {
                    response.push(
                        organize_imports(&snapshot).with_failure_code(ErrorCode::InternalError)?,
                    );
                }
            } else if supported_code_actions
                .contains(&SupportedCodeAction::NotebookSourceOrganizeImports)
            {
                response.push(
                    notebook_organize_imports(&snapshot)
                        .with_failure_code(ErrorCode::InternalError)?,
                );
            }
        }
        Ok(Some(response))
    }
}
/// Builds one quick-fix code action for every diagnostic that carries at least
/// one edit; the first tracker failure aborts and is returned to the caller.
fn quick_fix(
    snapshot: &DocumentSnapshot,
    fixes: &[DiagnosticFix],
) -> crate::Result<Vec<CodeActionOrCommand>> {
    let document = snapshot.query();
    let mut actions = Vec::new();
    for fix in fixes {
        // Diagnostics without edits cannot produce a quick fix.
        if fix.edits.is_empty() {
            continue;
        }
        let document_url = document.make_key().into_url();
        let mut tracker = WorkspaceEditTracker::new(snapshot.resolved_client_capabilities());
        tracker.set_edits_for_document(
            document_url.clone(),
            document.version(),
            fix.edits.clone(),
        )?;
        actions.push(types::CodeActionOrCommand::CodeAction(types::CodeAction {
            title: format!("{DIAGNOSTIC_NAME} ({}): {}", fix.code, fix.title),
            kind: Some(types::CodeActionKind::QUICKFIX),
            edit: Some(tracker.into_workspace_edit()),
            diagnostics: Some(vec![fix.fixed_diagnostic.clone()]),
            data: Some(
                serde_json::to_value(document_url).expect("document url should serialize"),
            ),
            ..Default::default()
        }));
    }
    Ok(actions)
}
/// Builds a "disable for this line" (`noqa`) quick-fix action for each
/// diagnostic that carries a noqa edit; diagnostics without one (or whose edit
/// cannot be tracked) are silently skipped.
fn noqa_comments(snapshot: &DocumentSnapshot, fixes: &[DiagnosticFix]) -> Vec<CodeActionOrCommand> {
    let mut actions = Vec::new();
    for fix in fixes {
        let Some(edit) = fix.noqa_edit.clone() else {
            continue;
        };
        let mut tracker = WorkspaceEditTracker::new(snapshot.resolved_client_capabilities());
        let tracked = tracker.set_edits_for_document(
            snapshot.query().make_key().into_url(),
            snapshot.query().version(),
            vec![edit],
        );
        // Mirror the request's best-effort behavior: drop the action on failure.
        if tracked.is_err() {
            continue;
        }
        actions.push(types::CodeActionOrCommand::CodeAction(types::CodeAction {
            title: format!("{DIAGNOSTIC_NAME} ({}): Disable for this line", fix.code),
            kind: Some(types::CodeActionKind::QUICKFIX),
            edit: Some(tracker.into_workspace_edit()),
            diagnostics: Some(vec![fix.fixed_diagnostic.clone()]),
            data: Some(
                serde_json::to_value(snapshot.query().make_key().into_url())
                    .expect("document url should serialize"),
            ),
            ..Default::default()
        }));
    }
    actions
}
/// Builds the `source.fixAll` code action for a text document.
///
/// When the client supports deferred edit resolution, only the document URL is
/// attached as `data` and the edit is computed later in `codeAction/resolve`;
/// otherwise the full workspace edit is computed eagerly.
fn fix_all(snapshot: &DocumentSnapshot) -> crate::Result<CodeActionOrCommand> {
    let document = snapshot.query();
    let mut edit = None;
    let mut data = None;
    if snapshot
        .resolved_client_capabilities()
        .code_action_deferred_edit_resolution
    {
        // The editor will request the edit in a `CodeActionsResolve` request
        data = Some(
            serde_json::to_value(document.make_key().into_url())
                .expect("document url should serialize"),
        );
    } else {
        edit = Some(resolve_edit_for_fix_all(
            document,
            snapshot.resolved_client_capabilities(),
            snapshot.encoding(),
        )?);
    }
    Ok(CodeActionOrCommand::CodeAction(types::CodeAction {
        title: format!("{DIAGNOSTIC_NAME}: Fix all auto-fixable problems"),
        kind: Some(crate::SOURCE_FIX_ALL_RUFF),
        edit,
        data,
        ..Default::default()
    }))
}
/// Builds the notebook variant of the `source.fixAll` code action.
///
/// When the client supports deferred edit resolution, only the document URL is
/// attached as `data` and the edit is computed later in `codeAction/resolve`;
/// otherwise the full workspace edit is computed eagerly.
fn notebook_fix_all(snapshot: &DocumentSnapshot) -> crate::Result<CodeActionOrCommand> {
    let document = snapshot.query();
    let mut edit = None;
    let mut data = None;
    if snapshot
        .resolved_client_capabilities()
        .code_action_deferred_edit_resolution
    {
        // The editor will request the edit in a `CodeActionsResolve` request
        data = Some(
            serde_json::to_value(document.make_key().into_url())
                .expect("document url should serialize"),
        );
    } else {
        edit = Some(resolve_edit_for_fix_all(
            document,
            snapshot.resolved_client_capabilities(),
            snapshot.encoding(),
        )?);
    }
    Ok(CodeActionOrCommand::CodeAction(types::CodeAction {
        title: format!("{DIAGNOSTIC_NAME}: Fix all auto-fixable problems"),
        kind: Some(crate::NOTEBOOK_SOURCE_FIX_ALL_RUFF),
        edit,
        data,
        ..Default::default()
    }))
}
/// Builds the `source.organizeImports` code action for a text document.
///
/// When the client supports deferred edit resolution, only the document URL is
/// attached as `data` and the edit is computed later in `codeAction/resolve`;
/// otherwise the full workspace edit is computed eagerly.
fn organize_imports(snapshot: &DocumentSnapshot) -> crate::Result<CodeActionOrCommand> {
    let document = snapshot.query();
    let mut edit = None;
    let mut data = None;
    if snapshot
        .resolved_client_capabilities()
        .code_action_deferred_edit_resolution
    {
        // The edit will be resolved later in the `CodeActionsResolve` request
        data = Some(
            serde_json::to_value(document.make_key().into_url())
                .expect("document url should serialize"),
        );
    } else {
        edit = Some(resolve_edit_for_organize_imports(
            document,
            snapshot.resolved_client_capabilities(),
            snapshot.encoding(),
        )?);
    }
    Ok(CodeActionOrCommand::CodeAction(types::CodeAction {
        title: format!("{DIAGNOSTIC_NAME}: Organize imports"),
        kind: Some(crate::SOURCE_ORGANIZE_IMPORTS_RUFF),
        edit,
        data,
        ..Default::default()
    }))
}
/// Builds the notebook variant of the `source.organizeImports` code action.
///
/// When the client supports deferred edit resolution, only the document URL is
/// attached as `data` and the edit is computed later in `codeAction/resolve`;
/// otherwise the full workspace edit is computed eagerly.
fn notebook_organize_imports(snapshot: &DocumentSnapshot) -> crate::Result<CodeActionOrCommand> {
    let document = snapshot.query();
    let mut edit = None;
    let mut data = None;
    if snapshot
        .resolved_client_capabilities()
        .code_action_deferred_edit_resolution
    {
        // The edit will be resolved later in the `CodeActionsResolve` request
        data = Some(
            serde_json::to_value(document.make_key().into_url())
                .expect("document url should serialize"),
        );
    } else {
        edit = Some(resolve_edit_for_organize_imports(
            document,
            snapshot.resolved_client_capabilities(),
            snapshot.encoding(),
        )?);
    }
    Ok(CodeActionOrCommand::CodeAction(types::CodeAction {
        title: format!("{DIAGNOSTIC_NAME}: Organize imports"),
        kind: Some(crate::NOTEBOOK_SOURCE_ORGANIZE_IMPORTS_RUFF),
        edit,
        data,
        ..Default::default()
    }))
}
/// If `action_filter` is `None`, this returns [`SupportedCodeAction::all()`]. Otherwise,
/// the list is filtered.
fn supported_code_actions(
    action_filter: Option<Vec<CodeActionKind>>,
) -> FxHashSet<SupportedCodeAction> {
    let Some(action_filter) = action_filter else {
        return SupportedCodeAction::all().collect();
    };
    action_filter
        .into_iter()
        .flat_map(SupportedCodeAction::from_kind)
        .collect()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/requests/diagnostic.rs | crates/ruff_server/src/server/api/requests/diagnostic.rs | use crate::server::api::diagnostics::generate_diagnostics;
use crate::session::DocumentSnapshot;
use crate::{server::Result, session::Client};
use lsp_types::{self as types, request as req};
use types::{
DocumentDiagnosticReportResult, FullDocumentDiagnosticReport,
RelatedFullDocumentDiagnosticReport,
};
/// Handler for the `textDocument/diagnostic` (pull diagnostics) request.
pub(crate) struct DocumentDiagnostic;
impl super::RequestHandler for DocumentDiagnostic {
    type RequestType = req::DocumentDiagnosticRequest;
}
impl super::BackgroundDocumentRequestHandler for DocumentDiagnostic {
    super::define_document_url!(params: &types::DocumentDiagnosticParams);
    /// Responds with a full diagnostic report for the requested document.
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        _client: &Client,
        _params: types::DocumentDiagnosticParams,
    ) -> Result<DocumentDiagnosticReportResult> {
        Ok(DocumentDiagnosticReportResult::Report(
            types::DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport {
                related_documents: None,
                full_document_diagnostic_report: FullDocumentDiagnosticReport {
                    // TODO(jane): eventually this will be important for caching diagnostic information.
                    result_id: None,
                    // Pull diagnostic requests are only called for text documents,
                    // so `generate_diagnostics` is expected to yield at most one
                    // `(url, diagnostics)` entry here; take it, or default to an
                    // empty list when the document produced no diagnostics.
                    items: generate_diagnostics(&snapshot)
                        .into_iter()
                        .next()
                        .map(|(_, diagnostics)| diagnostics)
                        .unwrap_or_default(),
                },
            }),
        ))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_change_configuration.rs | crates/ruff_server/src/server/api/notifications/did_change_configuration.rs | use crate::server::Result;
use crate::session::{Client, Session};
use lsp_types as types;
use lsp_types::notification as notif;
/// Handler for the `workspace/didChangeConfiguration` notification.
pub(crate) struct DidChangeConfiguration;
impl super::NotificationHandler for DidChangeConfiguration {
    type NotificationType = notif::DidChangeConfiguration;
}
impl super::SyncNotificationHandler for DidChangeConfiguration {
    // Currently a no-op: runtime configuration changes are not yet applied.
    fn run(
        _session: &mut Session,
        _client: &Client,
        _params: types::DidChangeConfigurationParams,
    ) -> Result<()> {
        // TODO(jane): get this wired up after the pre-release
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_open_notebook.rs | crates/ruff_server/src/server/api/notifications/did_open_notebook.rs | use crate::edit::NotebookDocument;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_server::ErrorCode;
use lsp_types as types;
use lsp_types::notification as notif;
/// Handler for the `notebookDocument/didOpen` notification.
pub(crate) struct DidOpenNotebook;
impl super::NotificationHandler for DidOpenNotebook {
    type NotificationType = notif::DidOpenNotebookDocument;
}
impl super::SyncNotificationHandler for DidOpenNotebook {
    /// Registers a newly opened notebook with the session and publishes its
    /// initial diagnostics (notebooks always use push diagnostics).
    fn run(
        session: &mut Session,
        client: &Client,
        types::DidOpenNotebookDocumentParams {
            notebook_document:
                types::NotebookDocument {
                    uri,
                    version,
                    cells,
                    metadata,
                    ..
                },
            cell_text_documents,
        }: types::DidOpenNotebookDocumentParams,
    ) -> Result<()> {
        let notebook = NotebookDocument::new(
            version,
            cells,
            metadata.unwrap_or_default(),
            cell_text_documents,
        )
        .with_failure_code(ErrorCode::InternalError)?;
        session.open_notebook_document(uri.clone(), notebook);
        // publish diagnostics
        // NOTE(review): `expect` assumes a snapshot is always available for a
        // document that was just opened — TODO confirm.
        let snapshot = session
            .take_snapshot(uri)
            .expect("snapshot should be available");
        publish_diagnostics_for_document(&snapshot, client)?;
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_close_notebook.rs | crates/ruff_server/src/server/api/notifications/did_close_notebook.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::session::{Client, Session};
use lsp_types::notification as notif;
use lsp_types::{self as types, NotebookDocumentIdentifier};
/// Handler for the `notebookDocument/didClose` notification.
pub(crate) struct DidCloseNotebook;
impl super::NotificationHandler for DidCloseNotebook {
    type NotificationType = notif::DidCloseNotebookDocument;
}
impl super::SyncNotificationHandler for DidCloseNotebook {
    /// Removes the closed notebook document from the session.
    fn run(
        session: &mut Session,
        _client: &Client,
        params: types::DidCloseNotebookDocumentParams,
    ) -> Result<()> {
        let NotebookDocumentIdentifier { uri } = params.notebook_document;
        let key = session.key_from_url(uri);
        session
            .close_document(&key)
            .with_failure_code(lsp_server::ErrorCode::InternalError)?;
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/cancel.rs | crates/ruff_server/src/server/api/notifications/cancel.rs | use lsp_server::RequestId;
use lsp_types::CancelParams;
use lsp_types::notification::Cancel;
use crate::server::Result;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::{Client, Session};
pub(crate) struct CancelNotificationHandler;
impl NotificationHandler for CancelNotificationHandler {
type NotificationType = Cancel;
}
impl SyncNotificationHandler for CancelNotificationHandler {
fn run(session: &mut Session, client: &Client, params: CancelParams) -> Result<()> {
let id: RequestId = match params.id {
lsp_types::NumberOrString::Number(id) => id.into(),
lsp_types::NumberOrString::String(id) => id.into(),
};
let _ = client.cancel(session, id);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_open.rs | crates/ruff_server/src/server/api/notifications/did_open.rs | use crate::TextDocument;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_types as types;
use lsp_types::notification as notif;
/// Handler for the `textDocument/didOpen` notification.
pub(crate) struct DidOpen;
impl super::NotificationHandler for DidOpen {
    type NotificationType = notif::DidOpenTextDocument;
}
impl super::SyncNotificationHandler for DidOpen {
    /// Registers a newly opened text document with the session and, for clients
    /// without pull-diagnostics support, publishes its initial diagnostics.
    fn run(
        session: &mut Session,
        client: &Client,
        types::DidOpenTextDocumentParams {
            text_document:
                types::TextDocumentItem {
                    uri,
                    text,
                    version,
                    language_id,
                },
        }: types::DidOpenTextDocumentParams,
    ) -> Result<()> {
        let document = TextDocument::new(text, version).with_language_id(&language_id);
        session.open_text_document(uri.clone(), document);
        // Publish diagnostics if the client doesn't support pull diagnostics
        if !session.resolved_client_capabilities().pull_diagnostics {
            let snapshot = session
                .take_snapshot(uri.clone())
                .ok_or_else(|| {
                    anyhow::anyhow!("Unable to take snapshot for document with URL {uri}")
                })
                .with_failure_code(lsp_server::ErrorCode::InternalError)?;
            publish_diagnostics_for_document(&snapshot, client)?;
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_change_notebook.rs | crates/ruff_server/src/server/api/notifications/did_change_notebook.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_server::ErrorCode;
use lsp_types as types;
use lsp_types::notification as notif;
/// Handler for the `notebookDocument/didChange` notification.
pub(crate) struct DidChangeNotebook;
impl super::NotificationHandler for DidChangeNotebook {
    type NotificationType = notif::DidChangeNotebookDocument;
}
impl super::SyncNotificationHandler for DidChangeNotebook {
    /// Applies cell/metadata changes to an open notebook and republishes its
    /// diagnostics (notebooks always use push diagnostics).
    fn run(
        session: &mut Session,
        client: &Client,
        types::DidChangeNotebookDocumentParams {
            notebook_document: types::VersionedNotebookDocumentIdentifier { uri, version },
            change: types::NotebookDocumentChangeEvent { cells, metadata },
        }: types::DidChangeNotebookDocumentParams,
    ) -> Result<()> {
        let key = session.key_from_url(uri);
        session
            .update_notebook_document(&key, cells, metadata, version)
            .with_failure_code(ErrorCode::InternalError)?;
        // publish new diagnostics
        // NOTE(review): `expect` assumes a snapshot is always available for a
        // document that was just updated — TODO confirm.
        let snapshot = session
            .take_snapshot(key.into_url())
            .expect("snapshot should be available");
        publish_diagnostics_for_document(&snapshot, client)?;
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_change.rs | crates/ruff_server/src/server/api/notifications/did_change.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_server::ErrorCode;
use lsp_types as types;
use lsp_types::notification as notif;
pub(crate) struct DidChange;
impl super::NotificationHandler for DidChange {
type NotificationType = notif::DidChangeTextDocument;
}
impl super::SyncNotificationHandler for DidChange {
fn run(
session: &mut Session,
client: &Client,
types::DidChangeTextDocumentParams {
text_document:
types::VersionedTextDocumentIdentifier {
uri,
version: new_version,
},
content_changes,
}: types::DidChangeTextDocumentParams,
) -> Result<()> {
let key = session.key_from_url(uri);
session
.update_text_document(&key, content_changes, new_version)
.with_failure_code(ErrorCode::InternalError)?;
// Publish diagnostics if the client doesn't support pull diagnostics
if !session.resolved_client_capabilities().pull_diagnostics {
let snapshot = session.take_snapshot(key.into_url()).unwrap();
publish_diagnostics_for_document(&snapshot, client)?;
}
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_change_watched_files.rs | crates/ruff_server/src/server/api/notifications/did_change_watched_files.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_types as types;
use lsp_types::notification as notif;
/// Handler for the `workspace/didChangeWatchedFiles` notification.
pub(crate) struct DidChangeWatchedFiles;
impl super::NotificationHandler for DidChangeWatchedFiles {
    type NotificationType = notif::DidChangeWatchedFiles;
}
impl super::SyncNotificationHandler for DidChangeWatchedFiles {
    /// Reloads settings after watched configuration files change and refreshes
    /// diagnostics: via a client-side `workspace/diagnostic/refresh` request
    /// when supported, otherwise by republishing them directly.
    fn run(
        session: &mut Session,
        client: &Client,
        params: types::DidChangeWatchedFilesParams,
    ) -> Result<()> {
        session.reload_settings(&params.changes, client);
        if !params.changes.is_empty() {
            if session.resolved_client_capabilities().workspace_refresh {
                // Ask the client to re-request diagnostics for all documents.
                client
                    .send_request::<types::request::WorkspaceDiagnosticRefresh>(
                        session,
                        (),
                        |_, ()| (),
                    )
                    .with_failure_code(lsp_server::ErrorCode::InternalError)?;
            } else {
                // publish diagnostics for text documents
                for url in session.text_document_urls() {
                    let snapshot = session
                        .take_snapshot(url.clone())
                        .expect("snapshot should be available");
                    publish_diagnostics_for_document(&snapshot, client)?;
                }
            }
            // always publish diagnostics for notebook files (since they don't use pull diagnostics)
            for url in session.notebook_document_urls() {
                let snapshot = session
                    .take_snapshot(url.clone())
                    .expect("snapshot should be available");
                publish_diagnostics_for_document(&snapshot, client)?;
            }
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_change_workspace.rs | crates/ruff_server/src/server/api/notifications/did_change_workspace.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::session::{Client, Session};
use lsp_types as types;
use lsp_types::notification as notif;
pub(crate) struct DidChangeWorkspace;
impl super::NotificationHandler for DidChangeWorkspace {
type NotificationType = notif::DidChangeWorkspaceFolders;
}
impl super::SyncNotificationHandler for DidChangeWorkspace {
fn run(
session: &mut Session,
client: &Client,
params: types::DidChangeWorkspaceFoldersParams,
) -> Result<()> {
for types::WorkspaceFolder { uri, .. } in params.event.added {
session
.open_workspace_folder(uri, client)
.with_failure_code(lsp_server::ErrorCode::InvalidParams)?;
}
for types::WorkspaceFolder { uri, .. } in params.event.removed {
session
.close_workspace_folder(&uri)
.with_failure_code(lsp_server::ErrorCode::InvalidParams)?;
}
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/api/notifications/did_close.rs | crates/ruff_server/src/server/api/notifications/did_close.rs | use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::clear_diagnostics_for_document;
use crate::session::{Client, Session};
use lsp_types as types;
use lsp_types::notification as notif;
pub(crate) struct DidClose;
impl super::NotificationHandler for DidClose {
type NotificationType = notif::DidCloseTextDocument;
}
impl super::SyncNotificationHandler for DidClose {
fn run(
session: &mut Session,
client: &Client,
types::DidCloseTextDocumentParams {
text_document: types::TextDocumentIdentifier { uri },
}: types::DidCloseTextDocumentParams,
) -> Result<()> {
let key = session.key_from_url(uri);
// Publish an empty diagnostic report for the document. This will de-register any existing diagnostics.
let Some(snapshot) = session.take_snapshot(key.clone().into_url()) else {
tracing::debug!(
"Unable to close document with key {key} - the snapshot was unavailable"
);
return Ok(());
};
clear_diagnostics_for_document(snapshot.query(), client)?;
session
.close_document(&key)
.with_failure_code(lsp_server::ErrorCode::InternalError)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/schedule/task.rs | crates/ruff_server/src/server/schedule/task.rs | use lsp_server::RequestId;
use serde::Serialize;
use crate::session::{Client, Session};
type LocalFn = Box<dyn FnOnce(&mut Session, &Client)>;
type BackgroundFn = Box<dyn FnOnce(&Client) + Send + 'static>;
type BackgroundFnBuilder = Box<dyn FnOnce(&Session) -> BackgroundFn>;
/// Describes how the task should be run.
#[derive(Clone, Copy, Debug, Default)]
pub(in crate::server) enum BackgroundSchedule {
/// The task should be run on the background thread designated
/// for formatting actions. This is a high priority thread.
Fmt,
/// The task should be run on the general high-priority background
/// thread. Reserved for actions caused by the user typing (e.g.syntax highlighting).
#[expect(dead_code)]
LatencySensitive,
/// The task should be run on a regular-priority background thread.
/// The default for any request that isn't in the critical path of the user typing.
#[default]
Worker,
}
/// A [`Task`] is a future that has not yet started, and it is the job of
/// the [`super::Scheduler`] to make that happen, via [`super::Scheduler::dispatch`].
/// A task can either run on the main thread (in other words, the same thread as the
/// scheduler) or it can run in a background thread. The main difference between
/// the two is that background threads only have a read-only snapshot of the session,
/// while local tasks have exclusive access and can modify it as they please. Keep in mind that
/// local tasks will **block** the main event loop, so only use local tasks if you **need**
/// mutable state access or you need the absolute lowest latency possible.
#[must_use]
pub(in crate::server) enum Task {
Background(BackgroundTaskBuilder),
Sync(SyncTask),
}
// The reason why this isn't just a 'static background closure
// is because we need to take a snapshot of the session before sending
// this task to the background, and the inner closure can't take the session
// as an immutable reference since it's used mutably elsewhere. So instead,
// a background task is built using an outer closure that borrows the session to take a snapshot,
// that the inner closure can capture. This builder closure has a lifetime linked to the scheduler.
// When the task is dispatched, the scheduler runs the synchronous builder, which takes the session
// as a reference, to create the inner 'static closure. That closure is then moved to a background task pool.
pub(in crate::server) struct BackgroundTaskBuilder {
pub(super) schedule: BackgroundSchedule,
pub(super) builder: BackgroundFnBuilder,
}
pub(in crate::server) struct SyncTask {
pub(super) func: LocalFn,
}
impl Task {
/// Creates a new background task.
pub(crate) fn background(
schedule: BackgroundSchedule,
func: impl FnOnce(&Session) -> Box<dyn FnOnce(&Client) + Send + 'static> + 'static,
) -> Self {
Self::Background(BackgroundTaskBuilder {
schedule,
builder: Box::new(func),
})
}
/// Creates a new local task.
pub(crate) fn sync(func: impl FnOnce(&mut Session, &Client) + 'static) -> Self {
Self::Sync(SyncTask {
func: Box::new(func),
})
}
/// Creates a local task that immediately
/// responds with the provided `request`.
pub(crate) fn immediate<R>(id: RequestId, result: crate::server::Result<R>) -> Self
where
R: Serialize + Send + 'static,
{
Self::sync(move |_, client| {
if let Err(err) = client.respond(&id, result) {
tracing::error!("Unable to send immediate response: {err}");
}
})
}
/// Creates a local task that does nothing.
pub(crate) fn nothing() -> Self {
Self::sync(move |_, _| {})
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/schedule/thread.rs | crates/ruff_server/src/server/schedule/thread.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! A utility module for working with threads that automatically joins threads upon drop
//! and abstracts over operating system quality of service (QoS) APIs
//! through the concept of a “thread priority”.
//!
//! The priority of a thread is frozen at thread creation time,
//! i.e. there is no API to change the priority of a thread once it has been spawned.
//!
//! As a system, rust-analyzer should have the property that
//! old manual scheduling APIs are replaced entirely by QoS.
//! To maintain this invariant, we panic when it is clear that
//! old scheduling APIs have been used.
//!
//! Moreover, we also want to ensure that every thread has an priority set explicitly
//! to force a decision about its importance to the system.
//! Thus, [`ThreadPriority`] has no default value
//! and every entry point to creating a thread requires a [`ThreadPriority`] upfront.
// Keeps us from getting warnings about the word `QoS`
#![allow(clippy::doc_markdown)]
use std::fmt;
mod pool;
mod priority;
pub(super) use pool::Pool;
pub(super) use priority::ThreadPriority;
pub(super) struct Builder {
priority: ThreadPriority,
inner: jod_thread::Builder,
}
impl Builder {
pub(super) fn new(priority: ThreadPriority) -> Builder {
Builder {
priority,
inner: jod_thread::Builder::new(),
}
}
pub(super) fn name(self, name: String) -> Builder {
Builder {
inner: self.inner.name(name),
..self
}
}
pub(super) fn stack_size(self, size: usize) -> Builder {
Builder {
inner: self.inner.stack_size(size),
..self
}
}
pub(super) fn spawn<F, T>(self, f: F) -> std::io::Result<JoinHandle<T>>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
let inner_handle = self.inner.spawn(move || {
self.priority.apply_to_current_thread();
f()
})?;
Ok(JoinHandle {
inner: Some(inner_handle),
allow_leak: false,
})
}
}
pub(crate) struct JoinHandle<T = ()> {
// `inner` is an `Option` so that we can
// take ownership of the contained `JoinHandle`.
inner: Option<jod_thread::JoinHandle<T>>,
allow_leak: bool,
}
impl<T> JoinHandle<T> {
pub(crate) fn join(mut self) -> T {
self.inner.take().unwrap().join()
}
}
impl<T> Drop for JoinHandle<T> {
fn drop(&mut self) {
if !self.allow_leak {
return;
}
if let Some(join_handle) = self.inner.take() {
join_handle.detach();
}
}
}
impl<T> fmt::Debug for JoinHandle<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("JoinHandle { .. }")
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/schedule/thread/priority.rs | crates/ruff_server/src/server/schedule/thread/priority.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread/intent.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! An opaque façade around platform-specific QoS APIs.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(crate) enum ThreadPriority {
/// Any thread which does work that isn't in a critical path.
Worker,
/// Any thread which does work caused by the user typing, or
/// work that the editor may wait on.
LatencySensitive,
}
impl ThreadPriority {
// These APIs must remain private;
// we only want consumers to set thread priority
// during thread creation.
pub(crate) fn apply_to_current_thread(self) {
let class = thread_priority_to_qos_class(self);
set_current_thread_qos_class(class);
}
pub(crate) fn assert_is_used_on_current_thread(self) {
if IS_QOS_AVAILABLE {
let class = thread_priority_to_qos_class(self);
assert_eq!(get_current_thread_qos_class(), Some(class));
}
}
}
use imp::QoSClass;
const IS_QOS_AVAILABLE: bool = imp::IS_QOS_AVAILABLE;
fn set_current_thread_qos_class(class: QoSClass) {
imp::set_current_thread_qos_class(class);
}
fn get_current_thread_qos_class() -> Option<QoSClass> {
imp::get_current_thread_qos_class()
}
fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
imp::thread_priority_to_qos_class(priority)
}
// All Apple platforms use XNU as their kernel
// and thus have the concept of QoS.
#[cfg(target_vendor = "apple")]
mod imp {
use super::ThreadPriority;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(super) enum QoSClass {
// Documentation adapted from https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/include/sys/qos.h#L55
//
/// TLDR: invisible maintenance tasks
///
/// Contract:
///
/// * **You do not care about how long it takes for work to finish.**
/// * **You do not care about work being deferred temporarily.**
/// (e.g. if the device's battery is in a critical state)
///
/// Examples:
///
/// * in a video editor:
/// creating periodic backups of project files
/// * in a browser:
/// cleaning up cached sites which have not been accessed in a long time
/// * in a collaborative word processor:
/// creating a searchable index of all documents
///
/// Use this QoS class for background tasks
/// which the user did not initiate themselves
/// and which are invisible to the user.
/// It is expected that this work will take significant time to complete:
/// minutes or even hours.
///
/// This QoS class provides the most energy and thermally-efficient execution possible.
/// All other work is prioritized over background tasks.
Background,
/// TLDR: tasks that don't block using your app
///
/// Contract:
///
/// * **Your app remains useful even as the task is executing.**
///
/// Examples:
///
/// * in a video editor:
/// exporting a video to disk -
/// the user can still work on the timeline
/// * in a browser:
/// automatically extracting a downloaded zip file -
/// the user can still switch tabs
/// * in a collaborative word processor:
/// downloading images embedded in a document -
/// the user can still make edits
///
/// Use this QoS class for tasks which
/// may or may not be initiated by the user,
/// but whose result is visible.
/// It is expected that this work will take a few seconds to a few minutes.
/// Typically your app will include a progress bar
/// for tasks using this class.
///
/// This QoS class provides a balance between
/// performance, responsiveness and efficiency.
Utility,
/// TLDR: tasks that block using your app
///
/// Contract:
///
/// * **You need this work to complete
/// before the user can keep interacting with your app.**
/// * **Your work will not take more than a few seconds to complete.**
///
/// Examples:
///
/// * in a video editor:
/// opening a saved project
/// * in a browser:
/// loading a list of the user's bookmarks and top sites
/// when a new tab is created
/// * in a collaborative word processor:
/// running a search on the document's content
///
/// Use this QoS class for tasks which were initiated by the user
/// and block the usage of your app while they are in progress.
/// It is expected that this work will take a few seconds or less to complete;
/// not long enough to cause the user to switch to something else.
/// Your app will likely indicate progress on these tasks
/// through the display of placeholder content or modals.
///
/// This QoS class is not energy-efficient.
/// Rather, it provides responsiveness
/// by prioritizing work above other tasks on the system
/// except for critical user-interactive work.
UserInitiated,
/// TLDR: render loops and nothing else
///
/// Contract:
///
/// * **You absolutely need this work to complete immediately
/// or your app will appear to freeze.**
/// * **Your work will always complete virtually instantaneously.**
///
/// Examples:
///
/// * the main thread in a GUI application
/// * the update & render loop in a game
/// * a secondary thread which progresses an animation
///
/// Use this QoS class for any work which, if delayed,
/// will make your user interface unresponsive.
/// It is expected that this work will be virtually instantaneous.
///
/// This QoS class is not energy-efficient.
/// Specifying this class is a request to run with
/// nearly all available system CPU and I/O bandwidth even under contention.
UserInteractive,
}
pub(super) const IS_QOS_AVAILABLE: bool = true;
pub(super) fn set_current_thread_qos_class(class: QoSClass) {
let c = match class {
QoSClass::UserInteractive => libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE,
QoSClass::UserInitiated => libc::qos_class_t::QOS_CLASS_USER_INITIATED,
QoSClass::Utility => libc::qos_class_t::QOS_CLASS_UTILITY,
QoSClass::Background => libc::qos_class_t::QOS_CLASS_BACKGROUND,
};
#[expect(unsafe_code)]
let code = unsafe { libc::pthread_set_qos_class_self_np(c, 0) };
if code == 0 {
return;
}
#[expect(unsafe_code)]
let errno = unsafe { *libc::__error() };
match errno {
libc::EPERM => {
// This thread has been excluded from the QoS system
// due to a previous call to a function such as `pthread_setschedparam`
// which is incompatible with QoS.
//
// Panic instead of returning an error
// to maintain the invariant that we only use QoS APIs.
panic!("tried to set QoS of thread which has opted out of QoS (os error {errno})")
}
libc::EINVAL => {
// This is returned if we pass something other than a qos_class_t
// to `pthread_set_qos_class_self_np`.
//
// This is impossible, so again panic.
unreachable!(
"invalid qos_class_t value was passed to pthread_set_qos_class_self_np"
)
}
_ => {
// `pthread_set_qos_class_self_np`’s documentation
// does not mention any other errors.
unreachable!("`pthread_set_qos_class_self_np` returned unexpected error {errno}")
}
}
}
pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
#[expect(unsafe_code)]
let current_thread = unsafe { libc::pthread_self() };
let mut qos_class_raw = libc::qos_class_t::QOS_CLASS_UNSPECIFIED;
#[expect(unsafe_code)]
let code = unsafe {
libc::pthread_get_qos_class_np(
current_thread,
&raw mut qos_class_raw,
std::ptr::null_mut(),
)
};
if code != 0 {
// `pthread_get_qos_class_np`’s documentation states that
// an error value is placed into errno if the return code is not zero.
// However, it never states what errors are possible.
// Inspecting the source[0] shows that, as of this writing, it always returns zero.
//
// Whatever errors the function could report in future are likely to be
// ones which we cannot handle anyway
//
// 0: https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/src/qos.c#L171-L177
#[expect(unsafe_code)]
let errno = unsafe { *libc::__error() };
unreachable!("`pthread_get_qos_class_np` failed unexpectedly (os error {errno})");
}
match qos_class_raw {
libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE => Some(QoSClass::UserInteractive),
libc::qos_class_t::QOS_CLASS_USER_INITIATED => Some(QoSClass::UserInitiated),
libc::qos_class_t::QOS_CLASS_DEFAULT => None, // QoS has never been set
libc::qos_class_t::QOS_CLASS_UTILITY => Some(QoSClass::Utility),
libc::qos_class_t::QOS_CLASS_BACKGROUND => Some(QoSClass::Background),
libc::qos_class_t::QOS_CLASS_UNSPECIFIED => {
// Using manual scheduling APIs causes threads to “opt out” of QoS.
// At this point they become incompatible with QoS,
// and as such have the “unspecified” QoS class.
//
// Panic instead of returning an error
// to maintain the invariant that we only use QoS APIs.
panic!("tried to get QoS of thread which has opted out of QoS")
}
}
}
pub(super) fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
match priority {
ThreadPriority::Worker => QoSClass::Utility,
ThreadPriority::LatencySensitive => QoSClass::UserInitiated,
}
}
}
// FIXME: Windows has QoS APIs, we should use them!
#[cfg(not(target_vendor = "apple"))]
mod imp {
use super::ThreadPriority;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(super) enum QoSClass {
Default,
}
pub(super) const IS_QOS_AVAILABLE: bool = false;
pub(super) fn set_current_thread_qos_class(_: QoSClass) {}
pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
None
}
pub(super) fn thread_priority_to_qos_class(_: ThreadPriority) -> QoSClass {
QoSClass::Default
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/src/server/schedule/thread/pool.rs | crates/ruff_server/src/server/schedule/thread/pool.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread/pool.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! [`Pool`] implements a basic custom thread pool
//! inspired by the [`threadpool` crate](http://docs.rs/threadpool).
//! When you spawn a task you specify a thread priority
//! so the pool can schedule it to run on a thread with that priority.
//! rust-analyzer uses this to prioritize work based on latency requirements.
//!
//! The thread pool is implemented entirely using
//! the threading utilities in [`crate::server::schedule::thread`].
use std::{
num::NonZeroUsize,
panic::AssertUnwindSafe,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
};
use crossbeam::channel::{Receiver, Sender};
use super::{Builder, JoinHandle, ThreadPriority};
pub(crate) struct Pool {
// `_handles` is never read: the field is present
// only for its `Drop` impl.
// The worker threads exit once the channel closes;
// make sure to keep `job_sender` above `handles`
// so that the channel is actually closed
// before we join the worker threads!
job_sender: Sender<Job>,
_handles: Vec<JoinHandle>,
extant_tasks: Arc<AtomicUsize>,
}
struct Job {
requested_priority: ThreadPriority,
f: Box<dyn FnOnce() + Send + 'static>,
}
impl Pool {
pub(crate) fn new(threads: NonZeroUsize) -> Pool {
// Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
const STACK_SIZE: usize = 2 * 1024 * 1024;
const INITIAL_PRIORITY: ThreadPriority = ThreadPriority::Worker;
let threads = usize::from(threads);
// Channel buffer capacity is between 2 and 4, depending on the pool size.
let (job_sender, job_receiver) = crossbeam::channel::bounded(std::cmp::min(threads * 2, 4));
let extant_tasks = Arc::new(AtomicUsize::new(0));
let mut handles = Vec::with_capacity(threads);
for i in 0..threads {
let handle = Builder::new(INITIAL_PRIORITY)
.stack_size(STACK_SIZE)
.name(format!("ruff:worker:{i}"))
.spawn({
let extant_tasks = Arc::clone(&extant_tasks);
let job_receiver: Receiver<Job> = job_receiver.clone();
move || {
let mut current_priority = INITIAL_PRIORITY;
for job in job_receiver {
if job.requested_priority != current_priority {
job.requested_priority.apply_to_current_thread();
current_priority = job.requested_priority;
}
extant_tasks.fetch_add(1, Ordering::SeqCst);
// SAFETY: it's safe to assume that `job.f` is unwind safe because we always
// abort the process if it panics.
// Panicking here ensures that we don't swallow errors and is the same as
// what rayon does.
// Any recovery should be implemented outside the thread pool (e.g. when
// dispatching requests/notifications etc).
if let Err(error) = std::panic::catch_unwind(AssertUnwindSafe(job.f)) {
if let Some(msg) = error.downcast_ref::<String>() {
tracing::error!("Worker thread panicked with: {msg}; aborting");
} else if let Some(msg) = error.downcast_ref::<&str>() {
tracing::error!("Worker thread panicked with: {msg}; aborting");
} else {
tracing::error!(
"Worker thread panicked with: {error:?}; aborting"
);
}
std::process::abort();
}
extant_tasks.fetch_sub(1, Ordering::SeqCst);
}
}
})
.expect("failed to spawn thread");
handles.push(handle);
}
Pool {
_handles: handles,
extant_tasks,
job_sender,
}
}
pub(crate) fn spawn<F>(&self, priority: ThreadPriority, f: F)
where
F: FnOnce() + Send + 'static,
{
let f = Box::new(move || {
if cfg!(debug_assertions) {
priority.assert_is_used_on_current_thread();
}
f();
});
let job = Job {
requested_priority: priority,
f,
};
self.job_sender.send(job).unwrap();
}
#[expect(dead_code)]
pub(super) fn len(&self) -> usize {
self.extant_tasks.load(Ordering::SeqCst)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/tests/notebook.rs | crates/ruff_server/tests/notebook.rs | use std::{
path::{Path, PathBuf},
str::FromStr,
};
use lsp_types::{
ClientCapabilities, LSPObject, NotebookDocumentCellChange, NotebookDocumentChangeTextContent,
Position, Range, TextDocumentContentChangeEvent, VersionedTextDocumentIdentifier,
};
use ruff_notebook::SourceValue;
use ruff_server::{Client, ClientOptions, GlobalOptions, Workspace, Workspaces};
const SUPER_RESOLUTION_OVERVIEW_PATH: &str =
"./resources/test/fixtures/tensorflow_test_notebook.ipynb";
struct NotebookChange {
version: i32,
metadata: Option<LSPObject>,
updated_cells: lsp_types::NotebookDocumentCellChange,
}
#[test]
fn super_resolution_overview() {
let file_path =
std::fs::canonicalize(PathBuf::from_str(SUPER_RESOLUTION_OVERVIEW_PATH).unwrap()).unwrap();
let file_url = lsp_types::Url::from_file_path(&file_path).unwrap();
let notebook = create_notebook(&file_path).unwrap();
insta::assert_snapshot!("initial_notebook", notebook_source(¬ebook));
let (main_loop_sender, main_loop_receiver) = crossbeam::channel::unbounded();
let (client_sender, client_receiver) = crossbeam::channel::unbounded();
let client = Client::new(main_loop_sender, client_sender);
let options = GlobalOptions::default();
let global = options.into_settings(client.clone());
let mut session = ruff_server::Session::new(
&ClientCapabilities::default(),
ruff_server::PositionEncoding::UTF16,
global,
&Workspaces::new(vec![
Workspace::new(lsp_types::Url::from_file_path(file_path.parent().unwrap()).unwrap())
.with_options(ClientOptions::default()),
]),
&client,
)
.unwrap();
session.open_notebook_document(file_url.clone(), notebook);
let changes = [NotebookChange {
version: 0,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 5),
version: 2,
},
changes: vec![
TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 18,
character: 61,
},
end: Position {
line: 18,
character: 62,
},
}),
range_length: Some(1),
text: "\"".to_string(),
},
TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 18,
character: 55,
},
end: Position {
line: 18,
character: 56,
},
}),
range_length: Some(1),
text: "\"".to_string(),
},
TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 14,
character: 46,
},
end: Position {
line: 14,
character: 47,
},
}),
range_length: Some(1),
text: "\"".to_string(),
},
TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 14,
character: 40,
},
end: Position {
line: 14,
character: 41,
},
}),
range_length: Some(1),
text: "\"".to_string(),
},
],
}]),
},
},
NotebookChange {
version: 1,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 4),
version: 2
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 0,
character: 0
},
end: Position {
line: 0,
character: 181
} }),
range_length: Some(181),
text: "test_img_path = tf.keras.utils.get_file(\n \"lr.jpg\",\n \"https://raw.githubusercontent.com/tensorflow/examples/master/lite/examples/super_resolution/android/app/src/main/assets/lr-1.jpg\",\n)".to_string()
}
]
}
]
)
}
},
NotebookChange {
version: 2,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 2),
version: 2,
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 3,
character: 0,
},
end: Position {
line: 3,
character: 21,
},
}),
range_length: Some(21),
text: "\nprint(tf.__version__)".to_string(),
}],
}]),
}
},
NotebookChange {
version: 3,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 1),
version: 2,
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 0,
character: 0,
},
end: Position {
line: 0,
character: 49,
},
}),
range_length: Some(49),
text: "!pip install matplotlib tensorflow tensorflow-hub".to_string(),
}],
}]),
},
},
NotebookChange {
version: 4,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 3),
version: 2,
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 3,
character: 0,
},
end: Position {
line: 15,
character: 37,
},
}),
range_length: Some(457),
text: "\n@tf.function(input_signature=[tf.TensorSpec(shape=[1, 50, 50, 3], dtype=tf.float32)])\ndef f(input):\n return concrete_func(input)\n\n\nconverter = tf.lite.TFLiteConverter.from_concrete_functions(\n [f.get_concrete_function()], model\n)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\ntflite_model = converter.convert()\n\n# Save the TF Lite model.\nwith tf.io.gfile.GFile(\"ESRGAN.tflite\", \"wb\") as f:\n f.write(tflite_model)\n\nesrgan_model_path = \"./ESRGAN.tflite\"".to_string(),
}],
}]),
},
},
NotebookChange {
version: 5,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 0),
version: 2,
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 0,
character: 0,
},
end: Position {
line: 2,
character: 0,
},
}),
range_length: Some(139),
text: "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n".to_string(),
}],
}]),
},
},
NotebookChange {
version: 6,
metadata: None,
updated_cells: NotebookDocumentCellChange {
structure: None,
data: None,
text_content: Some(vec![NotebookDocumentChangeTextContent {
document: VersionedTextDocumentIdentifier {
uri: make_cell_uri(&file_path, 6),
version: 2,
},
changes: vec![TextDocumentContentChangeEvent {
range: Some(Range {
start: Position {
line: 1,
character: 0,
},
end: Position {
line: 14,
character: 28,
},
}),
range_length: Some(361),
text: "plt.figure(figsize=(1, 1))\nplt.title(\"LR\")\nplt.imshow(lr.numpy())\nplt.figure(figsize=(10, 4))\nplt.subplot(1, 2, 1)\nplt.title(f\"ESRGAN (x4)\")\nplt.imshow(sr.numpy())\nbicubic = tf.image.resize(lr, [200, 200], tf.image.ResizeMethod.BICUBIC)\nbicubic = tf.cast(bicubic, tf.uint8)\nplt.subplot(1, 2, 2)\nplt.title(\"Bicubic\")\nplt.imshow(bicubic.numpy());".to_string(),
}],
}]),
},
}
];
let key = session.key_from_url(file_url.clone());
for NotebookChange {
version,
metadata,
updated_cells,
} in changes
{
session
.update_notebook_document(&key, Some(updated_cells), metadata, version)
.unwrap();
}
let snapshot = session.take_snapshot(file_url).unwrap();
insta::assert_snapshot!(
"changed_notebook",
notebook_source(snapshot.query().as_notebook().unwrap())
);
assert!(client_receiver.is_empty());
assert!(main_loop_receiver.is_empty());
}
/// Renders the concatenated source of all cells in `notebook` as a single string.
fn notebook_source(notebook: &ruff_server::NotebookDocument) -> String {
    let ruff_notebook = notebook.make_ruff_notebook();
    ruff_notebook.source_code().to_owned()
}
/// Produces an opaque `notebook-cell:` URL identifying the cell at `index`
/// within the notebook document at `path`.
fn make_cell_uri(path: &Path, index: usize) -> lsp_types::Url {
    let file_name = path.file_name().unwrap().to_string_lossy();
    let uri = format!(
        "notebook-cell:///Users/test/notebooks/{}.ipynb?cell={index}",
        file_name
    );
    lsp_types::Url::parse(&uri).unwrap()
}
fn create_notebook(file_path: &Path) -> anyhow::Result<ruff_server::NotebookDocument> {
let ruff_notebook = ruff_notebook::Notebook::from_path(file_path)?;
let mut cells = vec![];
let mut cell_documents = vec![];
for (i, cell) in ruff_notebook
.cells()
.iter()
.filter(|cell| cell.is_code_cell())
.enumerate()
{
let uri = make_cell_uri(file_path, i);
let (lsp_cell, cell_document) = cell_to_lsp_cell(cell, uri)?;
cells.push(lsp_cell);
cell_documents.push(cell_document);
}
let serde_json::Value::Object(metadata) = serde_json::to_value(ruff_notebook.metadata())?
else {
anyhow::bail!("Notebook metadata was not an object");
};
ruff_server::NotebookDocument::new(0, cells, metadata, cell_documents)
}
/// Converts a `ruff_notebook` cell into its LSP cell representation together
/// with the text document item holding the cell's contents.
fn cell_to_lsp_cell(
    cell: &ruff_notebook::Cell,
    cell_uri: lsp_types::Url,
) -> anyhow::Result<(lsp_types::NotebookCell, lsp_types::TextDocumentItem)> {
    // Cell sources may be stored as one string or as an array of lines.
    let text = match cell.source() {
        SourceValue::String(string) => string.clone(),
        SourceValue::StringArray(lines) => lines.join(""),
    };

    let metadata = match serde_json::to_value(cell.metadata())? {
        serde_json::Value::Null => None,
        serde_json::Value::Object(object) => Some(object),
        _ => anyhow::bail!("Notebook cell metadata was not an object"),
    };

    let kind = match cell {
        ruff_notebook::Cell::Code(_) => lsp_types::NotebookCellKind::Code,
        ruff_notebook::Cell::Markdown(_) => lsp_types::NotebookCellKind::Markup,
        // Raw cells are filtered out before conversion — TODO confirm at call sites.
        ruff_notebook::Cell::Raw(_) => unreachable!(),
    };

    let lsp_cell = lsp_types::NotebookCell {
        kind,
        document: cell_uri.clone(),
        metadata,
        execution_summary: None,
    };
    let document = lsp_types::TextDocumentItem::new(cell_uri, "python".to_string(), 1, text);

    Ok((lsp_cell, document))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_server/tests/document.rs | crates/ruff_server/tests/document.rs | const PANDAS_HTML_SRC: &str = include_str!("../resources/test/fixtures/pandas_html.py");
use lsp_types::{Position, Range, TextDocumentContentChangeEvent};
use ruff_server::{PositionEncoding, TextDocument};
#[test]
fn delete_lines_pandas_html() {
    let mut document = TextDocument::new(PANDAS_HTML_SRC.to_string(), 1);

    // Builds a ranged change event from (line, character) start/end positions.
    let event = |start: (u32, u32), end: (u32, u32), len: u32, text: &str| {
        TextDocumentContentChangeEvent {
            range: Some(Range {
                start: Position {
                    line: start.0,
                    character: start.1,
                },
                end: Position {
                    line: end.0,
                    character: end.1,
                },
            }),
            range_length: Some(len),
            text: text.to_string(),
        }
    };

    // A multi-line deletion followed by a replacement and three single-character
    // insertions, mirroring an interactive editing session.
    let changes = vec![
        event((79, 0), (91, 67), 388, ""),
        event((81, 4), (81, 36), 32, "p"),
        event((81, 5), (81, 5), 0, "a"),
        event((81, 6), (81, 6), 0, "s"),
        event((81, 7), (81, 7), 0, "s"),
    ];

    // Apply each change with an increasing document version, as a client would.
    let mut version = 2;
    for change in changes {
        document.apply_changes(vec![change], version, PositionEncoding::UTF16);
        version += 1;
    }

    insta::assert_snapshot!(document.contents());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/db.rs | crates/ty_project/src/db.rs | use std::fmt::Formatter;
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use std::{cmp, fmt};
pub use self::changes::ChangeResult;
use crate::CollectReporter;
use crate::metadata::settings::file_settings;
use crate::{ProgressReporter, Project, ProjectMetadata};
use get_size2::StandardTracker;
use ruff_db::Db as SourceDb;
use ruff_db::diagnostic::Diagnostic;
use ruff_db::files::{File, Files};
use ruff_db::system::System;
use ruff_db::vendored::VendoredFileSystem;
use salsa::{Database, Event, Setter};
use ty_module_resolver::SearchPaths;
use ty_python_semantic::lint::{LintRegistry, RuleSelection};
use ty_python_semantic::{AnalysisSettings, Db as SemanticDb, Program};
mod changes;
/// Project-aware database trait: extends the semantic database with access to
/// the enclosing [`Project`].
#[salsa::db]
pub trait Db: SemanticDb {
    /// Returns the project this database belongs to.
    fn project(&self) -> Project;

    /// Returns an owned, boxed clone of this database handle.
    fn dyn_clone(&self) -> Box<dyn Db>;
}
/// The Salsa database for a ty project.
///
/// `project` is `None` only while the database is being constructed
/// (see [`ProjectDatabase::new`]).
#[salsa::db]
#[derive(Clone)]
pub struct ProjectDatabase {
    project: Option<Project>,
    files: Files,
    // IMPORTANT: Never return clones of `system` outside `ProjectDatabase` (only return references)
    // or the "trick" to get a mutable `Arc` in `Self::system_mut` is no longer guaranteed to work.
    system: Arc<dyn System + Send + Sync + RefUnwindSafe>,
    // IMPORTANT: This field must be the last because we use `trigger_cancellation` (drops all other storage references)
    // to drop all other references to the database, which gives us exclusive access to other `Arc`s stored on this db.
    // However, for this to work it's important that the `storage` is dropped AFTER any `Arc` that
    // we try to mutably borrow using `Arc::get_mut` (like `system`).
    storage: salsa::Storage<ProjectDatabase>,
}
impl ProjectDatabase {
    /// Creates a new project database over `system`, initializing the
    /// `Program` singleton and resolving the project from `project_metadata`.
    pub fn new<S>(project_metadata: ProjectMetadata, system: S) -> anyhow::Result<Self>
    where
        S: System + 'static + Send + Sync + RefUnwindSafe,
    {
        let mut db = Self {
            project: None,
            storage: salsa::Storage::new(if tracing::enabled!(tracing::Level::TRACE) {
                // Log every Salsa event except the very frequent cancellation checks.
                Some(Box::new({
                    move |event: Event| {
                        if matches!(event.kind, salsa::EventKind::WillCheckCancellation) {
                            return;
                        }
                        tracing::trace!("Salsa event: {event:?}");
                    }
                }))
            } else {
                None
            }),
            files: Files::default(),
            system: Arc::new(system),
        };

        // TODO: Use the `program_settings` to compute the key for the database's persistent
        // cache and load the cache if it exists.
        // we may want to have a dedicated method for this?

        // Initialize the `Program` singleton
        let program_settings = project_metadata.to_program_settings(db.system(), db.vendored())?;
        Program::from_settings(&db, program_settings);

        db.project = Some(
            Project::from_metadata(&db, project_metadata)
                .map_err(|error| anyhow::anyhow!("{}", error.pretty(&db)))?,
        );

        Ok(db)
    }

    /// Checks the files in the project and its dependencies as per the project's check mode.
    ///
    /// Use [`set_check_mode`] to update the check mode.
    ///
    /// [`set_check_mode`]: ProjectDatabase::set_check_mode
    pub fn check(&self) -> Vec<Diagnostic> {
        let mut collector = CollectReporter::default();
        self.project().check(self, &mut collector);
        collector.into_sorted(self)
    }

    /// Checks the files in the project and its dependencies, using the given reporter.
    ///
    /// Use [`set_check_mode`] to update the check mode.
    ///
    /// [`set_check_mode`]: ProjectDatabase::set_check_mode
    pub fn check_with_reporter(&self, reporter: &mut dyn ProgressReporter) {
        self.project().check(self, reporter);
    }

    /// Checks a single `file` and returns its diagnostics.
    #[tracing::instrument(level = "debug", skip(self))]
    pub fn check_file(&self, file: File) -> Vec<Diagnostic> {
        self.project().check_file(self, file)
    }

    /// Set the check mode for the project.
    pub fn set_check_mode(&mut self, mode: CheckMode) {
        // Only write the input when the mode actually changed.
        if self.project().check_mode(self) != mode {
            tracing::debug!("Updating project to check {mode}");
            self.project().set_check_mode(self).to(mode);
        }
    }

    /// Returns a mutable reference to the system.
    ///
    /// WARNING: Triggers a new revision, canceling other database handles. This can lead to deadlock.
    pub fn system_mut(&mut self) -> &mut dyn System {
        self.trigger_cancellation();

        Arc::get_mut(&mut self.system).expect(
            "ref count should be 1 because `trigger_cancellation` drops all other DB references.",
        )
    }

    /// Returns a [`SalsaMemoryDump`] that can be used to dump Salsa memory usage information
    /// to the CLI after a typechecker run.
    pub fn salsa_memory_dump(&self) -> SalsaMemoryDump {
        let memory_usage = ruff_memory_usage::attach_tracker(StandardTracker::new(), || {
            <dyn salsa::Database>::memory_usage(self)
        });

        // Drop empty ingredients and queries; they would only add noise to the report.
        let mut ingredients = memory_usage
            .structs
            .into_iter()
            .filter(|ingredient| ingredient.count() > 0)
            .collect::<Vec<_>>();
        let mut memos = memory_usage
            .queries
            .into_iter()
            .filter(|(_, memos)| memos.count() > 0)
            .collect::<Vec<_>>();

        // Sort both lists by total size (fields + tracked heap size), largest first.
        ingredients.sort_by_key(|ingredient| {
            let heap_size = ingredient.heap_size_of_fields().unwrap_or_else(|| {
                // Salsa currently does not expose a way to track the heap size of interned
                // query arguments.
                if !ingredient.debug_name().contains("interned_arguments") {
                    tracing::warn!(
                        "expected `heap_size` to be provided by Salsa struct `{}`",
                        ingredient.debug_name()
                    );
                }

                0
            });

            cmp::Reverse(ingredient.size_of_fields() + heap_size)
        });
        memos.sort_by_key(|(query, memo)| {
            let heap_size = memo.heap_size_of_fields().unwrap_or_else(|| {
                tracing::warn!("expected `heap_size` to be provided by Salsa query `{query}`");
                0
            });

            cmp::Reverse(memo.size_of_fields() + heap_size)
        });

        // Accumulate grand totals over the filtered lists.
        let mut total_fields = 0;
        let mut total_metadata = 0;
        for ingredient in &ingredients {
            total_fields += ingredient.size_of_fields();
            total_fields += ingredient.heap_size_of_fields().unwrap_or(0);
            total_metadata += ingredient.size_of_metadata();
        }

        let mut total_memo_fields = 0;
        let mut total_memo_metadata = 0;
        for (_, memo) in &memos {
            total_memo_fields += memo.size_of_fields();
            total_memo_fields += memo.heap_size_of_fields().unwrap_or(0);
            total_memo_metadata += memo.size_of_metadata();
        }

        SalsaMemoryDump {
            total_fields,
            total_metadata,
            total_memo_fields,
            total_memo_metadata,
            ingredients,
            memos,
        }
    }
}
impl std::fmt::Debug for ProjectDatabase {
    /// Formats the database, omitting the Salsa storage (`finish_non_exhaustive`).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut debug = f.debug_struct("ProjectDatabase");
        debug.field("project", &self.project);
        debug.field("files", &self.files);
        debug.field("system", &self.system);
        debug.finish_non_exhaustive()
    }
}
/// Controls which files [`Project::check`] includes.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, get_size2::GetSize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub enum CheckMode {
    /// Checks the open files in the project.
    OpenFiles,

    /// Checks all files in the project, ignoring the open file set.
    ///
    /// This includes virtual files, such as those opened in an editor.
    #[default]
    AllFiles,
}
impl fmt::Display for CheckMode {
    /// Human-readable label used in log messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            CheckMode::OpenFiles => "open files",
            CheckMode::AllFiles => "all files",
        };
        f.write_str(label)
    }
}
/// Stores memory usage information.
pub struct SalsaMemoryDump {
    // Aggregate bytes across all Salsa struct ingredients (fields incl. heap, and metadata).
    total_fields: usize,
    total_metadata: usize,
    // Aggregate bytes across all memoized query results.
    total_memo_fields: usize,
    total_memo_metadata: usize,
    // Per-ingredient breakdowns, sorted largest-first by `salsa_memory_dump`.
    ingredients: Vec<salsa::IngredientInfo>,
    memos: Vec<(&'static str, salsa::IngredientInfo)>,
}
/// Converts a byte count into (decimal) megabytes.
#[allow(clippy::cast_precision_loss)]
fn bytes_to_mb(total: usize) -> f64 {
    let bytes = total as f64;
    bytes / 1e6
}
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
impl SalsaMemoryDump {
    /// Returns a short report that provides total memory usage information.
    pub fn display_short(&self) -> impl fmt::Display + '_ {
        struct DisplayShort<'a>(&'a SalsaMemoryDump);

        impl fmt::Display for DisplayShort<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let SalsaMemoryDump {
                    total_fields,
                    total_metadata,
                    total_memo_fields,
                    total_memo_metadata,
                    ref ingredients,
                    ref memos,
                } = *self.0;

                writeln!(f, "=======SALSA SUMMARY=======")?;

                writeln!(
                    f,
                    "TOTAL MEMORY USAGE: {:.2}MB",
                    bytes_to_mb(
                        total_metadata + total_fields + total_memo_fields + total_memo_metadata
                    )
                )?;

                writeln!(
                    f,
                    " struct metadata = {:.2}MB",
                    bytes_to_mb(total_metadata),
                )?;
                writeln!(f, " struct fields = {:.2}MB", bytes_to_mb(total_fields))?;
                writeln!(
                    f,
                    " memo metadata = {:.2}MB",
                    bytes_to_mb(total_memo_metadata),
                )?;
                writeln!(
                    f,
                    " memo fields = {:.2}MB",
                    bytes_to_mb(total_memo_fields),
                )?;

                writeln!(f, "QUERY COUNT: {}", memos.len())?;
                writeln!(f, "STRUCT COUNT: {}", ingredients.len())?;

                Ok(())
            }
        }

        DisplayShort(self)
    }

    /// Returns a short report that provides fine-grained memory usage information per
    /// Salsa ingredient.
    pub fn display_full(&self) -> impl fmt::Display + '_ {
        struct DisplayFull<'a>(&'a SalsaMemoryDump);

        impl fmt::Display for DisplayFull<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let SalsaMemoryDump {
                    total_fields,
                    total_metadata,
                    total_memo_fields,
                    total_memo_metadata,
                    ref ingredients,
                    ref memos,
                } = *self.0;

                // One line per Salsa struct ingredient, largest first.
                writeln!(f, "=======SALSA STRUCTS=======")?;
                for ingredient in ingredients {
                    let size_of_fields =
                        ingredient.size_of_fields() + ingredient.heap_size_of_fields().unwrap_or(0);

                    writeln!(
                        f,
                        "{:<50} metadata={:<8} fields={:<8} count={}",
                        format!("`{}`", ingredient.debug_name()),
                        format!("{:.2}MB", bytes_to_mb(ingredient.size_of_metadata())),
                        format!("{:.2}MB", bytes_to_mb(size_of_fields)),
                        ingredient.count()
                    )?;
                }

                // One entry per memoized query, largest first.
                writeln!(f, "=======SALSA QUERIES=======")?;
                for (query_fn, memo) in memos {
                    let size_of_fields =
                        memo.size_of_fields() + memo.heap_size_of_fields().unwrap_or(0);

                    writeln!(f, "`{query_fn} -> {}`", memo.debug_name())?;

                    writeln!(
                        f,
                        " metadata={:<8} fields={:<8} count={}",
                        format!("{:.2}MB", bytes_to_mb(memo.size_of_metadata())),
                        format!("{:.2}MB", bytes_to_mb(size_of_fields)),
                        memo.count()
                    )?;
                }

                writeln!(f, "=======SALSA SUMMARY=======")?;
                writeln!(
                    f,
                    "TOTAL MEMORY USAGE: {:.2}MB",
                    bytes_to_mb(
                        total_metadata + total_fields + total_memo_fields + total_memo_metadata
                    )
                )?;

                writeln!(
                    f,
                    " struct metadata = {:.2}MB",
                    bytes_to_mb(total_metadata),
                )?;
                writeln!(f, " struct fields = {:.2}MB", bytes_to_mb(total_fields))?;
                writeln!(
                    f,
                    " memo metadata = {:.2}MB",
                    bytes_to_mb(total_memo_metadata),
                )?;
                writeln!(
                    f,
                    " memo fields = {:.2}MB",
                    bytes_to_mb(total_memo_fields),
                )?;

                Ok(())
            }
        }

        DisplayFull(self)
    }

    /// Returns a redacted report that provides rounded totals of memory usage, to avoid
    /// overly sensitive diffs in `mypy-primer` runs.
    pub fn display_mypy_primer(&self) -> impl fmt::Display + '_ {
        struct DisplayShort<'a>(&'a SalsaMemoryDump);

        fn round_memory(total: usize) -> usize {
            // Round the number to the nearest power of 1.05. This gives us a
            // 2.5% threshold before the memory usage number is considered to have
            // changed.
            //
            // TODO: Small changes in memory usage may cause the number to be rounded
            // into the next power if it happened to already be close to the threshold.
            // This also means that differences may surface as a result of small changes
            // over time that are unrelated to the current change. Ideally we could compare
            // the exact numbers across runs and compute the difference, but we don't have
            // the infrastructure for that currently.
            const BASE: f64 = 1.05;
            BASE.powf(bytes_to_mb(total).log(BASE).round()) as usize
        }

        impl fmt::Display for DisplayShort<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let SalsaMemoryDump {
                    total_fields,
                    total_metadata,
                    total_memo_fields,
                    total_memo_metadata,
                    ..
                } = *self.0;

                writeln!(f, "=======SALSA SUMMARY=======")?;
                writeln!(
                    f,
                    "TOTAL MEMORY USAGE: ~{}MB",
                    round_memory(
                        total_metadata + total_fields + total_memo_fields + total_memo_metadata
                    )
                )?;

                writeln!(
                    f,
                    " struct metadata = ~{}MB",
                    round_memory(total_metadata)
                )?;
                writeln!(f, " struct fields = ~{}MB", round_memory(total_fields))?;
                writeln!(
                    f,
                    " memo metadata = ~{}MB",
                    round_memory(total_memo_metadata)
                )?;
                writeln!(
                    f,
                    " memo fields = ~{}MB",
                    round_memory(total_memo_fields)
                )?;

                Ok(())
            }
        }

        DisplayShort(self)
    }
}
#[salsa::db]
impl ty_module_resolver::Db for ProjectDatabase {
    /// Returns the search paths of the `Program` singleton.
    fn search_paths(&self) -> &SearchPaths {
        Program::get(self).search_paths(self)
    }
}
#[salsa::db]
impl SemanticDb for ProjectDatabase {
    fn should_check_file(&self, file: File) -> bool {
        // `project` is `None` only while the database is being constructed;
        // no files are checked in that window.
        self.project
            .is_some_and(|project| project.should_check_file(self, file))
    }

    fn rule_selection(&self, file: File) -> &RuleSelection {
        // Rule selection is resolved per file via `file_settings`.
        let settings = file_settings(self, file);
        settings.rules(self)
    }

    fn lint_registry(&self) -> &LintRegistry {
        ty_python_semantic::default_lint_registry()
    }

    fn analysis_settings(&self) -> &AnalysisSettings {
        self.project().settings(self).analysis()
    }

    fn verbose(&self) -> bool {
        self.project().verbose(self)
    }
}
#[salsa::db]
impl SourceDb for ProjectDatabase {
    fn vendored(&self) -> &VendoredFileSystem {
        // The vendored file system bundled by `ty_vendored`.
        ty_vendored::file_system()
    }

    fn system(&self) -> &dyn System {
        &*self.system
    }

    fn files(&self) -> &Files {
        &self.files
    }

    fn python_version(&self) -> ruff_python_ast::PythonVersion {
        Program::get(self).python_version(self)
    }
}
#[salsa::db]
impl salsa::Database for ProjectDatabase {}

#[salsa::db]
impl Db for ProjectDatabase {
    fn project(&self) -> Project {
        // Set at the end of `ProjectDatabase::new`; only `None` mid-construction.
        self.project.unwrap()
    }

    fn dyn_clone(&self) -> Box<dyn Db> {
        Box::new(self.clone())
    }
}
// Formatter integration; only compiled when the `format` feature is enabled.
#[cfg(feature = "format")]
mod format {
    use crate::ProjectDatabase;
    use ruff_db::files::File;
    use ruff_python_formatter::{Db as FormatDb, PyFormatOptions};

    #[salsa::db]
    impl FormatDb for ProjectDatabase {
        fn format_options(&self, file: File) -> PyFormatOptions {
            // Derive the default format options from the file's source type.
            let source_ty = file.source_type(self);
            PyFormatOptions::from_source_type(source_ty)
        }
    }
}
#[cfg(any(test, feature = "testing"))]
pub(crate) mod tests {
    use std::sync::{Arc, Mutex};

    use ruff_db::Db as SourceDb;
    use ruff_db::files::{FileRootKind, Files};
    use ruff_db::system::{DbWithTestSystem, System, TestSystem};
    use ruff_db::vendored::VendoredFileSystem;
    use ty_module_resolver::SearchPathSettings;
    use ty_python_semantic::lint::{LintRegistry, RuleSelection};
    use ty_python_semantic::{
        AnalysisSettings, Program, ProgramSettings, PythonPlatform, PythonVersionWithSource,
    };

    use crate::db::Db;
    use crate::{Project, ProjectMetadata};

    // Shared log of Salsa events, filled by the callback registered in `TestDb::new`.
    type Events = Arc<Mutex<Vec<salsa::Event>>>;

    /// In-memory project database for tests. Records every Salsa event so
    /// tests can assert on query (re-)execution via `take_salsa_events`.
    #[salsa::db]
    #[derive(Clone)]
    pub struct TestDb {
        storage: salsa::Storage<Self>,
        events: Events,
        files: Files,
        system: TestSystem,
        vendored: VendoredFileSystem,
        project: Option<Project>,
    }

    impl TestDb {
        pub fn new(project: ProjectMetadata) -> Self {
            let events = Events::default();
            let mut db = Self {
                storage: salsa::Storage::new(Some(Box::new({
                    // Capture every Salsa event into the shared log.
                    let events = events.clone();
                    move |event| {
                        let mut events = events.lock().unwrap();
                        events.push(event);
                    }
                }))),
                system: TestSystem::default(),
                vendored: ty_vendored::file_system().clone(),
                files: Files::default(),
                events,
                project: None,
            };

            let project = Project::from_metadata(&db, project).unwrap();
            db.project = Some(project);
            db
        }

        /// Initializes the `Program` singleton with default settings and a
        /// search path rooted at the project directory.
        pub fn init_program(&mut self) -> anyhow::Result<()> {
            let root = self.project().root(self);
            let search_paths = SearchPathSettings::new(vec![root.to_path_buf()])
                .to_search_paths(self.system(), self.vendored())
                .expect("Valid search path settings");
            Program::from_settings(
                self,
                ProgramSettings {
                    python_version: PythonVersionWithSource::default(),
                    python_platform: PythonPlatform::default(),
                    search_paths,
                },
            );

            self.files().try_add_root(self, root, FileRootKind::Project);
            Ok(())
        }
    }

    impl TestDb {
        /// Takes the salsa events.
        pub fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
            let mut events = self.events.lock().unwrap();

            std::mem::take(&mut *events)
        }
    }

    impl DbWithTestSystem for TestDb {
        fn test_system(&self) -> &TestSystem {
            &self.system
        }

        fn test_system_mut(&mut self) -> &mut TestSystem {
            &mut self.system
        }
    }

    #[salsa::db]
    impl SourceDb for TestDb {
        fn vendored(&self) -> &VendoredFileSystem {
            &self.vendored
        }

        fn system(&self) -> &dyn System {
            &self.system
        }

        fn files(&self) -> &Files {
            &self.files
        }

        fn python_version(&self) -> ruff_python_ast::PythonVersion {
            Program::get(self).python_version(self)
        }
    }

    #[salsa::db]
    impl ty_module_resolver::Db for TestDb {
        fn search_paths(&self) -> &ty_module_resolver::SearchPaths {
            Program::get(self).search_paths(self)
        }
    }

    #[salsa::db]
    impl ty_python_semantic::Db for TestDb {
        fn should_check_file(&self, file: ruff_db::files::File) -> bool {
            // Tests check every file except vendored ones.
            !file.path(self).is_vendored_path()
        }

        fn rule_selection(&self, _file: ruff_db::files::File) -> &RuleSelection {
            self.project().rules(self)
        }

        fn lint_registry(&self) -> &LintRegistry {
            ty_python_semantic::default_lint_registry()
        }

        fn analysis_settings(&self) -> &AnalysisSettings {
            self.project().settings(self).analysis()
        }

        fn verbose(&self) -> bool {
            false
        }
    }

    #[salsa::db]
    impl Db for TestDb {
        fn project(&self) -> Project {
            self.project.unwrap()
        }

        fn dyn_clone(&self) -> Box<dyn Db> {
            Box::new(self.clone())
        }
    }

    #[salsa::db]
    impl salsa::Database for TestDb {}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/lib.rs | crates/ty_project/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods in ty crates"
)]
use crate::glob::{GlobFilterCheckMode, IncludeResult};
use crate::metadata::options::{OptionDiagnostic, ToSettingsError};
use crate::walk::{ProjectFilesFilter, ProjectFilesWalker};
#[cfg(feature = "testing")]
pub use db::tests::TestDb;
pub use db::{ChangeResult, CheckMode, Db, ProjectDatabase, SalsaMemoryDump};
use files::{Index, Indexed, IndexedFiles};
use metadata::settings::Settings;
pub use metadata::{ProjectMetadata, ProjectMetadataError};
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, Severity, Span, SubDiagnostic, SubDiagnosticSeverity,
};
use ruff_db::files::{File, FileRootKind};
use ruff_db::parsed::parsed_module;
use ruff_db::source::{SourceTextError, source_text};
use ruff_db::system::{SystemPath, SystemPathBuf};
use rustc_hash::FxHashSet;
use salsa::Durability;
use salsa::Setter;
use std::backtrace::BacktraceStatus;
use std::collections::hash_set;
use std::iter::FusedIterator;
use std::panic::{AssertUnwindSafe, UnwindSafe};
use std::sync::Arc;
use thiserror::Error;
use ty_python_semantic::add_inferred_python_version_hint_to_diagnostic;
use ty_python_semantic::lint::RuleSelection;
use ty_python_semantic::types::check_types;
mod db;
mod files;
mod glob;
pub mod metadata;
mod walk;
pub mod watch;
/// The project as a Salsa ingredient.
///
/// ## How is a project different from a program?
/// There are two (related) motivations:
///
/// 1. Program is defined in `ruff_db` and it can't reference the settings types for the linter and formatter
///    without introducing a cyclic dependency. The project is defined in a higher level crate
///    where it can reference these setting types.
/// 2. Running `ruff check` with different target versions results in different programs (settings) but
///    it remains the same project. That's why program is a narrowed view of the project only
///    holding on to the most fundamental settings required for checking.
#[salsa::input(heap_size=ruff_memory_usage::heap_size)]
#[derive(Debug)]
pub struct Project {
    /// The files that are open in the project, [`None`] if there are no open files.
    #[returns(ref)]
    #[default]
    open_fileset: FxHashSet<File>,

    /// The first-party files of this project.
    #[default]
    #[returns(ref)]
    file_set: IndexedFiles,

    /// The metadata describing the project, including the unresolved options.
    ///
    /// We box the metadata here because it's a fairly large type and
    /// reducing the size of `Project` helps reduce the size of the
    /// salsa allocated table for `Project`.
    #[returns(deref)]
    pub metadata: Box<ProjectMetadata>,

    /// The resolved project settings.
    ///
    /// We box the settings here because it's a fairly large type and
    /// reducing the size of `Project` helps reduce the size of the
    /// salsa allocated table for `Project`.
    #[returns(deref)]
    pub settings: Box<Settings>,

    /// The paths that should be included when checking this project.
    ///
    /// The default (when this list is empty) is to include all files in the project root
    /// (that satisfy the configured include and exclude patterns).
    /// However, it's sometimes desired to only check a subset of the project, e.g. to see
    /// the diagnostics for a single file or a folder.
    ///
    /// This list gets initialized by the paths passed to `ty check <paths>`
    ///
    /// ## How is this different from `open_files`?
    ///
    /// The `included_paths` is closely related to `open_files`. The only difference is that
    /// `open_files` is already a resolved set of files whereas `included_paths` is only a list of paths
    /// that are resolved to files by indexing them. The other difference is that
    /// new files added to any directory in `included_paths` will be indexed and added to the project
    /// whereas `open_files` needs to be updated manually (e.g. by the IDE).
    ///
    /// In short, `open_files` is cheaper in contexts where the set of files is known, like
    /// in an IDE when the user only wants to check the open tabs. This could be modeled
    /// with `included_paths` too but it would require an explicit walk dir step that's simply unnecessary.
    #[default]
    #[returns(deref)]
    included_paths_list: Vec<SystemPathBuf>,

    /// Diagnostics that were generated when resolving the project settings.
    #[returns(deref)]
    settings_diagnostics: Vec<OptionDiagnostic>,

    /// The mode in which the project should be checked.
    ///
    /// This changes the behavior of `check` to either check only the open files or all files in
    /// the project including the virtual files that may exist in the editor.
    #[default]
    check_mode: CheckMode,

    /// Whether verbose output was requested.
    #[default]
    verbose_flag: bool,

    /// Whether to enforce exclusion rules even to files explicitly passed to ty on the command line.
    #[default]
    force_exclude_flag: bool,
}
/// A progress reporter.
pub trait ProgressReporter: Send + Sync {
    /// Initialize the reporter with the number of files.
    fn set_files(&mut self, files: usize);

    /// Report the completion of checking a given file along with its diagnostics.
    ///
    /// Takes `&self` because it is called concurrently from the per-file
    /// checking tasks (see `Project::check`).
    fn report_checked_file(&self, db: &ProjectDatabase, file: File, diagnostics: &[Diagnostic]);

    /// Reports settings or IO related diagnostics. The diagnostics
    /// can belong to different files or no file at all.
    /// But it's never a file for which [`Self::report_checked_file`] gets called.
    fn report_diagnostics(&mut self, db: &ProjectDatabase, diagnostics: Vec<Diagnostic>);
}
/// Reporter that collects all diagnostics into a `Vec`.
///
/// The `Mutex` lets `report_checked_file` (which takes `&self`) push
/// diagnostics from multiple checking threads.
#[derive(Default)]
pub struct CollectReporter(std::sync::Mutex<Vec<Diagnostic>>);
impl CollectReporter {
    /// Consumes the reporter and returns all collected diagnostics,
    /// sorted by their rendering sort key.
    pub fn into_sorted(self, db: &dyn Db) -> Vec<Diagnostic> {
        let mut diagnostics = self.0.into_inner().unwrap();

        diagnostics.sort_by(|left, right| {
            left.rendering_sort_key(db)
                .cmp(&right.rendering_sort_key(db))
        });

        diagnostics
    }
}
impl ProgressReporter for CollectReporter {
    fn set_files(&mut self, _files: usize) {}

    fn report_checked_file(&self, _db: &ProjectDatabase, _file: File, diagnostics: &[Diagnostic]) {
        if diagnostics.is_empty() {
            // Avoid taking the lock when there's nothing to record.
            return;
        }

        // `extend_from_slice` clones the diagnostics in a single pass and
        // reserves capacity up front (replaces `extend(iter().map(Clone::clone))`,
        // the clippy `map_clone` anti-idiom).
        self.0.lock().unwrap().extend_from_slice(diagnostics);
    }

    fn report_diagnostics(&mut self, _db: &ProjectDatabase, diagnostics: Vec<Diagnostic>) {
        // `&mut self` guarantees exclusive access, so `get_mut` skips locking.
        self.0.get_mut().unwrap().extend(diagnostics);
    }
}
#[salsa::tracked]
impl Project {
    /// Creates a project from `metadata`, resolving its options into settings.
    ///
    /// Returns an error if the options cannot be converted to settings.
    pub fn from_metadata(db: &dyn Db, metadata: ProjectMetadata) -> Result<Self, ToSettingsError> {
        let (settings, diagnostics) = metadata.options().to_settings(db, metadata.root())?;

        // This adds a file root for the project itself. This enables
        // tracking of when changes are made to the files in a project
        // at the directory level. At time of writing (2025-07-17),
        // this is used for caching completions for submodules.
        db.files()
            .try_add_root(db, metadata.root(), FileRootKind::Project);

        let project = Project::builder(Box::new(metadata), Box::new(settings), diagnostics)
            .durability(Durability::MEDIUM)
            .open_fileset_durability(Durability::LOW)
            .file_set_durability(Durability::LOW)
            .new(db);

        Ok(project)
    }
    /// Returns the root directory of the project.
    pub fn root(self, db: &dyn Db) -> &SystemPath {
        self.metadata(db).root()
    }

    /// Returns the name of the project.
    pub fn name(self, db: &dyn Db) -> &str {
        self.metadata(db).name()
    }

    /// Returns the resolved linter rules for the project.
    ///
    /// This is a salsa query to prevent re-computing queries if other, unrelated
    /// settings change. For example, we don't want that changing the terminal settings
    /// invalidates any type checking queries.
    #[salsa::tracked(returns(deref), heap_size=ruff_memory_usage::heap_size)]
    pub fn rules(self, db: &dyn Db) -> Arc<RuleSelection> {
        self.settings(db).to_rules()
    }
    /// Returns `true` if `path` is both part of the project and included (see `included_paths_list`).
    ///
    /// Unlike [Self::files], this method does not respect `.gitignore` files. It only checks
    /// the project's include and exclude settings as well as the paths that were passed to `ty check <paths>`.
    /// This means, that this method is an over-approximation of `Self::files` and may return `true` for paths
    /// that won't be included when checking the project because they're ignored in a `.gitignore` file.
    pub fn is_file_included(self, db: &dyn Db, path: &SystemPath) -> bool {
        ProjectFilesFilter::from_project(db, self)
            .is_file_included(path, GlobFilterCheckMode::Adhoc)
            == IncludeResult::Included
    }

    /// Same as [`Self::is_file_included`], but for a directory path.
    pub fn is_directory_included(self, db: &dyn Db, path: &SystemPath) -> bool {
        ProjectFilesFilter::from_project(db, self)
            .is_directory_included(path, GlobFilterCheckMode::Adhoc)
            == IncludeResult::Included
    }
    /// Applies new `metadata` to the project, re-resolving the settings and
    /// re-indexing the project files.
    pub fn reload(self, db: &mut dyn Db, metadata: ProjectMetadata) {
        tracing::debug!("Reloading project");
        // Reloading never moves the project root.
        assert_eq!(self.root(db), metadata.root());

        if &metadata != self.metadata(db) {
            match metadata.options().to_settings(db, metadata.root()) {
                Ok((settings, settings_diagnostics)) => {
                    // Only write inputs whose values actually changed.
                    if self.settings(db) != &settings {
                        self.set_settings(db).to(Box::new(settings));
                    }

                    if self.settings_diagnostics(db) != settings_diagnostics {
                        self.set_settings_diagnostics(db).to(settings_diagnostics);
                    }
                }
                Err(error) => {
                    // Keep going with the new metadata but surface the settings error.
                    self.set_settings_diagnostics(db)
                        .to(vec![error.into_diagnostic()]);
                }
            }

            self.set_metadata(db).to(Box::new(metadata));
        }

        self.reload_files(db);
    }
    /// Checks the project and its dependencies according to the project's check mode.
    ///
    /// Settings and IO diagnostics are reported first via
    /// `ProgressReporter::report_diagnostics`; per-file diagnostics are then
    /// produced in parallel and reported via `report_checked_file`.
    pub(crate) fn check(self, db: &ProjectDatabase, reporter: &mut dyn ProgressReporter) {
        let project_span = tracing::debug_span!("Project::check");
        let _span = project_span.enter();

        tracing::debug!(
            "Checking {} in project '{name}'",
            self.check_mode(db),
            name = self.name(db)
        );

        let mut diagnostics: Vec<Diagnostic> = self
            .settings_diagnostics(db)
            .iter()
            .map(OptionDiagnostic::to_diagnostic)
            .collect();

        let files = ProjectFiles::new(db, self);
        reporter.set_files(files.len());

        diagnostics.extend(
            files
                .diagnostics()
                .iter()
                .map(IOErrorDiagnostic::to_diagnostic),
        );

        reporter.report_diagnostics(db, diagnostics);

        let open_files = self.open_files(db);

        let check_start = ruff_db::Instant::now();

        {
            // Each spawned task gets its own cloned database handle.
            let db = db.clone();
            let project_span = &project_span;

            rayon::scope(move |scope| {
                for file in &files {
                    let db = db.clone();
                    let reporter = &*reporter;
                    scope.spawn(move |_| {
                        let check_file_span =
                            tracing::debug_span!(parent: project_span, "check_file", ?file);
                        let _entered = check_file_span.entered();

                        match check_file_impl(&db, file) {
                            Ok(diagnostics) => {
                                reporter.report_checked_file(&db, file, diagnostics);

                                // This is outside `check_file_impl` to avoid that opening or closing
                                // a file invalidates the `check_file_impl` query of every file!
                                if !open_files.contains(&file) {
                                    // The module has already been parsed by `check_file_impl`.
                                    // We only retrieve it here so that we can call `clear` on it.
                                    let parsed = parsed_module(&db, file);

                                    // Drop the AST now that we are done checking this file. It is not currently open,
                                    // so it is unlikely to be accessed again soon. If any queries need to access the AST
                                    // from across files, it will be re-parsed.
                                    parsed.clear();
                                }
                            }
                            Err(io_error) => {
                                reporter.report_checked_file(
                                    &db,
                                    file,
                                    std::slice::from_ref(io_error),
                                );
                            }
                        }
                    });
                }
            });
        };

        tracing::debug!(
            "Checking all files took {:.3}s",
            check_start.elapsed().as_secs_f64(),
        );
    }
    /// Checks a single file, returning an empty vec when the file isn't part
    /// of the checked file set.
    pub(crate) fn check_file(self, db: &dyn Db, file: File) -> Vec<Diagnostic> {
        if !self.should_check_file(db, file) {
            return Vec::new();
        }

        match check_file_impl(db, file) {
            Ok(diagnostics) => diagnostics.to_vec(),
            Err(diagnostic) => vec![diagnostic.clone()],
        }
    }
/// Opens a file in the project.
pub fn open_file(self, db: &mut dyn Db, file: File) {
tracing::debug!("Opening file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
open_files.insert(file);
self.set_open_files(db, open_files);
}
/// Closes a file in the project.
pub fn close_file(self, db: &mut dyn Db, file: File) -> bool {
tracing::debug!("Closing file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
let removed = open_files.remove(&file);
if removed {
self.set_open_files(db, open_files);
}
removed
}
    /// Replaces the set of explicitly included paths (from `ty check <paths>`)
    /// and forces a re-index of the project files.
    pub fn set_included_paths(self, db: &mut dyn Db, paths: Vec<SystemPathBuf>) {
        tracing::debug!("Setting included paths: {paths}", paths = paths.len());
        self.set_included_paths_list(db).to(paths);
        // The included paths changed, so the indexed file set is stale.
        self.reload_files(db);
    }
pub fn set_verbose(self, db: &mut dyn Db, verbose: bool) {
if self.verbose_flag(db) != verbose {
self.set_verbose_flag(db).to(verbose);
}
}
    /// Returns whether verbose output was requested.
    pub fn verbose(self, db: &dyn Db) -> bool {
        self.verbose_flag(db)
    }
pub fn set_force_exclude(self, db: &mut dyn Db, force: bool) {
if self.force_exclude_flag(db) != force {
self.set_force_exclude_flag(db).to(force);
}
}
    /// Returns whether exclude filters also apply to paths passed explicitly
    /// on the CLI (see its use in `ProjectFilesWalker`).
    pub fn force_exclude(self, db: &dyn Db) -> bool {
        self.force_exclude_flag(db)
    }
/// Returns the paths that should be checked.
///
/// The default is to check the entire project in which case this method returns
/// the project root. However, users can specify to only check specific sub-folders or
/// even files of a project by using `ty check <paths>`. In that case, this method
/// returns the provided absolute paths.
///
/// Note: The CLI doesn't prohibit users from specifying paths outside the project root.
/// This can be useful to check arbitrary files, but it isn't something we recommend.
/// We should try to support this use case but it's okay if there are some limitations around it.
fn included_paths_or_root(self, db: &dyn Db) -> &[SystemPathBuf] {
match self.included_paths_list(db) {
[] => std::slice::from_ref(&self.metadata(db).root),
paths => paths,
}
}
    /// Returns the files that are currently open in the project.
    ///
    /// The returned set is empty if no files are open (it is never `None`).
    pub fn open_files(self, db: &dyn Db) -> &FxHashSet<File> {
        self.open_fileset(db)
    }
    /// Sets the open files in the project, replacing the previous set.
    ///
    /// For incremental updates see [`Self::open_file`] and [`Self::close_file`].
    #[tracing::instrument(level = "debug", skip(self, db))]
    pub fn set_open_files(self, db: &mut dyn Db, open_files: FxHashSet<File>) {
        tracing::debug!("Set open project files (count: {})", open_files.len());
        self.set_open_fileset(db).to(open_files);
    }
    /// This takes the open files from the project and returns them.
    ///
    /// Replaces the stored set with an empty one; callers are expected to
    /// write a set back via [`Self::set_open_files`].
    fn take_open_files(self, db: &mut dyn Db) -> FxHashSet<File> {
        tracing::debug!("Take open project files");
        // Salsa will cancel any pending queries and remove its own reference to `open_files`
        // so that the reference counter to `open_files` now drops to 1.
        self.set_open_fileset(db).to(FxHashSet::default())
    }
    /// Returns `true` if the file should be checked.
    ///
    /// This depends on the project's check mode:
    /// * For [`OpenFiles`], it checks if the file is either explicitly set as an open file using
    ///   [`open_file`] or a system virtual path
    /// * For [`AllFiles`], it checks if the file is either a system virtual path or a part of the
    ///   indexed files in the project
    ///
    /// [`open_file`]: Self::open_file
    /// [`OpenFiles`]: CheckMode::OpenFiles
    /// [`AllFiles`]: CheckMode::AllFiles
    pub fn should_check_file(self, db: &dyn Db, file: File) -> bool {
        let path = file.path(db);
        // Try to return early to avoid adding a dependency on `open_files` or `file_set` which
        // both have a durability of `LOW`. Vendored files are never checked.
        if path.is_vendored_path() {
            return false;
        }
        match self.check_mode(db) {
            CheckMode::OpenFiles => self.open_files(db).contains(&file),
            CheckMode::AllFiles => {
                // Virtual files are always checked.
                path.is_system_virtual_path() || self.files(db).contains(&file)
            }
        }
    }
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn remove_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Removing file `{}` from project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.remove(file);
}
pub fn add_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Adding file `{}` to project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.insert(file);
}
    /// Replaces the diagnostics from indexing the project files with `diagnostics`.
    ///
    /// This is a no-op if the project files haven't been indexed yet.
    pub fn replace_index_diagnostics(self, db: &mut dyn Db, diagnostics: Vec<IOErrorDiagnostic>) {
        let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
            // Not indexed yet; the diagnostics will be produced by the initial indexing.
            return;
        };
        index.set_diagnostics(diagnostics);
    }
    /// Returns the files belonging to this project.
    ///
    /// Walks the file system and indexes the files on first access; subsequent
    /// calls return the cached index until `reload_files` resets it.
    pub fn files(self, db: &dyn Db) -> Indexed<'_> {
        let files = self.file_set(db);
        match files.get() {
            // First access: walk the project directories and populate the index.
            Index::Lazy(vacant) => {
                let _entered =
                    tracing::debug_span!("Project::index_files", project = %self.name(db))
                        .entered();
                let start = ruff_db::Instant::now();
                let walker = ProjectFilesWalker::new(db);
                let (files, diagnostics) = walker.collect_set(db);
                tracing::info!(
                    "Indexed {} file(s) in {:.3}s",
                    files.len(),
                    start.elapsed().as_secs_f64()
                );
                vacant.set(files, diagnostics)
            }
            Index::Indexed(indexed) => indexed,
        }
    }
pub fn reload_files(self, db: &mut dyn Db) {
tracing::debug!("Reloading files for project `{}`", self.name(db));
if !self.file_set(db).is_lazy() {
// Force a re-index of the files in the next revision.
self.set_file_set(db).to(IndexedFiles::lazy());
}
}
/// Check if the project's settings have any issues
pub fn check_settings(&self, db: &dyn Db) -> Vec<Diagnostic> {
self.settings_diagnostics(db)
.iter()
.map(OptionDiagnostic::to_diagnostic)
.collect()
}
}
/// Checks a single file, returning its diagnostics sorted by source position,
/// or a single IO-error diagnostic if the file's source text couldn't be read.
///
/// Collected in order: parser syntax errors, unsupported-syntax errors (with a
/// hint about the inferred Python version), then type-check diagnostics.
/// Panics raised during type checking are converted into diagnostics by `catch`.
#[salsa::tracked(returns(ref), heap_size=ruff_memory_usage::heap_size)]
pub(crate) fn check_file_impl(db: &dyn Db, file: File) -> Result<Box<[Diagnostic]>, Diagnostic> {
    let mut diagnostics: Vec<Diagnostic> = Vec::new();
    // Abort checking if there are IO errors.
    let source = source_text(db, file);
    if let Some(read_error) = source.read_error() {
        return Err(IOErrorDiagnostic {
            file: Some(file),
            error: read_error.clone().into(),
        }
        .to_diagnostic());
    }
    let parsed = parsed_module(db, file);
    let parsed_ref = parsed.load(db);
    // Plain syntax errors reported by the parser.
    diagnostics.extend(
        parsed_ref
            .errors()
            .iter()
            .map(|error| Diagnostic::invalid_syntax(file, &error.error, error)),
    );
    // Syntax that parses but isn't supported by the inferred Python version.
    diagnostics.extend(parsed_ref.unsupported_syntax_errors().iter().map(|error| {
        let mut error = Diagnostic::invalid_syntax(file, error, error);
        add_inferred_python_version_hint_to_diagnostic(db, &mut error, "parsing syntax");
        error
    }));
    {
        // `catch` converts panics in `check_types` into diagnostics instead of
        // unwinding; `AssertUnwindSafe` is needed to move `db` into the closure.
        let db = AssertUnwindSafe(db);
        match catch(&**db, file, || check_types(*db, file)) {
            Ok(Some(type_check_diagnostics)) => {
                diagnostics.extend(type_check_diagnostics);
            }
            // `None` means the query was cancelled (see `catch`); report nothing extra.
            Ok(None) => {}
            Err(diagnostic) => diagnostics.push(diagnostic),
        }
    }
    // Render diagnostics in source order (by primary-span start offset;
    // diagnostics without a span sort to the file start).
    diagnostics.sort_unstable_by_key(|diagnostic| {
        diagnostic
            .primary_span()
            .and_then(|span| span.range())
            .unwrap_or_default()
            .start()
    });
    Ok(diagnostics.into_boxed_slice())
}
/// The set of files to check, selected by the project's [`CheckMode`].
#[derive(Debug)]
enum ProjectFiles<'a> {
    /// The explicitly opened files (for `CheckMode::OpenFiles`).
    OpenFiles(&'a FxHashSet<File>),
    /// All indexed project files (for `CheckMode::AllFiles`).
    Indexed(files::Indexed<'a>),
}
impl<'a> ProjectFiles<'a> {
    /// Selects the file set that matches the project's check mode.
    fn new(db: &'a dyn Db, project: Project) -> Self {
        match project.check_mode(db) {
            CheckMode::OpenFiles => Self::OpenFiles(project.open_files(db)),
            CheckMode::AllFiles => Self::Indexed(project.files(db)),
        }
    }

    /// IO errors collected while indexing; always empty for the open-files set.
    fn diagnostics(&self) -> &[IOErrorDiagnostic] {
        if let Self::Indexed(files) = self {
            files.diagnostics()
        } else {
            &[]
        }
    }

    /// The number of files to check.
    fn len(&self) -> usize {
        match self {
            Self::OpenFiles(files) => files.len(),
            Self::Indexed(files) => files.len(),
        }
    }
}
impl<'a> IntoIterator for &'a ProjectFiles<'a> {
    type Item = File;
    type IntoIter = ProjectFilesIter<'a>;
    // Dispatches to the iterator matching the underlying file set.
    fn into_iter(self) -> Self::IntoIter {
        match self {
            ProjectFiles::OpenFiles(files) => ProjectFilesIter::OpenFiles(files.iter()),
            ProjectFiles::Indexed(files) => ProjectFilesIter::Indexed(files.into_iter()),
        }
    }
}
/// Iterator over the files of a [`ProjectFiles`] set.
enum ProjectFilesIter<'db> {
    OpenFiles(hash_set::Iter<'db, File>),
    Indexed(files::IndexedIter<'db>),
}
impl Iterator for ProjectFilesIter<'_> {
    type Item = File;
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            // The open-files iterator yields `&File`; copy to yield by value.
            ProjectFilesIter::OpenFiles(files) => files.next().copied(),
            ProjectFilesIter::Indexed(files) => files.next(),
        }
    }
}
impl FusedIterator for ProjectFilesIter<'_> {}
/// A diagnostic for an IO error hit while reading or walking project files.
#[derive(Debug, Clone, get_size2::GetSize)]
pub struct IOErrorDiagnostic {
    // `None` when the error isn't tied to a specific file (e.g. walk errors).
    file: Option<File>,
    error: IOErrorKind,
}
impl IOErrorDiagnostic {
fn to_diagnostic(&self) -> Diagnostic {
let mut diag = Diagnostic::new(DiagnosticId::Io, Severity::Error, &self.error);
if let Some(file) = self.file {
diag.annotate(Annotation::primary(Span::from(file)));
}
diag
}
}
/// The source of an [`IOErrorDiagnostic`]: a directory-walk error or a
/// source-text read error.
#[derive(Error, Debug, Clone, get_size2::GetSize)]
enum IOErrorKind {
    #[error(transparent)]
    Walk(#[from] walk::WalkError),
    #[error(transparent)]
    SourceText(#[from] SourceTextError),
}
/// Runs `f`, converting any panic into a `Fatal` diagnostic asking the user
/// to file a bug report.
///
/// Returns:
/// * `Ok(Some(result))` if `f` completed normally,
/// * `Ok(None)` if `f` was cancelled (see `salsa::Cancelled::catch`),
/// * `Err(diagnostic)` if `f` panicked.
fn catch<F, R>(db: &dyn Db, file: File, f: F) -> Result<Option<R>, Diagnostic>
where
    F: FnOnce() -> R + UnwindSafe,
{
    match ruff_db::panic::catch_unwind(|| {
        // Ignore salsa errors
        salsa::Cancelled::catch(f).ok()
    }) {
        Ok(result) => Ok(result),
        Err(error) => {
            let message = error.to_diagnostic_message(Some(file.path(db)));
            let mut diagnostic = Diagnostic::new(DiagnosticId::Panic, Severity::Fatal, message);
            diagnostic.sub(SubDiagnostic::new(
                SubDiagnosticSeverity::Info,
                "This indicates a bug in ty.",
            ));
            let report_message = "If you could open an issue at https://github.com/astral-sh/ty/issues/new?title=%5Bpanic%5D, we'd be very appreciative!";
            diagnostic.sub(SubDiagnostic::new(
                SubDiagnosticSeverity::Info,
                report_message,
            ));
            // Attach platform, version, and CLI args so panic reports are actionable.
            diagnostic.sub(SubDiagnostic::new(
                SubDiagnosticSeverity::Info,
                format!(
                    "Platform: {os} {arch}",
                    os = std::env::consts::OS,
                    arch = std::env::consts::ARCH
                ),
            ));
            if let Some(version) = ruff_db::program_version() {
                diagnostic.sub(SubDiagnostic::new(
                    SubDiagnosticSeverity::Info,
                    format!("Version: {version}"),
                ));
            }
            diagnostic.sub(SubDiagnostic::new(
                SubDiagnosticSeverity::Info,
                format!(
                    "Args: {args:?}",
                    args = std::env::args().collect::<Vec<_>>()
                ),
            ));
            // Include the Rust backtrace if captured, or tell the user how to enable it.
            if let Some(backtrace) = error.backtrace {
                match backtrace.status() {
                    BacktraceStatus::Disabled => {
                        diagnostic.sub(SubDiagnostic::new(
                            SubDiagnosticSeverity::Info,
                            "run with `RUST_BACKTRACE=1` environment variable to show the full backtrace information",
                        ));
                    }
                    BacktraceStatus::Captured => {
                        diagnostic.sub(SubDiagnostic::new(
                            SubDiagnosticSeverity::Info,
                            format!("Backtrace:\n{backtrace}"),
                        ));
                    }
                    // Unsupported/unknown backtrace states: nothing useful to show.
                    _ => {}
                }
            }
            // The salsa query stack at the time of the panic, if available.
            // Rendering it requires an attached database.
            if let Some(backtrace) = error.salsa_backtrace {
                salsa::attach(db, || {
                    diagnostic.sub(SubDiagnostic::new(
                        SubDiagnosticSeverity::Info,
                        backtrace.to_string(),
                    ));
                });
            }
            Err(diagnostic)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::ProjectMetadata;
    use crate::check_file_impl;
    use crate::db::tests::TestDb;
    use ruff_db::files::system_path_to_file;
    use ruff_db::source::source_text;
    use ruff_db::system::{DbWithTestSystem, DbWithWritableSystem as _, SystemPath, SystemPathBuf};
    use ruff_db::testing::assert_function_query_was_not_run;
    use ruff_python_ast::name::Name;
    use ty_python_semantic::types::check_types;

    #[test]
    fn check_file_skips_type_checking_when_file_cant_be_read() -> ruff_db::system::Result<()> {
        let project = ProjectMetadata::new(Name::new_static("test"), SystemPathBuf::from("/"));
        let mut db = TestDb::new(project);
        db.init_program().unwrap();
        let path = SystemPath::new("test.py");
        db.write_file(path, "x = 10")?;
        let file = system_path_to_file(&db, path).unwrap();
        // Now the file gets deleted before we had a chance to read its source text.
        db.memory_file_system().remove_file(path)?;
        file.sync(&mut db);
        // Reading the deleted file yields an empty source text...
        assert_eq!(source_text(&db, file).as_str(), "");
        // ...but checking reports the IO error instead of type-checking the empty text.
        assert_eq!(
            check_file_impl(&db, file)
                .as_ref()
                .unwrap_err()
                .primary_message()
                .to_string(),
            "Failed to read file: No such file or directory".to_string()
        );
        let events = db.take_salsa_events();
        // Type checking must be skipped entirely when the read fails.
        assert_function_query_was_not_run(&db, check_types, file, &events);
        // The user now creates a new file with an empty text. The source text
        // content returned by `source_text` remains unchanged, but the diagnostics should get updated.
        db.write_file(path, "").unwrap();
        assert_eq!(source_text(&db, file).as_str(), "");
        assert_eq!(
            check_file_impl(&db, file)
                .as_ref()
                .unwrap()
                .iter()
                .map(|diagnostic| diagnostic.primary_message().to_string())
                .collect::<Vec<_>>(),
            vec![] as Vec<String>
        );
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/glob.rs | crates/ty_project/src/glob.rs | use ruff_db::system::SystemPath;
pub(crate) use exclude::{ExcludeFilter, ExcludeFilterBuilder};
pub(crate) use include::{IncludeFilter, IncludeFilterBuilder};
pub(crate) use portable::{
AbsolutePortableGlobPattern, PortableGlobError, PortableGlobKind, PortableGlobPattern,
};
mod exclude;
mod include;
mod portable;
/// Path filtering based on an exclude and include glob pattern set.
///
/// Exclude patterns take precedence over includes.
#[derive(Clone, Debug, Eq, PartialEq, get_size2::GetSize)]
pub struct IncludeExcludeFilter {
    // Patterns selecting the paths to check.
    include: IncludeFilter,
    // Patterns removing paths from the checked set; these win over `include`.
    exclude: ExcludeFilter,
}
impl IncludeExcludeFilter {
    pub(crate) fn new(include: IncludeFilter, exclude: ExcludeFilter) -> Self {
        Self { include, exclude }
    }

    /// Returns whether this directory is included in this filter.
    ///
    /// Note, this function never returns [`IncludeResult::Included`] for a path that is not
    /// included or excluded. However, it may return [`IncludeResult::Included`] for directories
    /// that are not excluded but require traversal to decide whether any of their subdirectories
    /// or files are included. This, for example, is the case when using wildcard include-patterns
    /// like `**/test`: prefix wildcards require traversing `src` because it can't be known ahead
    /// of time whether it contains a `test` directory or file.
    pub(crate) fn is_directory_maybe_included(
        &self,
        path: &SystemPath,
        mode: GlobFilterCheckMode,
    ) -> IncludeResult {
        // Excludes always win over includes.
        if self.exclude.match_directory(path, mode) {
            return IncludeResult::Excluded;
        }
        if self.include.match_directory(path) {
            IncludeResult::Included
        } else {
            IncludeResult::NotIncluded
        }
    }

    /// Returns whether this file is included in this filter.
    pub(crate) fn is_file_included(
        &self,
        path: &SystemPath,
        mode: GlobFilterCheckMode,
    ) -> IncludeResult {
        // Excludes always win over includes.
        if self.exclude.match_file(path, mode) {
            return IncludeResult::Excluded;
        }
        if self.include.match_file(path) {
            IncludeResult::Included
        } else {
            IncludeResult::NotIncluded
        }
    }
}
impl std::fmt::Display for IncludeExcludeFilter {
    // Renders as `include=<patterns>, exclude=<patterns>`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "include={}, exclude={}", &self.include, &self.exclude)
    }
}
/// How a path is being tested against the glob filters.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum GlobFilterCheckMode {
    /// The paths are checked top-to-bottom and inclusion is determined
    /// for each path during the traversal.
    TopDown,
    /// An adhoc test if a single file or directory is included.
    ///
    /// This is more expensive than a [`Self::TopDown`] check
    /// because it may require testing every ancestor path in addition to the
    /// path itself to ensure no ancestor path matches an exclude rule.
    Adhoc,
}
/// Result of testing a path against an [`IncludeExcludeFilter`].
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) enum IncludeResult {
    /// The path matches or at least is a prefix of an include pattern.
    ///
    /// For directories: This isn't a guarantee that any file in this directory gets included
    /// but we need to traverse it to make this decision.
    Included,
    /// The path matches an exclude pattern.
    Excluded,
    /// The path matches neither an include nor an exclude pattern and, therefore,
    /// isn't included.
    NotIncluded,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/walk.rs | crates/ty_project/src/walk.rs | use crate::glob::IncludeExcludeFilter;
use crate::{Db, GlobFilterCheckMode, IOErrorDiagnostic, IOErrorKind, IncludeResult, Project};
use ruff_db::files::{File, system_path_to_file};
use ruff_db::system::walk_directory::{ErrorKind, WalkDirectoryBuilder, WalkState};
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::PySourceType;
use rustc_hash::FxHashSet;
use std::path::PathBuf;
use thiserror::Error;
/// Filter that decides which files are included in the project.
///
/// Combines the paths passed on the CLI with the resolved `src.include` and
/// `src.exclude` patterns.
///
/// This struct mainly exists because `dyn Db` isn't `Send` or `Sync`, making it impossible
/// to access fields from within the walker.
#[derive(Debug)]
pub(crate) struct ProjectFilesFilter<'a> {
    /// The same as [`Project::included_paths_or_root`].
    included_paths: &'a [SystemPathBuf],
    /// The resolved `src.include` and `src.exclude` filter.
    src_filter: &'a IncludeExcludeFilter,
}
impl<'a> ProjectFilesFilter<'a> {
    /// Builds the filter from the project's included paths and `src` settings.
    pub(crate) fn from_project(db: &'a dyn Db, project: Project) -> Self {
        Self {
            included_paths: project.included_paths_or_root(db),
            src_filter: &project.settings(db).src().files,
        }
    }

    /// Tests whether `path` lies under one of the explicitly included paths.
    ///
    /// In `TopDown` mode the walker only ever visits descendants of the
    /// included paths, so every path trivially matches as `Partial`.
    /// In `Adhoc` mode the included paths are tested explicitly; an exact
    /// match yields `Full`, which wins over `Partial` via `max`
    /// (`CheckPathMatch` derives `Ord` with `Full > Partial`).
    fn match_included_paths(
        &self,
        path: &SystemPath,
        mode: GlobFilterCheckMode,
    ) -> Option<CheckPathMatch> {
        match mode {
            GlobFilterCheckMode::TopDown => Some(CheckPathMatch::Partial),
            GlobFilterCheckMode::Adhoc => {
                self.included_paths
                    .iter()
                    .filter_map(|included_path| {
                        if let Ok(relative_path) = path.strip_prefix(included_path) {
                            // Exact matches are always included
                            if relative_path.as_str().is_empty() {
                                Some(CheckPathMatch::Full)
                            } else {
                                Some(CheckPathMatch::Partial)
                            }
                        } else {
                            None
                        }
                    })
                    .max()
            }
        }
    }

    /// Returns `true` if a file is part of the project and included in the paths to check.
    ///
    /// A file is included in the checked files if it is a sub path of the project's root
    /// (when no CLI path arguments are specified) or if it is a sub path of any path provided on the CLI (`ty check <paths>`) AND:
    ///
    /// * It matches a positive `include` pattern and isn't excluded by a later negative `include` pattern.
    /// * It doesn't match a positive `exclude` pattern or is re-included by a later negative `exclude` pattern.
    ///
    /// ## Note
    ///
    /// This method may return `true` for files that don't end up being included when walking the
    /// project tree because it doesn't consider `.gitignore` and other ignore files when deciding
    /// if a file's included.
    pub(crate) fn is_file_included(
        &self,
        path: &SystemPath,
        mode: GlobFilterCheckMode,
    ) -> IncludeResult {
        match self.match_included_paths(path, mode) {
            None => IncludeResult::NotIncluded,
            Some(CheckPathMatch::Partial) => self.src_filter.is_file_included(path, mode),
            // Paths passed explicitly on the CLI bypass the `src` filter.
            Some(CheckPathMatch::Full) => IncludeResult::Included,
        }
    }

    /// Like [`Self::is_file_included`] but for directories; see
    /// `IncludeExcludeFilter::is_directory_maybe_included` for the
    /// "maybe included" semantics of the `Partial` case.
    pub(crate) fn is_directory_included(
        &self,
        path: &SystemPath,
        mode: GlobFilterCheckMode,
    ) -> IncludeResult {
        match self.match_included_paths(path, mode) {
            None => IncludeResult::NotIncluded,
            Some(CheckPathMatch::Partial) => {
                self.src_filter.is_directory_maybe_included(path, mode)
            }
            Some(CheckPathMatch::Full) => IncludeResult::Included,
        }
    }
}
/// How a path relates to the explicitly checked paths.
///
/// The `Ord` derive is significant: variant order makes `Full > Partial`,
/// which `match_included_paths` relies on when reducing with `max`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum CheckPathMatch {
    /// The path is a partial match of the checked path (it's a sub path)
    Partial,
    /// The path matches a check path exactly.
    Full,
}
/// Walks the file system to discover the files that belong to the project.
pub(crate) struct ProjectFilesWalker<'a> {
    walker: WalkDirectoryBuilder,
    // Decides which of the walked paths are part of the project.
    filter: ProjectFilesFilter<'a>,
    // When `true`, the include/exclude filters also apply to paths that were
    // passed explicitly to the walker (depth 0).
    force_exclude: bool,
}
impl<'a> ProjectFilesWalker<'a> {
    /// Creates a walker over all included paths (or the project root).
    pub(crate) fn new(db: &'a dyn Db) -> Self {
        let project = db.project();
        let filter = ProjectFilesFilter::from_project(db, project);
        Self::from_paths(db, project.included_paths_or_root(db), filter)
            .expect("included_paths_or_root to never return an empty iterator")
    }

    /// Creates a walker for indexing the project files incrementally.
    ///
    /// The main difference to a full project walk is that `paths` may contain paths
    /// that aren't part of the included files.
    pub(crate) fn incremental<P>(db: &'a dyn Db, paths: impl IntoIterator<Item = P>) -> Option<Self>
    where
        P: AsRef<SystemPath>,
    {
        let project = db.project();
        let filter = ProjectFilesFilter::from_project(db, project);
        Self::from_paths(db, paths, filter)
    }

    /// Builds the directory walker rooted at the first path, adding the rest.
    /// Returns `None` if `paths` is empty.
    fn from_paths<P>(
        db: &'a dyn Db,
        paths: impl IntoIterator<Item = P>,
        filter: ProjectFilesFilter<'a>,
    ) -> Option<Self>
    where
        P: AsRef<SystemPath>,
    {
        let mut paths = paths.into_iter();
        let mut walker = db
            .system()
            .walk_directory(paths.next()?.as_ref())
            .standard_filters(db.project().settings(db).src().respect_ignore_files)
            .ignore_hidden(false);
        for path in paths {
            walker = walker.add(path);
        }
        Some(Self {
            walker,
            filter,
            force_exclude: db.project().force_exclude(db),
        })
    }

    /// Walks the project paths and collects the paths of all files that
    /// are included in the project.
    ///
    /// Also collects IO and non-UTF-8-path errors encountered during the walk
    /// as [`IOErrorDiagnostic`]s.
    pub(crate) fn collect_vec(self, db: &dyn Db) -> (Vec<File>, Vec<IOErrorDiagnostic>) {
        // The walker invokes the callback from multiple threads; collect
        // results behind mutexes.
        let files = std::sync::Mutex::new(Vec::new());
        let diagnostics = std::sync::Mutex::new(Vec::new());
        self.walker.run(|| {
            let db = db.dyn_clone();
            let filter = &self.filter;
            let files = &files;
            let diagnostics = &diagnostics;
            Box::new(move |entry| {
                match entry {
                    Ok(entry) => {
                        // Skip excluded directories. Directories explicitly passed to the
                        // walker (depth 0, i.e. the paths given to `ty check <paths>`) are
                        // only filtered when `force_exclude` is enabled.
                        if entry.file_type().is_directory() {
                            if entry.depth() > 0 || self.force_exclude {
                                let directory_included = filter
                                    .is_directory_included(entry.path(), GlobFilterCheckMode::TopDown);
                                return match directory_included {
                                    IncludeResult::Included => WalkState::Continue,
                                    IncludeResult::Excluded => {
                                        tracing::debug!(
                                            "Skipping directory '{path}' because it is excluded by a default or `src.exclude` pattern",
                                            path=entry.path()
                                        );
                                        WalkState::Skip
                                    },
                                    IncludeResult::NotIncluded => {
                                        tracing::debug!(
                                            "Skipping directory `{path}` because it doesn't match any `src.include` pattern or path specified on the CLI",
                                            path=entry.path()
                                        );
                                        WalkState::Skip
                                    },
                                };
                            }
                        } else {
                            // Ignore any non python files to avoid creating too many entries in `Files`.
                            // Files that were explicitly passed (depth 0) are always assumed to be
                            // Python files; for everything else, fall back to asking the system.
                            let source_type = entry.path().extension().and_then(PySourceType::try_from_extension).or_else(|| {
                                if entry.depth() == 0 {
                                    Some(PySourceType::Python)
                                } else {
                                    db.system().source_type(entry.path())
                                }
                            });
                            if source_type.is_none()
                            {
                                return WalkState::Continue;
                            }
                            // For all files, except the ones that were explicitly passed to the walker (CLI),
                            // check if they're included in the project.
                            if entry.depth() > 0 || self.force_exclude {
                                match filter
                                    .is_file_included(entry.path(), GlobFilterCheckMode::TopDown)
                                {
                                    IncludeResult::Included => {},
                                    IncludeResult::Excluded => {
                                        tracing::debug!(
                                            "Ignoring file `{path}` because it is excluded by a default or `src.exclude` pattern.",
                                            path=entry.path()
                                        );
                                        return WalkState::Continue;
                                    },
                                    IncludeResult::NotIncluded => {
                                        tracing::debug!(
                                            "Ignoring file `{path}` because it doesn't match any `src.include` pattern or path specified on the CLI.",
                                            path=entry.path()
                                        );
                                        return WalkState::Continue;
                                    },
                                }
                            }
                            // If this returns `Err`, then the file was deleted between now and when the walk callback was called.
                            // We can ignore this.
                            if let Ok(file) = system_path_to_file(&*db, entry.path()) {
                                files.lock().unwrap().push(file);
                            }
                        }
                    }
                    Err(error) => match error.kind() {
                        ErrorKind::Loop { .. } => {
                            unreachable!("Loops shouldn't be possible without following symlinks.")
                        }
                        ErrorKind::Io { path, err } => {
                            let mut diagnostics = diagnostics.lock().unwrap();
                            let error = if let Some(path) = path {
                                WalkError::IOPathError {
                                    path: path.clone(),
                                    error: err.to_string(),
                                }
                            } else {
                                WalkError::IOError {
                                    error: err.to_string(),
                                }
                            };
                            diagnostics.push(IOErrorDiagnostic {
                                file: None,
                                error: IOErrorKind::Walk(error),
                            });
                        }
                        ErrorKind::NonUtf8Path { path } => {
                            diagnostics.lock().unwrap().push(IOErrorDiagnostic {
                                file: None,
                                error: IOErrorKind::Walk(WalkError::NonUtf8Path {
                                    path: path.clone(),
                                }),
                            });
                        }
                    },
                }
                WalkState::Continue
            })
        });
        (
            files.into_inner().unwrap(),
            diagnostics.into_inner().unwrap(),
        )
    }

    /// Like [`Self::collect_vec`] but deduplicates the files into a set.
    pub(crate) fn collect_set(self, db: &dyn Db) -> (FxHashSet<File>, Vec<IOErrorDiagnostic>) {
        let (files, diagnostics) = self.collect_vec(db);
        (files.into_iter().collect(), diagnostics)
    }
}
/// Errors that can occur while walking the project's directories.
#[derive(Error, Debug, Clone, get_size2::GetSize)]
pub(crate) enum WalkError {
    /// An IO error tied to a specific path.
    #[error("`{path}`: {error}")]
    IOPathError { path: SystemPathBuf, error: String },
    /// An IO error with no associated path.
    #[error("Failed to walk project directory: {error}")]
    IOError { error: String },
    /// The walker encountered a path that isn't valid UTF-8.
    #[error("`{path}` is not a valid UTF-8 path")]
    NonUtf8Path { path: PathBuf },
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/watch.rs | crates/ty_project/src/watch.rs | pub use project_watcher::ProjectWatcher;
use ruff_db::system::{SystemPath, SystemPathBuf, SystemVirtualPathBuf};
pub use watcher::{EventHandler, Watcher, directory_watcher};
mod project_watcher;
mod watcher;
/// Classification of a file system change event.
///
/// ## Renaming a path
/// Renaming a path creates a [`ChangeEvent::Deleted`] event for the old path and/or a [`ChangeEvent::Created`] for the new location.
/// Whether both events are created or just one of them depends on where the path was moved from and to:
///
/// * Inside the watched directory: Both events are created.
/// * From a watched directory to a non-watched directory: Only a [`ChangeEvent::Deleted`] event is created.
/// * From a non-watched directory to a watched directory: Only a [`ChangeEvent::Created`] event is created.
///
/// ## Renaming a directory
/// It's up to the file watcher implementation to aggregate the rename event for a directory to a single rename
/// event instead of emitting an event for each file or subdirectory in that path.
#[derive(Debug, PartialEq, Eq)]
pub enum ChangeEvent {
    /// The file corresponding to the given path was opened in an editor.
    Opened(SystemPathBuf),
    /// A new path was created
    Created {
        path: SystemPathBuf,
        kind: CreatedKind,
    },
    /// The content or metadata of a path was changed.
    Changed {
        path: SystemPathBuf,
        kind: ChangedKind,
    },
    /// A path was deleted.
    Deleted {
        path: SystemPathBuf,
        kind: DeletedKind,
    },
    /// A new virtual path was created.
    CreatedVirtual(SystemVirtualPathBuf),
    /// The content of a virtual path was changed.
    ChangedVirtual(SystemVirtualPathBuf),
    /// A virtual path was deleted.
    DeletedVirtual(SystemVirtualPathBuf),
    /// The file watcher failed to observe some changes and now is out of sync with the file system.
    ///
    /// This can happen if many files are changed at once. The consumer should rescan all files to catch up
    /// with the file system.
    Rescan,
}
impl ChangeEvent {
    /// Creates a new [`Changed`] event for the file content at the given path.
    ///
    /// [`Changed`]: ChangeEvent::Changed
    pub fn file_content_changed(path: SystemPathBuf) -> ChangeEvent {
        ChangeEvent::Changed {
            path,
            kind: ChangedKind::FileContent,
        }
    }

    /// Returns the file name of the affected system path, if any.
    pub fn file_name(&self) -> Option<&str> {
        self.system_path().and_then(|path| path.file_name())
    }

    /// Returns the system path affected by this event.
    ///
    /// Returns `None` for virtual-path events and [`ChangeEvent::Rescan`].
    pub fn system_path(&self) -> Option<&SystemPath> {
        // Match exhaustively (no `_` catch-all) so that adding a new variant
        // forces this method to be revisited.
        match self {
            ChangeEvent::Opened(path)
            | ChangeEvent::Created { path, .. }
            | ChangeEvent::Changed { path, .. }
            | ChangeEvent::Deleted { path, .. } => Some(path),
            ChangeEvent::CreatedVirtual(_)
            | ChangeEvent::ChangedVirtual(_)
            | ChangeEvent::DeletedVirtual(_)
            | ChangeEvent::Rescan => None,
        }
    }

    /// Returns `true` for [`ChangeEvent::Rescan`].
    pub const fn is_rescan(&self) -> bool {
        matches!(self, ChangeEvent::Rescan)
    }

    /// Returns `true` for [`ChangeEvent::Created`].
    pub const fn is_created(&self) -> bool {
        matches!(self, ChangeEvent::Created { .. })
    }

    /// Returns `true` for [`ChangeEvent::Changed`].
    pub const fn is_changed(&self) -> bool {
        matches!(self, ChangeEvent::Changed { .. })
    }

    /// Returns `true` for [`ChangeEvent::Deleted`].
    pub const fn is_deleted(&self) -> bool {
        matches!(self, ChangeEvent::Deleted { .. })
    }
}
/// Classification of an event that creates a new path.
///
/// See [`ChangeEvent::Created`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum CreatedKind {
    /// A file was created.
    File,
    /// A directory was created.
    Directory,
    /// A file, directory, or any other kind of path was created.
    Any,
}
/// Classification of an event related to a content or metadata change.
///
/// See [`ChangeEvent::Changed`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ChangedKind {
    /// The content of a file was changed.
    FileContent,
    /// The metadata of a file was changed.
    FileMetadata,
    /// Either the content or metadata of a path was changed.
    Any,
}
/// Classification of an event that deletes a path.
///
/// See [`ChangeEvent::Deleted`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum DeletedKind {
    /// A file was deleted.
    File,
    /// A directory was deleted.
    Directory,
    /// A file, directory, or any other kind of path was deleted.
    Any,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/files.rs | crates/ty_project/src/files.rs | use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::Arc;
use rustc_hash::FxHashSet;
use salsa::Setter;
use ruff_db::files::File;
use crate::db::Db;
use crate::{IOErrorDiagnostic, Project};
/// The indexed files of a project.
///
/// The indexing happens lazily, but the files are then cached for subsequent reads.
///
/// ## Implementation
/// The implementation uses internal mutability to transition between the lazy and indexed state
/// without triggering a new salsa revision. This is safe because the initial indexing happens on first access,
/// so no query can be depending on the contents of the indexed files before that. All subsequent mutations to
/// the indexed files must go through `IndexedMut`, which uses the Salsa setter `project.set_file_set` to
/// ensure that Salsa always knows when the set of indexed files have changed.
#[derive(Debug, get_size2::GetSize)]
pub struct IndexedFiles {
    // Guarded by a mutex so the lazy -> indexed transition can happen
    // behind a shared reference (see `get` and `LazyFiles::set`).
    state: std::sync::Mutex<State>,
}
impl IndexedFiles {
    /// Creates a file set in the lazy (not yet indexed) state.
    pub fn lazy() -> Self {
        Self {
            state: std::sync::Mutex::new(State::Lazy),
        }
    }

    fn indexed(inner: Arc<IndexedInner>) -> Self {
        Self {
            state: std::sync::Mutex::new(State::Indexed(inner)),
        }
    }

    /// Returns the current index state: either a handle for populating it
    /// ([`Index::Lazy`], which keeps holding the state lock) or the indexed
    /// files ([`Index::Indexed`]).
    pub(super) fn get(&self) -> Index<'_> {
        let state = self.state.lock().unwrap();
        match &*state {
            State::Lazy => Index::Lazy(LazyFiles { files: state }),
            State::Indexed(inner) => Index::Indexed(Indexed {
                inner: Arc::clone(inner),
                _lifetime: PhantomData,
            }),
        }
    }

    /// Returns `true` if the files haven't been indexed yet.
    pub(super) fn is_lazy(&self) -> bool {
        matches!(*self.state.lock().unwrap(), State::Lazy)
    }

    /// Returns a mutable view on the index that allows cheap in-place mutations.
    ///
    /// The changes are automatically written back to the database once the view is dropped.
    pub(super) fn indexed_mut(db: &mut dyn Db, project: Project) -> Option<IndexedMut<'_>> {
        // Calling `trigger_cancellation` cancels all pending salsa queries. This ensures that there are no pending
        // reads to the file set (this `db` is the only alive db).
        db.trigger_cancellation();
        // Replace the state with lazy. The `IndexedMut` guard restores the state
        // to `State::Indexed` or sets a new `PackageFiles` when it gets dropped to ensure the state
        // is restored to how it has been before replacing the value.
        //
        // It isn't necessary to hold on to the lock after this point:
        // * The above call to `trigger_cancellation` guarantees that there's exactly **one** DB reference.
        // * `Indexed` has a `'db` lifetime, and this method requires a `&mut db`.
        //   This means that there can't be any pending reference to `Indexed` because Rust
        //   doesn't allow borrowing `db` as mutable (to call this method) and immutable (`Indexed<'db>`) at the same time.
        //   There can't be any other `Indexed<'db>` references created by clones of this DB because
        //   all clones must have been dropped at this point and the `Indexed`
        //   can't outlive the database (constrained by the `db` lifetime).
        let state = {
            let files = project.file_set(db);
            let mut locked = files.state.lock().unwrap();
            std::mem::replace(&mut *locked, State::Lazy)
        };
        let indexed = match state {
            // If it's already lazy, just return. We also don't need to restore anything because the
            // replace above was a no-op.
            State::Lazy => return None,
            State::Indexed(indexed) => indexed,
        };
        Some(IndexedMut {
            db: Some(db),
            project,
            indexed,
            did_change: false,
        })
    }
}
impl Default for IndexedFiles {
    // A default `IndexedFiles` starts out lazy (not yet indexed).
    fn default() -> Self {
        Self::lazy()
    }
}
/// Internal state of [`IndexedFiles`].
#[derive(Debug, get_size2::GetSize)]
enum State {
    /// The files of a package haven't been indexed yet.
    Lazy,
    /// The files are indexed. Stores the known files of a package.
    Indexed(Arc<IndexedInner>),
}
pub(super) enum Index<'db> {
/// The index has not yet been computed. Allows inserting the files.
Lazy(LazyFiles<'db>),
Indexed(Indexed<'db>),
}
/// Package files that have not been indexed yet.
pub(super) struct LazyFiles<'db> {
files: std::sync::MutexGuard<'db, State>,
}
impl<'db> LazyFiles<'db> {
    /// Sets the indexed files of a package to `files`.
    pub(super) fn set(
        mut self,
        files: FxHashSet<File>,
        diagnostics: Vec<IOErrorDiagnostic>,
    ) -> Indexed<'db> {
        // Build the shared storage first, publish it through the held lock, and
        // hand the caller an `Indexed` view over the very same allocation.
        let inner = Arc::new(IndexedInner { files, diagnostics });
        *self.files = State::Indexed(Arc::clone(&inner));
        Indexed {
            inner,
            _lifetime: PhantomData,
        }
    }
}
/// The indexed files of the project.
///
/// Note: This type is intentionally non-cloneable. Making it cloneable requires
/// revisiting the locking behavior in [`IndexedFiles::indexed_mut`].
#[derive(Debug)]
pub struct Indexed<'db> {
    // Shared with the `State::Indexed` stored on the project.
    inner: Arc<IndexedInner>,
    // Preserve the lifetime of `PackageFiles`.
    _lifetime: PhantomData<&'db ()>,
}

/// Shared storage behind [`Indexed`]: the file set plus any I/O errors
/// encountered while indexing.
#[derive(Debug, get_size2::GetSize)]
struct IndexedInner {
    files: FxHashSet<File>,
    diagnostics: Vec<IOErrorDiagnostic>,
}
impl Indexed<'_> {
    /// I/O errors recorded while the files were indexed.
    pub(super) fn diagnostics(&self) -> &[IOErrorDiagnostic] {
        &self.inner.diagnostics
    }

    /// Number of indexed files.
    pub(super) fn len(&self) -> usize {
        self.inner.files.len()
    }
}
/// Deref to the underlying set so callers can use set operations
/// (`contains`, `iter`, ...) directly on `Indexed`.
impl Deref for Indexed<'_> {
    type Target = FxHashSet<File>;

    fn deref(&self) -> &Self::Target {
        &self.inner.files
    }
}

/// Iterator over the indexed files, yielding `File` by value
/// (via `Copied`, which requires `File: Copy`).
pub(super) type IndexedIter<'a> = std::iter::Copied<std::collections::hash_set::Iter<'a, File>>;

impl<'a> IntoIterator for &'a Indexed<'_> {
    type Item = File;
    type IntoIter = IndexedIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.inner.files.iter().copied()
    }
}
/// A Mutable view of a project's indexed files.
///
/// Allows in-place mutation of the files without deep cloning the hash set.
/// The changes are written back when the mutable view is dropped.
// NOTE(review): an earlier doc referenced a `Self::set` method that no longer
// exists; write-back happens in `set_impl`, invoked from `Drop`.
pub(super) struct IndexedMut<'db> {
    // `Some` until the changes have been written back (see `set_impl`),
    // which happens at most once.
    db: Option<&'db mut dyn Db>,
    project: Project,
    indexed: Arc<IndexedInner>,
    // Whether `insert`/`remove` actually changed the set; decides whether the
    // write-back must trigger a new salsa revision.
    did_change: bool,
}
impl IndexedMut<'_> {
    /// Adds `file` to the set. Returns `true` if it wasn't already present.
    pub(super) fn insert(&mut self, file: File) -> bool {
        if self.inner_mut().files.insert(file) {
            self.did_change = true;
            true
        } else {
            false
        }
    }

    /// Removes `file` from the set. Returns `true` if it was present.
    pub(super) fn remove(&mut self, file: File) -> bool {
        if self.inner_mut().files.remove(&file) {
            self.did_change = true;
            true
        } else {
            false
        }
    }

    /// Replaces the indexing diagnostics.
    // NOTE(review): this does not mark `did_change`, so a diagnostics-only
    // update won't bump the salsa revision on write-back — confirm intended.
    pub(super) fn set_diagnostics(&mut self, diagnostics: Vec<IOErrorDiagnostic>) {
        self.inner_mut().diagnostics = diagnostics;
    }

    /// Mutable access to the shared storage.
    ///
    /// Panics if any other reference to the inner `Arc` still exists;
    /// `indexed_mut`'s cancellation of pending queries is what makes this hold.
    fn inner_mut(&mut self) -> &mut IndexedInner {
        Arc::get_mut(&mut self.indexed)
            .expect("All references to `FilesSet` should have been dropped")
    }

    /// Writes the (possibly mutated) files back to the project.
    ///
    /// Idempotent: the `db` reference is `take`n, so a second call (e.g. from
    /// `Drop` after an earlier invocation) is a no-op.
    fn set_impl(&mut self) {
        let Some(db) = self.db.take() else {
            return;
        };

        let indexed = Arc::clone(&self.indexed);

        if self.did_change {
            // If there are changes, set the new file_set to trigger a salsa revision change.
            self.project
                .set_file_set(db)
                .to(IndexedFiles::indexed(indexed));
        } else {
            // The `indexed_mut` replaced the `state` with Lazy. Restore it back to the indexed state.
            *self.project.file_set(db).state.lock().unwrap() = State::Indexed(indexed);
        }
    }
}
/// Ensures changes are written back even if the caller never explicitly
/// finishes the mutable view.
impl Drop for IndexedMut<'_> {
    fn drop(&mut self) {
        self.set_impl();
    }
}
#[cfg(test)]
mod tests {
    use rustc_hash::FxHashSet;

    use crate::ProjectMetadata;
    use crate::db::Db;
    use crate::db::tests::TestDb;
    use crate::files::Index;
    use ruff_db::files::system_path_to_file;
    use ruff_db::system::{DbWithWritableSystem as _, SystemPathBuf};
    use ruff_python_ast::name::Name;

    /// Regression test: reading the file set again while/after it has been
    /// initialized must not dead-lock.
    #[test]
    fn re_entrance() -> anyhow::Result<()> {
        let metadata = ProjectMetadata::new(Name::new_static("test"), SystemPathBuf::from("/test"));
        let mut db = TestDb::new(metadata);

        db.write_file("test.py", "")?;

        let project = db.project();
        let file = system_path_to_file(&db, "test.py").unwrap();

        // First access: fill the lazy index with a single file.
        let files = match project.file_set(&db).get() {
            Index::Lazy(lazy) => lazy.set(FxHashSet::from_iter([file]), Vec::new()),
            Index::Indexed(files) => files,
        };

        // Calling files a second time should not dead-lock.
        // This can e.g. happen when `check_file` iterates over all files and
        // `should_check_file` queries the open files.
        let files_2 = project.file_set(&db).get();

        match files_2 {
            Index::Lazy(_) => {
                panic!("Expected indexed files, got lazy files");
            }
            Index::Indexed(files_2) => {
                assert_eq!(
                    files_2.iter().collect::<Vec<_>>(),
                    files.iter().collect::<Vec<_>>()
                );
            }
        }

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata.rs | crates/ty_project/src/metadata.rs | use configuration_file::{ConfigurationFile, ConfigurationFileError};
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use ruff_python_ast::name::Name;
use std::sync::Arc;
use thiserror::Error;
use ty_combine::Combine;
use ty_python_semantic::{MisconfigurationMode, ProgramSettings};
use crate::metadata::options::ProjectOptionsOverrides;
use crate::metadata::pyproject::{Project, PyProject, PyProjectError, ResolveRequiresPythonError};
use crate::metadata::value::ValueSource;
pub use options::Options;
use options::TyTomlError;
mod configuration_file;
pub mod options;
pub mod pyproject;
pub mod settings;
pub mod value;
/// Metadata describing a ty project: its name, root directory, and the raw
/// (unresolved) configuration options that apply to it.
#[derive(Debug, PartialEq, Eq, get_size2::GetSize)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct ProjectMetadata {
    /// The project's name, taken from `[project] name` when available,
    /// otherwise derived from the root directory's name.
    pub(super) name: Name,

    /// The project's root directory.
    pub(super) root: SystemPathBuf,

    /// The raw options
    pub(super) options: Options,

    /// Paths of configurations other than the project's configuration that were combined into [`Self::options`].
    ///
    /// This field stores the paths of the configuration files, mainly for
    /// knowing which files to watch for changes.
    ///
    /// The path ordering doesn't imply precedence.
    #[cfg_attr(test, serde(skip_serializing_if = "Vec::is_empty"))]
    pub(super) extra_configuration_paths: Vec<SystemPathBuf>,

    /// How misconfigured option values are handled when resolving settings.
    #[cfg_attr(test, serde(skip))]
    pub(super) misconfiguration_mode: MisconfigurationMode,
}
impl ProjectMetadata {
    /// Creates a project with the given name and root that uses the default options.
    pub fn new(name: Name, root: SystemPathBuf) -> Self {
        Self {
            name,
            root,
            extra_configuration_paths: Vec::default(),
            options: Options::default(),
            misconfiguration_mode: MisconfigurationMode::Fail,
        }
    }

    /// Loads the project metadata from an explicitly provided configuration
    /// file instead of discovering one, using `root` as the project root.
    ///
    /// The configuration file's path is recorded in
    /// [`Self::extra_configuration_paths`] so it can be watched for changes.
    pub fn from_config_file(
        path: SystemPathBuf,
        root: &SystemPath,
        system: &dyn System,
    ) -> Result<Self, ProjectMetadataError> {
        tracing::debug!("Using overridden configuration file at '{path}'");

        let config_file = ConfigurationFile::from_path(path.clone(), system).map_err(|error| {
            ProjectMetadataError::ConfigurationFileError {
                source: Box::new(error),
                path: path.clone(),
            }
        })?;

        let options = config_file.into_options();

        Ok(Self {
            // No `[project]` table is available here; fall back to the root's directory name.
            name: Name::new(root.file_name().unwrap_or("root")),
            root: root.to_path_buf(),
            options,
            extra_configuration_paths: vec![path],
            misconfiguration_mode: MisconfigurationMode::Fail,
        })
    }

    /// Loads a project from a `pyproject.toml` file.
    pub(crate) fn from_pyproject(
        pyproject: PyProject,
        root: SystemPathBuf,
    ) -> Result<Self, ResolveRequiresPythonError> {
        Self::from_options(
            pyproject.tool.and_then(|tool| tool.ty).unwrap_or_default(),
            root,
            pyproject.project.as_ref(),
            MisconfigurationMode::Fail,
        )
    }

    /// Loads a project from a set of options with an optional pyproject-project table.
    ///
    /// Errors if the `[project] requires-python` constraint can't be resolved
    /// to a lower-bound Python version.
    pub fn from_options(
        mut options: Options,
        root: SystemPathBuf,
        project: Option<&Project>,
        misconfiguration_mode: MisconfigurationMode,
    ) -> Result<Self, ResolveRequiresPythonError> {
        // Prefer the PEP 621 project name; fall back to the root directory's name.
        let name = project
            .and_then(|project| project.name.as_deref())
            .map(|name| Name::new(&**name))
            .unwrap_or_else(|| Name::new(root.file_name().unwrap_or("root")));

        // If the `options` don't specify a python version but the `project.requires-python` field is set,
        // use that as a lower bound instead.
        if let Some(project) = project {
            if options
                .environment
                .as_ref()
                .is_none_or(|env| env.python_version.is_none())
            {
                if let Some(requires_python) = project.resolve_requires_python_lower_bound()? {
                    let mut environment = options.environment.unwrap_or_default();
                    environment.python_version = Some(requires_python);
                    options.environment = Some(environment);
                }
            }
        }

        Ok(Self {
            name,
            root,
            options,
            extra_configuration_paths: Vec::new(),
            misconfiguration_mode,
        })
    }

    /// Discovers the closest project at `path` and returns its metadata.
    ///
    /// The algorithm traverses upwards in the `path`'s ancestor chain and uses the following precedence
    /// to resolve the project's root.
    ///
    /// 1. The closest `pyproject.toml` with a `tool.ty` section or `ty.toml`.
    /// 1. The closest `pyproject.toml`.
    /// 1. Fallback to use `path` as the root and use the default settings.
    pub fn discover(
        path: &SystemPath,
        system: &dyn System,
    ) -> Result<ProjectMetadata, ProjectMetadataError> {
        tracing::debug!("Searching for a project in '{path}'");

        if !system.is_directory(path) {
            return Err(ProjectMetadataError::NotADirectory(path.to_path_buf()));
        }

        // The closest ancestor `pyproject.toml` *without* a `tool.ty` section,
        // kept as a fallback in case no explicitly configured project is found.
        let mut closest_project: Option<ProjectMetadata> = None;

        for project_root in path.ancestors() {
            let pyproject_path = project_root.join("pyproject.toml");

            let pyproject = if let Ok(pyproject_str) = system.read_to_string(&pyproject_path) {
                match PyProject::from_toml_str(
                    &pyproject_str,
                    ValueSource::File(Arc::new(pyproject_path.clone())),
                ) {
                    Ok(pyproject) => Some(pyproject),
                    Err(error) => {
                        return Err(ProjectMetadataError::InvalidPyProject {
                            path: pyproject_path,
                            source: Box::new(error),
                        });
                    }
                }
            } else {
                None
            };

            // A `ty.toml` takes precedence over a `pyproject.toml`.
            let ty_toml_path = project_root.join("ty.toml");
            if let Ok(ty_str) = system.read_to_string(&ty_toml_path) {
                let options = match Options::from_toml_str(
                    &ty_str,
                    ValueSource::File(Arc::new(ty_toml_path.clone())),
                ) {
                    Ok(options) => options,
                    Err(error) => {
                        return Err(ProjectMetadataError::InvalidTyToml {
                            path: ty_toml_path,
                            source: Box::new(error),
                        });
                    }
                };

                if pyproject
                    .as_ref()
                    .is_some_and(|project| project.ty().is_some())
                {
                    // TODO: Consider using a diagnostic here
                    tracing::warn!(
                        "Ignoring the `tool.ty` section in `{pyproject_path}` because `{ty_toml_path}` takes precedence."
                    );
                }

                tracing::debug!("Found project at '{}'", project_root);

                let metadata = ProjectMetadata::from_options(
                    options,
                    project_root.to_path_buf(),
                    // The `[project]` table (name, requires-python) still comes
                    // from `pyproject.toml`, even though its `tool.ty` section is ignored.
                    pyproject
                        .as_ref()
                        .and_then(|pyproject| pyproject.project.as_ref()),
                    MisconfigurationMode::Fail,
                )
                .map_err(|err| {
                    ProjectMetadataError::InvalidRequiresPythonConstraint {
                        source: err,
                        path: pyproject_path,
                    }
                })?;

                return Ok(metadata);
            }

            if let Some(pyproject) = pyproject {
                let has_ty_section = pyproject.ty().is_some();
                let metadata =
                    ProjectMetadata::from_pyproject(pyproject, project_root.to_path_buf())
                        .map_err(
                            |err| ProjectMetadataError::InvalidRequiresPythonConstraint {
                                source: err,
                                path: pyproject_path,
                            },
                        )?;

                if has_ty_section {
                    tracing::debug!("Found project at '{}'", project_root);

                    return Ok(metadata);
                }

                // Not a project itself, keep looking for an enclosing project.
                if closest_project.is_none() {
                    closest_project = Some(metadata);
                }
            }
        }

        // No project found, but maybe a pyproject.toml was found.
        let metadata = if let Some(closest_project) = closest_project {
            tracing::debug!(
                "Project without `tool.ty` section: '{}'",
                closest_project.root()
            );

            closest_project
        } else {
            tracing::debug!(
                "The ancestor directories contain no `pyproject.toml`. Falling back to a virtual project."
            );

            // Create a project with a default configuration
            Self::new(
                path.file_name().unwrap_or("root").into(),
                path.to_path_buf(),
            )
        };

        Ok(metadata)
    }

    /// The project's root directory.
    pub fn root(&self) -> &SystemPath {
        &self.root
    }

    /// The project's name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// The raw (unresolved) configuration options.
    pub fn options(&self) -> &Options {
        &self.options
    }

    /// Paths of configuration files other than the project's own configuration
    /// that were combined into [`Self::options`] (e.g. the user-level configuration).
    pub fn extra_configuration_paths(&self) -> &[SystemPathBuf] {
        &self.extra_configuration_paths
    }

    /// Resolves the raw options into [`ProgramSettings`].
    pub fn to_program_settings(
        &self,
        system: &dyn System,
        vendored: &VendoredFileSystem,
    ) -> anyhow::Result<ProgramSettings> {
        self.options.to_program_settings(
            self.root(),
            self.name(),
            system,
            vendored,
            self.misconfiguration_mode,
        )
    }

    /// Applies `overrides` to the current options
    /// (see [`ProjectOptionsOverrides::apply_to`] for precedence).
    pub fn apply_overrides(&mut self, overrides: &ProjectOptionsOverrides) {
        self.options = overrides.apply_to(std::mem::take(&mut self.options));
    }

    /// Combine the project options with the CLI options where the CLI options take precedence.
    pub fn apply_options(&mut self, options: Options) {
        self.options = options.combine(std::mem::take(&mut self.options));
    }

    /// Applies the options from the configuration files to the project's options.
    ///
    /// This includes:
    ///
    /// * The user-level configuration
    pub fn apply_configuration_files(
        &mut self,
        system: &dyn System,
    ) -> Result<(), ConfigurationFileError> {
        if let Some(user) = ConfigurationFile::user(system)? {
            tracing::debug!(
                "Applying user-level configuration loaded from `{path}`.",
                path = user.path()
            );
            self.apply_configuration_file(user);
        }

        Ok(())
    }

    /// Applies a lower-precedence configuration file to the project's options.
    fn apply_configuration_file(&mut self, options: ConfigurationFile) {
        self.extra_configuration_paths
            .push(options.path().to_owned());
        self.options.combine_with(options.into_options());
    }
}
/// Errors that can occur while loading or discovering project metadata.
#[derive(Debug, Error)]
pub enum ProjectMetadataError {
    #[error("project path '{0}' is not a directory")]
    NotADirectory(SystemPathBuf),

    #[error("{path} is not a valid `pyproject.toml`: {source}")]
    InvalidPyProject {
        source: Box<PyProjectError>,
        path: SystemPathBuf,
    },

    #[error("{path} is not a valid `ty.toml`: {source}")]
    InvalidTyToml {
        source: Box<TyTomlError>,
        path: SystemPathBuf,
    },

    #[error("Invalid `requires-python` version specifier (`{path}`): {source}")]
    InvalidRequiresPythonConstraint {
        source: ResolveRequiresPythonError,
        path: SystemPathBuf,
    },

    #[error("Error loading configuration file at {path}: {source}")]
    ConfigurationFileError {
        source: Box<ConfigurationFileError>,
        path: SystemPathBuf,
    },
}
#[cfg(test)]
mod tests {
    //! Integration tests for project discovery

    // NOTE(review): leading whitespace inside the raw strings below was lost in
    // this copy of the file and has been reconstructed — verify the inline
    // snapshots and expected error strings against a fresh `cargo insta test`.

    use anyhow::{Context, anyhow};
    use insta::assert_ron_snapshot;
    use ruff_db::system::{SystemPathBuf, TestSystem};
    use ruff_python_ast::PythonVersion;

    use crate::{ProjectMetadata, ProjectMetadataError};

    #[test]
    fn project_without_pyproject() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([(root.join("foo.py"), ""), (root.join("bar.py"), "")])
            .context("Failed to write files")?;

        let project =
            ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;

        assert_eq!(project.root(), &*root);

        with_escaped_paths(|| {
            assert_ron_snapshot!(&project, @r#"
            ProjectMetadata(
              name: Name("app"),
              root: "/app",
              options: Options(),
            )
            "#);
        });

        Ok(())
    }

    #[test]
    fn project_with_pyproject() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "backend"
                    "#,
                ),
                (root.join("db/__init__.py"), ""),
            ])
            .context("Failed to write files")?;

        let project =
            ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;

        assert_eq!(project.root(), &*root);

        with_escaped_paths(|| {
            assert_ron_snapshot!(&project, @r#"
            ProjectMetadata(
              name: Name("backend"),
              root: "/app",
              options: Options(),
            )
            "#);
        });

        // Discovering the same package from a subdirectory should give the same result
        let from_src = ProjectMetadata::discover(&root.join("db"), &system)
            .context("Failed to discover project from src sub-directory")?;

        assert_eq!(from_src, project);

        Ok(())
    }

    #[test]
    fn project_with_invalid_pyproject() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "backend"

                    [tool.ty
                    "#,
                ),
                (root.join("db/__init__.py"), ""),
            ])
            .context("Failed to write files")?;

        let Err(error) = ProjectMetadata::discover(&root, &system) else {
            return Err(anyhow!(
                "Expected project discovery to fail because of invalid syntax in the pyproject.toml"
            ));
        };

        assert_error_eq(
            &error,
            r#"/app/pyproject.toml is not a valid `pyproject.toml`: TOML parse error at line 5, column 29
  |
5 |                     [tool.ty
  |                             ^
unclosed table, expected `]`
"#,
        );

        Ok(())
    }

    #[test]
    fn nested_projects_in_sub_project() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "project-root"

                    [tool.ty.src]
                    root = "src"
                    "#,
                ),
                (
                    root.join("packages/a/pyproject.toml"),
                    r#"
                    [project]
                    name = "nested-project"

                    [tool.ty.src]
                    root = "src"
                    "#,
                ),
            ])
            .context("Failed to write files")?;

        let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;

        with_escaped_paths(|| {
            assert_ron_snapshot!(sub_project, @r#"
            ProjectMetadata(
              name: Name("nested-project"),
              root: "/app/packages/a",
              options: Options(
                src: Some(SrcOptions(
                  root: Some("src"),
                )),
              ),
            )
            "#);
        });

        Ok(())
    }

    #[test]
    fn nested_projects_in_root_project() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "project-root"

                    [tool.ty.src]
                    root = "src"
                    "#,
                ),
                (
                    root.join("packages/a/pyproject.toml"),
                    r#"
                    [project]
                    name = "nested-project"

                    [tool.ty.src]
                    root = "src"
                    "#,
                ),
            ])
            .context("Failed to write files")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        with_escaped_paths(|| {
            assert_ron_snapshot!(root, @r#"
            ProjectMetadata(
              name: Name("project-root"),
              root: "/app",
              options: Options(
                src: Some(SrcOptions(
                  root: Some("src"),
                )),
              ),
            )
            "#);
        });

        Ok(())
    }

    #[test]
    fn nested_projects_without_ty_sections() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "project-root"
                    "#,
                ),
                (
                    root.join("packages/a/pyproject.toml"),
                    r#"
                    [project]
                    name = "nested-project"
                    "#,
                ),
            ])
            .context("Failed to write files")?;

        let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;

        with_escaped_paths(|| {
            assert_ron_snapshot!(sub_project, @r#"
            ProjectMetadata(
              name: Name("nested-project"),
              root: "/app/packages/a",
              options: Options(),
            )
            "#);
        });

        Ok(())
    }

    #[test]
    fn nested_projects_with_outer_ty_section() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "project-root"

                    [tool.ty.environment]
                    python-version = "3.10"
                    "#,
                ),
                (
                    root.join("packages/a/pyproject.toml"),
                    r#"
                    [project]
                    name = "nested-project"
                    "#,
                ),
            ])
            .context("Failed to write files")?;

        let root = ProjectMetadata::discover(&root.join("packages/a"), &system)?;

        with_escaped_paths(|| {
            assert_ron_snapshot!(root, @r#"
            ProjectMetadata(
              name: Name("project-root"),
              root: "/app",
              options: Options(
                environment: Some(EnvironmentOptions(
                  r#python-version: Some("3.10"),
                )),
              ),
            )
            "#);
        });

        Ok(())
    }

    /// A `ty.toml` takes precedence over any `pyproject.toml`.
    ///
    /// However, the `pyproject.toml` is still loaded to get the project name and, in the future,
    /// the requires-python constraint.
    #[test]
    fn project_with_ty_and_pyproject_toml() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_files_all([
                (
                    root.join("pyproject.toml"),
                    r#"
                    [project]
                    name = "super-app"
                    requires-python = ">=3.12"

                    [tool.ty.src]
                    root = "this_option_is_ignored"
                    "#,
                ),
                (
                    root.join("ty.toml"),
                    r#"
                    [src]
                    root = "src"
                    "#,
                ),
            ])
            .context("Failed to write files")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        with_escaped_paths(|| {
            assert_ron_snapshot!(root, @r#"
            ProjectMetadata(
              name: Name("super-app"),
              root: "/app",
              options: Options(
                environment: Some(EnvironmentOptions(
                  r#python-version: Some("3.12"),
                )),
                src: Some(SrcOptions(
                  root: Some("src"),
                )),
              ),
            )
            "#);
        });

        Ok(())
    }

    #[test]
    fn requires_python_major_minor() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">=3.12"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::PY312)
        );

        Ok(())
    }

    #[test]
    fn requires_python_major_only() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">=3"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::from((3, 0)))
        );

        Ok(())
    }

    /// A `requires-python` constraint with major, minor and patch can be simplified
    /// to major and minor (e.g. 3.12.1 -> 3.12).
    #[test]
    fn requires_python_major_minor_patch() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">=3.12.8"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::PY312)
        );

        Ok(())
    }

    #[test]
    fn requires_python_beta_version() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">= 3.13.0b0"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::PY313)
        );

        Ok(())
    }

    #[test]
    fn requires_python_greater_than_major_minor() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                # This is somewhat nonsensical because 3.12.1 > 3.12 is true.
                # That's why simplifying the constraint to >= 3.12 is correct
                requires-python = ">3.12"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::PY312)
        );

        Ok(())
    }

    /// `python-version` takes precedence if both `requires-python` and `python-version` are configured.
    #[test]
    fn requires_python_and_python_version() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">=3.12"

                [tool.ty.environment]
                python-version = "3.10"
                "#,
            )
            .context("Failed to write file")?;

        let root = ProjectMetadata::discover(&root, &system)?;

        assert_eq!(
            root.options
                .environment
                .unwrap_or_default()
                .python_version
                .as_deref(),
            Some(&PythonVersion::PY310)
        );

        Ok(())
    }

    #[test]
    fn requires_python_less_than() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = "<3.12"
                "#,
            )
            .context("Failed to write file")?;

        let Err(error) = ProjectMetadata::discover(&root, &system) else {
            return Err(anyhow!(
                "Expected project discovery to fail because the `requires-python` doesn't specify a lower bound (it only specifies an upper bound)."
            ));
        };

        assert_error_eq(
            &error,
            "Invalid `requires-python` version specifier (`/app/pyproject.toml`): value `<3.12` does not contain a lower bound. Add a lower bound to indicate the minimum compatible Python version (e.g., `>=3.13`) or specify a version in `environment.python-version`.",
        );

        Ok(())
    }

    #[test]
    fn requires_python_no_specifiers() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ""
                "#,
            )
            .context("Failed to write file")?;

        let Err(error) = ProjectMetadata::discover(&root, &system) else {
            return Err(anyhow!(
                "Expected project discovery to fail because the `requires-python` specifiers are empty and don't define a lower bound."
            ));
        };

        assert_error_eq(
            &error,
            "Invalid `requires-python` version specifier (`/app/pyproject.toml`): value `` does not contain a lower bound. Add a lower bound to indicate the minimum compatible Python version (e.g., `>=3.13`) or specify a version in `environment.python-version`.",
        );

        Ok(())
    }

    #[test]
    fn requires_python_too_large_major_version() -> anyhow::Result<()> {
        let system = TestSystem::default();
        let root = SystemPathBuf::from("/app");

        system
            .memory_file_system()
            .write_file_all(
                root.join("pyproject.toml"),
                r#"
                [project]
                requires-python = ">=999.0"
                "#,
            )
            .context("Failed to write file")?;

        let Err(error) = ProjectMetadata::discover(&root, &system) else {
            return Err(anyhow!(
                "Expected project discovery to fail because of the requires-python major version that is larger than 255."
            ));
        };

        assert_error_eq(
            &error,
            "Invalid `requires-python` version specifier (`/app/pyproject.toml`): The major version `999` is larger than the maximum supported value 255",
        );

        Ok(())
    }

    /// Compares the error message with Windows path separators normalized to `/`.
    #[track_caller]
    fn assert_error_eq(error: &ProjectMetadataError, message: &str) {
        assert_eq!(error.to_string().replace('\\', "/"), message);
    }

    /// Redacts the snapshotted `root` path so snapshots are OS-independent.
    fn with_escaped_paths<R>(f: impl FnOnce() -> R) -> R {
        let mut settings = insta::Settings::clone_current();
        settings.add_dynamic_redaction(".root", |content, _path| {
            content.as_str().unwrap().replace('\\', "/")
        });

        settings.bind(f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata/settings.rs | crates/ty_project/src/metadata/settings.rs | use std::sync::Arc;
use ruff_db::files::File;
use ty_combine::Combine;
use ty_python_semantic::AnalysisSettings;
use ty_python_semantic::lint::RuleSelection;
use crate::metadata::options::{InnerOverrideOptions, OutputFormat};
use crate::{Db, glob::IncludeExcludeFilter};
/// The resolved [`super::Options`] for the project.
///
/// Unlike [`super::Options`], the struct has default values filled in and
/// uses representations that are optimized for reads (instead of preserving the source representation).
/// It's also not required that this structure precisely resembles the TOML schema, although
/// it's encouraged to use a similar structure.
///
/// It's worth considering adding a salsa query for specific settings to
/// limit the blast radius when only some settings change. For example,
/// changing the terminal settings shouldn't invalidate any core type-checking queries.
/// This can be achieved by adding a salsa query for the type checking specific settings.
///
/// Settings that are part of [`ty_python_semantic::ProgramSettings`] are not included here.
#[derive(Clone, Debug, Eq, PartialEq, get_size2::GetSize)]
pub struct Settings {
    /// The project-wide rule selection, shared cheaply via `Arc`.
    pub(super) rules: Arc<RuleSelection>,
    pub(super) terminal: TerminalSettings,
    pub(super) src: SrcSettings,
    pub(super) analysis: AnalysisSettings,

    /// Settings for configuration overrides that apply to specific file patterns.
    ///
    /// Each override can specify include/exclude patterns and rule configurations
    /// that apply to matching files. Multiple overrides can match the same file,
    /// with later overrides taking precedence.
    pub(super) overrides: Vec<Override>,
}
impl Settings {
    /// The project-wide rule selection.
    pub fn rules(&self) -> &RuleSelection {
        &self.rules
    }

    /// Settings controlling which files belong to the project.
    pub fn src(&self) -> &SrcSettings {
        &self.src
    }

    /// Returns a shared handle to the rule selection.
    ///
    /// This is a cheap reference-count bump, not a deep copy of the selection.
    pub fn to_rules(&self) -> Arc<RuleSelection> {
        // `Arc::clone` (instead of `self.rules.clone()`) makes the cheap-clone
        // intent explicit in the source.
        Arc::clone(&self.rules)
    }

    /// Terminal/output related settings.
    pub fn terminal(&self) -> &TerminalSettings {
        &self.terminal
    }

    /// Configuration overrides that apply to specific file patterns.
    pub fn overrides(&self) -> &[Override] {
        &self.overrides
    }

    /// Analysis settings shared with the semantic analyzer.
    pub fn analysis(&self) -> &AnalysisSettings {
        &self.analysis
    }
}
/// Settings that affect terminal output.
#[derive(Debug, Clone, PartialEq, Eq, Default, get_size2::GetSize)]
pub struct TerminalSettings {
    pub output_format: OutputFormat,
    // NOTE(review): presumably escalates warnings to a failing exit code —
    // confirm against the CLI's exit-code handling.
    pub error_on_warning: bool,
}

/// Settings determining which files belong to the project's source set.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
pub struct SrcSettings {
    /// Whether ignore files are respected during file discovery.
    pub respect_ignore_files: bool,
    /// Include/exclude patterns selecting the project's files.
    pub files: IncludeExcludeFilter,
}

/// A single configuration override that applies to files matching specific patterns.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
pub struct Override {
    /// File pattern filter to determine which files this override applies to.
    pub(super) files: IncludeExcludeFilter,

    /// The raw options as specified in the configuration (minus `include` and `exclude`).
    /// Necessary to merge multiple overrides if necessary.
    pub(super) options: Arc<InnerOverrideOptions>,

    /// Pre-resolved rule selection for this override alone.
    /// Used for efficient lookup when only this override matches a file.
    pub(super) settings: Arc<OverrideSettings>,
}
impl Override {
    /// Returns whether this override applies to the given file path.
    pub fn matches_file(&self, path: &ruff_db::system::SystemPath) -> bool {
        use crate::glob::{GlobFilterCheckMode, IncludeResult};

        // Ad-hoc check: evaluate this override's filter against the path alone.
        let result = self.files.is_file_included(path, GlobFilterCheckMode::Adhoc);
        matches!(result, IncludeResult::Included)
    }
}
/// Resolves the settings for a given file.
///
/// Returns [`FileSettings::Global`] for virtual/vendored files and for files
/// that no override matches; otherwise the (merged) override settings.
#[salsa::tracked(returns(ref), heap_size=ruff_memory_usage::heap_size)]
pub(crate) fn file_settings(db: &dyn Db, file: File) -> FileSettings {
    let settings = db.project().settings(db);

    let path = match file.path(db) {
        ruff_db::files::FilePath::System(path) => path,
        // Virtual and vendored files have no system path to match overrides against.
        ruff_db::files::FilePath::SystemVirtual(_) | ruff_db::files::FilePath::Vendored(_) => {
            return FileSettings::Global;
        }
    };

    let mut matching_overrides = settings
        .overrides()
        .iter()
        .filter(|over| over.matches_file(path));

    let Some(first) = matching_overrides.next() else {
        // If the file matches no override, it uses the global settings.
        return FileSettings::Global;
    };

    let Some(second) = matching_overrides.next() else {
        tracing::debug!("Applying override for file `{path}`: {}", first.files);
        // If the file matches only one override, return that override's settings.
        return FileSettings::File(Arc::clone(&first.settings));
    };

    // Two or more overrides match: collect them (in declaration order) for merging.
    // The filter list for the debug log is only built when DEBUG logging is enabled.
    let mut filters = tracing::enabled!(tracing::Level::DEBUG)
        .then(|| format!("({}), ({})", first.files, second.files));
    let mut overrides = vec![Arc::clone(&first.options), Arc::clone(&second.options)];

    for over in matching_overrides {
        use std::fmt::Write;

        if let Some(filters) = &mut filters {
            let _ = write!(filters, ", ({})", over.files);
        }
        overrides.push(Arc::clone(&over.options));
    }

    if let Some(filters) = &filters {
        tracing::debug!("Applying multiple overrides for file `{path}`: {filters}");
    }

    merge_overrides(db, overrides, ())
}
/// Merges multiple override options, caching the result.
///
/// Overrides often apply to multiple files. This query ensures that we avoid
/// resolving the same override combinations multiple times.
///
/// ## What's up with the `()` argument?
///
/// This is to make Salsa happy because it requires that queries with only a single argument
/// take a salsa-struct as argument, which isn't the case here. The `()` enables salsa's
/// automatic interning for the arguments.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
fn merge_overrides(db: &dyn Db, overrides: Vec<Arc<InnerOverrideOptions>>, _: ()) -> FileSettings {
    // Combine back-to-front so that later (higher-precedence) overrides win
    // when `combine_with` merges option fields.
    let mut overrides = overrides.into_iter().rev();
    let mut merged = (*overrides.next().unwrap()).clone();

    for option in overrides {
        merged.combine_with((*option).clone());
    }

    // Fill anything the overrides left unset from the project-level rules.
    merged
        .rules
        .combine_with(db.project().metadata(db).options().rules.clone());

    let Some(rules) = merged.rules else {
        return FileSettings::Global;
    };

    // It's okay to ignore the errors here because the rules are eagerly validated
    // during `overrides.to_settings()`.
    let rules = rules.to_rule_selection(db, &mut Vec::new());

    FileSettings::File(Arc::new(OverrideSettings { rules }))
}
/// The resolved settings for a file.
#[derive(Debug, Eq, PartialEq, Clone, get_size2::GetSize)]
pub enum FileSettings {
/// The file uses the global settings.
Global,
/// The file has specific override settings.
File(Arc<OverrideSettings>),
}
impl FileSettings {
pub fn rules<'a>(&'a self, db: &'a dyn Db) -> &'a RuleSelection {
match self {
FileSettings::Global => db.project().settings(db).rules(),
FileSettings::File(override_settings) => &override_settings.rules,
}
}
}
#[derive(Debug, Eq, PartialEq, Clone, get_size2::GetSize)]
pub struct OverrideSettings {
pub(super) rules: RuleSelection,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata/pyproject.rs | crates/ty_project/src/metadata/pyproject.rs | use crate::metadata::options::Options;
use crate::metadata::value::{RangedValue, ValueSource, ValueSourceGuard};
use pep440_rs::{Version, VersionSpecifiers, release_specifiers_to_ranges};
use ruff_python_ast::PythonVersion;
use serde::{Deserialize, Deserializer, Serialize};
use std::collections::Bound;
use std::ops::Deref;
use thiserror::Error;
/// A `pyproject.toml` as specified in PEP 517.
#[derive(Deserialize, Serialize, Debug, Default, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct PyProject {
/// PEP 621-compliant project metadata.
pub project: Option<Project>,
/// Tool-specific metadata.
pub tool: Option<Tool>,
}
impl PyProject {
pub(crate) fn ty(&self) -> Option<&Options> {
self.tool.as_ref().and_then(|tool| tool.ty.as_ref())
}
}
#[derive(Error, Debug)]
pub enum PyProjectError {
#[error(transparent)]
TomlSyntax(#[from] toml::de::Error),
}
impl PyProject {
pub(crate) fn from_toml_str(
content: &str,
source: ValueSource,
) -> Result<Self, PyProjectError> {
let _guard = ValueSourceGuard::new(source, true);
toml::from_str(content).map_err(PyProjectError::TomlSyntax)
}
}
/// PEP 621 project metadata (`project`).
///
/// See <https://packaging.python.org/en/latest/specifications/pyproject-toml>.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
pub struct Project {
/// The name of the project
///
/// Note: Intentionally option to be more permissive during deserialization.
/// `PackageMetadata::from_pyproject` reports missing names.
pub name: Option<RangedValue<PackageName>>,
/// The version of the project
pub version: Option<RangedValue<Version>>,
/// The Python versions this project is compatible with.
pub requires_python: Option<RangedValue<VersionSpecifiers>>,
}
impl Project {
pub(super) fn resolve_requires_python_lower_bound(
&self,
) -> Result<Option<RangedValue<PythonVersion>>, ResolveRequiresPythonError> {
let Some(requires_python) = self.requires_python.as_ref() else {
return Ok(None);
};
tracing::debug!("Resolving requires-python constraint: `{requires_python}`");
let ranges = release_specifiers_to_ranges((**requires_python).clone());
let Some((lower, _)) = ranges.bounding_range() else {
return Ok(None);
};
let version = match lower {
// Ex) `>=3.10.1` -> `>=3.10`
Bound::Included(version) => version,
// Ex) `>3.10.1` -> `>=3.10` or `>3.10` -> `>=3.10`
// The second example looks obscure at first but it is required because
// `3.10.1 > 3.10` is true but we only have two digits here. So including 3.10 is the
// right move. Overall, using `>` without a patch release is most likely bogus.
Bound::Excluded(version) => version,
// Ex) `<3.10` or ``
Bound::Unbounded => {
return Err(ResolveRequiresPythonError::NoLowerBound(
requires_python.to_string(),
));
}
};
// Take the major and minor version
let mut versions = version.release().iter().take(2);
let Some(major) = versions.next().copied() else {
return Ok(None);
};
let minor = versions.next().copied().unwrap_or_default();
tracing::debug!("Resolved requires-python constraint to: {major}.{minor}");
let major =
u8::try_from(major).map_err(|_| ResolveRequiresPythonError::TooLargeMajor(major))?;
let minor =
u8::try_from(minor).map_err(|_| ResolveRequiresPythonError::TooLargeMinor(minor))?;
Ok(Some(
requires_python
.clone()
.map_value(|_| PythonVersion::from((major, minor))),
))
}
}
#[derive(Debug, Error)]
pub enum ResolveRequiresPythonError {
#[error("The major version `{0}` is larger than the maximum supported value 255")]
TooLargeMajor(u64),
#[error("The minor version `{0}` is larger than the maximum supported value 255")]
TooLargeMinor(u64),
#[error(
"value `{0}` does not contain a lower bound. Add a lower bound to indicate the minimum compatible Python version (e.g., `>=3.13`) or specify a version in `environment.python-version`."
)]
NoLowerBound(String),
}
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
pub struct Tool {
pub ty: Option<Options>,
}
/// The normalized name of a package.
///
/// Converts the name to lowercase and collapses runs of `-`, `_`, and `.` down to a single `-`.
/// For example, `---`, `.`, and `__` are all converted to a single `-`.
///
/// See: <https://packaging.python.org/en/latest/specifications/name-normalization/>
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct PackageName(String);
impl PackageName {
/// Create a validated, normalized package name.
pub(crate) fn new(name: String) -> Result<Self, InvalidPackageNameError> {
if name.is_empty() {
return Err(InvalidPackageNameError::Empty);
}
if name.starts_with(['-', '_', '.']) {
return Err(InvalidPackageNameError::NonAlphanumericStart(
name.chars().next().unwrap(),
));
}
if name.ends_with(['-', '_', '.']) {
return Err(InvalidPackageNameError::NonAlphanumericEnd(
name.chars().last().unwrap(),
));
}
let Some(start) = name.find(|c: char| {
!c.is_ascii() || c.is_ascii_uppercase() || matches!(c, '-' | '_' | '.')
}) else {
return Ok(Self(name));
};
let (already_normalized, maybe_normalized) = name.split_at(start);
let mut normalized = String::with_capacity(name.len());
normalized.push_str(already_normalized);
let mut last = None;
for c in maybe_normalized.chars() {
if !c.is_ascii() {
return Err(InvalidPackageNameError::InvalidCharacter(c));
}
if c.is_ascii_uppercase() {
normalized.push(c.to_ascii_lowercase());
} else if matches!(c, '-' | '_' | '.') {
if matches!(last, Some('-' | '_' | '.')) {
// Only keep a single instance of `-`, `_` and `.`
} else {
normalized.push('-');
}
} else {
normalized.push(c);
}
last = Some(c);
}
Ok(Self(normalized))
}
/// Returns the underlying package name.
pub(crate) fn as_str(&self) -> &str {
&self.0
}
}
impl From<PackageName> for String {
fn from(value: PackageName) -> Self {
value.0
}
}
impl<'de> Deserialize<'de> for PackageName {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
Self::new(s).map_err(serde::de::Error::custom)
}
}
impl std::fmt::Display for PackageName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl Deref for PackageName {
type Target = str;
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
#[derive(Error, Debug)]
pub(crate) enum InvalidPackageNameError {
#[error("name must start with letter or number but it starts with '{0}'")]
NonAlphanumericStart(char),
#[error("name must end with letter or number but it ends with '{0}'")]
NonAlphanumericEnd(char),
#[error(
"valid name consists only of ASCII letters and numbers, period, underscore and hyphen but name contains '{0}'"
)]
InvalidCharacter(char),
#[error("name must not be empty")]
Empty,
}
#[cfg(test)]
mod tests {
use super::PackageName;
#[test]
fn normalize() {
let inputs = [
"friendly-bard",
"Friendly-Bard",
"FRIENDLY-BARD",
"friendly.bard",
"friendly_bard",
"friendly--bard",
"friendly-.bard",
"FrIeNdLy-._.-bArD",
];
for input in inputs {
assert_eq!(
PackageName::new(input.to_string()).unwrap(),
PackageName::new("friendly-bard".to_string()).unwrap(),
);
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata/value.rs | crates/ty_project/src/metadata/value.rs | use crate::Db;
use crate::glob::{
AbsolutePortableGlobPattern, PortableGlobError, PortableGlobKind, PortableGlobPattern,
};
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_macros::Combine;
use ruff_text_size::{TextRange, TextSize};
use serde::{Deserialize, Deserializer};
use std::cell::RefCell;
use std::cmp::Ordering;
use std::fmt;
use std::fmt::Formatter;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use toml::Spanned;
use ty_combine::Combine;
#[derive(Clone, Debug, get_size2::GetSize)]
pub enum ValueSource {
/// Value loaded from a project's configuration file.
///
/// Ideally, we'd use [`ruff_db::files::File`] but we can't because the database hasn't been
/// created when loading the configuration.
File(Arc<SystemPathBuf>),
/// The value comes from a CLI argument, while it's left open if specified using a short argument,
/// long argument (`--extra-paths`) or `--config key=value`.
Cli,
/// The value comes from the user's editor,
/// while it's left open if specified as a setting
/// or if the value was auto-discovered by the editor
/// (e.g., the Python environment)
Editor,
}
impl ValueSource {
pub fn file(&self) -> Option<&SystemPath> {
match self {
ValueSource::File(path) => Some(&**path),
ValueSource::Cli => None,
ValueSource::Editor => None,
}
}
pub const fn is_cli(&self) -> bool {
matches!(self, ValueSource::Cli)
}
}
thread_local! {
/// Serde doesn't provide any easy means to pass a value to a [`Deserialize`] implementation,
/// but we want to associate each deserialized [`RelativePath`] with the source from
/// which it originated. We use a thread local variable to work around this limitation.
///
/// Use the [`ValueSourceGuard`] to initialize the thread local before calling into any
/// deserialization code. It ensures that the thread local variable gets cleaned up
/// once deserialization is done (once the guard gets dropped).
static VALUE_SOURCE: RefCell<Option<(ValueSource, bool)>> = const { RefCell::new(None) };
}
/// Guard to safely change the [`VALUE_SOURCE`] for the current thread.
#[must_use]
pub(super) struct ValueSourceGuard {
prev_value: Option<(ValueSource, bool)>,
}
impl ValueSourceGuard {
pub(super) fn new(source: ValueSource, is_toml: bool) -> Self {
let prev = VALUE_SOURCE.replace(Some((source, is_toml)));
Self { prev_value: prev }
}
}
impl Drop for ValueSourceGuard {
fn drop(&mut self) {
VALUE_SOURCE.set(self.prev_value.take());
}
}
/// A value that "remembers" where it comes from (source) and its range in source.
///
/// ## Equality, Hash, and Ordering
/// The equality, hash, and ordering are solely based on the value. They disregard the value's range
/// or source.
///
/// This ensures that two resolved configurations are identical even if the position of a value has changed
/// or if the values were loaded from different sources.
#[derive(Clone, serde::Serialize, get_size2::GetSize)]
#[serde(transparent)]
pub struct RangedValue<T> {
value: T,
#[serde(skip)]
source: ValueSource,
/// The byte range of `value` in `source`.
///
/// Can be `None` because not all sources support a range.
/// For example, arguments provided on the CLI won't have a range attached.
#[serde(skip)]
range: Option<TextRange>,
}
#[cfg(feature = "schemars")]
impl<T> schemars::JsonSchema for RangedValue<T>
where
T: schemars::JsonSchema,
{
fn schema_name() -> std::borrow::Cow<'static, str> {
T::schema_name()
}
fn schema_id() -> std::borrow::Cow<'static, str> {
T::schema_id()
}
fn json_schema(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {
T::json_schema(generator)
}
fn _schemars_private_non_optional_json_schema(
generator: &mut schemars::SchemaGenerator,
) -> schemars::Schema {
T::_schemars_private_non_optional_json_schema(generator)
}
fn _schemars_private_is_option() -> bool {
T::_schemars_private_is_option()
}
}
impl<T> RangedValue<T> {
pub fn new(value: T, source: ValueSource) -> Self {
Self::with_range(value, source, TextRange::default())
}
pub fn cli(value: T) -> Self {
Self::with_range(value, ValueSource::Cli, TextRange::default())
}
pub fn python_extension(value: T) -> Self {
Self::with_range(value, ValueSource::Editor, TextRange::default())
}
pub fn with_range(value: T, source: ValueSource, range: TextRange) -> Self {
Self {
value,
range: Some(range),
source,
}
}
pub fn range(&self) -> Option<TextRange> {
self.range
}
pub fn source(&self) -> &ValueSource {
&self.source
}
#[must_use]
pub fn with_source(mut self, source: ValueSource) -> Self {
self.source = source;
self
}
#[must_use]
pub fn map_value<R>(self, f: impl FnOnce(T) -> R) -> RangedValue<R> {
RangedValue {
value: f(self.value),
source: self.source,
range: self.range,
}
}
pub fn into_inner(self) -> T {
self.value
}
}
impl<T> Combine for RangedValue<T>
where
T: Combine,
{
fn combine_with(&mut self, other: Self) {
self.value.combine_with(other.value);
}
}
impl<T> IntoIterator for RangedValue<T>
where
T: IntoIterator,
{
type Item = T::Item;
type IntoIter = T::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
// The type already has an `iter` method thanks to `Deref`.
#[expect(clippy::into_iter_without_iter)]
impl<'a, T> IntoIterator for &'a RangedValue<T>
where
&'a T: IntoIterator,
{
type Item = <&'a T as IntoIterator>::Item;
type IntoIter = <&'a T as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
// The type already has a `into_iter_mut` method thanks to `DerefMut`.
#[expect(clippy::into_iter_without_iter)]
impl<'a, T> IntoIterator for &'a mut RangedValue<T>
where
&'a mut T: IntoIterator,
{
type Item = <&'a mut T as IntoIterator>::Item;
type IntoIter = <&'a mut T as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
impl<T> fmt::Debug for RangedValue<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.value.fmt(f)
}
}
impl<T> fmt::Display for RangedValue<T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.value.fmt(f)
}
}
impl<T> Deref for RangedValue<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<T> DerefMut for RangedValue<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, U: ?Sized> AsRef<U> for RangedValue<T>
where
T: AsRef<U>,
{
fn as_ref(&self) -> &U {
self.value.as_ref()
}
}
impl<T: PartialEq> PartialEq for RangedValue<T> {
fn eq(&self, other: &Self) -> bool {
self.value.eq(&other.value)
}
}
impl<T: PartialEq<T>> PartialEq<T> for RangedValue<T> {
fn eq(&self, other: &T) -> bool {
self.value.eq(other)
}
}
impl<T: Eq> Eq for RangedValue<T> {}
impl<T: Hash> Hash for RangedValue<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.value.hash(state);
}
}
impl<T: PartialOrd> PartialOrd for RangedValue<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<T: PartialOrd<T>> PartialOrd<T> for RangedValue<T> {
fn partial_cmp(&self, other: &T) -> Option<Ordering> {
self.value.partial_cmp(other)
}
}
impl<T: Ord> Ord for RangedValue<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.value.cmp(&other.value)
}
}
impl<'de, T> Deserialize<'de> for RangedValue<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
VALUE_SOURCE.with_borrow(|source| {
let (source, has_span) = source.clone().unwrap();
if has_span {
let spanned: Spanned<T> = Spanned::deserialize(deserializer)?;
let span = spanned.span();
let range = TextRange::new(
TextSize::try_from(span.start)
.expect("Configuration file to be smaller than 4GB"),
TextSize::try_from(span.end)
.expect("Configuration file to be smaller than 4GB"),
);
Ok(Self::with_range(spanned.into_inner(), source, range))
} else {
Ok(Self::new(T::deserialize(deserializer)?, source))
}
})
}
}
/// A possibly relative path in a configuration file.
///
/// Relative paths in configuration files or from CLI options
/// require different anchoring:
///
/// * CLI: The path is relative to the current working directory
/// * Configuration file: The path is relative to the project's root.
#[derive(
Debug,
Clone,
serde::Serialize,
serde::Deserialize,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Combine,
get_size2::GetSize,
)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RelativePathBuf(RangedValue<SystemPathBuf>);
impl RelativePathBuf {
pub fn new(path: impl AsRef<SystemPath>, source: ValueSource) -> Self {
Self(RangedValue::new(path.as_ref().to_path_buf(), source))
}
pub fn cli(path: impl AsRef<SystemPath>) -> Self {
Self::new(path, ValueSource::Cli)
}
pub fn python_extension(path: impl AsRef<SystemPath>) -> Self {
Self::new(path, ValueSource::Editor)
}
/// Returns the relative path as specified by the user.
pub fn path(&self) -> &SystemPath {
&self.0
}
pub fn source(&self) -> &ValueSource {
self.0.source()
}
pub fn range(&self) -> Option<TextRange> {
self.0.range()
}
/// Returns the owned relative path.
pub fn into_path_buf(self) -> SystemPathBuf {
self.0.into_inner()
}
/// Resolves the absolute path for `self` based on its origin.
pub fn absolute_with_db(&self, db: &dyn Db) -> SystemPathBuf {
self.absolute(db.project().root(db), db.system())
}
/// Resolves the absolute path for `self` based on its origin.
pub fn absolute(&self, project_root: &SystemPath, system: &dyn System) -> SystemPathBuf {
let relative_to = match &self.0.source {
ValueSource::File(_) => project_root,
ValueSource::Cli | ValueSource::Editor => system.current_directory(),
};
SystemPath::absolute(&self.0, relative_to)
}
}
impl fmt::Display for RelativePathBuf {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
#[derive(
Debug,
Clone,
serde::Serialize,
serde::Deserialize,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Combine,
get_size2::GetSize,
)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RelativeGlobPattern(RangedValue<String>);
impl RelativeGlobPattern {
pub fn new(pattern: impl AsRef<str>, source: ValueSource) -> Self {
Self(RangedValue::new(pattern.as_ref().to_string(), source))
}
pub fn cli(pattern: impl AsRef<str>) -> Self {
Self::new(pattern, ValueSource::Cli)
}
pub(crate) fn source(&self) -> &ValueSource {
self.0.source()
}
pub(crate) fn range(&self) -> Option<TextRange> {
self.0.range()
}
/// Resolves the absolute pattern for `self` based on its origin.
pub(crate) fn absolute(
&self,
project_root: &SystemPath,
system: &dyn System,
kind: PortableGlobKind,
) -> Result<AbsolutePortableGlobPattern, PortableGlobError> {
let relative_to = match &self.0.source {
ValueSource::File(_) => project_root,
ValueSource::Cli | ValueSource::Editor => system.current_directory(),
};
let pattern = PortableGlobPattern::parse(&self.0, kind)?;
Ok(pattern.into_absolute(relative_to))
}
}
impl std::fmt::Display for RelativeGlobPattern {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata/options.rs | crates/ty_project/src/metadata/options.rs | use crate::Db;
use crate::glob::{ExcludeFilter, IncludeExcludeFilter, IncludeFilter, PortableGlobKind};
use crate::metadata::settings::{OverrideSettings, SrcSettings};
use super::settings::{Override, Settings, TerminalSettings};
use crate::metadata::value::{
RangedValue, RelativeGlobPattern, RelativePathBuf, ValueSource, ValueSourceGuard,
};
use anyhow::Context;
use ordermap::OrderMap;
use ruff_db::RustDoc;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig, Severity,
Span, SubDiagnostic, SubDiagnosticSeverity,
};
use ruff_db::files::system_path_to_file;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use ruff_macros::{Combine, OptionsMetadata, RustDoc};
use ruff_options_metadata::{OptionSet, OptionsMetadata, Visit};
use ruff_python_ast::PythonVersion;
use rustc_hash::FxHasher;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::fmt::{self, Debug, Display};
use std::hash::BuildHasherDefault;
use std::ops::Deref;
use std::sync::Arc;
use thiserror::Error;
use ty_combine::Combine;
use ty_module_resolver::{SearchPathSettings, SearchPathSettingsError, SearchPaths};
use ty_python_semantic::lint::{Level, LintSource, RuleSelection};
use ty_python_semantic::{
AnalysisSettings, MisconfigurationMode, ProgramSettings, PythonEnvironment, PythonPlatform,
PythonVersionFileSource, PythonVersionSource, PythonVersionWithSource, SitePackagesPaths,
SysPrefixPathOrigin,
};
use ty_static::EnvVars;
#[derive(
Debug,
Default,
Clone,
PartialEq,
Eq,
Combine,
Serialize,
Deserialize,
OptionsMetadata,
get_size2::GetSize,
)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Options {
/// Configures the type checking environment.
#[option_group]
#[serde(skip_serializing_if = "Option::is_none")]
pub environment: Option<EnvironmentOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
#[option_group]
pub src: Option<SrcOptions>,
/// Configures the enabled rules and their severity.
///
/// See [the rules documentation](https://ty.dev/rules) for a list of all available rules.
///
/// Valid severities are:
///
/// * `ignore`: Disable the rule.
/// * `warn`: Enable the rule and create a warning diagnostic.
/// * `error`: Enable the rule and create an error diagnostic.
/// ty will exit with a non-zero code if any error diagnostics are emitted.
#[serde(skip_serializing_if = "Option::is_none")]
#[option(
default = r#"{...}"#,
value_type = r#"dict[RuleName, "ignore" | "warn" | "error"]"#,
example = r#"
[tool.ty.rules]
possibly-unresolved-reference = "warn"
division-by-zero = "ignore"
"#
)]
pub rules: Option<Rules>,
#[serde(skip_serializing_if = "Option::is_none")]
#[option_group]
pub terminal: Option<TerminalOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
#[option_group]
pub analysis: Option<AnalysisOptions>,
/// Override configurations for specific file patterns.
///
/// Each override specifies include/exclude patterns and rule configurations
/// that apply to matching files. Multiple overrides can match the same file,
/// with later overrides taking precedence.
#[serde(skip_serializing_if = "Option::is_none")]
#[option_group]
pub overrides: Option<OverridesOptions>,
}
impl Options {
pub fn from_toml_str(content: &str, source: ValueSource) -> Result<Self, TyTomlError> {
let _guard = ValueSourceGuard::new(source, true);
let options = toml::from_str(content)?;
Ok(options)
}
pub fn deserialize_with<'de, D>(source: ValueSource, deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let _guard = ValueSourceGuard::new(source, false);
Self::deserialize(deserializer)
}
pub(crate) fn to_program_settings(
&self,
project_root: &SystemPath,
project_name: &str,
system: &dyn System,
vendored: &VendoredFileSystem,
misconfiguration_mode: MisconfigurationMode,
) -> anyhow::Result<ProgramSettings> {
let environment = self.environment.or_default();
let options_python_version =
environment
.python_version
.as_ref()
.map(|ranged_version| PythonVersionWithSource {
version: **ranged_version,
source: match ranged_version.source() {
ValueSource::Cli => PythonVersionSource::Cli,
ValueSource::File(path) => PythonVersionSource::ConfigFile(
PythonVersionFileSource::new(path.clone(), ranged_version.range()),
),
ValueSource::Editor => PythonVersionSource::Editor,
},
});
let python_platform = environment
.python_platform
.as_deref()
.cloned()
.unwrap_or_else(|| {
let default = PythonPlatform::default();
tracing::info!("Defaulting to python-platform `{default}`");
default
});
let python_environment = if let Some(python_path) = environment.python.as_ref() {
let origin = match python_path.source() {
ValueSource::Cli => SysPrefixPathOrigin::PythonCliFlag,
ValueSource::File(path) => {
SysPrefixPathOrigin::ConfigFileSetting(path.clone(), python_path.range())
}
ValueSource::Editor => SysPrefixPathOrigin::Editor,
};
PythonEnvironment::new(python_path.absolute(project_root, system), origin, system)
.map_err(anyhow::Error::from)
.map(Some)
} else {
PythonEnvironment::discover(project_root, system)
.context("Failed to discover local Python environment")
};
// If in safe-mode, fallback to None if this fails instead of erroring.
let python_environment = match python_environment {
Ok(python_environment) => python_environment,
Err(err) => {
if misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!("Default settings failed to discover local Python environment");
None
} else {
return Err(err);
}
}
};
let self_site_packages = self_environment_search_paths(
python_environment
.as_ref()
.map(ty_python_semantic::PythonEnvironment::origin)
.cloned(),
system,
)
.unwrap_or_default();
let site_packages_paths = if let Some(python_environment) = python_environment.as_ref() {
let site_packages_paths = python_environment
.site_packages_paths(system)
.context("Failed to discover the site-packages directory");
let site_packages_paths = match site_packages_paths {
Ok(paths) => paths,
Err(err) => {
if misconfiguration_mode == MisconfigurationMode::UseDefault {
tracing::debug!(
"Default settings failed to discover site-packages directory"
);
SitePackagesPaths::default()
} else {
return Err(err);
}
}
};
self_site_packages.concatenate(site_packages_paths)
} else {
tracing::debug!("No virtual environment found");
self_site_packages
};
let real_stdlib_path = python_environment.as_ref().and_then(|python_environment| {
// For now this is considered non-fatal, we don't Need this for anything.
python_environment.real_stdlib_path(system).map_err(|err| {
tracing::info!("No real stdlib found, stdlib goto-definition may have degraded quality: {err}");
}).ok()
});
let python_version = options_python_version
.or_else(|| {
python_environment
.as_ref()?
.python_version_from_metadata()
.cloned()
})
.or_else(|| site_packages_paths.python_version_from_layout())
.unwrap_or_default();
// Safe mode is handled inside this function, so we just assume this can't fail
let search_paths = self.to_search_paths(
project_root,
project_name,
site_packages_paths,
real_stdlib_path,
system,
vendored,
misconfiguration_mode,
)?;
tracing::info!(
"Python version: Python {python_version}, platform: {python_platform}",
python_version = python_version.version
);
Ok(ProgramSettings {
python_version,
python_platform,
search_paths,
})
}
#[expect(clippy::too_many_arguments)]
fn to_search_paths(
&self,
project_root: &SystemPath,
project_name: &str,
site_packages_paths: SitePackagesPaths,
real_stdlib_path: Option<SystemPathBuf>,
system: &dyn System,
vendored: &VendoredFileSystem,
misconfiguration_mode: MisconfigurationMode,
) -> Result<SearchPaths, SearchPathSettingsError> {
let environment = self.environment.or_default();
let src = self.src.or_default();
#[allow(deprecated)]
let src_roots = if let Some(roots) = environment
.root
.as_deref()
.or_else(|| Some(std::slice::from_ref(src.root.as_ref()?)))
{
roots
.iter()
.map(|root| root.absolute(project_root, system))
.collect()
} else {
let mut roots = vec![];
let src = project_root.join("src");
if system.is_directory(&src) {
// Default to `src` and the project root if `src` exists and the root hasn't been specified.
// This corresponds to the `src-layout`
tracing::debug!(
"Including `.` and `./src` in `environment.root` because a `./src` directory exists"
);
roots.push(src);
} else if system.is_directory(&project_root.join(project_name).join(project_name)) {
// `src-layout` but when the folder isn't called `src` but has the same name as the project.
// For example, the "src" folder for `psycopg` is called `psycopg` and the python files are in `psycopg/psycopg/_adapters_map.py`
tracing::debug!(
"Including `.` and `/{project_name}` in `environment.root` because a `./{project_name}/{project_name}` directory exists"
);
roots.push(project_root.join(project_name));
} else {
// Default to a [flat project structure](https://packaging.python.org/en/latest/discussions/src-layout-vs-flat-layout/).
tracing::debug!("Including `.` in `environment.root`");
}
let python = project_root.join("python");
if system.is_directory(&python)
&& !system.is_file(&python.join("__init__.py"))
&& !system.is_file(&python.join("__init__.pyi"))
&& !roots.contains(&python)
{
// If a `./python` directory exists, include it as a source root. This is the recommended layout
// for maturin-based rust/python projects [1].
//
// https://github.com/PyO3/maturin/blob/979fe1db42bb9e58bc150fa6fc45360b377288bf/README.md?plain=1#L88-L99
tracing::debug!(
"Including `./python` in `environment.root` because a `./python` directory exists"
);
roots.push(python);
}
// The project root should always be included, and should always come
// after any subdirectories such as `./src`, `./tests` and/or `./python`.
roots.push(project_root.to_path_buf());
roots
};
// collect the existing site packages
let mut extra_paths: Vec<SystemPathBuf> = environment
.extra_paths
.as_deref()
.unwrap_or_default()
.iter()
.map(|path| path.absolute(project_root, system))
.collect();
// read all the paths off the PYTHONPATH environment variable, check
// they exist as a directory, and add them to the vec of extra_paths
// as they should be checked before site-packages just like python
// interpreter does
if let Ok(python_path) = system.env_var(EnvVars::PYTHONPATH) {
for path in std::env::split_paths(python_path.as_str()) {
let path = match SystemPathBuf::from_path_buf(path) {
Ok(path) => path,
Err(path) => {
tracing::debug!(
"Skipping `{path}` listed in `PYTHONPATH` because the path is not valid UTF-8",
path = path.display()
);
continue;
}
};
let abspath = SystemPath::absolute(path, system.current_directory());
if !system.is_directory(&abspath) {
tracing::debug!(
"Skipping `{abspath}` listed in `PYTHONPATH` because the path doesn't exist or isn't a directory"
);
continue;
}
tracing::debug!(
"Adding `{abspath}` from the `PYTHONPATH` environment variable to `extra_paths`"
);
extra_paths.push(abspath);
}
}
let settings = SearchPathSettings {
extra_paths,
src_roots,
custom_typeshed: environment
.typeshed
.as_ref()
.map(|path| path.absolute(project_root, system)),
site_packages_paths: site_packages_paths.into_vec(),
real_stdlib_path,
misconfiguration_mode,
};
settings.to_search_paths(system, vendored)
}
/// Resolves these options into the project's [`Settings`], collecting any
/// non-fatal [`OptionDiagnostic`]s (e.g. deprecation warnings) along the way.
///
/// # Errors
///
/// Returns [`ToSettingsError`] if the `src` or `overrides` options fail to
/// resolve (e.g. invalid glob patterns). The error carries the already-resolved
/// output format and the current color choice so it can be rendered consistently.
pub(crate) fn to_settings(
    &self,
    db: &dyn Db,
    project_root: &SystemPath,
) -> Result<(Settings, Vec<OptionDiagnostic>), ToSettingsError> {
    let mut diagnostics = Vec::new();
    let rules = self.to_rule_selection(db, &mut diagnostics);
    // Resolve the terminal settings first: the output format is needed below to
    // construct `ToSettingsError` for any later resolution failure.
    let terminal_options = self.terminal.or_default();
    let terminal = TerminalSettings {
        output_format: terminal_options
            .output_format
            .as_deref()
            .copied()
            .unwrap_or_default(),
        error_on_warning: terminal_options.error_on_warning.unwrap_or_default(),
    };
    let src_options = self.src.or_default();
    // `src.root` is deprecated in favor of `environment.root`: emit a warning,
    // anchored to the setting's source location when the source file is available.
    #[allow(deprecated)]
    if let Some(src_root) = src_options.root.as_ref() {
        let mut diagnostic = OptionDiagnostic::new(
            DiagnosticId::DeprecatedSetting,
            "The `src.root` setting is deprecated. Use `environment.root` instead.".to_string(),
            Severity::Warning,
        );
        if let Some(file) = src_root
            .source()
            .file()
            .and_then(|path| system_path_to_file(db, path).ok())
        {
            diagnostic = diagnostic.with_annotation(Some(Annotation::primary(
                Span::from(file).with_optional_range(src_root.range()),
            )));
        }
        // If both settings are present, tell the user which one wins.
        if self.environment.or_default().root.is_some() {
            diagnostic = diagnostic.sub(SubDiagnostic::new(
                SubDiagnosticSeverity::Info,
                "The `src.root` setting was ignored in favor of the `environment.root` setting",
            ));
        }
        diagnostics.push(diagnostic);
    }
    let src = src_options
        .to_settings(db, project_root, &mut diagnostics)
        .map_err(|err| ToSettingsError {
            diagnostic: err,
            output_format: terminal.output_format,
            color: colored::control::SHOULD_COLORIZE.should_colorize(),
        })?;
    let analysis = self.analysis.or_default().to_settings();
    let overrides = self
        .to_overrides_settings(db, project_root, &mut diagnostics)
        .map_err(|err| ToSettingsError {
            diagnostic: err,
            output_format: terminal.output_format,
            color: colored::control::SHOULD_COLORIZE.should_colorize(),
        })?;
    let settings = Settings {
        rules: Arc::new(rules),
        terminal,
        src,
        analysis,
        overrides,
    };
    Ok((settings, diagnostics))
}
/// Resolves the configured rules (falling back to the defaults when none are
/// configured) into a [`RuleSelection`], recording any diagnostics that arise
/// during resolution into `diagnostics`.
#[must_use]
fn to_rule_selection(
    &self,
    db: &dyn Db,
    diagnostics: &mut Vec<OptionDiagnostic>,
) -> RuleSelection {
    let rules = self.rules.or_default();
    rules.to_rule_selection(db, diagnostics)
}
/// Resolves each `[[overrides]]` section into an [`Override`].
///
/// Sections that resolve to `None` (i.e. apply nothing) are skipped; the first
/// hard error aborts resolution and is returned to the caller.
fn to_overrides_settings(
    &self,
    db: &dyn Db,
    project_root: &SystemPath,
    diagnostics: &mut Vec<OptionDiagnostic>,
) -> Result<Vec<Override>, Box<OptionDiagnostic>> {
    let override_options = &**self.overrides.or_default();
    override_options
        .iter()
        .map(|override_option| {
            override_option.to_override(db, project_root, self.rules.as_ref(), diagnostics)
        })
        // `to_override` returns `Ok(None)` for no-op sections: drop those while
        // propagating the first `Err` (collect short-circuits on error).
        .filter_map(Result::transpose)
        .collect()
}
}
/// Return the site-packages from the environment ty is installed in, as derived from ty's
/// executable.
///
/// If there's an existing environment with an origin that does not allow including site-packages
/// from ty's environment, discovery of ty's environment is skipped and [`None`] is returned.
///
/// Since ty may be executed from an arbitrary non-Python location, errors during discovery of ty's
/// environment are not raised, instead [`None`] is returned.
fn self_environment_search_paths(
    existing_origin: Option<SysPrefixPathOrigin>,
    system: &dyn System,
) -> Option<SitePackagesPaths> {
    // Respect environments whose origin forbids concatenation with ty's own environment.
    if existing_origin.is_some_and(|origin| !origin.allows_concatenation_with_self_environment()) {
        return None;
    }
    // `current_exe` can fail (e.g. the executable was removed); treat as "no environment".
    let Ok(exe_path) = std::env::current_exe() else {
        return None;
    };
    // `from_std_path` returns `None` if the executable path isn't valid UTF-8.
    let ty_path = SystemPath::from_std_path(exe_path.as_path())?;
    let environment = PythonEnvironment::new(ty_path, SysPrefixPathOrigin::SelfEnvironment, system)
        .inspect_err(|err| tracing::debug!("Failed to discover ty's environment: {err}"))
        .ok()?;
    let search_paths = environment
        .site_packages_paths(system)
        .inspect_err(|err| {
            tracing::debug!("Failed to discover site-packages in ty's environment: {err}");
        })
        .ok()?;
    // Bug fix: only claim success once site-packages discovery actually succeeded.
    // Previously this message was logged even when discovery failed and `None`
    // was returned, which made the debug log misleading.
    tracing::debug!("Using site-packages from ty's environment");
    Some(search_paths)
}
#[derive(
    Debug,
    Default,
    Clone,
    Eq,
    PartialEq,
    Combine,
    Serialize,
    Deserialize,
    OptionsMetadata,
    get_size2::GetSize,
)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct EnvironmentOptions {
    /// The root paths of the project, used for finding first-party modules.
    ///
    /// Accepts a list of directory paths searched in priority order (first has highest priority).
    ///
    /// If left unspecified, ty will try to detect common project layouts and initialize `root` accordingly:
    ///
    /// * if a `./src` directory exists, include `.` and `./src` in the first party search path (src layout or flat)
    /// * if a `./<project-name>/<project-name>` directory exists, include `.` and `./<project-name>` in the first party search path
    /// * otherwise, default to `.` (flat layout)
    ///
    /// Additionally, if a `./python` directory exists and is not a package (i.e. it does not contain an `__init__.py` or `__init__.pyi` file),
    /// it will also be included in the first party search path.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"null"#,
        value_type = "list[str]",
        example = r#"
# Multiple directories (priority order)
root = ["./src", "./lib", "./vendor"]
"#
    )]
    pub root: Option<Vec<RelativePathBuf>>,
    /// Specifies the version of Python that will be used to analyze the source code.
    /// The version should be specified as a string in the format `M.m` where `M` is the major version
    /// and `m` is the minor (e.g. `"3.0"` or `"3.6"`).
    /// If a version is provided, ty will generate errors if the source code makes use of language features
    /// that are not supported in that version.
    ///
    /// If a version is not specified, ty will try the following techniques in order of preference
    /// to determine a value:
    /// 1. Check for the `project.requires-python` setting in a `pyproject.toml` file
    ///    and use the minimum version from the specified range
    /// 2. Check for an activated or configured Python environment
    ///    and attempt to infer the Python version of that environment
    /// 3. Fall back to the default value (see below)
    ///
    /// For some language features, ty can also understand conditionals based on comparisons
    /// with `sys.version_info`. These are commonly found in typeshed, for example,
    /// to reflect the differing contents of the standard library across Python versions.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#""3.14""#,
        value_type = r#""3.7" | "3.8" | "3.9" | "3.10" | "3.11" | "3.12" | "3.13" | "3.14" | <major>.<minor>"#,
        example = r#"
python-version = "3.12"
"#
    )]
    pub python_version: Option<RangedValue<PythonVersion>>,
    /// Specifies the target platform that will be used to analyze the source code.
    /// If specified, ty will understand conditions based on comparisons with `sys.platform`, such
    /// as are commonly found in typeshed to reflect the differing contents of the standard library across platforms.
    /// If `all` is specified, ty will assume that the source code can run on any platform.
    ///
    /// If no platform is specified, ty will use the current platform:
    /// - `win32` for Windows
    /// - `darwin` for macOS
    /// - `android` for Android
    /// - `ios` for iOS
    /// - `linux` for everything else
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"<current-platform>"#,
        value_type = r#""win32" | "darwin" | "android" | "ios" | "linux" | "all" | str"#,
        example = r#"
# Tailor type stubs and conditionalized type definitions to windows.
python-platform = "win32"
"#
    )]
    pub python_platform: Option<RangedValue<PythonPlatform>>,
    /// User-provided paths that should take first priority in module resolution.
    ///
    /// This is an advanced option that should usually only be used for first-party or third-party
    /// modules that are not installed into your Python environment in a conventional way.
    /// Use the `python` option to specify the location of your Python environment.
    ///
    /// This option is similar to mypy's `MYPYPATH` environment variable and pyright's `stubPath`
    /// configuration setting.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"[]"#,
        value_type = "list[str]",
        example = r#"
extra-paths = ["./shared/my-search-path"]
"#
    )]
    pub extra_paths: Option<Vec<RelativePathBuf>>,
    /// Optional path to a "typeshed" directory on disk for us to use for standard-library types.
    /// If this is not provided, we will fall back to our vendored typeshed stubs for the stdlib,
    /// bundled as a zip file in the binary.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"null"#,
        value_type = "str",
        example = r#"
typeshed = "/path/to/custom/typeshed"
"#
    )]
    pub typeshed: Option<RelativePathBuf>,
    /// Path to your project's Python environment or interpreter.
    ///
    /// ty uses the `site-packages` directory of your project's Python environment
    /// to resolve third-party (and, in some cases, first-party) imports in your code.
    ///
    /// If you're using a project management tool such as uv, you should not generally need
    /// to specify this option, as commands such as `uv run` will set the `VIRTUAL_ENV`
    /// environment variable to point to your project's virtual environment. ty can also infer
    /// the location of your environment from an activated Conda environment, and will look for
    /// a `.venv` directory in the project root if none of the above apply.
    ///
    /// Passing a path to a Python executable is supported, but passing a path to a dynamic executable
    /// (such as a shim) is not currently supported.
    ///
    /// This option can be used to point to virtual or system Python environments.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"null"#,
        value_type = "str",
        example = r#"
python = "./custom-venv-location/.venv"
"#
    )]
    pub python: Option<RelativePathBuf>,
}
#[derive(
    Debug,
    Default,
    Clone,
    Eq,
    PartialEq,
    Combine,
    Serialize,
    Deserialize,
    OptionsMetadata,
    get_size2::GetSize,
)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SrcOptions {
    /// The root of the project, used for finding first-party modules.
    ///
    /// Deprecated: use `environment.root` instead.
    ///
    /// If left unspecified, ty will try to detect common project layouts and initialize `src.root` accordingly:
    ///
    /// * if a `./src` directory exists, include `.` and `./src` in the first party search path (src layout or flat)
    /// * if a `./<project-name>/<project-name>` directory exists, include `.` and `./<project-name>` in the first party search path
    /// * otherwise, default to `.` (flat layout)
    ///
    /// Additionally, if a `./python` directory exists and is not a package (i.e. it does not contain an `__init__.py` file),
    /// it will also be included in the first party search path.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"null"#,
        value_type = "str",
        example = r#"
root = "./app"
"#
    )]
    #[deprecated(note = "Use `environment.root` instead.")]
    pub root: Option<RelativePathBuf>,
    /// Whether to automatically exclude files that are ignored by `.ignore`,
    /// `.gitignore`, `.git/info/exclude`, and global `gitignore` files.
    /// Enabled by default.
    #[option(
        default = r#"true"#,
        value_type = r#"bool"#,
        example = r#"
respect-ignore-files = false
"#
    )]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub respect_ignore_files: Option<bool>,
    /// A list of files and directories to check. The `include` option
    /// follows a similar syntax to `.gitignore` but reversed:
    /// Including a file or directory will make it so that it (and its contents)
    /// are type checked.
    ///
    /// - `./src/` matches only a directory
    /// - `./src` matches both files and directories
    /// - `src` matches a file or directory named `src`
    /// - `*` matches any (possibly empty) sequence of characters (except `/`).
    /// - `**` matches zero or more path components.
    ///   This sequence **must** form a single path component, so both `**a` and `b**` are invalid and will result in an error.
    ///   A sequence of more than two consecutive `*` characters is also invalid.
    /// - `?` matches any single character except `/`
    /// - `[abc]` matches any character inside the brackets. Character sequences can also specify ranges of characters, as ordered by Unicode,
    ///   so e.g. `[0-9]` specifies any character between `0` and `9` inclusive. An unclosed bracket is invalid.
    ///
    /// All paths are anchored relative to the project root (`src` only
    /// matches `<project_root>/src` and not `<project_root>/test/src`).
    ///
    /// `exclude` takes precedence over `include`.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[option(
        default = r#"null"#,
        value_type = r#"list[str]"#,
        example = r#"
include = [
"src",
"tests",
]
"#
    )]
    pub include: Option<RangedValue<Vec<RelativeGlobPattern>>>,
    /// A list of file and directory patterns to exclude from type checking.
    ///
    /// Patterns follow a syntax similar to `.gitignore`:
    ///
    /// - `./src/` matches only a directory
    /// - `./src` matches both files and directories
    /// - `src` matches files or directories named `src`
    /// - `*` matches any (possibly empty) sequence of characters (except `/`).
    /// - `**` matches zero or more path components.
    ///   This sequence **must** form a single path component, so both `**a` and `b**` are invalid and will result in an error.
    ///   A sequence of more than two consecutive `*` characters is also invalid.
    /// - `?` matches any single character except `/`
    /// - `[abc]` matches any character inside the brackets. Character sequences can also specify ranges of characters, as ordered by Unicode,
    ///   so e.g. `[0-9]` specifies any character between `0` and `9` inclusive. An unclosed bracket is invalid.
    /// - `!pattern` negates a pattern (undoes the exclusion of files that would otherwise be excluded)
    ///
    /// All paths are anchored relative to the project root (`src` only
    /// matches `<project_root>/src` and not `<project_root>/test/src`).
    /// To exclude any directory or file named `src`, use `**/src` instead.
    ///
    /// By default, ty excludes commonly ignored directories:
    ///
    /// - `**/.bzr/`
    /// - `**/.direnv/`
    /// - `**/.eggs/`
    /// - `**/.git/`
    /// - `**/.git-rewrite/`
    /// - `**/.hg/`
    /// - `**/.mypy_cache/`
    /// - `**/.nox/`
    /// - `**/.pants.d/`
    /// - `**/.pytype/`
    /// - `**/.ruff_cache/`
    /// - `**/.svn/`
    /// - `**/.tox/`
    /// - `**/.venv/`
    /// - `**/__pypackages__/`
    /// - `**/_build/`
    /// - `**/buck-out/`
    /// - `**/dist/`
    /// - `**/node_modules/`
    /// - `**/venv/`
    ///
    /// You can override any default exclude by using a negated pattern. For example,
    /// to re-include `dist` use `exclude = ["!dist"]`
    #[option(
        default = r#"null"#,
        value_type = r#"list[str]"#,
        example = r#"
exclude = [
"generated",
"*.proto",
"tests/fixtures/**",
"!tests/fixtures/important.py" # Include this one file
]
"#
    )]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exclude: Option<RangedValue<Vec<RelativeGlobPattern>>>,
}
impl SrcOptions {
fn to_settings(
&self,
db: &dyn Db,
project_root: &SystemPath,
diagnostics: &mut Vec<OptionDiagnostic>,
) -> Result<SrcSettings, Box<OptionDiagnostic>> {
let include = build_include_filter(
db,
project_root,
self.include.as_ref(),
GlobFilterContext::SrcRoot,
diagnostics,
)?;
let exclude = build_exclude_filter(
db,
project_root,
self.exclude.as_ref(),
DEFAULT_SRC_EXCLUDES,
GlobFilterContext::SrcRoot,
)?;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/metadata/configuration_file.rs | crates/ty_project/src/metadata/configuration_file.rs | use std::sync::Arc;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use thiserror::Error;
use crate::metadata::value::ValueSource;
use super::options::{Options, TyTomlError};
/// A `ty.toml` configuration file with the options it contains.
pub(crate) struct ConfigurationFile {
    // Path of the `ty.toml` file this configuration was loaded from.
    path: SystemPathBuf,
    // The options parsed from the file's TOML contents.
    options: Options,
}
impl ConfigurationFile {
    /// Loads the configuration file at `path`.
    ///
    /// # Errors
    ///
    /// Returns an error if the file can't be read or if its contents are not
    /// valid `ty.toml` TOML.
    pub(crate) fn from_path(
        path: SystemPathBuf,
        system: &dyn System,
    ) -> Result<Self, ConfigurationFileError> {
        let ty_toml_str = system.read_to_string(&path).map_err(|source| {
            ConfigurationFileError::FileReadError {
                source,
                path: path.clone(),
            }
        })?;
        Self::parse(&ty_toml_str, path)
    }
    /// Loads the user-level configuration file if it exists.
    ///
    /// Returns `None` if the file does not exist or if the concept of user-level configurations
    /// doesn't exist on `system`.
    pub(crate) fn user(system: &dyn System) -> Result<Option<Self>, ConfigurationFileError> {
        let Some(configuration_directory) = system.user_config_directory() else {
            return Ok(None);
        };
        let ty_toml_path = configuration_directory.join("ty").join("ty.toml");
        tracing::debug!(
            "Searching for a user-level configuration at `{path}`",
            path = &ty_toml_path
        );
        // Unlike `from_path`, a read failure here (most commonly: the file
        // doesn't exist) is deliberately treated as "no user configuration"
        // rather than as a hard error.
        let Ok(ty_toml_str) = system.read_to_string(&ty_toml_path) else {
            return Ok(None);
        };
        Self::parse(&ty_toml_str, ty_toml_path).map(Some)
    }
    /// Parses `ty_toml_str` as the contents of the configuration file at `path`.
    ///
    /// Shared by [`Self::from_path`] and [`Self::user`] so the TOML parsing and
    /// error wrapping live in one place.
    fn parse(ty_toml_str: &str, path: SystemPathBuf) -> Result<Self, ConfigurationFileError> {
        match Options::from_toml_str(ty_toml_str, ValueSource::File(Arc::new(path.clone()))) {
            Ok(options) => Ok(Self { path, options }),
            Err(error) => Err(ConfigurationFileError::InvalidTyToml {
                source: Box::new(error),
                path,
            }),
        }
    }
    /// Returns the path to the configuration file.
    pub(crate) fn path(&self) -> &SystemPath {
        &self.path
    }
    /// Consumes `self` and returns the parsed options.
    pub(crate) fn into_options(self) -> Options {
        self.options
    }
}
/// Errors that can occur while reading or parsing a `ty.toml` configuration file.
#[derive(Debug, Error)]
pub enum ConfigurationFileError {
    /// The file was read successfully but its contents are not valid `ty.toml` TOML.
    #[error("{path} is not a valid `ty.toml`: {source}")]
    InvalidTyToml {
        source: Box<TyTomlError>,
        path: SystemPathBuf,
    },
    /// The file could not be read from disk.
    #[error("Failed to read `{path}`: {source}")]
    FileReadError {
        #[source]
        source: std::io::Error,
        path: SystemPathBuf,
    },
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/db/changes.rs | crates/ty_project/src/db/changes.rs | use crate::db::{Db, ProjectDatabase};
use crate::metadata::options::ProjectOptionsOverrides;
use crate::watch::{ChangeEvent, CreatedKind, DeletedKind};
use crate::{Project, ProjectMetadata};
use std::collections::BTreeSet;
use crate::walk::ProjectFilesWalker;
use ruff_db::Db as _;
use ruff_db::file_revision::FileRevision;
use ruff_db::files::{File, FileRootKind, Files};
use ruff_db::system::SystemPath;
use rustc_hash::FxHashSet;
use salsa::Setter;
use ty_python_semantic::Program;
/// Represents the result of applying changes to the project database.
pub struct ChangeResult {
    // Set when a change requires re-discovering the project (config/ignore file
    // changes, deleted directories, rescans); see `apply_changes`.
    project_changed: bool,
    // Set when the custom stdlib's `VERSIONS` file (or a directory containing it)
    // changed; triggers re-resolution of the program settings.
    custom_stdlib_changed: bool,
}
impl ChangeResult {
    /// Returns `true` if the project structure has changed.
    pub fn project_changed(&self) -> bool {
        self.project_changed
    }
    /// Returns `true` if the custom stdlib's VERSIONS file has changed.
    pub fn custom_stdlib_changed(&self) -> bool {
        self.custom_stdlib_changed
    }
}
impl ProjectDatabase {
    /// Applies a batch of file-watcher `changes` to the database.
    ///
    /// Synchronizes changed/created/deleted files with salsa, collects newly
    /// added paths, reloads the project metadata when structural changes are
    /// detected (config files, deleted directories, rescans), and replaces the
    /// project's index diagnostics. Returns a [`ChangeResult`] describing what
    /// kind of change was observed.
    #[tracing::instrument(level = "debug", skip(self, changes, project_options_overrides))]
    pub fn apply_changes(
        &mut self,
        changes: Vec<ChangeEvent>,
        project_options_overrides: Option<&ProjectOptionsOverrides>,
    ) -> ChangeResult {
        let mut project = self.project();
        let project_root = project.root(self).to_path_buf();
        let config_file_override =
            project_options_overrides.and_then(|options| options.config_file_override.clone());
        let program = Program::get(self);
        // Path of the custom stdlib's `VERSIONS` file, if a custom typeshed is in use.
        let custom_stdlib_versions_path = program
            .custom_stdlib_search_path(self)
            .map(|path| path.join("VERSIONS"));
        let mut result = ChangeResult {
            project_changed: false,
            custom_stdlib_changed: false,
        };
        // Paths that were added
        let mut added_paths = FxHashSet::default();
        // Deduplicate the `sync` calls. Many file watchers emit multiple events for the same path.
        let mut synced_files = FxHashSet::default();
        // Directories that need a recursive sync; a `BTreeSet` so that parents sort
        // before their children (exploited when deduplicating below).
        let mut sync_recursively = BTreeSet::default();
        for change in changes {
            tracing::trace!("Handle change: {:?}", change);
            // First pass over the path: detect changes that affect the project structure.
            if let Some(path) = change.system_path() {
                if let Some(config_file) = &config_file_override {
                    if config_file.as_path() == path {
                        result.project_changed = true;
                        continue;
                    }
                }
                if matches!(
                    path.file_name(),
                    Some(".gitignore" | ".ignore" | "ty.toml" | "pyproject.toml")
                ) {
                    // Changes to ignore files or settings can change the project structure or add/remove files.
                    result.project_changed = true;
                    continue;
                }
                if Some(path) == custom_stdlib_versions_path.as_deref() {
                    result.custom_stdlib_changed = true;
                }
            }
            match change {
                ChangeEvent::Changed { path, kind: _ } | ChangeEvent::Opened(path) => {
                    if synced_files.insert(path.to_path_buf()) {
                        let absolute =
                            SystemPath::absolute(&path, self.system().current_directory());
                        File::sync_path_only(self, &absolute);
                        if let Some(root) = self.files().root(self, &absolute) {
                            match root.kind_at_time_of_creation(self) {
                                // When a file inside the root of
                                // the project is changed, we don't
                                // want to mark the entire root as
                                // having changed too. In theory it
                                // might make sense to, but at time
                                // of writing, the file root revision
                                // on a project is used to invalidate
                                // the submodule files found within a
                                // directory. If we bumped the revision
                                // on every change within a project,
                                // then this caching technique would be
                                // effectively useless.
                                //
                                // It's plausible we should explore
                                // a more robust cache invalidation
                                // strategy that models more directly
                                // what we care about. For example, by
                                // keeping track of directories and
                                // their direct children explicitly,
                                // and then keying the submodule cache
                                // off of that instead. ---AG
                                FileRootKind::Project => {}
                                FileRootKind::LibrarySearchPath => {
                                    root.set_revision(self).to(FileRevision::now());
                                }
                            }
                        }
                    }
                }
                ChangeEvent::Created { kind, path } => {
                    match kind {
                        CreatedKind::File => {
                            if synced_files.insert(path.to_path_buf()) {
                                File::sync_path(self, &path);
                            }
                        }
                        CreatedKind::Directory | CreatedKind::Any => {
                            sync_recursively.insert(path.clone());
                        }
                    }
                    // Unlike other files, it's not only important to update the status of existing
                    // and known `File`s (`sync_recursively`), it's also important to discover new files
                    // that were added in the project's root (or any of the paths included for checking).
                    //
                    // This is important because `Project::check` iterates over all included files.
                    // The code below walks the `added_paths` and adds all files that
                    // should be included in the project. We can skip this check for
                    // paths that aren't part of the project or shouldn't be included
                    // when checking the project.
                    if self.system().is_file(&path) {
                        if project.is_file_included(self, &path) {
                            // Add the parent directory because `walkdir` always visits explicitly passed files
                            // even if they match an exclude filter.
                            added_paths.insert(path.parent().unwrap().to_path_buf());
                        }
                    } else if project.is_directory_included(self, &path) {
                        added_paths.insert(path);
                    }
                }
                ChangeEvent::Deleted { kind, path } => {
                    let is_file = match kind {
                        DeletedKind::File => true,
                        DeletedKind::Directory => {
                            // file watchers emit an event for every deleted file. No need to scan the entire dir.
                            continue;
                        }
                        // Ambiguous deletion: treat as a file only if we know the path
                        // as an existing system file.
                        DeletedKind::Any => self
                            .files
                            .try_system(self, &path)
                            .is_some_and(|file| file.exists(self)),
                    };
                    if is_file {
                        if synced_files.insert(path.to_path_buf()) {
                            File::sync_path(self, &path);
                        }
                        if let Some(file) = self.files().try_system(self, &path) {
                            project.remove_file(self, file);
                        }
                    } else {
                        sync_recursively.insert(path.clone());
                        // Deleting a directory that contains the custom stdlib's VERSIONS
                        // file counts as a stdlib change.
                        if custom_stdlib_versions_path
                            .as_ref()
                            .is_some_and(|versions_path| versions_path.starts_with(&path))
                        {
                            result.custom_stdlib_changed = true;
                        }
                        let directory_included = project.is_directory_included(self, &path);
                        if directory_included || path == project_root {
                            // TODO: Shouldn't it be enough to simply traverse the project files and remove all
                            // that start with the given path?
                            tracing::debug!(
                                "Reload project because of a path that could have been a directory."
                            );
                            // Perform a full-reload in case the deleted directory contained the pyproject.toml.
                            // We may want to make this more clever in the future, to e.g. iterate over the
                            // indexed files and remove the once that start with the same path, unless
                            // the deleted path is the project configuration.
                            result.project_changed = true;
                        } else if !directory_included {
                            // NOTE(review): when this branch is reached, `directory_included` is
                            // necessarily `false`, so this condition always holds; it reads as a
                            // plain `else` in practice.
                            tracing::debug!(
                                "Skipping reload because directory '{path}' isn't included in the project"
                            );
                        }
                    }
                }
                ChangeEvent::CreatedVirtual(path) | ChangeEvent::ChangedVirtual(path) => {
                    File::sync_virtual_path(self, &path);
                }
                ChangeEvent::DeletedVirtual(path) => {
                    if let Some(virtual_file) = self.files().try_virtual_file(&path) {
                        virtual_file.close(self);
                    }
                }
                ChangeEvent::Rescan => {
                    // A rescan invalidates everything: sync all files once and stop
                    // processing further (now redundant) events.
                    result.project_changed = true;
                    Files::sync_all(self);
                    sync_recursively.clear();
                    break;
                }
            }
        }
        // Recursively sync the collected directories, skipping any directory that is
        // nested inside one we just synced (`BTreeSet` iteration yields parents first).
        let sync_recursively = sync_recursively.into_iter();
        let mut last = None;
        for path in sync_recursively {
            // Avoid re-syncing paths that are sub-paths of each other.
            if let Some(last) = &last {
                if path.starts_with(last) {
                    continue;
                }
            }
            Files::sync_recursively(self, &path);
            last = Some(path);
        }
        if result.project_changed {
            // Structural change: re-discover the project metadata (or re-load it from
            // the overridden config file) and rebuild the program settings.
            let new_project_metadata = match config_file_override {
                Some(config_file) => {
                    ProjectMetadata::from_config_file(config_file, &project_root, self.system())
                }
                None => ProjectMetadata::discover(&project_root, self.system()),
            };
            match new_project_metadata {
                Ok(mut metadata) => {
                    if let Err(error) = metadata.apply_configuration_files(self.system()) {
                        tracing::error!(
                            "Failed to apply configuration files, continuing without applying them: {error}"
                        );
                    }
                    if let Some(overrides) = project_options_overrides {
                        metadata.apply_overrides(overrides);
                    }
                    match metadata.to_program_settings(self.system(), self.vendored()) {
                        Ok(program_settings) => {
                            let program = Program::get(self);
                            program.update_from_settings(self, program_settings);
                        }
                        Err(error) => {
                            tracing::error!(
                                "Failed to convert metadata to program settings, continuing without applying them: {error}"
                            );
                        }
                    }
                    if metadata.root() == project.root(self) {
                        tracing::debug!("Reloading project after structural change");
                        project.reload(self, metadata);
                    } else {
                        match Project::from_metadata(self, metadata) {
                            Ok(new_project) => {
                                tracing::debug!("Replace project after structural change");
                                project = new_project;
                            }
                            Err(error) => {
                                tracing::error!(
                                    "Keeping old project configuration because loading the new settings failed with: {error}"
                                );
                                project
                                    .set_settings_diagnostics(self)
                                    .to(vec![error.into_diagnostic()]);
                            }
                        }
                        self.project = Some(project);
                    }
                }
                Err(error) => {
                    tracing::error!(
                        "Failed to load project, keeping old project configuration: {error}"
                    );
                }
            }
            return result;
        } else if result.custom_stdlib_changed {
            // Only the custom stdlib changed: re-resolve program settings without
            // reloading the project.
            match project
                .metadata(self)
                .to_program_settings(self.system(), self.vendored())
            {
                Ok(program_settings) => {
                    program.update_from_settings(self, program_settings);
                }
                Err(error) => {
                    tracing::error!("Failed to resolve program settings: {error}");
                }
            }
        }
        let diagnostics = if let Some(walker) = ProjectFilesWalker::incremental(self, added_paths) {
            // Use directory walking to discover newly added files.
            let (files, diagnostics) = walker.collect_vec(self);
            for file in files {
                project.add_file(self, file);
            }
            diagnostics
        } else {
            Vec::new()
        };
        // Note: We simply replace all IO related diagnostics here. This isn't ideal, because
        // it removes IO errors that may still be relevant. However, tracking IO errors correctly
        // across revisions doesn't feel essential, considering that they're rare. However, we could
        // implement a `BTreeMap` or similar and only prune the diagnostics from paths that we've
        // re-scanned (or that were removed etc).
        project.replace_index_diagnostics(self, diagnostics);
        result
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/glob/include.rs | crates/ty_project/src/glob/include.rs | use globset::{Glob, GlobBuilder, GlobSet, GlobSetBuilder};
use regex_automata::dfa;
use regex_automata::dfa::Automaton;
use ruff_db::system::SystemPath;
use std::fmt::Formatter;
use std::path::{MAIN_SEPARATOR, MAIN_SEPARATOR_STR};
use tracing::warn;
use crate::glob::portable::AbsolutePortableGlobPattern;
/// Upper bound, in bytes, for both the compiled DFA and its determinization
/// working memory; when exceeded, the builder falls back to full directory
/// traversal. Chosen at a whim -Konsti
const DFA_SIZE_LIMIT: usize = 1_000_000;
/// Path filter based on a set of include globs.
///
/// The patterns are similar to gitignore, but reversed:
///
/// * `/src`: matches a file or directory with its content named `src`
/// * `/src/`: matches a directory with its content named `src`
/// * `/src/**` or `/src/*`: matches the content of `src`, but not a file named `src`
///
/// Negated patterns are not supported.
///
/// Internally, the globs are converted to a regex and then to a DFA, which unlike the globs and the
/// regex allows to check for prefix matches.
///
/// ## Equality
/// Equality is based on the patterns from which a filter was constructed.
///
/// Because of that, two filters that include the exact same files but were
/// constructed from different patterns (or even just order) compare unequal.
#[derive(Clone, get_size2::GetSize)]
pub(crate) struct IncludeFilter {
    // Compiled glob set used for exact file matching (`match_file`).
    #[get_size(ignore)]
    glob_set: GlobSet,
    // The user-facing (relative) patterns; the basis for `Debug`, `Display`, and `PartialEq`.
    original_patterns: Box<[String]>,
    // Anchored DFA used for directory prefix matching (`match_directory`);
    // `None` when DFA construction exceeded `DFA_SIZE_LIMIT`.
    #[get_size(size_fn = dfa_memory_usage)]
    dfa: Option<dfa::dense::DFA<Vec<u32>>>,
}
#[allow(clippy::ref_option)]
fn dfa_memory_usage(dfa: &Option<dfa::dense::DFA<Vec<u32>>>) -> usize {
dfa.as_ref().map(dfa::dense::DFA::memory_usage).unwrap_or(0)
}
impl IncludeFilter {
    /// Whether the file matches any of the globs.
    pub(crate) fn match_file(&self, path: impl AsRef<SystemPath>) -> bool {
        let path = path.as_ref();
        self.glob_set.is_match(path)
    }
    /// Check whether a directory or any of its children can be matched by any of the globs.
    ///
    /// This never returns `false` if any child matches, but it may return `true` even if we
    /// don't end up including any child.
    pub(crate) fn match_directory(&self, path: impl AsRef<SystemPath>) -> bool {
        self.match_directory_impl(path.as_ref())
    }
    fn match_directory_impl(&self, path: &SystemPath) -> bool {
        // No DFA means construction hit `DFA_SIZE_LIMIT` (see the builder);
        // conservatively visit every directory.
        let Some(dfa) = &self.dfa else {
            return true;
        };
        // Allow the root path
        if path == SystemPath::new("") {
            return true;
        }
        let config_anchored =
            regex_automata::util::start::Config::new().anchored(regex_automata::Anchored::Yes);
        let mut state = dfa.start_state(&config_anchored).unwrap();
        // Feed the path (without a trailing `/`) byte-by-byte through the DFA.
        let byte_path = path
            .as_str()
            .strip_suffix('/')
            .unwrap_or(path.as_str())
            .as_bytes();
        for b in byte_path {
            state = dfa.next_state(state, *b);
        }
        // Say we're looking at a directory `foo/bar`. We want to continue if either `foo/bar` is
        // a match, e.g., from `foo/*`, or a path below it can match, e.g., from `foo/bar/*`.
        let eoi_state = dfa.next_eoi_state(state);
        // We must not call `next_eoi_state` on the slash state, we want to only check if more
        // characters (path components) are allowed, not if we're matching the `$` anchor at the
        // end.
        let slash_state = dfa.next_state(state, u8::try_from(MAIN_SEPARATOR).unwrap());
        debug_assert!(
            !dfa.is_quit_state(eoi_state) && !dfa.is_quit_state(slash_state),
            "matcher is in quit state"
        );
        dfa.is_match_state(eoi_state) || !dfa.is_dead_state(slash_state)
    }
}
// `Debug` shows only the source patterns; the glob set and DFA are elided.
impl std::fmt::Debug for IncludeFilter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("IncludeFilter")
            .field(&self.original_patterns)
            .finish_non_exhaustive()
    }
}
// `Display` renders the filter as a list of its source patterns.
impl std::fmt::Display for IncludeFilter {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_list().entries(&self.original_patterns).finish()
    }
}
// Equality is pattern-based (see the type-level docs): filters built from
// different patterns compare unequal even if they match the same files.
impl PartialEq for IncludeFilter {
    fn eq(&self, other: &Self) -> bool {
        self.original_patterns == other.original_patterns
    }
}
impl Eq for IncludeFilter {}
/// Incrementally builds an [`IncludeFilter`] from include glob patterns.
#[derive(Debug)]
pub(crate) struct IncludeFilterBuilder {
    // Accumulates the globs for exact file matching.
    set: GlobSetBuilder,
    // The user-facing (relative) patterns, kept for Debug/Display/PartialEq.
    original_pattern: Vec<String>,
    // Regexes (one per prefix glob) later compiled into a single DFA.
    regexes: Vec<String>,
}
impl IncludeFilterBuilder {
    /// Creates an empty builder.
    pub(crate) fn new() -> Self {
        Self {
            set: GlobSetBuilder::new(),
            original_pattern: Vec::new(),
            regexes: Vec::new(),
        }
    }
    /// Adds an include pattern to the filter.
    pub(crate) fn add(
        &mut self,
        input: &AbsolutePortableGlobPattern,
    ) -> Result<&mut Self, globset::Error> {
        let mut glob_pattern = input.absolute();
        let mut only_directory = false;
        // A pattern ending with a `/` should only match directories. E.g. `src/` only matches directories
        // whereas `src` matches both files and directories.
        // We need to remove the `/` to ensure that a path missing the trailing `/` matches.
        if let Some(after) = glob_pattern.strip_suffix('/') {
            // Escaped `/` or `\` aren't allowed. `portable_glob::parse` will error
            only_directory = true;
            glob_pattern = after;
        }
        // If the pattern already ends with `**`, only push that one glob and regex.
        // Otherwise, push two globs: one with a `/**` suffix and (unless the pattern
        // is directory-only) one without.
        let glob = GlobBuilder::new(glob_pattern)
            .literal_separator(true)
            // No need to support Windows-style paths, so the backslash can be used as an escape.
            .backslash_escape(true)
            .build()?;
        self.original_pattern.push(input.relative().to_string());
        // `lib` is the same as `lib/**`
        // Add a glob that matches `lib` exactly, change the glob to `lib/**`.
        if glob_pattern.ends_with("**") {
            self.push_prefix_regex(&glob);
            self.set.add(glob);
        } else {
            let prefix_glob = GlobBuilder::new(&format!("{glob_pattern}/**"))
                .literal_separator(true)
                // No need to support Windows-style paths, so the backslash can be used as an escape.
                .backslash_escape(true)
                .build()?;
            self.push_prefix_regex(&prefix_glob);
            self.set.add(prefix_glob);
            // The reason we add the exact glob, e.g. `src` when the original pattern was `src/` is
            // so that `match_file` returns true when matching against a file. However, we don't
            // need to do this if this is a pattern that should only match a directory (specifically, its contents).
            if !only_directory {
                self.set.add(glob);
            }
        }
        Ok(self)
    }
    /// Records `glob`'s regex (adapted for the platform separator) for the
    /// prefix-matching DFA built in [`Self::build`].
    fn push_prefix_regex(&mut self, glob: &Glob) {
        let main_separator = regex::escape(MAIN_SEPARATOR_STR);
        let regex = glob
            .regex()
            // We are using a custom DFA builder
            .strip_prefix("(?-u)")
            .expect("a glob is a non-unicode byte regex")
            // Match windows paths if applicable
            .replace('/', &main_separator);
        self.regexes.push(regex);
    }
    /// The filter matches if any of the globs matches.
    ///
    /// See <https://github.com/BurntSushi/ripgrep/discussions/2927> for the error returned.
    pub(crate) fn build(self) -> Result<IncludeFilter, globset::Error> {
        let glob_set = self.set.build()?;
        let dfa_builder = dfa::dense::Builder::new()
            .syntax(
                // The glob regex is a byte matcher
                regex_automata::util::syntax::Config::new()
                    .unicode(false)
                    .utf8(false),
            )
            .configure(
                dfa::dense::Config::new()
                    .start_kind(dfa::StartKind::Anchored)
                    // DFA can grow exponentially, in which case we bail out
                    .dfa_size_limit(Some(DFA_SIZE_LIMIT))
                    .determinize_size_limit(Some(DFA_SIZE_LIMIT)),
            )
            .build_many(&self.regexes);
        let dfa = if let Ok(dfa) = dfa_builder {
            Some(dfa)
        } else {
            // TODO(konsti): `regex_automata::dfa::dense::BuildError` should allow asking whether
            // is a size error
            warn!(
                "Glob expressions regex is larger than {DFA_SIZE_LIMIT} bytes, \
                falling back to full directory traversal!"
            );
            None
        };
        Ok(IncludeFilter {
            glob_set,
            dfa,
            original_patterns: self.original_pattern.into(),
        })
    }
}
#[cfg(test)]
mod tests {
    use std::path::{MAIN_SEPARATOR, MAIN_SEPARATOR_STR};
    use crate::glob::include::{IncludeFilter, IncludeFilterBuilder};
    use crate::glob::{PortableGlobKind, PortableGlobPattern};
    use ruff_db::system::{MemoryFileSystem, walk_directory::WalkState};
    // Builds a filter from the given patterns, anchored at the filesystem root.
    fn create_filter(patterns: impl IntoIterator<Item = &'static str>) -> IncludeFilter {
        let mut builder = IncludeFilterBuilder::new();
        for pattern in patterns {
            builder
                .add(
                    &PortableGlobPattern::parse(pattern, PortableGlobKind::Include)
                        .unwrap()
                        .into_absolute(""),
                )
                .unwrap();
        }
        builder.build().unwrap()
    }
    // Creates an in-memory file system populated with the given (empty) files.
    fn setup_files(files: impl IntoIterator<Item = &'static str>) -> MemoryFileSystem {
        let fs = MemoryFileSystem::new();
        fs.write_files_all(files.into_iter().map(|name| (name, "")))
            .unwrap();
        fs
    }
    #[track_caller]
    fn assert_match_directory(filter: &IncludeFilter, path: &str) {
        assert!(filter.match_directory(path.replace('/', MAIN_SEPARATOR_STR)));
    }
    #[track_caller]
    fn assert_not_match_directory(filter: &IncludeFilter, path: &str) {
        assert!(!filter.match_directory(path.replace('/', MAIN_SEPARATOR_STR)));
    }
    #[test]
    fn match_directory() {
        // `lib` is the same as `lib/**`. It includes a file or directory (including its contents)
        // `src/*`: The same as `src/**`
        let filter = create_filter(["lib", "src/*", "tests/**", "a/test-*/b", "files/*.py"]);
        assert_match_directory(&filter, "lib");
        assert_match_directory(&filter, "lib/more/test");
        assert_match_directory(&filter, "src");
        assert_match_directory(&filter, "src/more/test");
        assert_match_directory(&filter, "tests");
        assert_match_directory(&filter, "tests/more/test");
        assert_match_directory(&filter, "a");
        assert_match_directory(&filter, "a/test-b");
        assert_not_match_directory(&filter, "a/test-b/x");
        assert_not_match_directory(&filter, "a/test");
        assert_match_directory(&filter, "files/a.py");
        assert_match_directory(&filter, "files/a.py/bcd");
        assert_not_match_directory(&filter, "not_included");
        assert_not_match_directory(&filter, "files/a.pi");
    }
    #[test]
    fn match_file() {
        // `lib` is the same as `lib/**`. It includes a file or directory (including its contents)
        // `src/*`: The same as `src/**`
        let filter = create_filter([
            "lib",
            "src/*",
            "directory/",
            "tests/**",
            "a/test-*/b",
            "files/*.py",
        ]);
        assert!(filter.match_file("lib"));
        assert!(filter.match_file("lib/more/test"));
        // Unlike `directory`, `directory/` only includes a directory with the given name and its contents
        assert!(!filter.match_file("directory"));
        assert!(filter.match_file("directory/more/test"));
        // Unlike `src`, `src/*` only includes a directory with the given name.
        assert!(!filter.match_file("src"));
        assert!(filter.match_file("src/more/test"));
        // Unlike `tests`, `tests/**` only includes files under `tests`, but not a file named tests
        assert!(!filter.match_file("tests"));
        assert!(filter.match_file("tests/more/test"));
        // Unlike `match_directory`, prefixes should not be included.
        assert!(!filter.match_file("a"));
        assert!(!filter.match_file("a/test-b"));
        assert!(!filter.match_file("a/test-b/x"));
        assert!(!filter.match_file("a/test"));
        assert!(filter.match_file("files/a.py"));
        assert!(filter.match_file("files/a.py/bcd"));
        assert!(!filter.match_file("not_included"));
        assert!(!filter.match_file("files/a.pi"));
    }
    /// Check that we skip directories that can never match.
    #[test]
    fn prefilter() {
        let filter = create_filter(["/a/b/test-*/d", "/a/b/c/e", "/b/c"]);
        let fs = setup_files([
            // Should visit
            "/a/b/test-a/d",
            "/a/b/c/e",
            "/b/c",
            // Can skip
            "/d/e",
            "/a/b/x/f",
        ]);
        let visited = std::sync::Mutex::new(Vec::new());
        // Test the prefix filtering
        fs.walk_directory("/").run(|| {
            Box::new(|entry| {
                let entry = entry.unwrap();
                if entry.file_type().is_directory() {
                    if !filter.match_directory(entry.path()) {
                        return WalkState::Skip;
                    }
                }
                visited
                    .lock()
                    .unwrap()
                    .push(entry.path().as_str().replace(MAIN_SEPARATOR, "/"));
                WalkState::Continue
            })
        });
        let mut visited = visited.into_inner().unwrap();
        visited.sort();
        // Assert that it didn't traverse into `/d` or `/a/b/x`
        assert_eq!(
            visited,
            [
                "/",
                "/a",
                "/a/b",
                "/a/b/c",
                "/a/b/c/e",
                "/a/b/test-a",
                "/a/b/test-a/d",
                "/b",
                "/b/c"
            ]
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/glob/portable.rs | crates/ty_project/src/glob/portable.rs | //! Cross-language glob syntax from
//! [PEP 639](https://packaging.python.org/en/latest/specifications/glob-patterns/).
//!
//! The glob syntax matches the one implemented by uv's `uv-globfilter` crate.
//! We intentionally use the same syntax to give users a consistent experience
//! across our tools.
//!
//! [Source](https://github.com/astral-sh/uv/blob/main/crates/uv-globfilter/src/portable_glob.rs)
use ruff_db::system::SystemPath;
use std::error::Error as _;
use std::ops::Deref;
use std::{fmt::Write, path::MAIN_SEPARATOR};
use thiserror::Error;
/// Pattern that only uses cross-language glob syntax based on [PEP 639](https://packaging.python.org/en/latest/specifications/glob-patterns/):
///
/// - Alphanumeric characters, underscores (`_`), hyphens (`-`) and dots (`.`) are matched verbatim.
/// - The special glob characters are:
///   - `*`: Matches any number of characters except path separators
///   - `?`: Matches a single character except the path separator
///   - `**`: Matches any number of characters including path separators
///   - `[]`, containing only the verbatim matched characters: Matches a single one of the contained characters. Within
///     `[...]`, the hyphen indicates a locale-agnostic range (e.g. `a-z`, order based on Unicode code points). Hyphens at
///     the start or end are matched literally.
///   - `\`: It escapes the following character to be matched verbatim (extension to PEP 639).
/// - The path separator is the forward slash character (`/`). Patterns are relative to the given directory, a leading slash
///   character for absolute paths is not supported.
/// - Parent directory indicators (`..`) are not allowed.
///
/// These rules mean that matching the backslash (`\`) is forbidden, which avoids collisions with the Windows path separator.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct PortableGlobPattern<'a> {
    // The raw pattern text as written by the user (including a leading `!` for excludes).
    pattern: &'a str,
    // Whether the pattern belongs to an include or an exclude filter.
    kind: PortableGlobKind,
}
impl<'a> PortableGlobPattern<'a> {
    /// Parses a portable glob pattern. Returns an error if the pattern isn't valid.
    ///
    /// For [`PortableGlobKind::Exclude`] patterns, a single leading `!` (negation marker)
    /// is skipped before validation.
    pub(crate) fn parse(glob: &'a str, kind: PortableGlobKind) -> Result<Self, PortableGlobError> {
        let mut chars = glob.chars().enumerate().peekable();
        if matches!(kind, PortableGlobKind::Exclude) {
            chars.next_if(|(_, c)| *c == '!');
        }
        // A `..` is a parent directory indicator when it appears at the start of the string
        // or after a directory separator.
        let mut start_or_slash = true;
        // Validate character by character; runs of consecutive stars are counted as they're encountered.
        while let Some((offset, c)) = chars.next() {
            // Error positions are reported 1-based.
            let pos = offset + 1;
            // `***` or `**literals` can be correctly represented with less stars. They are banned by
            // `glob`, they are allowed by `globset` and PEP 639 is ambiguous, so we're filtering them
            // out.
            if c == '*' {
                let mut star_run = 1;
                while let Some((_, c)) = chars.peek() {
                    if *c == '*' {
                        star_run += 1;
                        chars.next();
                    } else {
                        break;
                    }
                }
                if star_run >= 3 {
                    return Err(PortableGlobError::TooManyStars {
                        // We don't update pos for the stars.
                        pos,
                    });
                } else if star_run == 2 {
                    if chars.peek().is_some_and(|(_, c)| *c != '/') {
                        return Err(PortableGlobError::TooManyStars {
                            // We don't update pos for the stars.
                            pos,
                        });
                    }
                }
                start_or_slash = false;
            } else if c.is_alphanumeric() || matches!(c, '_' | '-' | '?') {
                start_or_slash = false;
            } else if c == '.' {
                if start_or_slash && matches!(chars.peek(), Some((_, '.'))) {
                    return Err(PortableGlobError::ParentDirectory { pos });
                }
                start_or_slash = false;
            } else if c == '/' {
                start_or_slash = true;
            } else if c == '[' {
                // NOTE(review): inside this branch (and the escape branch below) `pos` shadows
                // the outer 1-based position with the raw 0-based offset. The snapshot tests
                // pin these positions, so confirm the intent before unifying the two schemes.
                for (pos, c) in chars.by_ref() {
                    if c.is_alphanumeric() || matches!(c, '_' | '-' | '.') {
                        // Allowed.
                    } else if c == ']' {
                        break;
                    } else {
                        return Err(PortableGlobError::InvalidCharacterRange {
                            pos,
                            invalid: InvalidChar(c),
                        });
                    }
                }
                start_or_slash = false;
            } else if c == '\\' {
                match chars.next() {
                    Some((pos, '/' | '\\')) => {
                        // For cross-platform compatibility, we don't allow forward slashes or
                        // backslashes to be escaped.
                        return Err(PortableGlobError::InvalidEscapee { pos });
                    }
                    Some(_) => {
                        // Escaped character
                    }
                    None => {
                        return Err(PortableGlobError::TrailingEscape { pos });
                    }
                }
            } else {
                return Err(PortableGlobError::InvalidCharacter {
                    pos,
                    invalid: InvalidChar(c),
                });
            }
        }
        Ok(PortableGlobPattern {
            pattern: glob,
            kind,
        })
    }
    /// Anchors pattern at `cwd`.
    ///
    /// For exclude patterns, a leading `!` (negation) is stripped before anchoring and
    /// re-applied to the anchored result.
    ///
    /// This method is similar to [`SystemPath::absolute`] but for a glob pattern.
    /// The main difference is that this method always uses `/` as path separator.
    pub(crate) fn into_absolute(self, cwd: impl AsRef<SystemPath>) -> AbsolutePortableGlobPattern {
        let mut pattern = self.pattern;
        let mut negated = false;
        if matches!(self.kind, PortableGlobKind::Exclude) {
            // If the pattern starts with `!`, we need to remove it and then anchor the rest.
            if let Some(after) = self.pattern.strip_prefix('!') {
                pattern = after;
                negated = true;
            }
        }
        // NOTE(review): for an already-absolute negated pattern (`!/src`), this early return
        // emits `absolute` WITHOUT the `!` prefix, unlike the relative branch below which
        // re-applies it — confirm whether the negation is intentionally dropped here.
        if pattern.starts_with('/') {
            return AbsolutePortableGlobPattern {
                absolute: pattern.to_string(),
                relative: self.pattern.to_string(),
            };
        }
        // Consume leading `./` and `../` segments, popping `cwd` components for the latter.
        let mut rest = pattern;
        let mut prefix = cwd.as_ref().to_path_buf().into_utf8_path_buf();
        loop {
            if let Some(after) = rest.strip_prefix("./") {
                rest = after;
            } else if let Some(after) = rest.strip_prefix("../") {
                prefix.pop();
                rest = after;
            } else {
                break;
            }
        }
        // Re-emit the prefix with `/` separators (Windows prefixes like `C:\` included).
        let mut output = String::with_capacity(prefix.as_str().len() + rest.len());
        for component in prefix.components() {
            match component {
                camino::Utf8Component::Prefix(utf8_prefix_component) => {
                    output.push_str(&utf8_prefix_component.as_str().replace(MAIN_SEPARATOR, "/"));
                }
                camino::Utf8Component::RootDir => {
                    output.push('/');
                    continue;
                }
                camino::Utf8Component::CurDir => {}
                camino::Utf8Component::ParentDir => output.push_str("../"),
                camino::Utf8Component::Normal(component) => {
                    output.push_str(component);
                    output.push('/');
                }
            }
        }
        output.push_str(rest);
        if negated {
            // If the pattern is negated, we need to keep the leading `!`.
            AbsolutePortableGlobPattern {
                absolute: format!("!{output}"),
                relative: self.pattern.to_string(),
            }
        } else {
            AbsolutePortableGlobPattern {
                absolute: output,
                relative: self.pattern.to_string(),
            }
        }
    }
}
// Allows a `PortableGlobPattern` to be used anywhere a `&str` is expected,
// exposing the raw pattern text.
impl Deref for PortableGlobPattern<'_> {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        self.pattern
    }
}
/// A portable glob pattern that uses absolute paths.
///
/// E.g., `./src/**` becomes `/root/src/**` when anchored to `/root`.
#[derive(Debug, Eq, PartialEq, Hash)]
pub(crate) struct AbsolutePortableGlobPattern {
    // The anchored pattern (always `/`-separated), possibly with a leading `!` for negations.
    absolute: String,
    // The pattern exactly as the user wrote it, kept for display/equality.
    relative: String,
}
impl AbsolutePortableGlobPattern {
    /// Returns the anchored (absolute) form of this glob pattern.
    pub(crate) fn absolute(&self) -> &str {
        &self.absolute
    }
    /// Returns the original (relative) form of this glob pattern.
    pub(crate) fn relative(&self) -> &str {
        &self.relative
    }
}
/// Distinguishes how a pattern is used; only exclude patterns may be negated with `!`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) enum PortableGlobKind {
    /// An include pattern. Doesn't allow negated patterns.
    Include,
    /// An exclude pattern. Allows for negated patterns.
    Exclude,
}
/// Errors produced while validating a portable glob pattern.
///
/// All `pos` fields are character positions used for user-facing diagnostics
/// (the exact wording and positions are pinned by snapshot tests).
#[derive(Debug, Error)]
pub(crate) enum PortableGlobError {
    /// Shows the failing glob in the error message.
    #[error("{desc}", desc=.0.description())]
    GlobError(#[from] globset::Error),
    #[error("The parent directory operator (`..`) at position {pos} is not allowed")]
    ParentDirectory { pos: usize },
    #[error(
        "Invalid character `{invalid}` at position {pos}. hint: Characters can be escaped with a backslash"
    )]
    InvalidCharacter { pos: usize, invalid: InvalidChar },
    #[error("Path separators can't be escaped, invalid character at position {pos}")]
    InvalidEscapee { pos: usize },
    #[error("Invalid character `{invalid}` in range at position {pos}")]
    InvalidCharacterRange { pos: usize, invalid: InvalidChar },
    #[error("Too many stars at position {pos}")]
    TooManyStars { pos: usize },
    #[error("Trailing backslash at position {pos}")]
    TrailingEscape { pos: usize },
}
/// A character rejected by the portable-glob validator, wrapped for error display.
#[derive(Copy, Clone, Debug)]
pub(crate) struct InvalidChar(pub char);

impl std::fmt::Display for InvalidChar {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Print a single quote verbatim: `escape_debug` would render it as `\'`,
        // which reads poorly inside the backtick-quoted error messages.
        if self.0 == '\'' {
            f.write_str("'")
        } else {
            write!(f, "{}", self.0.escape_debug())
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::glob::{PortableGlobKind, PortableGlobPattern};
    use insta::assert_snapshot;
    use ruff_db::system::SystemPath;
    #[test]
    fn test_error() {
        // Parses `glob` as an exclude pattern and returns the rendered error message.
        #[track_caller]
        fn parse_err(glob: &str) -> String {
            let error = PortableGlobPattern::parse(glob, PortableGlobKind::Exclude).unwrap_err();
            error.to_string()
        }
        assert_snapshot!(
            parse_err(".."),
            @"The parent directory operator (`..`) at position 1 is not allowed"
        );
        assert_snapshot!(
            parse_err("licenses/.."),
            @"The parent directory operator (`..`) at position 10 is not allowed"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN!E.txt"),
            @"Invalid character `!` at position 15. hint: Characters can be escaped with a backslash"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN[!C]E.txt"),
            @"Invalid character `!` in range at position 15"
        );
        assert_snapshot!(
            parse_err("licenses/LICEN[C?]E.txt"),
            @"Invalid character `?` in range at position 16"
        );
        assert_snapshot!(
            parse_err("******"),
            @"Too many stars at position 1"
        );
        assert_snapshot!(
            parse_err("licenses/**license"),
            @"Too many stars at position 10"
        );
        assert_snapshot!(
            parse_err("licenses/***/licenses.csv"),
            @"Too many stars at position 10"
        );
        assert_snapshot!(
            parse_err(r"**/@test"),
            @"Invalid character `@` at position 4. hint: Characters can be escaped with a backslash"
        );
        // Escapes are not allowed in strict PEP 639 mode
        assert_snapshot!(
            parse_err(r"public domain/Gulliver\\’s Travels.txt"),
            @r"Invalid character ` ` at position 7. hint: Characters can be escaped with a backslash"
        );
        assert_snapshot!(
            parse_err(r"**/@test"),
            @"Invalid character `@` at position 4. hint: Characters can be escaped with a backslash"
        );
        // Escaping slashes is not allowed.
        assert_snapshot!(
            parse_err(r"licenses\\MIT.txt"),
            @r"Path separators can't be escaped, invalid character at position 9"
        );
        assert_snapshot!(
            parse_err(r"licenses\/MIT.txt"),
            @r"Path separators can't be escaped, invalid character at position 9"
        );
    }
    #[test]
    fn test_valid() {
        let cases = [
            r"licenses/*.txt",
            r"licenses/**/*.txt",
            r"LICEN[CS]E.txt",
            r"LICEN?E.txt",
            r"[a-z].txt",
            r"[a-z._-].txt",
            r"*/**",
            r"LICENSE..txt",
            r"LICENSE_file-1.txt",
            // "license" translated via Google Translate:
            r"licenses/라이센스*.txt",
            r"licenses/ライセンス*.txt",
            r"licenses/执照*.txt",
            r"src/**",
        ];
        // Patterns that rely on the escape extension (not part of strict PEP 639).
        let cases_uv = [
            r"public-domain/Gulliver\’s\ Travels.txt",
            // https://github.com/astral-sh/uv/issues/13280
            r"**/\@test",
        ];
        for case in cases.iter().chain(cases_uv.iter()) {
            PortableGlobPattern::parse(case, PortableGlobKind::Exclude).unwrap();
        }
    }
    // Asserts that anchoring `pattern` at `relative_to` produces `expected`.
    #[track_caller]
    fn assert_absolute_path(pattern: &str, relative_to: impl AsRef<SystemPath>, expected: &str) {
        let pattern = PortableGlobPattern::parse(pattern, PortableGlobKind::Exclude).unwrap();
        let pattern = pattern.into_absolute(relative_to);
        assert_eq!(pattern.absolute(), expected);
    }
    #[test]
    fn absolute_pattern() {
        assert_absolute_path("/src", "/root", "/src");
        assert_absolute_path("./src", "/root", "/root/src");
    }
    #[test]
    #[cfg(windows)]
    fn absolute_pattern_windows() {
        assert_absolute_path("./src", r"C:\root", "C:/root/src");
        assert_absolute_path("./src", r"\\server\test", "//server/test/src");
    }
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/glob/exclude.rs | crates/ty_project/src/glob/exclude.rs | //! Exclude filter supporting gitignore-like globs.
//!
//! * `src` excludes a file or directory named `src` anywhere in the path.
//! * `/src/` excludes a directory named `src` at the root of the path.
//! * `/src` excludes a directory or file named `src` at the root of the path.
//! * `/src/**` excludes all files and directories inside a directory named `src` but not `src` itself.
//! * `!src` allows a file or directory named `src` anywhere in the path
use std::fmt::Formatter;
use std::sync::Arc;
use globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder};
use regex_automata::util::pool::Pool;
use ruff_db::system::SystemPath;
use crate::GlobFilterCheckMode;
use crate::glob::portable::AbsolutePortableGlobPattern;
/// A filter for gitignore-like globs that excludes files and directories.
///
/// # Equality
///
/// Two filters are equal if they're constructed from the same patterns (including order).
/// Two filters that exclude the exact same files but were constructed from different patterns aren't considered
/// equal.
#[derive(Clone, Debug, PartialEq, Eq, get_size2::GetSize)]
pub(crate) struct ExcludeFilter {
    // The vendored gitignore-style matcher that performs the actual matching.
    ignore: Gitignore,
}
impl ExcludeFilter {
    /// Returns `true` if the path to a directory is definitely excluded and `false` otherwise.
    pub(crate) fn match_directory(&self, path: &SystemPath, mode: GlobFilterCheckMode) -> bool {
        self.matches(path, mode, true)
    }
    /// Returns `true` if the path to a file is definitely excluded and `false` otherwise.
    pub(crate) fn match_file(&self, path: &SystemPath, mode: GlobFilterCheckMode) -> bool {
        self.matches(path, mode, false)
    }
    // Shared implementation. `directory` is `true` when `path` refers to a directory.
    fn matches(&self, path: &SystemPath, mode: GlobFilterCheckMode, directory: bool) -> bool {
        match mode {
            // Top-down: callers walk the tree root-first, so ancestors were already checked.
            GlobFilterCheckMode::TopDown => {
                match self.ignore.matched(path, directory) {
                    // No hit or an allow hit means the file or directory is not excluded.
                    Match::None | Match::Allow => false,
                    Match::Ignore => true,
                }
            }
            // Adhoc: a single path is probed in isolation, so every ancestor must be
            // consulted to catch excludes on a parent directory.
            GlobFilterCheckMode::Adhoc => {
                // `ancestors()` yields `path` itself first, then each parent.
                // NOTE(review): proper ancestors are necessarily directories, yet they're
                // matched with the leaf's `directory` flag; a directory-only pattern
                // (`foo/`) may therefore not exclude a *file* beneath `foo` in adhoc
                // mode — confirm whether this is intended.
                for ancestor in path.ancestors() {
                    match self.ignore.matched(ancestor, directory) {
                        // If the path is allowlisted or there's no hit, try the parent to ensure we don't return false
                        // for a folder where there's an exclude for a parent.
                        Match::None | Match::Allow => {}
                        Match::Ignore => return true,
                    }
                }
                false
            }
        }
    }
}
// Renders the filter as the list of its original patterns, e.g. `["src/", "!src/keep"]`.
impl std::fmt::Display for ExcludeFilter {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_list().entries(&self.ignore.globs).finish()
    }
}
/// Incrementally assembles an [`ExcludeFilter`] from gitignore-like patterns.
pub(crate) struct ExcludeFilterBuilder {
    ignore: GitignoreBuilder,
}

impl ExcludeFilterBuilder {
    /// Creates a builder with no patterns registered.
    pub(crate) fn new() -> Self {
        let ignore = GitignoreBuilder::new();
        Self { ignore }
    }

    /// Registers one pattern; returns `self` so calls can be chained.
    pub(crate) fn add(
        &mut self,
        pattern: &AbsolutePortableGlobPattern,
    ) -> Result<&mut Self, globset::Error> {
        match self.ignore.add(pattern) {
            Ok(_) => Ok(self),
            Err(error) => Err(error),
        }
    }

    /// Finalizes the accumulated patterns into an immutable filter.
    pub(crate) fn build(self) -> Result<ExcludeFilter, globset::Error> {
        let ignore = self.ignore.build()?;
        Ok(ExcludeFilter { ignore })
    }
}
/// Matcher for gitignore like globs.
///
/// This code is our own vendored copy of the `ignore` crate's `Gitignore` type.
///
/// The differences with the `ignore` crate's version are:
///
/// * All globs are anchored. `src` matches `./src` only and not `**/src` to be consistent with `include`.
/// * It makes use of the fact that all our globs are absolute. This simplifies the implementation a fair bit.
///   Making globs absolute is also motivated by the fact that the globs can come from both the CLI and configuration files,
///   where the paths are anchored relative to the current working directory or the project root respectively.
/// * It uses [`globset::Error`] over the `ignore` crate's `Error` type.
/// * Removes support for commented lines, because the patterns aren't read
///   from a `.gitignore` file. This removes the need to escape `#` for file names starting with `#`.
///
/// You can find the original source on [GitHub](https://github.com/BurntSushi/ripgrep/blob/cbc598f245f3c157a872b69102653e2e349b6d92/crates/ignore/src/gitignore.rs#L81).
///
/// # Equality
///
/// Two ignore matches are only equal if they're constructed from the same patterns (including order).
/// Two matchers that were constructed from different patterns but result in
/// including the same files don't compare equal.
#[derive(Clone, get_size2::GetSize)]
struct Gitignore {
    // The compiled set of all globs; matched in one pass.
    #[get_size(ignore)]
    set: GlobSet,
    // Per-glob metadata (negation, directory-only), index-aligned with `set`.
    globs: Vec<IgnoreGlob>,
    // Pool of reusable scratch buffers for match indices; `Some` once built.
    #[get_size(ignore)]
    matches: Option<Arc<Pool<Vec<usize>>>>,
}
impl Gitignore {
    /// Returns whether the given path (file or directory) matched a pattern in
    /// this gitignore matcher.
    ///
    /// `is_dir` should be true if the path refers to a directory and false
    /// otherwise.
    ///
    /// The path must be absolute or it will only match prefix-wildcard patterns.
    fn matched(&self, path: &SystemPath, is_dir: bool) -> Match {
        if self.globs.is_empty() {
            return Match::None;
        }
        // Borrow a scratch buffer from the pool; `matches` is always `Some` after `build`.
        let mut matches = self.matches.as_ref().unwrap().get();
        let candidate = Candidate::new(path);
        self.set.matches_candidate_into(&candidate, &mut matches);
        // Iterate in reverse so the last-added matching pattern takes precedence,
        // mirroring gitignore semantics where later rules override earlier ones.
        for &i in matches.iter().rev() {
            let glob = &self.globs[i];
            // Directory-only globs (trailing `/`) are skipped for non-directories.
            if !glob.is_only_dir || is_dir {
                return if glob.is_ignore() {
                    Match::Ignore
                } else {
                    Match::Allow
                };
            }
        }
        Match::None
    }
}
// Manual `Debug`: the compiled `GlobSet` and buffer pool carry no useful debug output,
// so only the source globs are shown.
impl std::fmt::Debug for Gitignore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Gitignore")
            .field(&self.globs)
            .finish_non_exhaustive()
    }
}
// Equality is by source patterns (including order), not by matching behavior.
impl PartialEq for Gitignore {
    fn eq(&self, other: &Self) -> bool {
        self.globs == other.globs
    }
}
impl Eq for Gitignore {}
/// Outcome of matching a path against the gitignore matcher.
#[derive(Copy, Clone, Debug)]
enum Match {
    /// The path matches no pattern.
    None,
    /// The path matches an ignore pattern (a positive pattern)
    /// It should be ignored.
    Ignore,
    /// The path matches an allow pattern (a negative pattern).
    /// It should not be ignored.
    Allow,
}
/// Metadata for a single pattern in the [`Gitignore`] matcher, index-aligned
/// with the compiled glob set.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
struct IgnoreGlob {
    /// The pattern that was originally parsed.
    original: String,
    /// This is a pattern allowing a path (it starts with a `!`, possibly undoing a previous ignore)
    is_allow: bool,
    /// Whether this pattern only matches directories.
    is_only_dir: bool,
}
impl IgnoreGlob {
    // A glob is an ignore (positive) pattern iff it isn't a `!`-negated allow pattern.
    const fn is_ignore(&self) -> bool {
        !self.is_allow
    }
}
/// Builds a matcher for git-ignore like globs.
///
/// All globs need to use absolute paths, unless they're unanchored (contain no `/`).
#[derive(Clone, Debug)]
struct GitignoreBuilder {
    // Accumulates the compiled globs.
    builder: GlobSetBuilder,
    // Per-glob metadata, kept index-aligned with `builder`.
    globs: Vec<IgnoreGlob>,
}
impl GitignoreBuilder {
    /// Create a new builder for a gitignore file.
    fn new() -> GitignoreBuilder {
        GitignoreBuilder {
            builder: GlobSetBuilder::new(),
            globs: vec![],
        }
    }
    /// Builds a new matcher from the globs added so far.
    ///
    /// Once a matcher is built, no new globs can be added to it.
    fn build(&self) -> Result<Gitignore, globset::Error> {
        let set = self.builder.build()?;
        Ok(Gitignore {
            set,
            globs: self.globs.clone(),
            matches: Some(Arc::new(Pool::new(Vec::new))),
        })
    }
    /// Adds a gitignore like glob pattern to this builder.
    ///
    /// If the pattern could not be parsed as a glob, then an error is returned.
    fn add(
        &mut self,
        pattern: &AbsolutePortableGlobPattern,
    ) -> Result<&mut GitignoreBuilder, globset::Error> {
        let mut glob = IgnoreGlob {
            original: pattern.relative().to_string(),
            is_allow: false,
            is_only_dir: false,
        };
        let mut pattern = pattern.absolute();
        // File names starting with `!` are escaped with a backslash. Strip the backslash.
        // This is not a negated pattern!
        if pattern.starts_with("\\!") {
            pattern = &pattern[1..];
        } else if let Some(after) = pattern.strip_prefix("!") {
            glob.is_allow = true;
            pattern = after;
        }
        // If it ends with a slash, then this should only match directories,
        // but the slash should otherwise not be used while globbing.
        if let Some(before) = pattern.strip_suffix('/') {
            glob.is_only_dir = true;
            pattern = before;
        }
        let mut actual = pattern.to_string();
        // If the glob ends with `/**`, then we should only match everything
        // inside a directory, but not the directory itself. Standard globs
        // will match the directory. So we add `/*` to force the issue.
        if actual.ends_with("/**") {
            actual = format!("{actual}/*");
        }
        let parsed = GlobBuilder::new(&actual)
            .literal_separator(true)
            // No need to support Windows-style paths, so the backslash can be used as an escape.
            .backslash_escape(true)
            .build()?;
        self.builder.add(parsed);
        self.globs.push(glob);
        Ok(self)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/watch/project_watcher.rs | crates/ty_project/src/watch/project_watcher.rs | use std::fmt::{Formatter, Write};
use std::hash::Hasher;
use tracing::info;
use ruff_cache::{CacheKey, CacheKeyHasher};
use ruff_db::system::{SystemPath, SystemPathBuf};
use ty_module_resolver::system_module_search_paths;
use crate::db::{Db, ProjectDatabase};
use crate::watch::Watcher;
/// Wrapper around a [`Watcher`] that watches the relevant paths of a project:
/// the project root, user-included paths, module search paths, and extra
/// configuration files.
pub struct ProjectWatcher {
    // The underlying OS-level watcher.
    watcher: Watcher,
    /// The paths that need to be watched. This includes paths for which setting up file watching failed.
    watched_paths: Vec<SystemPathBuf>,
    /// True if registering a watcher for any path failed.
    has_errored_paths: bool,
    /// Cache key over the paths that need watching. It allows short-circuiting if the paths haven't changed.
    cache_key: Option<u64>,
}
impl ProjectWatcher {
    /// Create a new project watcher and register the initial set of watch paths.
    pub fn new(watcher: Watcher, db: &ProjectDatabase) -> Self {
        let mut watcher = Self {
            watcher,
            watched_paths: Vec::new(),
            cache_key: None,
            has_errored_paths: false,
        };
        watcher.update(db);
        watcher
    }
    /// Re-registers the watched paths from the current database state.
    ///
    /// Short-circuits when the relevant paths (project root + module search paths)
    /// haven't changed since the last update.
    pub fn update(&mut self, db: &ProjectDatabase) {
        let search_paths: Vec<_> = system_module_search_paths(db).collect();
        let project_path = db.project().root(db);
        let new_cache_key = Self::compute_cache_key(project_path, &search_paths);
        if self.cache_key == Some(new_cache_key) {
            return;
        }
        // Unregister all watch paths because ordering is important for linux because
        // it only emits an event for the last added watcher if a subtree is covered by multiple watchers.
        // A path can be covered by multiple watchers if a subdirectory symlinks to a path that's covered by another watch path:
        // ```text
        // - bar
        //   - baz.py
        // - project
        //   - bar -> /bar
        //   - foo.py
        // ```
        for path in self.watched_paths.drain(..) {
            if let Err(error) = self.watcher.unwatch(&path) {
                info!("Failed to remove the file watcher for path `{path}`: {error}");
            }
        }
        self.has_errored_paths = false;
        let config_paths = db
            .project()
            .metadata(db)
            .extra_configuration_paths()
            .iter()
            .map(SystemPathBuf::as_path);
        // Watch both the project root and any paths provided by the user on the CLI (removing any redundant nested paths).
        // This is necessary to observe changes to files that are outside the project root.
        // We always need to watch the project root to observe changes to its configuration.
        let included_paths = ruff_db::system::deduplicate_nested_paths(
            std::iter::once(project_path).chain(
                db.project()
                    .included_paths_list(db)
                    .iter()
                    .map(SystemPathBuf::as_path),
            ),
        );
        // Find the non-overlapping module search paths and filter out paths that are already covered by the project.
        // Module search paths are already canonicalized.
        let unique_module_paths = ruff_db::system::deduplicate_nested_paths(
            search_paths
                .into_iter()
                .filter(|path| !path.starts_with(project_path)),
        );
        // Now add the new paths, first starting with the project path and then
        // adding the library search paths, and finally the paths for configurations.
        for path in included_paths
            .chain(unique_module_paths)
            .chain(config_paths)
        {
            // Log a warning. It's not worth aborting if registering a single folder fails because
            // the tool otherwise still works as expected.
            // NOTE(review): the user-facing message below says "Ruff" although this lives in
            // the ty project crate — confirm the intended product name before changing it.
            if let Err(error) = self.watcher.watch(path) {
                // TODO: Log a user-facing warning.
                tracing::warn!(
                    "Failed to setup watcher for path `{path}`: {error}. You have to restart Ruff after making changes to files under this path or you might see stale results."
                );
                self.has_errored_paths = true;
            } else {
                self.watched_paths.push(path.to_path_buf());
            }
        }
        info!(
            "Set up file watchers for {}",
            DisplayWatchedPaths {
                paths: &self.watched_paths
            }
        );
        self.cache_key = Some(new_cache_key);
    }
    // Hashes the project root and module search paths so `update` can detect
    // when the watch set is unchanged.
    fn compute_cache_key(project_root: &SystemPath, search_paths: &[&SystemPath]) -> u64 {
        let mut cache_key_hasher = CacheKeyHasher::new();
        search_paths.cache_key(&mut cache_key_hasher);
        project_root.cache_key(&mut cache_key_hasher);
        cache_key_hasher.finish()
    }
    /// Returns `true` if setting up watching for any path failed.
    pub fn has_errored_paths(&self) -> bool {
        self.has_errored_paths
    }
    /// Flushes pending events of the underlying watcher.
    pub fn flush(&self) {
        self.watcher.flush();
    }
    /// Stops the underlying watcher, consuming `self`.
    pub fn stop(self) {
        self.watcher.stop();
    }
}
/// Helper that renders the watched paths as a bracketed, comma-separated list
/// of quoted paths, e.g. `["/project", "/lib"]`.
struct DisplayWatchedPaths<'a> {
    paths: &'a [SystemPathBuf],
}

impl std::fmt::Display for DisplayWatchedPaths<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_char('[')?;
        for (index, path) in self.paths.iter().enumerate() {
            // Separate entries with ", " after the first one.
            if index > 0 {
                f.write_str(", ")?;
            }
            write!(f, "\"{path}\"")?;
        }
        f.write_char(']')
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_project/src/watch/watcher.rs | crates/ty_project/src/watch/watcher.rs | #![allow(
clippy::disallowed_methods,
reason = "This implementation is specific to real file systems."
)]
use notify::event::{CreateKind, MetadataKind, ModifyKind, RemoveKind, RenameMode};
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher as _, recommended_watcher};
use ruff_db::system::{SystemPath, SystemPathBuf};
use crate::watch::{ChangeEvent, ChangedKind, CreatedKind, DeletedKind};
/// Creates a new watcher observing file system changes.
///
/// The watcher debounces events, but guarantees to send all changes eventually (even if the file system keeps changing).
pub fn directory_watcher<H>(handler: H) -> notify::Result<Watcher>
where
    H: EventHandler,
{
    let (sender, receiver) = crossbeam::channel::bounded(20);
    let debouncer = std::thread::Builder::new()
        .name("watcher::debouncer".to_string())
        .spawn(move || {
            // Wait for the next set of changes
            for message in &receiver {
                let event = match message {
                    DebouncerMessage::Event(event) => event,
                    DebouncerMessage::Flush => {
                        continue;
                    }
                };
                let mut debouncer = Debouncer::default();
                debouncer.add_result(event);
                // Debounce any new incoming changes:
                // * Take any new incoming change events and merge them with the previous change events
                // * If there are no new incoming change events after 10 ms, flush the changes and wait for the next notify event.
                // * Flush no later than after 3s.
                //
                // FIX: `start` marks the beginning of the whole debounce window, so it must be
                // captured *before* the loop. Taking it per iteration (as before) reset the 3s
                // deadline on every received event; combined with the 10 ms `default` arm the
                // deadline could effectively never fire, so a steady stream of events (each
                // arriving within 10 ms) postponed the flush indefinitely.
                let start = std::time::Instant::now();
                loop {
                    crossbeam::select! {
                        recv(receiver) -> message => {
                            match message {
                                Ok(DebouncerMessage::Event(event)) => {
                                    debouncer.add_result(event);
                                    // Ensure that we flush the changes eventually.
                                    if start.elapsed() > std::time::Duration::from_secs(3) {
                                        break;
                                    }
                                }
                                Ok(DebouncerMessage::Flush) => {
                                    break;
                                }
                                Err(_) => {
                                    // There are no more senders. That means `stop` was called.
                                    // Drop all events and exit immediately.
                                    return;
                                }
                            }
                        },
                        default(std::time::Duration::from_millis(10)) => {
                            break;
                        }
                    }
                }
                // No more file changes after 10 ms (or the deadline expired): send the changes
                // and schedule a new analysis.
                let events = debouncer.into_events();
                if !events.is_empty() {
                    handler.handle(events);
                }
            }
        })
        .unwrap();
    let debouncer_sender = sender.clone();
    let watcher =
        recommended_watcher(move |event| sender.send(DebouncerMessage::Event(event)).unwrap())?;
    Ok(Watcher {
        inner: Some(WatcherInner {
            watcher,
            debouncer_sender,
            debouncer_thread: debouncer,
        }),
    })
}
/// Messages sent from the `Watcher` to its debouncer thread.
#[derive(Debug)]
enum DebouncerMessage {
    /// A new file system event.
    Event(notify::Result<notify::Event>),
    /// Request to flush the currently buffered events immediately instead of
    /// waiting for the debounce window to elapse.
    Flush,
}
/// Handle to a running, debounced file watcher.
///
/// `inner` is `Some` while the watcher runs; `set_stop` takes it out so that
/// a subsequent `Drop` doesn't attempt to stop the watcher a second time.
pub struct Watcher {
    inner: Option<WatcherInner>,
}
struct WatcherInner {
    /// The underlying `notify` watcher producing raw file system events.
    watcher: RecommendedWatcher,
    /// Sender used to push flush requests (and, via a clone held by the notify
    /// callback, events) to the debouncer thread.
    debouncer_sender: crossbeam::channel::Sender<DebouncerMessage>,
    /// Handle to the debouncer thread; joined on shutdown.
    debouncer_thread: std::thread::JoinHandle<()>,
}
impl Watcher {
    /// Sets up file watching for `path`.
    ///
    /// Watching is recursive: changes anywhere below `path` are reported.
    pub fn watch(&mut self, path: &SystemPath) -> notify::Result<()> {
        tracing::debug!("Watching path: `{path}`");
        self.inner_mut()
            .watcher
            .watch(path.as_std_path(), RecursiveMode::Recursive)
    }
    /// Stops file watching for `path`.
    pub fn unwatch(&mut self, path: &SystemPath) -> notify::Result<()> {
        tracing::debug!("Unwatching path: `{path}`");
        self.inner_mut().watcher.unwatch(path.as_std_path())
    }
    /// Stops the file watcher.
    ///
    /// Pending events will be discarded.
    ///
    /// The call blocks until the watcher has stopped.
    pub fn stop(mut self) {
        tracing::debug!("Stop file watcher");
        self.set_stop();
    }
    /// Flushes any pending events.
    ///
    /// # Panics
    /// Panics if the debouncer thread has already exited (the channel send
    /// fails); while the watcher is running the receiver is guaranteed alive.
    pub fn flush(&self) {
        self.inner()
            .debouncer_sender
            .send(DebouncerMessage::Flush)
            .unwrap();
    }
    /// Shuts down the watcher if it is still running. Idempotent.
    ///
    /// The drop order below is load-bearing: the debouncer thread only exits
    /// once *all* senders are gone, so both the notify watcher (which owns a
    /// sender clone) and our own sender must be dropped before the join.
    fn set_stop(&mut self) {
        if let Some(inner) = self.inner.take() {
            // drop the watcher to ensure there will be no more events.
            // and to drop the sender used by the notify callback.
            drop(inner.watcher);
            // Drop "our" sender to ensure the sender count goes down to 0.
            // The debouncer thread will end as soon as the sender count is 0.
            drop(inner.debouncer_sender);
            // Wait for the debouncer to finish, propagate any panics
            inner.debouncer_thread.join().unwrap();
        }
    }
    /// Shared access to the running state.
    ///
    /// # Panics
    /// Panics if the watcher has already been stopped.
    fn inner(&self) -> &WatcherInner {
        self.inner.as_ref().expect("Watcher to be running")
    }
    /// Mutable access to the running state.
    ///
    /// # Panics
    /// Panics if the watcher has already been stopped.
    fn inner_mut(&mut self) -> &mut WatcherInner {
        self.inner.as_mut().expect("Watcher to be running")
    }
}
impl Drop for Watcher {
    // Ensures the watcher is shut down (and the debouncer thread joined) even
    // when the caller never invoked `stop` explicitly. Safe after an explicit
    // `stop` because `set_stop` is a no-op once `inner` is `None`.
    fn drop(&mut self) {
        self.set_stop();
    }
}
/// Merges raw `notify` events into one batch of [`ChangeEvent`]s.
#[derive(Default)]
struct Debouncer {
    /// Change events accumulated for the current batch.
    events: Vec<ChangeEvent>,
    /// Set once a full rescan becomes necessary; from then on all other
    /// events are ignored and only the rescan event is emitted.
    rescan_event: Option<ChangeEvent>,
}
impl Debouncer {
    /// Folds a raw `notify` result (event or error) into the current batch.
    fn add_result(&mut self, result: notify::Result<notify::Event>) {
        tracing::trace!("Handling file watcher event: {result:?}");
        match result {
            Ok(event) => self.add_event(event),
            Err(error) => self.add_error(error),
        }
    }
    // Errors are only logged; they never abort the batch.
    #[expect(clippy::unused_self, clippy::needless_pass_by_value)]
    fn add_error(&mut self, error: notify::Error) {
        // Micha: I skimmed through some of notify's source code and it seems the most common errors
        // are IO errors. All other errors should really only happen when adding or removing a watched folders.
        // It's not clear what an upstream handler should do in the case of an IOError (other than logging it).
        // That's what we do for now as well.
        tracing::warn!("File watcher error: {error:?}");
    }
    /// Translates a single `notify` event into a [`ChangeEvent`] and appends
    /// it to the batch. Events that are of no interest (access events, most
    /// metadata changes, meta/unknown events, pathless events, non-UTF8
    /// paths) are dropped.
    fn add_event(&mut self, event: notify::Event) {
        if self.rescan_event.is_some() {
            // We're already in a rescan state, ignore all other events
            return;
        }
        // If the file watcher is out of sync or we observed too many changes, trigger a full rescan
        if event.need_rescan() || self.events.len() > 10000 {
            self.events = Vec::new();
            self.rescan_event = Some(ChangeEvent::Rescan);
            return;
        }
        let kind = event.kind;
        // There are cases where paths can be empty.
        // https://github.com/astral-sh/ruff/issues/14222
        let Some(path) = event.paths.into_iter().next() else {
            tracing::debug!("Ignoring change event with kind '{kind:?}' without a path",);
            return;
        };
        let path = match SystemPathBuf::from_path_buf(path) {
            Ok(path) => path,
            Err(path) => {
                tracing::debug!(
                    "Ignore change to non-UTF8 path `{path}`: {kind:?}",
                    path = path.display()
                );
                // Ignore non-UTF8 paths because they aren't handled by the rest of the system.
                return;
            }
        };
        let event = match kind {
            EventKind::Create(create) => {
                let kind = match create {
                    CreateKind::File => CreatedKind::File,
                    CreateKind::Folder => CreatedKind::Directory,
                    CreateKind::Any | CreateKind::Other => {
                        // The platform didn't say what was created; ask the file system.
                        CreatedKind::from(FileType::from_path(&path))
                    }
                };
                ChangeEvent::Created { path, kind }
            }
            EventKind::Modify(modify) => match modify {
                ModifyKind::Metadata(metadata) => {
                    if FileType::from_path(&path) != FileType::File {
                        // Only interested in file metadata events.
                        return;
                    }
                    match metadata {
                        MetadataKind::Any | MetadataKind::Permissions | MetadataKind::Other => {
                            ChangeEvent::Changed {
                                path,
                                kind: ChangedKind::FileMetadata,
                            }
                        }
                        MetadataKind::AccessTime
                        | MetadataKind::WriteTime
                        | MetadataKind::Ownership
                        | MetadataKind::Extended => {
                            // We're not interested in these metadata changes
                            return;
                        }
                    }
                }
                ModifyKind::Data(_) => ChangeEvent::Changed {
                    kind: ChangedKind::FileContent,
                    path,
                },
                ModifyKind::Name(rename) => match rename {
                    RenameMode::From => {
                        // TODO: notify_debouncer_full matches the `RenameMode::From` and `RenameMode::To` events.
                        // Matching the from and to event would have the added advantage that we know the
                        // type of the path that was renamed, allowing `apply_changes` to avoid traversing the
                        // entire package.
                        // https://github.com/notify-rs/notify/blob/128bf6230c03d39dbb7f301ff7b20e594e34c3a2/notify-debouncer-full/src/lib.rs#L293-L297
                        ChangeEvent::Deleted {
                            kind: DeletedKind::Any,
                            path,
                        }
                    }
                    RenameMode::To => ChangeEvent::Created {
                        kind: CreatedKind::from(FileType::from_path(&path)),
                        path,
                    },
                    RenameMode::Both => {
                        // Both is only emitted when moving a path from within a watched directory
                        // to another watched directory. The event is not emitted if the `to` or `from` path
                        // lay outside the watched directory. However, the `To` and `From` events are always emitted.
                        // That's why we ignore `Both` and instead rely on `To` and `From`.
                        return;
                    }
                    RenameMode::Other => {
                        // Skip over any other rename events
                        return;
                    }
                    RenameMode::Any => {
                        // Guess the action based on the current file system state
                        if path.as_std_path().exists() {
                            let file_type = FileType::from_path(&path);
                            ChangeEvent::Created {
                                kind: file_type.into(),
                                path,
                            }
                        } else {
                            ChangeEvent::Deleted {
                                kind: DeletedKind::Any,
                                path,
                            }
                        }
                    }
                },
                ModifyKind::Other => {
                    // Skip other modification events that are not content or metadata related
                    return;
                }
                ModifyKind::Any => {
                    // Unspecified modification: only report it for existing regular files.
                    if !path.as_std_path().is_file() {
                        return;
                    }
                    ChangeEvent::Changed {
                        path,
                        kind: ChangedKind::Any,
                    }
                }
            },
            EventKind::Access(_) => {
                // We're not interested in any access events
                return;
            }
            EventKind::Remove(kind) => {
                let kind = match kind {
                    RemoveKind::File => DeletedKind::File,
                    RemoveKind::Folder => DeletedKind::Directory,
                    RemoveKind::Any | RemoveKind::Other => DeletedKind::Any,
                };
                ChangeEvent::Deleted { path, kind }
            }
            EventKind::Other => {
                // Skip over meta events
                return;
            }
            EventKind::Any => {
                tracing::debug!("Skipping any FS event for `{path}`");
                return;
            }
        };
        self.events.push(event);
    }
    /// Consumes the debouncer: returns the single rescan event if one was
    /// triggered, otherwise all accumulated change events.
    fn into_events(self) -> Vec<ChangeEvent> {
        if let Some(rescan_event) = self.rescan_event {
            vec![rescan_event]
        } else {
            self.events
        }
    }
}
/// Callback invoked by the debouncer thread with each flushed batch of
/// change events.
pub trait EventHandler: Send + 'static {
    /// Handles one batch of debounced change events.
    fn handle(&self, changes: Vec<ChangeEvent>);
}
// Blanket impl: any `Send + 'static` closure or function taking the change
// batch can serve directly as an event handler.
impl<F> EventHandler for F
where
    F: Fn(Vec<ChangeEvent>) + Send + 'static,
{
    fn handle(&self, changes: Vec<ChangeEvent>) {
        (self)(changes);
    }
}
/// Classification of the file system object a change event refers to.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum FileType {
    /// The event is related to a file.
    File,
    /// The event is related to a directory.
    Directory,
    /// It's unknown whether the event is related to a file or a directory or if it is any other file type.
    Any,
}
impl FileType {
    /// Classifies `path` by querying the file system.
    ///
    /// Returns [`FileType::Any`] when the metadata can't be read (e.g. the
    /// path no longer exists) or when the path is neither a regular file nor
    /// a directory.
    fn from_path(path: &SystemPath) -> FileType {
        let Ok(metadata) = path.as_std_path().metadata() else {
            return FileType::Any;
        };
        if metadata.is_file() {
            FileType::File
        } else if metadata.is_dir() {
            FileType::Directory
        } else {
            FileType::Any
        }
    }
}
impl From<FileType> for CreatedKind {
    // Straight one-to-one mapping: `CreatedKind` mirrors `FileType` variants.
    fn from(value: FileType) -> Self {
        match value {
            FileType::File => Self::File,
            FileType::Directory => Self::Directory,
            FileType::Any => Self::Any,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_codegen/src/lib.rs | crates/ruff_python_codegen/src/lib.rs | pub use generator::{Generator, Mode};
use ruff_python_parser::{ParseError, parse_module};
pub use stylist::{Indentation, Stylist};
mod generator;
mod stylist;
/// Run round-trip source code generation on a given Python code.
pub fn round_trip(code: &str) -> Result<String, ParseError> {
let parsed = parse_module(code)?;
let stylist = Stylist::from_tokens(parsed.tokens(), code);
let mut generator: Generator = (&stylist).into();
generator.unparse_suite(parsed.suite());
Ok(generator.generate())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_codegen/src/generator.rs | crates/ruff_python_codegen/src/generator.rs | //! Generate Python source code from an abstract syntax tree (AST).
use std::fmt::Write;
use std::ops::Deref;
use ruff_python_ast::str::Quote;
use ruff_python_ast::{
self as ast, Alias, AnyStringFlags, ArgOrKeyword, BoolOp, BytesLiteralFlags, CmpOp,
Comprehension, ConversionFlag, DebugText, ExceptHandler, Expr, Identifier, MatchCase, Operator,
Parameter, Parameters, Pattern, Singleton, Stmt, StringFlags, Suite, TypeParam,
TypeParamParamSpec, TypeParamTypeVar, TypeParamTypeVarTuple, WithItem,
};
use ruff_python_ast::{ParameterWithDefault, TypeParams};
use ruff_python_literal::escape::{AsciiEscape, Escape, UnicodeEscape};
use ruff_source_file::LineEnding;
use super::stylist::{Indentation, Stylist};
/// Pseudo precedence levels used by the generator to decide when a
/// sub-expression must be parenthesized: it is wrapped when the surrounding
/// context's level exceeds the expression's own level. Higher values bind
/// more tightly; levels are spaced two apart so `prec + 1` can encode the
/// non-associative side of an operator.
mod precedence {
    pub(crate) const MIN: u8 = 0;
    pub(crate) const NAMED_EXPR: u8 = 1;
    pub(crate) const ASSIGN: u8 = 3;
    pub(crate) const ANN_ASSIGN: u8 = 5;
    pub(crate) const AUG_ASSIGN: u8 = 5;
    pub(crate) const EXPR: u8 = 5;
    pub(crate) const YIELD: u8 = 7;
    pub(crate) const YIELD_FROM: u8 = 7;
    pub(crate) const IF: u8 = 9;
    pub(crate) const FOR: u8 = 9;
    pub(crate) const WHILE: u8 = 9;
    pub(crate) const RETURN: u8 = 11;
    pub(crate) const SLICE: u8 = 13;
    pub(crate) const SUBSCRIPT: u8 = 13;
    pub(crate) const COMPREHENSION_TARGET: u8 = 19;
    pub(crate) const TUPLE: u8 = 19;
    pub(crate) const FORMATTED_VALUE: u8 = 19;
    pub(crate) const COMMA: u8 = 21;
    pub(crate) const ASSERT: u8 = 23;
    pub(crate) const COMPREHENSION_ELEMENT: u8 = 27;
    pub(crate) const LAMBDA: u8 = 27;
    pub(crate) const IF_EXP: u8 = 27;
    pub(crate) const COMPREHENSION: u8 = 29;
    pub(crate) const OR: u8 = 31;
    pub(crate) const AND: u8 = 33;
    pub(crate) const NOT: u8 = 35;
    pub(crate) const CMP: u8 = 37;
    pub(crate) const BIT_OR: u8 = 39;
    pub(crate) const BIT_XOR: u8 = 41;
    pub(crate) const BIT_AND: u8 = 43;
    pub(crate) const LSHIFT: u8 = 45;
    pub(crate) const RSHIFT: u8 = 45;
    pub(crate) const ADD: u8 = 47;
    pub(crate) const SUB: u8 = 47;
    pub(crate) const MULT: u8 = 49;
    pub(crate) const DIV: u8 = 49;
    pub(crate) const MOD: u8 = 49;
    pub(crate) const FLOORDIV: u8 = 49;
    pub(crate) const MAT_MULT: u8 = 49;
    pub(crate) const INVERT: u8 = 53;
    pub(crate) const UADD: u8 = 53;
    pub(crate) const USUB: u8 = 53;
    pub(crate) const POW: u8 = 55;
    pub(crate) const AWAIT: u8 = 57;
    pub(crate) const MAX: u8 = 63;
}
/// Controls the output flavor produced by the generator.
#[derive(Default)]
pub enum Mode {
    /// Ruff's default unparsing behaviour.
    #[default]
    Default,
    /// Emits same output as [`ast.unparse`](https://docs.python.org/3/library/ast.html#ast.unparse).
    AstUnparse,
}
impl Mode {
    /// Quote style to use.
    ///
    /// - [`Default`](`Mode::Default`): the style carried by `flags`
    ///   (`StringFlags::quote_style`).
    /// - [`AstUnparse`](`Mode::AstUnparse`): always [`Quote::Single`], which
    ///   is what CPython's `ast.unparse` emits.
    #[must_use]
    fn quote_style(&self, flags: impl StringFlags) -> Quote {
        if matches!(self, Self::AstUnparse) {
            Quote::Single
        } else {
            flags.quote_style()
        }
    }
}
/// Writer that turns AST nodes back into Python source text.
pub struct Generator<'a> {
    /// The indentation style to use.
    indent: &'a Indentation,
    /// The line ending to use.
    line_ending: LineEnding,
    /// Unparsed code style. See [`Mode`] for more info.
    mode: Mode,
    /// The accumulated output text.
    buffer: String,
    /// Current indentation depth (number of `indent` repetitions emitted
    /// before each statement).
    indent_depth: usize,
    /// Number of newlines still pending; flushed into `buffer` before the
    /// next token is written.
    num_newlines: usize,
    /// `true` until the first statement is emitted; suppresses leading
    /// newlines at the very start of the output.
    initial: bool,
}
impl<'a> From<&'a Stylist<'a>> for Generator<'a> {
    /// Creates a generator adopting the indentation and line-ending style
    /// detected by `stylist`, with the default [`Mode`] and empty state.
    fn from(stylist: &'a Stylist<'a>) -> Self {
        Self {
            indent: stylist.indentation(),
            line_ending: stylist.line_ending(),
            mode: Mode::default(),
            buffer: String::new(),
            indent_depth: 0,
            num_newlines: 0,
            initial: true,
        }
    }
}
impl<'a> Generator<'a> {
/// Creates a generator with the given style and an empty output buffer.
pub const fn new(indent: &'a Indentation, line_ending: LineEnding) -> Self {
    Self {
        // Style preferences.
        indent,
        line_ending,
        mode: Mode::Default,
        // Internal state.
        buffer: String::new(),
        indent_depth: 0,
        num_newlines: 0,
        initial: true,
    }
}
/// Sets the mode for code unparsing.
#[must_use]
pub fn with_mode(mut self, mode: Mode) -> Self {
    self.mode = mode;
    self
}
/// Generate source code from a [`Stmt`].
pub fn stmt(mut self, stmt: &Stmt) -> String {
    self.unparse_stmt(stmt);
    self.generate()
}
/// Generate source code from an [`Expr`].
pub fn expr(mut self, expr: &Expr) -> String {
    self.unparse_expr(expr, 0);
    self.generate()
}
/// Requests at least one newline before the next token (no-op while still
/// at the very start of the output).
fn newline(&mut self) {
    if !self.initial {
        self.num_newlines = std::cmp::max(self.num_newlines, 1);
    }
}
/// Requests at least `1 + extra` newlines before the next token (no-op
/// while still at the very start of the output).
fn newlines(&mut self, extra: usize) {
    if !self.initial {
        self.num_newlines = std::cmp::max(self.num_newlines, 1 + extra);
    }
}
/// Unparses a statement body one indentation level deeper.
fn body(&mut self, stmts: &[Stmt]) {
    self.indent_depth = self.indent_depth.saturating_add(1);
    for stmt in stmts {
        self.unparse_stmt(stmt);
    }
    self.indent_depth = self.indent_depth.saturating_sub(1);
}
/// Writes `s` to the buffer, first flushing any pending newlines.
fn p(&mut self, s: &str) {
    if self.num_newlines > 0 {
        for _ in 0..self.num_newlines {
            self.buffer += &self.line_ending;
        }
        self.num_newlines = 0;
    }
    self.buffer += s;
}
/// Writes an identifier to the buffer.
fn p_id(&mut self, s: &Identifier) {
    self.p(s.as_str());
}
/// Writes a bytes literal (prefix, quotes and escaped contents).
fn p_bytes_repr(&mut self, s: &[u8], flags: BytesLiteralFlags) {
    // raw bytes are interpreted without escapes and should all be ascii (it's a python syntax
    // error otherwise), but if this assumption is violated, a `Utf8Error` will be returned from
    // `p_raw_bytes`, and we should fall back on the normal escaping behavior instead of
    // panicking
    if flags.prefix().is_raw() {
        if let Ok(s) = std::str::from_utf8(s) {
            write!(self.buffer, "{}", flags.display_contents(s))
                .expect("Writing to a String buffer should never fail");
            return;
        }
    }
    let quote_style = self.mode.quote_style(flags);
    let escape = AsciiEscape::with_preferred_quote(s, quote_style);
    if let Some(len) = escape.layout().len {
        // Preallocate when the escaped length is known up front.
        self.buffer.reserve(len);
    }
    escape
        .bytes_repr(flags.triple_quotes())
        .write(&mut self.buffer)
        .expect("Writing to a String buffer should never fail");
}
/// Writes a string literal (prefix, quotes and escaped contents).
fn p_str_repr(&mut self, s: &str, flags: impl Into<AnyStringFlags>) {
    let flags = flags.into();
    // Raw strings are emitted verbatim, without escaping.
    if flags.prefix().is_raw() {
        write!(self.buffer, "{}", flags.display_contents(s))
            .expect("Writing to a String buffer should never fail");
        return;
    }
    self.p(flags.prefix().as_str());
    let quote_style = self.mode.quote_style(flags);
    let escape = UnicodeEscape::with_preferred_quote(s, quote_style);
    if let Some(len) = escape.layout().len {
        self.buffer.reserve(len);
    }
    escape
        .str_repr(flags.triple_quotes())
        .write(&mut self.buffer)
        .expect("Writing to a String buffer should never fail");
}
/// Writes `s` only when `cond` holds.
fn p_if(&mut self, cond: bool, s: &str) {
    if cond {
        self.p(s);
    }
}
/// Writes the separator `s` on every call except the first; clears the
/// `first` flag as a side effect.
fn p_delim(&mut self, first: &mut bool, s: &str) {
    self.p_if(!std::mem::take(first), s);
}
/// Consumes the generator and returns the accumulated source text.
pub(crate) fn generate(self) -> String {
    self.buffer
}
/// Unparses every statement of `suite` in order.
pub fn unparse_suite(&mut self, suite: &Suite) {
    for stmt in suite {
        self.unparse_stmt(stmt);
    }
}
/// Unparses a single statement, including its leading newlines and
/// indentation.
pub(crate) fn unparse_stmt(&mut self, ast: &Stmt) {
    // Emits pending newline(s) plus the current indentation, runs `$body`
    // to print one logical line, then marks the generator as past the
    // start of the output.
    macro_rules! statement {
        ($body:block) => {{
            self.newline();
            self.p(&self.indent.deref().repeat(self.indent_depth));
            $body
            self.initial = false;
        }};
    }
    match ast {
        Stmt::FunctionDef(ast::StmtFunctionDef {
            is_async,
            name,
            parameters,
            body,
            returns,
            decorator_list,
            type_params,
            ..
        }) => {
            // Two blank lines around top-level definitions, one when nested.
            self.newlines(if self.indent_depth == 0 { 2 } else { 1 });
            for decorator in decorator_list {
                statement!({
                    self.p("@");
                    self.unparse_expr(&decorator.expression, precedence::MAX);
                });
            }
            statement!({
                if *is_async {
                    self.p("async ");
                }
                self.p("def ");
                self.p_id(name);
                if let Some(type_params) = type_params {
                    self.unparse_type_params(type_params);
                }
                self.p("(");
                self.unparse_parameters(parameters);
                self.p(")");
                if let Some(returns) = returns {
                    self.p(" -> ");
                    self.unparse_expr(returns, precedence::MAX);
                }
                self.p(":");
            });
            self.body(body);
            if self.indent_depth == 0 {
                self.newlines(2);
            }
        }
        Stmt::ClassDef(ast::StmtClassDef {
            name,
            arguments,
            body,
            decorator_list,
            type_params,
            range: _,
            node_index: _,
        }) => {
            self.newlines(if self.indent_depth == 0 { 2 } else { 1 });
            for decorator in decorator_list {
                statement!({
                    self.p("@");
                    self.unparse_expr(&decorator.expression, precedence::MAX);
                });
            }
            statement!({
                self.p("class ");
                self.p_id(name);
                if let Some(type_params) = type_params {
                    self.unparse_type_params(type_params);
                }
                if let Some(arguments) = arguments {
                    self.p("(");
                    let mut first = true;
                    // Bases and keywords are interleaved in source order.
                    for arg_or_keyword in arguments.arguments_source_order() {
                        match arg_or_keyword {
                            ArgOrKeyword::Arg(arg) => {
                                self.p_delim(&mut first, ", ");
                                self.unparse_expr(arg, precedence::MAX);
                            }
                            ArgOrKeyword::Keyword(keyword) => {
                                self.p_delim(&mut first, ", ");
                                if let Some(arg) = &keyword.arg {
                                    self.p_id(arg);
                                    self.p("=");
                                } else {
                                    self.p("**");
                                }
                                self.unparse_expr(&keyword.value, precedence::MAX);
                            }
                        }
                    }
                    self.p(")");
                }
                self.p(":");
            });
            self.body(body);
            if self.indent_depth == 0 {
                self.newlines(2);
            }
        }
        Stmt::Return(ast::StmtReturn {
            value,
            range: _,
            node_index: _,
        }) => {
            statement!({
                if let Some(expr) = value {
                    self.p("return ");
                    self.unparse_expr(expr, precedence::RETURN);
                } else {
                    self.p("return");
                }
            });
        }
        Stmt::Delete(ast::StmtDelete {
            targets,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("del ");
                let mut first = true;
                for expr in targets {
                    self.p_delim(&mut first, ", ");
                    self.unparse_expr(expr, precedence::COMMA);
                }
            });
        }
        Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
            statement!({
                // Chained assignment: `a = b = value`.
                for target in targets {
                    self.unparse_expr(target, precedence::ASSIGN);
                    self.p(" = ");
                }
                self.unparse_expr(value, precedence::ASSIGN);
            });
        }
        Stmt::AugAssign(ast::StmtAugAssign {
            target,
            op,
            value,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.unparse_expr(target, precedence::AUG_ASSIGN);
                self.p(" ");
                self.p(match op {
                    Operator::Add => "+",
                    Operator::Sub => "-",
                    Operator::Mult => "*",
                    Operator::MatMult => "@",
                    Operator::Div => "/",
                    Operator::Mod => "%",
                    Operator::Pow => "**",
                    Operator::LShift => "<<",
                    Operator::RShift => ">>",
                    Operator::BitOr => "|",
                    Operator::BitXor => "^",
                    Operator::BitAnd => "&",
                    Operator::FloorDiv => "//",
                });
                self.p("= ");
                self.unparse_expr(value, precedence::AUG_ASSIGN);
            });
        }
        Stmt::AnnAssign(ast::StmtAnnAssign {
            target,
            annotation,
            value,
            simple,
            range: _,
            node_index: _,
        }) => {
            statement!({
                // A non-simple `Name` target (e.g. `(x): int`) must keep its
                // parentheses to round-trip correctly.
                let need_parens = matches!(target.as_ref(), Expr::Name(_)) && !simple;
                self.p_if(need_parens, "(");
                self.unparse_expr(target, precedence::ANN_ASSIGN);
                self.p_if(need_parens, ")");
                self.p(": ");
                self.unparse_expr(annotation, precedence::COMMA);
                if let Some(value) = value {
                    self.p(" = ");
                    self.unparse_expr(value, precedence::COMMA);
                }
            });
        }
        Stmt::For(ast::StmtFor {
            is_async,
            target,
            iter,
            body,
            orelse,
            ..
        }) => {
            statement!({
                if *is_async {
                    self.p("async ");
                }
                self.p("for ");
                self.unparse_expr(target, precedence::FOR);
                self.p(" in ");
                self.unparse_expr(iter, precedence::MAX);
                self.p(":");
            });
            self.body(body);
            if !orelse.is_empty() {
                statement!({
                    self.p("else:");
                });
                self.body(orelse);
            }
        }
        Stmt::While(ast::StmtWhile {
            test,
            body,
            orelse,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("while ");
                self.unparse_expr(test, precedence::WHILE);
                self.p(":");
            });
            self.body(body);
            if !orelse.is_empty() {
                statement!({
                    self.p("else:");
                });
                self.body(orelse);
            }
        }
        Stmt::If(ast::StmtIf {
            test,
            body,
            elif_else_clauses,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("if ");
                self.unparse_expr(test, precedence::IF);
                self.p(":");
            });
            self.body(body);
            // A clause without a test is the trailing `else`.
            for clause in elif_else_clauses {
                if let Some(test) = &clause.test {
                    statement!({
                        self.p("elif ");
                        self.unparse_expr(test, precedence::IF);
                        self.p(":");
                    });
                } else {
                    statement!({
                        self.p("else:");
                    });
                }
                self.body(&clause.body);
            }
        }
        Stmt::With(ast::StmtWith {
            is_async,
            items,
            body,
            ..
        }) => {
            statement!({
                if *is_async {
                    self.p("async ");
                }
                self.p("with ");
                let mut first = true;
                for item in items {
                    self.p_delim(&mut first, ", ");
                    self.unparse_with_item(item);
                }
                self.p(":");
            });
            self.body(body);
        }
        Stmt::Match(ast::StmtMatch {
            subject,
            cases,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("match ");
                self.unparse_expr(subject, precedence::MAX);
                self.p(":");
            });
            // `case` clauses are indented one level below `match`.
            for case in cases {
                self.indent_depth = self.indent_depth.saturating_add(1);
                statement!({
                    self.unparse_match_case(case);
                });
                self.indent_depth = self.indent_depth.saturating_sub(1);
            }
        }
        Stmt::TypeAlias(ast::StmtTypeAlias {
            name,
            range: _,
            node_index: _,
            type_params,
            value,
        }) => {
            statement!({
                self.p("type ");
                self.unparse_expr(name, precedence::MAX);
                if let Some(type_params) = type_params {
                    self.unparse_type_params(type_params);
                }
                self.p(" = ");
                self.unparse_expr(value, precedence::ASSIGN);
            });
        }
        Stmt::Raise(ast::StmtRaise {
            exc,
            cause,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("raise");
                if let Some(exc) = exc {
                    self.p(" ");
                    self.unparse_expr(exc, precedence::MAX);
                }
                if let Some(cause) = cause {
                    self.p(" from ");
                    self.unparse_expr(cause, precedence::MAX);
                }
            });
        }
        Stmt::Try(ast::StmtTry {
            body,
            handlers,
            orelse,
            finalbody,
            is_star,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("try:");
            });
            self.body(body);
            for handler in handlers {
                statement!({
                    self.unparse_except_handler(handler, *is_star);
                });
            }
            if !orelse.is_empty() {
                statement!({
                    self.p("else:");
                });
                self.body(orelse);
            }
            if !finalbody.is_empty() {
                statement!({
                    self.p("finally:");
                });
                self.body(finalbody);
            }
        }
        Stmt::Assert(ast::StmtAssert {
            test,
            msg,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("assert ");
                self.unparse_expr(test, precedence::ASSERT);
                if let Some(msg) = msg {
                    self.p(", ");
                    self.unparse_expr(msg, precedence::ASSERT);
                }
            });
        }
        Stmt::Import(ast::StmtImport {
            names,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("import ");
                let mut first = true;
                for alias in names {
                    self.p_delim(&mut first, ", ");
                    self.unparse_alias(alias);
                }
            });
        }
        Stmt::ImportFrom(ast::StmtImportFrom {
            module,
            names,
            level,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("from ");
                // One leading `.` per relative-import level.
                if *level > 0 {
                    for _ in 0..*level {
                        self.p(".");
                    }
                }
                if let Some(module) = module {
                    self.p_id(module);
                }
                self.p(" import ");
                let mut first = true;
                for alias in names {
                    self.p_delim(&mut first, ", ");
                    self.unparse_alias(alias);
                }
            });
        }
        Stmt::Global(ast::StmtGlobal {
            names,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("global ");
                let mut first = true;
                for name in names {
                    self.p_delim(&mut first, ", ");
                    self.p_id(name);
                }
            });
        }
        Stmt::Nonlocal(ast::StmtNonlocal {
            names,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.p("nonlocal ");
                let mut first = true;
                for name in names {
                    self.p_delim(&mut first, ", ");
                    self.p_id(name);
                }
            });
        }
        Stmt::Expr(ast::StmtExpr {
            value,
            range: _,
            node_index: _,
        }) => {
            statement!({
                self.unparse_expr(value, precedence::EXPR);
            });
        }
        Stmt::Pass(_) => {
            statement!({
                self.p("pass");
            });
        }
        Stmt::Break(_) => {
            statement!({
                self.p("break");
            });
        }
        Stmt::Continue(_) => {
            statement!({
                self.p("continue");
            });
        }
        Stmt::IpyEscapeCommand(ast::StmtIpyEscapeCommand { kind, value, .. }) => {
            statement!({
                self.p(&format!("{kind}{value}"));
            });
        }
    }
}
/// Unparses a single `except`/`except*` clause including its body;
/// `star` is true for exception-group handlers (`except*`).
fn unparse_except_handler(&mut self, ast: &ExceptHandler, star: bool) {
    match ast {
        ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
            type_,
            name,
            body,
            range: _,
            node_index: _,
        }) => {
            self.p("except");
            if star {
                self.p("*");
            }
            if let Some(type_) = type_ {
                self.p(" ");
                self.unparse_expr(type_, precedence::MAX);
            }
            if let Some(name) = name {
                self.p(" as ");
                self.p_id(name);
            }
            self.p(":");
            self.body(body);
        }
    }
}
/// Unparses a `match` statement pattern.
fn unparse_pattern(&mut self, ast: &Pattern) {
    match ast {
        Pattern::MatchValue(ast::PatternMatchValue {
            value,
            range: _,
            node_index: _,
        }) => {
            self.unparse_expr(value, precedence::MAX);
        }
        Pattern::MatchSingleton(ast::PatternMatchSingleton {
            value,
            range: _,
            node_index: _,
        }) => {
            self.unparse_singleton(*value);
        }
        Pattern::MatchSequence(ast::PatternMatchSequence {
            patterns,
            range: _,
            node_index: _,
        }) => {
            self.p("[");
            let mut first = true;
            for pattern in patterns {
                self.p_delim(&mut first, ", ");
                self.unparse_pattern(pattern);
            }
            self.p("]");
        }
        Pattern::MatchMapping(ast::PatternMatchMapping {
            keys,
            patterns,
            rest,
            range: _,
            node_index: _,
        }) => {
            self.p("{");
            let mut first = true;
            for (key, pattern) in keys.iter().zip(patterns) {
                self.p_delim(&mut first, ", ");
                self.unparse_expr(key, precedence::MAX);
                self.p(": ");
                self.unparse_pattern(pattern);
            }
            if let Some(rest) = rest {
                self.p_delim(&mut first, ", ");
                self.p("**");
                self.p_id(rest);
            }
            self.p("}");
        }
        Pattern::MatchClass(ast::PatternMatchClass { cls, arguments, .. }) => {
            // Fix: this arm previously emitted nothing, producing invalid
            // output (e.g. `case :`) for class patterns. Emit
            // `Cls(pos1, ..., name=pat, ...)`, matching `ast.unparse`.
            self.unparse_expr(cls, precedence::MAX);
            self.p("(");
            let mut first = true;
            for pattern in &arguments.patterns {
                self.p_delim(&mut first, ", ");
                self.unparse_pattern(pattern);
            }
            for keyword in &arguments.keywords {
                self.p_delim(&mut first, ", ");
                self.p_id(&keyword.attr);
                self.p("=");
                self.unparse_pattern(&keyword.pattern);
            }
            self.p(")");
        }
        Pattern::MatchStar(ast::PatternMatchStar {
            name,
            range: _,
            node_index: _,
        }) => {
            // `*name` captures the rest of a sequence; `*_` discards it.
            self.p("*");
            if let Some(name) = name {
                self.p_id(name);
            } else {
                self.p("_");
            }
        }
        Pattern::MatchAs(ast::PatternMatchAs {
            pattern,
            name,
            range: _,
            node_index: _,
        }) => {
            if let Some(pattern) = pattern {
                self.unparse_pattern(pattern);
                self.p(" as ");
            }
            // A bare `MatchAs` without a name is the wildcard `_`.
            if let Some(name) = name {
                self.p_id(name);
            } else {
                self.p("_");
            }
        }
        Pattern::MatchOr(ast::PatternMatchOr {
            patterns,
            range: _,
            node_index: _,
        }) => {
            let mut first = true;
            for pattern in patterns {
                self.p_delim(&mut first, " | ");
                self.unparse_pattern(pattern);
            }
        }
    }
}
/// Unparses a single `case` clause (pattern, optional guard, and body).
fn unparse_match_case(&mut self, ast: &MatchCase) {
    self.p("case ");
    self.unparse_pattern(&ast.pattern);
    if let Some(guard) = &ast.guard {
        self.p(" if ");
        self.unparse_expr(guard, precedence::MAX);
    }
    self.p(":");
    self.body(&ast.body);
}
/// Unparses a PEP 695 type-parameter list, e.g. `[T, *Ts, **P]`.
fn unparse_type_params(&mut self, type_params: &TypeParams) {
    self.p("[");
    let mut first = true;
    for type_param in type_params.iter() {
        self.p_delim(&mut first, ", ");
        self.unparse_type_param(type_param);
    }
    self.p("]");
}
/// Unparses a single PEP 695 type parameter — a type variable (with
/// optional bound), a `*Ts` type-var tuple, or a `**P` param spec — each
/// with an optional default (PEP 696).
pub(crate) fn unparse_type_param(&mut self, ast: &TypeParam) {
    match ast {
        TypeParam::TypeVar(TypeParamTypeVar {
            name,
            bound,
            default,
            ..
        }) => {
            self.p_id(name);
            if let Some(expr) = bound {
                self.p(": ");
                self.unparse_expr(expr, precedence::MAX);
            }
            if let Some(expr) = default {
                self.p(" = ");
                self.unparse_expr(expr, precedence::MAX);
            }
        }
        TypeParam::TypeVarTuple(TypeParamTypeVarTuple { name, default, .. }) => {
            self.p("*");
            self.p_id(name);
            if let Some(expr) = default {
                self.p(" = ");
                self.unparse_expr(expr, precedence::MAX);
            }
        }
        TypeParam::ParamSpec(TypeParamParamSpec { name, default, .. }) => {
            self.p("**");
            self.p_id(name);
            if let Some(expr) = default {
                self.p(" = ");
                self.unparse_expr(expr, precedence::MAX);
            }
        }
    }
}
pub(crate) fn unparse_expr(&mut self, ast: &Expr, level: u8) {
macro_rules! opprec {
($opty:ident, $x:expr, $enu:path, $($var:ident($op:literal, $prec:ident)),*$(,)?) => {
match $x {
$(<$enu>::$var => (opprec!(@space $opty, $op), precedence::$prec),)*
}
};
(@space bin, $op:literal) => {
concat!(" ", $op, " ")
};
(@space un, $op:literal) => {
$op
};
}
macro_rules! group_if {
($lvl:expr, $body:block) => {{
let group = level > $lvl;
self.p_if(group, "(");
let ret = $body;
self.p_if(group, ")");
ret
}};
}
match ast {
Expr::BoolOp(ast::ExprBoolOp {
op,
values,
range: _,
node_index: _,
}) => {
let (op, prec) = opprec!(bin, op, BoolOp, And("and", AND), Or("or", OR));
group_if!(prec, {
let mut first = true;
for val in values {
self.p_delim(&mut first, op);
self.unparse_expr(val, prec + 1);
}
});
}
Expr::Named(ast::ExprNamed {
target,
value,
range: _,
node_index: _,
}) => {
group_if!(precedence::NAMED_EXPR, {
self.unparse_expr(target, precedence::NAMED_EXPR);
self.p(" := ");
self.unparse_expr(value, precedence::NAMED_EXPR + 1);
});
}
Expr::BinOp(ast::ExprBinOp {
left,
op,
right,
range: _,
node_index: _,
}) => {
let rassoc = matches!(op, Operator::Pow);
let (op, prec) = opprec!(
bin,
op,
Operator,
Add("+", ADD),
Sub("-", SUB),
Mult("*", MULT),
MatMult("@", MAT_MULT),
Div("/", DIV),
Mod("%", MOD),
Pow("**", POW),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_codegen/src/stylist.rs | crates/ruff_python_codegen/src/stylist.rs | //! Detect code style from Python source code.
use std::borrow::Cow;
use std::cell::OnceCell;
use std::ops::Deref;
use ruff_python_ast::str::Quote;
use ruff_python_ast::token::{Token, TokenKind, Tokens};
use ruff_source_file::{LineEnding, LineRanges, find_newline};
use ruff_text_size::Ranged;
/// Detected code style of a Python source file: indentation string,
/// preferred quote character, and line ending.
#[derive(Debug, Clone)]
pub struct Stylist<'a> {
    /// The source text (borrowed until [`Stylist::into_owned`] is called).
    source: Cow<'a, str>,
    /// Indentation detected from the first indented block (or continuation).
    indentation: Indentation,
    /// Quote style detected from the first applicable string token.
    quote: Quote,
    /// Line ending, computed lazily on first access from the source.
    line_ending: OnceCell<LineEnding>,
}
impl<'a> Stylist<'a> {
    /// Returns the detected indentation style.
    pub fn indentation(&self) -> &Indentation {
        &self.indentation
    }
    /// Returns the detected quote style.
    pub fn quote(&self) -> Quote {
        self.quote
    }
    /// Returns the line ending used by the source.
    ///
    /// Computed lazily on first call from the first newline found in the
    /// source; falls back to [`LineEnding::default`] when the source
    /// contains no newline.
    pub fn line_ending(&self) -> LineEnding {
        *self.line_ending.get_or_init(|| {
            find_newline(&self.source)
                .map(|(_, ending)| ending)
                .unwrap_or_default()
        })
    }
    /// Converts into a `'static` stylist by taking ownership of the source.
    pub fn into_owned(self) -> Stylist<'static> {
        Stylist {
            source: Cow::Owned(self.source.into_owned()),
            indentation: self.indentation,
            quote: self.quote,
            line_ending: self.line_ending,
        }
    }
    /// Detects the style of `source` from its token stream.
    pub fn from_tokens(tokens: &Tokens, source: &'a str) -> Self {
        let indentation = detect_indentation(tokens, source);
        Self {
            source: Cow::Borrowed(source),
            indentation,
            quote: detect_quote(tokens),
            line_ending: OnceCell::default(),
        }
    }
}
/// Returns the quote style of the first token that can carry one: a
/// non-triple-quoted string literal or an f-string start. Triple-quoted
/// strings are skipped; [`Quote::default`] is used when no candidate exists.
fn detect_quote(tokens: &[Token]) -> Quote {
    tokens
        .iter()
        .find_map(|token| match token.kind() {
            TokenKind::String if !token.is_triple_quoted_string() => {
                Some(token.string_quote_style())
            }
            TokenKind::FStringStart => Some(token.string_quote_style()),
            _ => None,
        })
        .unwrap_or_default()
}
/// Detects the indentation string of `source`.
///
/// Prefers the whitespace of the first logical `Indent` token. If the file
/// has no logical indent (e.g. indentation only inside parenthesized
/// continuations), falls back to the leading whitespace after the first
/// non-logical newline. Defaults to [`Indentation::default`] otherwise.
fn detect_indentation(tokens: &[Token], source: &str) -> Indentation {
    let indent_range = tokens.iter().find_map(|token| {
        if matches!(token.kind(), TokenKind::Indent) {
            Some(token.range())
        } else {
            None
        }
    });
    if let Some(indent_range) = indent_range {
        let mut whitespace = &source[indent_range];
        // https://docs.python.org/3/reference/lexical_analysis.html#indentation
        // > A formfeed character may be present at the start of the line; it will be ignored for
        // > the indentation calculations above. Formfeed characters occurring elsewhere in the
        // > leading whitespace have an undefined effect (for instance, they may reset the space
        // > count to zero).
        // So there's UB in python lexer -.-
        // In practice, they just reset the indentation:
        // https://github.com/python/cpython/blob/df8b3a46a7aa369f246a09ffd11ceedf1d34e921/Parser/tokenizer.c#L1819-L1821
        // https://github.com/astral-sh/ruff/blob/a41bb2733fe75a71f4cf6d4bb21e659fc4630b30/crates/ruff_python_parser/src/lexer.rs#L664-L667
        // We also reset the indentation when we see a formfeed character.
        // See also https://github.com/astral-sh/ruff/issues/7455#issuecomment-1722458825
        if let Some((_before, after)) = whitespace.rsplit_once('\x0C') {
            whitespace = after;
        }
        Indentation(whitespace.to_string())
    } else {
        // If we can't find a logical indent token, search for a non-logical indent, to cover cases
        // like:
        //```python
        // from math import (
        //   sin,
        //   tan,
        //   cos,
        // )
        // ```
        for token in tokens {
            if token.kind() == TokenKind::NonLogicalNewline {
                let line = source.line_str(token.end());
                let indent_index = line.find(|c: char| !c.is_whitespace());
                if let Some(indent_index) = indent_index {
                    if indent_index > 0 {
                        let whitespace = &line[..indent_index];
                        return Indentation(whitespace.to_string());
                    }
                }
            }
        }
        Indentation::default()
    }
}
/// The indentation style used in Python source code.
///
/// Wraps the literal whitespace string that makes up a single indentation
/// level (e.g. spaces or a tab).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Indentation(String);
impl Indentation {
    /// Creates an [`Indentation`] from the given whitespace string.
    pub const fn new(indentation: String) -> Self {
        Self(indentation)
    }
}
impl Default for Indentation {
fn default() -> Self {
Indentation(" ".to_string())
}
}
impl Indentation {
    /// Returns the indentation as a string slice.
    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }

    /// Returns the first character of the indentation (e.g. `' '` or `'\t'`).
    ///
    /// # Panics
    ///
    /// Panics if the indentation string is empty.
    pub fn as_char(&self) -> char {
        // Was a bare `unwrap()`; state the invariant so a failure is debuggable.
        self.0
            .chars()
            .next()
            .expect("indentation must contain at least one character")
    }
}
impl Deref for Indentation {
    type Target = str;

    /// Lets an `Indentation` be used anywhere a `&str` is expected.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[cfg(test)]
mod tests {
    use ruff_python_parser::{Mode, ParseOptions, parse_module, parse_unchecked};
    use ruff_source_file::{LineEnding, find_newline};
    use super::{Indentation, Quote, Stylist};

    // NOTE(review): several Python fixtures below that should differ only in
    // their leading whitespace are textually identical here — the embedded
    // string literals look like their indentation was lost in transit.
    // Confirm against version control before trusting the fixture contents.

    // Exercises `detect_indentation` across its fallback chain.
    #[test]
    fn indentation() {
        // No indented line at all: falls back to the default.
        let contents = r"x = 1";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation::default());
        let contents = r"
if True:
pass
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
        let contents = r"
if True:
pass
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
        // Tab indentation.
        let contents = r"
if True:
pass
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation("\t".to_string()));
        // No logical `Indent` token; detected from a non-logical newline.
        let contents = r"
x = (
1,
2,
3,
)
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
        // formfeed indent, see `detect_indentation` comment.
        let contents = r"
class FormFeedIndent:
def __init__(self, a=[]):
print(a)
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
    }

    // Non-breaking-space indentation only parses with recovery, hence
    // `parse_unchecked`.
    #[test]
    fn indent_non_breaking_whitespace() {
        let contents = r"
x = (
1,
2,
3,
)
";
        let parsed = parse_unchecked(contents, ParseOptions::from(Mode::Module));
        assert_eq!(
            Stylist::from_tokens(parsed.tokens(), contents).indentation(),
            &Indentation(" ".to_string())
        );
    }

    // Exercises `detect_quote`: plain/f-strings, docstring skipping.
    #[test]
    fn quote() {
        let contents = r"x = 1";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::default());
        let contents = r"x = '1'";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Single);
        let contents = r"x = f'1'";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Single);
        let contents = r#"x = "1""#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Double);
        let contents = r#"x = f"1""#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Double);
        let contents = r#"s = "It's done.""#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Double);
        // No style if only double quoted docstring (will take default Double)
        let contents = r#"
def f():
"""Docstring."""
pass
"#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::default());
        // Detect from string literal appearing after docstring
        let contents = r#"
"""Module docstring."""
a = 'v'
"#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Single);
        let contents = r#"
'''Module docstring.'''
a = "v"
"#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Double);
        // Detect from f-string appearing after docstring
        let contents = r#"
"""Module docstring."""
a = f'v'
"#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Single);
        let contents = r#"
'''Module docstring.'''
a = f"v"
"#;
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Double);
        // Triple-quoted f-strings *do* count (only plain triples are skipped).
        let contents = r"
f'''Module docstring.'''
";
        let parsed = parse_module(contents).unwrap();
        let stylist = Stylist::from_tokens(parsed.tokens(), contents);
        assert_eq!(stylist.quote(), Quote::Single);
    }

    // Sanity-checks `find_newline` for all three line-ending conventions.
    #[test]
    fn line_ending() {
        let contents = "x = 1";
        assert_eq!(find_newline(contents).map(|(_, ending)| ending), None);
        let contents = "x = 1\n";
        assert_eq!(
            find_newline(contents).map(|(_, ending)| ending),
            Some(LineEnding::Lf)
        );
        let contents = "x = 1\r";
        assert_eq!(
            find_newline(contents).map(|(_, ending)| ending),
            Some(LineEnding::Cr)
        );
        let contents = "x = 1\r\n";
        assert_eq!(
            find_newline(contents).map(|(_, ending)| ending),
            Some(LineEnding::CrLf)
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/relocate.rs | crates/ruff_python_ast/src/relocate.rs | use ruff_text_size::TextRange;
use crate::visitor::transformer::{Transformer, walk_expr, walk_keyword};
use crate::{self as ast};
use crate::{Expr, Keyword};
/// Change an expression's location (recursively) to match a desired, fixed
/// range.
pub fn relocate_expr(expr: &mut Expr, range: TextRange) {
    let relocator = Relocator { range };
    relocator.visit_expr(expr);
}
/// A [`Transformer`] that overwrites the range of every node it visits with a
/// single fixed target range.
#[derive(Debug)]
struct Relocator {
    // The range assigned to each visited node.
    range: TextRange,
}
impl Transformer for Relocator {
    /// Overwrites the expression's own range, then recurses into its children.
    fn visit_expr(&self, expr: &mut Expr) {
        // Every `Expr` variant carries a `range` field of the same type, so a
        // single or-pattern can bind it across all variants; the match stays
        // exhaustive, which forces an update here when a variant is added.
        match expr {
            Expr::BoolOp(ast::ExprBoolOp { range, .. })
            | Expr::Named(ast::ExprNamed { range, .. })
            | Expr::BinOp(ast::ExprBinOp { range, .. })
            | Expr::UnaryOp(ast::ExprUnaryOp { range, .. })
            | Expr::Lambda(ast::ExprLambda { range, .. })
            | Expr::If(ast::ExprIf { range, .. })
            | Expr::Dict(ast::ExprDict { range, .. })
            | Expr::Set(ast::ExprSet { range, .. })
            | Expr::ListComp(ast::ExprListComp { range, .. })
            | Expr::SetComp(ast::ExprSetComp { range, .. })
            | Expr::DictComp(ast::ExprDictComp { range, .. })
            | Expr::Generator(ast::ExprGenerator { range, .. })
            | Expr::Await(ast::ExprAwait { range, .. })
            | Expr::Yield(ast::ExprYield { range, .. })
            | Expr::YieldFrom(ast::ExprYieldFrom { range, .. })
            | Expr::Compare(ast::ExprCompare { range, .. })
            | Expr::Call(ast::ExprCall { range, .. })
            | Expr::FString(ast::ExprFString { range, .. })
            | Expr::TString(ast::ExprTString { range, .. })
            | Expr::StringLiteral(ast::ExprStringLiteral { range, .. })
            | Expr::BytesLiteral(ast::ExprBytesLiteral { range, .. })
            | Expr::NumberLiteral(ast::ExprNumberLiteral { range, .. })
            | Expr::BooleanLiteral(ast::ExprBooleanLiteral { range, .. })
            | Expr::NoneLiteral(ast::ExprNoneLiteral { range, .. })
            | Expr::EllipsisLiteral(ast::ExprEllipsisLiteral { range, .. })
            | Expr::Attribute(ast::ExprAttribute { range, .. })
            | Expr::Subscript(ast::ExprSubscript { range, .. })
            | Expr::Starred(ast::ExprStarred { range, .. })
            | Expr::Name(ast::ExprName { range, .. })
            | Expr::List(ast::ExprList { range, .. })
            | Expr::Tuple(ast::ExprTuple { range, .. })
            | Expr::Slice(ast::ExprSlice { range, .. })
            | Expr::IpyEscapeCommand(ast::ExprIpyEscapeCommand { range, .. }) => {
                *range = self.range;
            }
        }
        walk_expr(self, expr);
    }

    /// Keywords are handled separately since they are not expressions.
    fn visit_keyword(&self, keyword: &mut Keyword) {
        keyword.range = self.range;
        walk_keyword(self, keyword);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/node_index.rs | crates/ruff_python_ast/src/node_index.rs | use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};
/// An AST node that has an index.
pub trait HasNodeIndex {
    /// Returns the [`AtomicNodeIndex`] for this node.
    fn node_index(&self) -> &AtomicNodeIndex;
}
impl<T> HasNodeIndex for &T
where
    T: HasNodeIndex,
{
    /// Delegates through the reference to the underlying node's index.
    fn node_index(&self) -> &AtomicNodeIndex {
        let inner: &T = self;
        inner.node_index()
    }
}
/// A unique index for a node within an AST.
///
/// Backed by a `NonZeroU32` so that `Option<NodeIndex>` occupies no extra
/// space (niche optimization).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct NodeIndex(NonZeroU32);
impl NodeIndex {
    /// A placeholder `NodeIndex`.
    pub const NONE: NodeIndex = NodeIndex(NonZeroU32::new(NodeIndex::_NONE).unwrap());

    // Note that the index `u32::MAX` is reserved for the `NonZeroU32` niche, and
    // this placeholder also reserves the second highest index.
    const _NONE: u32 = u32::MAX - 1;

    /// Returns the index as a `u32`, or `None` for `NodeIndex::NONE`.
    pub fn as_u32(self) -> Option<u32> {
        // Undo the +1 shift applied by `From<u32>` when converting back.
        (self != NodeIndex::NONE).then(|| self.0.get() - 1)
    }
}
impl From<u32> for NodeIndex {
fn from(value: u32) -> Self {
match NonZeroU32::new(value + 1).map(NodeIndex) {
None | Some(NodeIndex::NONE) => panic!("exceeded maximum `NodeIndex`"),
Some(index) => index,
}
}
}
impl std::fmt::Debug for NodeIndex {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if *self == Self::NONE {
f.debug_tuple("NodeIndex(None)").finish()
} else {
f.debug_tuple("NodeIndex").field(&self.0).finish()
}
}
}
/// A unique index for a node within an AST.
///
/// This type is interiorly mutable to allow assigning node indices
/// on-demand after parsing.
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct AtomicNodeIndex(AtomicU32);
#[allow(clippy::declare_interior_mutable_const)]
impl AtomicNodeIndex {
    /// A placeholder `AtomicNodeIndex`.
    pub const NONE: AtomicNodeIndex = AtomicNodeIndex(AtomicU32::new(NodeIndex::_NONE));

    /// Load the current value of the `AtomicNodeIndex`.
    pub fn load(&self) -> NodeIndex {
        let raw = self.0.load(Ordering::Relaxed);
        let index = NonZeroU32::new(raw).expect("value stored was a valid `NodeIndex`");
        NodeIndex(index)
    }

    /// Set the value of the `AtomicNodeIndex`.
    pub fn set(&self, index: NodeIndex) {
        let raw = index.0.get();
        self.0.store(raw, Ordering::Relaxed);
    }
}
impl Default for AtomicNodeIndex {
fn default() -> Self {
Self::NONE
}
}
impl std::fmt::Debug for AtomicNodeIndex {
    /// Delegates to `NodeIndex`'s `Debug` output via a relaxed load.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let snapshot = self.load();
        std::fmt::Debug::fmt(&snapshot, f)
    }
}
// The following impls all delegate to `NodeIndex` semantics by loading the
// current value; two `AtomicNodeIndex`es compare/hash/order by their loaded
// indices, not by identity.
impl std::hash::Hash for AtomicNodeIndex {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.load().hash(state);
    }
}
impl PartialOrd for AtomicNodeIndex {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for AtomicNodeIndex {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.load().cmp(&other.load())
    }
}
impl Eq for AtomicNodeIndex {}
impl PartialEq for AtomicNodeIndex {
    fn eq(&self, other: &Self) -> bool {
        self.load() == other.load()
    }
}
// Cloning copies the current raw value into a fresh atomic.
impl Clone for AtomicNodeIndex {
    fn clone(&self) -> Self {
        Self(AtomicU32::from(self.0.load(Ordering::Relaxed)))
    }
}
#[cfg(test)]
mod tests {
    use super::{AtomicNodeIndex, NodeIndex};

    // Round-trips the placeholder and real indices through the atomic wrapper
    // and the `From<u32>`/`as_u32` shift.
    #[test]
    fn test_node_index() {
        let index = AtomicNodeIndex::NONE;
        assert_eq!(index.load(), NodeIndex::NONE);
        assert_eq!(format!("{index:?}"), "NodeIndex(None)");
        index.set(NodeIndex::from(1));
        assert_eq!(index.load(), NodeIndex::from(1));
        assert_eq!(index.load().as_u32(), Some(1));
        // Index 0 is representable despite the `NonZeroU32` backing.
        let index = NodeIndex::from(0);
        assert_eq!(index.as_u32(), Some(0));
        // The placeholder maps back to `None`.
        let index = NodeIndex::NONE;
        assert_eq!(index.as_u32(), None);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/node.rs | crates/ruff_python_ast/src/node.rs | use ruff_text_size::Ranged;
use crate::visitor::source_order::SourceOrderVisitor;
use crate::{
self as ast, Alias, AnyNodeRef, AnyParameterRef, ArgOrKeyword, MatchCase, PatternArguments,
PatternKeyword,
};
impl ast::ElifElseClause {
    // Visits the optional `elif` test before the clause body, in source order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ElifElseClause {
            range: _,
            node_index: _,
            test,
            body,
        } = self;
        // `test` is `None` for a plain `else` clause.
        if let Some(test) = test {
            visitor.visit_expr(test);
        }
        visitor.visit_body(body);
    }
}
impl ast::ExprDict {
    // Visits each key/value pair in order; a `None` key denotes a `**`-spread
    // item, which contributes only its value.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprDict {
            items,
            range: _,
            node_index: _,
        } = self;
        for ast::DictItem { key, value } in items {
            if let Some(key) = key {
                visitor.visit_expr(key);
            }
            visitor.visit_expr(value);
        }
    }
}
impl ast::ExprBoolOp {
    /// Visits a boolean operation in source order: the first operand, then the
    /// operator, then the remaining operands. The operator is visited exactly
    /// once even if the value list is empty.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprBoolOp {
            op,
            values,
            range: _,
            node_index: _,
        } = self;
        if let Some((first, rest)) = values.split_first() {
            visitor.visit_expr(first);
            visitor.visit_bool_op(op);
            for value in rest {
                visitor.visit_expr(value);
            }
        } else {
            // Degenerate case: no operands at all.
            visitor.visit_bool_op(op);
        }
    }
}
impl ast::ExprCompare {
    // Visits `left`, then each `(op, comparator)` pair in order, mirroring a
    // chained comparison such as `a < b <= c`. `ops` and `comparators` are
    // parallel sequences.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprCompare {
            left,
            ops,
            comparators,
            range: _,
            node_index: _,
        } = self;
        visitor.visit_expr(left);
        for (op, comparator) in ops.iter().zip(comparators) {
            visitor.visit_cmp_op(op);
            visitor.visit_expr(comparator);
        }
    }
}
impl ast::InterpolatedStringFormatSpec {
    // Visits each element of an interpolation format spec in order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        for element in &self.elements {
            visitor.visit_interpolated_string_element(element);
        }
    }
}
impl ast::InterpolatedElement {
    // Visits the interpolated expression, then the elements of its format
    // spec, if any.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::InterpolatedElement {
            expression,
            format_spec,
            ..
        } = self;
        visitor.visit_expr(expression);
        if let Some(format_spec) = format_spec {
            for spec_part in &format_spec.elements {
                visitor.visit_interpolated_string_element(spec_part);
            }
        }
    }
}
impl ast::InterpolatedStringLiteralElement {
    // Leaf node: nothing to visit. The exhaustive destructuring forces this
    // method to be revisited if fields are added.
    pub(crate) fn visit_source_order<'a, V>(&'a self, _visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::InterpolatedStringLiteralElement {
            range: _,
            node_index: _,
            value: _,
        } = self;
    }
}
impl ast::ExprFString {
    // Visits the parts of an (implicitly concatenated) f-string expression:
    // plain literal parts and f-string parts, in source order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprFString {
            value,
            range: _,
            node_index: _,
        } = self;
        for f_string_part in value {
            match f_string_part {
                ast::FStringPart::Literal(string_literal) => {
                    visitor.visit_string_literal(string_literal);
                }
                ast::FStringPart::FString(f_string) => {
                    visitor.visit_f_string(f_string);
                }
            }
        }
    }
}
impl ast::ExprTString {
    // Visits each t-string part of the expression in order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprTString {
            value,
            range: _,
            node_index: _,
        } = self;
        for t_string in value {
            visitor.visit_t_string(t_string);
        }
    }
}
impl ast::ExprStringLiteral {
    // Visits each concatenated string-literal part in order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprStringLiteral {
            value,
            range: _,
            node_index: _,
        } = self;
        for string_literal in value {
            visitor.visit_string_literal(string_literal);
        }
    }
}
impl ast::ExprBytesLiteral {
    // Visits each concatenated bytes-literal part in order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExprBytesLiteral {
            value,
            range: _,
            node_index: _,
        } = self;
        for bytes_literal in value {
            visitor.visit_bytes_literal(bytes_literal);
        }
    }
}
impl ast::ExceptHandlerExceptHandler {
    // Visits an `except` clause: optional exception type, optional `as name`
    // binding, then the handler body.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ExceptHandlerExceptHandler {
            range: _,
            node_index: _,
            type_,
            name,
            body,
        } = self;
        if let Some(expr) = type_ {
            visitor.visit_expr(expr);
        }
        if let Some(name) = name {
            visitor.visit_identifier(name);
        }
        visitor.visit_body(body);
    }
}
impl ast::PatternMatchMapping {
    // Visits a mapping pattern (`{"k": p, **rest}`). `keys`/`patterns` are
    // parallel; the `**rest` identifier is spliced into the traversal at its
    // source position — the code defends against it preceding some keys.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::PatternMatchMapping {
            keys,
            patterns,
            rest,
            range: _,
            node_index: _,
        } = self;
        let mut rest = rest.as_ref();
        for (key, pattern) in keys.iter().zip(patterns) {
            // Emit `rest` before the first key that starts after it, exactly once.
            if let Some(rest_identifier) = rest {
                if rest_identifier.start() < key.start() {
                    visitor.visit_identifier(rest_identifier);
                    rest = None;
                }
            }
            visitor.visit_expr(key);
            visitor.visit_pattern(pattern);
        }
        // `rest` trailing all keys (the common case).
        if let Some(rest) = rest {
            visitor.visit_identifier(rest);
        }
    }
}
impl ast::PatternArguments {
    // Visits a class pattern's arguments: positional patterns first, then
    // keyword patterns (matching their syntactic order).
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let PatternArguments {
            range: _,
            node_index: _,
            patterns,
            keywords,
        } = self;
        for pattern in patterns {
            visitor.visit_pattern(pattern);
        }
        for keyword in keywords {
            visitor.visit_pattern_keyword(keyword);
        }
    }
}
impl ast::PatternKeyword {
    // Visits `attr=pattern` inside a class pattern: the attribute name, then
    // the pattern.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let PatternKeyword {
            range: _,
            node_index: _,
            attr,
            pattern,
        } = self;
        visitor.visit_identifier(attr);
        visitor.visit_pattern(pattern);
    }
}
impl ast::Comprehension {
    // Visits one `for target in iter if …` clause of a comprehension:
    // target, then iterable, then each `if` condition.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Comprehension {
            range: _,
            node_index: _,
            target,
            iter,
            ifs,
            is_async: _,
        } = self;
        visitor.visit_expr(target);
        visitor.visit_expr(iter);
        for expr in ifs {
            visitor.visit_expr(expr);
        }
    }
}
impl ast::Arguments {
    // Visits call arguments interleaved in their textual order (positional and
    // keyword arguments may be mixed), as produced by `arguments_source_order`.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        for arg_or_keyword in self.arguments_source_order() {
            match arg_or_keyword {
                ArgOrKeyword::Arg(arg) => visitor.visit_expr(arg),
                ArgOrKeyword::Keyword(keyword) => visitor.visit_keyword(keyword),
            }
        }
    }
}
impl ast::Parameters {
    // Visits every parameter in declaration order; variadic parameters
    // (`*args`/`**kwargs`) have no default and use the plain visitor hook.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        for parameter in self {
            match parameter {
                AnyParameterRef::NonVariadic(parameter_with_default) => {
                    visitor.visit_parameter_with_default(parameter_with_default);
                }
                AnyParameterRef::Variadic(parameter) => visitor.visit_parameter(parameter),
            }
        }
    }
}
impl ast::Parameter {
    // Visits a single parameter: its name, then its optional annotation.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Parameter {
            range: _,
            node_index: _,
            name,
            annotation,
        } = self;
        visitor.visit_identifier(name);
        if let Some(expr) = annotation {
            visitor.visit_annotation(expr);
        }
    }
}
impl ast::ParameterWithDefault {
    // Visits the parameter itself, then its optional default value.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::ParameterWithDefault {
            range: _,
            node_index: _,
            parameter,
            default,
        } = self;
        visitor.visit_parameter(parameter);
        if let Some(expr) = default {
            visitor.visit_expr(expr);
        }
    }
}
impl ast::Keyword {
    // Visits a keyword argument: optional name (absent for `**kwargs`
    // unpacking), then the value.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Keyword {
            range: _,
            node_index: _,
            arg,
            value,
        } = self;
        if let Some(arg) = arg {
            visitor.visit_identifier(arg);
        }
        visitor.visit_expr(value);
    }
}
impl Alias {
    // Visits an import alias: the imported name, then the optional `as` name.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Alias {
            range: _,
            node_index: _,
            name,
            asname,
        } = self;
        visitor.visit_identifier(name);
        if let Some(asname) = asname {
            visitor.visit_identifier(asname);
        }
    }
}
impl ast::WithItem {
    // Visits one `with` item: the context expression, then the optional
    // `as` target.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::WithItem {
            range: _,
            node_index: _,
            context_expr,
            optional_vars,
        } = self;
        visitor.visit_expr(context_expr);
        if let Some(expr) = optional_vars {
            visitor.visit_expr(expr);
        }
    }
}
impl ast::MatchCase {
    // Visits a `case` clause: pattern, optional `if` guard, then the body.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::MatchCase {
            range: _,
            node_index: _,
            pattern,
            guard,
            body,
        } = self;
        visitor.visit_pattern(pattern);
        if let Some(expr) = guard {
            visitor.visit_expr(expr);
        }
        visitor.visit_body(body);
    }
}
impl ast::Decorator {
    // A decorator wraps a single expression.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Decorator {
            range: _,
            node_index: _,
            expression,
        } = self;
        visitor.visit_expr(expression);
    }
}
impl ast::TypeParams {
    // Visits each type parameter of a generic declaration in order.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::TypeParams {
            range: _,
            node_index: _,
            type_params,
        } = self;
        for type_param in type_params {
            visitor.visit_type_param(type_param);
        }
    }
}
impl ast::FString {
    // Visits each element (literal or interpolation) of a single f-string.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::FString {
            elements,
            range: _,
            node_index: _,
            flags: _,
        } = self;
        for fstring_element in elements {
            visitor.visit_interpolated_string_element(fstring_element);
        }
    }
}
impl ast::TString {
    // Visits each element (literal or interpolation) of a single t-string.
    pub(crate) fn visit_source_order<'a, V>(&'a self, visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::TString {
            elements,
            range: _,
            node_index: _,
            flags: _,
        } = self;
        for tstring_element in elements {
            visitor.visit_interpolated_string_element(tstring_element);
        }
    }
}
impl ast::StringLiteral {
    // Leaf node: nothing to visit; exhaustive destructure guards field additions.
    #[inline]
    pub(crate) fn visit_source_order<'a, V>(&'a self, _visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::StringLiteral {
            range: _,
            node_index: _,
            value: _,
            flags: _,
        } = self;
    }
}
impl ast::BytesLiteral {
    // Leaf node: nothing to visit; exhaustive destructure guards field additions.
    #[inline]
    pub(crate) fn visit_source_order<'a, V>(&'a self, _visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::BytesLiteral {
            range: _,
            node_index: _,
            value: _,
            flags: _,
        } = self;
    }
}
impl ast::Identifier {
    // Leaf node: nothing to visit; exhaustive destructure guards field additions.
    #[inline]
    pub(crate) fn visit_source_order<'a, V>(&'a self, _visitor: &mut V)
    where
        V: SourceOrderVisitor<'a> + ?Sized,
    {
        let ast::Identifier {
            range: _,
            node_index: _,
            id: _,
        } = self;
    }
}
impl<'a> AnyNodeRef<'a> {
    /// Compares two any node refs by their pointers (referential equality).
    pub fn ptr_eq(self, other: AnyNodeRef) -> bool {
        // The kind check disambiguates distinct nodes that can share a start
        // address (e.g. a node and its first child).
        self.as_ptr().eq(&other.as_ptr()) && self.kind() == other.kind()
    }

    /// In our AST, only some alternative branches are represented as a node. This has historical
    /// reasons, e.g. we added a node for elif/else in if statements which was not originally
    /// present in the parser.
    pub const fn is_alternative_branch_with_node(self) -> bool {
        matches!(
            self,
            AnyNodeRef::ExceptHandlerExceptHandler(_) | AnyNodeRef::ElifElseClause(_)
        )
    }

    /// The last child of the last branch, if the node has multiple branches.
    pub fn last_child_in_body(&self) -> Option<AnyNodeRef<'a>> {
        let body =
            match self {
                // Single-suite nodes: the last child is simply the last statement.
                AnyNodeRef::StmtFunctionDef(ast::StmtFunctionDef { body, .. })
                | AnyNodeRef::StmtClassDef(ast::StmtClassDef { body, .. })
                | AnyNodeRef::StmtWith(ast::StmtWith { body, .. })
                | AnyNodeRef::MatchCase(MatchCase { body, .. })
                | AnyNodeRef::ExceptHandlerExceptHandler(ast::ExceptHandlerExceptHandler {
                    body,
                    ..
                })
                | AnyNodeRef::ElifElseClause(ast::ElifElseClause { body, .. }) => body,
                AnyNodeRef::StmtIf(ast::StmtIf {
                    body,
                    elif_else_clauses,
                    ..
                }) => elif_else_clauses.last().map_or(body, |clause| &clause.body),
                // `for`/`while`: prefer the `else` suite when present.
                AnyNodeRef::StmtFor(ast::StmtFor { body, orelse, .. })
                | AnyNodeRef::StmtWhile(ast::StmtWhile { body, orelse, .. }) => {
                    if orelse.is_empty() { body } else { orelse }
                }
                AnyNodeRef::StmtMatch(ast::StmtMatch { cases, .. }) => {
                    return cases.last().map(AnyNodeRef::from);
                }
                AnyNodeRef::StmtTry(ast::StmtTry {
                    body,
                    handlers,
                    orelse,
                    finalbody,
                    ..
                }) => {
                    // Prefer the last non-empty suite in source order:
                    // `finally` > `else` > last `except` handler > `try` body.
                    if finalbody.is_empty() {
                        if orelse.is_empty() {
                            if handlers.is_empty() {
                                body
                            } else {
                                return handlers.last().map(AnyNodeRef::from);
                            }
                        } else {
                            orelse
                        }
                    } else {
                        finalbody
                    }
                }
                // Not a node that contains an indented child node.
                _ => return None,
            };
        body.last().map(AnyNodeRef::from)
    }

    /// Check if the given statement is the first statement after the colon of a branch, be it in if
    /// statements, for statements, after each part of a try-except-else-finally or function/class
    /// definitions.
    ///
    ///
    /// ```python
    /// if True: <- has body
    ///     a <- first statement
    ///     b
    /// elif b: <- has body
    ///     c <- first statement
    ///     d
    /// else: <- has body
    ///     e <- first statement
    ///     f
    ///
    /// class: <- has body
    ///     a: int <- first statement
    ///     b: int
    ///
    /// ```
    ///
    /// For nodes with multiple bodies, we check all bodies that don't have their own node. For
    /// try-except-else-finally, each except branch has it's own node, so for the `StmtTry`, we check
    /// the `try:`, `else:` and `finally:`, bodies, while `ExceptHandlerExceptHandler` has it's own
    /// check. For for-else and while-else, we check both branches for the whole statement.
    ///
    /// ```python
    /// try: <- has body (a)
    ///     6/8 <- first statement (a)
    ///     1/0
    /// except: <- has body (b)
    ///     a <- first statement (b)
    ///     b
    /// else:
    ///     c <- first statement (a)
    ///     d
    /// finally:
    ///     e <- first statement (a)
    ///     f
    /// ```
    pub fn is_first_statement_in_body(&self, body: AnyNodeRef) -> bool {
        match body {
            AnyNodeRef::StmtFor(ast::StmtFor { body, orelse, .. })
            | AnyNodeRef::StmtWhile(ast::StmtWhile { body, orelse, .. }) => {
                are_same_optional(*self, body.first()) || are_same_optional(*self, orelse.first())
            }
            AnyNodeRef::StmtTry(ast::StmtTry {
                body,
                orelse,
                finalbody,
                ..
            }) => {
                are_same_optional(*self, body.first())
                    || are_same_optional(*self, orelse.first())
                    || are_same_optional(*self, finalbody.first())
            }
            AnyNodeRef::StmtIf(ast::StmtIf { body, .. })
            | AnyNodeRef::ElifElseClause(ast::ElifElseClause { body, .. })
            | AnyNodeRef::StmtWith(ast::StmtWith { body, .. })
            | AnyNodeRef::ExceptHandlerExceptHandler(ast::ExceptHandlerExceptHandler {
                body,
                ..
            })
            | AnyNodeRef::MatchCase(MatchCase { body, .. })
            | AnyNodeRef::StmtFunctionDef(ast::StmtFunctionDef { body, .. })
            | AnyNodeRef::StmtClassDef(ast::StmtClassDef { body, .. }) => {
                are_same_optional(*self, body.first())
            }
            // For `match`, the "first statement" is the first `case` clause.
            AnyNodeRef::StmtMatch(ast::StmtMatch { cases, .. }) => {
                are_same_optional(*self, cases.first())
            }
            _ => false,
        }
    }

    /// Returns `true` if `statement` is the first statement in an alternate `body` (e.g. the else of an if statement)
    pub fn is_first_statement_in_alternate_body(&self, body: AnyNodeRef) -> bool {
        match body {
            AnyNodeRef::StmtFor(ast::StmtFor { orelse, .. })
            | AnyNodeRef::StmtWhile(ast::StmtWhile { orelse, .. }) => {
                are_same_optional(*self, orelse.first())
            }
            AnyNodeRef::StmtTry(ast::StmtTry {
                handlers,
                orelse,
                finalbody,
                ..
            }) => {
                are_same_optional(*self, handlers.first())
                    || are_same_optional(*self, orelse.first())
                    || are_same_optional(*self, finalbody.first())
            }
            AnyNodeRef::StmtIf(ast::StmtIf {
                elif_else_clauses, ..
            }) => are_same_optional(*self, elif_else_clauses.first()),
            _ => false,
        }
    }
}
/// Returns `true` if `right` is `Some` and `left` and `right` are referentially equal.
fn are_same_optional<'a, T>(left: AnyNodeRef, right: Option<T>) -> bool
where
    T: Into<AnyNodeRef<'a>>,
{
    match right {
        Some(node) => left.ptr_eq(node.into()),
        None => false,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/visitor.rs | crates/ruff_python_ast/src/visitor.rs | //! AST visitor trait and walk functions.
pub mod source_order;
pub mod transformer;
use crate::{
self as ast, Alias, AnyParameterRef, Arguments, BoolOp, BytesLiteral, CmpOp, Comprehension,
Decorator, ElifElseClause, ExceptHandler, Expr, ExprContext, FString, FStringPart,
InterpolatedStringElement, Keyword, MatchCase, Operator, Parameter, Parameters, Pattern,
PatternArguments, PatternKeyword, Stmt, StringLiteral, TString, TypeParam, TypeParamParamSpec,
TypeParamTypeVar, TypeParamTypeVarTuple, TypeParams, UnaryOp, WithItem,
};
/// A trait for AST visitors. Visits all nodes in the AST recursively in evaluation-order.
///
/// Prefer [`crate::statement_visitor::StatementVisitor`] for visitors that only need to visit
/// statements.
///
/// Use the [`PreorderVisitor`](source_order::SourceOrderVisitor) if you want to visit the nodes
/// in source-order rather than evaluation order.
///
/// Use the [`Transformer`](transformer::Transformer) if you want to modify the nodes.
///
/// Every method defaults to the corresponding free `walk_*` function, so an
/// implementation only overrides the hooks it cares about; overriding methods
/// should call the matching `walk_*` to keep recursing into children.
pub trait Visitor<'a> {
    // --- Statements and statement-level nodes ---
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        walk_stmt(self, stmt);
    }
    fn visit_annotation(&mut self, expr: &'a Expr) {
        walk_annotation(self, expr);
    }
    fn visit_decorator(&mut self, decorator: &'a Decorator) {
        walk_decorator(self, decorator);
    }
    // --- Expressions and operators ---
    fn visit_expr(&mut self, expr: &'a Expr) {
        walk_expr(self, expr);
    }
    fn visit_expr_context(&mut self, expr_context: &'a ExprContext) {
        walk_expr_context(self, expr_context);
    }
    fn visit_bool_op(&mut self, bool_op: &'a BoolOp) {
        walk_bool_op(self, bool_op);
    }
    fn visit_operator(&mut self, operator: &'a Operator) {
        walk_operator(self, operator);
    }
    fn visit_unary_op(&mut self, unary_op: &'a UnaryOp) {
        walk_unary_op(self, unary_op);
    }
    fn visit_cmp_op(&mut self, cmp_op: &'a CmpOp) {
        walk_cmp_op(self, cmp_op);
    }
    fn visit_comprehension(&mut self, comprehension: &'a Comprehension) {
        walk_comprehension(self, comprehension);
    }
    fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
        walk_except_handler(self, except_handler);
    }
    // --- Callables: arguments and parameters ---
    fn visit_arguments(&mut self, arguments: &'a Arguments) {
        walk_arguments(self, arguments);
    }
    fn visit_parameters(&mut self, parameters: &'a Parameters) {
        walk_parameters(self, parameters);
    }
    fn visit_parameter(&mut self, parameter: &'a Parameter) {
        walk_parameter(self, parameter);
    }
    fn visit_keyword(&mut self, keyword: &'a Keyword) {
        walk_keyword(self, keyword);
    }
    fn visit_alias(&mut self, alias: &'a Alias) {
        walk_alias(self, alias);
    }
    fn visit_with_item(&mut self, with_item: &'a WithItem) {
        walk_with_item(self, with_item);
    }
    // --- Generics ---
    fn visit_type_params(&mut self, type_params: &'a TypeParams) {
        walk_type_params(self, type_params);
    }
    fn visit_type_param(&mut self, type_param: &'a TypeParam) {
        walk_type_param(self, type_param);
    }
    // --- `match` statements and patterns ---
    fn visit_match_case(&mut self, match_case: &'a MatchCase) {
        walk_match_case(self, match_case);
    }
    fn visit_pattern(&mut self, pattern: &'a Pattern) {
        walk_pattern(self, pattern);
    }
    fn visit_pattern_arguments(&mut self, pattern_arguments: &'a PatternArguments) {
        walk_pattern_arguments(self, pattern_arguments);
    }
    fn visit_pattern_keyword(&mut self, pattern_keyword: &'a PatternKeyword) {
        walk_pattern_keyword(self, pattern_keyword);
    }
    // --- Bodies and branches ---
    fn visit_body(&mut self, body: &'a [Stmt]) {
        walk_body(self, body);
    }
    fn visit_elif_else_clause(&mut self, elif_else_clause: &'a ElifElseClause) {
        walk_elif_else_clause(self, elif_else_clause);
    }
    // --- Strings (plain, f-, t-) and their elements ---
    fn visit_f_string(&mut self, f_string: &'a FString) {
        walk_f_string(self, f_string);
    }
    fn visit_interpolated_string_element(
        &mut self,
        interpolated_string_element: &'a InterpolatedStringElement,
    ) {
        walk_interpolated_string_element(self, interpolated_string_element);
    }
    fn visit_t_string(&mut self, t_string: &'a TString) {
        walk_t_string(self, t_string);
    }
    fn visit_string_literal(&mut self, string_literal: &'a StringLiteral) {
        walk_string_literal(self, string_literal);
    }
    fn visit_bytes_literal(&mut self, bytes_literal: &'a BytesLiteral) {
        walk_bytes_literal(self, bytes_literal);
    }
}
/// Walk a statement body, visiting each statement in source order.
pub fn walk_body<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, body: &'a [Stmt]) {
    body.iter().for_each(|stmt| visitor.visit_stmt(stmt));
}
pub fn walk_elif_else_clause<'a, V: Visitor<'a> + ?Sized>(
visitor: &mut V,
elif_else_clause: &'a ElifElseClause,
) {
if let Some(test) = &elif_else_clause.test {
visitor.visit_expr(test);
}
visitor.visit_body(&elif_else_clause.body);
}
pub fn walk_stmt<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, stmt: &'a Stmt) {
match stmt {
Stmt::FunctionDef(ast::StmtFunctionDef {
parameters,
body,
decorator_list,
returns,
type_params,
..
}) => {
for decorator in decorator_list {
visitor.visit_decorator(decorator);
}
if let Some(type_params) = type_params {
visitor.visit_type_params(type_params);
}
visitor.visit_parameters(parameters);
if let Some(expr) = returns {
visitor.visit_annotation(expr);
}
visitor.visit_body(body);
}
Stmt::ClassDef(ast::StmtClassDef {
arguments,
body,
decorator_list,
type_params,
..
}) => {
for decorator in decorator_list {
visitor.visit_decorator(decorator);
}
if let Some(type_params) = type_params {
visitor.visit_type_params(type_params);
}
if let Some(arguments) = arguments {
visitor.visit_arguments(arguments);
}
visitor.visit_body(body);
}
Stmt::Return(ast::StmtReturn {
value,
range: _,
node_index: _,
}) => {
if let Some(expr) = value {
visitor.visit_expr(expr);
}
}
Stmt::Delete(ast::StmtDelete {
targets,
range: _,
node_index: _,
}) => {
for expr in targets {
visitor.visit_expr(expr);
}
}
Stmt::TypeAlias(ast::StmtTypeAlias {
range: _,
node_index: _,
name,
type_params,
value,
}) => {
visitor.visit_expr(value);
if let Some(type_params) = type_params {
visitor.visit_type_params(type_params);
}
visitor.visit_expr(name);
}
Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
visitor.visit_expr(value);
for expr in targets {
visitor.visit_expr(expr);
}
}
Stmt::AugAssign(ast::StmtAugAssign {
target,
op,
value,
range: _,
node_index: _,
}) => {
visitor.visit_expr(value);
visitor.visit_operator(op);
visitor.visit_expr(target);
}
Stmt::AnnAssign(ast::StmtAnnAssign {
target,
annotation,
value,
..
}) => {
if let Some(expr) = value {
visitor.visit_expr(expr);
}
visitor.visit_annotation(annotation);
visitor.visit_expr(target);
}
Stmt::For(ast::StmtFor {
target,
iter,
body,
orelse,
..
}) => {
visitor.visit_expr(iter);
visitor.visit_expr(target);
visitor.visit_body(body);
visitor.visit_body(orelse);
}
Stmt::While(ast::StmtWhile {
test,
body,
orelse,
range: _,
node_index: _,
}) => {
visitor.visit_expr(test);
visitor.visit_body(body);
visitor.visit_body(orelse);
}
Stmt::If(ast::StmtIf {
test,
body,
elif_else_clauses,
range: _,
node_index: _,
}) => {
visitor.visit_expr(test);
visitor.visit_body(body);
for clause in elif_else_clauses {
if let Some(test) = &clause.test {
visitor.visit_expr(test);
}
walk_elif_else_clause(visitor, clause);
}
}
Stmt::With(ast::StmtWith { items, body, .. }) => {
for with_item in items {
visitor.visit_with_item(with_item);
}
visitor.visit_body(body);
}
Stmt::Match(ast::StmtMatch {
subject,
cases,
range: _,
node_index: _,
}) => {
visitor.visit_expr(subject);
for match_case in cases {
visitor.visit_match_case(match_case);
}
}
Stmt::Raise(ast::StmtRaise {
exc,
cause,
range: _,
node_index: _,
}) => {
if let Some(expr) = exc {
visitor.visit_expr(expr);
}
if let Some(expr) = cause {
visitor.visit_expr(expr);
}
}
Stmt::Try(ast::StmtTry {
body,
handlers,
orelse,
finalbody,
is_star: _,
range: _,
node_index: _,
}) => {
visitor.visit_body(body);
for except_handler in handlers {
visitor.visit_except_handler(except_handler);
}
visitor.visit_body(orelse);
visitor.visit_body(finalbody);
}
Stmt::Assert(ast::StmtAssert {
test,
msg,
range: _,
node_index: _,
}) => {
visitor.visit_expr(test);
if let Some(expr) = msg {
visitor.visit_expr(expr);
}
}
Stmt::Import(ast::StmtImport {
names,
range: _,
node_index: _,
}) => {
for alias in names {
visitor.visit_alias(alias);
}
}
Stmt::ImportFrom(ast::StmtImportFrom { names, .. }) => {
for alias in names {
visitor.visit_alias(alias);
}
}
Stmt::Global(_) => {}
Stmt::Nonlocal(_) => {}
Stmt::Expr(ast::StmtExpr {
value,
range: _,
node_index: _,
}) => visitor.visit_expr(value),
Stmt::Pass(_) | Stmt::Break(_) | Stmt::Continue(_) | Stmt::IpyEscapeCommand(_) => {}
}
}
/// Walk an annotation expression. This exists as a separate entry point so
/// visitors can distinguish annotation contexts from ordinary expressions.
pub fn walk_annotation<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, expr: &'a Expr) {
    visitor.visit_expr(expr);
}
/// Walk a decorator; its only child node is the decorator expression.
pub fn walk_decorator<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, decorator: &'a Decorator) {
    visitor.visit_expr(&decorator.expression);
}
/// Walk the child nodes of an [`Expr`], visiting sub-expressions in evaluation
/// order where that order is meaningful (e.g. comprehension generators before
/// the element expression, a named expression's value before its target).
pub fn walk_expr<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, expr: &'a Expr) {
    match expr {
        Expr::BoolOp(ast::ExprBoolOp {
            op,
            values,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_bool_op(op);
            for expr in values {
                visitor.visit_expr(expr);
            }
        }
        Expr::Named(ast::ExprNamed {
            target,
            value,
            range: _,
            node_index: _,
        }) => {
            // `(target := value)` evaluates the value before binding the target.
            visitor.visit_expr(value);
            visitor.visit_expr(target);
        }
        Expr::BinOp(ast::ExprBinOp {
            left,
            op,
            right,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(left);
            visitor.visit_operator(op);
            visitor.visit_expr(right);
        }
        Expr::UnaryOp(ast::ExprUnaryOp {
            op,
            operand,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_unary_op(op);
            visitor.visit_expr(operand);
        }
        Expr::Lambda(ast::ExprLambda {
            parameters,
            body,
            range: _,
            node_index: _,
        }) => {
            if let Some(parameters) = parameters {
                visitor.visit_parameters(parameters);
            }
            visitor.visit_expr(body);
        }
        Expr::If(ast::ExprIf {
            test,
            body,
            orelse,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(test);
            visitor.visit_expr(body);
            visitor.visit_expr(orelse);
        }
        Expr::Dict(ast::ExprDict {
            items,
            range: _,
            node_index: _,
        }) => {
            for ast::DictItem { key, value } in items {
                // `key` is `None` for `**`-unpacked entries.
                if let Some(key) = key {
                    visitor.visit_expr(key);
                }
                visitor.visit_expr(value);
            }
        }
        Expr::Set(ast::ExprSet {
            elts,
            range: _,
            node_index: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
        }
        Expr::ListComp(ast::ExprListComp {
            elt,
            generators,
            range: _,
            node_index: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::SetComp(ast::ExprSetComp {
            elt,
            generators,
            range: _,
            node_index: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::DictComp(ast::ExprDictComp {
            key,
            value,
            generators,
            range: _,
            node_index: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(key);
            visitor.visit_expr(value);
        }
        Expr::Generator(ast::ExprGenerator {
            elt,
            generators,
            range: _,
            node_index: _,
            parenthesized: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::Await(ast::ExprAwait {
            value,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(value),
        Expr::Yield(ast::ExprYield {
            value,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = value {
                visitor.visit_expr(expr);
            }
        }
        Expr::YieldFrom(ast::ExprYieldFrom {
            value,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(value),
        Expr::Compare(ast::ExprCompare {
            left,
            ops,
            comparators,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(left);
            for cmp_op in ops {
                visitor.visit_cmp_op(cmp_op);
            }
            for expr in comparators {
                visitor.visit_expr(expr);
            }
        }
        Expr::Call(ast::ExprCall {
            func,
            arguments,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(func);
            visitor.visit_arguments(arguments);
        }
        Expr::FString(ast::ExprFString { value, .. }) => {
            // An (implicitly concatenated) f-string alternates plain string
            // literal parts with f-string parts.
            for part in value {
                match part {
                    FStringPart::Literal(string_literal) => {
                        visitor.visit_string_literal(string_literal);
                    }
                    FStringPart::FString(f_string) => visitor.visit_f_string(f_string),
                }
            }
        }
        Expr::TString(ast::ExprTString { value, .. }) => {
            for t_string in value {
                visitor.visit_t_string(t_string);
            }
        }
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
            for string_literal in value {
                visitor.visit_string_literal(string_literal);
            }
        }
        Expr::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => {
            for bytes_literal in value {
                visitor.visit_bytes_literal(bytes_literal);
            }
        }
        // Literal leaves: no child nodes to traverse.
        Expr::NumberLiteral(_)
        | Expr::BooleanLiteral(_)
        | Expr::NoneLiteral(_)
        | Expr::EllipsisLiteral(_) => {}
        Expr::Attribute(ast::ExprAttribute { value, ctx, .. }) => {
            visitor.visit_expr(value);
            visitor.visit_expr_context(ctx);
        }
        Expr::Subscript(ast::ExprSubscript {
            value,
            slice,
            ctx,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(value);
            visitor.visit_expr(slice);
            visitor.visit_expr_context(ctx);
        }
        Expr::Starred(ast::ExprStarred {
            value,
            ctx,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(value);
            visitor.visit_expr_context(ctx);
        }
        Expr::Name(ast::ExprName { ctx, .. }) => {
            visitor.visit_expr_context(ctx);
        }
        Expr::List(ast::ExprList {
            elts,
            ctx,
            range: _,
            node_index: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
            visitor.visit_expr_context(ctx);
        }
        Expr::Tuple(ast::ExprTuple {
            elts,
            ctx,
            range: _,
            node_index: _,
            parenthesized: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
            visitor.visit_expr_context(ctx);
        }
        Expr::Slice(ast::ExprSlice {
            lower,
            upper,
            step,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = lower {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = upper {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = step {
                visitor.visit_expr(expr);
            }
        }
        Expr::IpyEscapeCommand(_) => {}
    }
}
pub fn walk_comprehension<'a, V: Visitor<'a> + ?Sized>(
visitor: &mut V,
comprehension: &'a Comprehension,
) {
visitor.visit_expr(&comprehension.iter);
visitor.visit_expr(&comprehension.target);
for expr in &comprehension.ifs {
visitor.visit_expr(expr);
}
}
/// Walk an `except` handler: the handled exception type (if any), then the body.
pub fn walk_except_handler<'a, V: Visitor<'a> + ?Sized>(
    visitor: &mut V,
    except_handler: &'a ExceptHandler,
) {
    // `ExceptHandler` has a single variant, so this `let` is irrefutable.
    let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { type_, body, .. }) =
        except_handler;
    if let Some(type_) = type_ {
        visitor.visit_expr(type_);
    }
    visitor.visit_body(body);
}
/// Walk call arguments: all positional args, then all keywords.
///
/// Note that there might be keywords before the last arg, e.g. in
/// `f(*args, a=2, *args2, **kwargs)`, but we follow Python in evaluating first
/// `args` and then `keywords`. See also [`Arguments::arguments_source_order`].
pub fn walk_arguments<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, arguments: &'a Arguments) {
    arguments
        .args
        .iter()
        .for_each(|arg| visitor.visit_expr(arg));
    arguments
        .keywords
        .iter()
        .for_each(|keyword| visitor.visit_keyword(keyword));
}
/// Walk a parameter list. Defaults are evaluated before annotations, so all
/// default expressions are visited first, then each parameter (and with it,
/// its annotation).
pub fn walk_parameters<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, parameters: &'a Parameters) {
    for parameter in parameters.iter_non_variadic_params() {
        if let Some(default) = parameter.default.as_deref() {
            visitor.visit_expr(default);
        }
    }
    for parameter in parameters.iter() {
        visitor.visit_parameter(parameter.as_parameter());
    }
}
/// Walk a single parameter; its only child node is the optional annotation
/// (the parameter name is an identifier, not an expression).
pub fn walk_parameter<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, parameter: &'a Parameter) {
    if let Some(annotation) = parameter.annotation.as_deref() {
        visitor.visit_annotation(annotation);
    }
}
/// Walk a keyword argument (e.g. `a=2` in a call); only its value is an
/// expression (the keyword name, if any, is an identifier).
pub fn walk_keyword<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, keyword: &'a Keyword) {
    visitor.visit_expr(&keyword.value);
}
pub fn walk_with_item<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, with_item: &'a WithItem) {
visitor.visit_expr(&with_item.context_expr);
if let Some(expr) = &with_item.optional_vars {
visitor.visit_expr(expr);
}
}
/// Walk a type-parameter list, visiting each parameter in declaration order.
pub fn walk_type_params<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, type_params: &'a TypeParams) {
    type_params
        .type_params
        .iter()
        .for_each(|type_param| visitor.visit_type_param(type_param));
}
/// Walk a single type parameter. A `TypeVar` may carry a bound; all three
/// kinds may carry a default. The bound (if any) is visited before the
/// default, preserving declaration order.
pub fn walk_type_param<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, type_param: &'a TypeParam) {
    if let TypeParam::TypeVar(TypeParamTypeVar {
        bound: Some(bound), ..
    }) = type_param
    {
        visitor.visit_expr(bound);
    }
    let default = match type_param {
        TypeParam::TypeVar(TypeParamTypeVar { default, .. }) => default.as_deref(),
        TypeParam::TypeVarTuple(TypeParamTypeVarTuple { default, .. }) => default.as_deref(),
        TypeParam::ParamSpec(TypeParamParamSpec { default, .. }) => default.as_deref(),
    };
    if let Some(expr) = default {
        visitor.visit_expr(expr);
    }
}
pub fn walk_match_case<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, match_case: &'a MatchCase) {
visitor.visit_pattern(&match_case.pattern);
if let Some(expr) = &match_case.guard {
visitor.visit_expr(expr);
}
visitor.visit_body(&match_case.body);
}
/// Walk the child nodes of a match [`Pattern`].
pub fn walk_pattern<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, pattern: &'a Pattern) {
    match pattern {
        Pattern::MatchValue(ast::PatternMatchValue { value, .. }) => {
            visitor.visit_expr(value);
        }
        // Singleton patterns carry a literal constant, not a child node.
        Pattern::MatchSingleton(_) => {}
        Pattern::MatchSequence(ast::PatternMatchSequence { patterns, .. }) => {
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchMapping(ast::PatternMatchMapping { keys, patterns, .. }) => {
            // Keys are expressions; the associated sub-patterns follow.
            for expr in keys {
                visitor.visit_expr(expr);
            }
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchClass(ast::PatternMatchClass { cls, arguments, .. }) => {
            visitor.visit_expr(cls);
            visitor.visit_pattern_arguments(arguments);
        }
        // A star pattern binds a name (or `_`) but has no child nodes.
        Pattern::MatchStar(_) => {}
        Pattern::MatchAs(ast::PatternMatchAs { pattern, .. }) => {
            // `pattern` is `None` for a bare capture/wildcard (`case x:` / `case _:`).
            if let Some(pattern) = pattern {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchOr(ast::PatternMatchOr { patterns, .. }) => {
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
    }
}
/// Walk the arguments of a class pattern: positional sub-patterns precede
/// keyword sub-patterns, as in the source.
pub fn walk_pattern_arguments<'a, V: Visitor<'a> + ?Sized>(
    visitor: &mut V,
    pattern_arguments: &'a PatternArguments,
) {
    pattern_arguments
        .patterns
        .iter()
        .for_each(|pattern| visitor.visit_pattern(pattern));
    pattern_arguments
        .keywords
        .iter()
        .for_each(|keyword| visitor.visit_pattern_keyword(keyword));
}
/// Walk a keyword sub-pattern of a class pattern; its only child node is the
/// nested pattern (the keyword name is an identifier).
pub fn walk_pattern_keyword<'a, V: Visitor<'a> + ?Sized>(
    visitor: &mut V,
    pattern_keyword: &'a PatternKeyword,
) {
    visitor.visit_pattern(&pattern_keyword.pattern);
}
/// Walk an f-string, visiting each literal or interpolation element in order.
pub fn walk_f_string<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, f_string: &'a FString) {
    f_string
        .elements
        .iter()
        .for_each(|element| visitor.visit_interpolated_string_element(element));
}
/// Walk one element of an f-/t-string. Literal elements have no child nodes;
/// interpolations contain an expression and, optionally, a format spec that
/// may itself hold nested interpolations (e.g. `{x:{width}}`).
pub fn walk_interpolated_string_element<'a, V: Visitor<'a> + ?Sized>(
    visitor: &mut V,
    interpolated_string_element: &'a InterpolatedStringElement,
) {
    let ast::InterpolatedStringElement::Interpolation(interpolation) = interpolated_string_element
    else {
        return;
    };
    visitor.visit_expr(&interpolation.expression);
    if let Some(format_spec) = &interpolation.format_spec {
        for spec_element in &format_spec.elements {
            visitor.visit_interpolated_string_element(spec_element);
        }
    }
}
/// Walk a t-string, visiting each literal or interpolation element in order.
pub fn walk_t_string<'a, V: Visitor<'a> + ?Sized>(visitor: &mut V, t_string: &'a TString) {
    t_string
        .elements
        .iter()
        .for_each(|element| visitor.visit_interpolated_string_element(element));
}
// The walkers below correspond to leaf nodes: these AST node kinds have no
// child nodes to traverse, so the walkers are intentionally no-ops. They exist
// so `Visitor` implementations can hook every node kind uniformly.
pub fn walk_expr_context<'a, V: Visitor<'a> + ?Sized>(
    _visitor: &V,
    _expr_context: &'a ExprContext,
) {
}
pub fn walk_bool_op<'a, V: Visitor<'a> + ?Sized>(_visitor: &V, _bool_op: &'a BoolOp) {}
pub fn walk_operator<'a, V: Visitor<'a> + ?Sized>(_visitor: &V, _operator: &'a Operator) {}
pub fn walk_unary_op<'a, V: Visitor<'a> + ?Sized>(_visitor: &V, _unary_op: &'a UnaryOp) {}
pub fn walk_cmp_op<'a, V: Visitor<'a> + ?Sized>(_visitor: &V, _cmp_op: &'a CmpOp) {}
pub fn walk_alias<'a, V: Visitor<'a> + ?Sized>(_visitor: &V, _alias: &'a Alias) {}
pub fn walk_string_literal<'a, V: Visitor<'a> + ?Sized>(
    _visitor: &V,
    _string_literal: &'a StringLiteral,
) {
}
pub fn walk_bytes_literal<'a, V: Visitor<'a> + ?Sized>(
    _visitor: &V,
    _bytes_literal: &'a BytesLiteral,
) {
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/lib.rs | crates/ruff_python_ast/src/lib.rs | use std::ffi::OsStr;
use std::path::Path;
pub use expression::*;
pub use generated::*;
pub use int::*;
pub use node_index::*;
pub use nodes::*;
pub use operator_precedence::*;
pub use python_version::*;
pub mod comparable;
pub mod docstrings;
mod expression;
pub mod find_node;
mod generated;
pub mod helpers;
pub mod identifier;
mod int;
pub mod name;
mod node;
mod node_index;
mod nodes;
pub mod operator_precedence;
pub mod parenthesize;
mod python_version;
pub mod relocate;
pub mod script;
pub mod statement_visitor;
pub mod stmt_if;
pub mod str;
pub mod str_prefix;
pub mod token;
pub mod traversal;
pub mod types;
pub mod visitor;
pub mod whitespace;
/// The type of a source file.
#[derive(Clone, Copy, Debug, PartialEq, is_macro::Is)]
pub enum SourceType {
    /// The file contains Python source code.
    Python(PySourceType),
    /// The file contains TOML.
    Toml(TomlSourceType),
}
impl Default for SourceType {
    /// Default to plain Python source.
    fn default() -> Self {
        Self::Python(PySourceType::Python)
    }
}
impl<P: AsRef<Path>> From<P> for SourceType {
    /// Classify a file by its path: well-known TOML file names take precedence,
    /// then a `.toml` extension, and everything else is treated as Python
    /// (with the exact Python flavor inferred from the extension).
    fn from(path: P) -> Self {
        let path = path.as_ref();
        if let Some(filename) = path.file_name() {
            if filename == "pyproject.toml" {
                return Self::Toml(TomlSourceType::Pyproject);
            }
            if filename == "Pipfile" {
                return Self::Toml(TomlSourceType::Pipfile);
            }
            if filename == "poetry.lock" {
                return Self::Toml(TomlSourceType::Poetry);
            }
        }
        if path.extension().is_some_and(|ext| ext == "toml") {
            Self::Toml(TomlSourceType::Unrecognized)
        } else {
            Self::Python(PySourceType::from(path))
        }
    }
}
/// The specific kind of TOML file a source was recognized as.
#[derive(Clone, Copy, Debug, PartialEq, is_macro::Is)]
pub enum TomlSourceType {
    /// The source is a `pyproject.toml`.
    Pyproject,
    /// The source is a `Pipfile`.
    Pipfile,
    /// The source is a `poetry.lock`.
    Poetry,
    /// The source is an unrecognized TOML file.
    Unrecognized,
}
/// The flavor of a Python source file, inferred from its extension.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum PySourceType {
    /// The source is a Python file (`.py`, `.pyw`).
    /// Note: `.pyw` files contain Python code, but do not represent importable namespaces.
    /// Consider adding a separate source type later if combining the two causes issues.
    #[default]
    Python,
    /// The source is a Python stub file (`.pyi`).
    Stub,
    /// The source is a Jupyter notebook (`.ipynb`).
    Ipynb,
}
impl PySourceType {
/// Infers the source type from the file extension.
///
/// Falls back to `Python` if the extension is not recognized.
pub fn from_extension(extension: &str) -> Self {
Self::try_from_extension(extension).unwrap_or_default()
}
/// Infers the source type from the file extension.
pub fn try_from_extension(extension: &str) -> Option<Self> {
let ty = match extension {
"py" => Self::Python,
"pyi" => Self::Stub,
"pyw" => Self::Python,
"ipynb" => Self::Ipynb,
_ => return None,
};
Some(ty)
}
pub fn try_from_path(path: impl AsRef<Path>) -> Option<Self> {
path.as_ref()
.extension()
.and_then(OsStr::to_str)
.and_then(Self::try_from_extension)
}
pub const fn is_py_file(self) -> bool {
matches!(self, Self::Python)
}
pub const fn is_stub(self) -> bool {
matches!(self, Self::Stub)
}
pub const fn is_py_file_or_stub(self) -> bool {
matches!(self, Self::Python | Self::Stub)
}
pub const fn is_ipynb(self) -> bool {
matches!(self, Self::Ipynb)
}
}
impl<P: AsRef<Path>> From<P> for PySourceType {
    /// Infer the source type from a path, falling back to the default
    /// (`PySourceType::Python`) when the extension is missing or unrecognized.
    fn from(path: P) -> Self {
        Self::try_from_path(path).unwrap_or_default()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/python_version.rs | crates/ruff_python_ast/src/python_version.rs | use std::{fmt, str::FromStr};
/// Representation of a Python version.
///
/// N.B. This does not necessarily represent a Python version that we actually support.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "cache", derive(ruff_macros::CacheKey))]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct PythonVersion {
    // Major version component, e.g. the `3` in `3.12`.
    pub major: u8,
    // Minor version component, e.g. the `12` in `3.12`.
    pub minor: u8,
}
impl PythonVersion {
    // Known Python versions, in ascending order.
    pub const PY37: PythonVersion = PythonVersion { major: 3, minor: 7 };
    pub const PY38: PythonVersion = PythonVersion { major: 3, minor: 8 };
    pub const PY39: PythonVersion = PythonVersion { major: 3, minor: 9 };
    pub const PY310: PythonVersion = PythonVersion {
        major: 3,
        minor: 10,
    };
    pub const PY311: PythonVersion = PythonVersion {
        major: 3,
        minor: 11,
    };
    pub const PY312: PythonVersion = PythonVersion {
        major: 3,
        minor: 12,
    };
    pub const PY313: PythonVersion = PythonVersion {
        major: 3,
        minor: 13,
    };
    pub const PY314: PythonVersion = PythonVersion {
        major: 3,
        minor: 14,
    };
    /// Iterate over all known versions, from oldest to newest.
    pub fn iter() -> impl Iterator<Item = PythonVersion> {
        [
            PythonVersion::PY37,
            PythonVersion::PY38,
            PythonVersion::PY39,
            PythonVersion::PY310,
            PythonVersion::PY311,
            PythonVersion::PY312,
            PythonVersion::PY313,
            PythonVersion::PY314,
        ]
        .into_iter()
    }
    /// The minimum supported Python version.
    pub const fn lowest() -> Self {
        Self::PY37
    }
    /// The newest Python version known to this type.
    pub const fn latest() -> Self {
        Self::PY314
    }
    /// The latest Python version supported in preview
    pub fn latest_preview() -> Self {
        let latest_preview = Self::PY314;
        // The preview version must never lag behind the stable latest.
        debug_assert!(latest_preview >= Self::latest());
        latest_preview
    }
    pub const fn latest_ty() -> Self {
        // Make sure to update the default value for `EnvironmentOptions::python_version` when bumping this version.
        Self::PY314
    }
    /// Return the version as a `(major, minor)` tuple.
    pub const fn as_tuple(self) -> (u8, u8) {
        (self.major, self.minor)
    }
    /// Whether a free-threaded build exists for this version (3.13 and later).
    pub fn free_threaded_build_available(self) -> bool {
        self >= PythonVersion::PY313
    }
    /// Return `true` if the current version supports [PEP 701].
    ///
    /// [PEP 701]: https://peps.python.org/pep-0701/
    pub fn supports_pep_701(self) -> bool {
        self >= Self::PY312
    }
    // True for 3.14+, where evaluation of annotations is deferred (PEP 649).
    pub fn defers_annotations(self) -> bool {
        self >= Self::PY314
    }
}
impl Default for PythonVersion {
    /// The default target version: 3.10.
    fn default() -> Self {
        Self::PY310
    }
}
impl From<(u8, u8)> for PythonVersion {
    /// Build a version directly from a `(major, minor)` pair.
    fn from((major, minor): (u8, u8)) -> Self {
        Self { major, minor }
    }
}
impl fmt::Display for PythonVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let PythonVersion { major, minor } = self;
write!(f, "{major}.{minor}")
}
}
/// Errors produced when parsing a [`PythonVersion`] from a string.
#[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)]
pub enum PythonVersionDeserializationError {
    // The input did not contain exactly one `.` separator.
    #[error("Invalid python version `{0}`: expected `major.minor`")]
    WrongPeriodNumber(Box<str>),
    #[error("Invalid major version `{0}`: {1}")]
    InvalidMajorVersion(Box<str>, #[source] std::num::ParseIntError),
    #[error("Invalid minor version `{0}`: {1}")]
    InvalidMinorVersion(Box<str>, #[source] std::num::ParseIntError),
}
impl TryFrom<(&str, &str)> for PythonVersion {
    type Error = PythonVersionDeserializationError;
    /// Parse a version from separate major and minor strings, reporting which
    /// component failed to parse.
    fn try_from((major, minor): (&str, &str)) -> Result<Self, Self::Error> {
        let major_number = major.parse().map_err(|err| {
            PythonVersionDeserializationError::InvalidMajorVersion(Box::from(major), err)
        })?;
        let minor_number = minor.parse().map_err(|err| {
            PythonVersionDeserializationError::InvalidMinorVersion(Box::from(minor), err)
        })?;
        Ok(Self {
            major: major_number,
            minor: minor_number,
        })
    }
}
impl FromStr for PythonVersion {
    type Err = PythonVersionDeserializationError;
    /// Parse a `"major.minor"` string, e.g. `"3.12"`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (major, minor) = s
            .split_once('.')
            .ok_or_else(|| PythonVersionDeserializationError::WrongPeriodNumber(Box::from(s)))?;
        Self::try_from((major, minor)).map_err(|err| {
            // Give a better error message for something like `3.8.5` or `3..8`
            // (the minor component then contains a `.`, so the real problem is
            // the number of periods, not the digits).
            if matches!(
                err,
                PythonVersionDeserializationError::InvalidMinorVersion(_, _)
            ) && minor.contains('.')
            {
                PythonVersionDeserializationError::WrongPeriodNumber(Box::from(s))
            } else {
                err
            }
        })
    }
}
#[cfg(feature = "serde")]
mod serde {
use super::PythonVersion;
impl<'de> serde::Deserialize<'de> for PythonVersion {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
String::deserialize(deserializer)?
.parse()
.map_err(serde::de::Error::custom)
}
}
impl serde::Serialize for PythonVersion {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(feature = "schemars")]
mod schemars {
use super::PythonVersion;
use schemars::{JsonSchema, Schema, SchemaGenerator};
use serde_json::Value;
impl JsonSchema for PythonVersion {
fn schema_name() -> std::borrow::Cow<'static, str> {
std::borrow::Cow::Borrowed("PythonVersion")
}
fn json_schema(_gen: &mut SchemaGenerator) -> Schema {
let mut any_of: Vec<Value> = vec![
schemars::json_schema!({
"type": "string",
"pattern": r"^\d+\.\d+$",
})
.into(),
];
for version in Self::iter() {
let mut schema = schemars::json_schema!({
"const": version.to_string(),
});
schema.ensure_object().insert(
"description".to_string(),
Value::String(format!("Python {version}")),
);
any_of.push(schema.into());
}
let mut schema = Schema::default();
schema
.ensure_object()
.insert("anyOf".to_string(), Value::Array(any_of));
schema
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/stmt_if.rs | crates/ruff_python_ast/src/stmt_if.rs | use std::iter;
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::{Ranged, TextRange};
use crate::{ElifElseClause, Expr, Stmt, StmtIf};
/// Return the `Range` of the first `Elif` or `Else` token in an `If` statement.
/// Return the `Range` of the first `Elif` or `Else` token in an `If` statement.
pub fn elif_else_range(clause: &ElifElseClause, contents: &str) -> Option<TextRange> {
    // The first non-trivia token in the clause's range must be the keyword.
    let first = SimpleTokenizer::new(contents, clause.range)
        .skip_trivia()
        .next()?;
    match first.kind {
        SimpleTokenKind::Elif | SimpleTokenKind::Else => Some(first.range()),
        _ => None,
    }
}
/// Distinguishes the leading `if` branch from subsequent `elif` branches.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum BranchKind {
    If,
    Elif,
}
/// A single `if`/`elif` branch: its kind, test expression, and body.
#[derive(Debug)]
pub struct IfElifBranch<'a> {
    pub kind: BranchKind,
    pub test: &'a Expr,
    pub body: &'a [Stmt],
    // Private: computed by `if_elif_branches` and exposed via `Ranged`.
    range: TextRange,
}
impl Ranged for IfElifBranch<'_> {
    fn range(&self) -> TextRange {
        self.range
    }
}
/// Iterate over the `if`/`elif` branches of an `If` statement, yielding one
/// [`IfElifBranch`] per branch that has a test expression. `else` clauses
/// (whose `test` is `None`) are skipped.
pub fn if_elif_branches(stmt_if: &StmtIf) -> impl Iterator<Item = IfElifBranch<'_>> {
    iter::once(IfElifBranch {
        kind: BranchKind::If,
        test: stmt_if.test.as_ref(),
        body: stmt_if.body.as_slice(),
        // NOTE(review): `.unwrap()` assumes the `if` body is non-empty —
        // presumably guaranteed by the parser; confirm before reusing this
        // pattern elsewhere.
        range: TextRange::new(stmt_if.start(), stmt_if.body.last().unwrap().end()),
    })
    .chain(stmt_if.elif_else_clauses.iter().filter_map(|clause| {
        Some(IfElifBranch {
            kind: BranchKind::Elif,
            // `?` drops `else` clauses, which have no test.
            test: clause.test.as_ref()?,
            body: clause.body.as_slice(),
            range: clause.range,
        })
    }))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/helpers.rs | crates/ruff_python_ast/src/helpers.rs | use std::borrow::Cow;
use std::path::Path;
use rustc_hash::FxHashMap;
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer, indentation_at_offset};
use ruff_source_file::LineRanges;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::name::{Name, QualifiedName, QualifiedNameBuilder};
use crate::statement_visitor::StatementVisitor;
use crate::token::Tokens;
use crate::token::parenthesized_range;
use crate::visitor::Visitor;
use crate::{
self as ast, Arguments, AtomicNodeIndex, CmpOp, DictItem, ExceptHandler, Expr, ExprNoneLiteral,
InterpolatedStringElement, MatchCase, Operator, Pattern, Stmt, TypeParam,
};
use crate::{AnyNodeRef, ExprContext};
/// Return `true` if the `Stmt` is a compound statement (as opposed to a simple statement).
/// Return `true` if the `Stmt` is a compound statement (as opposed to a simple statement).
pub const fn is_compound_statement(stmt: &Stmt) -> bool {
    // Compound statements are those that contain an indented body.
    matches!(
        stmt,
        Stmt::FunctionDef(_)
            | Stmt::ClassDef(_)
            | Stmt::While(_)
            | Stmt::For(_)
            | Stmt::Match(_)
            | Stmt::With(_)
            | Stmt::If(_)
            | Stmt::Try(_)
    )
}
/// Return `true` if `id` names one of the built-in collection constructors
/// (`list`, `tuple`, `set`, `dict`, `frozenset`) *and* the name still refers
/// to the builtin (i.e., hasn't been shadowed), per `is_builtin`.
fn is_iterable_initializer<F>(id: &str, is_builtin: F) -> bool
where
    F: Fn(&str) -> bool,
{
    let is_initializer = matches!(id, "list" | "tuple" | "set" | "dict" | "frozenset");
    is_initializer && is_builtin(id)
}
/// Return `true` if the `Expr` contains an expression that appears to include a
/// side-effect (like a function call).
///
/// Accepts a closure that determines whether a given name (e.g., `"list"`) is a Python builtin.
pub fn contains_effect<F>(expr: &Expr, is_builtin: F) -> bool
where
F: Fn(&str) -> bool,
{
any_over_expr(expr, &|expr| {
// Accept empty initializers.
if let Expr::Call(ast::ExprCall {
func,
arguments,
range: _,
node_index: _,
}) = expr
{
// Ex) `list()`
if arguments.is_empty() {
if let Expr::Name(ast::ExprName { id, .. }) = func.as_ref() {
if !is_iterable_initializer(id.as_str(), |id| is_builtin(id)) {
return true;
}
return false;
}
}
}
// Avoid false positive for overloaded operators.
if let Expr::BinOp(ast::ExprBinOp { left, right, .. }) = expr {
if !matches!(
left.as_ref(),
Expr::StringLiteral(_)
| Expr::BytesLiteral(_)
| Expr::NumberLiteral(_)
| Expr::BooleanLiteral(_)
| Expr::NoneLiteral(_)
| Expr::EllipsisLiteral(_)
| Expr::FString(_)
| Expr::List(_)
| Expr::Tuple(_)
| Expr::Set(_)
| Expr::Dict(_)
| Expr::ListComp(_)
| Expr::SetComp(_)
| Expr::DictComp(_)
) {
return true;
}
if !matches!(
right.as_ref(),
Expr::StringLiteral(_)
| Expr::BytesLiteral(_)
| Expr::NumberLiteral(_)
| Expr::BooleanLiteral(_)
| Expr::NoneLiteral(_)
| Expr::EllipsisLiteral(_)
| Expr::FString(_)
| Expr::List(_)
| Expr::Tuple(_)
| Expr::Set(_)
| Expr::Dict(_)
| Expr::ListComp(_)
| Expr::SetComp(_)
| Expr::DictComp(_)
) {
return true;
}
return false;
}
// Otherwise, avoid all complex expressions.
matches!(
expr,
Expr::Await(_)
| Expr::Call(_)
| Expr::DictComp(_)
| Expr::Generator(_)
| Expr::ListComp(_)
| Expr::SetComp(_)
| Expr::Subscript(_)
| Expr::Yield(_)
| Expr::YieldFrom(_)
| Expr::IpyEscapeCommand(_)
)
})
}
/// Call `func` over every `Expr` in `expr`, returning `true` if any expression
/// returns `true`..
pub fn any_over_expr(expr: &Expr, func: &dyn Fn(&Expr) -> bool) -> bool {
if func(expr) {
return true;
}
match expr {
Expr::BoolOp(ast::ExprBoolOp { values, .. }) => {
values.iter().any(|expr| any_over_expr(expr, func))
}
Expr::FString(ast::ExprFString { value, .. }) => value
.elements()
.any(|expr| any_over_interpolated_string_element(expr, func)),
Expr::TString(ast::ExprTString { value, .. }) => value
.elements()
.any(|expr| any_over_interpolated_string_element(expr, func)),
Expr::Named(ast::ExprNamed {
target,
value,
range: _,
node_index: _,
}) => any_over_expr(target, func) || any_over_expr(value, func),
Expr::BinOp(ast::ExprBinOp { left, right, .. }) => {
any_over_expr(left, func) || any_over_expr(right, func)
}
Expr::UnaryOp(ast::ExprUnaryOp { operand, .. }) => any_over_expr(operand, func),
Expr::Lambda(ast::ExprLambda { body, .. }) => any_over_expr(body, func),
Expr::If(ast::ExprIf {
test,
body,
orelse,
range: _,
node_index: _,
}) => any_over_expr(test, func) || any_over_expr(body, func) || any_over_expr(orelse, func),
Expr::Dict(ast::ExprDict {
items,
range: _,
node_index: _,
}) => items.iter().any(|ast::DictItem { key, value }| {
any_over_expr(value, func) || key.as_ref().is_some_and(|key| any_over_expr(key, func))
}),
Expr::Set(ast::ExprSet {
elts,
range: _,
node_index: _,
})
| Expr::List(ast::ExprList {
elts,
range: _,
node_index: _,
..
})
| Expr::Tuple(ast::ExprTuple {
elts,
range: _,
node_index: _,
..
}) => elts.iter().any(|expr| any_over_expr(expr, func)),
Expr::ListComp(ast::ExprListComp {
elt,
generators,
range: _,
node_index: _,
})
| Expr::SetComp(ast::ExprSetComp {
elt,
generators,
range: _,
node_index: _,
})
| Expr::Generator(ast::ExprGenerator {
elt,
generators,
range: _,
node_index: _,
parenthesized: _,
}) => {
any_over_expr(elt, func)
|| generators.iter().any(|generator| {
any_over_expr(&generator.target, func)
|| any_over_expr(&generator.iter, func)
|| generator.ifs.iter().any(|expr| any_over_expr(expr, func))
})
}
Expr::DictComp(ast::ExprDictComp {
key,
value,
generators,
range: _,
node_index: _,
}) => {
any_over_expr(key, func)
|| any_over_expr(value, func)
|| generators.iter().any(|generator| {
any_over_expr(&generator.target, func)
|| any_over_expr(&generator.iter, func)
|| generator.ifs.iter().any(|expr| any_over_expr(expr, func))
})
}
Expr::Await(ast::ExprAwait {
value,
range: _,
node_index: _,
})
| Expr::YieldFrom(ast::ExprYieldFrom {
value,
range: _,
node_index: _,
})
| Expr::Attribute(ast::ExprAttribute {
value,
range: _,
node_index: _,
..
})
| Expr::Starred(ast::ExprStarred {
value,
range: _,
node_index: _,
..
}) => any_over_expr(value, func),
Expr::Yield(ast::ExprYield {
value,
range: _,
node_index: _,
}) => value
.as_ref()
.is_some_and(|value| any_over_expr(value, func)),
Expr::Compare(ast::ExprCompare {
left, comparators, ..
}) => any_over_expr(left, func) || comparators.iter().any(|expr| any_over_expr(expr, func)),
Expr::Call(ast::ExprCall {
func: call_func,
arguments,
range: _,
node_index: _,
}) => {
any_over_expr(call_func, func)
// Note that this is the evaluation order but not necessarily the declaration order
// (e.g. for `f(*args, a=2, *args2, **kwargs)` it's not)
|| arguments.args.iter().any(|expr| any_over_expr(expr, func))
|| arguments.keywords
.iter()
.any(|keyword| any_over_expr(&keyword.value, func))
}
Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => {
any_over_expr(value, func) || any_over_expr(slice, func)
}
Expr::Slice(ast::ExprSlice {
lower,
upper,
step,
range: _,
node_index: _,
}) => {
lower
.as_ref()
.is_some_and(|value| any_over_expr(value, func))
|| upper
.as_ref()
.is_some_and(|value| any_over_expr(value, func))
|| step
.as_ref()
.is_some_and(|value| any_over_expr(value, func))
}
Expr::Name(_)
| Expr::StringLiteral(_)
| Expr::BytesLiteral(_)
| Expr::NumberLiteral(_)
| Expr::BooleanLiteral(_)
| Expr::NoneLiteral(_)
| Expr::EllipsisLiteral(_)
| Expr::IpyEscapeCommand(_) => false,
}
}
/// Returns `true` if `func` matches any expression nested within `type_param`.
///
/// For a `TypeVar` this inspects the bound and the default; `TypeVarTuple` and
/// `ParamSpec` carry only a default.
pub fn any_over_type_param(type_param: &TypeParam, func: &dyn Fn(&Expr) -> bool) -> bool {
    // Shared helper: test an optional embedded expression.
    let over = |expr: Option<&Expr>| expr.is_some_and(|expr| any_over_expr(expr, func));
    match type_param {
        TypeParam::TypeVar(ast::TypeParamTypeVar { bound, default, .. }) => {
            over(bound.as_deref()) || over(default.as_deref())
        }
        TypeParam::TypeVarTuple(ast::TypeParamTypeVarTuple { default, .. }) => {
            over(default.as_deref())
        }
        TypeParam::ParamSpec(ast::TypeParamParamSpec { default, .. }) => over(default.as_deref()),
    }
}
/// Returns `true` if `func` matches any expression nested anywhere within `pattern`.
///
/// Patterns themselves are not passed to `func`; only the expressions embedded in
/// them (e.g. the value of a `MatchValue`, the keys of a `MatchMapping`, or the
/// class of a `MatchClass`) are tested, recursing through sub-patterns.
pub fn any_over_pattern(pattern: &Pattern, func: &dyn Fn(&Expr) -> bool) -> bool {
    match pattern {
        Pattern::MatchValue(ast::PatternMatchValue {
            value,
            range: _,
            node_index: _,
        }) => any_over_expr(value, func),
        // Singletons (`None`/`True`/`False`) embed no expression to test.
        Pattern::MatchSingleton(_) => false,
        Pattern::MatchSequence(ast::PatternMatchSequence {
            patterns,
            range: _,
            node_index: _,
        }) => patterns
            .iter()
            .any(|pattern| any_over_pattern(pattern, func)),
        Pattern::MatchMapping(ast::PatternMatchMapping { keys, patterns, .. }) => {
            keys.iter().any(|key| any_over_expr(key, func))
                || patterns
                    .iter()
                    .any(|pattern| any_over_pattern(pattern, func))
        }
        Pattern::MatchClass(ast::PatternMatchClass { cls, arguments, .. }) => {
            any_over_expr(cls, func)
                || arguments
                    .patterns
                    .iter()
                    .any(|pattern| any_over_pattern(pattern, func))
                || arguments
                    .keywords
                    .iter()
                    .any(|keyword| any_over_pattern(&keyword.pattern, func))
        }
        // A `*rest` capture binds a name but contains no expression.
        Pattern::MatchStar(_) => false,
        Pattern::MatchAs(ast::PatternMatchAs { pattern, .. }) => pattern
            .as_ref()
            .is_some_and(|pattern| any_over_pattern(pattern, func)),
        Pattern::MatchOr(ast::PatternMatchOr {
            patterns,
            range: _,
            node_index: _,
        }) => patterns
            .iter()
            .any(|pattern| any_over_pattern(pattern, func)),
    }
}
/// Returns `true` if `func` matches the interpolated expression of `element`, or
/// any expression nested inside its format spec.
///
/// Literal elements contain no expressions and always return `false`.
pub fn any_over_interpolated_string_element(
    element: &ast::InterpolatedStringElement,
    func: &dyn Fn(&Expr) -> bool,
) -> bool {
    let ast::InterpolatedStringElement::Interpolation(ast::InterpolatedElement {
        expression,
        format_spec,
        ..
    }) = element
    else {
        return false;
    };
    if any_over_expr(expression, func) {
        return true;
    }
    // Format specs can themselves contain nested interpolations.
    format_spec.as_ref().is_some_and(|spec| {
        spec.elements
            .iter()
            .any(|nested| any_over_interpolated_string_element(nested, func))
    })
}
/// Returns `true` if `func` matches any expression nested anywhere within `stmt`,
/// recursing through nested statement bodies, handlers, match cases, and clauses.
pub fn any_over_stmt(stmt: &Stmt, func: &dyn Fn(&Expr) -> bool) -> bool {
    match stmt {
        Stmt::FunctionDef(ast::StmtFunctionDef {
            parameters,
            type_params,
            body,
            decorator_list,
            returns,
            ..
        }) => {
            // Checks parameter defaults and annotations, type parameters, the
            // body, decorators, and the return annotation — in that order.
            parameters.iter().any(|param| {
                param
                    .default()
                    .is_some_and(|default| any_over_expr(default, func))
                    || param
                        .annotation()
                        .is_some_and(|annotation| any_over_expr(annotation, func))
            }) || type_params.as_ref().is_some_and(|type_params| {
                type_params
                    .iter()
                    .any(|type_param| any_over_type_param(type_param, func))
            }) || body.iter().any(|stmt| any_over_stmt(stmt, func))
                || decorator_list
                    .iter()
                    .any(|decorator| any_over_expr(&decorator.expression, func))
                || returns
                    .as_ref()
                    .is_some_and(|value| any_over_expr(value, func))
        }
        Stmt::ClassDef(ast::StmtClassDef {
            arguments,
            type_params,
            body,
            decorator_list,
            ..
        }) => {
            // Note that e.g. `class A(*args, a=2, *args2, **kwargs): pass` is a valid class
            // definition
            arguments
                .as_deref()
                .is_some_and(|Arguments { args, keywords, .. }| {
                    args.iter().any(|expr| any_over_expr(expr, func))
                        || keywords
                            .iter()
                            .any(|keyword| any_over_expr(&keyword.value, func))
                })
                || type_params.as_ref().is_some_and(|type_params| {
                    type_params
                        .iter()
                        .any(|type_param| any_over_type_param(type_param, func))
                })
                || body.iter().any(|stmt| any_over_stmt(stmt, func))
                || decorator_list
                    .iter()
                    .any(|decorator| any_over_expr(&decorator.expression, func))
        }
        Stmt::Return(ast::StmtReturn {
            value,
            range: _,
            node_index: _,
        }) => value
            .as_ref()
            .is_some_and(|value| any_over_expr(value, func)),
        Stmt::Delete(ast::StmtDelete {
            targets,
            range: _,
            node_index: _,
        }) => targets.iter().any(|expr| any_over_expr(expr, func)),
        Stmt::TypeAlias(ast::StmtTypeAlias {
            name,
            type_params,
            value,
            ..
        }) => {
            any_over_expr(name, func)
                || type_params.as_ref().is_some_and(|type_params| {
                    type_params
                        .iter()
                        .any(|type_param| any_over_type_param(type_param, func))
                })
                || any_over_expr(value, func)
        }
        Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
            targets.iter().any(|expr| any_over_expr(expr, func)) || any_over_expr(value, func)
        }
        Stmt::AugAssign(ast::StmtAugAssign { target, value, .. }) => {
            any_over_expr(target, func) || any_over_expr(value, func)
        }
        Stmt::AnnAssign(ast::StmtAnnAssign {
            target,
            annotation,
            value,
            ..
        }) => {
            any_over_expr(target, func)
                || any_over_expr(annotation, func)
                || value
                    .as_ref()
                    .is_some_and(|value| any_over_expr(value, func))
        }
        Stmt::For(ast::StmtFor {
            target,
            iter,
            body,
            orelse,
            ..
        }) => {
            any_over_expr(target, func)
                || any_over_expr(iter, func)
                || any_over_body(body, func)
                || any_over_body(orelse, func)
        }
        Stmt::While(ast::StmtWhile {
            test,
            body,
            orelse,
            range: _,
            node_index: _,
        }) => any_over_expr(test, func) || any_over_body(body, func) || any_over_body(orelse, func),
        Stmt::If(ast::StmtIf {
            test,
            body,
            elif_else_clauses,
            range: _,
            node_index: _,
        }) => {
            any_over_expr(test, func)
                || any_over_body(body, func)
                // `elif` clauses carry a test; the `else` clause has `test: None`.
                || elif_else_clauses.iter().any(|clause| {
                    clause
                        .test
                        .as_ref()
                        .is_some_and(|test| any_over_expr(test, func))
                        || any_over_body(&clause.body, func)
                })
        }
        Stmt::With(ast::StmtWith { items, body, .. }) => {
            items.iter().any(|with_item| {
                any_over_expr(&with_item.context_expr, func)
                    || with_item
                        .optional_vars
                        .as_ref()
                        .is_some_and(|expr| any_over_expr(expr, func))
            }) || any_over_body(body, func)
        }
        Stmt::Raise(ast::StmtRaise {
            exc,
            cause,
            range: _,
            node_index: _,
        }) => {
            exc.as_ref().is_some_and(|value| any_over_expr(value, func))
                || cause
                    .as_ref()
                    .is_some_and(|value| any_over_expr(value, func))
        }
        Stmt::Try(ast::StmtTry {
            body,
            handlers,
            orelse,
            finalbody,
            is_star: _,
            range: _,
            node_index: _,
        }) => {
            any_over_body(body, func)
                || handlers.iter().any(|handler| {
                    let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
                        type_,
                        body,
                        ..
                    }) = handler;
                    type_.as_ref().is_some_and(|expr| any_over_expr(expr, func))
                        || any_over_body(body, func)
                })
                || any_over_body(orelse, func)
                || any_over_body(finalbody, func)
        }
        Stmt::Assert(ast::StmtAssert {
            test,
            msg,
            range: _,
            node_index: _,
        }) => {
            any_over_expr(test, func)
                || msg.as_ref().is_some_and(|value| any_over_expr(value, func))
        }
        Stmt::Match(ast::StmtMatch {
            subject,
            cases,
            range: _,
            node_index: _,
        }) => {
            any_over_expr(subject, func)
                || cases.iter().any(|case| {
                    let MatchCase {
                        pattern,
                        guard,
                        body,
                        range: _,
                        node_index: _,
                    } = case;
                    any_over_pattern(pattern, func)
                        || guard.as_ref().is_some_and(|expr| any_over_expr(expr, func))
                        || any_over_body(body, func)
                })
        }
        // Imports, scope declarations, and other leaf statements embed no expressions.
        Stmt::Import(_) => false,
        Stmt::ImportFrom(_) => false,
        Stmt::Global(_) => false,
        Stmt::Nonlocal(_) => false,
        Stmt::Expr(ast::StmtExpr {
            value,
            range: _,
            node_index: _,
        }) => any_over_expr(value, func),
        Stmt::Pass(_) | Stmt::Break(_) | Stmt::Continue(_) => false,
        Stmt::IpyEscapeCommand(_) => false,
    }
}
/// Returns `true` if `func` matches any expression nested anywhere in `body`.
pub fn any_over_body(body: &[Stmt], func: &dyn Fn(&Expr) -> bool) -> bool {
    for stmt in body {
        if any_over_stmt(stmt, func) {
            return true;
        }
    }
    false
}
/// Whether a name starts and ends with a double underscore (e.g. `__all__`).
pub fn is_dunder(id: &str) -> bool {
    id.strip_prefix("__").is_some() && id.strip_suffix("__").is_some()
}
/// Whether a name starts and ends with a single underscore.
///
/// `_a__` is considered neither a dunder nor a sunder name.
pub fn is_sunder(id: &str) -> bool {
    // Each side must have exactly one underscore: present, but not doubled.
    let single_prefix = id.starts_with('_') && !id.starts_with("__");
    let single_suffix = id.ends_with('_') && !id.ends_with("__");
    single_prefix && single_suffix
}
/// Return `true` if the [`Stmt`] is an assignment to a dunder (like `__all__`).
pub fn is_assignment_to_a_dunder(stmt: &Stmt) -> bool {
    // Check whether it's an assignment to a dunder, with or without a type
    // annotation. This is what pycodestyle (as of 2.9.1) does.
    let target = match stmt {
        // Only a plain assignment with a single target counts.
        Stmt::Assign(ast::StmtAssign { targets, .. }) => match targets.as_slice() {
            [target] => target,
            _ => return false,
        },
        Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => target.as_ref(),
        _ => return false,
    };
    match target {
        Expr::Name(ast::ExprName { id, .. }) => is_dunder(id),
        _ => false,
    }
}
/// Return `true` if the [`Expr`] is a singleton (`None`, `True`, `False`, or
/// `...`).
///
/// String and number literals are *not* singletons; see [`is_constant`].
pub const fn is_singleton(expr: &Expr) -> bool {
    matches!(
        expr,
        Expr::NoneLiteral(_) | Expr::BooleanLiteral(_) | Expr::EllipsisLiteral(_)
    )
}
/// Return `true` if the [`Expr`] is a literal or tuple of literals.
pub fn is_constant(expr: &Expr) -> bool {
    match expr {
        // A tuple is constant if (and only if) every element is.
        Expr::Tuple(tuple) => tuple.iter().all(is_constant),
        _ => expr.is_literal_expr(),
    }
}
/// Return `true` if the [`Expr`] is a non-singleton constant (e.g. a number or
/// string literal, but not `None`, `True`, `False`, or `...`).
pub fn is_constant_non_singleton(expr: &Expr) -> bool {
    // Both predicates are pure, so the order of the checks is immaterial.
    !is_singleton(expr) && is_constant(expr)
}
/// Return `true` if an [`Expr`] is a literal `True`.
pub const fn is_const_true(expr: &Expr) -> bool {
    match expr {
        Expr::BooleanLiteral(ast::ExprBooleanLiteral { value, .. }) => *value,
        _ => false,
    }
}
/// Return `true` if an [`Expr`] is a literal `False`.
pub const fn is_const_false(expr: &Expr) -> bool {
    match expr {
        Expr::BooleanLiteral(ast::ExprBooleanLiteral { value, .. }) => !*value,
        _ => false,
    }
}
/// Return `true` if the [`Expr`] is a mutable iterable initializer, like `{}` or `[]`.
///
/// Covers set, list, and dict displays as well as their comprehension forms.
pub const fn is_mutable_iterable_initializer(expr: &Expr) -> bool {
    matches!(
        expr,
        Expr::Set(_)
            | Expr::SetComp(_)
            | Expr::List(_)
            | Expr::ListComp(_)
            | Expr::Dict(_)
            | Expr::DictComp(_)
    )
}
/// Extract the names of all handled exceptions.
///
/// A tuple handler (`except (A, B):`) contributes each of its elements; a bare
/// `except:` contributes nothing.
pub fn extract_handled_exceptions(handlers: &[ExceptHandler]) -> Vec<&Expr> {
    let mut handled_exceptions = Vec::new();
    // `ExceptHandler` has a single variant, so this pattern is irrefutable.
    for ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { type_, .. }) in handlers {
        let Some(type_) = type_ else {
            continue;
        };
        match &**type_ {
            // Ex) `except (ValueError, TypeError):`
            Expr::Tuple(tuple) => {
                for type_ in tuple {
                    handled_exceptions.push(type_);
                }
            }
            // Ex) `except ValueError:`
            _ => handled_exceptions.push(type_),
        }
    }
    handled_exceptions
}
/// Given an [`Expr`] that can be callable or not (like a decorator, which could
/// be used with or without explicit call syntax), return the underlying
/// callable.
pub fn map_callable(decorator: &Expr) -> &Expr {
    match decorator {
        // Ex) `@decorator()` => return `decorator`
        Expr::Call(ast::ExprCall { func, .. }) => func,
        // Ex) `@decorator` => return it unchanged
        _ => decorator,
    }
}
/// Given an [`Expr`] that can be a [`ExprSubscript`][ast::ExprSubscript] or not
/// (like an annotation that may be generic or not), return the underlying expr.
pub fn map_subscript(expr: &Expr) -> &Expr {
    match expr {
        // Ex) `Iterable[T]` => return `Iterable`
        Expr::Subscript(ast::ExprSubscript { value, .. }) => value,
        // Ex) `Iterable` => return `Iterable`
        _ => expr,
    }
}
/// Given an [`Expr`] that can be starred, return the underlying starred expression.
pub fn map_starred(expr: &Expr) -> &Expr {
    match expr {
        // Ex) `*args` => return `args`
        Expr::Starred(ast::ExprStarred { value, .. }) => value,
        // Ex) `args` => return it unchanged
        _ => expr,
    }
}
/// Return `true` if the body calls `locals()`, `globals()`, `vars()`, `exec()`,
/// or `eval()` as a bare builtin name.
///
/// (The previous doc comment omitted `exec`, which the implementation has
/// always matched.)
///
/// Accepts a closure that determines whether a given name (e.g., `"list"`) is a Python builtin.
pub fn uses_magic_variable_access<F>(body: &[Stmt], is_builtin: F) -> bool
where
    F: Fn(&str) -> bool,
{
    any_over_body(body, &|expr| {
        // Only bare-name calls qualify: `locals()`, but not `obj.locals()`.
        let Expr::Call(ast::ExprCall { func, .. }) = expr else {
            return false;
        };
        let Expr::Name(ast::ExprName { id, .. }) = func.as_ref() else {
            return false;
        };
        // A local redefinition of one of these names doesn't count; the caller
        // decides (via `is_builtin`) whether the name still refers to the builtin.
        matches!(id.as_str(), "locals" | "globals" | "vars" | "exec" | "eval")
            && is_builtin(id.as_str())
    })
}
/// Format the module reference name for a relative import.
///
/// `level` is the number of leading dots; the result is the dots followed by
/// the module name, if any.
///
/// # Examples
///
/// ```rust
/// # use ruff_python_ast::helpers::format_import_from;
///
/// assert_eq!(format_import_from(0, None), "".to_string());
/// assert_eq!(format_import_from(1, None), ".".to_string());
/// assert_eq!(format_import_from(1, Some("foo")), ".foo".to_string());
/// ```
pub fn format_import_from(level: u32, module: Option<&str>) -> Cow<'_, str> {
    // An absolute import with a module name can borrow it directly.
    if level == 0 {
        if let Some(module) = module {
            return Cow::Borrowed(module);
        }
    }
    let mut module_name = ".".repeat(level as usize);
    if let Some(module) = module {
        module_name.push_str(module);
    }
    Cow::Owned(module_name)
}
/// Format the member reference name for a relative import.
///
/// # Examples
///
/// ```rust
/// # use ruff_python_ast::helpers::format_import_from_member;
///
/// assert_eq!(format_import_from_member(0, None, "bar"), "bar".to_string());
/// assert_eq!(format_import_from_member(1, None, "bar"), ".bar".to_string());
/// assert_eq!(format_import_from_member(1, Some("foo"), "bar"), ".foo.bar".to_string());
/// ```
pub fn format_import_from_member(level: u32, module: Option<&str>, member: &str) -> String {
    // Reserve room for the dots, the module (plus its trailing `.`), and the member.
    let mut qualified_name =
        String::with_capacity((level as usize) + module.map_or(0, str::len) + 1 + member.len());
    qualified_name.extend(std::iter::repeat('.').take(level as usize));
    if let Some(module) = module {
        qualified_name.push_str(module);
        qualified_name.push('.');
    }
    qualified_name.push_str(member);
    qualified_name
}
/// Create a module path from a (package, path) pair.
///
/// The path is made relative to the package's *parent*, and each remaining
/// component is reduced to its file stem. For example, if the package is
/// `foo/bar` and the path is `foo/bar/baz.py`, the module path is
/// `["bar", "baz"]`. (The previous doc example claimed `["baz"]`, which does
/// not match the implementation.)
///
/// Returns `None` if the path is not inside the package's parent, if the
/// package has no parent, or if any component has no file stem or is not
/// valid UTF-8.
pub fn to_module_path(package: &Path, path: &Path) -> Option<Vec<String>> {
    let relative = path.strip_prefix(package.parent()?).ok()?;
    relative
        .iter()
        .map(|component| {
            Path::new(component)
                .file_stem()
                .and_then(|stem| stem.to_os_string().into_string().ok())
        })
        .collect()
}
/// Format the call path for a relative import.
///
/// # Examples
///
/// ```rust
/// # use ruff_python_ast::helpers::collect_import_from_member;
///
/// assert_eq!(collect_import_from_member(0, None, "bar").segments(), ["bar"]);
/// assert_eq!(collect_import_from_member(1, None, "bar").segments(), [".", "bar"]);
/// assert_eq!(collect_import_from_member(1, Some("foo"), "bar").segments(), [".", "foo", "bar"]);
/// ```
pub fn collect_import_from_member<'a>(
    level: u32,
    module: Option<&'a str>,
    member: &'a str,
) -> QualifiedName<'a> {
    // Capacity: one segment per dot, one per module component, one for the member.
    let module_segments = module
        .map(|module| module.split('.').count())
        .unwrap_or_default();
    let mut qualified_name_builder =
        QualifiedNameBuilder::with_capacity(level as usize + module_segments + 1);
    // One standalone `.` segment per relative-import level.
    for _ in 0..level {
        qualified_name_builder.push(".");
    }
    // The dotted module path, if any.
    if let Some(module) = module {
        qualified_name_builder.extend(module.split('.'));
    }
    // Finally, the member itself.
    qualified_name_builder.push(member);
    qualified_name_builder.build()
}
/// Format the call path for a relative import, or `None` if the relative import extends beyond
/// the root module.
pub fn from_relative_import<'a>(
    // The path from which the import is relative.
    module: &'a [String],
    // The path of the import itself (e.g., given `from ..foo import bar`, `[".", ".", "foo", "bar]`).
    import: &[&'a str],
    // The remaining segments to the call path (e.g., given `bar.baz`, `["baz"]`).
    tail: &[&'a str],
) -> Option<QualifiedName<'a>> {
    let mut qualified_name_builder =
        QualifiedNameBuilder::with_capacity(module.len() + import.len() + tail.len());
    // Seed with the module path, then resolve the import against it.
    qualified_name_builder.extend(module.iter().map(String::as_str));
    for &segment in import {
        if segment == "." {
            // Each dot walks one level up; running out of levels means the
            // import escapes the root module.
            if qualified_name_builder.is_empty() {
                return None;
            }
            qualified_name_builder.pop();
        } else {
            qualified_name_builder.push(segment);
        }
    }
    // Append whatever trails the imported name (e.g. attribute accesses).
    qualified_name_builder.extend_from_slice(tail);
    Some(qualified_name_builder.build())
}
/// Given an imported module (based on its relative import level and module name), return the
/// fully-qualified module path.
///
/// Returns `None` when the relative level climbs to (or past) the root of
/// `module_path`, or when `module_path` is unknown for a relative import.
pub fn resolve_imported_module_path<'a>(
    level: u32,
    module: Option<&'a str>,
    module_path: Option<&[String]>,
) -> Option<Cow<'a, str>> {
    // Absolute import: the module name is already fully qualified.
    if level == 0 {
        return Some(Cow::Borrowed(module.unwrap_or("")));
    }
    let module_path = module_path?;
    // Strip one trailing segment per level; at least one segment must remain.
    let keep = module_path.len().checked_sub(level as usize)?;
    if keep == 0 {
        return None;
    }
    let segments: Vec<&str> = module_path[..keep]
        .iter()
        .map(String::as_str)
        .chain(module)
        .collect();
    Some(Cow::Owned(segments.join(".")))
}
/// A [`Visitor`] to collect all [`Expr::Name`] nodes in an AST.
///
/// When the same identifier occurs more than once, the most recently visited
/// occurrence overwrites earlier ones.
#[derive(Debug, Default)]
pub struct NameFinder<'a> {
    /// A map from identifier to defining expression.
    pub names: FxHashMap<&'a str, &'a ast::ExprName>,
}
impl<'a> Visitor<'a> for NameFinder<'a> {
    fn visit_expr(&mut self, expr: &'a Expr) {
        // Record the name, then keep walking so nested expressions are
        // collected too. Later occurrences of the same identifier overwrite
        // earlier ones in the map.
        if let Expr::Name(name) = expr {
            self.names.insert(&name.id, name);
        }
        crate::visitor::walk_expr(self, expr);
    }
}
/// A [`Visitor`] to collect all stored [`Expr::Name`] nodes in an AST.
///
/// "Stored" means the name appears in a store context (`ctx.is_store()`),
/// i.e. as a binding target rather than a read.
#[derive(Debug, Default)]
pub struct StoredNameFinder<'a> {
    /// A map from identifier to defining expression.
    pub names: FxHashMap<&'a str, &'a ast::ExprName>,
}
impl<'a> Visitor<'a> for StoredNameFinder<'a> {
fn visit_expr(&mut self, expr: &'a Expr) {
if let Expr::Name(name) = expr {
if name.ctx.is_store() {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
use crate::AnyNodeRef;
use crate::visitor::source_order::{SourceOrderVisitor, TraversalSignal, walk_node};
use ruff_text_size::{Ranged, TextRange};
use std::fmt;
use std::fmt::Formatter;
/// Returns the node with a minimal range that fully contains `range`.
///
/// If `range` is empty and falls within a parser *synthesized* node generated during error recovery,
/// then the first node with the given range is returned.
///
/// ## Panics
/// Panics if `range` is not contained within `root`.
pub fn covering_node(root: AnyNodeRef, range: TextRange) -> CoveringNode {
    /// Collects the ancestor chain of the narrowest node containing `range`.
    struct Visitor<'a> {
        /// The search range that candidate nodes must fully contain.
        range: TextRange,
        /// Set once the covering node is identified; stops further narrowing.
        found: bool,
        /// Ancestors of the current candidate, from the root downwards.
        ancestors: Vec<AnyNodeRef<'a>>,
    }
    impl<'a> SourceOrderVisitor<'a> for Visitor<'a> {
        fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
            // If the node fully contains the range, then it is a possible match but traverse into its children
            // to see if there's a node with a narrower range.
            if !self.found && node.range().contains_range(self.range) {
                self.ancestors.push(node);
                TraversalSignal::Traverse
            } else {
                TraversalSignal::Skip
            }
        }
        fn leave_node(&mut self, node: AnyNodeRef<'a>) {
            // Leaving the most recently pushed ancestor without having entered a
            // narrower child means that node is the covering node.
            if !self.found && self.ancestors.last() == Some(&node) {
                self.found = true;
            }
        }
    }
    assert!(
        root.range().contains_range(range),
        "Range is not contained within root"
    );
    let mut visitor = Visitor {
        range,
        found: false,
        ancestors: Vec::new(),
    };
    walk_node(&mut visitor, root);
    CoveringNode::from_ancestors(visitor.ancestors)
}
/// The node with a minimal range that fully contains the search range.
///
/// Produced by [`covering_node`].
pub struct CoveringNode<'a> {
    /// The covering node, along with all of its ancestors up to the
    /// root. The root is always the first element and the covering
    /// node found is always the last node. This sequence is guaranteed
    /// to be non-empty.
    nodes: Vec<AnyNodeRef<'a>>,
}
impl<'a> CoveringNode<'a> {
    /// Creates a new `CoveringNode` from a list of ancestor nodes.
    /// The ancestors should be ordered from root to the covering node.
    pub fn from_ancestors(ancestors: Vec<AnyNodeRef<'a>>) -> Self {
        Self { nodes: ancestors }
    }
    /// Returns the covering node found.
    pub fn node(&self) -> AnyNodeRef<'a> {
        *self
            .nodes
            .last()
            .expect("`CoveringNode::nodes` should always be non-empty")
    }
    /// Returns the node's parent.
    pub fn parent(&self) -> Option<AnyNodeRef<'a>> {
        let penultimate = self.nodes.len().checked_sub(2)?;
        self.nodes.get(penultimate).copied()
    }
    /// Finds the first node that fully covers the range and fulfills
    /// the given predicate.
    ///
    /// The "first" here means that the node closest to a leaf is
    /// returned.
    pub fn find_first(mut self, f: impl Fn(AnyNodeRef<'a>) -> bool) -> Result<Self, Self> {
        let Some(index) = self.find_first_index(f) else {
            return Err(self);
        };
        // Keep the matching node and its ancestors; drop everything below it.
        self.nodes.truncate(index + 1);
        Ok(self)
    }
    /// Finds the last node that fully covers the range and fulfills
    /// the given predicate.
    ///
    /// The "last" here means that after finding the "first" such node,
    /// the highest ancestor found satisfying the given predicate is
    /// returned. Note that this is *not* the same as finding the node
    /// closest to the root that satisfies the given predicate.
    pub fn find_last(mut self, f: impl Fn(AnyNodeRef<'a>) -> bool) -> Result<Self, Self> {
        let Some(mut index) = self.find_first_index(&f) else {
            return Err(self);
        };
        // Climb while the predicate keeps holding on contiguous ancestors.
        while index > 0 && f(self.nodes[index - 1]) {
            index -= 1;
        }
        self.nodes.truncate(index + 1);
        Ok(self)
    }
    /// Returns an iterator over the ancestor nodes, starting with the node itself
    /// and walking towards the root.
    pub fn ancestors(&self) -> impl DoubleEndedIterator<Item = AnyNodeRef<'a>> + '_ {
        self.nodes.iter().copied().rev()
    }
    /// Finds the index of the node that fully covers the range and
    /// fulfills the given predicate.
    ///
    /// If there are no nodes matching the given predicate, then
    /// `None` is returned.
    fn find_first_index(&self, f: impl Fn(AnyNodeRef<'a>) -> bool) -> Option<usize> {
        self.nodes.iter().rposition(|node| f(*node))
    }
}
impl fmt::Debug for CoveringNode<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render as `CoveringNode(<node>)`, eliding the ancestor chain.
        let mut tuple = f.debug_tuple("CoveringNode");
        tuple.field(&self.node());
        tuple.finish()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
//! An equivalent object hierarchy to the `RustPython` AST hierarchy, but with the
//! ability to compare expressions for equality (via [`Eq`] and [`Hash`]).
//!
//! Two [`ComparableExpr`]s are considered equal if the underlying AST nodes have the
//! same shape, ignoring trivia (e.g., parentheses, comments, and whitespace), the
//! location in the source code, and other contextual information (e.g., whether they
//! represent reads or writes, which is typically encoded in the Python AST).
//!
//! For example, in `[(a, b) for a, b in c]`, the `(a, b)` and `a, b` expressions are
//! considered equal, despite the former being parenthesized, and despite the former
//! being a write ([`ast::ExprContext::Store`]) and the latter being a read
//! ([`ast::ExprContext::Load`]).
//!
//! Similarly, `"a" "b"` and `"ab"` would be considered equal, despite the former being
//! an implicit concatenation of string literals, as these expressions are considered to
//! have the same shape in that they evaluate to the same value.
use crate as ast;
use crate::{Expr, Number};
use std::borrow::Cow;
use std::hash::Hash;
/// A comparable representation of [`ast::BoolOp`].
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
pub enum ComparableBoolOp {
    And,
    Or,
}
impl From<ast::BoolOp> for ComparableBoolOp {
    fn from(op: ast::BoolOp) -> Self {
        match op {
            ast::BoolOp::And => Self::And,
            ast::BoolOp::Or => Self::Or,
        }
    }
}
/// A comparable representation of [`ast::Operator`] (binary operators).
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
pub enum ComparableOperator {
    Add,
    Sub,
    Mult,
    MatMult,
    Div,
    Mod,
    Pow,
    LShift,
    RShift,
    BitOr,
    BitXor,
    BitAnd,
    FloorDiv,
}
impl From<ast::Operator> for ComparableOperator {
    fn from(op: ast::Operator) -> Self {
        match op {
            ast::Operator::Add => Self::Add,
            ast::Operator::Sub => Self::Sub,
            ast::Operator::Mult => Self::Mult,
            ast::Operator::MatMult => Self::MatMult,
            ast::Operator::Div => Self::Div,
            ast::Operator::Mod => Self::Mod,
            ast::Operator::Pow => Self::Pow,
            ast::Operator::LShift => Self::LShift,
            ast::Operator::RShift => Self::RShift,
            ast::Operator::BitOr => Self::BitOr,
            ast::Operator::BitXor => Self::BitXor,
            ast::Operator::BitAnd => Self::BitAnd,
            ast::Operator::FloorDiv => Self::FloorDiv,
        }
    }
}
/// A comparable representation of [`ast::UnaryOp`].
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
pub enum ComparableUnaryOp {
    Invert,
    Not,
    UAdd,
    USub,
}
impl From<ast::UnaryOp> for ComparableUnaryOp {
    fn from(op: ast::UnaryOp) -> Self {
        match op {
            ast::UnaryOp::Invert => Self::Invert,
            ast::UnaryOp::Not => Self::Not,
            ast::UnaryOp::UAdd => Self::UAdd,
            ast::UnaryOp::USub => Self::USub,
        }
    }
}
/// A comparable representation of [`ast::CmpOp`] (comparison operators).
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
pub enum ComparableCmpOp {
    Eq,
    NotEq,
    Lt,
    LtE,
    Gt,
    GtE,
    Is,
    IsNot,
    In,
    NotIn,
}
impl From<ast::CmpOp> for ComparableCmpOp {
    fn from(op: ast::CmpOp) -> Self {
        match op {
            ast::CmpOp::Eq => Self::Eq,
            ast::CmpOp::NotEq => Self::NotEq,
            ast::CmpOp::Lt => Self::Lt,
            ast::CmpOp::LtE => Self::LtE,
            ast::CmpOp::Gt => Self::Gt,
            ast::CmpOp::GtE => Self::GtE,
            ast::CmpOp::Is => Self::Is,
            ast::CmpOp::IsNot => Self::IsNot,
            ast::CmpOp::In => Self::In,
            ast::CmpOp::NotIn => Self::NotIn,
        }
    }
}
/// A comparable representation of [`ast::Alias`] (an import name), reduced to
/// its name and optional `as` binding.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableAlias<'a> {
    name: &'a str,
    asname: Option<&'a str>,
}
impl<'a> From<&'a ast::Alias> for ComparableAlias<'a> {
    fn from(alias: &'a ast::Alias) -> Self {
        Self {
            name: alias.name.as_str(),
            asname: alias.asname.as_deref(),
        }
    }
}
/// A comparable representation of [`ast::WithItem`]: the context expression and
/// the optional `as` target.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableWithItem<'a> {
    context_expr: ComparableExpr<'a>,
    optional_vars: Option<ComparableExpr<'a>>,
}
impl<'a> From<&'a ast::WithItem> for ComparableWithItem<'a> {
    fn from(with_item: &'a ast::WithItem) -> Self {
        Self {
            context_expr: (&with_item.context_expr).into(),
            optional_vars: with_item.optional_vars.as_ref().map(Into::into),
        }
    }
}
/// A comparable representation of [`ast::PatternArguments`] (the arguments of a
/// `match` class pattern).
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparablePatternArguments<'a> {
    patterns: Vec<ComparablePattern<'a>>,
    keywords: Vec<ComparablePatternKeyword<'a>>,
}
impl<'a> From<&'a ast::PatternArguments> for ComparablePatternArguments<'a> {
    fn from(parameters: &'a ast::PatternArguments) -> Self {
        Self {
            patterns: parameters.patterns.iter().map(Into::into).collect(),
            keywords: parameters.keywords.iter().map(Into::into).collect(),
        }
    }
}
/// A comparable representation of [`ast::PatternKeyword`] (a `name=pattern`
/// argument in a `match` class pattern).
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparablePatternKeyword<'a> {
    attr: &'a str,
    pattern: ComparablePattern<'a>,
}
impl<'a> From<&'a ast::PatternKeyword> for ComparablePatternKeyword<'a> {
    fn from(keyword: &'a ast::PatternKeyword) -> Self {
        Self {
            attr: keyword.attr.as_str(),
            pattern: (&keyword.pattern).into(),
        }
    }
}
/// Comparable mirror of [`ast::PatternMatchValue`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchValue<'a> {
    value: ComparableExpr<'a>,
}
/// Comparable mirror of [`ast::PatternMatchSingleton`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchSingleton {
    value: ComparableSingleton,
}
/// Comparable mirror of [`ast::PatternMatchSequence`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchSequence<'a> {
    patterns: Vec<ComparablePattern<'a>>,
}
/// Comparable mirror of [`ast::PatternMatchMapping`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchMapping<'a> {
    keys: Vec<ComparableExpr<'a>>,
    patterns: Vec<ComparablePattern<'a>>,
    rest: Option<&'a str>,
}
/// Comparable mirror of [`ast::PatternMatchClass`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchClass<'a> {
    cls: ComparableExpr<'a>,
    arguments: ComparablePatternArguments<'a>,
}
/// Comparable mirror of [`ast::PatternMatchStar`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchStar<'a> {
    name: Option<&'a str>,
}
/// Comparable mirror of [`ast::PatternMatchAs`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchAs<'a> {
    pattern: Option<Box<ComparablePattern<'a>>>,
    name: Option<&'a str>,
}
/// Comparable mirror of [`ast::PatternMatchOr`].
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PatternMatchOr<'a> {
    patterns: Vec<ComparablePattern<'a>>,
}
/// A comparable representation of [`ast::Pattern`], with source ranges stripped
/// so that structurally identical patterns compare equal.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparablePattern<'a> {
    MatchValue(PatternMatchValue<'a>),
    MatchSingleton(PatternMatchSingleton),
    MatchSequence(PatternMatchSequence<'a>),
    MatchMapping(PatternMatchMapping<'a>),
    MatchClass(PatternMatchClass<'a>),
    MatchStar(PatternMatchStar<'a>),
    MatchAs(PatternMatchAs<'a>),
    MatchOr(PatternMatchOr<'a>),
}
impl<'a> From<&'a ast::Pattern> for ComparablePattern<'a> {
    // Maps each pattern variant to its comparable mirror, dropping ranges and
    // other location data via the `..` rest patterns.
    fn from(pattern: &'a ast::Pattern) -> Self {
        match pattern {
            ast::Pattern::MatchValue(ast::PatternMatchValue { value, .. }) => {
                Self::MatchValue(PatternMatchValue {
                    value: value.into(),
                })
            }
            ast::Pattern::MatchSingleton(ast::PatternMatchSingleton { value, .. }) => {
                Self::MatchSingleton(PatternMatchSingleton {
                    value: value.into(),
                })
            }
            ast::Pattern::MatchSequence(ast::PatternMatchSequence { patterns, .. }) => {
                Self::MatchSequence(PatternMatchSequence {
                    patterns: patterns.iter().map(Into::into).collect(),
                })
            }
            ast::Pattern::MatchMapping(ast::PatternMatchMapping {
                keys,
                patterns,
                rest,
                ..
            }) => Self::MatchMapping(PatternMatchMapping {
                keys: keys.iter().map(Into::into).collect(),
                patterns: patterns.iter().map(Into::into).collect(),
                rest: rest.as_deref(),
            }),
            ast::Pattern::MatchClass(ast::PatternMatchClass { cls, arguments, .. }) => {
                Self::MatchClass(PatternMatchClass {
                    cls: cls.into(),
                    arguments: arguments.into(),
                })
            }
            ast::Pattern::MatchStar(ast::PatternMatchStar { name, .. }) => {
                Self::MatchStar(PatternMatchStar {
                    name: name.as_deref(),
                })
            }
            ast::Pattern::MatchAs(ast::PatternMatchAs { pattern, name, .. }) => {
                Self::MatchAs(PatternMatchAs {
                    pattern: pattern.as_ref().map(Into::into),
                    name: name.as_deref(),
                })
            }
            ast::Pattern::MatchOr(ast::PatternMatchOr { patterns, .. }) => {
                Self::MatchOr(PatternMatchOr {
                    patterns: patterns.iter().map(Into::into).collect(),
                })
            }
        }
    }
}
impl<'a> From<&'a Box<ast::Pattern>> for Box<ComparablePattern<'a>> {
fn from(pattern: &'a Box<ast::Pattern>) -> Self {
Box::new((pattern.as_ref()).into())
}
}
/// A comparable representation of [`ast::MatchCase`]: pattern, optional guard,
/// and body.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableMatchCase<'a> {
    pattern: ComparablePattern<'a>,
    guard: Option<ComparableExpr<'a>>,
    body: Vec<ComparableStmt<'a>>,
}
impl<'a> From<&'a ast::MatchCase> for ComparableMatchCase<'a> {
    fn from(match_case: &'a ast::MatchCase) -> Self {
        Self {
            pattern: (&match_case.pattern).into(),
            guard: match_case.guard.as_ref().map(Into::into),
            body: match_case.body.iter().map(Into::into).collect(),
        }
    }
}
/// A comparable representation of [`ast::Decorator`], reduced to its expression.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableDecorator<'a> {
    expression: ComparableExpr<'a>,
}
impl<'a> From<&'a ast::Decorator> for ComparableDecorator<'a> {
    fn from(decorator: &'a ast::Decorator) -> Self {
        Self {
            expression: (&decorator.expression).into(),
        }
    }
}
/// A comparable representation of [`ast::Singleton`] (`None`, `True`, `False`).
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableSingleton {
    None,
    True,
    False,
}
impl From<&ast::Singleton> for ComparableSingleton {
    fn from(singleton: &ast::Singleton) -> Self {
        match singleton {
            ast::Singleton::None => Self::None,
            ast::Singleton::True => Self::True,
            ast::Singleton::False => Self::False,
        }
    }
}
/// A comparable representation of [`ast::Number`].
///
/// Floats are stored as their raw IEEE-754 bit patterns (via `f64::to_bits`) so
/// the type can implement `Eq` and `Hash`. Consequently `0.0` and `-0.0`
/// compare unequal, and two `NaN`s compare equal only when their bit patterns
/// match.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableNumber<'a> {
    Int(&'a ast::Int),
    Float(u64),
    Complex { real: u64, imag: u64 },
}
impl<'a> From<&'a ast::Number> for ComparableNumber<'a> {
    fn from(number: &'a ast::Number) -> Self {
        match number {
            ast::Number::Int(value) => Self::Int(value),
            ast::Number::Float(value) => Self::Float(value.to_bits()),
            ast::Number::Complex { real, imag } => Self::Complex {
                real: real.to_bits(),
                imag: imag.to_bits(),
            },
        }
    }
}
/// A comparable representation of [`ast::Arguments`] (positional and keyword
/// call arguments).
#[derive(Debug, Default, PartialEq, Eq, Hash)]
pub struct ComparableArguments<'a> {
    args: Vec<ComparableExpr<'a>>,
    keywords: Vec<ComparableKeyword<'a>>,
}
impl<'a> From<&'a ast::Arguments> for ComparableArguments<'a> {
    fn from(arguments: &'a ast::Arguments) -> Self {
        Self {
            args: arguments.args.iter().map(Into::into).collect(),
            keywords: arguments.keywords.iter().map(Into::into).collect(),
        }
    }
}
impl<'a> From<&'a Box<ast::Arguments>> for ComparableArguments<'a> {
    fn from(arguments: &'a Box<ast::Arguments>) -> Self {
        (arguments.as_ref()).into()
    }
}
/// A comparable representation of [`ast::Parameters`] (a function signature's
/// parameter list).
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableParameters<'a> {
    posonlyargs: Vec<ComparableParameterWithDefault<'a>>,
    args: Vec<ComparableParameterWithDefault<'a>>,
    vararg: Option<ComparableParameter<'a>>,
    kwonlyargs: Vec<ComparableParameterWithDefault<'a>>,
    kwarg: Option<ComparableParameter<'a>>,
}
impl<'a> From<&'a ast::Parameters> for ComparableParameters<'a> {
    fn from(parameters: &'a ast::Parameters) -> Self {
        Self {
            posonlyargs: parameters.posonlyargs.iter().map(Into::into).collect(),
            args: parameters.args.iter().map(Into::into).collect(),
            vararg: parameters.vararg.as_ref().map(Into::into),
            kwonlyargs: parameters.kwonlyargs.iter().map(Into::into).collect(),
            kwarg: parameters.kwarg.as_ref().map(Into::into),
        }
    }
}
impl<'a> From<&'a Box<ast::Parameters>> for ComparableParameters<'a> {
    fn from(parameters: &'a Box<ast::Parameters>) -> Self {
        (parameters.as_ref()).into()
    }
}
impl<'a> From<&'a Box<ast::Parameter>> for ComparableParameter<'a> {
fn from(arg: &'a Box<ast::Parameter>) -> Self {
(arg.as_ref()).into()
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableParameter<'a> {
arg: &'a str,
annotation: Option<Box<ComparableExpr<'a>>>,
}
impl<'a> From<&'a ast::Parameter> for ComparableParameter<'a> {
fn from(arg: &'a ast::Parameter) -> Self {
Self {
arg: arg.name.as_str(),
annotation: arg.annotation.as_ref().map(Into::into),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableParameterWithDefault<'a> {
def: ComparableParameter<'a>,
default: Option<ComparableExpr<'a>>,
}
impl<'a> From<&'a ast::ParameterWithDefault> for ComparableParameterWithDefault<'a> {
fn from(arg: &'a ast::ParameterWithDefault) -> Self {
Self {
def: (&arg.parameter).into(),
default: arg.default.as_ref().map(Into::into),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableKeyword<'a> {
arg: Option<&'a str>,
value: ComparableExpr<'a>,
}
impl<'a> From<&'a ast::Keyword> for ComparableKeyword<'a> {
fn from(keyword: &'a ast::Keyword) -> Self {
Self {
arg: keyword.arg.as_ref().map(ast::Identifier::as_str),
value: (&keyword.value).into(),
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableComprehension<'a> {
target: ComparableExpr<'a>,
iter: ComparableExpr<'a>,
ifs: Vec<ComparableExpr<'a>>,
is_async: bool,
}
impl<'a> From<&'a ast::Comprehension> for ComparableComprehension<'a> {
fn from(comprehension: &'a ast::Comprehension) -> Self {
Self {
target: (&comprehension.target).into(),
iter: (&comprehension.iter).into(),
ifs: comprehension.ifs.iter().map(Into::into).collect(),
is_async: comprehension.is_async,
}
}
}
/// A comparable representation of an `except` clause: exception type, bound
/// name, and handler body (ranges dropped).
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExceptHandlerExceptHandler<'a> {
    type_: Option<Box<ComparableExpr<'a>>>,
    name: Option<&'a str>,
    body: Vec<ComparableStmt<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableExceptHandler<'a> {
    ExceptHandler(ExceptHandlerExceptHandler<'a>),
}
impl<'a> From<&'a ast::ExceptHandler> for ComparableExceptHandler<'a> {
    fn from(except_handler: &'a ast::ExceptHandler) -> Self {
        // `ast::ExceptHandler` has a single variant, so this `let` is
        // irrefutable; `..` discards the range information.
        let ast::ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
            type_,
            name,
            body,
            ..
        }) = except_handler;
        Self::ExceptHandler(ExceptHandlerExceptHandler {
            type_: type_.as_ref().map(Into::into),
            name: name.as_deref(),
            body: body.iter().map(Into::into).collect(),
        })
    }
}
/// One comparable element of an f-/t-string: either a literal text run
/// (owned or borrowed via `Cow`, to allow concatenation of adjacent runs) or
/// an interpolated `{...}` element.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableInterpolatedStringElement<'a> {
    Literal(Cow<'a, str>),
    InterpolatedElement(InterpolatedElement<'a>),
}
/// A comparable representation of a `{...}` interpolation: the inner
/// expression plus its debug text, conversion flag, and format spec.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct InterpolatedElement<'a> {
    expression: ComparableExpr<'a>,
    debug_text: Option<&'a ast::DebugText>,
    conversion: ast::ConversionFlag,
    format_spec: Option<Vec<ComparableInterpolatedStringElement<'a>>>,
}
impl<'a> From<&'a ast::InterpolatedStringElement> for ComparableInterpolatedStringElement<'a> {
    fn from(interpolated_string_element: &'a ast::InterpolatedStringElement) -> Self {
        match interpolated_string_element {
            ast::InterpolatedStringElement::Literal(ast::InterpolatedStringLiteralElement {
                value,
                ..
            }) => Self::Literal(value.as_ref().into()),
            ast::InterpolatedStringElement::Interpolation(formatted_value) => {
                formatted_value.into()
            }
        }
    }
}
impl<'a> From<&'a ast::InterpolatedElement> for InterpolatedElement<'a> {
    fn from(interpolated_element: &'a ast::InterpolatedElement) -> Self {
        // Destructure exhaustively, explicitly discarding the range and node
        // index so comparisons are purely structural.
        let ast::InterpolatedElement {
            expression,
            debug_text,
            conversion,
            format_spec,
            range: _,
            node_index: _,
        } = interpolated_element;
        Self {
            expression: (expression).into(),
            debug_text: debug_text.as_ref(),
            conversion: *conversion,
            format_spec: format_spec
                .as_ref()
                .map(|spec| spec.elements.iter().map(Into::into).collect()),
        }
    }
}
impl<'a> From<&'a ast::InterpolatedElement> for ComparableInterpolatedStringElement<'a> {
    fn from(interpolated_element: &'a ast::InterpolatedElement) -> Self {
        Self::InterpolatedElement(interpolated_element.into())
    }
}
/// A comparable representation of an `elif`/`else` clause: `test` is `None`
/// for a plain `else`.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableElifElseClause<'a> {
    test: Option<ComparableExpr<'a>>,
    body: Vec<ComparableStmt<'a>>,
}
impl<'a> From<&'a ast::ElifElseClause> for ComparableElifElseClause<'a> {
    fn from(elif_else_clause: &'a ast::ElifElseClause) -> Self {
        let ast::ElifElseClause {
            range: _,
            node_index: _,
            test,
            body,
        } = elif_else_clause;
        Self {
            test: test.as_ref().map(Into::into),
            body: body.iter().map(Into::into).collect(),
        }
    }
}
/// A comparable representation of any literal expression.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableLiteral<'a> {
    None,
    Ellipsis,
    Bool(&'a bool),
    Str(Vec<ComparableStringLiteral<'a>>),
    Bytes(Vec<ComparableBytesLiteral<'a>>),
    Number(ComparableNumber<'a>),
}
impl<'a> From<ast::LiteralExpressionRef<'a>> for ComparableLiteral<'a> {
    fn from(literal: ast::LiteralExpressionRef<'a>) -> Self {
        match literal {
            ast::LiteralExpressionRef::NoneLiteral(_) => Self::None,
            ast::LiteralExpressionRef::EllipsisLiteral(_) => Self::Ellipsis,
            ast::LiteralExpressionRef::BooleanLiteral(ast::ExprBooleanLiteral {
                value, ..
            }) => Self::Bool(value),
            ast::LiteralExpressionRef::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
                Self::Str(value.iter().map(Into::into).collect())
            }
            ast::LiteralExpressionRef::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => {
                Self::Bytes(value.iter().map(Into::into).collect())
            }
            ast::LiteralExpressionRef::NumberLiteral(ast::ExprNumberLiteral { value, .. }) => {
                Self::Number(value.into())
            }
        }
    }
}
/// A comparable representation of an entire (possibly implicitly
/// concatenated) f-string: a flat sequence of elements in which adjacent
/// literal runs have been merged, so that differently-split but equivalent
/// f-strings compare equal.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableFString<'a> {
    elements: Box<[ComparableInterpolatedStringElement<'a>]>,
}
impl<'a> From<&'a ast::FStringValue> for ComparableFString<'a> {
    // The approach below is somewhat complicated, so it may
    // require some justification.
    //
    // Suppose given an f-string of the form
    // `f"{foo!r} one" " and two " f" and three {bar!s}"`
    // This decomposes as:
    // - An `FStringPart::FString`, `f"{foo!r} one"` with elements
    //   - `FStringElement::Expression` encoding `{foo!r}`
    //   - `FStringElement::Literal` encoding " one"
    // - An `FStringPart::Literal` capturing `" and two "`
    // - An `FStringPart::FString`, `f" and three {bar!s}"` with elements
    //   - `FStringElement::Literal` encoding " and three "
    //   - `FStringElement::Expression` encoding `{bar!s}`
    //
    // We would like to extract from this a vector of (comparable) f-string
    // _elements_ which alternate between expression elements and literal
    // elements. In order to do so, we need to concatenate adjacent string
    // literals. String literals may be separated for two reasons: either
    // they appear in adjacent string literal parts, or else a string literal
    // part is adjacent to a string literal _element_ inside of an f-string part.
    fn from(value: &'a ast::FStringValue) -> Self {
        #[derive(Default)]
        struct Collector<'a> {
            elements: Vec<ComparableInterpolatedStringElement<'a>>,
        }
        impl<'a> Collector<'a> {
            // The logic for concatenating adjacent string literals
            // occurs here, implicitly: when we encounter a sequence
            // of string literals, the first gets pushed to the
            // `elements` vector, while subsequent strings
            // are concatenated onto this top string.
            fn push_literal(&mut self, literal: &'a str) {
                if let Some(ComparableInterpolatedStringElement::Literal(existing_literal)) =
                    self.elements.last_mut()
                {
                    // `to_mut` promotes the `Cow` to an owned string so we
                    // can append in place.
                    existing_literal.to_mut().push_str(literal);
                } else {
                    self.elements
                        .push(ComparableInterpolatedStringElement::Literal(literal.into()));
                }
            }
            fn push_expression(&mut self, expression: &'a ast::InterpolatedElement) {
                self.elements.push(expression.into());
            }
        }
        let mut collector = Collector::default();
        // Walk every part of the (possibly concatenated) f-string in source
        // order, flattening literal parts and f-string elements alike.
        for part in value {
            match part {
                ast::FStringPart::Literal(string_literal) => {
                    collector.push_literal(&string_literal.value);
                }
                ast::FStringPart::FString(fstring) => {
                    for element in &fstring.elements {
                        match element {
                            ast::InterpolatedStringElement::Literal(literal) => {
                                collector.push_literal(&literal.value);
                            }
                            ast::InterpolatedStringElement::Interpolation(expression) => {
                                collector.push_expression(expression);
                            }
                        }
                    }
                }
            }
        }
        Self {
            elements: collector.elements.into_boxed_slice(),
        }
    }
}
/// A comparable representation of a t-string, split into its static string
/// runs and its interpolations.
///
/// Invariant: `strings` always holds exactly one more element than
/// `interpolations` (the collector starts with an empty literal and pushes a
/// fresh one after every interpolation), so the original reading order can be
/// recovered by interleaving, starting with `strings`.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableTString<'a> {
    strings: Box<[ComparableInterpolatedStringElement<'a>]>,
    interpolations: Box<[InterpolatedElement<'a>]>,
}
impl<'a> From<&'a ast::TStringValue> for ComparableTString<'a> {
    // We model a [`ComparableTString`] on the actual
    // [CPython implementation] of a `string.templatelib.Template` object.
    //
    // As in CPython, we must be careful to ensure that the length
    // of `strings` is always one more than the length of `interpolations` -
    // that way we can recover the original reading order by interleaving
    // starting with `strings`. This is how we can tell the
    // difference between, e.g. `t"{foo}bar"` and `t"bar{foo}"`.
    //
    // - [CPython implementation](https://github.com/python/cpython/blob/c91ad5da9d92eac4718e4da8d53689c3cc24535e/Python/codegen.c#L4052-L4103)
    fn from(value: &'a ast::TStringValue) -> Self {
        struct Collector<'a> {
            strings: Vec<ComparableInterpolatedStringElement<'a>>,
            interpolations: Vec<InterpolatedElement<'a>>,
        }
        impl Default for Collector<'_> {
            fn default() -> Self {
                Self {
                    // Seed with an empty literal so the interleaving
                    // invariant holds even when the t-string begins with an
                    // interpolation.
                    strings: vec![ComparableInterpolatedStringElement::Literal("".into())],
                    interpolations: vec![],
                }
            }
        }
        impl<'a> Collector<'a> {
            // The logic for concatenating adjacent string literals
            // occurs here, implicitly: when we encounter a sequence
            // of string literals, the first gets pushed to the
            // `strings` vector, while subsequent strings
            // are concatenated onto this top string.
            fn push_literal(&mut self, literal: &'a str) {
                if let Some(ComparableInterpolatedStringElement::Literal(existing_literal)) =
                    self.strings.last_mut()
                {
                    existing_literal.to_mut().push_str(literal);
                } else {
                    self.strings
                        .push(ComparableInterpolatedStringElement::Literal(literal.into()));
                }
            }
            fn start_new_literal(&mut self) {
                self.strings
                    .push(ComparableInterpolatedStringElement::Literal("".into()));
            }
            fn push_tstring_interpolation(&mut self, expression: &'a ast::InterpolatedElement) {
                self.interpolations.push(expression.into());
                // Always open a fresh (possibly empty) literal after an
                // interpolation to preserve the one-more-string invariant.
                self.start_new_literal();
            }
        }
        let mut collector = Collector::default();
        for element in value.elements() {
            match element {
                ast::InterpolatedStringElement::Literal(literal) => {
                    collector.push_literal(&literal.value);
                }
                ast::InterpolatedStringElement::Interpolation(interpolation) => {
                    collector.push_tstring_interpolation(interpolation);
                }
            }
        }
        Self {
            strings: collector.strings.into_boxed_slice(),
            interpolations: collector.interpolations.into_boxed_slice(),
        }
    }
}
/// A comparable representation of a single string-literal part.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableStringLiteral<'a> {
    value: &'a str,
}
impl<'a> From<&'a ast::StringLiteral> for ComparableStringLiteral<'a> {
    fn from(string_literal: &'a ast::StringLiteral) -> Self {
        Self {
            value: &string_literal.value,
        }
    }
}
/// A comparable representation of a single bytes-literal part. Uses `Cow` so
/// the bytes can be borrowed from the AST node.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableBytesLiteral<'a> {
    value: Cow<'a, [u8]>,
}
impl<'a> From<&'a ast::BytesLiteral> for ComparableBytesLiteral<'a> {
    fn from(bytes_literal: &'a ast::BytesLiteral) -> Self {
        Self {
            value: Cow::Borrowed(&bytes_literal.value),
        }
    }
}
// The `Expr*` structs below are the per-variant payloads of
// `ComparableExpr`; each mirrors the fields of the corresponding AST
// expression node, with ranges dropped.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprBoolOp<'a> {
    op: ComparableBoolOp,
    values: Vec<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprNamed<'a> {
    target: Box<ComparableExpr<'a>>,
    value: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprBinOp<'a> {
    left: Box<ComparableExpr<'a>>,
    op: ComparableOperator,
    right: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprUnaryOp<'a> {
    op: ComparableUnaryOp,
    operand: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprLambda<'a> {
    parameters: Option<ComparableParameters<'a>>,
    body: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprIf<'a> {
    test: Box<ComparableExpr<'a>>,
    body: Box<ComparableExpr<'a>>,
    orelse: Box<ComparableExpr<'a>>,
}
/// A comparable `key: value` entry of a dict display; `key` is `None` for
/// double-star (`**mapping`) unpacking.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableDictItem<'a> {
    key: Option<ComparableExpr<'a>>,
    value: ComparableExpr<'a>,
}
impl<'a> From<&'a ast::DictItem> for ComparableDictItem<'a> {
    fn from(ast::DictItem { key, value }: &'a ast::DictItem) -> Self {
        Self {
            key: key.as_ref().map(ComparableExpr::from),
            value: value.into(),
        }
    }
}
// Continuation of the `ComparableExpr` variant payloads: each struct mirrors
// the fields of the same-named AST expression node, with ranges dropped and
// nested expressions replaced by their comparable counterparts.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprDict<'a> {
    items: Vec<ComparableDictItem<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprSet<'a> {
    elts: Vec<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprListComp<'a> {
    elt: Box<ComparableExpr<'a>>,
    generators: Vec<ComparableComprehension<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprSetComp<'a> {
    elt: Box<ComparableExpr<'a>>,
    generators: Vec<ComparableComprehension<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprDictComp<'a> {
    key: Box<ComparableExpr<'a>>,
    value: Box<ComparableExpr<'a>>,
    generators: Vec<ComparableComprehension<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprGenerator<'a> {
    elt: Box<ComparableExpr<'a>>,
    generators: Vec<ComparableComprehension<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprAwait<'a> {
    value: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprYield<'a> {
    value: Option<Box<ComparableExpr<'a>>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprYieldFrom<'a> {
    value: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprCompare<'a> {
    left: Box<ComparableExpr<'a>>,
    ops: Vec<ComparableCmpOp>,
    comparators: Vec<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprCall<'a> {
    func: Box<ComparableExpr<'a>>,
    arguments: ComparableArguments<'a>,
}
/// A comparable `{...}` interpolation element used for both f-string and
/// t-string element variants of `ComparableExpr`.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprInterpolatedElement<'a> {
    value: Box<ComparableExpr<'a>>,
    debug_text: Option<&'a ast::DebugText>,
    conversion: ast::ConversionFlag,
    format_spec: Vec<ComparableInterpolatedStringElement<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprFString<'a> {
    value: ComparableFString<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprTString<'a> {
    value: ComparableTString<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprStringLiteral<'a> {
    value: ComparableStringLiteral<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprBytesLiteral<'a> {
    value: ComparableBytesLiteral<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprNumberLiteral<'a> {
    value: ComparableNumber<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprBoolLiteral {
    value: bool,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprAttribute<'a> {
    value: Box<ComparableExpr<'a>>,
    attr: &'a str,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprSubscript<'a> {
    value: Box<ComparableExpr<'a>>,
    slice: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprStarred<'a> {
    value: Box<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprName<'a> {
    id: &'a str,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprList<'a> {
    elts: Vec<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprTuple<'a> {
    elts: Vec<ComparableExpr<'a>>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprSlice<'a> {
    lower: Option<Box<ComparableExpr<'a>>>,
    upper: Option<Box<ComparableExpr<'a>>>,
    step: Option<Box<ComparableExpr<'a>>>,
}
/// An IPython escape command (e.g. `%magic`), only produced in notebook mode.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprIpyEscapeCommand<'a> {
    kind: ast::IpyEscapeKind,
    value: &'a str,
}
/// A range-insensitive, hashable mirror of [`ast::Expr`]: two expressions
/// that differ only in source location compare equal.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableExpr<'a> {
    BoolOp(ExprBoolOp<'a>),
    NamedExpr(ExprNamed<'a>),
    BinOp(ExprBinOp<'a>),
    UnaryOp(ExprUnaryOp<'a>),
    Lambda(ExprLambda<'a>),
    IfExp(ExprIf<'a>),
    Dict(ExprDict<'a>),
    Set(ExprSet<'a>),
    ListComp(ExprListComp<'a>),
    SetComp(ExprSetComp<'a>),
    DictComp(ExprDictComp<'a>),
    GeneratorExp(ExprGenerator<'a>),
    Await(ExprAwait<'a>),
    Yield(ExprYield<'a>),
    YieldFrom(ExprYieldFrom<'a>),
    Compare(ExprCompare<'a>),
    Call(ExprCall<'a>),
    FStringExpressionElement(ExprInterpolatedElement<'a>),
    FString(ExprFString<'a>),
    TStringInterpolationElement(ExprInterpolatedElement<'a>),
    TString(ExprTString<'a>),
    StringLiteral(ExprStringLiteral<'a>),
    BytesLiteral(ExprBytesLiteral<'a>),
    NumberLiteral(ExprNumberLiteral<'a>),
    BoolLiteral(ExprBoolLiteral),
    NoneLiteral,
    EllipsisLiteral,
    Attribute(ExprAttribute<'a>),
    Subscript(ExprSubscript<'a>),
    Starred(ExprStarred<'a>),
    Name(ExprName<'a>),
    List(ExprList<'a>),
    Tuple(ExprTuple<'a>),
    Slice(ExprSlice<'a>),
    IpyEscapeCommand(ExprIpyEscapeCommand<'a>),
}
// Convenience conversions from boxed expressions; both delegate to the
// `From<&ast::Expr>` implementation below.
impl<'a> From<&'a Box<ast::Expr>> for Box<ComparableExpr<'a>> {
    fn from(expr: &'a Box<ast::Expr>) -> Self {
        Box::new((expr.as_ref()).into())
    }
}
impl<'a> From<&'a Box<ast::Expr>> for ComparableExpr<'a> {
    fn from(expr: &'a Box<ast::Expr>) -> Self {
        (expr.as_ref()).into()
    }
}
impl<'a> From<&'a ast::Expr> for ComparableExpr<'a> {
fn from(expr: &'a ast::Expr) -> Self {
match expr {
ast::Expr::BoolOp(ast::ExprBoolOp {
op,
values,
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/whitespace.rs | crates/ruff_python_ast/src/whitespace.rs | use ruff_python_trivia::{PythonWhitespace, indentation_at_offset, is_python_whitespace};
use ruff_source_file::{LineRanges, UniversalNewlineIterator};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Stmt;
/// Extract the leading indentation from a line.
///
/// Delegates to `indentation_at_offset`, probing at the start position of
/// `located`. Borrows from `source`, so no allocation takes place.
#[inline]
pub fn indentation<'a, T>(source: &'a str, located: &T) -> Option<&'a str>
where
    T: Ranged,
{
    indentation_at_offset(located.start(), source)
}
/// Return the end offset of the empty lines that follow a statement.
///
/// Starting at the end of the line containing `stmt`, consumes consecutive
/// lines that contain only whitespace and returns the offset just past the
/// last of them; if no such lines follow, returns the end of the statement's
/// own line.
pub fn trailing_lines_end(stmt: &Stmt, source: &str) -> TextSize {
    let line_end = source.full_line_end(stmt.end());
    UniversalNewlineIterator::with_offset(&source[line_end.to_usize()..], line_end)
        .take_while(|line| line.trim_whitespace().is_empty())
        .last()
        .map_or(line_end, |line| line.full_end())
}
/// If a [`Ranged`] has a trailing comment on its line, return the offset of
/// the `#` character, relative to the end of the ranged node.
///
/// Returns `None` when any non-whitespace character precedes the `#`, or when
/// the remainder of the line contains no `#` at all.
pub fn trailing_comment_start_offset<T>(located: &T, source: &str) -> Option<TextSize>
where
    T: Ranged,
{
    let line_end = source.line_end(located.end());
    let rest_of_line = &source[TextRange::new(located.end(), line_end)];
    // A trailing comment exists only if the first non-whitespace character
    // after the node is the `#` itself.
    match rest_of_line
        .char_indices()
        .find(|&(_, c)| !is_python_whitespace(c))
    {
        Some((index, '#')) => TextSize::try_from(index).ok(),
        _ => None,
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/identifier.rs | crates/ruff_python_ast/src/identifier.rs | //! Extract [`TextRange`] information from AST nodes.
//!
//! For example, given:
//! ```python
//! try:
//! ...
//! except Exception as e:
//! ...
//! ```
//!
//! This module can be used to identify the [`TextRange`] of the `except` token.
use crate::{self as ast, Alias, ExceptHandler, Parameter, ParameterWithDefault, Stmt};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use ruff_python_trivia::{Cursor, is_python_whitespace};
/// AST nodes that can report the range of their *name* token, as opposed to
/// the range of the whole node.
pub trait Identifier {
    /// Return the [`TextRange`] of the identifier in the given AST node.
    fn identifier(&self) -> TextRange;
}
impl Identifier for ast::StmtFunctionDef {
    /// Return the [`TextRange`] of the identifier in the given function definition.
    ///
    /// For example, return the range of `f` in:
    /// ```python
    /// def f():
    ///     ...
    /// ```
    fn identifier(&self) -> TextRange {
        // The name is stored as its own node, so its range is exact.
        self.name.range()
    }
}
impl Identifier for ast::StmtClassDef {
    /// Return the [`TextRange`] of the identifier in the given class definition.
    ///
    /// For example, return the range of `C` in:
    /// ```python
    /// class C():
    ///     ...
    /// ```
    fn identifier(&self) -> TextRange {
        self.name.range()
    }
}
impl Identifier for Stmt {
    /// Return the [`TextRange`] of the identifier in the given statement.
    ///
    /// For function and class definitions this is the range of the name
    /// token (e.g. `f` in `def f(): ...`); for every other statement kind,
    /// the statement's full range is returned.
    fn identifier(&self) -> TextRange {
        match self {
            Stmt::FunctionDef(function) => function.identifier(),
            Stmt::ClassDef(class) => class.identifier(),
            _ => self.range(),
        }
    }
}
impl Identifier for Parameter {
    /// Return the [`TextRange`] for the identifier defining an [`Parameter`].
    ///
    /// For example, return the range of `x` in:
    /// ```python
    /// def f(x: int):
    ///     ...
    /// ```
    fn identifier(&self) -> TextRange {
        self.name.range()
    }
}
impl Identifier for ParameterWithDefault {
    /// Return the [`TextRange`] for the identifier defining an [`ParameterWithDefault`].
    ///
    /// For example, return the range of `x` in:
    /// ```python
    /// def f(x: int = 0):
    ///     ...
    /// ```
    fn identifier(&self) -> TextRange {
        // Delegates to the inner parameter; the default value is ignored.
        self.parameter.identifier()
    }
}
impl Identifier for Alias {
    /// Return the [`TextRange`] for the identifier defining an [`Alias`].
    ///
    /// For example, return the range of `x` in:
    /// ```python
    /// from foo import bar as x
    /// ```
    ///
    /// When there is no `as` clause, the range of the imported name itself is
    /// returned instead.
    fn identifier(&self) -> TextRange {
        self.asname
            .as_ref()
            .map_or_else(|| self.name.range(), Ranged::range)
    }
}
/// Return the [`TextRange`] of the `except` token in an [`ExceptHandler`].
///
/// # Panics
///
/// Panics if no identifier-like token can be found within the handler's
/// range, which would indicate a malformed `except` clause.
pub fn except(handler: &ExceptHandler, source: &str) -> TextRange {
    IdentifierTokenizer::new(source, handler.range())
        .next()
        .expect("Failed to find `except` token in `ExceptHandler`")
}
/// Return the [`TextRange`] of the `else` token in a `For` or `While` statement.
///
/// Returns `None` for any other statement kind, or when the loop has no
/// `else` clause.
pub fn else_(stmt: &Stmt, source: &str) -> Option<TextRange> {
    let (body, orelse) = match stmt {
        Stmt::For(ast::StmtFor { body, orelse, .. }) => (body, orelse),
        Stmt::While(ast::StmtWhile { body, orelse, .. }) => (body, orelse),
        _ => return None,
    };
    if orelse.is_empty() {
        return None;
    }
    // The `else` keyword is the first identifier-like token that appears
    // after the end of the loop body.
    let search_start = body.last().expect("Expected body to be non-empty").end();
    IdentifierTokenizer::starts_at(search_start, source).next()
}
/// Return `true` if `c` can begin a Python identifier.
///
/// An identifier starts with an alphabetic character or an underscore.
fn is_python_identifier_start(c: char) -> bool {
    c == '_' || c.is_alphabetic()
}
/// Return `true` if `c` can appear after the first character of a Python
/// identifier.
///
/// Continuation characters are alphanumeric characters and underscores;
/// digits are allowed here but not as the first character.
fn is_python_identifier_continue(c: char) -> bool {
    c == '_' || c.is_alphanumeric()
}
/// Simple zero allocation tokenizer for Python identifiers.
///
/// The tokenizer must operate over a range that can only contain identifiers, keywords, and
/// comments (along with whitespace and continuation characters). It does not support other tokens,
/// like operators, literals, or delimiters. It also does not differentiate between keywords and
/// identifiers, treating every valid token as an "identifier".
///
/// This is useful for cases like, e.g., identifying the alias name in an aliased import (`bar` in
/// `import foo as bar`), where we're guaranteed to only have identifiers and keywords in the
/// relevant range.
pub(crate) struct IdentifierTokenizer<'a> {
    /// Cursor over the text of the range being tokenized.
    cursor: Cursor<'a>,
    /// Absolute offset in the original source where the cursor's current
    /// token begins (advanced past each consumed token in `next_token`).
    offset: TextSize,
}
impl<'a> IdentifierTokenizer<'a> {
    /// Create a tokenizer over `range` within `source`.
    pub(crate) fn new(source: &'a str, range: TextRange) -> Self {
        Self {
            cursor: Cursor::new(&source[range]),
            offset: range.start(),
        }
    }
    /// Create a tokenizer that scans from `offset` to the end of `source`.
    pub(crate) fn starts_at(offset: TextSize, source: &'a str) -> Self {
        let range = TextRange::new(offset, source.text_len());
        Self::new(source, range)
    }
    /// Return the absolute [`TextRange`] of the next identifier-like token,
    /// or `None` once the range is exhausted.
    fn next_token(&mut self) -> Option<TextRange> {
        // The block inside `while let` runs before each candidate character:
        // it first advances `offset` past whatever the cursor consumed for
        // the previous token, then starts a fresh token and bumps one char.
        while let Some(c) = {
            self.offset += self.cursor.token_len();
            self.cursor.start_token();
            self.cursor.bump()
        } {
            match c {
                // An identifier (or keyword): consume its continuation
                // characters and report its absolute range.
                c if is_python_identifier_start(c) => {
                    self.cursor.eat_while(is_python_identifier_continue);
                    return Some(TextRange::at(self.offset, self.cursor.token_len()));
                }
                // Skip runs of whitespace in one step.
                c if is_python_whitespace(c) => {
                    self.cursor.eat_while(is_python_whitespace);
                }
                // Skip a comment through to (but not including) the newline.
                '#' => {
                    self.cursor.eat_while(|c| !matches!(c, '\n' | '\r'));
                }
                // Treat `\r\n` as a single line ending.
                '\r' => {
                    self.cursor.eat_char('\n');
                }
                '\n' => {
                    // Nothing to do.
                }
                // Line-continuation backslash: skipped like whitespace.
                '\\' => {
                    // Nothing to do.
                }
                _ => {
                    // Nothing to do.
                }
            }
        }
        None
    }
}
impl Iterator for IdentifierTokenizer<'_> {
    type Item = TextRange;
    fn next(&mut self) -> Option<Self::Item> {
        self.next_token()
    }
}
#[cfg(test)]
mod tests {
    use super::IdentifierTokenizer;
    use ruff_text_size::{TextLen, TextRange, TextSize};
    // Each token of `global X,Y, Z` should be yielded with its exact range;
    // the commas are skipped by the tokenizer's catch-all arm.
    #[test]
    fn extract_global_names() {
        let contents = r"global X,Y, Z".trim();
        let mut names = IdentifierTokenizer::new(
            contents,
            TextRange::new(TextSize::new(0), contents.text_len()),
        );
        let range = names.next_token().unwrap();
        assert_eq!(&contents[range], "global");
        assert_eq!(range, TextRange::new(TextSize::from(0), TextSize::from(6)));
        let range = names.next_token().unwrap();
        assert_eq!(&contents[range], "X");
        assert_eq!(range, TextRange::new(TextSize::from(7), TextSize::from(8)));
        let range = names.next_token().unwrap();
        assert_eq!(&contents[range], "Y");
        assert_eq!(range, TextRange::new(TextSize::from(9), TextSize::from(10)));
        let range = names.next_token().unwrap();
        assert_eq!(&contents[range], "Z");
        assert_eq!(
            range,
            TextRange::new(TextSize::from(12), TextSize::from(13))
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/types.rs | crates/ruff_python_ast/src/types.rs | use crate::{Expr, Stmt};
/// A borrowed reference to either a statement or an expression AST node.
#[derive(Clone)]
pub enum Node<'a> {
    Stmt(&'a Stmt),
    Expr(&'a Expr),
}
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/expression.rs | crates/ruff_python_ast/src/expression.rs | use std::iter::FusedIterator;
use ruff_text_size::{Ranged, TextRange};
use crate::{
self as ast, AnyNodeRef, AnyStringFlags, Expr, ExprBytesLiteral, ExprFString, ExprRef,
ExprStringLiteral, ExprTString, StringFlags,
};
// Convenience conversion: borrow through the box and reuse the
// `From<&Expr>` implementation.
impl<'a> From<&'a Box<Expr>> for ExprRef<'a> {
    fn from(value: &'a Box<Expr>) -> Self {
        ExprRef::from(value.as_ref())
    }
}
/// Unowned pendant to all the literal variants of [`ast::Expr`] that stores a
/// reference instead of an owned value.
#[derive(Copy, Clone, Debug, PartialEq, is_macro::Is)]
pub enum LiteralExpressionRef<'a> {
    StringLiteral(&'a ast::ExprStringLiteral),
    BytesLiteral(&'a ast::ExprBytesLiteral),
    NumberLiteral(&'a ast::ExprNumberLiteral),
    BooleanLiteral(&'a ast::ExprBooleanLiteral),
    NoneLiteral(&'a ast::ExprNoneLiteral),
    EllipsisLiteral(&'a ast::ExprEllipsisLiteral),
}
impl Ranged for LiteralExpressionRef<'_> {
    /// Forward to the range of the referenced literal expression.
    fn range(&self) -> TextRange {
        match self {
            LiteralExpressionRef::StringLiteral(expression) => expression.range(),
            LiteralExpressionRef::BytesLiteral(expression) => expression.range(),
            LiteralExpressionRef::NumberLiteral(expression) => expression.range(),
            LiteralExpressionRef::BooleanLiteral(expression) => expression.range(),
            LiteralExpressionRef::NoneLiteral(expression) => expression.range(),
            LiteralExpressionRef::EllipsisLiteral(expression) => expression.range(),
        }
    }
}
impl<'a> From<LiteralExpressionRef<'a>> for AnyNodeRef<'a> {
    /// Map each literal variant to the corresponding [`AnyNodeRef`] variant.
    fn from(value: LiteralExpressionRef<'a>) -> Self {
        match value {
            LiteralExpressionRef::StringLiteral(expression) => {
                AnyNodeRef::ExprStringLiteral(expression)
            }
            LiteralExpressionRef::BytesLiteral(expression) => {
                AnyNodeRef::ExprBytesLiteral(expression)
            }
            LiteralExpressionRef::NumberLiteral(expression) => {
                AnyNodeRef::ExprNumberLiteral(expression)
            }
            LiteralExpressionRef::BooleanLiteral(expression) => {
                AnyNodeRef::ExprBooleanLiteral(expression)
            }
            LiteralExpressionRef::NoneLiteral(expression) => {
                AnyNodeRef::ExprNoneLiteral(expression)
            }
            LiteralExpressionRef::EllipsisLiteral(expression) => {
                AnyNodeRef::ExprEllipsisLiteral(expression)
            }
        }
    }
}
impl LiteralExpressionRef<'_> {
    /// Returns `true` if the literal is either a string or bytes literal that
    /// is implicitly concatenated.
    ///
    /// Number, boolean, `None`, and `...` literals cannot be concatenated, so
    /// they always yield `false`.
    pub fn is_implicit_concatenated(&self) -> bool {
        if let LiteralExpressionRef::StringLiteral(expression) = self {
            expression.value.is_implicit_concatenated()
        } else if let LiteralExpressionRef::BytesLiteral(expression) = self {
            expression.value.is_implicit_concatenated()
        } else {
            false
        }
    }
}
/// An enum that holds a reference to a string-like expression from the AST. This includes string
/// literals, bytes literals, f-strings, and t-strings.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum StringLike<'a> {
    String(&'a ast::ExprStringLiteral),
    Bytes(&'a ast::ExprBytesLiteral),
    FString(&'a ast::ExprFString),
    TString(&'a ast::ExprTString),
}
impl<'a> StringLike<'a> {
    /// Returns `true` if this is an f-string or t-string (i.e. a string kind
    /// that can contain interpolations).
    pub const fn is_interpolated_string(self) -> bool {
        matches!(self, Self::TString(_) | Self::FString(_))
    }
    /// Returns an iterator over the [`StringLikePart`] contained in this string-like expression.
    pub fn parts(&self) -> StringLikePartIter<'a> {
        match self {
            StringLike::String(expr) => StringLikePartIter::String(expr.value.iter()),
            StringLike::Bytes(expr) => StringLikePartIter::Bytes(expr.value.iter()),
            StringLike::FString(expr) => StringLikePartIter::FString(expr.value.iter()),
            StringLike::TString(expr) => StringLikePartIter::TString(expr.value.iter()),
        }
    }
    /// Returns `true` if the string is implicitly concatenated.
    pub fn is_implicit_concatenated(self) -> bool {
        match self {
            Self::String(ExprStringLiteral { value, .. }) => value.is_implicit_concatenated(),
            Self::Bytes(ExprBytesLiteral { value, .. }) => value.is_implicit_concatenated(),
            Self::FString(ExprFString { value, .. }) => value.is_implicit_concatenated(),
            Self::TString(ExprTString { value, .. }) => value.is_implicit_concatenated(),
        }
    }
    /// Converts this reference into the corresponding [`ExprRef`] variant.
    pub const fn as_expression_ref(self) -> ExprRef<'a> {
        match self {
            StringLike::String(expr) => ExprRef::StringLiteral(expr),
            StringLike::Bytes(expr) => ExprRef::BytesLiteral(expr),
            StringLike::FString(expr) => ExprRef::FString(expr),
            StringLike::TString(expr) => ExprRef::TString(expr),
        }
    }
}
impl<'a> From<&'a ast::ExprStringLiteral> for StringLike<'a> {
fn from(value: &'a ast::ExprStringLiteral) -> Self {
StringLike::String(value)
}
}
impl<'a> From<&'a ast::ExprBytesLiteral> for StringLike<'a> {
fn from(value: &'a ast::ExprBytesLiteral) -> Self {
StringLike::Bytes(value)
}
}
impl<'a> From<&'a ast::ExprFString> for StringLike<'a> {
fn from(value: &'a ast::ExprFString) -> Self {
StringLike::FString(value)
}
}
impl<'a> From<&'a ast::ExprTString> for StringLike<'a> {
fn from(value: &'a ast::ExprTString) -> Self {
StringLike::TString(value)
}
}
impl<'a> From<&StringLike<'a>> for ExprRef<'a> {
fn from(value: &StringLike<'a>) -> Self {
match value {
StringLike::String(expr) => ExprRef::StringLiteral(expr),
StringLike::Bytes(expr) => ExprRef::BytesLiteral(expr),
StringLike::FString(expr) => ExprRef::FString(expr),
StringLike::TString(expr) => ExprRef::TString(expr),
}
}
}
impl<'a> From<StringLike<'a>> for AnyNodeRef<'a> {
fn from(value: StringLike<'a>) -> Self {
AnyNodeRef::from(&value)
}
}
impl<'a> From<&StringLike<'a>> for AnyNodeRef<'a> {
fn from(value: &StringLike<'a>) -> Self {
match value {
StringLike::String(expr) => AnyNodeRef::ExprStringLiteral(expr),
StringLike::Bytes(expr) => AnyNodeRef::ExprBytesLiteral(expr),
StringLike::FString(expr) => AnyNodeRef::ExprFString(expr),
StringLike::TString(expr) => AnyNodeRef::ExprTString(expr),
}
}
}
impl<'a> TryFrom<&'a Expr> for StringLike<'a> {
type Error = ();
fn try_from(value: &'a Expr) -> Result<Self, Self::Error> {
match value {
Expr::StringLiteral(value) => Ok(Self::String(value)),
Expr::BytesLiteral(value) => Ok(Self::Bytes(value)),
Expr::FString(value) => Ok(Self::FString(value)),
Expr::TString(value) => Ok(Self::TString(value)),
_ => Err(()),
}
}
}
impl<'a> TryFrom<AnyNodeRef<'a>> for StringLike<'a> {
type Error = ();
fn try_from(value: AnyNodeRef<'a>) -> Result<Self, Self::Error> {
match value {
AnyNodeRef::ExprStringLiteral(value) => Ok(Self::String(value)),
AnyNodeRef::ExprBytesLiteral(value) => Ok(Self::Bytes(value)),
AnyNodeRef::ExprFString(value) => Ok(Self::FString(value)),
AnyNodeRef::ExprTString(value) => Ok(Self::TString(value)),
_ => Err(()),
}
}
}
impl Ranged for StringLike<'_> {
    /// Returns the full source range of the underlying expression.
    fn range(&self) -> TextRange {
        match *self {
            StringLike::String(inner) => inner.range(),
            StringLike::Bytes(inner) => inner.range(),
            StringLike::FString(inner) => inner.range(),
            StringLike::TString(inner) => inner.range(),
        }
    }
}
/// A reference to one individual syntactic part of a string-like expression
/// (a single literal or f-/t-string in a possibly implicitly-concatenated
/// sequence).
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum StringLikePart<'a> {
    String(&'a ast::StringLiteral),
    Bytes(&'a ast::BytesLiteral),
    FString(&'a ast::FString),
    TString(&'a ast::TString),
}
impl<'a> StringLikePart<'a> {
    /// Returns the [`AnyStringFlags`] for the current string-like part.
    pub fn flags(&self) -> AnyStringFlags {
        match *self {
            StringLikePart::String(part) => part.flags.into(),
            StringLikePart::Bytes(part) => part.flags.into(),
            StringLikePart::FString(part) => part.flags.into(),
            StringLikePart::TString(part) => part.flags.into(),
        }
    }
    /// Returns the range of the part's content in the source, i.e. everything
    /// between the opener (prefix plus opening quotes) and the closing quotes.
    pub fn content_range(self) -> TextRange {
        let flags = self.flags();
        TextRange::new(
            self.start() + flags.opener_len(),
            self.end() - flags.closer_len(),
        )
    }
    /// Whether this part is a plain string literal.
    pub const fn is_string_literal(self) -> bool {
        matches!(self, Self::String(_))
    }
    /// Returns the inner [`ast::StringLiteral`] if this part is a plain
    /// string literal, and `None` otherwise.
    pub const fn as_string_literal(self) -> Option<&'a ast::StringLiteral> {
        match self {
            StringLikePart::String(literal) => Some(literal),
            _ => None,
        }
    }
    /// Whether this part is an f-string or a t-string.
    pub const fn is_interpolated_string(self) -> bool {
        matches!(self, Self::FString(_) | Self::TString(_))
    }
}
/// Wraps a single string-literal part.
impl<'a> From<&'a ast::StringLiteral> for StringLikePart<'a> {
    fn from(part: &'a ast::StringLiteral) -> Self {
        Self::String(part)
    }
}
/// Wraps a single bytes-literal part.
impl<'a> From<&'a ast::BytesLiteral> for StringLikePart<'a> {
    fn from(part: &'a ast::BytesLiteral) -> Self {
        Self::Bytes(part)
    }
}
/// Wraps a single f-string part.
impl<'a> From<&'a ast::FString> for StringLikePart<'a> {
    fn from(part: &'a ast::FString) -> Self {
        Self::FString(part)
    }
}
/// Wraps a single t-string part.
impl<'a> From<&'a ast::TString> for StringLikePart<'a> {
    fn from(part: &'a ast::TString) -> Self {
        Self::TString(part)
    }
}
/// Converts a borrowed part by copying it (the type is `Copy`) and
/// delegating to the by-value impl.
impl<'a> From<&StringLikePart<'a>> for AnyNodeRef<'a> {
    fn from(part: &StringLikePart<'a>) -> Self {
        AnyNodeRef::from(*part)
    }
}
/// Maps a part onto the matching [`AnyNodeRef`] variant.
impl<'a> From<StringLikePart<'a>> for AnyNodeRef<'a> {
    fn from(part: StringLikePart<'a>) -> Self {
        match part {
            StringLikePart::String(inner) => AnyNodeRef::StringLiteral(inner),
            StringLikePart::Bytes(inner) => AnyNodeRef::BytesLiteral(inner),
            StringLikePart::FString(inner) => AnyNodeRef::FString(inner),
            StringLikePart::TString(inner) => AnyNodeRef::TString(inner),
        }
    }
}
impl Ranged for StringLikePart<'_> {
    /// Returns the full source range of the underlying part.
    fn range(&self) -> TextRange {
        match *self {
            StringLikePart::String(inner) => inner.range(),
            StringLikePart::Bytes(inner) => inner.range(),
            StringLikePart::FString(inner) => inner.range(),
            StringLikePart::TString(inner) => inner.range(),
        }
    }
}
/// An iterator over all the [`StringLikePart`] of a string-like expression.
///
/// This is created by the [`StringLike::parts`] method.
#[derive(Clone)]
pub enum StringLikePartIter<'a> {
    /// Parts of a (possibly implicitly concatenated) string literal.
    String(std::slice::Iter<'a, ast::StringLiteral>),
    /// Parts of a (possibly implicitly concatenated) bytes literal.
    Bytes(std::slice::Iter<'a, ast::BytesLiteral>),
    /// Parts of an f-string expression, which may interleave plain literal
    /// parts with actual f-string parts.
    FString(std::slice::Iter<'a, ast::FStringPart>),
    /// Parts of a t-string expression.
    TString(std::slice::Iter<'a, ast::TString>),
}
impl<'a> Iterator for StringLikePartIter<'a> {
    type Item = StringLikePart<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        let part = match self {
            StringLikePartIter::String(inner) => StringLikePart::String(inner.next()?),
            StringLikePartIter::Bytes(inner) => StringLikePart::Bytes(inner.next()?),
            StringLikePartIter::FString(inner) => {
                let part = inner.next()?;
                // An f-string part is either a plain literal segment of an
                // implicit concatenation or an actual f-string; flatten both
                // into the corresponding `StringLikePart` variant.
                match part {
                    ast::FStringPart::Literal(string_literal) => {
                        StringLikePart::String(string_literal)
                    }
                    ast::FStringPart::FString(f_string) => StringLikePart::FString(f_string),
                }
            }
            StringLikePartIter::TString(inner) => StringLikePart::TString(inner.next()?),
        };
        Some(part)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegates to the underlying slice iterator, which knows its exact
        // remaining length (this also backs the `ExactSizeIterator` impl).
        match self {
            StringLikePartIter::String(inner) => inner.size_hint(),
            StringLikePartIter::Bytes(inner) => inner.size_hint(),
            StringLikePartIter::FString(inner) => inner.size_hint(),
            StringLikePartIter::TString(inner) => inner.size_hint(),
        }
    }
}
impl DoubleEndedIterator for StringLikePartIter<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Mirror image of `next`, consuming parts from the back.
        let part = match self {
            StringLikePartIter::String(inner) => StringLikePart::String(inner.next_back()?),
            StringLikePartIter::Bytes(inner) => StringLikePart::Bytes(inner.next_back()?),
            StringLikePartIter::FString(inner) => {
                let part = inner.next_back()?;
                match part {
                    ast::FStringPart::Literal(string_literal) => {
                        StringLikePart::String(string_literal)
                    }
                    ast::FStringPart::FString(f_string) => StringLikePart::FString(f_string),
                }
            }
            StringLikePartIter::TString(inner) => StringLikePart::TString(inner.next_back()?),
        };
        Some(part)
    }
}
// The underlying slice iterators are fused and exact-sized, so the wrapper is too.
impl FusedIterator for StringLikePartIter<'_> {}
impl ExactSizeIterator for StringLikePartIter<'_> {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/str_prefix.rs | crates/ruff_python_ast/src/str_prefix.rs | use ruff_text_size::TextSize;
use std::fmt;
/// The prefixes that can legally precede a plain string literal.
///
/// Bytestring, f-string and t-string prefixes are excluded here,
/// as those string kinds are represented by different AST nodes.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, is_macro::Is)]
pub enum StringLiteralPrefix {
    /// No prefix at all, e.g. `"foo"`.
    Empty,
    /// The `u`/`U` prefix, e.g. `u"foo"`.
    ///
    /// Despite this variant's name, the prefix is a no-op at runtime:
    /// every Python 3 string is a unicode string, and the prefix is only
    /// accepted for backwards compatibility with Python 2. It is mutually
    /// exclusive with the `r`/`R` prefix.
    Unicode,
    /// The raw-string prefix `r`/`R`, e.g. `r"foo\."` or `R'bar\d'`.
    Raw { uppercase: bool },
}
impl StringLiteralPrefix {
    /// The exact source text of this prefix.
    pub const fn as_str(self) -> &'static str {
        match self {
            StringLiteralPrefix::Raw { uppercase: true } => "R",
            StringLiteralPrefix::Raw { uppercase: false } => "r",
            StringLiteralPrefix::Unicode => "u",
            StringLiteralPrefix::Empty => "",
        }
    }
    /// The length of this prefix in the source, in bytes.
    pub const fn text_len(self) -> TextSize {
        match self {
            StringLiteralPrefix::Empty => TextSize::new(0),
            StringLiteralPrefix::Unicode | StringLiteralPrefix::Raw { .. } => TextSize::new(1),
        }
    }
}
impl fmt::Display for StringLiteralPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The prefixes that can legally precede an f-string literal.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum FStringPrefix {
    /// A plain f-string prefix with nothing else, e.g. `f"{bar}"`.
    Regular,
    /// A raw f-string prefix, e.g. `rf"{bar}"` or `Rf"{bar}"`.
    Raw { uppercase_r: bool },
}
impl FStringPrefix {
    /// The exact source text of this prefix.
    pub const fn as_str(self) -> &'static str {
        match self {
            FStringPrefix::Raw { uppercase_r: true } => "Rf",
            FStringPrefix::Raw { uppercase_r: false } => "rf",
            FStringPrefix::Regular => "f",
        }
    }
    /// The length of this prefix in the source, in bytes.
    pub const fn text_len(self) -> TextSize {
        match self {
            FStringPrefix::Regular => TextSize::new(1),
            FStringPrefix::Raw { .. } => TextSize::new(2),
        }
    }
    /// Whether this prefix marks a "raw f-string", e.g. `rf"{bar}"` or `Rf"{bar}"`.
    pub const fn is_raw(self) -> bool {
        matches!(self, FStringPrefix::Raw { .. })
    }
}
impl fmt::Display for FStringPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The prefixes that can legally precede a t-string (template string) literal.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TStringPrefix {
    /// A plain t-string prefix with nothing else, e.g. `t"{bar}"`.
    Regular,
    /// A raw t-string prefix, e.g. `rt"{bar}"` or `Rt"{bar}"`.
    Raw { uppercase_r: bool },
}
impl TStringPrefix {
    /// The exact source text of this prefix.
    pub const fn as_str(self) -> &'static str {
        match self {
            TStringPrefix::Raw { uppercase_r: true } => "Rt",
            TStringPrefix::Raw { uppercase_r: false } => "rt",
            TStringPrefix::Regular => "t",
        }
    }
    /// The length of this prefix in the source, in bytes.
    pub const fn text_len(self) -> TextSize {
        match self {
            TStringPrefix::Regular => TextSize::new(1),
            TStringPrefix::Raw { .. } => TextSize::new(2),
        }
    }
    /// Whether this prefix marks a "raw t-string", e.g. `rt"{bar}"` or `Rt"{bar}"`.
    pub const fn is_raw(self) -> bool {
        matches!(self, TStringPrefix::Raw { .. })
    }
}
impl fmt::Display for TStringPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The prefixes that can legally precede a bytestring literal.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ByteStringPrefix {
    /// A plain bytestring prefix with nothing else, e.g. `b"foo"`.
    Regular,
    /// A raw bytestring prefix, e.g. `Rb"foo"` or `rb"foo"`.
    Raw { uppercase_r: bool },
}
impl ByteStringPrefix {
    /// The exact source text of this prefix.
    pub const fn as_str(self) -> &'static str {
        match self {
            ByteStringPrefix::Raw { uppercase_r: true } => "Rb",
            ByteStringPrefix::Raw { uppercase_r: false } => "rb",
            ByteStringPrefix::Regular => "b",
        }
    }
    /// The length of this prefix in the source, in bytes.
    pub const fn text_len(self) -> TextSize {
        match self {
            ByteStringPrefix::Regular => TextSize::new(1),
            ByteStringPrefix::Raw { .. } => TextSize::new(2),
        }
    }
    /// Whether this prefix marks a "raw bytestring", e.g. `rb"foo"` or `Rb"foo"`.
    pub const fn is_raw(self) -> bool {
        matches!(self, ByteStringPrefix::Raw { .. })
    }
}
impl fmt::Display for ByteStringPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// Enumeration of all the possible valid prefixes
/// prior to a Python string literal.
///
/// Using the `as_flags()` method on variants of this enum
/// is the recommended way to set `*_PREFIX` flags from the
/// `StringFlags` bitflag, as it means that you cannot accidentally
/// set a combination of `*_PREFIX` flags that would be invalid
/// at runtime in Python.
///
/// [String and Bytes literals]: https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
/// [PEP 701]: https://peps.python.org/pep-0701/
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, is_macro::Is)]
pub enum AnyStringPrefix {
    /// A bytestring prefix (`b`, `rb`, `Rb`).
    Bytes(ByteStringPrefix),
    /// An f-string prefix (`f`, `rf`, `Rf`).
    Format(FStringPrefix),
    /// A t-string prefix (`t`, `rt`, `Rt`).
    Template(TStringPrefix),
    /// A plain-string prefix (empty, `u`, `r`, `R`).
    Regular(StringLiteralPrefix),
}
impl AnyStringPrefix {
    /// The exact source text of this prefix.
    pub const fn as_str(self) -> &'static str {
        match self {
            AnyStringPrefix::Regular(prefix) => prefix.as_str(),
            AnyStringPrefix::Bytes(prefix) => prefix.as_str(),
            AnyStringPrefix::Format(prefix) => prefix.as_str(),
            AnyStringPrefix::Template(prefix) => prefix.as_str(),
        }
    }
    /// The length of this prefix in the source, in bytes.
    pub const fn text_len(self) -> TextSize {
        match self {
            AnyStringPrefix::Regular(prefix) => prefix.text_len(),
            AnyStringPrefix::Bytes(prefix) => prefix.text_len(),
            AnyStringPrefix::Format(prefix) => prefix.text_len(),
            AnyStringPrefix::Template(prefix) => prefix.text_len(),
        }
    }
    /// Whether this prefix marks a raw string of any kind.
    pub const fn is_raw(self) -> bool {
        match self {
            AnyStringPrefix::Regular(prefix) => prefix.is_raw(),
            AnyStringPrefix::Bytes(prefix) => prefix.is_raw(),
            AnyStringPrefix::Format(prefix) => prefix.is_raw(),
            AnyStringPrefix::Template(prefix) => prefix.is_raw(),
        }
    }
}
impl fmt::Display for AnyStringPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
impl Default for AnyStringPrefix {
    /// The default prefix: a plain string with no prefix at all.
    fn default() -> Self {
        AnyStringPrefix::Regular(StringLiteralPrefix::Empty)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/parenthesize.rs | crates/ruff_python_ast/src/parenthesize.rs | use ruff_python_trivia::{BackwardsTokenizer, CommentRanges, SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::{Ranged, TextLen, TextRange};
use crate::AnyNodeRef;
use crate::ExprRef;
/// Returns an iterator over the ranges of the optional parentheses surrounding an expression.
///
/// E.g. for `((f()))` with `f()` as expression, the iterator returns the ranges (1, 6) and (0, 7).
/// As the example shows, ranges are yielded innermost-first.
///
/// Note that without a parent the range can be inaccurate, e.g. `f(a)` we falsely return a set of
/// parentheses around `a` even if the parentheses actually belong to `f`. That is why you should
/// generally prefer [`parenthesized_range`].
///
/// Prefer [`crate::token::parentheses_iterator`] if you have access to [`crate::token::Tokens`].
pub fn parentheses_iterator<'a>(
    expr: ExprRef<'a>,
    parent: Option<AnyNodeRef>,
    comment_ranges: &'a CommentRanges,
    source: &'a str,
) -> impl Iterator<Item = TextRange> + 'a {
    // Tokenize forwards from the end of the expression to find closing parentheses.
    let right_tokenizer = if let Some(parent) = parent {
        // If the parent is a node that brings its own parentheses, exclude the closing parenthesis
        // from our search range. Otherwise, we risk matching on calls, like `func(x)`, for which
        // the open and close parentheses are part of the `Arguments` node.
        //
        // There are a few other nodes that may have their own parentheses, but are fine to exclude:
        // - `Parameters`: The parameters to a function definition. Any expressions would represent
        //   default arguments, and so must be preceded by _at least_ the parameter name. As such,
        //   we won't mistake any parentheses for the opening and closing parentheses on the
        //   `Parameters` node itself.
        // - `Tuple`: The elements of a tuple. The only risk is a single-element tuple (e.g., `(x,)`),
        //   which must have a trailing comma anyway.
        let exclusive_parent_end = if parent.is_arguments() {
            parent.end() - ")".text_len()
        } else {
            parent.end()
        };
        SimpleTokenizer::new(source, TextRange::new(expr.end(), exclusive_parent_end))
    } else {
        SimpleTokenizer::starts_at(expr.end(), source)
    };
    let right_tokenizer = right_tokenizer
        .skip_trivia()
        .take_while(|token| token.kind == SimpleTokenKind::RParen);
    // Tokenize backwards from the start of the expression to find opening parentheses.
    let left_tokenizer = BackwardsTokenizer::up_to(expr.start(), source, comment_ranges)
        .skip_trivia()
        .take_while(|token| token.kind == SimpleTokenKind::LParen);
    // Zip closing parenthesis with opening parenthesis. The order is intentional, as testing for
    // closing parentheses is cheaper, and `zip` will avoid progressing the `left_tokenizer` if
    // the `right_tokenizer` is exhausted.
    right_tokenizer
        .zip(left_tokenizer)
        .map(|(right, left)| TextRange::new(left.start(), right.end()))
}
/// Returns the [`TextRange`] of `expr` including its surrounding parentheses, if the
/// expression is parenthesized; or `None`, if the expression is not parenthesized.
///
/// Prefer [`crate::token::parenthesized_range`] if you have access to [`crate::token::Tokens`].
pub fn parenthesized_range(
    expr: ExprRef,
    parent: AnyNodeRef,
    comment_ranges: &CommentRanges,
    source: &str,
) -> Option<TextRange> {
    // The iterator yields ranges innermost-first, so the last one covers the
    // outermost pair of parentheses.
    parentheses_iterator(expr, Some(parent), comment_ranges, source).last()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/operator_precedence.rs | crates/ruff_python_ast/src/operator_precedence.rs | use crate::{BoolOp, Expr, ExprRef, Operator, UnaryOp};
/// Represents the precedence levels for Python expressions.
/// Variants at the top have lower precedence and variants at the bottom have
/// higher precedence.
///
/// Because `PartialOrd`/`Ord` are derived, precedence levels can be compared
/// directly: a variant declared later compares greater and binds tighter.
///
/// See: <https://docs.python.org/3/reference/expressions.html#operator-precedence>
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum OperatorPrecedence {
    /// The lowest (virtual) precedence level
    None,
    /// Precedence of `yield` and `yield from` expressions.
    Yield,
    /// Precedence of assignment expressions (`name := expr`).
    Assign,
    /// Precedence of starred expressions (`*expr`).
    Starred,
    /// Precedence of lambda expressions (`lambda args: expr`).
    Lambda,
    /// Precedence of if/else expressions (`expr if cond else expr`).
    IfElse,
    /// Precedence of boolean `or` expressions.
    Or,
    /// Precedence of boolean `and` expressions.
    And,
    /// Precedence of boolean `not` expressions.
    Not,
    /// Precedence of comparisons (`<`, `<=`, `>`, `>=`, `!=`, `==`),
    /// memberships (`in`, `not in`) and identity tests (`is`, `is not`).
    ComparisonsMembershipIdentity,
    /// Precedence of bitwise `|` operator.
    BitOr,
    /// Precedence of bitwise `^` operator.
    BitXor,
    /// Precedence of bitwise `&` operator.
    BitAnd,
    /// Precedence of left and right shift expressions (`<<`, `>>`).
    LeftRightShift,
    /// Precedence of addition and subtraction expressions (`+`, `-`).
    AddSub,
    /// Precedence of multiplication (`*`), matrix multiplication (`@`), division (`/`),
    /// floor division (`//`) and remainder (`%`) expressions.
    MulDivRemain,
    /// Precedence of unary positive (`+`), negative (`-`), and bitwise NOT (`~`) expressions.
    PosNegBitNot,
    /// Precedence of exponentiation expressions (`**`).
    Exponent,
    /// Precedence of `await` expressions.
    Await,
    /// Precedence of call expressions (`()`), attribute access (`.`), and subscript (`[]`) expressions.
    CallAttribute,
    /// Precedence of atomic expressions (literals, names, containers).
    Atomic,
}
impl OperatorPrecedence {
    /// Returns the precedence level of the given expression.
    ///
    /// Unary, binary and boolean operator expressions delegate to the
    /// `From<UnaryOp>`, `From<Operator>` and `From<BoolOp>` impls below, so
    /// each operator-to-precedence mapping lives in exactly one place.
    pub fn from_expr_ref(expr: &ExprRef) -> Self {
        match expr {
            // Binding or parenthesized expression, list display, dictionary display, set display
            ExprRef::Tuple(_)
            | ExprRef::Dict(_)
            | ExprRef::Set(_)
            | ExprRef::ListComp(_)
            | ExprRef::List(_)
            | ExprRef::SetComp(_)
            | ExprRef::DictComp(_)
            | ExprRef::Generator(_)
            | ExprRef::Name(_)
            | ExprRef::StringLiteral(_)
            | ExprRef::BytesLiteral(_)
            | ExprRef::NumberLiteral(_)
            | ExprRef::BooleanLiteral(_)
            | ExprRef::NoneLiteral(_)
            | ExprRef::EllipsisLiteral(_)
            | ExprRef::FString(_)
            | ExprRef::TString(_) => Self::Atomic,
            // Subscription, slicing, call, attribute reference
            ExprRef::Attribute(_)
            | ExprRef::Subscript(_)
            | ExprRef::Call(_)
            | ExprRef::Slice(_) => Self::CallAttribute,
            // Await expression
            ExprRef::Await(_) => Self::Await,
            // Unary operators: `not x` as well as `+x`, `-x`, `~x`; the two
            // distinct precedence levels are handled by `From<UnaryOp>`.
            ExprRef::UnaryOp(operator) => Self::from(operator.op),
            // Math binary ops, including exponentiation `**`
            ExprRef::BinOp(binary_operation) => Self::from(binary_operation.op),
            // Comparisons: <, <=, >, >=, ==, !=, in, not in, is, is not
            ExprRef::Compare(_) => Self::ComparisonsMembershipIdentity,
            // Boolean operations: and, or
            ExprRef::BoolOp(bool_op) => Self::from(bool_op.op),
            // Conditional expressions: x if y else z
            ExprRef::If(_) => Self::IfElse,
            // Lambda expressions
            ExprRef::Lambda(_) => Self::Lambda,
            // Unpacking also omitted in the docs, but has almost the lowest precedence,
            // except for assignment & yield expressions. E.g. `[*(v := [1,2])]` is valid
            // but `[*v := [1,2]] would fail on incorrect syntax because * will associate
            // `v` before the assignment.
            ExprRef::Starred(_) => Self::Starred,
            // Assignment expressions (aka named)
            ExprRef::Named(_) => Self::Assign,
            // Although omitted in docs, yield expressions may be used inside an expression
            // but must be parenthesized. So for our purposes we assume they just have
            // the lowest "real" precedence.
            ExprRef::Yield(_) | ExprRef::YieldFrom(_) => Self::Yield,
            // Not a real python expression, so treat as lowest as well
            ExprRef::IpyEscapeCommand(_) => Self::None,
        }
    }
    /// Returns the precedence level of the given owned-AST expression.
    pub fn from_expr(expr: &Expr) -> Self {
        Self::from_expr_ref(&ExprRef::from(expr))
    }
    /// Returns `true` if the precedence is right-associative i.e., the operations are evaluated
    /// from right to left. Only exponentiation (`**`) is right-associative.
    pub fn is_right_associative(self) -> bool {
        matches!(self, OperatorPrecedence::Exponent)
    }
}
impl From<&Expr> for OperatorPrecedence {
fn from(expr: &Expr) -> Self {
Self::from_expr(expr)
}
}
impl<'a> From<&ExprRef<'a>> for OperatorPrecedence {
fn from(expr_ref: &ExprRef<'a>) -> Self {
Self::from_expr_ref(expr_ref)
}
}
impl From<Operator> for OperatorPrecedence {
fn from(operator: Operator) -> Self {
match operator {
// Multiplication, matrix multiplication, division, floor division, remainder:
// *, @, /, //, %
Operator::Mult
| Operator::MatMult
| Operator::Div
| Operator::Mod
| Operator::FloorDiv => Self::MulDivRemain,
// Addition, subtraction
Operator::Add | Operator::Sub => Self::AddSub,
// Bitwise shifts: <<, >>
Operator::LShift | Operator::RShift => Self::LeftRightShift,
// Bitwise operations: &, ^, |
Operator::BitAnd => Self::BitAnd,
Operator::BitXor => Self::BitXor,
Operator::BitOr => Self::BitOr,
// Exponentiation **
Operator::Pow => Self::Exponent,
}
}
}
impl From<BoolOp> for OperatorPrecedence {
fn from(operator: BoolOp) -> Self {
match operator {
BoolOp::And => Self::And,
BoolOp::Or => Self::Or,
}
}
}
impl From<UnaryOp> for OperatorPrecedence {
fn from(unary_op: UnaryOp) -> Self {
match unary_op {
UnaryOp::UAdd | UnaryOp::USub | UnaryOp::Invert => Self::PosNegBitNot,
UnaryOp::Not => Self::Not,
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/nodes.rs | crates/ruff_python_ast/src/nodes.rs | #![allow(clippy::derive_partial_eq_without_eq)]
use crate::AtomicNodeIndex;
use crate::generated::{
ExprBytesLiteral, ExprDict, ExprFString, ExprList, ExprName, ExprSet, ExprStringLiteral,
ExprTString, ExprTuple, PatternMatchAs, PatternMatchOr, StmtClassDef,
};
use std::borrow::Cow;
use std::fmt;
use std::fmt::Debug;
use std::iter::FusedIterator;
use std::ops::{Deref, DerefMut};
use std::slice::{Iter, IterMut};
use std::sync::OnceLock;
use bitflags::bitflags;
use itertools::Itertools;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::str_prefix::{
AnyStringPrefix, ByteStringPrefix, FStringPrefix, StringLiteralPrefix, TStringPrefix,
};
use crate::{
Expr, ExprRef, InterpolatedStringElement, LiteralExpressionRef, OperatorPrecedence, Pattern,
Stmt, TypeParam, int,
name::Name,
str::{Quote, TripleQuotes},
};
impl StmtClassDef {
    /// Returns the base classes from the class definition's argument list,
    /// or an empty slice if there is no argument list.
    pub fn bases(&self) -> &[Expr] {
        if let Some(arguments) = &self.arguments {
            &arguments.args
        } else {
            &[]
        }
    }
    /// Returns the metaclass keywords (e.g. `metaclass=...`) from the class
    /// definition's argument list, or an empty slice if there is none.
    pub fn keywords(&self) -> &[Keyword] {
        if let Some(arguments) = &self.arguments {
            &arguments.keywords
        } else {
            &[]
        }
    }
}
/// An `elif` or `else` clause of an `if` statement.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct ElifElseClause {
    pub range: TextRange,
    pub node_index: AtomicNodeIndex,
    /// The condition: `Some` for an `elif` clause, `None` for an `else` clause
    /// (which has no condition).
    pub test: Option<Expr>,
    pub body: Vec<Stmt>,
}
impl Expr {
    /// Returns `true` if the expression is a literal expression.
    ///
    /// A literal expression is either a string literal, bytes literal,
    /// integer, float, complex number, boolean, `None`, or ellipsis (`...`).
    pub fn is_literal_expr(&self) -> bool {
        matches!(
            self,
            Expr::StringLiteral(_)
                | Expr::BytesLiteral(_)
                | Expr::NumberLiteral(_)
                | Expr::BooleanLiteral(_)
                | Expr::NoneLiteral(_)
                | Expr::EllipsisLiteral(_)
        )
    }
    /// Returns [`LiteralExpressionRef`] if the expression is a literal expression,
    /// and `None` for any other expression kind.
    pub fn as_literal_expr(&self) -> Option<LiteralExpressionRef<'_>> {
        match self {
            Expr::StringLiteral(expr) => Some(LiteralExpressionRef::StringLiteral(expr)),
            Expr::BytesLiteral(expr) => Some(LiteralExpressionRef::BytesLiteral(expr)),
            Expr::NumberLiteral(expr) => Some(LiteralExpressionRef::NumberLiteral(expr)),
            Expr::BooleanLiteral(expr) => Some(LiteralExpressionRef::BooleanLiteral(expr)),
            Expr::NoneLiteral(expr) => Some(LiteralExpressionRef::NoneLiteral(expr)),
            Expr::EllipsisLiteral(expr) => Some(LiteralExpressionRef::EllipsisLiteral(expr)),
            _ => None,
        }
    }
    /// Return the [`OperatorPrecedence`] of this expression
    pub fn precedence(&self) -> OperatorPrecedence {
        OperatorPrecedence::from(self)
    }
}
impl ExprRef<'_> {
    /// See [`Expr::is_literal_expr`].
    pub fn is_literal_expr(&self) -> bool {
        matches!(
            self,
            ExprRef::StringLiteral(_)
                | ExprRef::BytesLiteral(_)
                | ExprRef::NumberLiteral(_)
                | ExprRef::BooleanLiteral(_)
                | ExprRef::NoneLiteral(_)
                | ExprRef::EllipsisLiteral(_)
        )
    }
    /// Return the [`OperatorPrecedence`] of this expression.
    pub fn precedence(&self) -> OperatorPrecedence {
        OperatorPrecedence::from(self)
    }
}
/// Represents an item in a [dictionary literal display][1].
///
/// Consider the following Python dictionary literal:
/// ```python
/// {key1: value1, **other_dictionary}
/// ```
///
/// In our AST, this would be represented using an `ExprDict` node containing
/// two `DictItem` nodes inside it:
/// ```ignore
/// [
///     DictItem {
///         key: Some(Expr::Name(ExprName { id: "key1" })),
///         value: Expr::Name(ExprName { id: "value1" }),
///     },
///     DictItem {
///         key: None,
///         value: Expr::Name(ExprName { id: "other_dictionary" }),
///     }
/// ]
/// ```
///
/// [1]: https://docs.python.org/3/reference/expressions.html#displays-for-lists-sets-and-dictionaries
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct DictItem {
    /// The key of the entry; `None` for a `**`-unpacking item (see example above).
    pub key: Option<Expr>,
    /// The value of the entry (for a `**` item, the unpacked mapping expression).
    pub value: Expr,
}
impl DictItem {
    // Function-form accessors so they can be passed to `Iterator::map` by the
    // dictionary key/value iterators.
    fn key(&self) -> Option<&Expr> {
        self.key.as_ref()
    }
    fn value(&self) -> &Expr {
        &self.value
    }
}
impl Ranged for DictItem {
    /// Returns the source range of the whole `key: value` item.
    fn range(&self) -> TextRange {
        // A `**`-unpacking item has no key, so the item starts at its value.
        let start = match &self.key {
            Some(key) => key.start(),
            None => self.value.start(),
        };
        TextRange::new(start, self.value.end())
    }
}
impl ExprDict {
    /// Returns an `Iterator` over the AST nodes representing the
    /// dictionary's keys.
    pub fn iter_keys(&self) -> DictKeyIterator<'_> {
        DictKeyIterator::new(&self.items)
    }
    /// Returns an `Iterator` over the AST nodes representing the
    /// dictionary's values.
    pub fn iter_values(&self) -> DictValueIterator<'_> {
        DictValueIterator::new(&self.items)
    }
    /// Returns the AST node representing the *n*th key of this
    /// dictionary, or `None` if the *n*th item is a `**`-unpacking item.
    ///
    /// # Panics
    ///
    /// If the index `n` is out of bounds.
    pub fn key(&self, n: usize) -> Option<&Expr> {
        self.items[n].key()
    }
    /// Returns the AST node representing the *n*th value of this
    /// dictionary.
    ///
    /// # Panics
    ///
    /// If the index `n` is out of bounds.
    pub fn value(&self, n: usize) -> &Expr {
        self.items[n].value()
    }
    /// Returns an iterator over the dictionary's [`DictItem`]s.
    pub fn iter(&self) -> std::slice::Iter<'_, DictItem> {
        self.items.iter()
    }
    /// Returns the number of items in the dictionary literal.
    pub fn len(&self) -> usize {
        self.items.len()
    }
    /// Returns `true` if the dictionary literal contains no items.
    pub fn is_empty(&self) -> bool {
        self.items.is_empty()
    }
}
impl<'a> IntoIterator for &'a ExprDict {
    type IntoIter = std::slice::Iter<'a, DictItem>;
    type Item = &'a DictItem;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// An iterator over the keys of a dictionary literal.
///
/// Yields `None` for `**`-unpacking items, which have no key.
#[derive(Debug, Clone)]
pub struct DictKeyIterator<'a> {
    items: Iter<'a, DictItem>,
}
impl<'a> DictKeyIterator<'a> {
    fn new(items: &'a [DictItem]) -> Self {
        Self {
            items: items.iter(),
        }
    }
    /// Returns `true` if there are no remaining items.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<'a> Iterator for DictKeyIterator<'a> {
    type Item = Option<&'a Expr>;
    fn next(&mut self) -> Option<Self::Item> {
        self.items.next().map(DictItem::key)
    }
    // `last` can jump straight to the back because the underlying slice
    // iterator is double-ended.
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.items.size_hint()
    }
}
impl DoubleEndedIterator for DictKeyIterator<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.items.next_back().map(DictItem::key)
    }
}
impl FusedIterator for DictKeyIterator<'_> {}
impl ExactSizeIterator for DictKeyIterator<'_> {}
/// An iterator over the values of a dictionary literal.
#[derive(Debug, Clone)]
pub struct DictValueIterator<'a> {
    items: Iter<'a, DictItem>,
}
impl<'a> DictValueIterator<'a> {
    fn new(items: &'a [DictItem]) -> Self {
        Self {
            items: items.iter(),
        }
    }
    /// Returns `true` if there are no remaining items.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<'a> Iterator for DictValueIterator<'a> {
    type Item = &'a Expr;
    fn next(&mut self) -> Option<Self::Item> {
        self.items.next().map(DictItem::value)
    }
    // `last` can jump straight to the back because the underlying slice
    // iterator is double-ended.
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.items.size_hint()
    }
}
impl DoubleEndedIterator for DictValueIterator<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.items.next_back().map(DictItem::value)
    }
}
impl FusedIterator for DictValueIterator<'_> {}
impl ExactSizeIterator for DictValueIterator<'_> {}
impl ExprSet {
    /// Returns an iterator over the elements of the set literal.
    pub fn iter(&self) -> std::slice::Iter<'_, Expr> {
        self.elts.iter()
    }
    /// Returns the number of elements in the set literal.
    pub fn len(&self) -> usize {
        self.elts.len()
    }
    /// Returns `true` if the set literal contains no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<'a> IntoIterator for &'a ExprSet {
    type Item = &'a Expr;
    type IntoIter = std::slice::Iter<'a, Expr>;
    fn into_iter(self) -> Self::IntoIter {
        self.elts.iter()
    }
}
/// A format specification attached to an interpolated element of an
/// f- or t-string (e.g. the part after the `:` in an interpolation).
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct InterpolatedStringFormatSpec {
    pub range: TextRange,
    pub node_index: AtomicNodeIndex,
    /// The elements making up the format specification.
    pub elements: InterpolatedStringElements,
}
/// See also [FormattedValue](https://docs.python.org/3/library/ast.html#ast.FormattedValue)
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct InterpolatedElement {
    pub range: TextRange,
    pub node_index: AtomicNodeIndex,
    /// The interpolated expression itself.
    pub expression: Box<Expr>,
    /// Leading/trailing source text captured around the expression, if any
    /// (see [`DebugText`]).
    pub debug_text: Option<DebugText>,
    /// The conversion applied to the value ([`ConversionFlag::None`] if absent).
    pub conversion: ConversionFlag,
    /// The optional format specification for the interpolation.
    pub format_spec: Option<Box<InterpolatedStringFormatSpec>>,
}
/// A literal (non-interpolated) chunk of an f- or t-string.
///
/// An [`InterpolatedStringLiteralElement`] with an empty `value` is an
/// invalid element.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct InterpolatedStringLiteralElement {
    pub range: TextRange,
    pub node_index: AtomicNodeIndex,
    pub value: Box<str>,
}
impl InterpolatedStringLiteralElement {
    /// Returns `true` if this element is valid, i.e. its `value` is non-empty.
    pub fn is_valid(&self) -> bool {
        !self.value.is_empty()
    }
}
impl Deref for InterpolatedStringLiteralElement {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
/// Transforms a value prior to formatting it.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, is_macro::Is)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
#[repr(i8)]
#[expect(clippy::cast_possible_wrap)]
pub enum ConversionFlag {
    /// No conversion
    None = -1, // CPython uses -1
    /// Converts by calling `str(<value>)`.
    Str = b's' as i8,
    /// Converts by calling `ascii(<value>)`.
    Ascii = b'a' as i8,
    /// Converts by calling `repr(<value>)`.
    Repr = b'r' as i8,
}
impl ConversionFlag {
    /// Returns the conversion character (`s`, `a` or `r`) as a byte, or
    /// `None` if no conversion is requested.
    pub fn to_byte(&self) -> Option<u8> {
        match self {
            Self::None => None,
            // The discriminants of the remaining variants are the ASCII codes
            // of their conversion characters, so the cast is lossless.
            flag => Some(*flag as u8),
        }
    }
    /// Like [`Self::to_byte`], but returns the conversion character as a `char`.
    pub fn to_char(&self) -> Option<char> {
        Some(self.to_byte()? as char)
    }
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct DebugText {
    /// The text between the `{` and the expression node.
    pub leading: String,
    /// The text between the expression and the conversion, the `format_spec`, or the `}`, depending on what's present in the source
    pub trailing: String,
}
impl ExprFString {
    /// Returns the single [`FString`] if the f-string isn't implicitly concatenated, [`None`]
    /// otherwise.
    pub const fn as_single_part_fstring(&self) -> Option<&FString> {
        match &self.value.inner {
            FStringValueInner::Single(FStringPart::FString(fstring)) => Some(fstring),
            _ => None,
        }
    }
}
/// The value representing an [`ExprFString`].
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct FStringValue {
    // Kept private: whether the value is a single part or an implicit
    // concatenation is an invariant established by `FStringValue`'s
    // constructors.
    inner: FStringValueInner,
}
impl FStringValue {
    /// Creates a new f-string literal with a single [`FString`] part.
    pub fn single(value: FString) -> Self {
        Self {
            inner: FStringValueInner::Single(FStringPart::FString(value)),
        }
    }

    /// Creates a new f-string with the given values that represents an implicitly
    /// concatenated f-string.
    ///
    /// # Panics
    ///
    /// Panics if `values` has less than 2 elements.
    /// Use [`FStringValue::single`] instead.
    pub fn concatenated(values: Vec<FStringPart>) -> Self {
        assert!(
            values.len() > 1,
            "Use `FStringValue::single` to create single-part f-strings"
        );
        Self {
            inner: FStringValueInner::Concatenated(values),
        }
    }

    /// Returns `true` if the f-string is implicitly concatenated, `false` otherwise.
    pub fn is_implicit_concatenated(&self) -> bool {
        match &self.inner {
            FStringValueInner::Single(_) => false,
            FStringValueInner::Concatenated(_) => true,
        }
    }

    /// Returns a slice of all the [`FStringPart`]s contained in this value.
    pub fn as_slice(&self) -> &[FStringPart] {
        match &self.inner {
            // A single part is exposed as a one-element slice.
            FStringValueInner::Single(single) => std::slice::from_ref(single),
            FStringValueInner::Concatenated(parts) => parts.as_slice(),
        }
    }

    /// Returns a mutable slice of all the [`FStringPart`]s contained in this value.
    fn as_mut_slice(&mut self) -> &mut [FStringPart] {
        match &mut self.inner {
            FStringValueInner::Single(single) => std::slice::from_mut(single),
            FStringValueInner::Concatenated(parts) => parts.as_mut_slice(),
        }
    }

    /// Returns an iterator over all the [`FStringPart`]s contained in this value.
    pub fn iter(&self) -> Iter<'_, FStringPart> {
        self.as_slice().iter()
    }

    /// Returns an iterator over all the [`FStringPart`]s contained in this value
    /// that allows modification.
    pub fn iter_mut(&mut self) -> IterMut<'_, FStringPart> {
        self.as_mut_slice().iter_mut()
    }

    /// Returns an iterator over the [`StringLiteral`] parts contained in this value.
    ///
    /// Note that this doesn't recurse into the f-string parts. For example,
    ///
    /// ```python
    /// "foo" f"bar {x}" "baz" f"qux"
    /// ```
    ///
    /// Here, the string literal parts returned would be `"foo"` and `"baz"`.
    pub fn literals(&self) -> impl Iterator<Item = &StringLiteral> {
        self.iter().filter_map(FStringPart::as_literal)
    }

    /// Returns an iterator over the [`FString`] parts contained in this value.
    ///
    /// Note that this doesn't recurse into the f-string parts. For example,
    ///
    /// ```python
    /// "foo" f"bar {x}" "baz" f"qux"
    /// ```
    ///
    /// Here, the f-string parts returned would be `f"bar {x}"` and `f"qux"`.
    pub fn f_strings(&self) -> impl Iterator<Item = &FString> {
        self.iter().filter_map(FStringPart::as_f_string)
    }

    /// Returns an iterator over all the [`InterpolatedStringElement`] contained in this value.
    ///
    /// An f-string element is what makes up an [`FString`] i.e., it is either a
    /// string literal or an expression. In the following example,
    ///
    /// ```python
    /// "foo" f"bar {x}" "baz" f"qux"
    /// ```
    ///
    /// The f-string elements returned would be string literal (`"bar "`),
    /// expression (`x`) and string literal (`"qux"`).
    pub fn elements(&self) -> impl Iterator<Item = &InterpolatedStringElement> {
        self.f_strings().flat_map(|fstring| fstring.elements.iter())
    }

    /// Returns `true` if the node represents an empty f-string literal.
    ///
    /// Note that a [`FStringValue`] node will always have >= 1 [`FStringPart`]s inside it.
    /// This method checks whether the value of the concatenated parts is equal to the empty
    /// f-string, not whether the f-string has 0 parts inside it.
    pub fn is_empty_literal(&self) -> bool {
        // `iter()` already flattens the single/concatenated distinction, so one
        // uniform check over all parts suffices.
        self.iter().all(FStringPart::is_empty_literal)
    }
}
impl<'a> IntoIterator for &'a FStringValue {
    type Item = &'a FStringPart;
    type IntoIter = Iter<'a, FStringPart>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl<'a> IntoIterator for &'a mut FStringValue {
    type Item = &'a mut FStringPart;
    type IntoIter = IterMut<'a, FStringPart>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_mut_slice().iter_mut()
    }
}
/// An internal representation of [`FStringValue`].
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
enum FStringValueInner {
    /// A single f-string i.e., `f"foo"`.
    ///
    /// This is always going to be `FStringPart::FString` variant which is
    /// maintained by the `FStringValue::single` constructor.
    Single(FStringPart),
    /// An implicitly concatenated f-string i.e., `"foo" f"bar {x}"`.
    ///
    /// `FStringValue::concatenated` guarantees at least two parts here.
    Concatenated(Vec<FStringPart>),
}
/// An f-string part which is either a string literal or an f-string.
///
/// Both variants occur inside an implicitly concatenated f-string such as
/// `"foo" f"bar {x}"`.
#[derive(Clone, Debug, PartialEq, is_macro::Is)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub enum FStringPart {
    Literal(StringLiteral),
    FString(FString),
}
impl FStringPart {
    /// Returns the quote style (single or double) used by this part.
    pub fn quote_style(&self) -> Quote {
        match self {
            FStringPart::Literal(literal) => literal.flags.quote_style(),
            FStringPart::FString(fstring) => fstring.flags.quote_style(),
        }
    }

    /// Returns `true` if this part contributes nothing to the combined string:
    /// an empty literal, or an f-string with no elements.
    pub fn is_empty_literal(&self) -> bool {
        match self {
            FStringPart::Literal(literal) => literal.value.is_empty(),
            FStringPart::FString(fstring) => fstring.elements.is_empty(),
        }
    }
}
impl Ranged for FStringPart {
    /// Delegates to the source range of whichever variant is present.
    fn range(&self) -> TextRange {
        match self {
            Self::Literal(literal) => literal.range(),
            Self::FString(fstring) => fstring.range(),
        }
    }
}
impl ExprTString {
    /// Returns the single [`TString`] if the t-string isn't implicitly concatenated, [`None`]
    /// otherwise.
    pub const fn as_single_part_tstring(&self) -> Option<&TString> {
        if let TStringValueInner::Single(tstring) = &self.value.inner {
            Some(tstring)
        } else {
            None
        }
    }
}
/// The value representing an [`ExprTString`].
///
/// The inner representation (single vs. implicitly concatenated) is kept
/// private; it is established by the [`TStringValue::single`] and
/// [`TStringValue::concatenated`] constructors.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct TStringValue {
    inner: TStringValueInner,
}
impl TStringValue {
    /// Creates a new t-string literal with a single [`TString`] part.
    pub fn single(value: TString) -> Self {
        Self {
            inner: TStringValueInner::Single(value),
        }
    }

    /// Creates a new t-string with the given values that represents an implicitly
    /// concatenated t-string.
    ///
    /// # Panics
    ///
    /// Panics if `values` has less than 2 elements.
    /// Use [`TStringValue::single`] instead.
    pub fn concatenated(values: Vec<TString>) -> Self {
        assert!(
            values.len() > 1,
            "Use `TStringValue::single` to create single-part t-strings"
        );
        Self {
            inner: TStringValueInner::Concatenated(values),
        }
    }

    /// Returns `true` if the t-string is implicitly concatenated, `false` otherwise.
    pub fn is_implicit_concatenated(&self) -> bool {
        match &self.inner {
            TStringValueInner::Single(_) => false,
            TStringValueInner::Concatenated(_) => true,
        }
    }

    /// Returns a slice of all the [`TString`]s contained in this value.
    pub fn as_slice(&self) -> &[TString] {
        match &self.inner {
            // A single t-string is exposed as a one-element slice.
            TStringValueInner::Single(single) => std::slice::from_ref(single),
            TStringValueInner::Concatenated(parts) => parts.as_slice(),
        }
    }

    /// Returns a mutable slice of all the [`TString`]s contained in this value.
    fn as_mut_slice(&mut self) -> &mut [TString] {
        match &mut self.inner {
            TStringValueInner::Single(single) => std::slice::from_mut(single),
            TStringValueInner::Concatenated(parts) => parts.as_mut_slice(),
        }
    }

    /// Returns an iterator over all the [`TString`]s contained in this value.
    pub fn iter(&self) -> Iter<'_, TString> {
        self.as_slice().iter()
    }

    /// Returns an iterator over all the [`TString`]s contained in this value
    /// that allows modification.
    pub fn iter_mut(&mut self) -> IterMut<'_, TString> {
        self.as_mut_slice().iter_mut()
    }

    /// Returns an iterator over all the [`InterpolatedStringElement`] contained in this value.
    ///
    /// An interpolated string element is what makes up an [`TString`] i.e., it is either a
    /// string literal or an interpolation. In the following example,
    ///
    /// ```python
    /// t"foo" t"bar {x}" t"baz" t"qux"
    /// ```
    ///
    /// The interpolated string elements returned would be string literal (`"bar "`),
    /// interpolation (`x`) and string literal (`"qux"`).
    pub fn elements(&self) -> impl Iterator<Item = &InterpolatedStringElement> {
        self.iter().flat_map(|tstring| tstring.elements.iter())
    }

    /// Returns `true` if the node represents an empty t-string in the
    /// sense that `__iter__` returns an empty iterable.
    ///
    /// Beware that empty t-strings are still truthy, i.e. `bool(t"") == True`.
    ///
    /// Note that a [`TStringValue`] node will always contain at least one
    /// [`TString`] node. This method checks whether each of the constituent
    /// t-strings (in an implicitly concatenated t-string) are empty
    /// in the above sense.
    pub fn is_empty_iterable(&self) -> bool {
        // `iter()` flattens the single/concatenated distinction.
        self.iter().all(TString::is_empty)
    }
}
impl<'a> IntoIterator for &'a TStringValue {
    type Item = &'a TString;
    type IntoIter = Iter<'a, TString>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl<'a> IntoIterator for &'a mut TStringValue {
    type Item = &'a mut TString;
    type IntoIter = IterMut<'a, TString>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_mut_slice().iter_mut()
    }
}
/// An internal representation of [`TStringValue`].
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
enum TStringValueInner {
    /// A single t-string i.e., `t"foo"`.
    Single(TString),
    /// An implicitly concatenated t-string i.e., `t"foo" t"bar {x}"`.
    ///
    /// `TStringValue::concatenated` guarantees at least two parts here.
    Concatenated(Vec<TString>),
}
/// Common interface for querying the prefix and quoting of any string-like
/// literal (plain strings, f-strings, t-strings, bytes).
pub trait StringFlags: Copy {
    /// Does the string use single or double quotes in its opener and closer?
    fn quote_style(self) -> Quote;

    fn triple_quotes(self) -> TripleQuotes;

    fn prefix(self) -> AnyStringPrefix;

    fn is_unclosed(self) -> bool;

    /// Is the string triple-quoted, i.e.,
    /// does it begin and end with three consecutive quote characters?
    fn is_triple_quoted(self) -> bool {
        matches!(self.triple_quotes(), TripleQuotes::Yes)
    }

    /// A `str` representation of the quotes used to start and close.
    /// This does not include any prefixes the string has in its opener.
    fn quote_str(self) -> &'static str {
        if self.is_triple_quoted() {
            match self.quote_style() {
                Quote::Single => "'''",
                Quote::Double => "\"\"\"",
            }
        } else {
            match self.quote_style() {
                Quote::Single => "'",
                Quote::Double => "\"",
            }
        }
    }

    /// The length of the quotes used to start and close the string.
    /// This does not include the length of any prefixes the string has
    /// in its opener.
    fn quote_len(self) -> TextSize {
        match self.triple_quotes() {
            TripleQuotes::Yes => TextSize::new(3),
            TripleQuotes::No => TextSize::new(1),
        }
    }

    /// The total length of the string's opener,
    /// i.e., the length of the prefixes plus the length
    /// of the quotes used to open the string.
    fn opener_len(self) -> TextSize {
        self.prefix().text_len() + self.quote_len()
    }

    /// The total length of the string's closer.
    /// This is always equal to `self.quote_len()`, except when the string is unclosed,
    /// in which case the length is zero.
    fn closer_len(self) -> TextSize {
        if self.is_unclosed() {
            TextSize::new(0)
        } else {
            self.quote_len()
        }
    }

    /// Converts these flags into the type-erased [`AnyStringFlags`] form.
    fn as_any_string_flags(self) -> AnyStringFlags {
        AnyStringFlags::new(self.prefix(), self.quote_style(), self.triple_quotes())
            .with_unclosed(self.is_unclosed())
    }

    /// Returns a helper that displays `contents` wrapped in this string's
    /// prefix and quotes.
    fn display_contents(self, contents: &str) -> DisplayFlags<'_> {
        DisplayFlags {
            flags: self.as_any_string_flags(),
            contents,
        }
    }
}
/// Displays string contents wrapped in a string's prefix and quotes;
/// created via [`StringFlags::display_contents`].
pub struct DisplayFlags<'a> {
    flags: AnyStringFlags,
    contents: &'a str,
}
impl std::fmt::Display for DisplayFlags<'_> {
    /// Writes `<prefix><quote><contents><quote>`, e.g. `rf"bar {x}"`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let quote = self.flags.quote_str();
        write!(f, "{}{quote}{}{quote}", self.flags.prefix(), self.contents)
    }
}
bitflags! {
    /// Compact bit-set shared by [`FStringFlags`] and [`TStringFlags`]; the two
    /// public wrappers differ only in how they interpret the prefix bits.
    #[derive(Default, Copy, Clone, PartialEq, Eq, Hash)]
    struct InterpolatedStringFlagsInner: u8 {
        /// The f-string uses double quotes (`"`) for its opener and closer.
        /// If this flag is not set, the f-string uses single quotes (`'`)
        /// for its opener and closer.
        const DOUBLE = 1 << 0;
        /// The f-string is triple-quoted:
        /// it begins and ends with three consecutive quote characters.
        /// For example: `f"""{bar}"""`.
        const TRIPLE_QUOTED = 1 << 1;
        /// The f-string has an `r` prefix, meaning it is a raw f-string
        /// with a lowercase 'r'. For example: `rf"{bar}"`
        const R_PREFIX_LOWER = 1 << 2;
        /// The f-string has an `R` prefix, meaning it is a raw f-string
        /// with an uppercase 'r'. For example: `Rf"{bar}"`.
        /// See https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#r-strings-and-r-strings
        /// for why we track the casing of the `r` prefix,
        /// but not for any other prefix
        const R_PREFIX_UPPER = 1 << 3;
        /// The f-string is unclosed, meaning it is missing a closing quote.
        /// For example: `f"{bar`
        const UNCLOSED = 1 << 4;
    }
}

// Stateless manual impl behind the feature gate, mirroring the
// `derive(get_size2::GetSize)` applied to the surrounding types.
#[cfg(feature = "get-size")]
impl get_size2::GetSize for InterpolatedStringFlagsInner {}
/// Flags that can be queried to obtain information
/// regarding the prefixes and quotes used for an f-string.
///
/// Note: This is identical to [`TStringFlags`] except that
/// the implementation of the `prefix` method of the
/// [`StringFlags`] trait returns a variant of
/// `AnyStringPrefix::Format`.
///
/// Internally this is a thin wrapper around the shared
/// `InterpolatedStringFlagsInner` bit-set.
///
/// ## Notes on usage
///
/// If you're using a `Generator` from the `ruff_python_codegen` crate to generate a lint-rule fix
/// from an existing f-string literal, consider passing along the [`FString::flags`] field. If you
/// don't have an existing literal but have a `Checker` from the `ruff_linter` crate available,
/// consider using `Checker::default_fstring_flags` to create instances of this struct; this method
/// will properly handle nested f-strings. For usage that doesn't fit into one of these categories,
/// the public constructor [`FStringFlags::empty`] can be used.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct FStringFlags(InterpolatedStringFlagsInner);
impl FStringFlags {
    /// Construct a new [`FStringFlags`] with **no flags set**.
    ///
    /// See [`FStringFlags::with_quote_style`], [`FStringFlags::with_triple_quotes`], and
    /// [`FStringFlags::with_prefix`] for ways of setting the quote style (single or double),
    /// enabling triple quotes, and adding prefixes (such as `r`), respectively.
    ///
    /// See the documentation for [`FStringFlags`] for additional caveats on this constructor, and
    /// situations in which alternative ways to construct this struct should be used, especially
    /// when writing lint rules.
    pub fn empty() -> Self {
        Self(InterpolatedStringFlagsInner::empty())
    }

    /// Sets whether the f-string uses double (`"`) or single (`'`) quotes.
    #[must_use]
    pub fn with_quote_style(mut self, quote_style: Quote) -> Self {
        if quote_style.is_double() {
            self.0.insert(InterpolatedStringFlagsInner::DOUBLE);
        } else {
            self.0.remove(InterpolatedStringFlagsInner::DOUBLE);
        }
        self
    }

    /// Sets whether the f-string is triple-quoted.
    #[must_use]
    pub fn with_triple_quotes(mut self, triple_quotes: TripleQuotes) -> Self {
        if triple_quotes.is_yes() {
            self.0.insert(InterpolatedStringFlagsInner::TRIPLE_QUOTED);
        } else {
            self.0.remove(InterpolatedStringFlagsInner::TRIPLE_QUOTED);
        }
        self
    }

    /// Marks whether the f-string is missing its closing quote.
    #[must_use]
    pub fn with_unclosed(mut self, unclosed: bool) -> Self {
        if unclosed {
            self.0.insert(InterpolatedStringFlagsInner::UNCLOSED);
        } else {
            self.0.remove(InterpolatedStringFlagsInner::UNCLOSED);
        }
        self
    }

    /// Sets the f-string prefix, keeping at most one of the two raw-string
    /// marker bits set afterwards.
    #[must_use]
    pub fn with_prefix(mut self, prefix: FStringPrefix) -> Self {
        match prefix {
            // A regular prefix clears both raw markers.
            FStringPrefix::Regular => {
                self.0.remove(InterpolatedStringFlagsInner::R_PREFIX_LOWER);
                self.0.remove(InterpolatedStringFlagsInner::R_PREFIX_UPPER);
                self
            }
            // Exactly one raw marker is set, matching the `r`/`R` casing.
            FStringPrefix::Raw { uppercase_r } => {
                if uppercase_r {
                    self.0.insert(InterpolatedStringFlagsInner::R_PREFIX_UPPER);
                    self.0.remove(InterpolatedStringFlagsInner::R_PREFIX_LOWER);
                } else {
                    self.0.insert(InterpolatedStringFlagsInner::R_PREFIX_LOWER);
                    self.0.remove(InterpolatedStringFlagsInner::R_PREFIX_UPPER);
                }
                self
            }
        }
    }

    /// Returns the prefix recorded in the flags.
    pub const fn prefix(self) -> FStringPrefix {
        let lowercase = self
            .0
            .contains(InterpolatedStringFlagsInner::R_PREFIX_LOWER);
        let uppercase = self
            .0
            .contains(InterpolatedStringFlagsInner::R_PREFIX_UPPER);
        if lowercase {
            // The two raw markers are mutually exclusive by construction
            // (see `with_prefix`).
            debug_assert!(!uppercase);
            FStringPrefix::Raw { uppercase_r: false }
        } else if uppercase {
            FStringPrefix::Raw { uppercase_r: true }
        } else {
            FStringPrefix::Regular
        }
    }
}
// TODO(dylan): the documentation about using
// `Checker::default_tstring_flags` is not yet
// correct. This method does not yet exist because
// introducing it would emit a dead code warning
// until we call it in lint rules.
/// Flags that can be queried to obtain information
/// regarding the prefixes and quotes used for an f-string.
///
/// Note: This is identical to [`FStringFlags`] except that
/// the implementation of the `prefix` method of the
/// [`StringFlags`] trait returns a variant of
/// `AnyStringPrefix::Template`.
///
/// ## Notes on usage
///
/// If you're using a `Generator` from the `ruff_python_codegen` crate to generate a lint-rule fix
/// from an existing t-string literal, consider passing along the [`TString::flags`] field. If you
/// don't have an existing literal but have a `Checker` from the `ruff_linter` crate available,
/// consider using `Checker::default_tstring_flags` to create instances of this struct; this method
/// will properly handle nested t-strings. For usage that doesn't fit into one of these categories,
/// the public constructor [`TStringFlags::empty`] can be used.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct TStringFlags(InterpolatedStringFlagsInner);
impl TStringFlags {
    /// Construct a new [`TStringFlags`] with **no flags set**.
    ///
    /// See [`TStringFlags::with_quote_style`], [`TStringFlags::with_triple_quotes`], and
    /// [`TStringFlags::with_prefix`] for ways of setting the quote style (single or double),
    /// enabling triple quotes, and adding prefixes (such as `r`), respectively.
    ///
    /// See the documentation for [`TStringFlags`] for additional caveats on this constructor, and
    /// situations in which alternative ways to construct this struct should be used, especially
    /// when writing lint rules.
    pub fn empty() -> Self {
        Self(InterpolatedStringFlagsInner::empty())
    }

    /// Sets whether the t-string uses double (`"`) or single (`'`) quotes.
    #[must_use]
    pub fn with_quote_style(mut self, quote_style: Quote) -> Self {
        self.0.set(
            InterpolatedStringFlagsInner::DOUBLE,
            quote_style.is_double(),
        );
        self
    }

    /// Sets whether the t-string is triple-quoted.
    #[must_use]
    pub fn with_triple_quotes(mut self, triple_quotes: TripleQuotes) -> Self {
        self.0.set(
            InterpolatedStringFlagsInner::TRIPLE_QUOTED,
            triple_quotes.is_yes(),
        );
        self
    }

    /// Marks whether the t-string is missing its closing quote.
    #[must_use]
    pub fn with_unclosed(mut self, unclosed: bool) -> Self {
        self.0.set(InterpolatedStringFlagsInner::UNCLOSED, unclosed);
        self
    }
#[must_use]
pub fn with_prefix(mut self, prefix: TStringPrefix) -> Self {
match prefix {
TStringPrefix::Regular => Self(
self.0
- InterpolatedStringFlagsInner::R_PREFIX_LOWER
- InterpolatedStringFlagsInner::R_PREFIX_UPPER,
),
TStringPrefix::Raw { uppercase_r } => {
self.0
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/statement_visitor.rs | crates/ruff_python_ast/src/statement_visitor.rs | //! Specialized AST visitor trait and walk functions that only visit statements.
use crate::{self as ast, ElifElseClause, ExceptHandler, MatchCase, Stmt};
/// A trait for AST visitors that only need to visit statements.
///
/// Each method defaults to calling the corresponding free `walk_*` function,
/// which recurses into nested statement suites. Override a method to intercept
/// a node; call the `walk_*` function from the override to keep descending.
pub trait StatementVisitor<'a> {
    /// Visits every statement in `body` in source order.
    fn visit_body(&mut self, body: &'a [Stmt]) {
        walk_body(self, body);
    }
    /// Visits a single statement and, by default, its nested suites.
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        walk_stmt(self, stmt);
    }
    /// Visits the body of an `except` handler.
    fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
        walk_except_handler(self, except_handler);
    }
    /// Visits the body of an `elif`/`else` clause.
    fn visit_elif_else_clause(&mut self, elif_else_clause: &'a ElifElseClause) {
        walk_elif_else_clause(self, elif_else_clause);
    }
    /// Visits the body of a `match` case.
    fn visit_match_case(&mut self, match_case: &'a MatchCase) {
        walk_match_case(self, match_case);
    }
}
/// Visits each statement of `body` in source order.
pub fn walk_body<'a, V: StatementVisitor<'a> + ?Sized>(visitor: &mut V, body: &'a [Stmt]) {
    body.iter().for_each(|stmt| visitor.visit_stmt(stmt));
}
/// Drives `visitor` over every statement suite nested directly inside `stmt`.
///
/// Consistent with this module's purpose, expressions are never descended
/// into; statements without nested suites (assignments, `return`, imports,
/// ...) fall through to the empty catch-all arm.
pub fn walk_stmt<'a, V: StatementVisitor<'a> + ?Sized>(visitor: &mut V, stmt: &'a Stmt) {
    match stmt {
        Stmt::FunctionDef(ast::StmtFunctionDef { body, .. }) => {
            visitor.visit_body(body);
        }
        // Loops also visit their `else` suite.
        Stmt::For(ast::StmtFor { body, orelse, .. }) => {
            visitor.visit_body(body);
            visitor.visit_body(orelse);
        }
        Stmt::ClassDef(ast::StmtClassDef { body, .. }) => {
            visitor.visit_body(body);
        }
        Stmt::While(ast::StmtWhile { body, orelse, .. }) => {
            visitor.visit_body(body);
            visitor.visit_body(orelse);
        }
        // `elif`/`else` clauses are visited through their own hook so
        // implementors can intercept them individually.
        Stmt::If(ast::StmtIf {
            body,
            elif_else_clauses,
            ..
        }) => {
            visitor.visit_body(body);
            for clause in elif_else_clauses {
                visitor.visit_elif_else_clause(clause);
            }
        }
        Stmt::With(ast::StmtWith { body, .. }) => {
            visitor.visit_body(body);
        }
        Stmt::Match(ast::StmtMatch { cases, .. }) => {
            for match_case in cases {
                visitor.visit_match_case(match_case);
            }
        }
        // `try` owns four kinds of suites: the protected body, the handlers,
        // the `else` suite, and the `finally` suite.
        Stmt::Try(ast::StmtTry {
            body,
            handlers,
            orelse,
            finalbody,
            ..
        }) => {
            visitor.visit_body(body);
            for except_handler in handlers {
                visitor.visit_except_handler(except_handler);
            }
            visitor.visit_body(orelse);
            visitor.visit_body(finalbody);
        }
        // All remaining statement kinds contain no nested statement suites.
        _ => {}
    }
}
/// Visits the body of an `except` handler.
pub fn walk_except_handler<'a, V: StatementVisitor<'a> + ?Sized>(
    visitor: &mut V,
    except_handler: &'a ExceptHandler,
) {
    // `ExceptHandler` has a single variant, so the pattern is irrefutable.
    let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { body, .. }) =
        except_handler;
    visitor.visit_body(body);
}
pub fn walk_elif_else_clause<'a, V: StatementVisitor<'a> + ?Sized>(
visitor: &mut V,
elif_else_clause: &'a ElifElseClause,
) {
visitor.visit_body(&elif_else_clause.body);
}
pub fn walk_match_case<'a, V: StatementVisitor<'a> + ?Sized>(
visitor: &mut V,
match_case: &'a MatchCase,
) {
visitor.visit_body(&match_case.body);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/docstrings.rs | crates/ruff_python_ast/src/docstrings.rs | //! Utilities for parsing Python docstrings.
/// Extract the leading words from a line of text within a Python docstring.
///
/// After trimming surrounding whitespace, the line is cut at the first
/// character that is neither alphanumeric nor whitespace (e.g. punctuation).
pub fn leading_words(line: &str) -> &str {
    let trimmed = line.trim();
    match trimmed.find(|c: char| !c.is_alphanumeric() && !c.is_whitespace()) {
        Some(index) => &trimmed[..index],
        None => trimmed,
    }
}
/// Extract the leading whitespace from a line of text within a Python docstring.
///
/// Returns the whole line when it consists solely of whitespace.
pub fn leading_space(line: &str) -> &str {
    match line.find(|c: char| !c.is_whitespace()) {
        Some(index) => &line[..index],
        None => line,
    }
}
/// Replace any non-whitespace characters from an indentation string within a Python docstring.
///
/// Whitespace characters (spaces, tabs, ...) are kept as-is; every other
/// character is replaced by a single space, preserving the visual width of
/// ASCII indentation.
pub fn clean_space(indentation: &str) -> String {
    let mut cleaned = String::with_capacity(indentation.len());
    for c in indentation.chars() {
        cleaned.push(if c.is_whitespace() { c } else { ' ' });
    }
    cleaned
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/name.rs | crates/ruff_python_ast/src/name.rs | use std::borrow::{Borrow, Cow};
use std::fmt::{Debug, Display, Formatter, Write};
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use crate::Expr;
use crate::generated::ExprName;
/// An owned string type for identifier names.
///
/// Backed by [`compact_str::CompactString`], which stores short strings inline
/// rather than on the heap.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "cache", derive(ruff_macros::CacheKey))]
#[cfg_attr(feature = "salsa", derive(salsa::Update))]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
#[cfg_attr(
    feature = "schemars",
    derive(schemars::JsonSchema),
    schemars(with = "String")
)]
pub struct Name(compact_str::CompactString);
impl Name {
    /// Creates an empty `Name`.
    #[inline]
    pub fn empty() -> Self {
        Self(compact_str::CompactString::default())
    }
    /// Creates a `Name` by copying the given string.
    #[inline]
    pub fn new(name: impl AsRef<str>) -> Self {
        Self(compact_str::CompactString::new(name))
    }
    /// Creates a `Name` from a static string; usable in `const` contexts.
    #[inline]
    pub const fn new_static(name: &'static str) -> Self {
        Self(compact_str::CompactString::const_new(name))
    }
    /// Shrinks the backing storage to fit the current contents.
    pub fn shrink_to_fit(&mut self) {
        self.0.shrink_to_fit();
    }
    /// Returns the name as a string slice.
    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }
    /// Appends `s` to the end of the name.
    pub fn push_str(&mut self, s: &str) {
        self.0.push_str(s);
    }
}
// --- Formatting ---

impl Debug for Name {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Name({:?})", self.as_str())
    }
}

// Allows `write!`-style appending directly into a `Name`.
impl std::fmt::Write for Name {
    fn write_str(&mut self, s: &str) -> std::fmt::Result {
        self.0.push_str(s);
        Ok(())
    }
}

// --- Borrowing as `&str` ---

impl AsRef<str> for Name {
    #[inline]
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

impl Deref for Name {
    type Target = str;
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.as_str()
    }
}

// `Borrow<str>` lets a `Name` key be looked up with a plain `&str` in
// hashed/ordered collections.
impl Borrow<str> for Name {
    #[inline]
    fn borrow(&self) -> &str {
        self.as_str()
    }
}

// --- Conversions into `Name` ---

impl<'a> From<&'a str> for Name {
    #[inline]
    fn from(s: &'a str) -> Self {
        Name(s.into())
    }
}

impl From<String> for Name {
    #[inline]
    fn from(s: String) -> Self {
        Name(s.into())
    }
}

impl<'a> From<&'a String> for Name {
    #[inline]
    fn from(s: &'a String) -> Self {
        Name(s.into())
    }
}

impl<'a> From<Cow<'a, str>> for Name {
    #[inline]
    fn from(cow: Cow<'a, str>) -> Self {
        Name(cow.into())
    }
}

impl From<Box<str>> for Name {
    #[inline]
    fn from(b: Box<str>) -> Self {
        Name(b.into())
    }
}

impl From<compact_str::CompactString> for Name {
    #[inline]
    fn from(value: compact_str::CompactString) -> Self {
        Self(value)
    }
}

// --- Conversions out of `Name` ---

impl From<Name> for compact_str::CompactString {
    #[inline]
    fn from(name: Name) -> Self {
        name.0
    }
}

impl From<Name> for String {
    #[inline]
    fn from(name: Name) -> Self {
        name.as_str().into()
    }
}

impl FromIterator<char> for Name {
    fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> Self {
        Self(iter.into_iter().collect())
    }
}

impl std::fmt::Display for Name {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}

// --- Equality against the various string types ---
//
// Each comparison is implemented in both directions so that both
// `name == s` and `s == name` compile.

impl PartialEq<str> for Name {
    #[inline]
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}

impl PartialEq<Name> for str {
    #[inline]
    fn eq(&self, other: &Name) -> bool {
        other == self
    }
}

impl PartialEq<&str> for Name {
    #[inline]
    fn eq(&self, other: &&str) -> bool {
        self.as_str() == *other
    }
}

impl PartialEq<Name> for &str {
    #[inline]
    fn eq(&self, other: &Name) -> bool {
        other == self
    }
}

impl PartialEq<String> for Name {
    fn eq(&self, other: &String) -> bool {
        self == other.as_str()
    }
}

impl PartialEq<Name> for String {
    #[inline]
    fn eq(&self, other: &Name) -> bool {
        other == self
    }
}

impl PartialEq<&String> for Name {
    #[inline]
    fn eq(&self, other: &&String) -> bool {
        self.as_str() == *other
    }
}

impl PartialEq<Name> for &String {
    #[inline]
    fn eq(&self, other: &Name) -> bool {
        other == self
    }
}
/// A representation of a qualified name, like `typing.List`.
///
/// A leading empty segment (`["", "list"]`) marks a builtin binding, and a
/// leading `"."` segment marks an unresolved relative import; see
/// `is_builtin` and [`QualifiedName::is_unresolved_import`].
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct QualifiedName<'a>(SegmentsVec<'a>);
impl<'a> QualifiedName<'a> {
    /// Create a [`QualifiedName`] from a dotted name.
    ///
    /// ```rust
    /// # use ruff_python_ast::name::QualifiedName;
    ///
    /// assert_eq!(QualifiedName::from_dotted_name("typing.List").segments(), ["typing", "List"]);
    /// assert_eq!(QualifiedName::from_dotted_name("list").segments(), ["", "list"]);
    /// ```
    #[inline]
    pub fn from_dotted_name(name: &'a str) -> Self {
        match name.split_once('.') {
            Some((head, rest)) => {
                let mut builder = QualifiedNameBuilder::default();
                builder.push(head);
                builder.extend(rest.split('.'));
                builder.build()
            }
            // No dot: treat the name as a builtin binding.
            None => Self::builtin(name),
        }
    }

    /// Creates a name that's guaranteed not be a built in
    #[inline]
    pub fn user_defined(name: &'a str) -> Self {
        name.split('.').collect()
    }

    /// Creates a qualified name for a built in
    #[inline]
    pub fn builtin(name: &'a str) -> Self {
        debug_assert!(!name.contains('.'));
        // The leading empty segment is the builtin marker.
        Self(SegmentsVec::Stack(SegmentsStack::from_slice(&["", name])))
    }

    #[inline]
    pub fn segments(&self) -> &[&'a str] {
        self.0.as_slice()
    }

    /// If the first segment is empty, the `CallPath` represents a "builtin binding".
    ///
    /// A builtin binding is the binding that a symbol has if it was part of Python's
    /// global scope without any imports taking place. However, if builtin members are
    /// accessed explicitly via the `builtins` module, they will not have a
    /// "builtin binding", so this method will return `false`.
    ///
    /// Ex) `["", "bool"]` -> `"bool"`
    fn is_builtin(&self) -> bool {
        self.segments().first() == Some(&"")
    }

    /// If the call path is dot-prefixed, it's an unresolved relative import.
    /// Ex) `[".foo", "bar"]` -> `".foo.bar"`
    pub fn is_unresolved_import(&self) -> bool {
        self.segments().first() == Some(&".")
    }

    /// Returns `true` if `other`'s segments are a prefix of this name's segments.
    pub fn starts_with(&self, other: &QualifiedName<'_>) -> bool {
        self.segments().starts_with(other.segments())
    }

    /// Appends a member to the qualified name.
    #[must_use]
    pub fn append_member(mut self, member: &'a str) -> Self {
        self.0.push(member);
        self
    }

    /// Extends the qualified name using the given members.
    #[must_use]
    pub fn extend_members<T: IntoIterator<Item = &'a str>>(mut self, members: T) -> Self {
        self.0.extend(members);
        self
    }
}
impl Display for QualifiedName<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let segments = self.segments();
        if self.is_unresolved_import() {
            let mut iter = segments.iter();
            // Emit the leading dots of a relative import without separators,
            // then the first real segment (which terminates this loop).
            for segment in iter.by_ref() {
                if *segment == "." {
                    f.write_char('.')?;
                } else {
                    f.write_str(segment)?;
                    break;
                }
            }
            // The remaining segments are dot-separated as usual.
            for segment in iter {
                f.write_char('.')?;
                f.write_str(segment)?;
            }
        } else {
            // A leading empty segment marks a builtin binding; skip it so
            // `["", "bool"]` renders as `bool`.
            let segments = if self.is_builtin() {
                &segments[1..]
            } else {
                segments
            };
            let mut first = true;
            for segment in segments {
                if !first {
                    f.write_char('.')?;
                }
                f.write_str(segment)?;
                first = false;
            }
        }
        Ok(())
    }
}
impl<'a> FromIterator<&'a str> for QualifiedName<'a> {
fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
Self(SegmentsVec::from_iter(iter))
}
}
/// Incrementally builds a [`QualifiedName`] segment by segment.
#[derive(Debug, Clone, Default)]
pub struct QualifiedNameBuilder<'a> {
    segments: SegmentsVec<'a>,
}

impl<'a> QualifiedNameBuilder<'a> {
    /// Creates a builder with storage pre-sized for `capacity` segments.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            segments: SegmentsVec::with_capacity(capacity),
        }
    }
    /// Returns `true` if no segments have been pushed yet.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.segments.is_empty()
    }
    /// Appends a single segment.
    #[inline]
    pub fn push(&mut self, segment: &'a str) {
        self.segments.push(segment);
    }
    /// Removes the most recently pushed segment.
    #[inline]
    pub fn pop(&mut self) {
        self.segments.pop();
    }
    /// Appends all segments yielded by the iterator.
    #[inline]
    pub fn extend(&mut self, segments: impl IntoIterator<Item = &'a str>) {
        self.segments.extend(segments);
    }
    /// Appends all segments from the slice.
    #[inline]
    pub fn extend_from_slice(&mut self, segments: &[&'a str]) {
        self.segments.extend_from_slice(segments);
    }
    /// Finishes the builder, producing the [`QualifiedName`].
    pub fn build(self) -> QualifiedName<'a> {
        QualifiedName(self.segments)
    }
}
/// A dotted name (e.g. `foo.bar.baz`) read directly off an attribute-access
/// expression, without any import resolution applied.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct UnqualifiedName<'a>(SegmentsVec<'a>);

impl<'a> UnqualifiedName<'a> {
    /// Convert an `Expr` to its [`UnqualifiedName`] (like `["typing", "List"]`).
    ///
    /// Returns `None` as soon as any node in the attribute chain is neither an
    /// attribute access nor a plain name (e.g. a call or subscript).
    pub fn from_expr(expr: &'a Expr) -> Option<Self> {
        // Unroll the loop up to eight times, to match the maximum number of expected attributes.
        // In practice, unrolling appears to give about a 4x speed-up on this hot path.
        let attr1 = match expr {
            Expr::Attribute(attr1) => attr1,
            // Ex) `foo`
            Expr::Name(ExprName { id, .. }) => return Some(Self::from_slice(&[id.as_str()])),
            _ => return None,
        };
        let attr2 = match attr1.value.as_ref() {
            Expr::Attribute(attr2) => attr2,
            // Ex) `foo.bar`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[id.as_str(), attr1.attr.as_str()]));
            }
            _ => return None,
        };
        let attr3 = match attr2.value.as_ref() {
            Expr::Attribute(attr3) => attr3,
            // Ex) `foo.bar.baz`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[
                    id.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ]));
            }
            _ => return None,
        };
        let attr4 = match attr3.value.as_ref() {
            Expr::Attribute(attr4) => attr4,
            // Ex) `foo.bar.baz.bop`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[
                    id.as_str(),
                    attr3.attr.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ]));
            }
            _ => return None,
        };
        let attr5 = match attr4.value.as_ref() {
            Expr::Attribute(attr5) => attr5,
            // Ex) `foo.bar.baz.bop.bap`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[
                    id.as_str(),
                    attr4.attr.as_str(),
                    attr3.attr.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ]));
            }
            _ => return None,
        };
        let attr6 = match attr5.value.as_ref() {
            Expr::Attribute(attr6) => attr6,
            // Ex) `foo.bar.baz.bop.bap.bab`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[
                    id.as_str(),
                    attr5.attr.as_str(),
                    attr4.attr.as_str(),
                    attr3.attr.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ]));
            }
            _ => return None,
        };
        let attr7 = match attr6.value.as_ref() {
            Expr::Attribute(attr7) => attr7,
            // Ex) `foo.bar.baz.bop.bap.bab.bob`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self::from_slice(&[
                    id.as_str(),
                    attr6.attr.as_str(),
                    attr5.attr.as_str(),
                    attr4.attr.as_str(),
                    attr3.attr.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ]));
            }
            _ => return None,
        };
        let attr8 = match attr7.value.as_ref() {
            Expr::Attribute(attr8) => attr8,
            // Ex) `foo.bar.baz.bop.bap.bab.bob.bib`
            Expr::Name(ExprName { id, .. }) => {
                return Some(Self(SegmentsVec::from([
                    id.as_str(),
                    attr7.attr.as_str(),
                    attr6.attr.as_str(),
                    attr5.attr.as_str(),
                    attr4.attr.as_str(),
                    attr3.attr.as_str(),
                    attr2.attr.as_str(),
                    attr1.attr.as_str(),
                ])));
            }
            _ => return None,
        };
        // Fallback for chains longer than eight attributes: walk the rest of
        // the chain iteratively, collecting segments innermost-last, then
        // reverse into source order.
        let mut segments = Vec::with_capacity(SMALL_LEN * 2);
        let mut current = &*attr8.value;
        loop {
            current = match current {
                Expr::Attribute(attr) => {
                    segments.push(attr.attr.as_str());
                    &*attr.value
                }
                Expr::Name(ExprName { id, .. }) => {
                    segments.push(id.as_str());
                    break;
                }
                _ => {
                    return None;
                }
            }
        }
        segments.reverse();
        // Append the attributes we visited before entering the loop.
        segments.extend_from_slice(&[
            attr8.attr.as_str(),
            attr7.attr.as_str(),
            attr6.attr.as_str(),
            attr5.attr.as_str(),
            attr4.attr.as_str(),
            attr3.attr.as_str(),
            attr2.attr.as_str(),
            attr1.attr.as_str(),
        ]);
        Some(Self(SegmentsVec::from(segments)))
    }
    /// Builds an [`UnqualifiedName`] from pre-split segments.
    #[inline]
    pub fn from_slice(segments: &[&'a str]) -> Self {
        Self(SegmentsVec::from_slice(segments))
    }
    /// Returns the name's segments in source order.
    pub fn segments(&self) -> &[&'a str] {
        self.0.as_slice()
    }
}
impl Display for UnqualifiedName<'_> {
    /// Writes the dotted form of the name (e.g. `os.path.join`) without
    /// allocating an intermediate string.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut segments = self.segments().iter();
        if let Some(first) = segments.next() {
            f.write_str(first)?;
            for segment in segments {
                f.write_char('.')?;
                f.write_str(segment)?;
            }
        }
        Ok(())
    }
}
impl<'a> FromIterator<&'a str> for UnqualifiedName<'a> {
#[inline]
fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
Self(iter.into_iter().collect())
}
}
/// A smallvec like storage for qualified and unqualified name segments.
///
/// Stores up to 8 segments inline, and falls back to a heap-allocated vector for names with more segments.
///
/// ## Note
/// The implementation doesn't use `SmallVec` v1 because its type definition has a variance problem.
/// The incorrect variance leads the lifetime inference in the `SemanticModel` astray, causing
/// all sort of "strange" lifetime errors. We can switch back to `SmallVec` when v2 is released.
#[derive(Clone)]
enum SegmentsVec<'a> {
    /// Inline storage for up to `SMALL_LEN` (8) segments.
    Stack(SegmentsStack<'a>),
    /// Heap fallback once the name has more than `SMALL_LEN` segments.
    Heap(Vec<&'a str>),
}
impl<'a> SegmentsVec<'a> {
    /// Creates an empty segment vec.
    fn new() -> Self {
        Self::Stack(SegmentsStack::default())
    }
    /// Creates a segment vec that has reserved storage for up to `capacity` items.
    fn with_capacity(capacity: usize) -> Self {
        if capacity > SMALL_LEN {
            Self::Heap(Vec::with_capacity(capacity))
        } else {
            // Up to `SMALL_LEN` items always fit inline; nothing to reserve.
            Self::new()
        }
    }
    /// Returns `true` if the segments have spilled to the heap.
    #[cfg(test)]
    const fn is_spilled(&self) -> bool {
        matches!(self, Self::Heap(_))
    }
    /// Initializes the segments from a slice.
    #[inline]
    fn from_slice(slice: &[&'a str]) -> Self {
        if slice.len() > SMALL_LEN {
            Self::Heap(slice.to_vec())
        } else {
            Self::Stack(SegmentsStack::from_slice(slice))
        }
    }
    /// Returns the segments as a slice.
    #[inline]
    fn as_slice(&self) -> &[&'a str] {
        match self {
            Self::Stack(stack) => stack.as_slice(),
            Self::Heap(heap) => heap.as_slice(),
        }
    }
    /// Pushes `name` to the end of the segments.
    ///
    /// Spills to the heap if the segments are stored on the stack and the 9th segment is pushed.
    #[inline]
    fn push(&mut self, name: &'a str) {
        match self {
            Self::Stack(stack) => match stack.push(name) {
                Ok(()) => {}
                // The inline buffer overflowed: adopt the heap vector the
                // stack built (it already contains every segment).
                Err(spilled) => *self = Self::Heap(spilled),
            },
            Self::Heap(heap) => heap.push(name),
        }
    }
    /// Pops the last segment from the end and returns it.
    ///
    /// Returns `None` if the vector is empty.
    #[inline]
    fn pop(&mut self) -> Option<&'a str> {
        match self {
            Self::Stack(stack) => stack.pop(),
            Self::Heap(heap) => heap.pop(),
        }
    }
    /// Appends all segments in `slice`, spilling to the heap when needed.
    #[inline]
    fn extend_from_slice(&mut self, slice: &[&'a str]) {
        match self {
            Self::Stack(stack) => match stack.extend_from_slice(slice) {
                Ok(()) => {}
                Err(spilled) => *self = Self::Heap(spilled),
            },
            Self::Heap(heap) => heap.extend_from_slice(slice),
        }
    }
}
impl Default for SegmentsVec<'_> {
fn default() -> Self {
Self::new()
}
}
impl Debug for SegmentsVec<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Format exactly like the equivalent slice, e.g. `["os", "path"]`.
        Debug::fmt(self.as_slice(), f)
    }
}
impl<'a> Deref for SegmentsVec<'a> {
type Target = [&'a str];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl<'b> PartialEq<SegmentsVec<'b>> for SegmentsVec<'_> {
    fn eq(&self, other: &SegmentsVec<'b>) -> bool {
        // Equality is defined by segment content, not storage location.
        self.as_slice().eq(other.as_slice())
    }
}
impl Eq for SegmentsVec<'_> {}
impl Hash for SegmentsVec<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash as a slice so stack- and heap-stored names hash identically,
        // consistent with `PartialEq` above.
        Hash::hash(self.as_slice(), state);
    }
}
impl<'a> FromIterator<&'a str> for SegmentsVec<'a> {
    #[inline]
    fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
        // Start on the stack and let `extend` spill to the heap if needed.
        let mut vec = SegmentsVec::new();
        vec.extend(iter);
        vec
    }
}
impl<'a> From<[&'a str; 8]> for SegmentsVec<'a> {
#[inline]
fn from(segments: [&'a str; 8]) -> Self {
SegmentsVec::Stack(SegmentsStack {
segments,
len: segments.len(),
})
}
}
impl<'a> From<Vec<&'a str>> for SegmentsVec<'a> {
#[inline]
fn from(segments: Vec<&'a str>) -> Self {
SegmentsVec::Heap(segments)
}
}
impl<'a> Extend<&'a str> for SegmentsVec<'a> {
#[inline]
fn extend<T: IntoIterator<Item = &'a str>>(&mut self, iter: T) {
match self {
SegmentsVec::Stack(stack) => {
if let Err(segments) = stack.extend(iter) {
*self = SegmentsVec::Heap(segments);
}
}
SegmentsVec::Heap(heap) => {
heap.extend(iter);
}
}
}
}
/// Maximum number of segments stored inline before spilling to the heap.
const SMALL_LEN: usize = 8;
/// Fixed-capacity inline storage backing [`SegmentsVec::Stack`].
#[derive(Debug, Clone, Default)]
struct SegmentsStack<'a> {
    // Only `segments[..len]` holds real data; the remaining slots keep the
    // default empty string.
    segments: [&'a str; SMALL_LEN],
    len: usize,
}
impl<'a> SegmentsStack<'a> {
    /// Initializes the inline storage from `slice`.
    ///
    /// # Panics
    /// If `slice` contains more than `SMALL_LEN` segments.
    #[inline]
    fn from_slice(slice: &[&'a str]) -> Self {
        assert!(slice.len() <= SMALL_LEN);
        let mut segments: [&'a str; SMALL_LEN] = Default::default();
        segments[..slice.len()].copy_from_slice(slice);
        SegmentsStack {
            segments,
            len: slice.len(),
        }
    }
    /// Number of additional segments that still fit into the inline storage.
    const fn capacity(&self) -> usize {
        SMALL_LEN - self.len
    }
    /// The initialized prefix of the backing array.
    #[inline]
    fn as_slice(&self) -> &[&'a str] {
        &self.segments[..self.len]
    }
    /// Pushes `name` to the end of the segments.
    ///
    /// Returns `Err` with a `Vec` containing all items (including `name`) if there's not enough capacity to push the name.
    #[inline]
    fn push(&mut self, name: &'a str) -> Result<(), Vec<&'a str>> {
        if self.len < self.segments.len() {
            self.segments[self.len] = name;
            self.len += 1;
            Ok(())
        } else {
            // Full: spill everything (plus `name`) into a heap vector with
            // room to grow.
            let mut segments = Vec::with_capacity(self.len * 2);
            segments.extend_from_slice(&self.segments);
            segments.push(name);
            Err(segments)
        }
    }
    /// Reserves spaces for `additional` segments.
    ///
    /// Returns `Err` with a `Vec` containing all segments and a capacity to store `additional` segments if
    /// the stack needs to spill over to the heap to store `additional` segments.
    #[inline]
    fn reserve(&mut self, additional: usize) -> Result<(), Vec<&'a str>> {
        if self.capacity() >= additional {
            Ok(())
        } else {
            let mut segments = Vec::with_capacity(self.len + additional);
            segments.extend_from_slice(self.as_slice());
            Err(segments)
        }
    }
    /// Removes and returns the last segment, or `None` if the stack is empty.
    #[inline]
    fn pop(&mut self) -> Option<&'a str> {
        if self.len > 0 {
            self.len -= 1;
            Some(self.segments[self.len])
        } else {
            None
        }
    }
    /// Extends the segments by appending `slice` to the end.
    ///
    /// Returns `Err` with a `Vec` containing all segments and the segments in `slice` if there's not enough capacity to append the names.
    #[inline]
    fn extend_from_slice(&mut self, slice: &[&'a str]) -> Result<(), Vec<&'a str>> {
        let new_len = self.len + slice.len();
        if slice.len() <= self.capacity() {
            self.segments[self.len..new_len].copy_from_slice(slice);
            self.len = new_len;
            Ok(())
        } else {
            let mut segments = Vec::with_capacity(new_len);
            segments.extend_from_slice(self.as_slice());
            segments.extend_from_slice(slice);
            Err(segments)
        }
    }
    /// Appends all items of `iter`, spilling to a heap `Vec` (returned as
    /// `Err`) if they don't all fit inline.
    #[inline]
    fn extend<I>(&mut self, iter: I) -> Result<(), Vec<&'a str>>
    where
        I: IntoIterator<Item = &'a str>,
    {
        let mut iter = iter.into_iter();
        let (lower, _) = iter.size_hint();
        // There's not enough space to store the lower bound of items. Spill to the heap.
        if let Err(mut segments) = self.reserve(lower) {
            segments.extend(iter);
            return Err(segments);
        }
        // Copy over up to capacity items
        // (`lower` is only a hint, so the iterator may still yield more than fits).
        for name in iter.by_ref().take(self.capacity()) {
            self.segments[self.len] = name;
            self.len += 1;
        }
        let Some(item) = iter.next() else {
            // There are no more items to copy over and they all fit into capacity.
            return Ok(());
        };
        // There are more items and there's not enough capacity to store them on the stack.
        // Spill over to the heap and append the remaining items.
        let mut segments = Vec::with_capacity(self.len * 2);
        segments.extend_from_slice(self.as_slice());
        segments.push(item);
        segments.extend(iter);
        Err(segments)
    }
}
#[cfg(test)]
mod tests {
    use crate::name::SegmentsVec;
    // `is_spilled` reports whether the vec fell back to heap storage; the
    // interesting boundary in these tests is `SMALL_LEN` (8) segments.
    #[test]
    fn empty_vec() {
        let empty = SegmentsVec::new();
        assert_eq!(empty.as_slice(), &[] as &[&str]);
        assert!(!empty.is_spilled());
    }
    #[test]
    fn from_slice_stack() {
        let stack = SegmentsVec::from_slice(&["a", "b", "c"]);
        assert_eq!(stack.as_slice(), &["a", "b", "c"]);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn from_slice_stack_capacity() {
        let stack = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g", "h"]);
        assert_eq!(stack.as_slice(), &["a", "b", "c", "d", "e", "f", "g", "h"]);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn from_slice_heap() {
        let heap = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g", "h", "i"]);
        assert_eq!(
            heap.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i"]
        );
        assert!(heap.is_spilled());
    }
    #[test]
    fn push_stack() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c"]);
        stack.push("d");
        stack.push("e");
        assert_eq!(stack.as_slice(), &["a", "b", "c", "d", "e"]);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn push_stack_spill() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g"]);
        stack.push("h");
        assert!(!stack.is_spilled());
        stack.push("i");
        assert_eq!(
            stack.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i"]
        );
        assert!(stack.is_spilled());
    }
    #[test]
    fn pop_stack() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c", "d", "e"]);
        assert_eq!(stack.pop(), Some("e"));
        assert_eq!(stack.pop(), Some("d"));
        assert_eq!(stack.pop(), Some("c"));
        assert_eq!(stack.pop(), Some("b"));
        assert_eq!(stack.pop(), Some("a"));
        assert_eq!(stack.pop(), None);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn pop_heap() {
        let mut heap = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g", "h", "i"]);
        assert_eq!(heap.pop(), Some("i"));
        assert_eq!(heap.pop(), Some("h"));
        assert_eq!(heap.pop(), Some("g"));
        // Popping never converts back from heap to inline storage.
        assert!(heap.is_spilled());
    }
    #[test]
    fn extend_from_slice_stack() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c"]);
        stack.extend_from_slice(&["d", "e", "f"]);
        assert_eq!(stack.as_slice(), &["a", "b", "c", "d", "e", "f"]);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn extend_from_slice_stack_spill() {
        let mut spilled = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f"]);
        spilled.extend_from_slice(&["g", "h", "i", "j"]);
        assert_eq!(
            spilled.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
        );
        assert!(spilled.is_spilled());
    }
    #[test]
    fn extend_from_slice_heap() {
        let mut heap = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g", "h", "i"]);
        assert!(heap.is_spilled());
        heap.extend_from_slice(&["j", "k", "l"]);
        assert_eq!(
            heap.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"]
        );
    }
    #[test]
    fn extend_stack() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c"]);
        stack.extend(["d", "e", "f"]);
        assert_eq!(stack.as_slice(), &["a", "b", "c", "d", "e", "f"]);
        assert!(!stack.is_spilled());
    }
    #[test]
    fn extend_stack_spilled() {
        let mut stack = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f"]);
        stack.extend(["g", "h", "i", "j"]);
        assert_eq!(
            stack.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
        );
        assert!(stack.is_spilled());
    }
    #[test]
    fn extend_heap() {
        let mut heap = SegmentsVec::from_slice(&["a", "b", "c", "d", "e", "f", "g", "h", "i"]);
        assert!(heap.is_spilled());
        heap.extend(["j", "k", "l"]);
        assert_eq!(
            heap.as_slice(),
            &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"]
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/script.rs | crates/ruff_python_ast/src/script.rs | use std::sync::LazyLock;
use memchr::memmem::Finder;
/// Finds the opening `# /// script` pragma of a PEP 723 metadata block.
static FINDER: LazyLock<Finder> = LazyLock::new(|| Finder::new(b"# /// script"));
/// PEP 723 metadata as parsed from a `script` comment block.
///
/// See: <https://peps.python.org/pep-0723/>
///
/// Vendored from: <https://github.com/astral-sh/uv/blob/debe67ffdb0cd7835734100e909b2d8f79613743/crates/uv-scripts/src/lib.rs#L283>
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ScriptTag {
    /// The content of the script before the metadata block.
    prelude: String,
    /// The metadata block, with the leading `# `/`#` comment markers stripped.
    metadata: String,
    /// The content of the script after the metadata block.
    postlude: String,
}
impl ScriptTag {
    /// Given the contents of a Python file, extract the `script` metadata block with leading
    /// comment hashes removed, any preceding shebang or content (prelude), and the remaining Python
    /// script.
    ///
    /// Given the following input string representing the contents of a Python script:
    ///
    /// ```python
    /// #!/usr/bin/env python3
    /// # /// script
    /// # requires-python = '>=3.11'
    /// # dependencies = [
    /// # 'requests<3',
    /// # 'rich',
    /// # ]
    /// # ///
    ///
    /// import requests
    ///
    /// print("Hello, World!")
    /// ```
    ///
    /// This function would return:
    ///
    /// - Preamble: `#!/usr/bin/env python3\n`
    /// - Metadata: `requires-python = '>=3.11'\ndependencies = [\n 'requests<3',\n 'rich',\n]`
    /// - Postlude: `import requests\n\nprint("Hello, World!")\n`
    ///
    /// See: <https://peps.python.org/pep-0723/>
    pub fn parse(contents: &[u8]) -> Option<Self> {
        // Identify the opening pragma.
        let index = FINDER.find(contents)?;
        // The opening pragma must be the first line, or immediately preceded by a newline.
        if !(index == 0 || matches!(contents[index - 1], b'\r' | b'\n')) {
            return None;
        }
        // Extract the preceding content.
        let prelude = std::str::from_utf8(&contents[..index]).ok()?;
        // Decode as UTF-8.
        let contents = &contents[index..];
        let contents = std::str::from_utf8(contents).ok()?;
        let mut lines = contents.lines();
        // Ensure that the first line is exactly `# /// script`.
        if lines.next().is_none_or(|line| line != "# /// script") {
            return None;
        }
        // > Every line between these two lines (# /// TYPE and # ///) MUST be a comment starting
        // > with #. If there are characters after the # then the first character MUST be a space. The
        // > embedded content is formed by taking away the first two characters of each line if the
        // > second character is a space, otherwise just the first character (which means the line
        // > consists of only a single #).
        let mut toml = vec![];
        // Extract the content that follows the metadata block.
        let mut python_script = vec![];
        // `while let` (rather than `for`) because the loop body consumes the
        // remaining iterator via `python_script.extend(lines)` as soon as it
        // sees a line that can no longer belong to the comment block.
        while let Some(line) = lines.next() {
            // Remove the leading `#`.
            let Some(line) = line.strip_prefix('#') else {
                python_script.push(line);
                python_script.extend(lines);
                break;
            };
            // If the line is empty, continue.
            if line.is_empty() {
                toml.push("");
                continue;
            }
            // Otherwise, the line _must_ start with ` `.
            let Some(line) = line.strip_prefix(' ') else {
                python_script.push(line);
                python_script.extend(lines);
                break;
            };
            toml.push(line);
        }
        // Find the closing `# ///`. The precedence is such that we need to identify the _last_ such
        // line.
        //
        // For example, given:
        // ```python
        // # /// script
        // #
        // # ///
        // #
        // # ///
        // ```
        //
        // The latter `///` is the closing pragma
        let index = toml.iter().rev().position(|line| *line == "///")?;
        let index = toml.len() - index;
        // Discard any lines after the closing `# ///`.
        //
        // For example, given:
        // ```python
        // # /// script
        // #
        // # ///
        // #
        // #
        // ```
        //
        // We need to discard the last two lines.
        toml.truncate(index - 1);
        // Join the lines into a single string.
        // (Both the metadata and the postlude are normalized to end with `\n`.)
        let prelude = prelude.to_string();
        let metadata = toml.join("\n") + "\n";
        let postlude = python_script.join("\n") + "\n";
        Some(Self {
            prelude,
            metadata,
            postlude,
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/int.rs | crates/ruff_python_ast/src/int.rs | use std::fmt::Debug;
use std::str::FromStr;
/// A Python integer literal. Represents both small (fits in a `u64`) and large integers.
// NOTE: the doc previously said `i64`, but the "small" representation is
// `Number::Small(u64)` — see `Number` below.
#[derive(Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct Int(Number);
impl FromStr for Int {
    type Err = std::num::ParseIntError;
    /// Parse an [`Int`] from a string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.parse::<u64>() {
            Ok(small) => Ok(Int::small(small)),
            // Overflow means the value is a valid integer that merely doesn't
            // fit in 64 bits; keep its textual form instead of failing.
            Err(err)
                if matches!(
                    err.kind(),
                    std::num::IntErrorKind::PosOverflow | std::num::IntErrorKind::NegOverflow
                ) =>
            {
                Ok(Int::big(s))
            }
            Err(err) => Err(err),
        }
    }
}
impl Int {
    pub const ZERO: Int = Int(Number::Small(0));
    pub const ONE: Int = Int(Number::Small(1));
    /// Create an [`Int`] to represent a value that can be represented as a `u64`.
    fn small(value: u64) -> Self {
        Self(Number::Small(value))
    }
    /// Create an [`Int`] to represent a value that cannot be represented as a `u64`.
    fn big(value: impl Into<Box<str>>) -> Self {
        Self(Number::Big(value.into()))
    }
    /// Parse an [`Int`] from a string with a given radix, like `0x95D`.
    ///
    /// Takes, as input, the numerical portion (`95D`), the parsed base (`16`), and the entire
    /// token (`0x95D`).
    pub fn from_str_radix(
        number: &str,
        radix: u32,
        token: &str,
    ) -> Result<Self, std::num::ParseIntError> {
        match u64::from_str_radix(number, radix) {
            Ok(value) => Ok(Int::small(value)),
            Err(err) => {
                if matches!(
                    err.kind(),
                    std::num::IntErrorKind::PosOverflow | std::num::IntErrorKind::NegOverflow
                ) {
                    // A valid integer that merely overflows 64 bits: retain the
                    // original token (including any `0x`-style prefix) verbatim.
                    Ok(Int::big(token))
                } else {
                    Err(err)
                }
            }
        }
    }
    /// Shared implementation of the `as_*` accessors: return the "small" value
    /// narrowed to `T`, or `None` if the value is big or out of `T`'s range.
    #[inline]
    fn as_small<T: TryFrom<u64>>(&self) -> Option<T> {
        match &self.0 {
            Number::Small(small) => T::try_from(*small).ok(),
            Number::Big(_) => None,
        }
    }
    /// Return the [`Int`] as an u8, if it can be represented as that data type.
    pub fn as_u8(&self) -> Option<u8> {
        self.as_small()
    }
    /// Return the [`Int`] as an u16, if it can be represented as that data type.
    pub fn as_u16(&self) -> Option<u16> {
        self.as_small()
    }
    /// Return the [`Int`] as an u32, if it can be represented as that data type.
    pub fn as_u32(&self) -> Option<u32> {
        self.as_small()
    }
    /// Return the [`Int`] as an u64, if it can be represented as that data type.
    pub const fn as_u64(&self) -> Option<u64> {
        // Written out by hand (rather than via `as_small`) so it can stay `const`.
        match &self.0 {
            Number::Small(small) => Some(*small),
            Number::Big(_) => None,
        }
    }
    /// Return the [`Int`] as an usize, if it can be represented as that data type.
    pub fn as_usize(&self) -> Option<usize> {
        self.as_small()
    }
    /// Return the [`Int`] as an i8, if it can be represented as that data type.
    pub fn as_i8(&self) -> Option<i8> {
        self.as_small()
    }
    /// Return the [`Int`] as an i16, if it can be represented as that data type.
    pub fn as_i16(&self) -> Option<i16> {
        self.as_small()
    }
    /// Return the [`Int`] as an i32, if it can be represented as that data type.
    pub fn as_i32(&self) -> Option<i32> {
        self.as_small()
    }
    /// Return the [`Int`] as an i64, if it can be represented as that data type.
    pub fn as_i64(&self) -> Option<i64> {
        self.as_small()
    }
}
impl std::fmt::Display for Int {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl Debug for Int {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl PartialEq<u8> for Int {
fn eq(&self, other: &u8) -> bool {
self.as_u8() == Some(*other)
}
}
impl PartialEq<u16> for Int {
fn eq(&self, other: &u16) -> bool {
self.as_u16() == Some(*other)
}
}
impl PartialEq<u32> for Int {
fn eq(&self, other: &u32) -> bool {
self.as_u32() == Some(*other)
}
}
impl PartialEq<i8> for Int {
fn eq(&self, other: &i8) -> bool {
self.as_i8() == Some(*other)
}
}
impl PartialEq<i16> for Int {
fn eq(&self, other: &i16) -> bool {
self.as_i16() == Some(*other)
}
}
impl PartialEq<i32> for Int {
fn eq(&self, other: &i32) -> bool {
self.as_i32() == Some(*other)
}
}
impl PartialEq<i64> for Int {
fn eq(&self, other: &i64) -> bool {
self.as_i64() == Some(*other)
}
}
// Widening conversions from unsigned primitives are always lossless, so
// every one of these produces a "small" number.
impl From<u8> for Int {
    fn from(value: u8) -> Self {
        Self::small(value.into())
    }
}
impl From<u16> for Int {
    fn from(value: u16) -> Self {
        Self::small(value.into())
    }
}
impl From<u32> for Int {
    fn from(value: u32) -> Self {
        Self::small(value.into())
    }
}
impl From<u64> for Int {
    fn from(value: u64) -> Self {
        Self::small(value)
    }
}
/// Backing representation for [`Int`].
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
enum Number {
    /// A "small" number that can be represented as an `u64`.
    Small(u64),
    /// A "large" number that cannot be represented as an `u64`.
    /// Stores the literal's original text (e.g. the full `0x…` token).
    Big(Box<str>),
}
impl std::fmt::Display for Number {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Number::Small(value) => write!(f, "{value}"),
            // Big numbers already carry their textual form; emit it directly.
            Number::Big(value) => f.write_str(value),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/traversal.rs | crates/ruff_python_ast/src/traversal.rs | //! Utilities for manually traversing a Python AST.
use crate::{self as ast, AnyNodeRef, ExceptHandler, Stmt};
/// Given a [`Stmt`] and its parent, return the [`ast::Suite`] that contains the [`Stmt`].
pub fn suite<'a>(
    stmt: impl Into<AnyNodeRef<'a>>,
    parent: impl Into<AnyNodeRef<'a>>,
) -> Option<EnclosingSuite<'a>> {
    // TODO: refactor this to work without a parent, ie when `stmt` is at the top level
    let stmt = stmt.into();
    match parent.into() {
        // Nodes with a single body.
        AnyNodeRef::ModModule(ast::ModModule { body, .. }) => EnclosingSuite::new(body, stmt),
        AnyNodeRef::StmtFunctionDef(ast::StmtFunctionDef { body, .. }) => {
            EnclosingSuite::new(body, stmt)
        }
        AnyNodeRef::StmtClassDef(ast::StmtClassDef { body, .. }) => EnclosingSuite::new(body, stmt),
        // Loops: the statement can live in the body or the `else` clause.
        AnyNodeRef::StmtFor(ast::StmtFor { body, orelse, .. }) => [body, orelse]
            .iter()
            .find_map(|suite| EnclosingSuite::new(suite, stmt)),
        AnyNodeRef::StmtWhile(ast::StmtWhile { body, orelse, .. }) => [body, orelse]
            .iter()
            .find_map(|suite| EnclosingSuite::new(suite, stmt)),
        // `if`: check the `if` body, then every `elif`/`else` clause in order.
        AnyNodeRef::StmtIf(ast::StmtIf {
            body,
            elif_else_clauses,
            ..
        }) => [body]
            .into_iter()
            .chain(elif_else_clauses.iter().map(|clause| &clause.body))
            .find_map(|suite| EnclosingSuite::new(suite, stmt)),
        AnyNodeRef::StmtWith(ast::StmtWith { body, .. }) => EnclosingSuite::new(body, stmt),
        // `match`: each case arm has its own suite.
        AnyNodeRef::StmtMatch(ast::StmtMatch { cases, .. }) => cases
            .iter()
            .map(|case| &case.body)
            .find_map(|body| EnclosingSuite::new(body, stmt)),
        // `try`: body, `else`, `finally`, and every `except` handler body.
        AnyNodeRef::StmtTry(ast::StmtTry {
            body,
            handlers,
            orelse,
            finalbody,
            ..
        }) => [body, orelse, finalbody]
            .into_iter()
            .chain(
                handlers
                    .iter()
                    .filter_map(ExceptHandler::as_except_handler)
                    .map(|handler| &handler.body),
            )
            .find_map(|suite| EnclosingSuite::new(suite, stmt)),
        // All other node kinds don't contain statement suites.
        _ => None,
    }
}
/// A statement's enclosing suite together with the statement's position inside it.
pub struct EnclosingSuite<'a> {
    suite: &'a [Stmt],
    // Index of the statement of interest within `suite`.
    position: usize,
}
impl<'a> EnclosingSuite<'a> {
    /// Locates `stmt` within `suite` by node identity (pointer equality).
    ///
    /// Returns `None` when `stmt` is not a direct child of `suite`.
    pub fn new(suite: &'a [Stmt], stmt: AnyNodeRef<'a>) -> Option<Self> {
        for (position, sibling) in suite.iter().enumerate() {
            if AnyNodeRef::ptr_eq(sibling.into(), stmt) {
                return Some(EnclosingSuite { suite, position });
            }
        }
        None
    }
    /// The statement immediately following the located statement, if any.
    pub fn next_sibling(&self) -> Option<&'a Stmt> {
        self.next_siblings().first()
    }
    /// All statements following the located statement (possibly empty).
    pub fn next_siblings(&self) -> &'a [Stmt] {
        self.suite.get(self.position + 1..).unwrap_or(&[])
    }
    /// The statement immediately preceding the located statement, if any.
    pub fn previous_sibling(&self) -> Option<&'a Stmt> {
        let index = self.position.checked_sub(1)?;
        self.suite.get(index)
    }
}
impl std::ops::Deref for EnclosingSuite<'_> {
    type Target = [Stmt];
    // Lets callers use slice methods (iteration, indexing, `len`) directly on
    // the enclosing suite.
    fn deref(&self) -> &Self::Target {
        self.suite
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/str.rs | crates/ruff_python_ast/src/str.rs | use aho_corasick::{AhoCorasick, AhoCorasickKind, Anchored, Input, MatchKind, StartKind};
use std::fmt;
use std::sync::LazyLock;
use ruff_text_size::{TextLen, TextRange};
/// Enumeration of the two kinds of quotes that can be used
/// for Python string/f/t-string/bytestring literals
// `is_macro::Is` derives the `is_single()`/`is_double()` predicates.
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq, is_macro::Is)]
pub enum Quote {
    /// E.g. `'`
    Single,
    /// E.g. `"`
    #[default]
    Double,
}
impl Quote {
#[inline]
pub const fn as_char(self) -> char {
match self {
Self::Single => '\'',
Self::Double => '"',
}
}
#[inline]
pub const fn as_str(self) -> &'static str {
match self {
Self::Single => "'",
Self::Double => "\"",
}
}
#[must_use]
#[inline]
pub const fn opposite(self) -> Self {
match self {
Self::Single => Self::Double,
Self::Double => Self::Single,
}
}
#[inline]
pub const fn as_byte(self) -> u8 {
match self {
Self::Single => b'\'',
Self::Double => b'"',
}
}
}
impl fmt::Display for Quote {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Writing the static one-character string avoids formatting a `char`.
        f.write_str(self.as_str())
    }
}
impl TryFrom<char> for Quote {
type Error = ();
fn try_from(value: char) -> Result<Self, Self::Error> {
match value {
'\'' => Ok(Quote::Single),
'"' => Ok(Quote::Double),
_ => Err(()),
}
}
}
/// Whether a string literal uses triple quotes (`"""`/`'''`).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TripleQuotes {
    Yes,
    No,
}
impl TripleQuotes {
    #[must_use]
    pub const fn is_yes(self) -> bool {
        matches!(self, Self::Yes)
    }
    #[must_use]
    pub const fn is_no(self) -> bool {
        // Exactly the complement of `is_yes` — there are only two variants.
        !self.is_yes()
    }
}
/// Includes all permutations of `r`, `u`, `f`, and `fr` (`ur` is invalid, as is `uf`). This
/// includes all possible orders, and all possible casings, for both single and triple quotes.
///
/// See: <https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals>
// Ordering invariant (enforced by `tests::prefix_uniqueness`): no entry may
// start with an entry listed before it, i.e. longer prefixes come first.
#[rustfmt::skip]
const TRIPLE_QUOTE_STR_PREFIXES: &[&str] = &[
    "FR\"\"\"",
    "Fr\"\"\"",
    "fR\"\"\"",
    "fr\"\"\"",
    "RF\"\"\"",
    "Rf\"\"\"",
    "rF\"\"\"",
    "rf\"\"\"",
    "FR'''",
    "Fr'''",
    "fR'''",
    "fr'''",
    "RF'''",
    "Rf'''",
    "rF'''",
    "rf'''",
    "R\"\"\"",
    "r\"\"\"",
    "R'''",
    "r'''",
    "F\"\"\"",
    "f\"\"\"",
    "F'''",
    "f'''",
    "U\"\"\"",
    "u\"\"\"",
    "U'''",
    "u'''",
    "\"\"\"",
    "'''",
];
#[rustfmt::skip]
const SINGLE_QUOTE_STR_PREFIXES: &[&str] = &[
    "FR\"",
    "Fr\"",
    "fR\"",
    "fr\"",
    "RF\"",
    "Rf\"",
    "rF\"",
    "rf\"",
    "FR'",
    "Fr'",
    "fR'",
    "fr'",
    "RF'",
    "Rf'",
    "rF'",
    "rf'",
    "R\"",
    "r\"",
    "R'",
    "r'",
    "F\"",
    "f\"",
    "F'",
    "f'",
    "U\"",
    "u\"",
    "U'",
    "u'",
    "\"",
    "'",
];
/// Includes all permutations of `b` and `rb`. This includes all possible orders, and all possible
/// casings, for both single and triple quotes.
///
/// See: <https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals>
#[rustfmt::skip]
pub const TRIPLE_QUOTE_BYTE_PREFIXES: &[&str] = &[
    "BR\"\"\"",
    "Br\"\"\"",
    "bR\"\"\"",
    "br\"\"\"",
    "RB\"\"\"",
    "Rb\"\"\"",
    "rB\"\"\"",
    "rb\"\"\"",
    "BR'''",
    "Br'''",
    "bR'''",
    "br'''",
    "RB'''",
    "Rb'''",
    "rB'''",
    "rb'''",
    "B\"\"\"",
    "b\"\"\"",
    "B'''",
    "b'''",
];
#[rustfmt::skip]
pub const SINGLE_QUOTE_BYTE_PREFIXES: &[&str] = &[
    "BR\"",
    "Br\"",
    "bR\"",
    "br\"",
    "RB\"",
    "Rb\"",
    "rB\"",
    "rb\"",
    "BR'",
    "Br'",
    "bR'",
    "br'",
    "RB'",
    "Rb'",
    "rB'",
    "rb'",
    "B\"",
    "b\"",
    "B'",
    "b'",
];
/// Includes all permutations of `t` and `rt`. This includes all possible orders, and all possible
/// casings, for both single and triple quotes.
///
/// See: <https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals>
#[rustfmt::skip]
pub const TRIPLE_QUOTE_TEMPLATE_PREFIXES: &[&str] = &[
    "TR\"\"\"",
    "Tr\"\"\"",
    "tR\"\"\"",
    "tr\"\"\"",
    "RT\"\"\"",
    "Rt\"\"\"",
    "rT\"\"\"",
    "rt\"\"\"",
    "TR'''",
    "Tr'''",
    "tR'''",
    "tr'''",
    "RT'''",
    "Rt'''",
    "rT'''",
    "rt'''",
    "T\"\"\"",
    "t\"\"\"",
    "T'''",
    "t'''",
];
#[rustfmt::skip]
pub const SINGLE_QUOTE_TEMPLATE_PREFIXES: &[&str] = &[
    "TR\"",
    "Tr\"",
    "tR\"",
    "tr\"",
    "RT\"",
    "Rt\"",
    "rT\"",
    "rt\"",
    "TR'",
    "Tr'",
    "tR'",
    "tr'",
    "RT'",
    "Rt'",
    "rT'",
    "rt'",
    "T\"",
    "t\"",
    "T'",
    "t'",
];
/// Strip the leading and trailing quotes from a string.
/// Assumes that the string is a valid string literal, but does not verify that the string
/// is a "simple" string literal (i.e., that it does not contain any implicit concatenations).
pub fn raw_contents(contents: &str) -> Option<&str> {
    raw_contents_range(contents).map(|range| &contents[range])
}
/// Return the range between a literal's leading and trailing quotes, or `None`
/// if either quote is missing.
pub fn raw_contents_range(contents: &str) -> Option<TextRange> {
    let prefix_len = leading_quote(contents)?.text_len();
    let suffix_len = trailing_quote(contents)?.text_len();
    Some(TextRange::new(prefix_len, contents.text_len() - suffix_len))
}
/// An [`AhoCorasick`] matcher for string, template, and bytes literal prefixes.
///
/// Anchored, leftmost-longest matching ensures the longest candidate wins
/// (e.g. `rf"` rather than just `r"`). The DFA kind favors match speed.
static PREFIX_MATCHER: LazyLock<AhoCorasick> = LazyLock::new(|| {
    AhoCorasick::builder()
        .start_kind(StartKind::Anchored)
        .match_kind(MatchKind::LeftmostLongest)
        .kind(Some(AhoCorasickKind::DFA))
        .build(
            TRIPLE_QUOTE_STR_PREFIXES
                .iter()
                .chain(TRIPLE_QUOTE_BYTE_PREFIXES)
                .chain(TRIPLE_QUOTE_TEMPLATE_PREFIXES)
                .chain(SINGLE_QUOTE_STR_PREFIXES)
                .chain(SINGLE_QUOTE_BYTE_PREFIXES)
                .chain(SINGLE_QUOTE_TEMPLATE_PREFIXES),
        )
        .unwrap()
});
/// Return the leading quote for a string, template, or bytes literal (e.g., `"""`).
pub fn leading_quote(content: &str) -> Option<&str> {
    // Anchored search: the prefix must start at the very beginning.
    let input = Input::new(content).anchored(Anchored::Yes);
    PREFIX_MATCHER
        .find(input)
        .map(|m| &content[m.start()..m.end()])
}
/// Return the trailing quote string for a string, template, or bytes literal (e.g., `"""`).
pub fn trailing_quote(content: &str) -> Option<&str> {
    // Triple quotes are checked before single ones so `'''` isn't reported as `'`.
    ["'''", "\"\"\"", "'", "\""]
        .into_iter()
        .find(|quote| content.ends_with(quote))
}
/// Return `true` if the string is a triple-quote string or byte prefix.
pub fn is_triple_quote(content: &str) -> bool {
    [
        TRIPLE_QUOTE_STR_PREFIXES,
        TRIPLE_QUOTE_BYTE_PREFIXES,
        TRIPLE_QUOTE_TEMPLATE_PREFIXES,
    ]
    .iter()
    .any(|prefixes| prefixes.contains(&content))
}
#[cfg(test)]
mod tests {
    use super::{
        SINGLE_QUOTE_BYTE_PREFIXES, SINGLE_QUOTE_STR_PREFIXES, SINGLE_QUOTE_TEMPLATE_PREFIXES,
        TRIPLE_QUOTE_BYTE_PREFIXES, TRIPLE_QUOTE_STR_PREFIXES, TRIPLE_QUOTE_TEMPLATE_PREFIXES,
    };
    // Verifies the prefix lists' ordering invariant: a later entry must never
    // start with an earlier entry, i.e. longer prefixes are listed first
    // across the concatenated lists.
    #[test]
    fn prefix_uniqueness() {
        let prefixes = TRIPLE_QUOTE_STR_PREFIXES
            .iter()
            .chain(TRIPLE_QUOTE_BYTE_PREFIXES)
            .chain(TRIPLE_QUOTE_TEMPLATE_PREFIXES)
            .chain(SINGLE_QUOTE_STR_PREFIXES)
            .chain(SINGLE_QUOTE_BYTE_PREFIXES)
            .chain(SINGLE_QUOTE_TEMPLATE_PREFIXES)
            .collect::<Vec<_>>();
        for (i, prefix_i) in prefixes.iter().enumerate() {
            for (j, prefix_j) in prefixes.iter().enumerate() {
                if i > j {
                    assert!(
                        !prefix_i.starts_with(*prefix_j),
                        "Prefixes are not unique: {prefix_i} starts with {prefix_j}",
                    );
                }
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/generated.rs | crates/ruff_python_ast/src/generated.rs | // This is a generated file. Don't modify it by hand!
// Run `crates/ruff_python_ast/generate.py` to re-generate the file.
use crate::name::Name;
use crate::visitor::source_order::SourceOrderVisitor;
/// See also [mod](https://docs.python.org/3/library/ast.html#ast.mod)
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub enum Mod {
Module(crate::ModModule),
Expression(crate::ModExpression),
}
impl From<crate::ModModule> for Mod {
fn from(node: crate::ModModule) -> Self {
Self::Module(node)
}
}
impl From<crate::ModExpression> for Mod {
fn from(node: crate::ModExpression) -> Self {
Self::Expression(node)
}
}
impl ruff_text_size::Ranged for Mod {
fn range(&self) -> ruff_text_size::TextRange {
match self {
Self::Module(node) => node.range(),
Self::Expression(node) => node.range(),
}
}
}
impl crate::HasNodeIndex for Mod {
fn node_index(&self) -> &crate::AtomicNodeIndex {
match self {
Self::Module(node) => node.node_index(),
Self::Expression(node) => node.node_index(),
}
}
}
#[allow(dead_code, clippy::match_wildcard_for_single_variants)]
impl Mod {
    /// Returns `true` if this is a [`Mod::Module`].
    #[inline]
    pub const fn is_module(&self) -> bool {
        matches!(self, Self::Module(_))
    }
    /// Consumes `self`, returning the inner [`crate::ModModule`] if present.
    #[inline]
    pub fn module(self) -> Option<crate::ModModule> {
        if let Self::Module(val) = self { Some(val) } else { None }
    }
    /// Consumes `self`, returning the inner [`crate::ModModule`].
    ///
    /// # Panics
    ///
    /// If `self` is not a [`Mod::Module`].
    #[inline]
    pub fn expect_module(self) -> crate::ModModule {
        if let Self::Module(val) = self {
            val
        } else {
            panic!("called expect on {self:?}")
        }
    }
    /// Returns a mutable reference to the inner [`crate::ModModule`] if present.
    #[inline]
    pub fn as_module_mut(&mut self) -> Option<&mut crate::ModModule> {
        if let Self::Module(val) = self { Some(val) } else { None }
    }
    /// Returns a shared reference to the inner [`crate::ModModule`] if present.
    #[inline]
    pub fn as_module(&self) -> Option<&crate::ModModule> {
        if let Self::Module(val) = self { Some(val) } else { None }
    }
    /// Returns `true` if this is a [`Mod::Expression`].
    #[inline]
    pub const fn is_expression(&self) -> bool {
        matches!(self, Self::Expression(_))
    }
    /// Consumes `self`, returning the inner [`crate::ModExpression`] if present.
    #[inline]
    pub fn expression(self) -> Option<crate::ModExpression> {
        if let Self::Expression(val) = self { Some(val) } else { None }
    }
    /// Consumes `self`, returning the inner [`crate::ModExpression`].
    ///
    /// # Panics
    ///
    /// If `self` is not a [`Mod::Expression`].
    #[inline]
    pub fn expect_expression(self) -> crate::ModExpression {
        if let Self::Expression(val) = self {
            val
        } else {
            panic!("called expect on {self:?}")
        }
    }
    /// Returns a mutable reference to the inner [`crate::ModExpression`] if present.
    #[inline]
    pub fn as_expression_mut(&mut self) -> Option<&mut crate::ModExpression> {
        if let Self::Expression(val) = self { Some(val) } else { None }
    }
    /// Returns a shared reference to the inner [`crate::ModExpression`] if present.
    #[inline]
    pub fn as_expression(&self) -> Option<&crate::ModExpression> {
        if let Self::Expression(val) = self { Some(val) } else { None }
    }
}
/// See also [stmt](https://docs.python.org/3/library/ast.html#ast.stmt)
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub enum Stmt {
    /// A function definition (`def` / `async def`).
    FunctionDef(crate::StmtFunctionDef),
    /// A class definition (`class`).
    ClassDef(crate::StmtClassDef),
    /// A `return` statement.
    Return(crate::StmtReturn),
    /// A `del` statement.
    Delete(crate::StmtDelete),
    /// A `type` alias statement.
    TypeAlias(crate::StmtTypeAlias),
    /// An assignment statement (`a = b`).
    Assign(crate::StmtAssign),
    /// An augmented assignment (`a += b`).
    AugAssign(crate::StmtAugAssign),
    /// An annotated assignment (`a: T = b`).
    AnnAssign(crate::StmtAnnAssign),
    /// A `for` / `async for` loop.
    For(crate::StmtFor),
    /// A `while` loop.
    While(crate::StmtWhile),
    /// An `if` / `elif` / `else` statement.
    If(crate::StmtIf),
    /// A `with` / `async with` statement.
    With(crate::StmtWith),
    /// A `match` statement.
    Match(crate::StmtMatch),
    /// A `raise` statement.
    Raise(crate::StmtRaise),
    /// A `try` statement.
    Try(crate::StmtTry),
    /// An `assert` statement.
    Assert(crate::StmtAssert),
    /// An `import` statement.
    Import(crate::StmtImport),
    /// A `from ... import ...` statement.
    ImportFrom(crate::StmtImportFrom),
    /// A `global` statement.
    Global(crate::StmtGlobal),
    /// A `nonlocal` statement.
    Nonlocal(crate::StmtNonlocal),
    /// An expression used as a statement.
    Expr(crate::StmtExpr),
    /// A `pass` statement.
    Pass(crate::StmtPass),
    /// A `break` statement.
    Break(crate::StmtBreak),
    /// A `continue` statement.
    Continue(crate::StmtContinue),
    /// An IPython escape command.
    IpyEscapeCommand(crate::StmtIpyEscapeCommand),
}
impl From<crate::StmtFunctionDef> for Stmt {
fn from(node: crate::StmtFunctionDef) -> Self {
Self::FunctionDef(node)
}
}
impl From<crate::StmtClassDef> for Stmt {
fn from(node: crate::StmtClassDef) -> Self {
Self::ClassDef(node)
}
}
impl From<crate::StmtReturn> for Stmt {
fn from(node: crate::StmtReturn) -> Self {
Self::Return(node)
}
}
impl From<crate::StmtDelete> for Stmt {
fn from(node: crate::StmtDelete) -> Self {
Self::Delete(node)
}
}
impl From<crate::StmtTypeAlias> for Stmt {
fn from(node: crate::StmtTypeAlias) -> Self {
Self::TypeAlias(node)
}
}
impl From<crate::StmtAssign> for Stmt {
fn from(node: crate::StmtAssign) -> Self {
Self::Assign(node)
}
}
impl From<crate::StmtAugAssign> for Stmt {
fn from(node: crate::StmtAugAssign) -> Self {
Self::AugAssign(node)
}
}
impl From<crate::StmtAnnAssign> for Stmt {
fn from(node: crate::StmtAnnAssign) -> Self {
Self::AnnAssign(node)
}
}
impl From<crate::StmtFor> for Stmt {
fn from(node: crate::StmtFor) -> Self {
Self::For(node)
}
}
impl From<crate::StmtWhile> for Stmt {
fn from(node: crate::StmtWhile) -> Self {
Self::While(node)
}
}
impl From<crate::StmtIf> for Stmt {
fn from(node: crate::StmtIf) -> Self {
Self::If(node)
}
}
impl From<crate::StmtWith> for Stmt {
fn from(node: crate::StmtWith) -> Self {
Self::With(node)
}
}
impl From<crate::StmtMatch> for Stmt {
fn from(node: crate::StmtMatch) -> Self {
Self::Match(node)
}
}
impl From<crate::StmtRaise> for Stmt {
fn from(node: crate::StmtRaise) -> Self {
Self::Raise(node)
}
}
impl From<crate::StmtTry> for Stmt {
fn from(node: crate::StmtTry) -> Self {
Self::Try(node)
}
}
impl From<crate::StmtAssert> for Stmt {
fn from(node: crate::StmtAssert) -> Self {
Self::Assert(node)
}
}
impl From<crate::StmtImport> for Stmt {
fn from(node: crate::StmtImport) -> Self {
Self::Import(node)
}
}
impl From<crate::StmtImportFrom> for Stmt {
fn from(node: crate::StmtImportFrom) -> Self {
Self::ImportFrom(node)
}
}
impl From<crate::StmtGlobal> for Stmt {
fn from(node: crate::StmtGlobal) -> Self {
Self::Global(node)
}
}
impl From<crate::StmtNonlocal> for Stmt {
fn from(node: crate::StmtNonlocal) -> Self {
Self::Nonlocal(node)
}
}
impl From<crate::StmtExpr> for Stmt {
fn from(node: crate::StmtExpr) -> Self {
Self::Expr(node)
}
}
impl From<crate::StmtPass> for Stmt {
fn from(node: crate::StmtPass) -> Self {
Self::Pass(node)
}
}
impl From<crate::StmtBreak> for Stmt {
fn from(node: crate::StmtBreak) -> Self {
Self::Break(node)
}
}
impl From<crate::StmtContinue> for Stmt {
fn from(node: crate::StmtContinue) -> Self {
Self::Continue(node)
}
}
impl From<crate::StmtIpyEscapeCommand> for Stmt {
fn from(node: crate::StmtIpyEscapeCommand) -> Self {
Self::IpyEscapeCommand(node)
}
}
impl ruff_text_size::Ranged for Stmt {
    /// Delegates to the inner statement node's source range.
    fn range(&self) -> ruff_text_size::TextRange {
        // The receiver is passed in explicitly so the macro body stays
        // hygienic with respect to `self`.
        macro_rules! delegate_range {
            ($receiver:expr; $($variant:ident),+ $(,)?) => {
                match $receiver {
                    $(Self::$variant(node) => node.range(),)+
                }
            };
        }
        delegate_range!(
            self;
            FunctionDef, ClassDef, Return, Delete, TypeAlias, Assign, AugAssign, AnnAssign,
            For, While, If, With, Match, Raise, Try, Assert, Import, ImportFrom, Global,
            Nonlocal, Expr, Pass, Break, Continue, IpyEscapeCommand,
        )
    }
}
impl crate::HasNodeIndex for Stmt {
    /// Delegates to the inner statement node's index.
    fn node_index(&self) -> &crate::AtomicNodeIndex {
        // The receiver is passed in explicitly so the macro body stays
        // hygienic with respect to `self`.
        macro_rules! delegate_node_index {
            ($receiver:expr; $($variant:ident),+ $(,)?) => {
                match $receiver {
                    $(Self::$variant(node) => node.node_index(),)+
                }
            };
        }
        delegate_node_index!(
            self;
            FunctionDef, ClassDef, Return, Delete, TypeAlias, Assign, AugAssign, AnnAssign,
            For, While, If, With, Match, Raise, Try, Assert, Import, ImportFrom, Global,
            Nonlocal, Expr, Pass, Break, Continue, IpyEscapeCommand,
        )
    }
}
#[allow(dead_code, clippy::match_wildcard_for_single_variants)]
impl Stmt {
#[inline]
pub const fn is_function_def_stmt(&self) -> bool {
matches!(self, Self::FunctionDef(_))
}
#[inline]
pub fn function_def_stmt(self) -> Option<crate::StmtFunctionDef> {
match self {
Self::FunctionDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_function_def_stmt(self) -> crate::StmtFunctionDef {
match self {
Self::FunctionDef(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_function_def_stmt_mut(&mut self) -> Option<&mut crate::StmtFunctionDef> {
match self {
Self::FunctionDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_function_def_stmt(&self) -> Option<&crate::StmtFunctionDef> {
match self {
Self::FunctionDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_class_def_stmt(&self) -> bool {
matches!(self, Self::ClassDef(_))
}
#[inline]
pub fn class_def_stmt(self) -> Option<crate::StmtClassDef> {
match self {
Self::ClassDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_class_def_stmt(self) -> crate::StmtClassDef {
match self {
Self::ClassDef(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_class_def_stmt_mut(&mut self) -> Option<&mut crate::StmtClassDef> {
match self {
Self::ClassDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_class_def_stmt(&self) -> Option<&crate::StmtClassDef> {
match self {
Self::ClassDef(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_return_stmt(&self) -> bool {
matches!(self, Self::Return(_))
}
#[inline]
pub fn return_stmt(self) -> Option<crate::StmtReturn> {
match self {
Self::Return(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_return_stmt(self) -> crate::StmtReturn {
match self {
Self::Return(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_return_stmt_mut(&mut self) -> Option<&mut crate::StmtReturn> {
match self {
Self::Return(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_return_stmt(&self) -> Option<&crate::StmtReturn> {
match self {
Self::Return(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_delete_stmt(&self) -> bool {
matches!(self, Self::Delete(_))
}
#[inline]
pub fn delete_stmt(self) -> Option<crate::StmtDelete> {
match self {
Self::Delete(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_delete_stmt(self) -> crate::StmtDelete {
match self {
Self::Delete(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_delete_stmt_mut(&mut self) -> Option<&mut crate::StmtDelete> {
match self {
Self::Delete(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_delete_stmt(&self) -> Option<&crate::StmtDelete> {
match self {
Self::Delete(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_type_alias_stmt(&self) -> bool {
matches!(self, Self::TypeAlias(_))
}
#[inline]
pub fn type_alias_stmt(self) -> Option<crate::StmtTypeAlias> {
match self {
Self::TypeAlias(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_type_alias_stmt(self) -> crate::StmtTypeAlias {
match self {
Self::TypeAlias(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_type_alias_stmt_mut(&mut self) -> Option<&mut crate::StmtTypeAlias> {
match self {
Self::TypeAlias(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_type_alias_stmt(&self) -> Option<&crate::StmtTypeAlias> {
match self {
Self::TypeAlias(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_assign_stmt(&self) -> bool {
matches!(self, Self::Assign(_))
}
#[inline]
pub fn assign_stmt(self) -> Option<crate::StmtAssign> {
match self {
Self::Assign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_assign_stmt(self) -> crate::StmtAssign {
match self {
Self::Assign(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_assign_stmt_mut(&mut self) -> Option<&mut crate::StmtAssign> {
match self {
Self::Assign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_assign_stmt(&self) -> Option<&crate::StmtAssign> {
match self {
Self::Assign(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_aug_assign_stmt(&self) -> bool {
matches!(self, Self::AugAssign(_))
}
#[inline]
pub fn aug_assign_stmt(self) -> Option<crate::StmtAugAssign> {
match self {
Self::AugAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_aug_assign_stmt(self) -> crate::StmtAugAssign {
match self {
Self::AugAssign(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_aug_assign_stmt_mut(&mut self) -> Option<&mut crate::StmtAugAssign> {
match self {
Self::AugAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_aug_assign_stmt(&self) -> Option<&crate::StmtAugAssign> {
match self {
Self::AugAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_ann_assign_stmt(&self) -> bool {
matches!(self, Self::AnnAssign(_))
}
#[inline]
pub fn ann_assign_stmt(self) -> Option<crate::StmtAnnAssign> {
match self {
Self::AnnAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_ann_assign_stmt(self) -> crate::StmtAnnAssign {
match self {
Self::AnnAssign(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_ann_assign_stmt_mut(&mut self) -> Option<&mut crate::StmtAnnAssign> {
match self {
Self::AnnAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_ann_assign_stmt(&self) -> Option<&crate::StmtAnnAssign> {
match self {
Self::AnnAssign(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_for_stmt(&self) -> bool {
matches!(self, Self::For(_))
}
#[inline]
pub fn for_stmt(self) -> Option<crate::StmtFor> {
match self {
Self::For(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_for_stmt(self) -> crate::StmtFor {
match self {
Self::For(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_for_stmt_mut(&mut self) -> Option<&mut crate::StmtFor> {
match self {
Self::For(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_for_stmt(&self) -> Option<&crate::StmtFor> {
match self {
Self::For(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_while_stmt(&self) -> bool {
matches!(self, Self::While(_))
}
#[inline]
pub fn while_stmt(self) -> Option<crate::StmtWhile> {
match self {
Self::While(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_while_stmt(self) -> crate::StmtWhile {
match self {
Self::While(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_while_stmt_mut(&mut self) -> Option<&mut crate::StmtWhile> {
match self {
Self::While(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_while_stmt(&self) -> Option<&crate::StmtWhile> {
match self {
Self::While(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_if_stmt(&self) -> bool {
matches!(self, Self::If(_))
}
#[inline]
pub fn if_stmt(self) -> Option<crate::StmtIf> {
match self {
Self::If(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_if_stmt(self) -> crate::StmtIf {
match self {
Self::If(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_if_stmt_mut(&mut self) -> Option<&mut crate::StmtIf> {
match self {
Self::If(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_if_stmt(&self) -> Option<&crate::StmtIf> {
match self {
Self::If(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_with_stmt(&self) -> bool {
matches!(self, Self::With(_))
}
#[inline]
pub fn with_stmt(self) -> Option<crate::StmtWith> {
match self {
Self::With(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_with_stmt(self) -> crate::StmtWith {
match self {
Self::With(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_with_stmt_mut(&mut self) -> Option<&mut crate::StmtWith> {
match self {
Self::With(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_with_stmt(&self) -> Option<&crate::StmtWith> {
match self {
Self::With(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_match_stmt(&self) -> bool {
matches!(self, Self::Match(_))
}
#[inline]
pub fn match_stmt(self) -> Option<crate::StmtMatch> {
match self {
Self::Match(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_match_stmt(self) -> crate::StmtMatch {
match self {
Self::Match(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_match_stmt_mut(&mut self) -> Option<&mut crate::StmtMatch> {
match self {
Self::Match(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_match_stmt(&self) -> Option<&crate::StmtMatch> {
match self {
Self::Match(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_raise_stmt(&self) -> bool {
matches!(self, Self::Raise(_))
}
#[inline]
pub fn raise_stmt(self) -> Option<crate::StmtRaise> {
match self {
Self::Raise(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_raise_stmt(self) -> crate::StmtRaise {
match self {
Self::Raise(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_raise_stmt_mut(&mut self) -> Option<&mut crate::StmtRaise> {
match self {
Self::Raise(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_raise_stmt(&self) -> Option<&crate::StmtRaise> {
match self {
Self::Raise(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_try_stmt(&self) -> bool {
matches!(self, Self::Try(_))
}
#[inline]
pub fn try_stmt(self) -> Option<crate::StmtTry> {
match self {
Self::Try(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_try_stmt(self) -> crate::StmtTry {
match self {
Self::Try(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_try_stmt_mut(&mut self) -> Option<&mut crate::StmtTry> {
match self {
Self::Try(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_try_stmt(&self) -> Option<&crate::StmtTry> {
match self {
Self::Try(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_assert_stmt(&self) -> bool {
matches!(self, Self::Assert(_))
}
#[inline]
pub fn assert_stmt(self) -> Option<crate::StmtAssert> {
match self {
Self::Assert(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_assert_stmt(self) -> crate::StmtAssert {
match self {
Self::Assert(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_assert_stmt_mut(&mut self) -> Option<&mut crate::StmtAssert> {
match self {
Self::Assert(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_assert_stmt(&self) -> Option<&crate::StmtAssert> {
match self {
Self::Assert(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_import_stmt(&self) -> bool {
matches!(self, Self::Import(_))
}
#[inline]
pub fn import_stmt(self) -> Option<crate::StmtImport> {
match self {
Self::Import(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_import_stmt(self) -> crate::StmtImport {
match self {
Self::Import(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_import_stmt_mut(&mut self) -> Option<&mut crate::StmtImport> {
match self {
Self::Import(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_import_stmt(&self) -> Option<&crate::StmtImport> {
match self {
Self::Import(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_import_from_stmt(&self) -> bool {
matches!(self, Self::ImportFrom(_))
}
#[inline]
pub fn import_from_stmt(self) -> Option<crate::StmtImportFrom> {
match self {
Self::ImportFrom(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_import_from_stmt(self) -> crate::StmtImportFrom {
match self {
Self::ImportFrom(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_import_from_stmt_mut(&mut self) -> Option<&mut crate::StmtImportFrom> {
match self {
Self::ImportFrom(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_import_from_stmt(&self) -> Option<&crate::StmtImportFrom> {
match self {
Self::ImportFrom(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_global_stmt(&self) -> bool {
matches!(self, Self::Global(_))
}
#[inline]
pub fn global_stmt(self) -> Option<crate::StmtGlobal> {
match self {
Self::Global(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_global_stmt(self) -> crate::StmtGlobal {
match self {
Self::Global(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_global_stmt_mut(&mut self) -> Option<&mut crate::StmtGlobal> {
match self {
Self::Global(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_global_stmt(&self) -> Option<&crate::StmtGlobal> {
match self {
Self::Global(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_nonlocal_stmt(&self) -> bool {
matches!(self, Self::Nonlocal(_))
}
#[inline]
pub fn nonlocal_stmt(self) -> Option<crate::StmtNonlocal> {
match self {
Self::Nonlocal(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_nonlocal_stmt(self) -> crate::StmtNonlocal {
match self {
Self::Nonlocal(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_nonlocal_stmt_mut(&mut self) -> Option<&mut crate::StmtNonlocal> {
match self {
Self::Nonlocal(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_nonlocal_stmt(&self) -> Option<&crate::StmtNonlocal> {
match self {
Self::Nonlocal(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_expr_stmt(&self) -> bool {
matches!(self, Self::Expr(_))
}
#[inline]
pub fn expr_stmt(self) -> Option<crate::StmtExpr> {
match self {
Self::Expr(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_expr_stmt(self) -> crate::StmtExpr {
match self {
Self::Expr(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_expr_stmt_mut(&mut self) -> Option<&mut crate::StmtExpr> {
match self {
Self::Expr(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_expr_stmt(&self) -> Option<&crate::StmtExpr> {
match self {
Self::Expr(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_pass_stmt(&self) -> bool {
matches!(self, Self::Pass(_))
}
#[inline]
pub fn pass_stmt(self) -> Option<crate::StmtPass> {
match self {
Self::Pass(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_pass_stmt(self) -> crate::StmtPass {
match self {
Self::Pass(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_pass_stmt_mut(&mut self) -> Option<&mut crate::StmtPass> {
match self {
Self::Pass(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_pass_stmt(&self) -> Option<&crate::StmtPass> {
match self {
Self::Pass(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_break_stmt(&self) -> bool {
matches!(self, Self::Break(_))
}
#[inline]
pub fn break_stmt(self) -> Option<crate::StmtBreak> {
match self {
Self::Break(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_break_stmt(self) -> crate::StmtBreak {
match self {
Self::Break(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_break_stmt_mut(&mut self) -> Option<&mut crate::StmtBreak> {
match self {
Self::Break(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_break_stmt(&self) -> Option<&crate::StmtBreak> {
match self {
Self::Break(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_continue_stmt(&self) -> bool {
matches!(self, Self::Continue(_))
}
#[inline]
pub fn continue_stmt(self) -> Option<crate::StmtContinue> {
match self {
Self::Continue(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_continue_stmt(self) -> crate::StmtContinue {
match self {
Self::Continue(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_continue_stmt_mut(&mut self) -> Option<&mut crate::StmtContinue> {
match self {
Self::Continue(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_continue_stmt(&self) -> Option<&crate::StmtContinue> {
match self {
Self::Continue(val) => Some(val),
_ => None,
}
}
#[inline]
pub const fn is_ipy_escape_command_stmt(&self) -> bool {
matches!(self, Self::IpyEscapeCommand(_))
}
#[inline]
pub fn ipy_escape_command_stmt(self) -> Option<crate::StmtIpyEscapeCommand> {
match self {
Self::IpyEscapeCommand(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn expect_ipy_escape_command_stmt(self) -> crate::StmtIpyEscapeCommand {
match self {
Self::IpyEscapeCommand(val) => val,
_ => panic!("called expect on {self:?}"),
}
}
#[inline]
pub fn as_ipy_escape_command_stmt_mut(&mut self) -> Option<&mut crate::StmtIpyEscapeCommand> {
match self {
Self::IpyEscapeCommand(val) => Some(val),
_ => None,
}
}
#[inline]
pub fn as_ipy_escape_command_stmt(&self) -> Option<&crate::StmtIpyEscapeCommand> {
match self {
Self::IpyEscapeCommand(val) => Some(val),
_ => None,
}
}
}
/// See also [expr](https://docs.python.org/3/library/ast.html#ast.expr)
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub enum Expr {
BoolOp(crate::ExprBoolOp),
Named(crate::ExprNamed),
BinOp(crate::ExprBinOp),
UnaryOp(crate::ExprUnaryOp),
Lambda(crate::ExprLambda),
If(crate::ExprIf),
Dict(crate::ExprDict),
Set(crate::ExprSet),
ListComp(crate::ExprListComp),
SetComp(crate::ExprSetComp),
DictComp(crate::ExprDictComp),
Generator(crate::ExprGenerator),
Await(crate::ExprAwait),
Yield(crate::ExprYield),
YieldFrom(crate::ExprYieldFrom),
Compare(crate::ExprCompare),
Call(crate::ExprCall),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/token.rs | crates/ruff_python_ast/src/token.rs | //! Token kinds for Python source code created by the lexer and consumed by the `ruff_python_parser`.
//!
//! This module defines the tokens that the lexer recognizes. The tokens are
//! loosely based on the token definitions found in the [CPython source].
//!
//! [CPython source]: https://github.com/python/cpython/blob/dfc2e065a2e71011017077e549cd2f9bf4944c54/Grammar/Tokens
use std::fmt;
use bitflags::bitflags;
use crate::str::{Quote, TripleQuotes};
use crate::str_prefix::{
AnyStringPrefix, ByteStringPrefix, FStringPrefix, StringLiteralPrefix, TStringPrefix,
};
use crate::{AnyStringFlags, BoolOp, Operator, StringFlags, UnaryOp};
use ruff_text_size::{Ranged, TextRange};
mod parentheses;
mod tokens;
pub use parentheses::{parentheses_iterator, parenthesized_range};
pub use tokens::{TokenAt, TokenIterWithContext, Tokens};
/// A token produced by the lexer, identified by its kind, source range, and flags.
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct Token {
    /// The kind of the token.
    kind: TokenKind,
    /// The range of the token.
    range: TextRange,
    /// The set of flags describing this token.
    flags: TokenFlags,
}
impl Token {
    /// Creates a new token with the given kind, source range, and flags.
    pub fn new(kind: TokenKind, range: TextRange, flags: TokenFlags) -> Token {
        Self { kind, range, flags }
    }
    /// Returns the token kind.
    #[inline]
    pub const fn kind(&self) -> TokenKind {
        self.kind
    }
    /// Returns the token as a tuple of (kind, range).
    #[inline]
    pub const fn as_tuple(&self) -> (TokenKind, TextRange) {
        (self.kind, self.range)
    }
    /// Returns `true` if the current token is a triple-quoted string of any kind.
    ///
    /// # Panics
    ///
    /// If it isn't a string or any f/t-string tokens.
    pub fn is_triple_quoted_string(self) -> bool {
        self.unwrap_string_flags().is_triple_quoted()
    }
    /// Returns the [`Quote`] style for the current string token of any kind.
    ///
    /// # Panics
    ///
    /// If it isn't a string or any f/t-string tokens.
    pub fn string_quote_style(self) -> Quote {
        self.unwrap_string_flags().quote_style()
    }
    /// Returns the [`AnyStringFlags`] style for the current string token of any kind.
    ///
    /// # Panics
    ///
    /// If it isn't a string or any f/t-string tokens.
    pub fn unwrap_string_flags(self) -> AnyStringFlags {
        // `expect` states the invariant directly; `unwrap_or_else(|| panic!(..))`
        // was the non-idiomatic spelling of the same panic.
        self.string_flags().expect("token to be a string")
    }
    /// Returns the [`AnyStringFlags`] if this token is any kind of string
    /// (including f-string and t-string parts), or `None` otherwise.
    pub fn string_flags(self) -> Option<AnyStringFlags> {
        if self.is_any_string() {
            Some(self.flags.as_any_string_flags())
        } else {
            None
        }
    }
    /// Returns `true` if this is any kind of string token - including
    /// tokens in t-strings (which do not have type `str`).
    const fn is_any_string(self) -> bool {
        matches!(
            self.kind,
            TokenKind::String
                | TokenKind::FStringStart
                | TokenKind::FStringMiddle
                | TokenKind::FStringEnd
                | TokenKind::TStringStart
                | TokenKind::TStringMiddle
                | TokenKind::TStringEnd
        )
    }
}
impl Ranged for Token {
    /// Returns the token's source range.
    fn range(&self) -> TextRange {
        self.range
    }
}
impl fmt::Debug for Token {
    /// Formats the token as `Kind range`, followed by a ` (flags = A | B)`
    /// suffix when any flags are set.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?} {:?}", self.kind, self.range)?;
        if !self.flags.is_empty() {
            f.write_str(" (flags = ")?;
            for (index, (name, _)) in self.flags.iter_names().enumerate() {
                // Separator goes before every name except the first.
                if index > 0 {
                    f.write_str(" | ")?;
                }
                f.write_str(name)?;
            }
            f.write_str(")")?;
        }
        Ok(())
    }
}
/// A kind of a token.
///
/// NOTE: `is_keyword`/`is_soft_keyword`/`is_non_soft_keyword` rely on the
/// derived `PartialOrd`/`Ord`, so the declaration order of variants matters.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub enum TokenKind {
    /// Token kind for a name, commonly known as an identifier.
    Name,
    /// Token kind for an integer.
    Int,
    /// Token kind for a floating point number.
    Float,
    /// Token kind for a complex number.
    Complex,
    /// Token kind for a string.
    String,
    /// Token kind for the start of an f-string. This includes the `f`/`F`/`fr` prefix
    /// and the opening quote(s).
    FStringStart,
    /// Token kind that includes the portion of text inside the f-string that's not
    /// part of the expression part and isn't an opening or closing brace.
    FStringMiddle,
    /// Token kind for the end of an f-string. This includes the closing quote.
    FStringEnd,
    /// Token kind for the start of a t-string. This includes the `t`/`T`/`tr` prefix
    /// and the opening quote(s).
    TStringStart,
    /// Token kind that includes the portion of text inside the t-string that's not
    /// part of the interpolation part and isn't an opening or closing brace.
    TStringMiddle,
    /// Token kind for the end of a t-string. This includes the closing quote.
    TStringEnd,
    /// Token kind for a IPython escape command.
    IpyEscapeCommand,
    /// Token kind for a comment. These are filtered out of the token stream prior to parsing.
    Comment,
    /// Token kind for a newline.
    Newline,
    /// Token kind for a newline that is not a logical line break. These are filtered out of
    /// the token stream prior to parsing.
    NonLogicalNewline,
    /// Token kind for an indent.
    Indent,
    /// Token kind for a dedent.
    Dedent,
    /// Token kind marking the end of the input.
    EndOfFile,
    /// Token kind for a question mark `?`.
    Question,
    /// Token kind for an exclamation mark `!`.
    Exclamation,
    /// Token kind for a left parenthesis `(`.
    Lpar,
    /// Token kind for a right parenthesis `)`.
    Rpar,
    /// Token kind for a left square bracket `[`.
    Lsqb,
    /// Token kind for a right square bracket `]`.
    Rsqb,
    /// Token kind for a colon `:`.
    Colon,
    /// Token kind for a comma `,`.
    Comma,
    /// Token kind for a semicolon `;`.
    Semi,
    /// Token kind for plus `+`.
    Plus,
    /// Token kind for minus `-`.
    Minus,
    /// Token kind for star `*`.
    Star,
    /// Token kind for slash `/`.
    Slash,
    /// Token kind for vertical bar `|`.
    Vbar,
    /// Token kind for ampersand `&`.
    Amper,
    /// Token kind for less than `<`.
    Less,
    /// Token kind for greater than `>`.
    Greater,
    /// Token kind for equal `=`.
    Equal,
    /// Token kind for dot `.`.
    Dot,
    /// Token kind for percent `%`.
    Percent,
    /// Token kind for left bracket `{`.
    Lbrace,
    /// Token kind for right bracket `}`.
    Rbrace,
    /// Token kind for double equal `==`.
    EqEqual,
    /// Token kind for not equal `!=`.
    NotEqual,
    /// Token kind for less than or equal `<=`.
    LessEqual,
    /// Token kind for greater than or equal `>=`.
    GreaterEqual,
    /// Token kind for tilde `~`.
    Tilde,
    /// Token kind for caret `^`.
    CircumFlex,
    /// Token kind for left shift `<<`.
    LeftShift,
    /// Token kind for right shift `>>`.
    RightShift,
    /// Token kind for double star `**`.
    DoubleStar,
    /// Token kind for double star equal `**=`.
    DoubleStarEqual,
    /// Token kind for plus equal `+=`.
    PlusEqual,
    /// Token kind for minus equal `-=`.
    MinusEqual,
    /// Token kind for star equal `*=`.
    StarEqual,
    /// Token kind for slash equal `/=`.
    SlashEqual,
    /// Token kind for percent equal `%=`.
    PercentEqual,
    /// Token kind for ampersand equal `&=`.
    AmperEqual,
    /// Token kind for vertical bar equal `|=`.
    VbarEqual,
    /// Token kind for caret equal `^=`.
    CircumflexEqual,
    /// Token kind for left shift equal `<<=`.
    LeftShiftEqual,
    /// Token kind for right shift equal `>>=`.
    RightShiftEqual,
    /// Token kind for double slash `//`.
    DoubleSlash,
    /// Token kind for double slash equal `//=`.
    DoubleSlashEqual,
    /// Token kind for colon equal `:=`.
    ColonEqual,
    /// Token kind for at `@`.
    At,
    /// Token kind for at equal `@=`.
    AtEqual,
    /// Token kind for arrow `->`.
    Rarrow,
    /// Token kind for ellipsis `...`.
    Ellipsis,
    // The keywords should be sorted in alphabetical order. If the boundary tokens for the
    // "Keywords" and "Soft keywords" group change, update the related methods on `TokenKind`.
    // Keywords
    And,
    As,
    Assert,
    Async,
    Await,
    Break,
    Class,
    Continue,
    Def,
    Del,
    Elif,
    Else,
    Except,
    False,
    Finally,
    For,
    From,
    Global,
    If,
    Import,
    In,
    Is,
    Lambda,
    None,
    Nonlocal,
    Not,
    Or,
    Pass,
    Raise,
    Return,
    True,
    Try,
    While,
    With,
    Yield,
    // Soft keywords
    Case,
    Match,
    Type,
    /// Token kind for an unknown token.
    Unknown,
}
impl TokenKind {
    /// Returns `true` if this is an end of file token.
    #[inline]
    pub const fn is_eof(self) -> bool {
        matches!(self, TokenKind::EndOfFile)
    }
    /// Returns `true` if this is either a newline or non-logical newline token.
    #[inline]
    pub const fn is_any_newline(self) -> bool {
        matches!(self, TokenKind::Newline | TokenKind::NonLogicalNewline)
    }
    /// Returns `true` if the token is a keyword (including soft keywords).
    ///
    /// See also [`is_soft_keyword`], [`is_non_soft_keyword`].
    ///
    /// [`is_soft_keyword`]: TokenKind::is_soft_keyword
    /// [`is_non_soft_keyword`]: TokenKind::is_non_soft_keyword
    #[inline]
    pub fn is_keyword(self) -> bool {
        // Relies on the enum's declaration order: `And` is the first keyword
        // variant and `Type` the last soft-keyword variant (see the comment on
        // the variant list).
        TokenKind::And <= self && self <= TokenKind::Type
    }
    /// Returns `true` if the token is strictly a soft keyword.
    ///
    /// See also [`is_keyword`], [`is_non_soft_keyword`].
    ///
    /// [`is_keyword`]: TokenKind::is_keyword
    /// [`is_non_soft_keyword`]: TokenKind::is_non_soft_keyword
    #[inline]
    pub fn is_soft_keyword(self) -> bool {
        TokenKind::Case <= self && self <= TokenKind::Type
    }
    /// Returns `true` if the token is strictly a non-soft keyword.
    ///
    /// See also [`is_keyword`], [`is_soft_keyword`].
    ///
    /// [`is_keyword`]: TokenKind::is_keyword
    /// [`is_soft_keyword`]: TokenKind::is_soft_keyword
    #[inline]
    pub fn is_non_soft_keyword(self) -> bool {
        TokenKind::And <= self && self <= TokenKind::Yield
    }
    /// Returns `true` if the token is an operator or delimiter, including the
    /// keyword operators `and`, `or`, `not`, `in`, and `is`.
    #[inline]
    pub const fn is_operator(self) -> bool {
        matches!(
            self,
            TokenKind::Lpar
                | TokenKind::Rpar
                | TokenKind::Lsqb
                | TokenKind::Rsqb
                | TokenKind::Comma
                | TokenKind::Semi
                | TokenKind::Plus
                | TokenKind::Minus
                | TokenKind::Star
                | TokenKind::Slash
                | TokenKind::Vbar
                | TokenKind::Amper
                | TokenKind::Less
                | TokenKind::Greater
                | TokenKind::Equal
                | TokenKind::Dot
                | TokenKind::Percent
                | TokenKind::Lbrace
                | TokenKind::Rbrace
                | TokenKind::EqEqual
                | TokenKind::NotEqual
                | TokenKind::LessEqual
                | TokenKind::GreaterEqual
                | TokenKind::Tilde
                | TokenKind::CircumFlex
                | TokenKind::LeftShift
                | TokenKind::RightShift
                | TokenKind::DoubleStar
                | TokenKind::PlusEqual
                | TokenKind::MinusEqual
                | TokenKind::StarEqual
                | TokenKind::SlashEqual
                | TokenKind::PercentEqual
                | TokenKind::AmperEqual
                | TokenKind::VbarEqual
                | TokenKind::CircumflexEqual
                | TokenKind::LeftShiftEqual
                | TokenKind::RightShiftEqual
                | TokenKind::DoubleStarEqual
                | TokenKind::DoubleSlash
                | TokenKind::DoubleSlashEqual
                | TokenKind::At
                | TokenKind::AtEqual
                | TokenKind::Rarrow
                | TokenKind::Ellipsis
                | TokenKind::ColonEqual
                | TokenKind::Colon
                | TokenKind::And
                | TokenKind::Or
                | TokenKind::Not
                | TokenKind::In
                | TokenKind::Is
        )
    }
    /// Returns `true` if this is a singleton token i.e., `True`, `False`, or `None`.
    #[inline]
    pub const fn is_singleton(self) -> bool {
        matches!(self, TokenKind::False | TokenKind::True | TokenKind::None)
    }
    /// Returns `true` if this is a trivia token i.e., a comment or a non-logical newline.
    #[inline]
    pub const fn is_trivia(&self) -> bool {
        matches!(self, TokenKind::Comment | TokenKind::NonLogicalNewline)
    }
    /// Returns `true` if this is a comment token.
    #[inline]
    pub const fn is_comment(&self) -> bool {
        matches!(self, TokenKind::Comment)
    }
    /// Returns `true` if the token is a binary arithmetic operator
    /// (`**`, `*`, `+`, `-`, `/`, `//`, or `@`).
    #[inline]
    pub const fn is_arithmetic(self) -> bool {
        matches!(
            self,
            TokenKind::DoubleStar
                | TokenKind::Star
                | TokenKind::Plus
                | TokenKind::Minus
                | TokenKind::Slash
                | TokenKind::DoubleSlash
                | TokenKind::At
        )
    }
    /// Returns `true` if the token is a bitwise or shift operator, including
    /// the augmented-assignment forms and the unary `~`.
    #[inline]
    pub const fn is_bitwise_or_shift(self) -> bool {
        matches!(
            self,
            TokenKind::LeftShift
                | TokenKind::LeftShiftEqual
                | TokenKind::RightShift
                | TokenKind::RightShiftEqual
                | TokenKind::Amper
                | TokenKind::AmperEqual
                | TokenKind::Vbar
                | TokenKind::VbarEqual
                | TokenKind::CircumFlex
                | TokenKind::CircumflexEqual
                | TokenKind::Tilde
        )
    }
    /// Returns `true` if the current token is a unary arithmetic operator.
    #[inline]
    pub const fn is_unary_arithmetic_operator(self) -> bool {
        matches!(self, TokenKind::Plus | TokenKind::Minus)
    }
    /// Returns `true` if the token marks the end of an f-string or t-string.
    #[inline]
    pub const fn is_interpolated_string_end(self) -> bool {
        matches!(self, TokenKind::FStringEnd | TokenKind::TStringEnd)
    }
    /// Returns the [`UnaryOp`] that corresponds to this token kind, if it is a unary arithmetic
    /// operator, otherwise return [None].
    ///
    /// Use [`as_unary_operator`] to match against any unary operator.
    ///
    /// [`as_unary_operator`]: TokenKind::as_unary_operator
    #[inline]
    pub const fn as_unary_arithmetic_operator(self) -> Option<UnaryOp> {
        Some(match self {
            TokenKind::Plus => UnaryOp::UAdd,
            TokenKind::Minus => UnaryOp::USub,
            _ => return None,
        })
    }
    /// Returns the [`UnaryOp`] that corresponds to this token kind, if it is a unary operator,
    /// otherwise return [None].
    ///
    /// Use [`as_unary_arithmetic_operator`] to match against only an arithmetic unary operator.
    ///
    /// [`as_unary_arithmetic_operator`]: TokenKind::as_unary_arithmetic_operator
    #[inline]
    pub const fn as_unary_operator(self) -> Option<UnaryOp> {
        Some(match self {
            TokenKind::Plus => UnaryOp::UAdd,
            TokenKind::Minus => UnaryOp::USub,
            TokenKind::Tilde => UnaryOp::Invert,
            TokenKind::Not => UnaryOp::Not,
            _ => return None,
        })
    }
    /// Returns the [`BoolOp`] that corresponds to this token kind, if it is a boolean operator,
    /// otherwise return [None].
    #[inline]
    pub const fn as_bool_operator(self) -> Option<BoolOp> {
        Some(match self {
            TokenKind::And => BoolOp::And,
            TokenKind::Or => BoolOp::Or,
            _ => return None,
        })
    }
    /// Returns the binary [`Operator`] that corresponds to the current token, if it's a binary
    /// operator, otherwise return [None].
    ///
    /// Use [`as_augmented_assign_operator`] to match against an augmented assignment token.
    ///
    /// [`as_augmented_assign_operator`]: TokenKind::as_augmented_assign_operator
    // `#[inline]` added for consistency: every other accessor in this impl is inlined.
    #[inline]
    pub const fn as_binary_operator(self) -> Option<Operator> {
        Some(match self {
            TokenKind::Plus => Operator::Add,
            TokenKind::Minus => Operator::Sub,
            TokenKind::Star => Operator::Mult,
            TokenKind::At => Operator::MatMult,
            TokenKind::DoubleStar => Operator::Pow,
            TokenKind::Slash => Operator::Div,
            TokenKind::DoubleSlash => Operator::FloorDiv,
            TokenKind::Percent => Operator::Mod,
            TokenKind::Amper => Operator::BitAnd,
            TokenKind::Vbar => Operator::BitOr,
            TokenKind::CircumFlex => Operator::BitXor,
            TokenKind::LeftShift => Operator::LShift,
            TokenKind::RightShift => Operator::RShift,
            _ => return None,
        })
    }
    /// Returns the [`Operator`] that corresponds to this token kind, if it is
    /// an augmented assignment operator, or [`None`] otherwise.
    #[inline]
    pub const fn as_augmented_assign_operator(self) -> Option<Operator> {
        Some(match self {
            TokenKind::PlusEqual => Operator::Add,
            TokenKind::MinusEqual => Operator::Sub,
            TokenKind::StarEqual => Operator::Mult,
            TokenKind::AtEqual => Operator::MatMult,
            TokenKind::DoubleStarEqual => Operator::Pow,
            TokenKind::SlashEqual => Operator::Div,
            TokenKind::DoubleSlashEqual => Operator::FloorDiv,
            TokenKind::PercentEqual => Operator::Mod,
            TokenKind::AmperEqual => Operator::BitAnd,
            TokenKind::VbarEqual => Operator::BitOr,
            TokenKind::CircumflexEqual => Operator::BitXor,
            TokenKind::LeftShiftEqual => Operator::LShift,
            TokenKind::RightShiftEqual => Operator::RShift,
            _ => return None,
        })
    }
}
impl From<BoolOp> for TokenKind {
    /// Maps a boolean operator back to the keyword token that spells it.
    #[inline]
    fn from(op: BoolOp) -> Self {
        if matches!(op, BoolOp::And) {
            TokenKind::And
        } else {
            TokenKind::Or
        }
    }
}
impl From<UnaryOp> for TokenKind {
    /// Maps a unary operator back to the token that spells it.
    #[inline]
    fn from(op: UnaryOp) -> Self {
        match op {
            // Arithmetic sign operators.
            UnaryOp::UAdd => TokenKind::Plus,
            UnaryOp::USub => TokenKind::Minus,
            // Bitwise and logical negation.
            UnaryOp::Invert => TokenKind::Tilde,
            UnaryOp::Not => TokenKind::Not,
        }
    }
}
impl From<Operator> for TokenKind {
    /// Maps a binary operator back to the token that spells it.
    #[inline]
    fn from(op: Operator) -> Self {
        match op {
            // Arithmetic operators.
            Operator::Add => TokenKind::Plus,
            Operator::Sub => TokenKind::Minus,
            Operator::Mult => TokenKind::Star,
            Operator::Div => TokenKind::Slash,
            Operator::FloorDiv => TokenKind::DoubleSlash,
            Operator::Mod => TokenKind::Percent,
            Operator::Pow => TokenKind::DoubleStar,
            Operator::MatMult => TokenKind::At,
            // Bitwise and shift operators.
            Operator::BitAnd => TokenKind::Amper,
            Operator::BitOr => TokenKind::Vbar,
            Operator::BitXor => TokenKind::CircumFlex,
            Operator::LShift => TokenKind::LeftShift,
            Operator::RShift => TokenKind::RightShift,
        }
    }
}
// Human-readable rendering of a token kind: punctuation and keywords are shown
// verbatim in backticks, all other kinds by a short description.
impl fmt::Display for TokenKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let value = match self {
            TokenKind::Unknown => "Unknown",
            TokenKind::Newline => "newline",
            TokenKind::NonLogicalNewline => "NonLogicalNewline",
            TokenKind::Indent => "indent",
            TokenKind::Dedent => "dedent",
            TokenKind::EndOfFile => "end of file",
            TokenKind::Name => "name",
            TokenKind::Int => "int",
            TokenKind::Float => "float",
            TokenKind::Complex => "complex",
            TokenKind::String => "string",
            TokenKind::FStringStart => "FStringStart",
            TokenKind::FStringMiddle => "FStringMiddle",
            TokenKind::FStringEnd => "FStringEnd",
            TokenKind::TStringStart => "TStringStart",
            TokenKind::TStringMiddle => "TStringMiddle",
            TokenKind::TStringEnd => "TStringEnd",
            TokenKind::IpyEscapeCommand => "IPython escape command",
            TokenKind::Comment => "comment",
            TokenKind::Question => "`?`",
            TokenKind::Exclamation => "`!`",
            TokenKind::Lpar => "`(`",
            TokenKind::Rpar => "`)`",
            TokenKind::Lsqb => "`[`",
            TokenKind::Rsqb => "`]`",
            TokenKind::Lbrace => "`{`",
            TokenKind::Rbrace => "`}`",
            TokenKind::Equal => "`=`",
            TokenKind::ColonEqual => "`:=`",
            TokenKind::Dot => "`.`",
            TokenKind::Colon => "`:`",
            TokenKind::Semi => "`;`",
            TokenKind::Comma => "`,`",
            TokenKind::Rarrow => "`->`",
            TokenKind::Plus => "`+`",
            TokenKind::Minus => "`-`",
            TokenKind::Star => "`*`",
            TokenKind::DoubleStar => "`**`",
            TokenKind::Slash => "`/`",
            TokenKind::DoubleSlash => "`//`",
            TokenKind::Percent => "`%`",
            TokenKind::Vbar => "`|`",
            TokenKind::Amper => "`&`",
            TokenKind::CircumFlex => "`^`",
            TokenKind::LeftShift => "`<<`",
            TokenKind::RightShift => "`>>`",
            TokenKind::Tilde => "`~`",
            TokenKind::At => "`@`",
            TokenKind::Less => "`<`",
            TokenKind::Greater => "`>`",
            TokenKind::EqEqual => "`==`",
            TokenKind::NotEqual => "`!=`",
            TokenKind::LessEqual => "`<=`",
            TokenKind::GreaterEqual => "`>=`",
            TokenKind::PlusEqual => "`+=`",
            TokenKind::MinusEqual => "`-=`",
            TokenKind::StarEqual => "`*=`",
            TokenKind::DoubleStarEqual => "`**=`",
            TokenKind::SlashEqual => "`/=`",
            TokenKind::DoubleSlashEqual => "`//=`",
            TokenKind::PercentEqual => "`%=`",
            TokenKind::VbarEqual => "`|=`",
            TokenKind::AmperEqual => "`&=`",
            TokenKind::CircumflexEqual => "`^=`",
            TokenKind::LeftShiftEqual => "`<<=`",
            TokenKind::RightShiftEqual => "`>>=`",
            TokenKind::AtEqual => "`@=`",
            TokenKind::Ellipsis => "`...`",
            TokenKind::False => "`False`",
            TokenKind::None => "`None`",
            TokenKind::True => "`True`",
            TokenKind::And => "`and`",
            TokenKind::As => "`as`",
            TokenKind::Assert => "`assert`",
            TokenKind::Async => "`async`",
            TokenKind::Await => "`await`",
            TokenKind::Break => "`break`",
            TokenKind::Class => "`class`",
            TokenKind::Continue => "`continue`",
            TokenKind::Def => "`def`",
            TokenKind::Del => "`del`",
            TokenKind::Elif => "`elif`",
            TokenKind::Else => "`else`",
            TokenKind::Except => "`except`",
            TokenKind::Finally => "`finally`",
            TokenKind::For => "`for`",
            TokenKind::From => "`from`",
            TokenKind::Global => "`global`",
            TokenKind::If => "`if`",
            TokenKind::Import => "`import`",
            TokenKind::In => "`in`",
            TokenKind::Is => "`is`",
            TokenKind::Lambda => "`lambda`",
            TokenKind::Nonlocal => "`nonlocal`",
            TokenKind::Not => "`not`",
            TokenKind::Or => "`or`",
            TokenKind::Pass => "`pass`",
            TokenKind::Raise => "`raise`",
            TokenKind::Return => "`return`",
            TokenKind::Try => "`try`",
            TokenKind::While => "`while`",
            TokenKind::Match => "`match`",
            TokenKind::Type => "`type`",
            TokenKind::Case => "`case`",
            TokenKind::With => "`with`",
            TokenKind::Yield => "`yield`",
        };
        f.write_str(value)
    }
}
bitflags! {
    /// Bit flags describing the quote style and prefix of a string-like token.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct TokenFlags: u16 {
        /// The token is a string with double quotes (`"`).
        const DOUBLE_QUOTES = 1 << 0;
        /// The token is a triple-quoted string i.e., it starts and ends with three consecutive
        /// quote characters (`"""` or `'''`).
        const TRIPLE_QUOTED_STRING = 1 << 1;
        /// The token is a unicode string i.e., prefixed with `u` or `U`
        const UNICODE_STRING = 1 << 2;
        /// The token is a byte string i.e., prefixed with `b` or `B`
        const BYTE_STRING = 1 << 3;
        /// The token is an f-string i.e., prefixed with `f` or `F`
        const F_STRING = 1 << 4;
        /// The token is a t-string i.e., prefixed with `t` or `T`
        const T_STRING = 1 << 5;
        /// The token is a raw string and the prefix character is in lowercase.
        const RAW_STRING_LOWERCASE = 1 << 6;
        /// The token is a raw string and the prefix character is in uppercase.
        const RAW_STRING_UPPERCASE = 1 << 7;
        /// String without matching closing quote(s)
        const UNCLOSED_STRING = 1 << 8;
        /// The token is a raw string i.e., prefixed with `r` or `R`
        // Composite mask covering both raw-string case variants.
        const RAW_STRING = Self::RAW_STRING_LOWERCASE.bits() | Self::RAW_STRING_UPPERCASE.bits();
    }
}
// Flags hold no heap data, so the trait's default method implementations suffice.
#[cfg(feature = "get-size")]
impl get_size2::GetSize for TokenFlags {}
impl StringFlags for TokenFlags {
fn quote_style(self) -> Quote {
if self.intersects(TokenFlags::DOUBLE_QUOTES) {
Quote::Double
} else {
Quote::Single
}
}
fn triple_quotes(self) -> TripleQuotes {
if self.intersects(TokenFlags::TRIPLE_QUOTED_STRING) {
TripleQuotes::Yes
} else {
TripleQuotes::No
}
}
fn prefix(self) -> AnyStringPrefix {
if self.intersects(TokenFlags::F_STRING) {
if self.intersects(TokenFlags::RAW_STRING_LOWERCASE) {
AnyStringPrefix::Format(FStringPrefix::Raw { uppercase_r: false })
} else if self.intersects(TokenFlags::RAW_STRING_UPPERCASE) {
AnyStringPrefix::Format(FStringPrefix::Raw { uppercase_r: true })
} else {
AnyStringPrefix::Format(FStringPrefix::Regular)
}
} else if self.intersects(TokenFlags::T_STRING) {
if self.intersects(TokenFlags::RAW_STRING_LOWERCASE) {
AnyStringPrefix::Template(TStringPrefix::Raw { uppercase_r: false })
} else if self.intersects(TokenFlags::RAW_STRING_UPPERCASE) {
AnyStringPrefix::Template(TStringPrefix::Raw { uppercase_r: true })
} else {
AnyStringPrefix::Template(TStringPrefix::Regular)
}
} else if self.intersects(TokenFlags::BYTE_STRING) {
if self.intersects(TokenFlags::RAW_STRING_LOWERCASE) {
AnyStringPrefix::Bytes(ByteStringPrefix::Raw { uppercase_r: false })
} else if self.intersects(TokenFlags::RAW_STRING_UPPERCASE) {
AnyStringPrefix::Bytes(ByteStringPrefix::Raw { uppercase_r: true })
} else {
AnyStringPrefix::Bytes(ByteStringPrefix::Regular)
}
} else if self.intersects(TokenFlags::RAW_STRING_LOWERCASE) {
AnyStringPrefix::Regular(StringLiteralPrefix::Raw { uppercase: false })
} else if self.intersects(TokenFlags::RAW_STRING_UPPERCASE) {
AnyStringPrefix::Regular(StringLiteralPrefix::Raw { uppercase: true })
} else if self.intersects(TokenFlags::UNICODE_STRING) {
AnyStringPrefix::Regular(StringLiteralPrefix::Unicode)
} else {
AnyStringPrefix::Regular(StringLiteralPrefix::Empty)
}
}
fn is_unclosed(self) -> bool {
self.intersects(TokenFlags::UNCLOSED_STRING)
}
}
impl TokenFlags {
    /// Returns `true` if the token is an f-string.
    pub const fn is_f_string(self) -> bool {
        self.intersects(TokenFlags::F_STRING)
    }
    /// Returns `true` if the token is a t-string.
    pub const fn is_t_string(self) -> bool {
        self.intersects(TokenFlags::T_STRING)
    }
    /// Returns `true` if the token is an interpolated string i.e., an f-string
    /// or a t-string.
    // Doc fixed: the previous comment was copy-pasted from `is_t_string`.
    pub const fn is_interpolated_string(self) -> bool {
        self.intersects(TokenFlags::T_STRING.union(TokenFlags::F_STRING))
    }
    /// Returns `true` if the token is a triple-quoted interpolated string
    /// (f-string or t-string).
    // Made `const` for consistency with the other predicates; both operands of
    // the `&&` are const-evaluable.
    pub const fn is_triple_quoted_interpolated_string(self) -> bool {
        self.intersects(TokenFlags::TRIPLE_QUOTED_STRING) && self.is_interpolated_string()
    }
    /// Returns `true` if the token is a raw string.
    pub const fn is_raw_string(self) -> bool {
        self.intersects(TokenFlags::RAW_STRING)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/visitor/source_order.rs | crates/ruff_python_ast/src/visitor/source_order.rs | use crate::{
Alias, Arguments, BoolOp, BytesLiteral, CmpOp, Comprehension, Decorator, ElifElseClause,
ExceptHandler, Expr, FString, InterpolatedStringElement, Keyword, MatchCase, Mod, Operator,
Parameter, ParameterWithDefault, Parameters, Pattern, PatternArguments, PatternKeyword,
Singleton, Stmt, StringLiteral, TString, TypeParam, TypeParams, UnaryOp, WithItem,
};
use crate::{AnyNodeRef, Identifier};
/// Visitor that traverses all nodes recursively in the order they appear in the source.
///
/// If you need a visitor that visits the nodes in the order they're evaluated at runtime,
/// use [`Visitor`](super::Visitor) instead.
pub trait SourceOrderVisitor<'a> {
    /// Called before a node's children are visited; returning
    /// [`TraversalSignal::Skip`] prevents traversal into the node. The default
    /// traverses every node.
    #[inline]
    fn enter_node(&mut self, _node: AnyNodeRef<'a>) -> TraversalSignal {
        TraversalSignal::Traverse
    }
    /// Called after a node has been visited, regardless of whether its
    /// children were traversed or skipped.
    #[inline(always)]
    fn leave_node(&mut self, _node: AnyNodeRef<'a>) {}
    #[inline]
    fn visit_mod(&mut self, module: &'a Mod) {
        walk_module(self, module);
    }
    #[inline]
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        walk_stmt(self, stmt);
    }
    #[inline]
    fn visit_annotation(&mut self, expr: &'a Expr) {
        walk_annotation(self, expr);
    }
    #[inline]
    fn visit_expr(&mut self, expr: &'a Expr) {
        walk_expr(self, expr);
    }
    #[inline]
    fn visit_decorator(&mut self, decorator: &'a Decorator) {
        walk_decorator(self, decorator);
    }
    /// Leaf node: the default does nothing.
    #[inline]
    fn visit_singleton(&mut self, _singleton: &'a Singleton) {}
    #[inline]
    fn visit_bool_op(&mut self, bool_op: &'a BoolOp) {
        walk_bool_op(self, bool_op);
    }
    #[inline]
    fn visit_operator(&mut self, operator: &'a Operator) {
        walk_operator(self, operator);
    }
    #[inline]
    fn visit_unary_op(&mut self, unary_op: &'a UnaryOp) {
        walk_unary_op(self, unary_op);
    }
    #[inline]
    fn visit_cmp_op(&mut self, cmp_op: &'a CmpOp) {
        walk_cmp_op(self, cmp_op);
    }
    #[inline]
    fn visit_comprehension(&mut self, comprehension: &'a Comprehension) {
        walk_comprehension(self, comprehension);
    }
    #[inline]
    fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
        walk_except_handler(self, except_handler);
    }
    #[inline]
    fn visit_arguments(&mut self, arguments: &'a Arguments) {
        walk_arguments(self, arguments);
    }
    #[inline]
    fn visit_parameters(&mut self, parameters: &'a Parameters) {
        walk_parameters(self, parameters);
    }
    #[inline]
    fn visit_parameter(&mut self, arg: &'a Parameter) {
        walk_parameter(self, arg);
    }
    fn visit_parameter_with_default(&mut self, parameter_with_default: &'a ParameterWithDefault) {
        walk_parameter_with_default(self, parameter_with_default);
    }
    #[inline]
    fn visit_keyword(&mut self, keyword: &'a Keyword) {
        walk_keyword(self, keyword);
    }
    #[inline]
    fn visit_alias(&mut self, alias: &'a Alias) {
        walk_alias(self, alias);
    }
    #[inline]
    fn visit_with_item(&mut self, with_item: &'a WithItem) {
        walk_with_item(self, with_item);
    }
    #[inline]
    fn visit_type_params(&mut self, type_params: &'a TypeParams) {
        walk_type_params(self, type_params);
    }
    #[inline]
    fn visit_type_param(&mut self, type_param: &'a TypeParam) {
        walk_type_param(self, type_param);
    }
    #[inline]
    fn visit_match_case(&mut self, match_case: &'a MatchCase) {
        walk_match_case(self, match_case);
    }
    #[inline]
    fn visit_pattern(&mut self, pattern: &'a Pattern) {
        walk_pattern(self, pattern);
    }
    #[inline]
    fn visit_pattern_arguments(&mut self, pattern_arguments: &'a PatternArguments) {
        walk_pattern_arguments(self, pattern_arguments);
    }
    #[inline]
    fn visit_pattern_keyword(&mut self, pattern_keyword: &'a PatternKeyword) {
        walk_pattern_keyword(self, pattern_keyword);
    }
    #[inline]
    fn visit_body(&mut self, body: &'a [Stmt]) {
        walk_body(self, body);
    }
    #[inline]
    fn visit_elif_else_clause(&mut self, elif_else_clause: &'a ElifElseClause) {
        walk_elif_else_clause(self, elif_else_clause);
    }
    #[inline]
    fn visit_f_string(&mut self, f_string: &'a FString) {
        walk_f_string(self, f_string);
    }
    #[inline]
    fn visit_interpolated_string_element(
        &mut self,
        interpolated_string_element: &'a InterpolatedStringElement,
    ) {
        walk_interpolated_string_element(self, interpolated_string_element);
    }
    #[inline]
    fn visit_t_string(&mut self, t_string: &'a TString) {
        walk_t_string(self, t_string);
    }
    #[inline]
    fn visit_string_literal(&mut self, string_literal: &'a StringLiteral) {
        walk_string_literal(self, string_literal);
    }
    #[inline]
    fn visit_bytes_literal(&mut self, bytes_literal: &'a BytesLiteral) {
        walk_bytes_literal(self, bytes_literal);
    }
    #[inline]
    fn visit_identifier(&mut self, identifier: &'a Identifier) {
        walk_identifier(self, identifier);
    }
}
pub fn walk_module<'a, V>(visitor: &mut V, module: &'a Mod)
where
V: SourceOrderVisitor<'a> + ?Sized,
{
let node = AnyNodeRef::from(module);
if visitor.enter_node(node).is_traverse() {
match module {
Mod::Module(module) => module.visit_source_order(visitor),
Mod::Expression(module) => module.visit_source_order(visitor),
}
}
visitor.leave_node(node);
}
/// Visits every statement of `body` in order.
pub fn walk_body<'a, V>(visitor: &mut V, body: &'a [Stmt])
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    body.iter().for_each(|stmt| visitor.visit_stmt(stmt));
}
pub fn walk_stmt<'a, V>(visitor: &mut V, stmt: &'a Stmt)
where
V: SourceOrderVisitor<'a> + ?Sized,
{
let node = AnyNodeRef::from(stmt);
if visitor.enter_node(node).is_traverse() {
stmt.visit_source_order(visitor);
}
visitor.leave_node(node);
}
pub fn walk_node<'a, V>(visitor: &mut V, node: AnyNodeRef<'a>)
where
V: SourceOrderVisitor<'a> + ?Sized,
{
if visitor.enter_node(node).is_traverse() {
node.visit_source_order(visitor);
}
visitor.leave_node(node);
}
/// Signal returned by [`SourceOrderVisitor::enter_node`] that controls whether
/// a node's children are traversed.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum TraversalSignal {
    /// Traverse into the node's children.
    Traverse,
    /// Skip the node's children.
    Skip,
}
impl TraversalSignal {
    /// Returns `true` if this signal requests traversal into the node.
    pub const fn is_traverse(self) -> bool {
        match self {
            TraversalSignal::Traverse => true,
            TraversalSignal::Skip => false,
        }
    }
}
/// Visits an annotation expression.
pub fn walk_annotation<'a, V>(visitor: &mut V, expr: &'a Expr)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    visitor.visit_expr(expr);
}
/// Traverses a [`Decorator`] node, honoring the visitor's enter/leave protocol.
pub fn walk_decorator<'a, V>(visitor: &mut V, decorator: &'a Decorator)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(decorator);
    let signal = visitor.enter_node(node);
    if signal.is_traverse() {
        decorator.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an expression node, dispatching on its concrete variant and
/// honoring the visitor's enter/leave protocol.
pub fn walk_expr<'a, V>(visitor: &mut V, expr: &'a Expr)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(expr);
    if visitor.enter_node(node).is_traverse() {
        // One arm per `Expr` variant; each delegates to the variant's own
        // source-order traversal.
        match expr {
            Expr::BoolOp(expr) => expr.visit_source_order(visitor),
            Expr::Named(expr) => expr.visit_source_order(visitor),
            Expr::BinOp(expr) => expr.visit_source_order(visitor),
            Expr::UnaryOp(expr) => expr.visit_source_order(visitor),
            Expr::Lambda(expr) => expr.visit_source_order(visitor),
            Expr::If(expr) => expr.visit_source_order(visitor),
            Expr::Dict(expr) => expr.visit_source_order(visitor),
            Expr::Set(expr) => expr.visit_source_order(visitor),
            Expr::ListComp(expr) => expr.visit_source_order(visitor),
            Expr::SetComp(expr) => expr.visit_source_order(visitor),
            Expr::DictComp(expr) => expr.visit_source_order(visitor),
            Expr::Generator(expr) => expr.visit_source_order(visitor),
            Expr::Await(expr) => expr.visit_source_order(visitor),
            Expr::Yield(expr) => expr.visit_source_order(visitor),
            Expr::YieldFrom(expr) => expr.visit_source_order(visitor),
            Expr::Compare(expr) => expr.visit_source_order(visitor),
            Expr::Call(expr) => expr.visit_source_order(visitor),
            Expr::FString(expr) => expr.visit_source_order(visitor),
            Expr::TString(expr) => expr.visit_source_order(visitor),
            Expr::StringLiteral(expr) => expr.visit_source_order(visitor),
            Expr::BytesLiteral(expr) => expr.visit_source_order(visitor),
            Expr::NumberLiteral(expr) => expr.visit_source_order(visitor),
            Expr::BooleanLiteral(expr) => expr.visit_source_order(visitor),
            Expr::NoneLiteral(expr) => expr.visit_source_order(visitor),
            Expr::EllipsisLiteral(expr) => expr.visit_source_order(visitor),
            Expr::Attribute(expr) => expr.visit_source_order(visitor),
            Expr::Subscript(expr) => expr.visit_source_order(visitor),
            Expr::Starred(expr) => expr.visit_source_order(visitor),
            Expr::Name(expr) => expr.visit_source_order(visitor),
            Expr::List(expr) => expr.visit_source_order(visitor),
            Expr::Tuple(expr) => expr.visit_source_order(visitor),
            Expr::Slice(expr) => expr.visit_source_order(visitor),
            Expr::IpyEscapeCommand(expr) => expr.visit_source_order(visitor),
        }
    }
    visitor.leave_node(node);
}
/// Traverses a [`Comprehension`] clause, honoring the visitor's enter/leave protocol.
pub fn walk_comprehension<'a, V>(visitor: &mut V, comprehension: &'a Comprehension)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(comprehension);
    if visitor.enter_node(node).is_traverse() {
        comprehension.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an [`ElifElseClause`], honoring the visitor's enter/leave protocol.
pub fn walk_elif_else_clause<'a, V>(visitor: &mut V, elif_else_clause: &'a ElifElseClause)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(elif_else_clause);
    if visitor.enter_node(node).is_traverse() {
        elif_else_clause.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an [`ExceptHandler`], honoring the visitor's enter/leave protocol.
pub fn walk_except_handler<'a, V>(visitor: &mut V, except_handler: &'a ExceptHandler)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(except_handler);
    if visitor.enter_node(node).is_traverse() {
        match except_handler {
            ExceptHandler::ExceptHandler(except_handler) => {
                except_handler.visit_source_order(visitor);
            }
        }
    }
    visitor.leave_node(node);
}
/// Traverses a format-spec expression, honoring the visitor's enter/leave protocol.
pub fn walk_format_spec<'a, V: SourceOrderVisitor<'a> + ?Sized>(
    visitor: &mut V,
    format_spec: &'a Expr,
) {
    let node = AnyNodeRef::from(format_spec);
    if visitor.enter_node(node).is_traverse() {
        visitor.visit_expr(format_spec);
    }
    visitor.leave_node(node);
}
/// Traverses call [`Arguments`], honoring the visitor's enter/leave protocol.
pub fn walk_arguments<'a, V>(visitor: &mut V, arguments: &'a Arguments)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(arguments);
    if visitor.enter_node(node).is_traverse() {
        arguments.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses [`Parameters`], honoring the visitor's enter/leave protocol.
pub fn walk_parameters<'a, V>(visitor: &mut V, parameters: &'a Parameters)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(parameters);
    if visitor.enter_node(node).is_traverse() {
        parameters.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a single [`Parameter`], honoring the visitor's enter/leave protocol.
pub fn walk_parameter<'a, V>(visitor: &mut V, parameter: &'a Parameter)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(parameter);
    if visitor.enter_node(node).is_traverse() {
        parameter.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`ParameterWithDefault`], honoring the visitor's enter/leave protocol.
pub fn walk_parameter_with_default<'a, V>(
    visitor: &mut V,
    parameter_with_default: &'a ParameterWithDefault,
) where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(parameter_with_default);
    if visitor.enter_node(node).is_traverse() {
        parameter_with_default.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`Keyword`] argument, honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_keyword<'a, V>(visitor: &mut V, keyword: &'a Keyword)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(keyword);
    if visitor.enter_node(node).is_traverse() {
        keyword.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`WithItem`], honoring the visitor's enter/leave protocol.
pub fn walk_with_item<'a, V>(visitor: &mut V, with_item: &'a WithItem)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(with_item);
    if visitor.enter_node(node).is_traverse() {
        with_item.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`TypeParams`] list, honoring the visitor's enter/leave protocol.
pub fn walk_type_params<'a, V>(visitor: &mut V, type_params: &'a TypeParams)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(type_params);
    if visitor.enter_node(node).is_traverse() {
        type_params.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a single [`TypeParam`], honoring the visitor's enter/leave protocol.
pub fn walk_type_param<'a, V>(visitor: &mut V, type_param: &'a TypeParam)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(type_param);
    if visitor.enter_node(node).is_traverse() {
        type_param.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`MatchCase`], honoring the visitor's enter/leave protocol.
pub fn walk_match_case<'a, V>(visitor: &mut V, match_case: &'a MatchCase)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(match_case);
    if visitor.enter_node(node).is_traverse() {
        match_case.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a match [`Pattern`], dispatching on its concrete variant and
/// honoring the visitor's enter/leave protocol.
pub fn walk_pattern<'a, V>(visitor: &mut V, pattern: &'a Pattern)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(pattern);
    if visitor.enter_node(node).is_traverse() {
        match pattern {
            Pattern::MatchValue(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchSingleton(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchSequence(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchMapping(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchClass(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchStar(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchAs(pattern) => pattern.visit_source_order(visitor),
            Pattern::MatchOr(pattern) => pattern.visit_source_order(visitor),
        }
    }
    visitor.leave_node(node);
}
/// Traverses [`PatternArguments`]: positional patterns first, then keyword
/// patterns, matching their source order in a class pattern.
pub fn walk_pattern_arguments<'a, V>(visitor: &mut V, pattern_arguments: &'a PatternArguments)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(pattern_arguments);
    if visitor.enter_node(node).is_traverse() {
        for pattern in &pattern_arguments.patterns {
            visitor.visit_pattern(pattern);
        }
        for keyword in &pattern_arguments.keywords {
            visitor.visit_pattern_keyword(keyword);
        }
    }
    visitor.leave_node(node);
}
/// Traverses a [`PatternKeyword`], honoring the visitor's enter/leave protocol.
pub fn walk_pattern_keyword<'a, V>(visitor: &mut V, pattern_keyword: &'a PatternKeyword)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(pattern_keyword);
    if visitor.enter_node(node).is_traverse() {
        pattern_keyword.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an [`InterpolatedStringElement`], dispatching on whether it is an
/// interpolation or a literal part.
pub fn walk_interpolated_string_element<'a, V: SourceOrderVisitor<'a> + ?Sized>(
    visitor: &mut V,
    f_string_element: &'a InterpolatedStringElement,
) {
    let node = AnyNodeRef::from(f_string_element);
    if visitor.enter_node(node).is_traverse() {
        match f_string_element {
            InterpolatedStringElement::Interpolation(element) => {
                element.visit_source_order(visitor);
            }
            InterpolatedStringElement::Literal(element) => element.visit_source_order(visitor),
        }
    }
    visitor.leave_node(node);
}
/// Walks a [`BoolOp`]. Leaf node: there is nothing to traverse into.
// `#[inline]` added for consistency with the sibling no-op walkers
// (`walk_operator`, `walk_unary_op`, `walk_cmp_op`), which all carry it.
#[inline]
pub fn walk_bool_op<'a, V>(_visitor: &mut V, _bool_op: &'a BoolOp)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
}
/// Walks an [`Operator`]. Leaf node: there is nothing to traverse into.
#[inline]
pub fn walk_operator<'a, V>(_visitor: &mut V, _operator: &'a Operator)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
}
/// Walks a [`UnaryOp`]. Leaf node: there is nothing to traverse into.
#[inline]
pub fn walk_unary_op<'a, V>(_visitor: &mut V, _unary_op: &'a UnaryOp)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
}
/// Walks a [`CmpOp`]. Leaf node: there is nothing to traverse into.
#[inline]
pub fn walk_cmp_op<'a, V>(_visitor: &mut V, _cmp_op: &'a CmpOp)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
}
/// Traverses an [`FString`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_f_string<'a, V>(visitor: &mut V, f_string: &'a FString)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(f_string);
    if visitor.enter_node(node).is_traverse() {
        f_string.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`TString`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_t_string<'a, V>(visitor: &mut V, t_string: &'a TString)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(t_string);
    if visitor.enter_node(node).is_traverse() {
        t_string.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`StringLiteral`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_string_literal<'a, V>(visitor: &mut V, string_literal: &'a StringLiteral)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(string_literal);
    if visitor.enter_node(node).is_traverse() {
        string_literal.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses a [`BytesLiteral`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_bytes_literal<'a, V>(visitor: &mut V, bytes_literal: &'a BytesLiteral)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(bytes_literal);
    if visitor.enter_node(node).is_traverse() {
        bytes_literal.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an import [`Alias`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_alias<'a, V>(visitor: &mut V, alias: &'a Alias)
where
    V: SourceOrderVisitor<'a> + ?Sized,
{
    let node = AnyNodeRef::from(alias);
    if visitor.enter_node(node).is_traverse() {
        alias.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
/// Traverses an [`Identifier`], honoring the visitor's enter/leave protocol.
#[inline]
pub fn walk_identifier<'a, V: SourceOrderVisitor<'a> + ?Sized>(
    visitor: &mut V,
    identifier: &'a Identifier,
) {
    let node = AnyNodeRef::from(identifier);
    if visitor.enter_node(node).is_traverse() {
        identifier.visit_source_order(visitor);
    }
    visitor.leave_node(node);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/visitor/transformer.rs | crates/ruff_python_ast/src/visitor/transformer.rs | use crate::{
self as ast, Alias, Arguments, BoolOp, BytesLiteral, CmpOp, Comprehension, Decorator,
ElifElseClause, ExceptHandler, Expr, ExprContext, FString, InterpolatedStringElement, Keyword,
MatchCase, Operator, Parameter, Parameters, Pattern, PatternArguments, PatternKeyword, Stmt,
StringLiteral, TString, TypeParam, TypeParamParamSpec, TypeParamTypeVar, TypeParamTypeVarTuple,
TypeParams, UnaryOp, WithItem,
};
/// A trait for transforming ASTs. Visits all nodes in the AST recursively in evaluation-order.
///
/// Every `visit_*` method has a default implementation that delegates to the
/// corresponding free `walk_*` function, which recurses into the node's
/// children. Implementors override individual methods to rewrite specific
/// node kinds; call the matching `walk_*` from an override to keep recursing.
pub trait Transformer {
    /// Visit (and possibly rewrite) a statement.
    fn visit_stmt(&self, stmt: &mut Stmt) {
        walk_stmt(self, stmt);
    }
    /// Visit an expression that is used as a type annotation.
    fn visit_annotation(&self, expr: &mut Expr) {
        walk_annotation(self, expr);
    }
    fn visit_decorator(&self, decorator: &mut Decorator) {
        walk_decorator(self, decorator);
    }
    /// Visit (and possibly rewrite) an expression.
    fn visit_expr(&self, expr: &mut Expr) {
        walk_expr(self, expr);
    }
    fn visit_expr_context(&self, expr_context: &mut ExprContext) {
        walk_expr_context(self, expr_context);
    }
    fn visit_bool_op(&self, bool_op: &mut BoolOp) {
        walk_bool_op(self, bool_op);
    }
    fn visit_operator(&self, operator: &mut Operator) {
        walk_operator(self, operator);
    }
    fn visit_unary_op(&self, unary_op: &mut UnaryOp) {
        walk_unary_op(self, unary_op);
    }
    fn visit_cmp_op(&self, cmp_op: &mut CmpOp) {
        walk_cmp_op(self, cmp_op);
    }
    fn visit_comprehension(&self, comprehension: &mut Comprehension) {
        walk_comprehension(self, comprehension);
    }
    fn visit_except_handler(&self, except_handler: &mut ExceptHandler) {
        walk_except_handler(self, except_handler);
    }
    fn visit_arguments(&self, arguments: &mut Arguments) {
        walk_arguments(self, arguments);
    }
    fn visit_parameters(&self, parameters: &mut Parameters) {
        walk_parameters(self, parameters);
    }
    fn visit_parameter(&self, parameter: &mut Parameter) {
        walk_parameter(self, parameter);
    }
    fn visit_keyword(&self, keyword: &mut Keyword) {
        walk_keyword(self, keyword);
    }
    fn visit_alias(&self, alias: &mut Alias) {
        walk_alias(self, alias);
    }
    fn visit_with_item(&self, with_item: &mut WithItem) {
        walk_with_item(self, with_item);
    }
    fn visit_type_params(&self, type_params: &mut TypeParams) {
        walk_type_params(self, type_params);
    }
    fn visit_type_param(&self, type_param: &mut TypeParam) {
        walk_type_param(self, type_param);
    }
    fn visit_match_case(&self, match_case: &mut MatchCase) {
        walk_match_case(self, match_case);
    }
    fn visit_pattern(&self, pattern: &mut Pattern) {
        walk_pattern(self, pattern);
    }
    fn visit_pattern_arguments(&self, pattern_arguments: &mut PatternArguments) {
        walk_pattern_arguments(self, pattern_arguments);
    }
    fn visit_pattern_keyword(&self, pattern_keyword: &mut PatternKeyword) {
        walk_pattern_keyword(self, pattern_keyword);
    }
    /// Visit every statement of a suite, in order.
    fn visit_body(&self, body: &mut [Stmt]) {
        walk_body(self, body);
    }
    fn visit_elif_else_clause(&self, elif_else_clause: &mut ElifElseClause) {
        walk_elif_else_clause(self, elif_else_clause);
    }
    fn visit_f_string(&self, f_string: &mut FString) {
        walk_f_string(self, f_string);
    }
    fn visit_interpolated_string_element(
        &self,
        interpolated_string_element: &mut InterpolatedStringElement,
    ) {
        walk_interpolated_string_element(self, interpolated_string_element);
    }
    fn visit_t_string(&self, t_string: &mut TString) {
        walk_t_string(self, t_string);
    }
    fn visit_string_literal(&self, string_literal: &mut StringLiteral) {
        walk_string_literal(self, string_literal);
    }
    fn visit_bytes_literal(&self, bytes_literal: &mut BytesLiteral) {
        walk_bytes_literal(self, bytes_literal);
    }
}
/// Visits every statement of a suite in order.
pub fn walk_body<V: Transformer + ?Sized>(visitor: &V, body: &mut [Stmt]) {
    body.iter_mut().for_each(|stmt| visitor.visit_stmt(stmt));
}
/// Visits an `elif`/`else` clause: the test (absent for `else`), then the body.
pub fn walk_elif_else_clause<V: Transformer + ?Sized>(
    visitor: &V,
    elif_else_clause: &mut ElifElseClause,
) {
    if let Some(test_expr) = elif_else_clause.test.as_mut() {
        visitor.visit_expr(test_expr);
    }
    visitor.visit_body(&mut elif_else_clause.body);
}
/// Walks a statement's child nodes in evaluation order.
///
/// Note that this differs from source order: assigned values are visited
/// *before* the targets they are bound to (see `Assign`, `AugAssign`,
/// `AnnAssign`, `For`), mirroring Python's runtime evaluation.
pub fn walk_stmt<V: Transformer + ?Sized>(visitor: &V, stmt: &mut Stmt) {
    match stmt {
        Stmt::FunctionDef(ast::StmtFunctionDef {
            parameters,
            body,
            decorator_list,
            returns,
            type_params,
            ..
        }) => {
            for decorator in decorator_list {
                visitor.visit_decorator(decorator);
            }
            if let Some(type_params) = type_params {
                visitor.visit_type_params(type_params);
            }
            visitor.visit_parameters(parameters);
            if let Some(expr) = returns {
                visitor.visit_annotation(expr);
            }
            visitor.visit_body(body);
        }
        Stmt::ClassDef(ast::StmtClassDef {
            arguments,
            body,
            decorator_list,
            type_params,
            ..
        }) => {
            for decorator in decorator_list {
                visitor.visit_decorator(decorator);
            }
            if let Some(type_params) = type_params {
                visitor.visit_type_params(type_params);
            }
            if let Some(arguments) = arguments {
                visitor.visit_arguments(arguments);
            }
            visitor.visit_body(body);
        }
        Stmt::Return(ast::StmtReturn {
            value,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = value {
                visitor.visit_expr(expr);
            }
        }
        Stmt::Delete(ast::StmtDelete {
            targets,
            range: _,
            node_index: _,
        }) => {
            for expr in targets {
                visitor.visit_expr(expr);
            }
        }
        Stmt::TypeAlias(ast::StmtTypeAlias {
            range: _,
            node_index: _,
            name,
            type_params,
            value,
        }) => {
            // The aliased value is visited before the type parameters and name.
            visitor.visit_expr(value);
            if let Some(type_params) = type_params {
                visitor.visit_type_params(type_params);
            }
            visitor.visit_expr(name);
        }
        Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
            // Evaluation order: the assigned value first, then the targets.
            visitor.visit_expr(value);
            for expr in targets {
                visitor.visit_expr(expr);
            }
        }
        Stmt::AugAssign(ast::StmtAugAssign {
            target,
            op,
            value,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(value);
            visitor.visit_operator(op);
            visitor.visit_expr(target);
        }
        Stmt::AnnAssign(ast::StmtAnnAssign {
            target,
            annotation,
            value,
            ..
        }) => {
            if let Some(expr) = value {
                visitor.visit_expr(expr);
            }
            visitor.visit_annotation(annotation);
            visitor.visit_expr(target);
        }
        Stmt::For(ast::StmtFor {
            target,
            iter,
            body,
            orelse,
            ..
        }) => {
            // The iterable is evaluated before the loop target is bound.
            visitor.visit_expr(iter);
            visitor.visit_expr(target);
            visitor.visit_body(body);
            visitor.visit_body(orelse);
        }
        Stmt::While(ast::StmtWhile {
            test,
            body,
            orelse,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(test);
            visitor.visit_body(body);
            visitor.visit_body(orelse);
        }
        Stmt::If(ast::StmtIf {
            test,
            body,
            elif_else_clauses,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(test);
            visitor.visit_body(body);
            for clause in elif_else_clauses {
                walk_elif_else_clause(visitor, clause);
            }
        }
        Stmt::With(ast::StmtWith { items, body, .. }) => {
            for with_item in items {
                visitor.visit_with_item(with_item);
            }
            visitor.visit_body(body);
        }
        Stmt::Match(ast::StmtMatch {
            subject,
            cases,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(subject);
            for match_case in cases {
                visitor.visit_match_case(match_case);
            }
        }
        Stmt::Raise(ast::StmtRaise {
            exc,
            cause,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = exc {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = cause {
                visitor.visit_expr(expr);
            }
        }
        Stmt::Try(ast::StmtTry {
            body,
            handlers,
            orelse,
            finalbody,
            is_star: _,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_body(body);
            for except_handler in handlers {
                visitor.visit_except_handler(except_handler);
            }
            visitor.visit_body(orelse);
            visitor.visit_body(finalbody);
        }
        Stmt::Assert(ast::StmtAssert {
            test,
            msg,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(test);
            if let Some(expr) = msg {
                visitor.visit_expr(expr);
            }
        }
        Stmt::Import(ast::StmtImport {
            names,
            range: _,
            node_index: _,
        }) => {
            for alias in names {
                visitor.visit_alias(alias);
            }
        }
        Stmt::ImportFrom(ast::StmtImportFrom { names, .. }) => {
            for alias in names {
                visitor.visit_alias(alias);
            }
        }
        // `global`/`nonlocal` carry only identifiers, which have no child nodes.
        Stmt::Global(_) => {}
        Stmt::Nonlocal(_) => {}
        Stmt::Expr(ast::StmtExpr {
            value,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(value),
        Stmt::Pass(_) | Stmt::Break(_) | Stmt::Continue(_) | Stmt::IpyEscapeCommand(_) => {}
    }
}
/// Walks an annotation; annotations are plain expressions, so this simply
/// delegates to [`Transformer::visit_expr`].
pub fn walk_annotation<V: Transformer + ?Sized>(visitor: &V, expr: &mut Expr) {
    visitor.visit_expr(expr);
}
/// Walks a decorator by visiting its underlying expression.
pub fn walk_decorator<V: Transformer + ?Sized>(visitor: &V, decorator: &mut Decorator) {
    visitor.visit_expr(&mut decorator.expression);
}
/// Walks an expression's child nodes in evaluation order.
///
/// As with [`walk_stmt`], values are visited before the targets they are bound
/// to (e.g. `Named`), and comprehension generators are visited before the
/// element/key/value they produce.
pub fn walk_expr<V: Transformer + ?Sized>(visitor: &V, expr: &mut Expr) {
    match expr {
        Expr::BoolOp(ast::ExprBoolOp {
            op,
            values,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_bool_op(op);
            for expr in values {
                visitor.visit_expr(expr);
            }
        }
        Expr::Named(ast::ExprNamed {
            target,
            value,
            range: _,
            node_index: _,
        }) => {
            // Walrus: the value is computed before it is bound to the target.
            visitor.visit_expr(value);
            visitor.visit_expr(target);
        }
        Expr::BinOp(ast::ExprBinOp {
            left,
            op,
            right,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(left);
            visitor.visit_operator(op);
            visitor.visit_expr(right);
        }
        Expr::UnaryOp(ast::ExprUnaryOp {
            op,
            operand,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_unary_op(op);
            visitor.visit_expr(operand);
        }
        Expr::Lambda(ast::ExprLambda {
            parameters,
            body,
            range: _,
            node_index: _,
        }) => {
            if let Some(parameters) = parameters {
                visitor.visit_parameters(parameters);
            }
            visitor.visit_expr(body);
        }
        Expr::If(ast::ExprIf {
            test,
            body,
            orelse,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(test);
            visitor.visit_expr(body);
            visitor.visit_expr(orelse);
        }
        Expr::Dict(ast::ExprDict {
            items,
            range: _,
            node_index: _,
        }) => {
            // Items without a key are `**`-unpacking entries; only the value
            // is visited for those.
            for ast::DictItem { key, value } in items {
                if let Some(key) = key {
                    visitor.visit_expr(key);
                }
                visitor.visit_expr(value);
            }
        }
        Expr::Set(ast::ExprSet {
            elts,
            range: _,
            node_index: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
        }
        Expr::ListComp(ast::ExprListComp {
            elt,
            generators,
            range: _,
            node_index: _,
        }) => {
            // Generators are visited before the element they produce.
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::SetComp(ast::ExprSetComp {
            elt,
            generators,
            range: _,
            node_index: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::DictComp(ast::ExprDictComp {
            key,
            value,
            generators,
            range: _,
            node_index: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(key);
            visitor.visit_expr(value);
        }
        Expr::Generator(ast::ExprGenerator {
            elt,
            generators,
            range: _,
            node_index: _,
            parenthesized: _,
        }) => {
            for comprehension in generators {
                visitor.visit_comprehension(comprehension);
            }
            visitor.visit_expr(elt);
        }
        Expr::Await(ast::ExprAwait {
            value,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(value),
        Expr::Yield(ast::ExprYield {
            value,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = value {
                visitor.visit_expr(expr);
            }
        }
        Expr::YieldFrom(ast::ExprYieldFrom {
            value,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(value),
        Expr::Compare(ast::ExprCompare {
            left,
            ops,
            comparators,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(left);
            for cmp_op in &mut **ops {
                visitor.visit_cmp_op(cmp_op);
            }
            for expr in &mut **comparators {
                visitor.visit_expr(expr);
            }
        }
        Expr::Call(ast::ExprCall {
            func,
            arguments,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(func);
            visitor.visit_arguments(arguments);
        }
        Expr::FString(ast::ExprFString { value, .. }) => {
            // An f-string value alternates between plain literal parts and
            // f-string parts; each kind gets its dedicated visit method.
            for f_string_part in value.iter_mut() {
                match f_string_part {
                    ast::FStringPart::Literal(string_literal) => {
                        visitor.visit_string_literal(string_literal);
                    }
                    ast::FStringPart::FString(f_string) => {
                        visitor.visit_f_string(f_string);
                    }
                }
            }
        }
        Expr::TString(ast::ExprTString { value, .. }) => {
            for t_string in value.iter_mut() {
                visitor.visit_t_string(t_string);
            }
        }
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
            for string_literal in value.iter_mut() {
                visitor.visit_string_literal(string_literal);
            }
        }
        Expr::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => {
            for bytes_literal in value.iter_mut() {
                visitor.visit_bytes_literal(bytes_literal);
            }
        }
        Expr::NumberLiteral(_)
        | Expr::BooleanLiteral(_)
        | Expr::NoneLiteral(_)
        | Expr::EllipsisLiteral(_) => {}
        Expr::Attribute(ast::ExprAttribute { value, ctx, .. }) => {
            visitor.visit_expr(value);
            visitor.visit_expr_context(ctx);
        }
        Expr::Subscript(ast::ExprSubscript {
            value,
            slice,
            ctx,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(value);
            visitor.visit_expr(slice);
            visitor.visit_expr_context(ctx);
        }
        Expr::Starred(ast::ExprStarred {
            value,
            ctx,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_expr(value);
            visitor.visit_expr_context(ctx);
        }
        Expr::Name(ast::ExprName { ctx, .. }) => {
            visitor.visit_expr_context(ctx);
        }
        Expr::List(ast::ExprList {
            elts,
            ctx,
            range: _,
            node_index: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
            visitor.visit_expr_context(ctx);
        }
        Expr::Tuple(ast::ExprTuple {
            elts,
            ctx,
            range: _,
            node_index: _,
            parenthesized: _,
        }) => {
            for expr in elts {
                visitor.visit_expr(expr);
            }
            visitor.visit_expr_context(ctx);
        }
        Expr::Slice(ast::ExprSlice {
            lower,
            upper,
            step,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = lower {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = upper {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = step {
                visitor.visit_expr(expr);
            }
        }
        Expr::IpyEscapeCommand(_) => {}
    }
}
/// Walks one `for ... in ... if ...` comprehension clause.
///
/// The iterable is visited before the target it binds to, matching Python's
/// evaluation order; condition expressions follow.
pub fn walk_comprehension<V: Transformer + ?Sized>(visitor: &V, comprehension: &mut Comprehension) {
    visitor.visit_expr(&mut comprehension.iter);
    visitor.visit_expr(&mut comprehension.target);
    comprehension
        .ifs
        .iter_mut()
        .for_each(|if_expr| visitor.visit_expr(if_expr));
}
/// Walks an `except` handler: the optional exception type, then the body.
pub fn walk_except_handler<V: Transformer + ?Sized>(
    visitor: &V,
    except_handler: &mut ExceptHandler,
) {
    // `ExceptHandler` has a single variant, so the pattern is irrefutable.
    let ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler { type_, body, .. }) =
        except_handler;
    if let Some(type_expr) = type_.as_mut() {
        visitor.visit_expr(type_expr);
    }
    visitor.visit_body(body);
}
/// Walks a call's argument list: all positional arguments, then all keywords.
pub fn walk_arguments<V: Transformer + ?Sized>(visitor: &V, arguments: &mut Arguments) {
    // Note that there might be keywords before the last arg, e.g. in
    // `f(*args, a=2, *args2, **kwargs)`, but we follow Python in evaluating first `args` and then
    // `keywords`. See also [`Arguments::arguments_source_order`].
    for arg in &mut *arguments.args {
        visitor.visit_expr(arg);
    }
    for keyword in &mut *arguments.keywords {
        visitor.visit_keyword(keyword);
    }
}
/// Walks a function's parameter list.
///
/// All default expressions (positional-only, regular, then keyword-only) are
/// visited before any of the parameters themselves (and hence before their
/// annotations), matching Python's evaluation order for `def`.
pub fn walk_parameters<V: Transformer + ?Sized>(visitor: &V, parameters: &mut Parameters) {
    // Defaults are evaluated before annotations.
    for arg in &mut parameters.posonlyargs {
        if let Some(default) = &mut arg.default {
            visitor.visit_expr(default);
        }
    }
    for arg in &mut parameters.args {
        if let Some(default) = &mut arg.default {
            visitor.visit_expr(default);
        }
    }
    for arg in &mut parameters.kwonlyargs {
        if let Some(default) = &mut arg.default {
            visitor.visit_expr(default);
        }
    }
    for arg in &mut parameters.posonlyargs {
        visitor.visit_parameter(&mut arg.parameter);
    }
    for arg in &mut parameters.args {
        visitor.visit_parameter(&mut arg.parameter);
    }
    if let Some(arg) = &mut parameters.vararg {
        visitor.visit_parameter(arg);
    }
    for arg in &mut parameters.kwonlyargs {
        visitor.visit_parameter(&mut arg.parameter);
    }
    if let Some(arg) = &mut parameters.kwarg {
        visitor.visit_parameter(arg);
    }
}
/// Walks a single parameter; only its optional annotation contains a nested
/// expression.
pub fn walk_parameter<V: Transformer + ?Sized>(visitor: &V, parameter: &mut Parameter) {
    if let Some(annotation) = parameter.annotation.as_mut() {
        visitor.visit_annotation(annotation);
    }
}
/// Walks a keyword argument by visiting its value expression.
pub fn walk_keyword<V: Transformer + ?Sized>(visitor: &V, keyword: &mut Keyword) {
    visitor.visit_expr(&mut keyword.value);
}
/// Walks a `with` item: the context expression, then the optional `as` target.
pub fn walk_with_item<V: Transformer + ?Sized>(visitor: &V, with_item: &mut WithItem) {
    visitor.visit_expr(&mut with_item.context_expr);
    if let Some(as_target) = with_item.optional_vars.as_mut() {
        visitor.visit_expr(as_target);
    }
}
/// Walks a `[T, *Ts, **P]` type-parameter list in order.
pub fn walk_type_params<V: Transformer + ?Sized>(visitor: &V, type_params: &mut TypeParams) {
    type_params
        .type_params
        .iter_mut()
        .for_each(|type_param| visitor.visit_type_param(type_param));
}
/// Walks a single type parameter.
///
/// Only `TypeVar` carries a bound; all three variants may carry a default
/// expression (PEP 696), which is visited when present.
pub fn walk_type_param<V: Transformer + ?Sized>(visitor: &V, type_param: &mut TypeParam) {
    match type_param {
        TypeParam::TypeVar(TypeParamTypeVar {
            bound,
            default,
            name: _,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = bound {
                visitor.visit_expr(expr);
            }
            if let Some(expr) = default {
                visitor.visit_expr(expr);
            }
        }
        TypeParam::TypeVarTuple(TypeParamTypeVarTuple {
            default,
            name: _,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = default {
                visitor.visit_expr(expr);
            }
        }
        TypeParam::ParamSpec(TypeParamParamSpec {
            default,
            name: _,
            range: _,
            node_index: _,
        }) => {
            if let Some(expr) = default {
                visitor.visit_expr(expr);
            }
        }
    }
}
/// Walks a `case` clause: the pattern, the optional guard, then the body.
pub fn walk_match_case<V: Transformer + ?Sized>(visitor: &V, match_case: &mut MatchCase) {
    visitor.visit_pattern(&mut match_case.pattern);
    if let Some(guard_expr) = match_case.guard.as_mut() {
        visitor.visit_expr(guard_expr);
    }
    visitor.visit_body(&mut match_case.body);
}
/// Walks a `match` pattern, recursing into sub-patterns and any embedded
/// expressions (mapping keys, value patterns, class names).
pub fn walk_pattern<V: Transformer + ?Sized>(visitor: &V, pattern: &mut Pattern) {
    match pattern {
        Pattern::MatchValue(ast::PatternMatchValue { value, .. }) => {
            visitor.visit_expr(value);
        }
        // Singleton patterns (`None`/`True`/`False`) have no child nodes.
        Pattern::MatchSingleton(_) => {}
        Pattern::MatchSequence(ast::PatternMatchSequence { patterns, .. }) => {
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchMapping(ast::PatternMatchMapping { keys, patterns, .. }) => {
            for expr in keys {
                visitor.visit_expr(expr);
            }
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchClass(ast::PatternMatchClass { cls, arguments, .. }) => {
            visitor.visit_expr(cls);
            visitor.visit_pattern_arguments(arguments);
        }
        // `*rest` captures only a name.
        Pattern::MatchStar(_) => {}
        Pattern::MatchAs(ast::PatternMatchAs { pattern, .. }) => {
            if let Some(pattern) = pattern {
                visitor.visit_pattern(pattern);
            }
        }
        Pattern::MatchOr(ast::PatternMatchOr { patterns, .. }) => {
            for pattern in patterns {
                visitor.visit_pattern(pattern);
            }
        }
    }
}
/// Walks a class pattern's arguments: positional patterns first, then keyword
/// patterns, mirroring call syntax.
pub fn walk_pattern_arguments<V: Transformer + ?Sized>(
    visitor: &V,
    pattern_arguments: &mut PatternArguments,
) {
    for positional in pattern_arguments.patterns.iter_mut() {
        visitor.visit_pattern(positional);
    }
    for keyword in pattern_arguments.keywords.iter_mut() {
        visitor.visit_pattern_keyword(keyword);
    }
}
/// Walks a keyword pattern (`name=pattern`) by visiting its nested pattern.
pub fn walk_pattern_keyword<V: Transformer + ?Sized>(
    visitor: &V,
    pattern_keyword: &mut PatternKeyword,
) {
    visitor.visit_pattern(&mut pattern_keyword.pattern);
}
/// Walks all elements of an f-string (literal runs and interpolations alike).
pub fn walk_f_string<V: Transformer + ?Sized>(visitor: &V, f_string: &mut FString) {
    for string_element in &mut f_string.elements {
        visitor.visit_interpolated_string_element(string_element);
    }
}
/// Walks an f-/t-string element.
///
/// Literal elements contain no nested nodes; for interpolations, the
/// interpolated expression is visited first, followed by the elements of the
/// format spec (which may itself contain nested interpolations).
pub fn walk_interpolated_string_element<V: Transformer + ?Sized>(
    visitor: &V,
    interpolated_string_element: &mut InterpolatedStringElement,
) {
    let ast::InterpolatedStringElement::Interpolation(ast::InterpolatedElement {
        expression,
        format_spec,
        ..
    }) = interpolated_string_element
    else {
        return;
    };
    visitor.visit_expr(expression);
    if let Some(format_spec) = format_spec {
        for spec_element in &mut format_spec.elements {
            visitor.visit_interpolated_string_element(spec_element);
        }
    }
}
/// Walks all elements of a t-string (literal runs and interpolations alike).
pub fn walk_t_string<V: Transformer + ?Sized>(visitor: &V, t_string: &mut TString) {
    for string_element in &mut t_string.elements {
        visitor.visit_interpolated_string_element(string_element);
    }
}
// The following walkers are intentionally empty: operators, expression
// contexts, aliases, and individual string/bytes literal parts are leaf nodes
// with no child nodes to transform. They exist so implementors can still hook
// the corresponding `visit_*` methods.
pub fn walk_expr_context<V: Transformer + ?Sized>(_visitor: &V, _expr_context: &mut ExprContext) {}
pub fn walk_bool_op<V: Transformer + ?Sized>(_visitor: &V, _bool_op: &mut BoolOp) {}
pub fn walk_operator<V: Transformer + ?Sized>(_visitor: &V, _operator: &mut Operator) {}
pub fn walk_unary_op<V: Transformer + ?Sized>(_visitor: &V, _unary_op: &mut UnaryOp) {}
pub fn walk_cmp_op<V: Transformer + ?Sized>(_visitor: &V, _cmp_op: &mut CmpOp) {}
pub fn walk_alias<V: Transformer + ?Sized>(_visitor: &V, _alias: &mut Alias) {}
pub fn walk_string_literal<V: Transformer + ?Sized>(
    _visitor: &V,
    _string_literal: &mut StringLiteral,
) {
}
pub fn walk_bytes_literal<V: Transformer + ?Sized>(
    _visitor: &V,
    _bytes_literal: &mut BytesLiteral,
) {
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/token/tokens.rs | crates/ruff_python_ast/src/token/tokens.rs | use std::{iter::FusedIterator, ops::Deref};
use super::{Token, TokenKind};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::{Ranged as _, TextRange, TextSize};
/// Tokens represents a vector of lexed [`Token`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct Tokens {
    // Tokens in source order; kept private so slices are only handed out via
    // the offset-checked accessors below (and `Deref` for read-only access).
    raw: Vec<Token>,
}
impl Tokens {
    /// Creates a new [`Tokens`] from lexed tokens, assumed to be in source order.
    pub fn new(tokens: Vec<Token>) -> Tokens {
        Tokens { raw: tokens }
    }
    /// Returns an iterator over all the tokens that provides context.
    pub fn iter_with_context(&self) -> TokenIterWithContext<'_> {
        TokenIterWithContext::new(&self.raw)
    }
    /// Performs a binary search to find the index of the **first** token that starts at the given `offset`.
    ///
    /// Unlike `binary_search_by_key`, this method ensures that if multiple tokens start at the same offset,
    /// it returns the index of the first one. Multiple tokens can start at the same offset in cases where
    /// zero-length tokens are involved (like `Dedent` or `Newline` at the end of the file).
    pub fn binary_search_by_start(&self, offset: TextSize) -> Result<usize, usize> {
        // `partition_point` finds the first token whose start is >= `offset`;
        // that is the first candidate because tokens are in source order.
        let partition_point = self.partition_point(|token| token.start() < offset);
        let after = &self[partition_point..];
        if after.first().is_some_and(|first| first.start() == offset) {
            Ok(partition_point)
        } else {
            Err(partition_point)
        }
    }
    /// Returns a slice of [`Token`] that are within the given `range`.
    ///
    /// The start and end offset of the given range should be either:
    /// 1. Token boundary
    /// 2. Gap between the tokens
    ///
    /// For example, considering the following tokens and their corresponding range:
    ///
    /// | Token               | Range     |
    /// |---------------------|-----------|
    /// | `Def`               | `0..3`    |
    /// | `Name`              | `4..7`    |
    /// | `Lpar`              | `7..8`    |
    /// | `Rpar`              | `8..9`    |
    /// | `Colon`             | `9..10`   |
    /// | `Newline`           | `10..11`  |
    /// | `Comment`           | `15..24`  |
    /// | `NonLogicalNewline` | `24..25`  |
    /// | `Indent`            | `25..29`  |
    /// | `Pass`              | `29..33`  |
    ///
    /// Here, for (1) a token boundary is considered either the start or end offset of any of the
    /// above tokens. For (2), the gap would be any offset between the `Newline` and `Comment`
    /// token which are 12, 13, and 14.
    ///
    /// Examples:
    /// 1) `4..10` would give `Name`, `Lpar`, `Rpar`, `Colon`
    /// 2) `11..25` would give `Comment`, `NonLogicalNewline`
    /// 3) `12..25` would give same as (2) and offset 12 is in the "gap"
    /// 4) `9..12` would give `Colon`, `Newline` and offset 12 is in the "gap"
    /// 5) `18..27` would panic because both the start and end offset is within a token
    ///
    /// ## Note
    ///
    /// The returned slice can contain the [`TokenKind::Unknown`] token if there was a lexical
    /// error encountered within the given range.
    ///
    /// # Panics
    ///
    /// If either the start or end offset of the given range is within a token range.
    pub fn in_range(&self, range: TextRange) -> &[Token] {
        let tokens_after_start = self.after(range.start());
        Self::before_impl(tokens_after_start, range.end())
    }
    /// Searches the token(s) at `offset`.
    ///
    /// Returns [`TokenAt::Between`] if `offset` points directly in between two tokens
    /// (the left token ends at `offset` and the right token starts at `offset`).
    pub fn at_offset(&self, offset: TextSize) -> TokenAt {
        match self.binary_search_by_start(offset) {
            // The token at `index` starts exactly at `offset`.
            // ```python
            // object.attribute
            //       ^ OFFSET
            // ```
            Ok(index) => {
                let token = self[index];
                // `token` starts exactly at `offset`. Test if the offset is right between
                // `token` and the previous token (if there's any)
                if let Some(previous) = index.checked_sub(1).map(|idx| self[idx]) {
                    if previous.end() == offset {
                        return TokenAt::Between(previous, token);
                    }
                }
                TokenAt::Single(token)
            }
            // No token found that starts exactly at the given offset. But it's possible that
            // the token starting before `offset` fully encloses `offset` (its end range ends after `offset`).
            // ```python
            // object.attribute
            //          ^ OFFSET
            // # or
            // if True:
            //     print("test")
            //                  ^ OFFSET
            // ```
            Err(index) => {
                if let Some(previous) = index.checked_sub(1).map(|idx| self[idx]) {
                    if previous.range().contains_inclusive(offset) {
                        return TokenAt::Single(previous);
                    }
                }
                TokenAt::None
            }
        }
    }
    /// Returns a slice of tokens before the given [`TextSize`] offset.
    ///
    /// If the given offset is between two tokens, the returned slice will end just before the
    /// following token. In other words, if the offset is between the end of previous token and
    /// start of next token, the returned slice will end just before the next token.
    ///
    /// # Panics
    ///
    /// If the given offset is inside a token range at any point
    /// other than the start of the range.
    pub fn before(&self, offset: TextSize) -> &[Token] {
        Self::before_impl(&self.raw, offset)
    }
    // Shared implementation of `before`/`in_range`: every token that ends at or
    // before `offset` (asserting `offset` is not inside the last kept token).
    fn before_impl(tokens: &[Token], offset: TextSize) -> &[Token] {
        let partition_point = tokens.partition_point(|token| token.start() < offset);
        let before = &tokens[..partition_point];
        if let Some(last) = before.last() {
            // If it's equal to the end offset, then it's at a token boundary which is
            // valid. If it's greater than the end offset, then it's in the gap between
            // the tokens which is valid as well.
            assert!(
                offset >= last.end(),
                "Offset {offset:?} is inside token `{last:?}`",
            );
        }
        before
    }
    /// Returns a slice of tokens after the given [`TextSize`] offset.
    ///
    /// If the given offset is between two tokens, the returned slice will start from the following
    /// token. In other words, if the offset is between the end of previous token and start of next
    /// token, the returned slice will start from the next token.
    ///
    /// # Panics
    ///
    /// If the given offset is inside a token range at any point
    /// other than the start of the range.
    pub fn after(&self, offset: TextSize) -> &[Token] {
        let partition_point = self.partition_point(|token| token.end() <= offset);
        let after = &self[partition_point..];
        if let Some(first) = after.first() {
            // If it's equal to the start offset, then it's at a token boundary which is
            // valid. If it's less than the start offset, then it's in the gap between
            // the tokens which is valid as well.
            assert!(
                offset <= first.start(),
                "Offset {offset:?} is inside token `{first:?}`",
            );
        }
        after
    }
}
// Borrowing iteration delegates to the slice iterator obtained via `Deref`.
impl<'a> IntoIterator for &'a Tokens {
    type Item = &'a Token;
    type IntoIter = std::slice::Iter<'a, Token>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
// Expose all read-only slice methods (`len`, indexing, `partition_point`, …)
// directly on `Tokens`.
impl Deref for Tokens {
    type Target = [Token];
    fn deref(&self) -> &Self::Target {
        &self.raw
    }
}
/// A token that encloses a given offset or ends exactly at it.
#[derive(Debug, Clone)]
pub enum TokenAt {
    /// There's no token at the given offset
    None,
    /// There's a single token at the given offset.
    Single(Token),
    /// The offset falls exactly in between two tokens. E.g. `CURSOR` in `call<CURSOR>(arguments)` is
    /// positioned exactly between the `call` and `(` tokens.
    Between(Token, Token),
}
// Yields the enclosed token(s): nothing, one token, or — for a boundary pair —
// the left token followed by the right one.
impl Iterator for TokenAt {
    type Item = Token;
    fn next(&mut self) -> Option<Self::Item> {
        // Move the current state out (leaving `None` behind), then restore
        // whatever still remains to be yielded.
        match std::mem::replace(self, TokenAt::None) {
            TokenAt::None => None,
            TokenAt::Single(token) => Some(token),
            TokenAt::Between(first, second) => {
                *self = TokenAt::Single(second);
                Some(first)
            }
        }
    }
}
impl FusedIterator for TokenAt {}
// Collects the source ranges of all comment tokens, in source order.
impl From<&Tokens> for CommentRanges {
    fn from(tokens: &Tokens) -> Self {
        let ranges: Vec<_> = tokens
            .iter()
            .filter(|token| token.kind() == TokenKind::Comment)
            .map(|token| token.range())
            .collect();
        CommentRanges::new(ranges)
    }
}
/// An iterator over the [`Token`]s with context.
///
/// This struct is created by the [`iter_with_context`] method on [`Tokens`]. Refer to its
/// documentation for more details.
///
/// [`iter_with_context`]: Tokens::iter_with_context
#[derive(Debug, Clone)]
pub struct TokenIterWithContext<'a> {
    // Underlying token iterator.
    inner: std::slice::Iter<'a, Token>,
    // Current parenthesis/bracket/brace nesting depth, updated as tokens are
    // yielded.
    nesting: u32,
}
impl<'a> TokenIterWithContext<'a> {
    // Starts at the beginning of `tokens` with zero nesting.
    fn new(tokens: &'a [Token]) -> TokenIterWithContext<'a> {
        TokenIterWithContext {
            inner: tokens.iter(),
            nesting: 0,
        }
    }
    /// Return the nesting level the iterator is currently in.
    pub const fn nesting(&self) -> u32 {
        self.nesting
    }
    /// Returns `true` if the iterator is within a parenthesized context.
    pub const fn in_parenthesized_context(&self) -> bool {
        self.nesting > 0
    }
    /// Returns the next [`Token`] in the iterator without consuming it.
    pub fn peek(&self) -> Option<&'a Token> {
        // Cloning is cheap here: the state is a slice iterator plus a counter.
        self.clone().next()
    }
}
impl<'a> Iterator for TokenIterWithContext<'a> {
    type Item = &'a Token;
    fn next(&mut self) -> Option<Self::Item> {
        let token = self.inner.next()?;
        // Track bracket nesting as tokens stream by.
        match token.kind() {
            TokenKind::Lpar | TokenKind::Lbrace | TokenKind::Lsqb => self.nesting += 1,
            TokenKind::Rpar | TokenKind::Rbrace | TokenKind::Rsqb => {
                // `saturating_sub` tolerates unbalanced closing brackets.
                self.nesting = self.nesting.saturating_sub(1);
            }
            // This mimics the behavior of re-lexing which reduces the nesting level on the lexer.
            // We don't need to reduce it by 1 because unlike the lexer we see the final token
            // after recovering from every unclosed parenthesis.
            TokenKind::Newline if self.nesting > 0 => {
                self.nesting = 0;
            }
            _ => {}
        }
        Some(token)
    }
}
impl FusedIterator for TokenIterWithContext<'_> {}
#[cfg(test)]
mod tests {
use std::ops::Range;
use ruff_text_size::TextSize;
use crate::token::{Token, TokenFlags, TokenKind};
use super::*;
/// Test case containing a "gap" between two tokens.
///
/// Code: <https://play.ruff.rs/a3658340-6df8-42c5-be80-178744bf1193>
const TEST_CASE_WITH_GAP: [(TokenKind, Range<u32>); 10] = [
(TokenKind::Def, 0..3),
(TokenKind::Name, 4..7),
(TokenKind::Lpar, 7..8),
(TokenKind::Rpar, 8..9),
(TokenKind::Colon, 9..10),
(TokenKind::Newline, 10..11),
// Gap ||..||
(TokenKind::Comment, 15..24),
(TokenKind::NonLogicalNewline, 24..25),
(TokenKind::Indent, 25..29),
(TokenKind::Pass, 29..33),
// No newline at the end to keep the token set full of unique tokens
];
/// Helper function to create [`Tokens`] from an iterator of (kind, range).
fn new_tokens(tokens: impl Iterator<Item = (TokenKind, Range<u32>)>) -> Tokens {
Tokens::new(
tokens
.map(|(kind, range)| {
Token::new(
kind,
TextRange::new(TextSize::new(range.start), TextSize::new(range.end)),
TokenFlags::empty(),
)
})
.collect(),
)
}
#[test]
fn tokens_after_offset_at_token_start() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let after = tokens.after(TextSize::new(8));
assert_eq!(after.len(), 7);
assert_eq!(after.first().unwrap().kind(), TokenKind::Rpar);
}
#[test]
fn tokens_after_offset_at_token_end() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let after = tokens.after(TextSize::new(11));
assert_eq!(after.len(), 4);
assert_eq!(after.first().unwrap().kind(), TokenKind::Comment);
}
#[test]
fn tokens_after_offset_between_tokens() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let after = tokens.after(TextSize::new(13));
assert_eq!(after.len(), 4);
assert_eq!(after.first().unwrap().kind(), TokenKind::Comment);
}
#[test]
fn tokens_after_offset_at_last_token_end() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let after = tokens.after(TextSize::new(33));
assert_eq!(after.len(), 0);
}
#[test]
#[should_panic(expected = "Offset 5 is inside token `Name 4..7`")]
fn tokens_after_offset_inside_token() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
tokens.after(TextSize::new(5));
}
#[test]
fn tokens_before_offset_at_first_token_start() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(0));
assert_eq!(before.len(), 0);
}
#[test]
fn tokens_before_offset_after_first_token_gap() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(3));
assert_eq!(before.len(), 1);
assert_eq!(before.last().unwrap().kind(), TokenKind::Def);
}
#[test]
fn tokens_before_offset_at_second_token_start() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(4));
assert_eq!(before.len(), 1);
assert_eq!(before.last().unwrap().kind(), TokenKind::Def);
}
#[test]
fn tokens_before_offset_at_token_start() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(8));
assert_eq!(before.len(), 3);
assert_eq!(before.last().unwrap().kind(), TokenKind::Lpar);
}
#[test]
fn tokens_before_offset_at_token_end() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(11));
assert_eq!(before.len(), 6);
assert_eq!(before.last().unwrap().kind(), TokenKind::Newline);
}
#[test]
fn tokens_before_offset_between_tokens() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(13));
assert_eq!(before.len(), 6);
assert_eq!(before.last().unwrap().kind(), TokenKind::Newline);
}
#[test]
fn tokens_before_offset_at_last_token_end() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
let before = tokens.before(TextSize::new(33));
assert_eq!(before.len(), 10);
assert_eq!(before.last().unwrap().kind(), TokenKind::Pass);
}
#[test]
#[should_panic(expected = "Offset 5 is inside token `Name 4..7`")]
fn tokens_before_offset_inside_token() {
let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
tokens.before(TextSize::new(5));
}
#[test]
fn tokens_in_range_at_token_offset() {
    // Range boundaries that coincide with token boundaries select the
    // tokens fully contained in the range.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    let in_range = tokens.in_range(TextRange::new(4.into(), 10.into()));
    assert_eq!(in_range.len(), 4);
    assert_eq!(in_range.first().unwrap().kind(), TokenKind::Name);
    assert_eq!(in_range.last().unwrap().kind(), TokenKind::Colon);
}

#[test]
fn tokens_in_range_start_offset_at_token_end() {
    // A start offset at a token's end excludes that token.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    let in_range = tokens.in_range(TextRange::new(11.into(), 29.into()));
    assert_eq!(in_range.len(), 3);
    assert_eq!(in_range.first().unwrap().kind(), TokenKind::Comment);
    assert_eq!(in_range.last().unwrap().kind(), TokenKind::Indent);
}

#[test]
fn tokens_in_range_end_offset_at_token_start() {
    // An end offset at a token's start excludes that token.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    let in_range = tokens.in_range(TextRange::new(8.into(), 15.into()));
    assert_eq!(in_range.len(), 3);
    assert_eq!(in_range.first().unwrap().kind(), TokenKind::Rpar);
    assert_eq!(in_range.last().unwrap().kind(), TokenKind::Newline);
}

#[test]
fn tokens_in_range_start_offset_between_tokens() {
    // A start offset in a gap behaves like the next token's start:
    // same result as starting at offset 11 above.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    let in_range = tokens.in_range(TextRange::new(13.into(), 29.into()));
    assert_eq!(in_range.len(), 3);
    assert_eq!(in_range.first().unwrap().kind(), TokenKind::Comment);
    assert_eq!(in_range.last().unwrap().kind(), TokenKind::Indent);
}

#[test]
fn tokens_in_range_end_offset_between_tokens() {
    // An end offset in a gap behaves like the previous token's end.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    let in_range = tokens.in_range(TextRange::new(9.into(), 13.into()));
    assert_eq!(in_range.len(), 2);
    assert_eq!(in_range.first().unwrap().kind(), TokenKind::Colon);
    assert_eq!(in_range.last().unwrap().kind(), TokenKind::Newline);
}

#[test]
#[should_panic(expected = "Offset 5 is inside token `Name 4..7`")]
fn tokens_in_range_start_offset_inside_token() {
    // A range start strictly inside a token is invalid and must panic.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    tokens.in_range(TextRange::new(5.into(), 10.into()));
}

#[test]
#[should_panic(expected = "Offset 6 is inside token `Name 4..7`")]
fn tokens_in_range_end_offset_inside_token() {
    // A range end strictly inside a token is invalid and must panic.
    let tokens = new_tokens(TEST_CASE_WITH_GAP.into_iter());
    tokens.in_range(TextRange::new(0.into(), 6.into()));
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast/src/token/parentheses.rs | crates/ruff_python_ast/src/token/parentheses.rs | use ruff_text_size::{Ranged, TextLen, TextRange};
use super::{TokenKind, Tokens};
use crate::{AnyNodeRef, ExprRef};
/// Returns an iterator over the ranges of the optional parentheses surrounding an expression.
///
/// The ranges are yielded from innermost to outermost: for `((f()))` with `f()` as the
/// expression, the iterator produces (1, 6) and then (0, 7).
///
/// Without a `parent`, the result can be inaccurate: for `f(a)` with `a` as the expression,
/// a parentheses pair around `a` is falsely reported even though the parentheses belong to
/// the call `f`. Prefer [`parenthesized_range`] whenever a parent node is available.
pub fn parentheses_iterator<'a>(
    expr: ExprRef<'a>,
    parent: Option<AnyNodeRef>,
    tokens: &'a Tokens,
) -> impl Iterator<Item = TextRange> + 'a {
    let trailing = match parent {
        Some(parent) => {
            // A parent that brings its own parentheses (e.g. `Arguments` for a call like
            // `func(x)`) owns the trailing `)`; exclude it from the search range so we
            // don't mistake it for a parenthesis around the expression.
            let search_end = if parent.is_arguments() {
                parent.end() - ")".text_len()
            } else {
                parent.end()
            };
            tokens.in_range(TextRange::new(expr.end(), search_end))
        }
        None => tokens.after(expr.end()),
    };
    // Consecutive closing parentheses directly after the expression (trivia ignored).
    let closing = trailing
        .iter()
        .filter(|token| !token.kind().is_trivia())
        .take_while(|token| matches!(token.kind(), TokenKind::Rpar));
    // Consecutive opening parentheses directly before the expression, scanned backwards.
    let opening = tokens
        .before(expr.start())
        .iter()
        .rev()
        .filter(|token| !token.kind().is_trivia())
        .take_while(|token| matches!(token.kind(), TokenKind::Lpar));
    // Pair them up inside-out; `zip` stops at the shorter side, so unbalanced
    // parentheses never produce a range.
    closing
        .zip(opening)
        .map(|(close, open)| TextRange::new(open.start(), close.end()))
}
/// Returns the [`TextRange`] of a given expression including parentheses, if the expression is
/// parenthesized; or `None`, if the expression is not parenthesized.
pub fn parenthesized_range(
    expr: ExprRef,
    parent: AnyNodeRef,
    tokens: &Tokens,
) -> Option<TextRange> {
    // `parentheses_iterator` yields ranges innermost-first, so the last
    // item is the outermost (full) parenthesized extent.
    parentheses_iterator(expr, Some(parent), tokens).last()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_completion_eval/src/main.rs | crates/ty_completion_eval/src/main.rs | /*!
A simple command line tool for running a completion evaluation.
See `crates/ty_completion_eval/README.md` for examples and more docs.
*/
use std::io::Write;
use std::process::ExitCode;
use std::sync::LazyLock;
use anyhow::{Context, anyhow};
use clap::Parser;
use regex::bytes::Regex;
use ruff_db::files::system_path_to_file;
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf};
use ty_ide::Completion;
use ty_module_resolver::ModuleName;
use ty_project::metadata::Options;
use ty_project::metadata::options::EnvironmentOptions;
use ty_project::metadata::value::RelativePathBuf;
use ty_project::{ProjectDatabase, ProjectMetadata};
/// Top-level CLI definition for the completion evaluation tool.
#[derive(Debug, clap::Parser)]
#[command(
    author,
    name = "ty_completion_eval",
    // Fixed grammar: "Run a information" -> "Run an information".
    about = "Run an information retrieval evaluation on ty-powered completions."
)]
struct Cli {
    /// The subcommand to execute.
    #[command(subcommand)]
    command: Command,
}

#[derive(Debug, clap::Subcommand)]
enum Command {
    /// Run an evaluation on all tasks.
    All(AllCommand),
    /// Show the completions for a single task.
    ///
    /// This is useful for debugging one single completion task. For
    /// example, let's say you make a change to a ranking heuristic and
    /// everything looks good except for a few tasks where the rank for
    /// the expected answer regressed. Just use this command to run a
    /// specific task and you'll get the actual completions for that
    /// task printed to stdout.
    ///
    /// If the expected answer is found in the completion list, then
    /// it is marked with an `*` along with its rank.
    ShowOne(ShowOneCommand),
}
/// Arguments for the `all` subcommand: evaluate every task and report MRR.
#[derive(Debug, clap::Parser)]
struct AllCommand {
    /// The mean reciprocal rank threshold that the evaluation must
    /// meet or exceed in order for the evaluation to pass.
    #[arg(
        long,
        help = "The mean reciprocal rank threshold.",
        value_name = "FLOAT",
        default_value_t = 0.001
    )]
    threshold: f64,
    /// If given, a CSV file of the results for each individual task
    /// is written to the path given.
    #[arg(
        long,
        help = "When provided, write individual task results in CSV format.",
        value_name = "FILE"
    )]
    tasks: Option<String>,
    /// Whether to keep the temporary evaluation directory around
    /// after finishing or not. Keeping it around is useful for
    /// debugging when something has gone wrong.
    #[arg(
        long,
        help = "Whether to keep the temporary evaluation directory around or not."
    )]
    keep_tmp_dir: bool,
}

/// Arguments for the `show-one` subcommand: print completions for one task.
#[derive(Debug, clap::Parser)]
struct ShowOneCommand {
    /// The name of one or more completion tasks to run in isolation.
    ///
    /// The name corresponds to the name of a directory in
    /// `./crates/ty_completion_eval/truth/`.
    #[arg(help = "The task name to run.", value_name = "TASK_NAME")]
    task_name: String,
    /// The name of the file, relative to the root of the
    /// Python project, that contains one or more completion
    /// tasks to run in isolation.
    #[arg(long, help = "The file name to run.", value_name = "FILE_NAME")]
    file_name: Option<String>,
    /// The index of the cursor directive within `file_name`
    /// to select.
    #[arg(
        long,
        help = "The index of the cursor directive to run.",
        value_name = "INDEX"
    )]
    index: Option<usize>,
    /// Whether to keep the temporary evaluation directory around
    /// after finishing or not. Keeping it around is useful for
    /// debugging when something has gone wrong.
    #[arg(
        long,
        help = "Whether to keep the temporary evaluation directory around or not."
    )]
    keep_tmp_dir: bool,
}

impl ShowOneCommand {
    /// Returns true if `task_source` is the task directory named by this command.
    fn matches_source_task(&self, task_source: &TaskSource) -> bool {
        self.task_name == task_source.name
    }

    /// Returns true if `task` matches the task name and, when provided,
    /// the `--file-name` and `--index` filters (absent filters match anything).
    fn matches_task(&self, task: &Task) -> bool {
        self.task_name == task.name
            && self
                .file_name
                .as_ref()
                .is_none_or(|name| name == task.cursor_name())
            && self.index.is_none_or(|index| index == task.cursor.index)
    }
}
/// Entry point: parses the CLI, stages truth data into a temporary
/// directory, and dispatches to the `all` or `show-one` subcommand.
fn main() -> anyhow::Result<ExitCode> {
    let args = Cli::parse();
    // The base path to which all CLI arguments are relative to.
    let cwd = {
        let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
        SystemPathBuf::from_path_buf(cwd).map_err(|path| {
            anyhow!(
                "The current working directory `{}` contains non-Unicode characters. \
                 ty only supports Unicode paths.",
                path.display()
            )
        })?
    };
    // Where we store our truth data.
    let truth = cwd.join("crates").join("ty_completion_eval").join("truth");
    anyhow::ensure!(
        truth.as_std_path().exists(),
        "{truth} does not exist: ty's completion evaluation must be run from the root \
         of the ruff repository",
        truth = truth.as_std_path().display(),
    );
    // The temporary directory at which we copy our truth
    // data to. We do this because we can't use the truth
    // data as-is with its `<CURSOR>` annotations (and perhaps
    // any other future annotations we add).
    let mut tmp_eval_dir = tempfile::Builder::new()
        .prefix("ty-completion-eval-")
        .tempdir()
        .context("Failed to create temporary directory")?;
    let tmp_eval_path = SystemPath::from_std_path(tmp_eval_dir.path())
        .ok_or_else(|| {
            anyhow::anyhow!(
                "Temporary directory path is not valid UTF-8: {}",
                tmp_eval_dir.path().display()
            )
        })?
        .to_path_buf();
    let sources = TaskSource::all(&truth)?;
    match args.command {
        Command::ShowOne(ref cmd) => {
            // Optionally keep the temp dir around for debugging.
            tmp_eval_dir.disable_cleanup(cmd.keep_tmp_dir);
            let Some(source) = sources
                .iter()
                .find(|source| cmd.matches_source_task(source))
            else {
                anyhow::bail!("could not find task named `{}`", cmd.task_name);
            };
            let tasks = source.to_tasks(&tmp_eval_path)?;
            // `--file-name`/`--index` must narrow the match down to exactly one task.
            let matching: Vec<&Task> = tasks.iter().filter(|task| cmd.matches_task(task)).collect();
            anyhow::ensure!(
                !matching.is_empty(),
                "could not find any tasks matching the given criteria",
            );
            anyhow::ensure!(
                matching.len() < 2,
                "found more than one task matching the given criteria",
            );
            let task = &matching[0];
            let completions = task.completions()?;
            // Lock stdout once instead of per `write!` call.
            let mut stdout = std::io::stdout().lock();
            for (i, c) in completions.iter().enumerate() {
                write!(stdout, "{}", c.name.as_str())?;
                if let Some(module_name) = c.module_name {
                    write!(stdout, " (module: {module_name})")?;
                }
                // Mark the expected answer with `*` and its 1-based rank.
                if task.cursor.answer.matches(c) {
                    write!(stdout, " (*, {}/{})", i + 1, completions.len())?;
                }
                writeln!(stdout)?;
            }
            writeln!(stdout, "-----")?;
            writeln!(stdout, "found {} completions", completions.len())?;
            Ok(ExitCode::SUCCESS)
        }
        Command::All(AllCommand {
            threshold,
            tasks,
            keep_tmp_dir,
        }) => {
            tmp_eval_dir.disable_cleanup(keep_tmp_dir);
            let mut precision_sum = 0.0;
            let mut task_count = 0.0f64;
            // Optional CSV writer for per-task results (`--tasks`).
            let mut results_wtr = None;
            if let Some(ref tasks) = tasks {
                let mut wtr = csv::Writer::from_path(SystemPath::new(tasks))?;
                wtr.serialize(("name", "file", "index", "rank"))?;
                results_wtr = Some(wtr);
            }
            for source in &sources {
                for task in source.to_tasks(&tmp_eval_path)? {
                    task_count += 1.0;
                    let completions = task.completions()?;
                    let rank = task.rank(&completions)?;
                    // Reciprocal rank: 1/rank when found, 0 when missing.
                    precision_sum += rank.map(|rank| 1.0 / f64::from(rank)).unwrap_or(0.0);
                    if let Some(ref mut wtr) = results_wtr {
                        wtr.serialize((&task.name, &task.cursor_name(), task.cursor.index, rank))?;
                    }
                }
            }
            // Mean reciprocal rank across all tasks.
            let mrr = precision_sum / task_count;
            if let Some(ref mut wtr) = results_wtr {
                wtr.flush()?;
            }
            let mut out = std::io::stdout().lock();
            writeln!(out, "mean reciprocal rank: {mrr:.4}")?;
            // Pass/fail against the `--threshold` (MRR >= threshold passes).
            if mrr < threshold {
                writeln!(
                    out,
                    "Failure: MRR does not exceed minimum threshold of {threshold}"
                )?;
                Ok(ExitCode::FAILURE)
            } else {
                writeln!(out, "Success: MRR exceeds minimum threshold of {threshold}")?;
                Ok(ExitCode::SUCCESS)
            }
        }
    }
}
/// A single completion task.
///
/// The task is oriented in such a way that we have a single "cursor"
/// position in a Python project. This allows us to ask for completions
/// at that position.
struct Task {
    /// The project database for the copied Python project.
    db: ProjectDatabase,
    /// The root directory of the copied Python project.
    dir: SystemPathBuf,
    /// The task name (the base name of `dir`).
    name: String,
    /// The cursor position at which completions are requested.
    cursor: Cursor,
    /// Completion settings derived from the task's truth data.
    settings: ty_ide::CompletionSettings,
}
impl Task {
    /// Create a new task for the Python project at `project_path`.
    ///
    /// `truth` should correspond to the completion configuration and the
    /// expected answer for completions at the given `cursor` position.
    fn new(
        project_path: &SystemPath,
        truth: &CompletionTruth,
        cursor: Cursor,
    ) -> anyhow::Result<Task> {
        // The task name is the project directory's base name.
        let name = project_path.file_name().ok_or_else(|| {
            anyhow::anyhow!("project directory `{project_path}` does not contain a base name")
        })?;
        let system = OsSystem::new(project_path);
        let mut project_metadata = ProjectMetadata::discover(project_path, &system)?;
        // Explicitly point ty to the .venv to avoid any set VIRTUAL_ENV variable to take precedence.
        project_metadata.apply_options(Options {
            environment: Some(EnvironmentOptions {
                python: Some(RelativePathBuf::cli(".venv")),
                ..EnvironmentOptions::default()
            }),
            ..Options::default()
        });
        project_metadata.apply_configuration_files(&system)?;
        let db = ProjectDatabase::new(project_metadata, system)?;
        Ok(Task {
            db,
            dir: project_path.to_path_buf(),
            name: name.to_string(),
            cursor,
            settings: (&truth.settings).into(),
        })
    }

    /// Returns the rank of the expected answer in the completions
    /// given.
    ///
    /// The rank is the position (one indexed) at which the expected
    /// answer appears in the slice given, or `None` if the answer
    /// isn't found at all. A position of zero is maximally correct. A
    /// missing position is maximally wrong. Anything in the middle is
    /// a grey area with a lower rank being better.
    ///
    /// Because the rank is one indexed, if this returns a rank, then
    /// it is guaranteed to be non-zero.
    fn rank(&self, completions: &[Completion<'_>]) -> anyhow::Result<Option<u32>> {
        completions
            .iter()
            .position(|completion| self.cursor.answer.matches(completion))
            // Convert the 0-based position into a 1-based rank.
            .map(|rank| u32::try_from(rank + 1).context("rank of completion is too big"))
            .transpose()
    }

    /// Return completions for this task.
    fn completions(&self) -> anyhow::Result<Vec<Completion<'_>>> {
        let file = system_path_to_file(&self.db, &self.cursor.path)
            .with_context(|| format!("failed to get database file for `{}`", self.cursor.path))?;
        // `TextSize` is a 32-bit offset; very large files could overflow.
        let offset = ruff_text_size::TextSize::try_from(self.cursor.offset).with_context(|| {
            format!(
                "failed to convert `<CURSOR>` file offset `{}` to 32-bit integer",
                self.cursor.offset
            )
        })?;
        let completions = ty_ide::completion(&self.db, &self.settings, file, offset);
        Ok(completions)
    }

    /// Returns the file name, relative to this project's root
    /// directory, that contains the cursor directive that we
    /// are evaluating.
    fn cursor_name(&self) -> &str {
        self.cursor
            .path
            .strip_prefix(&self.dir)
            .expect("task directory is a parent of cursor")
            .as_str()
    }
}
impl std::fmt::Debug for Task {
    /// Manual `Debug` impl: `ProjectDatabase` has no `Debug`, so a
    /// placeholder string is rendered for the `db` field.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Bug fix: the struct name rendered in debug output was "Test";
        // it must match the actual type name, "Task".
        f.debug_struct("Task")
            .field("db", &"<ProjectDatabase>")
            .field("dir", &self.dir)
            .field("name", &self.name)
            .field("cursor", &self.cursor)
            .field("settings", &self.settings)
            .finish()
    }
}
/// Truth data for a single completion evaluation test.
#[derive(Debug, Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct CompletionTruth {
    /// Settings applied when requesting completions for this task.
    #[serde(default)]
    settings: CompletionSettings,
}

/// Settings to forward to our completion routine.
#[derive(Debug, Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct CompletionSettings {
    /// Whether auto-import completions are enabled (defaults to `false`).
    #[serde(default)]
    auto_import: bool,
}

impl From<&CompletionSettings> for ty_ide::CompletionSettings {
    // Converts the deserialized TOML settings into the settings type
    // consumed by `ty_ide::completion`.
    fn from(x: &CompletionSettings) -> ty_ide::CompletionSettings {
        ty_ide::CompletionSettings {
            auto_import: x.auto_import,
        }
    }
}

/// The "source" of a task, as found in ty's git repository.
#[derive(Debug)]
struct TaskSource {
    /// The directory containing this task.
    dir: SystemPathBuf,
    /// The name of this task (the basename of `dir`).
    name: String,
    /// The "truth" data for this task along with any
    /// settings. This is pulled from `{dir}/completion.toml`.
    truth: CompletionTruth,
}
impl TaskSource {
    /// Discovers all task sources: one per sub-directory of `src_dir`.
    fn all(src_dir: &SystemPath) -> anyhow::Result<Vec<TaskSource>> {
        let mut sources = vec![];
        let read_dir = src_dir
            .as_std_path()
            .read_dir()
            .with_context(|| format!("failed to read directory entries in `{src_dir}`"))?;
        for result in read_dir {
            let dent = result
                .with_context(|| format!("failed to get directory entry from `{src_dir}`"))?;
            let path = dent.path();
            // Only directories are tasks; skip stray files.
            if !path.is_dir() {
                continue;
            }
            let dir = SystemPath::from_std_path(&path).ok_or_else(|| {
                anyhow::anyhow!(
                    "truth source directory `{path}` contains invalid UTF-8",
                    path = path.display()
                )
            })?;
            sources.push(TaskSource::new(dir)?);
        }
        // Sort our sources so that we always run in the same order.
        // And also so that the CSV output is deterministic across
        // all platforms.
        sources.sort_by(|source1, source2| source1.name.cmp(&source2.name));
        Ok(sources)
    }

    /// Loads a single task source from `dir`, reading and parsing its
    /// `completion.toml` truth data.
    fn new(dir: &SystemPath) -> anyhow::Result<TaskSource> {
        let name = dir.file_name().ok_or_else(|| {
            anyhow::anyhow!("truth source directory `{dir}` does not contain a base name")
        })?;
        let truth_path = dir.join("completion.toml");
        let truth_data = std::fs::read(truth_path.as_std_path())
            .with_context(|| format!("failed to read truth data at `{truth_path}`"))?;
        let truth = toml::from_slice(&truth_data).with_context(|| {
            format!("failed to parse TOML completion truth data from `{truth_path}`")
        })?;
        Ok(TaskSource {
            dir: dir.to_path_buf(),
            name: name.to_string(),
            truth,
        })
    }

    /// Convert this "source" task (from the Ruff repository) into
    /// one or more evaluation tasks within a single Python project.
    /// Exactly one task is created for each cursor directive found in
    /// this source task.
    ///
    /// This includes running `uv sync` to set up a full virtual
    /// environment.
    fn to_tasks(&self, parent_dst_dir: &SystemPath) -> anyhow::Result<Vec<Task>> {
        let dir = parent_dst_dir.join(&self.name);
        // Copying also strips `<CURSOR>` directives and records their positions.
        let cursors = copy_project(&self.dir, &dir)?;
        let uv_sync_output = std::process::Command::new("uv")
            .arg("sync")
            .current_dir(dir.as_std_path())
            .output()
            .with_context(|| format!("failed to run `uv sync` in `{dir}`"))?;
        if !uv_sync_output.status.success() {
            // A missing exit code (e.g. killed by a signal) is reported as UNKNOWN.
            let code = uv_sync_output
                .status
                .code()
                .map(|code| code.to_string())
                .unwrap_or_else(|| "UNKNOWN".to_string());
            let stderr = bstr::BStr::new(&uv_sync_output.stderr);
            anyhow::bail!("`uv sync` failed to run with exit code `{code}`, stderr: {stderr}")
        }
        cursors
            .into_iter()
            .map(|cursor| Task::new(&dir, &self.truth, cursor))
            .collect()
    }
}
/// A single cursor directive within a single Python project.
///
/// Each cursor directive looks like:
/// `<CURSOR [expected-module.]expected-symbol>`.
///
/// That is, each cursor directive corresponds to a single completion
/// request, and each request is a single evaluation task.
#[derive(Clone, Debug)]
struct Cursor {
    /// The path to the file containing this directive.
    path: SystemPathBuf,
    /// The index (starting at 0) of this cursor directive
    /// within `path`.
    index: usize,
    /// The byte offset at which this cursor was located
    /// within `path`.
    offset: usize,
    /// The expected symbol (and optionally module) for this
    /// completion request.
    answer: CompletionAnswer,
}

/// The answer for a single completion request.
#[derive(Clone, Debug, Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct CompletionAnswer {
    /// The expected completion symbol name.
    symbol: String,
    /// The expected module the symbol comes from, if any.
    module: Option<String>,
}
impl CompletionAnswer {
/// Returns true when this answer matches the completion given.
fn matches(&self, completion: &Completion) -> bool {
if let Some(ref qualified) = completion.qualified {
if qualified.as_str() == self.qualified() {
return true;
}
}
self.symbol == completion.name.as_str()
&& self.module.as_deref() == completion.module_name.map(ModuleName::as_str)
}
fn qualified(&self) -> String {
self.module
.as_ref()
.map(|module| format!("{module}.{}", self.symbol))
.unwrap_or_else(|| self.symbol.clone())
}
}
/// Copy the Python project from `src_dir` to `dst_dir`.
///
/// This also looks for occurrences of cursor directives among the
/// project files and returns them. The original cursor directives are
/// deleted.
///
/// Hidden files or directories are skipped.
///
/// # Errors
///
/// Any underlying I/O errors are bubbled up. Also, if no cursor
/// directives are found, then an error is returned. This guarantees
/// that the `Vec<Cursor>` is always non-empty.
fn copy_project(src_dir: &SystemPath, dst_dir: &SystemPath) -> anyhow::Result<Vec<Cursor>> {
    std::fs::create_dir_all(dst_dir).with_context(|| dst_dir.to_string())?;
    let mut cursors = vec![];
    for result in walkdir::WalkDir::new(src_dir.as_std_path()) {
        let dent =
            result.with_context(|| format!("failed to get directory entry from {src_dir}"))?;
        // Skip hidden entries (e.g. `.git`, `.venv`).
        if dent
            .file_name()
            .to_str()
            .is_some_and(|name| name.starts_with('.'))
        {
            continue;
        }
        let src = SystemPath::from_std_path(dent.path()).ok_or_else(|| {
            anyhow::anyhow!("path `{}` is not valid UTF-8", dent.path().display())
        })?;
        // Mirror the path relative to `src_dir` beneath `dst_dir`.
        // (Removed a stale commented-out variant that used only the basename.)
        let name = src
            .strip_prefix(src_dir)
            .expect("descendent of `src_dir` must start with `src`");
        let dst = dst_dir.join(name);
        if dent.file_type().is_dir() {
            std::fs::create_dir_all(dst.as_std_path())
                .with_context(|| format!("failed to create directory `{dst}`"))?;
        } else {
            cursors.extend(copy_file(src, &dst)?);
        }
    }
    anyhow::ensure!(
        !cursors.is_empty(),
        "could not find any `<CURSOR>` directives in any of the files in `{src_dir}`",
    );
    Ok(cursors)
}
/// Copies `src` to `dst` while looking for cursor directives.
///
/// Each cursor directive looks like:
/// `<CURSOR [expected-module.]expected-symbol>`.
///
/// When occurrences of cursor directives are found, then they are
/// replaced with the empty string. The position of each occurrence is
/// recorded, which points to the correct place in a document where all
/// cursor directives are omitted.
///
/// # Errors
///
/// When an underlying I/O error occurs.
fn copy_file(src: &SystemPath, dst: &SystemPath) -> anyhow::Result<Vec<Cursor>> {
    static RE: LazyLock<Regex> = LazyLock::new(|| {
        // Our module/symbol identifier regex here is certainly more
        // permissive than necessary, but I think that should be fine
        // for this silly little syntax. ---AG
        Regex::new(r"<CURSOR:\s*(?:(?<module>[\S--.]+)\.)?(?<symbol>[\S--.]+)>").unwrap()
    });
    let src_data =
        std::fs::read(src).with_context(|| format!("failed to read `{src}` for copying"))?;
    let mut cursors = vec![];
    // The new data, without cursor directives.
    let mut new = Vec::with_capacity(src_data.len());
    // An index into `src_data` corresponding to either the start of
    // the data or the end of the previous cursor directive that we
    // found.
    let mut prev_match_end = 0;
    // The total bytes removed so far by replacing cursor directives
    // with empty strings.
    let mut bytes_removed = 0;
    for (index, caps) in RE.captures_iter(&src_data).enumerate() {
        let overall = caps.get(0).expect("zeroth group is always available");
        new.extend_from_slice(&src_data[prev_match_end..overall.start()]);
        prev_match_end = overall.end();
        // Adjust the offset so it points into the directive-free output.
        let offset = overall.start() - bytes_removed;
        bytes_removed += overall.len();
        // Bug fix: `.context("…{src}…")` does NOT interpolate — the literal
        // `{src}` appeared in the error message. Use `with_context` + `format!`.
        let symbol = str::from_utf8(&caps["symbol"])
            .with_context(|| {
                format!("expected symbol in cursor directive in `{src}` is not valid UTF-8")
            })?
            .to_string();
        let module = caps
            .name("module")
            .map(|module| {
                str::from_utf8(module.as_bytes()).with_context(|| {
                    format!("expected module in cursor directive in `{src}` is not valid UTF-8")
                })
            })
            .transpose()?
            .map(ToString::to_string);
        let answer = CompletionAnswer { symbol, module };
        cursors.push(Cursor {
            path: dst.to_path_buf(),
            index,
            offset,
            answer,
        });
    }
    // Append everything after the final directive (or the whole file if none).
    new.extend_from_slice(&src_data[prev_match_end..]);
    std::fs::write(dst, &new)
        .with_context(|| format!("failed to write contents of `{src}` to `{dst}`"))?;
    Ok(cursors)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_options_metadata/src/lib.rs | crates/ruff_options_metadata/src/lib.rs | use std::fmt::{Debug, Display, Formatter};
/// Visits [`OptionsMetadata`].
///
/// An instance of [`Visit`] represents the logic for inspecting an object's options metadata.
pub trait Visit {
    /// Visits an [`OptionField`] value named `name`.
    fn record_field(&mut self, name: &str, field: OptionField);

    /// Visits an [`OptionSet`] value named `name`.
    fn record_set(&mut self, name: &str, group: OptionSet);
}

/// Returns metadata for its options.
pub trait OptionsMetadata {
    /// Visits the options metadata of this object by calling `visit` for each option.
    fn record(visit: &mut dyn Visit);

    /// Returns the documentation for this object, if any. Defaults to `None`.
    fn documentation() -> Option<&'static str> {
        None
    }

    /// Returns the extracted metadata.
    fn metadata() -> OptionSet
    where
        Self: Sized + 'static,
    {
        OptionSet::of::<Self>()
    }
}
// `Option<T>` is transparent for metadata purposes: recording an optional
// options group records the underlying type's options.
impl<T> OptionsMetadata for Option<T>
where
    T: OptionsMetadata,
{
    fn record(visit: &mut dyn Visit) {
        T::record(visit);
    }
}
/// Metadata of an option that can either be a [`OptionField`] or [`OptionSet`].
#[derive(Clone, Debug)]
// Untagged: an entry serializes as the field's or set's own shape directly.
#[cfg_attr(feature = "serde", derive(::serde::Serialize), serde(untagged))]
pub enum OptionEntry {
    /// A single option.
    Field(OptionField),
    /// A set of options.
    Set(OptionSet),
}
impl OptionEntry {
    /// Unwraps the entry into a single field; returns `None` for option sets.
    pub fn into_field(self) -> Option<OptionField> {
        if let OptionEntry::Field(field) = self {
            Some(field)
        } else {
            None
        }
    }
}
impl Display for OptionEntry {
    /// Delegates to the wrapped value's own `Display` implementation.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            OptionEntry::Field(field) => Display::fmt(field, f),
            OptionEntry::Set(set) => Display::fmt(set, f),
        }
    }
}
/// A set of options.
///
/// It extracts the options by calling the [`OptionsMetadata::record`] of a type implementing
/// [`OptionsMetadata`].
#[derive(Copy, Clone)]
pub struct OptionSet {
    // Function pointer to the type's `record` implementation; erases the
    // concrete `OptionsMetadata` type while keeping `OptionSet` `Copy`.
    record: fn(&mut dyn Visit),
    // Function pointer to the type's `documentation` implementation.
    doc: fn() -> Option<&'static str>,
}
impl OptionSet {
/// Creates an `OptionSet` from a type's `record`/`documentation` functions.
pub fn of<T>() -> Self
where
    T: OptionsMetadata + 'static,
{
    Self {
        record: T::record,
        doc: T::documentation,
    }
}

/// Visits the options in this set by calling `visit` for each option.
pub fn record(&self, visit: &mut dyn Visit) {
    let record = self.record;
    record(visit);
}

/// Returns the documentation of the underlying options type, if any.
pub fn documentation(&self) -> Option<&'static str> {
    let documentation = self.doc;
    documentation()
}
/// Returns `true` if this set has an option that resolves to `name`.
///
/// The name can be separated by `.` to find a nested option.
///
/// ## Examples
///
/// ### Test for the existence of a child option
///
/// ```rust
/// # use ruff_options_metadata::{OptionField, OptionsMetadata, Visit};
///
/// struct WithOptions;
///
/// impl OptionsMetadata for WithOptions {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("ignore-git-ignore", OptionField {
/// doc: "Whether Ruff should respect the gitignore file",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None,
/// });
/// }
/// }
///
/// assert!(WithOptions::metadata().has("ignore-git-ignore"));
/// assert!(!WithOptions::metadata().has("does-not-exist"));
/// ```
/// ### Test for the existence of a nested option
///
/// ```rust
/// # use ruff_options_metadata::{OptionField, OptionsMetadata, Visit};
///
/// struct Root;
///
/// impl OptionsMetadata for Root {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("ignore-git-ignore", OptionField {
/// doc: "Whether Ruff should respect the gitignore file",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None
/// });
///
/// visit.record_set("format", Nested::metadata());
/// }
/// }
///
/// struct Nested;
///
/// impl OptionsMetadata for Nested {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("hard-tabs", OptionField {
/// doc: "Use hard tabs for indentation and spaces for alignment.",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None
/// });
/// }
/// }
///
/// assert!(Root::metadata().has("format.hard-tabs"));
/// assert!(!Root::metadata().has("format.spaces"));
/// assert!(!Root::metadata().has("lint.hard-tabs"));
/// ```
pub fn has(&self, name: &str) -> bool {
    // Delegates to `find`; see `find` for the dotted-name resolution rules.
    self.find(name).is_some()
}
/// Returns `Some` if this set has an option that resolves to `name` and `None` otherwise.
///
/// The name can be separated by `.` to find a nested option.
///
/// ## Examples
///
/// ### Find a child option
///
/// ```rust
/// # use ruff_options_metadata::{OptionEntry, OptionField, OptionsMetadata, Visit};
///
/// struct WithOptions;
///
/// static IGNORE_GIT_IGNORE: OptionField = OptionField {
/// doc: "Whether Ruff should respect the gitignore file",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None
/// };
///
/// impl OptionsMetadata for WithOptions {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("ignore-git-ignore", IGNORE_GIT_IGNORE.clone());
/// }
/// }
///
/// assert_eq!(WithOptions::metadata().find("ignore-git-ignore").and_then(OptionEntry::into_field), Some(IGNORE_GIT_IGNORE.clone()));
/// assert!(WithOptions::metadata().find("does-not-exist").is_none());
/// ```
/// ### Find a nested option
///
/// ```rust
/// # use ruff_options_metadata::{OptionEntry, OptionField, OptionsMetadata, Visit};
///
/// static HARD_TABS: OptionField = OptionField {
/// doc: "Use hard tabs for indentation and spaces for alignment.",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None
/// };
///
/// struct Root;
///
/// impl OptionsMetadata for Root {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("ignore-git-ignore", OptionField {
/// doc: "Whether Ruff should respect the gitignore file",
/// default: "false",
/// value_type: "bool",
/// example: "",
/// scope: None,
/// deprecated: None
/// });
///
/// visit.record_set("format", Nested::metadata());
/// }
/// }
///
/// struct Nested;
///
/// impl OptionsMetadata for Nested {
/// fn record(visit: &mut dyn Visit) {
/// visit.record_field("hard-tabs", HARD_TABS.clone());
/// }
/// }
///
/// assert_eq!(Root::metadata().find("format.hard-tabs").and_then(OptionEntry::into_field), Some(HARD_TABS.clone()));
/// assert!(matches!(Root::metadata().find("format"), Some(OptionEntry::Set(_))));
/// assert!(Root::metadata().find("format.spaces").is_none());
/// assert!(Root::metadata().find("lint.hard-tabs").is_none());
/// ```
pub fn find(&self, name: &str) -> Option<OptionEntry> {
    /// Visitor that walks the option tree, descending one `.`-separated
    /// name segment at a time until the needle is fully consumed.
    struct FindOptionVisitor<'a> {
        /// The match, once found; stops further recording from overwriting it.
        option: Option<OptionEntry>,
        /// The remaining (not yet matched) name segments.
        parts: std::str::Split<'a, char>,
        /// The segment currently being matched.
        needle: &'a str,
    }

    impl Visit for FindOptionVisitor<'_> {
        fn record_set(&mut self, name: &str, set: OptionSet) {
            // Guard clause instead of nested `if`s (clippy::collapsible_if).
            if self.option.is_some() || name != self.needle {
                return;
            }
            if let Some(next) = self.parts.next() {
                // More segments remain: descend into the nested set.
                self.needle = next;
                set.record(self);
            } else {
                // The needle is exhausted: the set itself is the answer.
                self.option = Some(OptionEntry::Set(set));
            }
        }

        fn record_field(&mut self, name: &str, field: OptionField) {
            // A field only matches when it consumes the final segment.
            // Note: `self.parts.next()` is only evaluated (and consumed)
            // when the name matches, exactly as before.
            if self.option.is_none() && name == self.needle && self.parts.next().is_none() {
                self.option = Some(OptionEntry::Field(field));
            }
        }
    }

    let mut parts = name.split('.');
    // `split` always yields at least one item (possibly ""), so this
    // never short-circuits; the previous `else { None }` branch was dead.
    let first = parts.next()?;
    let mut visitor = FindOptionVisitor {
        parts,
        needle: first,
        option: None,
    };
    self.record(&mut visitor);
    visitor.option
}
/// Collects all direct fields of this set as `(name, field)` pairs.
pub fn collect_fields(&self) -> Vec<(String, OptionField)> {
    // Visitor that accumulates fields and ignores nested sets.
    struct FieldsCollector(Vec<(String, OptionField)>);

    impl Visit for FieldsCollector {
        fn record_field(&mut self, name: &str, field: OptionField) {
            self.0.push((name.to_string(), field));
        }

        // Nested sets are deliberately not descended into.
        fn record_set(&mut self, _name: &str, _group: OptionSet) {}
    }

    let mut visitor = FieldsCollector(vec![]);
    self.record(&mut visitor);
    visitor.0
}
}
/// Visitor that writes out the names of all fields and sets.
struct DisplayVisitor<'fmt, 'buf> {
    // The formatter the names are written to.
    f: &'fmt mut Formatter<'buf>,
    // Accumulated write result; once an `Err` occurs it is kept
    // (subsequent visits are chained via `and_then` and skipped).
    result: std::fmt::Result,
}

impl<'fmt, 'buf> DisplayVisitor<'fmt, 'buf> {
    fn new(f: &'fmt mut Formatter<'buf>) -> Self {
        Self { f, result: Ok(()) }
    }

    /// Returns the accumulated write result.
    fn finish(self) -> std::fmt::Result {
        self.result
    }
}
impl Visit for DisplayVisitor<'_, '_> {
    fn record_set(&mut self, name: &str, _: OptionSet) {
        // `and_then` preserves the first error and skips later writes.
        self.result = self.result.and_then(|()| writeln!(self.f, "{name}"));
    }

    fn record_field(&mut self, name: &str, field: OptionField) {
        self.result = self.result.and_then(|()| {
            write!(self.f, "{name}")?;
            // Deprecated options are flagged inline.
            if field.deprecated.is_some() {
                write!(self.f, " (deprecated)")?;
            }
            writeln!(self.f)
        });
    }
}
impl Display for OptionSet {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut visitor = DisplayVisitor::new(f);
self.record(&mut visitor);
visitor.finish()
}
}
impl Debug for OptionSet {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
Display::fmt(self, f)
}
}
#[derive(Debug, Eq, PartialEq, Clone)]
#[cfg_attr(feature = "serde", derive(::serde::Serialize))]
pub struct OptionField {
pub doc: &'static str,
/// Ex) `"false"`
pub default: &'static str,
/// Ex) `"bool"`
pub value_type: &'static str,
/// Ex) `"per-file-ignores"`
pub scope: Option<&'static str>,
pub example: &'static str,
pub deprecated: Option<Deprecated>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(::serde::Serialize))]
pub struct Deprecated {
pub since: Option<&'static str>,
pub message: Option<&'static str>,
}
impl Display for OptionField {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{}", self.doc)?;
writeln!(f)?;
writeln!(f, "Default value: {}", self.default)?;
writeln!(f, "Type: {}", self.value_type)?;
if let Some(deprecated) = &self.deprecated {
write!(f, "Deprecated")?;
if let Some(since) = deprecated.since {
write!(f, " (since {since})")?;
}
if let Some(message) = deprecated.message {
write!(f, ": {message}")?;
}
writeln!(f)?;
}
writeln!(f, "Example usage:\n```toml\n{}\n```", self.example)
}
}
#[cfg(feature = "serde")]
mod serde {
use super::{OptionField, OptionSet, Visit};
use serde::{Serialize, Serializer};
use std::collections::BTreeMap;
impl Serialize for OptionSet {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut entries = BTreeMap::new();
let mut visitor = SerializeVisitor {
entries: &mut entries,
};
self.record(&mut visitor);
entries.serialize(serializer)
}
}
struct SerializeVisitor<'a> {
entries: &'a mut BTreeMap<String, OptionField>,
}
impl Visit for SerializeVisitor<'_> {
fn record_set(&mut self, name: &str, set: OptionSet) {
// Collect the entries of the set.
let mut entries = BTreeMap::new();
let mut visitor = SerializeVisitor {
entries: &mut entries,
};
set.record(&mut visitor);
// Insert the set into the entries.
for (key, value) in entries {
self.entries.insert(format!("{name}.{key}"), value);
}
}
fn record_field(&mut self, name: &str, field: OptionField) {
self.entries.insert(name.to_string(), field);
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/tools/crate-hierarchy-viz/src/main.rs | tools/crate-hierarchy-viz/src/main.rs | use anyhow::{Context, Result, anyhow};
use clap::{Parser, ValueEnum};
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use std::process::Command;
#[derive(Debug, Clone, ValueEnum)]
enum OutputFormat {
/// Output DOT format (GraphViz)
Dot,
/// Output PNG image (requires GraphViz)
Png,
/// Output SVG image (requires GraphViz)
Svg,
}
#[derive(Parser)]
#[command(name = "crate-hierarchy-viz")]
#[command(about = "Visualize the crate hierarchy in the Graphite workspace")]
struct Args {
/// Workspace root directory (defaults to current directory)
#[arg(short, long)]
workspace: Option<PathBuf>,
/// Output file (defaults to stdout for DOT format, required for PNG/SVG)
#[arg(short, long)]
output: Option<PathBuf>,
/// Output format
#[arg(short, long, value_enum, default_value = "dot")]
format: OutputFormat,
}
#[derive(Debug, Deserialize)]
struct WorkspaceToml {
workspace: WorkspaceConfig,
}
#[derive(Debug, Deserialize)]
struct WorkspaceConfig {
members: Vec<String>,
dependencies: Option<HashMap<String, WorkspaceDependency>>,
}
/// Represents a workspace-level dependency in Cargo.toml
/// The Simple variant's String is needed for serde deserialization but never read directly
#[derive(Debug, Deserialize)]
#[serde(untagged)]
#[allow(dead_code)]
enum WorkspaceDependency {
Simple(String),
Detailed {
#[serde(flatten)]
_other: HashMap<String, toml::Value>,
},
}
#[derive(Debug, Deserialize)]
struct CrateToml {
package: PackageConfig,
dependencies: Option<HashMap<String, CrateDependency>>,
}
#[derive(Debug, Deserialize)]
struct PackageConfig {
name: String,
}
/// Represents a crate-level dependency in Cargo.toml
/// The Simple variant's String is needed for serde deserialization but never read directly
#[derive(Debug, Deserialize)]
#[serde(untagged)]
#[allow(dead_code)]
enum CrateDependency {
Simple(String),
Detailed {
path: Option<String>,
workspace: Option<bool>,
#[serde(flatten)]
other: HashMap<String, toml::Value>,
},
}
#[derive(Debug, Clone, PartialEq)]
struct CrateInfo {
name: String,
path: PathBuf,
dependencies: Vec<String>,
external_dependencies: Vec<String>,
}
/// Remove transitive dependencies from the crate list.
/// If A depends on B and C, and B depends on C, then A->C is removed.
fn remove_transitive_dependencies(crates: &mut [CrateInfo]) {
// Build a map from crate name to its dependencies for quick lookup
let dep_map: HashMap<String, HashSet<String>> = crates.iter().map(|c| (c.name.clone(), c.dependencies.iter().cloned().collect())).collect();
// For each crate, compute which dependencies are reachable through other dependencies
for crate_info in crates.iter_mut() {
let mut transitive_deps = HashSet::new();
// For each direct dependency, find all its transitive dependencies
for direct_dep in &crate_info.dependencies {
// Recursively collect all transitive dependencies of this direct dependency
let mut visited = HashSet::new();
collect_all_dependencies(direct_dep, &dep_map, &mut visited);
// Remove the direct dependency itself from the visited set
visited.remove(direct_dep);
transitive_deps.extend(visited);
}
// Remove dependencies that are transitive
crate_info.dependencies.retain(|dep| !transitive_deps.contains(dep));
}
}
/// Recursively collect all dependencies of a crate
fn collect_all_dependencies(crate_name: &str, dep_map: &HashMap<String, HashSet<String>>, visited: &mut HashSet<String>) {
if !visited.insert(crate_name.to_string()) {
return; // Already visited, avoid cycles
}
if let Some(deps) = dep_map.get(crate_name) {
for dep in deps {
collect_all_dependencies(dep, dep_map, visited);
}
}
}
fn main() -> Result<()> {
let args = Args::parse();
let workspace_root = args.workspace.unwrap_or_else(|| std::env::current_dir().unwrap());
let workspace_toml_path = workspace_root.join("Cargo.toml");
// Parse workspace Cargo.toml
let workspace_content = fs::read_to_string(&workspace_toml_path).with_context(|| format!("Failed to read {:?}", workspace_toml_path))?;
let workspace_toml: WorkspaceToml = toml::from_str(&workspace_content).with_context(|| "Failed to parse workspace Cargo.toml")?;
// Get workspace dependencies (external crates defined at workspace level)
let workspace_deps: HashSet<String> = workspace_toml.workspace.dependencies.unwrap_or_default().keys().cloned().collect();
// Parse each member crate and build name mapping
let mut crates = Vec::new();
let mut workspace_crate_names = HashSet::new();
// First pass: collect all workspace crate names
for member in &workspace_toml.workspace.members {
let crate_path = workspace_root.join(member);
let cargo_toml_path = crate_path.join("Cargo.toml");
if !cargo_toml_path.exists() {
eprintln!("Warning: Cargo.toml not found for member: {}", member);
continue;
}
let crate_content = fs::read_to_string(&cargo_toml_path).with_context(|| format!("Failed to read {:?}", cargo_toml_path))?;
let crate_toml: CrateToml = toml::from_str(&crate_content).with_context(|| format!("Failed to parse Cargo.toml for {}", member))?;
workspace_crate_names.insert(crate_toml.package.name.clone());
}
// Second pass: parse dependencies now that we know all workspace crate names
for member in &workspace_toml.workspace.members {
let crate_path = workspace_root.join(member);
let cargo_toml_path = crate_path.join("Cargo.toml");
if !cargo_toml_path.exists() {
continue;
}
let crate_content = fs::read_to_string(&cargo_toml_path).with_context(|| format!("Failed to read {:?}", cargo_toml_path))?;
let crate_toml: CrateToml = toml::from_str(&crate_content).with_context(|| format!("Failed to parse Cargo.toml for {}", member))?;
let mut dependencies = Vec::new();
let mut external_dependencies = Vec::new();
if let Some(deps) = &crate_toml.dependencies {
for (dep_name, dep_config) in deps {
let is_workspace_crate = workspace_crate_names.contains(dep_name);
let is_workspace_dep = workspace_deps.contains(dep_name);
let is_local_dep = match dep_config {
CrateDependency::Detailed { workspace: Some(true), .. } => is_workspace_dep,
CrateDependency::Detailed { path: Some(_), .. } => true,
CrateDependency::Simple(_) => is_workspace_dep,
_ => false,
};
// Check if this dependency has a different package name
let actual_dep_name = match dep_config {
CrateDependency::Detailed { other, .. } => {
// Check if there's a "package" field that renames the dependency
if let Some(toml::Value::String(package_name)) = other.get("package") {
package_name.clone()
} else {
dep_name.clone()
}
}
_ => dep_name.clone(),
};
let is_actual_workspace_crate = workspace_crate_names.contains(&actual_dep_name);
if is_workspace_crate || is_actual_workspace_crate || is_local_dep {
dependencies.push(actual_dep_name);
} else {
external_dependencies.push(actual_dep_name);
}
}
}
crates.push(CrateInfo {
name: crate_toml.package.name.clone(),
path: crate_path,
dependencies,
external_dependencies,
});
}
// Filter dependencies to only include workspace crates
for crate_info in &mut crates {
crate_info.dependencies.retain(|dep| workspace_crate_names.contains(dep));
}
// Remove transitive dependencies
remove_transitive_dependencies(&mut crates);
// Generate DOT format
let dot_content = generate_dot_format(&crates)?;
// Handle output based on format
match args.format {
OutputFormat::Dot => {
// Write DOT output
if let Some(output_path) = args.output {
fs::write(&output_path, &dot_content).with_context(|| format!("Failed to write to {:?}", output_path))?;
println!("DOT output written to: {:?}", output_path);
} else {
print!("{}", dot_content);
}
}
OutputFormat::Png | OutputFormat::Svg => {
// Require output file for PNG/SVG
let output_path = args.output.ok_or_else(|| anyhow!("Output file (-o/--output) is required for PNG/SVG formats"))?;
// Check if dot command is available
let dot_check = Command::new("dot").arg("-V").output();
if dot_check.is_err() || !dot_check.as_ref().unwrap().status.success() {
return Err(anyhow!(
"GraphViz 'dot' command not found. Please install GraphViz to generate PNG/SVG output.\n\
On Ubuntu/Debian: sudo apt-get install graphviz\n\
On macOS: brew install graphviz\n\
On Windows: Download from https://graphviz.org/download/"
));
}
// Determine the format argument for dot
let format_arg = match args.format {
OutputFormat::Png => "png",
OutputFormat::Svg => "svg",
_ => unreachable!(),
};
// Run dot command to generate the output
let mut dot_process = Command::new("dot")
.arg(format!("-T{}", format_arg))
.arg("-o")
.arg(&output_path)
.stdin(std::process::Stdio::piped())
.spawn()
.with_context(|| "Failed to spawn 'dot' command")?;
// Write DOT content to stdin
use std::io::Write;
if let Some(mut stdin) = dot_process.stdin.take() {
stdin.write_all(dot_content.as_bytes()).with_context(|| "Failed to write DOT content to 'dot' command")?;
// Close stdin to signal EOF
drop(stdin);
}
// Wait for the command to complete
let status = dot_process.wait().with_context(|| "Failed to wait for 'dot' command")?;
if !status.success() {
return Err(anyhow!("'dot' command failed with exit code: {:?}", status.code()));
}
println!("{} output written to: {:?}", format_arg.to_uppercase(), output_path);
}
}
Ok(())
}
fn generate_dot_format(crates: &[CrateInfo]) -> Result<String> {
let mut output = String::new();
output.push_str("digraph CrateHierarchy {\n");
output.push_str(" rankdir=LR;\n");
output.push_str(" node [shape=box, style=\"rounded,filled\", fillcolor=lightblue];\n");
output.push_str(" edge [color=gray];\n\n");
// Add subgraphs for different categories
output.push_str(" subgraph cluster_core {\n");
output.push_str(" label=\"Core Components\";\n");
output.push_str(" style=filled;\n");
output.push_str(" fillcolor=lightgray;\n");
let core_crates: Vec<_> = crates
.iter()
.filter(|c| (c.name.starts_with("graphite-") || c.name == "editor" || c.name == "graphene-cli") && !c.name.contains("desktop"))
.collect();
for crate_info in &core_crates {
output.push_str(&format!(" \"{}\";\n", crate_info.name));
}
output.push_str(" }\n\n");
output.push_str(" subgraph cluster_nodegraph {\n");
output.push_str(" label=\"Node Graph System\";\n");
output.push_str(" style=filled;\n");
output.push_str(" fillcolor=lightyellow;\n");
let nodegraph_crates: Vec<_> = crates
.iter()
.filter(|c| c.name == "graph-craft" || c.name == "interpreted-executor" || c.name == "node-macro" || c.name == "preprocessor" || c.name == "graphene-cli")
.collect();
for crate_info in &nodegraph_crates {
output.push_str(&format!(" \"{}\";\n", crate_info.name));
}
output.push_str(" }\n\n");
output.push_str(" subgraph cluster_node_libraries {\n");
output.push_str(" label=\"Node Graph Libraries\";\n");
output.push_str(" style=filled;\n");
output.push_str(" fillcolor=lightcyan;\n");
let node_library_crates: Vec<_> = crates
.iter()
.filter(|c| {
let path_str = c.path.to_string_lossy();
path_str.contains("node-graph/libraries")
})
.collect();
for crate_info in &node_library_crates {
output.push_str(&format!(" \"{}\";\n", crate_info.name));
}
output.push_str(" }\n\n");
output.push_str(" subgraph cluster_nodes {\n");
output.push_str(" label=\"Nodes\";\n");
output.push_str(" style=filled;\n");
output.push_str(" fillcolor=lightblue;\n");
let node_crates: Vec<_> = crates
.iter()
.filter(|c| {
let path_str = c.path.to_string_lossy();
path_str.contains("node-graph/nodes")
})
.collect();
for crate_info in &node_crates {
output.push_str(&format!(" \"{}\";\n", crate_info.name));
}
output.push_str(" }\n\n");
output.push_str(" subgraph cluster_desktop{\n");
output.push_str(" label=\"Desktop\";\n");
output.push_str(" style=filled;\n");
output.push_str(" fillcolor=lightgreen;\n");
let desktop_crates: Vec<_> = crates
.iter()
.filter(|c| {
let path_str = c.path.to_string_lossy();
path_str.contains("desktop")
})
.collect();
for crate_info in &desktop_crates {
output.push_str(&format!(" \"{}\";\n", crate_info.name));
}
output.push_str(" }\n\n");
// Add dependencies as edges
for crate_info in crates {
for dep in &crate_info.dependencies {
if dep == "dyn-any" || dep == "node-macro" {
continue;
}
output.push_str(&format!(" \"{}\" -> \"{}\";\n", crate_info.name, dep));
}
}
output.push_str("}\n");
Ok(output)
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/build.rs | editor/build.rs | use std::env;
use std::process::Command;
const GRAPHITE_RELEASE_SERIES: &str = "Alpha 4";
fn main() {
// Instruct Cargo to rerun this build script if any of these environment variables change.
println!("cargo:rerun-if-env-changed=GRAPHITE_GIT_COMMIT_DATE");
println!("cargo:rerun-if-env-changed=GRAPHITE_GIT_COMMIT_HASH");
println!("cargo:rerun-if-env-changed=GRAPHITE_GIT_COMMIT_BRANCH");
println!("cargo:rerun-if-env-changed=GITHUB_HEAD_REF");
// Try to get the commit information from the environment (e.g. set by CI), otherwise fall back to Git commands.
let commit_date = env_or_else("GRAPHITE_GIT_COMMIT_DATE", || git_or_unknown(&["log", "-1", "--format=%cI"]));
let commit_hash = env_or_else("GRAPHITE_GIT_COMMIT_HASH", || git_or_unknown(&["rev-parse", "HEAD"]));
let commit_branch = env_or_else("GRAPHITE_GIT_COMMIT_BRANCH", || {
let gh = env::var("GITHUB_HEAD_REF").unwrap_or_default();
if !gh.trim().is_empty() {
gh.trim().to_string()
} else {
git_or_unknown(&["rev-parse", "--abbrev-ref", "HEAD"])
}
});
// Instruct Cargo to set environment variables for compile time.
// They are accessed with the `env!("GRAPHITE_*")` macro in the codebase.
println!("cargo:rustc-env=GRAPHITE_GIT_COMMIT_DATE={commit_date}");
println!("cargo:rustc-env=GRAPHITE_GIT_COMMIT_HASH={commit_hash}");
println!("cargo:rustc-env=GRAPHITE_GIT_COMMIT_BRANCH={commit_branch}");
println!("cargo:rustc-env=GRAPHITE_RELEASE_SERIES={GRAPHITE_RELEASE_SERIES}");
}
/// Get an environment variable, or if it is not set or empty, use the provided fallback function. Returns a string with trimmed whitespace.
fn env_or_else(key: &str, fallback: impl FnOnce() -> String) -> String {
match env::var(key) {
Ok(v) if !v.trim().is_empty() => v.trim().to_string(),
_ => fallback().trim().to_string(),
}
}
/// Execute a Git command to obtain its output. Return "unknown" if it fails for any of the possible reasons.
fn git_or_unknown(args: &[&str]) -> String {
git(args).unwrap_or_else(|| "unknown".to_string())
}
/// Run a git command and capture trimmed stdout.
/// Returns None if git is missing, exits with error, or stdout is empty/non-UTF8.
fn git(args: &[&str]) -> Option<String> {
let output = Command::new("git").args(args).output().ok()?;
if !output.status.success() {
return None;
}
let s = String::from_utf8(output.stdout).ok()?;
let t = s.trim();
if t.is_empty() { None } else { Some(t.to_string()) }
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/consts.rs | editor/src/consts.rs | // GRAPH
pub const GRID_SIZE: u32 = 24;
pub const EXPORTS_TO_TOP_EDGE_PIXEL_GAP: u32 = 72;
pub const EXPORTS_TO_RIGHT_EDGE_PIXEL_GAP: u32 = 120;
pub const IMPORTS_TO_TOP_EDGE_PIXEL_GAP: u32 = 72;
pub const IMPORTS_TO_LEFT_EDGE_PIXEL_GAP: u32 = 120;
// VIEWPORT
pub const VIEWPORT_ZOOM_WHEEL_RATE: f64 = (1. / 600.) * 3.;
pub const VIEWPORT_ZOOM_MOUSE_RATE: f64 = 1. / 400.;
pub const VIEWPORT_ZOOM_SCALE_MIN: f64 = 0.000_000_1;
pub const VIEWPORT_ZOOM_SCALE_MAX: f64 = 10_000.;
pub const VIEWPORT_ZOOM_MIN_FRACTION_COVER: f64 = 0.01;
pub const VIEWPORT_ZOOM_LEVELS: [f64; 74] = [
0.0001, 0.000125, 0.00016, 0.0002, 0.00025, 0.00032, 0.0004, 0.0005, 0.00064, 0.0008, 0.001, 0.0016, 0.002, 0.0025, 0.0032, 0.004, 0.005, 0.0064, 0.008, 0.01, 0.01125, 0.015, 0.02, 0.025, 0.03,
0.04, 0.05, 0.06, 0.08, 0.1, 0.125, 0.15, 0.2, 0.25, 0.33333333, 0.4, 0.5, 0.66666666, 0.8, 1., 1.25, 1.6, 2., 2.5, 3.2, 4., 5., 6.4, 8., 10., 12.5, 16., 20., 25., 32., 40., 50., 64., 80., 100.,
128., 160., 200., 256., 320., 400., 512., 640., 800., 1024., 1280., 1600., 2048., 2560.,
];
/// Higher values create a steeper curve (a faster zoom rate change)
pub const VIEWPORT_ZOOM_WHEEL_RATE_CHANGE: f64 = 3.;
/// Helps push values that end in approximately half, plus or minus some floating point imprecision, towards the same side of the round() function.
pub const VIEWPORT_GRID_ROUNDING_BIAS: f64 = 0.002;
pub const VIEWPORT_SCROLL_RATE: f64 = 0.6;
pub const VIEWPORT_ROTATE_SNAP_INTERVAL: f64 = 15.;
pub const VIEWPORT_ZOOM_TO_FIT_PADDING_SCALE_FACTOR: f64 = 0.95;
pub const DRAG_BEYOND_VIEWPORT_MAX_OVEREXTENSION_PIXELS: f64 = 50.;
pub const DRAG_BEYOND_VIEWPORT_SPEED_FACTOR: f64 = 20.;
// SNAPPING POINT
pub const SNAP_POINT_TOLERANCE: f64 = 5.;
/// These are layers whose bounding boxes are used for alignment.
pub const MAX_ALIGNMENT_CANDIDATES: usize = 100;
/// These are layers that are used for the layer snapper.
pub const MAX_SNAP_CANDIDATES: usize = 10;
/// These are points (anchors and bounding box corners etc.) in the layer snapper.
pub const MAX_LAYER_SNAP_POINTS: usize = 100;
pub const DRAG_THRESHOLD: f64 = 1.;
// TRANSFORMING LAYER
pub const ROTATE_INCREMENT: f64 = 15.;
pub const SCALE_INCREMENT: f64 = 0.1;
pub const SLOWING_DIVISOR: f64 = 10.;
pub const NUDGE_AMOUNT: f64 = 1.;
pub const BIG_NUDGE_AMOUNT: f64 = 10.;
// TOOLS
pub const DEFAULT_STROKE_WIDTH: f64 = 2.;
// SELECT TOOL
pub const SELECTION_TOLERANCE: f64 = 5.;
pub const DRAG_DIRECTION_MODE_DETERMINATION_THRESHOLD: f64 = 15.;
pub const SELECTION_DRAG_ANGLE: f64 = 90.;
// PIVOT
pub const PIVOT_CROSSHAIR_THICKNESS: f64 = 1.;
pub const PIVOT_CROSSHAIR_LENGTH: f64 = 9.;
pub const PIVOT_DIAMETER: f64 = 5.;
pub const DOWEL_PIN_RADIUS: f64 = 4.;
// COMPASS ROSE
pub const COMPASS_ROSE_RING_INNER_DIAMETER: f64 = 13.;
pub const COMPASS_ROSE_MAIN_RING_DIAMETER: f64 = 15.;
pub const COMPASS_ROSE_HOVER_RING_DIAMETER: f64 = 23.;
pub const COMPASS_ROSE_ARROW_SIZE: f64 = 5.;
// Angle to either side of the compass arrows where they are targetted by the cursor (in degrees, must be less than 45°)
pub const COMPASS_ROSE_ARROW_CLICK_TARGET_ANGLE: f64 = 20.;
// TRANSFORM OVERLAY
pub const ANGLE_MEASURE_RADIUS_FACTOR: f64 = 0.04;
pub const ARC_MEASURE_RADIUS_FACTOR_RANGE: (f64, f64) = (0.05, 0.15);
// TRANSFORM CAGE
pub const RESIZE_HANDLE_SIZE: f64 = 6.;
pub const BOUNDS_SELECT_THRESHOLD: f64 = 10.;
pub const BOUNDS_ROTATE_THRESHOLD: f64 = 20.;
pub const MIN_LENGTH_FOR_MIDPOINT_VISIBILITY: f64 = 20.;
pub const MIN_LENGTH_FOR_CORNERS_VISIBILITY: f64 = 12.;
/// The width or height that the transform cage needs to be (at least) before the corner resize handle click targets take up their full surroundings. Otherwise, when less than this value, the interior edge resize handle takes precedence so the corner handles don't eat into the edge area, making it harder to resize the cage from its edges.
pub const MIN_LENGTH_FOR_EDGE_RESIZE_PRIORITY_OVER_CORNERS: f64 = 10.;
/// When the width or height of the transform cage is less than this value, only the exterior of the bounding box will act as a click target for resizing.
pub const MIN_LENGTH_FOR_RESIZE_TO_INCLUDE_INTERIOR: f64 = 40.;
/// When dragging the edge of a cage with Alt, it centers around the pivot.
/// However if the pivot is on or near the same edge you are dragging, we should avoid scaling by a massive factor caused by the small denominator.
///
/// The motion of the user's cursor by an `x` pixel offset results in `x * scale_factor` pixels of offset on the other side.
pub const MAXIMUM_ALT_SCALE_FACTOR: f64 = 25.;
/// The width or height that the transform cage needs before it is considered to have no width or height.
pub const MAX_LENGTH_FOR_NO_WIDTH_OR_HEIGHT: f64 = 1e-4;
// SKEW TRIANGLES
pub const SKEW_TRIANGLE_SIZE: f64 = 7.;
pub const SKEW_TRIANGLE_OFFSET: f64 = 4.;
pub const MIN_LENGTH_FOR_SKEW_TRIANGLE_VISIBILITY: f64 = 48.;
// PATH TOOL
pub const MANIPULATOR_GROUP_MARKER_SIZE: f64 = 6.;
pub const SELECTION_THRESHOLD: f64 = 10.;
pub const DRILL_THROUGH_THRESHOLD: f64 = 10.;
pub const HIDE_HANDLE_DISTANCE: f64 = 3.;
pub const HANDLE_ROTATE_SNAP_ANGLE: f64 = 15.;
pub const SEGMENT_INSERTION_DISTANCE: f64 = 5.;
pub const SEGMENT_OVERLAY_SIZE: f64 = 10.;
pub const SEGMENT_SELECTED_THICKNESS: f64 = 3.;
pub const HANDLE_LENGTH_FACTOR: f64 = 0.5;
// PEN TOOL
pub const CREATE_CURVE_THRESHOLD: f64 = 5.;
// SPLINE TOOL
pub const PATH_JOIN_THRESHOLD: f64 = 5.;
// LINE TOOL
pub const LINE_ROTATE_SNAP_ANGLE: f64 = 15.;
// BRUSH TOOL
pub const BRUSH_SIZE_CHANGE_KEYBOARD: f64 = 5.;
pub const DEFAULT_BRUSH_SIZE: f64 = 20.;
// GIZMOS
pub const POINT_RADIUS_HANDLE_SNAP_THRESHOLD: f64 = 8.;
pub const POINT_RADIUS_HANDLE_SEGMENT_THRESHOLD: f64 = 7.9;
pub const NUMBER_OF_POINTS_DIAL_SPOKE_EXTENSION: f64 = 1.2;
pub const NUMBER_OF_POINTS_DIAL_SPOKE_LENGTH: f64 = 10.;
pub const ARC_SNAP_THRESHOLD: f64 = 5.;
pub const ARC_SWEEP_GIZMO_RADIUS: f64 = 14.;
pub const ARC_SWEEP_GIZMO_TEXT_HEIGHT: f64 = 12.;
pub const GIZMO_HIDE_THRESHOLD: f64 = 20.;
pub const GRID_ROW_COLUMN_GIZMO_OFFSET: f64 = 15.;
// SCROLLBARS
pub const SCROLLBAR_SPACING: f64 = 0.1;
pub const ASYMPTOTIC_EFFECT: f64 = 0.5;
pub const SCALE_EFFECT: f64 = 0.5;
// COLORS
pub const COLOR_OVERLAY_BLUE: &str = "#00a8ff";
pub const COLOR_OVERLAY_BLUE_50: &str = "#00a8ff80";
pub const COLOR_OVERLAY_YELLOW: &str = "#ffc848";
pub const COLOR_OVERLAY_YELLOW_DULL: &str = "#d7ba8b";
pub const COLOR_OVERLAY_GREEN: &str = "#63ce63";
pub const COLOR_OVERLAY_RED: &str = "#ef5454";
pub const COLOR_OVERLAY_GRAY: &str = "#cccccc";
pub const COLOR_OVERLAY_GRAY_25: &str = "#cccccc40";
pub const COLOR_OVERLAY_WHITE: &str = "#ffffff";
pub const COLOR_OVERLAY_BLACK_75: &str = "#000000bf";
// DOCUMENT
pub const FILE_EXTENSION: &str = "graphite";
pub const DEFAULT_DOCUMENT_NAME: &str = "Untitled Document";
pub const MAX_UNDO_HISTORY_LEN: usize = 100; // TODO: Add this to user preferences
pub const AUTO_SAVE_TIMEOUT_SECONDS: u64 = 1;
// INPUT
pub const DOUBLE_CLICK_MILLISECONDS: u64 = 500;
// UI
pub const UI_SCALE_DEFAULT: f64 = 1.;
pub const UI_SCALE_MIN: f64 = 0.5;
pub const UI_SCALE_MAX: f64 = 3.;
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/utility_types.rs | editor/src/utility_types.rs | #[derive(Debug)]
pub struct MessageData {
name: String,
fields: Vec<(String, usize)>,
path: &'static str,
}
impl MessageData {
pub fn new(name: String, fields: Vec<(String, usize)>, path: &'static str) -> MessageData {
MessageData { name, fields, path }
}
pub fn name(&self) -> &str {
&self.name
}
pub fn fields(&self) -> &Vec<(String, usize)> {
&self.fields
}
pub fn path(&self) -> &'static str {
self.path
}
}
#[derive(Debug)]
pub struct DebugMessageTree {
name: String,
fields: Option<Vec<String>>,
variants: Option<Vec<DebugMessageTree>>,
message_handler: Option<MessageData>,
message_handler_data: Option<MessageData>,
path: &'static str,
}
impl DebugMessageTree {
pub fn new(name: &str) -> DebugMessageTree {
DebugMessageTree {
name: name.to_string(),
fields: None,
variants: None,
message_handler: None,
message_handler_data: None,
path: "",
}
}
pub fn add_fields(&mut self, fields: Vec<String>) {
self.fields = Some(fields);
}
pub fn set_path(&mut self, path: &'static str) {
self.path = path;
}
pub fn add_variant(&mut self, variant: DebugMessageTree) {
if let Some(variants) = &mut self.variants {
variants.push(variant);
} else {
self.variants = Some(vec![variant]);
}
}
pub fn add_message_handler_data_field(&mut self, message_handler_data: MessageData) {
self.message_handler_data = Some(message_handler_data);
}
pub fn add_message_handler_field(&mut self, message_handler: MessageData) {
self.message_handler = Some(message_handler);
}
pub fn name(&self) -> &str {
&self.name
}
pub fn fields(&self) -> Option<&Vec<String>> {
self.fields.as_ref()
}
pub fn path(&self) -> &'static str {
self.path
}
pub fn variants(&self) -> Option<&Vec<DebugMessageTree>> {
self.variants.as_ref()
}
pub fn message_handler_data_fields(&self) -> Option<&MessageData> {
self.message_handler_data.as_ref()
}
pub fn message_handler_fields(&self) -> Option<&MessageData> {
self.message_handler.as_ref()
}
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/lib.rs | editor/src/lib.rs | extern crate graphite_proc_macros;
// `macro_use` puts these macros into scope for all descendant code files
#[macro_use]
mod macros;
mod generate_ts_types;
#[macro_use]
extern crate log;
pub mod application;
pub mod consts;
pub mod dispatcher;
pub mod messages;
pub mod node_graph_executor;
#[cfg(test)]
pub mod test_utils;
pub mod utility_traits;
pub mod utility_types;
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/node_graph_executor.rs | editor/src/node_graph_executor.rs | use crate::messages::frontend::utility_types::{ExportBounds, FileType};
use crate::messages::prelude::*;
use glam::{DAffine2, DVec2, UVec2};
use graph_craft::document::value::{RenderOutput, TaggedValue};
use graph_craft::document::{DocumentNode, DocumentNodeImplementation, NodeId, NodeInput};
use graph_craft::proto::GraphErrors;
use graph_craft::wasm_application_io::EditorPreferences;
use graphene_std::application_io::{NodeGraphUpdateMessage, RenderConfig};
use graphene_std::application_io::{SurfaceFrame, TimingInformation};
use graphene_std::renderer::{RenderMetadata, format_transform_matrix};
use graphene_std::text::FontCache;
use graphene_std::transform::Footprint;
use graphene_std::vector::Vector;
use graphene_std::wasm_application_io::RenderOutputType;
use interpreted_executor::dynamic_executor::ResolvedDocumentNodeTypesDelta;
mod runtime_io;
pub use runtime_io::NodeRuntimeIO;
mod runtime;
pub use runtime::*;
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct ExecutionRequest {
execution_id: u64,
render_config: RenderConfig,
}
pub struct ExecutionResponse {
execution_id: u64,
result: Result<TaggedValue, String>,
responses: VecDeque<FrontendMessage>,
vector_modify: HashMap<NodeId, Vector>,
/// The resulting value from the temporary inspected during execution
inspect_result: Option<InspectResult>,
}
#[derive(serde::Serialize, serde::Deserialize)]
pub struct CompilationResponse {
result: Result<ResolvedDocumentNodeTypesDelta, (ResolvedDocumentNodeTypesDelta, String)>,
node_graph_errors: GraphErrors,
}
pub enum NodeGraphUpdate {
ExecutionResponse(ExecutionResponse),
CompilationResponse(CompilationResponse),
NodeGraphUpdateMessage(NodeGraphUpdateMessage),
}
#[derive(Debug, Default)]
pub struct NodeGraphExecutor {
runtime_io: NodeRuntimeIO,
current_execution_id: u64,
futures: VecDeque<(u64, ExecutionContext)>,
node_graph_hash: u64,
previous_node_to_inspect: Option<NodeId>,
last_svg_canvas: Option<SurfaceFrame>,
}
#[derive(Debug, Clone)]
struct ExecutionContext {
export_config: Option<ExportConfig>,
document_id: DocumentId,
}
impl NodeGraphExecutor {
/// A local runtime is useful on threads since having global state causes flakes
#[cfg(test)]
pub(crate) fn new_with_local_runtime() -> (NodeRuntime, Self) {
let (request_sender, request_receiver) = std::sync::mpsc::channel();
let (response_sender, response_receiver) = std::sync::mpsc::channel();
let node_runtime = NodeRuntime::new(request_receiver, response_sender);
let node_executor = Self {
futures: Default::default(),
runtime_io: NodeRuntimeIO::with_channels(request_sender, response_receiver),
node_graph_hash: 0,
current_execution_id: 0,
previous_node_to_inspect: None,
last_svg_canvas: None,
};
(node_runtime, node_executor)
}
/// Execute the network by flattening it and creating a borrow stack.
fn queue_execution(&mut self, render_config: RenderConfig) -> u64 {
let execution_id = self.current_execution_id;
self.current_execution_id += 1;
let request = ExecutionRequest { execution_id, render_config };
self.runtime_io.send(GraphRuntimeRequest::ExecutionRequest(request)).expect("Failed to send generation request");
execution_id
}
pub fn update_font_cache(&self, font_cache: FontCache) {
self.runtime_io.send(GraphRuntimeRequest::FontCacheUpdate(font_cache)).expect("Failed to send font cache update");
}
pub fn update_editor_preferences(&self, editor_preferences: EditorPreferences) {
self.runtime_io
.send(GraphRuntimeRequest::EditorPreferencesUpdate(editor_preferences))
.expect("Failed to send editor preferences");
}
/// Updates the network to monitor all inputs. Useful for testing.
#[cfg(test)]
pub(crate) fn update_node_graph_instrumented(&mut self, document: &mut DocumentMessageHandler) -> Result<Instrumented, String> {
	// We should always invalidate the cache.
	self.node_graph_hash = crate::application::generate_uuid();
	let mut network = document.network_interface.document_network().clone();
	// Splice a monitor node in front of every node input so tests can introspect intermediate values.
	let instrumented = Instrumented::new(&mut network);
	self.runtime_io
		.send(GraphRuntimeRequest::GraphUpdate(GraphUpdate { network, node_to_inspect: None }))
		.map_err(|e| e.to_string())?;
	Ok(instrumented)
}
/// Update the cached network if necessary.
fn update_node_graph(&mut self, document: &mut DocumentMessageHandler, node_to_inspect: Option<NodeId>, ignore_hash: bool) -> Result<(), String> {
	let network_hash = document.network_interface.network_hash();
	// Refresh the graph when it changes or the inspect node changes; otherwise leave the cached copy alone.
	let up_to_date = network_hash == self.node_graph_hash && self.previous_node_to_inspect == node_to_inspect && !ignore_hash;
	if up_to_date {
		return Ok(());
	}
	let network = document.network_interface.document_network().clone();
	self.previous_node_to_inspect = node_to_inspect;
	self.node_graph_hash = network_hash;
	self.runtime_io
		.send(GraphRuntimeRequest::GraphUpdate(GraphUpdate { network, node_to_inspect }))
		.map_err(|e| e.to_string())?;
	Ok(())
}
/// Adds an evaluate request for whatever current network is cached.
pub(crate) fn submit_current_node_graph_evaluation(
	&mut self,
	document: &mut DocumentMessageHandler,
	document_id: DocumentId,
	viewport_resolution: UVec2,
	viewport_scale: f64,
	time: TimingInformation,
	pointer: DVec2,
) -> Result<Message, String> {
	// Render into the current viewport: the document-to-viewport transform at the given resolution.
	let viewport = Footprint {
		transform: document.metadata().document_to_viewport,
		resolution: viewport_resolution,
		..Default::default()
	};
	let render_config = RenderConfig {
		viewport,
		scale: viewport_scale,
		time,
		pointer,
		export_format: graphene_std::application_io::ExportFormat::Raster,
		render_mode: document.render_mode,
		hide_artboards: false,
		for_export: false,
	};
	// Execute the node graph
	let execution_id = self.queue_execution(render_config);
	// Remember which document this submission belongs to so the response can be routed back; no export config for a viewport render.
	self.futures.push_back((execution_id, ExecutionContext { export_config: None, document_id }));
	Ok(DeferMessage::SetGraphSubmissionIndex { execution_id }.into())
}
/// Evaluates a node graph, computing the entire graph
#[allow(clippy::too_many_arguments)]
pub fn submit_node_graph_evaluation(
	&mut self,
	document: &mut DocumentMessageHandler,
	document_id: DocumentId,
	viewport_resolution: UVec2,
	viewport_scale: f64,
	time: TimingInformation,
	node_to_inspect: Option<NodeId>,
	ignore_hash: bool,
	pointer: DVec2,
) -> Result<Message, String> {
	// Push the (possibly refreshed) network to the runtime first, then queue the render of it.
	match self.update_node_graph(document, node_to_inspect, ignore_hash) {
		Err(e) => Err(e),
		Ok(()) => self.submit_current_node_graph_evaluation(document, document_id, viewport_resolution, viewport_scale, time, pointer),
	}
}
/// Evaluates a node graph for export
pub fn submit_document_export(&mut self, document: &mut DocumentMessageHandler, document_id: DocumentId, mut export_config: ExportConfig) -> Result<(), String> {
	let network = document.network_interface.document_network().clone();
	// Only an SVG file target renders as SVG; PNG/JPG targets render to a raster buffer.
	let export_format = if export_config.file_type == FileType::Svg {
		graphene_std::application_io::ExportFormat::Svg
	} else {
		graphene_std::application_io::ExportFormat::Raster
	};
	// Calculate the bounding box of the region to be exported
	let bounds = match export_config.bounds {
		ExportBounds::AllArtwork => document.network_interface.document_bounds_document_space(!export_config.transparent_background),
		ExportBounds::Selection => document.network_interface.selected_bounds_document_space(!export_config.transparent_background, &[]),
		ExportBounds::Artboard(id) => document.metadata().bounding_box_document(id),
	}
	.ok_or_else(|| "No bounding box".to_string())?;
	// Render at the bounds' size, translated so the exported region's top-left corner lands at the origin.
	let resolution = (bounds[1] - bounds[0]).as_uvec2();
	let transform = DAffine2::from_translation(bounds[0]).inverse();
	let render_config = RenderConfig {
		viewport: Footprint {
			resolution,
			transform,
			..Default::default()
		},
		scale: export_config.scale_factor,
		time: Default::default(),
		pointer: DVec2::ZERO,
		export_format,
		render_mode: document.render_mode,
		hide_artboards: export_config.transparent_background,
		for_export: true,
	};
	// Record the computed pixel size so the export handler knows the output dimensions.
	export_config.size = resolution.as_dvec2();
	// Execute the node graph
	self.runtime_io
		.send(GraphRuntimeRequest::GraphUpdate(GraphUpdate { network, node_to_inspect: None }))
		.map_err(|e| e.to_string())?;
	let execution_id = self.queue_execution(render_config);
	let execution_context = ExecutionContext {
		export_config: Some(export_config),
		document_id,
	};
	self.futures.push_back((execution_id, execution_context));
	Ok(())
}
/// Turn the evaluated graph output into a saved file of the requested type, either directly (SVG)
/// or by handing the data to the frontend for rasterization/encoding.
///
/// # Errors
/// Returns an error when the render output kind does not match the requested file type or encoding fails.
fn export(&self, node_graph_output: TaggedValue, export_config: ExportConfig, responses: &mut VecDeque<Message>) -> Result<(), String> {
	let ExportConfig {
		file_type,
		name,
		size,
		scale_factor,
		#[cfg(feature = "gpu")]
		transparent_background,
		..
	} = export_config;
	let file_extension = match file_type {
		FileType::Svg => "svg",
		FileType::Png => "png",
		FileType::Jpg => "jpg",
	};
	let name = format!("{name}.{file_extension}");
	match node_graph_output {
		TaggedValue::RenderOutput(RenderOutput {
			data: RenderOutputType::Svg { svg, .. },
			..
		}) => {
			if file_type == FileType::Svg {
				// SVG output and SVG target: save the markup as-is.
				responses.add(FrontendMessage::TriggerSaveFile { name, content: svg.into_bytes() });
			} else {
				// Raster target: the frontend rasterizes the SVG at the scaled size and encodes it.
				let mime = file_type.to_mime().to_string();
				let size = (size * scale_factor).into();
				responses.add(FrontendMessage::TriggerExportImage { svg, name, mime, size });
			}
		}
		#[cfg(feature = "gpu")]
		TaggedValue::RenderOutput(RenderOutput {
			data: RenderOutputType::Buffer { data, width, height },
			..
		}) if file_type != FileType::Svg => {
			use image::buffer::ConvertBuffer;
			use image::{ImageFormat, RgbImage, RgbaImage};
			let Some(image) = RgbaImage::from_raw(width, height, data) else {
				return Err("Failed to create image buffer for export".to_string());
			};
			let mut encoded = Vec::new();
			let mut cursor = std::io::Cursor::new(&mut encoded);
			match file_type {
				FileType::Png => {
					// Keep the alpha channel only when a transparent background was requested.
					let result = if transparent_background {
						image.write_to(&mut cursor, ImageFormat::Png)
					} else {
						let image: RgbImage = image.convert();
						image.write_to(&mut cursor, ImageFormat::Png)
					};
					if let Err(err) = result {
						return Err(format!("Failed to encode PNG: {err}"));
					}
				}
				FileType::Jpg => {
					// JPEG has no alpha channel, so always flatten to RGB first.
					let image: RgbImage = image.convert();
					let result = image.write_to(&mut cursor, ImageFormat::Jpeg);
					if let Err(err) = result {
						return Err(format!("Failed to encode JPG: {err}"));
					}
				}
				FileType::Svg => {
					return Err("SVG cannot be exported from an image buffer".to_string());
				}
			}
			responses.add(FrontendMessage::TriggerSaveFile { name, content: encoded });
		}
		_ => {
			// Previously this message claimed the target was always "an SVG" even for raster exports; report the actual target.
			return Err(format!("Incorrect render type for exporting to {file_type:?} ({node_graph_output})"));
		}
	};
	Ok(())
}
/// Drain all pending runtime responses, applying execution results and compilation type updates to the document.
///
/// # Errors
/// Returns an error when graph evaluation or compilation failed; click targets and vector-modify state are
/// cleared first so the document is not left interactive while in an un-renderable state.
pub fn poll_node_graph_evaluation(&mut self, document: &mut DocumentMessageHandler, responses: &mut VecDeque<Message>) -> Result<(), String> {
	let results = self.runtime_io.receive().collect::<Vec<_>>();
	for response in results {
		match response {
			NodeGraphUpdate::ExecutionResponse(execution_response) => {
				let ExecutionResponse {
					execution_id,
					result,
					responses: existing_responses,
					vector_modify,
					inspect_result,
				} = execution_response;
				responses.add(OverlaysMessage::Draw);
				let node_graph_output = match result {
					Ok(output) => output,
					Err(e) => {
						// Clear the click targets while the graph is in an un-renderable state
						document.network_interface.update_click_targets(HashMap::new());
						document.network_interface.update_vector_modify(HashMap::new());
						return Err(format!("Node graph evaluation failed:\n{e}"));
					}
				};
				responses.extend(existing_responses.into_iter().map(Into::into));
				document.network_interface.update_vector_modify(vector_modify);
				// Discard contexts for earlier submissions that never got a response of their own.
				while let Some(&(fid, _)) = self.futures.front() {
					if fid < execution_id {
						self.futures.pop_front();
					} else {
						break;
					}
				}
				let Some((fid, execution_context)) = self.futures.pop_front() else {
					panic!("InvalidGenerationId")
				};
				// Typo fix: assertion message previously read "Missmatch".
				assert_eq!(fid, execution_id, "Mismatch in execution id");
				if let Some(export_config) = execution_context.export_config {
					// Special handling for exporting the artwork
					self.export(node_graph_output, export_config, responses)?;
				} else {
					self.process_node_graph_output(node_graph_output, responses)?;
				}
				responses.add(DeferMessage::TriggerGraphRun {
					execution_id,
					document_id: execution_context.document_id,
				});
				// Update the Data panel on the frontend using the value of the inspect result.
				if let Some(inspect_result) = (self.previous_node_to_inspect.is_some()).then_some(inspect_result).flatten() {
					responses.add(DataPanelMessage::UpdateLayout { inspect_result });
				} else {
					responses.add(DataPanelMessage::ClearLayout);
				}
			}
			NodeGraphUpdate::CompilationResponse(execution_response) => {
				let CompilationResponse { node_graph_errors, result } = execution_response;
				let type_delta = match result {
					Err((incomplete_delta, e)) => {
						// Clear the click targets while the graph is in an un-renderable state
						document.network_interface.update_click_targets(HashMap::new());
						document.network_interface.update_vector_modify(HashMap::new());
						log::trace!("{e}");
						// Still publish whatever types did resolve so the graph UI can show partial information.
						responses.add(NodeGraphMessage::UpdateTypes {
							resolved_types: incomplete_delta,
							node_graph_errors,
						});
						responses.add(NodeGraphMessage::SendGraph);
						return Err(format!("Node graph evaluation failed:\n{e}"));
					}
					Ok(result) => result,
				};
				responses.add(NodeGraphMessage::UpdateTypes {
					resolved_types: type_delta,
					node_graph_errors,
				});
				responses.add(NodeGraphMessage::SendGraph);
			}
		}
	}
	Ok(())
}
/// Forward a successful render result to the frontend and sync document metadata (transforms, click/clip targets).
fn process_node_graph_output(&mut self, node_graph_output: TaggedValue, responses: &mut VecDeque<Message>) -> Result<(), String> {
	let TaggedValue::RenderOutput(render_output) = node_graph_output else {
		return Err(format!("Invalid node graph output type: {node_graph_output:#?}"));
	};
	match render_output.data {
		RenderOutputType::Svg { svg, image_data } => {
			// Send to frontend
			responses.add(FrontendMessage::UpdateImageData { image_data });
			responses.add(FrontendMessage::UpdateDocumentArtwork { svg });
			self.last_svg_canvas = None;
		}
		RenderOutputType::CanvasFrame(frame) => 'block: {
			// Skip regenerating the placeholder SVG when the frame is unchanged since the last update.
			if self.last_svg_canvas == Some(frame) {
				break 'block;
			}
			let matrix = format_transform_matrix(frame.transform);
			let transform = if matrix.is_empty() { String::new() } else { format!(" transform=\"{matrix}\"") };
			// Placeholder markup that the frontend substitutes with the actual canvas surface.
			let svg = format!(
				r#"<svg><foreignObject width="{}" height="{}"{transform}><div data-canvas-placeholder="{}" data-is-viewport="true"></div></foreignObject></svg>"#,
				frame.resolution.x, frame.resolution.y, frame.surface_id.0,
			);
			self.last_svg_canvas = Some(frame);
			responses.add(FrontendMessage::UpdateDocumentArtwork { svg });
		}
		RenderOutputType::Texture { .. } => {}
		_ => return Err(format!("Invalid node graph output type: {:#?}", render_output.data)),
	}
	let RenderMetadata {
		upstream_footprints,
		local_transforms,
		first_element_source_id,
		click_targets,
		clip_targets,
	} = render_output.metadata;
	// Run these update state messages immediately
	responses.add(DocumentMessage::UpdateUpstreamTransforms {
		upstream_footprints,
		local_transforms,
		first_element_source_id,
	});
	responses.add(DocumentMessage::UpdateClickTargets { click_targets });
	responses.add(DocumentMessage::UpdateClipTargets { clip_targets });
	responses.add(DocumentMessage::RenderScrollbars);
	responses.add(DocumentMessage::RenderRulers);
	responses.add(OverlaysMessage::Draw);
	Ok(())
}
}
// Re-export for usage by tests in other modules
#[cfg(test)]
pub use test::Instrumented;
#[cfg(test)]
mod test {
use std::sync::Arc;
use super::*;
use crate::messages::portfolio::document::utility_types::network_interface::NodeNetworkInterface;
use crate::test_utils::test_prelude::{self, NodeGraphLayer};
use graph_craft::ProtoNodeIdentifier;
use graph_craft::document::NodeNetwork;
use graphene_std::Context;
use graphene_std::NodeInputDecleration;
use graphene_std::memo::IORecord;
use test_prelude::LayerNodeIdentifier;
/// Stores all of the monitor nodes that have been attached to a graph
#[derive(Default)]
pub struct Instrumented {
	// For each protonode identifier, one entry per occurrence in the graph; each occurrence holds one monitor-node path per input.
	protonodes_by_name: HashMap<ProtoNodeIdentifier, Vec<Vec<Vec<NodeId>>>>,
	// Monitor-node paths (one per input) keyed by the absolute path of the instrumented protonode.
	protonodes_by_path: HashMap<Vec<NodeId>, Vec<Vec<NodeId>>>,
}
impl Instrumented {
/// Adds monitor nodes to the network
fn add(&mut self, network: &mut NodeNetwork, path: &mut Vec<NodeId>) {
	// Required to do separately to satisfy the borrow checker.
	let mut monitor_nodes = Vec::new();
	for (id, node) in network.nodes.iter_mut() {
		// Recursively instrument
		if let DocumentNodeImplementation::Network(nested) = &mut node.implementation {
			path.push(*id);
			self.add(nested, path);
			path.pop();
		}
		// Splice a monitor node in front of every input, remembering each monitor's absolute path.
		let mut monitor_node_ids = Vec::with_capacity(node.inputs.len());
		for input in &mut node.inputs {
			let node_id = NodeId::new();
			let old_input = std::mem::replace(input, NodeInput::node(node_id, 0));
			monitor_nodes.push((old_input, node_id));
			path.push(node_id);
			monitor_node_ids.push(path.clone());
			path.pop();
		}
		if let DocumentNodeImplementation::ProtoNode(identifier) = &mut node.implementation {
			path.push(*id);
			self.protonodes_by_name.entry(identifier.clone()).or_default().push(monitor_node_ids.clone());
			self.protonodes_by_path.insert(path.clone(), monitor_node_ids);
			path.pop();
		}
	}
	// Insert the deferred monitor nodes now that iteration over `network.nodes` has ended.
	for (input, monitor_id) in monitor_nodes {
		let monitor_node = DocumentNode {
			inputs: vec![input],
			implementation: DocumentNodeImplementation::ProtoNode(graphene_std::memo::monitor::IDENTIFIER),
			call_argument: graph_craft::generic!(T),
			skip_deduplication: true,
			..Default::default()
		};
		network.nodes.insert(monitor_id, monitor_node);
	}
}
/// Instrument a graph and return a new [Instrumented] state.
pub fn new(network: &mut NodeNetwork) -> Self {
	let mut state = Self::default();
	let mut path = Vec::new();
	state.add(network, &mut path);
	state
}
/// Downcast a type-erased introspection record to the concrete result type of `Input`.
fn downcast<Input: NodeInputDecleration>(dynamic: Arc<dyn std::any::Any + Send + Sync>) -> Option<Input::Result>
where
	Input::Result: Send + Sync + Clone + 'static,
{
	// This is quite inflexible since it only allows the footprint as inputs.
	// Each supported call-argument type is tried in turn; the record's output is what we want.
	if let Some(x) = dynamic.downcast_ref::<IORecord<(), Input::Result>>() {
		Some(x.output.clone())
	} else if let Some(x) = dynamic.downcast_ref::<IORecord<Footprint, Input::Result>>() {
		Some(x.output.clone())
	} else if let Some(x) = dynamic.downcast_ref::<IORecord<Context, Input::Result>>() {
		Some(x.output.clone())
	} else {
		warn!("cannot downcast type for introspection");
		None
	}
}
/// Grab all of the values of the input every time it occurs in the graph.
pub fn grab_all_input<'a, Input: NodeInputDecleration + 'a>(&'a self, runtime: &'a NodeRuntime) -> impl Iterator<Item = Input::Result> + 'a
where
	Input::Result: Send + Sync + Clone + 'static,
{
	// Empty slice fallback keeps the iterator type uniform when the protonode never occurs.
	self.protonodes_by_name
		.get(&Input::identifier())
		.map_or([].as_slice(), |x| x.as_slice())
		.iter()
		.filter_map(|inputs| inputs.get(Input::INDEX))
		.filter_map(|input_monitor_node| runtime.executor.introspect(input_monitor_node).ok())
		.filter_map(Instrumented::downcast::<Input>) // Some might not resolve (e.g. generics that don't work properly)
}
/// Grab the introspected value of the given input of a single protonode, addressed by its absolute node path.
///
/// Returns `None` when the path is unknown, the input index is out of range, or the value cannot be downcast.
// Idiom fix: take `&[NodeId]` rather than `&Vec<NodeId>`; existing `&Vec` callers still work via deref coercion,
// and `HashMap::get` accepts the slice through `Vec<T>: Borrow<[T]>`.
pub fn grab_protonode_input<Input: NodeInputDecleration>(&self, path: &[NodeId], runtime: &NodeRuntime) -> Option<Input::Result>
where
	Input::Result: Send + Sync + Clone + 'static,
{
	let input_monitor_node = self.protonodes_by_path.get(path)?.get(Input::INDEX)?;
	let dynamic = runtime.executor.introspect(input_monitor_node).ok()?;
	Self::downcast::<Input>(dynamic)
}
/// Find the protonode of type `Input` feeding the given layer and grab its introspected input value.
pub fn grab_input_from_layer<Input: NodeInputDecleration>(&self, layer: LayerNodeIdentifier, network_interface: &NodeNetworkInterface, runtime: &NodeRuntime) -> Option<Input::Result>
where
	Input::Result: Send + Sync + Clone + 'static,
{
	let node = NodeGraphLayer::new(layer, network_interface).upstream_node_id_from_protonode(Input::identifier())?;
	self.grab_protonode_input::<Input>(&vec![node], runtime)
}
}
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/test_utils.rs | editor/src/test_utils.rs | use crate::application::Editor;
use crate::application::set_uuid_seed;
use crate::messages::input_mapper::utility_types::input_keyboard::ModifierKeys;
use crate::messages::input_mapper::utility_types::input_mouse::{EditorMouseState, MouseKeys, ScrollDelta, ViewportPosition};
use crate::messages::portfolio::utility_types::Platform;
use crate::messages::prelude::*;
use crate::messages::tool::tool_messages::tool_prelude::Key;
use crate::messages::tool::utility_types::ToolType;
use crate::node_graph_executor::Instrumented;
use crate::node_graph_executor::NodeRuntime;
use crate::test_utils::test_prelude::LayerNodeIdentifier;
use glam::{DVec2, UVec2};
use graph_craft::document::DocumentNode;
use graphene_std::InputAccessor;
use graphene_std::raster::color::Color;
use graphene_std::uuid::NodeId;
/// A set of utility functions to make the writing of editor tests more declarative
pub struct EditorTestUtils {
	// The editor backend under test, driven entirely through messages.
	pub editor: Editor,
	// A dedicated local node runtime so tests do not share global state.
	pub runtime: NodeRuntime,
}
impl EditorTestUtils {
/// Construct an editor with a local node runtime, seeded uuids, and an initialized portfolio.
pub fn create() -> Self {
	// Logger init may race with other tests; ignore the error if it is already set up.
	let _ = env_logger::builder().is_test(true).try_init();
	set_uuid_seed(0);
	let (mut editor, runtime) = Editor::new_local_executor();
	// We have to set this directly instead of using `GlobalsMessage::SetPlatform` because race conditions with multiple tests can cause that message handler to set it more than once, which is a failure.
	// It isn't sufficient to guard the message dispatch here with a check if the once_cell is empty, because that isn't atomic and the time between checking and handling the dispatch can let multiple through.
	let _ = GLOBAL_PLATFORM.set(Platform::Windows).is_ok();
	editor.handle_message(PortfolioMessage::Init);
	Self { editor, runtime }
}
/// Run the active document's node graph once (instrumented) and process every resulting message.
pub fn eval_graph<'a>(&'a mut self) -> impl std::future::Future<Output = Result<Instrumented, String>> + 'a {
	// An inner function is required since async functions in traits are a bit weird
	async fn run<'a>(editor: &'a mut Editor, runtime: &'a mut NodeRuntime) -> Result<Instrumented, String> {
		let portfolio = &mut editor.dispatcher.message_handlers.portfolio_message_handler;
		let executor = &mut portfolio.executor;
		let document = portfolio.documents.get_mut(&portfolio.active_document_id.unwrap()).unwrap();
		let instrumented = executor
			.update_node_graph_instrumented(document)
			.map_err(|e| format!("update_node_graph_instrumented failed\n\n{e}"))?;
		executor
			.submit_current_node_graph_evaluation(document, DocumentId(0), UVec2::ONE, 1., Default::default(), DVec2::ZERO)
			.map_err(|e| format!("submit_current_node_graph_evaluation failed\n\n{e}"))?;
		runtime.run().await;
		let mut messages = VecDeque::new();
		editor.poll_node_graph_evaluation(&mut messages).map_err(|e| format!("Graph should render\n\n{e}"))?;
		// Feed every produced message back through the editor and fail on any node graph error diagnostic.
		let frontend_messages = messages.into_iter().flat_map(|message| editor.handle_message(message));
		for message in frontend_messages {
			message.check_node_graph_error();
		}
		Ok(instrumented)
	}
	run(&mut self.editor, &mut self.runtime)
}
/// Dispatch a message to the editor, then run the graph to flush any buffered messages.
pub async fn handle_message(&mut self, message: impl Into<Message>) -> Vec<FrontendMessage> {
	let frontend_messages = self.editor.handle_message(message);
	// Required to process any buffered messages
	match self.eval_graph().await {
		Ok(_) => frontend_messages,
		Err(e) => panic!("Failed to evaluate graph: {e}"),
	}
}
/// Open a fresh document titled "Test document" in the portfolio.
pub async fn new_document(&mut self) {
	self.handle_message(Message::Portfolio(PortfolioMessage::NewDocumentWithName { name: String::from("Test document") }))
		.await;
}
/// Drag out a rectangle with the Rectangle tool from (x1, y1) to (x2, y2).
pub async fn draw_rect(&mut self, x1: f64, y1: f64, x2: f64, y2: f64) {
	self.drag_tool(ToolType::Rectangle, x1, y1, x2, y2, ModifierKeys::default()).await;
}
/// Drag out a shape with the Shape tool from (x1, y1) to (x2, y2).
pub async fn draw_polygon(&mut self, x1: f64, y1: f64, x2: f64, y2: f64) {
	self.drag_tool(ToolType::Shape, x1, y1, x2, y2, ModifierKeys::default()).await;
}
/// Drag out an ellipse with the Ellipse tool from (x1, y1) to (x2, y2).
pub async fn draw_ellipse(&mut self, x1: f64, y1: f64, x2: f64, y2: f64) {
	self.drag_tool(ToolType::Ellipse, x1, y1, x2, y2, ModifierKeys::default()).await;
}
/// Activate `typ`, move the pointer to `position`, then press and release `button` there.
pub async fn click_tool(&mut self, typ: ToolType, button: MouseKeys, position: DVec2, modifier_keys: ModifierKeys) {
	self.select_tool(typ).await;
	self.move_mouse(position.x, position.y, modifier_keys, MouseKeys::empty()).await;
	self.mousedown(
		EditorMouseState {
			editor_position: position,
			mouse_keys: button,
			..Default::default()
		},
		modifier_keys,
	)
	.await;
	self.mouseup(
		EditorMouseState {
			editor_position: position,
			..Default::default()
		},
		modifier_keys,
	)
	.await;
}
/// Activate `typ`, then press the left button at (x1, y1), drag to (x2, y2), and release.
pub async fn drag_tool(&mut self, typ: ToolType, x1: f64, y1: f64, x2: f64, y2: f64, modifier_keys: ModifierKeys) {
	self.select_tool(typ).await;
	self.move_mouse(x1, y1, modifier_keys, MouseKeys::empty()).await;
	self.left_mousedown(x1, y1, modifier_keys).await;
	self.move_mouse(x2, y2, modifier_keys, MouseKeys::LEFT).await;
	self.mouseup(
		EditorMouseState {
			editor_position: (x2, y2).into(),
			mouse_keys: MouseKeys::empty(),
			scroll_delta: ScrollDelta::default(),
		},
		modifier_keys,
	)
	.await;
}
/// Start a drag with `typ` from (50, 50) to (100, 100), then press the right mouse button mid-drag to cancel it.
pub async fn drag_tool_cancel_rmb(&mut self, typ: ToolType) {
	self.select_tool(typ).await;
	self.move_mouse(50., 50., ModifierKeys::default(), MouseKeys::empty()).await;
	self.left_mousedown(50., 50., ModifierKeys::default()).await;
	self.move_mouse(100., 100., ModifierKeys::default(), MouseKeys::LEFT).await;
	// Right-click while the left button is still held, which cancels the in-progress drag.
	self.mousedown(
		EditorMouseState {
			editor_position: (100., 100.).into(),
			mouse_keys: MouseKeys::LEFT | MouseKeys::RIGHT,
			scroll_delta: ScrollDelta::default(),
		},
		ModifierKeys::default(),
	)
	.await;
}
/// The currently active document. Panics if no document is open.
pub fn active_document(&self) -> &DocumentMessageHandler {
	self.editor.dispatcher.message_handlers.portfolio_message_handler.active_document().unwrap()
}
/// Mutable access to the currently active document. Panics if no document is open.
pub fn active_document_mut(&mut self) -> &mut DocumentMessageHandler {
	self.editor.dispatcher.message_handlers.portfolio_message_handler.active_document_mut().unwrap()
}
/// Iterate all nodes of the active document viewable through accessor type `T`, printing each implementation for debugging.
pub fn get_node<'a, T: InputAccessor<'a, DocumentNode>>(&'a self) -> impl Iterator<Item = T> + 'a {
	self.active_document()
		.network_interface
		.document_network()
		.recursive_nodes()
		.inspect(|(_, node, _)| println!("{:#?}", node.implementation))
		.filter_map(move |(_, document, _)| T::new_with_source(document))
}
/// Move the pointer to (x, y) with the given held modifier and mouse keys.
pub async fn move_mouse(&mut self, x: f64, y: f64, modifier_keys: ModifierKeys, mouse_keys: MouseKeys) {
	let editor_mouse_state = EditorMouseState {
		editor_position: ViewportPosition::new(x, y),
		mouse_keys,
		..Default::default()
	};
	self.input(InputPreprocessorMessage::PointerMove { editor_mouse_state, modifier_keys }).await;
}
/// Send a pointer-down event with the given mouse state.
pub async fn mousedown(&mut self, editor_mouse_state: EditorMouseState, modifier_keys: ModifierKeys) {
	self.input(InputPreprocessorMessage::PointerDown { editor_mouse_state, modifier_keys }).await;
}
/// Send a pointer-up event with the given mouse state.
pub async fn mouseup(&mut self, editor_mouse_state: EditorMouseState, modifier_keys: ModifierKeys) {
	self.handle_message(InputPreprocessorMessage::PointerUp { editor_mouse_state, modifier_keys }).await;
}
/// Tap a key: key-down immediately followed by key-up.
pub async fn press(&mut self, key: Key, modifier_keys: ModifierKeys) {
	let key_repeat = false;
	self.handle_message(InputPreprocessorMessage::KeyDown { key, modifier_keys, key_repeat }).await;
	self.handle_message(InputPreprocessorMessage::KeyUp { key, modifier_keys, key_repeat }).await;
}
/// Press the left mouse button down at (x, y).
pub async fn left_mousedown(&mut self, x: f64, y: f64, modifier_keys: ModifierKeys) {
	self.mousedown(
		EditorMouseState {
			editor_position: (x, y).into(),
			mouse_keys: MouseKeys::LEFT,
			scroll_delta: ScrollDelta::default(),
		},
		modifier_keys,
	)
	.await;
}
/// Route a raw input-preprocessor message through the full message pipeline.
pub async fn input(&mut self, message: InputPreprocessorMessage) {
	self.handle_message(Message::InputPreprocessor(message)).await;
}
/// Activate a tool, using the dedicated shape-tool activation messages where they exist.
pub async fn select_tool(&mut self, tool_type: ToolType) {
	match tool_type {
		ToolType::Line => self.handle_message(Message::Tool(ToolMessage::ActivateToolShapeLine)).await,
		ToolType::Rectangle => self.handle_message(Message::Tool(ToolMessage::ActivateToolShapeRectangle)).await,
		ToolType::Ellipse => self.handle_message(Message::Tool(ToolMessage::ActivateToolShapeEllipse)).await,
		_ => self.handle_message(Message::Tool(ToolMessage::ActivateTool { tool_type })).await,
	};
}
/// Set the primary working color.
pub async fn select_primary_color(&mut self, color: Color) {
	self.handle_message(Message::Tool(ToolMessage::SelectWorkingColor { color, primary: true })).await;
}
/// Set the secondary working color.
pub async fn select_secondary_color(&mut self, color: Color) {
	self.handle_message(Message::Tool(ToolMessage::SelectWorkingColor { color, primary: false })).await;
}
/// Paste a raster image into the document, optionally at the given mouse position.
pub async fn create_raster_image(&mut self, image: graphene_std::raster::Image<Color>, mouse: Option<(f64, f64)>) {
	self.handle_message(PortfolioMessage::PasteImage {
		name: None,
		image,
		mouse,
		parent_and_insert_index: None,
	})
	.await;
}
/// Draw a spline through `points` with the Spline tool, pressing Enter to finish.
pub async fn draw_spline(&mut self, points: &[DVec2]) {
	self.select_tool(ToolType::Spline).await;
	for &point in points {
		self.click_tool(ToolType::Spline, MouseKeys::LEFT, point, ModifierKeys::empty()).await;
	}
	self.press(Key::Enter, ModifierKeys::empty()).await;
}
/// The first selected layer of the active document, if any.
pub async fn get_selected_layer(&mut self) -> Option<LayerNodeIdentifier> {
	self.active_document().network_interface.selected_nodes().selected_layers(self.active_document().metadata()).next()
}
/// Send a left-button double-click at `position`.
pub async fn double_click(&mut self, position: DVec2) {
	self.handle_message(InputPreprocessorMessage::DoubleClick {
		editor_mouse_state: EditorMouseState {
			editor_position: position,
			mouse_keys: MouseKeys::LEFT,
			scroll_delta: ScrollDelta::default(),
		},
		modifier_keys: ModifierKeys::empty(),
	})
	.await;
}
/// Press at the first point, drag through every remaining point, and release at the last one.
/// Does nothing when `points` is empty.
pub async fn drag_path(&mut self, points: &[DVec2], modifier_keys: ModifierKeys) {
	let Some((&start, rest)) = points.split_first() else {
		return;
	};
	self.move_mouse(start.x, start.y, modifier_keys, MouseKeys::empty()).await;
	self.left_mousedown(start.x, start.y, modifier_keys).await;
	for &point in rest {
		self.move_mouse(point.x, point.y, modifier_keys, MouseKeys::LEFT).await;
	}
	// `points` is non-empty here, so `last()` cannot fail.
	let end = *points.last().unwrap();
	self.mouseup(
		EditorMouseState {
			editor_position: end,
			mouse_keys: MouseKeys::empty(),
			scroll_delta: ScrollDelta::default(),
		},
		modifier_keys,
	)
	.await;
}
/// Necessary for doing snapping since snaps outside of the viewport are discarded
pub async fn set_viewport_size(&mut self, top_left: DVec2, bottom_right: DVec2) {
	self.handle_message(ViewportMessage::Update {
		x: top_left.x,
		y: top_left.y,
		width: bottom_right.x - top_left.x,
		height: bottom_right.y - top_left.y,
		// Viewport scale is fixed at 1 for tests.
		scale: 1.,
	})
	.await;
}
/// Insert a node of type `name` via the node graph context menu and return its new id.
pub async fn create_node_by_name(&mut self, name: impl Into<String>) -> NodeId {
	let node_id = NodeId::new();
	self.handle_message(NodeGraphMessage::CreateNodeFromContextMenu {
		node_id: Some(node_id),
		node_type: name.into(),
		xy: None,
		add_transaction: true,
	})
	.await;
	node_id
}
}
/// Test-only assertions on frontend messages produced by the editor.
pub trait FrontendMessageTestUtils {
	/// Panics if this message carries a node graph error diagnostic.
	fn check_node_graph_error(&self);
}
impl FrontendMessageTestUtils for FrontendMessage {
fn check_node_graph_error(&self) {
let FrontendMessage::UpdateNodeGraphErrorDiagnostic { error } = self else { return };
if let Some(error) = error {
panic!("error: {:?}", error);
}
}
}
#[cfg(test)]
pub mod test_prelude {
pub use super::FrontendMessageTestUtils;
pub use crate::application::Editor;
pub use crate::float_eq;
pub use crate::messages::input_mapper::utility_types::input_keyboard::{Key, ModifierKeys};
pub use crate::messages::input_mapper::utility_types::input_mouse::MouseKeys;
pub use crate::messages::portfolio::document::utility_types::clipboards::Clipboard;
pub use crate::messages::portfolio::document::utility_types::document_metadata::LayerNodeIdentifier;
pub use crate::messages::prelude::*;
pub use crate::messages::tool::common_functionality::graph_modification_utils::{NodeGraphLayer, is_layer_fed_by_node_of_name};
pub use crate::messages::tool::utility_types::ToolType;
pub use crate::node_graph_executor::NodeRuntime;
pub use crate::test_utils::EditorTestUtils;
pub use core::f64;
pub use glam::{DVec2, IVec2};
pub use graph_craft::document::DocumentNode;
pub use graphene_std::raster::{Color, Image};
pub use graphene_std::transform::Footprint;
pub use graphene_std::{InputAccessor, InputAccessorSource};
/// Assert that two floats are equal to within an absolute tolerance of `1e-10`, panicking with both values otherwise.
#[macro_export]
macro_rules! float_eq {
	($left:expr, $right:expr $(,)?) => {
		// Evaluate each expression exactly once by binding references, mirroring `assert_eq!`.
		match (&$left, &$right) {
			(left_val, right_val) => {
				if (*left_val - *right_val).abs() > 1e-10 {
					panic!("assertion `left == right` failed\n left: {}\n right: {}", *left_val, *right_val)
				}
			}
		}
	};
}
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/dispatcher.rs | editor/src/dispatcher.rs | use crate::messages::debug::utility_types::MessageLoggingVerbosity;
use crate::messages::defer::DeferMessageContext;
use crate::messages::dialog::DialogMessageContext;
use crate::messages::layout::layout_message_handler::LayoutMessageContext;
use crate::messages::preferences::preferences_message_handler::PreferencesMessageContext;
use crate::messages::prelude::*;
use crate::messages::tool::common_functionality::utility_functions::make_path_editable_is_allowed;
/// Routes every [`Message`] to its handler and collects the frontend responses.
#[derive(Debug, Default)]
pub struct Dispatcher {
	// Stack of message queues; deeper (higher-index) queues are popped from first.
	message_queues: Vec<VecDeque<Message>>,
	// Frontend messages produced during dispatch, drained by the caller.
	pub responses: Vec<FrontendMessage>,
	// Buffered frontend-update messages, flushed when the next frame is requested.
	pub frontend_update_messages: Vec<Message>,
	pub message_handlers: DispatcherMessageHandlers,
}
/// The complete set of message handlers owned by the dispatcher, one per message family.
#[derive(Debug, Default)]
pub struct DispatcherMessageHandlers {
	animation_message_handler: AnimationMessageHandler,
	app_window_message_handler: AppWindowMessageHandler,
	broadcast_message_handler: BroadcastMessageHandler,
	clipboard_message_handler: ClipboardMessageHandler,
	debug_message_handler: DebugMessageHandler,
	defer_message_handler: DeferMessageHandler,
	dialog_message_handler: DialogMessageHandler,
	globals_message_handler: GlobalsMessageHandler,
	input_preprocessor_message_handler: InputPreprocessorMessageHandler,
	key_mapping_message_handler: KeyMappingMessageHandler,
	layout_message_handler: LayoutMessageHandler,
	menu_bar_message_handler: MenuBarMessageHandler,
	// Public so tests can reach the portfolio (documents, executor) directly.
	pub portfolio_message_handler: PortfolioMessageHandler,
	preferences_message_handler: PreferencesMessageHandler,
	tool_message_handler: ToolMessageHandler,
	viewport_message_handler: ViewportMessageHandler,
}
impl DispatcherMessageHandlers {
	/// Construct the handler set, wiring the given node graph executor into the portfolio handler.
	pub fn with_executor(executor: crate::node_graph_executor::NodeGraphExecutor) -> Self {
		let portfolio_message_handler = PortfolioMessageHandler::with_executor(executor);
		Self { portfolio_message_handler, ..Default::default() }
	}
}
/// For optimization, these are messages guaranteed to be redundant when repeated.
/// The last occurrence of the message in the message queue is sufficient to ensure correct behavior.
/// In addition, these messages do not change any state in the backend (aside from caches).
const SIDE_EFFECT_FREE_MESSAGES: &[MessageDiscriminant] = &[
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::DocumentStructureChanged)),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::NodeGraph(
		NodeGraphMessageDiscriminant::RunDocumentGraph,
	))),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::SubmitActiveGraphRender),
	MessageDiscriminant::Frontend(FrontendMessageDiscriminant::TriggerFontDataLoad),
	MessageDiscriminant::Frontend(FrontendMessageDiscriminant::UpdateUIScale),
];
/// Since we don't need to update the frontend multiple times per frame,
/// we have a set of messages which we will buffer until the next frame is requested.
const FRONTEND_UPDATE_MESSAGES: &[MessageDiscriminant] = &[
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::PropertiesPanel(
		PropertiesPanelMessageDiscriminant::Refresh,
	))),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::UpdateDocumentWidgets),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::Overlays(OverlaysMessageDiscriminant::Draw))),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::RenderRulers)),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::Document(DocumentMessageDiscriminant::RenderScrollbars)),
	MessageDiscriminant::Frontend(FrontendMessageDiscriminant::UpdateDocumentLayerStructure),
];
// NOTE(review): presumably these messages are excluded from debug message logging because they fire
// constantly (per frame / on autosave); the consuming code is outside this chunk — confirm.
const DEBUG_MESSAGE_BLOCK_LIST: &[MessageDiscriminant] = &[
	MessageDiscriminant::Broadcast(BroadcastMessageDiscriminant::TriggerEvent(EventMessageDiscriminant::AnimationFrame)),
	MessageDiscriminant::Animation(AnimationMessageDiscriminant::IncrementFrameCounter),
	MessageDiscriminant::Portfolio(PortfolioMessageDiscriminant::AutoSaveAllDocuments),
];
// TODO: Find a way to combine these with the list above. We use strings for now since these are the standard variant names used by multiple messages. But having these also type-checked would be best.
const DEBUG_MESSAGE_ENDING_BLOCK_LIST: &[&str] = &["PointerMove", "PointerOutsideViewport", "Overlays", "Draw", "CurrentTime", "Time"];
impl Dispatcher {
pub fn new() -> Self {
Self::default()
}
pub fn with_executor(executor: crate::node_graph_executor::NodeGraphExecutor) -> Self {
Self {
message_handlers: DispatcherMessageHandlers::with_executor(executor),
..Default::default()
}
}
// If the deepest queues (higher index in queues list) are now empty (after being popped from) then remove them
fn cleanup_queues(&mut self, leave_last: bool) {
while self.message_queues.last().filter(|queue| queue.is_empty()).is_some() {
if leave_last && self.message_queues.len() == 1 {
break;
}
self.message_queues.pop();
}
}
/// Add a message to a queue so that it can be executed.
/// If `process_after_all_current` is set, all currently queued messages (including children) will be processed first.
/// If not set, it (and its children) will be processed as soon as possible.
pub fn schedule_execution(message_queues: &mut Vec<VecDeque<Message>>, process_after_all_current: bool, messages: impl IntoIterator<Item = Message>) {
match message_queues.first_mut() {
// If there are currently messages being processed and we are processing after them, add to the end of the first queue
Some(queue) if process_after_all_current => queue.extend(messages),
// In all other cases, make a new inner queue and add our message there
_ => message_queues.push(VecDeque::from_iter(messages)),
}
}
pub fn handle_message<T: Into<Message>>(&mut self, message: T, process_after_all_current: bool) {
let message = message.into();
// If we are not maintaining the buffer, simply add to the current queue
Self::schedule_execution(&mut self.message_queues, process_after_all_current, [message]);
while let Some(message) = self.message_queues.last_mut().and_then(VecDeque::pop_front) {
// Skip processing of this message if it will be processed later (at the end of the shallowest level queue)
if FRONTEND_UPDATE_MESSAGES.contains(&message.to_discriminant()) {
let already_in_queue = self.message_queues.first().is_some_and(|queue| queue.contains(&message));
if already_in_queue {
self.cleanup_queues(false);
continue;
} else if self.message_queues.len() > 1 {
if !self.frontend_update_messages.contains(&message) {
self.frontend_update_messages.push(message);
}
self.cleanup_queues(false);
continue;
}
}
if SIDE_EFFECT_FREE_MESSAGES.contains(&message.to_discriminant()) {
let already_in_queue = self.message_queues.first().filter(|queue| queue.contains(&message)).is_some();
if already_in_queue {
self.log_deferred_message(&message, &self.message_queues, self.message_handlers.debug_message_handler.message_logging_verbosity);
self.cleanup_queues(false);
continue;
} else if self.message_queues.len() > 1 {
self.log_deferred_message(&message, &self.message_queues, self.message_handlers.debug_message_handler.message_logging_verbosity);
self.cleanup_queues(true);
self.message_queues[0].add(message);
continue;
}
}
// Print the message at a verbosity level of `info`
self.log_message(&message, &self.message_queues, self.message_handlers.debug_message_handler.message_logging_verbosity);
// Create a new queue for the child messages
let mut queue = VecDeque::new();
// Process the action by forwarding it to the relevant message handler, or saving the FrontendMessage to be sent to the frontend
match message {
Message::Animation(message) => {
if let AnimationMessage::IncrementFrameCounter = &message {
self.message_queues[0].extend(self.frontend_update_messages.drain(..));
}
self.message_handlers.animation_message_handler.process_message(message, &mut queue, ());
}
Message::AppWindow(message) => {
self.message_handlers.app_window_message_handler.process_message(message, &mut queue, ());
}
Message::Broadcast(message) => self.message_handlers.broadcast_message_handler.process_message(message, &mut queue, ()),
Message::Clipboard(message) => self.message_handlers.clipboard_message_handler.process_message(message, &mut queue, ()),
Message::Debug(message) => {
self.message_handlers.debug_message_handler.process_message(message, &mut queue, ());
}
Message::Defer(message) => {
let context = DeferMessageContext {
portfolio: &self.message_handlers.portfolio_message_handler,
};
self.message_handlers.defer_message_handler.process_message(message, &mut queue, context);
}
Message::Dialog(message) => {
let context = DialogMessageContext {
portfolio: &self.message_handlers.portfolio_message_handler,
preferences: &self.message_handlers.preferences_message_handler,
};
self.message_handlers.dialog_message_handler.process_message(message, &mut queue, context);
}
Message::Frontend(message) => {
// Handle these messages immediately by returning early
if let FrontendMessage::TriggerFontDataLoad { .. } | FrontendMessage::TriggerFontCatalogLoad = message {
self.responses.push(message);
self.cleanup_queues(false);
// Return early to avoid running the code after the match block
return;
} else {
// `FrontendMessage`s are saved and will be sent to the frontend after the message queue is done being processed
self.responses.push(message);
}
}
Message::Globals(message) => {
self.message_handlers.globals_message_handler.process_message(message, &mut queue, ());
}
Message::InputPreprocessor(message) => {
let keyboard_platform = GLOBAL_PLATFORM.get().copied().unwrap_or_default().as_keyboard_platform_layout();
self.message_handlers.input_preprocessor_message_handler.process_message(
message,
&mut queue,
InputPreprocessorMessageContext {
keyboard_platform,
viewport: &self.message_handlers.viewport_message_handler,
},
);
}
Message::KeyMapping(message) => {
let input = &self.message_handlers.input_preprocessor_message_handler;
let actions = self.collect_actions();
self.message_handlers
.key_mapping_message_handler
.process_message(message, &mut queue, KeyMappingMessageContext { input, actions });
}
Message::Layout(message) => {
let action_input_mapping = &|action_to_find: &MessageDiscriminant| self.message_handlers.key_mapping_message_handler.action_input_mapping(action_to_find);
let context = LayoutMessageContext { action_input_mapping };
self.message_handlers.layout_message_handler.process_message(message, &mut queue, context);
}
Message::Portfolio(message) => {
self.message_handlers.portfolio_message_handler.process_message(
message,
&mut queue,
PortfolioMessageContext {
ipp: &self.message_handlers.input_preprocessor_message_handler,
preferences: &self.message_handlers.preferences_message_handler,
current_tool: &self.message_handlers.tool_message_handler.tool_state.tool_data.active_tool_type,
reset_node_definitions_on_open: self.message_handlers.portfolio_message_handler.reset_node_definitions_on_open,
timing_information: self.message_handlers.animation_message_handler.timing_information(),
animation: &self.message_handlers.animation_message_handler,
viewport: &self.message_handlers.viewport_message_handler,
},
);
}
Message::MenuBar(message) => {
let menu_bar_message_handler = &mut self.message_handlers.menu_bar_message_handler;
menu_bar_message_handler.data_panel_open = self.message_handlers.portfolio_message_handler.data_panel_open;
menu_bar_message_handler.layers_panel_open = self.message_handlers.portfolio_message_handler.layers_panel_open;
menu_bar_message_handler.properties_panel_open = self.message_handlers.portfolio_message_handler.properties_panel_open;
menu_bar_message_handler.message_logging_verbosity = self.message_handlers.debug_message_handler.message_logging_verbosity;
menu_bar_message_handler.reset_node_definitions_on_open = self.message_handlers.portfolio_message_handler.reset_node_definitions_on_open;
if let Some(document) = self
.message_handlers
.portfolio_message_handler
.active_document_id
.and_then(|document_id| self.message_handlers.portfolio_message_handler.documents.get_mut(&document_id))
{
let selected_nodes = document.network_interface.selected_nodes();
let metadata = &document.network_interface.document_network_metadata().persistent_metadata;
menu_bar_message_handler.has_active_document = true;
menu_bar_message_handler.canvas_tilted = document.document_ptz.tilt() != 0.;
menu_bar_message_handler.canvas_flipped = document.document_ptz.flip;
menu_bar_message_handler.rulers_visible = document.rulers_visible;
menu_bar_message_handler.node_graph_open = document.is_graph_overlay_open();
menu_bar_message_handler.has_selected_nodes = selected_nodes.selected_nodes().next().is_some();
menu_bar_message_handler.has_selected_layers = selected_nodes.selected_visible_layers(&document.network_interface).next().is_some();
menu_bar_message_handler.has_selection_history = (!metadata.selection_undo_history.is_empty(), !metadata.selection_redo_history.is_empty());
menu_bar_message_handler.make_path_editable_is_allowed = make_path_editable_is_allowed(&mut document.network_interface).is_some();
} else {
menu_bar_message_handler.has_active_document = false;
menu_bar_message_handler.canvas_tilted = false;
menu_bar_message_handler.canvas_flipped = false;
menu_bar_message_handler.rulers_visible = false;
menu_bar_message_handler.node_graph_open = false;
menu_bar_message_handler.has_selected_nodes = false;
menu_bar_message_handler.has_selected_layers = false;
menu_bar_message_handler.has_selection_history = (false, false);
menu_bar_message_handler.make_path_editable_is_allowed = false;
}
menu_bar_message_handler.process_message(message, &mut queue, ());
}
Message::Preferences(message) => {
let context = PreferencesMessageContext {
tool_message_handler: &self.message_handlers.tool_message_handler,
};
self.message_handlers.preferences_message_handler.process_message(message, &mut queue, context);
}
Message::Tool(message) => {
let Some(document_id) = self.message_handlers.portfolio_message_handler.active_document_id() else {
warn!("Called ToolMessage without an active document.\nGot {message:?}");
return;
};
let Some(document) = self.message_handlers.portfolio_message_handler.documents.get_mut(&document_id) else {
warn!("Called ToolMessage with an invalid active document.\nGot {message:?}");
return;
};
let context = ToolMessageContext {
document_id,
document,
input: &self.message_handlers.input_preprocessor_message_handler,
persistent_data: &self.message_handlers.portfolio_message_handler.persistent_data,
node_graph: &self.message_handlers.portfolio_message_handler.executor,
preferences: &self.message_handlers.preferences_message_handler,
viewport: &self.message_handlers.viewport_message_handler,
};
self.message_handlers.tool_message_handler.process_message(message, &mut queue, context);
}
Message::Viewport(message) => {
self.message_handlers.viewport_message_handler.process_message(message, &mut queue, ());
}
Message::NoOp => {}
Message::Batched { messages } => {
messages.into_iter().for_each(|message| self.handle_message(message, false));
}
}
// If there are child messages, append the queue to the list of queues
if !queue.is_empty() {
self.message_queues.push(queue);
}
self.cleanup_queues(false);
}
}
pub fn collect_actions(&self) -> ActionList {
// TODO: Reduce the number of heap allocations
let mut list = Vec::new();
list.extend(self.message_handlers.app_window_message_handler.actions());
list.extend(self.message_handlers.clipboard_message_handler.actions());
list.extend(self.message_handlers.dialog_message_handler.actions());
list.extend(self.message_handlers.animation_message_handler.actions());
list.extend(self.message_handlers.input_preprocessor_message_handler.actions());
list.extend(self.message_handlers.key_mapping_message_handler.actions());
list.extend(self.message_handlers.debug_message_handler.actions());
if let Some(document) = self.message_handlers.portfolio_message_handler.active_document()
&& !document.graph_view_overlay_open
{
list.extend(self.message_handlers.tool_message_handler.actions_with_preferences(&self.message_handlers.preferences_message_handler));
}
list.extend(self.message_handlers.portfolio_message_handler.actions());
list
}
pub fn poll_node_graph_evaluation(&mut self, responses: &mut VecDeque<Message>) -> Result<(), String> {
self.message_handlers.portfolio_message_handler.poll_node_graph_evaluation(responses)
}
/// Create the tree structure for logging the messages as a tree
fn create_indents(queues: &[VecDeque<Message>]) -> String {
String::from_iter(queues.iter().enumerate().skip(1).map(|(index, queue)| {
if index == queues.len() - 1 {
if queue.is_empty() { "└── " } else { "├── " }
} else if queue.is_empty() {
" "
} else {
"│ "
}
}))
}
/// Logs a message that is about to be executed, either as a tree
/// with a discriminant or the entire payload (depending on settings)
fn log_message(&self, message: &Message, queues: &[VecDeque<Message>], message_logging_verbosity: MessageLoggingVerbosity) {
let discriminant = MessageDiscriminant::from(message);
let is_blocked = DEBUG_MESSAGE_BLOCK_LIST.contains(&discriminant) || DEBUG_MESSAGE_ENDING_BLOCK_LIST.iter().any(|blocked_name| discriminant.local_name().ends_with(blocked_name));
let is_empty_batched = if let Message::Batched { messages } = message { messages.is_empty() } else { false };
if !is_blocked && !is_empty_batched {
match message_logging_verbosity {
MessageLoggingVerbosity::Off => {}
MessageLoggingVerbosity::Names => {
info!("{}{:?}", Self::create_indents(queues), message.to_discriminant());
}
MessageLoggingVerbosity::Contents => {
if !(matches!(message, Message::InputPreprocessor(_))) {
info!("Message: {}{:?}", Self::create_indents(queues), message);
}
}
}
}
}
/// Logs into the tree that the message is in the side effect free messages and its execution will be deferred
fn log_deferred_message(&self, message: &Message, queues: &[VecDeque<Message>], message_logging_verbosity: MessageLoggingVerbosity) {
if let MessageLoggingVerbosity::Names = message_logging_verbosity {
info!("{}Deferred \"{:?}\" because it's a SIDE_EFFECT_FREE_MESSAGE", Self::create_indents(queues), message.to_discriminant());
}
}
}
#[cfg(test)]
mod test {
pub use crate::test_utils::test_prelude::*;
/// Create an editor with three layers
/// 1. A red rectangle
/// 2. A blue shape
/// 3. A green ellipse
async fn create_editor_with_three_layers() -> EditorTestUtils {
let mut editor = EditorTestUtils::create();
editor.new_document().await;
editor.select_primary_color(Color::RED).await;
editor.draw_rect(100., 200., 300., 400.).await;
editor.select_primary_color(Color::BLUE).await;
editor.draw_polygon(10., 1200., 1300., 400.).await;
editor.select_primary_color(Color::GREEN).await;
editor.draw_ellipse(104., 1200., 1300., 400.).await;
editor
}
/// - create rect, shape and ellipse
/// - copy
/// - paste
/// - assert that ellipse was copied
#[tokio::test]
async fn copy_paste_single_layer() {
let mut editor = create_editor_with_three_layers().await;
let layers_before_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
editor.handle_message(PortfolioMessage::Copy { clipboard: Clipboard::Internal }).await;
editor
.handle_message(PortfolioMessage::PasteIntoFolder {
clipboard: Clipboard::Internal,
parent: LayerNodeIdentifier::ROOT_PARENT,
insert_index: 0,
})
.await;
let layers_after_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
assert_eq!(layers_before_copy.len(), 3);
assert_eq!(layers_after_copy.len(), 4);
// Existing layers are unaffected
for i in 0..=2 {
assert_eq!(layers_before_copy[i], layers_after_copy[i + 1]);
}
}
#[cfg_attr(miri, ignore)]
/// - create rect, shape and ellipse
/// - select shape
/// - copy
/// - paste
/// - assert that shape was copied
#[tokio::test]
async fn copy_paste_single_layer_from_middle() {
let mut editor = create_editor_with_three_layers().await;
let layers_before_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
let shape_id = editor.active_document().metadata().all_layers().nth(1).unwrap();
editor.handle_message(NodeGraphMessage::SelectedNodesSet { nodes: vec![shape_id.to_node()] }).await;
editor.handle_message(PortfolioMessage::Copy { clipboard: Clipboard::Internal }).await;
editor
.handle_message(PortfolioMessage::PasteIntoFolder {
clipboard: Clipboard::Internal,
parent: LayerNodeIdentifier::ROOT_PARENT,
insert_index: 0,
})
.await;
let layers_after_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
assert_eq!(layers_before_copy.len(), 3);
assert_eq!(layers_after_copy.len(), 4);
// Existing layers are unaffected
for i in 0..=2 {
assert_eq!(layers_before_copy[i], layers_after_copy[i + 1]);
}
}
#[cfg_attr(miri, ignore)]
/// - create rect, shape and ellipse
/// - select ellipse and rect
/// - copy
/// - delete
/// - create another rect
/// - paste
/// - paste
#[tokio::test]
async fn copy_paste_deleted_layers() {
let mut editor = create_editor_with_three_layers().await;
assert_eq!(editor.active_document().metadata().all_layers().count(), 3);
let layers_before_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
let rect_id = layers_before_copy[0];
let shape_id = layers_before_copy[1];
let ellipse_id = layers_before_copy[2];
editor
.handle_message(NodeGraphMessage::SelectedNodesSet {
nodes: vec![rect_id.to_node(), ellipse_id.to_node()],
})
.await;
editor.handle_message(PortfolioMessage::Copy { clipboard: Clipboard::Internal }).await;
editor.handle_message(NodeGraphMessage::DeleteSelectedNodes { delete_children: true }).await;
editor.draw_rect(0., 800., 12., 200.).await;
editor
.handle_message(PortfolioMessage::PasteIntoFolder {
clipboard: Clipboard::Internal,
parent: LayerNodeIdentifier::ROOT_PARENT,
insert_index: 0,
})
.await;
editor
.handle_message(PortfolioMessage::PasteIntoFolder {
clipboard: Clipboard::Internal,
parent: LayerNodeIdentifier::ROOT_PARENT,
insert_index: 0,
})
.await;
let layers_after_copy = editor.active_document().metadata().all_layers().collect::<Vec<_>>();
assert_eq!(layers_before_copy.len(), 3);
assert_eq!(layers_after_copy.len(), 6);
println!("{layers_after_copy:?} {layers_before_copy:?}");
assert_eq!(layers_after_copy[5], shape_id);
}
#[tokio::test]
/// This test will fail when you make changes to the underlying serialization format for a document.
async fn check_if_demo_art_opens() {
use crate::messages::layout::utility_types::widget_prelude::*;
let print_problem_to_terminal_on_failure = |value: &String| {
println!();
println!("-------------------------------------------------");
println!("Failed test due to receiving a DisplayDialogError while loading a Graphite demo file.");
println!();
println!("NOTE:");
println!("Document upgrading isn't performed in tests like when opening in the actual editor.");
println!("You may need to open and re-save a document in the editor to apply its migrations.");
println!();
println!("DisplayDialogError details:");
println!();
println!("Description:");
println!("{value}");
println!("-------------------------------------------------");
println!();
panic!()
};
let mut editor = EditorTestUtils::create();
// UNCOMMENT THIS FOR RUNNING UNDER MIRI
//
// let files = [
// include_str!("../../demo-artwork/changing-seasons.graphite"),
// include_str!("../../demo-artwork/isometric-fountain.graphite"),
// include_str!("../../demo-artwork/painted-dreams.graphite"),
// include_str!("../../demo-artwork/procedural-string-lights.graphite"),
// include_str!("../../demo-artwork/parametric-dunescape.graphite"),
// include_str!("../../demo-artwork/red-dress.graphite"),
// include_str!("../../demo-artwork/valley-of-spires.graphite"),
// ];
// for (id, document_serialized_content) in files.iter().enumerate() {
// let document_name = format!("document {id}");
for (document_name, _, file_name) in crate::messages::dialog::simple_dialogs::ARTWORK {
let document_serialized_content = std::fs::read_to_string(format!("../demo-artwork/{file_name}")).unwrap();
assert_eq!(
document_serialized_content.lines().count(),
1,
"Demo artwork '{document_name}' has more than 1 line (remember to open and re-save it in Graphite)",
);
let responses = editor.editor.handle_message(PortfolioMessage::OpenDocumentFile {
document_name: Some(document_name.to_string()),
document_path: None,
document_serialized_content,
});
// Check if the graph renders
if let Err(e) = editor.eval_graph().await {
print_problem_to_terminal_on_failure(&format!("Failed to evaluate the graph for document '{document_name}':\n{e}"));
}
for response in responses {
// Check for the existence of the file format incompatibility warning dialog after opening the test file
if let FrontendMessage::UpdateDialogColumn1 { diff } = response {
if let DiffUpdate::Layout(sub_layout) = &diff[0].new_value {
if let LayoutGroup::Row { widgets } = &sub_layout.0[0] {
if let Widget::TextLabel(TextLabel { value, .. }) = &*widgets[0].widget {
print_problem_to_terminal_on_failure(value);
}
}
}
}
}
}
}
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/application.rs | editor/src/application.rs | use crate::dispatcher::Dispatcher;
use crate::messages::prelude::*;
pub use graphene_std::uuid::*;
// TODO: serialize with serde to save the current editor state
pub struct Editor {
pub dispatcher: Dispatcher,
}
impl Editor {
/// Construct the editor.
/// Remember to provide a random seed with `editor::set_uuid_seed(seed)` before any editors can be used.
pub fn new() -> Self {
Self { dispatcher: Dispatcher::new() }
}
#[cfg(test)]
pub(crate) fn new_local_executor() -> (Self, crate::node_graph_executor::NodeRuntime) {
let (runtime, executor) = crate::node_graph_executor::NodeGraphExecutor::new_with_local_runtime();
let dispatcher = Dispatcher::with_executor(executor);
(Self { dispatcher }, runtime)
}
pub fn handle_message<T: Into<Message>>(&mut self, message: T) -> Vec<FrontendMessage> {
self.dispatcher.handle_message(message, true);
std::mem::take(&mut self.dispatcher.responses)
}
pub fn poll_node_graph_evaluation(&mut self, responses: &mut VecDeque<Message>) -> Result<(), String> {
self.dispatcher.poll_node_graph_evaluation(responses)
}
}
impl Default for Editor {
fn default() -> Self {
Self::new()
}
}
pub const GRAPHITE_RELEASE_SERIES: &str = env!("GRAPHITE_RELEASE_SERIES");
pub const GRAPHITE_GIT_COMMIT_DATE: &str = env!("GRAPHITE_GIT_COMMIT_DATE");
pub const GRAPHITE_GIT_COMMIT_HASH: &str = env!("GRAPHITE_GIT_COMMIT_HASH");
pub const GRAPHITE_GIT_COMMIT_BRANCH: &str = env!("GRAPHITE_GIT_COMMIT_BRANCH");
pub fn commit_info_localized(localized_commit_date: &str) -> String {
format!(
"Release Series: {}\n\
Branch: {}\n\
Commit: {}\n\
{}",
GRAPHITE_RELEASE_SERIES,
GRAPHITE_GIT_COMMIT_BRANCH,
GRAPHITE_GIT_COMMIT_HASH.get(..8).unwrap_or(GRAPHITE_GIT_COMMIT_HASH),
localized_commit_date
)
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/macros.rs | editor/src/macros.rs | /// Syntax sugar for initializing an `ActionList`
///
/// # Example
///
/// ```ignore
/// actions!(DocumentMessage::Undo, DocumentMessage::Redo);
/// ```
///
/// expands to:
/// ```ignore
/// vec![vec![DocumentMessage::Undo, DocumentMessage::Redo]];
/// ```
///
/// and
/// ```ignore
/// actions!(DocumentMessage;
/// Undo,
/// Redo,
/// );
/// ```
///
/// expands to:
/// ```ignore
/// vec![vec![DocumentMessage::Undo, DocumentMessage::Redo]];
/// ```
///
macro_rules! actions {
($($v:expr_2021),* $(,)?) => {{
vec![$(vec![$v.into()]),*]
}};
($name:ident; $($v:ident),* $(,)?) => {{
vec![vec![$(($name::$v).into()),*]]
}};
}
/// Does the same thing as the `actions!` macro but wraps everything in:
///
/// ```ignore
/// fn actions(&self) -> ActionList {
/// actions!(…)
/// }
/// ```
macro_rules! advertise_actions {
($($v:expr_2021),* $(,)?) => {
fn actions(&self) -> $crate::utility_traits::ActionList {
actions!($($v),*)
}
};
($name:ident; $($v:ident),* $(,)?) => {
fn actions(&self) -> $crate::utility_traits::ActionList {
actions!($name; $($v),*)
}
}
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
GraphiteEditor/Graphite | https://github.com/GraphiteEditor/Graphite/blob/42440c0d0bcf5735b05d8a9e5bd27187f74b1589/editor/src/generate_ts_types.rs | editor/src/generate_ts_types.rs | /// Running this test will generate a `types.ts` file at the root of the repo,
/// containing every type annotated with `specta::Type`
// #[cfg(all(test, feature = "specta-export"))]
#[ignore]
#[test]
fn generate_ts_types() {
// TODO: Un-comment this out when we figure out how to reenable the "typescript` Specta feature flag
// use crate::messages::prelude::FrontendMessage;
// use specta::ts::{export_named_datatype, BigIntExportBehavior, ExportConfig};
// use specta::{NamedType, TypeMap};
// use std::fs::File;
// use std::io::Write;
// let config = ExportConfig::new().bigint(BigIntExportBehavior::Number);
// let mut type_map = TypeMap::default();
// let datatype = FrontendMessage::definition_named_data_type(&mut type_map);
// let mut export = String::new();
// export += &export_named_datatype(&config, &datatype, &type_map).unwrap();
// type_map
// .iter()
// .map(|(_, v)| v)
// .flat_map(|v| export_named_datatype(&config, v, &type_map))
// .for_each(|e| export += &format!("\n\n{e}"));
// let mut file = File::create("../types.ts").unwrap();
// write!(file, "{export}").ok();
}
| rust | Apache-2.0 | 42440c0d0bcf5735b05d8a9e5bd27187f74b1589 | 2026-01-04T15:38:29.103662Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.