repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/workspace_symbols.rs | crates/ty_server/src/server/api/requests/workspace_symbols.rs | use lsp_types::request::WorkspaceSymbolRequest;
use lsp_types::{WorkspaceSymbolParams, WorkspaceSymbolResponse};
use ty_ide::{WorkspaceSymbolInfo, workspace_symbols};
use crate::server::api::symbols::convert_to_lsp_symbol_information;
use crate::server::api::traits::{
BackgroundRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::SessionSnapshot;
use crate::session::client::Client;
/// Handles the `workspace/symbol` request: searches every project in the
/// session for symbols matching the client-supplied query string.
pub(crate) struct WorkspaceSymbolRequestHandler;

impl RequestHandler for WorkspaceSymbolRequestHandler {
    type RequestType = WorkspaceSymbolRequest;
}
impl BackgroundRequestHandler for WorkspaceSymbolRequestHandler {
    /// Queries every project in the session for symbols matching the query
    /// and flattens the matches into a single `SymbolInformation` list.
    ///
    /// Returns `Ok(None)` when no symbols match (the LSP allows both `null`
    /// and an empty list; `null` is preferred here).
    fn run(
        snapshot: &SessionSnapshot,
        _client: &Client,
        params: WorkspaceSymbolParams,
    ) -> crate::server::Result<Option<WorkspaceSymbolResponse>> {
        let query = &params.query;

        // The position encoding is a session-wide constant; hoist it out of
        // the per-project / per-symbol loops instead of re-querying it for
        // every single symbol.
        let encoding = snapshot.position_encoding();

        let mut all_symbols = Vec::new();

        // Iterate through all projects in the session
        for db in snapshot.projects() {
            // Get workspace symbols matching the query
            let start = std::time::Instant::now();
            let workspace_symbol_infos = workspace_symbols(db, query);
            tracing::debug!(
                "Found {len} workspace symbols in {elapsed:?}",
                len = workspace_symbol_infos.len(),
                // `elapsed()` is the idiomatic shorthand for
                // `Instant::now().duration_since(start)`.
                elapsed = start.elapsed()
            );

            // Convert to LSP SymbolInformation
            for workspace_symbol_info in workspace_symbol_infos {
                let WorkspaceSymbolInfo { symbol, file } = workspace_symbol_info;
                let Some(symbol) = convert_to_lsp_symbol_information(db, file, symbol, encoding)
                else {
                    // Note: `{}` is the *file* containing the symbol; the
                    // previous message mislabeled the path as the symbol name.
                    tracing::debug!(
                        "Failed to convert a symbol in file '{}' to LSP symbol information",
                        file.path(db)
                    );
                    continue;
                };
                all_symbols.push(symbol);
            }
        }

        if all_symbols.is_empty() {
            Ok(None)
        } else {
            Ok(Some(WorkspaceSymbolResponse::Flat(all_symbols)))
        }
    }
}
// Marker impl: the workspace-symbol search can safely be re-run if the
// request is cancelled by a concurrent salsa write.
impl RetriableRequestHandler for WorkspaceSymbolRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/code_action.rs | crates/ty_server/src/server/api/requests/code_action.rs | use std::borrow::Cow;
use std::collections::HashMap;
use lsp_types::{self as types, NumberOrString, TextEdit, Url, request as req};
use ruff_db::files::File;
use ruff_diagnostics::Edit;
use ruff_text_size::Ranged;
use ty_ide::code_actions;
use ty_project::ProjectDatabase;
use types::{CodeActionKind, CodeActionOrCommand};
use crate::db::Db;
use crate::document::{RangeExt, ToRangeExt};
use crate::server::Result;
use crate::server::api::RequestHandler;
use crate::server::api::diagnostics::DiagnosticData;
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RetriableRequestHandler};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
use crate::{DIAGNOSTIC_NAME, PositionEncoding};
/// Handles the `textDocument/codeAction` request: offers quick fixes for ty
/// diagnostics that overlap the requested range.
pub(crate) struct CodeActionRequestHandler;

impl RequestHandler for CodeActionRequestHandler {
    type RequestType = req::CodeActionRequest;
}
impl BackgroundDocumentRequestHandler for CodeActionRequestHandler {
    fn document_url(params: &types::CodeActionParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }

    /// Builds quick-fix code actions for every ty diagnostic overlapping the
    /// requested range: fixes embedded in the diagnostic's `data` payload, plus
    /// actions computed on demand by `ty_ide::code_actions`.
    ///
    /// Returns `Ok(None)` when the document can't be resolved or no actions
    /// apply.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: types::CodeActionParams,
    ) -> Result<Option<types::CodeActionResponse>> {
        let diagnostics = params.context.diagnostics;

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        // Both are per-snapshot constants; hoist them out of the diagnostics
        // loop instead of re-querying them on every iteration.
        let url = snapshot.url();
        let encoding = snapshot.encoding();

        let mut actions = Vec::new();
        // Only consider ty's own diagnostics that intersect the request range.
        for mut diagnostic in diagnostics.into_iter().filter(|diagnostic| {
            diagnostic.source.as_deref() == Some(DIAGNOSTIC_NAME)
                && range_intersect(&diagnostic.range, &params.range)
        }) {
            // If the diagnostic includes fixes, offer those up as options.
            if let Some(data) = diagnostic.data.take() {
                let data: DiagnosticData = match serde_json::from_value(data) {
                    Ok(data) => data,
                    Err(err) => {
                        tracing::warn!("Failed to deserialize diagnostic data: {err}");
                        continue;
                    }
                };

                actions.push(CodeActionOrCommand::CodeAction(lsp_types::CodeAction {
                    title: data.fix_title,
                    kind: Some(CodeActionKind::QUICKFIX),
                    diagnostics: Some(vec![diagnostic.clone()]),
                    edit: Some(lsp_types::WorkspaceEdit {
                        changes: Some(data.edits),
                        document_changes: None,
                        change_annotations: None,
                    }),
                    is_preferred: Some(true),
                    command: None,
                    disabled: None,
                    data: None,
                }));
            }

            // Try to find other applicable actions.
            //
            // This is only for actions that are messy to compute at the time of the diagnostic.
            // For instance, suggesting imports requires finding symbols for the entire project,
            // which is dubious when you're in the middle of resolving symbols.
            if let Some(NumberOrString::String(diagnostic_id)) = &diagnostic.code
                && let Some(range) = diagnostic.range.to_text_range(db, file, url, encoding)
            {
                for action in code_actions(db, file, range, diagnostic_id) {
                    actions.push(CodeActionOrCommand::CodeAction(lsp_types::CodeAction {
                        title: action.title,
                        kind: Some(CodeActionKind::QUICKFIX),
                        diagnostics: Some(vec![diagnostic.clone()]),
                        edit: Some(lsp_types::WorkspaceEdit {
                            changes: to_lsp_edits(db, file, encoding, action.edits),
                            document_changes: None,
                            change_annotations: None,
                        }),
                        is_preferred: Some(action.preferred),
                        command: None,
                        disabled: None,
                        data: None,
                    }));
                }
            }
        }

        if actions.is_empty() {
            Ok(None)
        } else {
            Ok(Some(actions))
        }
    }
}
/// Converts ty's source edits for `file` into LSP text edits grouped by the
/// URI of the document each edit targets.
///
/// Returns `None` as soon as any single edit's range cannot be mapped to an
/// LSP location.
fn to_lsp_edits(
    db: &dyn Db,
    file: File,
    encoding: PositionEncoding,
    edits: Vec<Edit>,
) -> Option<HashMap<Url, Vec<TextEdit>>> {
    let mut grouped: HashMap<Url, Vec<lsp_types::TextEdit>> = HashMap::new();

    for source_edit in edits {
        // Bail out entirely if this edit's range can't be converted.
        let Some(lsp_range) = source_edit.range().to_lsp_range(db, file, encoding) else {
            return None;
        };
        let location = lsp_range.to_location()?;

        let text_edit = lsp_types::TextEdit {
            range: location.range,
            new_text: source_edit.content().unwrap_or_default().to_string(),
        };
        grouped.entry(location.uri).or_default().push(text_edit);
    }

    Some(grouped)
}
/// Returns `true` when the two ranges overlap or at least touch.
///
/// Equivalent to clamping to the intersection `[max(starts), min(ends)]` and
/// checking that it is non-degenerate.
fn range_intersect(range: &lsp_types::Range, other: &lsp_types::Range) -> bool {
    // The later of the two starts must not be past the earlier of the two ends.
    range.start.max(other.start) <= range.end.min(other.end)
}
// Marker impl: computing code actions can safely be re-run if the request is
// cancelled by a concurrent salsa write.
impl RetriableRequestHandler for CodeActionRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/diagnostic.rs | crates/ty_server/src/server/api/requests/diagnostic.rs | use std::borrow::Cow;
use lsp_types::request::DocumentDiagnosticRequest;
use lsp_types::{
DocumentDiagnosticParams, DocumentDiagnosticReport, DocumentDiagnosticReportResult,
FullDocumentDiagnosticReport, RelatedFullDocumentDiagnosticReport,
RelatedUnchangedDocumentDiagnosticReport, UnchangedDocumentDiagnosticReport, Url,
};
use crate::server::Result;
use crate::server::api::diagnostics::compute_diagnostics;
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
use ty_project::ProjectDatabase;
/// Handles the `textDocument/diagnostic` (pull diagnostics) request for a
/// single document.
pub(crate) struct DocumentDiagnosticRequestHandler;

impl RequestHandler for DocumentDiagnosticRequestHandler {
    type RequestType = DocumentDiagnosticRequest;
}
impl BackgroundDocumentRequestHandler for DocumentDiagnosticRequestHandler {
    fn document_url(params: &DocumentDiagnosticParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }

    /// Computes pull diagnostics for the document in `snapshot`.
    ///
    /// When the freshly computed result id matches the `previous_result_id`
    /// the client sent, an "unchanged" report is returned so the client keeps
    /// its cached diagnostics; otherwise a full report is returned.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: DocumentDiagnosticParams,
    ) -> Result<DocumentDiagnosticReportResult> {
        // Diagnostics are disabled: answer with an empty full report.
        if snapshot.global_settings().diagnostic_mode().is_off() {
            return Ok(DocumentDiagnosticReportResult::Report(
                DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport::default()),
            ));
        }

        let diagnostics = compute_diagnostics(db, snapshot.document(), snapshot.encoding());

        // No diagnostics could be computed (e.g. the document could not be
        // resolved): also answer with an empty full report.
        let Some(diagnostics) = diagnostics else {
            return Ok(DocumentDiagnosticReportResult::Report(
                DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport::default()),
            ));
        };

        let result_id = diagnostics.result_id();

        let report = match result_id {
            // Same result id as the client already has: nothing changed.
            Some(new_id) if Some(&new_id) == params.previous_result_id.as_ref() => {
                DocumentDiagnosticReport::Unchanged(RelatedUnchangedDocumentDiagnosticReport {
                    related_documents: None,
                    unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
                        result_id: new_id,
                    },
                })
            }
            // `new_id` here is either `None` or a `Some` that differs from the
            // client's cached id — both require sending the full list.
            new_id => {
                DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport {
                    related_documents: None,
                    full_document_diagnostic_report: FullDocumentDiagnosticReport {
                        result_id: new_id,
                        // SAFETY: Pull diagnostic requests are only called for text documents, not for
                        // notebook documents.
                        items: diagnostics
                            .to_lsp_diagnostics(
                                db,
                                snapshot.resolved_client_capabilities(),
                                snapshot.global_settings(),
                            )
                            .expect_text_document(),
                    },
                })
            }
        };

        Ok(DocumentDiagnosticReportResult::Report(report))
    }
}
impl RetriableRequestHandler for DocumentDiagnosticRequestHandler {
    /// Pull-diagnostic requests use the dedicated `ServerCancelled` error code
    /// with `DiagnosticServerCancellationData { retrigger_request: true }` so
    /// the client knows it should re-issue the request.
    fn salsa_cancellation_error() -> lsp_server::ResponseError {
        let cancellation_data = lsp_types::DiagnosticServerCancellationData {
            retrigger_request: true,
        };
        lsp_server::ResponseError {
            code: lsp_server::ErrorCode::ServerCancelled as i32,
            message: String::from("server cancelled the request"),
            data: serde_json::to_value(cancellation_data).ok(),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_open_notebook.rs | crates/ty_server/src/server/api/notifications/did_open_notebook.rs | use lsp_server::ErrorCode;
use lsp_types::DidOpenNotebookDocumentParams;
use lsp_types::notification::DidOpenNotebookDocument;
use crate::TextDocument;
use crate::document::NotebookDocument;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `notebookDocument/didOpen` notification.
pub(crate) struct DidOpenNotebookHandler;

impl NotificationHandler for DidOpenNotebookHandler {
    type NotificationType = DidOpenNotebookDocument;
}
impl SyncNotificationHandler for DidOpenNotebookHandler {
    /// Registers a newly opened notebook and each of its cells with the
    /// session, then publishes diagnostics for the notebook.
    fn run(
        session: &mut Session,
        client: &Client,
        params: DidOpenNotebookDocumentParams,
    ) -> Result<()> {
        let lsp_types::NotebookDocument {
            version,
            cells,
            metadata,
            uri: notebook_uri,
            ..
        } = params.notebook_document;

        let parsed =
            NotebookDocument::new(notebook_uri, version, cells, metadata.unwrap_or_default())
                .with_failure_code(ErrorCode::InternalError)?;
        let handle = session.open_notebook_document(parsed);
        let parent_path = handle.notebook_or_file_path();

        // Each cell is tracked as its own text document, linked back to the
        // notebook it belongs to.
        for cell in params.cell_text_documents {
            session.open_text_document(
                TextDocument::new(cell.uri, cell.text, cell.version)
                    .with_language_id(&cell.language_id)
                    .with_notebook(parent_path.clone()),
            );
        }

        // Always publish diagnostics because notebooks only support publish diagnostics.
        publish_diagnostics(&handle, session, client);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_close_notebook.rs | crates/ty_server/src/server/api/notifications/did_close_notebook.rs | use lsp_types::notification::DidCloseNotebookDocument;
use lsp_types::{DidCloseNotebookDocumentParams, NotebookDocumentIdentifier};
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `notebookDocument/didClose` notification.
pub(crate) struct DidCloseNotebookHandler;

impl NotificationHandler for DidCloseNotebookHandler {
    type NotificationType = DidCloseNotebookDocument;
}
impl SyncNotificationHandler for DidCloseNotebookHandler {
    /// Removes a closed notebook document from the session.
    fn run(
        session: &mut Session,
        _client: &Client,
        params: DidCloseNotebookDocumentParams,
    ) -> Result<()> {
        let DidCloseNotebookDocumentParams {
            notebook_document: NotebookDocumentIdentifier { uri },
            ..
        } = params;

        let handle = session
            .document_handle(&uri)
            .with_failure_code(lsp_server::ErrorCode::InternalError)?;

        // We don't need to call publish any diagnostics because we clear
        // the diagnostics when closing the corresponding cell documents.
        let _ = handle
            .close(session)
            .with_failure_code(lsp_server::ErrorCode::InternalError)?;

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/cancel.rs | crates/ty_server/src/server/api/notifications/cancel.rs | use lsp_server::RequestId;
use lsp_types::CancelParams;
use lsp_types::notification::Cancel;
use crate::server::Result;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `$/cancelRequest` notification.
pub(crate) struct CancelNotificationHandler;

impl NotificationHandler for CancelNotificationHandler {
    type NotificationType = Cancel;
}
impl SyncNotificationHandler for CancelNotificationHandler {
fn run(session: &mut Session, client: &Client, params: CancelParams) -> Result<()> {
let id: RequestId = match params.id {
lsp_types::NumberOrString::Number(id) => id.into(),
lsp_types::NumberOrString::String(id) => id.into(),
};
client.cancel(session, id);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_open.rs | crates/ty_server/src/server/api/notifications/did_open.rs | use lsp_types::notification::DidOpenTextDocument;
use lsp_types::{DidOpenTextDocumentParams, TextDocumentItem};
use crate::TextDocument;
use crate::server::Result;
use crate::server::api::diagnostics::publish_diagnostics_if_needed;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `textDocument/didOpen` notification.
pub(crate) struct DidOpenTextDocumentHandler;

impl NotificationHandler for DidOpenTextDocumentHandler {
    type NotificationType = DidOpenTextDocument;
}
impl SyncNotificationHandler for DidOpenTextDocumentHandler {
    /// Opens a text document in the session and publishes diagnostics for it
    /// when the client relies on push diagnostics.
    fn run(
        session: &mut Session,
        client: &Client,
        params: DidOpenTextDocumentParams,
    ) -> Result<()> {
        let TextDocumentItem {
            uri,
            text,
            version,
            language_id,
        } = params.text_document;

        let new_document = TextDocument::new(uri, text, version).with_language_id(&language_id);
        let handle = session.open_text_document(new_document);

        publish_diagnostics_if_needed(&handle, session, client);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_change_notebook.rs | crates/ty_server/src/server/api/notifications/did_change_notebook.rs | use lsp_server::ErrorCode;
use lsp_types as types;
use lsp_types::notification as notif;
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `notebookDocument/didChange` notification.
pub(crate) struct DidChangeNotebookHandler;

impl NotificationHandler for DidChangeNotebookHandler {
    type NotificationType = notif::DidChangeNotebookDocument;
}
impl SyncNotificationHandler for DidChangeNotebookHandler {
    /// Applies an incremental notebook change to the session and republishes
    /// diagnostics.
    fn run(
        session: &mut Session,
        client: &Client,
        params: types::DidChangeNotebookDocumentParams,
    ) -> Result<()> {
        let types::DidChangeNotebookDocumentParams {
            notebook_document: types::VersionedNotebookDocumentIdentifier { uri, version },
            change: types::NotebookDocumentChangeEvent { cells, metadata },
        } = params;

        let mut handle = session
            .document_handle(&uri)
            .with_failure_code(ErrorCode::InternalError)?;
        handle
            .update_notebook_document(session, cells, metadata, version)
            .with_failure_code(ErrorCode::InternalError)?;

        // Always publish diagnostics because notebooks only support publish diagnostics.
        publish_diagnostics(&handle, session, client);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_change.rs | crates/ty_server/src/server/api/notifications/did_change.rs | use lsp_server::ErrorCode;
use lsp_types::notification::DidChangeTextDocument;
use lsp_types::{DidChangeTextDocumentParams, VersionedTextDocumentIdentifier};
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::publish_diagnostics_if_needed;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `textDocument/didChange` notification.
pub(crate) struct DidChangeTextDocumentHandler;

impl NotificationHandler for DidChangeTextDocumentHandler {
    type NotificationType = DidChangeTextDocument;
}
impl SyncNotificationHandler for DidChangeTextDocumentHandler {
    /// Applies incremental text edits to an open document and republishes
    /// diagnostics when the client relies on push diagnostics.
    fn run(
        session: &mut Session,
        client: &Client,
        DidChangeTextDocumentParams {
            text_document: VersionedTextDocumentIdentifier { uri, version },
            content_changes,
        }: DidChangeTextDocumentParams,
    ) -> Result<()> {
        let mut handle = session
            .document_handle(&uri)
            .with_failure_code(ErrorCode::InternalError)?;
        handle
            .update_text_document(session, content_changes, version)
            .with_failure_code(ErrorCode::InternalError)?;

        publish_diagnostics_if_needed(&handle, session, client);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_change_watched_files.rs | crates/ty_server/src/server/api/notifications/did_change_watched_files.rs | use crate::document::DocumentKey;
use crate::server::Result;
use crate::server::api::diagnostics::{
publish_diagnostics_if_needed, publish_settings_diagnostics,
};
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
use crate::system::AnySystemPath;
use lsp_types as types;
use lsp_types::{FileChangeType, notification as notif};
use rustc_hash::FxHashMap;
use ty_project::Db as _;
use ty_project::watch::{ChangeEvent, ChangedKind, CreatedKind, DeletedKind};
/// Handles the `workspace/didChangeWatchedFiles` notification.
pub(crate) struct DidChangeWatchedFiles;

impl NotificationHandler for DidChangeWatchedFiles {
    type NotificationType = notif::DidChangeWatchedFiles;
}
impl SyncNotificationHandler for DidChangeWatchedFiles {
    /// Translates file-watcher events into project change events, applies them
    /// per project, and then refreshes diagnostics and inlay hints.
    fn run(
        session: &mut Session,
        client: &Client,
        params: types::DidChangeWatchedFilesParams,
    ) -> Result<()> {
        // Group change events by the owning project, keyed by its root path.
        let mut events_by_db: FxHashMap<_, Vec<ChangeEvent>> = FxHashMap::default();

        for change in params.changes {
            let path = DocumentKey::from_url(&change.uri).into_file_path();
            let system_path = match path {
                AnySystemPath::System(system) => system,
                AnySystemPath::SystemVirtual(path) => {
                    // Virtual documents aren't backed by the file system, so a
                    // watcher event for one is unexpected — skip it.
                    tracing::debug!("Ignoring virtual path from change event: `{path}`");
                    continue;
                }
            };

            // Events for paths outside every workspace are irrelevant.
            let Some(db) = session.project_db_for_path(&system_path) else {
                tracing::trace!(
                    "Ignoring change event for `{system_path}` because it's not in any workspace"
                );
                continue;
            };

            // Map the LSP change type onto the watcher's event model. The
            // `*Kind::Any` variants are used because the LSP event carries no
            // detail about what kind of file-system entry changed.
            let change_event = match change.typ {
                FileChangeType::CREATED => ChangeEvent::Created {
                    path: system_path,
                    kind: CreatedKind::Any,
                },
                FileChangeType::CHANGED => ChangeEvent::Changed {
                    path: system_path,
                    kind: ChangedKind::Any,
                },
                FileChangeType::DELETED => ChangeEvent::Deleted {
                    path: system_path,
                    kind: DeletedKind::Any,
                },
                _ => {
                    tracing::debug!(
                        "Ignoring unsupported change event type: `{:?}` for {system_path}",
                        change.typ
                    );
                    continue;
                }
            };

            events_by_db
                .entry(db.project().root(db).to_path_buf())
                .or_default()
                .push(change_event);
        }

        // Nothing relevant changed — skip the refresh work entirely.
        if events_by_db.is_empty() {
            return Ok(());
        }

        for (root, changes) in events_by_db {
            tracing::debug!("Applying changes to `{root}`");
            session.apply_changes(&AnySystemPath::System(root.clone()), changes);
            // Settings files may be among the changes; re-publish
            // settings-level diagnostics for the affected project.
            publish_settings_diagnostics(session, client, root);
        }

        let client_capabilities = session.client_capabilities();
        if client_capabilities.supports_workspace_diagnostic_refresh() {
            // Ask the client to re-pull workspace diagnostics.
            client.send_request::<types::request::WorkspaceDiagnosticRefresh>(
                session,
                (),
                |_, ()| {},
            );
        } else {
            // Fallback for clients without refresh support: push diagnostics
            // for every open text document.
            for key in session.text_document_handles() {
                publish_diagnostics_if_needed(&key, session, client);
            }
        }

        if client_capabilities.supports_inlay_hint_refresh() {
            client.send_request::<types::request::InlayHintRefreshRequest>(session, (), |_, ()| {});
        }

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications/did_close.rs | crates/ty_server/src/server/api/notifications/did_close.rs | use lsp_server::ErrorCode;
use lsp_types::notification::DidCloseTextDocument;
use lsp_types::{DidCloseTextDocumentParams, TextDocumentIdentifier};
use crate::server::Result;
use crate::server::api::LSPResult;
use crate::server::api::diagnostics::clear_diagnostics_if_needed;
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
use crate::session::Session;
use crate::session::client::Client;
/// Handles the `textDocument/didClose` notification.
pub(crate) struct DidCloseTextDocumentHandler;

impl NotificationHandler for DidCloseTextDocumentHandler {
    type NotificationType = DidCloseTextDocument;
}
impl SyncNotificationHandler for DidCloseTextDocumentHandler {
    /// Closes a text document and clears its published diagnostics when the
    /// close operation reports that clearing is required.
    fn run(
        session: &mut Session,
        client: &Client,
        params: DidCloseTextDocumentParams,
    ) -> Result<()> {
        let uri = params.text_document.uri;

        let handle = session
            .document_handle(&uri)
            .with_failure_code(ErrorCode::InternalError)?;

        if handle
            .close(session)
            .with_failure_code(ErrorCode::InternalError)?
        {
            clear_diagnostics_if_needed(&handle, session, client);
        }

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/schedule/task.rs | crates/ty_server/src/server/schedule/task.rs | use lsp_server::RequestId;
use serde::Serialize;
use crate::session::Session;
use crate::session::client::Client;
/// A task body that runs on the main thread with exclusive (mutable) session access.
type LocalFn = Box<dyn FnOnce(&mut Session, &Client)>;
/// A task body that runs on a background thread; `Send + 'static` because it
/// is moved to a thread pool.
type BackgroundFn = Box<dyn FnOnce(&Client) + Send + 'static>;
/// Builds a [`BackgroundFn`] from a borrowed session, allowing a snapshot to
/// be taken before the work is moved off the main thread.
type BackgroundFnBuilder = Box<dyn FnOnce(&Session) -> BackgroundFn>;
/// Describes how the task should be run.
#[derive(Clone, Copy, Debug, Default)]
pub(in crate::server) enum BackgroundSchedule {
    /// The task should be run on the background thread designated
    /// for formatting actions. This is a high priority thread.
    #[expect(dead_code)]
    Fmt,
    /// The task should be run on the general high-priority background
    /// thread. Reserved for actions caused by the user typing (e.g. syntax highlighting).
    LatencySensitive,
    /// The task should be run on a regular-priority background thread.
    /// The default for any request that isn't in the critical path of the user typing.
    #[default]
    Worker,
}
/// A [`Task`] is a future that has not yet started, and it is the job of
/// the [`super::Scheduler`] to make that happen, via [`super::Scheduler::dispatch`].
/// A task can either run on the main thread (in other words, the same thread as the
/// scheduler) or it can run in a background thread. The main difference between
/// the two is that background threads only have a read-only snapshot of the session,
/// while local tasks have exclusive access and can modify it as they please. Keep in mind that
/// local tasks will **block** the main event loop, so only use local tasks if you **need**
/// mutable state access or you need the absolute lowest latency possible.
#[must_use]
pub(in crate::server) enum Task {
    /// Deferred work: snapshotted on the main thread, then run on a pool thread.
    Background(BackgroundTaskBuilder),
    /// Work that runs immediately on the main loop thread.
    Sync(SyncTask),
}

// The reason why this isn't just a 'static background closure
// is because we need to take a snapshot of the session before sending
// this task to the background, and the inner closure can't take the session
// as an immutable reference since it's used mutably elsewhere. So instead,
// a background task is built using an outer closure that borrows the session to take a snapshot,
// that the inner closure can capture. This builder closure has a lifetime linked to the scheduler.
// When the task is dispatched, the scheduler runs the synchronous builder, which takes the session
// as a reference, to create the inner 'static closure. That closure is then moved to a background task pool.
pub(in crate::server) struct BackgroundTaskBuilder {
    // Which background thread pool / priority the task should run on.
    pub(super) schedule: BackgroundSchedule,
    // Borrows the session synchronously to build the `'static` background closure.
    pub(super) builder: BackgroundFnBuilder,
}

pub(in crate::server) struct SyncTask {
    // The closure to run on the main thread with mutable session access.
    pub(super) func: LocalFn,
}
impl Task {
/// Creates a new background task.
pub(crate) fn background<F>(schedule: BackgroundSchedule, func: F) -> Self
where
F: FnOnce(&Session) -> Box<dyn FnOnce(&Client) + Send + 'static> + 'static,
{
Self::Background(BackgroundTaskBuilder {
schedule,
builder: Box::new(func),
})
}
/// Creates a new local task.
pub(crate) fn sync<F>(func: F) -> Self
where
F: FnOnce(&mut Session, &Client) + 'static,
{
Self::Sync(SyncTask {
func: Box::new(func),
})
}
/// Creates a local task that immediately
/// responds with the provided `request`.
pub(crate) fn immediate<R>(id: RequestId, result: crate::server::Result<R>) -> Self
where
R: Serialize + Send + 'static,
{
Self::sync(move |_, client| {
client.respond(&id, result);
})
}
/// Creates a local task that does nothing.
pub(crate) fn nothing() -> Self {
Self::sync(move |_, _| {})
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/schedule/thread.rs | crates/ty_server/src/server/schedule/thread.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! A utility module for working with threads that automatically joins threads upon drop
//! and abstracts over operating system quality of service (QoS) APIs
//! through the concept of a “thread priority”.
//!
//! The priority of a thread is frozen at thread creation time,
//! i.e. there is no API to change the priority of a thread once it has been spawned.
//!
//! As a system, rust-analyzer should have the property that
//! old manual scheduling APIs are replaced entirely by QoS.
//! To maintain this invariant, we panic when it is clear that
//! old scheduling APIs have been used.
//!
//! Moreover, we also want to ensure that every thread has an priority set explicitly
//! to force a decision about its importance to the system.
//! Thus, [`ThreadPriority`] has no default value
//! and every entry point to creating a thread requires a [`ThreadPriority`] upfront.
// Keeps us from getting warnings about the word `QoS`
#![allow(clippy::doc_markdown)]
use std::fmt;
mod pool;
mod priority;
pub(super) use pool::Pool;
pub(super) use priority::ThreadPriority;
/// A thread builder that pairs a [`ThreadPriority`] with a `jod_thread`
/// builder (whose handles join on drop; see the module docs).
pub(super) struct Builder {
    // Applied on the spawned thread before the user closure runs.
    priority: ThreadPriority,
    inner: jod_thread::Builder,
}
impl Builder {
    /// Creates a builder for a thread with the given priority.
    pub(super) fn new(priority: ThreadPriority) -> Builder {
        Builder {
            priority,
            inner: jod_thread::Builder::new(),
        }
    }

    /// Sets the name of the thread to be spawned.
    pub(super) fn name(self, name: String) -> Builder {
        let Builder { priority, inner } = self;
        Builder {
            priority,
            inner: inner.name(name),
        }
    }

    /// Sets the stack size of the thread to be spawned.
    pub(super) fn stack_size(self, size: usize) -> Builder {
        let Builder { priority, inner } = self;
        Builder {
            priority,
            inner: inner.stack_size(size),
        }
    }

    /// Spawns the thread; the configured priority is applied on the new
    /// thread before `f` runs.
    pub(super) fn spawn<F, T>(self, f: F) -> std::io::Result<JoinHandle<T>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        let Builder { priority, inner } = self;
        let handle = inner.spawn(move || {
            priority.apply_to_current_thread();
            f()
        })?;
        Ok(JoinHandle {
            inner: Some(handle),
            allow_leak: false,
        })
    }
}
/// A handle to a spawned thread that, by default, joins the thread on drop.
pub(crate) struct JoinHandle<T = ()> {
    // `inner` is an `Option` so that we can
    // take ownership of the contained `JoinHandle`.
    inner: Option<jod_thread::JoinHandle<T>>,
    // When true, dropping the handle detaches the thread instead of joining it.
    allow_leak: bool,
}
impl<T> JoinHandle<T> {
    /// Joins the thread and returns its result.
    ///
    /// The `unwrap` cannot fail here: `inner` is only emptied by this method
    /// (which consumes `self`) or by `Drop`.
    pub(crate) fn join(mut self) -> T {
        self.inner.take().unwrap().join()
    }
}
impl<T> Drop for JoinHandle<T> {
    fn drop(&mut self) {
        // When leaking isn't allowed, leave `inner` in place: dropping the
        // `jod_thread` handle joins the thread (see the module header).
        if !self.allow_leak {
            return;
        }

        // Otherwise detach so the thread may outlive this handle.
        if let Some(join_handle) = self.inner.take() {
            join_handle.detach();
        }
    }
}
impl<T> fmt::Debug for JoinHandle<T> {
    // Opaque debug output; `pad` respects any width/alignment in the format spec.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("JoinHandle { .. }")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/schedule/thread/priority.rs | crates/ty_server/src/server/schedule/thread/priority.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread/intent.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! An opaque façade around platform-specific QoS APIs.
/// The scheduling priority of a thread, frozen at thread-creation time
/// (see the module docs).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(crate) enum ThreadPriority {
    /// Any thread which does work that isn't in a critical path.
    Worker,
    /// Any thread which does work caused by the user typing, or
    /// work that the editor may wait on.
    LatencySensitive,
}
impl ThreadPriority {
    // These APIs must remain private;
    // we only want consumers to set thread priority
    // during thread creation.

    /// Maps this priority to a platform QoS class and applies it to the
    /// calling thread.
    pub(crate) fn apply_to_current_thread(self) {
        let class = thread_priority_to_qos_class(self);
        set_current_thread_qos_class(class);
    }

    /// Asserts that the current thread runs at this priority's QoS class.
    /// No-op on platforms without a QoS API.
    pub(crate) fn assert_is_used_on_current_thread(self) {
        if IS_QOS_AVAILABLE {
            let class = thread_priority_to_qos_class(self);
            assert_eq!(get_current_thread_qos_class(), Some(class));
        }
    }
}
use imp::QoSClass;

// Whether this platform exposes a QoS API. The Apple `imp` below sets it;
// presumably the non-Apple `imp` (outside this view) reports `false` — TODO confirm.
const IS_QOS_AVAILABLE: bool = imp::IS_QOS_AVAILABLE;

// Thin dispatch wrappers over the platform-specific `imp` module.

fn set_current_thread_qos_class(class: QoSClass) {
    imp::set_current_thread_qos_class(class);
}

fn get_current_thread_qos_class() -> Option<QoSClass> {
    imp::get_current_thread_qos_class()
}

fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
    imp::thread_priority_to_qos_class(priority)
}
// All Apple platforms use XNU as their kernel
// and thus have the concept of QoS.
#[cfg(target_vendor = "apple")]
mod imp {
use super::ThreadPriority;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(super) enum QoSClass {
// Documentation adapted from https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/include/sys/qos.h#L55
//
/// TLDR: invisible maintenance tasks
///
/// Contract:
///
/// * **You do not care about how long it takes for work to finish.**
/// * **You do not care about work being deferred temporarily.**
/// (e.g. if the device's battery is in a critical state)
///
/// Examples:
///
/// * in a video editor:
/// creating periodic backups of project files
/// * in a browser:
/// cleaning up cached sites which have not been accessed in a long time
/// * in a collaborative word processor:
/// creating a searchable index of all documents
///
/// Use this QoS class for background tasks
/// which the user did not initiate themselves
/// and which are invisible to the user.
/// It is expected that this work will take significant time to complete:
/// minutes or even hours.
///
/// This QoS class provides the most energy and thermally-efficient execution possible.
/// All other work is prioritized over background tasks.
Background,
/// TLDR: tasks that don't block using your app
///
/// Contract:
///
/// * **Your app remains useful even as the task is executing.**
///
/// Examples:
///
/// * in a video editor:
/// exporting a video to disk -
/// the user can still work on the timeline
/// * in a browser:
/// automatically extracting a downloaded zip file -
/// the user can still switch tabs
/// * in a collaborative word processor:
/// downloading images embedded in a document -
/// the user can still make edits
///
/// Use this QoS class for tasks which
/// may or may not be initiated by the user,
/// but whose result is visible.
/// It is expected that this work will take a few seconds to a few minutes.
/// Typically your app will include a progress bar
/// for tasks using this class.
///
/// This QoS class provides a balance between
/// performance, responsiveness and efficiency.
Utility,
/// TLDR: tasks that block using your app
///
/// Contract:
///
/// * **You need this work to complete
/// before the user can keep interacting with your app.**
/// * **Your work will not take more than a few seconds to complete.**
///
/// Examples:
///
/// * in a video editor:
/// opening a saved project
/// * in a browser:
/// loading a list of the user's bookmarks and top sites
/// when a new tab is created
/// * in a collaborative word processor:
/// running a search on the document's content
///
/// Use this QoS class for tasks which were initiated by the user
/// and block the usage of your app while they are in progress.
/// It is expected that this work will take a few seconds or less to complete;
/// not long enough to cause the user to switch to something else.
/// Your app will likely indicate progress on these tasks
/// through the display of placeholder content or modals.
///
/// This QoS class is not energy-efficient.
/// Rather, it provides responsiveness
/// by prioritizing work above other tasks on the system
/// except for critical user-interactive work.
UserInitiated,
/// TLDR: render loops and nothing else
///
/// Contract:
///
/// * **You absolutely need this work to complete immediately
/// or your app will appear to freeze.**
/// * **Your work will always complete virtually instantaneously.**
///
/// Examples:
///
/// * the main thread in a GUI application
/// * the update & render loop in a game
/// * a secondary thread which progresses an animation
///
/// Use this QoS class for any work which, if delayed,
/// will make your user interface unresponsive.
/// It is expected that this work will be virtually instantaneous.
///
/// This QoS class is not energy-efficient.
/// Specifying this class is a request to run with
/// nearly all available system CPU and I/O bandwidth even under contention.
UserInteractive,
}
pub(super) const IS_QOS_AVAILABLE: bool = true;
pub(super) fn set_current_thread_qos_class(class: QoSClass) {
let c = match class {
QoSClass::UserInteractive => libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE,
QoSClass::UserInitiated => libc::qos_class_t::QOS_CLASS_USER_INITIATED,
QoSClass::Utility => libc::qos_class_t::QOS_CLASS_UTILITY,
QoSClass::Background => libc::qos_class_t::QOS_CLASS_BACKGROUND,
};
#[expect(unsafe_code)]
let code = unsafe { libc::pthread_set_qos_class_self_np(c, 0) };
if code == 0 {
return;
}
#[expect(unsafe_code)]
let errno = unsafe { *libc::__error() };
match errno {
libc::EPERM => {
// This thread has been excluded from the QoS system
// due to a previous call to a function such as `pthread_setschedparam`
// which is incompatible with QoS.
//
// Panic instead of returning an error
// to maintain the invariant that we only use QoS APIs.
panic!("tried to set QoS of thread which has opted out of QoS (os error {errno})")
}
libc::EINVAL => {
// This is returned if we pass something other than a qos_class_t
// to `pthread_set_qos_class_self_np`.
//
// This is impossible, so again panic.
unreachable!(
"invalid qos_class_t value was passed to pthread_set_qos_class_self_np"
)
}
_ => {
// `pthread_set_qos_class_self_np`’s documentation
// does not mention any other errors.
unreachable!("`pthread_set_qos_class_self_np` returned unexpected error {errno}")
}
}
}
/// Returns the QoS class of the calling thread, or `None` if QoS was never
/// set on it (`QOS_CLASS_DEFAULT`).
///
/// # Panics
///
/// Panics if the thread has opted out of QoS (`QOS_CLASS_UNSPECIFIED`) or if
/// `pthread_get_qos_class_np` fails unexpectedly.
pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
    // SAFETY: `pthread_self` is always safe to call; it just returns a
    // handle to the calling thread.
    #[expect(unsafe_code)]
    let current_thread = unsafe { libc::pthread_self() };
    let mut qos_class_raw = libc::qos_class_t::QOS_CLASS_UNSPECIFIED;
    // SAFETY: `current_thread` is a live thread handle, `qos_class_raw` is a
    // valid out-pointer, and a null relative-priority pointer is permitted.
    #[expect(unsafe_code)]
    let code = unsafe {
        libc::pthread_get_qos_class_np(
            current_thread,
            &raw mut qos_class_raw,
            std::ptr::null_mut(),
        )
    };
    if code != 0 {
        // `pthread_get_qos_class_np`’s documentation states that
        // an error value is placed into errno if the return code is not zero.
        // However, it never states what errors are possible.
        // Inspecting the source[0] shows that, as of this writing, it always returns zero.
        //
        // Whatever errors the function could report in future are likely to be
        // ones which we cannot handle anyway
        //
        // 0: https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/src/qos.c#L171-L177
        // SAFETY: `__error()` returns a valid pointer to the calling thread's errno.
        #[expect(unsafe_code)]
        let errno = unsafe { *libc::__error() };
        unreachable!("`pthread_get_qos_class_np` failed unexpectedly (os error {errno})");
    }
    match qos_class_raw {
        libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE => Some(QoSClass::UserInteractive),
        libc::qos_class_t::QOS_CLASS_USER_INITIATED => Some(QoSClass::UserInitiated),
        libc::qos_class_t::QOS_CLASS_DEFAULT => None, // QoS has never been set
        libc::qos_class_t::QOS_CLASS_UTILITY => Some(QoSClass::Utility),
        libc::qos_class_t::QOS_CLASS_BACKGROUND => Some(QoSClass::Background),
        libc::qos_class_t::QOS_CLASS_UNSPECIFIED => {
            // Using manual scheduling APIs causes threads to “opt out” of QoS.
            // At this point they become incompatible with QoS,
            // and as such have the “unspecified” QoS class.
            //
            // Panic instead of returning an error
            // to maintain the invariant that we only use QoS APIs.
            panic!("tried to get QoS of thread which has opted out of QoS")
        }
    }
}
/// Maps the crate-level [`ThreadPriority`] onto the Apple QoS class used
/// to schedule it.
pub(super) fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
    match priority {
        // Work the editor may be waiting on runs just below
        // user-interactive priority.
        ThreadPriority::LatencySensitive => QoSClass::UserInitiated,
        // Plain workers only need balanced, efficient throughput.
        ThreadPriority::Worker => QoSClass::Utility,
    }
}
}
// FIXME: Windows has QoS APIs, we should use them!
#[cfg(not(target_vendor = "apple"))]
mod imp {
    use super::ThreadPriority;

    /// Stub QoS class for platforms without a QoS concept.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(super) enum QoSClass {
        Default,
    }

    pub(super) const IS_QOS_AVAILABLE: bool = false;

    /// No-op: there is no QoS to set on this platform.
    pub(super) fn set_current_thread_qos_class(_: QoSClass) {}

    /// Always `None`: QoS can never have been set on this platform.
    pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
        None
    }

    /// Every priority collapses to the single stub class.
    pub(super) fn thread_priority_to_qos_class(_: ThreadPriority) -> QoSClass {
        QoSClass::Default
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/schedule/thread/pool.rs | crates/ty_server/src/server/schedule/thread/pool.rs | // +------------------------------------------------------------+
// | Code adopted from: |
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
// | File: `crates/stdx/src/thread/pool.rs` |
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
// +------------------------------------------------------------+
//! [`Pool`] implements a basic custom thread pool
//! inspired by the [`threadpool` crate](http://docs.rs/threadpool).
//! When you spawn a task you specify a thread priority
//! so the pool can schedule it to run on a thread with that priority.
//! rust-analyzer uses this to prioritize work based on latency requirements.
//!
//! The thread pool is implemented entirely using
//! the threading utilities in [`crate::server::schedule::thread`].
use crossbeam::channel::{Receiver, Sender};
use std::panic::AssertUnwindSafe;
use std::{
num::NonZeroUsize,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
};
use super::{Builder, JoinHandle, ThreadPriority};
/// A fixed-size thread pool whose workers re-apply each job's requested
/// [`ThreadPriority`] before running it.
pub(crate) struct Pool {
    // `_handles` is never read: the field is present
    // only for its `Drop` impl.
    // The worker threads exit once the channel closes;
    // make sure to keep `job_sender` above `handles`
    // so that the channel is actually closed
    // before we join the worker threads!
    job_sender: Sender<Job>,
    _handles: Vec<JoinHandle>,
    // Number of jobs currently executing (incremented just before a job
    // runs and decremented right after; queued jobs are not counted).
    extant_tasks: Arc<AtomicUsize>,
}
/// A unit of work submitted to the pool, tagged with the priority the
/// executing worker thread should adopt before running it.
struct Job {
    requested_priority: ThreadPriority,
    f: Box<dyn FnOnce() + Send + 'static>,
}
impl Pool {
/// Spawns a pool with exactly `threads` worker threads.
///
/// All workers start at [`ThreadPriority::Worker`] and lazily switch to a
/// job's requested priority only when it differs from the thread's current one.
pub(crate) fn new(threads: NonZeroUsize) -> Pool {
    // Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
    const STACK_SIZE: usize = 2 * 1024 * 1024;
    const INITIAL_PRIORITY: ThreadPriority = ThreadPriority::Worker;
    let threads = usize::from(threads);
    // Bounded channel so `spawn` applies backpressure once the queue fills.
    // NOTE(review): `min(threads * 2, 4)` caps the queue at 4 for any pool
    // with 2+ threads — confirm a cap (rather than `max`) is intended here.
    let (job_sender, job_receiver) = crossbeam::channel::bounded(std::cmp::min(threads * 2, 4));
    let extant_tasks = Arc::new(AtomicUsize::new(0));
    let mut handles = Vec::with_capacity(threads);
    for i in 0..threads {
        let handle = Builder::new(INITIAL_PRIORITY)
            .stack_size(STACK_SIZE)
            .name(format!("ty:worker:{i}"))
            .spawn({
                let extant_tasks = Arc::clone(&extant_tasks);
                let job_receiver: Receiver<Job> = job_receiver.clone();
                move || {
                    let mut current_priority = INITIAL_PRIORITY;
                    // This loop — and hence the worker — ends once all senders
                    // are dropped, i.e. when the `Pool` itself is dropped.
                    for job in job_receiver {
                        // Only touch the QoS APIs when the priority changes.
                        if job.requested_priority != current_priority {
                            job.requested_priority.apply_to_current_thread();
                            current_priority = job.requested_priority;
                        }
                        extant_tasks.fetch_add(1, Ordering::SeqCst);
                        // SAFETY: it's safe to assume that `job.f` is unwind safe because we always
                        // abort the process if it panics.
                        // Panicking here ensures that we don't swallow errors and is the same as
                        // what rayon does.
                        // Any recovery should be implemented outside the thread pool (e.g. when
                        // dispatching requests/notifications etc).
                        if let Err(error) = std::panic::catch_unwind(AssertUnwindSafe(job.f)) {
                            if let Some(msg) = error.downcast_ref::<String>() {
                                tracing::error!("Worker thread panicked with: {msg}; aborting");
                            } else if let Some(msg) = error.downcast_ref::<&str>() {
                                tracing::error!("Worker thread panicked with: {msg}; aborting");
                            } else if let Some(cancelled) =
                                error.downcast_ref::<salsa::Cancelled>()
                            {
                                tracing::error!(
                                    "Worker thread got cancelled: {cancelled}; aborting"
                                );
                            } else {
                                tracing::error!(
                                    "Worker thread panicked with: {error:?}; aborting"
                                );
                            }
                            std::process::abort();
                        }
                        extant_tasks.fetch_sub(1, Ordering::SeqCst);
                    }
                }
            })
            .expect("failed to spawn thread");
        handles.push(handle);
    }
    Pool {
        _handles: handles,
        extant_tasks,
        job_sender,
    }
}
/// Queues `f` to run on a worker thread at the given `priority`.
///
/// Blocks while the (bounded) job channel is full. Panics if no worker is
/// left to receive the job — workers only exit when the `Pool` is dropped.
pub(crate) fn spawn<F>(&self, priority: ThreadPriority, f: F)
where
    F: FnOnce() + Send + 'static,
{
    let wrapped = move || {
        // In debug builds, verify the worker actually switched to the
        // priority this job asked for.
        if cfg!(debug_assertions) {
            priority.assert_is_used_on_current_thread();
        }
        f();
    };
    self.job_sender
        .send(Job {
            requested_priority: priority,
            f: Box::new(wrapped),
        })
        .unwrap();
}
/// Number of jobs currently being executed (queued jobs are not counted).
#[expect(dead_code)]
pub(super) fn len(&self) -> usize {
    self.extant_tasks.load(Ordering::SeqCst)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/document/notebook.rs | crates/ty_server/src/document/notebook.rs | use lsp_types::NotebookCellKind;
use ruff_notebook::CellMetadata;
use ruff_source_file::OneIndexed;
use rustc_hash::FxHashMap;
use super::{DocumentKey, DocumentVersion};
use crate::session::index::Index;
/// A notebook document.
///
/// This notebook document only stores the metadata about the notebook
/// and the cell metadata. The cell contents are stored as separate
/// [`super::TextDocument`]s (they can be looked up by the Cell's URL).
#[derive(Clone, Debug)]
pub struct NotebookDocument {
url: lsp_types::Url,
cells: Vec<NotebookCell>,
metadata: ruff_notebook::RawNotebookMetadata,
version: DocumentVersion,
/// Map from Cell URL to their index in `cells`
cell_index: FxHashMap<lsp_types::Url, usize>,
}
/// The metadata of a single cell within a notebook.
///
/// The cell's content are stored as a [`TextDocument`] and can be looked up by the Cell's URL.
#[derive(Clone, Debug)]
struct NotebookCell {
/// The URL uniquely identifying the cell.
///
/// > Cell text documents have a URI, but servers should not rely on any
/// > format for this URI, since it is up to the client on how it will
/// > create these URIs. The URIs must be unique across ALL notebook
/// > cells and can therefore be used to uniquely identify a notebook cell
/// > or the cell’s text document.
/// > <https://microsoft.github.io/language-server-protocol/specifications/lsp/3.18/specification/#notebookDocument_synchronization>
url: lsp_types::Url,
kind: NotebookCellKind,
execution_summary: Option<lsp_types::ExecutionSummary>,
}
impl NotebookDocument {
pub fn new(
url: lsp_types::Url,
notebook_version: DocumentVersion,
cells: Vec<lsp_types::NotebookCell>,
metadata: serde_json::Map<String, serde_json::Value>,
) -> crate::Result<Self> {
let cells: Vec<_> = cells.into_iter().map(NotebookCell::new).collect();
let index = cells
.iter()
.enumerate()
.map(|(index, cell)| (cell.url.clone(), index))
.collect();
Ok(Self {
cell_index: index,
url,
version: notebook_version,
cells,
metadata: serde_json::from_value(serde_json::Value::Object(metadata))?,
})
}
pub(crate) fn url(&self) -> &lsp_types::Url {
&self.url
}
/// Generates a pseudo-representation of a notebook that lacks per-cell metadata and contextual information
/// but should still work with Ruff's linter.
///
/// Cell contents are pulled from the open [`super::TextDocument`]s in `index`;
/// a cell whose document is missing or not text degrades to an empty source
/// (with a warning) rather than failing the whole conversion.
pub(crate) fn to_ruff_notebook(&self, index: &Index) -> ruff_notebook::Notebook {
    let cells = self
        .cells
        .iter()
        .map(|cell| {
            let cell_text =
                if let Ok(document) = index.document(&DocumentKey::from_url(&cell.url)) {
                    if let Some(text_document) = document.as_text() {
                        Some(text_document.contents().to_string())
                    } else {
                        tracing::warn!("Non-text document found for cell `{}`", cell.url);
                        None
                    }
                } else {
                    tracing::warn!("Text document not found for cell `{}`", cell.url);
                    None
                }
                // Fall back to an empty cell source in both warning cases.
                .unwrap_or_default();
            let source = ruff_notebook::SourceValue::String(cell_text);
            match cell.kind {
                NotebookCellKind::Code => ruff_notebook::Cell::Code(ruff_notebook::CodeCell {
                    execution_count: cell
                        .execution_summary
                        .as_ref()
                        .map(|summary| i64::from(summary.execution_order)),
                    id: None,
                    metadata: CellMetadata::default(),
                    // Outputs aren't synchronized by the LSP, so none are available.
                    outputs: vec![],
                    source,
                }),
                NotebookCellKind::Markup => {
                    ruff_notebook::Cell::Markdown(ruff_notebook::MarkdownCell {
                        attachments: None,
                        id: None,
                        metadata: CellMetadata::default(),
                        source,
                    })
                }
            }
        })
        .collect();
    // Pin to nbformat 4.5, the format this pseudo-notebook mimics.
    let raw_notebook = ruff_notebook::RawNotebook {
        cells,
        metadata: self.metadata.clone(),
        nbformat: 4,
        nbformat_minor: 5,
    };
    // The raw notebook is built entirely from our own data above, so a
    // conversion failure here is a bug, not a user error.
    ruff_notebook::Notebook::from_raw_notebook(raw_notebook, false)
        .unwrap_or_else(|err| panic!("Server notebook document could not be converted to ty's notebook document format: {err}"))
}
/// Applies an LSP notebook-document change event.
///
/// `array` describes a structural splice (cells deleted/inserted starting at
/// `array.start`), `updated_cells` carries kind updates for existing cells,
/// and `metadata_change` optionally replaces the notebook metadata.
///
/// # Errors
///
/// Returns an error if `metadata_change` cannot be deserialized into the
/// raw notebook metadata format.
pub(crate) fn update(
    &mut self,
    array: lsp_types::NotebookCellArrayChange,
    updated_cells: Vec<lsp_types::NotebookCell>,
    metadata_change: Option<serde_json::Map<String, serde_json::Value>>,
    version: DocumentVersion,
) -> crate::Result<()> {
    self.version = version;
    let new_cells = array.cells.unwrap_or_default();
    let start = array.start as usize;
    let added = new_cells.len();
    let deleted_range = start..start + array.delete_count as usize;
    self.cells.splice(
        deleted_range.clone(),
        new_cells.into_iter().map(NotebookCell::new),
    );
    // Re-build the cell-index if new cells were added, deleted or removed:
    // any structural change invalidates the stored positions.
    if !deleted_range.is_empty() || added > 0 {
        self.cell_index.clear();
        self.cell_index.extend(
            self.cells
                .iter()
                .enumerate()
                .map(|(i, cell)| (cell.url.clone(), i)),
        );
    }
    // Kind updates for cells that still exist after the splice.
    for cell in updated_cells {
        if let Some(existing_cell_index) = self.cell_index.get(&cell.document).copied() {
            self.cells[existing_cell_index].kind = cell.kind;
        }
    }
    if let Some(metadata_change) = metadata_change {
        self.metadata = serde_json::from_value(serde_json::Value::Object(metadata_change))?;
    }
    Ok(())
}
/// Get the current version of the notebook document.
pub(crate) fn version(&self) -> DocumentVersion {
    self.version
}

/// Get the URI for a cell by its index within the cell array.
///
/// Returns `None` if `index` is out of bounds.
pub(crate) fn cell_uri_by_index(&self, index: OneIndexed) -> Option<&lsp_types::Url> {
    self.cells
        .get(index.to_zero_indexed())
        .map(|cell| &cell.url)
}

/// Returns a list of cell URIs in the order they appear in the array.
pub(crate) fn cell_urls(&self) -> impl Iterator<Item = &lsp_types::Url> {
    self.cells.iter().map(|cell| &cell.url)
}

/// Returns the one-based index of the cell with the given URI, or `None`
/// if no cell with that URI exists in this notebook.
pub(crate) fn cell_index_by_uri(&self, cell_url: &lsp_types::Url) -> Option<OneIndexed> {
    Some(OneIndexed::from_zero_indexed(
        self.cell_index.get(cell_url).copied()?,
    ))
}
}
impl NotebookCell {
    /// Extracts the pieces of the LSP cell that ty tracks (URL, kind,
    /// execution summary); everything else is dropped.
    pub(crate) fn new(cell: lsp_types::NotebookCell) -> Self {
        let lsp_types::NotebookCell {
            document,
            kind,
            execution_summary,
            ..
        } = cell;
        Self {
            url: document,
            kind,
            execution_summary,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/document/range.rs | crates/ty_server/src/document/range.rs | use super::PositionEncoding;
use crate::Db;
use crate::system::file_to_url;
use ruff_db::files::{File, FileRange};
use ruff_db::source::{line_index, source_text};
use ruff_source_file::LineIndex;
use ruff_source_file::{OneIndexed, SourceLocation};
use ruff_text_size::{Ranged, TextRange, TextSize};
/// A range in an LSP text document (cell or a regular document).
#[derive(Clone, Debug, Default)]
pub(crate) struct LspRange {
range: lsp_types::Range,
/// The URI of this range's text document
uri: Option<lsp_types::Url>,
}
impl LspRange {
/// Returns the range within this document.
///
/// Only use `range` when you already have a URI context and this range is guaranteed
/// to be within the same document/cell:
/// - Selection ranges within a `LocationLink` (where `target_uri` provides context)
/// - Additional ranges in the same cell (e.g., `selection_range` when you already have `target_range`)
///
/// Do NOT use this for standalone ranges - use [`Self::to_location`] instead to ensure
/// the URI and range are consistent.
pub(crate) fn local_range(&self) -> lsp_types::Range {
self.range
}
/// Converts this range into an LSP location.
///
/// Returns `None` if the URI for this file couldn't be resolved.
pub(crate) fn to_location(&self) -> Option<lsp_types::Location> {
Some(lsp_types::Location {
uri: self.uri.clone()?,
range: self.range,
})
}
pub(crate) fn into_location(self) -> Option<lsp_types::Location> {
Some(lsp_types::Location {
uri: self.uri?,
range: self.range,
})
}
}
/// A position in an LSP text document (cell or a regular document).
#[derive(Clone, Debug, Default)]
pub(crate) struct LspPosition {
position: lsp_types::Position,
/// The URI of this range's text document
uri: Option<lsp_types::Url>,
}
impl LspPosition {
/// Returns the position within this document.
///
/// Only use [`Self::local_position`] when you already have a URI context and this position is guaranteed
/// to be within the same document/cell
///
/// Do NOT use this for standalone positions - use [`Self::to_location`] instead to ensure
/// the URI and position are consistent.
pub(crate) fn local_position(&self) -> lsp_types::Position {
self.position
}
/// Returns the uri of the text document this position belongs to.
#[expect(unused)]
pub(crate) fn uri(&self) -> Option<&lsp_types::Url> {
self.uri.as_ref()
}
}
pub(crate) trait RangeExt {
/// Convert an LSP Range to a [`TextRange`].
///
/// Returns `None` if `file` is a notebook and the
/// cell identified by `url` can't be looked up or if the notebook
/// isn't open in the editor.
fn to_text_range(
&self,
db: &dyn Db,
file: File,
url: &lsp_types::Url,
encoding: PositionEncoding,
) -> Option<TextRange>;
}
impl RangeExt for lsp_types::Range {
fn to_text_range(
&self,
db: &dyn Db,
file: File,
url: &lsp_types::Url,
encoding: PositionEncoding,
) -> Option<TextRange> {
let start = self.start.to_text_size(db, file, url, encoding)?;
let end = self.end.to_text_size(db, file, url, encoding)?;
Some(TextRange::new(start, end))
}
}
pub(crate) trait PositionExt {
/// Convert an LSP Position to internal `TextSize`.
///
/// For notebook support, this uses the URI to determine which cell the position
/// refers to, and maps the cell-relative position to the absolute position in the
/// concatenated notebook file.
///
/// Returns `None` if `file` is a notebook and the
/// cell identified by `url` can't be looked up or if the notebook
/// isn't open in the editor.
fn to_text_size(
&self,
db: &dyn Db,
file: File,
url: &lsp_types::Url,
encoding: PositionEncoding,
) -> Option<TextSize>;
}
impl PositionExt for lsp_types::Position {
    fn to_text_size(
        &self,
        db: &dyn Db,
        file: File,
        url: &lsp_types::Url,
        encoding: PositionEncoding,
    ) -> Option<TextSize> {
        let source = source_text(db, file);
        let index = line_index(db, file);
        if let Some(notebook) = source.as_notebook() {
            // `url` identifies the cell; map the cell-relative position to an
            // absolute position in the concatenated notebook source.
            let notebook_document = db.notebook_document(file)?;
            let cell_index = notebook_document.cell_index_by_uri(url)?;
            let cell_start_offset = notebook.cell_offset(cell_index).unwrap_or_default();
            let cell_relative_line = OneIndexed::from_zero_indexed(u32_index_to_usize(self.line));
            let cell_start_location =
                index.source_location(cell_start_offset, source.as_str(), encoding.into());
            // Every cell is expected to start at the first character of a line
            // in the concatenated source; the arithmetic below relies on it.
            assert_eq!(cell_start_location.character_offset, OneIndexed::MIN);
            // Absolute position into the concatenated notebook source text.
            let absolute_position = SourceLocation {
                line: cell_start_location
                    .line
                    .saturating_add(cell_relative_line.to_zero_indexed()),
                character_offset: OneIndexed::from_zero_indexed(u32_index_to_usize(self.character)),
            };
            return Some(index.offset(absolute_position, &source, encoding.into()));
        }
        // Regular (non-notebook) document: direct conversion.
        Some(lsp_position_to_text_size(*self, &source, &index, encoding))
    }
}
pub(crate) trait TextSizeExt {
/// Converts `self` into a position in an LSP text document (can be a cell or regular document).
///
/// Returns `None` if the position can't be converted:
///
/// * If `file` is a notebook but the notebook isn't open in the editor,
/// preventing us from looking up the corresponding cell.
/// * If `position` is out of bounds.
fn to_lsp_position(
&self,
db: &dyn Db,
file: File,
encoding: PositionEncoding,
) -> Option<LspPosition>
where
Self: Sized;
}
impl TextSizeExt for TextSize {
fn to_lsp_position(
&self,
db: &dyn Db,
file: File,
encoding: PositionEncoding,
) -> Option<LspPosition> {
let source = source_text(db, file);
let index = line_index(db, file);
if let Some(notebook) = source.as_notebook() {
let notebook_document = db.notebook_document(file)?;
let start = index.source_location(*self, source.as_str(), encoding.into());
let cell = notebook.index().cell(start.line)?;
let cell_relative_start = notebook.index().translate_source_location(&start);
return Some(LspPosition {
uri: Some(notebook_document.cell_uri_by_index(cell)?.clone()),
position: source_location_to_position(&cell_relative_start),
});
}
let uri = file_to_url(db, file);
let position = text_size_to_lsp_position(*self, &source, &index, encoding);
Some(LspPosition { position, uri })
}
}
pub(crate) trait ToRangeExt {
/// Converts self into a range into an LSP text document (can be a cell or regular document).
///
/// Returns `None` if the range can't be converted:
///
/// * If `file` is a notebook but the notebook isn't open in the editor,
/// preventing us from looking up the corresponding cell.
/// * If range is out of bounds.
fn to_lsp_range(&self, db: &dyn Db, file: File, encoding: PositionEncoding)
-> Option<LspRange>;
}
/// Widens an LSP `u32` line/character index to `usize`.
///
/// Infallible on every platform where `usize` is at least 32 bits; the
/// `expect` documents that assumption.
fn u32_index_to_usize(index: u32) -> usize {
    index.try_into().expect("u32 fits in usize")
}
/// Converts `offset` to an LSP position using `index` and `encoding`.
/// `index` must have been built from `text`.
fn text_size_to_lsp_position(
    offset: TextSize,
    text: &str,
    index: &LineIndex,
    encoding: PositionEncoding,
) -> lsp_types::Position {
    let source_location = index.source_location(offset, text, encoding.into());
    source_location_to_position(&source_location)
}

/// Converts both endpoints of `range` into an LSP `Range`.
fn text_range_to_lsp_range(
    range: TextRange,
    text: &str,
    index: &LineIndex,
    encoding: PositionEncoding,
) -> lsp_types::Range {
    lsp_types::Range {
        start: text_size_to_lsp_position(range.start(), text, index, encoding),
        end: text_size_to_lsp_position(range.end(), text, index, encoding),
    }
}
/// Helper function to convert an LSP Position to internal `TextSize`.
/// This is used internally by the `PositionExt` trait and other helpers.
fn lsp_position_to_text_size(
position: lsp_types::Position,
text: &str,
index: &LineIndex,
encoding: PositionEncoding,
) -> TextSize {
index.offset(
SourceLocation {
line: OneIndexed::from_zero_indexed(u32_index_to_usize(position.line)),
character_offset: OneIndexed::from_zero_indexed(u32_index_to_usize(position.character)),
},
text,
encoding.into(),
)
}
/// Helper function to convert an LSP Range to internal `TextRange`.
/// This is used internally by the `RangeExt` trait and in special cases
/// where `db` and `file` are not available (e.g., when applying document changes).
///
/// NOTE(review): assumes `index` was built from `text`; the resulting
/// offsets are meaningless otherwise.
pub(crate) fn lsp_range_to_text_range(
    range: lsp_types::Range,
    text: &str,
    index: &LineIndex,
    encoding: PositionEncoding,
) -> TextRange {
    TextRange::new(
        lsp_position_to_text_size(range.start, text, index, encoding),
        lsp_position_to_text_size(range.end, text, index, encoding),
    )
}
impl ToRangeExt for TextRange {
    fn to_lsp_range(
        &self,
        db: &dyn Db,
        file: File,
        encoding: PositionEncoding,
    ) -> Option<LspRange> {
        let source = source_text(db, file);
        let index = line_index(db, file);
        if let Some(notebook) = source.as_notebook() {
            let notebook_index = notebook.index();
            let notebook_document = db.notebook_document(file)?;
            // Translate both endpoints from the concatenated notebook source
            // into cell-relative positions.
            let start_in_concatenated =
                index.source_location(self.start(), &source, encoding.into());
            let cell_index = notebook_index.cell(start_in_concatenated.line)?;
            let end_in_concatenated = index.source_location(self.end(), &source, encoding.into());
            let start_in_cell = source_location_to_position(
                &notebook_index.translate_source_location(&start_in_concatenated),
            );
            let end_in_cell = source_location_to_position(
                &notebook_index.translate_source_location(&end_in_concatenated),
            );
            // NOTE(review): the returned URI is the cell containing `start`;
            // if `end` fell in a different cell its position would be relative
            // to that other cell — confirm callers never produce cross-cell ranges.
            let cell_uri = notebook_document
                .cell_uri_by_index(cell_index)
                .expect("Index to contain an URI for every cell");
            return Some(LspRange {
                uri: Some(cell_uri.clone()),
                range: lsp_types::Range::new(start_in_cell, end_in_cell),
            });
        }
        // Regular document: convert directly and resolve the file's URL.
        let range = text_range_to_lsp_range(*self, &source, &index, encoding);
        let uri = file_to_url(db, file);
        Some(LspRange { range, uri })
    }
}
/// Converts ty's one-based [`SourceLocation`] to a zero-based LSP `Position`.
///
/// # Panics
///
/// Panics if the zero-based line or character index does not fit in a `u32`
/// (the LSP wire type).
fn source_location_to_position(location: &SourceLocation) -> lsp_types::Position {
    lsp_types::Position {
        line: u32::try_from(location.line.to_zero_indexed()).expect("line usize fits in u32"),
        character: u32::try_from(location.character_offset.to_zero_indexed())
            .expect("character usize fits in u32"),
    }
}
pub(crate) trait FileRangeExt {
    /// Converts this file range to an `LspRange`, which then requires an explicit
    /// decision about how to use it (as a local range or as a location).
    fn to_lsp_range(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<LspRange>;
}

impl FileRangeExt for FileRange {
    // Delegates to `TextRange::to_lsp_range` using the range's own file.
    fn to_lsp_range(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<LspRange> {
        self.range().to_lsp_range(db, self.file(), encoding)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/document/text_document.rs | crates/ty_server/src/document/text_document.rs | use lsp_types::{TextDocumentContentChangeEvent, Url};
use ruff_source_file::LineIndex;
use crate::PositionEncoding;
use crate::document::range::lsp_range_to_text_range;
use crate::system::AnySystemPath;
pub(crate) type DocumentVersion = i32;
/// A regular text file or the content of a notebook cell.
///
/// The state of an individual document in the server. Stays up-to-date
/// with changes made by the user, including unsaved changes.
#[derive(Debug, Clone)]
pub struct TextDocument {
/// The URL as sent by the client
url: Url,
/// The string contents of the document.
contents: String,
/// The latest version of the document, set by the LSP client. The server will panic in
/// debug mode if we attempt to update the document with an 'older' version.
version: DocumentVersion,
/// The language ID of the document as provided by the client.
language_id: Option<LanguageId>,
/// For cells, the path to the notebook document.
notebook: Option<AnySystemPath>,
}
/// The LSP `languageId` of a text document, reduced to the two cases ty
/// distinguishes: Python source vs. everything else.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum LanguageId {
    Python,
    Other,
}

impl From<&str> for LanguageId {
    fn from(language_id: &str) -> Self {
        // Only the exact (lowercase) LSP id "python" is recognized.
        if language_id == "python" {
            LanguageId::Python
        } else {
            LanguageId::Other
        }
    }
}
impl TextDocument {
/// Creates a text document with the client-provided URL, contents and
/// version. Language id and notebook association are set separately via
/// the `with_*` builders below.
pub fn new(url: Url, contents: String, version: DocumentVersion) -> Self {
    Self {
        url,
        contents,
        version,
        language_id: None,
        notebook: None,
    }
}

/// Builder: records the LSP `languageId` the client reported for this document.
#[must_use]
pub fn with_language_id(mut self, language_id: &str) -> Self {
    self.language_id = Some(LanguageId::from(language_id));
    self
}

/// Builder: marks this document as a cell of the notebook at `notebook`.
#[must_use]
pub(crate) fn with_notebook(mut self, notebook: AnySystemPath) -> Self {
    self.notebook = Some(notebook);
    self
}

/// Consumes the document, returning its contents.
pub fn into_contents(self) -> String {
    self.contents
}

pub(crate) fn url(&self) -> &Url {
    &self.url
}

/// The current text, including unsaved changes.
pub fn contents(&self) -> &str {
    &self.contents
}

/// The version last reported by the client.
pub fn version(&self) -> DocumentVersion {
    self.version
}

/// The language id reported by the client, if any.
pub fn language_id(&self) -> Option<LanguageId> {
    self.language_id
}

/// For notebook cells: the path of the notebook this cell belongs to.
pub(crate) fn notebook(&self) -> Option<&AnySystemPath> {
    self.notebook.as_ref()
}
/// Applies a batch of LSP `didChange` content changes to this document and
/// bumps its version to `new_version`.
///
/// Each change's range refers to the document state *after* all preceding
/// changes in the batch have been applied, so changes are processed in order.
///
/// Improvement over the previous revision: the `LineIndex` was rebuilt after
/// every change — including after the final one, where it was never read.
/// It is now built only immediately before a ranged change needs it, which
/// produces identical offsets while skipping the wasted O(n) rebuilds.
pub fn apply_changes(
    &mut self,
    changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
    new_version: DocumentVersion,
    encoding: PositionEncoding,
) {
    // Fast path: a single change without a range replaces the whole document.
    if let [
        lsp_types::TextDocumentContentChangeEvent {
            range: None, text, ..
        },
    ] = changes.as_slice()
    {
        tracing::debug!("Fast path - replacing entire document");
        self.modify(|contents, version| {
            contents.clone_from(text);
            *version = new_version;
        });
        return;
    }
    let mut new_contents = self.contents().to_string();
    for TextDocumentContentChangeEvent {
        range,
        text: change,
        ..
    } in changes
    {
        if let Some(range) = range {
            // The index must reflect the current (post-previous-change)
            // contents, so build it fresh for each ranged change.
            let index = LineIndex::from_source_text(&new_contents);
            let range = lsp_range_to_text_range(range, &new_contents, &index, encoding);
            new_contents.replace_range(
                usize::from(range.start())..usize::from(range.end()),
                &change,
            );
        } else {
            // A change without a range replaces the entire document.
            new_contents = change;
        }
    }
    self.modify(|contents, version| {
        *contents = new_contents;
        *version = new_version;
    });
}
/// Bumps the document version without modifying the contents.
pub fn update_version(&mut self, new_version: DocumentVersion) {
    self.modify(|_, version| {
        *version = new_version;
    });
}

// Central mutation point: all contents/version updates funnel through here
// so the version monotonicity invariant is enforced in one place.
fn modify(&mut self, func: impl FnOnce(&mut String, &mut DocumentVersion)) {
    let old_version = self.version;
    func(&mut self.contents, &mut self.version);
    // The client must never send an older version (checked in debug builds only).
    debug_assert!(self.version >= old_version);
}
}
#[cfg(test)]
mod tests {
    use crate::{PositionEncoding, TextDocument};
    use lsp_types::{Position, TextDocumentContentChangeEvent, Url};

    /// Applies an insert, an undo, and a redo of the same single-character
    /// edit in one batch. The fixture contains multi-byte characters on the
    /// earlier lines, so this exercises UTF-16 position decoding and the
    /// requirement that each change resolves against the result of the
    /// previous one.
    #[test]
    fn redo_edit() {
        let mut document = TextDocument::new(
            Url::parse("file:///test").unwrap(),
            r#""""
测试comment
一些测试内容
"""
import click
@click.group()
def interface():
pas
"#
            .to_string(),
            0,
        );
        // Add an `s`, remove it again (back to the original code), and then re-add the `s`
        document.apply_changes(
            vec![
                TextDocumentContentChangeEvent {
                    range: Some(lsp_types::Range::new(
                        Position::new(9, 7),
                        Position::new(9, 7),
                    )),
                    range_length: Some(0),
                    text: "s".to_string(),
                },
                TextDocumentContentChangeEvent {
                    range: Some(lsp_types::Range::new(
                        Position::new(9, 7),
                        Position::new(9, 8),
                    )),
                    range_length: Some(1),
                    text: String::new(),
                },
                TextDocumentContentChangeEvent {
                    range: Some(lsp_types::Range::new(
                        Position::new(9, 7),
                        Position::new(9, 7),
                    )),
                    range_length: Some(0),
                    text: "s".to_string(),
                },
            ],
            1,
            PositionEncoding::UTF16,
        );
        assert_eq!(
            &document.contents,
            r#""""
测试comment
一些测试内容
"""
import click
@click.group()
def interface():
pass
"#
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/document/location.rs | crates/ty_server/src/document/location.rs | use lsp_types::Location;
use ruff_db::files::FileRange;
use ty_ide::{NavigationTarget, ReferenceTarget};
use crate::Db;
use crate::PositionEncoding;
use crate::document::{FileRangeExt, ToRangeExt};
/// Conversion of IDE navigation/reference results into LSP location types.
pub(crate) trait ToLink {
    /// Converts `self` into a plain LSP `Location` (URI plus range).
    fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location>;
    /// Converts `self` into an LSP `LocationLink`. `src` is the origin range
    /// in the requesting document (shown highlighted by some clients).
    fn to_link(
        &self,
        db: &dyn Db,
        src: Option<FileRange>,
        encoding: PositionEncoding,
    ) -> Option<lsp_types::LocationLink>;
}
impl ToLink for NavigationTarget {
    fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location> {
        // Uses the focus range (e.g. the identifier) rather than the full
        // definition range for the plain-location form.
        FileRange::new(self.file(), self.focus_range())
            .to_lsp_range(db, encoding)?
            .to_location()
    }
    fn to_link(
        &self,
        db: &dyn Db,
        src: Option<FileRange>,
        encoding: PositionEncoding,
    ) -> Option<lsp_types::LocationLink> {
        let file = self.file();
        // Get target_range and URI together to ensure they're consistent (same cell for notebooks)
        let target_location = self
            .full_range()
            .to_lsp_range(db, file, encoding)?
            .into_location()?;
        let target_range = target_location.range;
        // For selection_range, we can use as_local_range since we know it's in the same document/cell
        let selection_range = self
            .focus_range()
            .to_lsp_range(db, file, encoding)?
            .local_range();
        // The origin range is optional; conversion failure degrades to `None`
        // rather than aborting the whole link.
        let src = src.and_then(|src| Some(src.to_lsp_range(db, encoding)?.local_range()));
        Some(lsp_types::LocationLink {
            target_uri: target_location.uri,
            target_range,
            target_selection_range: selection_range,
            origin_selection_range: src,
        })
    }
}
impl ToLink for ReferenceTarget {
    fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location> {
        self.file_range()
            .to_lsp_range(db, encoding)?
            .into_location()
    }
    fn to_link(
        &self,
        db: &dyn Db,
        src: Option<FileRange>,
        encoding: PositionEncoding,
    ) -> Option<lsp_types::LocationLink> {
        // Get target_range and URI together to ensure they're consistent (same cell for notebooks)
        let target_location = self
            .range()
            .to_lsp_range(db, self.file(), encoding)?
            .into_location()?;
        let target_range = target_location.range;
        // References have no separate focus range, so target and selection coincide.
        let selection_range = target_range;
        let src = src.and_then(|src| Some(src.to_lsp_range(db, encoding)?.local_range()));
        Some(lsp_types::LocationLink {
            target_uri: target_location.uri,
            target_range,
            target_selection_range: selection_range,
            origin_selection_range: src,
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/notebook.rs | crates/ty_server/tests/e2e/notebook.rs | use insta::assert_json_snapshot;
use lsp_types::{NotebookCellKind, Position, Range};
use ruff_db::system::SystemPath;
use ty_server::ClientOptions;
use crate::{TestServer, TestServerBuilder};
static FILTERS: &[(&str, &str)] = &[(r#""sortText": "[0-9 ]+""#, r#""sortText": "[RANKING]""#)];
/// Opening a notebook publishes one diagnostics notification per cell, in
/// cell order.
#[test]
fn publish_diagnostics_open() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .enable_diagnostic_related_information(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("test.ipynb");
    builder.add_python_cell(
        r#"from typing import Literal
type Style = Literal["italic", "bold", "underline"]"#,
    );
    builder.add_python_cell(
        r#"def with_style(line: str, word, style: Style) -> str:
if style == "italic":
return line.replace(word, f"*{word}*")
elif style == "bold":
return line.replace(word, f"__{word}__")
position = line.find(word)
output = line + "\n"
output += " " * position
output += "-" * len(word)
"#,
    );
    // The last cell deliberately passes an invalid `Style` literal.
    builder.add_python_cell(
        r#"print(with_style("ty is a fast type checker for Python.", "fast", "underlined"))
"#,
    );
    builder.open(&mut server);
    let cell1_diagnostics =
        server.await_notification::<lsp_types::notification::PublishDiagnostics>();
    let cell2_diagnostics =
        server.await_notification::<lsp_types::notification::PublishDiagnostics>();
    let cell3_diagnostics =
        server.await_notification::<lsp_types::notification::PublishDiagnostics>();
    assert_json_snapshot!([cell1_diagnostics, cell2_diagnostics, cell3_diagnostics]);
    Ok(())
}
/// Edits a cell so that a diagnostic lands at the very end of the cell and
/// verifies the server re-publishes diagnostics for every cell without
/// panicking.
#[test]
fn diagnostic_end_of_file() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("test.ipynb");
    builder.add_python_cell(
        r#"from typing import Literal
type Style = Literal["italic", "bold", "underline"]"#,
    );
    builder.add_python_cell(
        r#"def with_style(line: str, word, style: Style) -> str:
if style == "italic":
return line.replace(word, f"*{word}*")
elif style == "bold":
return line.replace(word, f"__{word}__")
position = line.find(word)
output = line + "\n"
output += " " * position
output += "-" * len(word)
"#,
    );
    let cell_3 = builder.add_python_cell(
        r#"with_style("test", "word", "underline")
IOError"#,
    );
    let notebook_url = builder.open(&mut server);
    // Drain the initial per-cell diagnostics before applying the edit.
    server.collect_publish_diagnostic_notifications(3);
    server.send_notification::<lsp_types::notification::DidChangeNotebookDocument>(
        lsp_types::DidChangeNotebookDocumentParams {
            notebook_document: lsp_types::VersionedNotebookDocumentIdentifier {
                version: 0,
                uri: notebook_url,
            },
            change: lsp_types::NotebookDocumentChangeEvent {
                metadata: None,
                cells: Some(lsp_types::NotebookDocumentCellChange {
                    structure: None,
                    data: None,
                    text_content: Some(vec![lsp_types::NotebookDocumentChangeTextContent {
                        document: lsp_types::VersionedTextDocumentIdentifier {
                            uri: cell_3,
                            version: 0,
                        },
                        changes: {
                            // Deletes the closing quote of `"test"`,
                            // introducing a syntax error in the cell.
                            vec![lsp_types::TextDocumentContentChangeEvent {
                                range: Some(Range::new(Position::new(0, 16), Position::new(0, 17))),
                                range_length: Some(1),
                                text: String::new(),
                            }]
                        },
                    }]),
                }),
            },
        },
    );
    let diagnostics = server.collect_publish_diagnostic_notifications(3);
    assert_json_snapshot!(diagnostics);
    Ok(())
}
/// Requests full semantic tokens for each cell of a three-cell notebook and
/// snapshots the results.
#[test]
fn semantic_tokens() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    let first_cell = builder.add_python_cell(
        r#"from typing import Literal
type Style = Literal["italic", "bold", "underline"]"#,
    );
    let second_cell = builder.add_python_cell(
        r#"def with_style(line: str, word, style: Style) -> str:
if style == "italic":
return line.replace(word, f"*{word}*")
elif style == "bold":
return line.replace(word, f"__{word}__")
position = line.find(word)
output = line + "\n"
output += " " * position
output += "-" * len(word)
"#,
    );
    let third_cell = builder.add_python_cell(
        r#"print(with_style("ty is a fast type checker for Python.", "fast", "underlined"))
"#,
    );
    builder.open(&mut server);
    let cell1_tokens = semantic_tokens_full_for_cell(&mut server, &first_cell);
    let cell2_tokens = semantic_tokens_full_for_cell(&mut server, &second_cell);
    let cell3_tokens = semantic_tokens_full_for_cell(&mut server, &third_cell);
    assert_json_snapshot!([cell1_tokens, cell2_tokens, cell3_tokens]);
    // Drain the diagnostics published on open so they don't leak into other assertions.
    server.collect_publish_diagnostic_notifications(3);
    Ok(())
}
/// Reordering cells via a structure change must re-run analysis in the new
/// cell order: `b = a` before `a = 10` is an error, after the swap it isn't.
#[test]
fn swap_cells() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    let first_cell = builder.add_python_cell(
        r#"b = a
"#,
    );
    let second_cell = builder.add_python_cell(r#"a = 10"#);
    builder.add_python_cell(r#"c = b"#);
    let notebook = builder.open(&mut server);
    let diagnostics = server.collect_publish_diagnostic_notifications(3);
    assert_json_snapshot!(diagnostics, @r###"
{
"vscode-notebook-cell://src/test.ipynb#0": [
{
"range": {
"start": {
"line": 0,
"character": 4
},
"end": {
"line": 0,
"character": 5
}
},
"severity": 1,
"code": "unresolved-reference",
"codeDescription": {
"href": "https://ty.dev/rules#unresolved-reference"
},
"source": "ty",
"message": "Name `a` used when not defined"
}
],
"vscode-notebook-cell://src/test.ipynb#1": [],
"vscode-notebook-cell://src/test.ipynb#2": []
}
"###);
    // Re-order the cells from `b`, `a`, `c` to `a`, `b`, `c` (swapping cell 1 and 2)
    server.send_notification::<lsp_types::notification::DidChangeNotebookDocument>(
        lsp_types::DidChangeNotebookDocumentParams {
            notebook_document: lsp_types::VersionedNotebookDocumentIdentifier {
                version: 1,
                uri: notebook,
            },
            change: lsp_types::NotebookDocumentChangeEvent {
                metadata: None,
                cells: Some(lsp_types::NotebookDocumentCellChange {
                    structure: Some(lsp_types::NotebookDocumentCellChangeStructure {
                        array: lsp_types::NotebookCellArrayChange {
                            start: 0,
                            delete_count: 2,
                            cells: Some(vec![
                                lsp_types::NotebookCell {
                                    kind: NotebookCellKind::Code,
                                    document: second_cell,
                                    metadata: None,
                                    execution_summary: None,
                                },
                                lsp_types::NotebookCell {
                                    kind: NotebookCellKind::Code,
                                    document: first_cell,
                                    metadata: None,
                                    execution_summary: None,
                                },
                            ]),
                        },
                        did_open: None,
                        did_close: None,
                    }),
                    data: None,
                    text_content: None,
                }),
            },
        },
    );
    // After the swap, `a` is defined before use in every cell.
    let diagnostics = server.collect_publish_diagnostic_notifications(3);
    assert_json_snapshot!(diagnostics, @r###"
{
"vscode-notebook-cell://src/test.ipynb#0": [],
"vscode-notebook-cell://src/test.ipynb#1": [],
"vscode-notebook-cell://src/test.ipynb#2": []
}
"###);
    Ok(())
}
/// Auto-import completions for `Litera` in a cell that is not the one
/// containing the existing `typing` import.
#[test]
fn auto_import() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            SystemPath::new("src"),
            Some(ClientOptions::default().with_auto_import(true)),
        )?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    builder.add_python_cell(
        r#"from typing import TYPE_CHECKING
"#,
    );
    let second_cell = builder.add_python_cell(
        r#"# leading comment
b: Litera
"#,
    );
    builder.open(&mut server);
    server.collect_publish_diagnostic_notifications(2);
    // Completion position is right after `Litera` on the annotation line.
    let completions = literal_completions(&mut server, &second_cell, Position::new(1, 9));
    insta::with_settings!({
        filters => FILTERS.iter().copied(),
    }, {
        assert_json_snapshot!(completions);
    });
    Ok(())
}
/// Auto-import completions when the existing `typing` import lives in the
/// same cell as the completion site.
#[test]
fn auto_import_same_cell() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            SystemPath::new("src"),
            Some(ClientOptions::default().with_auto_import(true)),
        )?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    let first_cell = builder.add_python_cell(
        r#"from typing import TYPE_CHECKING
b: Litera
"#,
    );
    builder.open(&mut server);
    server.collect_publish_diagnostic_notifications(1);
    let completions = literal_completions(&mut server, &first_cell, Position::new(1, 9));
    insta::with_settings!({
        filters => FILTERS.iter().copied(),
    }, {
        assert_json_snapshot!(completions);
    });
    Ok(())
}
/// Auto-import must insert new imports *after* a `from __future__` import,
/// which Python requires to be the first statement.
#[test]
fn auto_import_from_future() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            SystemPath::new("src"),
            Some(ClientOptions::default().with_auto_import(true)),
        )?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    builder.add_python_cell(r#"from typing import TYPE_CHECKING"#);
    let second_cell = builder.add_python_cell(
        r#"from __future__ import annotations
b: Litera
"#,
    );
    builder.open(&mut server);
    server.collect_publish_diagnostic_notifications(2);
    let completions = literal_completions(&mut server, &second_cell, Position::new(1, 9));
    insta::with_settings!({
        filters => FILTERS.iter().copied(),
    }, {
        assert_json_snapshot!(completions);
    });
    Ok(())
}
/// Auto-import must insert new imports *after* a cell-level docstring.
#[test]
fn auto_import_docstring() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            SystemPath::new("src"),
            Some(ClientOptions::default().with_auto_import(true)),
        )?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    builder.add_python_cell(
        r#"from typing import TYPE_CHECKING
"#,
    );
    let second_cell = builder.add_python_cell(
        r#""""A cell level docstring"""
b: Litera
"#,
    );
    builder.open(&mut server);
    server.collect_publish_diagnostic_notifications(2);
    let completions = literal_completions(&mut server, &second_cell, Position::new(1, 9));
    insta::with_settings!({
        filters => FILTERS.iter().copied(),
    }, {
        assert_json_snapshot!(completions);
    });
    Ok(())
}
/// With `show_syntax_errors` disabled, syntactically broken cells must
/// publish empty diagnostics instead of syntax errors.
#[test]
fn invalid_syntax_with_syntax_errors_disabled() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            SystemPath::new("src"),
            Some(ClientOptions::default().with_show_syntax_errors(false)),
        )?
        .build()
        .wait_until_workspaces_are_initialized();
    server.initialization_result().unwrap();
    let mut builder = NotebookBuilder::virtual_file("src/test.ipynb");
    builder.add_python_cell(
        r#"def foo(
"#,
    );
    builder.add_python_cell(
        r#"x = 1 +
"#,
    );
    builder.open(&mut server);
    let diagnostics = server.collect_publish_diagnostic_notifications(2);
    assert_json_snapshot!(diagnostics, @r###"
{
"vscode-notebook-cell://src/test.ipynb#0": [],
"vscode-notebook-cell://src/test.ipynb#1": []
}
"###);
    Ok(())
}
/// Issues a `textDocument/semanticTokens/full` request for the given notebook
/// cell and blocks until the server's response arrives.
fn semantic_tokens_full_for_cell(
    server: &mut TestServer,
    cell_uri: &lsp_types::Url,
) -> Option<lsp_types::SemanticTokensResult> {
    use lsp_types::request::SemanticTokensFullRequest;

    let params = lsp_types::SemanticTokensParams {
        work_done_progress_params: lsp_types::WorkDoneProgressParams::default(),
        partial_result_params: lsp_types::PartialResultParams::default(),
        text_document: lsp_types::TextDocumentIdentifier {
            uri: cell_uri.clone(),
        },
    };
    let request_id = server.send_request::<SemanticTokensFullRequest>(params);
    server.await_response::<SemanticTokensFullRequest>(&request_id)
}
/// Incrementally constructs an in-memory notebook document for tests.
#[derive(Debug)]
pub(crate) struct NotebookBuilder {
    // URI the notebook itself will be opened under.
    notebook_url: lsp_types::Url,
    // The cells: (cell_metadata, content, language_id)
    cells: Vec<(lsp_types::NotebookCell, String, String)>,
}
impl NotebookBuilder {
    /// Creates a builder for a notebook that exists only in memory (no
    /// backing file), addressed via the `vs-code:` URI scheme.
    pub(crate) fn virtual_file(name: &str) -> Self {
        let url: lsp_types::Url = format!("vs-code:/{name}").parse().unwrap();
        Self {
            notebook_url: url,
            cells: Vec::new(),
        }
    }
    /// Appends a Python code cell and returns the synthetic
    /// `vscode-notebook-cell:` URI assigned to it; the URI fragment encodes
    /// the cell's index within the notebook.
    pub(crate) fn add_python_cell(&mut self, content: &str) -> lsp_types::Url {
        let index = self.cells.len();
        let id = format!(
            "vscode-notebook-cell:/{}#{}",
            self.notebook_url.path(),
            index
        );
        let url: lsp_types::Url = id.parse().unwrap();
        self.cells.push((
            lsp_types::NotebookCell {
                kind: NotebookCellKind::Code,
                document: url.clone(),
                metadata: None,
                execution_summary: None,
            },
            content.to_string(),
            "python".to_string(),
        ));
        url
    }
    /// Sends `notebookDocument/didOpen` with the accumulated cells and
    /// returns the notebook's URI. Consumes the builder.
    pub(crate) fn open(self, server: &mut TestServer) -> lsp_types::Url {
        server.send_notification::<lsp_types::notification::DidOpenNotebookDocument>(
            lsp_types::DidOpenNotebookDocumentParams {
                notebook_document: lsp_types::NotebookDocument {
                    uri: self.notebook_url.clone(),
                    notebook_type: "jupyter-notebook".to_string(),
                    version: 0,
                    metadata: None,
                    cells: self.cells.iter().map(|(cell, _, _)| cell.clone()).collect(),
                },
                // The cell text documents are transmitted alongside the
                // notebook structure, as the LSP notebook protocol requires.
                cell_text_documents: self
                    .cells
                    .iter()
                    .map(|(cell, content, language_id)| lsp_types::TextDocumentItem {
                        uri: cell.document.clone(),
                        language_id: language_id.clone(),
                        version: 0,
                        text: content.clone(),
                    })
                    .collect(),
            },
        );
        self.notebook_url
    }
}
/// Requests completions at `position` in `cell` and keeps only the items
/// whose label starts with `Litera`.
fn literal_completions(
    server: &mut TestServer,
    cell: &lsp_types::Url,
    position: Position,
) -> Vec<lsp_types::CompletionItem> {
    let mut items = server.completion_request(cell, position);
    // There are a ton of imports we don't care about in here...
    // The important bit is that an edit is always restricted to the current cell. That means,
    // we can't add `Literal` to the `from typing import TYPE_CHECKING` import in cell 1
    items.retain(|item| item.label.starts_with("Litera"));
    items
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/code_actions.rs | crates/ty_server/tests/e2e/code_actions.rs | use crate::{TestServer, TestServerBuilder};
use anyhow::Result;
use lsp_types::{DocumentDiagnosticReportResult, Position, Range, request::CodeActionRequest};
use ruff_db::system::SystemPath;
/// Builds `textDocument/codeAction` params for `file` over `range`, seeding
/// the request context with the diagnostics from a pull-diagnostics response.
///
/// # Panics
/// Panics if `diagnostics` is not a full document diagnostic report.
fn code_actions_at(
    server: &TestServer,
    diagnostics: DocumentDiagnosticReportResult,
    file: &SystemPath,
    range: Range,
) -> lsp_types::CodeActionParams {
    lsp_types::CodeActionParams {
        text_document: lsp_types::TextDocumentIdentifier {
            uri: server.file_uri(file),
        },
        range,
        context: lsp_types::CodeActionContext {
            diagnostics: match diagnostics {
                lsp_types::DocumentDiagnosticReportResult::Report(
                    lsp_types::DocumentDiagnosticReport::Full(report),
                ) => report.full_document_diagnostic_report.items,
                _ => panic!("Expected full diagnostic report"),
            },
            only: None,
            trigger_kind: None,
        },
        work_done_progress_params: lsp_types::WorkDoneProgressParams::default(),
        partial_result_params: lsp_types::PartialResultParams::default(),
    }
}
/// Returns a `Range` spanning the whole of `input`, from (0, 0) to the end
/// of the last line.
///
/// NOTE: the end character is the last line's *byte* length; this matches
/// the LSP character offset only for ASCII content, which is all these
/// tests use.
#[allow(clippy::cast_possible_truncation)]
fn full_range(input: &str) -> Range {
    let (last_line_index, last_line) = input
        .lines()
        .enumerate()
        .last()
        .expect("non-empty document");
    let end_character = last_line.len() as u32;
    let start = Position::new(0, 0);
    let end = Position::new(last_line_index as u32, end_character);
    Range::new(start, end)
}
/// A quick-fix code action is offered for an unused `ty: ignore` comment.
#[test]
fn code_action() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
x = 20 / 2 # ty: ignore[division-by-zero]
";
    let ty_toml = SystemPath::new("ty.toml");
    // Enable the rule that flags the unused suppression comment.
    let ty_toml_content = "\
[rules]
unused-ignore-comment = \"warn\"
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions for the line with the unused ignore comment.
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
/// No code action is returned when the requested range is on the same line
/// as a diagnostic but does not overlap it.
#[test]
fn no_code_action_for_non_overlapping_range_on_same_line() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
x = 20 / 2 # ty: ignore[division-by-zero]
";
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "\
[rules]
unused-ignore-comment = \"warn\"
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    // Get code actions for a range that doesn't overlap with the diagnostic.
    // The diagnostic is at characters 12-42, so we request actions for characters 0-10.
    let range = Range::new(Position::new(0, 0), Position::new(0, 10));
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    // Should return None because the range doesn't overlap with the diagnostic.
    assert_eq!(code_actions, None);
    Ok(())
}
// `Literal` is available from two places so we should suggest two possible imports
#[test]
fn code_action_undefined_reference_multi() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
x: Literal[1] = 1
";
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
// Using an unimported decorator `@deprecated`
// NOTE(review): the `\` at the start of the raw string is a *literal*
// backslash (raw strings don't process line continuations), so the fixture
// begins with a stray `\` line — possibly unintended, but captured by the
// snapshot.
#[test]
fn code_action_undefined_decorator() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = r#"\
@deprecated("do not use!!!")
def my_func(): ...
"#;
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
// Using an unimported decorator `@deprecated` when the file already has an
// unrelated import (`warnings`) — the fix must slot in next to it.
// NOTE(review): as in the sibling test, the leading `\` inside the raw
// string is a literal backslash, not a line continuation.
#[test]
fn code_action_existing_import_undefined_decorator() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = r#"\
import warnings
@deprecated("do not use!!!")
def my_func(): ...
"#;
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
// Accessing `typing.Literal` without `typing` imported (ideally we suggest importing `typing`)
#[test]
fn code_action_attribute_access_on_unimported() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
x: typing.Literal[1] = 1
";
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
// Accessing `html.parser` when we've imported `html` but not `html.parser`
#[test]
fn code_action_possible_missing_submodule_attribute() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
import html
html.parser
";
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Get code actions
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
/// Regression test for a panic when a code-fix diagnostic points at a string annotation
#[test]
fn code_action_invalid_string_annotations() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = r#"
ab: "foobar"
"#;
    let ty_toml = SystemPath::new("ty.toml");
    let ty_toml_content = "";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(ty_toml, ty_toml_content)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // Wait for diagnostics to be computed.
    let diagnostics = server.document_diagnostic_request(foo, None);
    let range = full_range(foo_content);
    let code_action_params = code_actions_at(&server, diagnostics, foo, range);
    // Request code actions over the whole document; the interesting part is
    // that the server must not panic while producing them.
    let code_action_id = server.send_request::<CodeActionRequest>(code_action_params);
    let code_actions = server.await_response::<CodeActionRequest>(&code_action_id);
    insta::assert_json_snapshot!(code_actions);
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/pull_diagnostics.rs | crates/ty_server/tests/e2e/pull_diagnostics.rs | use std::time::Duration;
use anyhow::Result;
use insta::{assert_compact_json_snapshot, assert_debug_snapshot};
use lsp_server::RequestId;
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
NumberOrString, PartialResultParams, PreviousResultId, Url, WorkDoneProgressParams,
WorkspaceDiagnosticParams, WorkspaceDiagnosticReportResult, WorkspaceDocumentDiagnosticReport,
};
use ruff_db::system::SystemPath;
use ty_server::{ClientOptions, DiagnosticMode, PartialWorkspaceProgress};
use crate::{AwaitResponseError, TestServer, TestServerBuilder};
/// A pull-diagnostics request after `didOpen` returns the file's diagnostics.
#[test]
fn on_did_open() -> Result<()> {
    // Presumably installs an insta settings guard that normalizes result IDs
    // for the duration of the test — defined elsewhere in this file.
    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
return 42
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    let diagnostics = server.document_diagnostic_request(foo, None);
    assert_debug_snapshot!(diagnostics);
    Ok(())
}
/// With diagnostic mode `Off`, a pull-diagnostics request returns an empty
/// full report even for a file containing a type error.
#[test]
fn on_did_open_diagnostics_off() -> Result<()> {
    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
return 42
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            workspace_root,
            Some(ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Off)),
        )?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    let diagnostics = server.document_diagnostic_request(foo, None);
    assert_compact_json_snapshot!(diagnostics, @r#"{"kind": "full", "items": []}"#);
    Ok(())
}
/// With `show_syntax_errors` disabled, both workspace and document
/// pull-diagnostics report no items for a file with a syntax error.
#[test]
fn invalid_syntax_with_syntax_errors_disabled() -> Result<()> {
    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo(
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            workspace_root,
            Some(ClientOptions::default().with_show_syntax_errors(false)),
        )?
        .with_file(foo, foo_content)?
        .with_initialization_options(
            ClientOptions::default()
                .with_show_syntax_errors(false)
                .with_diagnostic_mode(DiagnosticMode::Workspace),
        )
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    let workspace_diagnostics = server.workspace_diagnostic_request(None, None);
    assert_compact_json_snapshot!(workspace_diagnostics, @r#"
{
"items": [
{
"kind": "full",
"uri": "file://<temp_dir>/src/foo.py",
"version": null,
"resultId": "[RESULT_ID]",
"items": []
}
]
}
"#);
    server.open_text_document(foo, foo_content, 1);
    let diagnostics = server.document_diagnostic_request(foo, None);
    assert_compact_json_snapshot!(diagnostics, @r#"{"kind": "full", "resultId": "[RESULT_ID]", "items": []}"#);
    Ok(())
}
/// A second pull-diagnostics request that sends back the previous result ID
/// must get an `Unchanged` report when the document has not changed.
#[test]
fn document_diagnostic_caching_unchanged() -> Result<()> {
    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
return 42
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content, 1);
    // First request with no previous result ID
    let first_response = server.document_diagnostic_request(foo, None);
    // Extract result ID from first response
    let result_id = match &first_response {
        lsp_types::DocumentDiagnosticReportResult::Report(
            lsp_types::DocumentDiagnosticReport::Full(report),
        ) => report
            .full_document_diagnostic_report
            .result_id
            .as_ref()
            .expect("First response should have a result ID")
            .clone(),
        _ => panic!("First response should be a full report"),
    };
    // Second request with the previous result ID - should return Unchanged
    let second_response = server.document_diagnostic_request(foo, Some(result_id));
    // Verify it's an unchanged report
    match second_response {
        lsp_types::DocumentDiagnosticReportResult::Report(
            lsp_types::DocumentDiagnosticReport::Unchanged(_),
        ) => {
            // Success - got unchanged report as expected
        }
        _ => panic!("Expected an unchanged report when diagnostics haven't changed"),
    }
    Ok(())
}
/// After the document is edited, a pull-diagnostics request carrying the old
/// result ID must get a fresh full report rather than `Unchanged`.
#[test]
fn document_diagnostic_caching_changed() -> Result<()> {
    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    // v1 has a return-type error; v2 fixes it.
    let foo_content_v1 = "\
def foo() -> str:
return 42
";
    let foo_content_v2 = "\
def foo() -> str:
return \"fixed\"
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content_v1)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();
    server.open_text_document(foo, foo_content_v1, 1);
    // First request with no previous result ID
    let first_response = server.document_diagnostic_request(foo, None);
    // Extract result ID from first response
    let result_id = match &first_response {
        lsp_types::DocumentDiagnosticReportResult::Report(
            lsp_types::DocumentDiagnosticReport::Full(report),
        ) => report
            .full_document_diagnostic_report
            .result_id
            .as_ref()
            .expect("First response should have a result ID")
            .clone(),
        _ => panic!("First response should be a full report"),
    };
    // Change the document to fix the error
    server.change_text_document(
        foo,
        vec![lsp_types::TextDocumentContentChangeEvent {
            range: None,
            range_length: None,
            text: foo_content_v2.to_string(),
        }],
        2,
    );
    // Second request with the previous result ID - should return a new full report
    let second_response = server.document_diagnostic_request(foo, Some(result_id));
    // Verify it's a full report (not unchanged)
    match second_response {
        lsp_types::DocumentDiagnosticReportResult::Report(
            lsp_types::DocumentDiagnosticReport::Full(report),
        ) => {
            // Should have no diagnostics now
            assert_eq!(report.full_document_diagnostic_report.items.len(), 0);
        }
        _ => panic!("Expected a full report when diagnostics have changed"),
    }
    Ok(())
}
/// End-to-end check of workspace diagnostic caching via `previousResultIds`.
///
/// Five files cover the interesting transitions between two consecutive
/// workspace diagnostic requests: unchanged (A), diagnostic added (B),
/// diagnostic removed (C), diagnostic changed (D), and the same diagnostic at
/// a shifted range (E). Both responses are sorted and snapshotted.
#[test]
fn workspace_diagnostic_caching() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
// File A: Will have an unchanged diagnostic
let file_a = SystemPath::new("src/unchanged.py");
let file_a_content = "\
def foo() -> str:
return 42 # This error will remain the same
";
// File B: Initially no error, will get a new error (added diagnostic)
let file_b = SystemPath::new("src/new_error.py");
let file_b_content_v1 = "\
def foo() -> int:
return 42 # No error initially
";
let file_b_content_v2 = "\
def foo() -> str:
return 42 # Error appears
";
// File C: Initially has error, will be fixed (removed diagnostic)
let file_c = SystemPath::new("src/fixed_error.py");
let file_c_content_v1 = "\
def foo() -> str:
return 42 # Error initially
";
let file_c_content_v2 = "\
def foo() -> str:
return \"fixed\" # Error removed
";
// File D: Has error that changes content (changed diagnostic)
let file_d = SystemPath::new("src/changed_error.py");
let file_d_content_v1 = "\
def foo() -> str:
return 42 # First error: expected str, got int
";
let file_d_content_v2 = "\
def foo() -> int:
return \"hello\" # Different error: expected int, got str
";
// File E: Modified but same diagnostic (e.g., new function added but original error remains)
let file_e = SystemPath::new("src/modified_same_error.py");
let file_e_content_v1 = "\
def foo() -> str:
return 42 # Error: expected str, got int
";
let file_e_content_v2 = "\
def bar() -> int:
return 100 # New function added at the top
def foo() -> str:
return 42 # Same error: expected str, got int
";
// Workspace diagnostic mode is required so `workspace/diagnostic` covers all files.
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
)
.with_file(file_a, file_a_content)?
.with_file(file_b, file_b_content_v1)?
.with_file(file_c, file_c_content_v1)?
.with_file(file_d, file_d_content_v1)?
.with_file(file_e, file_e_content_v1)?
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(file_a, file_a_content, 1);
// First request with no previous result IDs
let mut first_response = server
.workspace_diagnostic_request(Some(NumberOrString::String("progress-1".to_string())), None);
// Sort by URI so the snapshot is stable regardless of server ordering.
sort_workspace_diagnostic_response(&mut first_response);
assert_debug_snapshot!("workspace_diagnostic_initial_state", first_response);
// Consume all progress notifications sent during workspace diagnostics
consume_all_progress_notifications(&mut server)?;
// Extract result IDs from the first response
let previous_result_ids = extract_result_ids_from_response(&first_response);
// Make changes to files B, C, D, and E (leave A unchanged)
// Need to open files before changing them
server.open_text_document(file_b, file_b_content_v1, 1);
server.open_text_document(file_c, file_c_content_v1, 1);
server.open_text_document(file_d, file_d_content_v1, 1);
server.open_text_document(file_e, file_e_content_v1, 1);
// File B: Add a new error
server.change_text_document(
file_b,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_b_content_v2.to_string(),
}],
2,
);
// File C: Fix the error
server.change_text_document(
file_c,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_c_content_v2.to_string(),
}],
2,
);
// File D: Change the error
server.change_text_document(
file_d,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_d_content_v2.to_string(),
}],
2,
);
// File E: Modify the file but keep the same diagnostic
server.change_text_document(
file_e,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_e_content_v2.to_string(),
}],
2,
);
// Second request with previous result IDs
// Expected results:
// - File A: Unchanged report (diagnostic hasn't changed)
// - File B: Full report (new diagnostic appeared)
// - File C: Full report with empty diagnostics (diagnostic was removed)
// - File D: Full report (diagnostic content changed)
// - File E: Full report (the range changes)
let mut second_response = server.workspace_diagnostic_request(
Some(NumberOrString::String("progress-2".to_string())),
Some(previous_result_ids),
);
sort_workspace_diagnostic_response(&mut second_response);
// Consume all progress notifications sent during the second workspace diagnostics
consume_all_progress_notifications(&mut server)?;
assert_debug_snapshot!("workspace_diagnostic_after_changes", second_response);
Ok(())
}
/// Regression test: previous result ids whose URIs were percent-encoded by the
/// client (VS Code encodes `:` as `%3A`) must still be matched by *path*, so
/// an unchanged workspace suspends for long-polling instead of re-reporting.
///
/// Unix-only — presumably because `:` is not a valid path character on
/// Windows (NOTE(review): confirm the reason for the `cfg`).
#[test]
#[cfg(unix)]
fn workspace_diagnostic_caching_unchanged_with_colon_in_path() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("astral:test");
let foo = SystemPath::new("astral:test/test.py");
let foo_content = "\
def foo() -> str:
return 42
";
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
)
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
let first_response = server.workspace_diagnostic_request(None, None);
// Extract result IDs from the first response
let mut previous_result_ids = extract_result_ids_from_response(&first_response);
for previous_id in &mut previous_result_ids {
// VS Code URL encodes paths, so that `:` is encoded as `%3A`.
previous_id
.uri
.set_path(&previous_id.uri.path().replace(':', "%3A"));
}
let workspace_request_id =
server.send_request::<WorkspaceDiagnosticRequest>(WorkspaceDiagnosticParams {
identifier: None,
previous_result_ids,
work_done_progress_params: WorkDoneProgressParams::default(),
partial_result_params: PartialResultParams::default(),
});
// The URL mismatch shouldn't result in a full document report.
// The server needs to match the previous result IDs by the path, not the URL.
assert_workspace_diagnostics_suspends_for_long_polling(&mut server, &workspace_request_id);
let second_response = shutdown_and_await_workspace_diagnostic(server, &workspace_request_id);
insta::assert_compact_debug_snapshot!(second_response, @"Report(WorkspaceDiagnosticReport { items: [] })");
Ok(())
}
/// Installs an insta filter that redacts `result_id` values, which are
/// hash-based and therefore non-deterministic, with a stable placeholder.
///
/// The returned guard keeps the filter bound until it is dropped.
pub(crate) fn filter_result_id() -> insta::internals::SettingsBindDropGuard {
    let mut redaction_settings = insta::Settings::clone_current();
    redaction_settings.add_filter(r#""[a-f0-9]{16}""#, r#""[RESULT_ID]""#);
    redaction_settings.bind_to_scope()
}
/// Drains the progress notifications emitted during a workspace diagnostic
/// run: one `Begin`, any number of `Report`s, and a terminating `End`.
///
/// Returns an error if the first notification is not `Begin`.
fn consume_all_progress_notifications(server: &mut TestServer) -> Result<()> {
    // The first notification must open the progress session.
    let first = server.await_notification::<lsp_types::notification::Progress>();
    match first.value {
        lsp_types::ProgressParamsValue::WorkDone(lsp_types::WorkDoneProgress::Begin(_)) => {}
        _ => return Err(anyhow::anyhow!("Expected Begin progress notification")),
    }

    // Swallow `Report` notifications (the count depends on the number of
    // files) until the `End` notification arrives.
    loop {
        let next = server.await_notification::<lsp_types::notification::Progress>();
        if matches!(
            next.value,
            lsp_types::ProgressParamsValue::WorkDone(lsp_types::WorkDoneProgress::End(_))
        ) {
            return Ok(());
        }
    }
}
/// Tests that the server sends partial results for workspace diagnostics
/// if a client sets the `partial_result_token` in the request.
///
/// Note: In production, the server throttles the partial results to one every 50ms. However,
/// this behavior makes testing very hard. That's why the server, in tests, sends a partial response
/// as soon as it batched at least 2 diagnostics together.
#[test]
fn workspace_diagnostic_streaming() -> Result<()> {
const NUM_FILES: usize = 5;
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
// Create NUM_FILES (5) files with the same error; with the in-test batch size
// of 2 this yields two streamed batches plus a final response of 1 item.
let error_content = "\
def foo() -> str:
return 42 # Type error: expected str, got int
";
let mut builder = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
);
for i in 0..NUM_FILES {
let file_path_string = format!("src/file_{i:03}.py");
let file_path = SystemPath::new(&file_path_string);
builder = builder.with_file(file_path, error_content)?;
}
let mut server = builder
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
let partial_token = lsp_types::ProgressToken::String("streaming-diagnostics".to_string());
let request_id = server.send_request::<WorkspaceDiagnosticRequest>(WorkspaceDiagnosticParams {
identifier: None,
previous_result_ids: Vec::new(),
work_done_progress_params: WorkDoneProgressParams {
work_done_token: None,
},
partial_result_params: PartialResultParams {
partial_result_token: Some(partial_token.clone()),
},
});
let mut received_results = 0usize;
// First, read the response of the workspace diagnostic request.
// Note: This response comes after the progress notifications but it simplifies the test to read it first.
let final_response = server.await_response::<WorkspaceDiagnosticRequest>(&request_id);
// Process the final report.
// This should always be a partial report. However, the type definition in the LSP specification
// is broken in the sense that both `Report` and `Partial` have the exact same shape
// and deserializing a previously serialized `Partial` result will yield a `Report` type.
let response_items = match final_response {
WorkspaceDiagnosticReportResult::Report(report) => report.items,
WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
};
// The last batch should contain 1 item because the server sends a partial result with
// 2 items each.
assert_eq!(response_items.len(), 1);
received_results += response_items.len();
// Collect any partial results sent via progress notifications
while let Ok(params) =
server.try_await_notification::<PartialWorkspaceProgress>(Some(Duration::from_secs(1)))
{
if params.token == partial_token {
let streamed_items = match params.value {
// Ideally we'd assert that only the first response is a full report
// However, the type definition in the LSP specification is broken
// in the sense that both `Report` and `Partial` have the exact same structure
// but it also doesn't use a tag to tell them apart...
// That means, a client can never tell if it's a full report or a partial report
WorkspaceDiagnosticReportResult::Report(report) => report.items,
WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
};
// All streamed batches should contain 2 items (test behavior).
assert_eq!(streamed_items.len(), 2);
received_results += streamed_items.len();
if received_results == NUM_FILES {
break;
}
}
}
// Every file's diagnostic must have arrived exactly once across the final
// response and the streamed partials.
assert_eq!(received_results, NUM_FILES);
Ok(())
}
/// Tests that the server's diagnostic streaming (partial results) work correctly
/// with result ids.
#[test]
fn workspace_diagnostic_streaming_with_caching() -> Result<()> {
const NUM_FILES: usize = 7;
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let error_content = "def foo() -> str:\n return 42 # Error";
let changed_content = "def foo() -> str:\n return true # Error";
let mut builder = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
);
for i in 0..NUM_FILES {
let file_path_string = format!("src/error_{i}.py");
let file_path = SystemPath::new(&file_path_string);
builder = builder.with_file(file_path, error_content)?; // All files have errors initially
}
let mut server = builder
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(SystemPath::new("src/error_0.py"), error_content, 1);
server.open_text_document(SystemPath::new("src/error_1.py"), error_content, 1);
server.open_text_document(SystemPath::new("src/error_2.py"), error_content, 1);
// First request to get result IDs (non-streaming for simplicity)
let first_response = server.workspace_diagnostic_request(None, None);
let result_ids = extract_result_ids_from_response(&first_response);
assert_eq!(result_ids.len(), NUM_FILES);
// Change the first three files so their cached result ids become stale.
// NOTE(review): the replacement content still looks erroneous (`true` is an
// unresolved reference in Python), so these yield *changed* diagnostics rather
// than none — confirm against the recorded snapshot.
server.change_text_document(
SystemPath::new("src/error_0.py"),
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: changed_content.to_string(),
}],
2,
);
server.change_text_document(
SystemPath::new("src/error_1.py"),
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: changed_content.to_string(),
}],
2,
);
server.change_text_document(
SystemPath::new("src/error_2.py"),
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: changed_content.to_string(),
}],
2,
);
// Second request with caching - use streaming to test the caching behavior
let partial_token = lsp_types::ProgressToken::String("streaming-diagnostics".to_string());
let request2_id =
server.send_request::<WorkspaceDiagnosticRequest>(WorkspaceDiagnosticParams {
identifier: None,
previous_result_ids: result_ids,
work_done_progress_params: WorkDoneProgressParams {
work_done_token: None,
},
partial_result_params: PartialResultParams {
partial_result_token: Some(partial_token.clone()),
},
});
let final_response2 = server.await_response::<WorkspaceDiagnosticRequest>(&request2_id);
let mut all_items = Vec::new();
// The final response should contain one changed file's full report and all unchanged files
let items = match final_response2 {
WorkspaceDiagnosticReportResult::Report(report) => report.items,
WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
};
assert_eq!(items.len(), NUM_FILES - 3 + 1); // 4 unchanged + 1 full report; the other 2 changed files arrive via streamed partials
all_items.extend(items);
// Collect any partial results sent via progress notifications
while let Ok(params) =
server.try_await_notification::<PartialWorkspaceProgress>(Some(Duration::from_secs(1)))
{
if params.token == partial_token {
let streamed_items = match params.value {
// Ideally we'd assert that only the first response is a full report
// However, the type definition in the LSP specification is broken
// in the sense that both `Report` and `Partial` have the exact same structure
// but it also doesn't use a tag to tell them apart...
// That means, a client can never tell if it's a full report or a partial report
WorkspaceDiagnosticReportResult::Report(report) => report.items,
WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
};
// All streamed batches should contain 2 items.
assert_eq!(streamed_items.len(), 2);
all_items.extend(streamed_items);
if all_items.len() == NUM_FILES {
break;
}
}
}
// Sort by URI so the snapshot is deterministic.
sort_workspace_report_items(&mut all_items);
assert_debug_snapshot!(all_items);
Ok(())
}
/// Sorts the items of a workspace diagnostic response in place (by URI) so
/// snapshots are stable regardless of the order the server produced them in.
fn sort_workspace_diagnostic_response(response: &mut WorkspaceDiagnosticReportResult) {
    match response {
        WorkspaceDiagnosticReportResult::Report(report) => {
            sort_workspace_report_items(&mut report.items);
        }
        WorkspaceDiagnosticReportResult::Partial(partial) => {
            sort_workspace_report_items(&mut partial.items);
        }
    }
}
/// Orders workspace diagnostic report items by their document URI.
fn sort_workspace_report_items(items: &mut [WorkspaceDocumentDiagnosticReport]) {
    // Borrow the URI out of either report variant without cloning.
    fn uri_of(report: &WorkspaceDocumentDiagnosticReport) -> &Url {
        match report {
            WorkspaceDocumentDiagnosticReport::Full(full) => &full.uri,
            WorkspaceDocumentDiagnosticReport::Unchanged(unchanged) => &unchanged.uri,
        }
    }
    items.sort_unstable_by(|left, right| uri_of(left).cmp(uri_of(right)));
}
/// The LSP specification requires that the server sends a response for every
/// request.
///
/// Verifies that a long-polling workspace diagnostic request still receives a
/// response when the server is shut down.
#[test]
fn workspace_diagnostic_long_polling_responds_on_shutdown() -> Result<()> {
    let _filter = filter_result_id();

    let workspace_root = SystemPath::new("src");
    let file_path = SystemPath::new("src/test.py");
    let file_content = "\
def hello() -> str:
return \"world\"
";

    // A project with a single diagnostics-free file: the workspace diagnostic
    // request has nothing to report and therefore suspends (long-polls).
    let mut server = create_workspace_server_with_file(workspace_root, file_path, file_content)?;
    let pending_request = send_workspace_diagnostic_request(&mut server);
    assert_workspace_diagnostics_suspends_for_long_polling(&mut server, &pending_request);

    // Shutting down must flush a (default, empty) response for the suspended request.
    let response = shutdown_and_await_workspace_diagnostic(server, &pending_request);
    assert_debug_snapshot!(
        "workspace_diagnostic_long_polling_shutdown_response",
        response
    );
    Ok(())
}
/// Tests that the server responds to a long-polling workspace diagnostic request
/// after a change introduced a new diagnostic.
///
/// Unlike the shutdown test, here a `didChange` notification — not a shutdown —
/// wakes the suspended request.
#[test]
fn workspace_diagnostic_long_polling_responds_on_change() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let file_path = SystemPath::new("src/test.py");
let file_content_no_error = "\
def hello() -> str:
return \"world\"
";
let file_content_with_error = "\
def hello() -> str:
return 42 # Type error: expected str, got int
";
// Create a project with one file but no diagnostics
let mut server =
create_workspace_server_with_file(workspace_root, file_path, file_content_no_error)?;
// Open the file first
server.open_text_document(file_path, file_content_no_error, 1);
// Make a workspace diagnostic request to a project with one file but no diagnostics
// This should trigger long-polling since the project has no diagnostics
let request_id = send_workspace_diagnostic_request(&mut server);
// Verify the request doesn't complete immediately (should be long-polling)
assert_workspace_diagnostics_suspends_for_long_polling(&mut server, &request_id);
// Now introduce an error to the file - this should trigger the long-polling request to complete
server.change_text_document(
file_path,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_content_with_error.to_string(),
}],
2,
);
// The workspace diagnostic request should now complete with the new diagnostic
let workspace_response = server.await_response::<WorkspaceDiagnosticRequest>(&request_id);
// Verify we got a report with one file containing the new diagnostic
assert_debug_snapshot!(
"workspace_diagnostic_long_polling_change_response",
workspace_response
);
Ok(())
}
/// The LSP specification requires that the server responds to each request with exactly one response.
///
/// This test verifies that the server sends one response (and not two) if a long polling workspace diagnostic request
/// is cancelled.
#[test]
fn workspace_diagnostic_long_polling_responds_on_cancellation() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let file_path = SystemPath::new("src/test.py");
let file_content = "\
def hello() -> str:
return \"world\"
";
// Create a project with one file but no diagnostics
let mut server = create_workspace_server_with_file(workspace_root, file_path, file_content)?;
// Make a workspace diagnostic request to a project with one file but no diagnostics
// This should trigger long-polling since the project has no diagnostics
let request_id = send_workspace_diagnostic_request(&mut server);
// Verify the request doesn't complete immediately (should be long-polling)
assert_workspace_diagnostics_suspends_for_long_polling(&mut server, &request_id);
// Cancel the still-suspended request.
server.cancel(&request_id);
// The workspace diagnostic request should now respond with a cancellation response (Err).
let result = server.try_await_response::<WorkspaceDiagnosticRequest>(&request_id, None);
assert_debug_snapshot!(
"workspace_diagnostic_long_polling_cancellation_result",
result
);
// The test server's drop implementation asserts that we aren't sending the response twice.
Ok(())
}
/// This test verifies an entire workspace diagnostic cycle with long-polling:
/// * Initial suspend with no diagnostics
/// * Change the file to introduce a diagnostic, server should respond with the new diagnostics
/// * Send a second workspace diagnostic request, which should suspend again because the diagnostics haven't changed
/// * Change the file again to fix the diagnostic, server should respond with no diagnostics
#[test]
fn workspace_diagnostic_long_polling_suspend_change_suspend_cycle() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let file_path = SystemPath::new("src/test.py");
let file_content_no_error = "\
def hello() -> str:
return \"world\"
";
let file_content_with_error = "\
def hello() -> str:
return 42 # Type error: expected str, got int
";
let file_content_fixed = "\
def hello() -> str:
return \"fixed\"
";
// Create a project with one file but no diagnostics
let mut server =
create_workspace_server_with_file(workspace_root, file_path, file_content_no_error)?;
// Open the file first
server.open_text_document(file_path, file_content_no_error, 1);
// PHASE 1: Initial suspend (no diagnostics)
let request_id_1 = send_workspace_diagnostic_request(&mut server);
assert_workspace_diagnostics_suspends_for_long_polling(&mut server, &request_id_1);
// PHASE 2: Introduce error to trigger response
server.change_text_document(
file_path,
vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: file_content_with_error.to_string(),
}],
2,
);
// First request should complete with diagnostics
let first_response = server.await_response::<WorkspaceDiagnosticRequest>(&request_id_1);
// Extract result IDs from the first response for the second request
let previous_result_ids = extract_result_ids_from_response(&first_response);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/configuration.rs | crates/ty_server/tests/e2e/configuration.rs | use anyhow::Result;
use insta::assert_json_snapshot;
use ruff_db::system::SystemPath;
use serde_json::{Map, json};
use ty_server::{ClientOptions, WorkspaceOptions};
use crate::TestServerBuilder;
use crate::pull_diagnostics::filter_result_id;
/// A workspace can point the server at an explicit configuration file via the
/// `configuration_file` workspace option; its `[rules]` section (here:
/// `unresolved-reference` set to "warn") is applied to diagnostics, as
/// captured by the snapshot.
#[test]
fn configuration_file() -> Result<()> {
    let _filter = filter_result_id();

    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
return a
";

    let builder = TestServerBuilder::new()?;
    let settings_path = builder.file_path("ty2.toml");

    // Point the workspace at the out-of-tree settings file.
    let workspace_options = ClientOptions {
        workspace: WorkspaceOptions {
            configuration_file: Some(settings_path.to_string()),
            ..WorkspaceOptions::default()
        },
        ..ClientOptions::default()
    };

    let mut server = builder
        .with_workspace(workspace_root, Some(workspace_options))?
        .with_file(foo, foo_content)?
        .with_file(
            settings_path,
            r#"
[rules]
unresolved-reference="warn"
"#,
        )?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);
    let diagnostics = server.document_diagnostic_request(foo, None);
    assert_json_snapshot!(diagnostics);
    Ok(())
}
/// The configuration file misspells `[rules]` as `[rule]`, which fails project
/// loading: the server notifies the user via `window/showMessage` (type 1,
/// i.e. an error message) and the document diagnostic request is still
/// answered (snapshotted below).
#[test]
fn invalid_configuration_file() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
return a
";
let builder = TestServerBuilder::new()?;
let settings_path = builder.file_path("ty2.toml");
let mut server = builder
.with_workspace(
workspace_root,
Some(ClientOptions {
workspace: WorkspaceOptions {
configuration_file: Some(settings_path.to_string()),
..WorkspaceOptions::default()
},
..ClientOptions::default()
}),
)?
.with_file(foo, foo_content)?
.with_file(
settings_path,
r#"
[rule]
unresolved-reference="warn"
"#,
)?
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// The failed project load surfaces as a ShowMessage notification.
let show_message = server.await_notification::<lsp_types::notification::ShowMessage>();
let diagnostics = server.document_diagnostic_request(foo, None);
assert_json_snapshot!(show_message, @r#"
{
"type": 1,
"message": "Failed to load project for workspace file://<temp_dir>/src. Please refer to the logs for more details."
}
"#);
assert_json_snapshot!(diagnostics);
Ok(())
}
/// Diagnostic rule overrides supplied directly through the workspace
/// `configuration` option (no configuration file involved) are honored; the
/// snapshot records the resulting diagnostics.
#[test]
fn configuration_overrides() -> Result<()> {
    let _filter = filter_result_id();

    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
return a
";

    // Inline override: downgrade `unresolved-reference` to "warn".
    let rule_overrides = Map::from_iter([(
        "rules".to_string(),
        json!({"unresolved-reference": "warn"}),
    )]);
    let workspace_options = ClientOptions {
        workspace: WorkspaceOptions {
            configuration: Some(rule_overrides.into()),
            ..WorkspaceOptions::default()
        },
        ..ClientOptions::default()
    };

    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, Some(workspace_options))?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);
    let diagnostics = server.document_diagnostic_request(foo, None);
    assert_json_snapshot!(diagnostics);
    Ok(())
}
/// Supplies both a `configuration_file` (rule = "warn") and an inline
/// `configuration` override (rule = "ignore") for the same
/// `unresolved-reference` rule; the snapshot records which source takes
/// precedence.
#[test]
fn configuration_file_and_overrides() -> Result<()> {
let _filter = filter_result_id();
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
return a
";
let builder = TestServerBuilder::new()?;
let settings_path = builder.file_path("ty2.toml");
let mut server = builder
.with_workspace(
workspace_root,
Some(ClientOptions {
workspace: WorkspaceOptions {
configuration_file: Some(settings_path.to_string()),
configuration: Some(
Map::from_iter([(
"rules".to_string(),
json!({"unresolved-reference": "ignore"}),
)])
.into(),
),
..WorkspaceOptions::default()
},
..ClientOptions::default()
}),
)?
.with_file(foo, foo_content)?
.with_file(
settings_path,
r#"
[rules]
unresolved-reference="warn"
"#,
)?
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let diagnostics = server.document_diagnostic_request(foo, None);
assert_json_snapshot!(diagnostics);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/rename.rs | crates/ty_server/tests/e2e/rename.rs | use crate::TestServerBuilder;
use crate::notebook::NotebookBuilder;
use insta::assert_json_snapshot;
/// Renames a function that is referenced from the same text document and
/// snapshots the resulting edits.
#[test]
fn text_document() -> anyhow::Result<()> {
    let mut server = TestServerBuilder::new()?
        .with_file("foo.py", "")?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();

    let source = r#"def test(): ...
test()
"#;
    server.open_text_document("foo.py", source, 1);

    // The cursor sits on the name `test` inside `def test`.
    let cursor = lsp_types::Position {
        line: 0,
        character: 5,
    };
    let document_uri = server.file_uri("foo.py");
    let edits = server
        .rename(&document_uri, cursor, "new_name")
        .expect("Can rename `test` function");
    assert_json_snapshot!(edits);
    Ok(())
}
/// Renames a function parameter defined in one notebook cell and snapshots the
/// resulting edits. Position (0, 16) points at the `line` parameter in the
/// second cell's `def with_style(...)` signature.
#[test]
fn notebook() -> anyhow::Result<()> {
let mut server = TestServerBuilder::new()?
.with_file("test.ipynb", "")?
.enable_pull_diagnostics(true)
.build()
.wait_until_workspaces_are_initialized();
let mut builder = NotebookBuilder::virtual_file("test.ipynb");
builder.add_python_cell(
r#"from typing import Literal
type Style = Literal["italic", "bold", "underline"]"#,
);
// `cell2` is the URI of the cell we rename in.
let cell2 = builder.add_python_cell(
r#"def with_style(line: str, word, style: Style) -> str:
if style == "italic":
return line.replace(word, f"*{word}*")
elif style == "bold":
return line.replace(word, f"__{word}__")
position = line.find(word)
output = line + "\n"
output += " " * position
output += "-" * len(word)
"#,
);
builder.open(&mut server);
let edits = server
.rename(
&cell2,
lsp_types::Position {
line: 0,
character: 16,
},
"text",
)
.expect("Can rename `line` parameter");
assert_json_snapshot!(edits);
// Drain the publish-diagnostics notifications emitted for the notebook cells.
server.collect_publish_diagnostic_notifications(2);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/semantic_tokens.rs | crates/ty_server/tests/e2e/semantic_tokens.rs | use anyhow::Result;
use ruff_db::system::SystemPath;
use crate::TestServerBuilder;
/// Snapshot of the semantic tokens for a function with a multi-line docstring
/// when the client reports *no* support for multi-line tokens.
#[test]
fn multiline_token_client_not_supporting_multiline_tokens() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    // The docstring spans several lines and therefore produces a multi-line
    // string token on the server side.
    let foo_content = r#"def my_function(param1: int, param2: str) -> bool:
"""Example function with PEP 484 type annotations.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
The return value. True for success, False otherwise.
"""
"#;

    let server_builder = TestServerBuilder::new()?
        .enable_pull_diagnostics(true)
        .enable_multiline_token_support(false)
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content)?;
    let mut server = server_builder
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);

    let document_uri = server.file_uri(foo);
    let tokens = server.semantic_tokens_full_request(&document_uri);
    insta::assert_json_snapshot!(tokens);
    Ok(())
}
/// Snapshot of the semantic tokens for a function with a multi-line docstring
/// when the client *does* support multi-line tokens.
#[test]
fn multiline_token_client_supporting_multiline_tokens() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    // Same fixture as the non-supporting variant above: a docstring spanning
    // several lines yields a multi-line string token.
    let foo_content = r#"def my_function(param1: int, param2: str) -> bool:
"""Example function with PEP 484 type annotations.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
The return value. True for success, False otherwise.
"""
"#;

    let server_builder = TestServerBuilder::new()?
        .enable_pull_diagnostics(true)
        .enable_multiline_token_support(true)
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content)?;
    let mut server = server_builder
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);

    let document_uri = server.file_uri(foo);
    let tokens = server.semantic_tokens_full_request(&document_uri);
    insta::assert_json_snapshot!(tokens);
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/inlay_hints.rs | crates/ty_server/tests/e2e/inlay_hints.rs | use anyhow::Result;
use lsp_types::{Position, Range, notification::PublishDiagnostics};
use ruff_db::system::SystemPath;
use ty_server::ClientOptions;
use crate::TestServerBuilder;
/// Tests that the default value of inlay hints settings is correct i.e., they're all enabled
/// by default.
#[test]
fn default_inlay_hints() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// `y = foo(1)` exercises both hint kinds checked below: the variable-type
// hint on `y` and the parameter-name hint on the call argument.
let foo_content = "\
x = 1
def foo(a: int) -> int:
    return a + 1
y = foo(1)
";
let mut server = TestServerBuilder::new()?
// Default options: no inlay-hint setting is overridden.
.with_initialization_options(ClientOptions::default())
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.enable_inlay_hints(true)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Drain the initial publish-diagnostics notification before querying.
let _ = server.await_notification::<PublishDiagnostics>();
let hints = server
.inlay_hints_request(foo, Range::new(Position::new(0, 0), Position::new(6, 0)))
.unwrap();
// Expected: a `: int` variable-type hint (kind 1) with a text edit, and an
// `a=` parameter-name hint (kind 2) pointing at the parameter definition.
insta::assert_json_snapshot!(hints, @r#"
[
{
"position": {
"line": 5,
"character": 1
},
"label": [
{
"value": ": "
},
{
"value": "int",
"location": {
"uri": "file://<typeshed>/stdlib/builtins.pyi",
"range": {
"start": {
"line": 347,
"character": 6
},
"end": {
"line": 347,
"character": 9
}
}
}
}
],
"kind": 1,
"textEdits": [
{
"range": {
"start": {
"line": 5,
"character": 1
},
"end": {
"line": 5,
"character": 1
}
},
"newText": ": int"
}
]
},
{
"position": {
"line": 5,
"character": 8
},
"label": [
{
"value": "a",
"location": {
"uri": "file://<temp_dir>/src/foo.py",
"range": {
"start": {
"line": 2,
"character": 8
},
"end": {
"line": 2,
"character": 9
}
}
}
},
{
"value": "="
}
],
"kind": 2,
"textEdits": []
}
]
"#);
Ok(())
}
/// Turning off the variable-types inlay-hint setting must suppress the type
/// hint that would otherwise be produced for `x = 1`.
#[test]
fn variable_inlay_hints_disabled() -> Result<()> {
    let root = SystemPath::new("src");
    let file = SystemPath::new("src/foo.py");
    let source = "x = 1";

    // Disable variable-type hints via the initialization options.
    let options = ClientOptions::default().with_variable_types_inlay_hints(false);

    let mut server = TestServerBuilder::new()?
        .with_initialization_options(options)
        .with_workspace(root, None)?
        .with_file(file, source)?
        .enable_inlay_hints(true)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(file, source, 1);
    // Drain the initial publish-diagnostics notification before querying.
    let _ = server.await_notification::<PublishDiagnostics>();

    let query_range = Range::new(Position::new(0, 0), Position::new(0, 5));
    let hints = server.inlay_hints_request(file, query_range).unwrap();

    assert!(
        hints.is_empty(),
        "Expected no inlay hints, but found: {hints:?}"
    );
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/initialize.rs | crates/ty_server/tests/e2e/initialize.rs | use anyhow::Result;
use lsp_types::notification::ShowMessage;
use lsp_types::{Position, request::RegisterCapability};
use ruff_db::system::SystemPath;
use serde_json::Value;
use ty_server::{ClientOptions, DiagnosticMode};
use crate::TestServerBuilder;
/// Initializing with no workspace folders at all must still succeed; the
/// server's initialize result is pinned by the "initialization" snapshot.
#[test]
fn empty_workspace_folders() -> Result<()> {
    let server = TestServerBuilder::new()?
        .build()
        .wait_until_workspaces_are_initialized();

    let result = server.initialization_result().unwrap();
    insta::assert_json_snapshot!("initialization", result);
    Ok(())
}
/// Initializing with exactly one workspace folder; the initialize result is
/// pinned by the "initialization_with_workspace" snapshot.
#[test]
fn single_workspace_folder() -> Result<()> {
    let folder = SystemPath::new("foo");

    let server = TestServerBuilder::new()?
        .with_workspace(folder, None)?
        .build()
        .wait_until_workspaces_are_initialized();

    let result = server.initialization_result().unwrap();
    insta::assert_json_snapshot!("initialization_with_workspace", result);
    Ok(())
}
/// Tests that the server sends a registration request for diagnostics if workspace diagnostics
/// are enabled via initialization options and dynamic registration is enabled, even if the
/// workspace configuration is not supported by the client.
#[test]
fn workspace_diagnostic_registration_without_configuration() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
)
.with_workspace(workspace_root, None)?
// The client deliberately lacks `workspace/configuration` support here.
.enable_workspace_configuration(false)
.enable_diagnostic_dynamic_registration(true)
.build();
// No need to wait for workspaces to initialize as the client does not support workspace
// configuration.
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
// Workspace mode: both `workDoneProgress` and `workspaceDiagnostics` are true.
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": true,
"workspaceDiagnostics": true
}
}
"#);
Ok(())
}
/// Tests that the server sends a registration request for diagnostics if open files diagnostics
/// are enabled via initialization options and dynamic registration is enabled, even if the
/// workspace configuration is not supported by the client.
#[test]
fn open_files_diagnostic_registration_without_configuration() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::OpenFilesOnly),
)
.with_workspace(workspace_root, None)?
// The client deliberately lacks `workspace/configuration` support here.
.enable_workspace_configuration(false)
.enable_diagnostic_dynamic_registration(true)
.build();
// No need to wait for workspaces to initialize as the client does not support workspace
// configuration.
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
// Open-files mode: `workDoneProgress` and `workspaceDiagnostics` are false,
// in contrast to the workspace-mode variant of this test.
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": false,
"workspaceDiagnostics": false
}
}
"#);
Ok(())
}
/// Tests that the server sends a registration request for diagnostics if workspace diagnostics
/// are enabled via initialization options and dynamic registration is enabled.
#[test]
fn workspace_diagnostic_registration_via_initialization() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
)
.with_workspace(workspace_root, None)?
.enable_diagnostic_dynamic_registration(true)
.build()
.wait_until_workspaces_are_initialized();
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
// Workspace mode: both `workDoneProgress` and `workspaceDiagnostics` are true.
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": true,
"workspaceDiagnostics": true
}
}
"#);
Ok(())
}
/// Tests that the server sends a registration request for diagnostics if open files diagnostics
/// are enabled via initialization options and dynamic registration is enabled.
#[test]
fn open_files_diagnostic_registration_via_initialization() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::OpenFilesOnly),
)
.with_workspace(workspace_root, None)?
.enable_diagnostic_dynamic_registration(true)
.build()
.wait_until_workspaces_are_initialized();
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
// Open-files mode: `workDoneProgress` and `workspaceDiagnostics` are false.
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": false,
"workspaceDiagnostics": false
}
}
"#);
Ok(())
}
/// Tests that the server sends a registration request for diagnostics if workspace diagnostics
/// are enabled and dynamic registration is enabled.
#[test]
fn workspace_diagnostic_registration() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
// Unlike the `_via_initialization` variant, the diagnostic mode is set
// per-workspace here rather than via initialization options.
.with_workspace(
workspace_root,
Some(ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace)),
)?
.enable_diagnostic_dynamic_registration(true)
.build()
.wait_until_workspaces_are_initialized();
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": true,
"workspaceDiagnostics": true
}
}
"#);
Ok(())
}
/// Tests that the server sends a registration request for diagnostics if workspace diagnostics are
/// disabled and dynamic registration is enabled.
#[test]
fn open_files_diagnostic_registration() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
// Diagnostic mode is configured per-workspace, not via initialization.
.with_workspace(
workspace_root,
Some(ClientOptions::default().with_diagnostic_mode(DiagnosticMode::OpenFilesOnly)),
)?
.enable_diagnostic_dynamic_registration(true)
.build()
.wait_until_workspaces_are_initialized();
let (_, params) = server.await_request::<RegisterCapability>();
// Exactly one dynamic registration is expected: the diagnostic one.
let [registration] = params.registrations.as_slice() else {
panic!(
"Expected a single registration, got: {:#?}",
params.registrations
);
};
// Open-files mode: `workDoneProgress` and `workspaceDiagnostics` are false.
insta::assert_json_snapshot!(registration, @r#"
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": false,
"workspaceDiagnostics": false
}
}
"#);
Ok(())
}
/// Tests that the server can disable language services for a workspace via initialization options.
#[test]
fn disable_language_services_set_on_initialization() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
// Language services (hover, etc.) are switched off globally here.
.with_initialization_options(ClientOptions::default().with_disable_language_services(true))
.with_workspace(workspace_root, None)?
.enable_pull_diagnostics(true)
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// A hover over `foo` would normally return information; with language
// services disabled it must return `None`.
let hover = server.hover_request(foo, Position::new(0, 5));
assert!(
hover.is_none(),
"Expected no hover information, got: {hover:?}"
);
Ok(())
}
/// Tests that the server can disable language services for a workspace via workspace configuration
/// request.
#[test]
fn disable_language_services_set_on_workspace() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
// The disable flag is attached to the workspace, not to initialization.
.with_workspace(
workspace_root,
Some(ClientOptions::default().with_disable_language_services(true)),
)?
.enable_pull_diagnostics(true)
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Hover must be suppressed for files in this workspace.
let hover = server.hover_request(foo, Position::new(0, 5));
assert!(
hover.is_none(),
"Expected no hover information, got: {hover:?}"
);
Ok(())
}
/// Tests that the server can disable language services for one workspace while keeping them
/// enabled for another.
#[test]
#[ignore = "Requires multiple workspace support in the server and test server"]
fn disable_language_services_for_one_workspace() -> Result<()> {
let workspace_a = SystemPath::new("src/a");
let workspace_b = SystemPath::new("src/b");
let foo = SystemPath::new("src/a/foo.py");
let bar = SystemPath::new("src/b/bar.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let bar_content = "\
def bar() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
// Workspace A disables language services; workspace B keeps defaults.
.with_workspace(
workspace_a,
Some(ClientOptions::default().with_disable_language_services(true)),
)?
.with_workspace(workspace_b, None)?
.enable_pull_diagnostics(true)
.with_file(foo, foo_content)?
.with_file(bar, bar_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Workspace A: hover must be suppressed.
let hover_foo = server.hover_request(foo, Position::new(0, 5));
assert!(
hover_foo.is_none(),
"Expected no hover information for workspace A, got: {hover_foo:?}"
);
server.open_text_document(bar, bar_content, 1);
// Workspace B: hover must still work.
let hover_bar = server.hover_request(bar, Position::new(0, 5));
assert!(
hover_bar.is_some(),
"Expected hover information for workspace B, got: {hover_bar:?}"
);
Ok(())
}
/// Tests that the server sends a warning notification if user provided unknown options during
/// initialization.
#[test]
fn unknown_initialization_options() -> Result<()> {
    let workspace_root = SystemPath::new("foo");

    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_initialization_options(
            // `bar` is not a recognized ty option, so the server should warn.
            ClientOptions::default().with_unknown([("bar".to_string(), Value::Null)].into()),
        )
        .build()
        .wait_until_workspaces_are_initialized();

    let show_message_params = server.await_notification::<ShowMessage>();

    // The warning message embeds the unknown options serialized as JSON, so
    // the inner quotes appear as `\"` escapes (the snapshot previously
    // contained the invalid sequence `/"bar/"`, which is not a JSON escape).
    insta::assert_json_snapshot!(show_message_params, @r#"
    {
      "type": 2,
      "message": "Received unknown options during initialization: {\n  \"bar\": null\n}"
    }
    "#);
    Ok(())
}
/// Tests that the server sends a warning notification if user provided unknown options in the
/// workspace configuration.
#[test]
fn unknown_options_in_workspace_configuration() -> Result<()> {
    let workspace_root = SystemPath::new("foo");

    let mut server = TestServerBuilder::new()?
        .with_workspace(
            workspace_root,
            // `bar` is not a recognized ty option, so the server should warn.
            Some(ClientOptions::default().with_unknown([("bar".to_string(), Value::Null)].into())),
        )?
        .build()
        .wait_until_workspaces_are_initialized();

    let show_message_params = server.await_notification::<ShowMessage>();

    // The warning message embeds the unknown options serialized as JSON, so
    // the inner quotes appear as `\"` escapes (the snapshot previously
    // contained the invalid sequence `/"bar/"`, which is not a JSON escape).
    insta::assert_json_snapshot!(show_message_params, @r#"
    {
      "type": 2,
      "message": "Received unknown options for workspace `file://<temp_dir>/foo`: {\n  \"bar\": null\n}"
    }
    "#);
    Ok(())
}
/// Tests that the server can register multiple capabilities at once.
///
/// This test would need to be updated when the server supports additional capabilities in the
/// future.
///
/// TODO: This test currently only verifies a single capability. It should be
/// updated with more dynamic capabilities when the server supports it.
#[test]
fn register_multiple_capabilities() -> Result<()> {
let workspace_root = SystemPath::new("foo");
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_initialization_options(
ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace),
)
.enable_diagnostic_dynamic_registration(true)
.build()
.wait_until_workspaces_are_initialized();
let (_, params) = server.await_request::<RegisterCapability>();
// Snapshot the full registration list (currently a single entry).
let registrations = params.registrations;
insta::assert_json_snapshot!(registrations, @r#"
[
{
"id": "ty/textDocument/diagnostic",
"method": "textDocument/diagnostic",
"registerOptions": {
"documentSelector": null,
"identifier": "ty",
"interFileDependencies": true,
"workDoneProgress": true,
"workspaceDiagnostics": true
}
}
]
"#);
Ok(())
}
/// Tests that the server doesn't panic when `VIRTUAL_ENV` points to a non-existent directory.
///
/// See: <https://github.com/astral-sh/ty/issues/2031>
#[test]
fn missing_virtual_env_does_not_panic() -> Result<()> {
let workspace_root = SystemPath::new("project");
// This should not panic even though VIRTUAL_ENV points to a non-existent path
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_env_var("VIRTUAL_ENV", "/nonexistent/virtual/env/path")
.build()
.wait_until_workspaces_are_initialized();
// The server is expected to surface the problem via a ShowMessage
// notification instead of crashing; only its arrival is asserted.
let _show_message_params = server.await_notification::<ShowMessage>();
// Something accursed in the escaping pipeline produces `\/` in windows paths
// and I can't for the life of me get insta to escape it properly, so I just
// need to move on with my life and not debug this right now, but ideally we
// would snapshot the message here.
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/commands.rs | crates/ty_server/tests/e2e/commands.rs | use anyhow::Result;
use lsp_types::{ExecuteCommandParams, WorkDoneProgressParams, request::ExecuteCommand};
use ruff_db::system::SystemPath;
use crate::{TestServer, TestServerBuilder};
/// Issues a `workspace/executeCommand` request to the test server and blocks
/// until the server's reply arrives, returning its (optional) payload.
fn execute_command(
    server: &mut TestServer,
    command: String,
    arguments: Vec<serde_json::Value>,
) -> Option<serde_json::Value> {
    let request_id = server.send_request::<ExecuteCommand>(ExecuteCommandParams {
        command,
        arguments,
        work_done_progress_params: WorkDoneProgressParams::default(),
    });
    server.await_response::<ExecuteCommand>(&request_id)
}
/// Executes the `ty.printDebugInformation` command and snapshots its textual
/// output, with volatile parts (sizes, paths, rule lists) filtered out.
#[test]
fn debug_command() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
let response = execute_command(&mut server, "ty.printDebugInformation".to_string(), vec![]);
let response = response.expect("expect server response");
// The debug command returns a plain string payload.
let response = response
.as_str()
.expect("debug command to return a string response");
// Normalize machine-dependent output (memory usage, temp-dir paths and the
// configured rule set) so the snapshot is stable across environments.
insta::with_settings!({
filters => vec![
(r"\b[0-9]+.[0-9]+MB\b", "[X.XXMB]"),
(r"Workspace .+\)", "Workspace XXX"),
(r"Project at .+", "Project at XXX"),
(r"rules: \{(.|\n)+?\}\,", "rules: <RULES>,"),
]}, {
insta::assert_snapshot!(response);
});
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/completions.rs | crates/ty_server/tests/e2e/completions.rs | use anyhow::Result;
use lsp_types::{Position, notification::PublishDiagnostics};
use ruff_db::system::SystemPath;
use ty_server::ClientOptions;
use crate::TestServerBuilder;
/// Tests that auto-import is enabled by default.
#[test]
fn default_auto_import() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// An unresolved prefix of `walktree` — completing it requires an import.
let foo_content = "\
walktr
";
let mut server = TestServerBuilder::new()?
// Default options: auto-import is not explicitly configured.
.with_initialization_options(ClientOptions::default())
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let _ = server.await_notification::<PublishDiagnostics>();
// Cursor directly after `walktr` (line 0, column 6).
let completions = server.completion_request(&server.file_uri(foo), Position::new(0, 6));
// The completion carries an additional text edit inserting the import.
insta::assert_json_snapshot!(completions, @r#"
[
{
"label": "walktree (import inspect)",
"kind": 3,
"sortText": "0",
"insertText": "walktree",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from inspect import walktree\n"
}
]
}
]
"#);
Ok(())
}
/// With auto-import disabled, completing the unresolved prefix `walktr`
/// yields no suggestions at all.
#[test]
fn disable_auto_import() -> Result<()> {
    let root = SystemPath::new("src");
    let file = SystemPath::new("src/foo.py");
    let content = "\
walktr
";

    let mut server = TestServerBuilder::new()?
        .with_initialization_options(ClientOptions::default().with_auto_import(false))
        .with_workspace(root, None)?
        .with_file(file, content)?
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(file, content, 1);
    let _ = server.await_notification::<PublishDiagnostics>();

    // Cursor directly after `walktr` (line 0, column 6).
    let completions = server.completion_request(&server.file_uri(file), Position::new(0, 6));
    insta::assert_json_snapshot!(completions, @"[]");
    Ok(())
}
/// Tests that auto-import completions show the fully
/// qualified form when it will insert it for you. Also,
/// that an `import` won't be shown when it won't
/// actually be inserted.
#[test]
fn auto_import_shows_qualification() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// `typing` is already imported, so `typing.*` completions need no edit.
let foo_content = "\
import typing
TypedDi<CURSOR>
";
let mut server = TestServerBuilder::new()?
.with_initialization_options(ClientOptions::default())
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let _ = server.await_notification::<PublishDiagnostics>();
// NOTE(review): the request targets line 2, column 7, which does not
// obviously line up with the two-line snippet above (and `<CURSOR>` appears
// as literal file text) — possibly an artifact of this copy; verify against
// the upstream test before relying on the positions.
let completions = server.completion_request(&server.file_uri(foo), Position::new(2, 7));
insta::assert_json_snapshot!(completions, @r#"
[
{
"label": "typing.TypedDict",
"kind": 6,
"sortText": "0",
"insertText": "typing.TypedDict"
},
{
"label": "typing.is_typeddict",
"kind": 3,
"sortText": "1",
"insertText": "typing.is_typeddict"
},
{
"label": "_FilterConfigurationTypedDict (import logging.config)",
"kind": 7,
"sortText": "2",
"insertText": "_FilterConfigurationTypedDict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from logging.config import _FilterConfigurationTypedDict\n"
}
]
},
{
"label": "_FormatterConfigurationTypedDict (import logging.config)",
"kind": 6,
"sortText": "3",
"insertText": "_FormatterConfigurationTypedDict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from logging.config import _FormatterConfigurationTypedDict\n"
}
]
}
]
"#);
Ok(())
}
/// Tests that auto-import completions show the fully
/// qualified form when it will insert it for you *and*
/// will also show the import when it will be inserted.
#[test]
fn auto_import_shows_qualification_and_import() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// No existing import — every completion must bring its own import edit.
let foo_content = "\
TypedDi<CURSOR>
";
let mut server = TestServerBuilder::new()?
.with_initialization_options(ClientOptions::default())
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let _ = server.await_notification::<PublishDiagnostics>();
// Cursor after `TypedDi` (line 0, column 7).
let completions = server.completion_request(&server.file_uri(foo), Position::new(0, 7));
insta::assert_json_snapshot!(completions, @r#"
[
{
"label": "TypedDict (import typing)",
"kind": 6,
"sortText": "0",
"insertText": "TypedDict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from typing import TypedDict\n"
}
]
},
{
"label": "is_typeddict (import typing)",
"kind": 3,
"sortText": "1",
"insertText": "is_typeddict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from typing import is_typeddict\n"
}
]
},
{
"label": "_FilterConfigurationTypedDict (import logging.config)",
"kind": 7,
"sortText": "2",
"insertText": "_FilterConfigurationTypedDict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from logging.config import _FilterConfigurationTypedDict\n"
}
]
},
{
"label": "_FormatterConfigurationTypedDict (import logging.config)",
"kind": 6,
"sortText": "3",
"insertText": "_FormatterConfigurationTypedDict",
"additionalTextEdits": [
{
"range": {
"start": {
"line": 0,
"character": 0
},
"end": {
"line": 0,
"character": 0
}
},
"newText": "from logging.config import _FormatterConfigurationTypedDict\n"
}
]
}
]
"#);
Ok(())
}
/// Tests that completions for function arguments will
/// show a `=` suffix.
#[test]
fn function_parameter_shows_equals_suffix() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// Cursor sits inside a `re.match(...)` call, after typing `fla`.
let foo_content = "\
import re
re.match('', '', fla<CURSOR>
";
let mut server = TestServerBuilder::new()?
// Auto-import disabled to keep the completion list small and local.
.with_initialization_options(ClientOptions::default().with_auto_import(false))
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let _ = server.await_notification::<PublishDiagnostics>();
// Line 1, column 20: directly after `fla` in the call.
let completions = server.completion_request(&server.file_uri(foo), Position::new(1, 20));
// The keyword-argument candidate `flags=` sorts first and carries the `=`.
insta::assert_json_snapshot!(completions, @r#"
[
{
"label": "flags=",
"kind": 6,
"detail": "int",
"sortText": "0",
"insertText": "flags="
},
{
"label": "FloatingPointError",
"kind": 7,
"detail": "<class 'FloatingPointError'>",
"documentation": {
"kind": "plaintext",
"value": "Floating-point operation failed.\n"
},
"sortText": "1"
},
{
"label": "PythonFinalizationError",
"kind": 7,
"detail": "<class 'PythonFinalizationError'>",
"documentation": {
"kind": "plaintext",
"value": "Operation blocked during Python finalization.\n"
},
"sortText": "2"
},
{
"label": "float",
"kind": 7,
"detail": "<class 'float'>",
"documentation": {
"kind": "plaintext",
"value": "Convert a string or number to a floating-point number, if possible.\n"
},
"sortText": "3"
}
]
"#);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/publish_diagnostics.rs | crates/ty_server/tests/e2e/publish_diagnostics.rs | use std::time::Duration;
use anyhow::Result;
use lsp_types::{
DidOpenTextDocumentParams, FileChangeType, FileEvent, TextDocumentItem,
notification::{DidOpenTextDocument, PublishDiagnostics},
};
use ruff_db::system::SystemPath;
use ty_server::ClientOptions;
use crate::TestServerBuilder;
/// Opening a document while pull diagnostics are disabled makes the server
/// push a `textDocument/publishDiagnostics` notification for it; the payload
/// is pinned by snapshot.
#[test]
fn on_did_open() -> Result<()> {
    let root = SystemPath::new("src");
    let file = SystemPath::new("src/foo.py");
    // Ill-typed source, so diagnostics are expected on open.
    let source = "\
def foo() -> str:
return 42
";

    let mut server = TestServerBuilder::new()?
        .with_workspace(root, None)?
        .with_file(file, source)?
        .enable_pull_diagnostics(false)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(file, source, 1);

    let published = server.await_notification::<PublishDiagnostics>();
    insta::assert_debug_snapshot!(published);
    Ok(())
}
/// With the per-workspace diagnostic mode set to `Off`, opening a document
/// must NOT trigger a publish-diagnostics notification.
#[test]
fn on_did_open_diagnostics_off() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
.with_workspace(
workspace_root,
Some(ClientOptions::default().with_diagnostic_mode(ty_server::DiagnosticMode::Off)),
)?
.with_file(foo, foo_content)?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Negative test: wait only briefly and expect a timeout (Err).
let diagnostics =
server.try_await_notification::<PublishDiagnostics>(Some(Duration::from_millis(100)));
assert!(
diagnostics.is_err(),
"Server should not send a publish diagnostics notification when diagnostics are off"
);
Ok(())
}
/// A `didChange` with a full-document replacement must trigger a fresh
/// publish-diagnostics notification carrying the new document version.
#[test]
fn on_did_change() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Drain the diagnostics from the initial `didOpen`.
let _ = server.await_notification::<PublishDiagnostics>();
// `range: None` means a full-document content replacement.
let changes = vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: "def foo() -> int: return 42".to_string(),
}];
server.change_text_document(foo, changes, 2);
let diagnostics = server.await_notification::<PublishDiagnostics>();
// The published diagnostics must reference document version 2.
assert_eq!(diagnostics.version, Some(2));
insta::assert_debug_snapshot!(diagnostics);
Ok(())
}
/// With the diagnostic mode `Off`, a `didChange` must not trigger a
/// publish-diagnostics notification either.
#[test]
fn on_did_change_diagnostics_off() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
let foo_content = "\
def foo() -> str:
    return 42
";
let mut server = TestServerBuilder::new()?
.with_workspace(
workspace_root,
Some(ClientOptions::default().with_diagnostic_mode(ty_server::DiagnosticMode::Off)),
)?
.with_file(foo, foo_content)?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
// Full-document replacement (range: None).
let changes = vec![lsp_types::TextDocumentContentChangeEvent {
range: None,
range_length: None,
text: "def foo() -> int: return 42".to_string(),
}];
server.change_text_document(foo, changes, 2);
// Negative test: wait only briefly and expect a timeout (Err).
let diagnostics =
server.try_await_notification::<PublishDiagnostics>(Some(Duration::from_millis(100)));
assert!(
diagnostics.is_err(),
"Server should not send a publish diagnostics notification when diagnostics are off"
);
Ok(())
}
/// Snapshot of the published diagnostics when the client does NOT advertise
/// `relatedInformation` support (builder default); compare with the
/// `message_with_related_information_support` test below.
#[test]
fn message_without_related_information_support() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// `assert_type` failure produces a diagnostic with extra context.
let foo_content = r#"
from typing import assert_type
assert_type("test", list[str])
"#;
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let diagnostics = server.await_notification::<PublishDiagnostics>();
insta::assert_debug_snapshot!(diagnostics);
Ok(())
}
/// Same scenario as above, but with `relatedInformation` support enabled on
/// the client — the snapshot captures how the diagnostic payload differs.
#[test]
fn message_with_related_information_support() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// Same `assert_type` failure as the test without related information.
let foo_content = r#"
from typing import assert_type
assert_type("test", list[str])
"#;
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
.with_file(foo, foo_content)?
// Capability under test.
.enable_diagnostic_related_information(true)
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
server.open_text_document(foo, foo_content, 1);
let diagnostics = server.await_notification::<PublishDiagnostics>();
insta::assert_debug_snapshot!(diagnostics);
Ok(())
}
/// Changing a watched file on disk while the same document is open in the
/// editor: the in-memory (didOpen) content wins over the on-disk content.
#[test]
fn on_did_change_watched_files() -> Result<()> {
let workspace_root = SystemPath::new("src");
let foo = SystemPath::new("src/foo.py");
// On-disk content written later; references undefined `a`.
let foo_content = "\
def foo() -> str:
    print(a)
";
let mut server = TestServerBuilder::new()?
.with_workspace(workspace_root, None)?
// The file starts out empty on disk and in the editor.
.with_file(foo, "")?
.enable_pull_diagnostics(false)
.build()
.wait_until_workspaces_are_initialized();
let foo = server.file_path(foo);
server.open_text_document(&foo, "", 1);
let _open_diagnostics = server.await_notification::<PublishDiagnostics>();
// Modify the file on disk behind the editor's back, then tell the server.
std::fs::write(&foo, foo_content)?;
server.did_change_watched_files(vec![FileEvent {
uri: server.file_uri(foo),
typ: FileChangeType::CHANGED,
}]);
let diagnostics = server.await_notification::<PublishDiagnostics>();
// Note how ty reports no diagnostics here. This is because
// the contents received by didOpen/didChange take precedence over the file
// content on disk. Or, more specifically, because the revision
// of the file is not bumped, because it still uses the version
// from the `didOpen` notification but we don't have any notification
// that we can use here.
insta::assert_json_snapshot!(diagnostics);
Ok(())
}
/// When the client supports pull diagnostics, the server must not push a
/// `publishDiagnostics` notification in response to a watched-file change.
#[test]
fn on_did_change_watched_files_pull_diagnostics() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
def foo() -> str:
    print(a)
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(workspace_root, None)?
        .with_file(foo, "")?
        .enable_pull_diagnostics(true)
        .build()
        .wait_until_workspaces_are_initialized();

    let foo = server.file_path(foo);
    server.open_text_document(&foo, "", 1);

    std::fs::write(&foo, foo_content)?;

    server.did_change_watched_files(vec![FileEvent {
        uri: server.file_uri(foo),
        typ: FileChangeType::CHANGED,
    }]);

    // Use a short timeout: we *expect* no notification to arrive at all.
    let diagnostics =
        server.try_await_notification::<PublishDiagnostics>(Some(Duration::from_millis(100)));

    assert!(
        diagnostics.is_err(),
        "Server should not send a publish diagnostic notification if the client supports pull diagnostics"
    );

    Ok(())
}
/// A file without an extension is still analyzed as Python when the client
/// opens it with `language_id: "python"` (via `open_text_document`).
#[test]
fn on_did_open_file_without_extension_but_python_language() -> Result<()> {
    let foo = SystemPath::new("src/foo");
    // `return 42` from a `-> str` function yields a type diagnostic,
    // proving the extension-less file was analyzed as Python.
    let foo_content = "\
def foo() -> str:
    return 42
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(SystemPath::new("src"), None)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(false)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);

    let diagnostics = server.await_notification::<PublishDiagnostics>();
    insta::assert_debug_snapshot!(diagnostics);

    Ok(())
}
/// Re-opening an extension-less document with a non-Python `language_id`
/// changes how the server treats the file's contents.
#[test]
fn changing_language_of_file_without_extension() -> Result<()> {
    let foo = SystemPath::new("src/foo");
    let foo_content = "\
def foo() -> str:
    return 42
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(SystemPath::new("src"), None)?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(false)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);

    let diagnostics = server.await_notification::<PublishDiagnostics>();
    insta::assert_debug_snapshot!(diagnostics);

    server.close_text_document(foo);

    // Re-open the same document, but this time as plain text rather than
    // Python (`open_text_document` would hard-code "python").
    let params = DidOpenTextDocumentParams {
        text_document: TextDocumentItem {
            uri: server.file_uri(foo),
            language_id: "text".to_string(),
            version: 1,
            text: foo_content.to_string(),
        },
    };
    server.send_notification::<DidOpenTextDocument>(params);

    // The close above triggers its own publish-diagnostics notification;
    // consume it so the next await sees the re-open's diagnostics.
    let _close_diagnostics = server.await_notification::<PublishDiagnostics>();

    let diagnostics = server.await_notification::<PublishDiagnostics>();
    insta::assert_debug_snapshot!(diagnostics);

    Ok(())
}
/// With `show_syntax_errors(false)` in the workspace options, syntax errors
/// are suppressed from the published diagnostics.
#[test]
fn invalid_syntax_with_syntax_errors_disabled() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    // Deliberately unterminated function header → a syntax error.
    let foo_content = "\
def foo(
";
    let mut server = TestServerBuilder::new()?
        .with_workspace(
            workspace_root,
            Some(ClientOptions::default().with_show_syntax_errors(false)),
        )?
        .with_file(foo, foo_content)?
        .enable_pull_diagnostics(false)
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);

    let diagnostics = server.await_notification::<PublishDiagnostics>();
    insta::assert_debug_snapshot!(diagnostics);

    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/main.rs | crates/ty_server/tests/e2e/main.rs | //! Testing server for the ty language server.
//!
//! This module provides mock server infrastructure for testing LSP functionality using a
//! temporary directory on the real filesystem.
//!
//! The design is inspired by the Starlark LSP test server but adapted for ty server architecture.
//!
//! To get started, use the [`TestServerBuilder`] to configure the server with workspace folders,
//! enable or disable specific client capabilities, and add test files. Then, use the [`build`]
//! method to create the [`TestServer`]. This will start the server and perform the initialization
//! handshake. It might be useful to call [`wait_until_workspaces_are_initialized`] to ensure that
//! the server side initialization is complete before sending any requests.
//!
//! Once the setup is done, you can use the server to [`send_request`] and [`send_notification`] to
//! send messages to the server and [`await_response`], [`await_request`], and
//! [`await_notification`] to wait for responses, requests, and notifications from the server.
//!
//! The [`Drop`] implementation of the [`TestServer`] ensures that the server is shut down
//! gracefully using the LSP protocol. It also asserts that all messages sent by the server
//! have been handled by the test client before the server is dropped.
//!
//! [`build`]: TestServerBuilder::build
//! [`wait_until_workspaces_are_initialized`]: TestServer::wait_until_workspaces_are_initialized
//! [`send_request`]: TestServer::send_request
//! [`send_notification`]: TestServer::send_notification
//! [`await_response`]: TestServer::await_response
//! [`await_request`]: TestServer::await_request
//! [`await_notification`]: TestServer::await_notification
mod code_actions;
mod commands;
mod completions;
mod configuration;
mod initialize;
mod inlay_hints;
mod notebook;
mod publish_diagnostics;
mod pull_diagnostics;
mod rename;
mod semantic_tokens;
mod signature_help;
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::num::NonZeroUsize;
use std::sync::{Arc, OnceLock};
use std::thread::JoinHandle;
use std::time::Duration;
use std::{fmt, fs};
use anyhow::{Context, Result, anyhow};
use crossbeam::channel::RecvTimeoutError;
use insta::internals::SettingsBindDropGuard;
use lsp_server::{Connection, Message, RequestId, Response, ResponseError};
use lsp_types::notification::{
DidChangeTextDocument, DidChangeWatchedFiles, DidCloseTextDocument, DidOpenTextDocument, Exit,
Initialized, Notification,
};
use lsp_types::request::{
Completion, DocumentDiagnosticRequest, HoverRequest, Initialize, InlayHintRequest,
PrepareRenameRequest, Request, Shutdown, SignatureHelpRequest, WorkspaceConfiguration,
WorkspaceDiagnosticRequest,
};
use lsp_types::{
ClientCapabilities, CompletionItem, CompletionParams, CompletionResponse,
CompletionTriggerKind, ConfigurationParams, DiagnosticClientCapabilities,
DidChangeTextDocumentParams, DidChangeWatchedFilesClientCapabilities,
DidChangeWatchedFilesParams, DidCloseTextDocumentParams, DidOpenTextDocumentParams,
DocumentDiagnosticParams, DocumentDiagnosticReportResult, FileEvent, Hover, HoverParams,
InitializeParams, InitializeResult, InitializedParams, InlayHint, InlayHintClientCapabilities,
InlayHintParams, NumberOrString, PartialResultParams, Position, PreviousResultId,
PublishDiagnosticsClientCapabilities, Range, SemanticTokensResult, SignatureHelp,
SignatureHelpParams, SignatureHelpTriggerKind, TextDocumentClientCapabilities,
TextDocumentContentChangeEvent, TextDocumentIdentifier, TextDocumentItem,
TextDocumentPositionParams, Url, VersionedTextDocumentIdentifier, WorkDoneProgressParams,
WorkspaceClientCapabilities, WorkspaceDiagnosticParams, WorkspaceDiagnosticReportResult,
WorkspaceEdit, WorkspaceFolder,
};
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf, TestSystem};
use rustc_hash::FxHashMap;
use tempfile::TempDir;
use ty_server::{ClientOptions, LogLevel, Server, init_logging};
/// Number of times to retry receiving a message before giving up
const RETRY_COUNT: usize = 5;

/// Guard ensuring the tracing subscriber is installed at most once per process.
static INIT_TRACING: OnceLock<()> = OnceLock::new();

/// Setup tracing for the test server.
///
/// This will make sure that the tracing subscriber is initialized only once, so that running
/// multiple tests does not cause multiple subscribers to be registered.
fn setup_tracing() {
    INIT_TRACING.get_or_init(|| {
        init_logging(LogLevel::Debug, None);
    });
}
/// Errors when receiving a notification or request from the server.
#[derive(thiserror::Error, Debug)]
pub(crate) enum ServerMessageError {
    /// No matching message arrived within the allotted time/retries.
    #[error("waiting for message timed out")]
    Timeout,
    /// The server side of the in-memory connection went away.
    #[error("server disconnected")]
    ServerDisconnected,
    /// The message arrived but its params could not be deserialized.
    #[error("Failed to deserialize message body: {0}")]
    DeserializationError(#[from] serde_json::Error),
}
impl From<ReceiveError> for ServerMessageError {
    /// Lift a transport-level receive failure into a message-level error.
    fn from(value: ReceiveError) -> Self {
        if matches!(value, ReceiveError::Timeout) {
            Self::Timeout
        } else {
            Self::ServerDisconnected
        }
    }
}
/// Errors when receiving a response from the server.
#[derive(thiserror::Error, Debug)]
pub(crate) enum AwaitResponseError {
    /// The response came back, but was an error response, not a successful one.
    #[error("request failed because the server replied with an error: {0:?}")]
    RequestFailed(ResponseError),
    /// The response set both `result` and `error` (or neither) — a protocol violation.
    #[error("malformed response message with both result and error: {0:#?}")]
    MalformedResponse(Box<Response>),
    /// The server answered the same request ID more than once.
    #[error("received multiple responses for the same request ID: {0:#?}")]
    MultipleResponses(Box<[Response]>),
    #[error("waiting for response timed out")]
    Timeout,
    #[error("server disconnected")]
    ServerDisconnected,
    #[error("failed to deserialize response result: {0}")]
    DeserializationError(#[from] serde_json::Error),
}
impl From<ReceiveError> for AwaitResponseError {
    /// Lift a transport-level receive failure into a response-level error.
    fn from(err: ReceiveError) -> Self {
        if let ReceiveError::Timeout = err {
            Self::Timeout
        } else {
            Self::ServerDisconnected
        }
    }
}
/// Low-level errors while waiting for the next message (of any kind) from the server.
#[derive(thiserror::Error, Debug)]
pub(crate) enum ReceiveError {
    #[error("waiting for message timed out")]
    Timeout,
    #[error("server disconnected")]
    ServerDisconnected,
}
/// A test server for the ty language server that provides helpers for sending requests,
/// correlating responses, and handling notifications.
pub(crate) struct TestServer {
    /// The thread that's actually running the server.
    ///
    /// This is an [`Option`] so that the join handle can be taken out when the server is dropped,
    /// allowing the server thread to be joined and cleaned up properly.
    server_thread: Option<JoinHandle<()>>,

    /// Connection to communicate with the server.
    ///
    /// This is an [`Option`] so that it can be taken out when the server is dropped, allowing
    /// the connection to be cleaned up properly.
    client_connection: Option<Connection>,

    /// Test context that provides the project root directory that holds all test files.
    ///
    /// This directory is automatically cleaned up when the [`TestServer`] is dropped.
    test_context: TestContext,

    /// Incrementing counter to automatically generate request IDs
    request_counter: i32,

    /// A mapping of request IDs to responses received from the server.
    ///
    /// Valid responses contain exactly one response but may contain multiple responses
    /// when the server sends multiple responses for a single request.
    /// The responses are guaranteed to never be empty.
    responses: FxHashMap<RequestId, smallvec::SmallVec<[Response; 1]>>,

    /// An ordered queue of all the notifications received from the server
    notifications: VecDeque<lsp_server::Notification>,

    /// An ordered queue of all the requests received from the server
    requests: VecDeque<lsp_server::Request>,

    /// The response from server initialization
    initialize_response: Option<InitializeResult>,

    /// Workspace configurations for `workspace/configuration` requests,
    /// keyed by workspace folder URI.
    workspace_configurations: HashMap<Url, ClientOptions>,

    /// Whether a Shutdown request has been sent by the test
    /// and the exit sequence should be skipped during `Drop`
    shutdown_requested: bool,
}
impl TestServer {
    /// Create a new test server with the given workspace configurations
    ///
    /// Spawns the real server on a background thread connected over an in-memory
    /// channel, applies the environment-variable overrides, and performs the LSP
    /// initialization handshake before returning.
    fn new(
        workspaces: Vec<(WorkspaceFolder, Option<ClientOptions>)>,
        test_context: TestContext,
        capabilities: ClientCapabilities,
        initialization_options: Option<ClientOptions>,
        env_vars: Vec<(String, Option<String>)>,
    ) -> Self {
        setup_tracing();

        tracing::debug!("Starting test client with capabilities {:#?}", capabilities);

        let (server_connection, client_connection) = Connection::memory();

        // Create OS system with the test directory as cwd
        let os_system = OsSystem::new(test_context.root());

        // Create test system and set environment variable overrides
        let test_system = Arc::new(TestSystem::new(os_system));
        for (name, value) in env_vars {
            match value {
                Some(value) => {
                    test_system.set_env_var(name, value);
                }
                // `None` means the variable should be absent for the server.
                None => test_system.remove_env_var(name),
            }
        }

        // Start the server in a separate thread
        let server_thread = std::thread::spawn(move || {
            // TODO: This should probably be configurable to test concurrency issues
            let worker_threads = NonZeroUsize::new(1).unwrap();
            match Server::new(worker_threads, server_connection, test_system, true) {
                Ok(server) => {
                    if let Err(err) = server.run() {
                        panic!("Server stopped with error: {err:?}");
                    }
                }
                Err(err) => {
                    panic!("Failed to create server: {err:?}");
                }
            }
        });

        let workspace_folders = workspaces
            .iter()
            .map(|(folder, _)| folder.clone())
            .collect::<Vec<_>>();
        // Only folders that supplied explicit options participate in
        // `workspace/configuration` responses; the rest answer with null.
        let workspace_configurations = workspaces
            .into_iter()
            .filter_map(|(folder, options)| Some((folder.uri, options?)))
            .collect::<HashMap<_, _>>();

        Self {
            server_thread: Some(server_thread),
            client_connection: Some(client_connection),
            test_context,
            request_counter: 0,
            responses: FxHashMap::default(),
            notifications: VecDeque::new(),
            requests: VecDeque::new(),
            initialize_response: None,
            workspace_configurations,
            shutdown_requested: false,
        }
        // Complete the LSP handshake before handing the server to the test.
        .initialize(workspace_folders, capabilities, initialization_options)
    }
    /// Perform LSP initialization handshake
    ///
    /// Sends `initialize`, waits for (and stores) the server's result, then
    /// sends the `initialized` notification.
    ///
    /// # Panics
    ///
    /// If the `initialization_options` cannot be serialized to JSON
    fn initialize(
        mut self,
        workspace_folders: Vec<WorkspaceFolder>,
        capabilities: ClientCapabilities,
        initialization_options: Option<ClientOptions>,
    ) -> Self {
        let init_params = InitializeParams {
            capabilities,
            workspace_folders: Some(workspace_folders),
            initialization_options: initialization_options.map(|options| {
                serde_json::to_value(options)
                    .context("Failed to serialize initialization options to `ClientOptions`")
                    .unwrap()
            }),
            ..Default::default()
        };

        let init_request_id = self.send_request::<Initialize>(init_params);
        self.initialize_response = Some(self.await_response::<Initialize>(&init_request_id));
        self.send_notification::<Initialized>(InitializedParams {});

        self
    }
    /// Wait until the server has initialized all workspaces.
    ///
    /// This will wait until the client receives a `workspace/configuration` request from the
    /// server, and handles the request.
    ///
    /// This should only be called if the server is expected to send this request.
    #[track_caller]
    pub(crate) fn wait_until_workspaces_are_initialized(mut self) -> Self {
        let (request_id, params) = self.await_request::<WorkspaceConfiguration>();
        self.handle_workspace_configuration_request(request_id, &params);
        self
    }
    /// Drain all messages from the server.
    fn drain_messages(&mut self) {
        // Don't wait too long to drain the messages, as this is called in the `Drop`
        // implementation which happens every time the test ends.
        while let Ok(()) = self.receive(Some(Duration::from_millis(10))) {}
    }
    /// Validate that there are no pending messages from the server.
    ///
    /// This should be called before the test server is dropped to ensure that all server messages
    /// have been properly consumed by the test. If there are any pending messages, this will panic
    /// with detailed information about what was left unconsumed.
    #[track_caller]
    fn assert_no_pending_messages(&self) {
        // Collect every category of leftover message before asserting so the
        // failure message lists all of them at once.
        let mut errors = Vec::new();
        if !self.responses.is_empty() {
            errors.push(format!("Unclaimed responses: {:#?}", self.responses));
        }
        if !self.notifications.is_empty() {
            errors.push(format!(
                "Unclaimed notifications: {:#?}",
                self.notifications
            ));
        }
        if !self.requests.is_empty() {
            errors.push(format!("Unclaimed requests: {:#?}", self.requests));
        }

        assert!(
            errors.is_empty(),
            "Test server has pending messages that were not consumed by the test:\n{}",
            errors.join("\n")
        );
    }
/// Generate a new request ID
fn next_request_id(&mut self) -> RequestId {
self.request_counter += 1;
RequestId::from(self.request_counter)
}
    /// Send a message to the server.
    ///
    /// # Panics
    ///
    /// If the server is still running but the client connection got dropped, or if the server
    /// exited unexpectedly or panicked.
    #[track_caller]
    fn send(&mut self, message: Message) {
        // A send error means the server end of the channel is gone; diagnose
        // why (panic, clean exit, dropped receiver) instead of failing silently.
        if self
            .client_connection
            .as_ref()
            .unwrap()
            .sender
            .send(message)
            .is_err()
        {
            self.panic_on_server_disconnect();
        }
    }
    /// Send a request to the server and return the request ID.
    ///
    /// The caller can use this ID to later retrieve the response using [`await_response`].
    ///
    /// [`await_response`]: TestServer::await_response
    pub(crate) fn send_request<R>(&mut self, params: R::Params) -> RequestId
    where
        R: Request,
    {
        // Track whether a Shutdown request is being sent so that `Drop` can
        // skip the shutdown handshake it would otherwise perform.
        if R::METHOD == lsp_types::request::Shutdown::METHOD {
            self.shutdown_requested = true;
        }
        let id = self.next_request_id();
        tracing::debug!("Client sends request `{}` with ID {}", R::METHOD, id);
        let request = lsp_server::Request::new(id.clone(), R::METHOD.to_string(), params);
        self.send(Message::Request(request));
        id
    }
/// Send a notification to the server.
pub(crate) fn send_notification<N>(&mut self, params: N::Params)
where
N: Notification,
{
let notification = lsp_server::Notification::new(N::METHOD.to_string(), params);
tracing::debug!("Client sends notification `{}`", N::METHOD);
self.send(Message::Notification(notification));
}
    /// Wait for a server response corresponding to the given request ID.
    ///
    /// This should only be called if a request was already sent to the server via [`send_request`]
    /// which returns the request ID that should be used here.
    ///
    /// This method will remove the response from the internal data structure, so it can only be
    /// called once per request ID.
    ///
    /// # Panics
    ///
    /// If the server didn't send a response, the response failed with an error code, failed to deserialize,
    /// or the server responded twice. Use [`Self::try_await_response`] if you want a non-panicking version.
    ///
    /// [`send_request`]: TestServer::send_request
    #[track_caller]
    pub(crate) fn await_response<R>(&mut self, id: &RequestId) -> R::Result
    where
        R: Request,
    {
        self.try_await_response::<R>(id, None)
            .unwrap_or_else(|err| panic!("Failed to receive response for request {id}: {err}"))
    }
    /// Send a request to the server and block until its response arrives.
    ///
    /// Convenience wrapper combining [`send_request`] and [`await_response`]
    /// with the default timeout.
    ///
    /// # Panics
    ///
    /// If the response never arrives, fails with an error, or cannot be deserialized.
    ///
    /// [`send_request`]: TestServer::send_request
    /// [`await_response`]: TestServer::await_response
    #[track_caller]
    pub(crate) fn send_request_await<R>(&mut self, params: R::Params) -> R::Result
    where
        R: Request,
    {
        let id = self.send_request::<R>(params);
        self.try_await_response::<R>(&id, None)
            .unwrap_or_else(|err| panic!("Failed to receive response for request {id}: {err}"))
    }
    /// Wait for a server response corresponding to the given request ID.
    ///
    /// This should only be called if a request was already sent to the server via [`send_request`]
    /// which returns the request ID that should be used here.
    ///
    /// This method will remove the response from the internal data structure, so it can only be
    /// called once per request ID.
    ///
    /// [`send_request`]: TestServer::send_request
    pub(crate) fn try_await_response<R>(
        &mut self,
        id: &RequestId,
        timeout: Option<Duration>,
    ) -> Result<R::Result, AwaitResponseError>
    where
        R: Request,
    {
        loop {
            if let Some(mut responses) = self.responses.remove(id) {
                // A well-behaved server sends exactly one response per request.
                if responses.len() > 1 {
                    return Err(AwaitResponseError::MultipleResponses(
                        responses.into_boxed_slice(),
                    ));
                }

                let response = responses.pop().unwrap();
                match response {
                    Response {
                        error: None,
                        result: Some(result),
                        ..
                    } => {
                        return Ok(serde_json::from_value::<R::Result>(result)?);
                    }
                    Response {
                        error: Some(err),
                        result: None,
                        ..
                    } => {
                        return Err(AwaitResponseError::RequestFailed(err));
                    }
                    // Both or neither of `result`/`error` set: protocol violation.
                    response => {
                        return Err(AwaitResponseError::MalformedResponse(Box::new(response)));
                    }
                }
            }

            // Response not yet queued: block until more server messages arrive.
            self.receive(timeout)?;
        }
    }
    /// Wait for a notification of the specified type from the server and return its parameters.
    ///
    /// The caller should ensure that the server is expected to send this notification type. It
    /// will keep polling the server for this notification up to [`RETRY_COUNT`] times before
    /// giving up after which it will return an error. It will also return an error if the
    /// notification is not received within `recv_timeout` duration.
    ///
    /// This method will remove the notification from the internal data structure, so it should
    /// only be called if the notification is expected to be sent by the server.
    ///
    /// # Panics
    ///
    /// If the server doesn't send the notification within the default timeout or
    /// the notification failed to deserialize. Use [`Self::try_await_notification`] for
    /// a panic-free alternative.
    #[track_caller]
    pub(crate) fn await_notification<N: Notification>(&mut self) -> N::Params {
        match self.try_await_notification::<N>(None) {
            Ok(result) => result,
            Err(err) => {
                panic!("Failed to receive notification `{}`: {err}", N::METHOD)
            }
        }
    }
    /// Wait for a notification of the specified type from the server and return its parameters.
    ///
    /// The caller should ensure that the server is expected to send this notification type. It
    /// will keep polling the server for this notification up to [`RETRY_COUNT`] times before
    /// giving up after which it will return an error. It will also return an error if the
    /// notification is not received within `recv_timeout` duration.
    ///
    /// This method will remove the notification from the internal data structure, so it should
    /// only be called if the notification is expected to be sent by the server.
    pub(crate) fn try_await_notification<N: Notification>(
        &mut self,
        timeout: Option<Duration>,
    ) -> Result<N::Params, ServerMessageError> {
        for retry_count in 0..RETRY_COUNT {
            if retry_count > 0 {
                tracing::info!("Retrying to receive `{}` notification", N::METHOD);
            }

            // Search the queue for the first notification with a matching
            // method and remove it, leaving unrelated notifications queued.
            let notification = self
                .notifications
                .iter()
                .position(|notification| N::METHOD == notification.method)
                .and_then(|index| self.notifications.remove(index));

            if let Some(notification) = notification {
                let params = serde_json::from_value(notification.params)?;
                return Ok(params);
            }

            self.receive(timeout)?;
        }

        Err(ServerMessageError::Timeout)
    }
    /// Collects `N` publish diagnostic notifications into a map, indexed by the document url.
    ///
    /// ## Panics
    /// If there are multiple publish diagnostics notifications for the same document.
    #[track_caller]
    pub(crate) fn collect_publish_diagnostic_notifications(
        &mut self,
        count: usize,
    ) -> BTreeMap<lsp_types::Url, Vec<lsp_types::Diagnostic>> {
        let mut results = BTreeMap::default();

        for _ in 0..count {
            let notification =
                self.await_notification::<lsp_types::notification::PublishDiagnostics>();

            // `insert` returning `Some` means this URI was already reported —
            // the server published twice for the same document.
            if let Some(existing) =
                results.insert(notification.uri.clone(), notification.diagnostics)
            {
                panic!(
                    "Received multiple publish diagnostic notifications for {url}: ({existing:#?})",
                    url = &notification.uri
                );
            }
        }

        results
    }
    /// Wait for a request of the specified type from the server and return the request ID and
    /// parameters.
    ///
    /// The caller should ensure that the server is expected to send this request type. It will
    /// keep polling the server for this request up to [`RETRY_COUNT`] times before giving up
    /// after which it will return an error. It can also return an error if the request is not
    /// received within `recv_timeout` duration.
    ///
    /// This method will remove the request from the internal data structure, so it should only be
    /// called if the request is expected to be sent by the server.
    ///
    /// # Panics
    ///
    /// If receiving the request fails.
    #[track_caller]
    pub(crate) fn await_request<R: Request>(&mut self) -> (RequestId, R::Params) {
        match self.try_await_request::<R>(None) {
            Ok(result) => result,
            Err(err) => {
                panic!("Failed to receive server request `{}`: {err}", R::METHOD)
            }
        }
    }
    /// Wait for a request of the specified type from the server and return the request ID and
    /// parameters.
    ///
    /// The caller should ensure that the server is expected to send this request type. It will
    /// keep polling the server for this request up to [`RETRY_COUNT`] times before giving up
    /// after which it will return an error. It can also return an error if the request is not
    /// received within `recv_timeout` duration.
    ///
    /// This method will remove the request from the internal data structure, so it should only be
    /// called if the request is expected to be sent by the server.
    #[track_caller]
    pub(crate) fn try_await_request<R: Request>(
        &mut self,
        timeout: Option<Duration>,
    ) -> Result<(RequestId, R::Params), ServerMessageError> {
        for retry_count in 0..RETRY_COUNT {
            if retry_count > 0 {
                tracing::info!("Retrying to receive `{}` request", R::METHOD);
            }

            // Remove the first queued request with a matching method,
            // leaving unrelated server requests in place.
            let request = self
                .requests
                .iter()
                .position(|request| R::METHOD == request.method)
                .and_then(|index| self.requests.remove(index));

            if let Some(request) = request {
                let params = serde_json::from_value(request.params)?;
                return Ok((request.id, params));
            }

            self.receive(timeout)?;
        }

        Err(ServerMessageError::Timeout)
    }
    /// Receive a message from the server.
    ///
    /// It will wait for `timeout` duration for a message to arrive. If no message is received
    /// within that time, it will return an error.
    ///
    /// If `timeout` is `None`, it will use a default timeout of 10 seconds.
    fn receive(&mut self, timeout: Option<Duration>) -> Result<(), ReceiveError> {
        static DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);

        // Clone the receiver handle so `self` isn't borrowed across `handle_message`.
        let receiver = self.client_connection.as_ref().unwrap().receiver.clone();

        let message = receiver
            .recv_timeout(timeout.unwrap_or(DEFAULT_TIMEOUT))
            .map_err(|err| match err {
                RecvTimeoutError::Disconnected => ReceiveError::ServerDisconnected,
                RecvTimeoutError::Timeout => ReceiveError::Timeout,
            })?;
        self.handle_message(message);

        // Opportunistically drain everything else already queued, without blocking.
        for message in receiver.try_iter() {
            self.handle_message(message);
        }

        Ok(())
    }
    /// Handle the incoming message from the server.
    ///
    /// This method will store the message as follows:
    /// - Requests are stored in `self.requests`
    /// - Responses are stored in `self.responses` with the request ID as the key
    /// - Notifications are stored in `self.notifications`
    fn handle_message(&mut self, message: Message) {
        match message {
            Message::Request(request) => {
                tracing::debug!("Received server request `{}`", &request.method);
                self.requests.push_back(request);
            }
            Message::Response(response) => {
                tracing::debug!("Received server response for request {}", &response.id);
                // Accumulate per ID; more than one entry is reported later as
                // `AwaitResponseError::MultipleResponses`.
                self.responses
                    .entry(response.id.clone())
                    .or_default()
                    .push(response);
            }
            Message::Notification(notification) => {
                tracing::debug!("Received notification `{}`", &notification.method);
                self.notifications.push_back(notification);
            }
        }
    }
    /// Diagnose why the server end of the connection went away and panic with
    /// the most informative message available.
    ///
    /// If the server thread already finished with a panic, re-raise that panic;
    /// otherwise report an unexpected exit or a dropped channel.
    #[track_caller]
    fn panic_on_server_disconnect(&mut self) -> ! {
        if let Some(handle) = &self.server_thread {
            if handle.is_finished() {
                let handle = self.server_thread.take().unwrap();
                if let Err(panic) = handle.join() {
                    // Propagate the server thread's panic into the test.
                    std::panic::resume_unwind(panic);
                }
                panic!("Server exited unexpectedly");
            }
        }
        panic!("Server dropped client receiver while still running");
    }
    /// Send a `$/cancelRequest` notification for the given request ID.
    pub(crate) fn cancel(&mut self, request_id: &RequestId) {
        let id_string = request_id.to_string();
        self.send_notification::<lsp_types::notification::Cancel>(lsp_types::CancelParams {
            // LSP IDs are either numbers or strings; preserve the numeric form
            // when the ID parses as an integer.
            id: match id_string.parse() {
                Ok(id) => NumberOrString::Number(id),
                Err(_) => NumberOrString::String(id_string),
            },
        });
    }
    /// Handle workspace configuration requests from the server.
    ///
    /// Use the [`await_request`] method to wait for the server to send this request.
    ///
    /// [`await_request`]: TestServer::await_request
    #[track_caller]
    fn handle_workspace_configuration_request(
        &mut self,
        request_id: RequestId,
        params: &ConfigurationParams,
    ) {
        // The response must contain exactly one value per requested item,
        // in the same order.
        let mut results = Vec::new();

        for item in &params.items {
            let Some(scope_uri) = &item.scope_uri else {
                unimplemented!("Handling global configuration requests is not implemented yet");
            };

            let config_value = if let Some(options) = self.workspace_configurations.get(scope_uri) {
                // Return the configuration for the specific workspace
                //
                // As per the spec:
                //
                // > If the client can't provide a configuration setting for a given scope
                // > then null needs to be present in the returned array.
                match item.section.as_deref() {
                    Some("ty") => match serde_json::to_value(options) {
                        Ok(value) => value,
                        Err(err) => {
                            panic!("Failed to deserialize workspace configuration options: {err}",)
                        }
                    },
                    Some(section) => {
                        tracing::debug!("Unrecognized section `{section}` for {scope_uri}");
                        serde_json::Value::Null
                    }
                    None => {
                        tracing::debug!(
                            "No section specified for workspace configuration of {scope_uri}",
                        );
                        serde_json::Value::Null
                    }
                }
            } else {
                tracing::debug!("No workspace configuration provided for {scope_uri}");
                serde_json::Value::Null
            };

            results.push(config_value);
        }

        let response = Response::new_ok(request_id, results);
        self.send(Message::Response(response));
    }
    /// Get the initialization result
    pub(crate) fn initialization_result(&self) -> Option<&InitializeResult> {
        self.initialize_response.as_ref()
    }
    /// Convert a path relative to the test root into a `file://` URL.
    pub(crate) fn file_uri(&self, path: impl AsRef<SystemPath>) -> Url {
        Url::from_file_path(self.file_path(path).as_std_path()).expect("Path must be a valid URL")
    }
    /// Resolve a path relative to the test root into an absolute path.
    pub(crate) fn file_path(&self, path: impl AsRef<SystemPath>) -> SystemPathBuf {
        self.test_context.root().join(path)
    }
    /// Send a `textDocument/didOpen` notification
    ///
    /// The document is always opened with `language_id: "python"`; construct
    /// the params manually to open with a different language.
    pub(crate) fn open_text_document(
        &mut self,
        path: impl AsRef<SystemPath>,
        content: impl AsRef<str>,
        version: i32,
    ) {
        let params = DidOpenTextDocumentParams {
            text_document: TextDocumentItem {
                uri: self.file_uri(path),
                language_id: "python".to_string(),
                version,
                text: content.as_ref().to_string(),
            },
        };
        self.send_notification::<DidOpenTextDocument>(params);
    }
    /// Send a `textDocument/didChange` notification with the given content changes
    pub(crate) fn change_text_document(
        &mut self,
        path: impl AsRef<SystemPath>,
        changes: Vec<TextDocumentContentChangeEvent>,
        version: i32,
    ) {
        let params = DidChangeTextDocumentParams {
            text_document: VersionedTextDocumentIdentifier {
                uri: self.file_uri(path),
                version,
            },
            content_changes: changes,
        };
        self.send_notification::<DidChangeTextDocument>(params);
    }
/// Send a `textDocument/didClose` notification
pub(crate) fn close_text_document(&mut self, path: impl AsRef<SystemPath>) {
let params = DidCloseTextDocumentParams {
text_document: TextDocumentIdentifier {
uri: self.file_uri(path),
},
};
self.send_notification::<DidCloseTextDocument>(params);
}
/// Send a `workspace/didChangeWatchedFiles` notification with the given file events
pub(crate) fn did_change_watched_files(&mut self, events: Vec<FileEvent>) {
let params = DidChangeWatchedFilesParams { changes: events };
self.send_notification::<DidChangeWatchedFiles>(params);
}
pub(crate) fn rename(
&mut self,
document: &Url,
position: lsp_types::Position,
new_name: &str,
) -> Result<Option<WorkspaceEdit>, ()> {
if self
.send_request_await::<PrepareRenameRequest>(lsp_types::TextDocumentPositionParams {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/tests/e2e/signature_help.rs | crates/ty_server/tests/e2e/signature_help.rs | use anyhow::Result;
use lsp_types::{Position, notification::PublishDiagnostics};
use ruff_db::system::SystemPath;
use ty_server::ClientOptions;
use crate::TestServerBuilder;
/// Tests that we get signature help even when the cursor
/// is on the function name.
///
/// This is a regression test to ensure we don't accidentally
/// cause this case to stop working.
#[test]
fn works_in_function_name() -> Result<()> {
    let workspace_root = SystemPath::new("src");
    let foo = SystemPath::new("src/foo.py");
    let foo_content = "\
import re
re.match('', '')
";
    let mut server = TestServerBuilder::new()?
        .with_initialization_options(ClientOptions::default())
        .with_workspace(workspace_root, None)?
        .with_file(foo, foo_content)?
        .build()
        .wait_until_workspaces_are_initialized();

    server.open_text_document(foo, foo_content, 1);
    let _ = server.await_notification::<PublishDiagnostics>();

    // Position (1, 6) is inside `match` on line `re.match('', '')`.
    let signature_help = server.signature_help_request(&server.file_uri(foo), Position::new(1, 6));
    insta::assert_json_snapshot!(signature_help);

    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_workspace/src/settings.rs | crates/ruff_workspace/src/settings.rs | use path_absolutize::path_dedot;
use ruff_cache::cache_dir;
use ruff_formatter::{FormatOptions, IndentStyle, IndentWidth, LineWidth};
use ruff_graph::AnalyzeSettings;
use ruff_linter::display_settings;
use ruff_linter::settings::LinterSettings;
use ruff_linter::settings::types::{
CompiledPerFileTargetVersionList, ExtensionMapping, FilePattern, FilePatternSet, OutputFormat,
UnsafeFixes,
};
use ruff_macros::CacheKey;
use ruff_python_ast::{PySourceType, PythonVersion};
use ruff_python_formatter::{
DocstringCode, DocstringCodeLineWidth, MagicTrailingComma, PreviewMode, PyFormatOptions,
QuoteStyle,
};
use ruff_source_file::find_newline;
use std::fmt;
use std::path::{Path, PathBuf};
/// Top-level resolved settings: run-control flags plus the nested
/// file-resolver, linter, formatter, and analyze settings.
///
/// Fields marked `#[cache_key(ignore)]` do not participate in the
/// computed cache key.
#[derive(Debug, CacheKey)]
pub struct Settings {
    /// Directory used for Ruff's cache files.
    #[cache_key(ignore)]
    pub cache_dir: PathBuf,
    /// Whether fixes are applied.
    #[cache_key(ignore)]
    pub fix: bool,
    /// Fix-only mode flag.
    #[cache_key(ignore)]
    pub fix_only: bool,
    /// Whether unsafe fixes are allowed.
    #[cache_key(ignore)]
    pub unsafe_fixes: UnsafeFixes,
    /// Diagnostic output format.
    #[cache_key(ignore)]
    pub output_format: OutputFormat,
    /// Whether applied fixes are reported.
    #[cache_key(ignore)]
    pub show_fixes: bool,
    /// File-discovery settings (include/exclude patterns, gitignore).
    pub file_resolver: FileResolverSettings,
    /// Linter settings.
    pub linter: LinterSettings,
    /// Formatter settings.
    pub formatter: FormatterSettings,
    /// Import-analysis (`ruff analyze`) settings.
    pub analyze: AnalyzeSettings,
}
impl Default for Settings {
    /// Default settings rooted at the current working directory.
    fn default() -> Self {
        // All path-derived defaults are resolved relative to CWD.
        let root = path_dedot::CWD.as_path();
        Self {
            cache_dir: cache_dir(root),
            fix: false,
            fix_only: false,
            unsafe_fixes: UnsafeFixes::default(),
            output_format: OutputFormat::default(),
            show_fixes: false,
            file_resolver: FileResolverSettings::new(root),
            linter: LinterSettings::new(root),
            formatter: FormatterSettings::default(),
            analyze: AnalyzeSettings::default(),
        }
    }
}
impl fmt::Display for Settings {
    /// Writes a `# General Settings` header followed by each field via the
    /// `display_settings!` macro; `nested` fields render their own sections.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "\n# General Settings")?;
        display_settings! {
            formatter = f,
            fields = [
                self.cache_dir | path,
                self.fix,
                self.fix_only,
                self.output_format,
                self.show_fixes,
                self.unsafe_fixes,
                self.file_resolver | nested,
                self.linter | nested,
                self.formatter | nested,
                self.analyze | nested,
            ]
        }
        Ok(())
    }
}
/// Settings controlling which files Ruff discovers and checks.
#[derive(Debug, CacheKey)]
pub struct FileResolverSettings {
    /// Base exclude patterns.
    pub exclude: FilePatternSet,
    /// Additional exclude patterns.
    pub extend_exclude: FilePatternSet,
    /// Whether exclusions apply even to explicitly passed paths.
    pub force_exclude: bool,
    /// Base include patterns.
    pub include: FilePatternSet,
    /// Additional include patterns.
    pub extend_include: FilePatternSet,
    /// Whether `.gitignore` files are honored during discovery.
    pub respect_gitignore: bool,
    /// Root directory against which patterns are resolved.
    pub project_root: PathBuf,
}
impl fmt::Display for FileResolverSettings {
    /// Writes a `# File Resolver Settings` section under the
    /// `file_resolver` namespace.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "\n# File Resolver Settings")?;
        display_settings! {
            formatter = f,
            namespace = "file_resolver",
            fields = [
                self.exclude,
                self.extend_exclude,
                self.force_exclude,
                self.include,
                self.extend_include,
                self.respect_gitignore,
                self.project_root | path,
            ]
        }
        Ok(())
    }
}
/// Default exclude patterns: version-control metadata, tool caches,
/// virtual environments, and build/output directories.
pub(crate) static EXCLUDE: &[FilePattern] = &[
    FilePattern::Builtin(".bzr"),
    FilePattern::Builtin(".direnv"),
    FilePattern::Builtin(".eggs"),
    FilePattern::Builtin(".git"),
    FilePattern::Builtin(".git-rewrite"),
    FilePattern::Builtin(".hg"),
    FilePattern::Builtin(".ipynb_checkpoints"),
    FilePattern::Builtin(".mypy_cache"),
    FilePattern::Builtin(".nox"),
    FilePattern::Builtin(".pants.d"),
    FilePattern::Builtin(".pyenv"),
    FilePattern::Builtin(".pytest_cache"),
    FilePattern::Builtin(".pytype"),
    FilePattern::Builtin(".ruff_cache"),
    FilePattern::Builtin(".svn"),
    FilePattern::Builtin(".tox"),
    FilePattern::Builtin(".venv"),
    FilePattern::Builtin(".vscode"),
    FilePattern::Builtin("__pypackages__"),
    FilePattern::Builtin("_build"),
    FilePattern::Builtin("buck-out"),
    FilePattern::Builtin("dist"),
    FilePattern::Builtin("node_modules"),
    FilePattern::Builtin("site-packages"),
    FilePattern::Builtin("venv"),
];
/// Default include patterns: Python sources, stubs, notebooks, and any
/// `pyproject.toml`.
pub(crate) static INCLUDE: &[FilePattern] = &[
    FilePattern::Builtin("*.py"),
    FilePattern::Builtin("*.pyi"),
    FilePattern::Builtin("*.ipynb"),
    FilePattern::Builtin("**/pyproject.toml"),
];
/// Default include patterns used in preview mode: [`INCLUDE`] plus `*.pyw`.
pub(crate) static INCLUDE_PREVIEW: &[FilePattern] = &[
    FilePattern::Builtin("*.py"),
    FilePattern::Builtin("*.pyi"),
    FilePattern::Builtin("*.pyw"),
    FilePattern::Builtin("*.ipynb"),
    FilePattern::Builtin("**/pyproject.toml"),
];
impl FileResolverSettings {
    /// Default resolver settings rooted at `project_root`: the built-in
    /// include/exclude patterns, gitignore respected, nothing forced.
    fn new(project_root: &Path) -> Self {
        let exclude = FilePatternSet::try_from_iter(EXCLUDE.iter().cloned()).unwrap();
        let include = FilePatternSet::try_from_iter(INCLUDE.iter().cloned()).unwrap();
        Self {
            exclude,
            extend_exclude: FilePatternSet::default(),
            force_exclude: false,
            include,
            extend_include: FilePatternSet::default(),
            respect_gitignore: true,
            project_root: project_root.to_path_buf(),
        }
    }
}
/// Resolved settings for the formatter.
#[derive(CacheKey, Clone, Debug)]
pub struct FormatterSettings {
    /// Patterns for files the formatter should skip.
    pub exclude: FilePatternSet,
    /// File-extension overrides (see [`ExtensionMapping`]).
    pub extension: ExtensionMapping,
    /// Whether preview-style formatting is enabled.
    pub preview: PreviewMode,
    /// The non-path-resolved Python version specified by the `target-version` input option.
    ///
    /// See [`FormatterSettings::resolve_target_version`] for a way to obtain the Python version for
    /// a given file, while respecting the overrides in `per_file_target_version`.
    pub unresolved_target_version: PythonVersion,
    /// Path-specific overrides to `unresolved_target_version`.
    ///
    /// See [`FormatterSettings::resolve_target_version`] for a way to check a given [`Path`]
    /// against these patterns, while falling back to `unresolved_target_version` if none of them
    /// match.
    pub per_file_target_version: CompiledPerFileTargetVersionList,
    /// Target line width.
    pub line_width: LineWidth,
    /// Indentation style (tabs vs. spaces).
    pub indent_style: IndentStyle,
    /// Indentation width.
    pub indent_width: IndentWidth,
    /// Preferred quote style for strings.
    pub quote_style: QuoteStyle,
    /// Magic-trailing-comma handling.
    pub magic_trailing_comma: MagicTrailingComma,
    /// Line-ending policy; see [`LineEnding`].
    pub line_ending: LineEnding,
    /// Whether code blocks inside docstrings are formatted.
    pub docstring_code_format: DocstringCode,
    /// Line width used for code inside docstrings.
    pub docstring_code_line_width: DocstringCodeLineWidth,
}
impl FormatterSettings {
    /// Assemble the [`PyFormatOptions`] used to format `source`.
    ///
    /// `path`, when available, is consulted for per-file target-version
    /// overrides; `LineEnding::Auto` is resolved by inspecting `source`.
    pub fn to_format_options(
        &self,
        source_type: PySourceType,
        source: &str,
        path: Option<&Path>,
    ) -> PyFormatOptions {
        use ruff_formatter::printer::LineEnding as PrinterLineEnding;

        // Without a path there is nothing to match per-file overrides against.
        let target_version = match path {
            Some(path) => self.resolve_target_version(path),
            None => self.unresolved_target_version,
        };

        let line_ending = match self.line_ending {
            LineEnding::Lf => PrinterLineEnding::LineFeed,
            LineEnding::CrLf => PrinterLineEnding::CarriageReturnLineFeed,
            #[cfg(target_os = "windows")]
            LineEnding::Native => PrinterLineEnding::CarriageReturnLineFeed,
            #[cfg(not(target_os = "windows"))]
            LineEnding::Native => PrinterLineEnding::LineFeed,
            // Mirror the first line ending found in the source; default to
            // LF when the source contains no newline at all.
            LineEnding::Auto => match find_newline(source) {
                Some((_, ruff_source_file::LineEnding::Lf)) => PrinterLineEnding::LineFeed,
                Some((_, ruff_source_file::LineEnding::CrLf)) => {
                    PrinterLineEnding::CarriageReturnLineFeed
                }
                Some((_, ruff_source_file::LineEnding::Cr)) => {
                    PrinterLineEnding::CarriageReturn
                }
                None => PrinterLineEnding::LineFeed,
            },
        };

        PyFormatOptions::from_source_type(source_type)
            .with_target_version(target_version)
            .with_indent_style(self.indent_style)
            .with_indent_width(self.indent_width)
            .with_quote_style(self.quote_style)
            .with_magic_trailing_comma(self.magic_trailing_comma)
            .with_preview(self.preview)
            .with_line_ending(line_ending)
            .with_line_width(self.line_width)
            .with_docstring_code(self.docstring_code_format)
            .with_docstring_code_line_width(self.docstring_code_line_width)
    }

    /// Resolve the [`PythonVersion`] to use for formatting `path`.
    ///
    /// Consults the overrides in [`FormatterSettings::per_file_target_version`]
    /// first and falls back on [`FormatterSettings::unresolved_target_version`]
    /// when no override pattern matches.
    pub fn resolve_target_version(&self, path: &Path) -> PythonVersion {
        match self.per_file_target_version.is_match(path) {
            Some(version) => version,
            None => self.unresolved_target_version,
        }
    }
}
impl Default for FormatterSettings {
    /// Defaults mirror the formatter crate's own option defaults for all
    /// style knobs; discovery-related fields start empty/disabled.
    fn default() -> Self {
        let options = PyFormatOptions::default();
        Self {
            exclude: FilePatternSet::default(),
            extension: ExtensionMapping::default(),
            preview: PreviewMode::Disabled,
            unresolved_target_version: options.target_version(),
            per_file_target_version: CompiledPerFileTargetVersionList::default(),
            line_width: options.line_width(),
            indent_style: options.indent_style(),
            indent_width: options.indent_width(),
            quote_style: options.quote_style(),
            magic_trailing_comma: options.magic_trailing_comma(),
            line_ending: LineEnding::Auto,
            docstring_code_format: options.docstring_code(),
            docstring_code_line_width: options.docstring_code_line_width(),
        }
    }
}
impl fmt::Display for FormatterSettings {
    /// Writes a `# Formatter Settings` section under the `formatter`
    /// namespace.
    ///
    /// NOTE(review): `extension` is not listed here — confirm that omission
    /// is intentional.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "\n# Formatter Settings")?;
        display_settings! {
            formatter = f,
            namespace = "formatter",
            fields = [
                self.exclude,
                self.unresolved_target_version,
                self.per_file_target_version,
                self.preview,
                self.line_width,
                self.line_ending,
                self.indent_style,
                self.indent_width,
                self.quote_style,
                self.magic_trailing_comma,
                self.docstring_code_format,
                self.docstring_code_line_width,
            ]
        }
        Ok(())
    }
}
/// Line-ending policy applied by the formatter.
#[derive(
    Copy, Clone, Debug, Eq, PartialEq, Default, CacheKey, serde::Serialize, serde::Deserialize,
)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum LineEnding {
    /// The newline style is detected automatically on a file per file basis.
    /// Files with mixed line endings will be converted to the first detected line ending.
    /// Defaults to [`LineEnding::Lf`] for files that contain no line endings.
    #[default]
    Auto,
    /// Line endings will be converted to `\n` as is common on Unix.
    Lf,
    /// Line endings will be converted to `\r\n` as is common on Windows.
    CrLf,
    /// Line endings will be converted to `\n` on Unix and `\r\n` on Windows.
    Native,
}
impl fmt::Display for LineEnding {
    /// Renders the kebab-case label matching the serde representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            Self::Auto => "auto",
            Self::Lf => "lf",
            Self::CrLf => "crlf",
            Self::Native => "native",
        };
        f.write_str(label)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_workspace/src/pyproject.rs | crates/ruff_workspace/src/pyproject.rs | //! Utilities for locating (and extracting configuration from) a pyproject.toml.
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
use log::debug;
use pep440_rs::{Operator, Version, VersionSpecifiers};
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use ruff_linter::settings::types::PythonVersion;
use crate::options::Options;
/// The `[tool]` table of a `pyproject.toml`, reduced to the entry we need.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
struct Tools {
    /// Contents of `[tool.ruff]`, if present.
    ruff: Option<Options>,
}
/// The `[project]` table of a `pyproject.toml`, reduced to the field we need.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
struct Project {
    /// The project's `requires-python` specifier (both spellings accepted).
    #[serde(alias = "requires-python", alias = "requires_python")]
    requires_python: Option<VersionSpecifiers>,
}
/// The subset of a `pyproject.toml` document that Ruff reads.
#[derive(Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
pub struct Pyproject {
    /// The `[tool]` table.
    tool: Option<Tools>,
    /// The `[project]` table.
    project: Option<Project>,
}
impl Pyproject {
    /// Build a synthetic `pyproject.toml` representation carrying only a
    /// `[tool.ruff]` table with the given options (no `[project]` table).
    pub const fn new(options: Options) -> Self {
        Self {
            tool: Some(Tools {
                ruff: Some(options),
            }),
            project: None,
        }
    }
}
/// Read and deserialize a `ruff.toml` file into [`Options`].
fn parse_ruff_toml<P: AsRef<Path>>(path: P) -> Result<Options> {
    let path = path.as_ref();
    let contents = std::fs::read_to_string(path)
        .with_context(|| format!("Failed to read {}", path.display()))?;
    let options = toml::from_str(&contents)
        .with_context(|| format!("Failed to parse {}", path.display()))?;
    Ok(options)
}
/// Read and deserialize a `pyproject.toml` file into [`Pyproject`].
fn parse_pyproject_toml<P: AsRef<Path>>(path: P) -> Result<Pyproject> {
    let path = path.as_ref();
    let text = std::fs::read_to_string(path)
        .with_context(|| format!("Failed to read {}", path.display()))?;
    toml::from_str(&text).with_context(|| format!("Failed to parse {}", path.display()))
}
/// Return `true` if a `pyproject.toml` contains a `[tool.ruff]` section.
pub fn ruff_enabled<P: AsRef<Path>>(path: P) -> Result<bool> {
    let enabled = parse_pyproject_toml(path)?
        .tool
        .is_some_and(|tool| tool.ruff.is_some());
    Ok(enabled)
}
/// Return the path to the configuration file in `path`, if any.
///
/// Precedence: `.ruff.toml`, then `ruff.toml`, then a `pyproject.toml`
/// that actually contains a `[tool.ruff]` section.
pub fn settings_toml<P: AsRef<Path>>(path: P) -> Result<Option<PathBuf>> {
    let directory = path.as_ref();

    // Ruff-specific configuration files take precedence over `pyproject.toml`.
    for filename in [".ruff.toml", "ruff.toml"] {
        let candidate = directory.join(filename);
        if candidate.is_file() {
            return Ok(Some(candidate));
        }
    }

    // Fall back to `pyproject.toml`, but only when Ruff is configured in it.
    let pyproject_toml = directory.join("pyproject.toml");
    if pyproject_toml.is_file() && ruff_enabled(&pyproject_toml)? {
        return Ok(Some(pyproject_toml));
    }

    Ok(None)
}
/// Walk `path` and its ancestors, returning the first configuration file
/// found via [`settings_toml`], if any.
pub fn find_settings_toml<P: AsRef<Path>>(path: P) -> Result<Option<PathBuf>> {
    for ancestor in path.as_ref().ancestors() {
        let found = settings_toml(ancestor)?;
        if found.is_some() {
            return Ok(found);
        }
    }
    Ok(None)
}
/// Derive target version from `required-version` in `pyproject.toml`, if
/// such a file exists in an ancestor directory.
pub fn find_fallback_target_version<P: AsRef<Path>>(path: P) -> Option<PythonVersion> {
for directory in path.as_ref().ancestors() {
if let Some(fallback) = get_fallback_target_version(directory) {
return Some(fallback);
}
}
None
}
/// Find the path to the user-specific `.ruff.toml`, `ruff.toml`, or
/// `pyproject.toml` under the platform's `ruff` config directory, if any.
#[cfg(not(target_arch = "wasm32"))]
pub fn find_user_settings_toml() -> Option<PathBuf> {
    use etcetera::BaseStrategy;

    let config_dir = etcetera::base_strategy::choose_base_strategy()
        .ok()?
        .config_dir()
        .join("ruff");

    // Same precedence as project-level lookup: `.ruff.toml` first,
    // `pyproject.toml` last.
    [".ruff.toml", "ruff.toml", "pyproject.toml"]
        .into_iter()
        .map(|filename| config_dir.join(filename))
        .find(|candidate| candidate.is_file())
}
/// On `wasm32`, user-level configuration lookup is unsupported: always `None`.
#[cfg(target_arch = "wasm32")]
pub fn find_user_settings_toml() -> Option<PathBuf> {
    None
}
/// Load `Options` from a `pyproject.toml` or `ruff.toml` file.
///
/// When `target-version` is absent, a fallback may be derived from
/// `requires-python`: for a `pyproject.toml`, from its own `[project]`
/// table; for a `ruff.toml`, from a sibling `pyproject.toml`, but only
/// under [`TargetVersionStrategy::RequiresPythonFallback`].
pub(super) fn load_options<P: AsRef<Path>>(
    path: P,
    version_strategy: &TargetVersionStrategy,
) -> Result<Options> {
    let path = path.as_ref();
    if path.ends_with("pyproject.toml") {
        let pyproject = parse_pyproject_toml(path)?;
        let mut ruff = pyproject
            .tool
            .and_then(|tool| tool.ruff)
            .unwrap_or_default();
        // Missing `target-version` falls back to the file's own
        // `requires-python` constraint.
        if ruff.target_version.is_none() {
            if let Some(project) = pyproject.project {
                if let Some(requires_python) = project.requires_python {
                    ruff.target_version = get_minimum_supported_version(&requires_python);
                }
            }
        }
        Ok(ruff)
    } else {
        // `ruff.toml` / `.ruff.toml`: the `requires-python` source is a
        // sibling `pyproject.toml`, so the fallback is gated on
        // `version_strategy`.
        let mut ruff = parse_ruff_toml(path);
        if let Ok(ref mut ruff) = ruff {
            if ruff.target_version.is_none() {
                debug!("No `target-version` found in `ruff.toml`");
                match version_strategy {
                    TargetVersionStrategy::UseDefault => {}
                    TargetVersionStrategy::RequiresPythonFallback => {
                        if let Some(dir) = path.parent() {
                            let fallback = get_fallback_target_version(dir);
                            if let Some(version) = fallback {
                                debug!(
                                    "Derived `target-version` from `requires-python` in `pyproject.toml`: {version:?}"
                                );
                            } else {
                                debug!(
                                    "No `pyproject.toml` with `requires-python` in same directory; `target-version` unspecified"
                                );
                            }
                            ruff.target_version = fallback;
                        }
                    }
                }
            }
        }
        ruff
    }
}
/// Derive a `target-version` from the `pyproject.toml` in `dir`, if that
/// file exists and declares `requires-python`.
///
/// Parse failures are logged at debug level and treated as "no fallback".
fn get_fallback_target_version(dir: &Path) -> Option<PythonVersion> {
    let pyproject_path = dir.join("pyproject.toml");
    if !pyproject_path.exists() {
        return None;
    }
    let pyproject = match parse_pyproject_toml(&pyproject_path) {
        Ok(pyproject) => pyproject,
        Err(err) => {
            debug!("Failed to find fallback `target-version` due to: {err}");
            return None;
        }
    };
    pyproject
        .project
        .and_then(|project| project.requires_python)
        .and_then(|requires_python| get_minimum_supported_version(&requires_python))
}
/// Infer the minimum supported [`PythonVersion`] from a `requires-python`
/// specifier, considering only operators that impose a lower bound (or an
/// exact pin).
fn get_minimum_supported_version(requires_version: &VersionSpecifiers) -> Option<PythonVersion> {
    /// Keep only the major and minor components; `None` if either is absent.
    fn major_minor(version: &Version) -> Option<Version> {
        let [major, minor, ..] = version.release() else {
            return None;
        };
        Some(Version::new([major, minor]))
    }

    // Take the smallest major.minor among the bounding specifiers.
    let minimum_version = requires_version
        .iter()
        .filter(|specifier| {
            matches!(
                specifier.operator(),
                Operator::Equal
                    | Operator::EqualStar
                    | Operator::ExactEqual
                    | Operator::TildeEqual
                    | Operator::GreaterThan
                    | Operator::GreaterThanEqual
            )
        })
        .filter_map(|specifier| major_minor(specifier.version()))
        .min()?;

    debug!("Detected minimum supported `requires-python` version: {minimum_version}");

    // Map the PEP 440 version back onto a known `PythonVersion` variant.
    PythonVersion::iter().find(|version| Version::from(*version) == minimum_version)
}
/// Strategy for handling missing `target-version` in configuration.
///
/// Consumed by `load_options` when reading a `ruff.toml`.
#[derive(Debug)]
pub(super) enum TargetVersionStrategy {
    /// Use default `target-version`
    UseDefault,
    /// Derive from `requires-python` if available
    RequiresPythonFallback,
}
#[cfg(test)]
mod tests {
use std::fs;
use std::str::FromStr;
use anyhow::{Context, Result};
use rustc_hash::FxHashMap;
use tempfile::TempDir;
use ruff_linter::codes;
use ruff_linter::line_width::LineLength;
use ruff_linter::settings::types::PatternPrefixPair;
use crate::options::{Flake8BuiltinsOptions, LintCommonOptions, LintOptions, Options};
use crate::pyproject::{Pyproject, Tools, find_settings_toml, parse_pyproject_toml};
/// End-to-end deserialization coverage for the `[tool.ruff]` table,
/// including the error messages produced for out-of-range `line-length`.
#[test]
fn deserialize() -> Result<()> {
    // An empty document has no `[tool]` table at all.
    let pyproject: Pyproject = toml::from_str(r"")?;
    assert_eq!(pyproject.tool, None);
    // A `[tool]` table without `[tool.ruff]` deserializes to `ruff: None`.
    let pyproject: Pyproject = toml::from_str(
        r"
[tool.black]
",
    )?;
    assert_eq!(pyproject.tool, Some(Tools { ruff: None }));
    // An empty `[tool.ruff]` table yields default options.
    let pyproject: Pyproject = toml::from_str(
        r"
[tool.black]
[tool.ruff]
",
    )?;
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options::default())
        })
    );
    let pyproject: Pyproject = toml::from_str(
        r"
[tool.black]
[tool.ruff]
line-length = 79
",
    )?;
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options {
                line_length: Some(LineLength::try_from(79).unwrap()),
                ..Options::default()
            })
        })
    );
    let pyproject: Pyproject = toml::from_str(
        r#"
[tool.black]
[tool.ruff]
exclude = ["foo.py"]
"#,
    )?;
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options {
                exclude: Some(vec!["foo.py".to_string()]),
                ..Options::default()
            })
        })
    );
    let pyproject: Pyproject = toml::from_str(
        r#"
[tool.black]
[tool.ruff.lint]
select = ["E501"]
"#,
    )?;
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options {
                lint: Some(LintOptions {
                    common: LintCommonOptions {
                        select: Some(vec![codes::Pycodestyle::E501.into()]),
                        ..LintCommonOptions::default()
                    },
                    ..LintOptions::default()
                }),
                ..Options::default()
            })
        })
    );
    let pyproject: Pyproject = toml::from_str(
        r#"
[tool.black]
[tool.ruff.lint]
extend-select = ["RUF100"]
ignore = ["E501"]
"#,
    )?;
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options {
                lint: Some(LintOptions {
                    common: LintCommonOptions {
                        extend_select: Some(vec![codes::Ruff::_100.into()]),
                        ignore: Some(vec![codes::Pycodestyle::E501.into()]),
                        ..LintCommonOptions::default()
                    },
                    ..LintOptions::default()
                }),
                ..Options::default()
            })
        })
    );
    // Both the `builtins-`-prefixed and the bare option spellings of the
    // flake8-builtins settings deserialize side by side.
    let pyproject: Pyproject = toml::from_str(
        r#"
[tool.ruff.lint.flake8-builtins]
builtins-allowed-modules = ["asyncio"]
builtins-ignorelist = ["argparse", 'typing']
builtins-strict-checking = true
allowed-modules = ['sys']
ignorelist = ["os", 'io']
strict-checking = false
"#,
    )?;
    #[expect(deprecated)]
    let expected = Flake8BuiltinsOptions {
        builtins_allowed_modules: Some(vec!["asyncio".to_string()]),
        allowed_modules: Some(vec!["sys".to_string()]),
        builtins_ignorelist: Some(vec!["argparse".to_string(), "typing".to_string()]),
        ignorelist: Some(vec!["os".to_string(), "io".to_string()]),
        builtins_strict_checking: Some(true),
        strict_checking: Some(false),
    };
    assert_eq!(
        pyproject.tool,
        Some(Tools {
            ruff: Some(Options {
                lint: Some(LintOptions {
                    common: LintCommonOptions {
                        flake8_builtins: Some(expected.clone()),
                        ..LintCommonOptions::default()
                    },
                    ..LintOptions::default()
                }),
                ..Options::default()
            })
        })
    );
    // `into_settings` resolves the duplicate spellings; here the
    // `allowed-modules`/`ignorelist`/`strict-checking` values win.
    let settings = expected.into_settings();
    assert_eq!(settings.allowed_modules, vec!["sys".to_string()]);
    assert_eq!(
        settings.ignorelist,
        vec!["os".to_string(), "io".to_string()]
    );
    assert!(!settings.strict_checking);
    // Snake-case `line_length` is rejected (kebab-case is required).
    assert!(
        toml::from_str::<Pyproject>(
            r"
[tool.black]
[tool.ruff]
line_length = 79
",
        )
        .is_err()
    );
    // `E123` fails to deserialize as a rule selector.
    assert!(
        toml::from_str::<Pyproject>(
            r#"
[tool.black]
[tool.ruff.lint]
select = ["E123"]
"#,
        )
        .is_err()
    );
    // Unknown keys are rejected.
    assert!(
        toml::from_str::<Pyproject>(
            r"
[tool.black]
[tool.ruff]
line-length = 79
other-attribute = 1
",
        )
        .is_err()
    );
    // Out-of-range `line-length` values surface a human-readable message.
    let invalid_line_length = toml::from_str::<Pyproject>(
        r"
[tool.ruff]
line-length = 500
",
    )
    .expect_err("Deserialization should have failed for a too large line-length");
    assert_eq!(
        invalid_line_length.message(),
        "line-length must be between 1 and 320 (got 500)"
    );
    // Test value at u16::MAX boundary (65535) - should show range error
    let invalid_line_length_65535 = toml::from_str::<Pyproject>(
        r"
[tool.ruff]
line-length = 65535
",
    )
    .expect_err("Deserialization should have failed for line-length at u16::MAX");
    assert_eq!(
        invalid_line_length_65535.message(),
        "line-length must be between 1 and 320 (got 65535)"
    );
    // Test value exceeding u16::MAX (65536) - should show clear error
    let invalid_line_length_65536 = toml::from_str::<Pyproject>(
        r"
[tool.ruff]
line-length = 65536
",
    )
    .expect_err("Deserialization should have failed for line-length exceeding u16::MAX");
    assert_eq!(
        invalid_line_length_65536.message(),
        "line-length must be between 1 and 320 (got 65536)"
    );
    // Test value far exceeding u16::MAX (99_999) - should show clear error
    let invalid_line_length_99999 = toml::from_str::<Pyproject>(
        r"
[tool.ruff]
line-length = 99_999
",
    )
    .expect_err("Deserialization should have failed for line-length far exceeding u16::MAX");
    assert_eq!(
        invalid_line_length_99999.message(),
        "line-length must be between 1 and 320 (got 99999)"
    );
    // Test negative value - should show clear error
    let invalid_line_length_negative = toml::from_str::<Pyproject>(
        r"
[tool.ruff]
line-length = -5
",
    )
    .expect_err("Deserialization should have failed for negative line-length");
    assert_eq!(
        invalid_line_length_negative.message(),
        "line-length must be between 1 and 320 (got -5)"
    );
    Ok(())
}
/// Round-trip: write a `pyproject.toml` to disk, locate it via
/// `find_settings_toml`, and parse the `[tool.ruff]` table back out.
#[test]
fn find_and_parse_pyproject_toml() -> Result<()> {
    let tempdir = TempDir::new()?;
    let ruff_toml = tempdir.path().join("pyproject.toml");
    fs::write(
        ruff_toml,
        r#"
[tool.ruff]
line-length = 88
extend-exclude = [
"excluded_file.py",
"migrations",
"with_excluded_file/other_excluded_file.py",
]
[tool.ruff.lint]
per-file-ignores = { "__init__.py" = ["F401"] }
"#,
    )?;
    let pyproject =
        find_settings_toml(tempdir.path())?.context("Failed to find pyproject.toml")?;
    let pyproject = parse_pyproject_toml(pyproject)?;
    let config = pyproject
        .tool
        .context("Expected to find [tool] field")?
        .ruff
        .context("Expected to find [tool.ruff] field")?;
    assert_eq!(
        config,
        Options {
            line_length: Some(LineLength::try_from(88).unwrap()),
            extend_exclude: Some(vec![
                "excluded_file.py".to_string(),
                "migrations".to_string(),
                "with_excluded_file/other_excluded_file.py".to_string(),
            ]),
            lint: Some(LintOptions {
                common: LintCommonOptions {
                    per_file_ignores: Some(FxHashMap::from_iter([(
                        "__init__.py".to_string(),
                        vec![codes::Pyflakes::_401.into()]
                    )])),
                    ..LintCommonOptions::default()
                },
                ..LintOptions::default()
            }),
            ..Options::default()
        }
    );
    Ok(())
}
/// `PatternPrefixPair::from_str` parses `pattern:code` pairs.
#[test]
fn str_pattern_prefix_pair() {
    // Valid pairs; whitespace after the colon is tolerated, and the
    // pattern part may itself contain glob syntax.
    for ok in ["foo:E501", "foo: E501", "**/bar:E501"] {
        assert!(PatternPrefixPair::from_str(ok).is_ok(), "{ok}");
    }
    // Invalid inputs: reversed order, missing parts, extra segments, and
    // `bar:E503` (same shape as an accepted pair, so presumably the code
    // itself is unrecognized).
    for err in ["E501:foo", "E501", "foo", "foo:E501:E402", "bar:E503"] {
        assert!(PatternPrefixPair::from_str(err).is_err(), "{err}");
    }
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_workspace/src/lib.rs | crates/ruff_workspace/src/lib.rs | pub mod configuration;
pub mod options;
pub mod pyproject;
pub mod resolver;
mod settings;
pub use settings::{FileResolverSettings, FormatterSettings, Settings};
#[cfg(test)]
mod tests {
    use std::path::Path;

    /// Resolve `path` against the shared test-resource directory that lives
    /// in the sibling `ruff_linter` crate.
    pub(crate) fn test_resource_path(path: impl AsRef<Path>) -> std::path::PathBuf {
        Path::new("../ruff_linter/resources/test/").join(path)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_workspace/src/configuration.rs | crates/ruff_workspace/src/configuration.rs | //! User-provided program settings, taking into account pyproject.toml and
//! command-line options. Structure mirrors the user-facing representation of
//! the various parameters.
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::env::VarError;
use std::num::{NonZeroU8, NonZeroU16};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use anyhow::{Context, Result, anyhow};
use glob::{GlobError, Paths, PatternError, glob};
use itertools::Itertools;
use regex::Regex;
use rustc_hash::{FxHashMap, FxHashSet};
use shellexpand;
use shellexpand::LookupError;
use strum::IntoEnumIterator;
use ruff_cache::cache_dir;
use ruff_formatter::IndentStyle;
use ruff_graph::{AnalyzeSettings, Direction, StringImports};
use ruff_linter::line_width::{IndentWidth, LineLength};
use ruff_linter::registry::{INCOMPATIBLE_CODES, Rule, RuleNamespace, RuleSet};
use ruff_linter::rule_selector::{PreviewOptions, Specificity};
use ruff_linter::rules::{flake8_import_conventions, isort, pycodestyle};
use ruff_linter::settings::fix_safety_table::FixSafetyTable;
use ruff_linter::settings::rule_table::RuleTable;
use ruff_linter::settings::types::{
CompiledPerFileIgnoreList, CompiledPerFileTargetVersionList, ExtensionMapping, FilePattern,
FilePatternSet, GlobPath, OutputFormat, PerFileIgnore, PerFileTargetVersion, PreviewMode,
RequiredVersion, UnsafeFixes,
};
use ruff_linter::settings::{
DEFAULT_SELECTORS, DUMMY_VARIABLE_RGX, LinterSettings, TASK_TAGS, TargetVersion,
};
use ruff_linter::{
RUFF_PKG_VERSION, RuleSelector, fs, warn_user_once, warn_user_once_by_id,
warn_user_once_by_message,
};
use ruff_python_ast as ast;
use ruff_python_formatter::{
DocstringCode, DocstringCodeLineWidth, MagicTrailingComma, QuoteStyle,
};
use crate::options::{
AnalyzeOptions, Flake8AnnotationsOptions, Flake8BanditOptions, Flake8BooleanTrapOptions,
Flake8BugbearOptions, Flake8BuiltinsOptions, Flake8ComprehensionsOptions,
Flake8CopyrightOptions, Flake8ErrMsgOptions, Flake8GetTextOptions,
Flake8ImplicitStrConcatOptions, Flake8ImportConventionsOptions, Flake8PytestStyleOptions,
Flake8QuotesOptions, Flake8SelfOptions, Flake8TidyImportsOptions, Flake8TypeCheckingOptions,
Flake8UnusedArgumentsOptions, FormatOptions, IsortOptions, LintCommonOptions, LintOptions,
McCabeOptions, Options, Pep8NamingOptions, PyUpgradeOptions, PycodestyleOptions,
PydoclintOptions, PydocstyleOptions, PyflakesOptions, PylintOptions, RuffOptions,
};
use crate::settings::{
EXCLUDE, FileResolverSettings, FormatterSettings, INCLUDE, INCLUDE_PREVIEW, LineEnding,
Settings,
};
/// The raw rule-selection options (`select`, `ignore`, `fixable`, ...) as
/// provided by one configuration layer.
///
/// `select` and `fixable` are `Option<Vec<_>>` so that "not provided" can
/// be distinguished from "provided but empty".
#[derive(Clone, Debug, Default)]
pub struct RuleSelection {
    pub select: Option<Vec<RuleSelector>>,
    pub ignore: Vec<RuleSelector>,
    pub extend_select: Vec<RuleSelector>,
    pub fixable: Option<Vec<RuleSelector>>,
    pub unfixable: Vec<RuleSelector>,
    pub extend_fixable: Vec<RuleSelector>,
}
/// How a selector participates in rule resolution
/// (see [`RuleSelection::selectors_by_kind`]).
#[derive(Debug, Eq, PartialEq, is_macro::Is)]
pub enum RuleSelectorKind {
    /// Enables the selected rules
    Enable,
    /// Disables the selected rules
    Disable,
    /// Modifies the behavior of selected rules
    Modify,
}
impl RuleSelection {
    /// Iterate over every selector in this selection, each tagged with the
    /// [`RuleSelectorKind`] implied by the option it came from.
    ///
    /// Yield order: `select`, `fixable`, `ignore`, `extend_select`,
    /// `unfixable`, `extend_fixable`.
    pub fn selectors_by_kind(&self) -> impl Iterator<Item = (RuleSelectorKind, &RuleSelector)> {
        // `select` and `fixable` are `Option<Vec<_>>`; `iter().flatten()`
        // yields nothing when they're unset.
        self.select
            .iter()
            .flatten()
            .map(|selector| (RuleSelectorKind::Enable, selector))
            .chain(
                self.fixable
                    .iter()
                    .flatten()
                    .map(|selector| (RuleSelectorKind::Modify, selector)),
            )
            .chain(
                self.ignore
                    .iter()
                    .map(|selector| (RuleSelectorKind::Disable, selector)),
            )
            .chain(
                self.extend_select
                    .iter()
                    .map(|selector| (RuleSelectorKind::Enable, selector)),
            )
            .chain(
                self.unfixable
                    .iter()
                    .map(|selector| (RuleSelectorKind::Modify, selector)),
            )
            .chain(
                self.extend_fixable
                    .iter()
                    .map(|selector| (RuleSelectorKind::Modify, selector)),
            )
    }
}
/// The unresolved, user-facing configuration; `Option` fields record
/// whether each value was explicitly provided. Converted into [`Settings`]
/// by `Configuration::into_settings`.
#[derive(Debug, Default, Clone)]
pub struct Configuration {
    // Global options
    pub cache_dir: Option<PathBuf>,
    /// Path to another configuration file to extend, if any.
    pub extend: Option<PathBuf>,
    pub fix: Option<bool>,
    pub fix_only: Option<bool>,
    pub unsafe_fixes: Option<UnsafeFixes>,
    pub output_format: Option<OutputFormat>,
    /// Global preview toggle; lint/format/analyze may override it.
    pub preview: Option<PreviewMode>,
    /// Version constraint the running Ruff must satisfy.
    pub required_version: Option<RequiredVersion>,
    pub extension: Option<ExtensionMapping>,
    pub show_fixes: Option<bool>,
    // File resolver options
    pub exclude: Option<Vec<FilePattern>>,
    pub extend_exclude: Vec<FilePattern>,
    pub extend_include: Vec<FilePattern>,
    pub force_exclude: Option<bool>,
    pub include: Option<Vec<FilePattern>>,
    pub respect_gitignore: Option<bool>,
    // Generic python options settings
    pub builtins: Option<Vec<String>>,
    pub namespace_packages: Option<Vec<PathBuf>>,
    pub src: Option<Vec<PathBuf>>,
    pub target_version: Option<ast::PythonVersion>,
    pub per_file_target_version: Option<Vec<PerFileTargetVersion>>,
    // Global formatting options
    pub line_length: Option<LineLength>,
    pub indent_width: Option<IndentWidth>,
    /// Nested linter configuration.
    pub lint: LintConfiguration,
    /// Nested formatter configuration.
    pub format: FormatConfiguration,
    /// Nested analyze configuration.
    pub analyze: AnalyzeConfiguration,
}
impl Configuration {
pub fn into_settings(self, project_root: &Path) -> Result<Settings> {
if let Some(required_version) = &self.required_version {
let ruff_pkg_version = pep440_rs::Version::from_str(RUFF_PKG_VERSION)
.expect("RUFF_PKG_VERSION is not a valid PEP 440 version specifier");
if !required_version.contains(&ruff_pkg_version) {
return Err(anyhow!(
"Required version `{required_version}` does not match the running version `{RUFF_PKG_VERSION}`"
));
}
}
let linter_target_version = TargetVersion(self.target_version);
let target_version = self.target_version.unwrap_or_default();
let global_preview = self.preview.unwrap_or_default();
let format = self.format;
let format_defaults = FormatterSettings::default();
let quote_style = format.quote_style.unwrap_or(format_defaults.quote_style);
let format_preview = match format.preview.unwrap_or(global_preview) {
PreviewMode::Disabled => ruff_python_formatter::PreviewMode::Disabled,
PreviewMode::Enabled => ruff_python_formatter::PreviewMode::Enabled,
};
let per_file_target_version = CompiledPerFileTargetVersionList::resolve(
self.per_file_target_version.unwrap_or_default(),
)
.context("failed to resolve `per-file-target-version` table")?;
let formatter = FormatterSettings {
exclude: FilePatternSet::try_from_iter(format.exclude.unwrap_or_default())?,
extension: self.extension.clone().unwrap_or_default(),
preview: format_preview,
unresolved_target_version: target_version,
per_file_target_version: per_file_target_version.clone(),
line_width: self
.line_length
.map_or(format_defaults.line_width, |length| {
ruff_formatter::LineWidth::from(NonZeroU16::from(length))
}),
line_ending: format.line_ending.unwrap_or(format_defaults.line_ending),
indent_style: format.indent_style.unwrap_or(format_defaults.indent_style),
indent_width: self
.indent_width
.map_or(format_defaults.indent_width, |tab_size| {
ruff_formatter::IndentWidth::from(NonZeroU8::from(tab_size))
}),
quote_style,
magic_trailing_comma: format
.magic_trailing_comma
.unwrap_or(format_defaults.magic_trailing_comma),
docstring_code_format: format
.docstring_code_format
.unwrap_or(format_defaults.docstring_code_format),
docstring_code_line_width: format
.docstring_code_line_width
.unwrap_or(format_defaults.docstring_code_line_width),
};
let analyze = self.analyze;
let analyze_preview = analyze.preview.unwrap_or(global_preview);
let analyze_defaults = AnalyzeSettings::default();
let analyze = AnalyzeSettings {
exclude: FilePatternSet::try_from_iter(analyze.exclude.unwrap_or_default())?,
preview: analyze_preview,
target_version,
extension: self.extension.clone().unwrap_or_default(),
string_imports: StringImports {
enabled: analyze
.detect_string_imports
.unwrap_or(analyze_defaults.string_imports.enabled),
min_dots: analyze
.string_imports_min_dots
.unwrap_or(analyze_defaults.string_imports.min_dots),
},
include_dependencies: analyze
.include_dependencies
.unwrap_or(analyze_defaults.include_dependencies),
type_checking_imports: analyze
.type_checking_imports
.unwrap_or(analyze_defaults.type_checking_imports),
};
let lint = self.lint;
let lint_preview = lint.preview.unwrap_or(global_preview);
let line_length = self.line_length.unwrap_or_default();
let rules = lint.as_rule_table(lint_preview)?;
// LinterSettings validation
let isort = lint
.isort
.map(IsortOptions::try_into_settings)
.transpose()?
.unwrap_or_default();
let flake8_import_conventions = lint
.flake8_import_conventions
.map(Flake8ImportConventionsOptions::try_into_settings)
.transpose()?
.unwrap_or_default();
conflicting_import_settings(&isort, &flake8_import_conventions)?;
let future_annotations = lint.future_annotations.unwrap_or_default();
Ok(Settings {
cache_dir: self
.cache_dir
.clone()
.unwrap_or_else(|| cache_dir(project_root)),
fix: self.fix.unwrap_or(false),
fix_only: self.fix_only.unwrap_or(false),
unsafe_fixes: self.unsafe_fixes.unwrap_or_default(),
output_format: self.output_format.unwrap_or_default(),
show_fixes: self.show_fixes.unwrap_or(false),
file_resolver: FileResolverSettings {
exclude: FilePatternSet::try_from_iter(
self.exclude.unwrap_or_else(|| EXCLUDE.to_vec()),
)?,
extend_exclude: FilePatternSet::try_from_iter(self.extend_exclude)?,
extend_include: FilePatternSet::try_from_iter(self.extend_include)?,
force_exclude: self.force_exclude.unwrap_or(false),
include: match global_preview {
PreviewMode::Disabled => FilePatternSet::try_from_iter(
self.include.unwrap_or_else(|| INCLUDE.to_vec()),
)?,
PreviewMode::Enabled => FilePatternSet::try_from_iter(
self.include.unwrap_or_else(|| INCLUDE_PREVIEW.to_vec()),
)?,
},
respect_gitignore: self.respect_gitignore.unwrap_or(true),
project_root: project_root.to_path_buf(),
},
linter: LinterSettings {
rules,
exclude: FilePatternSet::try_from_iter(lint.exclude.unwrap_or_default())?,
extension: self.extension.unwrap_or_default(),
preview: lint_preview,
unresolved_target_version: linter_target_version,
per_file_target_version,
project_root: project_root.to_path_buf(),
allowed_confusables: lint
.allowed_confusables
.map(FxHashSet::from_iter)
.unwrap_or_default(),
builtins: self.builtins.unwrap_or_default(),
dummy_variable_rgx: lint
.dummy_variable_rgx
.unwrap_or_else(|| DUMMY_VARIABLE_RGX.clone()),
external: lint.external.unwrap_or_default(),
ignore_init_module_imports: lint.ignore_init_module_imports.unwrap_or(true),
line_length,
tab_size: self.indent_width.unwrap_or_default(),
namespace_packages: self.namespace_packages.unwrap_or_default(),
per_file_ignores: CompiledPerFileIgnoreList::resolve(
lint.per_file_ignores
.unwrap_or_default()
.into_iter()
.chain(lint.extend_per_file_ignores)
.collect(),
)?,
fix_safety: FixSafetyTable::from_rule_selectors(
&lint.extend_safe_fixes,
&lint.extend_unsafe_fixes,
&PreviewOptions {
mode: lint_preview,
require_explicit: false,
},
),
src: self
.src
.unwrap_or_else(|| vec![project_root.to_path_buf(), project_root.join("src")]),
explicit_preview_rules: lint.explicit_preview_rules.unwrap_or_default(),
task_tags: lint
.task_tags
.unwrap_or_else(|| TASK_TAGS.iter().map(ToString::to_string).collect()),
logger_objects: lint.logger_objects.unwrap_or_default(),
typing_modules: lint.typing_modules.unwrap_or_default(),
// Plugins
flake8_annotations: lint
.flake8_annotations
.map(Flake8AnnotationsOptions::into_settings)
.unwrap_or_default(),
flake8_bandit: lint
.flake8_bandit
.map(|flake8_bandit| flake8_bandit.into_settings(lint.ruff.as_ref()))
.unwrap_or_default(),
flake8_boolean_trap: lint
.flake8_boolean_trap
.map(Flake8BooleanTrapOptions::into_settings)
.unwrap_or_default(),
flake8_bugbear: lint
.flake8_bugbear
.map(Flake8BugbearOptions::into_settings)
.unwrap_or_default(),
flake8_builtins: lint
.flake8_builtins
.map(Flake8BuiltinsOptions::into_settings)
.unwrap_or_default(),
flake8_comprehensions: lint
.flake8_comprehensions
.map(Flake8ComprehensionsOptions::into_settings)
.unwrap_or_default(),
flake8_copyright: lint
.flake8_copyright
.map(Flake8CopyrightOptions::try_into_settings)
.transpose()?
.unwrap_or_default(),
flake8_errmsg: lint
.flake8_errmsg
.map(Flake8ErrMsgOptions::into_settings)
.unwrap_or_default(),
flake8_implicit_str_concat: lint
.flake8_implicit_str_concat
.map(Flake8ImplicitStrConcatOptions::into_settings)
.unwrap_or_default(),
flake8_import_conventions,
flake8_pytest_style: lint
.flake8_pytest_style
.map(Flake8PytestStyleOptions::try_into_settings)
.transpose()?
.unwrap_or_default(),
flake8_quotes: lint
.flake8_quotes
.map(Flake8QuotesOptions::into_settings)
.unwrap_or_default(),
flake8_self: lint
.flake8_self
.map(Flake8SelfOptions::into_settings)
.unwrap_or_default(),
flake8_tidy_imports: lint
.flake8_tidy_imports
.map(Flake8TidyImportsOptions::into_settings)
.unwrap_or_default(),
flake8_type_checking: lint
.flake8_type_checking
.map(Flake8TypeCheckingOptions::into_settings)
.unwrap_or_default(),
flake8_unused_arguments: lint
.flake8_unused_arguments
.map(Flake8UnusedArgumentsOptions::into_settings)
.unwrap_or_default(),
flake8_gettext: lint
.flake8_gettext
.map(Flake8GetTextOptions::into_settings)
.unwrap_or_default(),
isort,
mccabe: lint
.mccabe
.map(McCabeOptions::into_settings)
.unwrap_or_default(),
pep8_naming: lint
.pep8_naming
.map(Pep8NamingOptions::try_into_settings)
.transpose()?
.unwrap_or_default(),
pycodestyle: if let Some(pycodestyle) = lint.pycodestyle {
pycodestyle.into_settings(line_length)
} else {
pycodestyle::settings::Settings {
max_line_length: line_length,
..pycodestyle::settings::Settings::default()
}
},
pydoclint: lint
.pydoclint
.map(PydoclintOptions::into_settings)
.unwrap_or_default(),
pydocstyle: lint
.pydocstyle
.map(PydocstyleOptions::into_settings)
.unwrap_or_default(),
pyflakes: lint
.pyflakes
.map(PyflakesOptions::into_settings)
.unwrap_or_default(),
pylint: lint
.pylint
.map(PylintOptions::into_settings)
.unwrap_or_default(),
pyupgrade: lint
.pyupgrade
.map(PyUpgradeOptions::into_settings)
.unwrap_or_default(),
ruff: lint
.ruff
.map(RuffOptions::into_settings)
.unwrap_or_default(),
typing_extensions: lint.typing_extensions.unwrap_or(true),
future_annotations,
},
formatter,
analyze,
})
}
/// Convert the [`Options`] read from the given [`Path`] into a [`Configuration`].
/// If `None` is supplied for `path`, it indicates that the `Options` instance
/// was created via "inline TOML" from the `--config` flag
pub fn from_options(
options: Options,
path: Option<&Path>,
project_root: &Path,
) -> Result<Self> {
warn_about_deprecated_top_level_lint_options(&options.lint_top_level.0, path);
let lint = if let Some(mut lint) = options.lint {
lint.common = lint.common.combine(options.lint_top_level.0);
lint
} else {
LintOptions {
common: options.lint_top_level.0,
..LintOptions::default()
}
};
Ok(Self {
builtins: options.builtins,
cache_dir: options
.cache_dir
.map(|dir| {
let dir = shellexpand::full(&dir);
dir.map(|dir| fs::normalize_path_to(dir.as_ref(), project_root))
})
.transpose()
.map_err(|e| anyhow!("Invalid `cache-dir` value: {e}"))?,
exclude: options.exclude.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = GlobPath::normalize(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
}),
extend: options
.extend
.map(|extend| {
let extend = shellexpand::full(&extend);
extend.map(|extend| PathBuf::from(extend.as_ref()))
})
.transpose()
.map_err(|e| anyhow!("Invalid `extend` value: {e}"))?,
extend_exclude: options
.extend_exclude
.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = GlobPath::normalize(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
})
.unwrap_or_default(),
extend_include: options
.extend_include
.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = GlobPath::normalize(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
})
.unwrap_or_default(),
include: options.include.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = GlobPath::normalize(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
}),
fix: options.fix,
fix_only: options.fix_only,
unsafe_fixes: options.unsafe_fixes.map(UnsafeFixes::from),
output_format: options.output_format,
force_exclude: options.force_exclude,
line_length: options.line_length,
indent_width: options.indent_width,
namespace_packages: options
.namespace_packages
.map(|namespace_package| resolve_src(&namespace_package, project_root))
.transpose()?,
preview: options.preview.map(PreviewMode::from),
required_version: options.required_version,
respect_gitignore: options.respect_gitignore,
show_fixes: options.show_fixes,
src: options
.src
.map(|src| resolve_src(&src, project_root))
.transpose()?,
target_version: options.target_version.map(ast::PythonVersion::from),
per_file_target_version: options.per_file_target_version.map(|versions| {
versions
.into_iter()
.map(|(pattern, version)| {
PerFileTargetVersion::new(
pattern,
ast::PythonVersion::from(version),
Some(project_root),
)
})
.collect()
}),
// `--extension` is a hidden command-line argument that isn't supported in configuration
// files at present.
extension: None,
lint: LintConfiguration::from_options(lint, project_root)?,
format: FormatConfiguration::from_options(
options.format.unwrap_or_default(),
project_root,
)?,
analyze: AnalyzeConfiguration::from_options(
options.analyze.unwrap_or_default(),
project_root,
)?,
})
}
/// Merge `self` with a lower-precedence `config`.
///
/// Scalar options from `self` win; `config` only supplies values `self`
/// left unset. `extend-exclude` and `extend-include` are additive, with
/// `config`'s patterns first. The nested `lint`, `format`, and `analyze`
/// configurations are merged recursively via their own `combine` methods.
#[must_use]
pub fn combine(self, config: Self) -> Self {
    // Destructure the lower-precedence configuration up front so every field
    // is consumed exactly once — the compiler will flag any field we forget
    // to merge if the struct grows.
    let Self {
        builtins,
        cache_dir,
        exclude,
        extend,
        extend_exclude,
        extend_include,
        include,
        fix,
        fix_only,
        unsafe_fixes,
        output_format,
        force_exclude,
        line_length,
        indent_width,
        namespace_packages,
        required_version,
        respect_gitignore,
        show_fixes,
        src,
        target_version,
        per_file_target_version,
        preview,
        extension,
        lint,
        format,
        analyze,
    } = config;

    Self {
        builtins: self.builtins.or(builtins),
        cache_dir: self.cache_dir.or(cache_dir),
        exclude: self.exclude.or(exclude),
        extend: self.extend.or(extend),
        extend_exclude: extend_exclude
            .into_iter()
            .chain(self.extend_exclude)
            .collect(),
        extend_include: extend_include
            .into_iter()
            .chain(self.extend_include)
            .collect(),
        include: self.include.or(include),
        fix: self.fix.or(fix),
        fix_only: self.fix_only.or(fix_only),
        unsafe_fixes: self.unsafe_fixes.or(unsafe_fixes),
        output_format: self.output_format.or(output_format),
        force_exclude: self.force_exclude.or(force_exclude),
        line_length: self.line_length.or(line_length),
        indent_width: self.indent_width.or(indent_width),
        namespace_packages: self.namespace_packages.or(namespace_packages),
        required_version: self.required_version.or(required_version),
        respect_gitignore: self.respect_gitignore.or(respect_gitignore),
        show_fixes: self.show_fixes.or(show_fixes),
        src: self.src.or(src),
        target_version: self.target_version.or(target_version),
        per_file_target_version: self.per_file_target_version.or(per_file_target_version),
        preview: self.preview.or(preview),
        extension: self.extension.or(extension),
        lint: self.lint.combine(lint),
        format: self.format.combine(format),
        analyze: self.analyze.combine(analyze),
    }
}
}
/// Intermediate, partially-resolved lint configuration: each field is the
/// optional value gathered from one or more `Options` sources before being
/// compiled into the final linter settings.
#[derive(Clone, Debug, Default)]
pub struct LintConfiguration {
    /// File patterns to exclude from linting.
    pub exclude: Option<Vec<FilePattern>>,
    /// Whether preview (unstable) lint behavior is enabled.
    pub preview: Option<PreviewMode>,
    // Rule selection
    /// Additional per-file ignores; chained onto `per_file_ignores` when the
    /// settings are compiled (additive rather than replacing).
    pub extend_per_file_ignores: Vec<PerFileIgnore>,
    /// Mappings from file pattern to rules ignored in matching files.
    pub per_file_ignores: Option<Vec<PerFileIgnore>>,
    /// Accumulated select/ignore/fixable selections, one entry per source.
    pub rule_selections: Vec<RuleSelection>,
    /// Whether preview rules must be selected explicitly (no prefix expansion).
    pub explicit_preview_rules: Option<bool>,
    // Fix configuration
    /// Rules whose fixes should be demoted to unsafe.
    pub extend_unsafe_fixes: Vec<RuleSelector>,
    /// Rules whose fixes should be promoted to safe.
    pub extend_safe_fixes: Vec<RuleSelector>,
    // Global lint settings
    /// Characters allowed despite being visually confusable.
    pub allowed_confusables: Option<Vec<char>>,
    /// Regex identifying "dummy" (intentionally unused) variable names.
    pub dummy_variable_rgx: Option<Regex>,
    /// Rule codes external to Ruff (e.g. from other tools) to tolerate in
    /// `noqa` directives — presumably; confirm against the `external` option docs.
    pub external: Option<Vec<String>>,
    /// Deprecated option; a warning is emitted when it is set.
    pub ignore_init_module_imports: Option<bool>,
    /// Objects to treat as logger instances.
    pub logger_objects: Option<Vec<String>>,
    /// Tags recognized as task markers in comments (defaults to `TASK_TAGS`).
    pub task_tags: Option<Vec<String>>,
    /// Modules to treat as equivalent to `typing`.
    pub typing_modules: Option<Vec<String>>,
    /// Whether `typing_extensions` is assumed to be available (defaults to true).
    pub typing_extensions: Option<bool>,
    /// Whether `from __future__ import annotations` behavior is assumed/enabled.
    pub future_annotations: Option<bool>,
    // Plugins
    pub flake8_annotations: Option<Flake8AnnotationsOptions>,
    pub flake8_bandit: Option<Flake8BanditOptions>,
    pub flake8_boolean_trap: Option<Flake8BooleanTrapOptions>,
    pub flake8_bugbear: Option<Flake8BugbearOptions>,
    pub flake8_builtins: Option<Flake8BuiltinsOptions>,
    pub flake8_comprehensions: Option<Flake8ComprehensionsOptions>,
    pub flake8_copyright: Option<Flake8CopyrightOptions>,
    pub flake8_errmsg: Option<Flake8ErrMsgOptions>,
    pub flake8_gettext: Option<Flake8GetTextOptions>,
    pub flake8_implicit_str_concat: Option<Flake8ImplicitStrConcatOptions>,
    pub flake8_import_conventions: Option<Flake8ImportConventionsOptions>,
    pub flake8_pytest_style: Option<Flake8PytestStyleOptions>,
    pub flake8_quotes: Option<Flake8QuotesOptions>,
    pub flake8_self: Option<Flake8SelfOptions>,
    pub flake8_tidy_imports: Option<Flake8TidyImportsOptions>,
    pub flake8_type_checking: Option<Flake8TypeCheckingOptions>,
    pub flake8_unused_arguments: Option<Flake8UnusedArgumentsOptions>,
    pub isort: Option<IsortOptions>,
    pub mccabe: Option<McCabeOptions>,
    pub pep8_naming: Option<Pep8NamingOptions>,
    pub pycodestyle: Option<PycodestyleOptions>,
    pub pydoclint: Option<PydoclintOptions>,
    pub pydocstyle: Option<PydocstyleOptions>,
    pub pyflakes: Option<PyflakesOptions>,
    pub pylint: Option<PylintOptions>,
    pub pyupgrade: Option<PyUpgradeOptions>,
    pub ruff: Option<RuffOptions>,
}
impl LintConfiguration {
fn from_options(options: LintOptions, project_root: &Path) -> Result<Self> {
#[expect(deprecated)]
let ignore = options
.common
.ignore
.into_iter()
.flatten()
.chain(options.common.extend_ignore.into_iter().flatten())
.collect();
#[expect(deprecated)]
let unfixable = options
.common
.unfixable
.into_iter()
.flatten()
.chain(options.common.extend_unfixable.into_iter().flatten())
.collect();
#[expect(deprecated)]
let ignore_init_module_imports = {
if options.common.ignore_init_module_imports.is_some() {
warn_user_once!(
"The `ignore-init-module-imports` option is deprecated and will be removed in a future release. Ruff's handling of imports in `__init__.py` files has been improved (in preview) and unused imports will always be flagged."
);
}
options.common.ignore_init_module_imports
};
Ok(LintConfiguration {
exclude: options.exclude.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = GlobPath::normalize(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
}),
preview: options.preview.map(PreviewMode::from),
rule_selections: vec![RuleSelection {
select: options.common.select,
ignore,
extend_select: options.common.extend_select.unwrap_or_default(),
fixable: options.common.fixable,
unfixable,
extend_fixable: options.common.extend_fixable.unwrap_or_default(),
}],
extend_safe_fixes: options.common.extend_safe_fixes.unwrap_or_default(),
extend_unsafe_fixes: options.common.extend_unsafe_fixes.unwrap_or_default(),
allowed_confusables: options.common.allowed_confusables,
dummy_variable_rgx: options
.common
.dummy_variable_rgx
.map(|pattern| Regex::new(&pattern))
.transpose()
.map_err(|e| anyhow!("Invalid `dummy-variable-rgx` value: {e}"))?,
extend_per_file_ignores: options
.common
.extend_per_file_ignores
.map(|per_file_ignores| {
per_file_ignores
.into_iter()
.map(|(pattern, prefixes)| {
PerFileIgnore::new(pattern, &prefixes, Some(project_root))
})
.collect()
})
.unwrap_or_default(),
external: options.common.external,
ignore_init_module_imports,
explicit_preview_rules: options.common.explicit_preview_rules,
per_file_ignores: options.common.per_file_ignores.map(|per_file_ignores| {
per_file_ignores
.into_iter()
.map(|(pattern, prefixes)| {
PerFileIgnore::new(pattern, &prefixes, Some(project_root))
})
.collect()
}),
task_tags: options.common.task_tags,
logger_objects: options.common.logger_objects,
typing_modules: options.common.typing_modules,
typing_extensions: options.typing_extensions,
future_annotations: options.future_annotations,
// Plugins
flake8_annotations: options.common.flake8_annotations,
flake8_bandit: options.common.flake8_bandit,
flake8_boolean_trap: options.common.flake8_boolean_trap,
flake8_bugbear: options.common.flake8_bugbear,
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
use regex::Regex;
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use serde::de::{self};
use serde::{Deserialize, Deserializer, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use strum::IntoEnumIterator;
use unicode_normalization::UnicodeNormalization;
use crate::settings::LineEnding;
use ruff_formatter::IndentStyle;
use ruff_graph::Direction;
use ruff_linter::line_width::{IndentWidth, LineLength};
use ruff_linter::rules::flake8_import_conventions::settings::BannedAliases;
use ruff_linter::rules::flake8_pytest_style::settings::SettingsError;
use ruff_linter::rules::flake8_pytest_style::types;
use ruff_linter::rules::flake8_quotes::settings::Quote;
use ruff_linter::rules::flake8_tidy_imports::settings::{ApiBan, Strictness};
use ruff_linter::rules::isort::settings::RelativeImportsOrder;
use ruff_linter::rules::isort::{ImportSection, ImportType};
use ruff_linter::rules::pep8_naming::settings::IgnoreNames;
use ruff_linter::rules::pydocstyle::settings::Convention;
use ruff_linter::rules::pylint::settings::ConstantType;
use ruff_linter::rules::{
flake8_copyright, flake8_errmsg, flake8_gettext, flake8_implicit_str_concat,
flake8_import_conventions, flake8_pytest_style, flake8_quotes, flake8_self,
flake8_tidy_imports, flake8_type_checking, flake8_unused_arguments, isort, mccabe, pep8_naming,
pycodestyle, pydoclint, pydocstyle, pyflakes, pylint, pyupgrade, ruff,
};
use ruff_linter::settings::types::{
IdentifierPattern, OutputFormat, PythonVersion, RequiredVersion,
};
use ruff_linter::{RuleSelector, warn_user_once};
use ruff_macros::{CombineOptions, OptionsMetadata};
use ruff_options_metadata::{OptionsMetadata, Visit};
use ruff_python_ast::name::Name;
use ruff_python_formatter::{DocstringCodeLineWidth, QuoteStyle};
use ruff_python_semantic::NameImports;
use ruff_python_stdlib::identifiers::is_identifier;
#[derive(Clone, Debug, PartialEq, Eq, Default, OptionsMetadata, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Options {
/// A path to the cache directory.
///
/// By default, Ruff stores cache results in a `.ruff_cache` directory in
/// the current project root.
///
/// However, Ruff will also respect the `RUFF_CACHE_DIR` environment
/// variable, which takes precedence over that default.
///
/// This setting will override even the `RUFF_CACHE_DIR` environment
/// variable, if set.
#[option(
default = r#"".ruff_cache""#,
value_type = "str",
example = r#"cache-dir = "~/.cache/ruff""#
)]
pub cache_dir: Option<String>,
/// A path to a local `pyproject.toml` or `ruff.toml` file to merge into this
/// configuration. User home directory and environment variables will be
/// expanded.
///
/// To resolve the current configuration file, Ruff will first load
/// this base configuration file, then merge in properties defined
/// in the current configuration file. Most settings follow simple override
/// behavior where the child value replaces the parent value. However,
/// rule selection (`lint.select` and `lint.ignore`) has special merging
/// behavior: if the child configuration specifies `lint.select`, it
/// establishes a new baseline rule set and the parent's `lint.ignore`
/// rules are discarded; if the child configuration omits `lint.select`,
/// the parent's rule selection is inherited and both parent and child
/// `lint.ignore` rules are accumulated together.
#[option(
default = r#"null"#,
value_type = "str",
example = r#"
# Extend the `pyproject.toml` file in the parent directory.
extend = "../pyproject.toml"
# But use a different line length.
line-length = 100
"#
)]
pub extend: Option<String>,
/// The style in which violation messages should be formatted: `"full"` (default)
/// (shows source), `"concise"`, `"grouped"` (group messages by file), `"json"`
/// (machine-readable), `"junit"` (machine-readable XML), `"github"` (GitHub
/// Actions annotations), `"gitlab"` (GitLab CI code quality report),
/// `"pylint"` (Pylint text format) or `"azure"` (Azure Pipeline logging commands).
#[option(
default = r#""full""#,
value_type = r#""full" | "concise" | "grouped" | "json" | "junit" | "github" | "gitlab" | "pylint" | "azure""#,
example = r#"
# Group violations by containing file.
output-format = "grouped"
"#
)]
pub output_format: Option<OutputFormat>,
/// Enable fix behavior by-default when running `ruff` (overridden
/// by the `--fix` and `--no-fix` command-line flags).
/// Only includes automatic fixes unless `--unsafe-fixes` is provided.
#[option(default = "false", value_type = "bool", example = "fix = true")]
pub fix: Option<bool>,
/// Enable application of unsafe fixes.
/// If excluded, a hint will be displayed when unsafe fixes are available.
/// If set to false, the hint will be hidden.
#[option(
default = r#"null"#,
value_type = "bool",
example = "unsafe-fixes = true"
)]
pub unsafe_fixes: Option<bool>,
/// Like [`fix`](#fix), but disables reporting on leftover violation. Implies [`fix`](#fix).
#[option(default = "false", value_type = "bool", example = "fix-only = true")]
pub fix_only: Option<bool>,
/// Whether to show an enumeration of all fixed lint violations
/// (overridden by the `--show-fixes` command-line flag).
#[option(
default = "false",
value_type = "bool",
example = r#"
# Enumerate all fixed violations.
show-fixes = true
"#
)]
pub show_fixes: Option<bool>,
/// Enforce a requirement on the version of Ruff, to enforce at runtime.
/// If the version of Ruff does not meet the requirement, Ruff will exit
/// with an error.
///
/// Useful for unifying results across many environments, e.g., with a
/// `pyproject.toml` file.
///
/// Accepts a [PEP 440](https://peps.python.org/pep-0440/) specifier, like `==0.3.1` or `>=0.3.1`.
#[option(
default = "null",
value_type = "str",
example = r#"
required-version = ">=0.0.193"
"#
)]
pub required_version: Option<RequiredVersion>,
/// Whether to enable preview mode. When preview mode is enabled, Ruff will
/// use unstable rules, fixes, and formatting.
#[option(
default = "false",
value_type = "bool",
example = r#"
# Enable preview features.
preview = true
"#
)]
pub preview: Option<bool>,
// File resolver options
/// A list of file patterns to exclude from formatting and linting.
///
/// Exclusions are based on globs, and can be either:
///
/// - Single-path patterns, like `.mypy_cache` (to exclude any directory
/// named `.mypy_cache` in the tree), `foo.py` (to exclude any file named
/// `foo.py`), or `foo_*.py` (to exclude any file matching `foo_*.py` ).
/// - Relative patterns, like `directory/foo.py` (to exclude that specific
/// file) or `directory/*.py` (to exclude any Python files in
/// `directory`). Note that these paths are relative to the project root
/// (e.g., the directory containing your `pyproject.toml`).
///
/// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
///
/// Note that you'll typically want to use
/// [`extend-exclude`](#extend-exclude) to modify the excluded paths.
#[option(
default = r#"[".bzr", ".direnv", ".eggs", ".git", ".git-rewrite", ".hg", ".mypy_cache", ".nox", ".pants.d", ".pytype", ".ruff_cache", ".svn", ".tox", ".venv", "__pypackages__", "_build", "buck-out", "dist", "node_modules", "venv"]"#,
value_type = "list[str]",
example = r#"
exclude = [".venv"]
"#
)]
pub exclude: Option<Vec<String>>,
/// A list of file patterns to omit from formatting and linting, in addition to those
/// specified by [`exclude`](#exclude).
///
/// Exclusions are based on globs, and can be either:
///
/// - Single-path patterns, like `.mypy_cache` (to exclude any directory
/// named `.mypy_cache` in the tree), `foo.py` (to exclude any file named
/// `foo.py`), or `foo_*.py` (to exclude any file matching `foo_*.py` ).
/// - Relative patterns, like `directory/foo.py` (to exclude that specific
/// file) or `directory/*.py` (to exclude any Python files in
/// `directory`). Note that these paths are relative to the project root
/// (e.g., the directory containing your `pyproject.toml`).
///
/// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
#[option(
default = "[]",
value_type = "list[str]",
example = r#"
# In addition to the standard set of exclusions, omit all tests, plus a specific file.
extend-exclude = ["tests", "src/bad.py"]
"#
)]
pub extend_exclude: Option<Vec<String>>,
/// A list of file patterns to include when linting, in addition to those
/// specified by [`include`](#include).
///
/// Inclusion are based on globs, and should be single-path patterns, like
/// `*.pyw`, to include any file with the `.pyw` extension.
///
/// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
#[option(
default = "[]",
value_type = "list[str]",
example = r#"
# In addition to the standard set of inclusions, include `.pyw` files.
extend-include = ["*.pyw"]
"#
)]
pub extend_include: Option<Vec<String>>,
/// Whether to enforce [`exclude`](#exclude) and [`extend-exclude`](#extend-exclude) patterns,
/// even for paths that are passed to Ruff explicitly. Typically, Ruff will lint
/// any paths passed in directly, even if they would typically be
/// excluded. Setting `force-exclude = true` will cause Ruff to
/// respect these exclusions unequivocally.
///
/// This is useful for [`pre-commit`](https://pre-commit.com/), which explicitly passes all
/// changed files to the [`ruff-pre-commit`](https://github.com/astral-sh/ruff-pre-commit)
/// plugin, regardless of whether they're marked as excluded by Ruff's own
/// settings.
#[option(
default = r#"false"#,
value_type = "bool",
example = r#"
force-exclude = true
"#
)]
pub force_exclude: Option<bool>,
/// A list of file patterns to include when linting.
///
/// Inclusion are based on globs, and should be single-path patterns, like
/// `*.pyw`, to include any file with the `.pyw` extension. `pyproject.toml` is
/// included here not for configuration but because we lint whether e.g. the
/// `[project]` matches the schema.
///
/// Notebook files (`.ipynb` extension) are included by default on Ruff 0.6.0+.
///
/// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
#[option(
default = r#"["*.py", "*.pyi", "*.pyw", "*.ipynb", "**/pyproject.toml"]"#,
value_type = "list[str]",
example = r#"
include = ["*.py"]
"#
)]
pub include: Option<Vec<String>>,
/// Whether to automatically exclude files that are ignored by `.ignore`,
/// `.gitignore`, `.git/info/exclude`, and global `gitignore` files.
/// Enabled by default.
#[option(
default = "true",
value_type = "bool",
example = r#"
respect-gitignore = false
"#
)]
pub respect_gitignore: Option<bool>,
// Generic python options
/// A list of builtins to treat as defined references, in addition to the
/// system builtins.
#[option(
default = r#"[]"#,
value_type = "list[str]",
example = r#"
builtins = ["_"]
"#
)]
pub builtins: Option<Vec<String>>,
/// Mark the specified directories as namespace packages. For the purpose of
/// module resolution, Ruff will treat those directories and all their subdirectories
/// as if they contained an `__init__.py` file.
#[option(
default = r#"[]"#,
value_type = "list[str]",
example = r#"
namespace-packages = ["airflow/providers"]
"#
)]
pub namespace_packages: Option<Vec<String>>,
/// The minimum Python version to target, e.g., when considering automatic
/// code upgrades, like rewriting type annotations. Ruff will not propose
/// changes using features that are not available in the given version.
///
/// For example, to represent supporting Python >=3.11 or ==3.11
/// specify `target-version = "py311"`.
///
/// If you're already using a `pyproject.toml` file, we recommend
/// `project.requires-python` instead, as it's based on Python packaging
/// standards, and will be respected by other tools. For example, Ruff
/// treats the following as identical to `target-version = "py38"`:
///
/// ```toml
/// [project]
/// requires-python = ">=3.8"
/// ```
///
/// If both are specified, `target-version` takes precedence over
/// `requires-python`. See [_Inferring the Python version_](https://docs.astral.sh/ruff/configuration/#inferring-the-python-version)
/// for a complete description of how the `target-version` is determined
/// when left unspecified.
///
/// Note that a stub file can [sometimes make use of a typing feature](https://typing.python.org/en/latest/spec/distributing.html#syntax)
/// before it is available at runtime, as long as the stub does not make
/// use of new *syntax*. For example, a type checker will understand
/// `int | str` in a stub as being a `Union` type annotation, even if the
/// type checker is run using Python 3.9, despite the fact that the `|`
/// operator can only be used to create union types at runtime on Python
/// 3.10+. As such, Ruff will often recommend newer features in a stub
/// file than it would for an equivalent runtime file with the same target
/// version.
#[option(
default = r#""py310""#,
value_type = r#""py37" | "py38" | "py39" | "py310" | "py311" | "py312" | "py313" | "py314""#,
example = r#"
# Always generate Python 3.7-compatible code.
target-version = "py37"
"#
)]
pub target_version: Option<PythonVersion>,
/// A list of mappings from glob-style file pattern to Python version to use when checking the
/// corresponding file(s).
///
/// This may be useful for overriding the global Python version settings in `target-version` or
/// `requires-python` for a subset of files. For example, if you have a project with a minimum
/// supported Python version of 3.9 but a subdirectory of developer scripts that want to use a
/// newer feature like the `match` statement from Python 3.10, you can use
/// `per-file-target-version` to specify `"developer_scripts/*.py" = "py310"`.
///
/// This setting is used by the linter to enforce any enabled version-specific lint rules, as
/// well as by the formatter for any version-specific formatting options, such as parenthesizing
/// context managers on Python 3.10+.
#[option(
default = "{}",
value_type = "dict[str, PythonVersion]",
scope = "per-file-target-version",
example = r#"
# Override the project-wide Python version for a developer scripts directory:
"scripts/*.py" = "py312"
"#
)]
pub per_file_target_version: Option<FxHashMap<String, PythonVersion>>,
/// The directories to consider when resolving first- vs. third-party
/// imports.
///
/// When omitted, the `src` directory will typically default to including both:
///
/// 1. The directory containing the nearest `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (the "project root").
/// 2. The `"src"` subdirectory of the project root.
///
/// These defaults ensure that Ruff supports both flat layouts and `src` layouts out-of-the-box.
/// (If a configuration file is explicitly provided (e.g., via the `--config` command-line
/// flag), the current working directory will be considered the project root.)
///
/// As an example, consider an alternative project structure, like:
///
/// ```text
/// my_project
/// ├── pyproject.toml
/// └── lib
/// └── my_package
/// ├── __init__.py
/// ├── foo.py
/// └── bar.py
/// ```
///
/// In this case, the `./lib` directory should be included in the `src` option
/// (e.g., `src = ["lib"]`), such that when resolving imports, `my_package.foo`
/// is considered first-party.
///
/// This field supports globs. For example, if you have a series of Python
/// packages in a `python_modules` directory, `src = ["python_modules/*"]`
/// would expand to incorporate all packages in that directory. User home
/// directory and environment variables will also be expanded.
#[option(
default = r#"[".", "src"]"#,
value_type = "list[str]",
example = r#"
# Allow imports relative to the "src" and "test" directories.
src = ["src", "test"]
"#
)]
pub src: Option<Vec<String>>,
// Global Formatting options
/// The line length to use when enforcing long-lines violations (like `E501`)
/// and at which `isort` and the formatter prefers to wrap lines.
///
/// The length is determined by the number of characters per line, except for lines containing East Asian characters or emojis.
/// For these lines, the [unicode width](https://unicode.org/reports/tr11/) of each character is added up to determine the length.
///
/// The value must be greater than `0` and less than or equal to `320`.
///
/// Note: While the formatter will attempt to format lines such that they remain
/// within the `line-length`, it isn't a hard upper bound, and formatted lines may
/// exceed the `line-length`.
///
/// See [`pycodestyle.max-line-length`](#lint_pycodestyle_max-line-length) to configure different lengths for `E501` and the formatter.
#[option(
default = "88",
value_type = "int",
example = r#"
# Allow lines to be as long as 120.
line-length = 120
"#
)]
pub line_length: Option<LineLength>,
/// The number of spaces per indentation level (tab).
///
/// Used by the formatter and when enforcing long-line violations (like `E501`) to determine the visual
/// width of a tab.
///
/// This option changes the number of spaces the formatter inserts when
/// using soft-tabs (`indent-style = space`).
///
/// PEP 8 recommends using 4 spaces per [indentation level](https://peps.python.org/pep-0008/#indentation).
#[option(
default = "4",
value_type = "int",
example = r#"
indent-width = 2
"#
)]
pub indent_width: Option<IndentWidth>,
#[option_group]
pub lint: Option<LintOptions>,
/// The lint sections specified at the top level.
#[serde(flatten)]
pub lint_top_level: DeprecatedTopLevelLintOptions,
/// Options to configure code formatting.
#[option_group]
pub format: Option<FormatOptions>,
/// Options to configure import map generation.
#[option_group]
pub analyze: Option<AnalyzeOptions>,
}
/// Configures how Ruff checks your code.
///
/// Options specified in the `lint` section take precedence over the deprecated top-level settings.
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, PartialEq, Eq, Default, OptionsMetadata, Serialize, Deserialize)]
#[serde(
    from = "LintOptionsWire",
    deny_unknown_fields,
    rename_all = "kebab-case"
)]
#[cfg_attr(feature = "schemars", schemars(!from))]
pub struct LintOptions {
    // Legacy lint options shared with the deprecated top-level settings; flattened so
    // they deserialize directly under `[lint]`.
    #[serde(flatten)]
    pub common: LintCommonOptions,
    /// A list of file patterns to exclude from linting in addition to the files excluded globally (see [`exclude`](#exclude), and [`extend-exclude`](#extend-exclude)).
    ///
    /// Exclusions are based on globs, and can be either:
    ///
    /// - Single-path patterns, like `.mypy_cache` (to exclude any directory
    ///   named `.mypy_cache` in the tree), `foo.py` (to exclude any file named
    ///   `foo.py`), or `foo_*.py` (to exclude any file matching `foo_*.py` ).
    /// - Relative patterns, like `directory/foo.py` (to exclude that specific
    ///   file) or `directory/*.py` (to exclude any Python files in
    ///   `directory`). Note that these paths are relative to the project root
    ///   (e.g., the directory containing your `pyproject.toml`).
    ///
    /// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
    #[option(
        default = r#"[]"#,
        value_type = "list[str]",
        example = r#"
            exclude = ["generated"]
        "#
    )]
    pub exclude: Option<Vec<String>>,
    /// Options for the `pydoclint` plugin.
    #[option_group]
    pub pydoclint: Option<PydoclintOptions>,
    /// Options for the `ruff` plugin.
    #[option_group]
    pub ruff: Option<RuffOptions>,
    /// Whether to enable preview mode. When preview mode is enabled, Ruff will
    /// use unstable rules and fixes.
    #[option(
        default = "false",
        value_type = "bool",
        example = r#"
            # Enable preview features.
            preview = true
        "#
    )]
    pub preview: Option<bool>,
    /// Whether to allow imports from the third-party `typing_extensions` module for Python versions
    /// before a symbol was added to the first-party `typing` module.
    ///
    /// Many rules try to import symbols from the `typing` module but fall back to
    /// `typing_extensions` for earlier versions of Python. This option can be used to disable this
    /// fallback behavior in cases where `typing_extensions` is not installed.
    #[option(
        default = "true",
        value_type = "bool",
        example = r#"
            # Disable `typing_extensions` imports
            typing-extensions = false
        "#
    )]
    pub typing_extensions: Option<bool>,
    /// Whether to allow rules to add `from __future__ import annotations` in cases where this would
    /// simplify a fix or enable a new diagnostic.
    ///
    /// For example, `TC001`, `TC002`, and `TC003` can move more imports into `TYPE_CHECKING` blocks
    /// if `__future__` annotations are enabled.
    ///
    #[option(
        default = "false",
        value_type = "bool",
        example = r#"
            # Enable `from __future__ import annotations` imports
            future-annotations = true
        "#
    )]
    pub future_annotations: Option<bool>,
}
/// Newtype wrapper for [`LintCommonOptions`] that allows customizing the JSON schema and omitting the fields from the [`OptionsMetadata`].
///
/// Serialized transparently, so the deprecated top-level lint settings continue to
/// deserialize exactly as the wrapped [`LintCommonOptions`] would.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(transparent)]
pub struct DeprecatedTopLevelLintOptions(pub LintCommonOptions);
impl OptionsMetadata for DeprecatedTopLevelLintOptions {
    /// Record no options: the deprecated top-level fields are deliberately hidden
    /// from the generated options documentation.
    fn record(_visit: &mut dyn Visit) {
        // Intentionally empty. Omit all fields from the documentation and instead promote the options under the `lint.` section.
        // This doesn't create an empty 'common' option because the field in the `Options` struct is marked with `#[serde(flatten)]`.
        // Meaning, the code here flattens no-properties into the parent, which is what we want.
    }
}
#[cfg(feature = "schemars")]
impl schemars::JsonSchema for DeprecatedTopLevelLintOptions {
    /// The schema's display name.
    fn schema_name() -> std::borrow::Cow<'static, str> {
        std::borrow::Cow::Borrowed("DeprecatedTopLevelLintOptions")
    }

    /// A unique, module-qualified schema identifier.
    fn schema_id() -> std::borrow::Cow<'static, str> {
        std::borrow::Cow::Borrowed(concat!(module_path!(), "::DeprecatedTopLevelLintOptions"))
    }

    /// Reuse the [`LintCommonOptions`] schema, but mark every property as
    /// `deprecated` so editors surface the top-level settings accordingly.
    fn json_schema(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {
        use serde_json::Value;

        let mut schema = LintCommonOptions::json_schema(generator);
        let properties = schema
            .ensure_object()
            .get_mut("properties")
            .and_then(Value::as_object_mut);
        if let Some(properties) = properties {
            for entry in properties.values_mut() {
                let Ok(property_schema) = <&mut schemars::Schema>::try_from(entry) else {
                    continue;
                };
                property_schema
                    .ensure_object()
                    .insert("deprecated".to_string(), Value::Bool(true));
            }
        }
        schema
    }
}
// Note: This struct should be inlined into [`LintOptions`] once support for the top-level lint settings
// is removed.
// Don't add any new options to this struct. Add them to [`LintOptions`] directly to avoid exposing them in the
// global settings.
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(
Clone, Debug, PartialEq, Eq, Default, OptionsMetadata, CombineOptions, Serialize, Deserialize,
)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
pub struct LintCommonOptions {
// WARNING: Don't add new options to this type. Add them to `LintOptions` instead.
/// A list of allowed "confusable" Unicode characters to ignore when
/// enforcing `RUF001`, `RUF002`, and `RUF003`.
#[option(
default = r#"[]"#,
value_type = "list[str]",
example = r#"
# Allow minus-sign (U+2212), greek-small-letter-rho (U+03C1), and the asterisk-operator (U+2217),
# which could be confused for "-", "p", and "*", respectively.
allowed-confusables = ["−", "ρ", "∗"]
"#
)]
pub allowed_confusables: Option<Vec<char>>,
/// A regular expression used to identify "dummy" variables, or those which
/// should be ignored when enforcing (e.g.) unused-variable rules. The
/// default expression matches `_`, `__`, and `_var`, but not `_var_`.
#[option(
default = r#""^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$""#,
value_type = "str",
example = r#"
# Only ignore variables named "_".
dummy-variable-rgx = "^_$"
"#
)]
pub dummy_variable_rgx: Option<String>,
/// A list of rule codes or prefixes to ignore, in addition to those
/// specified by `ignore`.
#[option(
default = "[]",
value_type = "list[RuleSelector]",
example = r#"
# Skip unused variable rules (`F841`).
extend-ignore = ["F841"]
"#
)]
#[deprecated(
note = "The `extend-ignore` option is now interchangeable with [`ignore`](#lint_ignore). Please update your configuration to use the [`ignore`](#lint_ignore) option instead."
)]
pub extend_ignore: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes to enable, in addition to those
/// specified by [`select`](#lint_select).
#[option(
default = "[]",
value_type = "list[RuleSelector]",
example = r#"
# On top of the default `select` (`E4`, E7`, `E9`, and `F`), enable flake8-bugbear (`B`) and flake8-quotes (`Q`).
extend-select = ["B", "Q"]
"#
)]
pub extend_select: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes to consider fixable, in addition to those
/// specified by [`fixable`](#lint_fixable).
#[option(
default = r#"[]"#,
value_type = "list[RuleSelector]",
example = r#"
# Enable fix for flake8-bugbear (`B`), on top of any rules specified by `fixable`.
extend-fixable = ["B"]
"#
)]
pub extend_fixable: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes to consider non-auto-fixable, in addition to those
/// specified by [`unfixable`](#lint_unfixable).
#[deprecated(
note = "The `extend-unfixable` option is now interchangeable with [`unfixable`](#lint_unfixable). Please update your configuration to use the `unfixable` option instead."
)]
pub extend_unfixable: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes that are unsupported by Ruff, but should be
/// preserved when (e.g.) validating `# noqa` directives. Useful for
/// retaining `# noqa` directives that cover plugins not yet implemented
/// by Ruff.
#[option(
default = "[]",
value_type = "list[str]",
example = r#"
# Avoiding flagging (and removing) any codes starting with `V` from any
# `# noqa` directives, despite Ruff's lack of support for `vulture`.
external = ["V"]
"#
)]
pub external: Option<Vec<String>>,
/// A list of rule codes or prefixes to consider fixable. By default,
/// all rules are considered fixable.
#[option(
default = r#"["ALL"]"#,
value_type = "list[RuleSelector]",
example = r#"
# Only allow fix behavior for `E` and `F` rules.
fixable = ["E", "F"]
"#
)]
pub fixable: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes to ignore. Prefixes can specify exact
/// rules (like `F841`), entire categories (like `F`), or anything in
/// between.
///
/// When breaking ties between enabled and disabled rules (via `select` and
/// `ignore`, respectively), more specific prefixes override less
/// specific prefixes. `ignore` takes precedence over `select` if the same
/// prefix appears in both.
#[option(
default = "[]",
value_type = "list[RuleSelector]",
example = r#"
# Skip unused variable rules (`F841`).
ignore = ["F841"]
"#
)]
pub ignore: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes for which unsafe fixes should be considered
/// safe.
#[option(
default = "[]",
value_type = "list[RuleSelector]",
example = r#"
# Allow applying all unsafe fixes in the `E` rules and `F401` without the `--unsafe-fixes` flag
extend-safe-fixes = ["E", "F401"]
"#
)]
pub extend_safe_fixes: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes for which safe fixes should be considered
/// unsafe.
#[option(
default = "[]",
value_type = "list[RuleSelector]",
example = r#"
# Require the `--unsafe-fixes` flag when fixing the `E` rules and `F401`
extend-unsafe-fixes = ["E", "F401"]
"#
)]
pub extend_unsafe_fixes: Option<Vec<RuleSelector>>,
/// Avoid automatically removing unused imports in `__init__.py` files. Such
/// imports will still be flagged, but with a dedicated message suggesting
/// that the import is either added to the module's `__all__` symbol, or
/// re-exported with a redundant alias (e.g., `import os as os`).
///
/// This option is enabled by default, but you can opt-in to removal of imports
/// via an unsafe fix.
#[option(
default = "true",
value_type = "bool",
example = r#"
ignore-init-module-imports = false
"#
)]
#[deprecated(
since = "0.4.4",
note = "`ignore-init-module-imports` will be removed in a future version because F401 now recommends appropriate fixes for unused imports in `__init__.py` (currently in preview mode). See documentation for more information and please update your configuration."
)]
pub ignore_init_module_imports: Option<bool>,
/// A list of objects that should be treated equivalently to a
/// `logging.Logger` object.
///
/// This is useful for ensuring proper diagnostics (e.g., to identify
/// `logging` deprecations and other best-practices) for projects that
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_workspace/src/resolver.rs | crates/ruff_workspace/src/resolver.rs | //! Discover Python files, and their corresponding [`Settings`], from the
//! filesystem.
use std::cmp::Ordering;
use std::collections::BTreeSet;
use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use anyhow::{Context, Result};
use anyhow::{anyhow, bail};
use globset::{Candidate, GlobSet};
use ignore::{DirEntry, Error, ParallelVisitor, WalkBuilder, WalkState};
use itertools::Itertools;
use log::debug;
use matchit::{InsertError, Match, Router};
use path_absolutize::path_dedot;
use path_slash::PathExt;
use rustc_hash::{FxHashMap, FxHashSet};
use ruff_linter::fs;
use ruff_linter::package::PackageRoot;
use ruff_linter::packaging::is_package;
use crate::configuration::Configuration;
use crate::pyproject::{TargetVersionStrategy, settings_toml};
use crate::settings::Settings;
use crate::{FileResolverSettings, pyproject};
/// The configuration information from a `pyproject.toml` file.
#[derive(Debug)]
pub struct PyprojectConfig {
    /// The strategy used to discover the relevant `pyproject.toml` file for
    /// each Python file.
    pub strategy: PyprojectDiscoveryStrategy,
    /// All settings from the `pyproject.toml` file. Also serves as the
    /// fallback when no scoped settings match a path (see [`Resolver::resolve`]).
    pub settings: Settings,
    /// Absolute path to the `pyproject.toml` file. This would be `None` when
    /// either using the default settings or the `--isolated` flag is set.
    pub path: Option<PathBuf>,
}
impl PyprojectConfig {
    /// Construct a [`PyprojectConfig`], normalizing the configuration file path
    /// (when present) to an absolute form.
    pub fn new(
        strategy: PyprojectDiscoveryStrategy,
        settings: Settings,
        path: Option<PathBuf>,
    ) -> Self {
        let path = path.map(fs::normalize_path);
        Self {
            strategy,
            settings,
            path,
        }
    }
}
/// The strategy used to discover the relevant `pyproject.toml` file for each
/// Python file.
///
/// See [`Resolver::resolve`] for how the strategy is applied.
#[derive(Debug, Copy, Clone)]
pub enum PyprojectDiscoveryStrategy {
    /// Use a fixed `pyproject.toml` file for all Python files (i.e., one
    /// provided on the command-line).
    Fixed,
    /// Use the closest `pyproject.toml` file in the filesystem hierarchy, or
    /// the default settings.
    Hierarchical,
}
impl PyprojectDiscoveryStrategy {
    /// Returns `true` if a single, fixed configuration file is used for all files.
    #[inline]
    pub const fn is_fixed(self) -> bool {
        match self {
            PyprojectDiscoveryStrategy::Fixed => true,
            PyprojectDiscoveryStrategy::Hierarchical => false,
        }
    }

    /// Returns `true` if the closest configuration file is discovered per file.
    #[inline]
    pub const fn is_hierarchical(self) -> bool {
        match self {
            PyprojectDiscoveryStrategy::Hierarchical => true,
            PyprojectDiscoveryStrategy::Fixed => false,
        }
    }
}
/// The strategy for resolving file paths in a `pyproject.toml`.
///
/// Derived from [`ConfigurationOrigin`] via its `From` implementation.
#[derive(Copy, Clone)]
pub enum Relativity {
    /// Resolve file paths relative to the current working directory.
    Cwd,
    /// Resolve file paths relative to the directory containing the
    /// `pyproject.toml`.
    Parent,
}
impl Relativity {
    /// Return the directory against which relative paths in the given
    /// configuration file should be resolved.
    pub fn resolve(self, path: &Path) -> &Path {
        match self {
            Relativity::Cwd => &path_dedot::CWD,
            Relativity::Parent => path
                .parent()
                .expect("Expected pyproject.toml file to be in parent directory"),
        }
    }
}
/// Resolves the [`Settings`] that apply to a given file path.
#[derive(Debug)]
pub struct Resolver<'a> {
    /// The root configuration, used as the fallback when no scoped settings match.
    pyproject_config: &'a PyprojectConfig,
    /// All [`Settings`] that have been added to the resolver.
    settings: Vec<Settings>,
    /// A router from path to index into the `settings` vector.
    router: Router<usize>,
}
impl<'a> Resolver<'a> {
    /// Create a new [`Resolver`] for the given [`PyprojectConfig`].
    pub fn new(pyproject_config: &'a PyprojectConfig) -> Self {
        Self {
            pyproject_config,
            settings: Vec::new(),
            router: Router::new(),
        }
    }

    /// Return the base [`Settings`] from the [`PyprojectConfig`], used when no
    /// scoped settings match a path.
    #[inline]
    pub fn base_settings(&self) -> &Settings {
        &self.pyproject_config.settings
    }

    /// Return `true` if the [`Resolver`] is using a hierarchical discovery strategy.
    #[inline]
    pub fn is_hierarchical(&self) -> bool {
        self.pyproject_config.strategy.is_hierarchical()
    }

    /// Return `true` if the [`Resolver`] should force-exclude files passed directly to the CLI.
    #[inline]
    pub fn force_exclude(&self) -> bool {
        self.pyproject_config.settings.file_resolver.force_exclude
    }

    /// Return `true` if the [`Resolver`] should respect `.gitignore` files.
    #[inline]
    pub fn respect_gitignore(&self) -> bool {
        self.pyproject_config
            .settings
            .file_resolver
            .respect_gitignore
    }

    /// Add a resolved [`Settings`] under a given [`PathBuf`] scope.
    fn add(&mut self, path: &Path, settings: Settings) {
        self.settings.push(settings);

        // Normalize the path to use `/` separators and escape the '{' and '}' characters,
        // which matchit uses for routing parameters.
        let path = path.to_slash_lossy().replace('{', "{{").replace('}', "}}");

        // Route any file *under* the directory to the new settings. On a conflict,
        // a configuration was already registered for this scope; keep the first one.
        match self
            .router
            .insert(format!("{path}/{{*filepath}}"), self.settings.len() - 1)
        {
            Ok(()) => {}
            Err(InsertError::Conflict { .. }) => {
                return;
            }
            Err(_) => unreachable!("file paths are escaped before being inserted in the router"),
        }

        // Insert a mapping that matches the directory itself (without a trailing slash).
        // Inserting should always succeed because conflicts are resolved above and the above insertion guarantees
        // that the path is correctly escaped.
        self.router.insert(path, self.settings.len() - 1).unwrap();
    }

    /// Return the appropriate [`Settings`] for a given [`Path`].
    pub fn resolve(&self, path: &Path) -> &Settings {
        match self.pyproject_config.strategy {
            PyprojectDiscoveryStrategy::Fixed => &self.pyproject_config.settings,
            PyprojectDiscoveryStrategy::Hierarchical => self
                .router
                .at(path.to_slash_lossy().as_ref())
                .map(|Match { value, .. }| &self.settings[*value])
                .unwrap_or(&self.pyproject_config.settings),
        }
    }

    /// Return a mapping from Python package to its package root.
    pub fn package_roots(
        &'a self,
        files: &[&'a Path],
    ) -> FxHashMap<&'a Path, Option<PackageRoot<'a>>> {
        // Pre-populate the module cache, since the list of files could (but isn't
        // required to) contain some `__init__.py` files.
        let mut package_cache: FxHashMap<&Path, bool> = FxHashMap::default();
        for file in files {
            if file.ends_with("__init__.py") {
                if let Some(parent) = file.parent() {
                    package_cache.insert(parent, true);
                }
            }
        }

        // Determine whether any of the settings require namespace packages. If not, we can save
        // a lookup for every file.
        let has_namespace_packages = self
            .settings()
            .any(|settings| !settings.linter.namespace_packages.is_empty());

        // Search for the package root for each file.
        let mut package_roots: FxHashMap<&Path, Option<PackageRoot<'_>>> = FxHashMap::default();
        for file in files {
            if let Some(package) = file.parent() {
                package_roots.entry(package).or_insert_with(|| {
                    let namespace_packages = if has_namespace_packages {
                        self.resolve(file).linter.namespace_packages.as_slice()
                    } else {
                        &[]
                    };
                    detect_package_root_with_cache(package, namespace_packages, &mut package_cache)
                        .map(|path| PackageRoot::Root { path })
                });
            }
        }

        // Discard any nested roots.
        //
        // For example, if `./foo/__init__.py` is a root, and then `./foo/bar` is empty, and
        // `./foo/bar/baz/__init__.py` was detected as a root, we should only consider
        // `./foo/__init__.py`.
        //
        // Iterating the candidate roots in sorted (`BTreeSet`) order guarantees that a
        // parent root is registered in the router before any of its descendants are
        // checked. (The previously-collected `non_roots` set was never read, so it has
        // been removed.)
        let mut router: Router<&Path> = Router::new();
        for root in package_roots
            .values()
            .flatten()
            .copied()
            .map(PackageRoot::path)
            .collect::<BTreeSet<_>>()
        {
            // Normalize the path to use `/` separators and escape the '{' and '}' characters,
            // which matchit uses for routing parameters.
            let path = root.to_slash_lossy().replace('{', "{{").replace('}', "}}");
            if let Ok(matched) = router.at_mut(&path) {
                debug!(
                    "Ignoring nested package root: {} (under {})",
                    root.display(),
                    matched.value.display()
                );
                package_roots.insert(root, Some(PackageRoot::nested(root)));
            } else {
                let _ = router.insert(format!("{path}/{{*filepath}}"), root);
            }
        }

        package_roots
    }

    /// Return an iterator over the resolved [`Settings`] in this [`Resolver`],
    /// starting with the base settings.
    pub fn settings(&self) -> impl Iterator<Item = &Settings> {
        std::iter::once(&self.pyproject_config.settings).chain(&self.settings)
    }
}
/// A wrapper around `detect_package_root` to cache filesystem lookups.
///
/// Walks up from `path`: the unbroken run of package directories starting at
/// `path` itself determines the root, which is the shallowest directory in
/// that run (`None` if `path` is not itself a package directory).
fn detect_package_root_with_cache<'a>(
    path: &'a Path,
    namespace_packages: &[PathBuf],
    package_cache: &mut FxHashMap<&'a Path, bool>,
) -> Option<&'a Path> {
    // `ancestors()` yields `path` first, then each parent up to the filesystem
    // root; the last element of the leading package-directory run is the root.
    path.ancestors()
        .take_while(|&ancestor| is_package_with_cache(ancestor, namespace_packages, package_cache))
        .last()
}
/// A wrapper around `is_package` to cache filesystem lookups.
fn is_package_with_cache<'a>(
    path: &'a Path,
    namespace_packages: &[PathBuf],
    package_cache: &mut FxHashMap<&'a Path, bool>,
) -> bool {
    if let Some(&cached) = package_cache.get(&path) {
        return cached;
    }
    let result = is_package(path, namespace_packages);
    package_cache.insert(path, result);
    result
}
/// Applies a transformation to a [`Configuration`].
///
/// Used to override options with the values provided by the CLI.
pub trait ConfigurationTransformer {
    /// Return a new [`Configuration`] with the transformation applied.
    fn transform(&self, config: Configuration) -> Configuration;
}
/// Recursively resolve a [`Configuration`] from a `pyproject.toml` file at the
/// specified [`Path`].
///
/// Follows the `extend` chain from file to file (detecting cycles), combines the
/// collected configurations in order, and applies the CLI `transformer` last.
// TODO(charlie): This whole system could do with some caching. Right now, if a
// configuration file extends another in the same path, we'll re-parse the same
// file at least twice (possibly more than twice, since we'll also parse it when
// resolving the "default" configuration).
pub fn resolve_configuration(
    pyproject: &Path,
    transformer: &dyn ConfigurationTransformer,
    origin: ConfigurationOrigin,
) -> Result<Configuration> {
    let relativity = Relativity::from(origin);

    // Visited configurations, in `extend` order; the key set doubles as the
    // cycle detector.
    let mut configurations = indexmap::IndexMap::new();
    let mut next = Some(fs::normalize_path(pyproject));
    while let Some(path) = next {
        if configurations.contains_key(&path) {
            bail!(format!(
                "Circular configuration detected: {chain}",
                chain = configurations
                    .keys()
                    .chain([&path])
                    .map(|p| format!("`{}`", p.display()))
                    .join(" extends "),
            ));
        }

        // Resolve the current path.
        let version_strategy =
            if configurations.is_empty() && matches!(origin, ConfigurationOrigin::Ancestor) {
                // For configurations that are discovered by
                // walking back from a file, we will attempt to
                // infer the `target-version` if it is missing
                TargetVersionStrategy::RequiresPythonFallback
            } else {
                // In all other cases (e.g. for configurations
                // inherited via `extend`, or user-level settings)
                // we do not attempt to infer a missing `target-version`
                TargetVersionStrategy::UseDefault
            };
        let options = pyproject::load_options(&path, &version_strategy).with_context(|| {
            if configurations.is_empty() {
                format!(
                    "Failed to load configuration `{path}`",
                    path = path.display()
                )
            } else {
                // Include the full `extends` chain in the error for context.
                let chain = configurations
                    .keys()
                    .chain([&path])
                    .map(|p| format!("`{}`", p.display()))
                    .join(" extends ");
                format!(
                    "Failed to load extended configuration `{path}` ({chain})",
                    path = path.display()
                )
            }
        })?;

        let project_root = relativity.resolve(&path);
        let configuration = Configuration::from_options(options, Some(&path), project_root)?;

        // If extending, continue to collect. Relative `extend` paths are resolved
        // against the directory containing the current configuration file.
        next = configuration.extend.as_ref().map(|extend| {
            fs::normalize_path_to(
                extend,
                path.parent()
                    .expect("Expected pyproject.toml file to be in parent directory"),
            )
        });

        // Keep track of (1) the paths we've already resolved (to avoid cycles), and (2)
        // the base configuration for every path.
        configurations.insert(path, configuration);
    }

    // Merge the configurations, in order. The loop above ran at least once, so
    // there is always a first configuration to start from.
    let mut configurations = configurations.into_values();
    let mut configuration = configurations.next().unwrap();
    for extend in configurations {
        configuration = configuration.combine(extend);
    }
    Ok(transformer.transform(configuration))
}
/// Extract the project root (scope) and [`Settings`] from a given
/// `pyproject.toml`.
fn resolve_scoped_settings<'a>(
    pyproject: &'a Path,
    transformer: &dyn ConfigurationTransformer,
    origin: ConfigurationOrigin,
) -> Result<(&'a Path, Settings)> {
    // The origin determines how paths in the configuration are made relative.
    let relativity = Relativity::from(origin);
    let configuration = resolve_configuration(pyproject, transformer, origin)?;
    let scope = relativity.resolve(pyproject);
    Ok((scope, configuration.into_settings(scope)?))
}
/// Extract the [`Settings`] from a given `pyproject.toml` and process the
/// configuration with the given [`ConfigurationTransformer`].
pub fn resolve_root_settings(
    pyproject: &Path,
    transformer: &dyn ConfigurationTransformer,
    origin: ConfigurationOrigin,
) -> Result<Settings> {
    // The project-root half of the pair is irrelevant here.
    resolve_scoped_settings(pyproject, transformer, origin).map(|(_root, settings)| settings)
}
/// How the configuration is provided.
///
/// Determines how relative paths inside the configuration are resolved
/// (see the `From<ConfigurationOrigin> for Relativity` implementation).
#[derive(Debug, Clone, Copy)]
pub enum ConfigurationOrigin {
    /// Origin is unknown to the caller
    Unknown,
    /// User specified path to specific configuration file
    UserSpecified,
    /// User-level configuration (e.g. in `~/.config/ruff/pyproject.toml`)
    UserSettings,
    /// In parent or higher ancestor directory of path
    Ancestor,
}
impl From<ConfigurationOrigin> for Relativity {
    /// Map each configuration origin to the base directory used for resolving
    /// relative paths in that configuration.
    fn from(value: ConfigurationOrigin) -> Self {
        match value {
            ConfigurationOrigin::Unknown | ConfigurationOrigin::Ancestor => Self::Parent,
            ConfigurationOrigin::UserSpecified | ConfigurationOrigin::UserSettings => Self::Cwd,
        }
    }
}
/// Find all Python (`.py`, `.pyi`, `.pyw`, and `.ipynb` files) in a set of paths.
///
/// Returns the (deduplicated) resolved files along with the [`Resolver`] that
/// accumulated every configuration discovered during the walk.
pub fn python_files_in_path<'a>(
    paths: &[PathBuf],
    pyproject_config: &'a PyprojectConfig,
    transformer: &(dyn ConfigurationTransformer + Sync),
) -> Result<(Vec<Result<ResolvedFile, ignore::Error>>, Resolver<'a>)> {
    // Normalize every path (e.g., convert from relative to absolute).
    let mut paths: Vec<PathBuf> = paths.iter().map(fs::normalize_path).unique().collect();

    // Search for `pyproject.toml` files in all parent directories.
    let mut resolver = Resolver::new(pyproject_config);
    let mut seen = FxHashSet::default();

    // Insert the path to the root configuration to avoid parsing the configuration a second time.
    if let Some(config_path) = &pyproject_config.path {
        seen.insert(config_path.parent().unwrap());
    }

    if resolver.is_hierarchical() {
        // For each input path, walk up its ancestors until a configuration file
        // (or an already-visited directory) is found.
        for path in &paths {
            for ancestor in path.ancestors() {
                if seen.insert(ancestor) {
                    if let Some(pyproject) = settings_toml(ancestor)? {
                        let (root, settings) = resolve_scoped_settings(
                            &pyproject,
                            transformer,
                            ConfigurationOrigin::Ancestor,
                        )?;
                        resolver.add(root, settings);
                        // We found the closest configuration.
                        break;
                    }
                } else {
                    // We already visited this ancestor, we can stop here.
                    break;
                }
            }
        }
    }

    // Check if the paths themselves are excluded.
    if resolver.force_exclude() {
        paths.retain(|path| !is_file_excluded(path, &resolver));
        if paths.is_empty() {
            return Ok((vec![], resolver));
        }
    }

    let (first_path, rest_paths) = paths
        .split_first()
        .ok_or_else(|| anyhow!("Expected at least one path to search for Python files"))?;

    // Create the `WalkBuilder`.
    let mut builder = WalkBuilder::new(first_path);
    if let Ok(cwd) = std::env::current_dir() {
        builder.current_dir(cwd);
    }

    for path in rest_paths {
        builder.add(path);
    }

    builder.standard_filters(resolver.respect_gitignore());
    builder.hidden(false);

    // Use the available parallelism, falling back to a single thread if it
    // can't be determined, and capped at 12 threads.
    builder.threads(
        std::thread::available_parallelism()
            .map_or(1, std::num::NonZeroUsize::get)
            .min(12),
    );
    let walker = builder.build_parallel();

    // Run the `WalkParallel` to collect all Python files.
    let state = WalkPythonFilesState::new(resolver);
    let mut visitor = PythonFilesVisitorBuilder::new(transformer, &state);
    walker.visit(&mut visitor);

    state.finish()
}
/// The files gathered during a walk, with per-entry walk errors preserved.
type ResolvedFiles = Vec<Result<ResolvedFile, ignore::Error>>;

/// State shared across all parallel walker threads.
struct WalkPythonFilesState<'config> {
    /// Whether settings are discovered per directory during the walk.
    is_hierarchical: bool,
    /// The merged results: collected files plus the first fatal error reported.
    merged: std::sync::Mutex<(ResolvedFiles, Result<()>)>,
    /// The resolver, extended as new configuration files are discovered.
    resolver: RwLock<Resolver<'config>>,
}
impl<'config> WalkPythonFilesState<'config> {
    /// Seed the shared state with the root [`Resolver`].
    fn new(resolver: Resolver<'config>) -> Self {
        let is_hierarchical = resolver.is_hierarchical();
        Self {
            is_hierarchical,
            merged: std::sync::Mutex::new((Vec::new(), Ok(()))),
            resolver: RwLock::new(resolver),
        }
    }

    /// Tear down the shared state, returning the deduplicated files together
    /// with the final [`Resolver`], or the first error reported by any thread.
    fn finish(self) -> Result<(Vec<Result<ResolvedFile, ignore::Error>>, Resolver<'config>)> {
        let (files, outcome) = self.merged.into_inner().unwrap();
        outcome?;
        let resolver = self.resolver.into_inner().unwrap();
        Ok((deduplicate_files(files), resolver))
    }
}
/// Deduplicate files by path, prioritizing `Root` files over `Nested` files.
///
/// When the same path appears both as a directly specified input (`Root`)
/// and via directory traversal (`Nested`), keep the `Root` entry and drop
/// the `Nested` entry.
///
/// Dropping the root entry means that the explicitly passed path may be
/// unintentionally ignored, since it is treated as nested and can be excluded
/// despite being requested.
///
/// Concretely, with `lint.exclude = ["foo.py"]` and `ruff check . foo.py`,
/// we must keep `Root(foo.py)` and drop `Nested(foo.py)` so `foo.py` is
/// linted as the user requested.
fn deduplicate_files(mut files: ResolvedFiles) -> ResolvedFiles {
    // Sort by path; for identical paths, prefer Root over Nested; place errors after files
    files.sort_by(|lhs, rhs| match (lhs, rhs) {
        (Ok(lhs), Ok(rhs)) => lhs.cmp(rhs),
        (Ok(_), Err(_)) => Ordering::Less,
        (Err(_), Ok(_)) => Ordering::Greater,
        (Err(_), Err(_)) => Ordering::Equal,
    });
    // Collapse runs of entries sharing a path into their first element (the
    // `Root` one, given the ordering above); errors are never merged.
    files.dedup_by(|current, kept| {
        matches!((current, kept), (Ok(lhs), Ok(rhs)) if lhs.path() == rhs.path())
    });
    files
}
/// Builds one [`PythonFilesVisitor`] per walker thread.
struct PythonFilesVisitorBuilder<'s, 'config> {
    /// Shared state into which each per-thread visitor merges its results.
    state: &'s WalkPythonFilesState<'config>,
    /// Applies CLI overrides to any configuration discovered during the walk.
    transformer: &'s (dyn ConfigurationTransformer + Sync),
}
impl<'s, 'config> PythonFilesVisitorBuilder<'s, 'config> {
    /// Bundle the shared walk state and the CLI transformer for visitor construction.
    fn new(
        transformer: &'s (dyn ConfigurationTransformer + Sync),
        state: &'s WalkPythonFilesState<'config>,
    ) -> Self {
        Self { transformer, state }
    }
}
/// Per-thread visitor: accumulates files and errors locally, merging them into
/// the shared state when dropped.
struct PythonFilesVisitor<'s, 'config> {
    /// Files resolved by this thread.
    local_files: Vec<Result<ResolvedFile, ignore::Error>>,
    /// The first fatal error this thread encountered (quits the walk).
    local_error: Result<()>,
    /// Shared state for merging results across threads.
    global: &'s WalkPythonFilesState<'config>,
    /// Applies CLI overrides to discovered configurations.
    transformer: &'s (dyn ConfigurationTransformer + Sync),
}
impl<'config, 's> ignore::ParallelVisitorBuilder<'s> for PythonFilesVisitorBuilder<'s, 'config>
where
    'config: 's,
{
    /// Create one visitor per walker thread; its results are merged into the
    /// shared state when the visitor is dropped.
    fn build(&mut self) -> Box<dyn ignore::ParallelVisitor + 's> {
        Box::new(PythonFilesVisitor {
            local_files: Vec::new(),
            local_error: Ok(()),
            transformer: self.transformer,
            global: self.state,
        })
    }
}
impl ParallelVisitor for PythonFilesVisitor<'_, '_> {
    fn visit(&mut self, result: std::result::Result<DirEntry, Error>) -> WalkState {
        // Respect our own exclusion behavior.
        // Depth 0 is a path passed directly on the command line; those are not
        // excluded here (force-exclusion of inputs is handled by the caller).
        if let Ok(entry) = &result {
            if entry.depth() > 0 {
                let path = entry.path();
                let resolver = self.global.resolver.read().unwrap();
                let settings = resolver.resolve(path);
                if let Some(file_name) = path.file_name() {
                    let file_path = Candidate::new(path);
                    let file_basename = Candidate::new(file_name);
                    if match_candidate_exclusion(
                        &file_path,
                        &file_basename,
                        &settings.file_resolver.exclude,
                    ) {
                        debug!("Ignored path via `exclude`: {path:?}");
                        return WalkState::Skip;
                    } else if match_candidate_exclusion(
                        &file_path,
                        &file_basename,
                        &settings.file_resolver.extend_exclude,
                    ) {
                        debug!("Ignored path via `extend-exclude`: {path:?}");
                        return WalkState::Skip;
                    }
                } else {
                    // Paths without a final component (e.g. `..`) can't be matched.
                    debug!("Ignored path due to error in parsing: {path:?}");
                    return WalkState::Skip;
                }
            }
        }

        // Search for the `pyproject.toml` file in this directory, before we visit any
        // of its contents.
        if self.global.is_hierarchical {
            if let Ok(entry) = &result {
                if entry
                    .file_type()
                    .is_some_and(|file_type| file_type.is_dir())
                {
                    match settings_toml(entry.path()) {
                        Ok(Some(pyproject)) => match resolve_scoped_settings(
                            &pyproject,
                            self.transformer,
                            ConfigurationOrigin::Ancestor,
                        ) {
                            Ok((root, settings)) => {
                                self.global.resolver.write().unwrap().add(root, settings);
                            }
                            Err(err) => {
                                // A broken configuration aborts the entire walk.
                                self.local_error = Err(err);
                                return WalkState::Quit;
                            }
                        },
                        Ok(None) => {}
                        Err(err) => {
                            self.local_error = Err(err);
                            return WalkState::Quit;
                        }
                    }
                }
            }
        }

        match result {
            Ok(entry) => {
                // Ignore directories
                let resolved = if entry.file_type().is_none_or(|ft| ft.is_dir()) {
                    None
                } else if entry.depth() == 0 {
                    // Accept all files that are passed-in directly.
                    Some(ResolvedFile::Root(entry.into_path()))
                } else {
                    // Otherwise, check if the file is included.
                    let path = entry.path();
                    let resolver = self.global.resolver.read().unwrap();
                    let settings = resolver.resolve(path);
                    if settings.file_resolver.include.is_match(path) {
                        debug!("Included path via `include`: {path:?}");
                        Some(ResolvedFile::Nested(entry.into_path()))
                    } else if settings.file_resolver.extend_include.is_match(path) {
                        debug!("Included path via `extend-include`: {path:?}");
                        Some(ResolvedFile::Nested(entry.into_path()))
                    } else {
                        None
                    }
                };
                if let Some(resolved) = resolved {
                    self.local_files.push(Ok(resolved));
                }
            }
            Err(err) => {
                // Surface walk errors (e.g. unreadable entries) to the caller
                // alongside the resolved files.
                self.local_files.push(Err(err));
            }
        }
        WalkState::Continue
    }
}
impl Drop for PythonFilesVisitor<'_, '_> {
    /// Merge this visitor's thread-local results into the shared state when
    /// the walker tears the visitor down.
    fn drop(&mut self) {
        let mut merged = self.global.merged.lock().unwrap();
        let (files, error) = &mut *merged;
        if files.is_empty() {
            // First visitor to finish: move the whole buffer without copying.
            *files = std::mem::take(&mut self.local_files);
        } else {
            files.append(&mut self.local_files);
        }
        // Only the first error reported across all visitors is kept.
        let local_error = std::mem::replace(&mut self.local_error, Ok(()));
        if error.is_ok() {
            *error = local_error;
        }
    }
}
/// A discovered file, tagged with how the resolver reached it.
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ResolvedFile {
    /// File explicitly passed to the CLI
    Root(PathBuf),
    /// File in a sub-directory
    Nested(PathBuf),
}
impl ResolvedFile {
    /// Consume `self`, returning the underlying path.
    pub fn into_path(self) -> PathBuf {
        let (ResolvedFile::Root(path) | ResolvedFile::Nested(path)) = self;
        path
    }
    /// Borrow the underlying path.
    pub fn path(&self) -> &Path {
        match self {
            ResolvedFile::Root(path) | ResolvedFile::Nested(path) => path.as_path(),
        }
    }
    /// The final component of the path, or the whole path if it has none.
    pub fn file_name(&self) -> &OsStr {
        let path = self.path();
        match path.file_name() {
            Some(name) => name,
            None => path.as_os_str(),
        }
    }
    /// Whether this file was passed directly on the command line.
    pub fn is_root(&self) -> bool {
        match self {
            ResolvedFile::Root(_) => true,
            ResolvedFile::Nested(_) => false,
        }
    }
}
/// Return `true` if the Python file at [`Path`] is _not_ excluded.
///
/// # Errors
/// Returns an error if a discovered settings file fails to load or resolve.
pub fn python_file_at_path(
    path: &Path,
    resolver: &mut Resolver,
    transformer: &dyn ConfigurationTransformer,
) -> Result<bool> {
    // Normalize the path (e.g., convert from relative to absolute).
    let path = fs::normalize_path(path);
    // Search for `pyproject.toml` files in all parent directories.
    if resolver.is_hierarchical() {
        for ancestor in path.ancestors() {
            if let Some(pyproject) = settings_toml(ancestor)? {
                let (root, settings) =
                    resolve_scoped_settings(&pyproject, transformer, ConfigurationOrigin::Unknown)?;
                resolver.add(root, settings);
                // Only the closest settings file is applied.
                break;
            }
        }
    }
    // Check exclusions.
    Ok(!is_file_excluded(&path, resolver))
}
/// Return `true` if the given top-level [`Path`] should be excluded.
///
/// Walks from `path` up through its ancestors, matching each level against
/// the `exclude` and `extend-exclude` settings resolved for that level.
fn is_file_excluded(path: &Path, resolver: &Resolver) -> bool {
    // TODO(charlie): Respect gitignore.
    for path in path.ancestors() {
        let settings = resolver.resolve(path);
        if let Some(file_name) = path.file_name() {
            let file_path = Candidate::new(path);
            let file_basename = Candidate::new(file_name);
            if match_candidate_exclusion(
                &file_path,
                &file_basename,
                &settings.file_resolver.exclude,
            ) {
                debug!("Ignored path via `exclude`: {path:?}");
                return true;
            } else if match_candidate_exclusion(
                &file_path,
                &file_basename,
                &settings.file_resolver.extend_exclude,
            ) {
                debug!("Ignored path via `extend-exclude`: {path:?}");
                return true;
            }
        } else {
            // No final component left (e.g. the filesystem root): stop.
            break;
        }
        if path == settings.file_resolver.project_root {
            // Bail out; we'd end up past the project root on the next iteration
            // (excludes etc. are thus "rooted" to the project).
            break;
        }
    }
    false
}
/// Return `true` if the given file should be ignored based on the exclusion
/// criteria.
///
/// Convenience wrapper around [`match_candidate_exclusion`] that builds the
/// [`Candidate`] values from plain paths.
#[inline]
pub fn match_exclusion<P: AsRef<Path>, R: AsRef<Path>>(
    file_path: P,
    file_basename: R,
    exclusion: &GlobSet,
) -> bool {
    let path_candidate = Candidate::new(file_path.as_ref());
    let basename_candidate = Candidate::new(file_basename.as_ref());
    match_candidate_exclusion(&path_candidate, &basename_candidate, exclusion)
}
/// Return `true` if the given candidates should be ignored based on the exclusion
/// criteria.
///
/// An empty glob set excludes nothing; otherwise either the full path or the
/// basename matching any glob counts as excluded.
pub fn match_candidate_exclusion(
    file_path: &Candidate,
    file_basename: &Candidate,
    exclusion: &GlobSet,
) -> bool {
    !exclusion.is_empty()
        && (exclusion.is_match_candidate(file_path) || exclusion.is_match_candidate(file_basename))
}
/// The setting that caused a path to be excluded.
#[derive(Debug, Copy, Clone)]
pub enum ExclusionKind {
    /// The exclusion came from the `exclude` setting.
    Exclude,
    /// The exclusion came from the `extend-exclude` setting.
    ExtendExclude,
    /// The exclusion came from the `lint.exclude` setting.
    LintExclude,
    /// The exclusion came from the `lint.extend-exclude` setting.
    // NOTE(review): this variant is named `FormatExclude` but is documented and
    // rendered as `lint.extend-exclude`, and `match_any_exclusion` returns it
    // for the `format_exclude` glob set — confirm which name/label is intended.
    FormatExclude,
}
impl std::fmt::Display for ExclusionKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ExclusionKind::Exclude => write!(f, "exclude"),
            ExclusionKind::ExtendExclude => write!(f, "extend-exclude"),
            ExclusionKind::LintExclude => write!(f, "lint.exclude"),
            ExclusionKind::FormatExclude => write!(f, "lint.extend-exclude"),
        }
    }
}
/// Return the [`ExclusionKind`] for a given [`Path`], if the path or any of its ancestors match
/// any of the exclusion criteria.
///
/// Resolver-level `exclude`/`extend-exclude` are checked before the optional
/// tool-specific glob sets; the first match wins.
pub fn match_any_exclusion(
    path: &Path,
    resolver_settings: &FileResolverSettings,
    lint_exclude: Option<&GlobSet>,
    format_exclude: Option<&GlobSet>,
) -> Option<ExclusionKind> {
    // Walk from `path` up towards the filesystem root, checking each level.
    for path in path.ancestors() {
        if let Some(basename) = path.file_name() {
            let path = Candidate::new(path);
            let basename = Candidate::new(basename);
            if match_candidate_exclusion(&path, &basename, &resolver_settings.exclude) {
                return Some(ExclusionKind::Exclude);
            }
            if match_candidate_exclusion(&path, &basename, &resolver_settings.extend_exclude) {
                return Some(ExclusionKind::ExtendExclude);
            }
            if let Some(lint_exclude) = lint_exclude {
                if match_candidate_exclusion(&path, &basename, lint_exclude) {
                    return Some(ExclusionKind::LintExclude);
                }
            }
            if let Some(format_exclude) = format_exclude {
                if match_candidate_exclusion(&path, &basename, format_exclude) {
                    return Some(ExclusionKind::FormatExclude);
                }
            }
        }
        if path == resolver_settings.project_root {
            // Bail out; we'd end up past the project root on the next iteration
            // (excludes etc. are thus "rooted" to the project).
            break;
        }
    }
    None
}
/// The setting that caused a path to be included.
#[derive(Debug, Copy, Clone)]
pub enum InclusionKind {
    /// The inclusion came from the `include` setting.
    Include,
    /// The inclusion came from the `extend-include` setting.
    ExtendInclude,
}
impl std::fmt::Display for InclusionKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render the TOML setting name that triggered the inclusion.
        let setting = match self {
            InclusionKind::Include => "include",
            InclusionKind::ExtendInclude => "extend-include",
        };
        f.write_str(setting)
    }
}
/// Return the [`InclusionKind`] for a given [`Path`], if the path match any of the inclusion
/// criteria.
pub fn match_any_inclusion(
path: &Path,
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/shared_traits.rs | crates/ruff_formatter/shared_traits.rs | /// Used to get an object that knows how to format this object.
pub trait AsFormat<Context> {
    /// The formattable wrapper type, borrowing from `self`.
    type Format<'a>: ruff_formatter::Format<Context>
    where
        Self: 'a;
    /// Returns an object that is able to format this object.
    fn format(&self) -> Self::Format<'_>;
}
/// Implement [`AsFormat`] for references to types that implement [`AsFormat`].
impl<T, C> AsFormat<C> for &T
where
    T: AsFormat<C>,
{
    type Format<'a> = T::Format<'a> where Self: 'a;
    // Delegate to the referenced value.
    fn format(&self) -> Self::Format<'_> {
        AsFormat::format(&**self)
    }
}
/// Used to convert this object into an object that can be formatted.
///
/// The difference to [`AsFormat`] is that this trait takes ownership of `self`.
pub trait IntoFormat<Context> {
    /// The formattable wrapper type, owning the converted value.
    type Format: ruff_formatter::Format<Context>;
    /// Consumes `self`, returning an object that is able to format it.
    fn into_format(self) -> Self::Format;
}
/// Implement [`IntoFormat`] for [`Option`] when `T` implements [`IntoFormat`]
///
/// Allows to call format on optional AST fields without having to unwrap the
/// field first.
impl<T, Context> IntoFormat<Context> for Option<T>
where
T: IntoFormat<Context>,
{
type Format = Option<T::Format>;
fn into_format(self) -> Self::Format {
self.map(IntoFormat::into_format)
}
}
/// Implement [`IntoFormat`] for references to types that implement [`AsFormat`].
impl<'a, T, C> IntoFormat<C> for &'a T
where
    T: AsFormat<C>,
{
    type Format = T::Format<'a>;
    // A reference is "consumed" by simply borrowing through `AsFormat`.
    fn into_format(self) -> Self::Format {
        AsFormat::format(self)
    }
}
/// Formatting specific [`Iterator`] extensions
pub trait FormattedIterExt {
    /// Converts every item to an object that knows how to format it.
    fn formatted<Context>(self) -> FormattedIter<Self, Self::Item, Context>
    where
        Self: Iterator + Sized,
        Self::Item: IntoFormat<Context>,
    {
        FormattedIter {
            inner: self,
            options: std::marker::PhantomData,
        }
    }
}
// Blanket implementation: every iterator gets `formatted()`.
impl<I> FormattedIterExt for I where I: std::iter::Iterator {}
/// Iterator adapter that converts every yielded item into a formattable
/// object via [`IntoFormat`]. Created by [`FormattedIterExt::formatted`].
pub struct FormattedIter<Iter, Item, Context>
where
    Iter: Iterator<Item = Item>,
{
    inner: Iter,
    options: std::marker::PhantomData<Context>,
}
impl<Iter, Item, Context> std::iter::Iterator for FormattedIter<Iter, Item, Context>
where
    Iter: Iterator<Item = Item>,
    Item: IntoFormat<Context>,
{
    type Item = Item::Format;
    fn next(&mut self) -> Option<Self::Item> {
        Some(self.inner.next()?.into_format())
    }
    // Forward the inner iterator's bounds: the conversion is 1:1, so the hint
    // is exact. Without this override the default `(0, None)` hint would
    // defeat `collect` preallocation and — worse — break the
    // `ExactSizeIterator` impl below, whose default `len()` asserts that the
    // upper bound equals the lower bound and would otherwise panic.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
impl<Iter, Item, Context> std::iter::FusedIterator for FormattedIter<Iter, Item, Context>
where
    Iter: std::iter::FusedIterator<Item = Item>,
    Item: IntoFormat<Context>,
{
}
impl<Iter, Item, Context> std::iter::ExactSizeIterator for FormattedIter<Iter, Item, Context>
where
    Iter: Iterator<Item = Item> + std::iter::ExactSizeIterator,
    Item: IntoFormat<Context>,
{
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/diagnostics.rs | crates/ruff_formatter/src/diagnostics.rs | use crate::GroupId;
use crate::prelude::TagKind;
use ruff_text_size::TextRange;
use std::error::Error;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
/// Series of errors encountered during formatting
pub enum FormatError {
    /// In case a node can't be formatted because it either misses a required child element or
    /// a child is present that should not be (e.g. a trailing comma after a rest element).
    SyntaxError { message: &'static str },
    /// In case range formatting failed because the provided range was larger
    /// than the formatted syntax tree
    RangeError { input: TextRange, tree: TextRange },
    /// In case printing the document failed because it has an invalid structure.
    InvalidDocument(InvalidDocumentError),
    /// Formatting failed because some content encountered a situation where a layout
    /// choice by an enclosing [`crate::Format`] resulted in a poor layout for a child [`crate::Format`].
    ///
    /// It's up to an enclosing [`crate::Format`] to handle the error and pick another layout.
    /// This error should not be raised if there's no outer [`crate::Format`] handling the poor layout error,
    /// avoiding that formatting of the whole document fails.
    PoorLayout,
}
impl std::fmt::Display for FormatError {
    // Human-readable rendering; these strings are user-visible.
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FormatError::SyntaxError { message } => {
                std::write!(fmt, "syntax error: {message}")
            }
            FormatError::RangeError { input, tree } => std::write!(
                fmt,
                "formatting range {input:?} is larger than syntax tree {tree:?}"
            ),
            FormatError::InvalidDocument(error) => std::write!(
                fmt,
                "Invalid document: {error}\n\n This is an internal Ruff error. Please report if necessary."
            ),
            FormatError::PoorLayout => {
                std::write!(
                    fmt,
                    "Poor layout: The formatter wasn't able to pick a good layout for your document. This is an internal Ruff error. Please report if necessary."
                )
            }
        }
    }
}
impl Error for FormatError {}
impl From<PrintError> for FormatError {
fn from(error: PrintError) -> Self {
FormatError::from(&error)
}
}
impl From<&PrintError> for FormatError {
fn from(error: &PrintError) -> Self {
match error {
PrintError::InvalidDocument(reason) => FormatError::InvalidDocument(*reason),
}
}
}
impl FormatError {
    /// Convenience constructor for a [`FormatError::SyntaxError`] with the given message.
    pub fn syntax_error(message: &'static str) -> Self {
        Self::SyntaxError { message }
    }
}
/// Structural problems that make a document unprintable.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum InvalidDocumentError {
    /// Mismatching start/end kinds
    ///
    /// ```plain
    /// StartIndent
    /// ...
    /// EndGroup
    /// ```
    StartEndTagMismatch {
        start_kind: TagKind,
        end_kind: TagKind,
    },
    /// End tag without a corresponding start tag.
    ///
    /// ```plain
    /// Text
    /// EndGroup
    /// ```
    StartTagMissing {
        kind: TagKind,
    },
    /// Expected a specific start tag but instead is:
    /// - at the end of the document
    /// - at another start tag
    /// - at an end tag
    ExpectedStart {
        expected_start: TagKind,
        actual: ActualStart,
    },
    /// An element referred to a group id for which no group exists, or whose
    /// group wasn't encountered before the referring element.
    UnknownGroupId {
        group_id: GroupId,
    },
}
/// What was actually found where a start tag was expected.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ActualStart {
    /// The actual element is not a tag.
    Content,
    /// The actual element was a start tag of another kind.
    Start(TagKind),
    /// The actual element is an end tag instead of a start tag.
    End(TagKind),
    /// Reached the end of the document
    EndOfDocument,
}
impl std::fmt::Display for InvalidDocumentError {
    // Human-readable rendering; these strings are user-visible.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            InvalidDocumentError::StartEndTagMismatch {
                start_kind,
                end_kind,
            } => {
                std::write!(
                    f,
                    "Expected end tag of kind {start_kind:?} but found {end_kind:?}."
                )
            }
            InvalidDocumentError::StartTagMissing { kind } => {
                std::write!(f, "End tag of kind {kind:?} without matching start tag.")
            }
            InvalidDocumentError::ExpectedStart {
                expected_start,
                actual,
            } => match actual {
                ActualStart::EndOfDocument => {
                    std::write!(
                        f,
                        "Expected start tag of kind {expected_start:?} but at the end of document."
                    )
                }
                ActualStart::Start(start) => {
                    std::write!(
                        f,
                        "Expected start tag of kind {expected_start:?} but found start tag of kind {start:?}."
                    )
                }
                ActualStart::End(end) => {
                    std::write!(
                        f,
                        "Expected start tag of kind {expected_start:?} but found end tag of kind {end:?}."
                    )
                }
                ActualStart::Content => {
                    std::write!(
                        f,
                        "Expected start tag of kind {expected_start:?} but found non-tag element."
                    )
                }
            },
            InvalidDocumentError::UnknownGroupId { group_id } => {
                std::write!(
                    f,
                    "Encountered unknown group id {group_id:?}. Ensure that the group with the id {group_id:?} exists and that the group is a parent of or comes before the element referring to it."
                )
            }
        }
    }
}
/// Failure raised while printing a formatted document.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum PrintError {
    InvalidDocument(InvalidDocumentError),
}
impl Error for PrintError {}
impl std::fmt::Display for PrintError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: the binding is irrefutable.
        let PrintError::InvalidDocument(inner) = self;
        std::write!(f, "Invalid document: {inner}")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/prelude.rs | crates/ruff_formatter/src/prelude.rs | pub use crate::builders::*;
pub use crate::format_element::document::Document;
pub use crate::format_element::tag::{LabelId, Tag, TagKind};
pub use crate::format_element::*;
pub use crate::format_extensions::{MemoizeFormat, Memoized};
pub use crate::formatter::Formatter;
pub use crate::printer::PrinterOptions;
pub use crate::{
Buffer as _, BufferExtensions, Format, Format as _, FormatResult, FormatRule,
FormatWithRule as _, SimpleFormatContext, best_fitting, dbg_write, format, format_args, write,
};
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/lib.rs | crates/ruff_formatter/src/lib.rs | //! Infrastructure for code formatting
//!
//! This module defines [`FormatElement`], an IR to format code documents and provides a means to print
//! such a document to a string. Objects that know how to format themselves implement the [Format] trait.
//!
//! ## Formatting Traits
//!
//! * [`Format`]: Implemented by objects that can be formatted.
//! * [`FormatRule`]: Rule that knows how to format an object of another type. Useful in the situation where
//! it's necessary to implement [Format] on an object from another crate. This module defines the
//! [`FormatRefWithRule`] and [`FormatOwnedWithRule`] structs to pass an item with its corresponding rule.
//! * [`FormatWithRule`] implemented by objects that know how to format another type. Useful for implementing
//! some reusable formatting logic inside of this module if the type itself doesn't implement [Format]
//!
//! ## Formatting Macros
//!
//! This crate defines two macros to construct the IR. These are inspired by Rust's `fmt` macros
//! * [`format!`]: Formats a formattable object
//! * [`format_args!`]: Concatenates a sequence of Format objects.
//! * [`write!`]: Writes a sequence of formattable objects into an output buffer.
mod arguments;
mod buffer;
mod builders;
pub mod diagnostics;
pub mod format_element;
mod format_extensions;
pub mod formatter;
pub mod group_id;
pub mod macros;
pub mod prelude;
pub mod printer;
mod source_code;
use crate::formatter::Formatter;
use crate::group_id::UniqueGroupIdBuilder;
use crate::prelude::TagKind;
use std::fmt;
use std::fmt::{Debug, Display};
use std::marker::PhantomData;
use std::num::{NonZeroU8, NonZeroU16, TryFromIntError};
use crate::format_element::document::Document;
use crate::printer::{Printer, PrinterOptions};
pub use arguments::{Argument, Arguments};
pub use buffer::{
Buffer, BufferExtensions, BufferSnapshot, Inspect, RemoveSoftLinesBuffer, VecBuffer,
};
pub use builders::BestFitting;
pub use source_code::{SourceCode, SourceCodeSlice};
pub use crate::diagnostics::{ActualStart, FormatError, InvalidDocumentError, PrintError};
pub use format_element::{FormatElement, LINE_TERMINATORS, normalize_newlines};
pub use group_id::GroupId;
use ruff_macros::CacheKey;
use ruff_text_size::{TextLen, TextRange, TextSize};
/// Whether indentation is rendered with tabs or spaces.
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash, CacheKey)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "kebab-case")
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum IndentStyle {
    /// Use tabs to indent code.
    #[default]
    Tab,
    /// Use [`IndentWidth`] spaces to indent code.
    Space,
}
impl IndentStyle {
    /// Returns `true` if this is an [`IndentStyle::Tab`].
    pub const fn is_tab(&self) -> bool {
        match self {
            IndentStyle::Tab => true,
            IndentStyle::Space => false,
        }
    }
    /// Returns `true` if this is an [`IndentStyle::Space`].
    pub const fn is_space(&self) -> bool {
        match self {
            IndentStyle::Space => true,
            IndentStyle::Tab => false,
        }
    }
    /// Returns the string representation of the indent style.
    pub const fn as_str(&self) -> &'static str {
        match self {
            IndentStyle::Tab => "tab",
            IndentStyle::Space => "space",
        }
    }
}
impl std::fmt::Display for IndentStyle {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The visual width of an indentation.
///
/// Determines the visual width of a tab character (`\t`) and the number of
/// spaces per indent when using [`IndentStyle::Space`].
#[derive(Clone, Copy, Debug, Eq, PartialEq, CacheKey)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct IndentWidth(NonZeroU8);
impl IndentWidth {
    /// Return the numeric value for this [`IndentWidth`]
    pub const fn value(&self) -> u32 {
        self.0.get() as u32
    }
}
impl Default for IndentWidth {
    /// Defaults to an indent width of 2.
    fn default() -> Self {
        Self(NonZeroU8::new(2).unwrap())
    }
}
impl Display for IndentWidth {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.0, f)
    }
}
/// Fallible conversion from a plain `u8`; fails for `0`.
impl TryFrom<u8> for IndentWidth {
    type Error = TryFromIntError;
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        NonZeroU8::try_from(value).map(Self)
    }
}
impl From<NonZeroU8> for IndentWidth {
    fn from(value: NonZeroU8) -> Self {
        Self(value)
    }
}
/// The maximum visual width to which the formatter should try to limit a line.
#[derive(Clone, Copy, Debug, Eq, PartialEq, CacheKey)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct LineWidth(NonZeroU16);
impl LineWidth {
    /// Return the numeric value for this [`LineWidth`]
    pub const fn value(&self) -> u16 {
        self.0.get()
    }
}
impl Default for LineWidth {
    /// Defaults to a line width of 80.
    fn default() -> Self {
        Self(NonZeroU16::new(80).unwrap())
    }
}
impl Display for LineWidth {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.0, f)
    }
}
/// Fallible conversion from a plain `u16`; fails for `0`.
impl TryFrom<u16> for LineWidth {
    type Error = TryFromIntError;
    fn try_from(value: u16) -> Result<LineWidth, Self::Error> {
        NonZeroU16::try_from(value).map(LineWidth)
    }
}
impl From<LineWidth> for u16 {
    fn from(value: LineWidth) -> Self {
        value.0.get()
    }
}
impl From<LineWidth> for u32 {
    fn from(value: LineWidth) -> Self {
        u32::from(value.0.get())
    }
}
impl From<NonZeroU16> for LineWidth {
    fn from(value: NonZeroU16) -> Self {
        Self(value)
    }
}
/// Context object storing data relevant when formatting an object.
pub trait FormatContext {
    /// The options type customizing this context's formatting.
    type Options: FormatOptions;
    /// Returns the formatting options
    fn options(&self) -> &Self::Options;
    /// Returns the source code from the document that gets formatted.
    fn source_code(&self) -> SourceCode<'_>;
}
/// Options customizing how the source code should be formatted.
pub trait FormatOptions {
    /// The indent style.
    fn indent_style(&self) -> IndentStyle;
    /// The visual width of an indent
    fn indent_width(&self) -> IndentWidth;
    /// What's the max width of a line. Defaults to 80.
    fn line_width(&self) -> LineWidth;
    /// Derives the print options from these format options
    fn as_print_options(&self) -> PrinterOptions;
}
/// A bare-bones [`FormatContext`] carrying only options and the source text.
#[derive(Debug, Default, Eq, PartialEq)]
pub struct SimpleFormatContext {
    options: SimpleFormatOptions,
    source_code: String,
}
impl SimpleFormatContext {
    /// Create a context with the given options and no source code.
    pub fn new(options: SimpleFormatOptions) -> Self {
        SimpleFormatContext {
            options,
            source_code: String::new(),
        }
    }
    /// Attach the source text this context formats.
    #[must_use]
    pub fn with_source_code(mut self, code: &str) -> Self {
        self.source_code = code.to_string();
        self
    }
}
impl FormatContext for SimpleFormatContext {
    type Options = SimpleFormatOptions;
    fn options(&self) -> &Self::Options {
        &self.options
    }
    fn source_code(&self) -> SourceCode<'_> {
        SourceCode::new(self.source_code.as_str())
    }
}
/// Minimal options bundle exposing only the three basic formatting knobs.
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct SimpleFormatOptions {
    pub indent_style: IndentStyle,
    pub indent_width: IndentWidth,
    pub line_width: LineWidth,
}
impl FormatOptions for SimpleFormatOptions {
    fn indent_style(&self) -> IndentStyle {
        self.indent_style
    }
    fn indent_width(&self) -> IndentWidth {
        self.indent_width
    }
    fn line_width(&self) -> LineWidth {
        self.line_width
    }
    fn as_print_options(&self) -> PrinterOptions {
        // Carry the three basic options over; everything else stays at the
        // printer's defaults.
        PrinterOptions {
            line_width: self.line_width,
            indent_style: self.indent_style,
            indent_width: self.indent_width,
            ..PrinterOptions::default()
        }
    }
}
/// Lightweight sourcemap marker between source and output tokens
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SourceMarker {
    /// Position of the marker in the original source
    pub source: TextSize,
    /// Position of the marker in the output code
    pub dest: TextSize,
}
/// Pairs a formatted [`Document`] with the context used to produce it.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Formatted<Context> {
    document: Document,
    context: Context,
}
impl<Context> Formatted<Context> {
    /// Bundle a formatted `document` with its formatting `context`.
    pub fn new(document: Document, context: Context) -> Self {
        Formatted { document, context }
    }
    /// Returns the context used during formatting.
    pub fn context(&self) -> &Context {
        &self.context
    }
    /// Returns the formatted document.
    pub fn document(&self) -> &Document {
        &self.document
    }
    /// Consumes `self` and returns the formatted document.
    pub fn into_document(self) -> Document {
        self.document
    }
}
impl<Context> Formatted<Context>
where
    Context: FormatContext,
{
    /// Print the document to text using the context's options.
    pub fn print(&self) -> PrintResult<Printed> {
        let printer = self.create_printer();
        printer.print(&self.document)
    }
    /// Print the document with `indent` levels of initial indentation.
    pub fn print_with_indent(&self, indent: u16) -> PrintResult<Printed> {
        let printer = self.create_printer();
        printer.print_with_indent(&self.document, indent)
    }
    // Build a printer configured from this context's source and options.
    fn create_printer(&self) -> Printer<'_> {
        let source_code = self.context.source_code();
        let print_options = self.context.options().as_print_options();
        Printer::new(source_code, print_options)
    }
}
impl<Context> Display for Formatted<Context>
where
    Context: FormatContext,
{
    // Renders the IR itself (for debugging), not the printed output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.document.display(self.context.source_code()), f)
    }
}
/// Result type of printing operations.
pub type PrintResult<T> = Result<T, PrintError>;
/// The output of printing a formatted document.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Printed {
    // The rendered output text.
    code: String,
    // Input range covered, or `None` when the entire file was printed.
    range: Option<TextRange>,
    // Output -> input position mappings (sorted by destination, not source).
    sourcemap: Vec<SourceMarker>,
    // Output ranges that were emitted verbatim.
    verbatim_ranges: Vec<TextRange>,
}
impl Printed {
    /// Bundle printer output with its range, source map, and verbatim ranges.
    pub fn new(
        code: String,
        range: Option<TextRange>,
        sourcemap: Vec<SourceMarker>,
        verbatim_source: Vec<TextRange>,
    ) -> Self {
        Self {
            code,
            range,
            sourcemap,
            verbatim_ranges: verbatim_source,
        }
    }
    /// Construct an empty formatter result
    pub fn new_empty() -> Self {
        Self {
            code: String::new(),
            range: None,
            sourcemap: Vec::new(),
            verbatim_ranges: Vec::new(),
        }
    }
    /// Range of the input source file covered by this formatted code,
    /// or None if the entire file is covered in this instance
    pub fn range(&self) -> Option<TextRange> {
        self.range
    }
    /// Returns a list of [`SourceMarker`] mapping byte positions
    /// in the output string to the input source code.
    /// It's not guaranteed that the markers are sorted by source position.
    pub fn sourcemap(&self) -> &[SourceMarker] {
        &self.sourcemap
    }
    /// Returns a list of [`SourceMarker`] mapping byte positions
    /// in the output string to the input source code, consuming the result
    pub fn into_sourcemap(self) -> Vec<SourceMarker> {
        self.sourcemap
    }
    /// Takes the list of [`SourceMarker`] mapping byte positions in the output string
    /// to the input source code.
    pub fn take_sourcemap(&mut self) -> Vec<SourceMarker> {
        std::mem::take(&mut self.sourcemap)
    }
    /// Access the resulting code, borrowing the result
    pub fn as_code(&self) -> &str {
        &self.code
    }
    /// Access the resulting code, consuming the result
    pub fn into_code(self) -> String {
        self.code
    }
    /// The text in the formatted code that has been formatted as verbatim.
    pub fn verbatim(&self) -> impl Iterator<Item = (TextRange, &str)> {
        self.verbatim_ranges
            .iter()
            .map(|range| (*range, &self.code[*range]))
    }
    /// Ranges of the formatted code that have been formatted as verbatim.
    pub fn verbatim_ranges(&self) -> &[TextRange] {
        &self.verbatim_ranges
    }
    /// Takes the ranges of nodes that have been formatted as verbatim, replacing them with an empty list.
    pub fn take_verbatim_ranges(&mut self) -> Vec<TextRange> {
        std::mem::take(&mut self.verbatim_ranges)
    }
    /// Slices the formatted code to the sub-slices that covers the passed `source_range` in `source`.
    ///
    /// The implementation uses the source map generated during formatting to find the closest range
    /// in the formatted document that covers `source_range` or more. The returned slice
    /// matches the `source_range` exactly (except indent, see below) if the formatter emits [`FormatElement::SourcePosition`] for
    /// the range's offsets.
    ///
    /// ## Indentation
    /// The indentation before `source_range.start` is replaced with the indentation returned by the formatter
    /// to fix up incorrectly intended code.
    ///
    /// Returns the entire document if the source map is empty.
    ///
    /// # Panics
    /// If `source_range` points to offsets that are not in the bounds of `source`.
    #[must_use]
    pub fn slice_range(self, source_range: TextRange, source: &str) -> PrintedRange {
        let mut start_marker: Option<SourceMarker> = None;
        let mut end_marker: Option<SourceMarker> = None;
        // Note: The printer can generate multiple source map entries for the same source position.
        // For example if you have:
        // * token("a + b")
        // * `source_position(276)`
        // * `token(")")`
        // * `source_position(276)`
        // * `hard_line_break`
        // The printer uses the source position 276 for both the tokens `)` and the `\n` because
        // there were multiple `source_position` entries in the IR with the same offset.
        // This can happen if multiple nodes start or end at the same position. A common example
        // for this are expressions and expression statement that always end at the same offset.
        //
        // Warning: Source markers are often emitted sorted by their source position but it's not guaranteed
        // and depends on the emitted `IR`.
        // They are only guaranteed to be sorted in increasing order by their destination position.
        for marker in self.sourcemap {
            // Take the closest start marker, but skip over start_markers that have the same start.
            if marker.source <= source_range.start()
                && start_marker.is_none_or(|existing| existing.source < marker.source)
            {
                start_marker = Some(marker);
            }
            if marker.source >= source_range.end()
                && end_marker.is_none_or(|existing| existing.source > marker.source)
            {
                end_marker = Some(marker);
            }
        }
        // Fall back to the document boundaries when no marker brackets the range.
        let (source_start, formatted_start) = start_marker
            .map(|marker| (marker.source, marker.dest))
            .unwrap_or_default();
        let (source_end, formatted_end) = end_marker
            .map_or((source.text_len(), self.code.text_len()), |marker| {
                (marker.source, marker.dest)
            });
        let source_range = TextRange::new(source_start, source_end);
        let formatted_range = TextRange::new(formatted_start, formatted_end);
        // Extend both ranges to include the indentation
        let source_range = extend_range_to_include_indent(source_range, source);
        let formatted_range = extend_range_to_include_indent(formatted_range, &self.code);
        PrintedRange {
            code: self.code[formatted_range].to_string(),
            source_range,
        }
    }
}
/// Extends `range` backwards (by reducing `range.start`) to include any directly preceding whitespace (`\t` or ` `).
///
/// # Panics
/// If `range.start` is out of `source`'s bounds.
fn extend_range_to_include_indent(range: TextRange, source: &str) -> TextRange {
let whitespace_len: TextSize = source[..usize::from(range.start())]
.chars()
.rev()
.take_while(|c| matches!(c, ' ' | '\t'))
.map(TextLen::text_len)
.sum();
TextRange::new(range.start() - whitespace_len, range.end())
}
/// A slice of formatted output together with the source range it corresponds to.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct PrintedRange {
    code: String,
    source_range: TextRange,
}
impl PrintedRange {
    /// Pair formatted `code` with the `source_range` it corresponds to.
    pub fn new(code: String, source_range: TextRange) -> Self {
        PrintedRange { code, source_range }
    }
    /// An empty result covering no source.
    pub fn empty() -> Self {
        Self::new(String::new(), TextRange::default())
    }
    /// The formatted code.
    pub fn as_code(&self) -> &str {
        self.code.as_str()
    }
    /// Consume `self`, returning the formatted code.
    pub fn into_code(self) -> String {
        self.code
    }
    /// The range the formatted code corresponds to in the source document.
    pub fn source_range(&self) -> TextRange {
        self.source_range
    }
}
/// Public return type of the formatter
pub type FormatResult<F> = Result<F, FormatError>;
/// Formatting trait for types that can create a formatted representation. The `ruff_formatter` equivalent
/// to [`std::fmt::Display`].
///
/// ## Example
/// Implementing `Format` for a custom struct
///
/// ```
/// use ruff_formatter::{format, write, IndentStyle};
/// use ruff_formatter::prelude::*;
/// use ruff_text_size::TextSize;
///
/// struct Paragraph(String);
///
/// impl Format<SimpleFormatContext> for Paragraph {
///     fn fmt(&self, f: &mut Formatter<SimpleFormatContext>) -> FormatResult<()> {
///         write!(f, [
///             text(&self.0),
///             hard_line_break(),
///         ])
///     }
/// }
///
/// # fn main() -> FormatResult<()> {
/// let paragraph = Paragraph(String::from("test"));
/// let formatted = format!(SimpleFormatContext::default(), [paragraph])?;
///
/// assert_eq!("test\n", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
pub trait Format<Context> {
    /// Formats the object using the given formatter.
    ///
    /// # Errors
    /// Returns a [`FormatError`] if the object can't be formatted.
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()>;
}
/// A shared reference formats like the value it points to.
impl<T, Context> Format<Context> for &T
where
    T: ?Sized + Format<Context>,
{
    #[inline]
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Delegate to the referenced value.
        (**self).fmt(f)
    }
}

/// A mutable reference formats like the value it points to.
impl<T, Context> Format<Context> for &mut T
where
    T: ?Sized + Format<Context>,
{
    #[inline]
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Delegate to the referenced value.
        (**self).fmt(f)
    }
}
/// `Some` formats the inner value; `None` formats as nothing.
impl<T, Context> Format<Context> for Option<T>
where
    T: Format<Context>,
{
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        if let Some(value) = self {
            value.fmt(f)
        } else {
            Ok(())
        }
    }
}

/// The unit type formats as nothing.
impl<Context> Format<Context> for () {
    #[inline]
    fn fmt(&self, _: &mut Formatter<Context>) -> FormatResult<()> {
        // Intentionally writes no elements.
        Ok(())
    }
}
/// Rule that knows how to format an object of type `T`.
///
/// Implementing [Format] on the object itself is preferred over implementing [`FormatRule`] but
/// this isn't possible inside of a dependent crate for external types (Rust's orphan rule).
///
/// For example, the `ruff_js_formatter` crate isn't able to implement [Format] on `JsIfStatement`
/// because both the [Format] trait and `JsIfStatement` are external types (Rust's orphan rule).
///
/// That's why the `ruff_js_formatter` crate must define a new-type that implements the formatting
/// of `JsIfStatement`.
pub trait FormatRule<T, C> {
    /// Formats `item` into `f`.
    fn fmt(&self, item: &T, f: &mut Formatter<C>) -> FormatResult<()>;
}

/// Rule that supports customizing how it formats an object of type `T`.
pub trait FormatRuleWithOptions<T, C>: FormatRule<T, C> {
    /// The options type used to configure the rule.
    type Options;

    /// Returns a new rule that uses the given options to format an object.
    #[must_use]
    fn with_options(self, options: Self::Options) -> Self;
}

/// Trait for an object that formats an object with a specified rule.
///
/// Gives access to the underlying item.
///
/// Useful in situation where a type itself doesn't implement [Format] (e.g. because of Rust's orphan rule)
/// but you want to implement some common formatting logic.
///
/// ## Examples
///
/// This can be useful if you want to format a `SyntaxNode` inside `ruff_formatter`. `SyntaxNode` doesn't implement [Format]
/// itself but the language specific crate implements `AsFormat` and `IntoFormat` for it and the returned [Format]
/// implement [`FormatWithRule`].
///
/// ```ignore
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, Formatted, FormatWithRule};
/// use ruff_rowan::{Language, SyntaxNode};
/// fn format_node<L: Language, F: FormatWithRule<SimpleFormatContext, Item=SyntaxNode<L>>>(node: F) -> FormatResult<Formatted<SimpleFormatContext>> {
///     let formatted = format!(SimpleFormatContext::default(), [node]);
///     let syntax = node.item();
///     // Do something with syntax
///     formatted;
/// }
/// ```
pub trait FormatWithRule<Context>: Format<Context> {
    /// The type of the wrapped item.
    type Item;

    /// Returns the associated item
    fn item(&self) -> &Self::Item;
}
/// Formats the referenced `item` with the specified rule.
#[derive(Debug, Copy, Clone)]
pub struct FormatRefWithRule<'a, T, R, C>
where
    R: FormatRule<T, C>,
{
    /// The borrowed item to format.
    item: &'a T,
    /// The rule that knows how to format `item`.
    rule: R,
    /// `C` only appears in the `FormatRule` bound; mark it as used.
    context: PhantomData<C>,
}
impl<'a, T, R, C> FormatRefWithRule<'a, T, R, C>
where
    R: FormatRule<T, C>,
{
    /// Creates a wrapper that formats `item` with `rule`.
    pub fn new(item: &'a T, rule: R) -> Self {
        let context = PhantomData;
        Self {
            item,
            rule,
            context,
        }
    }

    /// Returns the rule used to format the item.
    pub fn rule(&self) -> &R {
        &self.rule
    }
}

impl<T, R, O, C> FormatRefWithRule<'_, T, R, C>
where
    R: FormatRuleWithOptions<T, C, Options = O>,
{
    /// Replaces the rule with one configured with `options`.
    #[must_use]
    pub fn with_options(self, options: O) -> Self {
        let Self {
            item,
            rule,
            context,
        } = self;
        Self {
            item,
            rule: rule.with_options(options),
            context,
        }
    }
}
impl<T, R, C> FormatWithRule<C> for FormatRefWithRule<'_, T, R, C>
where
    R: FormatRule<T, C>,
{
    type Item = T;

    /// Returns the wrapped item.
    fn item(&self) -> &Self::Item {
        self.item
    }
}

impl<T, R, C> Format<C> for FormatRefWithRule<'_, T, R, C>
where
    R: FormatRule<T, C>,
{
    #[inline]
    fn fmt(&self, f: &mut Formatter<C>) -> FormatResult<()> {
        // Formatting delegates to the rule, passing the wrapped item.
        self.rule.fmt(self.item, f)
    }
}
/// Formats the `item` with the specified rule.
#[derive(Debug, Clone)]
pub struct FormatOwnedWithRule<T, R, C>
where
    R: FormatRule<T, C>,
{
    /// The owned item to format.
    item: T,
    /// The rule that knows how to format `item`.
    rule: R,
    /// `C` only appears in the `FormatRule` bound; mark it as used.
    context: PhantomData<C>,
}
impl<T, R, C> FormatOwnedWithRule<T, R, C>
where
    R: FormatRule<T, C>,
{
    /// Creates a wrapper that formats the owned `item` with `rule`.
    pub fn new(item: T, rule: R) -> Self {
        let context = PhantomData;
        Self {
            item,
            rule,
            context,
        }
    }

    /// Replaces the wrapped item, keeping the rule.
    #[must_use]
    pub fn with_item(self, item: T) -> Self {
        Self { item, ..self }
    }
}
impl<T, R, C> Format<C> for FormatOwnedWithRule<T, R, C>
where
    R: FormatRule<T, C>,
{
    #[inline]
    fn fmt(&self, f: &mut Formatter<C>) -> FormatResult<()> {
        // Formatting delegates to the rule, passing the wrapped item.
        self.rule.fmt(&self.item, f)
    }
}

impl<T, R, O, C> FormatOwnedWithRule<T, R, C>
where
    R: FormatRuleWithOptions<T, C, Options = O>,
{
    /// Replaces the rule with one configured with `options`.
    #[must_use]
    pub fn with_options(mut self, options: O) -> Self {
        self.rule = self.rule.with_options(options);
        self
    }
}

impl<T, R, C> FormatWithRule<C> for FormatOwnedWithRule<T, R, C>
where
    R: FormatRule<T, C>,
{
    type Item = T;

    /// Returns the wrapped item.
    fn item(&self) -> &Self::Item {
        &self.item
    }
}
/// The `write` function takes a target buffer and an `Arguments` struct that can be precompiled with the `format_args!` macro.
///
/// The arguments will be formatted in-order into the output buffer provided.
///
/// # Examples
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{VecBuffer, format_args, FormatState, write, Formatted};
///
/// # fn main() -> FormatResult<()> {
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
///
/// write!(&mut buffer, [format_args!(token("Hello World"))])?;
///
/// let formatted = Formatted::new(Document::from(buffer.into_vec()), SimpleFormatContext::default());
///
/// assert_eq!("Hello World", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
///
/// Please note that using [`write!`] might be preferable. Example:
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{VecBuffer, format_args, FormatState, write, Formatted};
///
/// # fn main() -> FormatResult<()> {
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
///
/// write!(&mut buffer, [token("Hello World")])?;
///
/// let formatted = Formatted::new(Document::from(buffer.into_vec()), SimpleFormatContext::default());
///
/// assert_eq!("Hello World", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn write<Context>(
    output: &mut dyn Buffer<Context = Context>,
    args: Arguments<Context>,
) -> FormatResult<()> {
    // Wrap the buffer in a fresh `Formatter` and let it write the arguments.
    Formatter::new(output).write_fmt(args)
}
/// The `format` function takes an [`Arguments`] struct and returns the resulting formatting IR.
///
/// The [`Arguments`] instance can be created with the [`format_args!`].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, format_args};
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [&format_args!(token("test"))])?;
/// assert_eq!("test", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
///
/// Please note that using [`format!`] might be preferable. Example:
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format};
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [token("test")])?;
/// assert_eq!("test", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
pub fn format<Context>(
    context: Context,
    arguments: Arguments<Context>,
) -> FormatResult<Formatted<Context>>
where
    Context: FormatContext,
{
    // Use a simple heuristic to guess the number of expected format elements.
    // See [#6612](https://github.com/astral-sh/ruff/pull/6612) for more details on how the formula was determined. Changes to our formatter, or supporting
    // more languages may require fine tuning the formula.
    let estimated_buffer_size = context.source_code().as_str().len() / 2;

    let mut state = FormatState::new(context);
    let mut buffer = VecBuffer::with_capacity(estimated_buffer_size, &mut state);

    buffer.write_fmt(arguments)?;

    let mut document = Document::from(buffer.into_vec());
    document.propagate_expand();

    Ok(Formatted::new(document, state.into_context()))
}
/// This structure stores the state that is relevant for the formatting of the whole document.
///
/// This structure is different from [`crate::Formatter`] in that the formatting infrastructure
/// creates a new [`crate::Formatter`] for every [`crate::write`!] call, whereas this structure stays alive
/// for the whole process of formatting a root with [`crate::format`!].
pub struct FormatState<Context> {
    /// The language-specific formatting context.
    context: Context,
    /// Produces document-unique group ids; see [`FormatState::group_id`].
    group_id_builder: UniqueGroupIdBuilder,
}

// `group_id_builder` is intentionally omitted from the `Debug` output.
#[expect(clippy::missing_fields_in_debug)]
impl<Context> std::fmt::Debug for FormatState<Context>
where
    Context: std::fmt::Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("FormatState")
            .field("context", &self.context)
            .finish()
    }
}
impl<Context> FormatState<Context> {
    /// Creates a new state with the given language specific context
    pub fn new(context: Context) -> Self {
        Self {
            context,
            group_id_builder: UniqueGroupIdBuilder::default(),
        }
    }

    /// Consumes the state, returning the context.
    pub fn into_context(self) -> Context {
        self.context
    }

    /// Returns the context specifying how to format the current CST
    pub fn context(&self) -> &Context {
        &self.context
    }

    /// Returns a mutable reference to the context
    pub fn context_mut(&mut self) -> &mut Context {
        &mut self.context
    }

    /// Creates a new group id that is unique to this document. The passed debug name is used in the
    /// [`std::fmt::Debug`] of the document if this is a debug build.
    /// The name is unused for production builds and has no meaning on the equality of two group ids.
    pub fn group_id(&self, debug_name: &'static str) -> GroupId {
        self.group_id_builder.group_id(debug_name)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/source_code.rs | crates/ruff_formatter/src/source_code.rs | use std::fmt::{Debug, Formatter};
use ruff_text_size::{Ranged, TextRange};
/// The source code of a document that gets formatted
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)]
pub struct SourceCode<'a> {
    /// The full text of the source document.
    text: &'a str,
}
impl<'a> SourceCode<'a> {
    /// Creates a new `SourceCode` wrapping `text`.
    pub fn new(text: &'a str) -> Self {
        Self { text }
    }

    /// Creates a [`SourceCodeSlice`] covering `range` of this source.
    ///
    /// # Panics
    /// If `range` extends past the end of the text, or if either endpoint
    /// is not on a character boundary.
    pub fn slice(self, range: TextRange) -> SourceCodeSlice {
        let start = usize::from(range.start());
        let end = usize::from(range.end());

        assert!(
            end <= self.text.len(),
            "Range end {:?} out of bounds {}.",
            range.end(),
            self.text.len()
        );
        assert!(
            self.text.is_char_boundary(start),
            "The range start position {:?} is not a char boundary.",
            range.start()
        );
        assert!(
            self.text.is_char_boundary(end),
            "The range end position {:?} is not a char boundary.",
            range.end()
        );

        SourceCodeSlice {
            range,
            // Debug builds keep a copy of the text for nicer diagnostics.
            #[cfg(debug_assertions)]
            text: self.text[range].to_string().into_boxed_str(),
        }
    }

    /// Returns the full source text.
    pub fn as_str(&self) -> &'a str {
        self.text
    }
}
impl Debug for SourceCode<'_> {
    // Renders as `SourceCode("…")`.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("SourceCode").field(&self.text).finish()
    }
}
/// A slice into the source text of a document.
///
/// It only stores the range in production builds for a more compact representation, but it
/// keeps the original text in debug builds for better developer experience.
#[derive(Clone, Eq, PartialEq)]
pub struct SourceCodeSlice {
    /// The range of this slice in the source document.
    range: TextRange,
    /// Debug-build-only copy of the sliced text (see type docs).
    #[cfg(debug_assertions)]
    text: Box<str>,
}
impl SourceCodeSlice {
    /// Returns the slice's text.
    ///
    /// # Panics
    /// If the slice's range is out of bounds for `code` (e.g. when a slice is
    /// resolved against a different document than it was created from).
    pub fn text<'a>(&self, code: SourceCode<'a>) -> &'a str {
        assert!(
            usize::from(self.range.end()) <= code.text.len(),
            "The range of this slice is out of bounds. Did you provide the correct source code for this slice?"
        );
        &code.text[self.range]
    }
}

impl Ranged for SourceCodeSlice {
    /// Returns the slice's range in the source document.
    fn range(&self) -> TextRange {
        self.range
    }
}
impl Debug for SourceCodeSlice {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut tuple = f.debug_tuple("SourceCodeSlice");
        // The text field only exists in debug builds.
        #[cfg(debug_assertions)]
        tuple.field(&self.text);
        tuple.field(&self.range).finish()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/format_extensions.rs | crates/ruff_formatter/src/format_extensions.rs | use crate::prelude::*;
use std::cell::OnceCell;
use std::marker::PhantomData;
use crate::Buffer;
/// Utility trait that allows memorizing the output of a [`Format`].
/// Useful to avoid re-formatting the same object twice.
pub trait MemoizeFormat<Context> {
    /// Returns a formattable object that memoizes the result of `Format` by cloning.
    /// Mainly useful if the same sub-tree can appear twice in the formatted output because it's
    /// used inside of `if_group_breaks` or `if_group_fits_single_line`.
    ///
    /// ```
    /// use std::cell::Cell;
    /// use ruff_formatter::{format, write};
    /// use ruff_formatter::prelude::*;
    /// use ruff_text_size::{Ranged, TextSize};
    ///
    /// struct MyFormat {
    ///     value: Cell<u64>
    /// }
    ///
    /// impl MyFormat {
    ///     pub fn new() -> Self {
    ///         Self { value: Cell::new(1) }
    ///     }
    /// }
    ///
    /// impl Format<SimpleFormatContext> for MyFormat {
    ///     fn fmt(&self, f: &mut Formatter<SimpleFormatContext>) -> FormatResult<()> {
    ///         let value = self.value.get();
    ///         self.value.set(value + 1);
    ///
    ///         write!(f, [text(&std::format!("Formatted {value} times."))])
    ///     }
    /// }
    ///
    /// # fn main() -> FormatResult<()> {
    /// let normal = MyFormat::new();
    ///
    /// // Calls `format` every time the object gets formatted
    /// assert_eq!(
    ///     "Formatted 1 times. Formatted 2 times.",
    ///     format!(SimpleFormatContext::default(), [normal, space(), normal])?.print()?.as_code()
    /// );
    ///
    /// // Memoized memoizes the result and calls `format` only once.
    /// let memoized = normal.memoized();
    /// assert_eq!(
    ///     "Formatted 3 times. Formatted 3 times.",
    ///     format![SimpleFormatContext::default(), [memoized, space(), memoized]]?.print()?.as_code()
    /// );
    /// # Ok(())
    /// # }
    /// ```
    fn memoized(self) -> Memoized<Self, Context>
    where
        Self: Sized + Format<Context>,
    {
        Memoized::new(self)
    }
}

// Every `Format` automatically gains `.memoized()`.
impl<T, Context> MemoizeFormat<Context> for T where T: Format<Context> {}
/// Memoizes the output of its inner [`Format`] to avoid re-formatting a potential expensive object.
#[derive(Debug)]
pub struct Memoized<F, Context> {
    /// The wrapped content whose output gets memoized.
    inner: F,
    /// Lazily initialized with the (interned) result of formatting `inner`.
    memory: OnceCell<FormatResult<Option<FormatElement>>>,
    /// `Context` is only used in the `Format` bounds; mark it as used.
    options: PhantomData<Context>,
}
impl<F, Context> Memoized<F, Context>
where
    F: Format<Context>,
{
    /// Creates a memoizing wrapper around `inner`; nothing is formatted yet.
    fn new(inner: F) -> Self {
        Self {
            inner,
            memory: OnceCell::new(),
            options: PhantomData,
        }
    }

    /// Gives access to the memoized content.
    ///
    /// Performs the formatting if the content hasn't been formatted at this point.
    ///
    /// # Example
    ///
    /// Inspect if some memoized content breaks.
    ///
    /// ```rust
    /// use std::cell::Cell;
    /// use ruff_formatter::{format, write};
    /// use ruff_formatter::prelude::*;
    /// use ruff_text_size::{Ranged, TextSize};
    ///
    /// #[derive(Default)]
    /// struct Counter {
    ///     value: Cell<u64>
    /// }
    ///
    /// impl Format<SimpleFormatContext> for Counter {
    ///     fn fmt(&self, f: &mut Formatter<SimpleFormatContext>) -> FormatResult<()> {
    ///         let current = self.value.get();
    ///
    ///         write!(f, [
    ///             token("Count:"),
    ///             space(),
    ///             text(&std::format!("{current}")),
    ///             hard_line_break()
    ///         ])?;
    ///
    ///         self.value.set(current + 1);
    ///         Ok(())
    ///     }
    /// }
    ///
    /// # fn main() -> FormatResult<()> {
    /// let content = format_with(|f| {
    ///     let mut counter = Counter::default().memoized();
    ///     let counter_content = counter.inspect(f)?;
    ///
    ///     if counter_content.will_break() {
    ///         write!(f, [token("Counter:"), block_indent(&counter)])
    ///     } else {
    ///         write!(f, [token("Counter:"), counter])
    ///     }?;
    ///
    ///     write!(f, [counter])
    /// });
    ///
    ///
    /// let formatted = format!(SimpleFormatContext::default(), [content])?;
    /// assert_eq!("Counter:\n\tCount: 0\nCount: 0\n", formatted.print()?.as_code());
    /// # Ok(())
    /// # }
    /// ```
    pub fn inspect(&self, f: &mut Formatter<Context>) -> FormatResult<&[FormatElement]> {
        // Format and intern the content on first access; later calls reuse the cached result.
        let result = self.memory.get_or_init(|| f.intern(&self.inner));
        match result.as_ref() {
            // Interned content: expose the underlying element slice.
            Ok(Some(FormatElement::Interned(interned))) => Ok(&**interned),
            // A single non-interned element: view it as a one-element slice.
            Ok(Some(other)) => Ok(std::slice::from_ref(other)),
            // The content produced no elements.
            Ok(None) => Ok(&[]),
            Err(error) => Err(*error),
        }
    }
}
impl<F, Context> Format<Context> for Memoized<F, Context>
where
    F: Format<Context>,
{
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Format (and intern) the content on first use; replay it afterwards.
        let memoized = self.memory.get_or_init(|| f.intern(&self.inner));
        if let Some(element) = memoized.as_ref().map_err(|error| *error)? {
            f.write_element(element.clone());
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/arguments.rs | crates/ruff_formatter/src/arguments.rs | use super::{Buffer, Format, Formatter};
use crate::FormatResult;
/// A convenience wrapper for representing a formattable argument.
pub struct Argument<'fmt, Context> {
    /// The type-erased value to format.
    value: &'fmt dyn Format<Context>,
}

impl<Context> Clone for Argument<'_, Context> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<Context> Copy for Argument<'_, Context> {}

impl<'fmt, Context> Argument<'fmt, Context> {
    /// Called by the [ruff_formatter::format_args] macro.
    #[doc(hidden)]
    #[inline]
    pub const fn new<F: Format<Context>>(value: &'fmt F) -> Self {
        Self { value }
    }

    /// Formats the value stored by this argument using the given formatter.
    #[inline]
    // Seems to only be triggered on wasm32 and looks like a false positive?
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub(super) fn format(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        self.value.fmt(f)
    }
}
/// Sequence of objects that should be formatted in the specified order.
///
/// The [`format_args!`] macro will safely create an instance of this structure.
///
/// You can use the `Arguments<'fmt>` that [`format_args!`] return in `Format` context as seen below.
/// It will call the `format` function for each of its objects.
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, format_args};
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [
///     format_args!(token("a"), space(), token("b"))
/// ])?;
///
/// assert_eq!("a b", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
pub struct Arguments<'fmt, Context>(pub &'fmt [Argument<'fmt, Context>]);

impl<'fmt, Context> Arguments<'fmt, Context> {
    #[doc(hidden)]
    #[inline]
    pub const fn new(arguments: &'fmt [Argument<'fmt, Context>]) -> Self {
        Self(arguments)
    }

    /// Returns the arguments
    #[inline]
    #[allow(clippy::trivially_copy_pass_by_ref)] // Bug in Clippy? Sizeof Arguments is 16
    pub(super) fn items(&self) -> &'fmt [Argument<'fmt, Context>] {
        self.0
    }
}

impl<Context> Copy for Arguments<'_, Context> {}

impl<Context> Clone for Arguments<'_, Context> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<Context> Format<Context> for Arguments<'_, Context> {
    /// Formats each argument in order.
    #[inline]
    fn fmt(&self, formatter: &mut Formatter<Context>) -> FormatResult<()> {
        formatter.write_fmt(*self)
    }
}

impl<Context> std::fmt::Debug for Arguments<'_, Context> {
    // The contained values are type-erased `dyn Format`, so only a placeholder is printed.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("Arguments[...]")
    }
}

impl<'fmt, Context> From<&'fmt Argument<'fmt, Context>> for Arguments<'fmt, Context> {
    /// Wraps a single argument as a one-element argument list.
    fn from(argument: &'fmt Argument<'fmt, Context>) -> Self {
        Arguments::new(std::slice::from_ref(argument))
    }
}
#[cfg(test)]
mod tests {
    use crate::format_element::tag::Tag;
    use crate::prelude::*;
    use crate::{FormatState, VecBuffer, format_args, write};

    /// Writing nested `format_args!` flattens into a single element sequence
    /// with group start/end tags around the nested arguments.
    #[test]
    fn test_nesting() {
        let mut context = FormatState::new(SimpleFormatContext::default());
        let mut buffer = VecBuffer::new(&mut context);

        write!(
            &mut buffer,
            [
                token("function"),
                space(),
                token("a"),
                space(),
                group(&format_args!(token("("), token(")")))
            ]
        )
        .unwrap();

        assert_eq!(
            buffer.into_vec(),
            vec![
                FormatElement::Token { text: "function" },
                FormatElement::Space,
                FormatElement::Token { text: "a" },
                FormatElement::Space,
                // Group
                FormatElement::Tag(Tag::StartGroup(tag::Group::new())),
                FormatElement::Token { text: "(" },
                FormatElement::Token { text: ")" },
                FormatElement::Tag(Tag::EndGroup)
            ]
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/builders.rs | crates/ruff_formatter/src/builders.rs | use std::cell::Cell;
use std::marker::PhantomData;
use std::num::NonZeroU8;
#[allow(clippy::enum_glob_use)]
use Tag::*;
use ruff_text_size::TextRange;
use crate::format_element::tag::{Condition, Tag};
use crate::prelude::tag::{DedentMode, GroupMode, LabelId};
use crate::prelude::*;
use crate::{Argument, Arguments, FormatContext, FormatOptions, GroupId, TextSize, write};
use crate::{Buffer, VecBuffer};
/// A line break that only gets printed if the enclosing `Group` doesn't fit on a single line.
/// It's omitted if the enclosing `Group` fits on a single line.
/// A soft line break is identical to a hard line break when not enclosed inside of a `Group`.
///
/// # Examples
///
/// Soft line breaks are omitted if the enclosing `Group` fits on a single line
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
///     group(&format_args![token("a,"), soft_line_break(), token("b")])
/// ])?;
///
/// assert_eq!(
///     "a,b",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
/// See [`soft_line_break_or_space`] if you want to insert a space between the elements if the enclosing
/// `Group` fits on a single line.
///
/// Soft line breaks are emitted if the enclosing `Group` doesn't fit on a single line
/// ```
/// use ruff_formatter::{format, format_args, LineWidth, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
///     line_width: LineWidth::try_from(10).unwrap(),
///     ..SimpleFormatOptions::default()
/// });
///
/// let elements = format!(context, [
///     group(&format_args![
///         token("a long word,"),
///         soft_line_break(),
///         token("so that the group doesn't fit on a single line"),
///     ])
/// ])?;
///
/// assert_eq!(
///     "a long word,\nso that the group doesn't fit on a single line",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub const fn soft_line_break() -> Line {
    Line::new(LineMode::Soft)
}
/// A forced line break that is always printed. A hard line break forces any enclosing `Group`
/// to be printed over multiple lines.
///
/// # Examples
///
/// It forces a line break, even if the enclosing `Group` would otherwise fit on a single line.
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
///     group(&format_args![
///         token("a,"),
///         hard_line_break(),
///         token("b"),
///         hard_line_break()
///     ])
/// ])?;
///
/// assert_eq!(
///     "a,\nb\n",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub const fn hard_line_break() -> Line {
    Line::new(LineMode::Hard)
}
/// A forced empty line. An empty line inserts enough line breaks in the output for
/// the previous and next element to be separated by an empty line.
///
/// # Examples
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(
///     SimpleFormatContext::default(), [
///     group(&format_args![
///         token("a,"),
///         empty_line(),
///         token("b"),
///         empty_line()
///     ])
/// ])?;
///
/// assert_eq!(
///     "a,\n\nb\n\n",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub const fn empty_line() -> Line {
    Line::new(LineMode::Empty)
}
/// A line break if the enclosing `Group` doesn't fit on a single line, a space otherwise.
///
/// # Examples
///
/// The line breaks are emitted as spaces if the enclosing `Group` fits on a single line:
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
///     group(&format_args![
///         token("a,"),
///         soft_line_break_or_space(),
///         token("b"),
///     ])
/// ])?;
///
/// assert_eq!(
///     "a, b",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// The printer breaks the lines if the enclosing `Group` doesn't fit on a single line:
/// ```
/// use ruff_formatter::{format_args, format, LineWidth, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
///     line_width: LineWidth::try_from(10).unwrap(),
///     ..SimpleFormatOptions::default()
/// });
///
/// let elements = format!(context, [
///     group(&format_args![
///         token("a long word,"),
///         soft_line_break_or_space(),
///         token("so that the group doesn't fit on a single line"),
///     ])
/// ])?;
///
/// assert_eq!(
///     "a long word,\nso that the group doesn't fit on a single line",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub const fn soft_line_break_or_space() -> Line {
    Line::new(LineMode::SoftOrSpace)
}
/// A line break element; see [`soft_line_break`], [`hard_line_break`],
/// [`empty_line`] and [`soft_line_break_or_space`] for the available modes.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Line {
    /// How the printer treats this line break.
    mode: LineMode,
}

impl Line {
    const fn new(mode: LineMode) -> Self {
        Self { mode }
    }
}

impl<Context> Format<Context> for Line {
    /// Writes the line break as a single [`FormatElement::Line`].
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        f.write_element(FormatElement::Line(self.mode));
        Ok(())
    }
}

impl std::fmt::Debug for Line {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Line").field(&self.mode).finish()
    }
}
/// Creates a token that gets written as is to the output. A token must be ASCII only and is not allowed
/// to contain any line breaks or tab characters.
///
/// # Examples
///
/// ```
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [token("Hello World")])?;
///
/// assert_eq!(
///     "Hello World",
///     elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// Printing a string literal as a literal requires that the string literal is properly escaped and
/// enclosed in quotes (depending on the target language).
///
/// ```
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// // the tab must be encoded as \\t to not literally print a tab character ("Hello{tab}World" vs "Hello\tWorld")
/// let elements = format!(SimpleFormatContext::default(), [token("\"Hello\\tWorld\"")])?;
///
/// assert_eq!(r#""Hello\tWorld""#, elements.print()?.as_code());
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn token(text: &'static str) -> Token {
    // Both invariants are only checked in debug builds.
    debug_assert!(text.is_ascii(), "Token must be ASCII text only");
    debug_assert!(
        !text.contains(['\n', '\r', '\t']),
        "A token should not contain any newlines or tab characters"
    );

    Token { text }
}
/// A static, ASCII-only piece of text written verbatim; created by [`token`].
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct Token {
    text: &'static str,
}

impl<Context> Format<Context> for Token {
    /// Writes the token as a single [`FormatElement::Token`].
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        f.write_element(FormatElement::Token { text: self.text });
        Ok(())
    }
}

impl std::fmt::Debug for Token {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::write!(f, "Token({})", self.text)
    }
}
/// Creates a source map entry from the passed source `position` to the position in the formatted output.
///
/// ## Examples
///
/// ```
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// // the tab must be encoded as \\t to not literally print a tab character ("Hello{tab}World" vs "Hello\tWorld")
/// use ruff_text_size::TextSize;
/// use ruff_formatter::SourceMarker;
///
///
/// let elements = format!(SimpleFormatContext::default(), [
///     source_position(TextSize::new(0)),
///     token("\"Hello "),
///     source_position(TextSize::new(8)),
///     token("'Ruff'"),
///     source_position(TextSize::new(14)),
///     token("\""),
///     source_position(TextSize::new(20))
/// ])?;
///
/// let printed = elements.print()?;
///
/// assert_eq!(printed.as_code(), r#""Hello 'Ruff'""#);
/// assert_eq!(printed.sourcemap(), [
///     SourceMarker { source: TextSize::new(0), dest: TextSize::new(0) },
///     SourceMarker { source: TextSize::new(8), dest: TextSize::new(7) },
///     SourceMarker { source: TextSize::new(14), dest: TextSize::new(13) },
///     SourceMarker { source: TextSize::new(20), dest: TextSize::new(14) },
/// ]);
///
/// # Ok(())
/// # }
/// ```
pub const fn source_position(position: TextSize) -> SourcePosition {
    SourcePosition(position)
}

/// A source-map marker pointing at the wrapped source offset; created by [`source_position`].
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub struct SourcePosition(TextSize);
impl<Context> Format<Context> for SourcePosition {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Avoid emitting a marker identical to the most recently written one.
        match f.buffer.elements().last() {
            Some(FormatElement::SourcePosition(last_position)) if *last_position == self.0 => {}
            _ => f.write_element(FormatElement::SourcePosition(self.0)),
        }
        Ok(())
    }
}
/// Creates a text from a dynamic string.
///
/// This is done by allocating a new string internally.
pub fn text(text: &str) -> Text<'_> {
    debug_assert_no_newlines(text);

    Text { text }
}

/// Dynamic text written to the output; created by [`text`].
#[derive(Eq, PartialEq)]
pub struct Text<'a> {
    text: &'a str,
}

impl<Context> Format<Context> for Text<'_>
where
    Context: FormatContext,
{
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Copies the text into an owned element; the width is computed up front.
        f.write_element(FormatElement::Text {
            text: self.text.to_string().into_boxed_str(),
            text_width: TextWidth::from_text(self.text, f.options().indent_width()),
        });
        Ok(())
    }
}

impl std::fmt::Debug for Text<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::write!(f, "Text({})", self.text)
    }
}
/// Emits a text as it is written in the source document. Optimized to avoid allocations.
pub const fn source_text_slice(range: TextRange) -> SourceTextSliceBuilder {
    SourceTextSliceBuilder { range }
}

/// Writes the source text covered by `range` verbatim; created by [`source_text_slice`].
#[derive(Eq, PartialEq, Debug)]
pub struct SourceTextSliceBuilder {
    range: TextRange,
}
impl<Context> Format<Context> for SourceTextSliceBuilder
where
    Context: FormatContext,
{
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        let source_code = f.context().source_code();
        let slice = source_code.slice(self.range);
        // Resolve the slice's text once and reuse it for the assertion and
        // the width computation.
        let text = slice.text(source_code);
        debug_assert_no_newlines(text);
        let text_width = TextWidth::from_text(text, f.context().options().indent_width());
        f.write_element(FormatElement::SourceCodeSlice { slice, text_width });
        Ok(())
    }
}
/// Debug-build check that `text` contains no `\r` line terminators; the printer
/// only supports `\n` as the line separator.
fn debug_assert_no_newlines(text: &str) {
    debug_assert!(
        !text.contains('\r'),
        "The content '{text}' contains an unsupported '\\r' line terminator character but text must only use line feeds '\\n' as line separator. Use '\\n' instead of '\\r' and '\\r\\n' to insert a line break in strings."
    );
}
/// Pushes some content to the end of the current line.
///
/// ## Examples
///
/// ```rust
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
/// token("a"),
/// line_suffix(&token("c"), 0),
/// token("b")
/// ])?;
///
/// assert_eq!("abc", elements.print()?.as_code());
/// # Ok(())
/// # }
/// ```
///
/// Provide reserved width for the line suffix to include it during measurement.
/// ```rust
/// use ruff_formatter::{format, format_args, LineWidth, SimpleFormatContext, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
/// line_width: LineWidth::try_from(10).unwrap(),
/// ..SimpleFormatOptions::default()
/// });
///
/// let elements = format!(context, [
/// // Breaks
/// group(&format_args![
/// if_group_breaks(&token("(")),
/// soft_block_indent(&format_args![token("a"), line_suffix(&token(" // a comment"), 13)]),
/// if_group_breaks(&token(")"))
/// ]),
///
/// // Fits
/// group(&format_args![
/// if_group_breaks(&token("(")),
/// soft_block_indent(&format_args![token("a"), line_suffix(&token(" // a comment"), 0)]),
/// if_group_breaks(&token(")"))
/// ]),
/// ])?;
/// # assert_eq!("(\n\ta // a comment\n)a // a comment", elements.print()?.as_code());
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn line_suffix<Content, Context>(
    inner: &Content,
    reserved_width: u32,
) -> LineSuffix<'_, Context>
where
    Content: Format<Context>,
{
    // Type-erase the content; `reserved_width` is taken into account when
    // measuring whether the line fits (see the documentation above).
    let content = Argument::new(inner);
    LineSuffix {
        reserved_width,
        content,
    }
}
/// Builder returned by [`line_suffix`]; wraps its content in a
/// `StartLineSuffix`/`EndLineSuffix` tag pair when formatted.
#[derive(Copy, Clone)]
pub struct LineSuffix<'a, Context> {
    content: Argument<'a, Context>,
    // Width reserved for this suffix during line-fit measurement.
    reserved_width: u32,
}
impl<Context> Format<Context> for LineSuffix<'_, Context> {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        let start = FormatElement::Tag(StartLineSuffix {
            reserved_width: self.reserved_width,
        });
        f.write_element(start);
        // `?` propagates a content error before the end tag is written,
        // matching the usual start/content/end tag emission pattern.
        Arguments::from(&self.content).fmt(f)?;
        f.write_element(FormatElement::Tag(EndLineSuffix));
        Ok(())
    }
}
impl<Context> std::fmt::Debug for LineSuffix<'_, Context> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The content is type-erased, so print a placeholder instead.
        let mut tuple = f.debug_tuple("LineSuffix");
        tuple.field(&"{{content}}");
        tuple.finish()
    }
}
/// Inserts a boundary for line suffixes that forces the printer to print all pending line suffixes.
/// Helpful if a line suffix shouldn't pass a certain point.
///
/// ## Examples
///
/// Forces the line suffix "c" to be printed before the token `d`.
/// ```
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
/// token("a"),
/// line_suffix(&token("c"), 0),
/// token("b"),
/// line_suffix_boundary(),
/// token("d")
/// ])?;
///
/// assert_eq!(
/// "abc\nd",
/// elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
pub const fn line_suffix_boundary() -> LineSuffixBoundary {
    // Zero-sized builder; see the `Format` impl for `LineSuffixBoundary`.
    LineSuffixBoundary
}
/// Builder returned by [`line_suffix_boundary`].
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct LineSuffixBoundary;
impl<Context> Format<Context> for LineSuffixBoundary {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // A boundary is a single, self-contained IR element.
        let element = FormatElement::LineSuffixBoundary;
        f.write_element(element);
        Ok(())
    }
}
/// Marks some content with a label.
///
/// This does not directly influence how this content will be printed, but some
/// parts of the formatter may inspect the [labelled element](Tag::StartLabelled)
/// using [`FormatElements::has_label`].
///
/// ## Examples
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, write, LineWidth};
///
/// #[derive(Debug, Copy, Clone)]
/// enum MyLabels {
/// Main
/// }
///
/// impl tag::LabelDefinition for MyLabels {
/// fn value(&self) -> u64 {
/// *self as u64
/// }
///
/// fn name(&self) -> &'static str {
/// match self {
/// Self::Main => "Main"
/// }
/// }
/// }
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(
/// SimpleFormatContext::default(),
/// [format_with(|f| {
/// let mut recording = f.start_recording();
/// write!(recording, [
/// labelled(
/// LabelId::of(MyLabels::Main),
/// &token("'I have a label'")
/// )
/// ])?;
///
/// let recorded = recording.stop();
///
/// let is_labelled = recorded.first().is_some_and( |element| element.has_label(LabelId::of(MyLabels::Main)));
///
/// if is_labelled {
/// write!(f, [token(" has label `Main`")])
/// } else {
/// write!(f, [token(" doesn't have label `Main`")])
/// }
/// })]
/// )?;
///
/// assert_eq!("'I have a label' has label `Main`", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
///
/// ## Alternatives
///
/// Use `Memoized.inspect(f)?.has_label(LabelId::of::<SomeLabelId>()` if you need to know if some content breaks that should
/// only be written later.
#[inline]
pub fn labelled<Content, Context>(
    label_id: LabelId,
    content: &Content,
) -> FormatLabelled<'_, Context>
where
    Content: Format<Context>,
{
    // Type-erase the content and pair it with the label id.
    let content = Argument::new(content);
    FormatLabelled { label_id, content }
}
/// Builder returned by [`labelled`]; wraps its content in
/// `StartLabelled`/`EndLabelled` tags carrying `label_id`.
#[derive(Copy, Clone)]
pub struct FormatLabelled<'a, Context> {
    label_id: LabelId,
    content: Argument<'a, Context>,
}
impl<Context> Format<Context> for FormatLabelled<'_, Context> {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Bracket the content with the label tag pair; `?` propagates a
        // content error before the end tag is written.
        let start = FormatElement::Tag(StartLabelled(self.label_id));
        let end = FormatElement::Tag(EndLabelled);

        f.write_element(start);
        Arguments::from(&self.content).fmt(f)?;
        f.write_element(end);
        Ok(())
    }
}
impl<Context> std::fmt::Debug for FormatLabelled<'_, Context> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The content is type-erased, so only the label id is shown.
        let mut tuple = f.debug_tuple("Label");
        tuple.field(&self.label_id);
        tuple.field(&"{{content}}");
        tuple.finish()
    }
}
/// Inserts a single space. Allows to separate different tokens.
///
/// # Examples
///
/// ```
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// // a single space separates the two tokens in the printed output
/// let elements = format!(SimpleFormatContext::default(), [token("a"), space(), token("b")])?;
///
/// assert_eq!("a b", elements.print()?.as_code());
/// # Ok(())
/// # }
/// ```
#[inline]
pub const fn space() -> Space {
    // Zero-sized builder; the `Format` impl emits `FormatElement::Space`.
    Space
}
/// Builder returned by [`space`].
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Space;
impl<Context> Format<Context> for Space {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // A space is a single atomic IR element.
        let element = FormatElement::Space;
        f.write_element(element);
        Ok(())
    }
}
/// It adds a level of indentation to the given content
///
/// It doesn't add any line breaks at the edges of the content, meaning that
/// the line breaks have to be manually added.
///
/// This helper should be used only in rare cases, instead you should rely more on
/// [`block_indent`] and [`soft_block_indent`]
///
/// # Examples
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let block = format!(SimpleFormatContext::default(), [
/// token("switch {"),
/// block_indent(&format_args![
/// token("default:"),
/// indent(&format_args![
/// // this is where we want to use a
/// hard_line_break(),
/// token("break;"),
/// ])
/// ]),
/// token("}"),
/// ])?;
///
/// assert_eq!(
/// "switch {\n\tdefault:\n\t\tbreak;\n}",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn indent<Content, Context>(content: &Content) -> Indent<'_, Context>
where
    Content: Format<Context>,
{
    // Type-erase the content; the `Format` impl wraps it in indent tags.
    let content = Argument::new(content);
    Indent { content }
}
/// Builder returned by [`indent`]; see its documentation.
#[derive(Copy, Clone)]
pub struct Indent<'a, Context> {
    content: Argument<'a, Context>,
}
impl<Context> Format<Context> for Indent<'_, Context> {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Bracket the content with an indent tag pair; `?` propagates a
        // content error before the end tag is written.
        let start = FormatElement::Tag(StartIndent);
        let end = FormatElement::Tag(EndIndent);

        f.write_element(start);
        Arguments::from(&self.content).fmt(f)?;
        f.write_element(end);
        Ok(())
    }
}
impl<Context> std::fmt::Debug for Indent<'_, Context> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The content is type-erased, so print a placeholder instead.
        let mut tuple = f.debug_tuple("Indent");
        tuple.field(&"{{content}}");
        tuple.finish()
    }
}
/// It reduces the indentation for the given content depending on the closest [indent] or [align] parent element.
/// - [align] Undoes the spaces added by [align]
/// - [indent] Reduces the indentation level by one
///
/// This is a No-op if the indentation level is zero.
///
/// # Examples
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let block = format!(SimpleFormatContext::default(), [
/// token("root"),
/// align(2, &format_args![
/// hard_line_break(),
/// token("aligned"),
/// dedent(&format_args![
/// hard_line_break(),
/// token("not aligned"),
/// ]),
/// dedent(&indent(&format_args![
/// hard_line_break(),
/// token("Indented, not aligned")
/// ]))
/// ]),
/// dedent(&format_args![
/// hard_line_break(),
/// token("Dedent on root level is a no-op.")
/// ])
/// ])?;
///
/// assert_eq!(
/// "root\n aligned\nnot aligned\n\tIndented, not aligned\nDedent on root level is a no-op.",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn dedent<Content, Context>(content: &Content) -> Dedent<'_, Context>
where
    Content: Format<Context>,
{
    // `DedentMode::Level` removes a single indent/align level; use
    // `dedent_to_root` to reset the indentation entirely.
    Dedent {
        mode: DedentMode::Level,
        content: Argument::new(content),
    }
}
/// Builder returned by [`dedent`] and [`dedent_to_root`]; `mode` distinguishes
/// the two.
#[derive(Copy, Clone)]
pub struct Dedent<'a, Context> {
    content: Argument<'a, Context>,
    mode: DedentMode,
}
impl<Context> Format<Context> for Dedent<'_, Context> {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Bracket the content with a dedent tag pair carrying the mode; `?`
        // propagates a content error before the end tag is written.
        let start = FormatElement::Tag(StartDedent(self.mode));

        f.write_element(start);
        Arguments::from(&self.content).fmt(f)?;
        f.write_element(FormatElement::Tag(EndDedent));
        Ok(())
    }
}
impl<Context> std::fmt::Debug for Dedent<'_, Context> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The content is type-erased, so print a placeholder instead.
        let mut tuple = f.debug_tuple("Dedent");
        tuple.field(&"{{content}}");
        tuple.finish()
    }
}
/// It resets the indent document so that the content will be printed at the start of the line.
///
/// # Examples
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let block = format!(SimpleFormatContext::default(), [
/// token("root"),
/// indent(&format_args![
/// hard_line_break(),
/// token("indent level 1"),
/// indent(&format_args![
/// hard_line_break(),
/// token("indent level 2"),
/// align(2, &format_args![
/// hard_line_break(),
/// token("two space align"),
/// dedent_to_root(&format_args![
/// hard_line_break(),
/// token("starts at the beginning of the line")
/// ]),
/// ]),
/// hard_line_break(),
/// token("end indent level 2"),
/// ])
/// ]),
/// ])?;
///
/// assert_eq!(
/// "root\n\tindent level 1\n\t\tindent level 2\n\t\t two space align\nstarts at the beginning of the line\n\t\tend indent level 2",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// ## Prettier
///
/// This resembles the behaviour of Prettier's `align(Number.NEGATIVE_INFINITY, content)` IR element.
#[inline]
pub fn dedent_to_root<Content, Context>(content: &Content) -> Dedent<'_, Context>
where
    Content: Format<Context>,
{
    // Same builder as `dedent`, but `DedentMode::Root` resets the indentation
    // to the start of the line instead of removing one level.
    Dedent {
        mode: DedentMode::Root,
        content: Argument::new(content),
    }
}
/// Aligns its content by indenting the content by `count` spaces.
///
/// [align] is a variant of `[indent]` that indents its content by a specified number of spaces rather than
/// using the configured indent character (tab or a specified number of spaces).
///
/// You should use [align] when you want to indent a content by a specific number of spaces.
/// Using [indent] is preferred in all other situations as it respects the users preferred indent character.
///
/// # Examples
///
/// ## Tab indentation
///
/// ```
/// use std::num::NonZeroU8;
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let block = format!(SimpleFormatContext::default(), [
/// token("a"),
/// hard_line_break(),
/// token("?"),
/// space(),
/// align(2, &format_args![
/// token("function () {"),
/// hard_line_break(),
/// token("}"),
/// ]),
/// hard_line_break(),
/// token(":"),
/// space(),
/// align(2, &format_args![
/// token("function () {"),
/// block_indent(&token("console.log('test');")),
/// token("}"),
/// ]),
/// token(";")
/// ])?;
///
/// assert_eq!(
/// "a\n? function () {\n }\n: function () {\n\t\tconsole.log('test');\n };",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// You can see that:
///
/// - the printer indents the function's `}` by two spaces because it is inside of an `align`.
/// - the block `console.log` gets indented by two tabs.
/// This is because `align` increases the indentation level by one (same as `indent`)
/// if you nest an `indent` inside an `align`.
/// Meaning that, `align > ... > indent` results in the same indentation as `indent > ... > indent`.
///
/// ## Spaces indentation
///
/// ```
/// use std::num::NonZeroU8;
/// use ruff_formatter::{format, format_args, IndentStyle, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// use ruff_formatter::IndentWidth;
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
/// indent_style: IndentStyle::Space,
/// indent_width: IndentWidth::try_from(4).unwrap(),
/// ..SimpleFormatOptions::default()
/// });
///
/// let block = format!(context, [
/// token("a"),
/// hard_line_break(),
/// token("?"),
/// space(),
/// align(2, &format_args![
/// token("function () {"),
/// hard_line_break(),
/// token("}"),
/// ]),
/// hard_line_break(),
/// token(":"),
/// space(),
/// align(2, &format_args![
/// token("function () {"),
/// block_indent(&token("console.log('test');")),
/// token("}"),
/// ]),
/// token(";")
/// ])?;
///
/// assert_eq!(
/// "a\n? function () {\n }\n: function () {\n console.log('test');\n };",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// The printing of `align` differs if using spaces as indentation sequence *and* it contains an `indent`.
/// You can see the difference when comparing the indentation of the `console.log(...)` expression to the previous example:
///
/// - tab indentation: Printer indents the expression with two tabs because the `align` increases the indentation level.
/// - space indentation: Printer indents the expression by 4 spaces (one indentation level) **and** 2 spaces for the align.
pub fn align<Content, Context>(count: u8, content: &Content) -> Align<'_, Context>
where
    Content: Format<Context>,
{
    // Panics if `count` is zero: aligning by zero spaces is meaningless.
    let count = NonZeroU8::new(count).expect("Alignment count must be a non-zero number.");
    Align {
        count,
        content: Argument::new(content),
    }
}
/// Builder returned by [`align`]; see its documentation.
#[derive(Copy, Clone)]
pub struct Align<'a, Context> {
    // Number of spaces to align by; guaranteed non-zero by `align`.
    count: NonZeroU8,
    content: Argument<'a, Context>,
}
impl<Context> Format<Context> for Align<'_, Context> {
    fn fmt(&self, f: &mut Formatter<Context>) -> FormatResult<()> {
        // Bracket the content with an align tag pair carrying the count; `?`
        // propagates a content error before the end tag is written.
        let start = FormatElement::Tag(StartAlign(tag::Align(self.count)));

        f.write_element(start);
        Arguments::from(&self.content).fmt(f)?;
        f.write_element(FormatElement::Tag(EndAlign));
        Ok(())
    }
}
impl<Context> std::fmt::Debug for Align<'_, Context> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The content is type-erased, so only the count is shown.
        let mut builder = f.debug_struct("Align");
        builder.field("count", &self.count);
        builder.field("content", &"{{content}}");
        builder.finish()
    }
}
/// Inserts a hard line break before and after the content and increases the indentation level for the content by one.
///
/// Block indents indent a block of code, such as in a function body, and therefore insert a line
/// break before and after the content.
///
/// Doesn't create an indentation if the passed in content is empty.
///
/// # Examples
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let block = format![
/// SimpleFormatContext::default(),
/// [
/// token("{"),
/// block_indent(&format_args![
/// token("let a = 10;"),
/// hard_line_break(),
/// token("let c = a + 5;"),
/// ]),
/// token("}"),
/// ]
/// ]?;
///
/// assert_eq!(
/// "{\n\tlet a = 10;\n\tlet c = a + 5;\n}",
/// block.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn block_indent<Context>(content: &impl Format<Context>) -> BlockIndent<'_, Context> {
    // `Block` mode always surrounds the content with line breaks (see the
    // documentation above).
    BlockIndent {
        mode: IndentMode::Block,
        content: Argument::new(content),
    }
}
/// Indents the content by inserting a line break before and after the content and increasing
/// the indentation level for the content by one if the enclosing group doesn't fit on a single line.
/// Doesn't change the formatting if the enclosing group fits on a single line.
///
/// # Examples
///
/// Indents the content by one level and puts in new lines if the enclosing `Group` doesn't fit on a single line
///
/// ```
/// use ruff_formatter::{format, format_args, LineWidth, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
/// line_width: LineWidth::try_from(10).unwrap(),
/// ..SimpleFormatOptions::default()
/// });
///
/// let elements = format!(context, [
/// group(&format_args![
/// token("["),
/// soft_block_indent(&format_args![
/// token("'First string',"),
/// soft_line_break_or_space(),
/// token("'second string',"),
/// ]),
/// token("]"),
/// ])
/// ])?;
///
/// assert_eq!(
/// "[\n\t'First string',\n\t'second string',\n]",
/// elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// Doesn't change the formatting if the enclosing `Group` fits on a single line
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let elements = format!(SimpleFormatContext::default(), [
/// group(&format_args![
/// token("["),
/// soft_block_indent(&format_args![
/// token("5,"),
/// soft_line_break_or_space(),
/// token("10"),
/// ]),
/// token("]"),
/// ])
/// ])?;
///
/// assert_eq!(
/// "[5, 10]",
/// elements.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn soft_block_indent<Context>(content: &impl Format<Context>) -> BlockIndent<'_, Context> {
    // `Soft` mode only breaks and indents when the enclosing group doesn't
    // fit on a single line (see the documentation above).
    BlockIndent {
        mode: IndentMode::Soft,
        content: Argument::new(content),
    }
}
/// If the enclosing `Group` doesn't fit on a single line, inserts a line break and indent.
/// Otherwise, just inserts a space.
///
/// Line indents are used to break a single line of code, and therefore only insert a line
/// break before the content and not after the content.
///
/// # Examples
///
/// Indents the content by one level and puts in new lines if the enclosing `Group` doesn't
/// fit on a single line. Otherwise, just inserts a space.
///
/// ```
/// use ruff_formatter::{format, format_args, LineWidth, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let context = SimpleFormatContext::new(SimpleFormatOptions {
/// line_width: LineWidth::try_from(10).unwrap(),
/// ..SimpleFormatOptions::default()
/// });
///
/// let elements = format!(context, [
/// group(&format_args![
/// token("name"),
/// space(),
/// token("="),
/// soft_line_indent_or_space(&format_args![
/// token("firstName"),
/// space(),
/// token("+"),
/// space(),
/// token("lastName"),
/// ]),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/macros.rs | crates/ruff_formatter/src/macros.rs | /// Constructs the parameters for other formatting macros.
///
/// This macro functions by taking a list of objects implementing [`crate::Format`]. It will canonicalize the
/// arguments into a single type.
///
/// This macro produces a value of type [`crate::Arguments`]. This value can be passed to
/// the macros within [crate]. All other formatting macros ([`format!`](crate::format!),
/// [`write!`](crate::write!)) are proxied through this one. This macro avoids heap allocations.
///
/// You can use the [`Arguments`] value that `format_args!` returns in `Format` contexts
/// as seen below.
///
/// ```rust
/// use ruff_formatter::{SimpleFormatContext, format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [
/// format_args!(token("Hello World"))
/// ])?;
///
/// assert_eq!("Hello World", formatted.print()?.as_code());
/// # Ok(())
/// # }
/// ```
///
/// [`Format`]: crate::Format
/// [`Arguments`]: crate::Arguments
#[macro_export]
macro_rules! format_args {
    // Accepts one or more `Format` values (with an optional trailing comma)
    // and packs them into a stack-allocated `Arguments` list — no heap
    // allocation is involved.
    ($($value:expr),+ $(,)?) => {
        $crate::Arguments::new(&[
            $(
                $crate::Argument::new(&$value)
            ),+
        ])
    }
}
/// Writes formatted data into a buffer.
///
/// This macro accepts a 'buffer' and a list of format arguments. Each argument will be formatted
/// and the result will be passed to the buffer. The writer may be any value with a `write_fmt` method;
/// generally this comes from an implementation of the [`crate::Buffer`] trait.
///
/// # Examples
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{Buffer, FormatState, SimpleFormatContext, VecBuffer, write};
///
/// # fn main() -> FormatResult<()> {
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
/// write!(&mut buffer, [token("Hello"), space()])?;
/// write!(&mut buffer, [token("World")])?;
///
/// assert_eq!(
/// buffer.into_vec(),
/// vec![
/// FormatElement::Token { text: "Hello" },
/// FormatElement::Space,
/// FormatElement::Token { text: "World" },
/// ]
/// );
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! write {
    ($dst:expr, [$($arg:expr),+ $(,)?]) => {{
        // NOTE(review): the intermediate binding mirrors the expansion shape of
        // `std::write!`; presumably kept for temporary-lifetime reasons — confirm
        // before simplifying to a bare expression.
        let result = $dst.write_fmt($crate::format_args!($($arg),+));
        result
    }}
}
/// Writes formatted data into the given buffer and prints all written elements for a quick and dirty debugging.
///
/// An example:
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{FormatState, VecBuffer};
///
/// # fn main() -> FormatResult<()> {
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
///
/// dbg_write!(buffer, [token("Hello")])?;
/// // ^-- prints: [src/main.rs:7][0] = Token { text: "Hello" }
///
/// assert_eq!(buffer.into_vec(), vec![FormatElement::Token { text: "Hello" }]);
/// # Ok(())
/// # }
/// ```
///
/// Note that the macro is intended as debugging tool and therefore you should avoid having
/// uses of it in version control for long periods (other than in tests and similar). Format output
/// from production code is better done with `[write!]`
#[macro_export]
macro_rules! dbg_write {
    ($dst:expr, [$($arg:expr),+ $(,)?]) => {{
        use $crate::BufferExtensions;
        // Number every element as it is written so the eprintln output can be
        // correlated with the order of written elements.
        let mut count = 0;
        let mut inspect = $dst.inspect(|element: &FormatElement| {
            std::eprintln!(
                "[{}:{}][{}] = {element:#?}",
                std::file!(), std::line!(), count
            );
            count += 1;
        });

        let result = inspect.write_fmt($crate::format_args!($($arg),+));
        result
    }}
}
/// Creates the Format IR for a value.
///
/// The first argument `format!` receives is the [`crate::FormatContext`] that specify how elements must be formatted.
/// Additional parameters passed get formatted by using their [`crate::Format`] implementation.
///
///
/// ## Examples
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::format;
///
/// let formatted = format!(SimpleFormatContext::default(), [token("("), token("a"), token(")")]).unwrap();
///
/// assert_eq!(
/// formatted.into_document(),
/// Document::from(vec![
/// FormatElement::Token { text: "(" },
/// FormatElement::Token { text: "a" },
/// FormatElement::Token { text: ")" },
/// ])
/// );
/// ```
#[macro_export]
macro_rules! format {
    // Delegates to `crate::format` with the arguments packed via `format_args!`.
    ($context:expr, [$($arg:expr),+ $(,)?]) => {{
        ($crate::format($context, $crate::format_args!($($arg),+)))
    }}
}
/// Provides multiple different alternatives and the printer picks the first one that fits.
/// Use this as last resort because it requires that the printer must try all variants in the worst case.
/// The passed variants must be in the following order:
/// - First: The variant that takes up most space horizontally
/// - Last: The variant that takes up the least space horizontally by splitting the content over multiple lines.
///
/// ## Examples
///
/// ```
/// use ruff_formatter::{Formatted, LineWidth, format, format_args, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(
/// SimpleFormatContext::default(),
/// [
/// token("aVeryLongIdentifier"),
/// best_fitting!(
/// // Everything fits on a single line
/// format_args!(
/// token("("),
/// group(&format_args![
/// token("["),
/// soft_block_indent(&format_args![
/// token("1,"),
/// soft_line_break_or_space(),
/// token("2,"),
/// soft_line_break_or_space(),
/// token("3"),
/// ]),
/// token("]")
/// ]),
/// token(")")
/// ),
///
/// // Breaks after `[`, but prints all elements on a single line
/// format_args!(
/// token("("),
/// token("["),
/// block_indent(&token("1, 2, 3")),
/// token("]"),
/// token(")"),
/// ),
///
/// // Breaks after `[` and prints each element on a single line
/// format_args!(
/// token("("),
/// block_indent(&format_args![
/// token("["),
/// block_indent(&format_args![
/// token("1,"),
/// hard_line_break(),
/// token("2,"),
/// hard_line_break(),
/// token("3"),
/// ]),
/// token("]"),
/// ]),
/// token(")")
/// )
/// )
/// ]
/// )?;
///
/// let document = formatted.into_document();
///
/// // Takes the first variant if everything fits on a single line
/// assert_eq!(
/// "aVeryLongIdentifier([1, 2, 3])",
/// Formatted::new(document.clone(), SimpleFormatContext::default())
/// .print()?
/// .as_code()
/// );
///
/// // It takes the second if the first variant doesn't fit on a single line. The second variant
/// // has some additional line breaks to make sure inner groups don't break
/// assert_eq!(
/// "aVeryLongIdentifier([\n\t1, 2, 3\n])",
/// Formatted::new(document.clone(), SimpleFormatContext::new(SimpleFormatOptions { line_width: 21.try_into().unwrap(), ..SimpleFormatOptions::default() }))
/// .print()?
/// .as_code()
/// );
///
/// // Prints the last option as last resort
/// assert_eq!(
/// "aVeryLongIdentifier(\n\t[\n\t\t1,\n\t\t2,\n\t\t3\n\t]\n)",
/// Formatted::new(document.clone(), SimpleFormatContext::new(SimpleFormatOptions { line_width: 20.try_into().unwrap(), ..SimpleFormatOptions::default() }))
/// .print()?
/// .as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// ### Enclosing group with `should_expand: true`
///
/// ```
/// use ruff_formatter::{Formatted, LineWidth, format, format_args, SimpleFormatOptions};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(
/// SimpleFormatContext::default(),
/// [
/// best_fitting!(
/// // Prints the method call on the line but breaks the array.
/// format_args!(
/// token("expect(a).toMatch("),
/// group(&format_args![
/// token("["),
/// soft_block_indent(&format_args![
/// token("1,"),
/// soft_line_break_or_space(),
/// token("2,"),
/// soft_line_break_or_space(),
/// token("3"),
/// ]),
/// token("]")
/// ]).should_expand(true),
/// token(")")
/// ),
///
/// // Breaks after `(`
/// format_args!(
/// token("expect(a).toMatch("),
/// group(&soft_block_indent(
/// &group(&format_args![
/// token("["),
/// soft_block_indent(&format_args![
/// token("1,"),
/// soft_line_break_or_space(),
/// token("2,"),
/// soft_line_break_or_space(),
/// token("3"),
/// ]),
/// token("]")
/// ]).should_expand(true),
/// )).should_expand(true),
/// token(")")
/// ),
/// )
/// ]
/// )?;
///
/// let document = formatted.into_document();
///
/// assert_eq!(
/// "expect(a).toMatch([\n\t1,\n\t2,\n\t3\n])",
/// Formatted::new(document.clone(), SimpleFormatContext::default())
/// .print()?
/// .as_code()
/// );
///
/// # Ok(())
/// # }
/// ```
///
/// The first variant fits because all its content up to the first line break fit on the line without exceeding
/// the configured print width.
///
/// ## Complexity
/// Be mindful of using this IR element as it has a considerable performance penalty:
/// - There are multiple representation for the same content. This results in increased memory usage
/// and traversal time in the printer.
/// - The worst case complexity is that the printer tries each variant. This can result in quadratic
/// complexity if used in nested structures.
///
/// ## Behavior
/// This IR is similar to Prettier's `conditionalGroup`. The printer measures each variant, except the [`MostExpanded`], in [`Flat`] mode
/// to find the first variant that fits and prints this variant in [`Flat`] mode. If no variant fits, then
/// the printer falls back to printing the [`MostExpanded`] variant in [`Expanded`] mode.
///
/// The definition of *fits* differs to groups in that the printer only tests if it is possible to print
/// the content up to the first non-soft line break without exceeding the configured print width.
/// This definition differs from groups as that non-soft line breaks make group expand.
///
/// [`crate::BestFitting`] acts as a "break" boundary, meaning that it is considered to fit
///
///
/// [`Flat`]: crate::format_element::PrintMode::Flat
/// [`Expanded`]: crate::format_element::PrintMode::Expanded
/// [`MostExpanded`]: crate::format_element::BestFittingVariants::most_expanded
#[macro_export]
macro_rules! best_fitting {
    // The first expression is the flattest (most horizontal) variant; the tail
    // grows progressively more expanded, as documented above.
    ($least_expanded:expr, $($tail:expr),+ $(,)?) => {
        // OK because the macro syntax requires at least two variants.
        $crate::BestFitting::from_arguments_unchecked($crate::format_args!($least_expanded, $($tail),+))
    }
}
#[cfg(test)]
mod tests {
    use crate::prelude::*;
    use crate::{FormatState, SimpleFormatOptions, VecBuffer};

    // Minimal custom `Format` implementation, used to verify that user-defined
    // formatters can be mixed with the built-in builders inside `write!`.
    struct TestFormat;

    impl Format<SimpleFormatContext> for TestFormat {
        fn fmt(&self, f: &mut Formatter<SimpleFormatContext>) -> FormatResult<()> {
            write!(f, [token("test")])
        }
    }

    // A single argument to `write!` produces exactly one IR element.
    #[test]
    fn test_single_element() {
        let mut state = FormatState::new(SimpleFormatContext::default());
        let mut buffer = VecBuffer::new(&mut state);

        write![&mut buffer, [TestFormat]].unwrap();

        assert_eq!(
            buffer.into_vec(),
            vec![FormatElement::Token { text: "test" }]
        );
    }

    // Multiple arguments are written to the buffer in argument order.
    #[test]
    fn test_multiple_elements() {
        let mut state = FormatState::new(SimpleFormatContext::default());
        let mut buffer = VecBuffer::new(&mut state);

        write![
            &mut buffer,
            [token("a"), space(), token("simple"), space(), TestFormat]
        ]
        .unwrap();

        assert_eq!(
            buffer.into_vec(),
            vec![
                FormatElement::Token { text: "a" },
                FormatElement::Space,
                FormatElement::Token { text: "simple" },
                FormatElement::Space,
                FormatElement::Token { text: "test" }
            ]
        );
    }

    // A `best_fitting!` variant that fits must print exactly like the same
    // content written outside of a `BestFitting` element.
    #[test]
    fn best_fitting_variants_print_as_lists() {
        use crate::Formatted;
        use crate::prelude::*;

        // The second variant below should be selected when printing at a width of 30
        let formatted_best_fitting = format!(
            SimpleFormatContext::default(),
            [
                token("aVeryLongIdentifier"),
                soft_line_break_or_space(),
                best_fitting![
                    format_args![token(
                        "Something that will not fit on a line with 30 character print width."
                    )],
                    format_args![
                        group(&format_args![
                            token("Start"),
                            soft_line_break(),
                            group(&soft_block_indent(&format_args![
                                token("1,"),
                                soft_line_break_or_space(),
                                token("2,"),
                                soft_line_break_or_space(),
                                token("3"),
                            ])),
                            soft_line_break_or_space(),
                            soft_block_indent(&format_args![
                                token("1,"),
                                soft_line_break_or_space(),
                                token("2,"),
                                soft_line_break_or_space(),
                                group(&format_args!(
                                    token("A,"),
                                    soft_line_break_or_space(),
                                    token("B")
                                )),
                                soft_line_break_or_space(),
                                token("3")
                            ]),
                            soft_line_break_or_space(),
                            token("End")
                        ])
                        .should_expand(true)
                    ],
                    format_args!(token("Most"), hard_line_break(), token("Expanded"))
                ]
            ]
        )
        .unwrap();

        // This matches the IR above except that the `best_fitting` was replaced with
        // the contents of its second variant.
        let formatted_normal_list = format!(
            SimpleFormatContext::default(),
            [
                token("aVeryLongIdentifier"),
                soft_line_break_or_space(),
                format_args![
                    token("Start"),
                    soft_line_break(),
                    &group(&soft_block_indent(&format_args![
                        token("1,"),
                        soft_line_break_or_space(),
                        token("2,"),
                        soft_line_break_or_space(),
                        token("3"),
                    ])),
                    soft_line_break_or_space(),
                    &soft_block_indent(&format_args![
                        token("1,"),
                        soft_line_break_or_space(),
                        token("2,"),
                        soft_line_break_or_space(),
                        group(&format_args!(
                            token("A,"),
                            soft_line_break_or_space(),
                            token("B")
                        )),
                        soft_line_break_or_space(),
                        token("3")
                    ]),
                    soft_line_break_or_space(),
                    token("End")
                ],
            ]
        )
        .unwrap();

        let best_fitting_code = Formatted::new(
            formatted_best_fitting.into_document(),
            SimpleFormatContext::new(SimpleFormatOptions {
                line_width: 30.try_into().unwrap(),
                ..SimpleFormatOptions::default()
            }),
        )
        .print()
        .expect("Document to be valid")
        .as_code()
        .to_string();

        let normal_list_code = Formatted::new(
            formatted_normal_list.into_document(),
            SimpleFormatContext::new(SimpleFormatOptions {
                line_width: 30.try_into().unwrap(),
                ..SimpleFormatOptions::default()
            }),
        )
        .print()
        .expect("Document to be valid")
        .as_code()
        .to_string();

        // The variant that "fits" will print its contents as if it were a normal list
        // outside of a BestFitting element.
        assert_eq!(best_fitting_code, normal_list_code);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/group_id.rs | crates/ruff_formatter/src/group_id.rs | use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct DebugGroupId {
value: NonZeroU32,
#[cfg_attr(feature = "serde", serde(skip))]
name: &'static str,
}
impl DebugGroupId {
#[allow(unused)]
fn new(value: NonZeroU32, debug_name: &'static str) -> Self {
Self {
value,
name: debug_name,
}
}
}
impl std::fmt::Debug for DebugGroupId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "#{}-{}", self.name, self.value)
}
}
/// Unique identification for a group.
///
/// See [`crate::Formatter::group_id`] on how to get a unique id.
#[repr(transparent)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ReleaseGroupId {
value: NonZeroU32,
}
impl ReleaseGroupId {
/// Creates a new unique group id with the given debug name (only stored in debug builds)
#[allow(unused)]
fn new(value: NonZeroU32, _: &'static str) -> Self {
Self { value }
}
}
impl From<GroupId> for u32 {
fn from(id: GroupId) -> Self {
id.value.get()
}
}
impl std::fmt::Debug for ReleaseGroupId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "#{}", self.value)
}
}
#[cfg(not(debug_assertions))]
pub type GroupId = ReleaseGroupId;
#[cfg(debug_assertions)]
pub type GroupId = DebugGroupId;
/// Builder to construct unique group ids that are unique if created with the same builder.
pub(super) struct UniqueGroupIdBuilder {
next_id: AtomicU32,
}
impl UniqueGroupIdBuilder {
/// Creates a new unique group id with the given debug name.
pub(crate) fn group_id(&self, debug_name: &'static str) -> GroupId {
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
let id = NonZeroU32::new(id).unwrap_or_else(|| panic!("Group ID counter overflowed"));
GroupId::new(id, debug_name)
}
}
impl Default for UniqueGroupIdBuilder {
fn default() -> Self {
UniqueGroupIdBuilder {
// Start with 1 because `GroupId` wraps a `NonZeroU32` to reduce memory usage.
next_id: AtomicU32::new(1),
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/buffer.rs | crates/ruff_formatter/src/buffer.rs | use super::{Arguments, FormatElement, write};
use crate::format_element::Interned;
use crate::prelude::{LineMode, Tag};
use crate::{FormatResult, FormatState};
use rustc_hash::FxHashMap;
use std::any::{Any, TypeId};
use std::fmt::Debug;
use std::num::NonZeroUsize;
use std::ops::{Deref, DerefMut};
/// A trait for writing or formatting into [`FormatElement`]-accepting buffers or streams.
pub trait Buffer {
/// The context used during formatting
type Context;
/// Writes a [`crate::FormatElement`] into this buffer, returning whether the write succeeded.
///
/// # Errors
/// This function will return an instance of [`crate::FormatError`] on error.
///
/// # Examples
///
/// ```
/// use ruff_formatter::{Buffer, FormatElement, FormatState, SimpleFormatContext, VecBuffer};
///
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
///
/// buffer.write_element(FormatElement::Token { text: "test"});
///
/// assert_eq!(buffer.into_vec(), vec![FormatElement::Token { text: "test" }]);
/// ```
fn write_element(&mut self, element: FormatElement);
/// Returns a slice containing all elements written into this buffer.
///
/// Prefer using [BufferExtensions::start_recording] over accessing [Buffer::elements] directly.
#[doc(hidden)]
fn elements(&self) -> &[FormatElement];
/// Glue for usage of the [`write!`] macro with implementers of this trait.
///
/// This method should generally not be invoked manually, but rather through the [`write!`] macro itself.
///
/// # Examples
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{Buffer, FormatState, SimpleFormatContext, VecBuffer, format_args};
///
/// let mut state = FormatState::new(SimpleFormatContext::default());
/// let mut buffer = VecBuffer::new(&mut state);
///
/// buffer.write_fmt(format_args!(token("Hello World"))).unwrap();
///
/// assert_eq!(buffer.into_vec(), vec![FormatElement::Token{ text: "Hello World" }]);
/// ```
fn write_fmt(mut self: &mut Self, arguments: Arguments<Self::Context>) -> FormatResult<()> {
write(&mut self, arguments)
}
/// Returns the formatting state relevant for this formatting session.
fn state(&self) -> &FormatState<Self::Context>;
/// Returns the mutable formatting state relevant for this formatting session.
fn state_mut(&mut self) -> &mut FormatState<Self::Context>;
/// Takes a snapshot of the Buffers state, excluding the formatter state.
fn snapshot(&self) -> BufferSnapshot;
/// Restores the snapshot buffer
///
/// ## Panics
/// If the passed snapshot id is a snapshot of another buffer OR
/// if the snapshot is restored out of order
fn restore_snapshot(&mut self, snapshot: BufferSnapshot);
}
/// Snapshot of a buffer state that can be restored at a later point.
///
/// Used in cases where the formatting of an object fails but a parent formatter knows an alternative
/// strategy on how to format the object that might succeed.
#[derive(Debug)]
pub enum BufferSnapshot {
/// Stores an absolute position of a buffers state, for example, the offset of the last written element.
Position(usize),
/// Generic structure for custom buffers that need to store more complex data. Slightly more
/// expensive because it requires allocating the buffer state on the heap.
Any(Box<dyn Any>),
}
impl BufferSnapshot {
/// Creates a new buffer snapshot that points to the specified position.
pub const fn position(index: usize) -> Self {
Self::Position(index)
}
/// Unwraps the position value.
///
/// # Panics
///
/// If self is not a [`BufferSnapshot::Position`]
pub fn unwrap_position(&self) -> usize {
match self {
BufferSnapshot::Position(index) => *index,
BufferSnapshot::Any(_) => panic!("Tried to unwrap Any snapshot as a position."),
}
}
/// Unwraps the any value.
///
/// # Panics
///
/// If `self` is not a [`BufferSnapshot::Any`].
pub fn unwrap_any<T: 'static>(self) -> T {
match self {
BufferSnapshot::Position(_) => {
panic!("Tried to unwrap Position snapshot as Any snapshot.")
}
BufferSnapshot::Any(value) => match value.downcast::<T>() {
Ok(snapshot) => *snapshot,
Err(err) => {
panic!(
"Tried to unwrap snapshot of type {:?} as {:?}",
(*err).type_id(),
TypeId::of::<T>()
)
}
},
}
}
}
/// Implements the `[Buffer]` trait for all mutable references of objects implementing [Buffer].
impl<W: Buffer<Context = Context> + ?Sized, Context> Buffer for &mut W {
type Context = Context;
fn write_element(&mut self, element: FormatElement) {
(**self).write_element(element);
}
fn elements(&self) -> &[FormatElement] {
(**self).elements()
}
fn write_fmt(&mut self, args: Arguments<Context>) -> FormatResult<()> {
(**self).write_fmt(args)
}
fn state(&self) -> &FormatState<Self::Context> {
(**self).state()
}
fn state_mut(&mut self) -> &mut FormatState<Self::Context> {
(**self).state_mut()
}
fn snapshot(&self) -> BufferSnapshot {
(**self).snapshot()
}
fn restore_snapshot(&mut self, snapshot: BufferSnapshot) {
(**self).restore_snapshot(snapshot);
}
}
/// Vector backed [`Buffer`] implementation.
///
/// The buffer writes all elements into the internal elements buffer.
#[derive(Debug)]
pub struct VecBuffer<'a, Context> {
state: &'a mut FormatState<Context>,
elements: Vec<FormatElement>,
}
impl<'a, Context> VecBuffer<'a, Context> {
pub fn new(state: &'a mut FormatState<Context>) -> Self {
Self::new_with_vec(state, Vec::new())
}
pub fn new_with_vec(state: &'a mut FormatState<Context>, elements: Vec<FormatElement>) -> Self {
Self { state, elements }
}
/// Creates a buffer with the specified capacity
pub fn with_capacity(capacity: usize, state: &'a mut FormatState<Context>) -> Self {
Self {
state,
elements: Vec::with_capacity(capacity),
}
}
/// Consumes the buffer and returns the written [`FormatElement]`s as a vector.
pub fn into_vec(self) -> Vec<FormatElement> {
self.elements
}
/// Takes the elements without consuming self
pub fn take_vec(&mut self) -> Vec<FormatElement> {
std::mem::take(&mut self.elements)
}
}
impl<Context> Deref for VecBuffer<'_, Context> {
type Target = [FormatElement];
fn deref(&self) -> &Self::Target {
&self.elements
}
}
impl<Context> DerefMut for VecBuffer<'_, Context> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.elements
}
}
impl<Context> Buffer for VecBuffer<'_, Context> {
type Context = Context;
fn write_element(&mut self, element: FormatElement) {
self.elements.push(element);
}
fn elements(&self) -> &[FormatElement] {
self
}
fn state(&self) -> &FormatState<Self::Context> {
self.state
}
fn state_mut(&mut self) -> &mut FormatState<Self::Context> {
self.state
}
fn snapshot(&self) -> BufferSnapshot {
BufferSnapshot::position(self.elements.len())
}
fn restore_snapshot(&mut self, snapshot: BufferSnapshot) {
let position = snapshot.unwrap_position();
assert!(
self.elements.len() >= position,
r#"Outdated snapshot. This buffer contains fewer elements than at the time the snapshot was taken.
Make sure that you take and restore the snapshot in order and that this snapshot belongs to the current buffer."#
);
self.elements.truncate(position);
}
}
/// Buffer that allows you inspecting elements as they get written to the formatter.
pub struct Inspect<'inner, Context, Inspector> {
inner: &'inner mut dyn Buffer<Context = Context>,
inspector: Inspector,
}
impl<'inner, Context, Inspector> Inspect<'inner, Context, Inspector> {
fn new(inner: &'inner mut dyn Buffer<Context = Context>, inspector: Inspector) -> Self {
Self { inner, inspector }
}
}
impl<Context, Inspector> Buffer for Inspect<'_, Context, Inspector>
where
Inspector: FnMut(&FormatElement),
{
type Context = Context;
fn write_element(&mut self, element: FormatElement) {
(self.inspector)(&element);
self.inner.write_element(element);
}
fn elements(&self) -> &[FormatElement] {
self.inner.elements()
}
fn state(&self) -> &FormatState<Self::Context> {
self.inner.state()
}
fn state_mut(&mut self) -> &mut FormatState<Self::Context> {
self.inner.state_mut()
}
fn snapshot(&self) -> BufferSnapshot {
self.inner.snapshot()
}
fn restore_snapshot(&mut self, snapshot: BufferSnapshot) {
self.inner.restore_snapshot(snapshot);
}
}
/// A Buffer that removes any soft line breaks or [`if_group_breaks`](crate::builders::if_group_breaks) elements.
///
/// - Removes [`lines`](FormatElement::Line) with the mode [`Soft`](LineMode::Soft).
/// - Replaces [`lines`](FormatElement::Line) with the mode [`Soft`](LineMode::SoftOrSpace) with a [`Space`](FormatElement::Space)
/// - Removes [`if_group_breaks`](crate::builders::if_group_breaks) and all its content.
/// - Unwraps the content of [`if_group_fits_on_line`](crate::builders::if_group_fits_on_line) elements (but retains it).
///
/// # Examples
///
/// ```
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, write};
///
/// # fn main() -> FormatResult<()> {
/// use ruff_formatter::{RemoveSoftLinesBuffer, SimpleFormatContext, VecBuffer};
/// use ruff_formatter::prelude::format_with;
/// let formatted = format!(
/// SimpleFormatContext::default(),
/// [format_with(|f| {
/// let mut buffer = RemoveSoftLinesBuffer::new(f);
///
/// write!(
/// buffer,
/// [
/// token("The next soft line or space gets replaced by a space"),
/// soft_line_break_or_space(),
/// token("and the line here"),
/// soft_line_break(),
/// token("is removed entirely.")
/// ]
/// )
/// })]
/// )?;
///
/// assert_eq!(
/// formatted.document().as_ref(),
/// &[
/// FormatElement::Token { text: "The next soft line or space gets replaced by a space" },
/// FormatElement::Space,
/// FormatElement::Token { text: "and the line here" },
/// FormatElement::Token { text: "is removed entirely." }
/// ]
/// );
///
/// # Ok(())
/// # }
/// ```
pub struct RemoveSoftLinesBuffer<'a, Context> {
inner: &'a mut dyn Buffer<Context = Context>,
/// Caches the interned elements after the soft line breaks have been removed.
///
/// The `key` is the [Interned] element as it has been passed to [`Self::write_element`] or the child of another
/// [Interned] element. The `value` is the matching document of the key where all soft line breaks have been removed.
///
/// It's fine to not snapshot the cache. The worst that can happen is that it holds on interned elements
/// that are now unused. But there's little harm in that and the cache is cleaned when dropping the buffer.
interned_cache: FxHashMap<Interned, Interned>,
state: RemoveSoftLineBreaksState,
}
impl<'a, Context> RemoveSoftLinesBuffer<'a, Context> {
/// Creates a new buffer that removes the soft line breaks before writing them into `buffer`.
pub fn new(inner: &'a mut dyn Buffer<Context = Context>) -> Self {
Self {
inner,
state: RemoveSoftLineBreaksState::default(),
interned_cache: FxHashMap::default(),
}
}
/// Removes the soft line breaks from an interned element.
fn clean_interned(&mut self, interned: &Interned) -> Interned {
clean_interned(interned, &mut self.interned_cache)
}
}
// Extracted to function to avoid monomorphization
fn clean_interned(
interned: &Interned,
interned_cache: &mut FxHashMap<Interned, Interned>,
) -> Interned {
if let Some(cleaned) = interned_cache.get(interned) {
cleaned.clone()
} else {
let mut state = RemoveSoftLineBreaksState::default();
// Find the first soft line break element or interned element that must be changed
let result = interned
.iter()
.enumerate()
.find_map(|(index, element)| match element {
FormatElement::Line(LineMode::SoftOrSpace) => {
let mut cleaned = Vec::new();
let (before, after) = interned.split_at(index);
cleaned.extend_from_slice(before);
Some((cleaned, &after[1..]))
}
FormatElement::Interned(inner) => {
let cleaned_inner = clean_interned(inner, interned_cache);
if &cleaned_inner == inner {
None
} else {
let mut cleaned = Vec::with_capacity(interned.len());
cleaned.extend_from_slice(&interned[..index]);
cleaned.push(FormatElement::Interned(cleaned_inner));
Some((cleaned, &interned[index + 1..]))
}
}
element => {
if state.should_drop(element) {
let mut cleaned = Vec::new();
let (before, after) = interned.split_at(index);
cleaned.extend_from_slice(before);
Some((cleaned, &after[1..]))
} else {
None
}
}
});
let result = match result {
// Copy the whole interned buffer so that becomes possible to change the necessary elements.
Some((mut cleaned, rest)) => {
for element in rest {
if state.should_drop(element) {
continue;
}
let element = match element {
FormatElement::Line(LineMode::SoftOrSpace) => FormatElement::Space,
FormatElement::Interned(interned) => {
FormatElement::Interned(clean_interned(interned, interned_cache))
}
element => element.clone(),
};
cleaned.push(element);
}
Interned::new(cleaned)
}
// No change necessary, return existing interned element
None => interned.clone(),
};
interned_cache.insert(interned.clone(), result.clone());
result
}
}
impl<Context> Buffer for RemoveSoftLinesBuffer<'_, Context> {
type Context = Context;
fn write_element(&mut self, element: FormatElement) {
if self.state.should_drop(&element) {
return;
}
let element = match element {
FormatElement::Line(LineMode::SoftOrSpace) => FormatElement::Space,
FormatElement::Interned(interned) => {
FormatElement::Interned(self.clean_interned(&interned))
}
element => element,
};
self.inner.write_element(element);
}
fn elements(&self) -> &[FormatElement] {
self.inner.elements()
}
fn state(&self) -> &FormatState<Self::Context> {
self.inner.state()
}
fn state_mut(&mut self) -> &mut FormatState<Self::Context> {
self.inner.state_mut()
}
fn snapshot(&self) -> BufferSnapshot {
BufferSnapshot::Any(Box::new(RemoveSoftLinebreaksSnapshot {
inner: self.inner.snapshot(),
state: self.state,
}))
}
fn restore_snapshot(&mut self, snapshot: BufferSnapshot) {
let RemoveSoftLinebreaksSnapshot { inner, state } = snapshot.unwrap_any();
self.inner.restore_snapshot(inner);
self.state = state;
}
}
#[derive(Copy, Clone, Debug, Default)]
enum RemoveSoftLineBreaksState {
#[default]
Default,
InIfGroupBreaks {
conditional_content_level: NonZeroUsize,
},
}
impl RemoveSoftLineBreaksState {
fn should_drop(&mut self, element: &FormatElement) -> bool {
match self {
Self::Default => match element {
FormatElement::Line(LineMode::Soft) => true,
// Entered the start of an `if_group_breaks` or `if_group_fits`
// For `if_group_breaks`: Remove the start and end tag and all content in between.
// For `if_group_fits_on_line`: Unwrap the content. This is important because the enclosing group
// might still *expand* if the content exceeds the line width limit, in which case the
// `if_group_fits_on_line` content would be removed.
FormatElement::Tag(Tag::StartConditionalContent(condition)) => {
if condition.mode.is_expanded() {
*self = Self::InIfGroupBreaks {
conditional_content_level: NonZeroUsize::new(1).unwrap(),
};
}
true
}
FormatElement::Tag(Tag::EndConditionalContent) => true,
_ => false,
},
Self::InIfGroupBreaks {
conditional_content_level,
} => {
match element {
// A nested `if_group_breaks` or `if_group_fits_on_line`
FormatElement::Tag(Tag::StartConditionalContent(_)) => {
*conditional_content_level = conditional_content_level.saturating_add(1);
}
// The end of an `if_group_breaks` or `if_group_fits_on_line`.
FormatElement::Tag(Tag::EndConditionalContent) => {
if let Some(level) = NonZeroUsize::new(conditional_content_level.get() - 1)
{
*conditional_content_level = level;
} else {
// Found the end tag of the initial `if_group_breaks`. Skip this element but retain
// the elements coming after
*self = RemoveSoftLineBreaksState::Default;
}
}
_ => {}
}
true
}
}
}
}
struct RemoveSoftLinebreaksSnapshot {
inner: BufferSnapshot,
state: RemoveSoftLineBreaksState,
}
pub trait BufferExtensions: Buffer + Sized {
/// Returns a new buffer that calls the passed inspector for every element that gets written to the output
#[must_use]
fn inspect<F>(&mut self, inspector: F) -> Inspect<'_, Self::Context, F>
where
F: FnMut(&FormatElement),
{
Inspect::new(self, inspector)
}
/// Starts a recording that gives you access to all elements that have been written between the start
/// and end of the recording
///
/// #Examples
///
/// ```
/// use std::ops::Deref;
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{write, format, SimpleFormatContext};
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [format_with(|f| {
/// let mut recording = f.start_recording();
///
/// write!(recording, [token("A")])?;
/// write!(recording, [token("B")])?;
///
/// write!(recording, [format_with(|f| write!(f, [token("C"), token("D")]))])?;
///
/// let recorded = recording.stop();
/// assert_eq!(
/// recorded.deref(),
/// &[
/// FormatElement::Token{ text: "A" },
/// FormatElement::Token{ text: "B" },
/// FormatElement::Token{ text: "C" },
/// FormatElement::Token{ text: "D" }
/// ]
/// );
///
/// Ok(())
/// })])?;
///
/// assert_eq!(formatted.print()?.as_code(), "ABCD");
/// # Ok(())
/// # }
/// ```
#[must_use]
fn start_recording(&mut self) -> Recording<'_, Self> {
Recording::new(self)
}
/// Writes a sequence of elements into this buffer.
fn write_elements<I>(&mut self, elements: I)
where
I: IntoIterator<Item = FormatElement>,
{
for element in elements {
self.write_element(element);
}
}
}
impl<T> BufferExtensions for T where T: Buffer {}
#[derive(Debug)]
pub struct Recording<'buf, Buffer> {
start: usize,
buffer: &'buf mut Buffer,
}
impl<'buf, B> Recording<'buf, B>
where
B: Buffer,
{
fn new(buffer: &'buf mut B) -> Self {
Self {
start: buffer.elements().len(),
buffer,
}
}
#[inline]
pub fn write_fmt(&mut self, arguments: Arguments<B::Context>) -> FormatResult<()> {
self.buffer.write_fmt(arguments)
}
#[inline]
pub fn write_element(&mut self, element: FormatElement) {
self.buffer.write_element(element);
}
pub fn stop(self) -> Recorded<'buf> {
let buffer: &'buf B = self.buffer;
let elements = buffer.elements();
let recorded = if self.start > elements.len() {
// May happen if buffer was rewound.
&[]
} else {
&elements[self.start..]
};
Recorded(recorded)
}
}
#[derive(Debug, Copy, Clone)]
pub struct Recorded<'a>(&'a [FormatElement]);
impl Deref for Recorded<'_> {
type Target = [FormatElement];
fn deref(&self) -> &Self::Target {
self.0
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/formatter.rs | crates/ruff_formatter/src/formatter.rs | use crate::buffer::BufferSnapshot;
use crate::builders::{FillBuilder, JoinBuilder};
use crate::prelude::*;
use crate::{Arguments, Buffer, FormatContext, FormatState, GroupId, VecBuffer};
/// Handles the formatting of a CST and stores the context how the CST should be formatted (user preferences).
/// The formatter is passed to the [Format] implementation of every node in the CST so that they
/// can use it to format their children.
pub struct Formatter<'buf, Context> {
pub(super) buffer: &'buf mut dyn Buffer<Context = Context>,
}
impl<'buf, Context> Formatter<'buf, Context> {
/// Creates a new context that uses the given formatter context
pub fn new(buffer: &'buf mut (dyn Buffer<Context = Context> + 'buf)) -> Self {
Self { buffer }
}
/// Returns the format options
pub fn options(&self) -> &Context::Options
where
Context: FormatContext,
{
self.context().options()
}
/// Returns the Context specifying how to format the current CST
pub fn context(&self) -> &Context {
self.state().context()
}
/// Returns a mutable reference to the context.
pub fn context_mut(&mut self) -> &mut Context {
self.state_mut().context_mut()
}
/// Creates a new group id that is unique to this document. The passed debug name is used in the
/// [`std::fmt::Debug`] of the document if this is a debug build.
/// The name is unused for production builds and has no meaning on the equality of two group ids.
pub fn group_id(&self, debug_name: &'static str) -> GroupId {
self.state().group_id(debug_name)
}
/// Joins multiple [Format] together without any separator
///
/// ## Examples
///
/// ```rust
/// use ruff_formatter::format;
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [format_with(|f| {
/// f.join()
/// .entry(&token("a"))
/// .entry(&space())
/// .entry(&token("+"))
/// .entry(&space())
/// .entry(&token("b"))
/// .finish()
/// })])?;
///
/// assert_eq!(
/// "a + b",
/// formatted.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
pub fn join<'a>(&'a mut self) -> JoinBuilder<'a, 'buf, (), Context> {
JoinBuilder::new(self)
}
/// Joins the objects by placing the specified separator between every two items.
///
/// ## Examples
///
/// Joining different tokens by separating them with a comma and a space.
///
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [format_with(|f| {
/// f.join_with(&format_args!(token(","), space()))
/// .entry(&token("1"))
/// .entry(&token("2"))
/// .entry(&token("3"))
/// .entry(&token("4"))
/// .finish()
/// })])?;
///
/// assert_eq!(
/// "1, 2, 3, 4",
/// formatted.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
pub fn join_with<'a, Joiner>(
&'a mut self,
joiner: Joiner,
) -> JoinBuilder<'a, 'buf, Joiner, Context>
where
Joiner: Format<Context>,
{
JoinBuilder::with_separator(self, joiner)
}
/// Concatenates a list of [`crate::Format`] objects with spaces and line breaks to fit
/// them on as few lines as possible. Each element introduces a conceptual group. The printer
/// first tries to print the item in flat mode but then prints it in expanded mode if it doesn't fit.
///
/// ## Examples
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, format_args};
///
/// # fn main() -> FormatResult<()> {
/// let formatted = format!(SimpleFormatContext::default(), [format_with(|f| {
/// f.fill()
/// .entry(&soft_line_break_or_space(), &token("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
/// .entry(&soft_line_break_or_space(), &token("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
/// .entry(&soft_line_break_or_space(), &token("cccccccccccccccccccccccccccccc"))
/// .entry(&soft_line_break_or_space(), &token("dddddddddddddddddddddddddddddd"))
/// .finish()
/// })])?;
///
/// assert_eq!(
/// "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\ncccccccccccccccccccccccccccccc dddddddddddddddddddddddddddddd",
/// formatted.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
///
/// ```rust
/// use ruff_formatter::prelude::*;
/// use ruff_formatter::{format, format_args};
///
/// # fn main() -> FormatResult<()> {
/// let entries = vec![
/// token("<b>Important: </b>"),
/// token("Please do not commit memory bugs such as segfaults, buffer overflows, etc. otherwise you "),
/// token("<em>will</em>"),
/// token(" be reprimanded")
/// ];
///
/// let formatted = format!(SimpleFormatContext::default(), [format_with(|f| {
/// f.fill().entries(&soft_line_break(), entries.iter()).finish()
/// })])?;
///
/// assert_eq!(
/// &std::format!("<b>Important: </b>\nPlease do not commit memory bugs such as segfaults, buffer overflows, etc. otherwise you \n<em>will</em> be reprimanded"),
/// formatted.print()?.as_code()
/// );
/// # Ok(())
/// # }
/// ```
pub fn fill<'a>(&'a mut self) -> FillBuilder<'a, 'buf, Context> {
FillBuilder::new(self)
}
/// Formats `content` into an interned element without writing it to the formatter's buffer.
pub fn intern(&mut self, content: &dyn Format<Context>) -> FormatResult<Option<FormatElement>> {
let mut buffer = VecBuffer::new(self.state_mut());
crate::write!(&mut buffer, [content])?;
let elements = buffer.into_vec();
Ok(self.intern_vec(elements))
}
pub fn intern_vec(&mut self, mut elements: Vec<FormatElement>) -> Option<FormatElement> {
match elements.len() {
0 => None,
// Doesn't get cheaper than calling clone, use the element directly
// SAFETY: Safe because of the `len == 1` check in the match arm.
1 => Some(elements.pop().unwrap()),
_ => Some(FormatElement::Interned(Interned::new(elements))),
}
}
}
impl<Context> Formatter<'_, Context>
where
Context: FormatContext,
{
/// Take a snapshot of the state of the formatter
#[inline]
pub fn state_snapshot(&self) -> FormatterSnapshot {
FormatterSnapshot {
buffer: self.buffer.snapshot(),
}
}
#[inline]
/// Restore the state of the formatter to a previous snapshot
pub fn restore_state_snapshot(&mut self, snapshot: FormatterSnapshot) {
self.buffer.restore_snapshot(snapshot.buffer);
}
}
impl<Context> Buffer for Formatter<'_, Context> {
type Context = Context;
#[inline]
fn write_element(&mut self, element: FormatElement) {
self.buffer.write_element(element);
}
fn elements(&self) -> &[FormatElement] {
self.buffer.elements()
}
#[inline]
fn write_fmt(&mut self, arguments: Arguments<Self::Context>) -> FormatResult<()> {
for argument in arguments.items() {
argument.format(self)?;
}
Ok(())
}
fn state(&self) -> &FormatState<Self::Context> {
self.buffer.state()
}
fn state_mut(&mut self) -> &mut FormatState<Self::Context> {
self.buffer.state_mut()
}
fn snapshot(&self) -> BufferSnapshot {
self.buffer.snapshot()
}
fn restore_snapshot(&mut self, snapshot: BufferSnapshot) {
self.buffer.restore_snapshot(snapshot);
}
}
/// Snapshot of the formatter state used to handle backtracking if
/// errors are encountered in the formatting process and the formatter
/// has to fallback to printing raw tokens
///
/// In practice this only saves the set of printed tokens in debug
/// mode and compiled to nothing in release mode
pub struct FormatterSnapshot {
buffer: BufferSnapshot,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/format_element.rs | crates/ruff_formatter/src/format_element.rs | pub mod document;
pub mod tag;
use std::borrow::Cow;
use std::hash::{Hash, Hasher};
use std::iter::FusedIterator;
use std::num::NonZeroU32;
use std::ops::Deref;
use std::rc::Rc;
use unicode_width::UnicodeWidthChar;
use crate::format_element::tag::{GroupMode, LabelId, Tag};
use crate::source_code::SourceCodeSlice;
use crate::{IndentWidth, TagKind};
use ruff_text_size::TextSize;
/// Language agnostic IR for formatting source code.
///
/// Use the helper functions like [`crate::builders::space`], [`crate::builders::soft_line_break`] etc. defined in this file to create elements.
#[derive(Clone, Eq, PartialEq)]
pub enum FormatElement {
    /// A space token, see [`crate::builders::space`] for documentation.
    Space,

    /// A new line, see [`crate::builders::soft_line_break`], [`crate::builders::hard_line_break`], and [`crate::builders::soft_line_break_or_space`] for documentation.
    Line(LineMode),

    /// Forces the parent group to print in expanded mode.
    ExpandParent,

    /// Indicates the position of the elements coming after this element in the source document.
    /// The printer will create a source map entry from this position in the source document to the
    /// formatted position.
    SourcePosition(TextSize),

    /// An ASCII-only token that contains no line breaks or tab characters.
    Token { text: &'static str },

    /// An arbitrary text that can contain tabs, newlines, and unicode characters.
    Text {
        /// There's no need for the text to be mutable, using `Box<str>` saves 8 bytes over `String`.
        text: Box<str>,
        /// The pre-computed single-line width of `text`, or the multiline marker.
        text_width: TextWidth,
    },

    /// Text that gets emitted as it is in the source code. Optimized to avoid any allocations.
    SourceCodeSlice {
        slice: SourceCodeSlice,
        text_width: TextWidth,
    },

    /// Prevents that line suffixes move past this boundary. Forces the printer to print any pending
    /// line suffixes, potentially by inserting a hard line break.
    LineSuffixBoundary,

    /// An interned format element. Useful when the same content must be emitted multiple times to avoid
    /// deep cloning the IR when using the `best_fitting!` macro or `if_group_fits_on_line` and `if_group_breaks`.
    Interned(Interned),

    /// A list of different variants representing the same content. The printer picks the best fitting content.
    /// Line breaks inside of a best fitting don't propagate to parent groups.
    BestFitting {
        variants: BestFittingVariants,
        mode: BestFittingMode,
    },

    /// A [Tag] that marks the start/end of some content to which some special formatting is applied.
    Tag(Tag),
}
impl FormatElement {
    /// Returns the [`TagKind`] when this element is a [`FormatElement::Tag`],
    /// and `None` for every other element.
    pub fn tag_kind(&self) -> Option<TagKind> {
        match self {
            FormatElement::Tag(tag) => Some(tag.kind()),
            _ => None,
        }
    }
}
impl std::fmt::Debug for FormatElement {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            FormatElement::Space => write!(fmt, "Space"),
            FormatElement::Line(mode) => fmt.debug_tuple("Line").field(mode).finish(),
            FormatElement::ExpandParent => write!(fmt, "ExpandParent"),
            FormatElement::Token { text } => fmt.debug_tuple("Token").field(text).finish(),
            // NOTE(review): `Text` renders as "DynamicText" while `SourceCodeSlice` renders as
            // "Text" — presumably historical names kept for output/snapshot stability; confirm
            // before renaming.
            FormatElement::Text { text, .. } => fmt.debug_tuple("DynamicText").field(text).finish(),
            FormatElement::SourceCodeSlice { slice, text_width } => fmt
                .debug_tuple("Text")
                .field(slice)
                .field(text_width)
                .finish(),
            FormatElement::LineSuffixBoundary => write!(fmt, "LineSuffixBoundary"),
            FormatElement::BestFitting { variants, mode } => fmt
                .debug_struct("BestFitting")
                .field("variants", variants)
                .field("mode", &mode)
                .finish(),
            // Interned content prints as a plain list, without an `Interned` wrapper.
            FormatElement::Interned(interned) => fmt.debug_list().entries(&**interned).finish(),
            FormatElement::Tag(tag) => fmt.debug_tuple("Tag").field(tag).finish(),
            FormatElement::SourcePosition(position) => {
                fmt.debug_tuple("SourcePosition").field(position).finish()
            }
        }
    }
}
/// The flavor of a [`FormatElement::Line`] break.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum LineMode {
    /// See [`crate::builders::soft_line_break_or_space`] for documentation.
    SoftOrSpace,
    /// See [`crate::builders::soft_line_break`] for documentation.
    Soft,
    /// See [`crate::builders::hard_line_break`] for documentation.
    Hard,
    /// See [`crate::builders::empty_line`] for documentation.
    Empty,
}

impl LineMode {
    /// Returns `true` only for [`LineMode::Hard`] line breaks.
    pub const fn is_hard(&self) -> bool {
        match self {
            LineMode::Hard => true,
            LineMode::SoftOrSpace | LineMode::Soft | LineMode::Empty => false,
        }
    }
}
/// Whether the printer renders soft line breaks as spaces or as actual line breaks.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum PrintMode {
    /// Omits any soft line breaks
    Flat,
    /// Prints soft line breaks as line breaks
    Expanded,
}

impl PrintMode {
    /// Returns `true` for [`PrintMode::Flat`].
    pub const fn is_flat(&self) -> bool {
        match self {
            PrintMode::Flat => true,
            PrintMode::Expanded => false,
        }
    }

    /// Returns `true` for [`PrintMode::Expanded`].
    pub const fn is_expanded(&self) -> bool {
        match self {
            PrintMode::Flat => false,
            PrintMode::Expanded => true,
        }
    }
}
impl From<GroupMode> for PrintMode {
    fn from(value: GroupMode) -> Self {
        match value {
            GroupMode::Flat => PrintMode::Flat,
            // Expanded output, whether the expansion was requested directly or
            // propagated from the group's content.
            GroupMode::Expand | GroupMode::Propagated => PrintMode::Expanded,
        }
    }
}
/// A reference-counted, immutable sequence of format elements.
/// Cloning is cheap (a refcount bump) — the content itself is shared.
#[derive(Clone)]
pub struct Interned(Rc<[FormatElement]>);

impl Interned {
    pub(super) fn new(content: Vec<FormatElement>) -> Self {
        Self(content.into())
    }
}
impl PartialEq for Interned {
    // Equality is pointer identity: two `Interned` values are equal only when they
    // share the same allocation, not when their contents happen to match.
    fn eq(&self, other: &Interned) -> bool {
        Rc::ptr_eq(&self.0, &other.0)
    }
}

impl Eq for Interned {}

impl Hash for Interned {
    // Hashes the allocation address, consistent with the pointer-based `PartialEq`.
    fn hash<H>(&self, hasher: &mut H)
    where
        H: Hasher,
    {
        Rc::as_ptr(&self.0).hash(hasher);
    }
}

impl std::fmt::Debug for Interned {
    // Debug shows the interned content, not the pointer.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}

impl Deref for Interned {
    type Target = [FormatElement];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Unicode line separator (U+2028).
const LINE_SEPARATOR: char = '\u{2028}';
/// Unicode paragraph separator (U+2029).
const PARAGRAPH_SEPARATOR: char = '\u{2029}';
/// Line terminators normalized to `\n`: `\r` (which also covers `\r\n`),
/// plus the unicode line and paragraph separators.
pub const LINE_TERMINATORS: [char; 3] = ['\r', LINE_SEPARATOR, PARAGRAPH_SEPARATOR];
/// Replaces every line terminator in `terminators` with `"\n"`,
/// since that's the only line break type supported by the printer.
///
/// A `"\r\n"` pair collapses into a single `"\n"`. When no terminator occurs
/// in `text`, the input is returned borrowed without allocating.
pub fn normalize_newlines<const N: usize>(text: &str, terminators: [char; N]) -> Cow<'_, str> {
    let mut normalized = String::new();
    let mut copied_up_to = 0;

    for (offset, matched) in text.match_indices(terminators) {
        normalized.push_str(&text[copied_up_to..offset]);
        normalized.push('\n');
        copied_up_to = offset + matched.len();

        // Treat "\r\n" as one line break: also consume the '\n' that follows a '\r'.
        if matched == "\r" && text.as_bytes().get(copied_up_to) == Some(&b'\n') {
            copied_up_to += 1;
        }
    }

    if normalized.is_empty() {
        // No terminator matched — hand back the input without allocating.
        Cow::Borrowed(text)
    } else {
        normalized.push_str(&text[copied_up_to..]);
        Cow::Owned(normalized)
    }
}
impl FormatElement {
    /// Returns `true` if self is a [`FormatElement::Tag`]
    pub const fn is_tag(&self) -> bool {
        match self {
            FormatElement::Tag(_) => true,
            _ => false,
        }
    }

    /// Returns `true` if self is a [`FormatElement::Tag`] and [`Tag::is_start`] is `true`.
    pub const fn is_start_tag(&self) -> bool {
        if let FormatElement::Tag(tag) = self {
            tag.is_start()
        } else {
            false
        }
    }

    /// Returns `true` if self is a [`FormatElement::Tag`] and [`Tag::is_end`] is `true`.
    pub const fn is_end_tag(&self) -> bool {
        if let FormatElement::Tag(tag) = self {
            tag.is_end()
        } else {
            false
        }
    }

    /// Returns `true` for the three text-carrying element kinds.
    pub const fn is_text(&self) -> bool {
        match self {
            FormatElement::SourceCodeSlice { .. }
            | FormatElement::Text { .. }
            | FormatElement::Token { .. } => true,
            _ => false,
        }
    }

    /// Returns `true` if self is a [`FormatElement::Space`].
    pub const fn is_space(&self) -> bool {
        match self {
            FormatElement::Space => true,
            _ => false,
        }
    }
}
impl FormatElements for FormatElement {
    fn will_break(&self) -> bool {
        match self {
            FormatElement::ExpandParent => true,
            // A group that is (or became) expanded always breaks.
            FormatElement::Tag(Tag::StartGroup(group)) => !group.mode().is_flat(),
            FormatElement::Line(line_mode) => matches!(line_mode, LineMode::Hard | LineMode::Empty),
            FormatElement::Text { text_width, .. } => text_width.is_multiline(),
            FormatElement::SourceCodeSlice { text_width, .. } => text_width.is_multiline(),
            FormatElement::Interned(interned) => interned.will_break(),
            // Traverse into the most flat version because the content is guaranteed to expand when even
            // the most flat version contains some content that forces a break.
            FormatElement::BestFitting {
                variants: best_fitting,
                ..
            } => best_fitting.most_flat().will_break(),
            FormatElement::LineSuffixBoundary
            | FormatElement::Space
            | FormatElement::Tag(_)
            | FormatElement::Token { .. }
            | FormatElement::SourcePosition(_) => false,
        }
    }

    fn has_label(&self, label_id: LabelId) -> bool {
        match self {
            FormatElement::Tag(Tag::StartLabelled(actual)) => *actual == label_id,
            FormatElement::Interned(interned) => interned.deref().has_label(label_id),
            _ => false,
        }
    }

    // A single element can never simultaneously be the end tag and contain the
    // matching start tag, so there is never a start tag to return.
    fn start_tag(&self, _: TagKind) -> Option<&Tag> {
        None
    }

    fn end_tag(&self, kind: TagKind) -> Option<&Tag> {
        match self {
            FormatElement::Tag(tag) if tag.kind() == kind && tag.is_end() => Some(tag),
            _ => None,
        }
    }
}
/// Mode used to determine if any variant (except the most expanded) fits for [`BestFittingVariants`].
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
pub enum BestFittingMode {
    /// The variant fits if the content up to the first hard or a soft line break inside a [`Group`] with
    /// [`PrintMode::Expanded`] fits on the line. The default mode.
    ///
    /// [`Group`]: tag::Group
    #[default]
    FirstLine,

    /// A variant fits if all lines fit into the configured print width. A line ends if by any
    /// hard or a soft line break inside a [`Group`] with [`PrintMode::Expanded`].
    /// The content doesn't fit if there's any hard line break outside a [`Group`] with [`PrintMode::Expanded`]
    /// (a hard line break in content that should be considered in [`PrintMode::Flat`]).
    ///
    /// Use this mode with caution as it requires measuring all content of the variant which is more
    /// expensive than using [`BestFittingMode::FirstLine`].
    ///
    /// [`Group`]: tag::Group
    AllLines,
}
/// The different variants for this element.
/// The first variant is the most flat: it takes up the most horizontal space.
/// The last variant is the most expanded: it takes up the least horizontal
/// space (but the most vertical space).
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct BestFittingVariants(Box<[FormatElement]>);

impl BestFittingVariants {
    /// Counts the variants in `elements` by counting their
    /// [`Tag::StartBestFittingEntry`] start tags.
    fn count_variants(elements: &[FormatElement]) -> usize {
        elements
            .iter()
            .filter(|element| matches!(element, FormatElement::Tag(Tag::StartBestFittingEntry)))
            .count()
    }

    /// Creates a new best fitting IR with the given variants.
    ///
    /// Callers are required to ensure that the number of variants given
    /// is at least 2 when using `most_expanded` or `most_flat`.
    ///
    /// You're looking for a way to create a `BestFitting` object, use the `best_fitting![least_expanded, most_expanded]` macro.
    #[doc(hidden)]
    pub fn from_vec_unchecked(variants: Vec<FormatElement>) -> Self {
        debug_assert!(
            Self::count_variants(&variants) >= 2,
            "Requires at least the least expanded and most expanded variants"
        );

        Self(variants.into_boxed_slice())
    }

    /// Returns the most expanded variant
    ///
    /// # Panics
    ///
    /// When the number of variants is less than two.
    pub fn most_expanded(&self) -> &[FormatElement] {
        assert!(
            Self::count_variants(self.as_slice()) >= 2,
            "Requires at least the least expanded and most expanded variants"
        );

        self.into_iter().last().unwrap()
    }

    /// Returns the flat element slice containing all variants.
    pub fn as_slice(&self) -> &[FormatElement] {
        &self.0
    }

    /// Returns the least expanded variant
    ///
    /// # Panics
    ///
    /// When the number of variants is less than two.
    pub fn most_flat(&self) -> &[FormatElement] {
        assert!(
            Self::count_variants(self.as_slice()) >= 2,
            "Requires at least the least expanded and most expanded variants"
        );

        self.into_iter().next().unwrap()
    }
}
impl Deref for BestFittingVariants {
    type Target = [FormatElement];

    // Dereferences to the flat element slice spanning all variants.
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}
/// Iterator over the variants of a [`BestFittingVariants`], yielding one
/// element sub-slice per variant.
pub struct BestFittingVariantsIter<'a> {
    /// The elements not yet consumed from either end.
    elements: &'a [FormatElement],
}

impl<'a> IntoIterator for &'a BestFittingVariants {
    type Item = &'a [FormatElement];
    type IntoIter = BestFittingVariantsIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        BestFittingVariantsIter { elements: &self.0 }
    }
}
impl<'a> Iterator for BestFittingVariantsIter<'a> {
    type Item = &'a [FormatElement];

    fn next(&mut self) -> Option<Self::Item> {
        match self.elements.first()? {
            FormatElement::Tag(Tag::StartBestFittingEntry) => {
                // A variant spans from its start tag up to and including its
                // end tag (or to the end of the remaining elements when the
                // end tag is missing).
                let end = self
                    .elements
                    .iter()
                    .position(|element| {
                        matches!(element, FormatElement::Tag(Tag::EndBestFittingEntry))
                    })
                    .map_or(self.elements.len(), |position| position + 1);

                let (variant, rest) = self.elements.split_at(end);
                self.elements = rest;
                Some(variant)
            }
            // Anything other than a start tag ends the iteration.
            _ => None,
        }
    }

    fn last(mut self) -> Option<Self::Item>
    where
        Self: Sized,
    {
        // The last variant is found faster by scanning from the back.
        self.next_back()
    }
}
impl DoubleEndedIterator for BestFittingVariantsIter<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // The rearmost variant starts at the last `StartBestFittingEntry` tag
        // and extends to the end of the remaining elements.
        let start_position = self.elements.iter().rposition(|element| {
            matches!(element, FormatElement::Tag(Tag::StartBestFittingEntry))
        })?;

        let (rest, variant) = self.elements.split_at(start_position);
        self.elements = rest;
        Some(variant)
    }
}

impl FusedIterator for BestFittingVariantsIter<'_> {}
/// Queries over formatted IR, implemented both for a single [`FormatElement`]
/// and for element slices.
pub trait FormatElements {
    /// Returns true if this [`FormatElement`] is guaranteed to break across multiple lines by the printer.
    /// This is the case if this format element recursively contains a:
    /// - [`crate::builders::empty_line`] or [`crate::builders::hard_line_break`]
    /// - A token containing '\n'
    ///
    /// Use this with caution, this is only a heuristic and the printer may print the element over multiple
    /// lines if this element is part of a group and the group doesn't fit on a single line.
    fn will_break(&self) -> bool;

    /// Returns true if the element has the given label.
    fn has_label(&self, label: LabelId) -> bool;

    /// Returns the start tag of `kind` if:
    /// - the last element is an end tag of `kind`.
    /// - there's a matching start tag in this document (may not be true if this slice is an interned element and the `start` is in the document storing the interned element).
    fn start_tag(&self, kind: TagKind) -> Option<&Tag>;

    /// Returns the end tag if:
    /// - the last element is an end tag of `kind`
    fn end_tag(&self, kind: TagKind) -> Option<&Tag>;
}
/// New-type wrapper for a single-line text unicode width.
/// Mainly to prevent access to the inner value.
///
/// ## Representation
///
/// Stores `width + 1` so the inner value is never zero and fits a [`NonZeroU32`].
/// Thanks to the niche, [`TextWidth`] and [`Option<Width>`] occupy 4 bytes instead of 8.
///
/// As a consequence, `u32::MAX` cannot be represented precisely and saturates to
/// `u32::MAX - 1`. This imprecision doesn't matter in practice because such a text
/// is far longer than any configured line width and the text breaks anyway.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Width(NonZeroU32);

impl Width {
    pub(crate) const fn new(width: u32) -> Self {
        // `width + 1` computed with saturation is always >= 1, so the
        // `NonZeroU32` can always be constructed.
        match NonZeroU32::new(width.saturating_add(1)) {
            Some(encoded) => Width(encoded),
            // `saturating_add(1)` on a `u32` can never produce 0.
            None => unreachable!(),
        }
    }

    pub const fn value(self) -> u32 {
        // Undo the `+ 1` bias applied in `new`.
        self.0.get() - 1
    }
}
/// The pre-computed unicode width of a text if it is a single-line text, or a
/// marker that the text is multiline because it contains a line feed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TextWidth {
    Width(Width),
    Multiline,
}

impl TextWidth {
    /// Measures `text`: returns [`TextWidth::Multiline`] as soon as a `\n` is
    /// seen, otherwise sums the unicode width of every character. A tab counts
    /// as the configured indent width; control characters count as width 0.
    pub fn from_text(text: &str, indent_width: IndentWidth) -> TextWidth {
        let mut total = 0u32;

        for ch in text.chars() {
            if ch == '\n' {
                return TextWidth::Multiline;
            }

            #[expect(clippy::cast_possible_truncation)]
            let ch_width = if ch == '\t' {
                indent_width.value()
            } else {
                ch.width().unwrap_or(0) as u32
            };
            total += ch_width;
        }

        Self::Width(Width::new(total))
    }

    /// Returns the single-line width, or `None` for multiline text.
    pub const fn width(self) -> Option<Width> {
        if let TextWidth::Width(width) = self {
            Some(width)
        } else {
            None
        }
    }

    /// Returns `true` when the measured text contained a line feed.
    pub(crate) const fn is_multiline(self) -> bool {
        match self {
            TextWidth::Multiline => true,
            TextWidth::Width(_) => false,
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::format_element::{LINE_TERMINATORS, normalize_newlines};

    #[test]
    fn test_normalize_newlines() {
        // Already-normalized input passes through untouched.
        assert_eq!(normalize_newlines("a\nb", LINE_TERMINATORS), "a\nb");
        assert_eq!(normalize_newlines("a\n\n\nb", LINE_TERMINATORS), "a\n\n\nb");
        // `\r` and the two-byte `\r\n` sequence each collapse to one `\n`.
        assert_eq!(normalize_newlines("a\rb", LINE_TERMINATORS), "a\nb");
        assert_eq!(normalize_newlines("a\r\nb", LINE_TERMINATORS), "a\nb");
        assert_eq!(
            normalize_newlines("a\r\n\r\n\r\nb", LINE_TERMINATORS),
            "a\n\n\nb"
        );
        // Unicode line (U+2028) and paragraph (U+2029) separators.
        assert_eq!(normalize_newlines("a\u{2028}b", LINE_TERMINATORS), "a\nb");
        assert_eq!(normalize_newlines("a\u{2029}b", LINE_TERMINATORS), "a\nb");
    }
}
#[cfg(target_pointer_width = "64")]
mod sizes {
    // Increasing the size of FormatElement has serious consequences on runtime performance and memory footprint.
    // Is there a more efficient way to encode the data to avoid increasing its size? Can the information
    // be recomputed at a later point in time?
    // You reduced the size of a format element? Excellent work!

    use super::{BestFittingVariants, Interned, TextWidth};
    use static_assertions::assert_eq_size;

    assert_eq_size!(ruff_text_size::TextRange, [u8; 8]);
    assert_eq_size!(TextWidth, [u8; 4]);
    assert_eq_size!(super::tag::VerbatimKind, [u8; 8]);
    assert_eq_size!(Interned, [u8; 16]);
    assert_eq_size!(BestFittingVariants, [u8; 16]);

    // NOTE(review): the following sizes are only pinned in release builds —
    // presumably a contained type grows under debug assertions; confirm.
    #[cfg(not(debug_assertions))]
    assert_eq_size!(crate::SourceCodeSlice, [u8; 8]);

    #[cfg(not(debug_assertions))]
    assert_eq_size!(super::Tag, [u8; 16]);

    #[cfg(not(debug_assertions))]
    assert_eq_size!(super::FormatElement, [u8; 24]);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/format_element/document.rs | crates/ruff_formatter/src/format_element/document.rs | use std::collections::HashMap;
use std::ops::Deref;
use rustc_hash::FxHashMap;
use crate::format_element::tag::{Condition, DedentMode};
use crate::prelude::tag::GroupMode;
use crate::prelude::*;
use crate::source_code::SourceCode;
use crate::{
BufferExtensions, Format, FormatContext, FormatElement, FormatOptions, FormatResult, Formatter,
IndentStyle, IndentWidth, LineWidth, PrinterOptions, format, write,
};
use super::tag::Tag;
/// A formatted document.
///
/// The IR produced by formatting: a flat list of [`FormatElement`]s.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct Document {
    elements: Vec<FormatElement>,
}
impl Document {
    /// Sets [`expand`](tag::Group::expand) to [`GroupMode::Propagated`] if the group contains any of:
    /// - a group with [`expand`](tag::Group::expand) set to [`GroupMode::Propagated`] or [`GroupMode::Expand`].
    /// - a non-soft [line break](FormatElement::Line) with mode [`LineMode::Hard`] or [`LineMode::Empty`].
    /// - a [`FormatElement::ExpandParent`]
    ///
    /// [`BestFitting`] elements act as expand boundaries, meaning that the fact that a
    /// [`BestFitting`]'s content expands is not propagated past the [`BestFitting`] element.
    ///
    /// [`BestFitting`]: FormatElement::BestFitting
    pub(crate) fn propagate_expand(&mut self) {
        /// The containers the traversal is currently nested in.
        #[derive(Debug)]
        enum Enclosing<'a> {
            Group(&'a tag::Group),
            ConditionalGroup(&'a tag::ConditionalGroup),
            FitsExpanded {
                tag: &'a tag::FitsExpanded,
                /// Whether content *before* this tag already expanded; restored at the end tag.
                expands_before: bool,
            },
            BestFitting,
            BestFitParenthesize {
                /// Whether content *before* this tag already expanded; restored at the end tag.
                expanded: bool,
            },
        }

        /// Marks the closest enclosing container (if any) as expanding.
        fn expand_parent(enclosing: &[Enclosing]) {
            match enclosing.last() {
                Some(Enclosing::Group(group)) => group.propagate_expand(),
                Some(Enclosing::ConditionalGroup(group)) => group.propagate_expand(),
                Some(Enclosing::FitsExpanded { tag, .. }) => tag.propagate_expand(),
                _ => {}
            }
        }

        /// Walks `elements`, propagating expansion to enclosing containers.
        /// Returns `true` when `elements` contains content that forces expansion.
        fn propagate_expands<'a>(
            elements: &'a [FormatElement],
            enclosing: &mut Vec<Enclosing<'a>>,
            checked_interned: &mut FxHashMap<&'a Interned, bool>,
        ) -> bool {
            let mut expands = false;
            for element in elements {
                let element_expands = match element {
                    FormatElement::Tag(Tag::StartGroup(group)) => {
                        enclosing.push(Enclosing::Group(group));
                        false
                    }
                    // A group expands if its (possibly propagated) mode is no longer flat.
                    FormatElement::Tag(Tag::EndGroup) => match enclosing.pop() {
                        Some(Enclosing::Group(group)) => !group.mode().is_flat(),
                        _ => false,
                    },
                    FormatElement::Tag(Tag::StartBestFitParenthesize { .. }) => {
                        // Expansion must not escape this boundary: save the current
                        // state and measure the parenthesized content from scratch.
                        enclosing.push(Enclosing::BestFitParenthesize { expanded: expands });
                        expands = false;
                        continue;
                    }
                    FormatElement::Tag(Tag::EndBestFitParenthesize) => {
                        // Restore the expansion state from before the parenthesized content.
                        if let Some(Enclosing::BestFitParenthesize { expanded }) = enclosing.pop() {
                            expands = expanded;
                        }
                        continue;
                    }
                    FormatElement::Tag(Tag::StartConditionalGroup(group)) => {
                        enclosing.push(Enclosing::ConditionalGroup(group));
                        false
                    }
                    FormatElement::Tag(Tag::EndConditionalGroup) => match enclosing.pop() {
                        Some(Enclosing::ConditionalGroup(group)) => !group.mode().is_flat(),
                        _ => false,
                    },
                    FormatElement::Interned(interned) => {
                        // Interned content can occur many times; compute its result once.
                        if let Some(interned_expands) = checked_interned.get(interned) {
                            *interned_expands
                        } else {
                            let interned_expands =
                                propagate_expands(interned, enclosing, checked_interned);
                            checked_interned.insert(interned, interned_expands);
                            interned_expands
                        }
                    }
                    FormatElement::BestFitting { variants, mode: _ } => {
                        // Traverse the variants for their own nested propagation, but
                        // don't let their expansion escape the `BestFitting` boundary.
                        enclosing.push(Enclosing::BestFitting);
                        propagate_expands(variants, enclosing, checked_interned);
                        enclosing.pop();
                        continue;
                    }
                    FormatElement::Tag(Tag::StartFitsExpanded(fits_expanded)) => {
                        enclosing.push(Enclosing::FitsExpanded {
                            tag: fits_expanded,
                            expands_before: expands,
                        });
                        false
                    }
                    FormatElement::Tag(Tag::EndFitsExpanded) => {
                        if let Some(Enclosing::FitsExpanded { expands_before, .. }) =
                            enclosing.pop()
                        {
                            expands = expands_before;
                        }
                        continue;
                    }
                    FormatElement::Text {
                        text: _,
                        text_width,
                    } => text_width.is_multiline(),
                    FormatElement::SourceCodeSlice { text_width, .. } => text_width.is_multiline(),
                    FormatElement::ExpandParent
                    | FormatElement::Line(LineMode::Hard | LineMode::Empty) => true,
                    _ => false,
                };

                if element_expands {
                    expands = true;
                    expand_parent(enclosing);
                }
            }

            expands
        }

        // Heuristic initial capacity for the container stack; presumably an
        // estimate of the typical nesting depth — TODO confirm.
        let mut enclosing = Vec::with_capacity(if self.is_empty() {
            0
        } else {
            self.len().ilog2() as usize
        });
        let mut interned = FxHashMap::default();
        propagate_expands(self, &mut enclosing, &mut interned);
    }

    /// Returns an object that renders this document as human-readable IR.
    /// `source_code` is needed to resolve [`FormatElement::SourceCodeSlice`] elements.
    pub fn display<'a>(&'a self, source_code: SourceCode<'a>) -> DisplayDocument<'a> {
        DisplayDocument {
            elements: self.elements.as_slice(),
            source_code,
        }
    }
}
impl From<Vec<FormatElement>> for Document {
    // A document wraps the element list directly; no post-processing happens here.
    fn from(elements: Vec<FormatElement>) -> Self {
        Self { elements }
    }
}
impl Deref for Document {
    type Target = [FormatElement];

    // Dereferences to the underlying element slice.
    fn deref(&self) -> &Self::Target {
        self.elements.as_slice()
    }
}
/// Renders a document as human-readable IR via `Display`/`Debug`.
/// Created by [`Document::display`].
pub struct DisplayDocument<'a> {
    elements: &'a [FormatElement],
    /// Needed to resolve [`FormatElement::SourceCodeSlice`] elements to their text.
    source_code: SourceCode<'a>,
}
impl std::fmt::Display for DisplayDocument<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Format the IR itself using the IR formatter, then print and emit the result.
        let formatted = format!(IrFormatContext::new(self.source_code), [self.elements])
            .expect("Formatting not to throw any FormatErrors");

        f.write_str(
            formatted
                .print()
                .expect("Expected a valid document")
                .as_code(),
        )
    }
}
impl std::fmt::Debug for DisplayDocument<'_> {
    // Debug output is intentionally identical to `Display`: the rendered IR.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(self, f)
    }
}
/// Format context used when rendering the IR itself as text.
#[derive(Clone, Debug)]
struct IrFormatContext<'a> {
    /// The interned elements that have been printed to this point
    printed_interned_elements: HashMap<Interned, usize>,
    source_code: SourceCode<'a>,
}

impl<'a> IrFormatContext<'a> {
    fn new(source_code: SourceCode<'a>) -> Self {
        Self {
            source_code,
            printed_interned_elements: HashMap::new(),
        }
    }
}
impl FormatContext for IrFormatContext<'_> {
    type Options = IrFormatOptions;

    // `IrFormatOptions` is a zero-sized type, so a reference to a fresh value works.
    fn options(&self) -> &Self::Options {
        &IrFormatOptions
    }

    fn source_code(&self) -> SourceCode<'_> {
        self.source_code
    }
}
/// Fixed formatting options for rendering the IR itself:
/// space indentation and a line width of 80.
#[derive(Debug, Clone, Default)]
struct IrFormatOptions;

impl FormatOptions for IrFormatOptions {
    fn indent_style(&self) -> IndentStyle {
        IndentStyle::Space
    }

    fn indent_width(&self) -> IndentWidth {
        IndentWidth::default()
    }

    fn line_width(&self) -> LineWidth {
        LineWidth::try_from(80).unwrap()
    }

    fn as_print_options(&self) -> PrinterOptions {
        PrinterOptions {
            line_width: self.line_width(),
            indent_style: IndentStyle::Space,
            ..PrinterOptions::default()
        }
    }
}
impl Format<IrFormatContext<'_>> for &[FormatElement] {
    /// Renders an element slice as a human-readable, parenthesized IR tree.
    ///
    /// Runs of adjacent text/space elements are merged into one quoted string,
    /// tags render as `name(...)` wrappers around their content, and mismatched
    /// or unbalanced tags are reported inline (`<END_TAG_WITHOUT_START<...>>`,
    /// `ERROR<START_END_TAG_MISMATCH<...>>`, `<START_WITHOUT_END<...>>`) so the
    /// output stays well-formed even for invalid documents.
    fn fmt(&self, f: &mut Formatter<IrFormatContext>) -> FormatResult<()> {
        #[allow(clippy::enum_glob_use)]
        use Tag::*;

        write!(f, [ContentArrayStart])?;

        // Start tags seen but not yet closed; used to detect mismatched ends.
        let mut tag_stack = Vec::new();
        let mut first_element = true;
        // `true` while consecutive text/space elements are merged into one quoted string.
        let mut in_text = false;

        let mut iter = self.iter().peekable();

        while let Some(element) = iter.next() {
            if !first_element && !in_text && !element.is_end_tag() {
                // Write a separator between every two elements
                write!(f, [token(","), soft_line_break_or_space()])?;
            }

            first_element = false;

            match element {
                element @ (FormatElement::Space
                | FormatElement::Token { .. }
                | FormatElement::Text { .. }
                | FormatElement::SourceCodeSlice { .. }) => {
                    // Writes the element's text with any `"` escaped so the
                    // surrounding quoted string stays unambiguous.
                    fn write_escaped(element: &FormatElement, f: &mut Formatter<IrFormatContext>) {
                        let (text, text_width) = match element {
                            #[expect(clippy::cast_possible_truncation)]
                            FormatElement::Token { text } => {
                                (*text, TextWidth::Width(Width::new(text.len() as u32)))
                            }
                            FormatElement::Text { text, text_width } => {
                                (text.as_ref(), *text_width)
                            }
                            FormatElement::SourceCodeSlice { slice, text_width } => {
                                (slice.text(f.context().source_code()), *text_width)
                            }
                            _ => unreachable!(),
                        };

                        if text.contains('"') {
                            f.write_element(FormatElement::Text {
                                text: text.replace('"', r#"\""#).into(),
                                text_width,
                            });
                        } else {
                            f.write_element(element.clone());
                        }
                    }

                    // Open the quoted string at the first text-like element of a run.
                    if !in_text {
                        write!(f, [token("\"")])?;
                    }

                    in_text = true;

                    match element {
                        FormatElement::Space => {
                            write!(f, [token(" ")])?;
                        }
                        element if element.is_text() => {
                            write_escaped(element, f);
                        }
                        _ => unreachable!(),
                    }

                    // Close the quoted string once the run of text/space elements ends.
                    let is_next_text = iter.peek().is_some_and(|e| e.is_text() || e.is_space());

                    if !is_next_text {
                        write!(f, [token("\"")])?;
                        in_text = false;
                    }
                }
                FormatElement::Line(mode) => match mode {
                    LineMode::SoftOrSpace => {
                        write!(f, [token("soft_line_break_or_space")])?;
                    }
                    LineMode::Soft => {
                        write!(f, [token("soft_line_break")])?;
                    }
                    LineMode::Hard => {
                        write!(f, [token("hard_line_break")])?;
                    }
                    LineMode::Empty => {
                        write!(f, [token("empty_line")])?;
                    }
                },
                FormatElement::ExpandParent => {
                    write!(f, [token("expand_parent")])?;
                }
                FormatElement::SourcePosition(position) => {
                    write!(f, [text(&std::format!("source_position({position:?})"))])?;
                }
                FormatElement::LineSuffixBoundary => {
                    write!(f, [token("line_suffix_boundary")])?;
                }
                FormatElement::BestFitting { variants, mode } => {
                    write!(f, [token("best_fitting(")])?;

                    // Only spell out the mode when it differs from the default.
                    if *mode != BestFittingMode::FirstLine {
                        write!(f, [text(&std::format!("mode: {mode:?}, "))])?;
                    }

                    write!(f, [token("[")])?;
                    f.write_elements([
                        FormatElement::Tag(StartIndent),
                        FormatElement::Line(LineMode::Hard),
                    ]);

                    // Each variant on its own line.
                    for variant in variants {
                        write!(f, [variant, hard_line_break()])?;
                    }

                    f.write_elements([
                        FormatElement::Tag(EndIndent),
                        FormatElement::Line(LineMode::Hard),
                    ]);

                    write!(f, [token("])")])?;
                }
                FormatElement::Interned(interned) => {
                    let interned_elements = &mut f.context_mut().printed_interned_elements;

                    match interned_elements.get(interned).copied() {
                        None => {
                            // First occurrence: assign an index and print the full content.
                            let index = interned_elements.len();
                            interned_elements.insert(interned.clone(), index);

                            write!(
                                f,
                                [
                                    text(&std::format!("<interned {index}>")),
                                    space(),
                                    &&**interned,
                                ]
                            )?;
                        }
                        Some(reference) => {
                            // Later occurrences print only a back-reference to the index.
                            write!(f, [text(&std::format!("<ref interned *{reference}>"))])?;
                        }
                    }
                }
                FormatElement::Tag(tag) => {
                    if tag.is_start() {
                        first_element = true;
                        tag_stack.push(tag.kind());
                    }
                    // Handle documents with mismatching start/end or superfluous end tags
                    else {
                        match tag_stack.pop() {
                            None => {
                                // Only write the end tag without any indent to ensure the output document is valid.
                                write!(
                                    f,
                                    [
                                        token("<END_TAG_WITHOUT_START<"),
                                        text(&std::format!("{:?}", tag.kind())),
                                        token(">>"),
                                    ]
                                )?;
                                first_element = false;
                                continue;
                            }
                            Some(start_kind) if start_kind != tag.kind() => {
                                write!(
                                    f,
                                    [
                                        ContentArrayEnd,
                                        token(")"),
                                        soft_line_break_or_space(),
                                        token("ERROR<START_END_TAG_MISMATCH<start: "),
                                        text(&std::format!("{start_kind:?}")),
                                        token(", end: "),
                                        text(&std::format!("{:?}", tag.kind())),
                                        token(">>")
                                    ]
                                )?;
                                first_element = false;
                                continue;
                            }
                            _ => {
                                // all ok
                            }
                        }
                    }

                    match tag {
                        StartIndent => {
                            write!(f, [token("indent(")])?;
                        }
                        StartDedent(mode) => {
                            let label = match mode {
                                DedentMode::Level => "dedent",
                                DedentMode::Root => "dedentRoot",
                            };

                            write!(f, [token(label), token("(")])?;
                        }
                        StartAlign(tag::Align(count)) => {
                            write!(
                                f,
                                [
                                    token("align("),
                                    text(&count.to_string()),
                                    token(","),
                                    space(),
                                ]
                            )?;
                        }
                        StartLineSuffix { reserved_width } => {
                            write!(
                                f,
                                [
                                    token("line_suffix("),
                                    text(&std::format!("{reserved_width:?}")),
                                    token(","),
                                    space(),
                                ]
                            )?;
                        }
                        StartVerbatim(_) => {
                            write!(f, [token("verbatim(")])?;
                        }
                        StartGroup(group) => {
                            write!(f, [token("group(")])?;

                            if let Some(group_id) = group.id() {
                                write!(
                                    f,
                                    [text(&std::format!("\"{group_id:?}\"")), token(","), space(),]
                                )?;
                            }

                            match group.mode() {
                                GroupMode::Flat => {}
                                GroupMode::Expand => {
                                    write!(f, [token("expand: true,"), space()])?;
                                }
                                GroupMode::Propagated => {
                                    write!(f, [token("expand: propagated,"), space()])?;
                                }
                            }
                        }
                        StartBestFitParenthesize { id } => {
                            write!(f, [token("best_fit_parenthesize(")])?;

                            if let Some(group_id) = id {
                                write!(
                                    f,
                                    [text(&std::format!("\"{group_id:?}\"")), token(","), space(),]
                                )?;
                            }
                        }
                        StartConditionalGroup(group) => {
                            write!(
                                f,
                                [
                                    token("conditional_group(condition:"),
                                    space(),
                                    group.condition(),
                                    token(","),
                                    space()
                                ]
                            )?;

                            match group.mode() {
                                GroupMode::Flat => {}
                                GroupMode::Expand => {
                                    write!(f, [token("expand: true,"), space()])?;
                                }
                                GroupMode::Propagated => {
                                    write!(f, [token("expand: propagated,"), space()])?;
                                }
                            }
                        }
                        StartIndentIfGroupBreaks(id) => {
                            write!(
                                f,
                                [
                                    token("indent_if_group_breaks("),
                                    text(&std::format!("\"{id:?}\"")),
                                    token(","),
                                    space(),
                                ]
                            )?;
                        }
                        StartConditionalContent(condition) => {
                            match condition.mode {
                                PrintMode::Flat => {
                                    write!(f, [token("if_group_fits_on_line(")])?;
                                }
                                PrintMode::Expanded => {
                                    write!(f, [token("if_group_breaks(")])?;
                                }
                            }

                            if let Some(group_id) = condition.group_id {
                                write!(
                                    f,
                                    [text(&std::format!("\"{group_id:?}\"")), token(","), space()]
                                )?;
                            }
                        }
                        StartLabelled(label_id) => {
                            write!(
                                f,
                                [
                                    token("label("),
                                    text(&std::format!("\"{label_id:?}\"")),
                                    token(","),
                                    space(),
                                ]
                            )?;
                        }
                        StartFill => {
                            write!(f, [token("fill(")])?;
                        }
                        StartFitsExpanded(tag::FitsExpanded {
                            condition,
                            propagate_expand,
                        }) => {
                            write!(f, [token("fits_expanded(propagate_expand:"), space()])?;

                            if propagate_expand.get() {
                                write!(f, [token("true")])?;
                            } else {
                                write!(f, [token("false")])?;
                            }

                            write!(f, [token(","), space()])?;

                            if let Some(condition) = condition {
                                write!(
                                    f,
                                    [token("condition:"), space(), condition, token(","), space()]
                                )?;
                            }
                        }
                        StartEntry | StartBestFittingEntry => {
                            // handled after the match for all start tags
                        }
                        EndEntry | EndBestFittingEntry => write!(f, [ContentArrayEnd])?,
                        EndFill
                        | EndLabelled
                        | EndConditionalContent
                        | EndIndentIfGroupBreaks
                        | EndAlign
                        | EndIndent
                        | EndGroup
                        | EndConditionalGroup
                        | EndBestFitParenthesize
                        | EndLineSuffix
                        | EndDedent
                        | EndFitsExpanded
                        | EndVerbatim => {
                            write!(f, [ContentArrayEnd, token(")")])?;
                        }
                    }

                    // Every start tag opens a new content list for its children.
                    if tag.is_start() {
                        write!(f, [ContentArrayStart])?;
                    }
                }
            }
        }

        // Report any start tags that never saw their matching end tag.
        while let Some(top) = tag_stack.pop() {
            write!(
                f,
                [
                    ContentArrayEnd,
                    token(")"),
                    soft_line_break_or_space(),
                    text(&std::format!("<START_WITHOUT_END<{top:?}>>")),
                ]
            )?;
        }

        write!(f, [ContentArrayEnd])
    }
}
/// Helper that opens a `[` content list: emits the bracket and starts a
/// grouped, indented, soft-broken region. Closed by [`ContentArrayEnd`].
struct ContentArrayStart;

impl Format<IrFormatContext<'_>> for ContentArrayStart {
    fn fmt(&self, f: &mut Formatter<IrFormatContext>) -> FormatResult<()> {
        use Tag::{StartGroup, StartIndent};

        write!(f, [token("[")])?;

        f.write_elements([
            FormatElement::Tag(StartGroup(tag::Group::new())),
            FormatElement::Tag(StartIndent),
            FormatElement::Line(LineMode::Soft),
        ]);

        Ok(())
    }
}
/// Helper that closes a content list opened by [`ContentArrayStart`]:
/// ends the indented, grouped region and emits the closing `]`.
struct ContentArrayEnd;

impl Format<IrFormatContext<'_>> for ContentArrayEnd {
    fn fmt(&self, f: &mut Formatter<IrFormatContext>) -> FormatResult<()> {
        use Tag::{EndGroup, EndIndent};
        f.write_elements([
            FormatElement::Tag(EndIndent),
            FormatElement::Line(LineMode::Soft),
            FormatElement::Tag(EndGroup),
        ]);

        write!(f, [token("]")])
    }
}
impl FormatElements for [FormatElement] {
    fn will_break(&self) -> bool {
        // Content inside line suffixes or fits-expanded sections must not count;
        // track how deeply we are nested inside such containers.
        let mut ignore_depth = 0usize;

        for element in self {
            match element {
                // Line suffix
                // Ignore if any of its content breaks
                FormatElement::Tag(
                    Tag::StartLineSuffix { reserved_width: _ } | Tag::StartFitsExpanded(_),
                ) => {
                    ignore_depth += 1;
                }
                FormatElement::Tag(Tag::EndLineSuffix | Tag::EndFitsExpanded) => {
                    ignore_depth = ignore_depth.saturating_sub(1);
                }
                FormatElement::Interned(interned) if ignore_depth == 0 => {
                    if interned.will_break() {
                        return true;
                    }
                }
                element if ignore_depth == 0 && element.will_break() => {
                    return true;
                }
                _ => continue,
            }
        }

        debug_assert_eq!(ignore_depth, 0, "Unclosed start container");

        false
    }

    fn has_label(&self, expected: LabelId) -> bool {
        // Only the first element can carry the label for the whole slice.
        self.first()
            .is_some_and(|element| element.has_label(expected))
    }

    fn start_tag(&self, kind: TagKind) -> Option<&Tag> {
        /// Searches backwards for the start tag matching the end tag at the
        /// slice's end, descending into interned elements. `depth` counts the
        /// end tags of `kind` still waiting for their start tag.
        fn traverse_slice<'a>(
            slice: &'a [FormatElement],
            kind: TagKind,
            depth: &mut usize,
        ) -> Option<&'a Tag> {
            for element in slice.iter().rev() {
                match element {
                    FormatElement::Tag(tag) if tag.kind() == kind => {
                        if tag.is_start() {
                            if *depth == 0 {
                                // Invalid document
                                return None;
                            } else if *depth == 1 {
                                return Some(tag);
                            }
                            *depth -= 1;
                        } else {
                            *depth += 1;
                        }
                    }
                    FormatElement::Interned(interned) => {
                        match traverse_slice(interned, kind, depth) {
                            Some(start) => {
                                return Some(start);
                            }
                            // Reached end or invalid document
                            None if *depth == 0 => {
                                return None;
                            }
                            _ => {
                                // continue with other elements
                            }
                        }
                    }
                    _ => {}
                }
            }

            None
        }

        // Assert that the document ends at a tag with the specified kind.
        let _ = self.end_tag(kind)?;

        let mut depth = 0usize;

        traverse_slice(self, kind, &mut depth)
    }

    fn end_tag(&self, kind: TagKind) -> Option<&Tag> {
        self.last().and_then(|element| element.end_tag(kind))
    }
}
impl Format<IrFormatContext<'_>> for Condition {
    fn fmt(&self, f: &mut Formatter<IrFormatContext>) -> FormatResult<()> {
        // Renders the condition as the builder call that would create it,
        // including the group id when the condition targets a specific group.
        match (self.mode, self.group_id) {
            (PrintMode::Flat, None) => write!(f, [token("if_fits_on_line")]),
            (PrintMode::Flat, Some(id)) => write!(
                f,
                [
                    token("if_group_fits_on_line("),
                    text(&std::format!("\"{id:?}\"")),
                    token(")")
                ]
            ),
            (PrintMode::Expanded, None) => write!(f, [token("if_breaks")]),
            (PrintMode::Expanded, Some(id)) => write!(
                f,
                [
                    token("if_group_breaks("),
                    text(&std::format!("\"{id:?}\"")),
                    token(")")
                ]
            ),
        }
    }
}
#[cfg(test)]
mod tests {
use ruff_text_size::{TextRange, TextSize};
use crate::prelude::*;
use crate::{SimpleFormatContext, SourceCode};
use crate::{format, format_args, write};
#[test]
fn display_elements() {
let formatted = format!(
SimpleFormatContext::default(),
[format_with(|f| {
write!(
f,
[group(&format_args![
token("("),
soft_block_indent(&format_args![
token("Some longer content"),
space(),
token("That should ultimately break"),
])
])]
)
})]
)
.unwrap();
let document = formatted.into_document();
assert_eq!(
&std::format!("{}", document.display(SourceCode::default())),
r#"[
group([
"(",
indent([
soft_line_break,
"Some longer content That should ultimately break"
]),
soft_line_break
])
]"#
);
}
#[test]
fn escapes_quotes() {
let formatted = format!(
SimpleFormatContext::default(),
[token(r#""""Python docstring""""#)]
)
.unwrap();
let document = formatted.into_document();
assert_eq!(
&std::format!("{}", document.display(SourceCode::default())),
r#"["\"\"\"Python docstring\"\"\""]"#
);
}
#[test]
fn display_elements_with_source_text_slice() {
let source_code = "Some longer content\nThat should ultimately break";
let formatted = format!(
SimpleFormatContext::default().with_source_code(source_code),
[format_with(|f| {
write!(
f,
[group(&format_args![
token("("),
soft_block_indent(&format_args![
source_text_slice(TextRange::at(TextSize::new(0), TextSize::new(19))),
space(),
source_text_slice(TextRange::at(TextSize::new(20), TextSize::new(28))),
])
])]
)
})]
)
.unwrap();
let document = formatted.into_document();
assert_eq!(
&std::format!("{}", document.display(SourceCode::new(source_code))),
r#"[
group([
"(",
indent([
soft_line_break,
"Some longer content That should ultimately break"
]),
soft_line_break
])
]"#
);
}
#[test]
fn display_invalid_document() {
use Tag::*;
let document = Document::from(vec![
FormatElement::Token { text: "[" },
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/format_element/tag.rs | crates/ruff_formatter/src/format_element/tag.rs | use crate::format_element::PrintMode;
use crate::{GroupId, TextSize};
use std::cell::Cell;
use std::num::NonZeroU8;
/// A Tag marking the start and end of some content to which some special formatting should be applied.
///
/// Tags always come in pairs of a start and an end tag and the styling defined by this tag
/// will be applied to all elements in between the start/end tags.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Tag {
/// Indents the content one level deeper, see [`crate::builders::indent`] for documentation and examples.
StartIndent,
EndIndent,
/// Variant of [`TagKind::Indent`] that indents content by a number of spaces. For example, `Align(2)`
/// indents any content following a line break by an additional two spaces.
///
/// Nesting (Aligns)[`TagKind::Align`] has the effect that all except the most inner align are handled as (Indent)[`TagKind::Indent`].
StartAlign(Align),
EndAlign,
/// Reduces the indentation of the specified content either by one level or to the root, depending on the mode.
/// Reverse operation of `Indent` and can be used to *undo* an `Align` for nested content.
StartDedent(DedentMode),
EndDedent,
/// Creates a logical group where its content is either consistently printed:
/// - on a single line: Omitting `LineMode::Soft` line breaks and printing spaces for `LineMode::SoftOrSpace`
/// - on multiple lines: Printing all line breaks
///
/// See [`crate::builders::group`] for documentation and examples.
StartGroup(Group),
EndGroup,
/// Creates a logical group similar to [`Tag::StartGroup`] but only if the condition is met.
/// This is an optimized representation for (assuming the content should only be grouped if another group fits):
///
/// ```text
/// if_group_breaks(content, other_group_id),
/// if_group_fits_on_line(group(&content), other_group_id)
/// ```
StartConditionalGroup(ConditionalGroup),
EndConditionalGroup,
/// Allows to specify content that gets printed depending on whatever the enclosing group
/// is printed on a single line or multiple lines. See [`crate::builders::if_group_breaks`] for examples.
StartConditionalContent(Condition),
EndConditionalContent,
/// Optimized version of [`Tag::StartConditionalContent`] for the case where some content
/// should be indented if the specified group breaks.
StartIndentIfGroupBreaks(GroupId),
EndIndentIfGroupBreaks,
/// Concatenates multiple elements together with a given separator printed in either
/// flat or expanded mode to fill the print width. Expect that the content is a list of alternating
/// [element, separator] See [`crate::Formatter::fill`].
StartFill,
EndFill,
/// Entry inside of a [`Tag::StartFill`]
StartEntry,
EndEntry,
/// Delay the printing of its content until the next line break. Using reserved width will include
/// the associated line suffix during measurement.
StartLineSuffix {
reserved_width: u32,
},
EndLineSuffix,
/// A token that tracks tokens/nodes that are printed as verbatim.
StartVerbatim(VerbatimKind),
EndVerbatim,
/// Special semantic element marking the content with a label.
/// This does not directly influence how the content will be printed.
///
/// See [`crate::builders::labelled`] for documentation.
StartLabelled(LabelId),
EndLabelled,
StartFitsExpanded(FitsExpanded),
EndFitsExpanded,
/// Marks the start and end of a best-fitting variant.
StartBestFittingEntry,
EndBestFittingEntry,
/// Parenthesizes the content but only if adding the parentheses and indenting the content
/// makes the content fit in the configured line width.
///
/// See [`crate::builders::best_fit_parenthesize`] for an in-depth explanation.
StartBestFitParenthesize {
id: Option<GroupId>,
},
EndBestFitParenthesize,
}
impl Tag {
/// Returns `true` if `self` is any start tag.
pub const fn is_start(&self) -> bool {
matches!(
self,
Tag::StartIndent
| Tag::StartAlign(_)
| Tag::StartDedent(_)
| Tag::StartGroup(_)
| Tag::StartConditionalGroup(_)
| Tag::StartConditionalContent(_)
| Tag::StartIndentIfGroupBreaks(_)
| Tag::StartFill
| Tag::StartEntry
| Tag::StartLineSuffix { .. }
| Tag::StartVerbatim(_)
| Tag::StartLabelled(_)
| Tag::StartFitsExpanded(_)
| Tag::StartBestFittingEntry
| Tag::StartBestFitParenthesize { .. }
)
}
/// Returns `true` if `self` is any end tag.
pub const fn is_end(&self) -> bool {
!self.is_start()
}
pub const fn kind(&self) -> TagKind {
#[allow(clippy::enum_glob_use)]
use Tag::*;
match self {
StartIndent | EndIndent => TagKind::Indent,
StartAlign(_) | EndAlign => TagKind::Align,
StartDedent(_) | EndDedent => TagKind::Dedent,
StartGroup(_) | EndGroup => TagKind::Group,
StartConditionalGroup(_) | EndConditionalGroup => TagKind::ConditionalGroup,
StartConditionalContent(_) | EndConditionalContent => TagKind::ConditionalContent,
StartIndentIfGroupBreaks(_) | EndIndentIfGroupBreaks => TagKind::IndentIfGroupBreaks,
StartFill | EndFill => TagKind::Fill,
StartEntry | EndEntry => TagKind::Entry,
StartLineSuffix { reserved_width: _ } | EndLineSuffix => TagKind::LineSuffix,
StartVerbatim(_) | EndVerbatim => TagKind::Verbatim,
StartLabelled(_) | EndLabelled => TagKind::Labelled,
StartFitsExpanded { .. } | EndFitsExpanded => TagKind::FitsExpanded,
StartBestFittingEntry | EndBestFittingEntry => TagKind::BestFittingEntry,
StartBestFitParenthesize { .. } | EndBestFitParenthesize => {
TagKind::BestFitParenthesize
}
}
}
}
/// The kind of a [Tag].
///
/// Each start end tag pair has its own [tag kind](TagKind).
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TagKind {
Indent,
Align,
Dedent,
Group,
ConditionalGroup,
ConditionalContent,
IndentIfGroupBreaks,
Fill,
Entry,
LineSuffix,
Verbatim,
Labelled,
FitsExpanded,
BestFittingEntry,
BestFitParenthesize,
}
#[derive(Debug, Copy, Default, Clone, Eq, PartialEq)]
pub enum GroupMode {
/// Print group in flat mode.
#[default]
Flat,
/// The group should be printed in expanded mode
Expand,
/// Expand mode has been propagated from an enclosing group to this group.
Propagated,
}
impl GroupMode {
pub const fn is_flat(&self) -> bool {
matches!(self, GroupMode::Flat)
}
}
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct FitsExpanded {
pub(crate) condition: Option<Condition>,
pub(crate) propagate_expand: Cell<bool>,
}
impl FitsExpanded {
pub fn new() -> Self {
Self::default()
}
#[must_use]
pub fn with_condition(mut self, condition: Option<Condition>) -> Self {
self.condition = condition;
self
}
pub fn propagate_expand(&self) {
self.propagate_expand.set(true);
}
}
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct Group {
id: Option<GroupId>,
mode: Cell<GroupMode>,
}
impl Group {
pub fn new() -> Self {
Self {
id: None,
mode: Cell::new(GroupMode::Flat),
}
}
#[must_use]
pub fn with_id(mut self, id: Option<GroupId>) -> Self {
self.id = id;
self
}
#[must_use]
pub fn with_mode(mut self, mode: GroupMode) -> Self {
self.mode = Cell::new(mode);
self
}
pub fn mode(&self) -> GroupMode {
self.mode.get()
}
pub fn propagate_expand(&self) {
if self.mode.get() == GroupMode::Flat {
self.mode.set(GroupMode::Propagated);
}
}
pub fn id(&self) -> Option<GroupId> {
self.id
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ConditionalGroup {
mode: Cell<GroupMode>,
condition: Condition,
}
impl ConditionalGroup {
pub fn new(condition: Condition) -> Self {
Self {
mode: Cell::new(GroupMode::Flat),
condition,
}
}
pub fn condition(&self) -> Condition {
self.condition
}
pub fn propagate_expand(&self) {
self.mode.set(GroupMode::Propagated);
}
pub fn mode(&self) -> GroupMode {
self.mode.get()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum DedentMode {
/// Reduces the indent by a level (if the current indent is > 0)
Level,
/// Reduces the indent to the root
Root,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Condition {
/// - `Flat` -> Omitted if the enclosing group is a multiline group, printed for groups fitting on a single line
/// - `Expanded` -> Omitted if the enclosing group fits on a single line, printed if the group breaks over multiple lines.
pub(crate) mode: PrintMode,
/// The id of the group for which it should check if it breaks or not. The group must appear in the document
/// before the conditional group (but doesn't have to be in the ancestor chain).
pub(crate) group_id: Option<GroupId>,
}
impl Condition {
pub(crate) fn new(mode: PrintMode) -> Self {
Self {
mode,
group_id: None,
}
}
pub fn if_fits_on_line() -> Self {
Self {
mode: PrintMode::Flat,
group_id: None,
}
}
pub fn if_group_fits_on_line(group_id: GroupId) -> Self {
Self {
mode: PrintMode::Flat,
group_id: Some(group_id),
}
}
pub fn if_breaks() -> Self {
Self {
mode: PrintMode::Expanded,
group_id: None,
}
}
pub fn if_group_breaks(group_id: GroupId) -> Self {
Self {
mode: PrintMode::Expanded,
group_id: Some(group_id),
}
}
#[must_use]
pub fn with_group_id(mut self, id: Option<GroupId>) -> Self {
self.group_id = id;
self
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Align(pub(crate) NonZeroU8);
impl Align {
pub fn count(&self) -> NonZeroU8 {
self.0
}
}
#[derive(Debug, Eq, Copy, Clone)]
pub struct LabelId {
value: u64,
#[cfg(debug_assertions)]
name: &'static str,
}
impl PartialEq for LabelId {
fn eq(&self, other: &Self) -> bool {
let is_equal = self.value == other.value;
#[cfg(debug_assertions)]
{
if is_equal {
assert_eq!(
self.name, other.name,
"Two `LabelId`s with different names have the same `value`. Are you mixing labels of two different `LabelDefinition` or are the values returned by the `LabelDefinition` not unique?"
);
}
}
is_equal
}
}
impl LabelId {
#[expect(clippy::needless_pass_by_value)]
pub fn of<T: LabelDefinition>(label: T) -> Self {
Self {
value: label.value(),
#[cfg(debug_assertions)]
name: label.name(),
}
}
}
/// Defines the valid labels of a language. You want to have at most one implementation per formatter
/// project.
pub trait LabelDefinition {
/// Returns the `u64` uniquely identifying this specific label.
fn value(&self) -> u64;
/// Returns the name of the label that is shown in debug builds.
fn name(&self) -> &'static str;
}
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub enum VerbatimKind {
Bogus,
Suppressed,
Verbatim {
/// the length of the formatted node
length: TextSize,
},
}
impl VerbatimKind {
pub const fn is_bogus(&self) -> bool {
matches!(self, VerbatimKind::Bogus)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/line_suffixes.rs | crates/ruff_formatter/src/printer/line_suffixes.rs | use crate::FormatElement;
use crate::printer::call_stack::PrintElementArgs;
/// Stores the queued line suffixes.
#[derive(Debug, Default)]
pub(super) struct LineSuffixes<'a> {
suffixes: Vec<LineSuffixEntry<'a>>,
}
impl<'a> LineSuffixes<'a> {
/// Extends the line suffixes with `elements`, storing their call stack arguments with them.
pub(super) fn extend<I>(&mut self, args: PrintElementArgs, elements: I)
where
I: IntoIterator<Item = &'a FormatElement>,
{
self.suffixes
.extend(elements.into_iter().map(LineSuffixEntry::Suffix));
self.suffixes.push(LineSuffixEntry::Args(args));
}
/// Takes all the pending line suffixes.
pub(super) fn take_pending<'l>(
&'l mut self,
) -> impl DoubleEndedIterator<Item = LineSuffixEntry<'a>> + 'l + ExactSizeIterator {
self.suffixes.drain(..)
}
/// Returns `true` if there are any line suffixes and `false` otherwise.
pub(super) fn has_pending(&self) -> bool {
!self.suffixes.is_empty()
}
}
#[derive(Debug, Copy, Clone)]
pub(super) enum LineSuffixEntry<'a> {
/// A line suffix to print
Suffix(&'a FormatElement),
/// Potentially changed call arguments that should be used to format any following items.
Args(PrintElementArgs),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/call_stack.rs | crates/ruff_formatter/src/printer/call_stack.rs | use crate::format_element::PrintMode;
use crate::format_element::tag::TagKind;
use crate::printer::stack::{Stack, StackedStack};
use crate::printer::{Indentation, MeasureMode};
use crate::{IndentStyle, InvalidDocumentError, PrintError, PrintResult};
use std::fmt::Debug;
use std::num::NonZeroU8;
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub(super) enum StackFrameKind {
Root,
Tag(TagKind),
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub(super) struct StackFrame {
kind: StackFrameKind,
args: PrintElementArgs,
}
/// Stores arguments passed to `print_element` call, holding the state specific to printing an element.
/// E.g. the `indent` depends on the token the Printer's currently processing. That's why
/// it must be stored outside of the [`PrinterState`] that stores the state common to all elements.
///
/// The state is passed by value, which is why it's important that it isn't storing any heavy
/// data structures. Such structures should be stored on the [`PrinterState`] instead.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(super) struct PrintElementArgs {
indent: Indentation,
mode: PrintMode,
measure_mode: MeasureMode,
}
impl PrintElementArgs {
pub(crate) fn new(indent: Indentation) -> Self {
Self {
indent,
..Self::default()
}
}
pub(super) fn mode(self) -> PrintMode {
self.mode
}
pub(super) fn measure_mode(self) -> MeasureMode {
self.measure_mode
}
pub(super) fn indentation(self) -> Indentation {
self.indent
}
pub(crate) fn increment_indent_level(mut self, indent_style: IndentStyle) -> Self {
self.indent = self.indent.increment_level(indent_style);
self
}
pub(crate) fn decrement_indent(mut self) -> Self {
self.indent = self.indent.decrement();
self
}
pub(crate) fn reset_indent(mut self) -> Self {
self.indent = Indentation::default();
self
}
pub(crate) fn set_indent_align(mut self, count: NonZeroU8) -> Self {
self.indent = self.indent.set_align(count);
self
}
pub(crate) fn with_print_mode(mut self, mode: PrintMode) -> Self {
self.mode = mode;
self
}
pub(crate) fn with_measure_mode(mut self, mode: MeasureMode) -> Self {
self.measure_mode = mode;
self
}
}
impl Default for PrintElementArgs {
fn default() -> Self {
Self {
indent: Indentation::Level(0),
mode: PrintMode::Expanded,
measure_mode: MeasureMode::FirstLine,
}
}
}
/// Call stack that stores the [`PrintElementCallArgs`].
///
/// New [`PrintElementCallArgs`] are pushed onto the stack for every [`start`](Tag::is_start) [`Tag`](FormatElement::Tag)
/// and popped when reaching the corresponding [`end`](Tag::is_end) [`Tag`](FormatElement::Tag).
pub(super) trait CallStack {
type Stack: Stack<StackFrame> + Debug;
fn stack(&self) -> &Self::Stack;
fn stack_mut(&mut self) -> &mut Self::Stack;
/// Pops the call arguments at the top and asserts that they correspond to a start tag of `kind`.
///
/// Returns `Ok` with the arguments if the kind of the top stack frame matches `kind`, otherwise
/// returns `Err`.
fn pop(&mut self, kind: TagKind) -> PrintResult<PrintElementArgs> {
let last = self.stack_mut().pop();
match last {
Some(StackFrame {
kind: StackFrameKind::Tag(actual_kind),
args,
}) if actual_kind == kind => Ok(args),
// Start / End kind don't match
Some(StackFrame {
kind: StackFrameKind::Tag(expected_kind),
..
}) => Err(PrintError::InvalidDocument(Self::invalid_document_error(
kind,
Some(expected_kind),
))),
// Tried to pop the outer most stack frame, which is not valid
Some(
frame @ StackFrame {
kind: StackFrameKind::Root,
..
},
) => {
// Put it back in to guarantee that the stack is never empty
self.stack_mut().push(frame);
Err(PrintError::InvalidDocument(Self::invalid_document_error(
kind, None,
)))
}
// This should be unreachable but having it for completeness. Happens if the stack is empty.
None => Err(PrintError::InvalidDocument(Self::invalid_document_error(
kind, None,
))),
}
}
#[cold]
fn invalid_document_error(
end_kind: TagKind,
start_kind: Option<TagKind>,
) -> InvalidDocumentError {
match start_kind {
None => InvalidDocumentError::StartTagMissing { kind: end_kind },
Some(start_kind) => InvalidDocumentError::StartEndTagMismatch {
start_kind,
end_kind,
},
}
}
/// Returns the [`PrintElementArgs`] for the current stack frame.
fn top(&self) -> PrintElementArgs {
self.stack()
.top()
.expect("Expected `stack` to never be empty.")
.args
}
/// Returns the [`TagKind`] of the current stack frame or [None] if this is the root stack frame.
fn top_kind(&self) -> Option<TagKind> {
match self
.stack()
.top()
.expect("Expected `stack` to never be empty.")
.kind
{
StackFrameKind::Root => None,
StackFrameKind::Tag(kind) => Some(kind),
}
}
/// Creates a new stack frame for a [`FormatElement::Tag`] of `kind` with `args` as the call arguments.
fn push(&mut self, kind: TagKind, args: PrintElementArgs) {
self.stack_mut().push(StackFrame {
kind: StackFrameKind::Tag(kind),
args,
});
}
}
/// Call stack used for printing the [`FormatElement`]s
#[derive(Debug, Clone)]
pub(super) struct PrintCallStack(Vec<StackFrame>);
impl PrintCallStack {
pub(super) fn new(args: PrintElementArgs) -> Self {
Self(vec![StackFrame {
kind: StackFrameKind::Root,
args,
}])
}
}
impl CallStack for PrintCallStack {
type Stack = Vec<StackFrame>;
fn stack(&self) -> &Self::Stack {
&self.0
}
fn stack_mut(&mut self) -> &mut Self::Stack {
&mut self.0
}
}
/// Call stack used for measuring if some content fits on the line.
///
/// The stack is a view on top of the [`PrintCallStack`] because the stack frames are still necessary for printing.
#[must_use]
pub(super) struct FitsCallStack<'print> {
stack: StackedStack<'print, StackFrame>,
}
impl<'print> FitsCallStack<'print> {
pub(super) fn new(print: &'print PrintCallStack, saved: Vec<StackFrame>) -> Self {
let stack = StackedStack::with_vec(&print.0, saved);
Self { stack }
}
pub(super) fn finish(self) -> Vec<StackFrame> {
self.stack.into_vec()
}
}
impl<'a> CallStack for FitsCallStack<'a> {
type Stack = StackedStack<'a, StackFrame>;
fn stack(&self) -> &Self::Stack {
&self.stack
}
fn stack_mut(&mut self) -> &mut Self::Stack {
&mut self.stack
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/mod.rs | crates/ruff_formatter/src/printer/mod.rs | use std::num::NonZeroU8;
use drop_bomb::DebugDropBomb;
use unicode_width::UnicodeWidthChar;
pub use printer_options::*;
use ruff_text_size::{TextLen, TextSize};
use crate::format_element::document::Document;
use crate::format_element::tag::{Condition, GroupMode};
use crate::format_element::{BestFittingMode, BestFittingVariants, LineMode, PrintMode};
use crate::prelude::tag::{DedentMode, Tag, TagKind, VerbatimKind};
use crate::prelude::{TextWidth, tag};
use crate::printer::call_stack::{
CallStack, FitsCallStack, PrintCallStack, PrintElementArgs, StackFrame,
};
use crate::printer::line_suffixes::{LineSuffixEntry, LineSuffixes};
use crate::printer::queue::{
AllPredicate, FitsEndPredicate, FitsQueue, PrintQueue, Queue, SingleEntryPredicate,
};
use crate::source_code::SourceCode;
use crate::{
ActualStart, FormatElement, GroupId, IndentStyle, InvalidDocumentError, PrintError,
PrintResult, Printed, SourceMarker, TextRange,
};
mod call_stack;
mod line_suffixes;
mod printer_options;
mod queue;
mod stack;
/// Prints the format elements into a string
#[derive(Debug, Default)]
pub struct Printer<'a> {
options: PrinterOptions,
source_code: SourceCode<'a>,
state: PrinterState<'a>,
}
impl<'a> Printer<'a> {
pub fn new(source_code: SourceCode<'a>, options: PrinterOptions) -> Self {
Self {
source_code,
options,
state: PrinterState::with_capacity(source_code.as_str().len()),
}
}
/// Prints the passed in element as well as all its content
pub fn print(self, document: &'a Document) -> PrintResult<Printed> {
self.print_with_indent(document, 0)
}
/// Prints the passed in element as well as all its content,
/// starting at the specified indentation level
#[tracing::instrument(level = "debug", name = "Printer::print", skip_all)]
pub fn print_with_indent(
mut self,
document: &'a Document,
indent: u16,
) -> PrintResult<Printed> {
let indentation = Indentation::Level(indent);
self.state.pending_indent = indentation;
let mut stack = PrintCallStack::new(PrintElementArgs::new(indentation));
let mut queue: PrintQueue<'a> = PrintQueue::new(document.as_ref());
loop {
if let Some(element) = queue.pop() {
self.print_element(&mut stack, &mut queue, element)?;
} else {
if !self.flush_line_suffixes(&mut queue, &mut stack, None) {
break;
}
}
}
// Push any pending marker
self.push_marker();
Ok(Printed::new(
self.state.buffer,
None,
self.state.source_markers,
self.state.verbatim_markers,
))
}
/// Prints a single element and push the following elements to queue
fn print_element(
&mut self,
stack: &mut PrintCallStack,
queue: &mut PrintQueue<'a>,
element: &'a FormatElement,
) -> PrintResult<()> {
#[allow(clippy::enum_glob_use)]
use Tag::*;
let args = stack.top();
match element {
FormatElement::Space => self.print_text(Text::Token(" ")),
FormatElement::Token { text } => self.print_text(Text::Token(text)),
FormatElement::Text { text, text_width } => self.print_text(Text::Text {
text,
text_width: *text_width,
}),
FormatElement::SourceCodeSlice { slice, text_width } => {
let text = slice.text(self.source_code);
self.print_text(Text::Text {
text,
text_width: *text_width,
});
}
FormatElement::Line(line_mode) => {
if args.mode().is_flat()
&& matches!(line_mode, LineMode::Soft | LineMode::SoftOrSpace)
{
if line_mode == &LineMode::SoftOrSpace {
self.print_text(Text::Token(" "));
}
} else if self.state.line_suffixes.has_pending() {
self.flush_line_suffixes(queue, stack, Some(element));
} else {
// Only print a newline if the current line isn't already empty
if !self.state.buffer[self.state.line_start..].is_empty() {
self.push_marker();
self.print_char('\n');
}
// Print a second line break if this is an empty line
if line_mode == &LineMode::Empty {
self.push_marker();
self.print_char('\n');
}
self.state.pending_indent = args.indentation();
}
}
FormatElement::ExpandParent => {
// Handled in `Document::propagate_expands()
}
FormatElement::SourcePosition(position) => {
// The printer defers printing indents until the next text
// is printed. Pushing the marker now would mean that the
// mapped range includes the indent range, which we don't want.
// Queue the source map position and emit it when printing the next character
self.state.pending_source_position = Some(*position);
}
FormatElement::LineSuffixBoundary => {
const HARD_BREAK: &FormatElement = &FormatElement::Line(LineMode::Hard);
self.flush_line_suffixes(queue, stack, Some(HARD_BREAK));
}
FormatElement::BestFitting { variants, mode } => {
self.print_best_fitting(variants, *mode, queue, stack)?;
}
FormatElement::Interned(content) => {
queue.extend_back(content);
}
FormatElement::Tag(StartGroup(group)) => {
let print_mode = match group.mode() {
GroupMode::Expand | GroupMode::Propagated => PrintMode::Expanded,
GroupMode::Flat => {
self.flat_group_print_mode(TagKind::Group, group.id(), args, queue, stack)?
}
};
if let Some(id) = group.id() {
self.state.group_modes.insert_print_mode(id, print_mode);
}
stack.push(TagKind::Group, args.with_print_mode(print_mode));
}
FormatElement::Tag(StartBestFitParenthesize { id }) => {
const OPEN_PAREN: FormatElement = FormatElement::Token { text: "(" };
const INDENT: FormatElement = FormatElement::Tag(Tag::StartIndent);
const HARD_LINE_BREAK: FormatElement = FormatElement::Line(LineMode::Hard);
let fits_flat = self.flat_group_print_mode(
TagKind::BestFitParenthesize,
*id,
args,
queue,
stack,
)? == PrintMode::Flat;
let print_mode = if fits_flat {
PrintMode::Flat
} else {
// Test if the content fits in expanded mode. If not, prefer avoiding the parentheses
// over parenthesizing the expression.
if let Some(id) = id {
self.state
.group_modes
.insert_print_mode(*id, PrintMode::Expanded);
}
stack.push(
TagKind::BestFitParenthesize,
args.with_measure_mode(MeasureMode::AllLines),
);
queue.extend_back(&[OPEN_PAREN, INDENT, HARD_LINE_BREAK]);
let fits_expanded = self.fits(queue, stack)?;
queue.pop_slice();
stack.pop(TagKind::BestFitParenthesize)?;
if fits_expanded {
PrintMode::Expanded
} else {
PrintMode::Flat
}
};
if let Some(id) = id {
self.state.group_modes.insert_print_mode(*id, print_mode);
}
if print_mode.is_expanded() {
// Parenthesize the content. The `EndIndent` is handled inside of the `EndBestFitParenthesize`
queue.extend_back(&[OPEN_PAREN, INDENT, HARD_LINE_BREAK]);
}
stack.push(
TagKind::BestFitParenthesize,
args.with_print_mode(print_mode),
);
}
FormatElement::Tag(EndBestFitParenthesize) => {
if args.mode().is_expanded() {
const HARD_LINE_BREAK: FormatElement = FormatElement::Line(LineMode::Hard);
const CLOSE_PAREN: FormatElement = FormatElement::Token { text: ")" };
// Finish the indent and print the hardline break and closing parentheses.
stack.pop(TagKind::Indent)?;
queue.extend_back(&[HARD_LINE_BREAK, CLOSE_PAREN]);
}
stack.pop(TagKind::BestFitParenthesize)?;
}
FormatElement::Tag(StartConditionalGroup(group)) => {
let condition = group.condition();
let expected_mode = match condition.group_id {
None => args.mode(),
Some(id) => self.state.group_modes.get_print_mode(id)?,
};
if expected_mode == condition.mode {
let print_mode = match group.mode() {
GroupMode::Expand | GroupMode::Propagated => PrintMode::Expanded,
GroupMode::Flat => self.flat_group_print_mode(
TagKind::ConditionalGroup,
None,
args,
queue,
stack,
)?,
};
stack.push(TagKind::ConditionalGroup, args.with_print_mode(print_mode));
} else {
// Condition isn't met, render as normal content
stack.push(TagKind::ConditionalGroup, args);
}
}
FormatElement::Tag(StartFill) => {
self.print_fill_entries(queue, stack)?;
}
FormatElement::Tag(StartIndent) => {
stack.push(
TagKind::Indent,
args.increment_indent_level(self.options.indent_style()),
);
}
FormatElement::Tag(StartDedent(mode)) => {
let args = match mode {
DedentMode::Level => args.decrement_indent(),
DedentMode::Root => args.reset_indent(),
};
stack.push(TagKind::Dedent, args);
}
FormatElement::Tag(StartAlign(align)) => {
stack.push(TagKind::Align, args.set_indent_align(align.count()));
}
FormatElement::Tag(StartConditionalContent(Condition { mode, group_id })) => {
let group_mode = match group_id {
None => args.mode(),
Some(id) => self.state.group_modes.get_print_mode(*id)?,
};
if *mode == group_mode {
stack.push(TagKind::ConditionalContent, args);
} else {
queue.skip_content(TagKind::ConditionalContent);
}
}
FormatElement::Tag(StartIndentIfGroupBreaks(group_id)) => {
let group_mode = self.state.group_modes.get_print_mode(*group_id)?;
let args = match group_mode {
PrintMode::Flat => args,
PrintMode::Expanded => args.increment_indent_level(self.options.indent_style),
};
stack.push(TagKind::IndentIfGroupBreaks, args);
}
FormatElement::Tag(StartLineSuffix { reserved_width }) => {
self.state.line_width += reserved_width;
self.state
.line_suffixes
.extend(args, queue.iter_content(TagKind::LineSuffix));
}
FormatElement::Tag(StartVerbatim(kind)) => {
if let VerbatimKind::Verbatim { length } = kind {
// SAFETY: Ruff only supports formatting files <= 4GB
#[expect(clippy::cast_possible_truncation)]
self.state.verbatim_markers.push(TextRange::at(
TextSize::from(self.state.buffer.len() as u32),
*length,
));
}
stack.push(TagKind::Verbatim, args);
}
FormatElement::Tag(StartFitsExpanded(tag::FitsExpanded { condition, .. })) => {
let condition_met = match condition {
Some(condition) => {
let group_mode = match condition.group_id {
Some(group_id) => self.state.group_modes.get_print_mode(group_id)?,
None => args.mode(),
};
condition.mode == group_mode
}
None => true,
};
if condition_met {
// We measured the inner groups all in expanded. It now is necessary to measure if the inner groups fit as well.
self.state.measured_group_fits = false;
}
stack.push(TagKind::FitsExpanded, args);
}
FormatElement::Tag(tag @ (StartLabelled(_) | StartEntry | StartBestFittingEntry)) => {
stack.push(tag.kind(), args);
}
FormatElement::Tag(
tag @ (EndLabelled
| EndEntry
| EndGroup
| EndConditionalGroup
| EndIndent
| EndDedent
| EndAlign
| EndConditionalContent
| EndIndentIfGroupBreaks
| EndFitsExpanded
| EndVerbatim
| EndLineSuffix
| EndBestFittingEntry
| EndFill),
) => {
stack.pop(tag.kind())?;
}
}
Ok(())
}
fn fits(&mut self, queue: &PrintQueue<'a>, stack: &PrintCallStack) -> PrintResult<bool> {
let mut measure = FitsMeasurer::new(queue, stack, self);
let result = measure.fits(&mut AllPredicate);
measure.finish();
result
}
fn flat_group_print_mode(
&mut self,
kind: TagKind,
id: Option<GroupId>,
args: PrintElementArgs,
queue: &PrintQueue<'a>,
stack: &mut PrintCallStack,
) -> PrintResult<PrintMode> {
let print_mode = match args.mode() {
PrintMode::Flat if self.state.measured_group_fits => {
// A parent group has already verified that this group fits on a single line
// Thus, just continue in flat mode
PrintMode::Flat
}
// The printer is either in expanded mode or it's necessary to re-measure if the group fits
// because the printer printed a line break
_ => {
self.state.measured_group_fits = true;
if let Some(id) = id {
self.state
.group_modes
.insert_print_mode(id, PrintMode::Flat);
}
// Measure to see if the group fits up on a single line. If that's the case,
// print the group in "flat" mode, otherwise continue in expanded mode
stack.push(kind, args.with_print_mode(PrintMode::Flat));
let fits = self.fits(queue, stack)?;
stack.pop(kind)?;
if fits {
PrintMode::Flat
} else {
PrintMode::Expanded
}
}
};
Ok(print_mode)
}
fn print_text(&mut self, text: Text) {
if !self.state.pending_indent.is_empty() {
let (indent_char, repeat_count) = match self.options.indent_style() {
IndentStyle::Tab => ('\t', 1),
IndentStyle::Space => (' ', self.options.indent_width()),
};
let indent = std::mem::take(&mut self.state.pending_indent);
let total_indent_char_count = indent.level() as usize * repeat_count as usize;
self.state
.buffer
.reserve(total_indent_char_count + indent.align() as usize);
for _ in 0..total_indent_char_count {
self.print_char(indent_char);
}
for _ in 0..indent.align() {
self.print_char(' ');
}
}
self.push_marker();
match text {
#[expect(clippy::cast_possible_truncation)]
Text::Token(token) => {
self.state.buffer.push_str(token);
self.state.line_width += token.len() as u32;
}
Text::Text {
text,
text_width: width,
} => {
if let Some(width) = width.width() {
self.state.buffer.push_str(text);
self.state.line_width += width.value();
} else {
for char in text.chars() {
self.print_char(char);
}
}
}
}
}
/// Records a source-map marker that maps the pending source position (if any)
/// to the current end of the output buffer. Consecutive identical markers are
/// only recorded once.
fn push_marker(&mut self) {
    let Some(source) = self.state.pending_source_position.take() else {
        return;
    };

    let marker = SourceMarker {
        source,
        dest: self.state.buffer.text_len(),
    };

    match self.state.source_markers.last() {
        // Skip exact duplicates of the most recent marker.
        Some(last) if *last == marker => {}
        _ => self.state.source_markers.push(marker),
    }
}
/// Replays all pending line suffixes onto the queue.
///
/// Returns `true` if any suffix was queued. When `line_break` is given, it is
/// re-queued first so that it prints again after the suffixes have been
/// flushed.
fn flush_line_suffixes(
    &mut self,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
    line_break: Option<&'a FormatElement>,
) -> bool {
    let suffixes = self.state.line_suffixes.take_pending();

    if suffixes.len() == 0 {
        return false;
    }

    // Print this line break element again once all the line suffixes have
    // been flushed.
    if let Some(line_break) = line_break {
        queue.push(line_break);
    }

    // Entries are pushed in reverse order.
    for entry in suffixes.rev() {
        match entry {
            LineSuffixEntry::Suffix(suffix) => queue.push(suffix),
            LineSuffixEntry::Args(args) => {
                const LINE_SUFFIX_END: &FormatElement =
                    &FormatElement::Tag(Tag::EndLineSuffix);

                stack.push(TagKind::LineSuffix, args);
                queue.push(LINE_SUFFIX_END);
            }
        }
    }

    true
}
/// Prints a `best_fitting` element: the first variant that fits on the line
/// wins; if none fits, the last (most expanded) variant is used as fallback.
fn print_best_fitting(
    &mut self,
    variants: &'a BestFittingVariants,
    mode: BestFittingMode,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
) -> PrintResult<()> {
    let args = stack.top();

    // A parent already measured that the flattest variant fits; print it
    // directly without re-measuring.
    if args.mode().is_flat() && self.state.measured_group_fits {
        queue.extend_back(variants.most_flat());
        self.print_entry(queue, stack, args, TagKind::BestFittingEntry)
    } else {
        self.state.measured_group_fits = true;

        let mut variants_iter = variants.into_iter();
        let mut current = variants_iter.next().unwrap();

        for next in variants_iter {
            // Test if this variant fits and if so, use it. Otherwise try the next
            // variant.

            // Try to fit only the first variant on a single line
            if !matches!(
                current.first(),
                Some(&FormatElement::Tag(Tag::StartBestFittingEntry))
            ) {
                return invalid_start_tag(TagKind::BestFittingEntry, current.first());
            }

            // Skip the first element because we want to override the args for the entry and the
            // args must be popped from the stack as soon as it sees the matching end entry.
            let content = &current[1..];

            let entry_args = args
                .with_print_mode(PrintMode::Flat)
                .with_measure_mode(MeasureMode::from(mode));

            // Measure with the content (sans start tag) queued and a matching
            // frame pushed; undo both afterwards.
            queue.extend_back(content);
            stack.push(TagKind::BestFittingEntry, entry_args);
            let variant_fits = self.fits(queue, stack)?;
            stack.pop(TagKind::BestFittingEntry)?;

            // Remove the content slice because printing needs the variant WITH the start entry
            let popped_slice = queue.pop_slice();
            debug_assert_eq!(popped_slice, Some(content));

            if variant_fits {
                queue.extend_back(current);
                return self.print_entry(
                    queue,
                    stack,
                    args.with_print_mode(PrintMode::Flat),
                    TagKind::BestFittingEntry,
                );
            }

            current = next;
        }

        // At this stage current is the most expanded.
        // No variant fits, take the last (most expanded) as fallback
        queue.extend_back(current);
        self.print_entry(
            queue,
            stack,
            args.with_print_mode(PrintMode::Expanded),
            TagKind::BestFittingEntry,
        )
    }
}
/// Tries to fit as much content as possible on a single line.
///
/// `Fill` is a sequence of *item*, *separator*, *item*, *separator*, *item*, ... entries.
/// The goal is to fit as many items (with their separators) on a single line as possible and
/// first expand the *separator* if the content exceeds the print width and only fallback to expanding
/// the *item*s if the *item* or the *item* and the expanded *separator* don't fit on the line.
///
/// The implementation handles the following 5 cases:
///
/// - The *item*, *separator*, and the *next item* fit on the same line.
///   Print the *item* and *separator* in flat mode.
/// - The *item* and *separator* fit on the line but there's not enough space for the *next item*.
///   Print the *item* in flat mode and the *separator* in expanded mode.
/// - The *item* fits on the line but the *separator* does not in flat mode.
///   Print the *item* in flat mode and the *separator* in expanded mode.
/// - The *item* fits on the line but the *separator* does not in flat **NOR** expanded mode.
///   Print the *item* and *separator* in expanded mode.
/// - The *item* does not fit on the line.
///   Print the *item* and *separator* in expanded mode.
fn print_fill_entries(
    &mut self,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
) -> PrintResult<()> {
    let args = stack.top();

    // It's already known that the content fit, print all items in flat mode.
    if self.state.measured_group_fits && args.mode().is_flat() {
        stack.push(TagKind::Fill, args.with_print_mode(PrintMode::Flat));
        return Ok(());
    }

    stack.push(TagKind::Fill, args);

    // Each iteration measures a run of pairs and then prints the pairs that
    // fit plus the pair at which the measurement stopped.
    while matches!(queue.top(), Some(FormatElement::Tag(Tag::StartEntry))) {
        let mut measurer = FitsMeasurer::new_flat(queue, stack, self);

        // The number of item/separator pairs that fit on the same line.
        let mut flat_pairs = 0usize;
        let mut item_fits = measurer.fill_item_fits()?;

        let last_pair_layout = if item_fits {
            // Measure the remaining pairs until the first item or separator that does not fit (or the end of the fill element).
            // Optimisation to avoid re-measuring the next-item twice:
            // * Once when measuring if the *item*, *separator*, *next-item* fit
            // * A second time when measuring if *next-item*, *separator*, *next-next-item* fit.
            loop {
                // Item that fits without a following separator.
                if !matches!(
                    measurer.queue.top(),
                    Some(FormatElement::Tag(Tag::StartEntry))
                ) {
                    break FillPairLayout::Flat;
                }

                let separator_fits = measurer.fill_separator_fits(PrintMode::Flat)?;

                // Item fits but the flat separator does not.
                if !separator_fits {
                    break FillPairLayout::ItemMaybeFlat;
                }

                // Last item/separator pair that both fit
                if !matches!(
                    measurer.queue.top(),
                    Some(FormatElement::Tag(Tag::StartEntry))
                ) {
                    break FillPairLayout::Flat;
                }

                item_fits = measurer.fill_item_fits()?;

                if item_fits {
                    flat_pairs += 1;
                } else {
                    // Item and separator both fit, but the next element doesn't.
                    // Print the separator in expanded mode and then re-measure if the item now
                    // fits in the next iteration of the outer loop.
                    break FillPairLayout::ItemFlatSeparatorExpanded;
                }
            }
        } else {
            // Neither item nor separator fit, print both in expanded mode.
            FillPairLayout::Expanded
        };

        measurer.finish();

        self.state.measured_group_fits = true;

        // Print all pairs that fit in flat mode.
        for _ in 0..flat_pairs {
            self.print_fill_item(queue, stack, args.with_print_mode(PrintMode::Flat))?;
            self.print_fill_separator(queue, stack, args.with_print_mode(PrintMode::Flat))?;
        }

        let item_mode = match last_pair_layout {
            FillPairLayout::Flat | FillPairLayout::ItemFlatSeparatorExpanded => PrintMode::Flat,
            FillPairLayout::Expanded => PrintMode::Expanded,
            FillPairLayout::ItemMaybeFlat => {
                let mut measurer = FitsMeasurer::new_flat(queue, stack, self);
                // SAFETY: That the item fits is guaranteed by `ItemMaybeFlat`.
                // Re-measuring is required to get the measurer in the correct state for measuring the separator.
                assert!(measurer.fill_item_fits()?);
                let separator_fits = measurer.fill_separator_fits(PrintMode::Expanded)?;
                measurer.finish();

                if separator_fits {
                    PrintMode::Flat
                } else {
                    PrintMode::Expanded
                }
            }
        };

        self.print_fill_item(queue, stack, args.with_print_mode(item_mode))?;

        // A trailing separator only exists if another entry follows.
        if matches!(queue.top(), Some(FormatElement::Tag(Tag::StartEntry))) {
            let separator_mode = match last_pair_layout {
                FillPairLayout::Flat => PrintMode::Flat,
                FillPairLayout::ItemFlatSeparatorExpanded
                | FillPairLayout::Expanded
                | FillPairLayout::ItemMaybeFlat => PrintMode::Expanded,
            };

            // Push a new stack frame with print mode `Flat` for the case where the separator gets printed in expanded mode
            // but does contain a group to ensure that the group will measure "fits" with the "flat" versions of the next item/separator.
            stack.push(TagKind::Fill, args.with_print_mode(PrintMode::Flat));
            self.print_fill_separator(queue, stack, args.with_print_mode(separator_mode))?;
            stack.pop(TagKind::Fill)?;
        }
    }

    if queue.top() == Some(&FormatElement::Tag(Tag::EndFill)) {
        Ok(())
    } else {
        invalid_end_tag(TagKind::Fill, stack.top_kind())
    }
}
/// Semantic alias for [`Self::print_entry`] for fill items.
///
/// Fill items and separators are both encoded as regular `Entry` tags; only
/// the print args passed by [`Self::print_fill_entries`] differ.
fn print_fill_item(
    &mut self,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
    args: PrintElementArgs,
) -> PrintResult<()> {
    self.print_entry(queue, stack, args, TagKind::Entry)
}

/// Semantic alias for [`Self::print_entry`] for fill separators.
///
/// See [`Self::print_fill_item`] — the encoding is identical.
fn print_fill_separator(
    &mut self,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
    args: PrintElementArgs,
) -> PrintResult<()> {
    self.print_entry(queue, stack, args, TagKind::Entry)
}
/// Fully print an element (print the element itself and all its descendants)
///
/// Unlike [`print_element`], this function ensures the entire element has
/// been printed when it returns and the queue is back to its original state
fn print_entry(
    &mut self,
    queue: &mut PrintQueue<'a>,
    stack: &mut PrintCallStack,
    args: PrintElementArgs,
    kind: TagKind,
) -> PrintResult<()> {
    let start_entry = queue.top();

    // The queue must start with the entry's start tag; consume it and push
    // the caller-provided args for the entry's content.
    if queue
        .pop()
        .is_some_and(|start| start.tag_kind() == Some(kind))
    {
        stack.push(kind, args);
    } else {
        return invalid_start_tag(kind, start_entry);
    }

    // Track nesting of entry tags so the loop stops at the *matching* end
    // tag, not the first end tag encountered.
    let mut depth = 1u32;

    while let Some(element) = queue.pop() {
        match element {
            FormatElement::Tag(Tag::StartEntry | Tag::StartBestFittingEntry) => {
                depth += 1;
            }
            FormatElement::Tag(end_tag @ (Tag::EndEntry | Tag::EndBestFittingEntry)) => {
                depth -= 1;

                // Reached the end entry, pop the entry from the stack and return.
                if depth == 0 {
                    stack.pop(end_tag.kind())?;
                    return Ok(());
                }
            }
            _ => {
                // Fall through
            }
        }

        // Nested start/end tags (depth != 0) are printed like any other
        // element.
        self.print_element(stack, queue, element)?;
    }

    // The queue was exhausted before the matching end tag was seen.
    invalid_end_tag(kind, stack.top_kind())
}
/// Appends a single character to the output buffer while keeping the
/// line-width and line-start bookkeeping up to date.
fn print_char(&mut self, char: char) {
    if char == '\n' {
        // Emit the configured line ending instead of a raw `\n`.
        self.state
            .buffer
            .push_str(self.options.line_ending.as_str());

        self.state.line_width = 0;
        self.state.line_start = self.state.buffer.len();

        // Fit's only tests if groups up to the first line break fit.
        // The next group must re-measure if it still fits.
        self.state.measured_group_fits = false;
    } else {
        self.state.buffer.push(char);

        #[expect(clippy::cast_possible_truncation)]
        let char_width = if char == '\t' {
            // A tab advances the line by the configured indent width.
            self.options.indent_width.value()
        } else {
            // SAFETY: A u32 is sufficient to represent the width of a file <= 4GB
            char.width().unwrap_or(0) as u32
        };

        self.state.line_width += char_width;
    }
}
}
#[derive(Copy, Clone, Debug)]
enum FillPairLayout {
/// The item, separator, and next item fit. Print the first item and the separator in flat mode.
Flat,
/// The item and separator fit but the next element does not. Print the item in flat mode and
/// the separator in expanded mode.
ItemFlatSeparatorExpanded,
/// The item does not fit. Print the item and any potential separator in expanded mode.
Expanded,
/// The item fits but the separator does not in flat mode. If the separator fits in expanded mode then
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/stack.rs | crates/ruff_formatter/src/printer/stack.rs | /// A school book stack. Allows adding, removing, and inspecting elements at the back.
/// A school-book stack: elements are added, removed, and inspected at the back
/// (LIFO).
pub(super) trait Stack<T> {
    /// Removes the last element if any and returns it
    fn pop(&mut self) -> Option<T>;

    /// Pushes a new element at the back
    fn push(&mut self, value: T);

    /// Returns the last element if any
    fn top(&self) -> Option<&T>;
}
/// `Vec` is itself a stack; every trait method delegates to the matching
/// inherent operation.
impl<T> Stack<T> for Vec<T> {
    fn pop(&mut self) -> Option<T> {
        // Explicitly call the inherent method (inherent methods win over
        // trait methods during resolution anyway).
        Vec::pop(self)
    }

    fn push(&mut self, value: T) {
        Vec::push(self, value);
    }

    fn top(&self) -> Option<&T> {
        self.as_slice().last()
    }
}
/// A Stack that is stacked on top of another stack. Guarantees that the underlying stack remains unchanged.
#[derive(Debug, Clone)]
pub(super) struct StackedStack<'a, T> {
    /// Iterator over the (immutable) content of the original stack.
    original: std::slice::Iter<'a, T>,

    /// Items that have been pushed since the creation of this stack and aren't part of the `original` stack.
    stack: Vec<T>,
}

impl<'a, T> StackedStack<'a, T> {
    #[cfg(test)]
    pub(super) fn new(original: &'a [T]) -> Self {
        Self::with_vec(original, Vec::new())
    }

    /// Creates a new stack that uses `stack` for storing its elements.
    pub(super) fn with_vec(original: &'a [T], stack: Vec<T>) -> Self {
        let original = original.iter();
        Self { original, stack }
    }

    /// Consumes the stack, returning the vector holding the pushed elements
    /// so that its allocation can be reused.
    pub(super) fn into_vec(self) -> Vec<T> {
        let Self { stack, .. } = self;
        stack
    }
}
impl<T> Stack<T> for StackedStack<'_, T>
where
    T: Copy,
{
    fn pop(&mut self) -> Option<T> {
        // Prefer elements pushed onto this layer; once those are exhausted,
        // copy elements from the back of the original (read-only) stack.
        match self.stack.pop() {
            Some(value) => Some(value),
            None => self.original.next_back().copied(),
        }
    }

    fn push(&mut self, value: T) {
        // Pushes never touch the original stack.
        self.stack.push(value);
    }

    fn top(&self) -> Option<&T> {
        if let Some(last) = self.stack.last() {
            Some(last)
        } else {
            self.original.as_slice().last()
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::printer::stack::{Stack, StackedStack};

    #[test]
    fn restore_consumed_stack() {
        let original = vec![1, 2, 3];
        let mut restorable = StackedStack::new(&original);

        restorable.push(4);

        assert_eq!(restorable.pop(), Some(4));
        assert_eq!(restorable.pop(), Some(3));
        assert_eq!(restorable.pop(), Some(2));
        assert_eq!(restorable.pop(), Some(1));
        assert_eq!(restorable.pop(), None);

        // The borrowed stack must remain untouched.
        assert_eq!(original, vec![1, 2, 3]);
    }

    #[test]
    fn restore_partially_consumed_stack() {
        let original = vec![1, 2, 3];
        let mut restorable = StackedStack::new(&original);

        restorable.push(4);

        assert_eq!(restorable.pop(), Some(4));
        assert_eq!(restorable.pop(), Some(3));
        assert_eq!(restorable.pop(), Some(2));

        restorable.push(5);
        restorable.push(6);
        restorable.push(7);

        // Newly pushed elements come back in LIFO order, followed by the
        // remaining element of the original stack.
        assert_eq!(restorable.pop(), Some(7));
        assert_eq!(restorable.pop(), Some(6));
        assert_eq!(restorable.pop(), Some(5));
        assert_eq!(restorable.pop(), Some(1));
        assert_eq!(restorable.pop(), None);

        assert_eq!(original, vec![1, 2, 3]);
    }

    #[test]
    fn restore_stack() {
        let original = vec![1, 2, 3];
        let mut restorable = StackedStack::new(&original);

        restorable.push(4);
        restorable.push(5);
        restorable.push(6);
        restorable.push(7);

        assert_eq!(restorable.pop(), Some(7));
        assert_eq!(restorable.pop(), Some(6));
        assert_eq!(restorable.pop(), Some(5));
        // The pushed `4` and the original elements are still available.
        assert_eq!(restorable.pop(), Some(4));
        assert_eq!(restorable.pop(), Some(3));

        assert_eq!(original, vec![1, 2, 3]);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/queue.rs | crates/ruff_formatter/src/printer/queue.rs | use crate::format_element::tag::TagKind;
use crate::prelude::Tag;
use crate::printer::{invalid_end_tag, invalid_start_tag};
use crate::{FormatElement, PrintResult};
use std::fmt::Debug;
use std::iter::FusedIterator;
use std::marker::PhantomData;
/// Queue of [`FormatElement`]s.
pub(super) trait Queue<'a> {
    /// Pops the element at the end of the queue.
    fn pop(&mut self) -> Option<&'a FormatElement>;

    /// Returns the next element, not traversing into [`FormatElement::Interned`].
    fn top_with_interned(&self) -> Option<&'a FormatElement>;

    /// Returns the next element, recursively resolving the first element of [`FormatElement::Interned`].
    fn top(&self) -> Option<&'a FormatElement> {
        let mut top = self.top_with_interned();

        // Peek through (possibly nested) interned elements without consuming
        // anything from the queue.
        while let Some(FormatElement::Interned(interned)) = top {
            top = interned.first();
        }

        top
    }

    /// Queues a single element to process before the other elements in this queue.
    fn push(&mut self, element: &'a FormatElement) {
        self.extend_back(std::slice::from_ref(element));
    }

    /// Queues a slice of elements to process before the other elements in this queue.
    fn extend_back(&mut self, elements: &'a [FormatElement]);

    /// Removes top slice.
    fn pop_slice(&mut self) -> Option<&'a [FormatElement]>;

    /// Skips all content until it finds the corresponding end tag with the given kind.
    fn skip_content(&mut self, kind: TagKind)
    where
        Self: Sized,
    {
        let iter = self.iter_content(kind);

        for _ in iter {
            // consume whole iterator until end
        }
    }

    /// Iterates over all elements until it finds the matching end tag of the specified kind.
    fn iter_content<'q>(&'q mut self, kind: TagKind) -> QueueContentIterator<'a, 'q, Self>
    where
        Self: Sized,
    {
        QueueContentIterator::new(self, kind)
    }
}
/// Queue with the elements to print.
#[derive(Debug, Default, Clone)]
pub(super) struct PrintQueue<'a> {
    element_slices: Vec<std::slice::Iter<'a, FormatElement>>,
}

impl<'a> PrintQueue<'a> {
    pub(super) fn new(slice: &'a [FormatElement]) -> Self {
        // An empty input slice would only add a useless empty iterator;
        // start with no slices instead.
        let mut element_slices = Vec::new();
        if !slice.is_empty() {
            element_slices.push(slice.iter());
        }

        Self { element_slices }
    }
}
impl<'a> Queue<'a> for PrintQueue<'a> {
    fn pop(&mut self) -> Option<&'a FormatElement> {
        let elements = self.element_slices.last_mut()?;

        elements.next().or_else(
            // Cold path: the top slice is exhausted. Drop it and continue
            // with the slice underneath (slices are never pushed empty, see
            // `extend_back`).
            #[cold]
            || {
                self.element_slices.pop();
                let elements = self.element_slices.last_mut()?;
                elements.next()
            },
        )
    }

    fn top_with_interned(&self) -> Option<&'a FormatElement> {
        let mut slices = self.element_slices.iter().rev();
        let slice = slices.next()?;

        slice.as_slice().first().or_else(
            // Cold path: the top slice is exhausted; peek into the next one.
            #[cold]
            || {
                slices
                    .next()
                    .and_then(|next_elements| next_elements.as_slice().first())
            },
        )
    }

    fn extend_back(&mut self, elements: &'a [FormatElement]) {
        // Skip empty slices; they would only add a useless entry.
        if !elements.is_empty() {
            self.element_slices.push(elements.iter());
        }
    }

    /// Removes top slice.
    fn pop_slice(&mut self) -> Option<&'a [FormatElement]> {
        self.element_slices
            .pop()
            .map(|elements| elements.as_slice())
    }
}
/// Queue for measuring if an element fits on the line.
///
/// The queue is a view on top of the [`PrintQueue`] because no elements should be removed
/// from the [`PrintQueue`] while measuring.
#[must_use]
#[derive(Debug)]
pub(super) struct FitsQueue<'a, 'print> {
    queue: PrintQueue<'a>,
    rest_elements: std::slice::Iter<'print, std::slice::Iter<'a, FormatElement>>,
}

impl<'a, 'print> FitsQueue<'a, 'print> {
    pub(super) fn new(
        rest_queue: &'print PrintQueue<'a>,
        queue_vec: Vec<std::slice::Iter<'a, FormatElement>>,
    ) -> Self {
        // The private queue owns `queue_vec` (a reusable allocation); the
        // printer's own slices are only borrowed.
        let queue = PrintQueue {
            element_slices: queue_vec,
        };
        let rest_elements = rest_queue.element_slices.iter();

        Self {
            queue,
            rest_elements,
        }
    }

    /// Returns the inner vector so its allocation can be reused for the next
    /// measurement.
    pub(super) fn finish(self) -> Vec<std::slice::Iter<'a, FormatElement>> {
        self.queue.element_slices
    }
}
impl<'a> Queue<'a> for FitsQueue<'a, '_> {
    fn pop(&mut self) -> Option<&'a FormatElement> {
        self.queue.pop().or_else(
            // Cold path: the private queue ran dry; pull the next slice from
            // the borrowed print queue (back to front) without mutating it.
            #[cold]
            || {
                if let Some(next_slice) = self.rest_elements.next_back() {
                    self.queue.extend_back(next_slice.as_slice());
                    self.queue.pop()
                } else {
                    None
                }
            },
        )
    }

    fn top_with_interned(&self) -> Option<&'a FormatElement> {
        self.queue.top_with_interned().or_else(
            // Cold path: peek into the print queue's not-yet-copied slices.
            #[cold]
            || {
                if let Some(next_elements) = self.rest_elements.as_slice().last() {
                    next_elements.as_slice().first()
                } else {
                    None
                }
            },
        )
    }

    fn extend_back(&mut self, elements: &'a [FormatElement]) {
        // Skip empty slices; they would only add a useless entry.
        if !elements.is_empty() {
            self.queue.extend_back(elements);
        }
    }

    /// Removes top slice.
    fn pop_slice(&mut self) -> Option<&'a [FormatElement]> {
        self.queue.pop_slice().or_else(|| {
            self.rest_elements
                .next_back()
                .map(std::slice::Iter::as_slice)
        })
    }
}
/// Iterator yielding the content between a start tag of `kind` and its
/// matching end tag (see [`Queue::iter_content`]).
pub(super) struct QueueContentIterator<'a, 'q, Q: Queue<'a>> {
    /// The queue the content is drained from.
    queue: &'q mut Q,
    /// The tag kind whose matching end tag terminates the iteration.
    kind: TagKind,
    /// Current nesting depth of `kind` tags.
    depth: usize,
    lifetime: PhantomData<&'a ()>,
}

impl<'a, 'q, Q> QueueContentIterator<'a, 'q, Q>
where
    Q: Queue<'a>,
{
    fn new(queue: &'q mut Q, kind: TagKind) -> Self {
        // Depth starts at one — presumably the matching start tag was already
        // consumed by the caller (TODO confirm at the call sites).
        let depth = 1;
        Self {
            queue,
            kind,
            depth,
            lifetime: PhantomData,
        }
    }
}
impl<'a, Q> Iterator for QueueContentIterator<'a, '_, Q>
where
    Q: Queue<'a>,
{
    type Item = &'a FormatElement;

    fn next(&mut self) -> Option<Self::Item> {
        if self.depth == 0 {
            // The matching end tag was already consumed; the iterator is done.
            None
        } else {
            let mut top = self.queue.pop();

            // Interned elements are expanded into the queue so their children
            // are yielded individually.
            while let Some(FormatElement::Interned(interned)) = top {
                self.queue.extend_back(interned);
                top = self.queue.pop();
            }

            match top.expect("Missing end signal.") {
                element @ FormatElement::Tag(tag) if tag.kind() == self.kind => {
                    if tag.is_start() {
                        self.depth += 1;
                    } else {
                        self.depth -= 1;

                        // The outermost end tag terminates iteration and is
                        // consumed but not yielded.
                        if self.depth == 0 {
                            return None;
                        }
                    }

                    Some(element)
                }
                element => Some(element),
            }
        }
    }
}

// Once `depth` hits 0, `next` keeps returning `None`, so the iterator is fused.
impl<'a, Q> FusedIterator for QueueContentIterator<'a, '_, Q> where Q: Queue<'a> {}
/// A predicate determining when to end measuring if some content fits on the line.
///
/// Called for every [`element`](FormatElement) in the [`FitsQueue`] when measuring if a content
/// fits on the line. The measuring of the content ends after the first element [`element`](FormatElement) for which this
/// predicate returns `true` (similar to a take while iterator except that it takes while the predicate returns `false`).
pub(super) trait FitsEndPredicate {
    /// Returns `Ok(true)` when `element` marks the end of the measured content.
    fn is_end(&mut self, element: &FormatElement) -> PrintResult<bool>;
}

/// Filter that includes all elements until it reaches the end of the document.
pub(super) struct AllPredicate;

impl FitsEndPredicate for AllPredicate {
    fn is_end(&mut self, _element: &FormatElement) -> PrintResult<bool> {
        // Never ends early: measuring continues until the queue is exhausted.
        Ok(false)
    }
}
/// Filter that takes all elements between two matching [`Tag::StartEntry`] and [`Tag::EndEntry`] tags.
#[derive(Debug)]
pub(super) enum SingleEntryPredicate {
    /// Still inside the entry; `depth` tracks nested entry tags.
    Entry { depth: usize },
    /// The matching end tag has been seen.
    Done,
}

impl SingleEntryPredicate {
    pub(super) const fn is_done(&self) -> bool {
        matches!(self, Self::Done)
    }
}

impl Default for SingleEntryPredicate {
    fn default() -> Self {
        Self::Entry { depth: 0 }
    }
}
impl FitsEndPredicate for SingleEntryPredicate {
    fn is_end(&mut self, element: &FormatElement) -> PrintResult<bool> {
        let result = match self {
            SingleEntryPredicate::Done => true,
            SingleEntryPredicate::Entry { depth } => match element {
                FormatElement::Tag(Tag::StartEntry) => {
                    *depth += 1;
                    false
                }
                FormatElement::Tag(Tag::EndEntry) => {
                    // An end tag without any preceding start tag is malformed
                    // input.
                    if *depth == 0 {
                        return invalid_end_tag(TagKind::Entry, None);
                    }

                    *depth -= 1;

                    let is_end = *depth == 0;

                    // Remember completion so later calls keep returning `true`.
                    if is_end {
                        *self = SingleEntryPredicate::Done;
                    }

                    is_end
                }
                // Interned elements may wrap the entry; they are not stray
                // content.
                FormatElement::Interned(_) => false,
                element if *depth == 0 => {
                    // The very first element must open the entry.
                    return invalid_start_tag(TagKind::Entry, Some(element));
                }
                _ => false,
            },
        };

        Ok(result)
    }
}
#[cfg(test)]
mod tests {
    use crate::FormatElement;
    use crate::format_element::LineMode;
    use crate::prelude::Tag;
    use crate::printer::queue::{PrintQueue, Queue};

    #[test]
    fn extend_back_pop_last() {
        let elements = [FormatElement::Tag(Tag::StartEntry), FormatElement::Space];
        let mut queue = PrintQueue::new(&elements);
        assert_eq!(queue.pop(), Some(&FormatElement::Tag(Tag::StartEntry)));

        // Elements queued later are yielded before the remaining ones.
        queue.extend_back(&[FormatElement::Line(LineMode::SoftOrSpace)]);
        assert_eq!(
            queue.pop(),
            Some(&FormatElement::Line(LineMode::SoftOrSpace))
        );

        assert_eq!(queue.pop(), Some(&FormatElement::Space));
        assert_eq!(queue.pop(), None);
    }

    #[test]
    fn extend_back_empty_queue() {
        let elements = [FormatElement::Tag(Tag::StartEntry), FormatElement::Space];
        let mut queue = PrintQueue::new(&elements);
        assert_eq!(queue.pop(), Some(&FormatElement::Tag(Tag::StartEntry)));
        assert_eq!(queue.pop(), Some(&FormatElement::Space));

        // Extending a fully drained queue works as well.
        queue.extend_back(&[FormatElement::Line(LineMode::SoftOrSpace)]);
        assert_eq!(
            queue.pop(),
            Some(&FormatElement::Line(LineMode::SoftOrSpace))
        );

        assert_eq!(queue.pop(), None);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_formatter/src/printer/printer_options/mod.rs | crates/ruff_formatter/src/printer/printer_options/mod.rs | use crate::{FormatOptions, IndentStyle, IndentWidth, LineWidth};
/// Options that affect how the [`crate::Printer`] prints the format tokens
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct PrinterOptions {
    /// Width of a single tab character (does it equal 2, 4, ... spaces?)
    pub indent_width: IndentWidth,

    /// Whether the printer should use tabs or spaces to indent code.
    pub indent_style: IndentStyle,

    /// The maximum width of a line before the printer tries to break it
    /// (see [`LineWidth`] for the default).
    pub line_width: LineWidth,

    /// The type of line ending to apply to the printed input
    pub line_ending: LineEnding,
}
impl<'a, O> From<&'a O> for PrinterOptions
where
    O: FormatOptions,
{
    fn from(options: &'a O) -> Self {
        // NOTE(review): only the indent style and line width are copied from
        // `options`; `indent_width` and `line_ending` stay at their defaults.
        // Confirm this is intentional for all `FormatOptions` implementations.
        PrinterOptions::default()
            .with_indent(options.indent_style())
            .with_line_width(options.line_width())
    }
}
impl PrinterOptions {
    /// Sets the maximum line width.
    #[must_use]
    pub fn with_line_width(self, width: LineWidth) -> Self {
        Self {
            line_width: width,
            ..self
        }
    }

    /// Sets whether tabs or spaces are used for indentation.
    #[must_use]
    pub fn with_indent(self, style: IndentStyle) -> Self {
        Self {
            indent_style: style,
            ..self
        }
    }

    /// Sets the rendered width of a single tab / indent level.
    #[must_use]
    pub fn with_tab_width(self, width: IndentWidth) -> Self {
        Self {
            indent_width: width,
            ..self
        }
    }

    pub(crate) fn indent_style(&self) -> IndentStyle {
        self.indent_style
    }

    /// Width of an indent in characters.
    pub(super) const fn indent_width(&self) -> u32 {
        self.indent_width.value()
    }
}
/// The maximum width, in characters, that the printer tries to fit on a line.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct PrintWidth(u16);

impl PrintWidth {
    /// Creates a print width from a raw character count.
    pub fn new(width: u16) -> Self {
        PrintWidth(width)
    }
}
impl Default for PrintWidth {
fn default() -> Self {
LineWidth::default().into()
}
}
impl From<LineWidth> for PrintWidth {
    fn from(width: LineWidth) -> Self {
        // `LineWidth` converts losslessly into its raw `u16` character count.
        let chars: u16 = width.into();
        Self(chars)
    }
}
impl From<PrintWidth> for u32 {
fn from(width: PrintWidth) -> Self {
u32::from(width.0)
}
}
impl From<PrintWidth> for u16 {
fn from(width: PrintWidth) -> Self {
width.0
}
}
/// Configures whether the formatter and printer generate a source map that allows mapping
/// positions in the source document to positions in the formatted code.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum SourceMapGeneration {
    /// The formatter generates no source map.
    #[default]
    Disabled,

    /// The formatter generates a source map that allows mapping positions in the source document
    /// to positions in the formatted document. The ability to map positions is useful for range formatting
    /// or when trying to identify where to move the cursor so that it matches its position in the source document.
    Enabled,
}

impl SourceMapGeneration {
    pub const fn is_enabled(self) -> bool {
        // Only two variants exist, so "enabled" is exactly "not disabled".
        !self.is_disabled()
    }

    pub const fn is_disabled(self) -> bool {
        matches!(self, SourceMapGeneration::Disabled)
    }
}
/// The line-ending sequence the printer writes for every line break.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum LineEnding {
    /// Line Feed only (\n), common on Linux and macOS as well as inside git repos
    #[default]
    LineFeed,

    /// Carriage Return + Line Feed characters (\r\n), common on Windows
    CarriageReturnLineFeed,

    /// Carriage Return character only (\r), used very rarely
    CarriageReturn,
}

impl LineEnding {
    /// Returns the character sequence written to the output for this line ending.
    #[inline]
    pub const fn as_str(&self) -> &'static str {
        match *self {
            Self::LineFeed => "\n",
            Self::CarriageReturnLineFeed => "\r\n",
            Self::CarriageReturn => "\r",
        }
    }

    /// Returns the string used to configure this line ending.
    ///
    /// See [`LineEnding::as_str`] for the actual string representation of the line ending.
    #[inline]
    pub const fn as_setting_str(&self) -> &'static str {
        match *self {
            Self::LineFeed => "lf",
            Self::CarriageReturnLineFeed => "crlf",
            Self::CarriageReturn => "cr",
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_memory_usage/src/lib.rs | crates/ruff_memory_usage/src/lib.rs | use std::cell::RefCell;
use get_size2::{GetSize, StandardTracker};
use ordermap::{OrderMap, OrderSet};
thread_local! {
    /// Thread-local memory-usage tracker used by [`heap_size`] to avoid
    /// double-counting shared allocations. `None` when no tracker is attached.
    // rustfmt: the original was missing the space before `=`.
    pub static TRACKER: RefCell<Option<StandardTracker>> = const { RefCell::new(None) };
}
/// RAII guard that restores the previously-attached tracker (or `None`) into
/// the thread-local `TRACKER` slot when dropped.
struct TrackerGuard(Option<StandardTracker>);

impl Drop for TrackerGuard {
    fn drop(&mut self) {
        // Put the saved tracker back; running on drop means this also
        // happens if the wrapped closure panics.
        TRACKER.set(self.0.take());
    }
}
/// Runs `f` with `tracker` installed as this thread's tracker, restoring the
/// previously-attached tracker afterwards (via `TrackerGuard`, so the restore
/// also happens on panic).
pub fn attach_tracker<R>(tracker: StandardTracker, f: impl FnOnce() -> R) -> R {
    // Save the previously attached tracker so nested calls compose.
    let prev = TRACKER.replace(Some(tracker));
    let _guard = TrackerGuard(prev);
    f()
}
/// Borrows this thread's tracker (if one is attached) and passes it to `f`.
fn with_tracker<F, R>(f: F) -> R
where
    F: FnOnce(Option<&mut StandardTracker>) -> R,
{
    TRACKER.with_borrow_mut(|tracker| f(tracker.as_mut()))
}
/// Returns the memory usage of the provided object, using a global tracker to avoid
/// double-counting shared objects.
pub fn heap_size<T: GetSize>(value: &T) -> usize {
with_tracker(|tracker| {
if let Some(tracker) = tracker {
value.get_heap_size_with_tracker(tracker).0
} else {
value.get_heap_size()
}
})
}
/// An implementation of [`GetSize::get_heap_size`] for [`OrderSet`].
pub fn order_set_heap_size<T: GetSize, S>(set: &OrderSet<T, S>) -> usize {
    // Count the table's slot storage plus the heap usage of each element.
    let elements: usize = set.iter().map(heap_size).sum();
    set.capacity() * T::get_stack_size() + elements
}
/// An implementation of [`GetSize::get_heap_size`] for [`OrderMap`].
pub fn order_map_heap_size<K: GetSize, V: GetSize, S>(map: &OrderMap<K, V, S>) -> usize {
    // Count the table's slot storage plus the heap usage of every entry.
    let slot_size = K::get_stack_size() + V::get_stack_size();
    let entries: usize = map
        .iter()
        .map(|(key, value)| heap_size(key) + heap_size(value))
        .sum();
    map.capacity() * slot_size + entries
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/src/lib.rs | crates/ruff_python_ast_integration_tests/src/lib.rs | rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false | |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/visitor.rs | crates/ruff_python_ast_integration_tests/tests/visitor.rs | use std::fmt::{Debug, Write};
use insta::assert_snapshot;
use ruff_python_ast::visitor::{
Visitor, walk_alias, walk_bytes_literal, walk_comprehension, walk_except_handler, walk_expr,
walk_f_string, walk_interpolated_string_element, walk_keyword, walk_match_case, walk_parameter,
walk_parameters, walk_pattern, walk_stmt, walk_string_literal, walk_t_string, walk_type_param,
walk_with_item,
};
use ruff_python_ast::{
self as ast, Alias, AnyNodeRef, BoolOp, BytesLiteral, CmpOp, Comprehension, ExceptHandler,
Expr, FString, InterpolatedStringElement, Keyword, MatchCase, Operator, Parameter, Parameters,
Pattern, Stmt, StringLiteral, TString, TypeParam, UnaryOp, WithItem,
};
use ruff_python_parser::{Mode, ParseOptions, parse};
// Snapshot tests: each test parses a small Python snippet and records the
// order in which `RecordVisitor` visits the AST nodes (see `trace_visitation`).
// The raw-string sources below are kept byte-for-byte — changing them would
// invalidate the insta snapshots.

#[test]
fn function_arguments() {
    let source = r"def a(b, c,/, d, e = 20, *args, named=5, other=20, **kwargs): pass";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn function_positional_only_with_default() {
    let source = r"def a(b, c = 34,/, e = 20, *args): pass";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// Chained comparisons share a single `Compare` node.
#[test]
fn compare() {
    let source = r"4 < x < 5";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn list_comprehension() {
    let source = "[x for x in numbers]";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn dict_comprehension() {
    let source = "{x: x**2 for x in numbers}";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn set_comprehension() {
    let source = "{x for x in numbers}";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// Covers positional and keyword sub-patterns of class patterns.
#[test]
fn match_class_pattern() {
    let source = r"
match x:
case Point2D(0, 0):
...
case Point3D(x=0, y=0, z=0):
...
";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// Decorators on both function and class definitions.
#[test]
fn decorators() {
    let source = r"
@decorator
def a():
pass
@test
class A:
pass
";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// PEP 695 type parameters in the three positions they can occur.
#[test]
fn type_aliases() {
    let source = r"type X[T: str, U, *Ts, **P] = list[T]";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn class_type_parameters() {
    let source = r"class X[T: str, U, *Ts, **P]: ...";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn function_type_parameters() {
    let source = r"def X[T: str, U, *Ts, **P](): ...";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// Implicitly concatenated literals keep one node per part.
#[test]
fn string_literals() {
    let source = r"'a' 'b' 'c'";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

#[test]
fn bytes_literals() {
    let source = r"b'a' b'b' b'c'";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// f-strings including a nested interpolation in the format spec.
#[test]
fn f_strings() {
    let source = r"'pre' f'foo {bar:.{x}f} baz'";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}

// Template strings (PEP 750), mirroring the f-string test.
#[test]
fn t_strings() {
    let source = r"t'pre' t'foo {bar:.{x}f} baz'";
    let trace = trace_visitation(source);
    assert_snapshot!(trace);
}
/// Parses `source` as a Python module and returns the visitation trace
/// recorded by [`RecordVisitor`].
fn trace_visitation(source: &str) -> String {
    let parsed = parse(source, ParseOptions::from(Mode::Module)).unwrap();

    let mut recorder = RecordVisitor::default();
    walk_module(&mut recorder, parsed.syntax());

    recorder.output
}
/// Dispatches `visitor` over the top-level contents of `module`, since the
/// [`Visitor`] trait itself has no entry point for a `Mod` node.
fn walk_module<'a, V>(visitor: &mut V, module: &'a ast::Mod)
where
    V: Visitor<'a> + ?Sized,
{
    match module {
        // A module is a sequence of statements ...
        ast::Mod::Module(ast::ModModule {
            body,
            range: _,
            node_index: _,
        }) => {
            visitor.visit_body(body);
        }
        // ... while an expression "module" wraps a single expression.
        ast::Mod::Expression(ast::ModExpression {
            body,
            range: _,
            node_index: _,
        }) => visitor.visit_expr(body),
    }
}
/// Emits a `tree` with a node for every visited AST node (labelled by the AST node's kind)
/// and leaves for attributes.
#[derive(Default)]
struct RecordVisitor {
depth: usize,
output: String,
}
impl RecordVisitor {
fn enter_node<'a, T>(&mut self, node: T)
where
T: Into<AnyNodeRef<'a>>,
{
self.emit(&node.into().kind());
self.depth += 1;
}
fn exit_node(&mut self) {
self.depth -= 1;
}
fn emit(&mut self, text: &dyn Debug) {
for _ in 0..self.depth {
self.output.push_str(" ");
}
writeln!(self.output, "- {text:?}").unwrap();
}
}
impl Visitor<'_> for RecordVisitor {
fn visit_stmt(&mut self, stmt: &Stmt) {
self.enter_node(stmt);
walk_stmt(self, stmt);
self.exit_node();
}
fn visit_annotation(&mut self, expr: &Expr) {
self.enter_node(expr);
walk_expr(self, expr);
self.exit_node();
}
fn visit_expr(&mut self, expr: &Expr) {
self.enter_node(expr);
walk_expr(self, expr);
self.exit_node();
}
fn visit_bool_op(&mut self, bool_op: &BoolOp) {
self.emit(&bool_op);
}
fn visit_operator(&mut self, operator: &Operator) {
self.emit(&operator);
}
fn visit_unary_op(&mut self, unary_op: &UnaryOp) {
self.emit(&unary_op);
}
fn visit_cmp_op(&mut self, cmp_op: &CmpOp) {
self.emit(&cmp_op);
}
fn visit_comprehension(&mut self, comprehension: &Comprehension) {
self.enter_node(comprehension);
walk_comprehension(self, comprehension);
self.exit_node();
}
fn visit_except_handler(&mut self, except_handler: &ExceptHandler) {
self.enter_node(except_handler);
walk_except_handler(self, except_handler);
self.exit_node();
}
fn visit_parameters(&mut self, parameters: &Parameters) {
self.enter_node(parameters);
walk_parameters(self, parameters);
self.exit_node();
}
fn visit_parameter(&mut self, parameter: &Parameter) {
self.enter_node(parameter);
walk_parameter(self, parameter);
self.exit_node();
}
fn visit_keyword(&mut self, keyword: &Keyword) {
self.enter_node(keyword);
walk_keyword(self, keyword);
self.exit_node();
}
fn visit_alias(&mut self, alias: &Alias) {
self.enter_node(alias);
walk_alias(self, alias);
self.exit_node();
}
fn visit_with_item(&mut self, with_item: &WithItem) {
self.enter_node(with_item);
walk_with_item(self, with_item);
self.exit_node();
}
fn visit_match_case(&mut self, match_case: &MatchCase) {
self.enter_node(match_case);
walk_match_case(self, match_case);
self.exit_node();
}
fn visit_pattern(&mut self, pattern: &Pattern) {
self.enter_node(pattern);
walk_pattern(self, pattern);
self.exit_node();
}
fn visit_type_param(&mut self, type_param: &TypeParam) {
self.enter_node(type_param);
walk_type_param(self, type_param);
self.exit_node();
}
fn visit_string_literal(&mut self, string_literal: &StringLiteral) {
self.enter_node(string_literal);
walk_string_literal(self, string_literal);
self.exit_node();
}
fn visit_bytes_literal(&mut self, bytes_literal: &BytesLiteral) {
self.enter_node(bytes_literal);
walk_bytes_literal(self, bytes_literal);
self.exit_node();
}
fn visit_f_string(&mut self, f_string: &FString) {
self.enter_node(f_string);
walk_f_string(self, f_string);
self.exit_node();
}
fn visit_interpolated_string_element(&mut self, f_string_element: &InterpolatedStringElement) {
self.enter_node(f_string_element);
walk_interpolated_string_element(self, f_string_element);
self.exit_node();
}
fn visit_t_string(&mut self, t_string: &TString) {
self.enter_node(t_string);
walk_t_string(self, t_string);
self.exit_node();
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/stmt_if.rs | crates/ruff_python_ast_integration_tests/tests/stmt_if.rs | use ruff_python_ast::stmt_if::elif_else_range;
use ruff_python_parser::{ParseError, parse_module};
use ruff_text_size::TextSize;
#[test]
fn extract_elif_else_range() -> Result<(), ParseError> {
let contents = "if a:
...
elif b:
...
";
let parsed = parse_module(contents)?;
let if_stmt = parsed
.suite()
.first()
.expect("module should contain at least one statement")
.as_if_stmt()
.expect("first statement should be an `if` statement");
let range = elif_else_range(&if_stmt.elif_else_clauses[0], contents).unwrap();
assert_eq!(range.start(), TextSize::from(14));
assert_eq!(range.end(), TextSize::from(18));
let contents = "if a:
...
else:
...
";
let parsed = parse_module(contents)?;
let if_stmt = parsed
.suite()
.first()
.expect("module should contain at least one statement")
.as_if_stmt()
.expect("first statement should be an `if` statement");
let range = elif_else_range(&if_stmt.elif_else_clauses[0], contents).unwrap();
assert_eq!(range.start(), TextSize::from(14));
assert_eq!(range.end(), TextSize::from(18));
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/match_pattern.rs | crates/ruff_python_ast_integration_tests/tests/match_pattern.rs | use ruff_python_parser::parse_module;
#[test]
fn pattern_is_wildcard() {
let source_code = r"
match subject:
case _ as x: ...
case _ | _: ...
case _: ...
";
let parsed = parse_module(source_code).unwrap();
let cases = &parsed.syntax().body[0].as_match_stmt().unwrap().cases;
for case in cases {
assert!(case.pattern.is_wildcard());
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/comparable.rs | crates/ruff_python_ast_integration_tests/tests/comparable.rs | use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_parser::{ParseError, parse_expression};
#[track_caller]
fn assert_comparable(left: &str, right: &str) -> Result<(), ParseError> {
let left_parsed = parse_expression(left)?;
let right_parsed = parse_expression(right)?;
let left_compr = ComparableExpr::from(left_parsed.expr());
let right_compr = ComparableExpr::from(right_parsed.expr());
assert_eq!(left_compr, right_compr);
Ok(())
}
#[track_caller]
fn assert_noncomparable(left: &str, right: &str) -> Result<(), ParseError> {
let left_parsed = parse_expression(left)?;
let right_parsed = parse_expression(right)?;
let left_compr = ComparableExpr::from(left_parsed.expr());
let right_compr = ComparableExpr::from(right_parsed.expr());
assert_ne!(left_compr, right_compr);
Ok(())
}
#[test]
fn concatenated_strings_compare_equal() -> Result<(), ParseError> {
let split_contents = r#"'a' 'b' r'\n raw'"#;
let value_contents = r#"'ab\\n raw'"#;
assert_comparable(split_contents, value_contents)
}
#[test]
fn concatenated_bytes_compare_equal() -> Result<(), ParseError> {
let split_contents = r#"b'a' b'b'"#;
let value_contents = r#"b'ab'"#;
assert_comparable(split_contents, value_contents)
}
#[test]
fn concatenated_fstrings_compare_equal() -> Result<(), ParseError> {
let split_contents = r#"f"{foo!r} this" r"\n raw" f" and {bar!s} that""#;
let value_contents = r#"f"{foo!r} this\\n raw and {bar!s} that""#;
assert_comparable(split_contents, value_contents)
}
#[test]
fn concatenated_tstrings_compare_equal() -> Result<(), ParseError> {
let split_contents = r#"t"{foo!r} this" rt"\n raw" t" and {bar!s} that""#;
let value_contents = r#"t"{foo!r} this\\n raw and {bar!s} that""#;
assert_comparable(split_contents, value_contents)
}
#[test]
fn t_strings_literal_order_matters_compare_unequal() -> Result<(), ParseError> {
let interp_then_literal_contents = r#"t"{foo}bar""#;
let literal_then_interp_contents = r#"t"bar{foo}""#;
assert_noncomparable(interp_then_literal_contents, literal_then_interp_contents)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/parentheses.rs | crates/ruff_python_ast_integration_tests/tests/parentheses.rs | //! Tests for [`ruff_python_ast::tokens::parentheses_iterator`] and
//! [`ruff_python_ast::tokens::parenthesized_range`].
use ruff_python_ast::{
self as ast, Expr,
token::{parentheses_iterator, parenthesized_range},
};
use ruff_python_parser::parse_module;
#[test]
fn test_no_parentheses() {
let source = "x = 2 + 2";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
assert_eq!(result, None);
}
#[test]
fn test_single_parentheses() {
let source = "x = (2 + 2)";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "(2 + 2)");
}
#[test]
fn test_double_parentheses() {
let source = "x = ((2 + 2))";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "((2 + 2))");
}
#[test]
fn test_parentheses_with_whitespace() {
let source = "x = ( 2 + 2 )";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "( 2 + 2 )");
}
#[test]
fn test_parentheses_with_comments() {
let source = "x = ( # comment\n 2 + 2\n)";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "( # comment\n 2 + 2\n)");
}
#[test]
fn test_parenthesized_range_multiple() {
let source = "x = (((2 + 2)))";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "(((2 + 2)))");
}
#[test]
fn test_parentheses_iterator_multiple() {
let source = "x = (((2 + 2)))";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let ranges: Vec<_> =
parentheses_iterator(assign.value.as_ref().into(), Some(stmt.into()), tokens).collect();
assert_eq!(ranges.len(), 3);
assert_eq!(&source[ranges[0]], "(2 + 2)");
assert_eq!(&source[ranges[1]], "((2 + 2))");
assert_eq!(&source[ranges[2]], "(((2 + 2)))");
}
#[test]
fn test_call_arguments_not_counted() {
let source = "f(x)";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Expr(expr_stmt) = stmt else {
panic!("expected `Expr` statement, got {stmt:?}");
};
let Expr::Call(call) = expr_stmt.value.as_ref() else {
panic!("expected Call expression, got {:?}", expr_stmt.value);
};
let arg = call
.arguments
.args
.first()
.expect("call should have an argument");
let result = parenthesized_range(arg.into(), (&call.arguments).into(), tokens);
// The parentheses belong to the call, not the argument
assert_eq!(result, None);
}
#[test]
fn test_call_with_parenthesized_argument() {
let source = "f((x))";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Expr(expr_stmt) = stmt else {
panic!("expected Expr statement, got {stmt:?}");
};
let Expr::Call(call) = expr_stmt.value.as_ref() else {
panic!("expected `Call` expression, got {:?}", expr_stmt.value);
};
let arg = call
.arguments
.args
.first()
.expect("call should have an argument");
let result = parenthesized_range(arg.into(), (&call.arguments).into(), tokens);
let range = result.expect("should find parentheses around argument");
assert_eq!(&source[range], "(x)");
}
#[test]
fn test_multiline_with_parentheses() {
let source = "x = (\n 2 + 2 + 2\n)";
let parsed = parse_module(source).expect("should parse valid python");
let tokens = parsed.tokens();
let module = parsed.syntax();
let stmt = module.body.first().expect("module should have a statement");
let ast::Stmt::Assign(assign) = stmt else {
panic!("expected `Assign` statement, got {stmt:?}");
};
let result = parenthesized_range(assign.value.as_ref().into(), stmt.into(), tokens);
let range = result.expect("should find parentheses");
assert_eq!(&source[range], "(\n 2 + 2 + 2\n)");
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/identifier.rs | crates/ruff_python_ast_integration_tests/tests/identifier.rs | use ruff_python_ast::identifier;
use ruff_python_parser::{ParseError, parse_module};
use ruff_text_size::{TextRange, TextSize};
#[test]
fn extract_else_range() -> Result<(), ParseError> {
let contents = r"
for x in y:
pass
else:
pass
"
.trim();
let stmts = parse_module(contents)?.into_suite();
let stmt = stmts.first().unwrap();
let range = identifier::else_(stmt, contents).unwrap();
assert_eq!(&contents[range], "else");
assert_eq!(
range,
TextRange::new(TextSize::from(21), TextSize::from(25))
);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/source_order.rs | crates/ruff_python_ast_integration_tests/tests/source_order.rs | use std::fmt::{Debug, Write};
use insta::assert_snapshot;
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::{AnyNodeRef, BoolOp, CmpOp, Operator, Singleton, UnaryOp};
use ruff_python_parser::{Mode, ParseOptions, parse};
#[test]
fn function_arguments() {
let source = r"def a(b, c,/, d, e = 20, *args, named=5, other=20, **kwargs): pass";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn function_positional_only_with_default() {
let source = r"def a(b, c = 34,/, e = 20, *args): pass";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn compare() {
let source = r"4 < x < 5";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn list_comprehension() {
let source = "[x for x in numbers]";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn dict_comprehension() {
let source = "{x: x**2 for x in numbers}";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn set_comprehension() {
let source = "{x for x in numbers}";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn match_class_pattern() {
let source = r"
match x:
case Point2D(0, 0):
...
case Point3D(x=0, y=0, z=0):
...
";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn decorators() {
let source = r"
@decorator
def a():
pass
@test
class A:
pass
";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn type_aliases() {
let source = r"type X[T: str, U, *Ts, **P] = list[T]";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn class_type_parameters() {
let source = r"class X[T: str, U, *Ts, **P]: ...";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn function_type_parameters() {
let source = r"def X[T: str, U, *Ts, **P](): ...";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn string_literals() {
let source = r"'a' 'b' 'c'";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn bytes_literals() {
let source = r"b'a' b'b' b'c'";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn f_strings() {
let source = r"'pre' f'foo {bar:.{x}f} baz'";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
#[test]
fn t_strings() {
let source = r"t'pre' t'foo {bar:.{x}f} baz'";
let trace = trace_source_order_visitation(source);
assert_snapshot!(trace);
}
fn trace_source_order_visitation(source: &str) -> String {
let parsed = parse(source, ParseOptions::from(Mode::Module)).unwrap();
let mut visitor = RecordVisitor::default();
visitor.visit_mod(parsed.syntax());
visitor.output
}
/// Emits a `tree` with a node for every visited AST node (labelled by the AST node's kind)
/// and leaves for attributes.
#[derive(Default)]
struct RecordVisitor {
depth: usize,
output: String,
}
impl RecordVisitor {
fn emit(&mut self, text: &dyn Debug) {
for _ in 0..self.depth {
self.output.push_str(" ");
}
writeln!(self.output, "- {text:?}").unwrap();
}
}
impl<'a> SourceOrderVisitor<'a> for RecordVisitor {
fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
self.emit(&node.kind());
self.depth += 1;
TraversalSignal::Traverse
}
fn leave_node(&mut self, _node: AnyNodeRef<'a>) {
self.depth -= 1;
}
fn visit_singleton(&mut self, singleton: &Singleton) {
self.emit(&singleton);
}
fn visit_bool_op(&mut self, bool_op: &BoolOp) {
self.emit(&bool_op);
}
fn visit_operator(&mut self, operator: &Operator) {
self.emit(&operator);
}
fn visit_unary_op(&mut self, unary_op: &UnaryOp) {
self.emit(&unary_op);
}
fn visit_cmp_op(&mut self, cmp_op: &CmpOp) {
self.emit(&cmp_op);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_ast_integration_tests/tests/parenthesize.rs | crates/ruff_python_ast_integration_tests/tests/parenthesize.rs | use ruff_python_ast::parenthesize::parenthesized_range;
use ruff_python_parser::parse_expression;
use ruff_python_trivia::CommentRanges;
use ruff_text_size::TextRange;
#[test]
fn test_parenthesized_name() {
let source_code = r"(x) + 1";
let parsed = parse_expression(source_code).unwrap();
let bin_op = parsed.expr().as_bin_op_expr().unwrap();
let name = bin_op.left.as_ref();
let parenthesized = parenthesized_range(
name.into(),
bin_op.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, Some(TextRange::new(0.into(), 3.into())));
}
#[test]
fn test_non_parenthesized_name() {
let source_code = r"x + 1";
let parsed = parse_expression(source_code).unwrap();
let bin_op = parsed.expr().as_bin_op_expr().unwrap();
let name = bin_op.left.as_ref();
let parenthesized = parenthesized_range(
name.into(),
bin_op.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, None);
}
#[test]
fn test_parenthesized_argument() {
let source_code = r"f((a))";
let parsed = parse_expression(source_code).unwrap();
let call = parsed.expr().as_call_expr().unwrap();
let arguments = &call.arguments;
let argument = arguments.args.first().unwrap();
let parenthesized = parenthesized_range(
argument.into(),
arguments.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, Some(TextRange::new(2.into(), 5.into())));
}
#[test]
fn test_non_parenthesized_argument() {
let source_code = r"f(a)";
let parsed = parse_expression(source_code).unwrap();
let call = parsed.expr().as_call_expr().unwrap();
let arguments = &call.arguments;
let argument = arguments.args.first().unwrap();
let parenthesized = parenthesized_range(
argument.into(),
arguments.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, None);
}
#[test]
fn test_parenthesized_tuple_member() {
let source_code = r"(a, (b))";
let parsed = parse_expression(source_code).unwrap();
let tuple = parsed.expr().as_tuple_expr().unwrap();
let member = tuple.elts.last().unwrap();
let parenthesized = parenthesized_range(
member.into(),
tuple.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, Some(TextRange::new(4.into(), 7.into())));
}
#[test]
fn test_non_parenthesized_tuple_member() {
let source_code = r"(a, b)";
let parsed = parse_expression(source_code).unwrap();
let tuple = parsed.expr().as_tuple_expr().unwrap();
let member = tuple.elts.last().unwrap();
let parenthesized = parenthesized_range(
member.into(),
tuple.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, None);
}
#[test]
fn test_twice_parenthesized_name() {
let source_code = r"((x)) + 1";
let parsed = parse_expression(source_code).unwrap();
let bin_op = parsed.expr().as_bin_op_expr().unwrap();
let name = bin_op.left.as_ref();
let parenthesized = parenthesized_range(
name.into(),
bin_op.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, Some(TextRange::new(0.into(), 5.into())));
}
#[test]
fn test_twice_parenthesized_argument() {
let source_code = r"f(((a + 1)))";
let parsed = parse_expression(source_code).unwrap();
let call = parsed.expr().as_call_expr().unwrap();
let arguments = &call.arguments;
let argument = arguments.args.first().unwrap();
let parenthesized = parenthesized_range(
argument.into(),
arguments.into(),
&CommentRanges::default(),
source_code,
);
assert_eq!(parenthesized, Some(TextRange::new(2.into(), 11.into())));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/build.rs | crates/ty/build.rs | use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
fn main() {
// The workspace root directory is not available without walking up the tree
// https://github.com/rust-lang/cargo/issues/3946
let ruff_workspace_root = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap())
.join("..")
.join("..");
let ty_workspace_root = ruff_workspace_root.join("..");
version_info(&ty_workspace_root);
// If not in a git repository, do not attempt to retrieve commit information
let git_dir = ty_workspace_root.join(".git");
if git_dir.exists() {
commit_info(&git_dir, &ty_workspace_root, false);
} else {
// Try if we're inside the ruff repository and, if so, use that commit hash.
let git_dir = ruff_workspace_root.join(".git");
if git_dir.exists() {
commit_info(&git_dir, &ruff_workspace_root, true);
}
}
let target = std::env::var("TARGET").unwrap();
println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
/// Retrieve the version from the `dist-workspace.toml` file and set `TY_VERSION`.
fn version_info(workspace_root: &Path) {
let dist_file = workspace_root.join("dist-workspace.toml");
if !dist_file.exists() {
return;
}
println!("cargo:rerun-if-changed={}", dist_file.display());
let dist_file = fs::read_to_string(dist_file);
if let Ok(dist_file) = dist_file {
let lines = dist_file.lines();
for line in lines {
if line.starts_with("version =") {
let (_key, version) = line.split_once('=').unwrap();
println!(
"cargo::rustc-env=TY_VERSION={}",
version.trim().trim_matches('"')
);
break;
}
}
}
}
/// Retrieve commit information from the Git repository.
fn commit_info(git_dir: &Path, workspace_root: &Path, is_ruff: bool) {
if let Some(git_head_path) = git_head(git_dir) {
println!("cargo:rerun-if-changed={}", git_head_path.display());
let git_head_contents = fs::read_to_string(git_head_path);
if let Ok(git_head_contents) = git_head_contents {
// The contents are either a commit or a reference in the following formats
// - "<commit>" when the head is detached
// - "ref <ref>" when working on a branch
// If a commit, checking if the HEAD file has changed is sufficient
// If a ref, we need to add the head file for that ref to rebuild on commit
let mut git_ref_parts = git_head_contents.split_whitespace();
git_ref_parts.next();
if let Some(git_ref) = git_ref_parts.next() {
let git_ref_path = git_dir.join(git_ref);
println!("cargo:rerun-if-changed={}", git_ref_path.display());
}
}
}
let output = match Command::new("git")
.arg("log")
.arg("-1")
.arg("--date=short")
.arg("--abbrev=9")
.arg("--format=%H %h %cd %(describe:tags)")
.current_dir(workspace_root)
.output()
{
Ok(output) if output.status.success() => output,
_ => return,
};
let stdout = String::from_utf8(output.stdout).unwrap();
let mut parts = stdout.split_whitespace();
let mut next = || parts.next().unwrap();
let _commit_hash = next();
println!("cargo::rustc-env=TY_COMMIT_SHORT_HASH={}", next());
println!("cargo::rustc-env=TY_COMMIT_DATE={}", next());
// Describe can fail for some commits
// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
if let Some(describe) = parts.next() {
let mut describe_parts = describe.split('-');
let last_tag = describe_parts.next().unwrap();
println!(
"cargo::rustc-env=TY_LAST_TAG={ruff}{last_tag}",
ruff = if is_ruff { "ruff/" } else { "" }
);
// If this is the tagged commit, this component will be missing
println!(
"cargo::rustc-env=TY_LAST_TAG_DISTANCE={}",
describe_parts.next().unwrap_or("0")
);
}
}
fn git_head(git_dir: &Path) -> Option<PathBuf> {
// The typical case is a standard git repository.
let git_head_path = git_dir.join("HEAD");
if git_head_path.exists() {
return Some(git_head_path);
}
if !git_dir.is_file() {
return None;
}
// If `.git/HEAD` doesn't exist and `.git` is actually a file,
// then let's try to attempt to read it as a worktree. If it's
// a worktree, then its contents will look like this, e.g.:
//
// gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
//
// And the HEAD file we want to watch will be at:
//
// /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
let contents = fs::read_to_string(git_dir).ok()?;
let (label, worktree_path) = contents.split_once(':')?;
if label != "gitdir" {
return None;
}
let worktree_path = worktree_path.trim();
Some(PathBuf::from(worktree_path))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/lib.rs | crates/ty/src/lib.rs | mod args;
mod logging;
mod printer;
mod python_version;
mod version;
pub use args::Cli;
use ty_project::metadata::settings::TerminalSettings;
use ty_static::EnvVars;
use std::fmt::Write;
use std::process::{ExitCode, Termination};
use std::sync::Mutex;
use anyhow::Result;
use crate::args::{CheckCommand, Command, TerminalColor};
use crate::logging::{VerbosityLevel, setup_tracing};
use crate::printer::Printer;
use anyhow::{Context, anyhow};
use clap::{CommandFactory, Parser};
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use rayon::ThreadPoolBuilder;
use ruff_db::cancellation::{CancellationToken, CancellationTokenSource};
use ruff_db::diagnostic::{
Diagnostic, DiagnosticId, DisplayDiagnosticConfig, DisplayDiagnostics, Severity,
};
use ruff_db::files::File;
use ruff_db::max_parallelism;
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf};
use salsa::Database;
use ty_project::metadata::options::ProjectOptionsOverrides;
use ty_project::watch::ProjectWatcher;
use ty_project::{CollectReporter, Db, watch};
use ty_project::{ProjectDatabase, ProjectMetadata};
use ty_server::run_server;
pub fn run() -> anyhow::Result<ExitStatus> {
setup_rayon();
ruff_db::set_program_version(crate::version::version().to_string()).unwrap();
let args = wild::args_os();
let args = argfile::expand_args_from(args, argfile::parse_fromfile, argfile::PREFIX)
.context("Failed to read CLI arguments from file")?;
let args = Cli::parse_from(args);
match args.command {
Command::Server => run_server().map(|()| ExitStatus::Success),
Command::Check(check_args) => run_check(check_args),
Command::Version => version().map(|()| ExitStatus::Success),
Command::GenerateShellCompletion { shell } => {
use std::io::stdout;
shell.generate(&mut Cli::command(), &mut stdout());
Ok(ExitStatus::Success)
}
}
}
pub(crate) fn version() -> Result<()> {
let mut stdout = Printer::default().stream_for_requested_summary().lock();
let version_info = crate::version::version();
writeln!(stdout, "ty {}", &version_info)?;
Ok(())
}
fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
// Enabled ANSI colors on Windows 10.
#[cfg(windows)]
assert!(colored::control::set_virtual_terminal(true).is_ok());
set_colored_override(args.color);
let verbosity = args.verbosity.level();
let _guard = setup_tracing(verbosity, args.color.unwrap_or_default())?;
let printer = Printer::new(verbosity, args.no_progress);
tracing::debug!("Version: {}", version::version());
// The base path to which all CLI arguments are relative to.
let cwd = {
let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
SystemPathBuf::from_path_buf(cwd)
.map_err(|path| {
anyhow!(
"The current working directory `{}` contains non-Unicode characters. ty only supports Unicode paths.",
path.display()
)
})?
};
let project_path = args
.project
.as_ref()
.map(|project| {
if project.as_std_path().is_dir() {
Ok(SystemPath::absolute(project, &cwd))
} else {
Err(anyhow!(
"Provided project path `{project}` is not a directory"
))
}
})
.transpose()?
.unwrap_or_else(|| cwd.clone());
let check_paths: Vec<_> = args
.paths
.iter()
.map(|path| SystemPath::absolute(path, &cwd))
.collect();
let system = OsSystem::new(&cwd);
let watch = args.watch;
let exit_zero = args.exit_zero;
let config_file = args
.config_file
.as_ref()
.map(|path| SystemPath::absolute(path, &cwd));
let force_exclude = args.force_exclude();
let mut project_metadata = match &config_file {
Some(config_file) => {
ProjectMetadata::from_config_file(config_file.clone(), &project_path, &system)?
}
None => ProjectMetadata::discover(&project_path, &system)?,
};
project_metadata.apply_configuration_files(&system)?;
let project_options_overrides = ProjectOptionsOverrides::new(config_file, args.into_options());
project_metadata.apply_overrides(&project_options_overrides);
let mut db = ProjectDatabase::new(project_metadata, system)?;
let project = db.project();
project.set_verbose(&mut db, verbosity >= VerbosityLevel::Verbose);
project.set_force_exclude(&mut db, force_exclude);
if !check_paths.is_empty() {
project.set_included_paths(&mut db, check_paths);
}
let (main_loop, main_loop_cancellation_token) =
MainLoop::new(project_options_overrides, printer);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
ctrlc::set_handler(move || {
let mut lock = main_loop_cancellation_token.lock().unwrap();
if let Some(token) = lock.take() {
token.stop();
}
})?;
let exit_status = if watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)?
};
let mut stdout = printer.stream_for_requested_summary().lock();
match std::env::var(EnvVars::TY_MEMORY_REPORT).as_deref() {
Ok("short") => write!(stdout, "{}", db.salsa_memory_dump().display_short())?,
Ok("mypy_primer") => write!(stdout, "{}", db.salsa_memory_dump().display_mypy_primer())?,
Ok("full") => {
write!(stdout, "{}", db.salsa_memory_dump().display_full())?;
}
Ok(other) => {
tracing::warn!(
"Unknown value for `TY_MEMORY_REPORT`: `{other}`. Valid values are `short`, `mypy_primer`, and `full`."
);
}
Err(_) => {}
}
std::mem::forget(db);
if exit_zero {
Ok(ExitStatus::Success)
} else {
Ok(exit_status)
}
}
/// Process exit codes reported by the `ty` CLI.
#[derive(Copy, Clone)]
pub enum ExitStatus {
    /// The check ran to completion and produced nothing that counts as a failure.
    Success = 0,
    /// The check ran to completion but reported errors.
    Failure = 1,
    /// The invocation itself was invalid (e.g. the current directory no longer exists,
    /// incorrect CLI arguments, ...).
    Error = 2,
    /// ty itself failed (a panic, or any error that is not caused by the user using the
    /// program incorrectly or by transient environment errors).
    InternalError = 101,
}
impl ExitStatus {
pub const fn is_internal_error(self) -> bool {
matches!(self, ExitStatus::InternalError)
}
}
impl Termination for ExitStatus {
    /// Converts the status into the numeric exit code handed back to the OS.
    fn report(self) -> ExitCode {
        let code = self as u8;
        code.into()
    }
}
/// State of the `ty check` main loop: message channels, the optional file watcher,
/// and the user-facing output configuration.
struct MainLoop {
    /// Sender that can be used to send messages to the main loop.
    sender: crossbeam_channel::Sender<MainLoopMessage>,
    /// Receiver for the messages sent **to** the main loop.
    receiver: crossbeam_channel::Receiver<MainLoopMessage>,
    /// The file system watcher, if running in watch mode.
    watcher: Option<ProjectWatcher>,
    /// Interface for displaying information to the user.
    printer: Printer,
    /// CLI-provided option overrides, re-applied whenever file changes are processed.
    project_options_overrides: ProjectOptionsOverrides,
    /// Cancellation token that gets set by Ctrl+C.
    /// Used for long-running operations on the main thread. Operations on background threads
    /// use Salsa's cancellation mechanism.
    cancellation_token: CancellationToken,
}
impl MainLoop {
    /// Creates a new main loop together with the cancellation token that can be used
    /// to stop it (e.g. from the Ctrl+C handler).
    fn new(
        project_options_overrides: ProjectOptionsOverrides,
        printer: Printer,
    ) -> (Self, MainLoopCancellationToken) {
        let (sender, receiver) = crossbeam_channel::bounded(10);
        let cancellation_token_source = CancellationTokenSource::new();
        let cancellation_token = cancellation_token_source.token();
        (
            Self {
                sender: sender.clone(),
                receiver,
                watcher: None,
                project_options_overrides,
                printer,
                cancellation_token,
            },
            MainLoopCancellationToken {
                sender,
                source: cancellation_token_source,
            },
        )
    }
    /// Runs the loop in watch mode: re-checks the project whenever files change.
    ///
    /// Always returns `ExitStatus::Success`; in watch mode diagnostics don't
    /// determine the process exit code.
    fn watch(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
        tracing::debug!("Starting watch mode");
        let sender = self.sender.clone();
        let watcher = watch::directory_watcher(move |event| {
            sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
        })?;
        self.watcher = Some(ProjectWatcher::new(watcher, db));
        self.run(db)?;
        Ok(ExitStatus::Success)
    }
    /// Schedules the first check, then processes messages until the loop exits.
    fn run(self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
        self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
        let result = self.main_loop(db);
        tracing::debug!("Exiting main loop");
        result
    }
    /// The message-processing loop: spawns checks on the rayon pool, prints results,
    /// and applies file-system changes until an `Exit` message arrives or the
    /// channel is closed.
    fn main_loop(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
        tracing::debug!("Starting main loop");
        // Incremented for every applied batch of file changes; check results
        // computed against an older revision are discarded.
        let mut revision = 0u64;
        while let Ok(message) = self.receiver.recv() {
            match message {
                MainLoopMessage::CheckWorkspace => {
                    let db = db.clone();
                    let sender = self.sender.clone();
                    // Spawn a new task that checks the project. This needs to be done in a separate thread
                    // to prevent blocking the main loop here.
                    rayon::spawn(move || {
                        let mut reporter = IndicatifReporter::from(self.printer);
                        let bar = reporter.bar.clone();
                        match salsa::Cancelled::catch(|| {
                            db.check_with_reporter(&mut reporter);
                            reporter.bar.finish_and_clear();
                            reporter.collector.into_sorted(&db)
                        }) {
                            Ok(result) => {
                                // Send the result back to the main loop for printing.
                                sender
                                    .send(MainLoopMessage::CheckCompleted { result, revision })
                                    .unwrap();
                            }
                            Err(cancelled) => {
                                bar.finish_and_clear();
                                tracing::debug!("Check has been cancelled: {cancelled:?}");
                            }
                        }
                    });
                }
                MainLoopMessage::CheckCompleted {
                    result,
                    revision: check_revision,
                } => {
                    let terminal_settings = db.project().settings(db).terminal();
                    let display_config = DisplayDiagnosticConfig::default()
                        .format(terminal_settings.output_format.into())
                        .color(colored::control::SHOULD_COLORIZE.should_colorize())
                        .with_cancellation_token(Some(self.cancellation_token.clone()))
                        .show_fix_diff(true);
                    if check_revision == revision {
                        if db.project().files(db).is_empty() {
                            tracing::warn!("No python files found under the given path(s)");
                        }
                        // TODO: We should have an official flag to silence workspace diagnostics.
                        // Use the shared `EnvVars` constant for consistency with the
                        // memory-report handling after the loop.
                        if std::env::var(EnvVars::TY_MEMORY_REPORT).as_deref() == Ok("mypy_primer") {
                            return Ok(ExitStatus::Success);
                        }
                        let is_human_readable = terminal_settings.output_format.is_human_readable();
                        if result.is_empty() {
                            if is_human_readable {
                                writeln!(
                                    self.printer.stream_for_success_summary(),
                                    "{}",
                                    "All checks passed!".green().bold()
                                )?;
                            }
                            if self.watcher.is_none() {
                                return Ok(ExitStatus::Success);
                            }
                        } else {
                            let diagnostics_count = result.len();
                            let mut stdout = self.printer.stream_for_details().lock();
                            let exit_status =
                                exit_status_from_diagnostics(&result, terminal_settings);
                            // Only render diagnostics if they're going to be displayed, since doing
                            // so is expensive.
                            if stdout.is_enabled() {
                                write!(
                                    stdout,
                                    "{}",
                                    DisplayDiagnostics::new(db, &display_config, &result)
                                )?;
                            }
                            if !self.cancellation_token.is_cancelled() {
                                if is_human_readable {
                                    writeln!(
                                        self.printer.stream_for_failure_summary(),
                                        "Found {} diagnostic{}",
                                        diagnostics_count,
                                        if diagnostics_count > 1 { "s" } else { "" }
                                    )?;
                                }
                                if exit_status.is_internal_error() {
                                    tracing::warn!(
                                        "A fatal error occurred while checking some files. Not all project files were analyzed. See the diagnostics list above for details."
                                    );
                                }
                            }
                            if self.watcher.is_none() {
                                return Ok(exit_status);
                            }
                        }
                    } else {
                        tracing::debug!(
                            "Discarding check result for outdated revision: current: {revision}, result revision: {check_revision}"
                        );
                    }
                }
                MainLoopMessage::ApplyChanges(changes) => {
                    Printer::clear_screen()?;
                    revision += 1;
                    // Automatically cancels any pending queries and waits for them to complete.
                    db.apply_changes(changes, Some(&self.project_options_overrides));
                    if let Some(watcher) = self.watcher.as_mut() {
                        watcher.update(db);
                    }
                    self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
                }
                MainLoopMessage::Exit => {
                    // Cancel any pending queries and wait for them to complete.
                    db.trigger_cancellation();
                    return Ok(ExitStatus::Success);
                }
            }
            tracing::debug!("Waiting for next main loop message.");
        }
        Ok(ExitStatus::Success)
    }
}
/// Maps the diagnostics of a completed check onto the process exit status,
/// honoring the `error_on_warning` terminal setting.
fn exit_status_from_diagnostics(
    diagnostics: &[Diagnostic],
    terminal_settings: &TerminalSettings,
) -> ExitStatus {
    if diagnostics.is_empty() {
        return ExitStatus::Success;
    }
    // The most severe diagnostic decides the status.
    let max_severity = diagnostics
        .iter()
        .fold(Severity::Info, |acc, diagnostic| {
            acc.max(diagnostic.severity())
        });
    let io_error = diagnostics
        .iter()
        .any(|diagnostic| matches!(diagnostic.id(), DiagnosticId::Io));
    // An I/O problem turns into an invocation error unless something fatal happened.
    if io_error && !max_severity.is_fatal() {
        return ExitStatus::Error;
    }
    match max_severity {
        Severity::Info => ExitStatus::Success,
        Severity::Warning => {
            if terminal_settings.error_on_warning {
                ExitStatus::Failure
            } else {
                ExitStatus::Success
            }
        }
        Severity::Error => ExitStatus::Failure,
        Severity::Fatal => ExitStatus::InternalError,
    }
}
/// A progress reporter for `ty check`.
struct IndicatifReporter {
    /// Collects per-file diagnostics so they can be sorted and returned once the check is done.
    collector: CollectReporter,
    /// A reporter that is ready, containing a progress bar to report to.
    ///
    /// Initialization of the bar is deferred to [`ty_project::ProgressReporter::set_files`] so we
    /// do not initialize the bar too early as it may take a while to collect the number of files to
    /// process and we don't want to display an empty "0/0" bar.
    bar: indicatif::ProgressBar,
    /// Decides whether (and where) the progress bar is drawn.
    printer: Printer,
}
impl From<Printer> for IndicatifReporter {
    /// Builds a reporter whose progress bar starts out hidden; the bar only becomes
    /// visible once `set_files` attaches a draw target.
    fn from(printer: Printer) -> Self {
        let bar = indicatif::ProgressBar::hidden();
        let collector = CollectReporter::default();
        Self {
            collector,
            bar,
            printer,
        }
    }
}
impl ty_project::ProgressReporter for IndicatifReporter {
    /// Configures and shows the progress bar once the number of files to check is known.
    fn set_files(&mut self, files: usize) {
        self.collector.set_files(files);
        self.bar.set_length(files as u64);
        self.bar.set_message("Checking");
        self.bar.set_style(
            indicatif::ProgressStyle::with_template(
                "{msg:8.dim} {bar:60.green/dim} {pos}/{len} files",
            )
            .unwrap()
            .progress_chars("--"),
        );
        // Attach the draw target last so the bar only appears fully configured.
        self.bar.set_draw_target(self.printer.progress_target());
    }
    /// Records one finished file and advances the bar by one tick.
    fn report_checked_file(&self, db: &ProjectDatabase, file: File, diagnostics: &[Diagnostic]) {
        self.collector.report_checked_file(db, file, diagnostics);
        self.bar.inc(1);
    }
    /// Forwards file-independent diagnostics to the collector.
    fn report_diagnostics(&mut self, db: &ProjectDatabase, diagnostics: Vec<Diagnostic>) {
        self.collector.report_diagnostics(db, diagnostics);
    }
}
/// Handle used to stop the main loop from another thread (set up for the Ctrl+C handler).
#[derive(Debug)]
struct MainLoopCancellationToken {
    /// Channel back into the main loop, used to deliver the `Exit` message.
    sender: crossbeam_channel::Sender<MainLoopMessage>,
    /// Source of the token the main loop polls for long-running main-thread work.
    source: CancellationTokenSource,
}
impl MainLoopCancellationToken {
    /// Stops the main loop.
    ///
    /// Cancels in-flight main-thread work first, then asks the loop to exit so the
    /// `Exit` message is processed with cancellation already signalled.
    fn stop(self) {
        self.source.cancel();
        self.sender.send(MainLoopMessage::Exit).unwrap();
    }
}
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
    /// Schedule a new check of the whole project.
    CheckWorkspace,
    /// A background check finished.
    CheckCompleted {
        /// The diagnostics that were found during the check.
        result: Vec<Diagnostic>,
        /// The revision the check ran against; stale results are discarded by the loop.
        revision: u64,
    },
    /// File-system changes that should be applied before re-checking.
    ApplyChanges(Vec<watch::ChangeEvent>),
    /// Exit the main loop.
    Exit,
}
/// Applies the `--color` CLI choice to the global `colored` override.
///
/// When no explicit choice was made, the auto-detected behavior is left untouched.
fn set_colored_override(color: Option<TerminalColor>) {
    match color {
        None => {}
        Some(TerminalColor::Auto) => {
            colored::control::unset_override();
        }
        Some(TerminalColor::Always) => {
            colored::control::set_override(true);
        }
        Some(TerminalColor::Never) => {
            colored::control::set_override(false);
        }
    }
}
/// Initializes the global rayon thread pool to never use more than `TY_MAX_PARALLELISM` threads.
fn setup_rayon() {
    ThreadPoolBuilder::default()
        .num_threads(max_parallelism().get())
        // Use a reasonably large stack size to avoid running into stack overflows too easily. The
        // size was chosen in such a way as to still be able to handle large expressions involving
        // binary operators (x + x + … + x) both during the AST walk in semantic index building as
        // well as during type checking. Using this stack size, we can handle expressions
        // that are several times larger than the corresponding limits in existing type checkers.
        .stack_size(16 * 1024 * 1024)
        .build_global()
        .unwrap();
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/python_version.rs | crates/ty/src/python_version.rs | /// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
// NOTE: `PartialOrd`/`Ord` are derived, so the declaration order of the variants
// (oldest to newest) is what makes version comparisons come out right.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum PythonVersion {
    #[value(name = "3.7")]
    Py37,
    #[value(name = "3.8")]
    Py38,
    #[value(name = "3.9")]
    Py39,
    // `#[default]` keeps the CLI default in sync with the test below.
    #[default]
    #[value(name = "3.10")]
    Py310,
    #[value(name = "3.11")]
    Py311,
    #[value(name = "3.12")]
    Py312,
    #[value(name = "3.13")]
    Py313,
    #[value(name = "3.14")]
    Py314,
}
impl PythonVersion {
    /// Returns the version in its `major.minor` CLI spelling (e.g. `"3.12"`),
    /// matching the `#[value(name = ...)]` attributes above.
    const fn as_str(self) -> &'static str {
        match self {
            Self::Py37 => "3.7",
            Self::Py38 => "3.8",
            Self::Py39 => "3.9",
            Self::Py310 => "3.10",
            Self::Py311 => "3.11",
            Self::Py312 => "3.12",
            Self::Py313 => "3.13",
            Self::Py314 => "3.14",
        }
    }
}
impl std::fmt::Display for PythonVersion {
    /// Formats the version exactly as it is accepted on the command line.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
impl From<PythonVersion> for ruff_python_ast::PythonVersion {
    /// Converts the CLI enum into the equivalent AST-crate version constant.
    fn from(value: PythonVersion) -> Self {
        match value {
            PythonVersion::Py37 => Self::PY37,
            PythonVersion::Py38 => Self::PY38,
            PythonVersion::Py39 => Self::PY39,
            PythonVersion::Py310 => Self::PY310,
            PythonVersion::Py311 => Self::PY311,
            PythonVersion::Py312 => Self::PY312,
            PythonVersion::Py313 => Self::PY313,
            PythonVersion::Py314 => Self::PY314,
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::python_version::PythonVersion;
    #[test]
    fn same_default_as_python_version() {
        // The CLI default (`#[default]` on `Py310`) must stay in sync with the
        // default of the AST crate's `PythonVersion`.
        assert_eq!(
            ruff_python_ast::PythonVersion::from(PythonVersion::default()),
            ruff_python_ast::PythonVersion::default()
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/version.rs | crates/ty/src/version.rs | //! Code for representing ty's release version number.
use std::fmt;
/// Information about the git repository where ty was built from.
pub(crate) struct CommitInfo {
    /// Abbreviated hash of the commit ty was built from.
    short_commit_hash: String,
    /// Date of that commit. Format is produced by `build.rs`; presumably `YYYY-MM-DD`
    /// (see the formatting tests below) — confirm against `build.rs`.
    commit_date: String,
    /// Number of commits between the last tag and the built commit.
    commits_since_last_tag: u32,
    /// The most recent tag, if any (a leading `v` is stripped when used as a version).
    last_tag: Option<String>,
}
/// ty's version.
pub(crate) struct VersionInfo {
    /// ty's version, such as "0.5.1"
    version: String,
    /// Information about the git commit we may have been built from.
    ///
    /// `None` if not built from a git repo or if retrieval failed.
    commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: `<version>[+<commits>] (<commit> <date>)`
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
impl From<VersionInfo> for clap::builder::Str {
    /// Lets the version be passed directly to clap (e.g. `#[command(long_version = ...)]`).
    fn from(val: VersionInfo) -> Self {
        val.to_string().into()
    }
}
/// Returns information about ty's version.
///
/// All inputs are environment variables read at *compile* time; they are populated
/// by `build.rs`.
pub(crate) fn version() -> VersionInfo {
    // Environment variables are only read at compile-time
    macro_rules! option_env_str {
        ($name:expr) => {
            option_env!($name).map(|s| s.to_string())
        };
    }
    // Commit info is pulled from git and set by `build.rs`
    let commit_info = option_env_str!("TY_COMMIT_SHORT_HASH").map(|short_commit_hash| CommitInfo {
        short_commit_hash,
        commit_date: option_env_str!("TY_COMMIT_DATE").unwrap(),
        commits_since_last_tag: option_env_str!("TY_LAST_TAG_DISTANCE")
            .as_deref()
            .map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
        last_tag: option_env_str!("TY_LAST_TAG"),
    });
    // The version is pulled from `dist-workspace.toml` and set by `build.rs`
    let version = option_env_str!("TY_VERSION").unwrap_or_else(|| {
        // If missing, fall back to the last tag with a leading `v` stripped.
        commit_info
            .as_ref()
            .and_then(|info| {
                info.last_tag.as_ref().map(|tag| {
                    // `map_or_else` only clones the tag when there was no `v` prefix.
                    tag.strip_prefix("v")
                        .map_or_else(|| tag.clone(), std::string::ToString::to_string)
                })
            })
            // Lazily allocate the "unknown" fallback instead of always building it.
            .unwrap_or_else(|| "unknown".to_string())
    });
    VersionInfo {
        version,
        commit_info,
    }
}
#[cfg(test)]
mod tests {
    use insta::assert_snapshot;
    use super::{CommitInfo, VersionInfo};
    // No commit info: only the bare version is printed.
    #[test]
    fn version_formatting() {
        let version = VersionInfo {
            version: "0.0.0".to_string(),
            commit_info: None,
        };
        assert_snapshot!(version, @"0.0.0");
    }
    // Commit info without commits past the tag: no `+N` suffix.
    #[test]
    fn version_formatting_with_commit_info() {
        let version = VersionInfo {
            version: "0.0.0".to_string(),
            commit_info: Some(CommitInfo {
                short_commit_hash: "53b0f5d92".to_string(),
                commit_date: "2023-10-19".to_string(),
                commits_since_last_tag: 0,
                last_tag: None,
            }),
        };
        assert_snapshot!(version, @"0.0.0 (53b0f5d92 2023-10-19)");
    }
    // Commits past the tag show up as `+N` before the commit/date parenthetical.
    #[test]
    fn version_formatting_with_commits_since_last_tag() {
        let version = VersionInfo {
            version: "0.0.0".to_string(),
            commit_info: Some(CommitInfo {
                short_commit_hash: "53b0f5d92".to_string(),
                commit_date: "2023-10-19".to_string(),
                commits_since_last_tag: 24,
                last_tag: None,
            }),
        };
        assert_snapshot!(version, @"0.0.0+24 (53b0f5d92 2023-10-19)");
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/args.rs | crates/ty/src/args.rs | use crate::logging::Verbosity;
use crate::python_version::PythonVersion;
use clap::builder::Styles;
use clap::builder::styling::{AnsiColor, Effects};
use clap::error::ErrorKind;
use clap::{ArgAction, ArgMatches, Error, Parser};
use ruff_db::system::SystemPathBuf;
use ty_combine::Combine;
use ty_project::metadata::options::{EnvironmentOptions, Options, SrcOptions, TerminalOptions};
use ty_project::metadata::value::{RangedValue, RelativeGlobPattern, RelativePathBuf, ValueSource};
use ty_python_semantic::lint;
use ty_static::EnvVars;
// Configures Clap v3-style help menu colors:
// green bold section headers/usage, cyan literals, plain cyan placeholders.
const STYLES: Styles = Styles::styled()
    .header(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
    .placeholder(AnsiColor::Cyan.on_default());
/// Top-level CLI parser; dispatches to one of the `ty` subcommands.
#[derive(Debug, Parser)]
#[command(author, name = "ty", about = "An extremely fast Python type checker.")]
#[command(long_version = crate::version::version())]
#[command(styles = STYLES)]
pub struct Cli {
    #[command(subcommand)]
    pub(crate) command: Command,
}
// `CheckCommand` is much larger than the unit variants; boxing it isn't worth the
// indirection for a value that is parsed once per process.
#[allow(clippy::large_enum_variant)]
#[derive(Debug, clap::Subcommand)]
pub(crate) enum Command {
    /// Check a project for type errors.
    Check(CheckCommand),
    /// Start the language server
    Server,
    /// Display ty's version
    Version,
    /// Generate shell completion
    #[clap(hide = true)]
    GenerateShellCompletion { shell: clap_complete_command::Shell },
}
/// CLI arguments for `ty check`.
#[derive(Debug, Parser)]
#[expect(clippy::struct_excessive_bools)]
pub(crate) struct CheckCommand {
    /// List of files or directories to check.
    #[clap(
        help = "List of files or directories to check [default: the project root]",
        value_name = "PATH"
    )]
    pub paths: Vec<SystemPathBuf>,
    /// Run the command within the given project directory.
    ///
    /// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
    /// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
    ///
    /// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
    #[arg(long, value_name = "PROJECT")]
    pub(crate) project: Option<SystemPathBuf>,
    /// Path to your project's Python environment or interpreter.
    ///
    /// ty uses your Python environment to resolve third-party imports in your code.
    ///
    /// If you're using a project management tool such as uv or you have an activated Conda or virtual
    /// environment, you should not generally need to specify this option.
    ///
    /// This option can be used to point to virtual or system Python environments.
    #[arg(long, value_name = "PATH", alias = "venv")]
    pub(crate) python: Option<SystemPathBuf>,
    /// Custom directory to use for stdlib typeshed stubs.
    #[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
    pub(crate) typeshed: Option<SystemPathBuf>,
    /// Additional path to use as a module-resolution source (can be passed multiple times).
    ///
    /// This is an advanced option that should usually only be used for first-party or third-party
    /// modules that are not installed into your Python environment in a conventional way.
    /// Use `--python` to point ty to your Python environment if it is in an unusual location.
    #[arg(long, value_name = "PATH")]
    pub(crate) extra_search_path: Option<Vec<SystemPathBuf>>,
    /// Python version to assume when resolving types.
    ///
    /// The Python version affects allowed syntax, type definitions of the standard library, and
    /// type definitions of first- and third-party modules that are conditional on the Python version.
    ///
    /// If a version is not specified on the command line or in a configuration file,
    /// ty will try the following techniques in order of preference to determine a value:
    /// 1. Check for the `project.requires-python` setting in a `pyproject.toml` file
    ///    and use the minimum version from the specified range
    /// 2. Check for an activated or configured Python environment
    ///    and attempt to infer the Python version of that environment
    /// 3. Fall back to the latest stable Python version supported by ty (see `ty check --help` output)
    #[arg(long, value_name = "VERSION", alias = "target-version")]
    pub(crate) python_version: Option<PythonVersion>,
    /// Target platform to assume when resolving types.
    ///
    /// This is used to specialize the type of `sys.platform` and will affect the visibility
    /// of platform-specific functions and attributes. If the value is set to `all`, no
    /// assumptions are made about the target platform. If unspecified, the current system's
    /// platform will be used.
    #[arg(long, value_name = "PLATFORM", alias = "platform")]
    pub(crate) python_platform: Option<String>,
    #[clap(flatten)]
    pub(crate) verbosity: Verbosity,
    #[clap(flatten)]
    pub(crate) rules: RulesArg,
    #[clap(flatten)]
    pub(crate) config: ConfigsArg,
    /// The path to a `ty.toml` file to use for configuration.
    ///
    /// While ty configuration can be included in a `pyproject.toml` file, it is not allowed in this context.
    #[arg(long, env = EnvVars::TY_CONFIG_FILE, value_name = "PATH")]
    pub(crate) config_file: Option<SystemPathBuf>,
    /// The format to use for printing diagnostic messages.
    #[arg(long)]
    pub(crate) output_format: Option<OutputFormat>,
    /// Use exit code 1 if there are any warning-level diagnostics.
    #[arg(long, conflicts_with = "exit_zero", default_missing_value = "true", num_args=0..1)]
    pub(crate) error_on_warning: Option<bool>,
    /// Always use exit code 0, even when there are error-level diagnostics.
    #[arg(long)]
    pub(crate) exit_zero: bool,
    /// Watch files for changes and recheck files related to the changed files.
    #[arg(long, short = 'W')]
    pub(crate) watch: bool,
    /// Respect file exclusions via `.gitignore` and other standard ignore files.
    /// Use `--no-respect-ignore-files` to disable.
    #[arg(
        long,
        overrides_with("no_respect_ignore_files"),
        help_heading = "File selection",
        default_missing_value = "true",
        num_args = 0..1
    )]
    respect_ignore_files: Option<bool>,
    // Hidden negation of `--respect-ignore-files`; resolved in `into_options`.
    #[clap(long, overrides_with("respect_ignore_files"), hide = true)]
    no_respect_ignore_files: bool,
    /// Enforce exclusions, even for paths passed to ty directly on the command-line.
    /// Use `--no-force-exclude` to disable.
    #[arg(
        long,
        overrides_with("no_force_exclude"),
        help_heading = "File selection"
    )]
    force_exclude: bool,
    // Hidden negation of `--force-exclude`; resolved via `resolve_bool_arg`.
    #[clap(long, overrides_with("force_exclude"), hide = true)]
    no_force_exclude: bool,
    /// Glob patterns for files to exclude from type checking.
    ///
    /// Uses gitignore-style syntax to exclude files and directories from type checking.
    /// Supports patterns like `tests/`, `*.tmp`, `**/__pycache__/**`.
    #[arg(long, help_heading = "File selection")]
    exclude: Option<Vec<String>>,
    /// Control when colored output is used.
    #[arg(
        long,
        value_name = "WHEN",
        help_heading = "Global options",
        display_order = 1000
    )]
    pub(crate) color: Option<TerminalColor>,
    /// Hide all progress outputs.
    ///
    /// For example, spinners or progress bars.
    #[arg(global = true, long, value_parser = clap::builder::BoolishValueParser::new(), help_heading = "Global options")]
    pub no_progress: bool,
}
impl CheckCommand {
    /// Resolves the `--force-exclude` / `--no-force-exclude` flag pair (defaults to `false`).
    pub(crate) fn force_exclude(&self) -> bool {
        resolve_bool_arg(self.force_exclude, self.no_force_exclude).unwrap_or_default()
    }
    /// Converts the parsed CLI arguments into `Options`, merging in any `--config` overrides.
    pub(crate) fn into_options(self) -> Options {
        let rules = if self.rules.is_empty() {
            None
        } else {
            Some(
                self.rules
                    .into_iter()
                    .map(|(rule, level)| (RangedValue::cli(rule), RangedValue::cli(level)))
                    .collect(),
            )
        };
        // --no-respect-gitignore defaults to false and is set true by CLI flag. If passed, override config file
        // Otherwise, only pass this through if explicitly set (don't default to anything here to
        // make sure that doesn't take precedence over an explicitly-set config file value)
        let respect_ignore_files = self
            .no_respect_ignore_files
            .then_some(false)
            .or(self.respect_ignore_files);
        let options = Options {
            environment: Some(EnvironmentOptions {
                python_version: self
                    .python_version
                    .map(|version| RangedValue::cli(version.into())),
                python_platform: self
                    .python_platform
                    .map(|platform| RangedValue::cli(platform.into())),
                python: self.python.map(RelativePathBuf::cli),
                typeshed: self.typeshed.map(RelativePathBuf::cli),
                extra_paths: self.extra_search_path.map(|extra_search_paths| {
                    extra_search_paths
                        .into_iter()
                        .map(RelativePathBuf::cli)
                        .collect()
                }),
                ..EnvironmentOptions::default()
            }),
            terminal: Some(TerminalOptions {
                output_format: self
                    .output_format
                    .map(|output_format| RangedValue::cli(output_format.into())),
                error_on_warning: self.error_on_warning,
            }),
            src: Some(SrcOptions {
                respect_ignore_files,
                exclude: self.exclude.map(|excludes| {
                    RangedValue::cli(excludes.iter().map(RelativeGlobPattern::cli).collect())
                }),
                ..SrcOptions::default()
            }),
            rules,
            ..Options::default()
        };
        // Merge with options passed in via --config
        options.combine(self.config.into_options().unwrap_or_default())
    }
}
/// A list of rules to enable or disable with a given severity.
///
/// This type is used to parse the `--error`, `--warn`, and `--ignore` arguments
/// while preserving the order in which they were specified (arguments last override previous severities).
/// Rule names are carried through as plain strings here; presumably they are
/// validated later when the options are applied — confirm against `ty_project`.
#[derive(Debug)]
pub(crate) struct RulesArg(Vec<(String, lint::Level)>);
impl RulesArg {
    /// Whether no rule severities were specified on the command line.
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Consumes the argument, yielding `(rule name, level)` pairs in CLI order.
    fn into_iter(self) -> impl Iterator<Item = (String, lint::Level)> {
        self.0.into_iter()
    }
}
impl clap::FromArgMatches for RulesArg {
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
        let mut rules = Vec::new();
        // Collect every occurrence of each severity flag together with its
        // command-line position (`indices_of`), so that flags can be interleaved.
        for (level, arg_id) in [
            (lint::Level::Ignore, "ignore"),
            (lint::Level::Warn, "warn"),
            (lint::Level::Error, "error"),
        ] {
            let indices = matches.indices_of(arg_id).into_iter().flatten();
            let levels = matches.get_many::<String>(arg_id).into_iter().flatten();
            rules.extend(
                indices
                    .zip(levels)
                    .map(|(index, rule)| (index, rule, level)),
            );
        }
        // Sort by their index so that values specified later override earlier ones.
        rules.sort_by_key(|(index, _, _)| *index);
        Ok(Self(
            rules
                .into_iter()
                .map(|(_, rule, level)| (rule.to_owned(), level))
                .collect(),
        ))
    }
    fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
        // Re-parsing from scratch is sufficient; there is no partial state to keep.
        self.0 = Self::from_arg_matches(matches)?.0;
        Ok(())
    }
}
impl clap::Args for RulesArg {
    // Declares the three repeatable severity flags that `from_arg_matches` collects.
    fn augment_args(cmd: clap::Command) -> clap::Command {
        const HELP_HEADING: &str = "Enabling / disabling rules";
        cmd.arg(
            clap::Arg::new("error")
                .long("error")
                .action(ArgAction::Append)
                .help("Treat the given rule as having severity 'error'. Can be specified multiple times.")
                .value_name("RULE")
                .help_heading(HELP_HEADING),
        )
        .arg(
            clap::Arg::new("warn")
                .long("warn")
                .action(ArgAction::Append)
                .help("Treat the given rule as having severity 'warn'. Can be specified multiple times.")
                .value_name("RULE")
                .help_heading(HELP_HEADING),
        )
        .arg(
            clap::Arg::new("ignore")
                .long("ignore")
                .action(ArgAction::Append)
                .help("Disables the rule. Can be specified multiple times.")
                .value_name("RULE")
                .help_heading(HELP_HEADING),
        )
    }
    fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
        Self::augment_args(cmd)
    }
}
/// The diagnostic output format.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum OutputFormat {
    /// Print diagnostics verbosely, with context and helpful hints (default).
    ///
    /// Diagnostic messages may include additional context and
    /// annotations on the input to help understand the message.
    #[default]
    #[value(name = "full")]
    Full,
    /// Print diagnostics concisely, one per line.
    ///
    /// This will guarantee that each diagnostic is printed on
    /// a single line. Only the most important or primary aspects
    /// of the diagnostic are included. Contextual information is
    /// dropped.
    #[value(name = "concise")]
    Concise,
    /// Print diagnostics in the JSON format expected by GitLab Code Quality reports.
    #[value(name = "gitlab")]
    Gitlab,
    /// Print diagnostics in the format used by GitHub Actions workflow error annotations.
    // Doc comment placed before the attribute, consistent with the other variants.
    #[value(name = "github")]
    Github,
}
impl From<OutputFormat> for ty_project::metadata::options::OutputFormat {
    /// Maps the CLI enum onto the project-options enum one-to-one.
    fn from(format: OutputFormat) -> ty_project::metadata::options::OutputFormat {
        match format {
            OutputFormat::Full => Self::Full,
            OutputFormat::Concise => Self::Concise,
            OutputFormat::Gitlab => Self::Gitlab,
            OutputFormat::Github => Self::Github,
        }
    }
}
/// Control when colored output is used.
///
/// Applied to the global `colored` override via `set_colored_override` in `main`.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub(crate) enum TerminalColor {
    /// Display colors if the output goes to an interactive terminal.
    #[default]
    Auto,
    /// Always display colors.
    Always,
    /// Never display colors.
    Never,
}
/// A TOML `<KEY> = <VALUE>` pair
/// (such as you might find in a `ty.toml` configuration file)
/// overriding a specific configuration option.
///
/// Overrides of individual settings using this option always take precedence
/// over all configuration files.
///
/// `None` when no `--config` flag was passed.
#[derive(Debug, Clone)]
pub(crate) struct ConfigsArg(Option<Options>);
impl clap::FromArgMatches for ConfigsArg {
    fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
        // Parse each `--config` value as a standalone TOML snippet, then merge.
        let combined = matches
            .get_many::<String>("config")
            .into_iter()
            .flatten()
            .map(|s| {
                Options::from_toml_str(s, ValueSource::Cli)
                    .map_err(|err| Error::raw(ErrorKind::InvalidValue, err.to_string()))
            })
            .collect::<Result<Vec<_>, _>>()?
            .into_iter()
            // NOTE(review): `item.combine(acc)` folds later flags on top of earlier
            // ones — assumes `Combine` prefers `self`; verify against `ty_combine`.
            .reduce(|acc, item| item.combine(acc));
        Ok(Self(combined))
    }
    fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
        self.0 = Self::from_arg_matches(matches)?.0;
        Ok(())
    }
}
impl clap::Args for ConfigsArg {
    // Declares the repeatable `-c`/`--config` flag consumed by `from_arg_matches`.
    fn augment_args(cmd: clap::Command) -> clap::Command {
        cmd.arg(
            clap::Arg::new("config")
                .short('c')
                .long("config")
                .value_name("CONFIG_OPTION")
                .help("A TOML `<KEY> = <VALUE>` pair overriding a specific configuration option.")
                .long_help(
                    "
A TOML `<KEY> = <VALUE>` pair (such as you might find in a `ty.toml` configuration file)
overriding a specific configuration option.
Overrides of individual settings using this option always take precedence
over all configuration files.",
                )
                .action(ArgAction::Append),
        )
    }
    fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
        Self::augment_args(cmd)
    }
}
impl ConfigsArg {
    /// Returns the merged options from all `--config` flags, if any were given.
    pub(crate) fn into_options(self) -> Option<Options> {
        self.0
    }
}
/// Collapses a `--flag` / `--no-flag` pair into an optional boolean.
///
/// Returns `None` when neither flag was passed so callers can fall back to
/// configuration-file or default values.
fn resolve_bool_arg(yes: bool, no: bool) -> Option<bool> {
    match (yes, no) {
        (false, false) => None,
        (true, false) => Some(true),
        (false, true) => Some(false),
        (true, true) => unreachable!("Clap should make this impossible"),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/printer.rs | crates/ty/src/printer.rs | use std::io::StdoutLock;
use anyhow::Result;
use indicatif::ProgressDrawTarget;
use crate::logging::VerbosityLevel;
/// Decides which output streams and progress indicators are active for a given
/// verbosity configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub(crate) struct Printer {
    /// The user-requested verbosity level; gates every stream below.
    verbosity: VerbosityLevel,
    /// When set, progress bars/spinners are always hidden.
    no_progress: bool,
}
impl Printer {
    /// Creates a printer for the given verbosity and progress preference.
    pub(crate) fn new(verbosity: VerbosityLevel, no_progress: bool) -> Self {
        Self {
            verbosity,
            no_progress,
        }
    }
    /// Return the [`ProgressDrawTarget`] for this printer.
    ///
    /// The bar is only ever drawn at the default verbosity; `--no-progress`
    /// hides it unconditionally.
    pub(crate) fn progress_target(self) -> ProgressDrawTarget {
        if self.no_progress {
            return ProgressDrawTarget::hidden();
        }
        match self.verbosity {
            VerbosityLevel::Silent => ProgressDrawTarget::hidden(),
            VerbosityLevel::Quiet => ProgressDrawTarget::hidden(),
            VerbosityLevel::Default => ProgressDrawTarget::stderr(),
            // Hide the progress bar when in verbose mode.
            // Otherwise, it gets interleaved with log messages.
            VerbosityLevel::Verbose => ProgressDrawTarget::hidden(),
            VerbosityLevel::ExtraVerbose => ProgressDrawTarget::hidden(),
            VerbosityLevel::Trace => ProgressDrawTarget::hidden(),
        }
    }
    /// Return the [`Stdout`] stream for important messages.
    ///
    /// Unlike [`Self::stdout_general`], the returned stream will be enabled when
    /// [`VerbosityLevel::Quiet`] is used; only `Silent` disables it.
    fn stdout_important(self) -> Stdout {
        match self.verbosity {
            VerbosityLevel::Silent => Stdout::disabled(),
            VerbosityLevel::Quiet => Stdout::enabled(),
            VerbosityLevel::Default => Stdout::enabled(),
            VerbosityLevel::Verbose => Stdout::enabled(),
            VerbosityLevel::ExtraVerbose => Stdout::enabled(),
            VerbosityLevel::Trace => Stdout::enabled(),
        }
    }
/// Return the [`Stdout`] stream for general messages.
///
/// The returned stream will be disabled when [`VerbosityLevel::Quiet`] is used.
fn stdout_general(self) -> Stdout {
match self.verbosity {
VerbosityLevel::Silent => Stdout::disabled(),
VerbosityLevel::Quiet => Stdout::disabled(),
VerbosityLevel::Default => Stdout::enabled(),
VerbosityLevel::Verbose => Stdout::enabled(),
VerbosityLevel::ExtraVerbose => Stdout::enabled(),
VerbosityLevel::Trace => Stdout::enabled(),
}
}
/// Return the [`Stdout`] stream for a summary message that was explicitly requested by the
/// user.
///
/// For example, in `ty version` the user has requested the version information and we should
/// display it even if [`VerbosityLevel::Quiet`] is used. Or, in `ty check`, if the
/// `TY_MEMORY_REPORT` variable has been set, we should display the memory report because the
/// user has opted-in to display.
pub(crate) fn stream_for_requested_summary(self) -> Stdout {
self.stdout_important()
}
/// Return the [`Stdout`] stream for a summary message on failure.
///
/// For example, in `ty check`, this would be used for the message indicating the number of
/// diagnostics found. The failure summary should capture information that is not reflected in
/// the exit code.
pub(crate) fn stream_for_failure_summary(self) -> Stdout {
self.stdout_important()
}
/// Return the [`Stdout`] stream for a summary message on success.
///
/// For example, in `ty check`, this would be used for the message indicating that no diagnostic
/// were found. The success summary does not capture important information for users that have
/// opted-in to [`VerbosityLevel::Quiet`].
pub(crate) fn stream_for_success_summary(self) -> Stdout {
self.stdout_general()
}
/// Return the [`Stdout`] stream for detailed messages.
///
/// For example, in `ty check`, this would be used for the diagnostic output.
pub(crate) fn stream_for_details(self) -> Stdout {
self.stdout_general()
}
pub(crate) fn clear_screen() -> Result<()> {
clearscreen::clear()?;
Ok(())
}
}
/// Whether a [`Stdout`] stream actually forwards writes to standard output.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum StreamStatus {
    /// Writes are forwarded to stdout.
    Enabled,
    /// Writes are silently discarded.
    Disabled,
}
/// A stdout wrapper that can be disabled and can optionally hold a long-lived
/// stdout lock (see [`Stdout::lock`]) to avoid re-locking on every write.
#[derive(Debug)]
pub(crate) struct Stdout {
    // Whether writes are forwarded or discarded.
    status: StreamStatus,
    // A held stdout lock, if `lock()` was called while enabled.
    lock: Option<StdoutLock<'static>>,
}
impl Stdout {
    /// A stream that forwards writes to standard output.
    fn enabled() -> Self {
        Self {
            status: StreamStatus::Enabled,
            lock: None,
        }
    }

    /// A stream that silently discards all writes.
    fn disabled() -> Self {
        Self {
            status: StreamStatus::Disabled,
            lock: None,
        }
    }

    /// Acquires a long-lived stdout lock (when enabled) so subsequent writes
    /// don't re-lock stdout every time.
    pub(crate) fn lock(mut self) -> Self {
        if self.is_enabled() {
            // Release any previously held lock before acquiring a new one;
            // locking stdout twice from the same thread would deadlock.
            self.lock.take();
            self.lock = Some(std::io::stdout().lock());
        } else {
            self.lock = None;
        }
        self
    }

    /// The writer to use: the held lock if present, otherwise a fresh handle.
    fn handle(&mut self) -> Box<dyn std::io::Write + '_> {
        if let Some(lock) = self.lock.as_mut() {
            Box::new(lock)
        } else {
            Box::new(std::io::stdout())
        }
    }

    pub(crate) fn is_enabled(&self) -> bool {
        self.status == StreamStatus::Enabled
    }
}
impl std::fmt::Write for Stdout {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
match self.status {
StreamStatus::Enabled => {
let _ = write!(self.handle(), "{s}");
Ok(())
}
StreamStatus::Disabled => Ok(()),
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/main.rs | crates/ty/src/main.rs | use colored::Colorize;
use std::io;
use ty::{ExitStatus, run};
// Use jemalloc as the global allocator on the platform/architecture
// combinations listed below. The `cfg` excludes operating systems where the
// override is not applied and restricts it to 64-bit architectures.
// NOTE(review): presumably chosen for allocator performance — confirm against
// the workspace's allocator policy.
#[cfg(all(
    not(target_os = "macos"),
    not(target_os = "windows"),
    not(target_os = "openbsd"),
    not(target_os = "aix"),
    not(target_os = "android"),
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "riscv64"
    )
))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
/// CLI entry point: runs ty and renders any top-level error to stderr.
pub fn main() -> ExitStatus {
    match run() {
        Ok(status) => status,
        Err(error) => {
            use io::Write;

            // Use `writeln!` instead of `eprintln!` to avoid panicking when
            // the stderr pipe is broken.
            let mut stderr = io::stderr().lock();

            // This communicates that this isn't a linter error but ty itself
            // hard-errored for some reason (e.g. failed to resolve the
            // configuration).
            writeln!(stderr, "{}", "ty failed".red().bold()).ok();

            // Usually there's a single error, but e.g. for I/O errors during
            // configuration resolution, chaining causes is helpful
            // ("resolving configuration failed" ->
            // "failed to read file: subdir/pyproject.toml").
            for cause in error.chain() {
                // Exit "gracefully" on broken pipe errors.
                //
                // See: https://github.com/BurntSushi/ripgrep/blob/bf63fe8f258afc09bae6caa48f0ae35eaf115005/crates/core/main.rs#L47C1-L61C14
                let broken_pipe = cause
                    .downcast_ref::<io::Error>()
                    .is_some_and(|io_error| io_error.kind() == io::ErrorKind::BrokenPipe);
                if broken_pipe {
                    return ExitStatus::Success;
                }

                writeln!(stderr, " {} {cause}", "Cause:".bold()).ok();
            }

            ExitStatus::Error
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/src/logging.rs | crates/ty/src/logging.rs | //! Sets up logging for ty
use crate::args::TerminalColor;
use anyhow::Context;
use colored::Colorize;
use std::fmt;
use std::fs::File;
use std::io::{BufWriter, IsTerminal};
use tracing::{Event, Subscriber};
use tracing_subscriber::EnvFilter;
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields};
use tracing_subscriber::registry::LookupSpan;
use ty_static::EnvVars;
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub(crate) struct Verbosity {
    // Count of `-v` flags; each repetition increases verbosity.
    // Mutually exclusive with `--quiet` via `overrides_with`.
    #[arg(
        long,
        short = 'v',
        help = "Use verbose output (or `-vv` and `-vvv` for more verbose output)",
        action = clap::ArgAction::Count,
        global = true,
        overrides_with = "quiet",
    )]
    verbose: u8,

    // Count of `-q` flags; one for quiet, two or more for silent.
    // Mutually exclusive with `--verbose` via `overrides_with`.
    #[arg(
        long,
        short,
        help = "Use quiet output (or `-qq` for silent output)",
        action = clap::ArgAction::Count,
        global = true,
        overrides_with = "verbose",
    )]
    quiet: u8,
}
impl Verbosity {
    /// Derives the effective [`VerbosityLevel`] from the `-q`/`-v` flag counts.
    ///
    /// With no flags given, this is [`VerbosityLevel::Default`].
    pub(crate) fn level(&self) -> VerbosityLevel {
        // `--quiet` and `--verbose` are mutually exclusive in Clap, so the
        // quiet count can be resolved first without consulting verbose.
        match (self.quiet, self.verbose) {
            (1, _) => VerbosityLevel::Quiet,
            (quiet, _) if quiet >= 2 => VerbosityLevel::Silent,
            (_, 0) => VerbosityLevel::Default,
            (_, 1) => VerbosityLevel::Verbose,
            (_, 2) => VerbosityLevel::ExtraVerbose,
            _ => VerbosityLevel::Trace,
        }
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)]
pub(crate) enum VerbosityLevel {
    /// Silent output. Does not show any logging output or summary information.
    Silent,
    /// Quiet output. Only shows Ruff and ty events up to the [`ERROR`](tracing::Level::ERROR) level.
    /// Silences output except for summary information.
    Quiet,
    /// Default output level. Only shows Ruff and ty events up to the [`WARN`](tracing::Level::WARN) level.
    #[default]
    Default,
    /// Enables verbose output. Emits Ruff and ty events up to the [`INFO`](tracing::Level::INFO) level.
    /// Corresponds to `-v`.
    Verbose,
    /// Enables a more verbose tracing format and emits Ruff and ty events up to [`DEBUG`](tracing::Level::DEBUG).
    /// Corresponds to `-vv`.
    ExtraVerbose,
    /// Enables all tracing events and uses a tree-like output format. Corresponds to `-vvv`.
    Trace,
}
impl VerbosityLevel {
const fn level_filter(self) -> LevelFilter {
match self {
VerbosityLevel::Silent => LevelFilter::OFF,
VerbosityLevel::Quiet => LevelFilter::ERROR,
VerbosityLevel::Default => LevelFilter::WARN,
VerbosityLevel::Verbose => LevelFilter::INFO,
VerbosityLevel::ExtraVerbose => LevelFilter::DEBUG,
VerbosityLevel::Trace => LevelFilter::TRACE,
}
}
pub(crate) const fn is_trace(self) -> bool {
matches!(self, VerbosityLevel::Trace)
}
pub(crate) const fn is_extra_verbose(self) -> bool {
matches!(self, VerbosityLevel::ExtraVerbose)
}
}
/// Installs the global `tracing` subscriber for the requested verbosity and
/// color preference.
///
/// The `TY_LOG` environment variable, when set, overrides the verbosity-derived
/// filter. A flame-graph profiling layer is added when `TY_LOG_PROFILE` is set
/// (see [`setup_profile`]). The returned [`TracingGuard`] must be kept alive so
/// any profiling output is flushed on drop.
pub(crate) fn setup_tracing(
    level: VerbosityLevel,
    color: TerminalColor,
) -> anyhow::Result<TracingGuard> {
    use tracing_subscriber::prelude::*;

    // The `TY_LOG` environment variable overrides the default log level.
    let filter = if let Ok(log_env_variable) = std::env::var(EnvVars::TY_LOG) {
        EnvFilter::builder()
            .parse(log_env_variable)
            .context("Failed to parse directives specified in TY_LOG environment variable.")?
    } else {
        match level {
            VerbosityLevel::Default => {
                // Show warning traces for ty and ruff but not for other crates
                EnvFilter::default()
                    .add_directive("ty=warn".parse().unwrap())
                    .add_directive("ruff=warn".parse().unwrap())
            }
            level => {
                let level_filter = level.level_filter();

                // Show info|debug|trace events, but allow `TY_LOG` to override
                let filter = EnvFilter::default().add_directive(
                    format!("ty={level_filter}")
                        .parse()
                        .expect("Hardcoded directive to be valid"),
                );

                filter.add_directive(
                    format!("ruff={level_filter}")
                        .parse()
                        .expect("Hardcoded directive to be valid"),
                )
            }
        }
    };

    let (profiling_layer, guard) = setup_profile();

    let registry = tracing_subscriber::registry()
        .with(filter)
        .with(profiling_layer);

    // Resolve whether ANSI colors should be emitted on stderr.
    let ansi = match color {
        TerminalColor::Auto => {
            colored::control::SHOULD_COLORIZE.should_colorize() && std::io::stderr().is_terminal()
        }
        TerminalColor::Always => true,
        TerminalColor::Never => false,
    };

    // The two branches build different subscriber types, so `init` is called
    // separately in each rather than on a shared binding.
    if level.is_trace() {
        // `-vvv`: use tracing's pretty tree-like format with thread ids.
        let subscriber = registry.with(
            tracing_subscriber::fmt::layer()
                .event_format(tracing_subscriber::fmt::format().pretty())
                .with_thread_ids(true)
                .with_ansi(ansi)
                .with_writer(std::io::stderr),
        );

        subscriber.init();
    } else {
        // Otherwise use ty's compact format; timestamps only at `-vv`.
        let subscriber = registry.with(
            tracing_subscriber::fmt::layer()
                .event_format(TyFormat {
                    display_level: true,
                    display_timestamp: level.is_extra_verbose(),
                    show_spans: false,
                })
                .with_ansi(ansi)
                .with_writer(std::io::stderr),
        );

        subscriber.init();
    }

    Ok(TracingGuard {
        _flame_guard: guard,
    })
}
/// Builds the optional flame-graph profiling layer.
///
/// Profiling is opt-in via the `TY_LOG_PROFILE` environment variable
/// (`1` or `true`); output is written to `tracing.folded`.
#[expect(clippy::type_complexity)]
fn setup_profile<S>() -> (
    Option<tracing_flame::FlameLayer<S, BufWriter<File>>>,
    Option<tracing_flame::FlushGuard<BufWriter<File>>>,
)
where
    S: Subscriber + for<'span> LookupSpan<'span>,
{
    let enabled = matches!(
        std::env::var(EnvVars::TY_LOG_PROFILE).as_deref(),
        Ok("1" | "true")
    );

    if !enabled {
        return (None, None);
    }

    let (layer, guard) = tracing_flame::FlameLayer::with_file("tracing.folded")
        .expect("Flame layer to be created");
    (Some(layer), Some(guard))
}
/// Keeps the profiling layer's flush guard alive; dropping this flushes any
/// pending flame-graph data.
pub(crate) struct TracingGuard {
    _flame_guard: Option<tracing_flame::FlushGuard<BufWriter<File>>>,
}
/// ty's compact event format: `[timestamp] [LEVEL] [spans:] fields`.
struct TyFormat {
    // Prefix events with a wall-clock timestamp (enabled at `-vv`).
    display_timestamp: bool,
    // Prefix events with the colored level name.
    display_level: bool,
    // Prefix events with the `parent:child:` span chain.
    show_spans: bool,
}
/// See <https://docs.rs/tracing-subscriber/0.3.18/src/tracing_subscriber/fmt/format/mod.rs.html#1026-1156>
impl<S, N> FormatEvent<S, N> for TyFormat
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
let ansi = writer.has_ansi_escapes();
if self.display_timestamp {
let timestamp = jiff::Zoned::now()
.strftime("%Y-%m-%d %H:%M:%S.%f")
.to_string();
if ansi {
write!(writer, "{} ", timestamp.dimmed())?;
} else {
write!(
writer,
"{} ",
jiff::Zoned::now().strftime("%Y-%m-%d %H:%M:%S.%f")
)?;
}
}
if self.display_level {
let level = meta.level();
// Same colors as tracing
if ansi {
let formatted_level = level.to_string();
match *level {
tracing::Level::TRACE => {
write!(writer, "{} ", formatted_level.purple().bold())?;
}
tracing::Level::DEBUG => write!(writer, "{} ", formatted_level.blue().bold())?,
tracing::Level::INFO => write!(writer, "{} ", formatted_level.green().bold())?,
tracing::Level::WARN => write!(writer, "{} ", formatted_level.yellow().bold())?,
tracing::Level::ERROR => write!(writer, "{} ", level.to_string().red().bold())?,
}
} else {
write!(writer, "{level} ")?;
}
}
if self.show_spans {
let span = event.parent();
let mut seen = false;
let span = span
.and_then(|id| ctx.span(id))
.or_else(|| ctx.lookup_current());
let scope = span.into_iter().flat_map(|span| span.scope().from_root());
for span in scope {
seen = true;
if ansi {
write!(writer, "{}:", span.metadata().name().bold())?;
} else {
write!(writer, "{}:", span.metadata().name())?;
}
}
if seen {
writer.write_char(' ')?;
}
}
ctx.field_format().format_fields(writer.by_ref(), event)?;
writeln!(writer)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/file_watching.rs | crates/ty/tests/file_watching.rs | use std::collections::HashSet;
use std::io::Write;
use std::time::{Duration, Instant};
use anyhow::{Context, anyhow};
use ruff_db::Db as _;
use ruff_db::files::{File, FileError, system_path_to_file};
use ruff_db::source::source_text;
use ruff_db::system::{
OsSystem, System, SystemPath, SystemPathBuf, UserConfigDirectoryOverrideGuard, file_time_now,
};
use ruff_python_ast::PythonVersion;
use ty_module_resolver::{Module, ModuleName, resolve_module_confident};
use ty_project::metadata::options::{EnvironmentOptions, Options, ProjectOptionsOverrides};
use ty_project::metadata::pyproject::{PyProject, Tool};
use ty_project::metadata::value::{RangedValue, RelativePathBuf};
use ty_project::watch::{ChangeEvent, ProjectWatcher, directory_watcher};
use ty_project::{Db, ProjectDatabase, ProjectMetadata};
use ty_python_semantic::PythonPlatform;
/// A file-watching test fixture: a project database plus a live watcher over a
/// temporary directory tree.
struct TestCase {
    // The project database under test.
    db: ProjectDatabase,
    // The active watcher; `None` after `stop_watch` has consumed it.
    watcher: Option<ProjectWatcher>,
    // Receives batches of change events emitted by the watcher.
    changes_receiver: crossbeam::channel::Receiver<Vec<ChangeEvent>>,
    /// The temporary directory that contains the test files.
    /// We need to hold on to it in the test case or the temp files get deleted.
    _temp_dir: tempfile::TempDir,
    // Canonicalized root of the temporary directory (parent of the project).
    root_dir: SystemPathBuf,
}
impl TestCase {
    /// Resolves `relative` against the project root.
    fn project_path(&self, relative: impl AsRef<SystemPath>) -> SystemPathBuf {
        SystemPath::absolute(relative, self.db.project().root(&self.db))
    }

    /// The root of the temporary directory (parent of the project directory).
    fn root_path(&self) -> &SystemPath {
        &self.root_dir
    }

    fn db(&self) -> &ProjectDatabase {
        &self.db
    }

    /// Stops file-watching and returns the collected change events.
    ///
    /// The caller must pass a `MatchEvent` filter that is applied to
    /// the change events returned. To get all change events, use `|_:
    /// &ChangeEvent| true`. If possible, callers should pass a filter for a
    /// specific file name, e.g., `event_for_file("foo.py")`. When done this
    /// way, the watcher will specifically try to wait for a change event
    /// matching the filter. This can help avoid flakes.
    #[track_caller]
    fn stop_watch<M>(&mut self, matcher: M) -> Vec<ChangeEvent>
    where
        M: MatchEvent,
    {
        // track_caller is unstable for lambdas -> That's why this is a fn
        #[track_caller]
        fn panic_with_formatted_events(events: Vec<ChangeEvent>) -> Vec<ChangeEvent> {
            panic!(
                "Didn't observe the expected event. The following events occurred:\n{}",
                events
                    .into_iter()
                    .map(|event| format!(" - {event:?}"))
                    .collect::<Vec<_>>()
                    .join("\n")
            )
        }

        self.try_stop_watch(matcher, Duration::from_secs(10))
            .unwrap_or_else(panic_with_formatted_events)
    }

    /// Collects change events until one matches `matcher` (or a rescan event
    /// arrives), or until `timeout` elapses.
    ///
    /// Shared implementation of [`Self::try_stop_watch`] and
    /// [`Self::try_take_watch_changes`]. All observed events are returned;
    /// `Err` means the timeout expired before a matching event was seen.
    fn wait_for_matching_event<M>(
        &self,
        matcher: &mut M,
        timeout: Duration,
    ) -> Result<Vec<ChangeEvent>, Vec<ChangeEvent>>
    where
        M: MatchEvent,
    {
        let start = Instant::now();
        let mut all_events = Vec::new();

        loop {
            // Poll in 100ms slices so the overall timeout stays responsive.
            let events = self
                .changes_receiver
                .recv_timeout(Duration::from_millis(100))
                .unwrap_or_default();

            let matched = events
                .iter()
                .any(|event| matcher.match_event(event) || event.is_rescan());

            all_events.extend(events);

            if matched {
                return Ok(all_events);
            }

            if start.elapsed() > timeout {
                return Err(all_events);
            }
        }
    }

    fn try_stop_watch<M>(
        &mut self,
        mut matcher: M,
        timeout: Duration,
    ) -> Result<Vec<ChangeEvent>, Vec<ChangeEvent>>
    where
        M: MatchEvent,
    {
        tracing::debug!("Try stopping watch with timeout {:?}", timeout);

        let watcher = self
            .watcher
            .take()
            .expect("Cannot call `stop_watch` more than once");

        let mut all_events = self.wait_for_matching_event(&mut matcher, timeout)?;

        watcher.flush();
        tracing::debug!("Flushed file watcher");
        watcher.stop();
        tracing::debug!("Stopping file watcher");

        // Consume remaining events
        for event in &self.changes_receiver {
            all_events.extend(event);
        }

        Ok(all_events)
    }

    /// Like [`Self::try_take_watch_changes`], but panics when no matching
    /// change is observed within 10 seconds.
    fn take_watch_changes<M: MatchEvent>(&self, matcher: M) -> Vec<ChangeEvent> {
        self.try_take_watch_changes(matcher, Duration::from_secs(10))
            .expect("Expected watch changes but observed none")
    }

    fn try_take_watch_changes<M: MatchEvent>(
        &self,
        mut matcher: M,
        timeout: Duration,
    ) -> Result<Vec<ChangeEvent>, Vec<ChangeEvent>> {
        let watcher = self
            .watcher
            .as_ref()
            .expect("Cannot call `try_take_watch_changes` after `stop_watch`");

        let mut all_events = self.wait_for_matching_event(&mut matcher, timeout)?;

        // Drain any stragglers that arrive shortly after the matching event.
        while let Ok(event) = self
            .changes_receiver
            .recv_timeout(Duration::from_millis(10))
        {
            all_events.extend(event);
            watcher.flush();
        }

        Ok(all_events)
    }

    /// Applies the collected change events to the project database.
    fn apply_changes(
        &mut self,
        changes: Vec<ChangeEvent>,
        project_options_overrides: Option<&ProjectOptionsOverrides>,
    ) {
        self.db.apply_changes(changes, project_options_overrides);
    }

    /// Rewrites the project's `pyproject.toml` with `options`, waits for the
    /// resulting change event, applies it, and re-synchronizes the watcher.
    fn update_options(&mut self, options: Options) -> anyhow::Result<()> {
        std::fs::write(
            self.project_path("pyproject.toml").as_std_path(),
            toml::to_string(&PyProject {
                project: None,
                tool: Some(Tool { ty: Some(options) }),
            })
            .context("Failed to serialize options")?,
        )
        .context("Failed to write configuration")?;

        let changes = self.take_watch_changes(event_for_file("pyproject.toml"));
        self.apply_changes(changes, None);

        if let Some(watcher) = &mut self.watcher {
            watcher.update(&self.db);
            assert!(!watcher.has_errored_paths());
        }

        Ok(())
    }

    /// Asserts that the set of indexed project files equals `expected` exactly.
    #[track_caller]
    fn assert_indexed_project_files(&self, expected: impl IntoIterator<Item = File>) {
        let mut expected: HashSet<_> = expected.into_iter().collect();

        let actual = self.db().project().files(self.db());
        for file in &actual {
            assert!(
                expected.remove(&file),
                "Indexed project files contains '{}' which was not expected.",
                file.path(self.db())
            );
        }

        if !expected.is_empty() {
            let paths: Vec<_> = expected
                .iter()
                .map(|file| file.path(self.db()).as_str())
                .collect();
            panic!(
                "Indexed project files are missing the following files: {:?}",
                paths.join(", ")
            );
        }
    }

    fn system_file(&self, path: impl AsRef<SystemPath>) -> Result<File, FileError> {
        system_path_to_file(self.db(), path.as_ref())
    }

    /// Resolves `name` to a module, panicking if it cannot be resolved.
    fn module<'c>(&'c self, name: &str) -> Module<'c> {
        resolve_module_confident(self.db(), &ModuleName::new(name).unwrap())
            .expect("module to be present")
    }

    /// Returns the sorted names of all submodules of `parent_module_name`.
    fn sorted_submodule_names(&self, parent_module_name: &str) -> Vec<String> {
        let mut names = self
            .module(parent_module_name)
            .all_submodules(self.db())
            .iter()
            .map(|submodule| submodule.name(self.db()).to_string())
            .collect::<Vec<String>>();
        names.sort();
        names
    }
}
/// Predicate over [`ChangeEvent`]s used to decide when the watcher has
/// produced the event(s) a test is waiting for.
trait MatchEvent {
    fn match_event(&mut self, event: &ChangeEvent) -> bool;
}
/// Returns a matcher that fires for any change event whose file name is `name`.
fn event_for_file(name: &str) -> impl MatchEvent + '_ {
    move |event: &ChangeEvent| matches!(event.file_name(), Some(file_name) if file_name == name)
}
// Any `FnMut(&ChangeEvent) -> bool` closure can be used directly as a matcher.
impl<F> MatchEvent for F
where
    F: FnMut(&ChangeEvent) -> bool,
{
    fn match_event(&mut self, event: &ChangeEvent) -> bool {
        (*self)(event)
    }
}
/// Creates the on-disk fixture for a test case before the database and the
/// watcher are constructed (see [`setup`]).
trait Setup {
    fn setup(self, context: &mut SetupContext) -> anyhow::Result<()>;
}
/// Handed to [`Setup`] implementations to write fixture files and configure
/// the test case before the database is created.
struct SetupContext<'a> {
    // The OS system rooted at the project directory.
    system: &'a OsSystem,
    // Root of the temporary directory (parent of the project directory).
    root_path: &'a SystemPath,
    // Project options to serialize into `pyproject.toml`, if any.
    options: Option<Options>,
    // Explicit `included_paths` to set on the project, if any.
    included_paths: Option<Vec<SystemPathBuf>>,
}
impl<'a> SetupContext<'a> {
    fn system(&self) -> &'a OsSystem {
        self.system
    }

    /// Resolves `relative` against the project directory.
    fn join_project_path(&self, relative: impl AsRef<SystemPath>) -> SystemPathBuf {
        self.project_path().join(relative)
    }

    fn project_path(&self) -> &SystemPath {
        self.system.current_directory()
    }

    fn root_path(&self) -> &'a SystemPath {
        self.root_path
    }

    /// Resolves `relative` against the temporary root directory.
    fn join_root_path(&self, relative: impl AsRef<SystemPath>) -> SystemPathBuf {
        self.root_path().join(relative)
    }

    /// Writes `content` to a path given relative to the project directory.
    fn write_project_file(
        &self,
        relative_path: impl AsRef<SystemPath>,
        content: &str,
    ) -> anyhow::Result<()> {
        Self::write_file_impl(self.join_project_path(relative_path.as_ref()), content)
    }

    /// Writes `content` to a path given relative to the root directory.
    fn write_file(
        &self,
        relative_path: impl AsRef<SystemPath>,
        content: &str,
    ) -> anyhow::Result<()> {
        Self::write_file_impl(self.join_root_path(relative_path.as_ref()), content)
    }

    /// Creates parent directories as needed, writes `content`, and syncs the
    /// file data to disk.
    fn write_file_impl(path: impl AsRef<SystemPath>, content: &str) -> anyhow::Result<()> {
        let path = path.as_ref();

        if let Some(parent) = path.parent() {
            std::fs::create_dir_all(parent)
                .with_context(|| format!("Failed to create parent directory for file `{path}`"))?;
        }

        let mut file = std::fs::File::create(path.as_std_path())
            .with_context(|| format!("Failed to open file `{path}`"))?;
        file.write_all(content.as_bytes())
            .with_context(|| format!("Failed to write to file `{path}`"))?;
        file.sync_data()?;

        Ok(())
    }

    fn set_options(&mut self, options: Options) {
        self.options = Some(options);
    }

    fn set_included_paths(&mut self, paths: Vec<SystemPathBuf>) {
        self.included_paths = Some(paths);
    }
}
// Convenience: a fixed-size array of `(relative_path, content)` pairs writes
// each pair as a file under the project directory.
impl<const N: usize, P> Setup for [(P, &'static str); N]
where
    P: AsRef<SystemPath>,
{
    fn setup(self, context: &mut SetupContext) -> anyhow::Result<()> {
        for (relative_path, content) in self {
            context.write_project_file(relative_path, content)?;
        }

        Ok(())
    }
}
// Convenience: a closure receives the `SetupContext` directly for arbitrary
// fixture construction.
impl<F> Setup for F
where
    F: FnOnce(&mut SetupContext) -> anyhow::Result<()>,
{
    fn setup(self, context: &mut SetupContext) -> anyhow::Result<()> {
        self(context)
    }
}
/// Builds a [`TestCase`]: creates a temporary directory tree, runs the
/// caller's [`Setup`] to lay out fixture files, discovers the project, creates
/// the database, and starts a directory watcher over it.
fn setup<F>(setup_files: F) -> anyhow::Result<TestCase>
where
    F: Setup,
{
    let temp_dir = tempfile::tempdir()?;

    let root_path = SystemPath::from_std_path(temp_dir.path()).ok_or_else(|| {
        anyhow!(
            "Temporary directory `{}` is not a valid UTF-8 path.",
            temp_dir.path().display()
        )
    })?;

    // Canonicalize so paths reported by the watcher compare equal to the
    // paths used by the test.
    let root_path = SystemPathBuf::from_utf8_path_buf(
        root_path
            .as_utf8_path()
            .canonicalize_utf8()
            .with_context(|| "Failed to canonicalize root path.")?,
    )
    .simplified()
    .to_path_buf();

    let project_path = root_path.join("project");

    std::fs::create_dir_all(project_path.as_std_path())
        .with_context(|| format!("Failed to create project directory `{project_path}`"))?;

    let system = OsSystem::new(&project_path);

    let mut setup_context = SetupContext {
        system: &system,
        root_path: &root_path,
        options: None,
        included_paths: None,
    };

    setup_files
        .setup(&mut setup_context)
        .context("Failed to setup test files")?;

    // Persist any options the setup configured as the project's `pyproject.toml`.
    if let Some(options) = setup_context.options {
        std::fs::write(
            project_path.join("pyproject.toml").as_std_path(),
            toml::to_string(&PyProject {
                project: None,
                tool: Some(Tool { ty: Some(options) }),
            })
            .context("Failed to serialize options")?,
        )
        .context("Failed to write configuration")?;
    }

    let included_paths = setup_context.included_paths;

    let mut project = ProjectMetadata::discover(&project_path, &system)?;
    project.apply_configuration_files(&system)?;

    // We need a chance to create the directories here.
    if let Some(environment) = project.options().environment.as_ref() {
        for path in environment
            .extra_paths
            .as_deref()
            .unwrap_or_default()
            .iter()
            .chain(environment.typeshed.as_ref())
        {
            std::fs::create_dir_all(path.absolute(&project_path, &system).as_std_path())
                .with_context(|| format!("Failed to create search path `{path}`"))?;
        }
    }

    let mut db = ProjectDatabase::new(project, system)?;

    if let Some(included_paths) = included_paths {
        db.project().set_included_paths(&mut db, included_paths);
    }

    let (sender, receiver) = crossbeam::channel::unbounded();
    let watcher = directory_watcher(move |events| sender.send(events).unwrap())
        .with_context(|| "Failed to create directory watcher")?;

    let watcher = ProjectWatcher::new(watcher, &db);
    assert!(!watcher.has_errored_paths());

    let test_case = TestCase {
        db,
        changes_receiver: receiver,
        watcher: Some(watcher),
        _temp_dir: temp_dir,
        root_dir: root_path,
    };

    // Sometimes the file watcher reports changes for events that happened before the watcher was started.
    // Do a best effort at dropping these events.
    let _ =
        test_case.try_take_watch_changes(|_event: &ChangeEvent| true, Duration::from_millis(100));

    Ok(test_case)
}
/// Updates the content of a file and ensures that the last modified file time is updated.
///
/// Some file systems have coarse modification-time granularity, so a quick
/// rewrite may leave the mtime unchanged and the watcher would not fire. The
/// loop below keeps forcing the mtime forward until it differs from the
/// pre-write value.
fn update_file(path: impl AsRef<SystemPath>, content: &str) -> anyhow::Result<()> {
    let path = path.as_ref().as_std_path();

    let metadata = path.metadata()?;
    let last_modified_time = filetime::FileTime::from_last_modification_time(&metadata);

    let mut file = std::fs::OpenOptions::new()
        .create(false)
        .write(true)
        .truncate(true)
        .open(path)?;

    file.write_all(content.as_bytes())?;

    loop {
        file.sync_all()?;

        let modified_time = filetime::FileTime::from_last_modification_time(&path.metadata()?);

        if modified_time != last_modified_time {
            break Ok(());
        }

        std::thread::sleep(Duration::from_nanos(10));

        // Explicitly bump the mtime in case the write alone didn't change it.
        filetime::set_file_handle_times(&file, None, Some(file_time_now()))?;
    }
}
// A file created inside the project is picked up by the watcher and indexed.
#[test]
fn new_file() -> anyhow::Result<()> {
    let mut case = setup([("bar.py", "")])?;
    let bar_path = case.project_path("bar.py");
    let bar_file = case.system_file(&bar_path).unwrap();
    let foo_path = case.project_path("foo.py");

    // `foo.py` doesn't exist yet.
    assert_eq!(case.system_file(&foo_path), Err(FileError::NotFound));
    case.assert_indexed_project_files([bar_file]);

    std::fs::write(foo_path.as_std_path(), "print('Hello')")?;

    let changes = case.stop_watch(event_for_file("foo.py"));

    case.apply_changes(changes, None);

    // The new file now exists and is part of the indexed project files.
    let foo = case.system_file(&foo_path).expect("foo.py to exist.");
    case.assert_indexed_project_files([bar_file, foo]);

    Ok(())
}
// A newly created file matched by `.ignore` is tracked by the file system
// layer but excluded from the indexed project files.
#[test]
fn new_ignored_file() -> anyhow::Result<()> {
    let mut case = setup([("bar.py", ""), (".ignore", "foo.py")])?;
    let bar_path = case.project_path("bar.py");
    let bar_file = case.system_file(&bar_path).unwrap();
    let foo_path = case.project_path("foo.py");

    assert_eq!(case.system_file(&foo_path), Err(FileError::NotFound));
    case.assert_indexed_project_files([bar_file]);

    std::fs::write(foo_path.as_std_path(), "print('Hello')")?;

    let changes = case.stop_watch(event_for_file("foo.py"));

    case.apply_changes(changes, None);

    // The file exists on disk but must not become an indexed project file.
    assert!(case.system_file(&foo_path).is_ok());
    case.assert_indexed_project_files([bar_file]);

    Ok(())
}
// A file created inside a search path (here: `site_packages` via
// `extra_paths`) becomes known to the system but is not indexed as a project
// file.
#[test]
fn new_non_project_file() -> anyhow::Result<()> {
    let mut case = setup(|context: &mut SetupContext| {
        context.write_project_file("bar.py", "")?;
        context.set_options(Options {
            environment: Some(EnvironmentOptions {
                extra_paths: Some(vec![RelativePathBuf::cli(
                    context.join_root_path("site_packages"),
                )]),
                ..EnvironmentOptions::default()
            }),
            ..Options::default()
        });

        Ok(())
    })?;

    let bar_path = case.project_path("bar.py");
    let bar_file = case.system_file(&bar_path).unwrap();

    case.assert_indexed_project_files([bar_file]);

    // Add a file to site packages
    let black_path = case.root_path().join("site_packages/black.py");
    std::fs::write(black_path.as_std_path(), "print('Hello')")?;

    let changes = case.stop_watch(event_for_file("black.py"));

    case.apply_changes(changes, None);

    assert!(case.system_file(&black_path).is_ok());

    // The file should not have been added to the project files
    case.assert_indexed_project_files([bar_file]);

    Ok(())
}
// With explicit `included_paths`, only new files under an included path are
// added to the indexed project files.
#[test]
fn new_files_with_explicit_included_paths() -> anyhow::Result<()> {
    let mut case = setup(|context: &mut SetupContext| {
        context.write_project_file("src/main.py", "")?;
        context.write_project_file("src/sub/__init__.py", "")?;
        // `src/test.py` exists but is outside the included paths.
        context.write_project_file("src/test.py", "")?;
        context.set_included_paths(vec![
            context.join_project_path("src/main.py"),
            context.join_project_path("src/sub"),
        ]);
        Ok(())
    })?;

    let main_path = case.project_path("src/main.py");
    let main_file = case.system_file(&main_path).unwrap();

    let sub_init_path = case.project_path("src/sub/__init__.py");
    let sub_init = case.system_file(&sub_init_path).unwrap();

    case.assert_indexed_project_files([main_file, sub_init]);

    // Write a new file to `sub` which is an included path
    let sub_a_path = case.project_path("src/sub/a.py");
    std::fs::write(sub_a_path.as_std_path(), "print('Hello')")?;

    // and write a second file in the root directory -- this should not be included
    let test2_path = case.project_path("src/test2.py");
    std::fs::write(test2_path.as_std_path(), "print('Hello')")?;

    let changes = case.stop_watch(event_for_file("test2.py"));

    case.apply_changes(changes, None);

    let sub_a_file = case.system_file(&sub_a_path).expect("sub/a.py to exist");

    case.assert_indexed_project_files([main_file, sub_init, sub_a_file]);

    Ok(())
}
// An included path outside of the project directory is watched too: new files
// there are indexed, while files in the (non-included) project root are not.
#[test]
fn new_file_in_included_out_of_project_directory() -> anyhow::Result<()> {
    let mut case = setup(|context: &mut SetupContext| {
        context.write_project_file("src/main.py", "")?;
        // `script.py` sits in the project root, which is not included.
        context.write_project_file("script.py", "")?;
        context.write_file("outside_project/a.py", "")?;
        context.set_included_paths(vec![
            context.join_root_path("outside_project"),
            context.join_project_path("src"),
        ]);
        Ok(())
    })?;

    let main_path = case.project_path("src/main.py");
    let main_file = case.system_file(&main_path).unwrap();

    let outside_a_path = case.root_path().join("outside_project/a.py");
    let outside_a = case.system_file(&outside_a_path).unwrap();

    case.assert_indexed_project_files([outside_a, main_file]);

    // Write a new file to `src` which should be watched
    let src_a = case.project_path("src/a.py");
    std::fs::write(src_a.as_std_path(), "print('Hello')")?;

    // and write a second file to `outside_project` which should be watched too
    let outside_b_path = case.root_path().join("outside_project/b.py");
    std::fs::write(outside_b_path.as_std_path(), "print('Hello')")?;

    // and a third file in the project's root that should not be included
    let script2_path = case.project_path("script2.py");
    std::fs::write(script2_path.as_std_path(), "print('Hello')")?;

    let changes = case.stop_watch(event_for_file("script2.py"));

    case.apply_changes(changes, None);

    let src_a_file = case.system_file(&src_a).unwrap();
    let outside_b_file = case.system_file(&outside_b_path).unwrap();

    // The file should not have been added to the project files
    case.assert_indexed_project_files([main_file, outside_a, outside_b_file, src_a_file]);

    Ok(())
}
// Editing a project file on disk updates the source text in the database.
#[test]
fn changed_file() -> anyhow::Result<()> {
    let initial_source = "print('Hello, world!')";

    let mut case = setup([("foo.py", initial_source)])?;

    let foo_path = case.project_path("foo.py");
    let foo = case.system_file(&foo_path)?;

    assert_eq!(source_text(case.db(), foo).as_str(), initial_source);
    case.assert_indexed_project_files([foo]);

    // Rewrite the file and wait for the corresponding change event.
    update_file(&foo_path, "print('Version 2')")?;

    let changes = case.stop_watch(event_for_file("foo.py"));
    assert!(!changes.is_empty());

    case.apply_changes(changes, None);

    // The database now reflects the new content; indexing is unchanged.
    assert_eq!(source_text(case.db(), foo).as_str(), "print('Version 2')");
    case.assert_indexed_project_files([foo]);

    Ok(())
}
// Deleting a project file removes it from existence checks and from the index.
#[test]
fn deleted_file() -> anyhow::Result<()> {
    let source = "print('Hello, world!')";

    let mut case = setup([("foo.py", source)])?;

    let foo_path = case.project_path("foo.py");
    let foo = case.system_file(&foo_path)?;

    assert!(foo.exists(case.db()));
    case.assert_indexed_project_files([foo]);

    // Remove the file from disk and wait for the deletion event.
    std::fs::remove_file(foo_path.as_std_path())?;

    let changes = case.stop_watch(event_for_file("foo.py"));
    case.apply_changes(changes, None);

    assert!(!foo.exists(case.db()));
    case.assert_indexed_project_files([]);

    Ok(())
}
/// Tests the case where a file is moved from inside a watched directory to a directory that is not watched.
///
/// This matches the behavior of deleting a file in VS code.
#[test]
fn move_file_to_trash() -> anyhow::Result<()> {
    let foo_source = "print('Hello, world!')";

    let mut case = setup([("foo.py", foo_source)])?;
    let foo_path = case.project_path("foo.py");

    // `.trash` lives outside the project and is therefore not watched.
    let trash_path = case.root_path().join(".trash");
    std::fs::create_dir_all(trash_path.as_std_path())?;

    let foo = case.system_file(&foo_path)?;

    assert!(foo.exists(case.db()));
    case.assert_indexed_project_files([foo]);

    std::fs::rename(
        foo_path.as_std_path(),
        trash_path.join("foo.py").as_std_path(),
    )?;

    let changes = case.stop_watch(event_for_file("foo.py"));

    case.apply_changes(changes, None);

    // The move out of the watched tree is observed as a deletion.
    assert!(!foo.exists(case.db()));
    case.assert_indexed_project_files([]);

    Ok(())
}
/// Move a file from a non-project (non-watched) location into the project.
#[test]
fn move_file_to_project() -> anyhow::Result<()> {
    let mut case = setup([("bar.py", "")])?;
    let bar_path = case.project_path("bar.py");
    // Use `?` instead of `.unwrap()` so a lookup failure surfaces as a test
    // error, consistent with every other `system_file` call in this function.
    let bar = case.system_file(&bar_path)?;

    // Create `foo.py` outside the project directory, in the watch root.
    let foo_path = case.root_path().join("foo.py");
    std::fs::write(foo_path.as_std_path(), "")?;

    let foo_in_project = case.project_path("foo.py");

    // The out-of-project file is known to the system but not indexed.
    assert!(case.system_file(&foo_path).is_ok());
    case.assert_indexed_project_files([bar]);

    std::fs::rename(foo_path.as_std_path(), foo_in_project.as_std_path())?;

    let changes = case.stop_watch(event_for_file("foo.py"));
    case.apply_changes(changes, None);

    // After the move, the file exists at the project path and is indexed.
    let foo_in_project = case.system_file(&foo_in_project)?;
    assert!(foo_in_project.exists(case.db()));
    case.assert_indexed_project_files([bar, foo_in_project]);

    Ok(())
}
/// Rename a project file: the old path disappears from the index and the
/// new path takes its place.
#[test]
fn rename_file() -> anyhow::Result<()> {
    let mut case = setup([("foo.py", "")])?;

    let old_path = case.project_path("foo.py");
    let old_file = case.system_file(&old_path)?;
    case.assert_indexed_project_files([old_file]);

    let new_path = case.project_path("bar.py");
    std::fs::rename(old_path.as_std_path(), new_path.as_std_path())?;

    let changes = case.stop_watch(event_for_file("bar.py"));
    case.apply_changes(changes, None);

    // The old file handle is now stale ...
    assert!(!old_file.exists(case.db()));

    // ... and the renamed file is tracked under its new path.
    let new_file = case.system_file(&new_path)?;
    assert!(new_file.exists(case.db()));
    case.assert_indexed_project_files([new_file]);

    Ok(())
}
/// Moving a package directory (with `__init__.py` and a submodule) from
/// outside the project into the project makes its modules resolvable and
/// adds its files to the project index.
#[test]
fn directory_moved_to_project() -> anyhow::Result<()> {
    let mut case = setup([("bar.py", "import sub.a")])?;
    let bar = case.system_file(case.project_path("bar.py")).unwrap();
    // Create `sub/` with `__init__.py` and `a.py` in the watch root,
    // outside the project directory.
    let sub_original_path = case.root_path().join("sub");
    let init_original_path = sub_original_path.join("__init__.py");
    let a_original_path = sub_original_path.join("a.py");
    std::fs::create_dir(sub_original_path.as_std_path())
        .with_context(|| "Failed to create sub directory")?;
    std::fs::write(init_original_path.as_std_path(), "")
        .with_context(|| "Failed to create __init__.py")?;
    std::fs::write(a_original_path.as_std_path(), "").with_context(|| "Failed to create a.py")?;
    // While the package lives outside the project, `sub.a` must not resolve.
    let sub_a_module =
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap());
    assert_eq!(sub_a_module, None);
    case.assert_indexed_project_files([bar]);
    let sub_new_path = case.project_path("sub");
    std::fs::rename(sub_original_path.as_std_path(), sub_new_path.as_std_path())
        .with_context(|| "Failed to move sub directory")?;
    let changes = case.stop_watch(event_for_file("sub"));
    case.apply_changes(changes, None);
    let init_file = case
        .system_file(sub_new_path.join("__init__.py"))
        .expect("__init__.py to exist");
    let a_file = case
        .system_file(sub_new_path.join("a.py"))
        .expect("a.py to exist");
    // `import sub.a` should now resolve
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_some()
    );
    case.assert_indexed_project_files([bar, init_file, a_file]);
    Ok(())
}
/// Moving a package directory out of the project (into `.trash`) makes its
/// modules unresolvable and removes its files from the project index.
#[test]
fn directory_moved_to_trash() -> anyhow::Result<()> {
    let mut case = setup([
        ("bar.py", "import sub.a"),
        ("sub/__init__.py", ""),
        ("sub/a.py", ""),
    ])?;
    let bar = case.system_file(case.project_path("bar.py")).unwrap();
    // Precondition: the package resolves while it is inside the project.
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_some()
    );
    let sub_path = case.project_path("sub");
    let init_file = case
        .system_file(sub_path.join("__init__.py"))
        .expect("__init__.py to exist");
    let a_file = case
        .system_file(sub_path.join("a.py"))
        .expect("a.py to exist");
    case.assert_indexed_project_files([bar, init_file, a_file]);
    std::fs::create_dir(case.root_path().join(".trash").as_std_path())?;
    let trashed_sub = case.root_path().join(".trash/sub");
    std::fs::rename(sub_path.as_std_path(), trashed_sub.as_std_path())
        .with_context(|| "Failed to move the sub directory to the trash")?;
    let changes = case.stop_watch(event_for_file("sub"));
    case.apply_changes(changes, None);
    // `import sub.a` should no longer resolve
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_none()
    );
    assert!(!init_file.exists(case.db()));
    assert!(!a_file.exists(case.db()));
    case.assert_indexed_project_files([bar]);
    Ok(())
}
/// Moving a package directory to a different module path inside the project:
/// the old module name stops resolving, the new one starts, and the indexed
/// files are swapped accordingly.
#[test]
fn directory_renamed() -> anyhow::Result<()> {
    let mut case = setup([
        ("bar.py", "import sub.a"),
        ("sub/__init__.py", ""),
        ("sub/a.py", ""),
    ])?;
    let bar = case.system_file(case.project_path("bar.py")).unwrap();
    // Preconditions: `sub.a` resolves, the target name `foo.baz` does not.
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_some()
    );
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("foo.baz").unwrap()).is_none()
    );
    let sub_path = case.project_path("sub");
    let sub_init = case
        .system_file(sub_path.join("__init__.py"))
        .expect("__init__.py to exist");
    let sub_a = case
        .system_file(sub_path.join("a.py"))
        .expect("a.py to exist");
    case.assert_indexed_project_files([bar, sub_init, sub_a]);
    let foo_baz = case.project_path("foo/baz");
    std::fs::create_dir(case.project_path("foo").as_std_path())?;
    std::fs::rename(sub_path.as_std_path(), foo_baz.as_std_path())
        .with_context(|| "Failed to move the sub directory")?;
    // Linux and windows only emit an event for the newly created root directory, but not for every new component.
    let changes = case.stop_watch(event_for_file("sub"));
    case.apply_changes(changes, None);
    // `import sub.a` should no longer resolve
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_none()
    );
    // `import foo.baz` should now resolve
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("foo.baz").unwrap()).is_some()
    );
    // The old paths are no longer tracked
    assert!(!sub_init.exists(case.db()));
    assert!(!sub_a.exists(case.db()));
    let foo_baz_init = case
        .system_file(foo_baz.join("__init__.py"))
        .expect("__init__.py to exist");
    let foo_baz_a = case
        .system_file(foo_baz.join("a.py"))
        .expect("a.py to exist");
    // The new paths are synced
    assert!(foo_baz_init.exists(case.db()));
    assert!(foo_baz_a.exists(case.db()));
    case.assert_indexed_project_files([bar, foo_baz_init, foo_baz_a]);
    Ok(())
}
/// Deleting a package directory removes its modules from resolution and its
/// files from the project index.
#[test]
fn directory_deleted() -> anyhow::Result<()> {
    let mut case = setup([
        ("bar.py", "import sub.a"),
        ("sub/__init__.py", ""),
        ("sub/a.py", ""),
    ])?;
    let bar = case.system_file(case.project_path("bar.py")).unwrap();
    // Precondition: the package resolves before the deletion.
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_some()
    );
    let sub_path = case.project_path("sub");
    let init_file = case
        .system_file(sub_path.join("__init__.py"))
        .expect("__init__.py to exist");
    let a_file = case
        .system_file(sub_path.join("a.py"))
        .expect("a.py to exist");
    case.assert_indexed_project_files([bar, init_file, a_file]);
    std::fs::remove_dir_all(sub_path.as_std_path())
        .with_context(|| "Failed to remove the sub directory")?;
    let changes = case.stop_watch(event_for_file("sub"));
    case.apply_changes(changes, None);
    // `import sub.a` should no longer resolve
    assert!(
        resolve_module_confident(case.db(), &ModuleName::new_static("sub.a").unwrap()).is_none()
    );
    assert!(!init_file.exists(case.db()));
    assert!(!a_file.exists(case.db()));
    case.assert_indexed_project_files([bar]);
    Ok(())
}
/// A file created inside an extra search path (`site_packages`) is picked up
/// by the watcher: the module becomes resolvable, but the file is not added
/// to the indexed *project* files.
#[test]
fn search_path() -> anyhow::Result<()> {
    let mut case = setup(|context: &mut SetupContext| {
        context.write_project_file("bar.py", "import sub.a")?;
        // Register `<root>/site_packages` as an extra search path.
        context.set_options(Options {
            environment: Some(EnvironmentOptions {
                extra_paths: Some(vec![RelativePathBuf::cli(
                    context.join_root_path("site_packages"),
                )]),
                ..EnvironmentOptions::default()
            }),
            ..Options::default()
        });
        Ok(())
    })?;
    let site_packages = case.root_path().join("site_packages");
    // `a` is not resolvable before the file is created.
    assert_eq!(
        resolve_module_confident(case.db(), &ModuleName::new_static("a").unwrap()),
        None
    );
    std::fs::write(site_packages.join("a.py").as_std_path(), "class A: ...")?;
    let changes = case.stop_watch(event_for_file("a.py"));
    case.apply_changes(changes, None);
    // The new module resolves, ...
    assert!(resolve_module_confident(case.db(), &ModuleName::new_static("a").unwrap()).is_some());
    // ... but only the project's own file is part of the project index.
    case.assert_indexed_project_files([case.system_file(case.project_path("bar.py")).unwrap()]);
    Ok(())
}
#[test]
fn add_search_path() -> anyhow::Result<()> {
let mut case = setup([("bar.py", "import sub.a")])?;
let site_packages = case.project_path("site_packages");
std::fs::create_dir_all(site_packages.as_std_path())?;
assert!(resolve_module_confident(case.db(), &ModuleName::new_static("a").unwrap()).is_none());
// Register site-packages as a search path.
case.update_options(Options {
environment: Some(EnvironmentOptions {
extra_paths: Some(vec![RelativePathBuf::cli("site_packages")]),
..EnvironmentOptions::default()
}),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/rule_selection.rs | crates/ty/tests/cli/rule_selection.rs | use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
/// The rule severity can be changed in the configuration file
#[test]
fn configuration_rule_severity() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
y = 4 / 0
for a in range(0, int(y)):
x = a
prin(x) # unresolved-reference
"#,
)?;
// Assert that there's an `unresolved-reference` diagnostic (error).
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `prin` used when not defined
--> test.py:7:1
|
5 | x = a
6 |
7 | prin(x) # unresolved-reference
| ^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Now demote `unresolved-reference` to ignored and promote
// `division-by-zero` to a warning via `[tool.ty.rules]`.
case.write_file(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "warn" # promote to warn
unresolved-reference = "ignore"
"#,
)?;
// Only the division-by-zero warning remains and the run succeeds.
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:2:5
|
2 | y = 4 / 0
| ^^^^^
3 |
4 | for a in range(0, int(y)):
|
info: rule `division-by-zero` was selected in the configuration file
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error`
#[test]
fn cli_rule_severity() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
import does_not_exit
y = 4 / 0
for a in range(0, int(y)):
x = a
prin(x) # unresolved-reference
"#,
)?;
// Assert that there's an `unresolved-reference` diagnostic (error)
// and an unresolved-import (error) diagnostic by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `does_not_exit`
--> test.py:2:8
|
2 | import does_not_exit
| ^^^^^^^^^^^^^
3 |
4 | y = 4 / 0
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-reference]: Name `prin` used when not defined
--> test.py:9:1
|
7 | x = a
8 |
9 | prin(x) # unresolved-reference
| ^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
// `--ignore` suppresses `unresolved-reference` entirely and `--warn`
// demotes the two remaining rules to warnings, so the run now succeeds.
assert_cmd_snapshot!(
case
.command()
.arg("--ignore")
.arg("unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
.arg("--warn")
.arg("unresolved-import"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning[unresolved-import]: Cannot resolve imported module `does_not_exit`
--> test.py:2:8
|
2 | import does_not_exit
| ^^^^^^^^^^^^^
3 |
4 | y = 4 / 0
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` was selected on the command line
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:4:5
|
2 | import does_not_exit
3 |
4 | y = 4 / 0
| ^^^^^
5 |
6 | for a in range(0, int(y)):
|
info: rule `division-by-zero` was selected on the command line
Found 2 diagnostics
----- stderr -----
"###
);
Ok(())
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error` and
/// values specified last override previous severities.
#[test]
fn cli_rule_severity_precedence() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
y = 4 / 0
for a in range(0, int(y)):
x = a
prin(x) # unresolved-reference
"#,
)?;
// Assert that there's an `unresolved-reference` diagnostic (error) by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `prin` used when not defined
--> test.py:7:1
|
5 | x = a
6 |
7 | prin(x) # unresolved-reference
| ^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// The trailing `--ignore unresolved-reference` overrides the earlier
// `--warn unresolved-reference` (last value wins), so only the
// `division-by-zero` warning remains.
assert_cmd_snapshot!(
case
.command()
.arg("--warn")
.arg("unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
.arg("--ignore")
.arg("unresolved-reference"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:2:5
|
2 | y = 4 / 0
| ^^^^^
3 |
4 | for a in range(0, int(y)):
|
info: rule `division-by-zero` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###
);
Ok(())
}
/// ty warns about unknown rules specified in a configuration file
#[test]
fn configuration_unknown_rules() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zer = "warn" # incorrect rule name
"#,
),
("test.py", "print(10)"),
])?;
// The misspelled rule produces an `unknown-rule` warning with a
// did-you-mean suggestion and a span into `pyproject.toml`; the run
// still succeeds because it is only a warning.
assert_cmd_snapshot!(case.command(), @r#"
success: true
exit_code: 0
----- stdout -----
warning[unknown-rule]: Unknown rule `division-by-zer`. Did you mean `division-by-zero`?
--> pyproject.toml:3:1
|
2 | [tool.ty.rules]
3 | division-by-zer = "warn" # incorrect rule name
| ^^^^^^^^^^^^^^^
|
Found 1 diagnostic
----- stderr -----
"#);
Ok(())
}
/// ty warns about unknown rules specified in a CLI argument
#[test]
fn cli_unknown_rules() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", "print(10)")?;
// Unlike the configuration-file case, a CLI typo has no source span,
// so the `unknown-rule` warning is emitted without a code frame.
assert_cmd_snapshot!(case.command().arg("--ignore").arg("division-by-zer"), @r"
success: true
exit_code: 0
----- stdout -----
warning[unknown-rule]: Unknown rule `division-by-zer`. Did you mean `division-by-zero`?
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// Basic override functionality: override rules for specific files
#[test]
fn overrides_basic() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
unresolved-reference = "error"
[[tool.ty.overrides]]
include = ["tests/**"]
[tool.ty.overrides.rules]
division-by-zero = "warn"
unresolved-reference = "ignore"
"#,
),
(
"main.py",
r#"
y = 4 / 0 # division-by-zero: error (global)
x = 1
prin(x) # unresolved-reference: error (global)
"#,
),
(
"tests/test_main.py",
r#"
y = 4 / 0 # division-by-zero: warn (override)
x = 1
prin(x) # unresolved-reference: ignore (override)
"#,
),
])?;
// `main.py` keeps the global error severities; `tests/test_main.py`
// matches the override: `division-by-zero` is demoted to a warning and
// `unresolved-reference` is suppressed entirely.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0 # division-by-zero: error (global)
| ^^^^^
3 | x = 1
4 | prin(x) # unresolved-reference: error (global)
|
info: rule `division-by-zero` was selected in the configuration file
error[unresolved-reference]: Name `prin` used when not defined
--> main.py:4:1
|
2 | y = 4 / 0 # division-by-zero: error (global)
3 | x = 1
4 | prin(x) # unresolved-reference: error (global)
| ^^^^
|
info: rule `unresolved-reference` was selected in the configuration file
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/test_main.py:2:5
|
2 | y = 4 / 0 # division-by-zero: warn (override)
| ^^^^^
3 | x = 1
4 | prin(x) # unresolved-reference: ignore (override)
|
info: rule `division-by-zero` was selected in the configuration file
Found 3 diagnostics
----- stderr -----
"###);
Ok(())
}
/// Multiple overrides: later overrides take precedence
#[test]
fn overrides_precedence() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
# First override: all test files
[[tool.ty.overrides]]
include = ["tests/**"]
[tool.ty.overrides.rules]
division-by-zero = "warn"
# Second override: specific test file (takes precedence)
[[tool.ty.overrides]]
include = ["tests/important.py"]
[tool.ty.overrides.rules]
division-by-zero = "ignore"
"#,
),
(
"tests/test_main.py",
r#"
y = 4 / 0 # division-by-zero: warn (first override)
"#,
),
(
"tests/important.py",
r#"
y = 4 / 0 # division-by-zero: ignore (second override)
"#,
),
])?;
// `tests/important.py` matches both overrides; the later `ignore` wins,
// so only `tests/test_main.py` reports a (warning) diagnostic.
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/test_main.py:2:5
|
2 | y = 4 / 0 # division-by-zero: warn (first override)
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Override with exclude patterns
#[test]
fn overrides_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = ["tests/**"]
exclude = ["tests/important.py"]
[tool.ty.overrides.rules]
division-by-zero = "warn"
"#,
),
(
"tests/test_main.py",
r#"
y = 4 / 0 # division-by-zero: warn (override applies)
"#,
),
(
"tests/important.py",
r#"
y = 4 / 0 # division-by-zero: error (override excluded)
"#,
),
])?;
// The excluded file keeps the global `error` severity while the other
// test file gets the override's `warn`.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/important.py:2:5
|
2 | y = 4 / 0 # division-by-zero: error (override excluded)
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/test_main.py:2:5
|
2 | y = 4 / 0 # division-by-zero: warn (override applies)
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// Override without rules inherits global rules
#[test]
fn overrides_inherit_global() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "warn"
unresolved-reference = "error"
[[tool.ty.overrides]]
include = ["tests/**"]
[tool.ty.overrides.rules]
# Override only division-by-zero, unresolved-reference should inherit from global
division-by-zero = "ignore"
"#,
),
(
"main.py",
r#"
y = 4 / 0 # division-by-zero: warn (global)
prin(y) # unresolved-reference: error (global)
"#,
),
(
"tests/test_main.py",
r#"
y = 4 / 0 # division-by-zero: ignore (overridden)
prin(y) # unresolved-reference: error (inherited from global)
"#,
),
])?;
// `tests/test_main.py` still errors on `unresolved-reference` (inherited
// from the global table) while its `division-by-zero` is suppressed.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0 # division-by-zero: warn (global)
| ^^^^^
3 | prin(y) # unresolved-reference: error (global)
|
info: rule `division-by-zero` was selected in the configuration file
error[unresolved-reference]: Name `prin` used when not defined
--> main.py:3:1
|
2 | y = 4 / 0 # division-by-zero: warn (global)
3 | prin(y) # unresolved-reference: error (global)
| ^^^^
|
info: rule `unresolved-reference` was selected in the configuration file
error[unresolved-reference]: Name `prin` used when not defined
--> tests/test_main.py:3:1
|
2 | y = 4 / 0 # division-by-zero: ignore (overridden)
3 | prin(y) # unresolved-reference: error (inherited from global)
| ^^^^
|
info: rule `unresolved-reference` was selected in the configuration file
Found 3 diagnostics
----- stderr -----
"###);
Ok(())
}
/// ty warns about invalid glob patterns in override include patterns
#[test]
fn overrides_invalid_include_glob() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = ["tests/[invalid"] # Invalid glob: unclosed bracket
[tool.ty.overrides.rules]
division-by-zero = "warn"
"#,
),
(
"test.py",
r#"
y = 4 / 0
"#,
),
])?;
// An invalid glob is a hard configuration error: ty aborts with exit
// code 2 before checking any files.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ty failed
Cause: error[invalid-glob]: Invalid include pattern
--> pyproject.toml:6:12
|
5 | [[tool.ty.overrides]]
6 | include = ["tests/[invalid"] # Invalid glob: unclosed bracket
| ^^^^^^^^^^^^^^^^ unclosed character class; missing ']'
7 | [tool.ty.overrides.rules]
8 | division-by-zero = "warn"
|
"###);
Ok(())
}
/// ty warns about invalid glob patterns in override exclude patterns
#[test]
fn overrides_invalid_exclude_glob() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = ["tests/**"]
exclude = ["***/invalid"] # Invalid glob: triple asterisk
[tool.ty.overrides.rules]
division-by-zero = "warn"
"#,
),
(
"test.py",
r#"
y = 4 / 0
"#,
),
])?;
// Like an invalid `include`, a malformed `exclude` glob aborts the run
// with exit code 2 before any files are checked.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ty failed
Cause: error[invalid-glob]: Invalid exclude pattern
--> pyproject.toml:7:12
|
5 | [[tool.ty.overrides]]
6 | include = ["tests/**"]
7 | exclude = ["***/invalid"] # Invalid glob: triple asterisk
| ^^^^^^^^^^^^^ Too many stars at position 1
8 | [tool.ty.overrides.rules]
9 | division-by-zero = "warn"
|
"###);
Ok(())
}
/// ty warns when an overrides section has neither include nor exclude
#[test]
fn overrides_missing_include_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
# Missing both include and exclude - should warn
[tool.ty.overrides.rules]
division-by-zero = "warn"
"#,
),
(
"test.py",
r#"
y = 4 / 0
"#,
),
])?;
// The unrestricted section still applies (the diagnostic is demoted to a
// warning), but ty flags the section itself as unnecessary.
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning[unnecessary-overrides-section]: Unnecessary `overrides` section
--> pyproject.toml:5:1
|
3 | division-by-zero = "error"
4 |
5 | [[tool.ty.overrides]]
| ^^^^^^^^^^^^^^^^^^^^^ This overrides section applies to all files
6 | # Missing both include and exclude - should warn
7 | [tool.ty.overrides.rules]
|
info: It has no `include` or `exclude` option restricting the files
info: Restrict the files by adding a pattern to `include` or `exclude`...
info: or remove the `[[overrides]]` section and merge the configuration into the root `[rules]` table if the configuration should apply to all files
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// ty warns when an overrides section has an empty include array
#[test]
fn overrides_empty_include() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = [] # Empty include - won't match any files
[tool.ty.overrides.rules]
division-by-zero = "warn"
"#,
),
(
"test.py",
r#"
y = 4 / 0
"#,
),
])?;
// The empty `include` matches nothing, so the override never applies:
// `test.py` keeps the global `error` severity and ty warns about the
// pointless `include = []`.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
warning[empty-include]: Empty include matches no files
--> pyproject.toml:6:11
|
5 | [[tool.ty.overrides]]
6 | include = [] # Empty include - won't match any files
| ^^ This `include` list is empty
7 | [tool.ty.overrides.rules]
8 | division-by-zero = "warn"
|
info: Remove the `include` option to match all files or add a pattern to match specific files
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// ty warns when an overrides section has no actual overrides
#[test]
fn overrides_no_actual_overrides() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = ["*.py"] # Has patterns but no rule overrides
# Missing [tool.ty.overrides.rules] section entirely
"#,
),
(
"test.py",
r#"
y = 4 / 0
"#,
),
])?;
// Without a `rules` table the section changes nothing: ty warns about
// the useless section and the global `error` severity still applies.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
warning[useless-overrides-section]: Useless `overrides` section
--> pyproject.toml:5:1
|
3 | division-by-zero = "error"
4 |
5 | [[tool.ty.overrides]]
| ^^^^^^^^^^^^^^^^^^^^^ This overrides section configures no rules
6 | include = ["*.py"] # Has patterns but no rule overrides
7 | # Missing [tool.ty.overrides.rules] section entirely
|
info: It has no `rules` table
info: Add a `[overrides.rules]` table...
info: or remove the `[[overrides]]` section if there's nothing to override
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> test.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// ty warns about unknown rules specified in an overrides section
#[test]
fn overrides_unknown_rules() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.rules]
division-by-zero = "error"
[[tool.ty.overrides]]
include = ["tests/**"]
[tool.ty.overrides.rules]
division-by-zero = "warn"
division-by-zer = "error" # incorrect rule name
"#,
),
(
"main.py",
r#"
y = 4 / 0
"#,
),
(
"tests/test_main.py",
r#"
y = 4 / 0
"#,
),
])?;
// The typo is reported (with a did-you-mean hint) against the overrides
// table, while the valid `division-by-zero` override still demotes the
// diagnostic in `tests/` to a warning.
assert_cmd_snapshot!(case.command(), @r#"
success: false
exit_code: 1
----- stdout -----
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
warning[unknown-rule]: Unknown rule `division-by-zer`. Did you mean `division-by-zero`?
--> pyproject.toml:10:1
|
8 | [tool.ty.overrides.rules]
9 | division-by-zero = "warn"
10 | division-by-zer = "error" # incorrect rule name
| ^^^^^^^^^^^^^^^
|
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/test_main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
Found 3 diagnostics
----- stderr -----
"#);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/analysis_options.rs | crates/ty/tests/cli/analysis_options.rs | use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
/// ty ignores `type: ignore` comments when setting `respect-type-ignore-comments=false`
#[test]
fn respect_type_ignore_comments_is_turned_off() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
y = a + 5 # type: ignore
"#,
)?;
// By default, the `type: ignore` comment suppresses the
// `unresolved-reference` diagnostic for `a`, so the check passes.
assert_cmd_snapshot!(case.command(), @r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
");
// With `respect-type-ignore-comments=false` the suppression comment is
// disregarded and the `unresolved-reference` error surfaces.
assert_cmd_snapshot!(case.command().arg("--config").arg("analysis.respect-type-ignore-comments=false"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `a` used when not defined
--> test.py:2:5
|
2 | y = a + 5 # type: ignore
| ^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/config_option.rs | crates/ty/tests/cli/config_option.rs | use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
/// `--config` (long) and `-c` (short) accept inline TOML `key=value` pairs.
#[test]
fn cli_config_args_toml_string_basic() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", r"print(x) # [unresolved-reference]")?;
// Long flag: with the rule demoted to a warning via `--warn`,
// `terminal.error-on-warning=true` still makes the run fail.
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").arg("--config").arg("terminal.error-on-warning=true"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
// Short flag: without `--warn`, the rule keeps its default `error` severity.
assert_cmd_snapshot!(case.command().arg("-c").arg("terminal.error-on-warning=true"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Inline `--config` arguments take precedence over settings from `ty.toml`.
#[test]
fn cli_config_args_overrides_ty_toml() -> anyhow::Result<()> {
let case = CliTest::with_files(vec![
(
"ty.toml",
r#"
[terminal]
error-on-warning = true
"#,
),
("test.py", r"print(x) # [unresolved-reference]"),
])?;
// Exit code of 1 due to the setting in `ty.toml`
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
// Exit code of 0 because the `ty.toml` setting is overwritten by `--config`
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").arg("--config").arg("terminal.error-on-warning=false"), @r###"
success: true
exit_code: 0
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// When the same key is passed to `--config` multiple times, the value
/// specified last wins (here `error-on-warning=false` overrides `=true`).
#[test]
fn cli_config_args_later_overrides_earlier() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").arg("--config").arg("terminal.error-on-warning=true").arg("--config").arg("terminal.error-on-warning=false"), @r###"
success: true
exit_code: 0
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Unknown keys in an inline `--config` argument abort with a TOML parse
/// error and exit code 2, listing the valid top-level fields.
#[test]
fn cli_config_args_invalid_option() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", r"print(1)")?;
assert_cmd_snapshot!(case.command().arg("--config").arg("bad-option=true"), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: TOML parse error at line 1, column 1
|
1 | bad-option=true
| ^^^^^^^^^^
unknown field `bad-option`, expected one of `environment`, `src`, `rules`, `terminal`, `analysis`, `overrides`
Usage: ty <COMMAND>
For more information, try '--help'.
");
Ok(())
}
/// An alternative configuration file can be selected via the `--config-file`
/// flag or the `TY_CONFIG_FILE` environment variable.
#[test]
fn config_file_override() -> anyhow::Result<()> {
// Set `error-on-warning` to true in the configuration file
// Explicitly set `--warn unresolved-reference` to ensure the rule warns instead of errors
let case = CliTest::with_files(vec![
("test.py", r"print(x) # [unresolved-reference]"),
(
"ty-override.toml",
r#"
[terminal]
error-on-warning = true
"#,
),
])?;
// Ensure flag works via CLI arg
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").arg("--config-file").arg("ty-override.toml"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
// Ensure the flag works via an environment variable
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").env("TY_CONFIG_FILE", "ty-override.toml"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/exit_code.rs | crates/ty/tests/cli/exit_code.rs | use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
// A run that produces only warnings (no errors) exits with code 0 by default.
#[test]
fn only_warnings() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference"), @r###"
success: true
exit_code: 0
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// Info-level diagnostics alone (here: `revealed-type`) do not affect the
// exit code; the run still succeeds with code 0.
#[test]
fn only_info() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
info[revealed-type]: Revealed type
--> test.py:3:13
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| ^ `Literal[1]`
|
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// `--error-on-warning` only promotes warnings to errors; info-level
// diagnostics are unaffected, so the run still exits with code 0.
#[test]
fn only_info_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: true
exit_code: 0
----- stdout -----
info[revealed-type]: Revealed type
--> test.py:3:13
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| ^ `Literal[1]`
|
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// With `--error-on-warning`, a warnings-only run fails with exit code 1.
#[test]
fn no_errors_but_error_on_warning_is_true() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning").arg("--warn").arg("unresolved-reference"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// `terminal.error-on-warning = true` set in `ty.toml` has the same effect
// as the CLI flag: a warnings-only run exits with code 1.
#[test]
fn no_errors_but_error_on_warning_is_enabled_in_configuration() -> anyhow::Result<()> {
let case = CliTest::with_files([
("test.py", r"print(x) # [unresolved-reference]"),
(
"ty.toml",
r#"
[terminal]
error-on-warning = true
"#,
),
])?;
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference"), @r###"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:1:7
|
1 | print(x) # [unresolved-reference]
| ^
|
info: rule `unresolved-reference` was selected on the command line
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// A mix of warnings and errors exits with code 1 (the error dominates).
#[test]
fn both_warnings_and_errors() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference"), @r"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:2:7
|
2 | print(x) # [unresolved-reference]
| ^
3 | print(4[1]) # [not-subscriptable]
|
info: rule `unresolved-reference` was selected on the command line
error[not-subscriptable]: Cannot subscript object of type `Literal[4]` with no `__getitem__` method
--> test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [not-subscriptable]
| ^^^^
|
info: rule `not-subscriptable` is enabled by default
Found 2 diagnostics
----- stderr -----
");
Ok(())
}
// With both warnings and errors present, adding `--error-on-warning`
// keeps the exit code at 1 (it is not escalated further).
#[test]
fn both_warnings_and_errors_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r###"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
"###,
)?;
assert_cmd_snapshot!(case.command().arg("--warn").arg("unresolved-reference").arg("--error-on-warning"), @r"
success: false
exit_code: 1
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:2:7
|
2 | print(x) # [unresolved-reference]
| ^
3 | print(4[1]) # [not-subscriptable]
|
info: rule `unresolved-reference` was selected on the command line
error[not-subscriptable]: Cannot subscript object of type `Literal[4]` with no `__getitem__` method
--> test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [not-subscriptable]
| ^^^^
|
info: rule `not-subscriptable` is enabled by default
Found 2 diagnostics
----- stderr -----
");
Ok(())
}
// `--exit-zero` forces exit code 0 even when errors are reported;
// diagnostics are still printed.
#[test]
fn exit_zero_is_true() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--exit-zero").arg("--warn").arg("unresolved-reference"), @r"
success: true
exit_code: 0
----- stdout -----
warning[unresolved-reference]: Name `x` used when not defined
--> test.py:2:7
|
2 | print(x) # [unresolved-reference]
| ^
3 | print(4[1]) # [not-subscriptable]
|
info: rule `unresolved-reference` was selected on the command line
error[not-subscriptable]: Cannot subscript object of type `Literal[4]` with no `__getitem__` method
--> test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [not-subscriptable]
| ^^^^
|
info: rule `not-subscriptable` is enabled by default
Found 2 diagnostics
----- stderr -----
");
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/main.rs | crates/ty/tests/cli/main.rs | mod analysis_options;
mod config_option;
mod exit_code;
mod file_selection;
mod python_environment;
mod rule_selection;
use anyhow::Context as _;
use insta::Settings;
use insta::internals::SettingsBindDropGuard;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use std::{
fmt::Write,
path::{Path, PathBuf},
process::Command,
};
use tempfile::TempDir;
// `--quiet`/`-q` suppresses the success message and per-diagnostic output
// (leaving only the summary line); a repeated `-qq` also suppresses the
// summary line.
#[test]
fn test_quiet_output() -> anyhow::Result<()> {
let case = CliTest::with_file("test.py", "x: int = 1")?;
// By default, we emit an "all checks passed" message
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
// With `quiet`, the message is not displayed
assert_cmd_snapshot!(case.command().arg("--quiet"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
let case = CliTest::with_file("test.py", "x: int = 'foo'")?;
// By default, we emit a diagnostic
assert_cmd_snapshot!(case.command(), @r#"
success: false
exit_code: 1
----- stdout -----
error[invalid-assignment]: Object of type `Literal["foo"]` is not assignable to `int`
--> test.py:1:4
|
1 | x: int = 'foo'
| --- ^^^^^ Incompatible value of type `Literal["foo"]`
| |
| Declared type
|
info: rule `invalid-assignment` is enabled by default
Found 1 diagnostic
----- stderr -----
"#);
// With `quiet`, the diagnostic is not displayed, just the summary message
assert_cmd_snapshot!(case.command().arg("--quiet"), @r"
success: false
exit_code: 1
----- stdout -----
Found 1 diagnostic
----- stderr -----
");
// We allow `-q`
assert_cmd_snapshot!(case.command().arg("-q"), @r"
success: false
exit_code: 1
----- stdout -----
Found 1 diagnostic
----- stderr -----
");
// And repeated `-qq`
assert_cmd_snapshot!(case.command().arg("-qq"), @r"
success: false
exit_code: 1
----- stdout -----
----- stderr -----
");
Ok(())
}
// Running `ty check ..` from a subdirectory resolves the path relative to
// the current working directory; diagnostics then use absolute-ish
// (`<temp_dir>/`-prefixed) paths.
#[test]
fn test_run_in_sub_directory() -> anyhow::Result<()> {
let case = CliTest::with_files([("test.py", "~"), ("subdir/nothing", "")])?;
assert_cmd_snapshot!(case.command().current_dir(case.root().join("subdir")).arg(".."), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Expected an expression
--> <temp_dir>/test.py:1:2
|
1 | ~
| ^
|
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
// Hidden files (dot-prefixed, e.g. `.test.py`) are checked by default.
#[test]
fn test_include_hidden_files_by_default() -> anyhow::Result<()> {
let case = CliTest::with_files([(".test.py", "~")])?;
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Expected an expression
--> .test.py:1:2
|
1 | ~
| ^
|
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
// `.ignore` files are respected by default; `--no-respect-ignore-files`
// (CLI) and `src.respect-ignore-files = false` (config) disable that, and
// the CLI flag takes precedence over the config value.
#[test]
fn test_respect_ignore_files() -> anyhow::Result<()> {
// First test that the default option works correctly (the file is skipped)
let case = CliTest::with_files([(".ignore", "test.py"), ("test.py", "~")])?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
WARN No python files found under the given path(s)
"###);
// Test that we can set to false via CLI
assert_cmd_snapshot!(case.command().arg("--no-respect-ignore-files"), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Expected an expression
--> test.py:1:2
|
1 | ~
| ^
|
Found 1 diagnostic
----- stderr -----
");
// Test that we can set to false via config file
case.write_file("ty.toml", "src.respect-ignore-files = false")?;
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Expected an expression
--> test.py:1:2
|
1 | ~
| ^
|
Found 1 diagnostic
----- stderr -----
");
// Ensure CLI takes precedence
case.write_file("ty.toml", "src.respect-ignore-files = true")?;
assert_cmd_snapshot!(case.command().arg("--no-respect-ignore-files"), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Expected an expression
--> test.py:1:2
|
1 | ~
| ^
|
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// Paths specified on the CLI are relative to the current working directory and not the project root.
///
/// We test this by adding an extra search path from the CLI to the libs directory when
/// running the CLI from the child directory (using relative paths).
///
/// Project layout:
/// ```
/// - libs
/// |- utils.py
/// - child
/// | - test.py
/// - pyproject.toml
/// ```
///
/// And the command is run in the `child` directory.
#[test]
fn cli_arguments_are_relative_to_the_current_directory() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python-version = "3.11"
"#,
),
(
"libs/utils.py",
r#"
def add(a: int, b: int) -> int:
return a + b
"#,
),
(
"child/test.py",
r#"
from utils import add
stat = add(10, 15)
"#,
),
])?;
// Make sure that the CLI fails when the `libs` directory is not in the search path.
assert_cmd_snapshot!(case.command().current_dir(case.root().join("child")), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `utils`
--> test.py:2:6
|
2 | from utils import add
| ^^^^^
3 |
4 | stat = add(10, 15)
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// `../libs` is resolved relative to the `child` CWD, so the import now succeeds.
assert_cmd_snapshot!(case.command().current_dir(case.root().join("child")).arg("--extra-search-path").arg("../libs"), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
Ok(())
}
/// Paths specified in a configuration file are relative to the project root.
///
/// We test this by adding `libs` (as a relative path) to the extra search path in the configuration and run
/// the CLI from a subdirectory.
///
/// Project layout:
/// ```
/// - libs
/// |- utils.py
/// - child
/// | - test.py
/// - pyproject.toml
/// ```
#[test]
fn paths_in_configuration_files_are_relative_to_the_project_root() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python-version = "3.11"
extra-paths = ["libs"]
"#,
),
(
"libs/utils.py",
r#"
def add(a: int, b: int) -> int:
return a + b
"#,
),
(
"child/test.py",
r#"
from utils import add
stat = add(10, 15)
"#,
),
])?;
// Even though the CWD is `child`, `extra-paths = ["libs"]` resolves against
// the project root, so the import succeeds.
assert_cmd_snapshot!(case.command().current_dir(case.root().join("child")), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
Ok(())
}
// User-level configuration (read from `XDG_CONFIG_HOME` on Unix / `APPDATA`
// on Windows) applies to the check, but project-level configuration takes
// precedence for any rule both of them configure.
#[test]
fn user_configuration() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"project/ty.toml",
r#"
[rules]
division-by-zero = "warn"
"#,
),
(
"project/main.py",
r#"
y = 4 / 0
for a in range(0, int(y)):
x = a
prin(x)
"#,
),
])?;
let config_directory = case.root().join("home/.config");
let config_env_var = if cfg!(windows) {
"APPDATA"
} else {
"XDG_CONFIG_HOME"
};
// Without a user-level config, only the project config applies.
assert_cmd_snapshot!(
case.command().current_dir(case.root().join("project")).env(config_env_var, config_directory.as_os_str()),
@r###"
success: false
exit_code: 1
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
3 |
4 | for a in range(0, int(y)):
|
info: rule `division-by-zero` was selected in the configuration file
error[unresolved-reference]: Name `prin` used when not defined
--> main.py:7:1
|
5 | x = a
6 |
7 | prin(x)
| ^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###
);
// The user-level configuration sets the severity for `unresolved-reference` to warn.
// Changing the level for `division-by-zero` has no effect, because the project-level configuration
// has higher precedence.
case.write_file(
config_directory.join("ty/ty.toml"),
r#"
[rules]
division-by-zero = "error"
unresolved-reference = "warn"
"#,
)?;
assert_cmd_snapshot!(
case.command().current_dir(case.root().join("project")).env(config_env_var, config_directory.as_os_str()),
@r###"
success: true
exit_code: 0
----- stdout -----
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
3 |
4 | for a in range(0, int(y)):
|
info: rule `division-by-zero` was selected in the configuration file
warning[unresolved-reference]: Name `prin` used when not defined
--> main.py:7:1
|
5 | x = a
6 |
7 | prin(x)
| ^^^^
|
info: rule `unresolved-reference` was selected in the configuration file
Found 2 diagnostics
----- stderr -----
"###
);
Ok(())
}
// Passing explicit paths to `ty check` limits checking to those paths;
// files outside the given paths (here `project/main.py`) produce no
// diagnostics.
#[test]
fn check_specific_paths() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"project/main.py",
r#"
y = 4 / 0 # error: division-by-zero
"#,
),
(
"project/tests/test_main.py",
r#"
import does_not_exist # error: unresolved-import
"#,
),
(
"project/other.py",
r#"
from main2 import z # error: unresolved-import
print(z)
"#,
),
])?;
assert_cmd_snapshot!(
case.command(),
@r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `main2`
--> project/other.py:2:6
|
2 | from main2 import z # error: unresolved-import
| ^^^^^
3 |
4 | print(z)
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `does_not_exist`
--> project/tests/test_main.py:2:8
|
2 | import does_not_exist # error: unresolved-import
| ^^^^^^^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
"###
);
// Now check only the `tests` and `other.py` files.
// We should no longer see any diagnostics related to `main.py`.
assert_cmd_snapshot!(
case.command().arg("project/tests").arg("project/other.py"),
@r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `main2`
--> project/other.py:2:6
|
2 | from main2 import z # error: unresolved-import
| ^^^^^
3 |
4 | print(z)
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `does_not_exist`
--> project/tests/test_main.py:2:8
|
2 | import does_not_exist # error: unresolved-import
| ^^^^^^^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
"###
);
Ok(())
}
// Non-existent paths passed on the CLI produce `io` diagnostics and exit
// code 2; Windows reports a different OS error message, so a snapshot
// filter normalizes it to the Unix wording.
#[test]
fn check_non_existing_path() -> anyhow::Result<()> {
let case = CliTest::with_files([])?;
let mut settings = insta::Settings::clone_current();
settings.add_filter(
&regex::escape("The system cannot find the path specified. (os error 3)"),
"No such file or directory (os error 2)",
);
let _s = settings.bind_to_scope();
assert_cmd_snapshot!(
case.command().arg("project/main.py").arg("project/tests"),
@r"
success: false
exit_code: 2
----- stdout -----
error[io]: `<temp_dir>/project/main.py`: No such file or directory (os error 2)
error[io]: `<temp_dir>/project/tests`: No such file or directory (os error 2)
Found 2 diagnostics
----- stderr -----
WARN No python files found under the given path(s)
"
);
Ok(())
}
// A file without a `.py` extension IS checked when it is passed explicitly
// on the command line.
#[test]
fn check_file_without_extension() -> anyhow::Result<()> {
let case = CliTest::with_file("main", "a = b")?;
assert_cmd_snapshot!(
case.command().arg("main"),
@r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `b` used when not defined
--> main:1:5
|
1 | a = b
| ^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"
);
Ok(())
}
// In contrast to the explicit-path case above checked in its own test,
// directory traversal does NOT pick up extension-less files: checking the
// containing directory finds no Python files.
#[test]
fn check_file_without_extension_in_subfolder() -> anyhow::Result<()> {
let case = CliTest::with_file("src/main", "a = b")?;
assert_cmd_snapshot!(
case.command().arg("src"),
@r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
WARN No python files found under the given path(s)
"
);
Ok(())
}
// `--output-format=concise` prints one line per diagnostic:
// `path:line:col: severity[rule] message`.
#[test]
fn concise_diagnostics() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--output-format=concise").arg("--warn").arg("unresolved-reference"), @r###"
success: false
exit_code: 1
----- stdout -----
test.py:2:7: warning[unresolved-reference] Name `x` used when not defined
test.py:3:7: error[not-subscriptable] Cannot subscript object of type `Literal[4]` with no `__getitem__` method
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
// `--output-format=gitlab` emits a GitLab Code Quality JSON report. The
// fingerprint values are content hashes, so a filter replaces them with a
// stable placeholder before snapshotting.
#[test]
fn gitlab_diagnostics() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
from typing_extensions import reveal_type
reveal_type('str'.lower()) # [revealed-type]
"#,
)?;
let mut settings = insta::Settings::clone_current();
settings.add_filter(r#"("fingerprint": ")[a-z0-9]+(",)"#, "$1[FINGERPRINT]$2");
let _s = settings.bind_to_scope();
assert_cmd_snapshot!(case.command().arg("--output-format=gitlab").arg("--warn").arg("unresolved-reference")
.env("CI_PROJECT_DIR", case.project_dir), @r#"
success: false
exit_code: 1
----- stdout -----
[
{
"check_name": "unresolved-reference",
"description": "unresolved-reference: Name `x` used when not defined",
"severity": "minor",
"fingerprint": "[FINGERPRINT]",
"location": {
"path": "test.py",
"positions": {
"begin": {
"line": 2,
"column": 7
},
"end": {
"line": 2,
"column": 8
}
}
}
},
{
"check_name": "not-subscriptable",
"description": "not-subscriptable: Cannot subscript object of type `Literal[4]` with no `__getitem__` method",
"severity": "major",
"fingerprint": "[FINGERPRINT]",
"location": {
"path": "test.py",
"positions": {
"begin": {
"line": 3,
"column": 7
},
"end": {
"line": 3,
"column": 11
}
}
}
},
{
"check_name": "revealed-type",
"description": "revealed-type: Revealed type: `LiteralString`",
"severity": "info",
"fingerprint": "[FINGERPRINT]",
"location": {
"path": "test.py",
"positions": {
"begin": {
"line": 5,
"column": 13
},
"end": {
"line": 5,
"column": 26
}
}
}
}
]
----- stderr -----
"#);
Ok(())
}
// `--output-format=github` emits GitHub Actions workflow commands
// (`::warning`/`::error`/`::notice`) with file/line/column annotations.
#[test]
fn github_diagnostics() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [not-subscriptable]
from typing_extensions import reveal_type
reveal_type('str'.lower()) # [revealed-type]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--output-format=github").arg("--warn").arg("unresolved-reference"), @r"
success: false
exit_code: 1
----- stdout -----
::warning title=ty (unresolved-reference),file=<temp_dir>/test.py,line=2,col=7,endLine=2,endColumn=8::test.py:2:7: unresolved-reference: Name `x` used when not defined
::error title=ty (not-subscriptable),file=<temp_dir>/test.py,line=3,col=7,endLine=3,endColumn=11::test.py:3:7: not-subscriptable: Cannot subscript object of type `Literal[4]` with no `__getitem__` method
::notice title=ty (revealed-type),file=<temp_dir>/test.py,line=5,col=13,endLine=5,endColumn=26::test.py:5:13: revealed-type: Revealed type: `LiteralString`
----- stderr -----
");
Ok(())
}
/// This tests the diagnostic format for revealed type.
///
/// This test was introduced because changes were made to
/// how the revealed type diagnostic was constructed and
/// formatted in "verbose" mode. But it required extra
/// logic to ensure the concise version didn't regress on
/// information content. So this test was introduced to
/// capture that.
#[test]
fn concise_revealed_type() -> anyhow::Result<()> {
let case = CliTest::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
x = "hello"
reveal_type(x)
"#,
)?;
// The concise line must still include the actual revealed type, not just
// the rule name.
assert_cmd_snapshot!(case.command().arg("--output-format=concise"), @r###"
success: true
exit_code: 0
----- stdout -----
test.py:5:13: info[revealed-type] Revealed type: `Literal["hello"]`
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// Stress test: a 2000-term `1 + 1 + …` chain must be inferred without
// crashing or blowing the stack; the overly long source line is truncated
// with an ellipsis in the rendered diagnostic.
#[test]
fn can_handle_large_binop_expressions() -> anyhow::Result<()> {
let mut content = String::new();
writeln!(
&mut content,
"
from typing_extensions import reveal_type
total = 1{plus_one_repeated}
reveal_type(total)
",
plus_one_repeated = " + 1".repeat(2000 - 1)
)?;
let case = CliTest::with_file("test.py", &ruff_python_trivia::textwrap::dedent(&content))?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
info[revealed-type]: Revealed type
--> test.py:4:13
|
2 | from typing_extensions import reveal_type
3 | total = 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +…
4 | reveal_type(total)
| ^^^^^ `Literal[2000]`
|
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Harness for CLI snapshot tests: a temporary project directory plus the
/// insta snapshot settings that normalize its path in output.
pub(crate) struct CliTest {
// Kept alive so the temporary directory is not deleted while the test runs.
_temp_dir: TempDir,
// Snapshot settings with path/timing filters; rebound via `with_filter`.
settings: Settings,
// Guard binding `settings` to the current scope; `Option` so it can be
// dropped and re-created when filters change (see `with_filter`).
settings_scope: Option<SettingsBindDropGuard>,
// Canonicalized root of the temporary project.
project_dir: PathBuf,
// Path to the `ty` binary under test (may be relocated by `with_ty_at`).
ty_binary_path: PathBuf,
}
impl CliTest {
/// Creates an empty test project in a fresh temporary directory and binds
/// insta filters that normalize the temp path, path separators, timings,
/// and Windows-specific error messages in snapshot output.
pub(crate) fn new() -> anyhow::Result<Self> {
let temp_dir = TempDir::new()?;
// Canonicalize the tempdir path because macos uses symlinks for tempdirs
// and that doesn't play well with our snapshot filtering.
// Simplify with dunce because otherwise we get UNC paths on Windows.
let project_dir = dunce::simplified(
&temp_dir
.path()
.canonicalize()
.context("Failed to canonicalize project path")?,
)
.to_path_buf();
let mut settings = insta::Settings::clone_current();
settings.add_filter(&tempdir_filter(&project_dir), "<temp_dir>/");
// Normalize Windows backslash separators to forward slashes.
settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
// Replace wall-clock durations (e.g. 0.003s) with a stable value.
// 0.003s
settings.add_filter(r"\d.\d\d\ds", "0.000s");
settings.add_filter(
r#"The system cannot find the file specified."#,
"No such file or directory",
);
let settings_scope = settings.bind_to_scope();
Ok(Self {
project_dir,
_temp_dir: temp_dir,
settings,
settings_scope: Some(settings_scope),
ty_binary_path: get_cargo_bin("ty"),
})
}
/// Creates a test project pre-populated with the given `(path, content)` files.
pub(crate) fn with_files<'a>(
files: impl IntoIterator<Item = (&'a str, &'a str)>,
) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_files(files)?;
Ok(case)
}
/// Creates a test project containing a single file.
pub(crate) fn with_file(path: impl AsRef<Path>, content: &str) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_file(path, content)?;
Ok(case)
}
/// Writes each `(path, content)` pair into the project directory.
pub(crate) fn write_files<'a>(
&self,
files: impl IntoIterator<Item = (&'a str, &'a str)>,
) -> anyhow::Result<()> {
for (path, content) in files {
self.write_file(path, content)?;
}
Ok(())
}
/// Return [`Self`] with the ty binary copied to the specified path instead.
pub(crate) fn with_ty_at(mut self, dest_path: impl AsRef<Path>) -> anyhow::Result<Self> {
let dest_path = dest_path.as_ref();
let dest_path = self.project_dir.join(dest_path);
Self::ensure_parent_directory(&dest_path)?;
std::fs::copy(&self.ty_binary_path, &dest_path)
.with_context(|| format!("Failed to copy ty binary to `{}`", dest_path.display()))?;
self.ty_binary_path = dest_path;
Ok(self)
}
/// Add a filter to the settings and rebind them.
pub(crate) fn with_filter(mut self, pattern: &str, replacement: &str) -> Self {
self.settings.add_filter(pattern, replacement);
// Drop the old scope before binding a new one, otherwise the old scope is dropped _after_
// binding and assigning the new one, restoring the settings to their state before the old
// scope was bound.
drop(self.settings_scope.take());
self.settings_scope = Some(self.settings.bind_to_scope());
self
}
/// Creates all missing ancestor directories of `path`.
fn ensure_parent_directory(path: &Path) -> anyhow::Result<()> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("Failed to create directory `{}`", parent.display()))?;
}
Ok(())
}
/// Writes `content` (dedented, so tests can use indented raw strings) to
/// `path` relative to the project root, creating parent directories.
pub(crate) fn write_file(&self, path: impl AsRef<Path>, content: &str) -> anyhow::Result<()> {
let path = path.as_ref();
let path = self.project_dir.join(path);
Self::ensure_parent_directory(&path)?;
std::fs::write(&path, &*ruff_python_trivia::textwrap::dedent(content))
.with_context(|| format!("Failed to write file `{path}`", path = path.display()))?;
Ok(())
}
/// Creates a symlink at `link` pointing to `original`, both relative to
/// the project root (Unix only).
#[cfg(unix)]
pub(crate) fn write_symlink(
&self,
original: impl AsRef<Path>,
link: impl AsRef<Path>,
) -> anyhow::Result<()> {
let link = link.as_ref();
let link = self.project_dir.join(link);
let original = original.as_ref();
let original = self.project_dir.join(original);
Self::ensure_parent_directory(&link)?;
std::os::unix::fs::symlink(original, &link)
.with_context(|| format!("Failed to write symlink `{link}`", link = link.display()))?;
Ok(())
}
/// Returns the project root directory.
pub(crate) fn root(&self) -> &Path {
&self.project_dir
}
/// Builds a `ty check` command rooted at the project directory with a
/// cleared environment; tests chain additional args/env onto it.
pub(crate) fn command(&self) -> Command {
let mut command = Command::new(&self.ty_binary_path);
command.current_dir(&self.project_dir).arg("check");
// Unset all environment variables because they can affect test behavior.
command.env_clear();
command
}
}
/// Builds a regex pattern matching the given temp-dir path followed by an
/// optional trailing separator (`\` or `/`), for insta snapshot filtering.
fn tempdir_filter(path: &Path) -> String {
    let mut pattern = regex::escape(path.to_str().unwrap());
    pattern.push_str(r"\\?/?");
    pattern
}
/// Returns the platform-specific `site-packages` path fragment as a regex
/// pattern: `Lib/site-packages` on Windows, `lib/python<version>/site-packages`
/// elsewhere (with the version regex-escaped).
fn site_packages_filter(python_version: &str) -> String {
    match cfg!(windows) {
        true => String::from("Lib/site-packages"),
        false => {
            let version = regex::escape(python_version);
            format!("lib/python{version}/site-packages")
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/python_environment.rs | crates/ty/tests/cli/python_environment.rs | use insta_cmd::assert_cmd_snapshot;
use ruff_python_ast::PythonVersion;
use crate::{CliTest, site_packages_filter};
/// Specifying an option on the CLI should take precedence over the same setting in the
/// project's configuration. Here, this is tested for the Python version.
#[test]
fn config_override_python_version() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python-version = "3.11"
"#,
),
(
"test.py",
r#"
import sys
# Access `sys.last_exc` that was only added in Python 3.12
print(sys.last_exc)
"#,
),
])?;
// Under the configured 3.11, `sys.last_exc` does not exist; the diagnostic
// points at the `python-version` setting in `pyproject.toml`.
assert_cmd_snapshot!(case.command(), @r#"
success: false
exit_code: 1
----- stdout -----
error[unresolved-attribute]: Module `sys` has no member `last_exc`
--> test.py:5:7
|
4 | # Access `sys.last_exc` that was only added in Python 3.12
5 | print(sys.last_exc)
| ^^^^^^^^^^^^
|
info: The member may be available on other Python versions or platforms
info: Python 3.11 was assumed when resolving the `last_exc` attribute
--> pyproject.toml:3:18
|
2 | [tool.ty.environment]
3 | python-version = "3.11"
| ^^^^^^ Python version configuration
|
info: rule `unresolved-attribute` is enabled by default
Found 1 diagnostic
----- stderr -----
"#);
// Overriding the version to 3.12 on the CLI makes the attribute resolve.
assert_cmd_snapshot!(case.command().arg("--python-version").arg("3.12"), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
Ok(())
}
/// Same as above, but for the Python platform.
#[test]
fn config_override_python_platform() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python-platform = "linux"
"#,
),
(
"test.py",
r#"
import sys
from typing_extensions import reveal_type
reveal_type(sys.platform)
"#,
),
])?;
// With the configured platform, `sys.platform` narrows to the literal.
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
info[revealed-type]: Revealed type
--> test.py:5:13
|
3 | from typing_extensions import reveal_type
4 |
5 | reveal_type(sys.platform)
| ^^^^^^^^^^^^ `Literal["linux"]`
|
Found 1 diagnostic
----- stderr -----
"###);
// `--python-platform all` on the CLI overrides the config; the type
// widens to `LiteralString`.
assert_cmd_snapshot!(case.command().arg("--python-platform").arg("all"), @r###"
success: true
exit_code: 0
----- stdout -----
info[revealed-type]: Revealed type
--> test.py:5:13
|
3 | from typing_extensions import reveal_type
4 |
5 | reveal_type(sys.platform)
| ^^^^^^^^^^^^ `LiteralString`
|
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
// When a diagnostic depends on the configured Python version, the output
// annotates WHERE that version was set: a sub-diagnostic pointing into the
// config file, or a plain info line when it came from the command line.
#[test]
fn config_file_annotation_showing_where_python_version_set_typing_error() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python-version = "3.8"
"#,
),
(
"test.py",
r#"
aiter
"#,
),
])?;
assert_cmd_snapshot!(case.command(), @r#"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:2:1
|
2 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.8 was assumed when resolving types
--> pyproject.toml:3:18
|
2 | [tool.ty.environment]
3 | python-version = "3.8"
| ^^^^^ Python version configuration
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"#);
// A CLI-provided version is reported inline instead of via a config span.
assert_cmd_snapshot!(case.command().arg("--python-version=3.9"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:2:1
|
2 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.9 was assumed when resolving types because it was specified on the command line
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// If `.` and `./src` are both registered as first-party search paths,
/// the `./src` directory should take precedence for module resolution,
/// because it is relative to `.`.
#[test]
fn src_subdirectory_takes_precedence_over_repo_root() -> anyhow::Result<()> {
// Only one file exists: a package under `src/` importing a missing submodule.
let case = CliTest::with_files([(
"src/package/__init__.py",
"from . import nonexistent_submodule",
)])?;
// If `./src` didn't take priority over `.` here, we would report
// "Module `src.package` has no member `nonexistent_submodule`"
// instead of "Module `package` has no member `nonexistent_submodule`".
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Module `package` has no member `nonexistent_submodule`
--> src/package/__init__.py:1:15
|
1 | from . import nonexistent_submodule
| ^^^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// This tests that, even if no Python *version* has been specified on the CLI or in a config file,
/// ty is still able to infer the Python version from a `--python` argument on the CLI,
/// *even if* the `--python` argument points to a system installation.
///
/// We currently cannot infer the Python version from a system installation on Windows:
/// on Windows, we can only infer the Python version from a virtual environment.
/// This is because we use the layout of the Python installation to infer the Python version:
/// on Unix, the `site-packages` directory of an installation will be located at
/// `<sys.prefix>/lib/pythonX.Y/site-packages`. On Windows, however, the `site-packages`
/// directory will be located at `<sys.prefix>/Lib/site-packages`, which doesn't give us the
/// same information.
#[cfg(not(windows))]
#[test]
fn python_version_inferred_from_system_installation() -> anyhow::Result<()> {
// CPython-style layout: version inferred from `lib/python3.8/site-packages`.
let cpython_case = CliTest::with_files([
("pythons/Python3.8/bin/python", ""),
("pythons/Python3.8/lib/python3.8/site-packages/foo.py", ""),
("test.py", "aiter"),
])?;
assert_cmd_snapshot!(cpython_case.command().arg("--python").arg("pythons/Python3.8/bin/python"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:1:1
|
1 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.8 was assumed when resolving types because of the layout of your Python installation
info: The primary `site-packages` directory of your installation was found at `lib/python3.8/site-packages/`
info: No Python version was specified on the command line or in a configuration file
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// PyPy-style layout: the directory is named `pypy3.8` rather than `python3.8`,
// but the version must still be inferred as 3.8.
let pypy_case = CliTest::with_files([
("pythons/pypy3.8/bin/python", ""),
("pythons/pypy3.8/lib/pypy3.8/site-packages/foo.py", ""),
("test.py", "aiter"),
])?;
assert_cmd_snapshot!(pypy_case.command().arg("--python").arg("pythons/pypy3.8/bin/python"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:1:1
|
1 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.8 was assumed when resolving types because of the layout of your Python installation
info: The primary `site-packages` directory of your installation was found at `lib/pypy3.8/site-packages/`
info: No Python version was specified on the command line or in a configuration file
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Free-threaded build: the `t` suffix in `python3.13t` must not prevent
// the version from being parsed as 3.13.
let free_threaded_case = CliTest::with_files([
("pythons/Python3.13t/bin/python", ""),
(
"pythons/Python3.13t/lib/python3.13t/site-packages/foo.py",
"",
),
("test.py", "import string.templatelib"),
])?;
assert_cmd_snapshot!(free_threaded_case.command().arg("--python").arg("pythons/Python3.13t/bin/python"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `string.templatelib`
--> test.py:1:8
|
1 | import string.templatelib
| ^^^^^^^^^^^^^^^^^^
|
info: The stdlib module `string.templatelib` is only available on Python 3.14+
info: Python 3.13 was assumed when resolving modules because of the layout of your Python installation
info: The primary `site-packages` directory of your installation was found at `lib/python3.13t/site-packages/`
info: No Python version was specified on the command line or in a configuration file
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// This attempts to simulate the tangled web of symlinks that a homebrew install has
/// which can easily confuse us if we're ever told to use it.
///
/// The main thing this is regression-testing is a panic in one *extremely* specific case
/// that you have to try really hard to hit (but vscode, hilariously, did hit).
#[cfg(unix)]
#[test]
fn python_argument_trapped_in_a_symlink_factory() -> anyhow::Result<()> {
let case = CliTest::with_files([
// This is the real python binary.
(
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3.13",
"",
),
// There's a real site-packages here (although it's basically empty).
(
"opt/homebrew/Cellar/python@3.13/3.13.5/lib/python3.13/site-packages/foo.py",
"",
),
// There's also a real site-packages here (although it's basically empty).
("opt/homebrew/lib/python3.13/site-packages/bar.py", ""),
// This has the real stdlib, but the site-packages in this dir is a symlink.
(
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/abc.py",
"",
),
// It's important that this our faux-homebrew not be in the same dir as our working directory
// to reproduce the crash, don't ask me why.
(
"project/test.py",
"\
import foo
import bar
import colorama
",
),
])?;
// many python symlinks pointing to a single real python (the longest path)
case.write_symlink(
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3.13",
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3",
)?;
case.write_symlink(
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3",
"opt/homebrew/Cellar/python@3.13/3.13.5/bin/python3",
)?;
case.write_symlink(
"opt/homebrew/Cellar/python@3.13/3.13.5/bin/python3",
"opt/homebrew/bin/python3",
)?;
// the "real" python's site-packages is a symlink to a different dir
case.write_symlink(
"opt/homebrew/Cellar/python@3.13/3.13.5/lib/python3.13/site-packages",
"opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/site-packages",
)?;
// Try all 4 pythons with absolute paths to our fauxbrew install
// 1) Top-level `opt/homebrew/bin/python3`: picks up the homebrew-level
// site-packages (so `bar` resolves), but not `foo`.
assert_cmd_snapshot!(case.command()
.current_dir(case.root().join("project"))
.arg("--python").arg(case.root().join("opt/homebrew/bin/python3")), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `foo`
--> test.py:1:8
|
1 | import foo
| ^^^
2 | import bar
3 | import colorama
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `colorama`
--> test.py:3:8
|
1 | import foo
2 | import bar
3 | import colorama
| ^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// 2) The Cellar-level `bin/python3`: uses the Cellar `lib/python3.13/site-packages`
// (so `foo` resolves), but not `bar`.
assert_cmd_snapshot!(case.command()
.current_dir(case.root().join("project"))
.arg("--python").arg(case.root().join("opt/homebrew/Cellar/python@3.13/3.13.5/bin/python3")), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `bar`
--> test.py:2:8
|
1 | import foo
2 | import bar
| ^^^
3 | import colorama
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `colorama`
--> test.py:3:8
|
1 | import foo
2 | import bar
3 | import colorama
| ^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// 3) The Frameworks `bin/python3` symlink: resolves to the Frameworks
// site-packages (`foo` still resolves via the symlinked site-packages).
assert_cmd_snapshot!(case.command()
.current_dir(case.root().join("project"))
.arg("--python").arg(case.root().join("opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3")), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `bar`
--> test.py:2:8
|
1 | import foo
2 | import bar
| ^^^
3 | import colorama
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `colorama`
--> test.py:3:8
|
1 | import foo
2 | import bar
3 | import colorama
| ^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// 4) The real binary `python3.13` itself: same resolution as case 3.
assert_cmd_snapshot!(case.command()
.current_dir(case.root().join("project"))
.arg("--python").arg(case.root().join("opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/bin/python3.13")), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `bar`
--> test.py:2:8
|
1 | import foo
2 | import bar
| ^^^
3 | import colorama
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
error[unresolved-import]: Cannot resolve imported module `colorama`
--> test.py:3:8
|
1 | import foo
2 | import bar
3 | import colorama
| ^^^^^^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/project (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/opt/homebrew/Cellar/python@3.13/3.13.5/Frameworks/Python.framework/Versions/3.13/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 2 diagnostics
----- stderr -----
");
Ok(())
}
/// On Unix systems, it's common for a Python installation at `.venv/bin/python` to only be a symlink
/// to a system Python installation. We must be careful not to resolve the symlink too soon!
/// If we do, we will incorrectly add the system installation's `site-packages` as a search path,
/// when we should be adding the virtual environment's `site-packages` directory as a search path instead.
#[cfg(unix)]
#[test]
fn python_argument_points_to_symlinked_executable() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"system-installation/lib/python3.13/site-packages/foo.py",
"",
),
("system-installation/bin/python", ""),
(
"strange-venv-location/lib/python3.13/site-packages/bar.py",
"",
),
(
"test.py",
"\
import foo
import bar",
),
])?;
// The venv's `bin/python` is only a symlink to the system installation's binary.
case.write_symlink(
"system-installation/bin/python",
"strange-venv-location/bin/python",
)?;
// Expected: only the venv's own site-packages (containing `bar`) is searched,
// so `foo` from the system installation must NOT resolve.
assert_cmd_snapshot!(case.command().arg("--python").arg("strange-venv-location/bin/python"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `foo`
--> test.py:1:8
|
1 | import foo
| ^^^
2 | import bar
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/strange-venv-location/lib/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// On Unix systems, a virtual environment can come with multiple `site-packages` directories:
/// one at `<sys.prefix>/lib/pythonX.Y/site-packages` and one at
/// `<sys.prefix>/lib64/pythonX.Y/site-packages`. According to [the stdlib docs], the `lib64`
/// is not *meant* to have any Python files in it (only C extensions and similar). Empirically,
/// however, it sometimes does indeed have Python files in it: popular tools such as poetry
/// appear to sometimes install Python packages into the `lib64` site-packages directory even
/// though they probably shouldn't. We therefore check for both a `lib64` and a `lib` directory,
/// and add them both as search paths if they both exist.
///
/// See:
/// - <https://github.com/astral-sh/ty/issues/1043>
/// - <https://github.com/astral-sh/ty/issues/257>.
///
/// [the stdlib docs]: https://docs.python.org/3/library/sys.html#sys.platlibdir
#[cfg(unix)]
#[test]
fn lib64_site_packages_directory_on_unix() -> anyhow::Result<()> {
// `foo` lives in `lib`, `bar` in `lib64`, and `baz` nowhere.
let case = CliTest::with_files([
(".venv/lib/python3.13/site-packages/foo.py", ""),
(".venv/lib64/python3.13/site-packages/bar.py", ""),
("test.py", "import foo, bar, baz"),
])?;
// Both site-packages dirs appear as search paths (3. lib, 4. lib64),
// so only `baz` fails to resolve.
assert_cmd_snapshot!(case.command().arg("--python").arg(".venv"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `baz`
--> test.py:1:18
|
1 | import foo, bar, baz
| ^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/ (first-party code)
info: 2. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: 3. <temp_dir>/.venv/lib/python3.13/site-packages (site-packages)
info: 4. <temp_dir>/.venv/lib64/python3.13/site-packages (site-packages)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// When there are many search paths, the unresolved-import diagnostic
/// truncates the list ("... and N more paths") and tells the user to re-run
/// with `-v`; in verbose mode the full list is shown.
#[test]
fn many_search_paths() -> anyhow::Result<()> {
let case = CliTest::with_files([
("extra1/foo1.py", ""),
("extra2/foo2.py", ""),
("extra3/foo3.py", ""),
("extra4/foo4.py", ""),
("extra5/foo5.py", ""),
("extra6/foo6.py", ""),
("test.py", "import foo1, baz"),
])?;
// Default output: only the first 5 paths are listed, the rest summarized.
assert_cmd_snapshot!(
case.command()
.arg("--python-platform").arg("linux")
.arg("--extra-search-path").arg("extra1")
.arg("--extra-search-path").arg("extra2")
.arg("--extra-search-path").arg("extra3")
.arg("--extra-search-path").arg("extra4")
.arg("--extra-search-path").arg("extra5")
.arg("--extra-search-path").arg("extra6"),
@r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `baz`
--> test.py:1:14
|
1 | import foo1, baz
| ^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/extra1 (extra search path specified on the CLI or in your config file)
info: 2. <temp_dir>/extra2 (extra search path specified on the CLI or in your config file)
info: 3. <temp_dir>/extra3 (extra search path specified on the CLI or in your config file)
info: 4. <temp_dir>/extra4 (extra search path specified on the CLI or in your config file)
info: 5. <temp_dir>/extra5 (extra search path specified on the CLI or in your config file)
info: ... and 3 more paths. Run with `-v` to see all paths.
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
");
// Shows all with `-v`
assert_cmd_snapshot!(
case.command()
.arg("--python-platform").arg("linux")
.arg("--extra-search-path").arg("extra1")
.arg("--extra-search-path").arg("extra2")
.arg("--extra-search-path").arg("extra3")
.arg("--extra-search-path").arg("extra4")
.arg("--extra-search-path").arg("extra5")
.arg("--extra-search-path").arg("extra6")
.arg("-v"),
@r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-import]: Cannot resolve imported module `baz`
--> test.py:1:14
|
1 | import foo1, baz
| ^^^
|
info: Searched in the following paths during module resolution:
info: 1. <temp_dir>/extra1 (extra search path specified on the CLI or in your config file)
info: 2. <temp_dir>/extra2 (extra search path specified on the CLI or in your config file)
info: 3. <temp_dir>/extra3 (extra search path specified on the CLI or in your config file)
info: 4. <temp_dir>/extra4 (extra search path specified on the CLI or in your config file)
info: 5. <temp_dir>/extra5 (extra search path specified on the CLI or in your config file)
info: 6. <temp_dir>/extra6 (extra search path specified on the CLI or in your config file)
info: 7. <temp_dir>/ (first-party code)
info: 8. vendored://stdlib (stdlib typeshed stubs vendored by ty)
info: make sure your Python environment is properly configured: https://docs.astral.sh/ty/modules/#python-environment
info: rule `unresolved-import` is enabled by default
Found 1 diagnostic
----- stderr -----
INFO Python version: Python 3.14, platform: linux
INFO Indexed 7 file(s) in 0.000s
");
Ok(())
}
/// When no Python version is configured anywhere, the version inferred from a
/// virtual environment's `pyvenv.cfg` is reported with an annotation that
/// points at the `version =` line of that file.
#[test]
fn pyvenv_cfg_file_annotation_showing_where_python_version_set() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python = "venv"
"#,
),
(
"venv/pyvenv.cfg",
r#"
version = 3.8
home = foo/bar/bin
"#,
),
// Executable and site-packages locations differ by platform.
if cfg!(target_os = "windows") {
("foo/bar/bin/python.exe", "")
} else {
("foo/bar/bin/python", "")
},
if cfg!(target_os = "windows") {
("venv/Lib/site-packages/foo.py", "")
} else {
("venv/lib/python3.8/site-packages/foo.py", "")
},
("test.py", "aiter"),
])?;
// The version annotation points at the `version = 3.8` line in pyvenv.cfg.
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:1:1
|
1 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.8 was assumed when resolving types because of your virtual environment
--> venv/pyvenv.cfg:2:11
|
2 | version = 3.8
| ^^^ Virtual environment metadata
3 | home = foo/bar/bin
|
info: No Python version was specified on the command line or in a configuration file
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// Same as the previous test, but the `version =` entry sits on the last line
/// of a `pyvenv.cfg` with no trailing newline; the file annotation must still
/// render correctly.
#[test]
fn pyvenv_cfg_file_annotation_no_trailing_newline() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[tool.ty.environment]
python = "venv"
"#,
),
(
"venv/pyvenv.cfg",
r#"home = foo/bar/bin
version = 3.8"#,
),
// Executable and site-packages locations differ by platform.
if cfg!(target_os = "windows") {
("foo/bar/bin/python.exe", "")
} else {
("foo/bar/bin/python", "")
},
if cfg!(target_os = "windows") {
("venv/Lib/site-packages/foo.py", "")
} else {
("venv/lib/python3.8/site-packages/foo.py", "")
},
("test.py", "aiter"),
])?;
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `aiter` used when not defined
--> test.py:1:1
|
1 | aiter
| ^^^^^
|
info: `aiter` was added as a builtin in Python 3.10
info: Python 3.8 was assumed when resolving types because of your virtual environment
--> venv/pyvenv.cfg:4:23
|
4 | version = 3.8
| ^^^ Virtual environment metadata
|
info: No Python version was specified on the command line or in a configuration file
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// A `match` statement is invalid syntax before Python 3.10. When the assumed
/// version comes from `requires-python` in `pyproject.toml`, the syntax-error
/// diagnostic should annotate that config line; when it comes from the CLI,
/// only a plain info line is shown.
#[test]
fn config_file_annotation_showing_where_python_version_set_syntax_error() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"pyproject.toml",
r#"
[project]
requires-python = ">=3.8"
"#,
),
(
"test.py",
r#"
match object():
case int():
pass
case _:
pass
"#,
),
])?;
// Version inferred from `requires-python`: annotation points at pyproject.toml.
assert_cmd_snapshot!(case.command(), @r#"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Cannot use `match` statement on Python 3.8 (syntax was added in Python 3.10)
--> test.py:2:1
|
2 | match object():
| ^^^^^
3 | case int():
4 | pass
|
info: Python 3.8 was assumed when parsing syntax
--> pyproject.toml:3:19
|
2 | [project]
3 | requires-python = ">=3.8"
| ^^^^^^^ Python version configuration
|
Found 1 diagnostic
----- stderr -----
"#);
// Version specified on the CLI: no file annotation, just an info line.
assert_cmd_snapshot!(case.command().arg("--python-version=3.9"), @r"
success: false
exit_code: 1
----- stdout -----
error[invalid-syntax]: Cannot use `match` statement on Python 3.9 (syntax was added in Python 3.10)
--> test.py:2:1
|
2 | match object():
| ^^^^^
3 | case int():
4 | pass
|
info: Python 3.9 was assumed when parsing syntax because it was specified on the command line
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
#[test]
fn python_cli_argument_virtual_environment() -> anyhow::Result<()> {
let path_to_executable = if cfg!(windows) {
"my-venv/Scripts/python.exe"
} else {
"my-venv/bin/python"
};
let other_venv_path = "my-venv/foo/some_other_file.txt";
let case = CliTest::with_files([
("test.py", ""),
(
if cfg!(windows) {
"my-venv/Lib/site-packages/foo.py"
} else {
"my-venv/lib/python3.13/site-packages/foo.py"
},
"",
),
(path_to_executable, ""),
(other_venv_path, ""),
])?;
// Passing a path to the installation works
assert_cmd_snapshot!(case.command().arg("--python").arg("my-venv"), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
// And so does passing a path to the executable inside the installation
assert_cmd_snapshot!(case.command().arg("--python").arg(path_to_executable), @r###"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
"###);
// But random other paths inside the installation are rejected
assert_cmd_snapshot!(case.command().arg("--python").arg(other_venv_path), @r###"
success: false
exit_code: 2
----- stdout -----
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty/tests/cli/file_selection.rs | crates/ty/tests/cli/file_selection.rs | use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
/// Test exclude CLI argument functionality.
///
/// `--exclude` filters files out of checking; it may be repeated to combine
/// patterns (directory prefixes like `tests/` and globs like `temp_*.py`).
#[test]
fn exclude_argument() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/test_main.py",
r#"
print(another_undefined_var) # error: unresolved-reference
"#,
),
(
"temp_file.py",
r#"
print(temp_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Test that exclude argument is recognized and works:
// `tests/` is skipped, so only src/main.py and temp_file.py are diagnosed.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("tests/"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `temp_undefined_var` used when not defined
--> temp_file.py:2:7
|
2 | print(temp_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
// Test multiple exclude patterns: only src/main.py remains.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("tests/").arg("--exclude").arg("temp_*.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test configuration file include functionality.
///
/// `[src] include` in `ty.toml` restricts checking to the listed paths;
/// entries may be directories or individual files.
#[test]
fn configuration_include() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/test_main.py",
r#"
print(another_undefined_var) # error: unresolved-reference
"#,
),
(
"other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Test include via configuration - should only check included files
case.write_file(
"ty.toml",
r#"
[src]
include = ["src"]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Test multiple include patterns via configuration:
// a directory ("src") and a single file ("other.py") may be mixed.
case.write_file(
"ty.toml",
r#"
[src]
include = ["src", "other.py"]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `other_undefined_var` used when not defined
--> other.py:2:7
|
2 | print(other_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// Test configuration file exclude functionality.
///
/// `[src] exclude` in `ty.toml` removes paths from checking; patterns may be
/// directory prefixes (`tests/`) or globs (`temp_*.py`).
#[test]
fn configuration_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/test_main.py",
r#"
print(another_undefined_var) # error: unresolved-reference
"#,
),
(
"temp_file.py",
r#"
print(temp_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Test exclude via configuration
case.write_file(
"ty.toml",
r#"
[src]
exclude = ["tests/"]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `temp_undefined_var` used when not defined
--> temp_file.py:2:7
|
2 | print(temp_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
// Test multiple exclude patterns via configuration: only src/main.py remains.
case.write_file(
"ty.toml",
r#"
[src]
exclude = ["tests/", "temp_*.py"]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test that exclude takes precedence over include in configuration:
/// a file matched by both `include` and `exclude` is NOT checked.
#[test]
fn exclude_precedence_over_include() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"src/test_helper.py",
r#"
print(helper_undefined_var) # error: unresolved-reference
"#,
),
(
"other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Include all src files but exclude test files - exclude should win
case.write_file(
"ty.toml",
r#"
[src]
include = ["src"]
exclude = ["**/test_*.py"]
"#,
)?;
// Only src/main.py is diagnosed: src/test_helper.py is excluded,
// other.py is never included.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test that CLI exclude overrides configuration include:
/// `--exclude` on the command line removes a path even when the config
/// file's `include` lists it.
#[test]
fn exclude_argument_precedence_include_argument() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/test_main.py",
r#"
print(another_undefined_var) # error: unresolved-reference
"#,
),
(
"other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Configuration includes all files, but CLI excludes tests
case.write_file(
"ty.toml",
r#"
[src]
include = ["src/", "tests/"]
"#,
)?;
// Only src/main.py is diagnosed: tests/ is excluded on the CLI.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("tests/"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test that default excludes can be removed using negated patterns
#[test]
fn remove_default_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"dist/generated.py",
r#"
print(another_undefined_var) # error: unresolved-reference
"#,
),
])?;
// By default, 'dist' directory should be excluded (see default excludes)
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Now override the default exclude by using a negated pattern to re-include 'dist'
case.write_file(
"ty.toml",
r#"
[src]
exclude = ["!**/dist/"]
"#,
)?;
// With the negation in place, dist/generated.py is checked again and
// both diagnostics are reported.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `another_undefined_var` used when not defined
--> dist/generated.py:2:7
|
2 | print(another_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// Test that configuration excludes can be removed via CLI negation
#[test]
fn cli_removes_config_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"build/output.py",
r#"
print(build_undefined_var) # error: unresolved-reference
"#,
),
])?;
// Configuration excludes the build directory
case.write_file(
"ty.toml",
r#"
[src]
exclude = ["build/"]
"#,
)?;
// Verify that build/ is excluded by configuration
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Now remove the configuration exclude via CLI negation: `!build/`
// re-includes what the config excluded, so both files are checked.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("!build/"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `build_undefined_var` used when not defined
--> build/output.py:2:7
|
2 | print(build_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
Ok(())
}
/// Test behavior when explicitly checking a path that matches an exclude pattern
#[test]
fn explicit_path_overrides_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/generated.py",
r#"
print(dist_undefined_var) # error: unresolved-reference
"#,
),
(
"dist/other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
exclude = ["tests/generated.py"]
"#,
),
])?;
// dist is excluded by default and `tests/generated` is excluded in the project, so only src/main.py should be checked
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Explicitly passing an excluded file on the command line should still
// check that file (explicit paths override excludes by default).
assert_cmd_snapshot!(case.command().arg("tests/generated.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `dist_undefined_var` used when not defined
--> tests/generated.py:2:7
|
2 | print(dist_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
// Explicitly checking the entire excluded directory should check all files in it
assert_cmd_snapshot!(case.command().arg("dist/"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `other_undefined_var` used when not defined
--> dist/other.py:2:7
|
2 | print(other_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test behavior when explicitly checking a path that matches an exclude pattern and `--force-exclude` is provided
#[test]
fn explicit_path_overrides_exclude_force_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/generated.py",
r#"
print(dist_undefined_var) # error: unresolved-reference
"#,
),
(
"dist/other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
exclude = ["tests/generated.py"]
"#,
),
])?;
// Explicitly checking a file in an excluded directory should still check that file
assert_cmd_snapshot!(case.command().arg("tests/generated.py").arg("src/main.py"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `dist_undefined_var` used when not defined
--> tests/generated.py:2:7
|
2 | print(dist_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// Except when `--force-exclude` is set: the excluded file is skipped
// even though it was passed explicitly on the command line.
assert_cmd_snapshot!(case.command().arg("tests/generated.py").arg("src/main.py").arg("--force-exclude"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
// Explicitly checking the entire excluded directory should check all files in it
assert_cmd_snapshot!(case.command().arg("dist/").arg("src/main.py"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `other_undefined_var` used when not defined
--> dist/other.py:2:7
|
2 | print(other_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// Except when using `--force-exclude`: the default `dist/` exclude
// applies again and only src/main.py is reported.
assert_cmd_snapshot!(case.command().arg("dist/").arg("src/main.py").arg("--force-exclude"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// Test that CLI excludes and configuration excludes are combined:
/// both the config's `tests/` exclude and the CLI's `my_dist/` exclude apply.
#[test]
fn cli_and_configuration_exclude() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"tests/generated.py",
r#"
print(dist_undefined_var) # error: unresolved-reference
"#,
),
(
"my_dist/other.py",
r#"
print(other_undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
exclude = ["tests/"]
"#,
),
])?;
// Without CLI arguments only the config exclude applies: tests/ is
// skipped but my_dist/ is still checked.
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `other_undefined_var` used when not defined
--> my_dist/other.py:2:7
|
2 | print(other_undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
");
// Adding `--exclude my_dist/` on the CLI combines with the config
// exclude rather than replacing it.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("my_dist/"), @r"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/main.py:2:7
|
2 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
");
Ok(())
}
/// Test that an invalid glob in `include` aborts with a rich diagnostic
/// pointing at the offending pattern in ty.toml.
#[test]
fn invalid_include_pattern() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
include = [
"src/**test/"
]
"#,
),
])?;
// The malformed glob (`**` followed by a literal) should cause ty to
// exit with a configuration error rather than running any checks.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ty failed
Cause: error[invalid-glob]: Invalid include pattern
--> ty.toml:4:5
|
2 | [src]
3 | include = [
4 | "src/**test/"
| ^^^^^^^^^^^^^ Too many stars at position 5
5 | ]
|
"###);
Ok(())
}
/// Same as `invalid_include_pattern`, but verifies the single-line
/// rendering of the error with `--output-format concise`.
#[test]
fn invalid_include_pattern_concise_output() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
include = [
"src/**test/"
]
"#,
),
])?;
// The invalid pattern aborts the run; concise output renders the error
// as a single `file:line:col` line instead of an annotated snippet.
assert_cmd_snapshot!(case.command().arg("--output-format").arg("concise"), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ty failed
Cause: ty.toml:4:5: error[invalid-glob] Invalid include pattern: Too many stars at position 5
"###);
Ok(())
}
/// Test that an `exclude` pattern escaping the project root (`..`) is
/// rejected with a configuration error.
#[test]
fn invalid_exclude_pattern() -> anyhow::Result<()> {
let case = CliTest::with_files([
(
"src/main.py",
r#"
print(undefined_var) # error: unresolved-reference
"#,
),
(
"ty.toml",
r#"
[src]
exclude = [
"../src"
]
"#,
),
])?;
// Parent-directory traversal is not allowed in exclude patterns, so ty
// should abort before checking any files.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ty failed
Cause: error[invalid-glob]: Invalid exclude pattern
--> ty.toml:4:5
|
2 | [src]
3 | exclude = [
4 | "../src"
| ^^^^^^^^ The parent directory operator (`..`) at position 1 is not allowed
5 | ]
|
"###);
Ok(())
}
/// Test that ty works correctly with Bazel's symlinked file structure
#[test]
#[cfg(unix)]
fn bazel_symlinked_files() -> anyhow::Result<()> {
let case = CliTest::with_files([
// Original source files in the project
(
"main.py",
r#"
import library
result = library.process_data()
print(undefined_var) # error: unresolved-reference
"#,
),
(
"library.py",
r#"
def process_data():
return missing_value # error: unresolved-reference
"#,
),
// Another source file that won't be symlinked
(
"other.py",
r#"
print(other_undefined) # error: unresolved-reference
"#,
),
])?;
// Create Bazel-style symlinks pointing to the actual source files
// Bazel typically creates symlinks in bazel-out/k8-fastbuild/bin/ that point to actual sources
std::fs::create_dir_all(case.project_dir.join("bazel-out/k8-fastbuild/bin"))?;
// Use absolute paths to ensure the symlinks work correctly
case.write_symlink(
case.project_dir.join("main.py"),
"bazel-out/k8-fastbuild/bin/main.py",
)?;
case.write_symlink(
case.project_dir.join("library.py"),
"bazel-out/k8-fastbuild/bin/library.py",
)?;
// Change to the bazel-out directory and run ty from there
// The symlinks should be followed and errors should be found
// (other.py was not symlinked, so it doesn't appear in the output)
assert_cmd_snapshot!(case.command().current_dir(case.project_dir.join("bazel-out/k8-fastbuild/bin")), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `missing_value` used when not defined
--> library.py:3:12
|
2 | def process_data():
3 | return missing_value # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> main.py:5:7
|
4 | result = library.process_data()
5 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 2 diagnostics
----- stderr -----
"###);
// Test that when checking a specific symlinked file from the bazel-out directory, it works correctly
assert_cmd_snapshot!(case.command().current_dir(case.project_dir.join("bazel-out/k8-fastbuild/bin")).arg("main.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> main.py:5:7
|
4 | result = library.process_data()
5 | print(undefined_var) # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
/// Test that exclude patterns match on symlink source names, not target names
#[test]
#[cfg(unix)]
fn exclude_symlink_source_not_target() -> anyhow::Result<()> {
let case = CliTest::with_files([
// Target files with generic names
(
"src/module.py",
r#"
def process():
return undefined_var # error: unresolved-reference
"#,
),
(
"src/utils.py",
r#"
def helper():
return missing_value # error: unresolved-reference
"#,
),
(
"regular.py",
r#"
print(regular_undefined) # error: unresolved-reference
"#,
),
])?;
// Create symlinks with names that differ from their targets
// This simulates build systems that rename files during symlinking
case.write_symlink("src/module.py", "generated_module.py")?;
case.write_symlink("src/utils.py", "generated_utils.py")?;
// Exclude pattern should match on the symlink name (generated_*), not the target name:
// the symlinks are skipped, but their targets under src/ are still checked.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("generated_*.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `regular_undefined` used when not defined
--> regular.py:2:7
|
2 | print(regular_undefined) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `undefined_var` used when not defined
--> src/module.py:3:12
|
2 | def process():
3 | return undefined_var # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `missing_value` used when not defined
--> src/utils.py:3:12
|
2 | def helper():
3 | return missing_value # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 3 diagnostics
----- stderr -----
"###);
// Exclude pattern on target path should not affect symlinks with different names:
// src/ is skipped, but the same modules are reported via their symlink paths.
assert_cmd_snapshot!(case.command().arg("--exclude").arg("src/*.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> generated_module.py:3:12
|
2 | def process():
3 | return undefined_var # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `missing_value` used when not defined
--> generated_utils.py:3:12
|
2 | def helper():
3 | return missing_value # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
error[unresolved-reference]: Name `regular_undefined` used when not defined
--> regular.py:2:7
|
2 | print(regular_undefined) # error: unresolved-reference
| ^^^^^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 3 diagnostics
----- stderr -----
"###);
// Test that explicitly passing a symlink always checks it, even if excluded
assert_cmd_snapshot!(case.command().arg("--exclude").arg("generated_*.py").arg("generated_module.py"), @r###"
success: false
exit_code: 1
----- stdout -----
error[unresolved-reference]: Name `undefined_var` used when not defined
--> generated_module.py:3:12
|
2 | def process():
3 | return undefined_var # error: unresolved-reference
| ^^^^^^^^^^^^^
|
info: rule `unresolved-reference` is enabled by default
Found 1 diagnostic
----- stderr -----
"###);
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/doc_highlights.rs | crates/ty_ide/src/doc_highlights.rs | use crate::goto::find_goto_target;
use crate::references::{ReferencesMode, references};
use crate::{Db, ReferenceTarget};
use ruff_db::files::File;
use ruff_text_size::TextSize;
use ty_python_semantic::SemanticModel;
/// Find every occurrence of the symbol at `offset` within `file`.
///
/// Implements LSP "document highlight" semantics: unlike find-references,
/// the search never leaves the current document. Returns `None` when no
/// symbol can be resolved at the cursor position.
pub fn document_highlights(
    db: &dyn Db,
    file: File,
    offset: TextSize,
) -> Option<Vec<ReferenceTarget>> {
    let parsed_module = ruff_db::parsed::parsed_module(db, file);
    let ast = parsed_module.load(db);
    let semantic_model = SemanticModel::new(db, file);

    // Resolve the symbol under the cursor; bail out if nothing is there.
    let target = find_goto_target(&semantic_model, &ast, offset)?;

    // `DocumentHighlights` mode restricts the reference search to `file`.
    references(db, file, &target, ReferencesMode::DocumentHighlights)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{CursorTest, IntoDiagnostic, cursor_test};
use insta::assert_snapshot;
use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span};
use ruff_db::files::FileRange;
use ruff_text_size::Ranged;
impl CursorTest {
/// Run `document_highlights` at the test cursor and render the results
/// as diagnostics for snapshot comparison. Returns a placeholder string
/// when no highlights are found.
fn document_highlights(&self) -> String {
let Some(highlight_results) =
document_highlights(&self.db, self.cursor.file, self.cursor.offset)
else {
return "No highlights found".to_string();
};
if highlight_results.is_empty() {
return "No highlights found".to_string();
}
self.render_diagnostics(highlight_results.into_iter().enumerate().map(
|(i, highlight_item)| -> HighlightResult {
HighlightResult {
index: i,
file_range: FileRange::new(highlight_item.file(), highlight_item.range()),
kind: highlight_item.kind(),
}
},
))
}
}
/// One rendered highlight: its position in the result list, the source
/// range it covers, and whether it is a read, write, or other reference.
struct HighlightResult {
index: usize,
file_range: FileRange,
kind: crate::ReferenceKind,
}
impl IntoDiagnostic for HighlightResult {
// Render the highlight as an `info`-level diagnostic labelled with its
// 1-based index and reference kind, annotated at the highlight's range.
fn into_diagnostic(self) -> Diagnostic {
let kind_str = match self.kind {
crate::ReferenceKind::Read => "Read",
crate::ReferenceKind::Write => "Write",
crate::ReferenceKind::Other => "Other",
};
let mut main = Diagnostic::new(
DiagnosticId::Lint(LintName::of("document_highlights")),
Severity::Info,
format!("Highlight {} ({})", self.index + 1, kind_str),
);
main.annotate(Annotation::primary(
Span::from(self.file_range.file()).with_range(self.file_range.range()),
));
main
}
}
#[test]
fn test_local_variable_highlights() {
let test = cursor_test(
"
def calculate_sum():
<CURSOR>value = 10
doubled = value * 2
result = value + doubled
return value
",
);
assert_snapshot!(test.document_highlights(), @r"
info[document_highlights]: Highlight 1 (Write)
--> main.py:3:5
|
2 | def calculate_sum():
3 | value = 10
| ^^^^^
4 | doubled = value * 2
5 | result = value + doubled
|
info[document_highlights]: Highlight 2 (Read)
--> main.py:4:15
|
2 | def calculate_sum():
3 | value = 10
4 | doubled = value * 2
| ^^^^^
5 | result = value + doubled
6 | return value
|
info[document_highlights]: Highlight 3 (Read)
--> main.py:5:14
|
3 | value = 10
4 | doubled = value * 2
5 | result = value + doubled
| ^^^^^
6 | return value
|
info[document_highlights]: Highlight 4 (Read)
--> main.py:6:12
|
4 | doubled = value * 2
5 | result = value + doubled
6 | return value
| ^^^^^
|
");
}
#[test]
fn test_parameter_highlights() {
let test = cursor_test(
"
def process_data(<CURSOR>data):
if data:
processed = data.upper()
return processed
return data
",
);
assert_snapshot!(test.document_highlights(), @r"
info[document_highlights]: Highlight 1 (Other)
--> main.py:2:18
|
2 | def process_data(data):
| ^^^^
3 | if data:
4 | processed = data.upper()
|
info[document_highlights]: Highlight 2 (Read)
--> main.py:3:8
|
2 | def process_data(data):
3 | if data:
| ^^^^
4 | processed = data.upper()
5 | return processed
|
info[document_highlights]: Highlight 3 (Read)
--> main.py:4:21
|
2 | def process_data(data):
3 | if data:
4 | processed = data.upper()
| ^^^^
5 | return processed
6 | return data
|
info[document_highlights]: Highlight 4 (Read)
--> main.py:6:12
|
4 | processed = data.upper()
5 | return processed
6 | return data
| ^^^^
|
");
}
#[test]
fn test_class_name_highlights() {
let test = cursor_test(
"
class <CURSOR>Calculator:
def __init__(self):
self.name = 'Calculator'
calc = Calculator()
",
);
assert_snapshot!(test.document_highlights(), @r"
info[document_highlights]: Highlight 1 (Other)
--> main.py:2:7
|
2 | class Calculator:
| ^^^^^^^^^^
3 | def __init__(self):
4 | self.name = 'Calculator'
|
info[document_highlights]: Highlight 2 (Read)
--> main.py:6:8
|
4 | self.name = 'Calculator'
5 |
6 | calc = Calculator()
| ^^^^^^^^^^
|
");
}
#[test]
fn test_no_highlights_for_unknown_symbol() {
let test = cursor_test(
"
def test():
# Cursor on a position with no symbol
<CURSOR>
",
);
assert_snapshot!(test.document_highlights(), @"No highlights found");
}
// TODO: Should only highlight the last use and the last declaration
#[test]
fn redeclarations() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
a: str = "test"
a: int = 10
print(a<CURSOR>)
"#,
)
.build();
assert_snapshot!(test.document_highlights(), @r#"
info[document_highlights]: Highlight 1 (Write)
--> main.py:2:1
|
2 | a: str = "test"
| ^
3 |
4 | a: int = 10
|
info[document_highlights]: Highlight 2 (Write)
--> main.py:4:1
|
2 | a: str = "test"
3 |
4 | a: int = 10
| ^
5 |
6 | print(a)
|
info[document_highlights]: Highlight 3 (Read)
--> main.py:6:7
|
4 | a: int = 10
5 |
6 | print(a)
| ^
|
"#);
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/docstring.rs | crates/ty_ide/src/docstring.rs | //! Docstring parsing utilities for language server features.
//!
//! This module provides functionality for extracting structured information from
//! Python docstrings, including parameter documentation for signature help.
//! Supports Google-style, NumPy-style, and reST/Sphinx-style docstrings.
//! There are no formal specifications for any of these formats, so the parsing
//! logic needs to be tolerant of variations.
use regex::Regex;
use ruff_python_trivia::{PythonWhitespace, leading_indentation};
use ruff_source_file::UniversalNewlines;
use std::collections::HashMap;
use std::sync::LazyLock;
use crate::MarkupKind;
// Static regex instances to avoid recompilation
// Google style: matches a section header line like "Args:" / "Parameters:"
// (case-insensitive).
static GOOGLE_SECTION_REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?i)^\s*(Args|Arguments|Parameters)\s*:\s*$")
.expect("Google section regex should be valid")
});
// Google style: one parameter entry, e.g. "name (str): description".
// Captures the (possibly starred) name, an optional parenthesized type,
// and the description.
static GOOGLE_PARAM_REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"^\s*(\*?\*?\w+)\s*(\(.*?\))?\s*:\s*(.+)")
.expect("Google parameter regex should be valid")
});
// NumPy style: the "Parameters" section header (case-insensitive),
// which is followed by a dashed underline on the next line.
static NUMPY_SECTION_REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?i)^\s*Parameters\s*$").expect("NumPy section regex should be valid")
});
// NumPy style: the dashed underline ("----") beneath a section header.
static NUMPY_UNDERLINE_REGEX: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"^\s*-+\s*$").expect("NumPy underline regex should be valid"));
// reST/Sphinx style: a ":param [type] name: description" field; the type
// is optional, captured separately from the name and description.
static REST_PARAM_REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"^\s*:param\s+(?:(\w+)\s+)?(\w+)\s*:\s*(.+)")
.expect("reST parameter regex should be valid")
});
/// A docstring whose raw text has not yet been interpreted or rendered.
///
/// Wrapping the raw string forces callers to pick an explicit rendering
/// mode (plaintext or Markdown) before displaying it.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Docstring(String);

impl Docstring {
    /// Wrap the raw string-literal contents of a docstring.
    pub fn new(raw: String) -> Self {
        Self(raw)
    }

    /// Render the docstring in the requested markup format.
    pub fn render(&self, kind: MarkupKind) -> String {
        match kind {
            MarkupKind::Markdown => self.render_markdown(),
            MarkupKind::PlainText => self.render_plaintext(),
        }
    }

    /// Render the docstring for plaintext display (PEP 257 whitespace
    /// normalization only).
    pub fn render_plaintext(&self) -> String {
        documentation_trim(&self.0)
    }

    /// Render the docstring for Markdown display: normalize whitespace
    /// first, then convert the (presumed reST) text to Markdown.
    pub fn render_markdown(&self) -> String {
        render_markdown(&documentation_trim(&self.0))
    }

    /// Extract per-parameter documentation from popular docstring formats.
    ///
    /// Google-, NumPy-, and reST-style sections are all scanned; later
    /// formats overwrite earlier ones on duplicate parameter names.
    pub fn parameter_documentation(&self) -> HashMap<String, String> {
        let mut docs = HashMap::new();
        docs.extend(extract_google_style_params(&self.0));
        docs.extend(extract_numpy_style_params(&self.0));
        docs.extend(extract_rest_style_params(&self.0));
        docs
    }
}
/// Normalizes tabs and trims a docstring as specified in PEP-0257
///
/// See: <https://peps.python.org/pep-0257/#handling-docstring-indentation>
///
/// The first line is trimmed fully (its indentation is ignored); every
/// other line has the minimum common indentation removed. Leading blank
/// lines are dropped and trailing whitespace is stripped throughout.
fn documentation_trim(docs: &str) -> String {
// First apply tab expansion as we don't want tabs in our output
// (python says tabs are equal to 8 spaces).
//
// We also trim off all trailing whitespace here to eliminate trailing newlines so we
// don't need to handle trailing blank lines later. We can't trim away leading
// whitespace yet, because we need to identify the first line and handle it specially.
let expanded = docs.trim_end().replace('\t', " ");
// Compute the minimum indention of all non-empty non-first lines
// and statistics about leading blank lines to help trim them later.
let mut min_indent = usize::MAX;
let mut leading_blank_lines = 0;
let mut is_first_line = true;
let mut found_non_blank_line = false;
for line_obj in expanded.universal_newlines() {
let line = line_obj.as_str();
let indent = leading_indentation(line);
if indent == line {
// Blank line (the whole line is whitespace)
if !found_non_blank_line {
leading_blank_lines += 1;
}
} else {
// Non-blank line
found_non_blank_line = true;
// First line doesn't affect min-indent
if !is_first_line {
min_indent = min_indent.min(indent.len());
}
}
is_first_line = false;
}
let mut output = String::new();
let mut lines = expanded.universal_newlines();
// If the first line is non-blank then we need to include it *fully* trimmed
// As its indentation is ignored (effectively treated as having min_indent).
if leading_blank_lines == 0 {
if let Some(first_line) = lines.next() {
output.push_str(first_line.as_str().trim_whitespace());
output.push('\n');
}
}
// For the rest of the lines remove the minimum indent (if possible) and trailing whitespace.
//
// We computed min_indent by only counting python whitespace, and all python whitespace
// is ascii, so we can just remove that many bytes from the front.
//
// Note: `min` with `line.len()` guards both short blank-ish lines and the
// case where no non-first line existed (min_indent still usize::MAX).
for line_obj in lines.skip(leading_blank_lines) {
let line = line_obj.as_str();
let trimmed_line = line[min_indent.min(line.len())..].trim_whitespace_end();
output.push_str(trimmed_line);
output.push('\n');
}
output
}
/// Given a presumed reStructuredText docstring, render it to GitHub Flavored Markdown.
///
/// This function assumes the input has had its whitespace normalized by `documentation_trim`,
/// so leading whitespace is always a space, and newlines are always `\n`.
///
/// The general approach here is:
///
/// * Preserve the docstring verbatim by default, ensuring indent/linewraps are preserved
/// * Escape problematic things where necessary (bare `__dunder__` => `\_\_dunder\_\_`)
/// * Introduce code fences where appropriate
///
/// The first rule is significant in ensuring various docstring idioms render clearly.
/// In particular ensuring things like this are faithfully rendered:
///
/// ```text
/// param1 -- a good parameter
/// param2 -- another good parameter
/// with longer docs
/// ```
///
/// If we didn't go out of our way to preserve the indentation and line-breaks, markdown would
/// constantly render inputs like that into abominations like:
///
/// ```html
/// <p>
/// param1 -- a good parameter param2 -- another good parameter
/// </p>
///
/// <code>
/// with longer docs
/// </code>
/// ```
fn render_markdown(docstring: &str) -> String {
// Here lies a monument to robust parsing and escaping:
// a codefence with SO MANY backticks that surely no one will ever accidentally
// break out of it, even if they're writing python documentation about markdown
// code fences and are showing off how you can use more than 3 backticks.
const FENCE: &str = "```````````";
// TODO: there is a convention that `singletick` is for items that can
// be looked up in-scope while ``multitick`` is for opaque inline code.
// While rendering this we should make note of all the `singletick` locations
// and (possibly in a higher up piece of logic) try to resolve the names for
// cross-linking. (Similar to `TypeDetails` in the type formatting code.)
let mut output = String::new();
let mut first_line = true;
let mut block_indent = 0;
let mut in_doctest = false;
let mut starting_literal = None;
let mut in_literal = false;
let mut in_any_code = false;
let mut temp_owned_line;
for untrimmed_line in docstring.lines() {
// We can assume leading whitespace has been normalized
let mut line = untrimmed_line.trim_start_matches(' ');
let line_indent = untrimmed_line.len() - line.len();
// First thing's first, add a newline to start the new line
if !first_line {
// If we're not in a codeblock, add trailing space to the line to authentically wrap it
// (Lines ending with two spaces tell markdown to preserve a linebreak)
if !in_any_code {
output.push_str(" ");
}
// Only push newlines if we're not scanning for a real line
if starting_literal.is_none() {
output.push('\n');
}
}
// If we're in a literal block and we find a non-empty dedented line, end the block
// TODO: we should remove all the trailing blank lines
// (Just pop all trailing `\n` from `output`?)
if in_literal && line_indent < block_indent && !line.is_empty() {
in_literal = false;
in_any_code = false;
block_indent = 0;
output.push_str(FENCE);
output.push('\n');
}
// We previously entered a literal block and we just found our first non-blank line
// So now we're actually in the literal block
if let Some(literal) = starting_literal
&& !line.is_empty()
{
starting_literal = None;
in_literal = true;
in_any_code = true;
block_indent = line_indent;
output.push('\n');
output.push_str(FENCE);
output.push_str(literal);
output.push('\n');
}
// If we're not in a codeblock and we see something that signals a doctest, start one
if !in_any_code && line.starts_with(">>>") {
block_indent = line_indent;
in_doctest = true;
in_any_code = true;
// TODO: is there something more specific? `pycon`?
output.push_str(FENCE);
output.push_str("python\n");
}
// If we're not in a codeblock and we see something that signals a literal block, start one
let parsed_lit = line
// first check for a line ending with `::`
.strip_suffix("::")
.map(|prefix| (prefix, None))
// if that fails, look for a line ending with `:: lang`
.or_else(|| {
let (prefix, lang) = line.rsplit_once(' ')?;
let prefix = prefix.trim_end().strip_suffix("::")?;
Some((prefix, Some(lang)))
});
if !in_any_code && let Some((without_lit, lang)) = parsed_lit {
let mut without_directive = without_lit;
let mut directive = None;
// Parse out a directive like `.. warning::`
if let Some((prefix, directive_str)) = without_lit.rsplit_once(' ')
&& let Some(without_directive_str) = prefix.strip_suffix("..")
{
directive = Some(directive_str);
without_directive = without_directive_str;
}
// Whether the `::` should become `:` or be erased
let include_colon = if let Some(character) = without_directive.chars().next_back() {
// If lang is set then we're either deleting the whole line or
// the special rendering below will add it itself
lang.is_none() && !character.is_whitespace()
} else {
// Delete whole line
false
};
if include_colon {
line = line.strip_suffix(":").unwrap();
} else {
line = without_directive.trim_end();
}
starting_literal = match directive {
// Special directives that should be plaintext
Some(
"attention" | "caution" | "danger" | "error" | "hint" | "important" | "note"
| "tip" | "warning" | "admonition" | "versionadded" | "version-added"
| "versionchanged" | "version-changed" | "version-deprecated" | "deprecated"
| "version-removed" | "versionremoved",
) => {
// Render the argument of things like `.. version-added:: 4.0`
let suffix = if let Some(lang) = lang {
format!(" *{lang}*")
} else {
String::new()
};
// We prepend without_directive here out of caution for preserving input.
// This is probably gibberish/invalid syntax? But it's a no-op in normal cases.
temp_owned_line =
format!("**{without_directive}{}:**{suffix}", directive.unwrap());
line = temp_owned_line.as_str();
None
}
// Things that just mean "it's code"
Some(
"code-block" | "sourcecode" | "code" | "testcode" | "testsetup" | "testcleanup",
) => lang.or(Some("python")),
// Unknown (python I guess?)
Some(_) => lang.or(Some("python")),
// default to python
None => lang.or(Some("python")),
};
}
// Add this line's indentation.
// We could subtract the block_indent here but in practice it's uglier
// TODO: should we not do this if the `line.is_empty()`? When would it matter?
for _ in 0..line_indent {
// If we're not in a codeblock use non-breaking spaces to preserve the indent
if !in_any_code {
// TODO: would the raw unicode codepoint be handled *better* or *worse*
// by various IDEs? VS Code handles this approach well, at least.
output.push_str(" ");
} else {
output.push(' ');
}
}
if !in_any_code {
// This line is plain text, so we need to escape things that are inert in reST
// but active syntax in markdown... but not if it's inside `inline code`.
// Inline-code syntax is shared by reST and markdown which is really convenient
// except we need to find and parse it anyway to do this escaping properly! :(
// For now we assume `inline code` does not span a line (I'm not even sure if can).
//
// Things that need to be escaped: underscores
//
// e.g. we want __init__ => \_\_init\_\_ but `__init__` => `__init__`
let escape = |input: &str| input.replace('_', "\\_");
let mut in_inline_code = false;
let mut first_chunk = true;
let mut opening_tick_count = 0;
let mut current_tick_count = 0;
for chunk in line.split('`') {
// First chunk is definitionally not in inline-code and so always plaintext
if first_chunk {
first_chunk = false;
output.push_str(&escape(chunk));
continue;
}
// Not in first chunk, emit the ` between the last chunk and this one
output.push('`');
current_tick_count += 1;
// If we're in an inline block and have enough close-ticks to terminate it, do so.
// TODO: we parse ``hello```there` as (hello)(there) which probably isn't correct
// (definitely not for markdown) but it's close enough for horse grenades in this
// MVP impl. Notably we're verbatime emitting all the `'s so as long as reST and
// markdown agree we're *fine*. The accuracy of this parsing only affects the
// accuracy of where we apply escaping (so we need to misparse and see escapables
// for any of this to matter).
if opening_tick_count > 0 && current_tick_count >= opening_tick_count {
opening_tick_count = 0;
current_tick_count = 0;
in_inline_code = false;
}
// If this chunk is completely empty we're just in a run of ticks, continue
if chunk.is_empty() {
continue;
}
// Ok the chunk is non-empty, our run of ticks is complete
if in_inline_code {
// The previous check for >= open_tick_count didn't trip, so these can't close
// and these ticks will be verbatim rendered in the content
current_tick_count = 0;
} else if current_tick_count > 0 {
// Ok we're now in inline code
opening_tick_count = current_tick_count;
current_tick_count = 0;
in_inline_code = true;
}
// Finally include the content either escaped or not
if in_inline_code {
output.push_str(chunk);
} else {
output.push_str(&escape(chunk));
}
}
// NOTE: explicitly not "flushing" the ticks here.
// We respect however the user closed their inline code.
} else if line.is_empty() {
if in_doctest {
// This is the end of a doctest
block_indent = 0;
in_any_code = false;
in_literal = false;
output.push_str(FENCE);
}
} else {
// Print the line verbatim, it's in code
output.push_str(line);
}
first_line = false;
}
// Flush codeblock
if in_any_code {
output.push('\n');
output.push_str(FENCE);
}
output
}
/// Extract parameter documentation from Google-style docstrings.
///
/// Scans for a section header matched by `GOOGLE_SECTION_REGEX` (e.g. `Args:`)
/// and then collects one map entry per parameter line matched by
/// `GOOGLE_PARAM_REGEX`, folding indented continuation lines into the
/// description. Returns a map from parameter name to its trimmed docs.
fn extract_google_style_params(docstring: &str) -> HashMap<String, String> {
    let mut param_docs = HashMap::new();
    // Whether we are currently inside the `Args:`-style section.
    let mut in_args_section = false;
    // The parameter whose description is currently being accumulated,
    // together with its accumulated text.
    let mut current_param: Option<String> = None;
    let mut current_doc = String::new();
    for line_obj in docstring.universal_newlines() {
        let line = line_obj.as_str();
        if GOOGLE_SECTION_REGEX.is_match(line) {
            in_args_section = true;
            continue;
        }
        if in_args_section {
            // Check if we hit another section (starts with a word followed by colon at line start)
            if !line.starts_with(' ') && !line.starts_with('\t') && line.contains(':') {
                if let Some(colon_pos) = line.find(':') {
                    let section_name = line[..colon_pos].trim();
                    // If this looks like another section, stop processing args
                    if !section_name.is_empty()
                        && section_name
                            .chars()
                            .all(|c| c.is_alphabetic() || c.is_whitespace())
                    {
                        // Check if this is a known section name
                        let known_sections = [
                            "Returns", "Return", "Raises", "Yields", "Yield", "Examples",
                            "Example", "Note", "Notes", "Warning", "Warnings",
                        ];
                        if known_sections.contains(&section_name) {
                            // Flush the in-progress parameter before leaving the section.
                            if let Some(param_name) = current_param.take() {
                                param_docs.insert(param_name, current_doc.trim().to_string());
                                current_doc.clear();
                            }
                            in_args_section = false;
                            continue;
                        }
                    }
                }
            }
            if let Some(captures) = GOOGLE_PARAM_REGEX.captures(line) {
                // Save previous parameter if exists
                if let Some(param_name) = current_param.take() {
                    param_docs.insert(param_name, current_doc.trim().to_string());
                    current_doc.clear();
                }
                // Start new parameter.
                // NOTE(review): assumes capture group 1 is the parameter name and
                // group 3 the first-line description — confirm against the
                // definition of `GOOGLE_PARAM_REGEX`.
                if let (Some(param), Some(desc)) = (captures.get(1), captures.get(3)) {
                    current_param = Some(param.as_str().to_string());
                    current_doc = desc.as_str().to_string();
                }
            } else if line.starts_with(' ') || line.starts_with('\t') {
                // This is a continuation of the current parameter documentation
                if current_param.is_some() {
                    if !current_doc.is_empty() {
                        current_doc.push('\n');
                    }
                    current_doc.push_str(line.trim());
                }
            } else {
                // This is a line that doesn't start with whitespace and isn't a parameter
                // It might be a section or other content, so stop processing args
                if let Some(param_name) = current_param.take() {
                    param_docs.insert(param_name, current_doc.trim().to_string());
                    current_doc.clear();
                }
                in_args_section = false;
            }
        }
    }
    // Don't forget the last parameter
    if let Some(param_name) = current_param {
        param_docs.insert(param_name, current_doc.trim().to_string());
    }
    param_docs
}
/// Calculate the indentation level of a line.
///
/// A simplification of Python's `expandtabs`: every tab in the leading
/// whitespace counts as 8 columns and every other leading whitespace
/// character counts as 1.
fn get_indentation_level(line: &str) -> usize {
    let mut level = 0;
    for ch in leading_indentation(line).chars() {
        level += if ch == '\t' { 8 } else { 1 };
    }
    level
}
/// Extract parameter documentation from NumPy-style docstrings.
///
/// NumPy sections consist of a header line followed by a dashed underline
/// (matched by `NUMPY_SECTION_REGEX` / `NUMPY_UNDERLINE_REGEX`), with
/// parameter lines and more-indented description lines underneath. The
/// parser records the indentation of the first parameter line and of the
/// first description line, and classifies subsequent lines by comparing
/// against those. Returns a map from parameter name to its trimmed docs.
fn extract_numpy_style_params(docstring: &str) -> HashMap<String, String> {
    let mut param_docs = HashMap::new();
    // Peekable so we can look one line ahead for section underlines.
    let mut lines = docstring
        .universal_newlines()
        .map(|line| line.as_str())
        .peekable();
    let mut in_params_section = false;
    // A section header only counts once its underline has been consumed.
    let mut found_underline = false;
    // The parameter whose description is currently being accumulated.
    let mut current_param: Option<String> = None;
    let mut current_doc = String::new();
    // Indentation of the first parameter line / first description line;
    // used as the expected indentation for the rest of the section.
    let mut base_param_indent: Option<usize> = None;
    let mut base_content_indent: Option<usize> = None;
    while let Some(line) = lines.next() {
        if NUMPY_SECTION_REGEX.is_match(line) {
            // Check if the next line is an underline
            if let Some(next_line) = lines.peek() {
                if NUMPY_UNDERLINE_REGEX.is_match(next_line) {
                    in_params_section = true;
                    found_underline = false;
                    base_param_indent = None;
                    base_content_indent = None;
                    continue;
                }
            }
        }
        if in_params_section && !found_underline {
            // Consume the dashed underline belonging to the section header.
            if NUMPY_UNDERLINE_REGEX.is_match(line) {
                found_underline = true;
                continue;
            }
        }
        if in_params_section && found_underline {
            let current_indent = get_indentation_level(line);
            let trimmed = line.trim();
            // Skip empty lines
            if trimmed.is_empty() {
                continue;
            }
            // Check if we hit another section
            if current_indent == 0 {
                if let Some(next_line) = lines.peek() {
                    if NUMPY_UNDERLINE_REGEX.is_match(next_line) {
                        // This is another section
                        if let Some(param_name) = current_param.take() {
                            param_docs.insert(param_name, current_doc.trim().to_string());
                            current_doc.clear();
                        }
                        in_params_section = false;
                        continue;
                    }
                }
            }
            // Determine if this could be a parameter line
            let could_be_param = if let Some(base_indent) = base_param_indent {
                // We've seen parameters before - check if this matches the expected parameter indentation
                current_indent == base_indent
            } else {
                // First potential parameter - check if it has reasonable indentation and content
                current_indent > 0
                    && (trimmed.contains(':')
                        || trimmed.chars().all(|c| c.is_alphanumeric() || c == '_'))
            };
            if could_be_param {
                // Check if this could be a section header by looking at the next line
                if let Some(next_line) = lines.peek() {
                    if NUMPY_UNDERLINE_REGEX.is_match(next_line) {
                        // This is a section header, not a parameter
                        if let Some(param_name) = current_param.take() {
                            param_docs.insert(param_name, current_doc.trim().to_string());
                            current_doc.clear();
                        }
                        in_params_section = false;
                        continue;
                    }
                }
                // Set base indentation levels on first parameter
                if base_param_indent.is_none() {
                    base_param_indent = Some(current_indent);
                }
                // Handle parameter with type annotation (param : type)
                if trimmed.contains(':') {
                    // Save previous parameter if exists
                    if let Some(param_name) = current_param.take() {
                        param_docs.insert(param_name, current_doc.trim().to_string());
                        current_doc.clear();
                    }
                    // Extract parameter name and description
                    let parts: Vec<&str> = trimmed.splitn(2, ':').collect();
                    if parts.len() == 2 {
                        let param_name = parts[0].trim();
                        // Extract just the parameter name (before any type info)
                        let param_name = param_name.split_whitespace().next().unwrap_or(param_name);
                        current_param = Some(param_name.to_string());
                        current_doc.clear(); // Description comes on following lines, not on this line
                    }
                } else {
                    // Handle parameter without type annotation
                    // Save previous parameter if exists
                    if let Some(param_name) = current_param.take() {
                        param_docs.insert(param_name, current_doc.trim().to_string());
                        current_doc.clear();
                    }
                    // This line is the parameter name
                    current_param = Some(trimmed.to_string());
                    current_doc.clear();
                }
            } else if current_param.is_some() {
                // Determine if this is content for the current parameter
                let is_content = if let Some(base_content) = base_content_indent {
                    // We've seen content before - check if this matches expected content indentation
                    current_indent >= base_content
                } else {
                    // First potential content line - should be more indented than parameter
                    if let Some(base_param) = base_param_indent {
                        current_indent > base_param
                    } else {
                        // Fallback: any indented content
                        current_indent > 0
                    }
                };
                if is_content {
                    // Set base content indentation on first content line
                    if base_content_indent.is_none() {
                        base_content_indent = Some(current_indent);
                    }
                    // This is a continuation of the current parameter documentation
                    if !current_doc.is_empty() {
                        current_doc.push('\n');
                    }
                    current_doc.push_str(trimmed);
                } else {
                    // This line doesn't match our expected indentation patterns
                    // Save current parameter and stop processing
                    if let Some(param_name) = current_param.take() {
                        param_docs.insert(param_name, current_doc.trim().to_string());
                        current_doc.clear();
                    }
                    in_params_section = false;
                }
            }
        }
    }
    // Don't forget the last parameter
    if let Some(param_name) = current_param {
        param_docs.insert(param_name, current_doc.trim().to_string());
    }
    param_docs
}
/// Extract parameter documentation from reST/Sphinx-style docstrings.
///
/// Collects `:param name: description` fields (matched by
/// `REST_PARAM_REGEX`), folding indented continuation lines into the
/// description. Returns a map from parameter name to its trimmed docs.
fn extract_rest_style_params(docstring: &str) -> HashMap<String, String> {
    let mut param_docs = HashMap::new();
    // The parameter whose description is currently being accumulated.
    let mut current_param: Option<String> = None;
    let mut current_doc = String::new();
    for line_obj in docstring.universal_newlines() {
        let line = line_obj.as_str();
        if let Some(captures) = REST_PARAM_REGEX.captures(line) {
            // Save previous parameter if exists
            if let Some(param_name) = current_param.take() {
                param_docs.insert(param_name, current_doc.trim().to_string());
                current_doc.clear();
            }
            // Extract parameter name and description.
            // NOTE(review): assumes capture group 2 is the name and group 3 the
            // description — confirm against the definition of `REST_PARAM_REGEX`.
            if let (Some(param_match), Some(desc_match)) = (captures.get(2), captures.get(3)) {
                current_param = Some(param_match.as_str().to_string());
                current_doc = desc_match.as_str().to_string();
            }
        } else if current_param.is_some() {
            let trimmed = line.trim();
            // Check if this is a new section - stop processing if we hit section headers
            if trimmed == "Parameters" || trimmed == "Args" || trimmed == "Arguments" {
                // Save current param and stop processing
                if let Some(param_name) = current_param.take() {
                    param_docs.insert(param_name, current_doc.trim().to_string());
                    current_doc.clear();
                }
                break;
            }
            // Check if this is another directive line starting with ':'
            if trimmed.starts_with(':') {
                // This is a new directive, save current param
                if let Some(param_name) = current_param.take() {
                    param_docs.insert(param_name, current_doc.trim().to_string());
                    current_doc.clear();
                }
                // Let the next iteration handle this directive
                continue;
            }
            // Check if this is a continuation line (indented).
            // NOTE(review): the prefix checked here is a single space — confirm
            // the intended indentation width for continuation lines.
            if line.starts_with(" ") && !trimmed.is_empty() {
                // This is a continuation line
                if !current_doc.is_empty() {
                    current_doc.push('\n');
                }
                current_doc.push_str(trimmed);
            } else if !trimmed.is_empty() && !line.starts_with(' ') && !line.starts_with('\t') {
                // This is a non-indented line - likely end of the current parameter
                if let Some(param_name) = current_param.take() {
                    param_docs.insert(param_name, current_doc.trim().to_string());
                    current_doc.clear();
                }
                break;
            }
        }
    }
    // Don't forget the last parameter
    if let Some(param_name) = current_param {
        param_docs.insert(param_name, current_doc.trim().to_string());
    }
    param_docs
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use super::*;
// A nice doctest that is surrounded by prose
#[test]
fn dunder_escape() {
let docstring = r#"
Here _this_ and ___that__ should be escaped
Here *this* and **that** should be untouched
Here `this` and ``that`` should be untouched
Here `_this_` and ``__that__`` should be untouched
Here `_this_` ``__that__`` should be untouched
`_this_too_should_be_untouched_`
Here `_this_```__that__`` should be untouched but this_is_escaped
Here ``_this_```__that__` should be untouched but this_is_escaped
Here `_this_ and _that_ should be escaped (but isn't)
Here _this_ and _that_` should be escaped
`Here _this_ and _that_ should be escaped (but isn't)
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/stub_mapping.rs | crates/ty_ide/src/stub_mapping.rs | use itertools::Either;
use ruff_db::system::SystemPathBuf;
use ty_python_semantic::{ResolvedDefinition, map_stub_definition};
use crate::cached_vendored_root;
/// Maps `ResolvedDefinitions` from stub files to corresponding definitions in source files.
///
/// This mapper is used to implement "Go To Definition" functionality that navigates from
/// stub file declarations to their actual implementations in source files. It also allows
/// other language server providers (like hover, completion, and signature help) to find
/// docstrings for functions that resolve to stubs.
pub(crate) struct StubMapper<'db> {
    /// The semantic database used to perform stub-to-source mapping.
    db: &'db dyn ty_python_semantic::Db,
    /// Result of `cached_vendored_root(db)`, computed once at construction so
    /// every `map_definition` call can reuse it. `None` presumably means no
    /// vendored root is available — TODO confirm against `cached_vendored_root`.
    cached_vendored_root: Option<SystemPathBuf>,
}
impl<'db> StubMapper<'db> {
    /// Creates a mapper, computing the vendored root once up front.
    pub(crate) fn new(db: &'db dyn ty_python_semantic::Db) -> Self {
        Self {
            db,
            cached_vendored_root: cached_vendored_root(db),
        }
    }

    /// Map a `ResolvedDefinition` from a stub file to corresponding definitions in source files.
    ///
    /// Yields the source-file definition(s) when the input lives in a stub and
    /// a counterpart exists; otherwise yields the input definition unchanged.
    pub(crate) fn map_definition(
        &self,
        def: ResolvedDefinition<'db>,
    ) -> impl Iterator<Item = ResolvedDefinition<'db>> {
        match map_stub_definition(self.db, &def, self.cached_vendored_root.as_deref()) {
            Some(mapped) => Either::Left(mapped.into_iter()),
            None => Either::Right(std::iter::once(def)),
        }
    }

    /// Map multiple `ResolvedDefinitions`, applying stub-to-source mapping to each.
    ///
    /// Equivalent to applying [`Self::map_definition`] to every element and
    /// concatenating the results.
    pub(crate) fn map_definitions(
        &self,
        defs: Vec<ResolvedDefinition<'db>>,
    ) -> Vec<ResolvedDefinition<'db>> {
        let mut mapped = Vec::with_capacity(defs.len());
        for def in defs {
            mapped.extend(self.map_definition(def));
        }
        mapped
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/all_symbols.rs | crates/ty_ide/src/all_symbols.rs | use ruff_db::files::File;
use ty_module_resolver::{Module, ModuleName, all_modules, resolve_real_shadowable_module};
use ty_project::Db;
use crate::{
SymbolKind,
symbols::{QueryPattern, SymbolInfo, symbols_for_file_global_only},
};
/// Get all symbols matching the query string.
///
/// Returns symbols from all files in the workspace and dependencies, filtered
/// by the query. Files are scanned in parallel; the result is sorted by
/// symbol name (falling back to the module name for module entries), then by
/// file path, so the output is deterministic.
pub fn all_symbols<'db>(
    db: &'db dyn Db,
    importing_from: File,
    query: &QueryPattern,
) -> Vec<AllSymbolInfo<'db>> {
    // A query that matches everything (e.g. an empty query) would force a
    // scan of every file just to return an unboundedly large result set;
    // bail out early instead.
    if query.will_match_everything() {
        return Vec::new();
    }
    let all_symbols_span = tracing::debug_span!("all_symbols");
    let _span = all_symbols_span.enter();
    // `typing_extensions` results are suppressed below unless the module is
    // importable from `importing_from` (stub files are always allowed).
    let typing_extensions = ModuleName::new_static("typing_extensions").unwrap();
    let is_typing_extensions_available = importing_from.is_stub(db)
        || resolve_real_shadowable_module(db, importing_from, &typing_extensions).is_some();
    // Shared accumulator for the worker tasks; locked per pushed symbol.
    let results = std::sync::Mutex::new(Vec::new());
    // Inner scope: the borrows of `results` handed to `rayon::scope` end
    // here, so `results.into_inner()` below is valid.
    {
        let modules = all_modules(db);
        let db = db.dyn_clone();
        let all_symbols_span = &all_symbols_span;
        let results = &results;
        let query = &query;
        rayon::scope(move |s| {
            // For each file, extract symbols and add them to results
            for module in modules {
                let db = db.dyn_clone();
                let Some(file) = module.file(&*db) else {
                    continue;
                };
                // By convention, modules starting with an underscore
                // are generally considered unexported. However, we
                // should consider first party modules fair game.
                //
                // Note that we apply this recursively. e.g.,
                // `numpy._core.multiarray` is considered private
                // because it's a child of `_core`.
                if module.name(&*db).components().any(|c| c.starts_with('_'))
                    && module
                        .search_path(&*db)
                        .is_none_or(|sp| !sp.is_first_party())
                {
                    continue;
                }
                // TODO: also make it available in `TYPE_CHECKING` blocks
                // (we'd need https://github.com/astral-sh/ty/issues/1553 to do this well)
                if !is_typing_extensions_available && module.name(&*db) == &typing_extensions {
                    continue;
                }
                s.spawn(move |_| {
                    let symbols_for_file_span = tracing::debug_span!(parent: all_symbols_span, "symbols_for_file_global_only", ?file);
                    let _entered = symbols_for_file_span.entered();
                    // A module whose own name matches the query is reported
                    // with `symbol: None`, meaning "the module itself".
                    if query.is_match_symbol_name(module.name(&*db)) {
                        results.lock().unwrap().push(AllSymbolInfo {
                            symbol: None,
                            module,
                            file,
                        });
                    }
                    for (_, symbol) in symbols_for_file_global_only(&*db, file).search(query) {
                        // It seems like we could do better here than
                        // locking `results` for every single symbol,
                        // but this works pretty well as it is.
                        results.lock().unwrap().push(AllSymbolInfo {
                            symbol: Some(symbol.to_owned()),
                            module,
                            file,
                        });
                    }
                });
            }
        });
    }
    let mut results = results.into_inner().unwrap();
    // Sort for deterministic output: by name (module name for module
    // entries), then by file path to break ties.
    results.sort_by(|s1, s2| {
        let key1 = (
            s1.name_in_file()
                .unwrap_or_else(|| s1.module().name(db).as_str()),
            s1.file.path(db).as_str(),
        );
        let key2 = (
            s2.name_in_file()
                .unwrap_or_else(|| s2.module().name(db).as_str()),
            s2.file.path(db).as_str(),
        );
        key1.cmp(&key2)
    });
    results
}
/// A symbol found in the workspace and dependencies, including the
/// file it was found in.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AllSymbolInfo<'db> {
    /// The symbol information.
    ///
    /// When absent, this implies the symbol is the module itself.
    /// Stored owned (`'static`) since results are collected across files.
    symbol: Option<SymbolInfo<'static>>,
    /// The module containing the symbol.
    module: Module<'db>,
    /// The file containing the symbol.
    ///
    /// This `File` is guaranteed to be the same
    /// as the `File` underlying `module`.
    file: File,
}
impl<'db> AllSymbolInfo<'db> {
    /// The name of this symbol as it appears in its defining file.
    ///
    /// `None` means this entry represents importing the module itself rather
    /// than a concrete symbol inside it; callers that need a display name
    /// should fall back to `AllSymbolInfo::module().name()`.
    pub fn name_in_file(&self) -> Option<&str> {
        match &self.symbol {
            Some(symbol) => Some(&*symbol.name),
            None => None,
        }
    }

    /// The "kind" of this symbol.
    ///
    /// In the context of auto-import the kind is determined on a best-effort
    /// basis and may be imprecise (e.g. reporting a module as a variable).
    /// Module entries always report `SymbolKind::Module`.
    pub fn kind(&self) -> SymbolKind {
        match &self.symbol {
            Some(symbol) => symbol.kind,
            None => SymbolKind::Module,
        }
    }

    /// The module this symbol is exported from.
    pub fn module(&self) -> Module<'db> {
        self.module
    }

    /// The `File` backing the module.
    ///
    /// Always equivalent to `AllSymbolInfo::module().file().unwrap()`.
    pub fn file(&self) -> File {
        self.file
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::CursorTest;
    use crate::tests::IntoDiagnostic;
    use insta::assert_snapshot;
    use ruff_db::diagnostic::{
        Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
        SubDiagnosticSeverity,
    };

    #[test]
    fn test_all_symbols_multi_file() {
        // We use odd symbol names here so that we can
        // write queries that target them specifically
        // and (hopefully) nothing else.
        let test = CursorTest::builder()
            .source(
                "utils.py",
                "
def abcdefghijklmnop():
'''A helpful utility function'''
pass
",
            )
            .source(
                "models.py",
                "
class Abcdefghijklmnop:
'''A data model class'''
def __init__(self):
pass
",
            )
            .source(
                "constants.py",
                "
ABCDEFGHIJKLMNOP = 'https://api.example.com'
<CURSOR>",
            )
            .build();
        // Fuzzy query using every other letter of the target names; the
        // snapshot covers one hit per file, sorted by name then path.
        assert_snapshot!(test.all_symbols("acegikmo"), @r"
info[all-symbols]: AllSymbolInfo
--> constants.py:2:1
|
2 | ABCDEFGHIJKLMNOP = 'https://api.example.com'
| ^^^^^^^^^^^^^^^^
|
info: Constant ABCDEFGHIJKLMNOP
info[all-symbols]: AllSymbolInfo
--> models.py:2:7
|
2 | class Abcdefghijklmnop:
| ^^^^^^^^^^^^^^^^
3 | '''A data model class'''
4 | def __init__(self):
|
info: Class Abcdefghijklmnop
info[all-symbols]: AllSymbolInfo
--> utils.py:2:5
|
2 | def abcdefghijklmnop():
| ^^^^^^^^^^^^^^^^
3 | '''A helpful utility function'''
4 | pass
|
info: Function abcdefghijklmnop
");
    }

    impl CursorTest {
        /// Runs `all_symbols` with a fuzzy query from the cursor's file and
        /// renders every hit as a diagnostic for snapshot comparison.
        fn all_symbols(&self, query: &str) -> String {
            let symbols = all_symbols(&self.db, self.cursor.file, &QueryPattern::fuzzy(query));
            if symbols.is_empty() {
                return "No symbols found".to_string();
            }
            self.render_diagnostics(symbols.into_iter().map(|symbol_info| AllSymbolDiagnostic {
                db: &self.db,
                symbol_info,
            }))
        }
    }

    /// Adapter pairing an `AllSymbolInfo` with the database needed to render it.
    struct AllSymbolDiagnostic<'db> {
        db: &'db dyn Db,
        symbol_info: AllSymbolInfo<'db>,
    }

    impl IntoDiagnostic for AllSymbolDiagnostic<'_> {
        fn into_diagnostic(self) -> Diagnostic {
            let symbol_kind_str = self.symbol_info.kind().to_string();
            // "Kind name" info line; falls back to the module name for module
            // entries (where `name_in_file` is `None`).
            let info_text = format!(
                "{} {}",
                symbol_kind_str,
                self.symbol_info.name_in_file().unwrap_or_else(|| self
                    .symbol_info
                    .module()
                    .name(self.db)
                    .as_str())
            );
            let sub = SubDiagnostic::new(SubDiagnosticSeverity::Info, info_text);
            let mut main = Diagnostic::new(
                DiagnosticId::Lint(LintName::of("all-symbols")),
                Severity::Info,
                "AllSymbolInfo".to_string(),
            );
            // Point the primary annotation at the symbol's name range when
            // there is one; otherwise at the file as a whole.
            let mut span = Span::from(self.symbol_info.file());
            if let Some(ref symbol) = self.symbol_info.symbol {
                span = span.with_range(symbol.name_range);
            }
            main.annotate(Annotation::primary(span));
            main.sub(sub);
            main
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/markup.rs | crates/ty_ide/src/markup.rs | use std::fmt;
use std::fmt::Formatter;
/// The flavor of markup a client renders: plain text or Markdown.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum MarkupKind {
    PlainText,
    Markdown,
}

impl MarkupKind {
    /// Wraps `code` so that, when displayed, it is emitted as a fenced code
    /// block tagged with `language` for Markdown, or passed through verbatim
    /// for plain text.
    pub(crate) const fn fenced_code_block<T>(
        self,
        code: T,
        language: &str,
    ) -> FencedCodeBlock<'_, T>
    where
        T: fmt::Display,
    {
        FencedCodeBlock {
            language,
            code,
            kind: self,
        }
    }

    /// Returns a section separator: a Markdown horizontal rule (`---`) or a
    /// row of dashes for plain text.
    pub(crate) const fn horizontal_line(self) -> HorizontalLine {
        HorizontalLine { kind: self }
    }
}

/// Lazily-rendered code block; see [`MarkupKind::fenced_code_block`].
pub(crate) struct FencedCodeBlock<'a, T> {
    language: &'a str,
    code: T,
    kind: MarkupKind,
}

impl<T> fmt::Display for FencedCodeBlock<'_, T>
where
    T: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.kind {
            // Markdown: fence the code and tag the opening fence with the language.
            MarkupKind::Markdown => write!(f, "```{}\n{}\n```", self.language, self.code),
            // Plain text: emit the code untouched.
            MarkupKind::PlainText => write!(f, "{}", self.code),
        }
    }
}

/// Lazily-rendered separator line; see [`MarkupKind::horizontal_line`].
#[derive(Debug, Copy, Clone)]
pub(crate) struct HorizontalLine {
    kind: MarkupKind,
}

impl fmt::Display for HorizontalLine {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let separator = match self.kind {
            MarkupKind::PlainText => "\n---------------------------------------------\n",
            MarkupKind::Markdown => "\n---\n",
        };
        f.write_str(separator)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/goto.rs | crates/ty_ide/src/goto.rs | use crate::docstring::Docstring;
pub use crate::goto_declaration::goto_declaration;
pub use crate::goto_definition::goto_definition;
pub use crate::goto_type_definition::goto_type_definition;
use std::borrow::Cow;
use crate::stub_mapping::StubMapper;
use ruff_db::parsed::ParsedModuleRef;
use ruff_python_ast::find_node::{CoveringNode, covering_node};
use ruff_python_ast::token::{TokenKind, Tokens};
use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_text_size::{Ranged, TextRange, TextSize};
use ty_python_semantic::ResolvedDefinition;
use ty_python_semantic::types::Type;
use ty_python_semantic::types::ide_support::{
call_signature_details, call_type_simplified_by_overloads, definitions_for_keyword_argument,
};
use ty_python_semantic::{
HasDefinition, HasType, ImportAliasResolution, SemanticModel, definitions_for_imported_symbol,
definitions_for_name,
};
/// The syntactic construct targeted by a goto-style request (declaration,
/// definition, type definition), together with the AST data needed to
/// resolve it.
#[derive(Clone, Debug)]
pub(crate) enum GotoTarget<'a> {
    /// Go to on an arbitrary expression (e.g. a name or attribute).
    Expression(ast::ExprRef<'a>),
    /// Go to on a function definition statement.
    FunctionDef(&'a ast::StmtFunctionDef),
    /// Go to on a class definition statement.
    ClassDef(&'a ast::StmtClassDef),
    /// Go to on a function parameter.
    Parameter(&'a ast::Parameter),
    /// Go to on the operator of a binary operation.
    ///
    /// ```py
    /// a + b
    ///   ^
    /// ```
    BinOp {
        expression: &'a ast::ExprBinOp,
        operator_range: TextRange,
    },
    /// Go to where the operator of a unary operation is defined.
    ///
    /// ```py
    /// -a
    /// ^
    /// ```
    UnaryOp {
        expression: &'a ast::ExprUnaryOp,
        operator_range: TextRange,
    },
    /// Multi-part module names
    /// Handles both `import foo.bar` and `from foo.bar import baz` cases
    /// ```py
    /// import foo.bar
    ///        ^^^
    /// from foo.bar import baz
    ///      ^^^
    /// ```
    ImportModuleComponent {
        module_name: String,
        level: u32,
        component_index: usize,
        component_range: TextRange,
    },
    /// Import alias in standard import statement
    /// ```py
    /// import foo.bar as baz
    ///                   ^^^
    /// ```
    ImportModuleAlias {
        alias: &'a ast::Alias,
        asname: &'a ast::Identifier,
    },
    /// In an import statement, the named under which the symbol is exported
    /// in the imported file.
    ///
    /// ```py
    /// from foo import bar as baz
    ///                 ^^^
    /// ```
    ImportExportedName {
        alias: &'a ast::Alias,
        import_from: &'a ast::StmtImportFrom,
    },
    /// Import alias in from import statement
    /// ```py
    /// from foo import bar as baz
    ///                        ^^^
    /// ```
    ImportSymbolAlias {
        alias: &'a ast::Alias,
        asname: &'a ast::Identifier,
    },
    /// Go to on the exception handler variable
    /// ```py
    /// try: ...
    /// except Exception as e: ...
    ///                     ^
    /// ```
    ExceptVariable(&'a ast::ExceptHandlerExceptHandler),
    /// Go to on a keyword argument
    /// ```py
    /// test(a = 1)
    ///      ^
    /// ```
    KeywordArgument {
        keyword: &'a ast::Keyword,
        call_expression: &'a ast::ExprCall,
    },
    /// Go to on the rest parameter of a pattern match
    ///
    /// ```py
    /// match x:
    ///     case {"a": a, "b": b, **rest}: ...
    ///                             ^^^^
    /// ```
    PatternMatchRest(&'a ast::PatternMatchMapping),
    /// Go to on a keyword argument of a class pattern
    ///
    /// ```py
    /// match Point3D(0, 0, 0):
    ///     case Point3D(x=0, y=0, z=0): ...
    ///                  ^    ^    ^
    /// ```
    PatternKeywordArgument(&'a ast::PatternKeyword),
    /// Go to on a pattern star argument
    ///
    /// ```py
    /// match array:
    ///     case [*args]: ...
    ///            ^^^^
    /// ```
    PatternMatchStarName(&'a ast::PatternMatchStar),
    /// Go to on the name of a pattern match as pattern
    ///
    /// ```py
    /// match x:
    ///     case [x] as y: ...
    ///                 ^
    /// ```
    PatternMatchAsName(&'a ast::PatternMatchAs),
    /// Go to on the name of a type variable
    ///
    /// ```py
    /// type Alias[T: int = bool] = list[T]
    ///            ^
    /// ```
    TypeParamTypeVarName(&'a ast::TypeParamTypeVar),
    /// Go to on the name of a type param spec
    ///
    /// ```py
    /// type Alias[**P = [int, str]] = Callable[P, int]
    ///              ^
    /// ```
    TypeParamParamSpecName(&'a ast::TypeParamParamSpec),
    /// Go to on the name of a type var tuple
    ///
    /// ```py
    /// type Alias[*Ts = ()] = tuple[*Ts]
    ///             ^^
    /// ```
    TypeParamTypeVarTupleName(&'a ast::TypeParamTypeVarTuple),
    /// Go to on an identifier named in a `nonlocal` statement.
    NonLocal {
        identifier: &'a ast::Identifier,
    },
    /// Go to on an identifier named in a `global` statement.
    Globals {
        identifier: &'a ast::Identifier,
    },
    /// Go to on the invocation of a callable
    ///
    /// ```py
    /// x = mymodule.MyClass(1, 2)
    ///              ^^^^^^^
    /// ```
    ///
    /// This is equivalent to `GotoTarget::Expression(callable)` but enriched
    /// with information about the actual callable implementation.
    ///
    /// That is, if you click on `MyClass` in `MyClass()` it is *both* a
    /// reference to the class and to the initializer of the class. Therefore
    /// it would be ideal for goto-* and docstrings to be some intelligent
    /// merging of both the class and the initializer.
    Call {
        /// The callable that can actually be selected by a cursor
        callable: ast::ExprRef<'a>,
        /// The call of the callable
        call: &'a ast::ExprCall,
    },
    /// Go to on a sub-expression of a string annotation's sub-AST
    ///
    /// ```py
    /// x: "int | None"
    ///           ^^^^
    /// ```
    ///
    /// This is equivalent to `GotoTarget::Expression` but the expression
    /// isn't actually in the AST.
    StringAnnotationSubexpr {
        /// The string literal that is a string annotation.
        string_expr: &'a ast::ExprStringLiteral,
        /// The range to query in the sub-AST for the sub-expression.
        subrange: TextRange,
        /// If the expression is a Name of some kind this is the name (just a cached result).
        name: Option<String>,
    },
}
/// The resolved definitions for a `GotoTarget`
///
/// A thin wrapper over the list of `ResolvedDefinition`s backing a target,
/// shared by goto-declaration/definition resolution and docstring lookup.
#[derive(Debug, Clone)]
pub(crate) struct Definitions<'db>(pub Vec<ResolvedDefinition<'db>>);
impl<'db> Definitions<'db> {
    /// Builds the definitions for `ty`, if the type has a known definition site.
    ///
    /// Returns `None` if the type has no definition, or if it's a module type
    /// whose file can't be resolved.
    pub(crate) fn from_ty(db: &'db dyn crate::Db, ty: Type<'db>) -> Option<Self> {
        use ty_python_semantic::types::TypeDefinition;

        let resolved = match ty.definition(db)? {
            TypeDefinition::Module(module) => ResolvedDefinition::Module(module.file(db)?),
            TypeDefinition::Class(definition)
            | TypeDefinition::Function(definition)
            | TypeDefinition::TypeVar(definition)
            | TypeDefinition::TypeAlias(definition)
            | TypeDefinition::SpecialForm(definition)
            | TypeDefinition::NewType(definition) => ResolvedDefinition::Definition(definition),
        };

        Some(Self(vec![resolved]))
    }

    /// Get the "goto-declaration" interpretation of this definition
    ///
    /// In this case it basically returns exactly what was found.
    pub(crate) fn declaration_targets(
        self,
        db: &'db dyn ty_python_semantic::Db,
    ) -> Option<crate::NavigationTargets> {
        definitions_to_navigation_targets(db, None, self.0)
    }

    /// Get the "goto-definition" interpretation of this definition
    ///
    /// Applies stub-mapping so that a definition found in a stub file is
    /// replaced by the corresponding "real" implementation when possible.
    pub(crate) fn definition_targets(
        self,
        db: &'db dyn ty_python_semantic::Db,
    ) -> Option<crate::NavigationTargets> {
        definitions_to_navigation_targets(db, Some(&StubMapper::new(db)), self.0)
    }

    /// Get the docstring for this definition
    ///
    /// Documentation typically lives on implementations rather than stubs, so
    /// first check the definitions as found, then their stub-mapped
    /// implementations, returning the first docstring encountered.
    pub(crate) fn docstring(self, db: &'db dyn crate::Db) -> Option<Docstring> {
        // If any of the original definitions carries a docstring, use it.
        if let Some(docstring) = self
            .0
            .iter()
            .find_map(|definition| definition.docstring(db))
        {
            return Some(Docstring::new(docstring));
        }

        // Otherwise map stub definitions to their implementation files and
        // look for a docstring there.
        let stub_mapper = StubMapper::new(db);
        stub_mapper
            .map_definitions(self.0)
            .into_iter()
            .find_map(|definition| definition.docstring(db))
            .map(Docstring::new)
    }
}
impl GotoTarget<'_> {
    /// Infers the type of this goto target, if any.
    ///
    /// Most targets defer to the `inferred_type` of the underlying AST node;
    /// the remaining arms handle targets that aren't plain expressions
    /// (imports, string annotations, bin/unary ops). Identifier-only targets
    /// (patterns, `nonlocal`/`global`, some type params) are not supported yet.
    pub(crate) fn inferred_type<'db>(&self, model: &SemanticModel<'db>) -> Option<Type<'db>> {
        match self {
            GotoTarget::Expression(expression) => expression.inferred_type(model),
            GotoTarget::FunctionDef(function) => function.inferred_type(model),
            GotoTarget::ClassDef(class) => class.inferred_type(model),
            GotoTarget::Parameter(parameter) => parameter.inferred_type(model),
            GotoTarget::ImportSymbolAlias { alias, .. }
            | GotoTarget::ImportModuleAlias { alias, .. }
            | GotoTarget::ImportExportedName { alias, .. } => alias.inferred_type(model),
            GotoTarget::ExceptVariable(except) => except.inferred_type(model),
            GotoTarget::KeywordArgument { keyword, .. } => keyword.value.inferred_type(model),
            // When asking the type of a callable, usually you want the callable itself?
            // (i.e. the type of `MyClass` in `MyClass()` is `<class MyClass>` and not `() -> MyClass`)
            GotoTarget::Call { callable, .. } => callable.inferred_type(model),
            GotoTarget::TypeParamTypeVarName(typevar) => typevar.inferred_type(model),
            GotoTarget::ImportModuleComponent {
                module_name,
                component_index,
                level,
                ..
            } => {
                // We don't currently support hovering the bare `.` so there is always a name
                let module = import_name(module_name, *component_index);
                model.resolve_module_type(Some(module), *level)
            }
            GotoTarget::StringAnnotationSubexpr {
                string_expr,
                subrange,
                ..
            } => {
                let (subast, _submodel) = model.enter_string_annotation(string_expr)?;
                let submod = subast.syntax();
                let subnode = covering_node(submod.into(), *subrange).node();
                // The type checker knows the type of the full annotation but nothing else
                if AnyNodeRef::from(&*submod.body) == subnode {
                    string_expr.inferred_type(model)
                } else {
                    // TODO: force the typechecker to tell us its secrets
                    // (it computes but then immediately discards these types)
                    None
                }
            }
            GotoTarget::BinOp { expression, .. } => {
                let (_, ty) = ty_python_semantic::definitions_for_bin_op(model, expression)?;
                Some(ty)
            }
            GotoTarget::UnaryOp { expression, .. } => {
                let (_, ty) = ty_python_semantic::definitions_for_unary_op(model, expression)?;
                Some(ty)
            }
            // TODO: Support identifier targets
            GotoTarget::PatternMatchRest(_)
            | GotoTarget::PatternKeywordArgument(_)
            | GotoTarget::PatternMatchStarName(_)
            | GotoTarget::PatternMatchAsName(_)
            | GotoTarget::TypeParamParamSpecName(_)
            | GotoTarget::TypeParamTypeVarTupleName(_)
            | GotoTarget::NonLocal { .. }
            | GotoTarget::Globals { .. } => None,
        }
    }
/// Try to get a simplified display of this callable type by resolving overloads
pub(crate) fn call_type_simplified_by_overloads(
&self,
model: &SemanticModel,
) -> Option<String> {
if let GotoTarget::Call { call, .. } = self {
call_type_simplified_by_overloads(model, call)
} else {
None
}
}
    /// Gets the definitions for this goto target.
    ///
    /// The `alias_resolution` parameter controls whether import aliases
    /// (i.e. "x" in "from a import b as x") are resolved or returned as is.
    /// We want to resolve them in some cases (like "goto declaration") but not in others
    /// (like find references or rename).
    ///
    /// Returns `None` when the target has no resolvable definitions.
    ///
    /// Ideally this would always return `DefinitionsOrTargets::Definitions`
    /// as this is more useful for doing stub mapping (goto-definition) and
    /// retrieving docstrings. However for now some cases are stubbed out
    /// as just returning a raw `NavigationTarget`.
    pub(crate) fn get_definition_targets<'db>(
        &self,
        model: &SemanticModel<'db>,
        alias_resolution: ImportAliasResolution,
    ) -> Option<Definitions<'db>> {
        let definitions = match self {
            GotoTarget::Expression(expression) => {
                definitions_for_expression(model, *expression, alias_resolution)
            }
            // For already-defined symbols, they are their own definitions
            GotoTarget::FunctionDef(function) => Some(vec![ResolvedDefinition::Definition(
                function.definition(model),
            )]),
            GotoTarget::ClassDef(class) => Some(vec![ResolvedDefinition::Definition(
                class.definition(model),
            )]),
            GotoTarget::Parameter(parameter) => Some(vec![ResolvedDefinition::Definition(
                parameter.definition(model),
            )]),
            // For import aliases (offset within 'y' or 'z' in "from x import y as z")
            GotoTarget::ImportSymbolAlias { asname, .. } => Some(definitions_for_name(
                model,
                asname.as_str(),
                AnyNodeRef::from(*asname),
                alias_resolution,
            )),
            GotoTarget::ImportExportedName { alias, import_from } => {
                let symbol_name = alias.name.as_str();
                Some(definitions_for_imported_symbol(
                    model,
                    import_from,
                    symbol_name,
                    alias_resolution,
                ))
            }
            GotoTarget::ImportModuleComponent {
                module_name,
                component_index,
                level,
                ..
            } => {
                // We don't currently support hovering the bare `.` so there is always a name
                let module = import_name(module_name, *component_index);
                definitions_for_module(model, Some(module), *level)
            }
            // Handle import aliases (offset within 'z' in "import x.y as z")
            GotoTarget::ImportModuleAlias { asname, .. } => Some(definitions_for_name(
                model,
                asname.as_str(),
                AnyNodeRef::from(*asname),
                alias_resolution,
            )),
            // Handle keyword arguments in call expressions
            GotoTarget::KeywordArgument {
                keyword,
                call_expression,
            } => Some(definitions_for_keyword_argument(
                model,
                keyword,
                call_expression,
            )),
            // For exception variables, they are their own definitions (like parameters)
            GotoTarget::ExceptVariable(except_handler) => {
                Some(vec![ResolvedDefinition::Definition(
                    except_handler.definition(model),
                )])
            }
            // Patterns are glorified assignments but we have to look them up by ident
            // because they're not expressions
            GotoTarget::PatternMatchRest(pattern_mapping) => {
                pattern_mapping.rest.as_ref().map(|name| {
                    definitions_for_name(
                        model,
                        name.as_str(),
                        AnyNodeRef::Identifier(name),
                        alias_resolution,
                    )
                })
            }
            GotoTarget::PatternMatchAsName(pattern_as) => pattern_as.name.as_ref().map(|name| {
                definitions_for_name(
                    model,
                    name.as_str(),
                    AnyNodeRef::Identifier(name),
                    alias_resolution,
                )
            }),
            GotoTarget::PatternKeywordArgument(pattern_keyword) => {
                let name = &pattern_keyword.attr;
                Some(definitions_for_name(
                    model,
                    name.as_str(),
                    AnyNodeRef::Identifier(name),
                    alias_resolution,
                ))
            }
            GotoTarget::PatternMatchStarName(pattern_star) => {
                pattern_star.name.as_ref().map(|name| {
                    definitions_for_name(
                        model,
                        name.as_str(),
                        AnyNodeRef::Identifier(name),
                        alias_resolution,
                    )
                })
            }
            // For callables, both the definition of the callable and the actual function impl are relevant.
            //
            // Prefer the function impl over the callable so that its docstrings win if defined.
            GotoTarget::Call { callable, call } => {
                let mut definitions = Vec::new();
                // We prefer the specific overload for hover, go-to-def etc. However,
                // `definitions_for_callable` always resolves import aliases. That's why we
                // skip it in cases import alias resolution is turned off (rename, highlight references).
                if alias_resolution == ImportAliasResolution::ResolveAliases {
                    definitions.extend(definitions_for_callable(model, call));
                }
                let expr_definitions =
                    definitions_for_expression(model, *callable, alias_resolution)
                        .unwrap_or_default();
                definitions.extend(expr_definitions);
                if definitions.is_empty() {
                    None
                } else {
                    Some(definitions)
                }
            }
            GotoTarget::BinOp { expression, .. } => {
                let (definitions, _) =
                    ty_python_semantic::definitions_for_bin_op(model, expression)?;
                Some(definitions)
            }
            GotoTarget::UnaryOp { expression, .. } => {
                let (definitions, _) =
                    ty_python_semantic::definitions_for_unary_op(model, expression)?;
                Some(definitions)
            }
            // String annotations sub-expressions require us to recurse into the sub-AST
            GotoTarget::StringAnnotationSubexpr {
                string_expr,
                subrange,
                ..
            } => {
                let (subast, submodel) = model.enter_string_annotation(string_expr)?;
                let subexpr = covering_node(subast.syntax().into(), *subrange)
                    .node()
                    .as_expr_ref()?;
                definitions_for_expression(&submodel, subexpr, alias_resolution)
            }
            // nonlocal and global are essentially loads, but again they're statements,
            // so we need to look them up by ident
            GotoTarget::NonLocal { identifier } | GotoTarget::Globals { identifier } => {
                Some(definitions_for_name(
                    model,
                    identifier.as_str(),
                    AnyNodeRef::Identifier(identifier),
                    alias_resolution,
                ))
            }
            // These are declarations of sorts, but they're stmts and not exprs, so look up by ident.
            GotoTarget::TypeParamTypeVarName(type_var) => {
                let name = &type_var.name;
                Some(definitions_for_name(
                    model,
                    name.as_str(),
                    AnyNodeRef::Identifier(name),
                    alias_resolution,
                ))
            }
            GotoTarget::TypeParamParamSpecName(name) => {
                let name = &name.name;
                Some(definitions_for_name(
                    model,
                    name.as_str(),
                    AnyNodeRef::Identifier(name),
                    alias_resolution,
                ))
            }
            GotoTarget::TypeParamTypeVarTupleName(name) => {
                let name = &name.name;
                Some(definitions_for_name(
                    model,
                    name.as_str(),
                    AnyNodeRef::Identifier(name),
                    alias_resolution,
                ))
            }
        };
        definitions.map(Definitions)
    }
    /// Returns the text representation of this goto target.
    /// Returns `None` if no meaningful string representation can be provided.
    /// This is used by the "references" feature, which looks for references
    /// to this goto target.
    ///
    /// NOTE: deliberately not a `Display` impl — some targets (bin/unary ops,
    /// anonymous expressions) have no name at all.
    pub(crate) fn to_string(&self) -> Option<Cow<'_, str>> {
        match self {
            // For a call, the "name" is the name of the callable expression.
            GotoTarget::Call {
                callable: expression,
                ..
            }
            | GotoTarget::Expression(expression) => match expression {
                ast::ExprRef::Name(name) => Some(Cow::Borrowed(name.id.as_str())),
                ast::ExprRef::Attribute(attr) => Some(Cow::Borrowed(attr.attr.as_str())),
                _ => None,
            },
            GotoTarget::StringAnnotationSubexpr { name, .. } => name.as_deref().map(Cow::Borrowed),
            GotoTarget::FunctionDef(function) => Some(Cow::Borrowed(function.name.as_str())),
            GotoTarget::ClassDef(class) => Some(Cow::Borrowed(class.name.as_str())),
            GotoTarget::Parameter(parameter) => Some(Cow::Borrowed(parameter.name.as_str())),
            GotoTarget::ImportSymbolAlias { asname, .. } => Some(Cow::Borrowed(asname.as_str())),
            GotoTarget::ImportExportedName { alias, .. } => {
                Some(Cow::Borrowed(alias.name.as_str()))
            }
            GotoTarget::ImportModuleComponent {
                module_name,
                component_index,
                ..
            } => {
                let components: Vec<&str> = module_name.split('.').collect();
                if let Some(component) = components.get(*component_index) {
                    Some(Cow::Borrowed(*component))
                } else {
                    // Out-of-range component index: fall back to the full dotted name.
                    Some(Cow::Borrowed(module_name))
                }
            }
            GotoTarget::ImportModuleAlias { asname, .. } => Some(Cow::Borrowed(asname.as_str())),
            GotoTarget::ExceptVariable(except) => {
                Some(Cow::Borrowed(except.name.as_ref()?.as_str()))
            }
            GotoTarget::KeywordArgument { keyword, .. } => {
                Some(Cow::Borrowed(keyword.arg.as_ref()?.as_str()))
            }
            GotoTarget::PatternMatchRest(rest) => Some(Cow::Borrowed(rest.rest.as_ref()?.as_str())),
            GotoTarget::PatternKeywordArgument(keyword) => {
                Some(Cow::Borrowed(keyword.attr.as_str()))
            }
            GotoTarget::PatternMatchStarName(star) => {
                Some(Cow::Borrowed(star.name.as_ref()?.as_str()))
            }
            GotoTarget::PatternMatchAsName(as_name) => {
                Some(Cow::Borrowed(as_name.name.as_ref()?.as_str()))
            }
            GotoTarget::TypeParamTypeVarName(type_var) => {
                Some(Cow::Borrowed(type_var.name.as_str()))
            }
            GotoTarget::TypeParamParamSpecName(spec) => Some(Cow::Borrowed(spec.name.as_str())),
            GotoTarget::TypeParamTypeVarTupleName(tuple) => {
                Some(Cow::Borrowed(tuple.name.as_str()))
            }
            GotoTarget::NonLocal { identifier, .. } => Some(Cow::Borrowed(identifier.as_str())),
            GotoTarget::Globals { identifier, .. } => Some(Cow::Borrowed(identifier.as_str())),
            // Operators have no textual name.
            GotoTarget::BinOp { .. } | GotoTarget::UnaryOp { .. } => None,
        }
    }
/// Creates a `GotoTarget` from a `CoveringNode` and an offset within the node
pub(crate) fn from_covering_node<'a>(
model: &SemanticModel,
covering_node: &CoveringNode<'a>,
offset: TextSize,
tokens: &Tokens,
) -> Option<GotoTarget<'a>> {
tracing::trace!("Covering node is of kind {:?}", covering_node.node().kind());
match covering_node.node() {
AnyNodeRef::Identifier(identifier) => match covering_node.parent() {
Some(AnyNodeRef::StmtFunctionDef(function)) => {
Some(GotoTarget::FunctionDef(function))
}
Some(AnyNodeRef::StmtClassDef(class)) => Some(GotoTarget::ClassDef(class)),
Some(AnyNodeRef::Parameter(parameter)) => Some(GotoTarget::Parameter(parameter)),
Some(AnyNodeRef::Alias(alias)) => {
// Find the containing import statement to determine the type
let import_stmt = covering_node.ancestors().find(|node| {
matches!(
node,
AnyNodeRef::StmtImport(_) | AnyNodeRef::StmtImportFrom(_)
)
});
match import_stmt {
Some(AnyNodeRef::StmtImport(_)) => {
// Regular import statement like "import x.y as z"
// Is the offset within the alias name (asname) part?
if let Some(asname) = &alias.asname {
if asname.range.contains_inclusive(offset) {
return Some(GotoTarget::ImportModuleAlias { alias, asname });
}
}
// Is the offset in the module name part?
if alias.name.range.contains_inclusive(offset) {
let full_name = alias.name.as_str();
if let Some((component_index, component_range)) =
find_module_component(
full_name,
alias.name.range.start(),
offset,
)
{
return Some(GotoTarget::ImportModuleComponent {
module_name: full_name.to_string(),
level: 0,
component_index,
component_range,
});
}
}
None
}
Some(AnyNodeRef::StmtImportFrom(import_from)) => {
// From import statement like "from x import y as z"
// Is the offset within the alias name (asname) part?
if let Some(asname) = &alias.asname {
if asname.range.contains_inclusive(offset) {
return Some(GotoTarget::ImportSymbolAlias { alias, asname });
}
}
// Is the offset in the original name part?
if alias.name.range.contains_inclusive(offset) {
return Some(GotoTarget::ImportExportedName { alias, import_from });
}
None
}
_ => None,
}
}
Some(AnyNodeRef::StmtImportFrom(from)) => {
// Handle offset within module name in from import statements
if let Some(module_expr) = &from.module {
let full_module_name = module_expr.to_string();
if let Some((component_index, component_range)) =
find_module_component(&full_module_name, module_expr.start(), offset)
{
return Some(GotoTarget::ImportModuleComponent {
module_name: full_module_name,
level: from.level,
component_index,
component_range,
});
}
}
None
}
Some(AnyNodeRef::ExceptHandlerExceptHandler(handler)) => {
Some(GotoTarget::ExceptVariable(handler))
}
Some(AnyNodeRef::Keyword(keyword)) => {
// Find the containing call expression from the ancestor chain
let call_expression = covering_node
.ancestors()
.find_map(ruff_python_ast::AnyNodeRef::expr_call)?;
Some(GotoTarget::KeywordArgument {
keyword,
call_expression,
})
}
Some(AnyNodeRef::PatternMatchMapping(mapping)) => {
Some(GotoTarget::PatternMatchRest(mapping))
}
Some(AnyNodeRef::PatternKeyword(keyword)) => {
Some(GotoTarget::PatternKeywordArgument(keyword))
}
Some(AnyNodeRef::PatternMatchStar(star)) => {
Some(GotoTarget::PatternMatchStarName(star))
}
Some(AnyNodeRef::PatternMatchAs(as_pattern)) => {
Some(GotoTarget::PatternMatchAsName(as_pattern))
}
Some(AnyNodeRef::TypeParamTypeVar(var)) => {
Some(GotoTarget::TypeParamTypeVarName(var))
}
Some(AnyNodeRef::TypeParamParamSpec(bound)) => {
Some(GotoTarget::TypeParamParamSpecName(bound))
}
Some(AnyNodeRef::TypeParamTypeVarTuple(var_tuple)) => {
Some(GotoTarget::TypeParamTypeVarTupleName(var_tuple))
}
Some(AnyNodeRef::ExprAttribute(attribute)) => {
// Check if this is seemingly a callable being invoked (the `y` in `x.y(...)`)
let grandparent_expr = covering_node.ancestors().nth(2);
let attribute_expr = attribute.into();
if let Some(AnyNodeRef::ExprCall(call)) = grandparent_expr {
if ruff_python_ast::ExprRef::from(&call.func) == attribute_expr {
return Some(GotoTarget::Call {
call,
callable: attribute_expr,
});
}
}
Some(GotoTarget::Expression(attribute_expr))
}
Some(AnyNodeRef::StmtNonlocal(_)) => Some(GotoTarget::NonLocal { identifier }),
Some(AnyNodeRef::StmtGlobal(_)) => Some(GotoTarget::Globals { identifier }),
None => None,
Some(parent) => {
tracing::debug!(
"Missing `GoToTarget` for identifier with parent {:?}",
parent.kind()
);
None
}
},
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/lib.rs | crates/ty_ide/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods in ty crates"
)]
mod all_symbols;
mod code_action;
mod completion;
mod doc_highlights;
mod docstring;
mod document_symbols;
mod find_references;
mod goto;
mod goto_declaration;
mod goto_definition;
mod goto_type_definition;
mod hover;
mod importer;
mod inlay_hints;
mod markup;
mod references;
mod rename;
mod selection_range;
mod semantic_tokens;
mod signature_help;
mod stub_mapping;
mod symbols;
mod workspace_symbols;
pub use all_symbols::{AllSymbolInfo, all_symbols};
pub use code_action::{QuickFix, code_actions};
pub use completion::{Completion, CompletionKind, CompletionSettings, completion};
pub use doc_highlights::document_highlights;
pub use document_symbols::document_symbols;
pub use find_references::find_references;
pub use goto::{goto_declaration, goto_definition, goto_type_definition};
pub use hover::hover;
pub use inlay_hints::{
InlayHintKind, InlayHintLabel, InlayHintSettings, InlayHintTextEdit, inlay_hints,
};
pub use markup::MarkupKind;
pub use references::ReferencesMode;
pub use rename::{can_rename, rename};
pub use selection_range::selection_range;
pub use semantic_tokens::{
SemanticToken, SemanticTokenModifier, SemanticTokenType, SemanticTokens, semantic_tokens,
};
pub use signature_help::{ParameterDetails, SignatureDetails, SignatureHelpInfo, signature_help};
pub use symbols::{FlatSymbols, HierarchicalSymbols, SymbolId, SymbolInfo, SymbolKind};
pub use workspace_symbols::{WorkspaceSymbolInfo, workspace_symbols};
use ruff_db::{
files::{File, FileRange},
system::SystemPathBuf,
vendored::VendoredPath,
};
use ruff_text_size::{Ranged, TextRange};
use rustc_hash::FxHashSet;
use std::ops::{Deref, DerefMut};
use ty_project::Db;
use ty_python_semantic::types::{Type, TypeDefinition};
/// Information associated with a text range.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct RangedValue<T> {
    /// The file and range the value applies to.
    pub range: FileRange,
    /// The payload associated with `range`.
    pub value: T,
}
impl<T> RangedValue<T> {
    /// Returns the file range this value is associated with.
    pub fn file_range(&self) -> FileRange {
        self.range
    }
}
// Deref to the payload so a `RangedValue<T>` can be used wherever `&T` is expected.
impl<T> Deref for RangedValue<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
impl<T> DerefMut for RangedValue<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}
// Iterating a `RangedValue` iterates its payload
// (e.g. `RangedValue<NavigationTargets>` yields the individual targets).
impl<T> IntoIterator for RangedValue<T>
where
    T: IntoIterator,
{
    type Item = T::Item;
    type IntoIter = T::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.value.into_iter()
    }
}
/// Target to which the editor can navigate to.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct NavigationTarget {
    /// The file containing the target.
    file: File,
    /// The range that should be focused when navigating to the target.
    ///
    /// This is typically not the full range of the node. For example, it's the range of the class's name in a class definition.
    ///
    /// The `focus_range` must be fully covered by `full_range`.
    focus_range: TextRange,
    /// The range covering the entire target.
    full_range: TextRange,
}
impl NavigationTarget {
    /// Creates a new `NavigationTarget` where the focus and full range are identical.
    pub fn new(file: File, range: TextRange) -> Self {
        Self {
            file,
            focus_range: range,
            full_range: range,
        }
    }
    /// The file containing the target.
    pub fn file(&self) -> File {
        self.file
    }
    /// The range to focus when navigating (e.g. a definition's name).
    pub fn focus_range(&self) -> TextRange {
        self.focus_range
    }
    /// The range covering the entire target.
    pub fn full_range(&self) -> TextRange {
        self.full_range
    }
    /// The full range paired with the target's file.
    pub fn full_file_range(&self) -> FileRange {
        FileRange::new(self.file, self.full_range)
    }
}
impl From<FileRange> for NavigationTarget {
fn from(value: FileRange) -> Self {
Self {
file: value.file(),
focus_range: value.range(),
full_range: value.range(),
}
}
}
/// Specifies the kind of reference operation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ReferenceKind {
    /// A read reference to a symbol (e.g., using a variable's value)
    Read,
    /// A write reference to a symbol (e.g., assigning to a variable)
    Write,
    /// Neither a read nor a write (e.g., a function or class declaration)
    Other,
}
/// Target of a reference with information about the kind of operation.
/// Unlike `NavigationTarget`, this type is specifically designed for references
/// and contains only a single range (not separate focus/full ranges) and
/// includes information about whether the reference is a read or write operation.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ReferenceTarget {
    /// The file and range of the referencing occurrence.
    file_range: FileRange,
    /// Whether the occurrence reads, writes, or merely mentions the symbol.
    kind: ReferenceKind,
}
impl ReferenceTarget {
    /// Creates a new `ReferenceTarget`.
    pub fn new(file: File, range: TextRange, kind: ReferenceKind) -> Self {
        Self {
            file_range: FileRange::new(file, range),
            kind,
        }
    }
    /// The file containing the reference.
    pub fn file(&self) -> File {
        self.file_range.file()
    }
    /// The range of the reference within its file.
    pub fn range(&self) -> TextRange {
        self.file_range.range()
    }
    /// The file and range combined.
    pub fn file_range(&self) -> FileRange {
        self.file_range
    }
    /// The kind of operation this reference performs.
    pub fn kind(&self) -> ReferenceKind {
        self.kind
    }
}
/// An ordered collection of [`NavigationTarget`]s.
///
/// Backed by a `SmallVec` with one inline slot so the single-target case
/// needs no heap allocation.
#[derive(Debug, Clone)]
pub struct NavigationTargets(smallvec::SmallVec<[NavigationTarget; 1]>);
impl NavigationTargets {
    /// Creates a collection containing exactly one target (stored inline).
    fn single(target: NavigationTarget) -> Self {
        Self(smallvec::smallvec_inline![target])
    }
    /// Creates an empty collection without allocating.
    fn empty() -> Self {
        Self(smallvec::SmallVec::new_const())
    }
    /// Deduplicates `targets` and returns them in a deterministic order.
    fn unique(targets: impl IntoIterator<Item = NavigationTarget>) -> Self {
        let unique: FxHashSet<_> = targets.into_iter().collect();
        if unique.is_empty() {
            Self::empty()
        } else {
            let mut targets = unique.into_iter().collect::<Vec<_>>();
            // Sort on the complete target, not just `(file, focus start)`:
            // distinct targets that tie on the shorter key would otherwise keep
            // a nondeterministic relative order inherited from the hash set's
            // iteration order. The full key makes the order total (the set
            // already guarantees uniqueness), so `sort_unstable` is safe.
            targets.sort_unstable_by_key(|target| {
                (
                    target.file,
                    target.focus_range.start(),
                    target.focus_range.end(),
                    target.full_range.start(),
                    target.full_range.end(),
                )
            });
            Self(targets.into())
        }
    }
    /// Iterates over the targets in order.
    fn iter(&self) -> std::slice::Iter<'_, NavigationTarget> {
        self.0.iter()
    }
    #[cfg(test)]
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    #[cfg(test)]
    fn len(&self) -> usize {
        self.0.len()
    }
}
impl IntoIterator for NavigationTargets {
    type Item = NavigationTarget;
    type IntoIter = smallvec::IntoIter<[NavigationTarget; 1]>;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl<'a> IntoIterator for &'a NavigationTargets {
    type Item = &'a NavigationTarget;
    type IntoIter = std::slice::Iter<'a, NavigationTarget>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
// Collecting targets goes through `unique`, which deduplicates and sorts them.
impl FromIterator<NavigationTarget> for NavigationTargets {
    fn from_iter<T: IntoIterator<Item = NavigationTarget>>(iter: T) -> Self {
        Self::unique(iter)
    }
}
/// Types that can be resolved to a set of [`NavigationTarget`]s.
pub trait HasNavigationTargets {
    fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets;
}
impl HasNavigationTargets for Type<'_> {
    fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
        match self {
            // A union navigates to every member's targets (collect deduplicates).
            Type::Union(union) => union
                .elements(db)
                .iter()
                .flat_map(|target| target.navigation_targets(db))
                .collect(),
            Type::Intersection(intersection) => {
                // Only consider the positive elements because the negative elements are mainly from narrowing constraints.
                let mut targets = intersection.iter_positive(db).filter(|ty| !ty.is_unknown());
                let Some(first) = targets.next() else {
                    return NavigationTargets::empty();
                };
                match targets.next() {
                    Some(_) => {
                        // If there are multiple types in the intersection, we can't navigate to a single one
                        // because the type is the intersection of all those types.
                        NavigationTargets::empty()
                    }
                    None => first.navigation_targets(db),
                }
            }
            // Every other type navigates to its definition, if it has one.
            ty => ty
                .definition(db)
                .map(|definition| definition.navigation_targets(db))
                .unwrap_or_else(NavigationTargets::empty),
        }
    }
}
impl HasNavigationTargets for TypeDefinition<'_> {
    fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
        // Without a full range there's nothing to navigate to.
        match self.full_range(db) {
            Some(full_range) => {
                // Fall back to the full range when no dedicated focus range exists.
                let focus_range = self.focus_range(db).unwrap_or(full_range);
                NavigationTargets::single(NavigationTarget {
                    file: full_range.file(),
                    focus_range: focus_range.range(),
                    full_range: full_range.range(),
                })
            }
            None => NavigationTargets::empty(),
        }
    }
}
/// Get the cache-relative path where vendored paths should be written to.
pub fn relative_cached_vendored_root() -> SystemPathBuf {
    // The vendored files are uniquely identified by the source commit.
    let commit = ty_vendored::SOURCE_COMMIT;
    SystemPathBuf::from(format!("vendored/typeshed/{commit}"))
}
/// Get the cached version of a vendored path in the cache, ensuring the file is written to disk.
///
/// Returns `None` when the system has no writable cache or extraction fails.
pub fn cached_vendored_path(
    db: &dyn ty_python_semantic::Db,
    path: &VendoredPath,
) -> Option<SystemPathBuf> {
    let mut cache_path = relative_cached_vendored_root();
    cache_path.push(path.as_str());

    // Extract the vendored file onto the system (no-op if already cached).
    db.system()
        .as_writable()?
        .get_or_cache(&cache_path, &|| db.vendored().read_to_string(path))
        .ok()
        .flatten()
}
/// Get the absolute root path of all cached vendored paths.
///
/// This does not ensure that this path exists (this is only used for mapping cached paths
/// back to vendored ones, so this only matters if we've already been handed a path inside here).
pub fn cached_vendored_root(db: &dyn ty_python_semantic::Db) -> Option<SystemPathBuf> {
    let cache_dir = db.system().as_writable()?.cache_dir()?;
    Some(cache_dir.join(relative_cached_vendored_root()))
}
#[cfg(test)]
mod tests {
    use camino::Utf8Component;
    use insta::internals::SettingsBindDropGuard;
    use ruff_db::Db;
    use ruff_db::diagnostic::{Diagnostic, DiagnosticFormat, DisplayDiagnosticConfig};
    use ruff_db::files::{File, FileRootKind, system_path_to_file};
    use ruff_db::parsed::{ParsedModuleRef, parsed_module};
    use ruff_db::source::{SourceText, source_text};
    use ruff_db::system::{DbWithWritableSystem, SystemPath, SystemPathBuf};
    use ruff_python_codegen::Stylist;
    use ruff_python_trivia::textwrap::dedent;
    use ruff_text_size::TextSize;
    use ty_project::ProjectMetadata;
    /// A way to create a simple single-file (named `main.py`) cursor test.
    ///
    /// Use cases that require multiple files with a `<CURSOR>` marker
    /// in a file other than `main.py` can use `CursorTest::builder()`.
    pub(super) fn cursor_test(source: &str) -> CursorTest {
        CursorTest::builder().source("main.py", source).build()
    }
    /// A fully-built cursor test fixture: a test database plus the resolved
    /// `<CURSOR>` location.
    pub(super) struct CursorTest {
        pub(super) db: ty_project::TestDb,
        pub(super) cursor: Cursor,
        // Keeps the insta filters (path separators, `@Todo` types) bound for
        // the fixture's lifetime.
        _insta_settings_guard: SettingsBindDropGuard,
    }
    impl CursorTest {
        pub(super) fn builder() -> CursorTestBuilder {
            CursorTestBuilder::default()
        }
        /// Writes an additional file into the test's in-memory file system.
        pub(super) fn write_file(
            &mut self,
            path: impl AsRef<SystemPath>,
            content: &str,
        ) -> std::io::Result<()> {
            self.db.write_file(path, content)
        }
        /// Renders `diagnostics` into the textual form used by snapshot assertions.
        pub(super) fn render_diagnostics<I, D>(&self, diagnostics: I) -> String
        where
            I: IntoIterator<Item = D>,
            D: IntoDiagnostic,
        {
            use std::fmt::Write;
            let mut buf = String::new();
            let config = DisplayDiagnosticConfig::default()
                .color(false)
                .format(DiagnosticFormat::Full);
            for diagnostic in diagnostics {
                let diag = diagnostic.into_diagnostic();
                write!(buf, "{}", diag.display(&self.db, &config)).unwrap();
            }
            buf
        }
    }
    /// The file and offset into that file where a `<CURSOR>` marker
    /// is located.
    ///
    /// (Along with other information about that file, such as the
    /// parsed AST.)
    pub(super) struct Cursor {
        pub(super) file: File,
        pub(super) offset: TextSize,
        pub(super) parsed: ParsedModuleRef,
        pub(super) source: SourceText,
        pub(super) stylist: Stylist<'static>,
    }
    #[derive(Default)]
    pub(super) struct CursorTestBuilder {
        /// A list of source files, corresponding to the
        /// file's path and its contents.
        sources: Vec<Source>,
    }
    impl CursorTestBuilder {
        /// Materializes all added sources into a test database and locates the
        /// single `<CURSOR>` marker.
        ///
        /// Panics if no added source contained a `<CURSOR>` marker.
        pub(super) fn build(&self) -> CursorTest {
            let mut db = ty_project::TestDb::new(ProjectMetadata::new(
                "test".into(),
                SystemPathBuf::from("/"),
            ));
            db.init_program().unwrap();
            let mut cursor: Option<Cursor> = None;
            for &Source {
                ref path,
                ref contents,
                cursor_offset,
            } in &self.sources
            {
                db.write_file(path, contents)
                    .expect("write to memory file system to be successful");
                // Add a root for the top-most component.
                let top = path.components().find_map(|c| match c {
                    Utf8Component::Normal(c) => Some(c),
                    _ => None,
                });
                if let Some(top) = top {
                    let top = SystemPath::new(top);
                    if db.system().is_directory(top) {
                        db.files()
                            .try_add_root(&db, top, FileRootKind::LibrarySearchPath);
                    }
                }
                let file = system_path_to_file(&db, path).expect("newly written file to existing");
                if let Some(offset) = cursor_offset {
                    // This assert should generally never trip, since
                    // we have an assert on `CursorTestBuilder::source`
                    // to ensure we never have more than one marker.
                    assert!(
                        cursor.is_none(),
                        "found more than one source that contains `<CURSOR>`"
                    );
                    let source = source_text(&db, file);
                    let parsed = parsed_module(&db, file).load(&db);
                    let stylist =
                        Stylist::from_tokens(parsed.tokens(), source.as_str()).into_owned();
                    cursor = Some(Cursor {
                        file,
                        offset,
                        parsed,
                        source,
                        stylist,
                    });
                }
            }
            let mut insta_settings = insta::Settings::clone_current();
            // Normalize Windows-style path separators in snapshot output.
            insta_settings.add_filter(r#"\\(\w\w|\.|")"#, "/$1");
            // Filter out TODO types because they are different between debug and release builds.
            insta_settings.add_filter(r"@Todo\(.+\)", "@Todo");
            let insta_settings_guard = insta_settings.bind_to_scope();
            CursorTest {
                db,
                cursor: cursor.expect("at least one source to contain `<CURSOR>`"),
                _insta_settings_guard: insta_settings_guard,
            }
        }
        /// Adds a source file at `path`, extracting (and removing) an optional
        /// `<CURSOR>` marker from `contents`.
        ///
        /// Panics if more than one added source contains a marker.
        pub(super) fn source(
            &mut self,
            path: impl Into<SystemPathBuf>,
            contents: impl AsRef<str>,
        ) -> &mut CursorTestBuilder {
            const MARKER: &str = "<CURSOR>";
            let path = path.into();
            let contents = dedent(contents.as_ref()).into_owned();
            let Some(cursor_offset) = contents.find(MARKER) else {
                self.sources.push(Source {
                    path,
                    contents,
                    cursor_offset: None,
                });
                return self;
            };
            if let Some(source) = self.sources.iter().find(|src| src.cursor_offset.is_some()) {
                panic!(
                    "cursor tests must contain exactly one file \
                     with a `<CURSOR>` marker, but found a marker \
                     in both `{path1}` and `{path2}`",
                    path1 = source.path,
                    path2 = path,
                );
            }
            let mut without_cursor_marker = contents[..cursor_offset].to_string();
            without_cursor_marker.push_str(&contents[cursor_offset + MARKER.len()..]);
            let cursor_offset =
                TextSize::try_from(cursor_offset).expect("source to be smaller than 4GB");
            self.sources.push(Source {
                path,
                contents: without_cursor_marker,
                cursor_offset: Some(cursor_offset),
            });
            self
        }
    }
    /// A single source file queued by the builder.
    struct Source {
        path: SystemPathBuf,
        contents: String,
        cursor_offset: Option<TextSize>,
    }
    /// Converts feature-specific result types into a renderable [`Diagnostic`].
    pub(super) trait IntoDiagnostic {
        fn into_diagnostic(self) -> Diagnostic;
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/goto_declaration.rs | crates/ty_ide/src/goto_declaration.rs | use crate::goto::find_goto_target;
use crate::{Db, NavigationTargets, RangedValue};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use ty_python_semantic::{ImportAliasResolution, SemanticModel};
/// Resolves the declaration site(s) for the symbol under `offset` in `file`.
///
/// Unlike most languages, Python has no mandatory variable declarations, so
/// a "declaration" here covers formal ones (`class`/`def` statements and
/// variable annotations) as well as plain variable assignments.
pub fn goto_declaration(
    db: &dyn Db,
    file: File,
    offset: TextSize,
) -> Option<RangedValue<NavigationTargets>> {
    let module_ref = parsed_module(db, file).load(db);
    let semantic_model = SemanticModel::new(db, file);

    // Figure out what the cursor is pointing at.
    let target = find_goto_target(&semantic_model, &module_ref, offset)?;

    // Resolve it (through import aliases) down to its declaration sites.
    let targets = target
        .get_definition_targets(&semantic_model, ImportAliasResolution::ResolveAliases)?
        .declaration_targets(db)?;

    Some(RangedValue {
        range: FileRange::new(file, target.range()),
        value: targets,
    })
}
#[cfg(test)]
mod tests {
use crate::goto_declaration;
use crate::tests::{CursorTest, cursor_test};
use insta::assert_snapshot;
#[test]
fn goto_declaration_function_call_to_definition() {
let test = cursor_test(
"
def my_function(x, y):
return x + y
result = my_func<CURSOR>tion(1, 2)
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:5:10
|
3 | return x + y
4 |
5 | result = my_function(1, 2)
| ^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> main.py:2:5
|
2 | def my_function(x, y):
| -----------
3 | return x + y
|
");
}
#[test]
fn goto_declaration_variable_assignment() {
let test = cursor_test(
"
x = 42
y = x<CURSOR>
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:3:5
|
2 | x = 42
3 | y = x
| ^ Clicking here
|
info: Found 1 declaration
--> main.py:2:1
|
2 | x = 42
| -
3 | y = x
|
");
}
#[test]
fn goto_declaration_class_instantiation() {
let test = cursor_test(
"
class MyClass:
def __init__(self):
pass
instance = My<CURSOR>Class()
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:6:12
|
4 | pass
5 |
6 | instance = MyClass()
| ^^^^^^^ Clicking here
|
info: Found 2 declarations
--> main.py:2:7
|
2 | class MyClass:
| -------
3 | def __init__(self):
| --------
4 | pass
|
");
}
#[test]
fn goto_declaration_parameter_usage() {
let test = cursor_test(
"
def foo(param):
return pa<CURSOR>ram * 2
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:3:12
|
2 | def foo(param):
3 | return param * 2
| ^^^^^ Clicking here
|
info: Found 1 declaration
--> main.py:2:9
|
2 | def foo(param):
| -----
3 | return param * 2
|
");
}
#[test]
fn goto_declaration_type_parameter() {
let test = cursor_test(
"
def generic_func[T](value: T) -> T:
v: T<CURSOR> = value
return v
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:3:8
|
2 | def generic_func[T](value: T) -> T:
3 | v: T = value
| ^ Clicking here
4 | return v
|
info: Found 1 declaration
--> main.py:2:18
|
2 | def generic_func[T](value: T) -> T:
| -
3 | v: T = value
4 | return v
|
");
}
#[test]
fn goto_declaration_type_parameter_class() {
let test = cursor_test(
"
class GenericClass[T]:
def __init__(self, value: T<CURSOR>):
self.value = value
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:3:31
|
2 | class GenericClass[T]:
3 | def __init__(self, value: T):
| ^ Clicking here
4 | self.value = value
|
info: Found 1 declaration
--> main.py:2:20
|
2 | class GenericClass[T]:
| -
3 | def __init__(self, value: T):
4 | self.value = value
|
");
}
#[test]
fn goto_declaration_nested_scope_variable() {
let test = cursor_test(
"
x = \"outer\"
def outer_func():
def inner_func():
return x<CURSOR> # Should find outer x
return inner_func
",
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:5:16
|
3 | def outer_func():
4 | def inner_func():
5 | return x # Should find outer x
| ^ Clicking here
6 | return inner_func
|
info: Found 1 declaration
--> main.py:2:1
|
2 | x = "outer"
| -
3 | def outer_func():
4 | def inner_func():
|
"#);
}
#[test]
fn goto_declaration_class_scope_skipped() {
let test = cursor_test(
r#"
class A:
x = 1
def method(self):
def inner():
return <CURSOR>x # Should NOT find class variable x
return inner
"#,
);
// Should not find the class variable 'x' due to Python's scoping rules
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_import_simple() {
let test = CursorTest::builder()
.source(
"main.py",
"
import mymodule
print(mymod<CURSOR>ule.function())
",
)
.source(
"mymodule.py",
r#"
def function():
return "hello from mymodule"
variable = 42
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:3:7
|
2 | import mymodule
3 | print(mymodule.function())
| ^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> mymodule.py:1:1
|
1 |
| -
2 | def function():
3 | return "hello from mymodule"
|
"#);
}
#[test]
fn goto_declaration_import_from() {
let test = CursorTest::builder()
.source(
"main.py",
"
from mymodule import my_function
print(my_func<CURSOR>tion())
",
)
.source(
"mymodule.py",
r#"
def my_function():
return "hello"
def other_function():
return "other"
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:3:7
|
2 | from mymodule import my_function
3 | print(my_function())
| ^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> mymodule.py:2:5
|
2 | def my_function():
| -----------
3 | return "hello"
|
"#);
}
#[test]
fn goto_declaration_import_as() {
let test = CursorTest::builder()
.source(
"main.py",
"
import mymodule.submodule as sub
print(<CURSOR>sub.helper())
",
)
.source(
"mymodule/__init__.py",
"
# Main module init
",
)
.source(
"mymodule/submodule.py",
r#"
FOO = 0
"#,
)
.build();
// Should find the submodule file itself
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:3:7
|
2 | import mymodule.submodule as sub
3 | print(sub.helper())
| ^^^ Clicking here
|
info: Found 1 declaration
--> mymodule/submodule.py:1:1
|
1 |
| -
2 | FOO = 0
|
");
}
#[test]
fn goto_declaration_from_import_rhs_is_module() {
let test = CursorTest::builder()
.source("lib/__init__.py", r#""#)
.source("lib/module.py", r#""#)
.source("main.py", r#"from lib import module<CURSOR>"#)
.build();
// Should resolve to the actual function definition, not the import statement
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:1:17
|
1 | from lib import module
| ^^^^^^ Clicking here
|
info: Found 1 declaration
--> lib/module.py:1:1
|
|
");
}
#[test]
fn goto_declaration_from_import_as() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from utils import func as h
print(<CURSOR>h("test"))
"#,
)
.source(
"utils.py",
r#"
def func(arg):
return f"Processed: {arg}"
"#,
)
.build();
// Should resolve to the actual function definition, not the import statement
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:3:7
|
2 | from utils import func as h
3 | print(h("test"))
| ^ Clicking here
|
info: Found 1 declaration
--> utils.py:2:5
|
2 | def func(arg):
| ----
3 | return f"Processed: {arg}"
|
"#);
}
#[test]
fn goto_declaration_from_import_chain() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from intermediate import shared_function
print(shared_func<CURSOR>tion())
"#,
)
.source(
"intermediate.py",
r#"
# Re-export the function from the original module
from original import shared_function
"#,
)
.source(
"original.py",
r#"
def shared_function():
return "from original"
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:3:7
|
2 | from intermediate import shared_function
3 | print(shared_function())
| ^^^^^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> original.py:2:5
|
2 | def shared_function():
| ---------------
3 | return "from original"
|
"#);
}
#[test]
fn goto_declaration_from_star_import() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from math_utils import *
result = add_n<CURSOR>umbers(5, 3)
"#,
)
.source(
"math_utils.py",
r#"
def add_numbers(a, b):
"""Add two numbers together."""
return a + b
def multiply_numbers(a, b):
"""Multiply two numbers together."""
return a * b
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:3:10
|
2 | from math_utils import *
3 | result = add_numbers(5, 3)
| ^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> math_utils.py:2:5
|
2 | def add_numbers(a, b):
| -----------
3 | """Add two numbers together."""
4 | return a + b
|
"#);
}
#[test]
fn goto_declaration_relative_import() {
let test = CursorTest::builder()
.source(
"package/main.py",
r#"
from .utils import helper_function
result = helper_func<CURSOR>tion("test")
"#,
)
.source(
"package/__init__.py",
r#"
# Package init file
"#,
)
.source(
"package/utils.py",
r#"
def helper_function(arg):
"""A helper function in utils module."""
return f"Processed: {arg}"
def another_helper():
"""Another helper function."""
pass
"#,
)
.build();
// Should resolve the relative import to find the actual function definition
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> package/main.py:3:10
|
2 | from .utils import helper_function
3 | result = helper_function("test")
| ^^^^^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> package/utils.py:2:5
|
2 | def helper_function(arg):
| ---------------
3 | """A helper function in utils module."""
4 | return f"Processed: {arg}"
|
"#);
}
#[test]
fn goto_declaration_relative_star_import() {
let test = CursorTest::builder()
.source(
"package/main.py",
r#"
from .utils import *
result = helper_func<CURSOR>tion("test")
"#,
)
.source(
"package/__init__.py",
r#"
# Package init file
"#,
)
.source(
"package/utils.py",
r#"
def helper_function(arg):
"""A helper function in utils module."""
return f"Processed: {arg}"
def another_helper():
"""Another helper function."""
pass
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> package/main.py:3:10
|
2 | from .utils import *
3 | result = helper_function("test")
| ^^^^^^^^^^^^^^^ Clicking here
|
info: Found 1 declaration
--> package/utils.py:2:5
|
2 | def helper_function(arg):
| ---------------
3 | """A helper function in utils module."""
4 | return f"Processed: {arg}"
|
"#);
}
#[test]
fn goto_declaration_import_as_alias_name() {
let test = CursorTest::builder()
.source(
"main.py",
"
import mymodule.submodule as su<CURSOR>b
print(sub.helper())
",
)
.source(
"mymodule/__init__.py",
"
# Main module init
",
)
.source(
"mymodule/submodule.py",
r#"
FOO = 0
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:2:30
|
2 | import mymodule.submodule as sub
| ^^^ Clicking here
3 | print(sub.helper())
|
info: Found 1 declaration
--> mymodule/submodule.py:1:1
|
1 |
| -
2 | FOO = 0
|
");
}
#[test]
fn goto_declaration_import_as_alias_name_on_module() {
let test = CursorTest::builder()
.source(
"main.py",
"
import mymodule.submod<CURSOR>ule as sub
print(sub.helper())
",
)
.source(
"mymodule/__init__.py",
"
# Main module init
",
)
.source(
"mymodule/submodule.py",
r#"
FOO = 0
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:2:17
|
2 | import mymodule.submodule as sub
| ^^^^^^^^^ Clicking here
3 | print(sub.helper())
|
info: Found 1 declaration
--> mymodule/submodule.py:1:1
|
1 |
| -
2 | FOO = 0
|
");
}
#[test]
fn goto_declaration_from_import_symbol_original() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mypackage.utils import hel<CURSOR>per as h
result = h("/a", "/b")
"#,
)
.source(
"mypackage/__init__.py",
r#"
# Package init
"#,
)
.source(
"mypackage/utils.py",
r#"
def helper(a, b):
return a + "/" + b
def another_helper(path):
return "processed"
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:29
|
2 | from mypackage.utils import helper as h
| ^^^^^^ Clicking here
3 | result = h("/a", "/b")
|
info: Found 1 declaration
--> mypackage/utils.py:2:5
|
2 | def helper(a, b):
| ------
3 | return a + "/" + b
|
"#);
}
#[test]
fn goto_declaration_from_import_symbol_alias() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mypackage.utils import helper as h<CURSOR>
result = h("/a", "/b")
"#,
)
.source(
"mypackage/__init__.py",
r#"
# Package init
"#,
)
.source(
"mypackage/utils.py",
r#"
def helper(a, b):
return a + "/" + b
def another_helper(path):
return "processed"
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:39
|
2 | from mypackage.utils import helper as h
| ^ Clicking here
3 | result = h("/a", "/b")
|
info: Found 1 declaration
--> mypackage/utils.py:2:5
|
2 | def helper(a, b):
| ------
3 | return a + "/" + b
|
"#);
}
#[test]
fn goto_declaration_from_import_module() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
from mypackage.ut<CURSOR>ils import helper as h
result = h("/a", "/b")
"#,
)
.source(
"mypackage/__init__.py",
r#"
# Package init
"#,
)
.source(
"mypackage/utils.py",
r#"
def helper(a, b):
return a + "/" + b
def another_helper(path):
return "processed"
"#,
)
.build();
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:16
|
2 | from mypackage.utils import helper as h
| ^^^^^ Clicking here
3 | result = h("/a", "/b")
|
info: Found 1 declaration
--> mypackage/utils.py:1:1
|
1 |
| -
2 | def helper(a, b):
3 | return a + "/" + b
|
"#);
}
#[test]
fn goto_declaration_instance_attribute() {
let test = cursor_test(
"
class C:
def __init__(self):
self.x: int = 1
c = C()
y = c.x<CURSOR>
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:7:7
|
6 | c = C()
7 | y = c.x
| ^ Clicking here
|
info: Found 1 declaration
--> main.py:4:9
|
2 | class C:
3 | def __init__(self):
4 | self.x: int = 1
| ------
5 |
6 | c = C()
|
");
}
#[test]
fn goto_declaration_string_annotation1() {
let test = cursor_test(
r#"
a: "MyCla<CURSOR>ss" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:5
|
2 | a: "MyClass" = 1
| ^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 declaration
--> main.py:4:7
|
2 | a: "MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn goto_declaration_string_annotation2() {
let test = cursor_test(
r#"
a: "None | MyCl<CURSOR>ass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 declaration
--> main.py:4:7
|
2 | a: "None | MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn goto_declaration_string_annotation3() {
let test = cursor_test(
r#"
a: "None |<CURSOR> MyClass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_string_annotation4() {
let test = cursor_test(
r#"
a: "None | MyClass<CURSOR>" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 declaration
--> main.py:4:7
|
2 | a: "None | MyClass" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn goto_declaration_string_annotation5() {
let test = cursor_test(
r#"
a: "None | MyClass"<CURSOR> = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_string_annotation_dangling1() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass |" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_string_annotation_dangling2() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass | No" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:5
|
2 | a: "MyClass | No" = 1
| ^^^^^^^ Clicking here
3 |
4 | class MyClass:
|
info: Found 1 declaration
--> main.py:4:7
|
2 | a: "MyClass | No" = 1
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn goto_declaration_string_annotation_dangling3() {
let test = cursor_test(
r#"
a: "MyClass | N<CURSOR>o" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_string_annotation_recursive() {
let test = cursor_test(
r#"
ab: "a<CURSOR>b"
"#,
);
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:2:6
|
2 | ab: "ab"
| ^^ Clicking here
|
info: Found 1 declaration
--> main.py:2:1
|
2 | ab: "ab"
| --
|
"#);
}
#[test]
fn goto_declaration_string_annotation_unknown() {
let test = cursor_test(
r#"
x: "foo<CURSOR>bar"
"#,
);
assert_snapshot!(test.goto_declaration(), @"No goto target found");
}
#[test]
fn goto_declaration_nested_instance_attribute() {
let test = cursor_test(
"
class C:
def __init__(self):
self.x: int = 1
class D:
def __init__(self):
self.y: C = C()
d = D()
y = d.y.x<CURSOR>
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:11:9
|
10 | d = D()
11 | y = d.y.x
| ^ Clicking here
|
info: Found 1 declaration
--> main.py:4:9
|
2 | class C:
3 | def __init__(self):
4 | self.x: int = 1
| ------
5 |
6 | class D:
|
");
}
#[test]
fn goto_declaration_instance_attribute_no_annotation() {
let test = cursor_test(
"
class C:
def __init__(self):
self.x = 1
c = C()
y = c.x<CURSOR>
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:7:7
|
6 | c = C()
7 | y = c.x
| ^ Clicking here
|
info: Found 1 declaration
--> main.py:4:9
|
2 | class C:
3 | def __init__(self):
4 | self.x = 1
| ------
5 |
6 | c = C()
|
");
}
#[test]
fn goto_declaration_method_call_to_definition() {
let test = cursor_test(
"
class C:
def foo(self):
return 42
c = C()
res = c.foo<CURSOR>()
",
);
assert_snapshot!(test.goto_declaration(), @r"
info[goto-declaration]: Go to declaration
--> main.py:7:9
|
6 | c = C()
7 | res = c.foo()
| ^^^ Clicking here
|
info: Found 1 declaration
--> main.py:3:9
|
2 | class C:
3 | def foo(self):
| ---
4 | return 42
|
");
}
#[test]
fn goto_declaration_module_attribute() {
let test = cursor_test(
r#"
x: i<CURSOR>nt = 42
"#,
);
// Test that we can navigate to builtin types, but don't snapshot the exact content
// since typeshed stubs can change frequently
let result = test.goto_declaration();
// Should not be "No goto target found" - we should find the builtin int type
assert!(
!result.contains("No goto target found"),
"Should find builtin int type"
);
assert!(
!result.contains("No declarations found"),
"Should find builtin int declarations"
);
// Should navigate to a stdlib file containing the int class
assert!(
result.contains("builtins.pyi"),
"Should navigate to builtins.pyi"
);
assert!(
result.contains("class int:"),
"Should find the int class definition"
);
assert!(
result.contains("info[goto-declaration]: Go to declaration"),
"Should be a goto-declaration result"
);
}
#[test]
fn goto_declaration_nonlocal_binding() {
let test = cursor_test(
r#"
def outer():
x = "outer_value"
def inner():
nonlocal x
x = "modified"
return x<CURSOR> # Should find the nonlocal x declaration in outer scope
return inner
"#,
);
// Should find the variable declaration in the outer scope, not the nonlocal statement
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:8:16
|
6 | nonlocal x
7 | x = "modified"
8 | return x # Should find the nonlocal x declaration in outer scope
| ^ Clicking here
9 |
10 | return inner
|
info: Found 1 declaration
--> main.py:3:5
|
2 | def outer():
3 | x = "outer_value"
| -
4 |
5 | def inner():
|
"#);
}
#[test]
fn goto_declaration_nonlocal_stmt() {
let test = cursor_test(
r#"
def outer():
xy = "outer_value"
def inner():
nonlocal x<CURSOR>y
xy = "modified"
return x # Should find the nonlocal x declaration in outer scope
return inner
"#,
);
// Should find the variable declaration in the outer scope, not the nonlocal statement
assert_snapshot!(test.goto_declaration(), @r#"
info[goto-declaration]: Go to declaration
--> main.py:6:18
|
5 | def inner():
6 | nonlocal xy
| ^^ Clicking here
7 | xy = "modified"
8 | return x # Should find the nonlocal x declaration in outer scope
|
info: Found 1 declaration
--> main.py:3:5
|
2 | def outer():
3 | xy = "outer_value"
| --
4 |
5 | def inner():
|
"#);
}
#[test]
fn goto_declaration_global_binding() {
let test = cursor_test(
r#"
global_var = "global_value"
def function():
global global_var
global_var = "modified"
return global_<CURSOR>var # Should find the global variable declaration
"#,
);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/completion.rs | crates/ty_ide/src/completion.rs | use std::cmp::Ordering;
use ruff_db::files::File;
use ruff_db::parsed::{ParsedModuleRef, parsed_module};
use ruff_db::source::{SourceText, source_text};
use ruff_diagnostics::Edit;
use ruff_python_ast::find_node::{CoveringNode, covering_node};
use ruff_python_ast::name::Name;
use ruff_python_ast::token::{Token, TokenKind, Tokens};
use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_python_codegen::Stylist;
use ruff_text_size::{Ranged, TextRange, TextSize};
use rustc_hash::FxHashSet;
use ty_module_resolver::{KnownModule, ModuleName};
use ty_python_semantic::HasType;
use ty_python_semantic::types::UnionType;
use ty_python_semantic::{
Completion as SemanticCompletion, NameKind, SemanticModel,
types::{CycleDetector, KnownClass, Type},
};
use crate::docstring::Docstring;
use crate::goto::Definitions;
use crate::importer::{ImportRequest, Importer};
use crate::symbols::QueryPattern;
use crate::{Db, all_symbols, signature_help};
/// Computes the completion suggestions available at `offset` in `file`.
///
/// Returns an empty list when the cursor sits at a position where no
/// completion context can be established. Otherwise, candidates come from
/// the semantic model (attribute members or scope symbols), keyword and
/// call-argument suggestions, and — when enabled via `settings` —
/// unimported symbols that would be auto-imported on selection.
pub fn completion<'db>(
    db: &'db dyn Db,
    settings: &CompletionSettings,
    file: File,
    offset: TextSize,
) -> Vec<Completion<'db>> {
    let module = parsed_module(db, file).load(db);
    let text = source_text(db, file);

    let Some(context) = Context::new(db, file, &module, &text, offset) else {
        return vec![];
    };

    // The text the user has typed so far acts as a fuzzy filter; with no
    // typed text, every symbol matches.
    let query = match context.cursor.typed {
        Some(typed) => QueryPattern::fuzzy(typed),
        None => QueryPattern::matches_all_symbols(),
    };
    let mut completions = Completions::new(db, context.collection_context(db), query);

    match context.kind {
        ContextKind::Import(ref import) => {
            import.add_completions(db, file, &mut completions);
        }
        ContextKind::NonImport(ref non_import) => {
            let model = SemanticModel::new(db, file);
            match non_import.target {
                CompletionTargetAst::ObjectDot { expr } => {
                    // `obj.<CURSOR>`: offer the object's attributes.
                    completions.extend(model.attribute_completions(expr));
                }
                CompletionTargetAst::Scoped(scoped) => {
                    // Plain name position: offer everything visible in scope,
                    // plus keywords and call-argument names.
                    completions.extend(model.scoped_completions(scoped.node));
                    add_keyword_completions(db, &mut completions);
                    add_argument_completions(db, &model, &context.cursor, &mut completions);
                    if settings.auto_import {
                        add_unimported_completions(
                            db,
                            file,
                            &module,
                            scoped,
                            |module_name: &ModuleName, symbol: &str| {
                                ImportRequest::import_from(module_name.as_str(), symbol)
                            },
                            &mut completions,
                        );
                    }
                }
            }
        }
    }

    completions.into_completions()
}
/// A collection of completions built up from various sources.
struct Completions<'db> {
    /// Database handle used for type queries while filtering candidates.
    db: &'db dyn Db,
    /// Contextual information used for filtering and ranking suggestions.
    context: CollectionContext<'db>,
    /// The completions accepted so far, in insertion order (sorted on output).
    items: Vec<Completion<'db>>,
    /// Pattern derived from the user's typed text; candidates must match it.
    query: QueryPattern,
}
impl<'db> Completions<'db> {
    /// The maximum number of completions returned to the client.
    ///
    /// A user should refine their completion request if the searched symbol
    /// doesn't appear in the first 1k results. Serializing/deserializing 1k
    /// completions can be expensive and result in noticeable lag.
    const MAX_COMPLETIONS: usize = 1000;

    /// Create a new empty collection of completions.
    ///
    /// The given typed text should correspond to what we believe
    /// the user has typed as part of the next symbol they are writing.
    /// This collection will treat it as a query when present, and only
    /// add completions that match it.
    fn new(
        db: &'db dyn Db,
        context: CollectionContext<'db>,
        query: QueryPattern,
    ) -> Completions<'db> {
        Completions {
            db,
            context,
            items: vec![],
            query,
        }
    }

    /// Sorts the accumulated items by the collection context's ranking and
    /// removes adjacent duplicates (same name originating from the same
    /// module).
    ///
    /// Shared by all the `into_*` conversions below, which previously each
    /// duplicated these two steps.
    fn sort_and_dedup(&mut self) {
        self.items.sort_by(|c1, c2| self.context.compare(c1, c2));
        self.items
            .dedup_by(|c1, c2| (&c1.name, c1.module_name) == (&c2.name, c2.module_name));
    }

    /// Convert this collection into a simple
    /// sequence of completions.
    fn into_completions(mut self) -> Vec<Completion<'db>> {
        self.sort_and_dedup();
        self.items.truncate(Self::MAX_COMPLETIONS);
        self.items
    }

    /// Convert this collection into a list of "import..." fixes.
    ///
    /// Items lacking either a qualified name or an import edit are dropped.
    fn into_imports(mut self) -> Vec<ImportEdit> {
        self.sort_and_dedup();
        self.items
            .into_iter()
            .filter_map(|item| {
                Some(ImportEdit {
                    label: format!("import {}", item.qualified?),
                    edit: item.import?,
                })
            })
            .collect()
    }

    /// Convert this collection into a list of "qualify..." fixes that replace
    /// the text at `range` with the item's insert text.
    fn into_qualifications(mut self, range: TextRange) -> Vec<ImportEdit> {
        self.sort_and_dedup();
        self.items
            .into_iter()
            .filter_map(|item| {
                // If we would have to actually import something, don't suggest
                // the qualification (we could, maybe we should, but for now,
                // we don't).
                if item.import.is_some() {
                    return None;
                }
                Some(ImportEdit {
                    label: format!("qualify {}", item.insert.as_ref()?),
                    edit: Edit::replacement(item.insert?.into_string(), range.start(), range.end()),
                })
            })
            .collect()
    }

    /// Attempts to adds the given completion to this collection.
    ///
    /// When added, `true` is returned.
    ///
    /// This might not add the completion for a variety of reasons.
    /// For example, if the symbol name does not match this collection's
    /// query.
    fn add(&mut self, completion: Completion<'db>) -> bool {
        if !self.query.is_match_symbol_name(completion.name.as_str()) {
            return false;
        }
        self.add_skip_query(completion)
    }

    /// Attempts to add the given semantic completion to this collection.
    ///
    /// When added, `true` is returned.
    fn add_semantic(&mut self, completion: SemanticCompletion<'db>) -> bool {
        self.add(Completion::from_semantic_completion(self.db, completion))
    }

    /// Attempts to add the given completion to this collection.
    ///
    /// Unlike `Completion::add`, this will skip matching the query
    /// pattern associated with this collection of completions. This
    /// is useful, for example, when the completions have been filtered
    /// by the query pattern already.
    ///
    /// This may still choose not to add the completion. For example,
    /// when the completion context determines that the given suggestion
    /// is never valid.
    fn add_skip_query(&mut self, mut completion: Completion<'db>) -> bool {
        // Tags completions with whether they are known to be usable in
        // a `raise` context.
        //
        // It's possible that some completions are usable in a `raise`
        // but aren't marked here. That is, false negatives are
        // possible but false positives are not.
        if let Some(raisable_ty) = self.context.raisable_ty {
            if let Some(ty) = completion.ty {
                completion.is_definitively_raisable = ty.is_assignable_to(self.db, raisable_ty);
            }
        }
        if self.context.exclude(self.db, &completion) {
            return false;
        }
        self.items.push(completion);
        true
    }
}
impl<'db> Extend<SemanticCompletion<'db>> for Completions<'db> {
    fn extend<T>(&mut self, it: T)
    where
        T: IntoIterator<Item = SemanticCompletion<'db>>,
    {
        // Each candidate is individually vetted (query match, context
        // exclusion) by `add_semantic`; its accept/reject result is ignored.
        it.into_iter().for_each(|completion| {
            self.add_semantic(completion);
        });
    }
}
impl<'db> Extend<Completion<'db>> for Completions<'db> {
    fn extend<T>(&mut self, it: T)
    where
        T: IntoIterator<Item = Completion<'db>>,
    {
        // Route every item through `add` so the collection's query and
        // context filtering still apply; the accept/reject flag is ignored.
        it.into_iter().for_each(|completion| {
            self.add(completion);
        });
    }
}
/// A single completion suggestion, as assembled for the client.
#[derive(Clone, Debug)]
pub struct Completion<'db> {
    /// The label shown to the user for this suggestion.
    pub name: Name,
    /// The fully qualified name, when available.
    ///
    /// This is only set when `module_name` is available.
    pub qualified: Option<Name>,
    /// The text that should be inserted at the cursor
    /// when the completion is selected.
    ///
    /// When this is not set, `name` is used.
    pub insert: Option<Box<str>>,
    /// The type of this completion, if available.
    ///
    /// Generally speaking, this is always available
    /// *unless* this was a completion corresponding to
    /// an unimported symbol. In that case, computing the
    /// type of all such symbols could be quite expensive.
    pub ty: Option<Type<'db>>,
    /// The "kind" of this completion.
    ///
    /// When this is set, it takes priority over any kind
    /// inferred from `ty`.
    ///
    /// Usually this is set when `ty` is `None`, since it
    /// may be cheaper to compute at scale (e.g., for
    /// unimported symbol completions).
    ///
    /// Callers should use [`Completion::kind`] to get the
    /// kind, which will take type information into account
    /// if this kind is not present.
    pub kind: Option<CompletionKind>,
    /// The name of the module that this completion comes from.
    ///
    /// This is generally only present when this is a completion
    /// suggestion for an unimported symbol.
    pub module_name: Option<&'db ModuleName>,
    /// An import statement to insert (or ensure is already
    /// present) when this completion is selected.
    pub import: Option<Edit>,
    /// Whether this suggestion came from builtins or not.
    ///
    /// At time of writing (2025-06-26), this information
    /// doesn't make it into the LSP response. Instead, we
    /// use it mainly in tests so that we can write less
    /// noisy tests.
    pub builtin: bool,
    /// Whether this item only exists for type checking purposes and
    /// will be missing at runtime
    pub is_type_check_only: bool,
    /// Whether this item can definitively be used in a `raise` context.
    ///
    /// Note that this may not always be computed. (i.e., Only computed
    /// when we are in a `raise` context.) And also note that if this
    /// is `true`, then it's definitively usable in `raise`, but if
    /// it's `false`, it _may_ still be usable in `raise`.
    pub is_definitively_raisable: bool,
    /// The documentation associated with this item, if
    /// available.
    pub documentation: Option<Docstring>,
}
impl<'db> Completion<'db> {
/// Builds a [`Completion`] from a completion produced by the semantic model.
///
/// The docstring is looked up through the symbol's definition site when its
/// type is known; the remaining fields are carried over or defaulted.
fn from_semantic_completion(
    db: &'db dyn Db,
    semantic: SemanticCompletion<'db>,
) -> Completion<'db> {
    let documentation = semantic
        .ty
        .and_then(|ty| Definitions::from_ty(db, ty))
        .and_then(|definition| definition.docstring(db));
    let is_type_check_only = semantic.is_type_check_only(db);

    Completion {
        name: semantic.name,
        qualified: None,
        insert: None,
        ty: semantic.ty,
        kind: None,
        module_name: None,
        import: None,
        builtin: semantic.builtin,
        is_type_check_only,
        is_definitively_raisable: false,
        documentation,
    }
}
/// Returns the "kind" of this completion.
///
/// This is meant to be a very general classification of this completion.
/// Typically, this is communicated from the LSP server to a client, and
/// the client uses this information to help improve the UX (perhaps by
/// assigning an icon of some kind to the completion).
pub fn kind(&self, db: &'db dyn Db) -> Option<CompletionKind> {
type CompletionKindVisitor<'db> =
CycleDetector<CompletionKind, Type<'db>, Option<CompletionKind>>;
fn imp<'db>(
db: &'db dyn Db,
ty: Type<'db>,
visitor: &CompletionKindVisitor<'db>,
) -> Option<CompletionKind> {
Some(match ty {
Type::FunctionLiteral(_)
| Type::DataclassDecorator(_)
| Type::WrapperDescriptor(_)
| Type::DataclassTransformer(_)
| Type::Callable(_) => CompletionKind::Function,
Type::BoundMethod(_) | Type::KnownBoundMethod(_) => CompletionKind::Method,
Type::ModuleLiteral(_) => CompletionKind::Module,
Type::ClassLiteral(_) | Type::GenericAlias(_) | Type::SubclassOf(_) => {
CompletionKind::Class
}
// This is a little weird for "struct." I'm mostly interpreting
// "struct" here as a more general "object." ---AG
Type::NominalInstance(_)
| Type::PropertyInstance(_)
| Type::BoundSuper(_)
| Type::TypedDict(_)
| Type::NewTypeInstance(_) => CompletionKind::Struct,
Type::IntLiteral(_)
| Type::BooleanLiteral(_)
| Type::TypeIs(_)
| Type::TypeGuard(_)
| Type::StringLiteral(_)
| Type::LiteralString
| Type::BytesLiteral(_) => CompletionKind::Value,
Type::EnumLiteral(_) => CompletionKind::Enum,
Type::ProtocolInstance(_) => CompletionKind::Interface,
Type::TypeVar(_) => CompletionKind::TypeParameter,
Type::Union(union) => union
.elements(db)
.iter()
.find_map(|&ty| imp(db, ty, visitor))?,
Type::Intersection(intersection) => intersection
.iter_positive(db)
.find_map(|ty| imp(db, ty, visitor))?,
Type::Dynamic(_)
| Type::Never
| Type::SpecialForm(_)
| Type::KnownInstance(_)
| Type::AlwaysTruthy
| Type::AlwaysFalsy => return None,
Type::TypeAlias(alias) => {
visitor.visit(ty, || imp(db, alias.value_type(db), visitor))?
}
})
}
self.kind.or_else(|| {
self.ty
.and_then(|ty| imp(db, ty, &CompletionKindVisitor::default()))
})
}
fn keyword(name: &str) -> Self {
Completion {
name: name.into(),
qualified: None,
insert: None,
ty: None,
kind: Some(CompletionKind::Keyword),
module_name: None,
import: None,
builtin: false,
is_type_check_only: false,
is_definitively_raisable: false,
documentation: None,
}
}
fn value_keyword(name: &str, ty: Type<'db>) -> Completion<'db> {
Completion {
name: name.into(),
qualified: None,
insert: None,
ty: Some(ty),
kind: Some(CompletionKind::Keyword),
module_name: None,
import: None,
builtin: true,
is_type_check_only: false,
is_definitively_raisable: false,
documentation: None,
}
}
fn argument(name: &str, ty: Option<Type<'db>>, documentation: Option<&str>) -> Self {
let insert = Some(format!("{name}=").into_boxed_str());
let documentation = documentation.map(|d| Docstring::new(d.to_owned()));
Completion {
name: name.into(),
qualified: None,
insert,
ty,
kind: Some(CompletionKind::Variable),
module_name: None,
import: None,
builtin: false,
is_type_check_only: false,
is_definitively_raisable: false,
documentation,
}
}
/// Returns true when this completion refers to the
/// `NotImplemented` builtin.
fn is_notimplemented(&self, db: &dyn Db) -> bool {
let Some(ty) = self.ty else { return false };
ty.is_notimplemented(db)
}
}
/// The "kind" of a completion.
///
/// This is taken directly from the LSP completion specification:
/// <https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#completionItemKind>
///
/// The idea here is that [`Completion::kind`] defines the mapping to this from
/// `Type` (and possibly other information), which might be interesting and
/// contentious. Then the outer edges map this to the LSP types, which is
/// expected to be mundane and boring.
// NOTE: The variants intentionally mirror LSP's `CompletionItemKind`
// one-to-one, listed in the same order as the specification.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CompletionKind {
    Text,
    Method,
    Function,
    Constructor,
    Field,
    Variable,
    Class,
    Interface,
    Module,
    Property,
    Unit,
    Value,
    Enum,
    Keyword,
    Snippet,
    Color,
    File,
    Reference,
    Folder,
    EnumMember,
    Constant,
    Struct,
    Event,
    Operator,
    TypeParameter,
}
/// User-configurable settings that control completion behavior.
#[derive(Clone, Debug)]
pub struct CompletionSettings {
    /// Whether completions may include auto-import suggestions
    /// (completions that carry an import edit alongside the name).
    pub auto_import: bool,
}
// N.B. It's important for the defaults here to match the defaults
// established by `CompletionOptions::into_settings`. This is
// because `WorkspaceSettings::default()` uses this definition.
// But `WorkspaceOptions::default().into_settings()` will use the
// `CompletionOptions::into_settings` definition.
impl Default for CompletionSettings {
fn default() -> CompletionSettings {
CompletionSettings { auto_import: true }
}
}
/// The completion context.
///
/// This context is used to determine how to find the
/// initial set of completions to offer to a user.
///
/// The lifetime parameter `'m` refers to the parsed module containing
/// the cursor.
struct Context<'m> {
    /// Whether the cursor is in an import statement or not,
    /// with the details for whichever case applies.
    kind: ContextKind<'m>,
    /// Details about the cursor position itself
    /// (typed text, offset, preceding tokens).
    cursor: ContextCursor<'m>,
}
/// The broad syntactic category the cursor falls into, which
/// determines how the initial set of completions is gathered.
#[derive(Debug)]
enum ContextKind<'m> {
    /// The cursor is somewhere inside an import statement.
    Import(ImportStatement<'m>),
    /// The cursor is anywhere else.
    NonImport(ContextNonImport<'m>),
}
/// Context for non-import completions.
#[derive(Debug)]
struct ContextNonImport<'m> {
    /// The AST of the completion target.
    target: CompletionTargetAst<'m>,
}
impl<'m> Context<'m> {
    /// Builds the completion context for the cursor at `offset`.
    ///
    /// Returns `None` when the cursor is in a place where completions
    /// should not be offered, or when no completion target can be
    /// determined from the surrounding syntax.
    fn new(
        db: &'_ dyn Db,
        file: File,
        parsed: &'m ParsedModuleRef,
        source: &'m SourceText,
        offset: TextSize,
    ) -> Option<Context<'m>> {
        let cursor = ContextCursor::new(parsed, source, offset);
        if cursor.is_in_no_completions_place() {
            return None;
        }
        // Import statements get special handling; everything else goes
        // through token/AST based target detection.
        let kind = match ImportStatement::detect(db, file, &cursor) {
            Some(import) => ContextKind::Import(import),
            None => {
                let target = CompletionTargetTokens::find(&cursor)?.ast(&cursor)?;
                ContextKind::NonImport(ContextNonImport { target })
            }
        };
        Some(Context { kind, cursor })
    }
    /// Returns a filtering context for use with a completion collector.
    fn collection_context<'db>(&self, db: &'db dyn Db) -> CollectionContext<'db> {
        // Import completions never need any filtering.
        if let ContextKind::Import(_) = self.kind {
            return CollectionContext::none();
        }
        let is_raising_exception = self.cursor.is_raising_exception();
        // In a `raise` context, completions can be checked against the
        // union of types that may legitimately be raised: exception
        // classes and exception instances.
        let raisable_ty = is_raising_exception.then(|| {
            UnionType::from_elements(
                db,
                [
                    KnownClass::BaseException.to_subclass_of(db),
                    KnownClass::BaseException.to_instance(db),
                ],
            )
        });
        CollectionContext {
            raisable_ty,
            is_raising_exception,
            valid_keywords: self.cursor.valid_keywords(),
        }
    }
}
/// Extracts information about the cursor position.
///
/// This includes text that was typed and the cursor's
/// byte offset in the source code.
///
/// The lifetime parameter `'m` refers to the shorter of the following
/// lifetimes: the parsed module the cursor is in and the actual bytes
/// making up the source file containing the cursor.
struct ContextCursor<'m> {
    /// The parsed module containing the cursor.
    parsed: &'m ParsedModuleRef,
    /// The source code of the module containing the cursor.
    source: &'m SourceText,
    /// The typed text up to the cursor offset.
    ///
    /// When `Some`, the text is guaranteed to be non-empty.
    typed: Option<&'m str>,
    /// The byte offset of the cursor.
    offset: TextSize,
    /// The byte range of the typed text when non-empty.
    /// When empty, this is just an empty range at the
    /// position of the cursor.
    range: TextRange,
    /// The tokens that appear before the cursor
    /// (computed via `tokens_start_before`).
    tokens_before: &'m [Token],
}
impl<'m> ContextCursor<'m> {
/// Returns information about the context of the cursor.
fn new(
    parsed: &'m ParsedModuleRef,
    source: &'m SourceText,
    offset: TextSize,
) -> ContextCursor<'m> {
    let tokens_before = tokens_start_before(parsed.tokens(), offset);
    match ContextCursor::find_typed_text_range(tokens_before, offset) {
        // Nothing recognizable has been typed at the cursor.
        None => ContextCursor {
            parsed,
            source,
            typed: None,
            offset,
            range: TextRange::empty(offset),
            tokens_before,
        },
        Some(range) => {
            let text = &source[range];
            assert!(
                !text.is_empty(),
                "expected typed text, when found, to be non-empty"
            );
            ContextCursor {
                parsed,
                source,
                typed: Some(text),
                offset,
                range,
                tokens_before,
            }
        }
    }
}
/// Looks for the byte range of the text typed immediately before
/// the cursor offset given. `tokens_before` should be the tokens
/// from the start of the file up until `offset`.
///
/// If there isn't any typed text or it could not otherwise be
/// found, then `None` is returned.
///
/// When `Some` is returned, the range is guaranteed to be non-empty.
fn find_typed_text_range(tokens_before: &[Token], offset: TextSize) -> Option<TextRange> {
    let last = tokens_before.last()?;
    // It's odd to accept keyword tokens here (e.g., `import`), but a
    // keyword token means the user has literally typed that keyword,
    // which is useful to know in some contexts.
    let is_typed_kind = matches!(last.kind(), TokenKind::Name) || last.kind().is_keyword();
    // This one's weird: when the cursor sits beyond the end of the
    // closest token (or the token is empty), there's likely whitespace
    // or something else not represented in the token stream between
    // the token and the cursor. We can't infer anything about what was
    // typed in that case, so we give up.
    let reaches_cursor = offset <= last.end() && !last.range().is_empty();
    (is_typed_kind && reaches_cursor).then(|| TextRange::new(last.start(), offset))
}
/// Convenience method for `covering_node(cursor.parsed.syntax().into(), ...)`.
///
/// That is, it searches this cursor's module for the AST node
/// covering `range`.
fn covering_node(&self, range: TextRange) -> CoveringNode<'m> {
    covering_node(self.parsed.syntax().into(), range)
}
/// Whether the last token is in a place where we should not provide completions.
///
/// That is: inside a comment, inside a string, or in a position that
/// introduces the definition of a brand new name.
fn is_in_no_completions_place(&self) -> bool {
    self.is_in_comment() || self.is_in_string() || self.is_in_definition_place()
}
/// Whether the last token is within a comment or not.
fn is_in_comment(&self) -> bool {
    match self.tokens_before.last() {
        Some(token) => token.kind().is_comment(),
        None => false,
    }
}
/// Whether the last token is positioned within a string token (regular, f-string, t-string, etc).
///
/// Note that this will return `false` when the last token is positioned within an
/// interpolation block in an f-string or a t-string.
fn is_in_string(&self) -> bool {
    let Some(token) = self.tokens_before.last() else {
        return false;
    };
    matches!(
        token.kind(),
        TokenKind::String | TokenKind::FStringMiddle | TokenKind::TStringMiddle
    )
}
/// Returns true when the tokens indicate that the definition of a new
/// name is being introduced at the end.
fn is_in_definition_place(&self) -> bool {
    /// Token kinds that introduce a new name right after them.
    fn is_definition_token(token: &Token) -> bool {
        matches!(
            token.kind(),
            TokenKind::Def
                | TokenKind::Class
                | TokenKind::Type
                | TokenKind::As
                | TokenKind::For
        )
    }
    // The `type` soft keyword may be lexed as a plain `Name` token, so
    // it's recognized by its source text as well.
    let is_definition_keyword = |token: &Token| {
        is_definition_token(token)
            || (token.kind() == TokenKind::Name && &self.source[token.range()] == "type")
    };
    // When text has been typed, the last token is that text itself, so
    // the would-be definition keyword sits just before it. Otherwise
    // the last token is the candidate.
    let keyword_candidate = if self.typed.is_some() {
        self.tokens_before
            .len()
            .checked_sub(2)
            .map(|index| &self.tokens_before[index])
    } else {
        self.tokens_before.last()
    };
    if keyword_candidate.is_some_and(is_definition_keyword) {
        return true;
    }
    // Analyze the AST if token matching is insufficient
    // to determine if we're inside a name definition.
    self.is_in_variable_binding()
}
/// Returns true when the cursor sits on a binding statement.
/// E.g. naming a parameter, type parameter, or `for` <name>).
fn is_in_variable_binding(&self) -> bool {
    let covering = self.covering_node(self.range);
    for ancestor in covering.ancestors() {
        let binds_name_here = match ancestor {
            ast::AnyNodeRef::Parameter(param) => {
                param.name.range.contains_range(self.range)
            }
            ast::AnyNodeRef::TypeParamTypeVar(type_param) => {
                type_param.name.range.contains_range(self.range)
            }
            ast::AnyNodeRef::StmtFor(stmt_for) => {
                stmt_for.target.range().contains_range(self.range)
            }
            // The AST does not produce `ast::AnyNodeRef::Parameter` nodes
            // for keywords or otherwise invalid syntax. Rather, they are
            // captured in an `ast::AnyNodeRef::Parameters` node as "empty
            // space". If the cursor falls inside the `Parameters` node but
            // outside every individual parameter, we're still naming
            // parameters — just with syntactically invalid source — so
            // completion suggestions are suppressed in that case too.
            ast::AnyNodeRef::Parameters(params) => {
                params.range.contains_range(self.range)
                    && params
                        .iter()
                        .all(|param| !param.range().contains_range(self.range))
            }
            _ => false,
        };
        if binds_name_here {
            return true;
        }
    }
    false
}
/// Returns true when the cursor is after a `raise` keyword.
fn is_raising_exception(&self) -> bool {
    /// The maximum number of tokens we're willing to
    /// look-behind to find a `raise` keyword.
    const LIMIT: usize = 10;
    // This only looks for things like `raise foo.bar.baz.qu<CURSOR>`.
    // Technically, any kind of expression is allowed after `raise`, but
    // we're deliberately conservative about what counts as "raising an
    // exception" for completion purposes: scanning backwards and
    // skipping over names and dots, whatever token we hit first decides.
    // The failure mode is that we may suggest things that shouldn't be
    // raised; the benefit is that when the heuristic does fire, we
    // won't suggest things that shouldn't be raised.
    let first_interesting = self
        .tokens_before
        .iter()
        .rev()
        .take(LIMIT)
        .map(|token| token.kind())
        .find(|&kind| !matches!(kind, TokenKind::Name | TokenKind::Dot));
    first_interesting == Some(TokenKind::Raise)
}
/// Returns a set of keywords that are valid at
/// the current cursor position.
///
/// Returns None if no context-based exclusions can
/// be identified. Meaning that all keywords are valid.
fn valid_keywords(&self) -> Option<FxHashSet<&'static str>> {
    let covering_node = self.covering_node(self.range);
    // Inside the naming part of a decorator, only `lambda` makes sense.
    // The upward walk stops at argument lists (and statements) so that
    // suggestions aren't suppressed while specifying decorator arguments.
    let in_decorator_name = covering_node
        .ancestors()
        .take_while(|node| {
            !matches!(node, ast::AnyNodeRef::Arguments(_)) && !node.is_statement()
        })
        .any(|node| matches!(node, ast::AnyNodeRef::Decorator(_)));
    if in_decorator_name {
        return Some(FxHashSet::from_iter(["lambda"]));
    }
    // Otherwise, the first ancestor that constrains what may follow the
    // cursor determines the keyword set.
    for node in covering_node.ancestors() {
        if self.is_in_for_statement_iterable(node) {
            return Some(FxHashSet::from_iter(["yield", "lambda", "await"]));
        }
        if self.is_expecting_expression(node) {
            return Some(FxHashSet::from_iter([
                "await", "lambda", "yield", "for", "if", "else", "and", "or", "not",
                "in", "is", "True", "False", "None",
            ]));
        }
    }
    None
}
/// Returns true when only an expression is valid after the cursor
/// according to the python grammar.
///
/// `node` should be the smallest AST node fully covering the
/// typed text.
fn is_expecting_expression(&self, node: ast::AnyNodeRef) -> bool {
let contains = |expr: &ast::Expr| expr.range().contains_range(self.range);
match node {
// All checks here are intended to find cases where
// the python grammar disallows anything but expressions.
// if_stmt := 'if' named_expression ':' block elif_stmt
ast::AnyNodeRef::StmtIf(stmt) => {
contains(&stmt.test)
|| stmt
.elif_else_clauses
.iter()
.any(|clause| clause.test.as_ref().is_some_and(contains))
}
// while_stmt := 'while' named_expression ':' block [else_block]
ast::AnyNodeRef::StmtWhile(stmt) => contains(&stmt.test),
// for_stmt := 'for' star_targets 'in' ~ star_expressions ':' [TYPE_COMMENT] block [else_block]
ast::AnyNodeRef::StmtFor(stmt) => contains(&stmt.iter),
// with_item := expression
ast::AnyNodeRef::StmtWith(stmt) => {
stmt.items.iter().any(|item| contains(&item.context_expr))
}
// match_stmt := "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
ast::AnyNodeRef::StmtMatch(stmt) => contains(&stmt.subject),
// case_guard := 'if' named_expression
ast::AnyNodeRef::MatchCase(case) => {
case.guard.as_deref().is_some_and(contains)
|| case.pattern.range().contains_range(self.range)
}
// assert_stmt := 'assert' expression [',' expression ]
ast::AnyNodeRef::StmtAssert(stmt) => {
contains(&stmt.test) || stmt.msg.as_deref().is_some_and(contains)
}
// raise_stmt := 'raise' expression ['from' expression ]
ast::AnyNodeRef::StmtRaise(stmt) => {
stmt.exc.as_deref().is_some_and(contains)
|| stmt.cause.as_deref().is_some_and(contains)
}
// return_stmt := 'return' [star_expressions]
ast::AnyNodeRef::StmtReturn(stmt) => stmt.value.as_deref().is_some_and(contains),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/rename.rs | crates/ty_ide/src/rename.rs | use crate::goto::find_goto_target;
use crate::references::{ReferencesMode, references};
use crate::{Db, ReferenceTarget};
use ruff_db::files::File;
use ruff_text_size::{Ranged, TextSize};
use ty_python_semantic::SemanticModel;
/// Returns the range of the symbol if it can be renamed, None if not.
pub fn can_rename(db: &dyn Db, file: File, offset: TextSize) -> Option<ruff_text_size::TextRange> {
    let parsed = ruff_db::parsed::parsed_module(db, file);
    let module = parsed.load(db);
    let model = SemanticModel::new(db, file);
    // Locate the symbol under the cursor.
    let goto_target = find_goto_target(&model, &module, offset)?;
    // Components of module names in import statements can't be renamed.
    if matches!(
        goto_target,
        crate::goto::GotoTarget::ImportModuleComponent { .. }
    ) {
        return None;
    }
    let current_file_in_project = is_file_in_project(db, file);
    let definition_targets = goto_target
        .get_definition_targets(&model, ReferencesMode::Rename.to_import_alias_resolution())?
        .declaration_targets(db)?;
    for target in &definition_targets {
        let target_file = target.file();
        // A symbol is renamable only when every definition lives inside
        // the project and — if the current file is outside the project —
        // every definition lives in the current file itself.
        let renamable = is_file_in_project(db, target_file)
            && (current_file_in_project || target_file == file);
        if !renamable {
            return None;
        }
    }
    Some(goto_target.range())
}
/// Perform a rename operation on the symbol at the given position.
/// Returns all locations that need to be updated with the new name.
pub fn rename(
    db: &dyn Db,
    file: File,
    offset: TextSize,
    new_name: &str,
) -> Option<Vec<ReferenceTarget>> {
    let parsed = ruff_db::parsed::parsed_module(db, file);
    let module = parsed.load(db);
    let model = SemanticModel::new(db, file);
    // Locate the symbol under the cursor.
    let goto_target = find_goto_target(&model, &module, offset)?;
    // Clients shouldn't call us with an empty new name, but just in case...
    if new_name.is_empty() {
        return None;
    }
    // Files that belong to the project are renamed project-wide;
    // anything else is limited to a single-file rename.
    let rename_mode = match is_file_in_project(db, file) {
        true => ReferencesMode::RenameMultiFile,
        false => ReferencesMode::Rename,
    };
    // Collect every reference that needs to be rewritten.
    references(db, file, &goto_target, rename_mode)
}
/// Helper function to check if a file is included in the project.
///
/// Files with a system virtual path are always treated as part of
/// the project.
fn is_file_in_project(db: &dyn Db, file: File) -> bool {
    if file.path(db).is_system_virtual_path() {
        return true;
    }
    db.project().files(db).contains(&file)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{CursorTest, IntoDiagnostic, cursor_test};
use insta::assert_snapshot;
use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span};
use ruff_db::files::FileRange;
use ruff_text_size::Ranged;
impl CursorTest {
    /// Renders the result of `can_rename` at the test's cursor
    /// position as a human-readable string for snapshot assertions.
    fn prepare_rename(&self) -> String {
        let Some(range) = salsa::attach(&self.db, || {
            can_rename(&self.db, self.cursor.file, self.cursor.offset)
        }) else {
            return "Cannot rename".to_string();
        };
        format!("Can rename symbol at range {range:?}")
    }
    /// Runs `can_rename` followed by `rename` at the test's cursor
    /// position and renders every location that would be rewritten
    /// as a single diagnostic for snapshot assertions.
    fn rename(&self, new_name: &str) -> String {
        let rename_results = salsa::attach(&self.db, || {
            // Mirror real clients: only rename when `can_rename` allows it.
            can_rename(&self.db, self.cursor.file, self.cursor.offset)?;
            rename(&self.db, self.cursor.file, self.cursor.offset, new_name)
        });
        let Some(rename_results) = rename_results else {
            return "Cannot rename".to_string();
        };
        if rename_results.is_empty() {
            return "No locations to rename".to_string();
        }
        // Create a single diagnostic with multiple annotations
        let rename_diagnostic = RenameResultSet {
            locations: rename_results
                .into_iter()
                .map(|ref_item| FileRange::new(ref_item.file(), ref_item.range()))
                .collect(),
        };
        self.render_diagnostics([rename_diagnostic])
    }
}
/// The set of locations a rename would touch, rendered as a single
/// diagnostic in test snapshots.
struct RenameResultSet {
    /// The first location is treated as the primary (renamed) symbol;
    /// the rest are secondary reference sites.
    locations: Vec<FileRange>,
}
impl IntoDiagnostic for RenameResultSet {
    /// Renders all rename locations as one diagnostic: the first location
    /// becomes the primary annotation, the rest become secondary ones.
    fn into_diagnostic(self) -> Diagnostic {
        let mut main = Diagnostic::new(
            DiagnosticId::Lint(LintName::of("rename")),
            Severity::Info,
            format!("Rename symbol (found {} locations)", self.locations.len()),
        );
        let mut locations = self.locations.iter();
        // The first location is the symbol being renamed.
        if let Some(first_location) = locations.next() {
            main.annotate(Annotation::primary(
                Span::from(first_location.file()).with_range(first_location.range()),
            ));
            // Every remaining rename site is a secondary annotation.
            for location in locations {
                main.annotate(Annotation::secondary(
                    Span::from(location.file()).with_range(location.range()),
                ));
            }
        }
        main
    }
}
#[test]
fn prepare_rename_parameter() {
let test = cursor_test(
"
def func(<CURSOR>value: int) -> int:
value *= 2
return value
value = 0
",
);
assert_snapshot!(test.prepare_rename(), @"Can rename symbol at range 10..15");
}
#[test]
fn rename_parameter() {
let test = cursor_test(
"
def func(<CURSOR>value: int) -> int:
value *= 2
return value
func(value=42)
",
);
assert_snapshot!(test.rename("number"), @r"
info[rename]: Rename symbol (found 4 locations)
--> main.py:2:10
|
2 | def func(value: int) -> int:
| ^^^^^
3 | value *= 2
| -----
4 | return value
| -----
5 |
6 | func(value=42)
| -----
|
");
}
#[test]
fn rename_function() {
let test = cursor_test(
"
def fu<CURSOR>nc():
pass
result1 = func()
x = func
",
);
assert_snapshot!(test.rename("calculate"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:5
|
2 | def func():
| ^^^^
3 | pass
4 |
5 | result1 = func()
| ----
6 | x = func
| ----
|
");
}
#[test]
fn rename_class() {
let test = cursor_test(
"
class My<CURSOR>Class:
def __init__(self):
pass
obj1 = MyClass()
cls = MyClass
",
);
assert_snapshot!(test.rename("MyNewClass"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:7
|
2 | class MyClass:
| ^^^^^^^
3 | def __init__(self):
4 | pass
5 |
6 | obj1 = MyClass()
| -------
7 | cls = MyClass
| -------
|
");
}
#[test]
fn rename_invalid_name() {
let test = cursor_test(
"
def fu<CURSOR>nc():
pass
",
);
assert_snapshot!(test.rename(""), @"Cannot rename");
assert_snapshot!(test.rename("valid_name"), @r"
info[rename]: Rename symbol (found 1 locations)
--> main.py:2:5
|
2 | def func():
| ^^^^
3 | pass
|
");
}
#[test]
fn multi_file_function_rename() {
let test = CursorTest::builder()
.source(
"utils.py",
"
def fu<CURSOR>nc(x):
return x * 2
",
)
.source(
"module.py",
"
from utils import func
def test(data):
return func(data)
",
)
.source(
"app.py",
"
from utils import helper_function
class DataProcessor:
def __init__(self):
self.multiplier = helper_function
def process(self, value):
return helper_function(value)
",
)
.build();
assert_snapshot!(test.rename("utility_function"), @r"
info[rename]: Rename symbol (found 3 locations)
--> utils.py:2:5
|
2 | def func(x):
| ^^^^
3 | return x * 2
|
::: module.py:2:19
|
2 | from utils import func
| ----
3 |
4 | def test(data):
5 | return func(data)
| ----
|
");
}
#[test]
fn rename_string_annotation1() {
let test = cursor_test(
r#"
a: "MyCla<CURSOR>ss" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:5
|
2 | a: "MyClass" = 1
| ^^^^^^^
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn rename_string_annotation2() {
let test = cursor_test(
r#"
a: "None | MyCl<CURSOR>ass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^^^^
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn rename_string_annotation3() {
let test = cursor_test(
r#"
a: "None |<CURSOR> MyClass" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @"Cannot rename");
}
#[test]
fn rename_string_annotation4() {
let test = cursor_test(
r#"
a: "None | MyClass<CURSOR>" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:12
|
2 | a: "None | MyClass" = 1
| ^^^^^^^
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn rename_string_annotation5() {
let test = cursor_test(
r#"
a: "None | MyClass"<CURSOR> = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @"Cannot rename");
}
#[test]
fn rename_string_annotation_dangling1() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass |" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @"Cannot rename");
}
#[test]
fn rename_string_annotation_dangling2() {
let test = cursor_test(
r#"
a: "MyCl<CURSOR>ass | No" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:5
|
2 | a: "MyClass | No" = 1
| ^^^^^^^
3 |
4 | class MyClass:
| -------
5 | """some docs"""
|
"#);
}
#[test]
fn rename_string_annotation_dangling3() {
let test = cursor_test(
r#"
a: "MyClass | N<CURSOR>o" = 1
class MyClass:
"""some docs"""
"#,
);
assert_snapshot!(test.rename("MyNewClass"), @"Cannot rename");
}
#[test]
fn rename_match_name_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:22
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_name_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:22
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_rest_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:23
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", *ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_rest_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", *ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:23
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", *ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_as_stmt() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as a<CURSOR>b]:
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:37
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ("a" | "b") as ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_as_binding() {
let test = cursor_test(
r#"
def my_func(command: str):
match command.split():
case ["get", ("a" | "b") as ab]:
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 2 locations)
--> main.py:4:37
|
2 | def my_func(command: str):
3 | match command.split():
4 | case ["get", ("a" | "b") as ab]:
| ^^
5 | x = ab
| --
|
"#);
}
#[test]
fn rename_match_keyword_stmt() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=a<CURSOR>b):
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:10:30
|
8 | def my_func(event: Click):
9 | match event:
10 | case Click(x, button=ab):
| ^^
11 | x = ab
| --
|
");
}
#[test]
fn rename_match_keyword_binding() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, button=ab):
x = a<CURSOR>b
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:10:30
|
8 | def my_func(event: Click):
9 | match event:
10 | case Click(x, button=ab):
| ^^
11 | x = ab
| --
|
");
}
#[test]
fn rename_match_class_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Cl<CURSOR>ick(x, button=ab):
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @r#"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:7
|
2 | class Click:
| ^^^^^
3 | __match_args__ = ("position", "button")
4 | def __init__(self, pos, btn):
|
::: main.py:8:20
|
6 | self.button: str = btn
7 |
8 | def my_func(event: Click):
| -----
9 | match event:
10 | case Click(x, button=ab):
| -----
11 | x = ab
|
"#);
}
#[test]
fn rename_match_class_field_name() {
let test = cursor_test(
r#"
class Click:
__match_args__ = ("position", "button")
def __init__(self, pos, btn):
self.position: int = pos
self.button: str = btn
def my_func(event: Click):
match event:
case Click(x, but<CURSOR>ton=ab):
x = ab
"#,
);
assert_snapshot!(test.rename("XY"), @"Cannot rename");
}
#[test]
fn rename_typevar_name_stmt() {
let test = cursor_test(
r#"
type Alias1[A<CURSOR>B: int = bool] = tuple[AB, list[AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| ^^ -- --
|
");
}
#[test]
fn rename_typevar_name_binding() {
let test = cursor_test(
r#"
type Alias1[AB: int = bool] = tuple[A<CURSOR>B, list[AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:13
|
2 | type Alias1[AB: int = bool] = tuple[AB, list[AB]]
| ^^ -- --
|
");
}
#[test]
fn rename_typevar_spec_stmt() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**A<CURSOR>B = [int, str]] = Callable[AB, tuple[AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:3:15
|
2 | from typing import Callable
3 | type Alias2[**AB = [int, str]] = Callable[AB, tuple[AB]]
| ^^ -- --
|
");
}
#[test]
fn rename_typevar_spec_binding() {
let test = cursor_test(
r#"
from typing import Callable
type Alias2[**AB = [int, str]] = Callable[A<CURSOR>B, tuple[AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:3:15
|
2 | from typing import Callable
3 | type Alias2[**AB = [int, str]] = Callable[AB, tuple[AB]]
| ^^ -- --
|
");
}
#[test]
fn rename_typevar_tuple_stmt() {
let test = cursor_test(
r#"
type Alias3[*A<CURSOR>B = ()] = tuple[tuple[*AB], tuple[*AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:14
|
2 | type Alias3[*AB = ()] = tuple[tuple[*AB], tuple[*AB]]
| ^^ -- --
|
");
}
#[test]
fn rename_typevar_tuple_binding() {
let test = cursor_test(
r#"
type Alias3[*AB = ()] = tuple[tuple[*A<CURSOR>B], tuple[*AB]]
"#,
);
assert_snapshot!(test.rename("XY"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:14
|
2 | type Alias3[*AB = ()] = tuple[tuple[*AB], tuple[*AB]]
| ^^ -- --
|
");
}
#[test]
fn cannot_rename_import_module_component() {
// Test that we cannot rename parts of module names in import statements
// (here the cursor is on `os` within `import os.path`).
let test = cursor_test(
"
import <CURSOR>os.path
x = os.path.join('a', 'b')
",
);
assert_snapshot!(test.prepare_rename(), @"Cannot rename");
}
#[test]
fn cannot_rename_from_import_module_component() {
// Test that we cannot rename parts of module names in from import statements
// (here the cursor is on `path` within `from os.path import ...`).
let test = cursor_test(
"
from os.<CURSOR>path import join
result = join('a', 'b')
",
);
assert_snapshot!(test.prepare_rename(), @"Cannot rename");
}
#[test]
fn cannot_rename_external_file() {
// This test verifies that we cannot rename a symbol when it's defined in a file
// that's outside the project (like a standard library function).
let test = cursor_test(
"
import os
x = <CURSOR>os.path.join('a', 'b')
",
);
assert_snapshot!(test.prepare_rename(), @"Cannot rename");
}
// Renaming an alias from its definition in an `import ... as` clause
// updates the alias and its uses; the original name `test` in utils.py
// is not among the renamed locations.
#[test]
fn rename_alias_at_import_statement() {
let test = CursorTest::builder()
.source(
"utils.py",
"
def test(): pass
",
)
.source(
"main.py",
"
from utils import test as <CURSOR>alias
result = alias()
",
)
.build();
assert_snapshot!(test.rename("new_alias"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:27
|
2 | from utils import test as alias
| ^^^^^
3 | result = alias()
|          -----
|
");
}
#[test]
fn rename_alias_at_usage_site() {
// Test renaming an alias when the cursor is on the alias in the usage statement;
// the result set matches renaming from the alias definition itself.
let test = CursorTest::builder()
.source(
"utils.py",
"
def test(): pass
",
)
.source(
"main.py",
"
from utils import test as alias
result = <CURSOR>alias()
",
)
.build();
assert_snapshot!(test.rename("new_alias"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:27
|
2 | from utils import test as alias
| ^^^^^
3 | result = alias()
| -----
|
");
}
#[test]
fn rename_across_import_chain_with_mixed_aliases() {
// Test renaming a symbol that's imported across multiple files with mixed alias patterns
// File 1 (source.py): defines the original function
// File 2 (middle.py): imports without alias from source.py
// File 3 (consumer.py): imports with alias from middle.py
let test = CursorTest::builder()
.source(
"source.py",
"
def original_func<CURSOR>tion():
return 'Hello from source'
",
)
.source(
"middle.py",
"
from source import original_function
def wrapper():
return original_function()
result = original_function()
",
)
.source(
"consumer.py",
"
from middle import original_function as func_alias
def process():
return func_alias()
value1 = func_alias()
",
)
.build();
// Note: the alias `func_alias` itself is not among the 5 renamed
// locations; only occurrences of `original_function` change.
assert_snapshot!(test.rename("renamed_function"), @r"
info[rename]: Rename symbol (found 5 locations)
--> source.py:2:5
|
2 | def original_function():
| ^^^^^^^^^^^^^^^^^
3 | return 'Hello from source'
|
::: consumer.py:2:20
|
2 | from middle import original_function as func_alias
| -----------------
3 |
4 | def process():
|
::: middle.py:2:20
|
2 | from source import original_function
| -----------------
3 |
4 | def wrapper():
5 | return original_function()
| -----------------
6 |
7 | result = original_function()
| -----------------
|
");
}
// Renaming from a *use* of an imported name renames that binding and
// its uses back through the import chain: the `func2` alias in
// file2.py is renamed, but the original `func1` in file1.py is not.
#[test]
fn rename_alias_in_import_chain() {
let test = CursorTest::builder()
.source(
"file1.py",
"
def func1(): pass
",
)
.source(
"file2.py",
"
from file1 import func1 as func2
func2()
",
)
.source(
"file3.py",
"
from file2 import func2
class App:
def run(self):
return fu<CURSOR>nc2()
",
)
.build();
assert_snapshot!(test.rename("new_util_name"), @r"
info[rename]: Rename symbol (found 4 locations)
--> file3.py:2:19
|
2 | from file2 import func2
| ^^^^^
3 |
4 | class App:
5 | def run(self):
6 | return func2()
| -----
|
::: file2.py:2:28
|
2 | from file1 import func1 as func2
| -----
3 |
4 | func2()
| -----
|
");
}
#[test]
fn cannot_rename_keyword() {
// Test that we cannot rename Python keywords like "None";
// prepare_rename should refuse before any rename is attempted.
let test = cursor_test(
"
def process_value(value):
if value is <CURSOR>None:
return 'empty'
return str(value)
",
);
assert_snapshot!(test.prepare_rename(), @"Cannot rename");
}
#[test]
fn cannot_rename_builtin_type() {
// Test that we cannot rename Python builtin types like "int"
// (their definitions live outside the project).
let test = cursor_test(
"
def convert_to_number(value):
return <CURSOR>int(value)
",
);
assert_snapshot!(test.prepare_rename(), @"Cannot rename");
}
#[test]
fn rename_keyword_argument() {
// Test renaming a keyword argument and its corresponding parameter:
// starting from the `y=20` call site, the parameter declaration, its
// use in the body, and the keyword argument are all renamed.
let test = cursor_test(
"
def func(x, y=5):
return x + y
result = func(10, <CURSOR>y=20)
",
);
assert_snapshot!(test.rename("z"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:13
|
2 | def func(x, y=5):
| ^
3 | return x + y
| -
4 |
5 | result = func(10, y=20)
| -
|
");
}
#[test]
fn rename_parameter_with_keyword_argument() {
// Test renaming a parameter and its corresponding keyword argument;
// the result set matches the call-site-initiated rename above.
let test = cursor_test(
"
def func(x, <CURSOR>y=5):
return x + y
result = func(10, y=20)
",
);
assert_snapshot!(test.rename("z"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:13
|
2 | def func(x, y=5):
| ^
3 | return x + y
| -
4 |
5 | result = func(10, y=20)
| -
|
");
}
// Renaming a module import alias (`import warnings as abc`) renames the
// alias and its uses; the plain `import warnings` and `y = warnings`
// lines are untouched.
#[test]
fn import_alias() {
let test = CursorTest::builder()
.source(
"main.py",
r#"
import warnings
import warnings as <CURSOR>abc
x = abc
y = warnings
"#,
)
.build();
assert_snapshot!(test.rename("z"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:3:20
|
2 | import warnings
3 | import warnings as abc
| ^^^
4 |
5 | x = abc
| ---
6 | y = warnings
|
");
}
// Same as above, but the aliased module is a first-party file; only the
// alias and its use in main.py are renamed, not lib.py itself.
#[test]
fn import_alias_to_first_party_definition() {
let test = CursorTest::builder()
.source("lib.py", "def deprecated(): pass")
.source(
"main.py",
r#"
import lib as lib2<CURSOR>
x = lib2
"#,
)
.build();
assert_snapshot!(test.rename("z"), @r"
info[rename]: Rename symbol (found 2 locations)
--> main.py:2:15
|
2 | import lib as lib2
| ^^^^
3 |
4 | x = lib2
| ----
|
");
}
#[test]
fn imported_first_party_definition() {
let test = CursorTest::builder()
.source("lib.py", "def deprecated(): pass")
.source(
"main.py",
r#"
from lib import deprecated<CURSOR>
x = deprecated
"#,
)
.build();
assert_snapshot!(test.rename("z"), @r"
info[rename]: Rename symbol (found 3 locations)
--> main.py:2:17
|
2 | from lib import deprecated
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/importer.rs | crates/ty_ide/src/importer.rs | #![allow(warnings)]
/*!
An abstraction for adding new imports to a single Python source file.
This importer is based on a similar abstraction in `ruff_linter::importer`.
Both of them use the lower-level `ruff_python_importer::Insertion` primitive.
The main differences here are:
1. This works with ty's semantic model instead of ruff's.
2. This owns the task of visiting AST to extract imports. This
design was chosen because it's currently only used for inserting
imports for unimported completion suggestions. If it needs to be
used more broadly, it might make sense to roll construction of an
`Importer` into ty's `SemanticIndex`.
3. It doesn't have as many facilities as `ruff_linter`'s importer.
*/
use rustc_hash::FxHashMap;
use ruff_db::files::File;
use ruff_db::parsed::ParsedModuleRef;
use ruff_db::source::source_text;
use ruff_diagnostics::Edit;
use ruff_python_ast as ast;
use ruff_python_ast::name::Name;
use ruff_python_ast::token::Tokens;
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal, walk_stmt};
use ruff_python_codegen::Stylist;
use ruff_python_importer::Insertion;
use ruff_text_size::{Ranged, TextRange, TextSize};
use ty_module_resolver::ModuleName;
use ty_project::Db;
use ty_python_semantic::semantic_index::definition::DefinitionKind;
use ty_python_semantic::types::Type;
use ty_python_semantic::{MemberDefinition, SemanticModel};
/// Inserts import statements into a single Python module.
///
/// Construction collects the module's top-level imports up front so
/// that new imports can be merged with, or satisfied by, existing ones.
pub(crate) struct Importer<'a> {
    /// The ty Salsa database.
    db: &'a dyn Db,
    /// The file corresponding to the module that
    /// we want to insert an import statement into.
    file: File,
    /// The parsed module ref.
    parsed: &'a ParsedModuleRef,
    /// The tokens representing the Python AST.
    tokens: &'a Tokens,
    /// The source code for `file`.
    source: &'a str,
    /// The [`Stylist`] for the Python AST.
    stylist: &'a Stylist<'a>,
    /// The list of visited, top-level runtime imports in the Python AST.
    ///
    /// These are in source order, which `find` relies on.
    imports: Vec<AstImport<'a>>,
}
impl<'a> Importer<'a> {
    /// Create a new importer.
    ///
    /// The [`Stylist`] dictates the code formatting options of any code
    /// edit (if any) produced by this importer.
    ///
    /// The `file` given should correspond to the module that we want
    /// to insert an import statement into.
    ///
    /// The `source` is used to get access to the original source
    /// text for `file`, which is used to help produce code edits (if
    /// any).
    ///
    /// The AST given (corresponding to the contents of `file`) is
    /// traversed and top-level imports are extracted from it. This
    /// permits adding imports in a way that is harmonious with
    /// existing imports.
    pub(crate) fn new(
        db: &'a dyn Db,
        stylist: &'a Stylist<'a>,
        file: File,
        source: &'a str,
        parsed: &'a ParsedModuleRef,
    ) -> Self {
        // Collect top-level imports once, up front; `import` and `find`
        // rely on this list being in source order.
        let imports = TopLevelImports::find(parsed.syntax());
        Self {
            db,
            file,
            parsed,
            tokens: parsed.tokens(),
            source,
            stylist,
            imports,
        }
    }

    /// Builds a set of members in scope at the given AST node and position.
    ///
    /// Callers should use this routine to build "in scope members" to be used
    /// with repeated calls to `Importer::import`. This does some work up-front
    /// to avoid doing it for every call to `Importer::import`.
    ///
    /// In general, `at` should be equivalent to `node.start()` (from the
    /// [`ruff_text_size::Ranged`] trait). However, in some cases, identifying
    /// a good AST node for where the cursor is can be difficult, where as
    /// knowing the precise position of the cursor is easy. The AST node in
    /// that circumstance may be a very poor approximation that may still
    /// result in good auto-import results.
    ///
    /// This API is designed with completions in mind. That is, we might have
    /// many possible candidates to add as an import while the position we want
    /// to insert them remains invariant.
    pub fn members_in_scope_at(
        &self,
        node: ast::AnyNodeRef<'_>,
        at: TextSize,
    ) -> MembersInScope<'a> {
        MembersInScope::new(self.db, self.file, self.parsed, node, at)
    }

    /// Imports a symbol into this importer's module.
    ///
    /// The given request is assumed to be valid. That is, the module
    /// is assumed to be importable and the member is assumed to be a
    /// valid thing to import from the given module.
    ///
    /// When possible (particularly when there is no existing import
    /// statement to satisfy the given request), the import style on
    /// the request is respected. When there is an existing import,
    /// then the existing style is always respected instead.
    ///
    /// `members` should be a map of symbols in scope at the position
    /// where the imported symbol should be available. This is used
    /// to craft import statements in a way that doesn't conflict with
    /// symbols in scope. If it's not feasible to provide this map, then
    /// providing an empty map is generally fine. But it does mean that
    /// the resulting import may shadow (or be shadowed by) some other
    /// symbol.
    ///
    /// The "import action" returned includes an edit for inserting
    /// the actual import (if necessary) along with the symbol text
    /// that should be used to refer to the imported symbol. While
    /// the symbol text may be expected to just be equivalent to the
    /// request's `member`, it can be different. For example, there
    /// might be an alias, or the corresponding module might already be
    /// imported in a qualified way.
    pub(crate) fn import(
        &self,
        request: ImportRequest<'_>,
        members: &MembersInScope,
    ) -> ImportAction {
        // Possibly switch the requested style to avoid clashing with
        // names already in scope.
        let request = request.avoid_conflicts(self.db, self.file, members);
        let mut symbol_text: Box<str> = request.member.unwrap_or(request.module).into();
        // No existing import satisfies the request, so insert a brand
        // new import statement.
        let Some(response) = self.find(&request, members.at) else {
            let insertion = if let Some(future) = self.find_last_future_import(members.at) {
                // `from __future__` imports must come first in a module,
                // so insert after the last one.
                Insertion::end_of_statement(future.stmt, self.source, self.stylist)
            } else {
                // For notebooks, constrain the insertion to the cell
                // containing the position where the symbol is needed.
                let range = source_text(self.db, self.file)
                    .as_notebook()
                    .and_then(|notebook| notebook.cell_offsets().containing_range(members.at));
                Insertion::start_of_file(self.parsed.suite(), self.source, self.stylist, range)
            };
            let import = insertion.into_edit(&request.to_string());
            if let Some(member) = request.member
                && matches!(request.style, ImportStyle::Import)
            {
                // With `import module` style, the member is referenced
                // through the module name.
                symbol_text = format!("{}.{}", request.module, member).into();
            }
            return ImportAction {
                import: Some(import),
                symbol_text,
            };
        };
        // When we just have a request to import a module (and not
        // any members from that module), then the only way we can be
        // here is if we found a pre-existing import that definitively
        // satisfies the request. So we're done.
        let Some(member) = request.member else {
            return ImportAction {
                import: None,
                symbol_text,
            };
        };
        match response.kind {
            ImportResponseKind::Unqualified { ast, alias } => {
                let member = alias.asname.as_ref().unwrap_or(&alias.name).as_str();
                // As long as it's not a wildcard import, we use whatever name
                // the member is imported as when inserting the symbol.
                if member != "*" {
                    symbol_text = member.into();
                }
                ImportAction {
                    import: None,
                    symbol_text,
                }
            }
            ImportResponseKind::Qualified { ast, alias } => {
                // The module is imported (possibly under an alias), so
                // qualify the member with that name.
                let module = alias.asname.as_ref().unwrap_or(&alias.name).as_str();
                ImportAction {
                    import: None,
                    symbol_text: format!("{module}.{symbol_text}").into(),
                }
            }
            ImportResponseKind::Partial(ast) => {
                // Prefer amending the existing `from ... import` list;
                // fall back to a new statement right after it.
                let import = if let Some(insertion) =
                    Insertion::existing_import(response.import.stmt, self.tokens)
                {
                    insertion.into_edit(member)
                } else {
                    Insertion::end_of_statement(response.import.stmt, self.source, self.stylist)
                        .into_edit(&format!("from {} import {member}", request.module))
                };
                ImportAction {
                    import: Some(import),
                    symbol_text,
                }
            }
        }
    }

    /// Look for an import already in this importer's module that
    /// satisfies the given request. If found, the corresponding
    /// import is returned along with the way in which the import
    /// satisfies the request.
    fn find<'importer>(
        &'importer self,
        request: &ImportRequest<'_>,
        available_at: TextSize,
    ) -> Option<ImportResponse<'importer, 'a>> {
        let mut choice = None;
        let source = source_text(self.db, self.file);
        let notebook = source.as_notebook();
        for import in &self.imports {
            // If the import statement comes after the spot where we
            // need the symbol, then we conservatively assume that
            // the import statement does not satisfy the request. It
            // is possible the import statement *could* satisfy the
            // request. For example, if `available_at` is inside a
            // function defined before the import statement. But this
            // only works if the function is known to be called *after*
            // the import statement executes. So... it's complicated.
            // In the worst case, we'll end up inserting a superfluous
            // import statement at the top of the module.
            //
            // Also, we can stop here since our import statements are
            // sorted by their start location in the source.
            if import.stmt.start() >= available_at {
                return choice;
            }
            if let Some(response) = import.satisfies(self.db, self.file, request) {
                let partial = matches!(response.kind, ImportResponseKind::Partial { .. });
                // The LSP doesn't support edits across cell boundaries.
                // Skip over imports that only partially satisfy the import
                // because they would require changes to the import (across cell boundaries).
                if partial
                    && let Some(notebook) = notebook
                    && notebook
                        .cell_offsets()
                        .has_cell_boundary(TextRange::new(import.stmt.start(), available_at))
                {
                    continue;
                }
                // Keep the later candidate on priority ties; replace the
                // current choice whenever it is not strictly better.
                if choice
                    .as_ref()
                    .is_none_or(|c| !c.kind.is_prioritized_over(&response.kind))
                {
                    let is_top_priority =
                        matches!(response.kind, ImportResponseKind::Unqualified { .. });
                    choice = Some(response);
                    // When we find an unqualified import, it's (currently)
                    // impossible for any later import to override it in
                    // priority. So we can just quit here.
                    if is_top_priority {
                        return choice;
                    }
                }
            }
        }
        choice
    }

    /// Find the last `from __future__` import statement in the AST.
    ///
    /// Only imports at or before `at` (and, in notebooks, within the
    /// same cell as `at`) are considered.
    fn find_last_future_import(&self, at: TextSize) -> Option<&'a AstImport> {
        let source = source_text(self.db, self.file);
        let notebook = source.as_notebook();
        self.imports
            .iter()
            .take_while(|import| import.stmt.start() <= at)
            // Skip over imports from other cells.
            .skip_while(|import| {
                notebook.is_some_and(|notebook| {
                    notebook
                        .cell_offsets()
                        .has_cell_boundary(TextRange::new(import.stmt.start(), at))
                })
            })
            .take_while(|import| {
                import
                    .stmt
                    .as_import_from_stmt()
                    .is_some_and(|import_from| import_from.module.as_deref() == Some("__future__"))
            })
            .last()
    }
}
/// A map of symbols in scope at a particular location in a module.
///
/// Users of an `Importer` must create this map via
/// [`Importer::members_in_scope_at`] in order to use the [`Importer::import`]
/// API. This map provides quick access to symbols in scope to help ensure that
/// the imports inserted are correct and do not conflict with existing symbols.
///
/// Note that this isn't perfect. At time of writing (2025-09-16), the importer
/// makes the trade-off that it's better to insert an incorrect import than to
/// silently do nothing. Perhaps in the future we can find a way to prompt end
/// users for a decision. This behavior is modeled after rust-analyzer, which
/// does the same thing for auto-import on unimported completions.
#[derive(Debug)]
pub struct MembersInScope<'ast> {
    /// The position at which the imported symbol must be usable; existing
    /// imports at or after this offset are not considered.
    at: TextSize,
    /// The symbols visible at `at`, keyed by name.
    map: FxHashMap<Name, MemberInScope<'ast>>,
}
impl<'ast> MembersInScope<'ast> {
fn new(
db: &'ast dyn Db,
file: File,
parsed: &'ast ParsedModuleRef,
node: ast::AnyNodeRef<'_>,
at: TextSize,
) -> MembersInScope<'ast> {
let model = SemanticModel::new(db, file);
let map = model
.members_in_scope_at(node)
.into_iter()
.map(|(name, memberdef)| {
let def = memberdef.first_reachable_definition;
let kind = match *def.kind(db) {
DefinitionKind::Import(ref kind) => {
MemberImportKind::Imported(AstImportKind::Import(kind.import(parsed)))
}
DefinitionKind::ImportFrom(ref kind) => {
MemberImportKind::Imported(AstImportKind::ImportFrom(kind.import(parsed)))
}
DefinitionKind::StarImport(ref kind) => {
MemberImportKind::Imported(AstImportKind::ImportFrom(kind.import(parsed)))
}
_ => MemberImportKind::Other,
};
(
name,
MemberInScope {
ty: memberdef.ty,
kind,
},
)
})
.collect();
MembersInScope { at, map }
}
}
/// A single in-scope symbol: its inferred type plus how it was defined.
#[derive(Debug)]
struct MemberInScope<'ast> {
    /// The inferred type of the symbol.
    ty: Type<'ast>,
    /// Whether the symbol's first reachable definition is an import.
    kind: MemberImportKind<'ast>,
}
impl<'ast> MemberInScope<'ast> {
    /// Wraps a type whose definition site is "irrelevant". That is, the
    /// only definition sites we currently care about are import statements.
    fn other(ty: Type<'ast>) -> MemberInScope<'ast> {
        MemberInScope {
            ty,
            kind: MemberImportKind::Other,
        }
    }

    /// Returns true if this symbol satisfies the given import request,
    /// taking the symbol's definition site into account. Symbols that were
    /// not introduced by an import statement never satisfy a request.
    fn satisfies(&self, db: &dyn Db, importing_file: File, request: &ImportRequest<'_>) -> bool {
        match self.kind {
            MemberImportKind::Imported(ref ast_import) => {
                ast_import.satisfies(db, importing_file, request).is_some()
            }
            MemberImportKind::Other => false,
        }
    }
}
/// A type describing how a symbol was defined.
#[derive(Debug)]
enum MemberImportKind<'ast> {
    /// A symbol was introduced through an import statement.
    /// The payload is a borrowed view of that statement's AST.
    Imported(AstImportKind<'ast>),
    /// A symbol was introduced through something other
    /// than an import statement.
    Other,
}
/// The edits needed to insert the import statement.
///
/// While this is usually just an edit to add an import statement (or
/// modify an existing one), it can also sometimes just be a change
/// to the text that should be inserted for a particular symbol. For
/// example, if one were to ask for `search` from the `re` module, and
/// `re` was already imported, then we'd return no edits for import
/// statements and the text `re.search` to use for the symbol.
#[derive(Debug)]
pub(crate) struct ImportAction {
    /// The edit, if any, that inserts or amends an import statement.
    import: Option<Edit>,
    /// The text to use at the reference site for the symbol.
    symbol_text: Box<str>,
}
impl ImportAction {
    /// The edit (if any) that inserts or modifies an import statement.
    pub(crate) fn import(&self) -> Option<&Edit> {
        self.import.as_ref()
    }

    /// The text to write at the reference site for the imported symbol.
    ///
    /// Usually this is identical to the symbol text given to the
    /// corresponding [`ImportRequest`], but it may be qualified
    /// (`module.member`) or use an alias, depending on existing imports
    /// and import preferences.
    pub(crate) fn symbol_text(&self) -> &str {
        self.symbol_text.as_ref()
    }
}
/// A borrowed AST of a Python import statement.
#[derive(Debug)]
struct AstImport<'ast> {
    /// The original AST statement containing the import.
    stmt: &'ast ast::Stmt,
    /// The specific type of import.
    ///
    /// Storing this means we can do exhaustive case analysis
    /// on the type of the import without needing to constantly
    /// unwrap it from a more general `Stmt`. Still, we keep the
    /// `Stmt` around because some APIs want that.
    kind: AstImportKind<'ast>,
}
impl<'ast> AstImport<'ast> {
    /// Checks whether this import statement satisfies `request`.
    ///
    /// On a match, the returned response records both the import itself
    /// and the manner in which it satisfies the request.
    fn satisfies<'importer>(
        &'importer self,
        db: &'_ dyn Db,
        importing_file: File,
        request: &ImportRequest<'_>,
    ) -> Option<ImportResponse<'importer, 'ast>> {
        let kind = self.kind.satisfies(db, importing_file, request)?;
        Some(ImportResponse { import: self, kind })
    }
}
/// The specific kind of import.
#[derive(Debug)]
enum AstImportKind<'ast> {
    /// A plain `import foo` (possibly with aliases) statement.
    Import(&'ast ast::StmtImport),
    /// A `from foo import bar` statement (including wildcard imports).
    ImportFrom(&'ast ast::StmtImportFrom),
}
impl<'ast> AstImportKind<'ast> {
    /// Returns whether this import satisfies the given request.
    ///
    /// If it does, then this returns *how* the import satisfies
    /// the request.
    fn satisfies<'importer>(
        &'importer self,
        db: &'_ dyn Db,
        importing_file: File,
        request: &ImportRequest<'_>,
    ) -> Option<ImportResponseKind<'ast>> {
        match *self {
            AstImportKind::Import(ast) => {
                // A forced non-`import` style can never be satisfied
                // by an `import ...` statement.
                if request.force_style && !matches!(request.style, ImportStyle::Import) {
                    return None;
                }
                let alias = ast
                    .names
                    .iter()
                    .find(|alias| alias.name.as_str() == request.module)?;
                Some(ImportResponseKind::Qualified { ast, alias })
            }
            AstImportKind::ImportFrom(ast) => {
                // If the request is for a module itself, then we
                // assume that it can never be satisfied by a
                // `from ... import ...` statement. For example, a
                // request for `collections.abc` needs an
                // `import collections.abc`. Now, there could be a
                // `from collections import abc`, and we could
                // plausibly consider that a match and return a
                // symbol text of `abc`. But it's not clear if that's
                // the right choice or not.
                let member = request.member?;
                if request.force_style && !matches!(request.style, ImportStyle::ImportFrom) {
                    return None;
                }
                // Resolve the (possibly relative) `from` target to a fully
                // qualified module name before comparing against the request.
                let module = ModuleName::from_import_statement(db, importing_file, ast).ok()?;
                if module.as_str() != request.module {
                    return None;
                }
                // A wildcard or an exact member match makes the symbol
                // available unqualified; otherwise the statement only
                // partially satisfies the request.
                let kind = ast
                    .names
                    .iter()
                    .find(|alias| alias.name.as_str() == "*" || alias.name.as_str() == member)
                    .map(|alias| ImportResponseKind::Unqualified { ast, alias })
                    .unwrap_or_else(|| ImportResponseKind::Partial(ast));
                Some(kind)
            }
        }
    }
}
/// A request to import a module into the global scope of a Python module.
#[derive(Debug)]
pub(crate) struct ImportRequest<'a> {
    /// The module from which the symbol should be imported (e.g.,
    /// `foo`, in `from foo import bar`).
    module: &'a str,
    /// The member to import (e.g., `bar`, in `from foo import bar`).
    ///
    /// When `member` is absent, then this request reflects an import
    /// of the module itself. i.e., `import module`.
    member: Option<&'a str>,
    /// The preferred style to use when importing the symbol (e.g.,
    /// `import foo` or `from foo import bar`).
    ///
    /// This style isn't respected if the `module` already has
    /// an import statement. In that case, the existing style is
    /// respected.
    style: ImportStyle,
    /// Whether the import style ought to be forced for correctness
    /// reasons. For example, to avoid shadowing or introducing a
    /// conflicting name.
    force_style: bool,
}
impl<'a> ImportRequest<'a> {
    /// Create a new [`ImportRequest`] from a `module` and `member`.
    ///
    /// If `module` has no existing imports, the symbol should be
    /// imported using the `import` statement.
    pub(crate) fn import(module: &'a str, member: &'a str) -> Self {
        Self {
            module,
            member: Some(member),
            style: ImportStyle::Import,
            force_style: false,
        }
    }

    /// Create a new [`ImportRequest`] from a module and member.
    ///
    /// If `module` has no existing imports, the symbol should be
    /// imported using the `import from` statement.
    pub(crate) fn import_from(module: &'a str, member: &'a str) -> Self {
        Self {
            module,
            member: Some(member),
            style: ImportStyle::ImportFrom,
            force_style: false,
        }
    }

    /// Create a new [`ImportRequest`] for bringing the given module
    /// into scope.
    ///
    /// This is for just importing the module itself, always via an
    /// `import` statement.
    pub(crate) fn module(module: &'a str) -> Self {
        Self {
            module,
            member: None,
            style: ImportStyle::Import,
            force_style: false,
        }
    }

    /// Forces the requested import style to be used, even if another
    /// style would be more appropriate generally.
    //
    // NOTE: `self` is consumed by the struct-update expression below, so
    // a `mut` binding (as previously written) is unnecessary and only
    // triggered an `unused_mut` warning masked by `#![allow(warnings)]`.
    pub(crate) fn force(self) -> Self {
        Self {
            force_style: true,
            ..self
        }
    }

    /// Attempts to change the import request style so that the chances
    /// of an import conflict are minimized (although not always reduced
    /// to zero).
    fn avoid_conflicts(self, db: &dyn Db, importing_file: File, members: &MembersInScope) -> Self {
        // A request for a bare module is always an `import` statement.
        let Some(member) = self.member else {
            return Self {
                style: ImportStyle::Import,
                ..self
            };
        };
        match (members.map.get(self.module), members.map.get(member)) {
            // Neither symbol exists, so we can just proceed as
            // normal.
            (None, None) => self,
            // The symbol we want to import already exists but
            // the module symbol does not, so we can import the
            // symbol in a qualified way safely.
            (None, Some(member)) => {
                // ... unless the symbol we want is already
                // imported, then leave it as-is.
                if member.satisfies(db, importing_file, &self) {
                    return self;
                }
                Self {
                    style: ImportStyle::Import,
                    force_style: true,
                    ..self
                }
            }
            // The symbol we want to import doesn't exist but
            // the module does. So we can import the symbol we
            // want *unqualified* safely.
            //
            // ... unless the module symbol we found here is
            // actually a module symbol.
            (
                Some(&MemberInScope {
                    ty: Type::ModuleLiteral(_),
                    ..
                }),
                None,
            ) => self,
            (Some(_), None) => Self {
                style: ImportStyle::ImportFrom,
                force_style: true,
                ..self
            },
            // Both the module and the member symbols are in
            // scope. We *assume* that the module symbol is in
            // scope because it is imported. Since the member
            // symbol is definitively in scope, we attempt a
            // qualified import.
            //
            // This could lead to a situation where we add an
            // `import` that is shadowed by some other symbol.
            // This is unfortunate, but it's not clear what we
            // should do instead. rust-analyzer will still add
            // the conflicting import. I think that's the wiser
            // choice, instead of silently doing nothing or
            // silently omitting the symbol from completions.
            // (I suppose the best choice would be to ask the
            // user for an alias for the import or something.)
            (Some(_), Some(_)) => Self {
                style: ImportStyle::Import,
                force_style: false,
                ..self
            },
        }
    }
}
impl std::fmt::Display for ImportRequest<'_> {
    /// Renders the request as the import statement that would be inserted.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match (&self.style, self.member) {
            (ImportStyle::ImportFrom, Some(member)) => {
                write!(f, "from {} import {member}", self.module)
            }
            // Requests for a bare module (no member), and any request with
            // the `Import` style, are rendered as an `import` statement.
            _ => write!(f, "import {}", self.module),
        }
    }
}
/// The response to an import request.
#[derive(Debug)]
struct ImportResponse<'importer, 'ast> {
    /// The existing import statement that satisfied the request.
    import: &'importer AstImport<'ast>,
    /// How that statement satisfies the request.
    kind: ImportResponseKind<'ast>,
}
/// The kind of response to an import request.
///
/// This encodes the answer to the question: how does a given import
/// statement satisfy an [`ImportRequest`]? This encodes the different
/// degrees to which the request is satisfied.
#[derive(Debug)]
enum ImportResponseKind<'ast> {
    /// The import satisfies the request as-is. The symbol is already
    /// imported directly and may be used unqualified.
    ///
    /// This always corresponds to a `from <...> import <...>`
    /// statement. Note that `<...>` may be a wildcard import!
    Unqualified {
        /// The AST of the import that satisfied the request.
        ast: &'ast ast::StmtImportFrom,
        /// The specific alias in the `from <...> import <...>`
        /// statement that satisfied the request's `member`.
        alias: &'ast ast::Alias,
    },
    /// The necessary module is imported, but the symbol itself is not
    /// in scope. The symbol can be used via `module.symbol`.
    ///
    /// This always corresponds to a `import <...>` statement.
    Qualified {
        /// The AST of the import that satisfied the request.
        ast: &'ast ast::StmtImport,
        /// The specific alias in the import statement that
        /// satisfied the request's `module`.
        alias: &'ast ast::Alias,
    },
    /// The necessary module is imported via `from module import ...`,
    /// but the desired symbol is not listed in `...`.
    ///
    /// This always corresponds to a `from <...> import <...>`
    /// statement.
    ///
    /// It is guaranteed that this never contains a wildcard import.
    /// (otherwise, this import wouldn't be partial).
    Partial(&'ast ast::StmtImportFrom),
}
impl ImportResponseKind<'_> {
    /// Returns true if this import statement kind should be
    /// prioritized over the one given.
    ///
    /// This assumes that `self` occurs before `other` in the source
    /// code, which is why ties in priority are resolved in favor of
    /// `self`.
    fn is_prioritized_over(&self, other: &ImportResponseKind<'_>) -> bool {
        self.priority() <= other.priority()
    }

    /// Returns an integer reflecting the "priority" of this
    /// import kind relative to other import statements.
    ///
    /// Lower values indicate higher priority.
    fn priority(&self) -> usize {
        match *self {
            // An import that already brings the member into scope
            // unqualified is always the best possible match.
            ImportResponseKind::Unqualified { .. } => 0,
            // N.B. When given the choice between adding a
            // name to an existing `from ... import ...`
            // statement and using an existing `import ...`
            // in a qualified manner, we currently choose
            // the former. Originally we preferred qualification,
            // but there is some evidence that this violates
            // expectations.
            //
            // Ref: https://github.com/astral-sh/ty/issues/1274#issuecomment-3352233790
            ImportResponseKind::Partial(_) => 1,
            ImportResponseKind::Qualified { .. } => 2,
        }
    }
}
/// The style of a Python import statement.
#[derive(Debug)]
enum ImportStyle {
    /// Import the symbol using the `import` statement (e.g. `import
    /// foo; foo.bar`).
    Import,
    /// Import the symbol using the `from` statement (e.g. `from foo
    /// import bar; bar`).
    ImportFrom,
}
/// An error that can occur when trying to add an import.
#[derive(Debug)]
pub(crate) enum AddImportError {
    /// The symbol can't be imported, because another symbol is bound to the
    /// same name. The payload is the name of the conflicting binding.
    ConflictingName(String),
}
impl std::fmt::Display for AddImportError {
    /// Formats a user-facing description of the failed import insertion.
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match *self {
            AddImportError::ConflictingName(ref binding) => write!(
                fmt,
                "Unable to insert `{binding}` into scope due to name conflict"
            ),
        }
    }
}
impl std::error::Error for AddImportError {}
/// An AST visitor for extracting top-level imports.
#[derive(Debug, Default)]
struct TopLevelImports<'ast> {
    /// Current statement nesting depth; imports are only collected
    /// when this is zero (i.e., at module top level).
    level: u64,
    /// The top-level imports collected so far, in source order.
    imports: Vec<AstImport<'ast>>,
}
impl<'ast> TopLevelImports<'ast> {
    /// Collects every top-level import statement from the AST of the
    /// given Python module, in source order.
    fn find(module: &'ast ast::ModModule) -> Vec<AstImport<'ast>> {
        let mut collector = TopLevelImports::default();
        collector.visit_body(&module.body);
        collector.imports
    }
}
impl<'ast> SourceOrderVisitor<'ast> for TopLevelImports<'ast> {
fn visit_stmt(&mut self, stmt: &'ast ast::Stmt) {
match *stmt {
ast::Stmt::Import(ref node) => {
if self.level == 0 {
let kind = AstImportKind::Import(node);
self.imports.push(AstImport { stmt, kind });
}
}
ast::Stmt::ImportFrom(ref node) => {
if self.level == 0 {
let kind = AstImportKind::ImportFrom(node);
self.imports.push(AstImport { stmt, kind });
}
}
_ => {
// OK because it's not practical for the source code
// depth of a Python to exceed a u64.
//
// Also, it is perhaps a bit too eager to increment
// this for every non-import statement, particularly
// compared to the more refined scope tracking in the
// semantic index builder. However, I don't think
// we need anything more refined here. We only care
// about top-level imports. So as soon as we get into
// something nested, we can bail out.
//
// Although, this does mean, e.g.,
//
// if predicate:
// import whatever
//
// at the module scope is not caught here. If we
// need those imports, I think we'll just want some
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/semantic_tokens.rs | crates/ty_ide/src/semantic_tokens.rs | //! This module walks the AST and collects a set of "semantic tokens" for a file
//! or a range within a file. Each semantic token provides a "token type" and zero
//! or more "modifiers". This information can be used by an editor to provide
//! color coding based on semantic meaning.
//!
//! Visual Studio has a very useful debugger that allows you to inspect the
//! semantic tokens for any given position in the code. Not only is this useful
//! for debugging our semantic highlighting, it also allows easy comparison with
//! how Pylance (or other LSPs) highlights a certain token. You can open the scope
//! inspector via the Command Palette (Command/Ctrl+Shift+P) and select the
//! `Developer: Inspect Editor Tokens and Scopes` command.
//!
//! Current limitations and areas for future improvement:
//!
//! TODO: Need to handle semantic tokens within quoted annotations.
//!
//! TODO: Need to properly handle Annotated expressions. All type arguments other
//! than the first should be treated as value expressions, not as type expressions.
//!
//! TODO: Properties (or perhaps more generally, descriptor objects?) should be
//! classified as property tokens rather than just variables.
//!
//! TODO: Special forms like `Protocol` and `TypedDict` should probably be classified
//! as class tokens, but they are currently classified as variables.
//!
//! TODO: Type aliases (including those defined with the Python 3.12 "type" statement)
//! do not currently have a dedicated semantic token type, but they maybe should.
//!
//! TODO: Additional token modifiers might be added (e.g. for static methods,
//! abstract methods and classes).
use crate::Db;
use bitflags::bitflags;
use itertools::Itertools;
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::visitor::source_order::{
SourceOrderVisitor, TraversalSignal, walk_arguments, walk_expr, walk_stmt,
};
use ruff_python_ast::{
self as ast, AnyNodeRef, BytesLiteral, Expr, FString, InterpolatedStringElement, Stmt,
StringLiteral, TypeParam,
};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use std::ops::Deref;
use ty_python_semantic::semantic_index::definition::Definition;
use ty_python_semantic::types::TypeVarKind;
use ty_python_semantic::{
HasType, SemanticModel, semantic_index::definition::DefinitionKind, types::Type,
types::ide_support::definition_for_name,
};
/// Semantic token types supported by the language server.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SemanticTokenType {
    // NOTE: keep these variants in sync with `all()` and `as_lsp_concept()`
    // in the `impl` block below.
    Namespace,
    Class,
    Parameter,
    /// The `self` parameter of an instance method.
    SelfParameter,
    /// The `cls` parameter of a classmethod.
    ClsParameter,
    Variable,
    Property,
    Function,
    /// A function defined within a class body.
    Method,
    Keyword,
    String,
    Number,
    Decorator,
    BuiltinConstant,
    TypeParameter,
}
impl SemanticTokenType {
/// Returns all supported semantic token types as enum variants.
pub const fn all() -> [SemanticTokenType; 15] {
[
SemanticTokenType::Namespace,
SemanticTokenType::Class,
SemanticTokenType::Parameter,
SemanticTokenType::SelfParameter,
SemanticTokenType::ClsParameter,
SemanticTokenType::Variable,
SemanticTokenType::Property,
SemanticTokenType::Function,
SemanticTokenType::Method,
SemanticTokenType::Keyword,
SemanticTokenType::String,
SemanticTokenType::Number,
SemanticTokenType::Decorator,
SemanticTokenType::BuiltinConstant,
SemanticTokenType::TypeParameter,
]
}
/// Converts this semantic token type to its LSP string representation.
/// Some of these are standardized terms in the LSP specification,
/// while others are specific to the ty language server. It's important
/// to use the standardized ones where possible because clients can
/// use these for standardized color coding and syntax highlighting.
/// For details, refer to this LSP specification:
/// <https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#semanticTokenTypes>
pub const fn as_lsp_concept(&self) -> &'static str {
match self {
SemanticTokenType::Namespace => "namespace",
SemanticTokenType::Class => "class",
SemanticTokenType::Parameter => "parameter",
SemanticTokenType::SelfParameter => "selfParameter",
SemanticTokenType::ClsParameter => "clsParameter",
SemanticTokenType::Variable => "variable",
SemanticTokenType::Property => "property",
SemanticTokenType::Function => "function",
SemanticTokenType::Method => "method",
SemanticTokenType::Keyword => "keyword",
SemanticTokenType::String => "string",
SemanticTokenType::Number => "number",
SemanticTokenType::Decorator => "decorator",
SemanticTokenType::BuiltinConstant => "builtinConstant",
SemanticTokenType::TypeParameter => "typeParameter",
}
}
}
bitflags! {
    /// Semantic token modifiers using bit flags.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct SemanticTokenModifier: u32 {
        /// The token introduces a binding at its definition site (function,
        /// class, or parameter names in signatures).
        const DEFINITION = 1 << 0;
        /// The name follows the ALL_CAPS constant naming convention.
        const READONLY = 1 << 1;
        /// The token names an `async` function.
        const ASYNC = 1 << 2;
        /// The token is documentation — presumably applied to docstring
        /// tokens via the visitor's docstring tracking (usage not shown here).
        const DOCUMENTATION = 1 << 3;
    }
}
impl SemanticTokenModifier {
    /// Returns the names of all supported token modifiers for LSP
    /// capability registration. The order matches the bit positions above.
    ///
    /// Standardized LSP names are used where possible so clients can apply
    /// their built-in color coding. For details, refer to this LSP
    /// specification:
    /// <https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#semanticTokenModifiers>
    pub fn all_names() -> Vec<&'static str> {
        Vec::from(["definition", "readonly", "async", "documentation"])
    }
}
/// A semantic token with its position and classification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SemanticToken {
    /// The source range the token covers.
    pub range: TextRange,
    /// The semantic classification of the token.
    pub token_type: SemanticTokenType,
    /// Additional modifiers (definition, readonly, async, documentation).
    pub modifiers: SemanticTokenModifier,
}
// Lets a `SemanticToken` be used wherever a `Ranged` value is expected
// (e.g. the file-order debug assertion uses `token.start()`).
impl Ranged for SemanticToken {
    fn range(&self) -> TextRange {
        self.range
    }
}
/// The result of semantic tokenization.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SemanticTokens {
    /// The collected tokens, in file order; exposed as a slice via `Deref`.
    tokens: Vec<SemanticToken>,
}
impl SemanticTokens {
    /// Create a new `SemanticTokens` instance.
    ///
    /// Callers pass tokens in file order (the visitor enforces this with a
    /// debug assertion when collecting).
    pub fn new(tokens: Vec<SemanticToken>) -> Self {
        Self { tokens }
    }
}
impl Deref for SemanticTokens {
    type Target = [SemanticToken];

    /// Dereferences to the underlying token slice so callers can iterate
    /// and use slice methods directly.
    fn deref(&self) -> &Self::Target {
        self.tokens.as_slice()
    }
}
/// Generates semantic tokens for a Python file.
///
/// When `range` is `Some`, only tokens intersecting that range are emitted;
/// pass `None` to get tokens for the entire file.
pub fn semantic_tokens(db: &dyn Db, file: File, range: Option<TextRange>) -> SemanticTokens {
    let module = parsed_module(db, file).load(db);
    let model = SemanticModel::new(db, file);

    let mut collector = SemanticTokenVisitor::new(&model, range);
    // A module body may start with a docstring.
    collector.expecting_docstring = true;
    collector.visit_body(module.suite());

    SemanticTokens::new(collector.tokens)
}
/// AST visitor that collects semantic tokens.
#[expect(clippy::struct_excessive_bools)]
struct SemanticTokenVisitor<'db> {
    /// Semantic model used to resolve definitions and infer types.
    model: &'db SemanticModel<'db>,
    /// Tokens collected so far, in file order.
    tokens: Vec<SemanticToken>,
    /// True while visiting a class body directly (cleared when entering a
    /// nested function, so inner functions aren't treated as methods).
    in_class_scope: bool,
    /// True while visiting a type annotation expression — NOTE(review):
    /// presumably toggled in `visit_annotation`, which is outside this view.
    in_type_annotation: bool,
    /// True while visiting an assignment/for/with target that creates a
    /// definition.
    in_target_creating_definition: bool,
    /// True while visiting a docstring — TODO confirm where this is set;
    /// the toggle is not visible in this part of the file.
    in_docstring: bool,
    /// Set just before visiting a body whose first statement may be a
    /// docstring (module, class, and function bodies).
    expecting_docstring: bool,
    /// When present, only tokens intersecting this range are emitted.
    range_filter: Option<TextRange>,
}
impl<'db> SemanticTokenVisitor<'db> {
    /// Creates a visitor that reports tokens against `model`, optionally
    /// restricted to tokens intersecting `range_filter`.
    fn new(model: &'db SemanticModel<'db>, range_filter: Option<TextRange>) -> Self {
        Self {
            model,
            tokens: Vec::new(),
            in_class_scope: false,
            in_target_creating_definition: false,
            in_type_annotation: false,
            in_docstring: false,
            range_filter,
            expecting_docstring: false,
        }
    }
    /// Appends a token covering `ranged`, skipping empty ranges and ranges
    /// that don't overlap the optional range filter. Tokens must be added
    /// in file order.
    fn add_token(
        &mut self,
        ranged: impl Ranged,
        token_type: SemanticTokenType,
        modifiers: SemanticTokenModifier,
    ) {
        let range = ranged.range();
        if range.is_empty() {
            return;
        }
        // Only emit tokens that intersect with the range filter, if one is specified
        if let Some(range_filter) = self.range_filter {
            // Only include ranges that have a non-empty overlap. Adjacent ranges
            // should be excluded.
            if range
                .intersect(range_filter)
                .is_none_or(TextRange::is_empty)
            {
                return;
            }
        }
        // Debug assertion to ensure tokens are added in file order
        debug_assert!(
            self.tokens
                .last()
                .is_none_or(|last| last.start() <= range.start()),
            "Tokens must be added in file order: previous token ends at {:?}, new token starts at {:?}",
            self.tokens.last().map(SemanticToken::start),
            range.start()
        );
        self.tokens.push(SemanticToken {
            range,
            token_type,
            modifiers,
        });
    }
    /// Returns true if `name` follows the ALL_CAPS constant naming
    /// convention: only uppercase letters, underscores, and digits, and
    /// longer than one byte.
    fn is_constant_name(name: &str) -> bool {
        name.chars()
            .all(|c| c.is_uppercase() || c == '_' || c.is_numeric())
            && name.len() > 1
    }
    /// Determines the token type and modifiers for a name expression,
    /// preferring its definition kind and falling back to its inferred type.
    fn classify_name(&self, name: &ast::ExprName) -> (SemanticTokenType, SemanticTokenModifier) {
        // First try to classify the token based on its definition kind.
        let definition = definition_for_name(
            self.model,
            name,
            ty_python_semantic::ImportAliasResolution::ResolveAliases,
        );
        if let Some(definition) = definition {
            let name_str = name.id.as_str();
            if let Some(classification) = self.classify_from_definition(definition, name_str) {
                return classification;
            }
        }
        // Fall back to type-based classification.
        let ty = name.inferred_type(self.model).unwrap_or(Type::unknown());
        let name_str = name.id.as_str();
        self.classify_from_type_and_name_str(ty, name_str)
    }
    /// Classifies a name based on its resolved definition. Returns `None`
    /// for imports so the caller falls back to type-based classification.
    fn classify_from_definition(
        &self,
        definition: Definition,
        name_str: &str,
    ) -> Option<(SemanticTokenType, SemanticTokenModifier)> {
        let mut modifiers = SemanticTokenModifier::empty();
        let db = self.model.db();
        // The definition may live in a different file than the name.
        let model = SemanticModel::new(db, definition.file(db));
        match definition.kind(db) {
            DefinitionKind::Function(_) => {
                // Check if this is a method based on current scope
                if self.in_class_scope {
                    Some((SemanticTokenType::Method, modifiers))
                } else {
                    Some((SemanticTokenType::Function, modifiers))
                }
            }
            DefinitionKind::Class(_) => Some((SemanticTokenType::Class, modifiers)),
            DefinitionKind::TypeVar(_) => Some((SemanticTokenType::TypeParameter, modifiers)),
            DefinitionKind::Parameter(parameter) => {
                let parsed = parsed_module(db, definition.file(db));
                let ty = parameter.node(&parsed.load(db)).inferred_type(&model);
                if let Some(ty) = ty {
                    // Detect `self`/`cls`: a parameter typed as the `Self`
                    // typevar (or a subclass-of form for `cls`).
                    let type_var = match ty {
                        Type::TypeVar(type_var) => Some((type_var, false)),
                        Type::SubclassOf(subclass_of) => {
                            subclass_of.into_type_var().map(|var| (var, true))
                        }
                        _ => None,
                    };
                    if let Some((type_var, is_cls)) = type_var
                        && matches!(type_var.typevar(db).kind(db), TypeVarKind::TypingSelf)
                    {
                        let kind = if is_cls {
                            SemanticTokenType::ClsParameter
                        } else {
                            SemanticTokenType::SelfParameter
                        };
                        return Some((kind, modifiers));
                    }
                }
                Some((SemanticTokenType::Parameter, modifiers))
            }
            DefinitionKind::VariadicPositionalParameter(_) => {
                Some((SemanticTokenType::Parameter, modifiers))
            }
            DefinitionKind::VariadicKeywordParameter(_) => {
                Some((SemanticTokenType::Parameter, modifiers))
            }
            DefinitionKind::TypeAlias(_) => Some((SemanticTokenType::TypeParameter, modifiers)),
            DefinitionKind::Import(_)
            | DefinitionKind::ImportFrom(_)
            | DefinitionKind::StarImport(_) => {
                // For imports, return None to fall back to type-based classification
                // This allows imported names to be classified based on what they actually are
                // (e.g., imported classes as Class, imported functions as Function, etc.)
                None
            }
            _ => {
                // For other definition kinds (assignments, etc.), apply constant naming convention
                if Self::is_constant_name(name_str) {
                    modifiers |= SemanticTokenModifier::READONLY;
                }
                let parsed = parsed_module(db, definition.file(db));
                let parsed = parsed.load(db);
                let value = match definition.kind(db) {
                    DefinitionKind::Assignment(assignment) => Some(assignment.value(&parsed)),
                    _ => None,
                };
                // A variable assigned a class-like value is highlighted as a
                // class (e.g. `Alias = SomeClass`).
                if let Some(value) = value
                    && let Some(value_ty) = value.inferred_type(&model)
                {
                    if value_ty.is_class_literal()
                        || value_ty.is_subclass_of()
                        || value_ty.is_generic_alias()
                    {
                        return Some((SemanticTokenType::Class, modifiers));
                    }
                }
                Some((SemanticTokenType::Variable, modifiers))
            }
        }
    }
    /// Classifies a name from its inferred type, applying the constant
    /// naming convention as a fallback modifier.
    fn classify_from_type_and_name_str(
        &self,
        ty: Type,
        name_str: &str,
    ) -> (SemanticTokenType, SemanticTokenModifier) {
        let mut modifiers = SemanticTokenModifier::empty();
        // In type annotation contexts, names that refer to nominal instances or protocol instances
        // should be classified as Class tokens (e.g., "int" in "x: int" should be a Class token)
        if self.in_type_annotation {
            match ty {
                Type::NominalInstance(_) | Type::ProtocolInstance(_) => {
                    return (SemanticTokenType::Class, modifiers);
                }
                _ => {
                    // Continue with normal classification for other types in annotations
                }
            }
        }
        match ty {
            Type::ClassLiteral(_) => (SemanticTokenType::Class, modifiers),
            Type::TypeVar(_) => (SemanticTokenType::TypeParameter, modifiers),
            Type::FunctionLiteral(_) => {
                // Check if this is a method based on current scope
                if self.in_class_scope {
                    (SemanticTokenType::Method, modifiers)
                } else {
                    (SemanticTokenType::Function, modifiers)
                }
            }
            Type::BoundMethod(_) => (SemanticTokenType::Method, modifiers),
            Type::ModuleLiteral(_) => (SemanticTokenType::Namespace, modifiers),
            _ => {
                // Check for constant naming convention
                if Self::is_constant_name(name_str) {
                    modifiers |= SemanticTokenModifier::READONLY;
                }
                // For other types (variables, modules, etc.), assume variable
                (SemanticTokenType::Variable, modifiers)
            }
        }
    }
    /// Classifies an attribute access (`obj.attr`) from the attribute's
    /// inferred type.
    fn classify_from_type_for_attribute(
        ty: Type,
        attr_name: &ast::Identifier,
    ) -> (SemanticTokenType, SemanticTokenModifier) {
        let attr_name_str = attr_name.id.as_str();
        let mut modifiers = SemanticTokenModifier::empty();
        // Classify based on the inferred type of the attribute
        match ty {
            Type::ClassLiteral(_) => (SemanticTokenType::Class, modifiers),
            Type::FunctionLiteral(_) => {
                // This is a function accessed as an attribute, likely a method
                (SemanticTokenType::Method, modifiers)
            }
            Type::BoundMethod(_) => {
                // Method bound to an instance
                (SemanticTokenType::Method, modifiers)
            }
            Type::ModuleLiteral(_) => {
                // Module accessed as an attribute (e.g., from os import path)
                (SemanticTokenType::Namespace, modifiers)
            }
            _ if ty.is_property_instance() => {
                // Actual Python property
                (SemanticTokenType::Property, modifiers)
            }
            _ => {
                // Check for constant naming convention
                if Self::is_constant_name(attr_name_str) {
                    modifiers |= SemanticTokenModifier::READONLY;
                }
                // For other types (variables, constants, etc.), classify as variable
                (SemanticTokenType::Variable, modifiers)
            }
        }
    }
    /// Classifies a function parameter, distinguishing `self`/`cls` for the
    /// first parameter of methods based on the decorator list.
    fn classify_parameter(
        &self,
        _param: &ast::Parameter,
        is_first: bool,
        func: &ast::StmtFunctionDef,
    ) -> SemanticTokenType {
        if is_first && self.in_class_scope {
            // Check if this is a classmethod (has @classmethod decorator)
            // TODO - replace with a more robust way to check whether this is a classmethod
            let is_classmethod =
                func.decorator_list
                    .iter()
                    .any(|decorator| match &decorator.expression {
                        ast::Expr::Name(name) => name.id.as_str() == "classmethod",
                        ast::Expr::Attribute(attr) => attr.attr.id.as_str() == "classmethod",
                        _ => false,
                    });
            // Check if this is a staticmethod (has @staticmethod decorator)
            // TODO - replace with a more robust way to check whether this is a staticmethod
            let is_staticmethod =
                func.decorator_list
                    .iter()
                    .any(|decorator| match &decorator.expression {
                        ast::Expr::Name(name) => name.id.as_str() == "staticmethod",
                        ast::Expr::Attribute(attr) => attr.attr.id.as_str() == "staticmethod",
                        _ => false,
                    });
            if is_staticmethod {
                // Static methods don't have self/cls parameters
                SemanticTokenType::Parameter
            } else if is_classmethod {
                // First parameter of a classmethod is cls parameter
                SemanticTokenType::ClsParameter
            } else {
                // First parameter of an instance method is self parameter
                SemanticTokenType::SelfParameter
            }
        } else {
            SemanticTokenType::Parameter
        }
    }
    /// Emits one token per dot-separated component of `name` (used for
    /// dotted module names like `os.path`).
    fn add_dotted_name_tokens(&mut self, name: &ast::Identifier, token_type: SemanticTokenType) {
        let name_str = name.id.as_str();
        let name_start = name.start();
        // Split the dotted name and calculate positions for each part
        let mut current_offset = TextSize::default();
        for part in name_str.split('.') {
            if !part.is_empty() {
                self.add_token(
                    TextRange::at(name_start + current_offset, part.text_len()),
                    token_type,
                    SemanticTokenModifier::empty(),
                );
            }
            // Move past this part and the dot
            current_offset += part.text_len() + '.'.text_len();
        }
    }
    /// Classifies an import alias's local name from the alias's inferred type.
    fn classify_from_alias_type(
        &self,
        ty: Type,
        local_name: &ast::Identifier,
    ) -> (SemanticTokenType, SemanticTokenModifier) {
        self.classify_from_type_and_name_str(ty, local_name.id.as_str())
    }
    /// Visit parameters for a function or lambda expression and classify
    /// them as parameters, selfParameter, or clsParameter as appropriate.
    /// `func` is `None` for lambdas, whose parameters never get self/cls.
    fn visit_parameters(
        &mut self,
        parameters: &ast::Parameters,
        func: Option<&ast::StmtFunctionDef>,
    ) {
        let mut param_index = 0;
        // The `parameters.iter` method does return the parameters in sorted order but only if
        // the AST is well-formed, but e.g. not for:
        // ```py
        // def foo(self, **key, value):
        //     return
        // ```
        // Ideally, the ast would use a single vec for all parameters to avoid this issue as
        // discussed here https://github.com/astral-sh/ruff/issues/14315 and
        // here https://github.com/astral-sh/ruff/blob/71f8389f61a243a0c7584adffc49134ccf792aba/crates/ruff_python_parser/src/parser/statement.rs#L3176-L3179
        let parameters_by_start = parameters
            .iter()
            .sorted_by_key(ruff_text_size::Ranged::start);
        for any_param in parameters_by_start {
            let parameter = any_param.as_parameter();
            let token_type = match any_param {
                ast::AnyParameterRef::NonVariadic(_) => {
                    // For non-variadic parameters (positional-only, regular, keyword-only),
                    // check if this should be classified as self/cls parameter
                    if let Some(func) = func {
                        let result = self.classify_parameter(parameter, param_index == 0, func);
                        param_index += 1;
                        result
                    } else {
                        // For lambdas, all parameters are just parameters (no self/cls)
                        param_index += 1;
                        SemanticTokenType::Parameter
                    }
                }
                ast::AnyParameterRef::Variadic(_) => {
                    // Variadic parameters (*args, **kwargs) are always just parameters
                    param_index += 1;
                    SemanticTokenType::Parameter
                }
            };
            self.add_token(
                parameter.name.range(),
                token_type,
                SemanticTokenModifier::DEFINITION,
            );
            // Handle parameter type annotations
            if let Some(annotation) = &parameter.annotation {
                self.visit_annotation(annotation);
            }
            if let Some(default) = any_param.default() {
                self.visit_expr(default);
            }
        }
    }
}
impl SourceOrderVisitor<'_> for SemanticTokenVisitor<'_> {
fn enter_node(&mut self, node: AnyNodeRef<'_>) -> TraversalSignal {
// If we have a range filter and this node doesn't intersect, skip it
// and all its children as an optimization
if let Some(range_filter) = self.range_filter {
if node.range().intersect(range_filter).is_none() {
return TraversalSignal::Skip;
}
}
TraversalSignal::Traverse
}
fn visit_stmt(&mut self, stmt: &Stmt) {
let expecting_docstring = self.expecting_docstring;
self.expecting_docstring = false;
match stmt {
ast::Stmt::FunctionDef(func) => {
// Visit decorator expressions
for decorator in &func.decorator_list {
self.visit_decorator(decorator);
}
// Function name
self.add_token(
func.name.range(),
if self.in_class_scope {
SemanticTokenType::Method
} else {
SemanticTokenType::Function
},
if func.is_async {
SemanticTokenModifier::DEFINITION | SemanticTokenModifier::ASYNC
} else {
SemanticTokenModifier::DEFINITION
},
);
// Type parameters (Python 3.12+ syntax)
if let Some(type_params) = &func.type_params {
for type_param in &type_params.type_params {
self.visit_type_param(type_param);
}
}
self.visit_parameters(&func.parameters, Some(func));
// Handle return type annotation
if let Some(returns) = &func.returns {
self.visit_annotation(returns);
}
// Clear the in_class_scope flag so inner functions
// are not treated as methods
let prev_in_class = self.in_class_scope;
self.in_class_scope = false;
self.expecting_docstring = true;
self.visit_body(&func.body);
self.expecting_docstring = false;
self.in_class_scope = prev_in_class;
}
ast::Stmt::ClassDef(class) => {
// Visit decorator expressions
for decorator in &class.decorator_list {
self.visit_decorator(decorator);
}
// Class name
self.add_token(
class.name.range(),
SemanticTokenType::Class,
SemanticTokenModifier::DEFINITION,
);
// Type parameters (Python 3.12+ syntax)
if let Some(type_params) = &class.type_params {
for type_param in &type_params.type_params {
self.visit_type_param(type_param);
}
}
// Handle base classes and type annotations in inheritance
if let Some(arguments) = &class.arguments {
walk_arguments(self, arguments);
}
let prev_in_class = self.in_class_scope;
self.in_class_scope = true;
self.expecting_docstring = true;
self.visit_body(&class.body);
self.expecting_docstring = false;
self.in_class_scope = prev_in_class;
}
ast::Stmt::TypeAlias(type_alias) => {
// Type alias name
self.add_token(
type_alias.name.range(),
SemanticTokenType::Class,
SemanticTokenModifier::DEFINITION,
);
// Type parameters (Python 3.12+ syntax)
if let Some(type_params) = &type_alias.type_params {
for type_param in &type_params.type_params {
self.visit_type_param(type_param);
}
}
self.visit_expr(&type_alias.value);
}
ast::Stmt::Import(import) => {
for alias in &import.names {
// Create separate tokens for each part of a dotted module name
self.add_dotted_name_tokens(&alias.name, SemanticTokenType::Namespace);
if let Some(asname) = &alias.asname {
self.add_token(
asname.range(),
SemanticTokenType::Namespace,
SemanticTokenModifier::empty(),
);
}
}
}
ast::Stmt::ImportFrom(import) => {
if let Some(module) = &import.module {
// Create separate tokens for each part of a dotted module name
self.add_dotted_name_tokens(module, SemanticTokenType::Namespace);
}
for alias in &import.names {
if let Some(asname) = &alias.asname {
// For aliased imports (from X import Y as Z), classify Z based on what Y is
let ty = alias.inferred_type(self.model).unwrap_or(Type::unknown());
let (token_type, modifiers) = self.classify_from_alias_type(ty, asname);
self.add_token(asname, token_type, modifiers);
} else {
// For direct imports (from X import Y), use semantic classification
let ty = alias.inferred_type(self.model).unwrap_or(Type::unknown());
let (token_type, modifiers) =
self.classify_from_alias_type(ty, &alias.name);
self.add_token(&alias.name, token_type, modifiers);
}
}
}
ast::Stmt::Nonlocal(nonlocal_stmt) => {
// Handle nonlocal statements - classify identifiers as variables
for identifier in &nonlocal_stmt.names {
self.add_token(
identifier.range(),
SemanticTokenType::Variable,
SemanticTokenModifier::empty(),
);
}
}
ast::Stmt::Global(global_stmt) => {
// Handle global statements - classify identifiers as variables
for identifier in &global_stmt.names {
self.add_token(
identifier.range(),
SemanticTokenType::Variable,
SemanticTokenModifier::empty(),
);
}
}
ast::Stmt::Assign(assignment) => {
self.in_target_creating_definition = true;
for element in &assignment.targets {
self.visit_expr(element);
}
self.in_target_creating_definition = false;
self.visit_expr(&assignment.value);
self.expecting_docstring = true;
}
ast::Stmt::AnnAssign(assignment) => {
self.in_target_creating_definition = true;
self.visit_expr(&assignment.target);
self.in_target_creating_definition = false;
self.visit_expr(&assignment.annotation);
if let Some(value) = &assignment.value {
self.visit_expr(value);
}
self.expecting_docstring = true;
}
ast::Stmt::For(for_stmt) => {
self.in_target_creating_definition = true;
self.visit_expr(&for_stmt.target);
self.in_target_creating_definition = false;
self.visit_expr(&for_stmt.iter);
self.visit_body(&for_stmt.body);
self.visit_body(&for_stmt.orelse);
}
ast::Stmt::With(with_stmt) => {
for item in &with_stmt.items {
self.visit_expr(&item.context_expr);
if let Some(expr) = &item.optional_vars {
self.in_target_creating_definition = true;
self.visit_expr(expr);
self.in_target_creating_definition = false;
}
}
self.visit_body(&with_stmt.body);
}
ast::Stmt::Try(try_stmt) => {
self.visit_body(&try_stmt.body);
for handler in &try_stmt.handlers {
match handler {
ast::ExceptHandler::ExceptHandler(except_handler) => {
if let Some(expr) = &except_handler.type_ {
self.visit_expr(expr);
}
if let Some(name) = &except_handler.name {
self.add_token(
name.range(),
SemanticTokenType::Variable,
SemanticTokenModifier::DEFINITION,
);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/references.rs | crates/ty_ide/src/references.rs | //! This module implements the core functionality of the "references",
//! "document highlight" and "rename" language server features. It locates
//! all references to a named symbol. Unlike a simple text search for the
//! symbol's name, this is a "semantic search" where the text and the semantic
//! meaning must match.
//!
//! Some symbols (such as parameters and local variables) are visible only
//! within their scope. All other symbols, such as those defined at the global
//! scope or within classes, are visible outside of the module. Finding
//! all references to these externally-visible symbols therefore requires
//! an expensive search of all source files in the workspace.
use crate::goto::GotoTarget;
use crate::{Db, NavigationTargets, ReferenceKind, ReferenceTarget};
use ruff_db::files::File;
use ruff_python_ast::find_node::CoveringNode;
use ruff_python_ast::token::Tokens;
use ruff_python_ast::{
self as ast, AnyNodeRef,
visitor::source_order::{SourceOrderVisitor, TraversalSignal},
};
use ruff_text_size::{Ranged, TextRange};
use ty_python_semantic::{ImportAliasResolution, SemanticModel};
/// Mode for references search behavior.
///
/// The mode determines whether the declaration itself is reported and
/// whether the search is limited to the current file or spans the project.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReferencesMode {
    /// Find all references including the declaration
    References,
    /// Find all references but skip the declaration
    ReferencesSkipDeclaration,
    /// Find references for rename operations, limited to current file only
    Rename,
    /// Find references for multi-file rename operations (searches across all files)
    RenameMultiFile,
    /// Find references for document highlights (limits search to current file)
    DocumentHighlights,
}
impl ReferencesMode {
    /// Maps the search mode to how import aliases should be resolved.
    pub(super) fn to_import_alias_resolution(self) -> ImportAliasResolution {
        match self {
            // Rename and document-highlight operations must not look
            // through aliases. Given:
            //
            // ```py
            // from warnings import deprecated as my_deprecated
            //
            // @my_deprecated
            // def foo
            // ```
            //
            // renaming `my_deprecated` should only touch the alias, never
            // the original definition inside `warnings`.
            Self::Rename | Self::RenameMultiFile | Self::DocumentHighlights => {
                ImportAliasResolution::PreserveAliases
            }
            // Find-references, by contrast, resolves aliases so that a
            // search on `my_deprecated` reports every usage of `deprecated`
            // across the entire project.
            Self::References | Self::ReferencesSkipDeclaration => {
                ImportAliasResolution::ResolveAliases
            }
        }
    }
}
/// Find all references to a symbol at the given position.
///
/// Depending on `mode` and the symbol's visibility, the search may extend
/// beyond `file` to every file in the project. Returns `None` when nothing
/// is found.
pub(crate) fn references(
    db: &dyn Db,
    file: File,
    goto_target: &GotoTarget,
    mode: ReferencesMode,
) -> Option<Vec<ReferenceTarget>> {
    let model = SemanticModel::new(db, file);
    let target_definitions = goto_target
        .get_definition_targets(&model, mode.to_import_alias_resolution())?
        .declaration_targets(db)?;

    // The symbol's text, used for cheap pre-filtering before any semantic work.
    let target_text = goto_target.to_string()?;

    // Start with the file containing the symbol itself.
    let mut results = Vec::new();
    references_for_file(
        db,
        file,
        &target_definitions,
        &target_text,
        mode,
        &mut results,
    );

    // Only some modes search beyond the current file.
    let multi_file = matches!(
        mode,
        ReferencesMode::References
            | ReferencesMode::ReferencesSkipDeclaration
            | ReferencesMode::RenameMultiFile
    );

    if multi_file && is_symbol_externally_visible(goto_target) {
        // Look for references in all other files within the workspace.
        for other_file in &db.project().files(db) {
            // The current file was already processed above.
            if other_file == file {
                continue;
            }
            // A simple text scan rules out most files before the more
            // expensive semantic analysis runs.
            let source = ruff_db::source::source_text(db, other_file);
            if source.as_str().contains(target_text.as_ref()) {
                references_for_file(
                    db,
                    other_file,
                    &target_definitions,
                    &target_text,
                    mode,
                    &mut results,
                );
            }
        }
    }

    (!results.is_empty()).then_some(results)
}
/// Find all references to the symbol within a single file and append them
/// to `references`. The exact behavior depends on `mode`.
fn references_for_file(
    db: &dyn Db,
    file: File,
    target_definitions: &NavigationTargets,
    target_text: &str,
    mode: ReferencesMode,
    references: &mut Vec<ReferenceTarget>,
) {
    let module = ruff_db::parsed::parsed_module(db, file).load(db);
    let model = SemanticModel::new(db, file);

    let mut visitor = LocalReferencesFinder {
        model: &model,
        tokens: module.tokens(),
        target_definitions,
        references,
        mode,
        target_text,
        ancestors: Vec::new(),
    };

    AnyNodeRef::from(module.syntax()).visit_source_order(&mut visitor);
}
/// Determines whether a symbol is potentially visible outside of the
/// current module.
///
/// Parameters, except-clause variables, and type-parameter names are scoped
/// to the current file; every other target is conservatively assumed to be
/// reachable from other modules.
///
/// TODO: Local variables could also return false, except when they live in
/// the global scope or are bound via a `global` statement.
fn is_symbol_externally_visible(goto_target: &GotoTarget<'_>) -> bool {
    !matches!(
        goto_target,
        GotoTarget::Parameter(_)
            | GotoTarget::ExceptVariable(_)
            | GotoTarget::TypeParamTypeVarName(_)
            | GotoTarget::TypeParamParamSpecName(_)
            | GotoTarget::TypeParamTypeVarTupleName(_)
    )
}
/// AST visitor to find all references to a specific symbol by comparing semantic definitions
struct LocalReferencesFinder<'a> {
    /// Semantic model for the file being searched.
    model: &'a SemanticModel<'a>,
    /// Token stream of the module being searched — presumably used when
    /// computing precise reference ranges; confirm in
    /// `check_reference_from_covering_node`.
    tokens: &'a Tokens,
    /// The declaration target(s) the searched symbol resolves to.
    target_definitions: &'a NavigationTargets,
    /// Output vector that found references are appended to.
    references: &'a mut Vec<ReferenceTarget>,
    /// Controls whether declaration sites themselves are reported.
    mode: ReferencesMode,
    /// Text of the searched symbol, used for cheap pre-filtering.
    target_text: &'a str,
    /// Stack of ancestors of the node currently being visited.
    ancestors: Vec<AnyNodeRef<'a>>,
}
impl<'a> SourceOrderVisitor<'a> for LocalReferencesFinder<'a> {
fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
self.ancestors.push(node);
match node {
AnyNodeRef::ExprName(name_expr) => {
// If the name doesn't match our target text, this isn't a match
if name_expr.id.as_str() != self.target_text {
return TraversalSignal::Traverse;
}
let covering_node = CoveringNode::from_ancestors(self.ancestors.clone());
self.check_reference_from_covering_node(&covering_node);
}
AnyNodeRef::ExprAttribute(attr_expr) => {
self.check_identifier_reference(&attr_expr.attr);
}
AnyNodeRef::StmtFunctionDef(func) if self.should_include_declaration() => {
self.check_identifier_reference(&func.name);
}
AnyNodeRef::StmtClassDef(class) if self.should_include_declaration() => {
self.check_identifier_reference(&class.name);
}
AnyNodeRef::Parameter(parameter) if self.should_include_declaration() => {
self.check_identifier_reference(¶meter.name);
}
AnyNodeRef::Keyword(keyword) => {
if let Some(arg) = &keyword.arg {
self.check_identifier_reference(arg);
}
}
AnyNodeRef::StmtGlobal(global_stmt) if self.should_include_declaration() => {
for name in &global_stmt.names {
self.check_identifier_reference(name);
}
}
AnyNodeRef::StmtNonlocal(nonlocal_stmt) if self.should_include_declaration() => {
for name in &nonlocal_stmt.names {
self.check_identifier_reference(name);
}
}
AnyNodeRef::ExceptHandlerExceptHandler(handler)
if self.should_include_declaration() =>
{
if let Some(name) = &handler.name {
self.check_identifier_reference(name);
}
}
AnyNodeRef::PatternMatchAs(pattern_as) if self.should_include_declaration() => {
if let Some(name) = &pattern_as.name {
self.check_identifier_reference(name);
}
}
AnyNodeRef::PatternMatchStar(pattern_star) if self.should_include_declaration() => {
if let Some(name) = &pattern_star.name {
self.check_identifier_reference(name);
}
}
AnyNodeRef::PatternMatchMapping(pattern_mapping)
if self.should_include_declaration() =>
{
if let Some(rest_name) = &pattern_mapping.rest {
self.check_identifier_reference(rest_name);
}
}
AnyNodeRef::TypeParamParamSpec(param_spec) if self.should_include_declaration() => {
self.check_identifier_reference(¶m_spec.name);
}
AnyNodeRef::TypeParamTypeVarTuple(param_tuple) if self.should_include_declaration() => {
self.check_identifier_reference(¶m_tuple.name);
}
AnyNodeRef::TypeParamTypeVar(param_var) if self.should_include_declaration() => {
self.check_identifier_reference(¶m_var.name);
}
AnyNodeRef::ExprStringLiteral(string_expr) if self.should_include_declaration() => {
// Highlight the sub-AST of a string annotation
if let Some((sub_ast, sub_model)) = self.model.enter_string_annotation(string_expr)
{
let mut sub_finder = LocalReferencesFinder {
model: &sub_model,
target_definitions: self.target_definitions,
references: self.references,
mode: self.mode,
tokens: sub_ast.tokens(),
target_text: self.target_text,
ancestors: Vec::new(),
};
sub_finder.visit_expr(sub_ast.expr());
}
}
AnyNodeRef::Alias(alias) if self.should_include_declaration() => {
// Handle import alias declarations
if let Some(asname) = &alias.asname {
self.check_identifier_reference(asname);
}
// Only check the original name if it matches our target text
// This is for cases where we're renaming the imported symbol name itself
if alias.name.id == self.target_text {
self.check_identifier_reference(&alias.name);
}
}
_ => {}
}
TraversalSignal::Traverse
}
    fn leave_node(&mut self, node: AnyNodeRef<'a>) {
        // Pop the ancestor pushed by `enter_node`; the stack must mirror the
        // traversal exactly so `check_identifier_reference` sees a valid chain.
        debug_assert_eq!(self.ancestors.last(), Some(&node));
        self.ancestors.pop();
    }
}
impl LocalReferencesFinder<'_> {
    /// Check if we should include declarations based on the current mode
    fn should_include_declaration(&self) -> bool {
        matches!(
            self.mode,
            ReferencesMode::References
                | ReferencesMode::DocumentHighlights
                | ReferencesMode::Rename
                | ReferencesMode::RenameMultiFile
        )
    }

    /// Helper method to check identifier references for declarations
    fn check_identifier_reference(&mut self, identifier: &ast::Identifier) {
        // Quick text-based check first
        if identifier.id != self.target_text {
            return;
        }
        // Extend the current ancestor chain with the identifier itself so the
        // covering-node machinery can resolve it like a cursor position would be.
        let mut ancestors_with_identifier = self.ancestors.clone();
        ancestors_with_identifier.push(AnyNodeRef::from(identifier));
        let covering_node = CoveringNode::from_ancestors(ancestors_with_identifier);
        self.check_reference_from_covering_node(&covering_node);
    }

    /// Determines whether the given covering node is a reference to
    /// the symbol we are searching for
    fn check_reference_from_covering_node(&mut self, covering_node: &CoveringNode<'_>) {
        // Use the start of the covering node as the offset. Any offset within
        // the node is fine here. Offsets matter only for import statements
        // where the identifier might be a multi-part module name.
        let offset = covering_node.node().start();
        if let Some(goto_target) =
            GotoTarget::from_covering_node(self.model, covering_node, offset, self.tokens)
        {
            // Get the definitions for this goto target
            if let Some(current_definitions) = goto_target
                .get_definition_targets(self.model, self.mode.to_import_alias_resolution())
                .and_then(|definitions| definitions.declaration_targets(self.model.db()))
            {
                // Check if any of the current definitions match our target definitions
                if self.navigation_targets_match(&current_definitions) {
                    // Determine if this is a read or write reference
                    let kind = self.determine_reference_kind(covering_node);
                    let target =
                        ReferenceTarget::new(self.model.file(), covering_node.node().range(), kind);
                    self.references.push(target);
                }
            }
        }
    }

    /// Check if `Vec<NavigationTarget>` match our target definitions
    fn navigation_targets_match(&self, current_targets: &NavigationTargets) -> bool {
        // Since we're comparing the same symbol, all definitions should be equivalent
        // We only need to check against the first target definition
        if let Some(first_target) = self.target_definitions.iter().next() {
            for current_target in current_targets {
                // File + focus range uniquely identify a declaration site.
                if current_target.file == first_target.file
                    && current_target.focus_range == first_target.focus_range
                {
                    return true;
                }
            }
        }
        false
    }

    /// Determine whether a reference is a read or write operation based on its context
    fn determine_reference_kind(&self, covering_node: &CoveringNode<'_>) -> ReferenceKind {
        // Reference kind is only meaningful for DocumentHighlights mode
        if !matches!(self.mode, ReferencesMode::DocumentHighlights) {
            return ReferenceKind::Other;
        }
        // Walk up the ancestors to find the context. The first ancestor that
        // classifies the node wins; the default (no classifying ancestor) is Read.
        for ancestor in self.ancestors.iter().rev() {
            match ancestor {
                // Assignment targets are writes
                AnyNodeRef::StmtAssign(assign) => {
                    // Check if our node is in the targets (left side) of assignment
                    for target in &assign.targets {
                        if Self::expr_contains_range(target, covering_node.node().range()) {
                            return ReferenceKind::Write;
                        }
                    }
                }
                AnyNodeRef::StmtAnnAssign(ann_assign) => {
                    // Check if our node is the target (left side) of annotated assignment
                    if Self::expr_contains_range(&ann_assign.target, covering_node.node().range()) {
                        return ReferenceKind::Write;
                    }
                }
                AnyNodeRef::StmtAugAssign(aug_assign) => {
                    // Check if our node is the target (left side) of augmented assignment
                    if Self::expr_contains_range(&aug_assign.target, covering_node.node().range()) {
                        return ReferenceKind::Write;
                    }
                }
                // For loop targets are writes
                AnyNodeRef::StmtFor(for_stmt) => {
                    if Self::expr_contains_range(&for_stmt.target, covering_node.node().range()) {
                        return ReferenceKind::Write;
                    }
                }
                // With statement targets are writes
                AnyNodeRef::WithItem(with_item) => {
                    if let Some(optional_vars) = &with_item.optional_vars {
                        if Self::expr_contains_range(optional_vars, covering_node.node().range()) {
                            return ReferenceKind::Write;
                        }
                    }
                }
                // Exception handler names are writes
                AnyNodeRef::ExceptHandlerExceptHandler(handler) => {
                    if let Some(name) = &handler.name {
                        if Self::node_contains_range(
                            AnyNodeRef::from(name),
                            covering_node.node().range(),
                        ) {
                            return ReferenceKind::Write;
                        }
                    }
                }
                // Function/class names and parameter names are declarations,
                // which are neither reads nor writes.
                AnyNodeRef::StmtFunctionDef(func) => {
                    if Self::node_contains_range(
                        AnyNodeRef::from(&func.name),
                        covering_node.node().range(),
                    ) {
                        return ReferenceKind::Other;
                    }
                }
                AnyNodeRef::StmtClassDef(class) => {
                    if Self::node_contains_range(
                        AnyNodeRef::from(&class.name),
                        covering_node.node().range(),
                    ) {
                        return ReferenceKind::Other;
                    }
                }
                AnyNodeRef::Parameter(param) => {
                    if Self::node_contains_range(
                        AnyNodeRef::from(&param.name),
                        covering_node.node().range(),
                    ) {
                        return ReferenceKind::Other;
                    }
                }
                AnyNodeRef::StmtGlobal(_) | AnyNodeRef::StmtNonlocal(_) => {
                    return ReferenceKind::Other;
                }
                _ => {}
            }
        }
        // Default to read
        ReferenceKind::Read
    }

    /// Helper to check if a node contains a given range
    fn node_contains_range(node: AnyNodeRef<'_>, range: TextRange) -> bool {
        node.range().contains_range(range)
    }

    /// Helper to check if an expression contains a given range
    fn expr_contains_range(expr: &ast::Expr, range: TextRange) -> bool {
        expr.range().contains_range(range)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/selection_range.rs | crates/ty_ide/src/selection_range.rs | use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::find_node::covering_node;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Db;
/// Computes the selection-range hierarchy around `offset`: a list of nested
/// ranges where every entry contains all later ones, starting with the widest
/// range that covers the cursor position.
pub fn selection_range(db: &dyn Db, file: File, offset: TextSize) -> Vec<TextRange> {
    let module = parsed_module(db, file).load(db);
    let cursor = TextRange::empty(offset);
    let covering = covering_node(module.syntax().into(), cursor);

    let mut ranges: Vec<TextRange> = Vec::new();
    // Walk the ancestor chain outside-in so the root-level range lands first
    // and every subsequent entry narrows the previous one.
    for ancestor in covering.ancestors().rev() {
        if !should_include_in_selection(ancestor) {
            continue;
        }
        let candidate = ancestor.range();
        // Parent and child nodes can share the exact same range; keep only
        // the first occurrence to avoid duplicate selection steps.
        if ranges.last() != Some(&candidate) {
            ranges.push(candidate);
        }
    }
    ranges
}
/// Decides whether a node contributes a step to the selection-range
/// hierarchy; nodes excluded here don't provide meaningful selections.
///
/// We will likely need to tune this based on user feedback: some users may
/// prefer finer-grained selections while others may prefer coarser-grained.
fn should_include_in_selection(node: ruff_python_ast::AnyNodeRef) -> bool {
    use ruff_python_ast::AnyNodeRef;
    // Expression statements don't represent a meaningful semantic unit for
    // selection; every other node currently participates.
    !matches!(node, AnyNodeRef::StmtExpr(_))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::CursorTest;
    use insta::assert_snapshot;
    use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span};
    use ruff_db::files::FileRange;
    use ruff_text_size::Ranged;

    /// Test selection range on a simple expression
    #[test]
    fn test_selection_range_simple_expression() {
        let test = CursorTest::builder()
            .source(
                "main.py",
                "
x = 1 + <CURSOR>2
",
            )
            .build();

        assert_snapshot!(test.selection_range(), @r"
        info[selection-range]: Selection Range 0
         --> main.py:1:1
          |
        1 | /
        2 | | x = 1 + 2
          | |__________^
          |

        info[selection-range]: Selection Range 1
         --> main.py:2:1
          |
        2 | x = 1 + 2
          | ^^^^^^^^^
          |

        info[selection-range]: Selection Range 2
         --> main.py:2:5
          |
        2 | x = 1 + 2
          |     ^^^^^
          |

        info[selection-range]: Selection Range 3
         --> main.py:2:9
          |
        2 | x = 1 + 2
          |         ^
          |
        ");
    }

    /// Test selection range on a function call
    #[test]
    fn test_selection_range_function_call() {
        let test = CursorTest::builder()
            .source(
                "main.py",
                "
print(\"he<CURSOR>llo\")
",
            )
            .build();

        assert_snapshot!(test.selection_range(), @r#"
        info[selection-range]: Selection Range 0
         --> main.py:1:1
          |
        1 | /
        2 | | print("hello")
          | |_______________^
          |

        info[selection-range]: Selection Range 1
         --> main.py:2:1
          |
        2 | print("hello")
          | ^^^^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 2
         --> main.py:2:6
          |
        2 | print("hello")
          |      ^^^^^^^^^
          |

        info[selection-range]: Selection Range 3
         --> main.py:2:7
          |
        2 | print("hello")
          |       ^^^^^^^
          |
        "#);
    }

    /// Test selection range on a function definition
    #[test]
    fn test_selection_range_function_definition() {
        let test = CursorTest::builder()
            .source(
                "main.py",
                "
def my_<CURSOR>function():
    return 42
",
            )
            .build();

        assert_snapshot!(test.selection_range(), @r"
        info[selection-range]: Selection Range 0
         --> main.py:1:1
          |
        1 | /
        2 | | def my_function():
        3 | |     return 42
          | |______________^
          |

        info[selection-range]: Selection Range 1
         --> main.py:2:1
          |
        2 | / def my_function():
        3 | |     return 42
          | |_____________^
          |

        info[selection-range]: Selection Range 2
         --> main.py:2:5
          |
        2 | def my_function():
          |     ^^^^^^^^^^^
        3 |     return 42
          |
        ");
    }

    /// Test selection range on a class definition
    #[test]
    fn test_selection_range_class_definition() {
        let test = CursorTest::builder()
            .source(
                "main.py",
                "
class My<CURSOR>Class:
    def __init__(self):
        self.value = 1
",
            )
            .build();

        assert_snapshot!(test.selection_range(), @r"
        info[selection-range]: Selection Range 0
         --> main.py:1:1
          |
        1 | /
        2 | | class MyClass:
        3 | |     def __init__(self):
        4 | |         self.value = 1
          | |_______________________^
          |

        info[selection-range]: Selection Range 1
         --> main.py:2:1
          |
        2 | / class MyClass:
        3 | |     def __init__(self):
        4 | |         self.value = 1
          | |______________________^
          |

        info[selection-range]: Selection Range 2
         --> main.py:2:7
          |
        2 | class MyClass:
          |       ^^^^^^^
        3 |     def __init__(self):
        4 |         self.value = 1
          |
        ");
    }

    /// Test selection range on a deeply nested expression with comprehension, lambda, and subscript
    #[test]
    fn test_selection_range_deeply_nested_expression() {
        let test = CursorTest::builder()
            .source(
                "main.py",
                "
result = [(lambda x: x[key.<CURSOR>attr])(item) for item in data if item is not None]
",
            )
            .build();

        assert_snapshot!(test.selection_range(), @r"
        info[selection-range]: Selection Range 0
         --> main.py:1:1
          |
        1 | /
        2 | | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          | |______________________________________________________________________________^
          |

        info[selection-range]: Selection Range 1
         --> main.py:2:1
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 2
         --> main.py:2:10
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 3
         --> main.py:2:11
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 4
         --> main.py:2:12
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |            ^^^^^^^^^^^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 5
         --> main.py:2:22
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |                      ^^^^^^^^^^^
          |

        info[selection-range]: Selection Range 6
         --> main.py:2:24
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |                        ^^^^^^^^
          |

        info[selection-range]: Selection Range 7
         --> main.py:2:28
          |
        2 | result = [(lambda x: x[key.attr])(item) for item in data if item is not None]
          |                            ^^^^
          |
        ");
    }

    impl CursorTest {
        /// Renders the selection-range hierarchy at the cursor as one
        /// diagnostic per range, for snapshot comparison.
        fn selection_range(&self) -> String {
            let ranges = selection_range(&self.db, self.cursor.file, self.cursor.offset);
            if ranges.is_empty() {
                return "No selection range found".to_string();
            }
            // Create one diagnostic per range for clearer visualization
            let diagnostics: Vec<SelectionRangeDiagnostic> = ranges
                .iter()
                .enumerate()
                .map(|(index, &range)| {
                    SelectionRangeDiagnostic::new(FileRange::new(self.cursor.file, range), index)
                })
                .collect();
            self.render_diagnostics(diagnostics)
        }
    }

    // Wraps a single selection range (plus its position in the hierarchy) so
    // it can be rendered as an info diagnostic with source context.
    struct SelectionRangeDiagnostic {
        range: FileRange,
        index: usize,
    }

    impl SelectionRangeDiagnostic {
        fn new(range: FileRange, index: usize) -> Self {
            Self { range, index }
        }
    }

    impl crate::tests::IntoDiagnostic for SelectionRangeDiagnostic {
        fn into_diagnostic(self) -> Diagnostic {
            let mut diagnostic = Diagnostic::new(
                DiagnosticId::Lint(LintName::of("selection-range")),
                Severity::Info,
                format!("Selection Range {}", self.index),
            );
            diagnostic.annotate(Annotation::primary(
                Span::from(self.range.file()).with_range(self.range.range()),
            ));
            diagnostic
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_ide/src/inlay_hints.rs | crates/ty_ide/src/inlay_hints.rs | use std::{fmt, vec};
use crate::{Db, HasNavigationTargets, NavigationTarget};
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::visitor::source_order::{self, SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::{AnyNodeRef, ArgOrKeyword, Expr, ExprUnaryOp, Stmt, UnaryOp};
use ruff_text_size::{Ranged, TextRange, TextSize};
use ty_python_semantic::types::ide_support::inlay_hint_call_argument_details;
use ty_python_semantic::types::{Type, TypeDetail};
use ty_python_semantic::{HasType, SemanticModel};
/// A single inlay hint: extra text rendered inline at `position`.
#[derive(Debug, Clone)]
pub struct InlayHint {
    /// Offset in the source file at which the hint is rendered.
    pub position: TextSize,
    /// Whether this is a variable-type or a call-argument-name hint.
    pub kind: InlayHintKind,
    /// The (possibly multi-part, navigable) text of the hint.
    pub label: InlayHintLabel,
    /// Edits that materialize the hint as real source text, if any.
    pub text_edits: Vec<InlayHintTextEdit>,
}
impl InlayHint {
    /// Builds a `: <type>` hint shown after `expr`, an assignment target
    /// whose right-hand side is `rhs`.
    ///
    /// Returns `None` when the hint would merely repeat the call on the
    /// right-hand side (e.g. `x: T = T()`).
    fn variable_type(
        expr: &Expr,
        rhs: &Expr,
        ty: Type,
        db: &dyn Db,
        allow_edits: bool,
    ) -> Option<Self> {
        let position = expr.range().end();
        // Render the type to a string, and get subspans for all the types that make it up
        let details = ty.display(db).to_string_parts();
        // Filter out repetitive hints like `x: T = T()`
        if call_matches_name(rhs, &details.label) {
            return None;
        }
        // Ok so the idea here is that we potentially have a random soup of spans here,
        // and each byte of the string can have at most one target associated with it.
        // Thankfully, they were generally pushed in print order, with the inner smaller types
        // appearing before the outer bigger ones.
        //
        // So we record where we are in the string, and every time we find a type, we
        // check if it's further along in the string. If it is, great, we give it the
        // span for its range, and then advance where we are.
        let mut offset = 0;
        let mut label_parts = vec![": ".into()];
        for (target, detail) in details.targets.iter().zip(&details.details) {
            match detail {
                TypeDetail::Type(ty) => {
                    let start = target.start().to_usize();
                    let end = target.end().to_usize();
                    // If we skipped over some bytes, push them with no target
                    if start > offset {
                        label_parts.push(details.label[offset..start].into());
                    }
                    // Ok, this is the first type that claimed these bytes, give it the target
                    if start >= offset {
                        let target = ty.navigation_targets(db).into_iter().next();
                        label_parts.push(
                            InlayHintLabelPart::new(&details.label[start..end]).with_target(target),
                        );
                        offset = end;
                    }
                }
                TypeDetail::SignatureStart
                | TypeDetail::SignatureEnd
                | TypeDetail::Parameter(_) => {
                    // Don't care about these
                }
            }
        }
        // "flush" the rest of the label without any target
        if offset < details.label.len() {
            label_parts.push(details.label[offset..details.label.len()].into());
        }
        // Only offer an insertion edit when splicing the rendered type into
        // the source would be syntactically valid at this position.
        let text_edits = if details.is_valid_syntax && allow_edits {
            vec![InlayHintTextEdit {
                range: TextRange::new(position, position),
                new_text: format!(": {}", details.label),
            }]
        } else {
            vec![]
        };
        Some(Self {
            position,
            kind: InlayHintKind::Type,
            label: InlayHintLabel { parts: label_parts },
            text_edits,
        })
    }

    /// Builds a `<name>=` hint shown before a positional call argument.
    fn call_argument_name(
        position: TextSize,
        name: &str,
        navigation_target: Option<NavigationTarget>,
    ) -> Self {
        let label_parts = vec![
            InlayHintLabelPart::new(name).with_target(navigation_target),
            "=".into(),
        ];
        Self {
            position,
            kind: InlayHintKind::CallArgumentName,
            label: InlayHintLabel { parts: label_parts },
            text_edits: vec![],
        }
    }

    /// Returns an adapter that renders the hint's label as plain text.
    pub fn display(&self) -> InlayHintDisplay<'_> {
        InlayHintDisplay { inlay_hint: self }
    }
}
/// The category of an [`InlayHint`].
#[derive(Debug, Clone)]
pub enum InlayHintKind {
    /// A `: <type>` hint after an assignment target.
    Type,
    /// A `<name>=` hint before a positional call argument.
    CallArgumentName,
}
/// The text of an inlay hint, split into parts that can each carry an
/// optional navigation target.
#[derive(Debug, Clone)]
pub struct InlayHintLabel {
    parts: Vec<InlayHintLabelPart>,
}
impl InlayHintLabel {
    /// Borrows the label's parts in display order.
    pub fn parts(&self) -> &[InlayHintLabelPart] {
        self.parts.as_slice()
    }

    /// Consumes the label, yielding its parts.
    pub fn into_parts(self) -> Vec<InlayHintLabelPart> {
        let Self { parts } = self;
        parts
    }
}
/// Adapter that renders an [`InlayHint`]'s label as plain text via `Display`.
pub struct InlayHintDisplay<'a> {
    inlay_hint: &'a InlayHint,
}
impl fmt::Display for InlayHintDisplay<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
        // Concatenate every label part's text; navigation targets are
        // irrelevant for the plain-text rendering.
        self.inlay_hint
            .label
            .parts
            .iter()
            .try_for_each(|part| f.write_str(&part.text))
    }
}
/// One segment of an inlay hint label, optionally linked to a navigation
/// target so clients can make the segment clickable.
#[derive(Default, Debug, Clone)]
pub struct InlayHintLabelPart {
    // The text rendered for this segment.
    text: String,
    // Where to navigate when the segment is activated, if anywhere.
    target: Option<NavigationTarget>,
}
impl InlayHintLabelPart {
    /// Creates a part with the given text and no navigation target.
    pub fn new(text: impl Into<String>) -> Self {
        let text = text.into();
        Self { text, target: None }
    }

    /// The text rendered for this part.
    pub fn text(&self) -> &str {
        self.text.as_str()
    }

    /// Consumes the part, yielding its text.
    pub fn into_text(self) -> String {
        self.text
    }

    /// The navigation target attached to this part, if any.
    pub fn target(&self) -> Option<&NavigationTarget> {
        self.target.as_ref()
    }

    /// Returns this part with its navigation target replaced.
    pub fn with_target(self, target: Option<NavigationTarget>) -> Self {
        Self {
            text: self.text,
            target,
        }
    }
}
impl From<String> for InlayHintLabelPart {
    /// A bare string becomes a target-less label part.
    fn from(s: String) -> Self {
        Self::new(s)
    }
}
impl From<&str> for InlayHintLabelPart {
    /// A bare string slice becomes a target-less label part.
    fn from(s: &str) -> Self {
        Self::new(s)
    }
}
/// A text edit that writes the hinted text into the source file.
#[derive(Debug, Clone)]
pub struct InlayHintTextEdit {
    /// The (typically empty, i.e. insertion-point) range the edit replaces.
    pub range: TextRange,
    /// The text to insert, e.g. `": int"`.
    pub new_text: String,
}
/// Computes the inlay hints for `file` that fall within `range`, honoring
/// the given `settings`.
pub fn inlay_hints(
    db: &dyn Db,
    file: File,
    range: TextRange,
    settings: &InlayHintSettings,
) -> Vec<InlayHint> {
    let module = parsed_module(db, file).load(db);
    let mut collector = InlayHintVisitor::new(db, file, range, settings);
    collector.visit_body(module.suite());
    collector.hints
}
/// Settings to control the behavior of inlay hints.
///
/// Both settings default to `true` (see the [`Default`] impl).
#[derive(Clone, Debug)]
pub struct InlayHintSettings {
    /// Whether to show variable type hints.
    ///
    /// For example, this would enable / disable hints like the ones quoted below:
    /// ```python
    /// x": Literal[1]" = 1
    /// ```
    pub variable_types: bool,
    /// Whether to show call argument names.
    ///
    /// For example, this would enable / disable hints like the ones quoted below:
    /// ```python
    /// def foo(x: int): pass
    /// foo("x="1)
    /// ```
    pub call_argument_names: bool,
    // Add any new setting that enables additional inlays to `any_enabled`.
}
impl InlayHintSettings {
    /// Returns `true` if at least one hint category is enabled.
    pub fn any_enabled(&self) -> bool {
        // Exhaustive destructuring: adding a field without updating this
        // method becomes a compile error.
        let Self {
            variable_types,
            call_argument_names,
        } = self;
        *variable_types || *call_argument_names
    }
}
impl Default for InlayHintSettings {
fn default() -> Self {
Self {
variable_types: true,
call_argument_names: true,
}
}
}
/// AST visitor that collects [`InlayHint`]s for the nodes intersecting a
/// requested range.
struct InlayHintVisitor<'a, 'db> {
    db: &'db dyn Db,
    model: SemanticModel<'db>,
    // Hints collected so far, in source order.
    hints: Vec<InlayHint>,
    // Right-hand side of the assignment whose targets are currently being
    // visited, when it is worth hinting; `None` otherwise.
    assignment_rhs: Option<&'a Expr>,
    // Only nodes intersecting this range are traversed.
    range: TextRange,
    settings: &'a InlayHintSettings,
    // Set while visiting targets of assignments where inserting `: <type>`
    // would be invalid syntax (multi-target or tuple-target assignments).
    in_no_edits_allowed: bool,
}
impl<'a, 'db> InlayHintVisitor<'a, 'db> {
fn new(db: &'db dyn Db, file: File, range: TextRange, settings: &'a InlayHintSettings) -> Self {
Self {
db,
model: SemanticModel::new(db, file),
hints: Vec::new(),
assignment_rhs: None,
range,
settings,
in_no_edits_allowed: false,
}
}
fn add_type_hint(&mut self, expr: &Expr, rhs: &Expr, ty: Type<'db>, allow_edits: bool) {
if !self.settings.variable_types {
return;
}
if let Some(inlay_hint) = InlayHint::variable_type(expr, rhs, ty, self.db, allow_edits) {
self.hints.push(inlay_hint);
}
}
fn add_call_argument_name(
&mut self,
position: TextSize,
name: &str,
navigation_target: Option<NavigationTarget>,
) {
if !self.settings.call_argument_names {
return;
}
if name.starts_with('_') {
return;
}
let inlay_hint = InlayHint::call_argument_name(position, name, navigation_target);
self.hints.push(inlay_hint);
}
}
impl<'a> SourceOrderVisitor<'a> for InlayHintVisitor<'a, '_> {
    fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
        // Only descend into nodes that overlap the requested range.
        if self.range.intersect(node.range()).is_some() {
            TraversalSignal::Traverse
        } else {
            TraversalSignal::Skip
        }
    }

    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        let node = AnyNodeRef::from(stmt);
        if !self.enter_node(node).is_traverse() {
            return;
        }
        match stmt {
            Stmt::Assign(assign) => {
                // Remember the right-hand side while the targets are visited
                // so `visit_expr` can emit variable-type hints for them.
                if !type_hint_is_excessive_for_expr(&assign.value) {
                    self.assignment_rhs = Some(&*assign.value);
                }
                // Multi-target and tuple-target assignments can't accept a
                // `: <type>` insertion, so suppress text edits for them.
                if !annotations_are_valid_syntax(assign) {
                    self.in_no_edits_allowed = true;
                }
                for target in &assign.targets {
                    self.visit_expr(target);
                }
                // Reset the per-assignment state before descending into the
                // value so nested expressions don't produce spurious hints.
                self.in_no_edits_allowed = false;
                self.assignment_rhs = None;
                self.visit_expr(&assign.value);
                return;
            }
            Stmt::Expr(expr) => {
                self.visit_expr(&expr.value);
                return;
            }
            // TODO
            Stmt::FunctionDef(_) => {}
            Stmt::For(_) => {}
            _ => {}
        }
        source_order::walk_stmt(self, stmt);
    }

    fn visit_expr(&mut self, expr: &'a Expr) {
        match expr {
            // A name in store context inside an assignment is a candidate
            // for a variable-type hint.
            Expr::Name(name) => {
                if let Some(rhs) = self.assignment_rhs {
                    if name.ctx.is_store() {
                        if let Some(ty) = expr.inferred_type(&self.model) {
                            self.add_type_hint(expr, rhs, ty, !self.in_no_edits_allowed);
                        }
                    }
                }
                source_order::walk_expr(self, expr);
            }
            // Same for attribute targets like `self.x = ...`.
            Expr::Attribute(attribute) => {
                if let Some(rhs) = self.assignment_rhs {
                    if attribute.ctx.is_store() {
                        if let Some(ty) = expr.inferred_type(&self.model) {
                            self.add_type_hint(expr, rhs, ty, !self.in_no_edits_allowed);
                        }
                    }
                }
                source_order::walk_expr(self, expr);
            }
            Expr::Call(call) => {
                let details = inlay_hint_call_argument_details(self.db, &self.model, call)
                    .unwrap_or_default();
                self.visit_expr(&call.func);
                for (index, arg_or_keyword) in call.arguments.arguments_source_order().enumerate() {
                    // Hint the parameter name unless the argument already
                    // "spells" it (e.g. passing `x` for parameter `x`).
                    if let Some((name, parameter_label_offset)) = details.argument_names.get(&index)
                        && !arg_matches_name(&arg_or_keyword, name)
                    {
                        self.add_call_argument_name(
                            arg_or_keyword.range().start(),
                            name,
                            parameter_label_offset.map(NavigationTarget::from),
                        );
                    }
                    self.visit_expr(arg_or_keyword.value());
                }
            }
            _ => {
                source_order::walk_expr(self, expr);
            }
        }
    }
}
/// Does a positional argument expression refer to "the same name" as the
/// parameter it is passed to?
///
/// Used to suppress repetitive inlay hints such as `x=x` or `x=y.x`.
fn arg_matches_name(arg_or_keyword: &ArgOrKeyword, name: &str) -> bool {
    // Keyword arguments already spell their name; only positional args matter.
    let ArgOrKeyword::Arg(arg) = arg_or_keyword else {
        return false;
    };
    let mut current = *arg;
    loop {
        match current {
            // `x=x(1, 2)`: peel off the call and inspect the callee.
            Expr::Call(call) => current = &call.func,
            // `x=x[0]`: peel off the subscript and inspect the value.
            Expr::Subscript(subscript) => current = &subscript.value,
            // `x=x`: the bare name must match.
            Expr::Name(name_expr) => return name_expr.id.as_str() == name,
            // `x=y.x`: the final attribute segment must match.
            Expr::Attribute(attribute) => return attribute.attr.as_str() == name,
            _ => return false,
        }
    }
}
/// Does a call expression invoke a callable whose (last) name equals `name`?
///
/// Suppresses repetitive hints like `x: T = T(...)` while keeping
/// non-trivial ones like `x: T[U] = T()`.
fn call_matches_name(expr: &Expr, name: &str) -> bool {
    if let Expr::Call(call) = expr {
        match call.func.as_ref() {
            // `x: T = T()` is a match
            Expr::Name(func_name) => func_name.id.as_str() == name,
            // `x: T = a.T()` is a match
            Expr::Attribute(attribute) => attribute.attr.as_str() == name,
            _ => false,
        }
    } else {
        false
    }
}
/// Would a variable-type inlay hint on the target of an assignment with this
/// right-hand side be excessive?
///
/// Suppresses hints for assignments like `x = 1` or `x, y = (1, 2)` whose
/// type is already obvious from the literal.
fn type_hint_is_excessive_for_expr(expr: &Expr) -> bool {
    match expr {
        // A tuple is excessive to hint only when every element is.
        Expr::Tuple(tuple) => tuple.elts.iter().all(type_hint_is_excessive_for_expr),
        // Literal forms whose types (`Literal[...]`, `None`, and the `str` /
        // `Template` of f-/t-strings) add nothing over the source text.
        Expr::BytesLiteral(_)
        | Expr::NumberLiteral(_)
        | Expr::BooleanLiteral(_)
        | Expr::StringLiteral(_)
        | Expr::NoneLiteral(_)
        | Expr::FString(_)
        | Expr::TString(_) => true,
        // `+1` and `-1` are still just number literals.
        Expr::UnaryOp(ExprUnaryOp { op, operand, .. })
            if matches!(op, UnaryOp::UAdd | UnaryOp::USub) =>
        {
            matches!(operand.as_ref(), Expr::NumberLiteral(_))
        }
        // Everything else is worth a hint.
        _ => false,
    }
}
/// Whether splicing a `: <type>` annotation into this assignment would still
/// parse: Python permits an annotation only on a single, non-tuple target.
fn annotations_are_valid_syntax(stmt_assign: &ruff_python_ast::StmtAssign) -> bool {
    let targets = &stmt_assign.targets;
    // Chained (`a = b = ...`) and tuple (`a, b = ...`) targets can't carry
    // an annotation.
    targets.len() <= 1 && !targets.iter().any(|target| matches!(target, Expr::Tuple(_)))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::NavigationTarget;
use crate::tests::IntoDiagnostic;
use insta::{assert_snapshot, internals::SettingsBindDropGuard};
use itertools::Itertools;
use ruff_db::{
diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig,
LintName, Severity, Span, SubDiagnostic, SubDiagnosticSeverity,
},
files::{File, FileRange, system_path_to_file},
source::source_text,
};
use ruff_python_trivia::textwrap::dedent;
use ruff_text_size::TextSize;
use ruff_db::system::{DbWithWritableSystem, SystemPathBuf};
use ty_project::ProjectMetadata;
pub(super) fn inlay_hint_test(source: &str) -> InlayHintTest {
const START: &str = "<START>";
const END: &str = "<END>";
let mut db = ty_project::TestDb::new(ProjectMetadata::new(
"test".into(),
SystemPathBuf::from("/"),
));
db.init_program().unwrap();
let source = dedent(source);
let start = source.find(START);
let end = source
.find(END)
.map(|x| if start.is_some() { x - START.len() } else { x })
.unwrap_or(source.len());
let range = TextRange::new(
TextSize::try_from(start.unwrap_or_default()).unwrap(),
TextSize::try_from(end).unwrap(),
);
let source = source.replace(START, "");
let source = source.replace(END, "");
db.write_file("main.py", source)
.expect("write to memory file system to be successful");
let file = system_path_to_file(&db, "main.py").expect("newly written file to existing");
let mut insta_settings = insta::Settings::clone_current();
insta_settings.add_filter(r#"\\(\w\w|\.|")"#, "/$1");
// Filter out TODO types because they are different between debug and release builds.
insta_settings.add_filter(r"@Todo\(.+\)", "@Todo");
let insta_settings_guard = insta_settings.bind_to_scope();
InlayHintTest {
db,
file,
range,
_insta_settings_guard: insta_settings_guard,
}
}
pub(super) struct InlayHintTest {
pub(super) db: ty_project::TestDb,
pub(super) file: File,
pub(super) range: TextRange,
_insta_settings_guard: SettingsBindDropGuard,
}
impl InlayHintTest {
/// Returns the inlay hints for the given test case.
///
/// All inlay hints are generated using the applicable settings. Use
/// [`inlay_hints_with_settings`] to generate hints with custom settings.
///
/// [`inlay_hints_with_settings`]: Self::inlay_hints_with_settings
fn inlay_hints(&mut self) -> String {
self.inlay_hints_with_settings(&InlayHintSettings {
variable_types: true,
call_argument_names: true,
})
}
fn with_extra_file(&mut self, file_name: &str, content: &str) {
self.db.write_file(file_name, content).unwrap();
}
/// Returns the inlay hints for the given test case with custom settings.
fn inlay_hints_with_settings(&mut self, settings: &InlayHintSettings) -> String {
let hints = inlay_hints(&self.db, self.file, self.range, settings);
let mut inlay_hint_buf = source_text(&self.db, self.file).as_str().to_string();
let mut text_edit_buf = inlay_hint_buf.clone();
let mut tbd_diagnostics = Vec::new();
let mut offset = 0;
let mut edit_offset = 0;
for hint in hints {
let end_position = hint.position.to_usize() + offset;
let mut hint_str = "[".to_string();
for part in hint.label.parts() {
if let Some(target) = part.target().cloned() {
let part_position = u32::try_from(end_position + hint_str.len()).unwrap();
let part_len = u32::try_from(part.text().len()).unwrap();
let label_range =
TextRange::at(TextSize::new(part_position), TextSize::new(part_len));
tbd_diagnostics.push((label_range, target));
}
hint_str.push_str(part.text());
}
for edit in hint.text_edits {
let start = edit.range.start().to_usize() + edit_offset;
let end = edit.range.end().to_usize() + edit_offset;
text_edit_buf.replace_range(start..end, &edit.new_text);
if start == end {
edit_offset += edit.new_text.len();
} else {
edit_offset += edit.new_text.len() - edit.range.len().to_usize();
}
}
hint_str.push(']');
offset += hint_str.len();
inlay_hint_buf.insert_str(end_position, &hint_str);
}
self.db.write_file("main2.py", &inlay_hint_buf).unwrap();
let inlayed_file =
system_path_to_file(&self.db, "main2.py").expect("newly written file to existing");
let location_diagnostics = tbd_diagnostics.into_iter().map(|(label_range, target)| {
InlayHintLocationDiagnostic::new(FileRange::new(inlayed_file, label_range), &target)
});
let mut rendered_diagnostics = location_diagnostics
.map(|diagnostic| self.render_diagnostic(diagnostic))
.join("");
if !rendered_diagnostics.is_empty() {
rendered_diagnostics = format!(
"{}{}",
crate::MarkupKind::PlainText.horizontal_line(),
rendered_diagnostics
.strip_suffix("\n")
.unwrap_or(&rendered_diagnostics)
);
}
let rendered_edit_diagnostic = if edit_offset != 0 {
let edit_diagnostic = InlayHintEditDiagnostic::new(text_edit_buf);
let text_edit_buf = self.render_diagnostic(edit_diagnostic);
format!(
"{}{}",
crate::MarkupKind::PlainText.horizontal_line(),
text_edit_buf
)
} else {
String::new()
};
format!("{inlay_hint_buf}{rendered_diagnostics}{rendered_edit_diagnostic}",)
}
fn render_diagnostic<D>(&self, diagnostic: D) -> String
where
D: IntoDiagnostic,
{
use std::fmt::Write;
let mut buf = String::new();
let config = DisplayDiagnosticConfig::default()
.color(false)
.format(DiagnosticFormat::Full);
let diag = diagnostic.into_diagnostic();
write!(buf, "{}", diag.display(&self.db, &config)).unwrap();
buf
}
}
// Verifies inlay type hints for simple assignment statements: targets whose
// inferred type is not spelled in the source get a `[: Type]` hint (rendered
// inline after the target), literal-typed values propagate as `Literal[...]`
// through chained assignments, and the original literal assignments
// (`x = 1`, `aa = b'foo'`) get no hint.
#[test]
fn test_assign_statement() {
let mut test = inlay_hint_test(
"
def i(x: int, /) -> int:
return x
x = 1
y = x
z = i(1)
w = z
aa = b'foo'
bb = aa
",
);
// Snapshot has three sections separated by horizontal rules: the hinted
// source, the hint-target location diagnostics (where each hinted symbol
// is declared, e.g. `Literal` in typing.pyi / `int` in builtins.pyi), and
// the file after applying the hints' text edits as real annotations.
assert_snapshot!(test.inlay_hints(), @r#"
def i(x: int, /) -> int:
return x
x = 1
y[: Literal[1]] = x
z[: int] = i(1)
w[: int] = z
aa = b'foo'
bb[: Literal[b"foo"]] = aa
---------------------------------------------
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/typing.pyi:487:1
|
485 | """
486 |
487 | Literal: _SpecialForm
| ^^^^^^^
488 | """Special typing form to define literal types (a.k.a. value types).
|
info: Source
--> main2.py:6:5
|
5 | x = 1
6 | y[: Literal[1]] = x
| ^^^^^^^
7 | z[: int] = i(1)
8 | w[: int] = z
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:6:13
|
5 | x = 1
6 | y[: Literal[1]] = x
| ^
7 | z[: int] = i(1)
8 | w[: int] = z
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:7:5
|
5 | x = 1
6 | y[: Literal[1]] = x
7 | z[: int] = i(1)
| ^^^
8 | w[: int] = z
9 | aa = b'foo'
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:8:5
|
6 | y[: Literal[1]] = x
7 | z[: int] = i(1)
8 | w[: int] = z
| ^^^
9 | aa = b'foo'
10 | bb[: Literal[b"foo"]] = aa
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/typing.pyi:487:1
|
485 | """
486 |
487 | Literal: _SpecialForm
| ^^^^^^^
488 | """Special typing form to define literal types (a.k.a. value types).
|
info: Source
--> main2.py:10:6
|
8 | w[: int] = z
9 | aa = b'foo'
10 | bb[: Literal[b"foo"]] = aa
| ^^^^^^^
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:1448:7
|
1447 | @disjoint_base
1448 | class bytes(Sequence[int]):
| ^^^^^
1449 | """bytes(iterable_of_ints) -> bytes
1450 | bytes(string, encoding[, errors]) -> bytes
|
info: Source
--> main2.py:10:14
|
8 | w[: int] = z
9 | aa = b'foo'
10 | bb[: Literal[b"foo"]] = aa
| ^^^^^^
|
---------------------------------------------
info[inlay-hint-edit]: File after edits
info: Source
def i(x: int, /) -> int:
return x
x = 1
y: Literal[1] = x
z: int = i(1)
w: int = z
aa = b'foo'
bb: Literal[b"foo"] = aa
"#);
}
// Verifies inlay type hints for tuple-unpacking assignments with a
// parenthesized tuple on the right-hand side: each individual target
// (`x2`, `y2`, ...) gets its own `[: Type]` hint with the element type
// distributed from the tuple, while the first literal unpacking
// (`x1, y1 = (1, 'abc')`) gets no hints.
#[test]
fn test_unpacked_tuple_assignment() {
let mut test = inlay_hint_test(
"
def i(x: int, /) -> int:
return x
def s(x: str, /) -> str:
return x
x1, y1 = (1, 'abc')
x2, y2 = (x1, y1)
x3, y3 = (i(1), s('abc'))
x4, y4 = (x3, y3)
",
);
// Snapshot: hinted source first, then a location diagnostic per hinted
// symbol (`Literal` in typing.pyi, `int`/`str` in builtins.pyi). No
// "File after edits" section follows, so no text edits are attached here.
assert_snapshot!(test.inlay_hints(), @r#"
def i(x: int, /) -> int:
return x
def s(x: str, /) -> str:
return x
x1, y1 = (1, 'abc')
x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
x3[: int], y3[: str] = (i(1), s('abc'))
x4[: int], y4[: str] = (x3, y3)
---------------------------------------------
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/typing.pyi:487:1
|
485 | """
486 |
487 | Literal: _SpecialForm
| ^^^^^^^
488 | """Special typing form to define literal types (a.k.a. value types).
|
info: Source
--> main2.py:8:6
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
| ^^^^^^^
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:8:14
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
| ^
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/typing.pyi:487:1
|
485 | """
486 |
487 | Literal: _SpecialForm
| ^^^^^^^
488 | """Special typing form to define literal types (a.k.a. value types).
|
info: Source
--> main2.py:8:24
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
| ^^^^^^^
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:915:7
|
914 | @disjoint_base
915 | class str(Sequence[str]):
| ^^^
916 | """str(object='') -> str
917 | str(bytes_or_buffer[, encoding[, errors]]) -> str
|
info: Source
--> main2.py:8:32
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
| ^^^^^
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:9:6
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
9 | x3[: int], y3[: str] = (i(1), s('abc'))
| ^^^
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:915:7
|
914 | @disjoint_base
915 | class str(Sequence[str]):
| ^^^
916 | """str(object='') -> str
917 | str(bytes_or_buffer[, encoding[, errors]]) -> str
|
info: Source
--> main2.py:9:17
|
7 | x1, y1 = (1, 'abc')
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
9 | x3[: int], y3[: str] = (i(1), s('abc'))
| ^^^
10 | x4[: int], y4[: str] = (x3, y3)
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:348:7
|
347 | @disjoint_base
348 | class int:
| ^^^
349 | """int([x]) -> integer
350 | int(x, base=10) -> integer
|
info: Source
--> main2.py:10:6
|
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
| ^^^
|
info[inlay-hint-location]: Inlay Hint Target
--> stdlib/builtins.pyi:915:7
|
914 | @disjoint_base
915 | class str(Sequence[str]):
| ^^^
916 | """str(object='') -> str
917 | str(bytes_or_buffer[, encoding[, errors]]) -> str
|
info: Source
--> main2.py:10:17
|
8 | x2[: Literal[1]], y2[: Literal["abc"]] = (x1, y1)
9 | x3[: int], y3[: str] = (i(1), s('abc'))
10 | x4[: int], y4[: str] = (x3, y3)
| ^^^
|
"#);
}
#[test]
fn test_multiple_assignment() {
let mut test = inlay_hint_test(
"
def i(x: int, /) -> int:
return x
def s(x: str, /) -> str:
return x
x1, y1 = 1, 'abc'
x2, y2 = x1, y1
x3, y3 = i(1), s('abc')
x4, y4 = x3, y3
",
);
assert_snapshot!(test.inlay_hints(), @r#"
def i(x: int, /) -> int:
return x
def s(x: str, /) -> str:
return x
x1, y1 = 1, 'abc'
x2[: Literal[1]], y2[: Literal["abc"]] = x1, y1
x3[: int], y3[: str] = i(1), s('abc')
x4[: int], y4[: str] = x3, y3
---------------------------------------------
info[inlay-hint-location]: Inlay Hint Target
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.