repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_final_literal.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_final_literal.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, comparable::ComparableExpr};
use ruff_text_size::{Ranged, TextSize};
use crate::Locator;
use crate::checkers::ast::Checker;
use crate::fix::snippet::SourceCodeSnippet;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for redundant `Final[Literal[...]]` annotations.
///
/// ## Why is this bad?
/// All constant variables annotated as `Final` are understood as implicitly
/// having `Literal` types by a type checker. As such, a `Final[Literal[...]]`
/// annotation can often be replaced with a bare `Final` annotation, which
/// will have the same meaning to the type checker while being more concise and
/// more readable.
///
/// ## Example
///
/// ```pyi
/// from typing import Final, Literal
///
/// x: Final[Literal[42]]
/// y: Final[Literal[42]] = 42
/// ```
///
/// Use instead:
/// ```pyi
/// from typing import Final, Literal
///
/// x: Final = 42
/// y: Final = 42
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct RedundantFinalLiteral {
    // Source text of the expression inside `Literal[...]`, shown in the message.
    literal: SourceCodeSnippet,
}
impl Violation for RedundantFinalLiteral {
    // A fix is only sometimes offered: `redundant_final_literal` skips it when
    // the annotated literal and the assigned value disagree.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let RedundantFinalLiteral { literal } = self;
        format!(
            "`Final[Literal[{literal}]]` can be replaced with a bare `Final`",
            // Truncate long literals so the message stays readable.
            literal = literal.truncated_display()
        )
    }

    fn fix_title(&self) -> Option<String> {
        Some("Replace with `Final`".to_string())
    }
}
/// PYI064: flag `x: Final[Literal[...]]` annotations that can be simplified to
/// a bare `Final` annotation.
pub(crate) fn redundant_final_literal(checker: &Checker, ann_assign: &ast::StmtAnnAssign) {
    // Fast path: `Final`/`Literal` can only come from a typing module.
    if !checker.semantic().seen_typing() {
        return;
    }
    let ast::StmtAnnAssign {
        value: assign_value,
        annotation,
        ..
    } = ann_assign;
    // The annotation must be a subscript, i.e. `Something[...]`.
    // NOTE(review): the outer subscript's value is not checked for `Final`
    // here — presumably the caller only invokes this rule for `Final`
    // annotations; confirm at the call site.
    let ast::Expr::Subscript(annotation) = &**annotation else {
        return;
    };
    // Ensure it is `Final[Literal[...]]`: the subscript's slice must itself be
    // a subscript whose value resolves to `typing.Literal`.
    let ast::Expr::Subscript(ast::ExprSubscript {
        value,
        slice: literal,
        ..
    }) = &*annotation.slice
    else {
        return;
    };
    if !checker.semantic().match_typing_expr(value, "Literal") {
        return;
    }
    // Discards tuples like `Literal[1, 2, 3]` and complex literals like `Literal[{1, 2}]`.
    if !matches!(
        &**literal,
        ast::Expr::StringLiteral(_)
            | ast::Expr::BytesLiteral(_)
            | ast::Expr::NumberLiteral(_)
            | ast::Expr::BooleanLiteral(_)
            | ast::Expr::NoneLiteral(_)
            | ast::Expr::EllipsisLiteral(_)
    ) {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        RedundantFinalLiteral {
            literal: SourceCodeSnippet::from_str(checker.locator().slice(literal.range())),
        },
        ann_assign.range(),
    );
    // The literal value and the assignment value being different doesn't make sense, so we skip
    // fixing in that case.
    if let Some(assign_value) = assign_value.as_ref() {
        // `x: Final[Literal[42]] = 42` -> keep the assignment, drop `Literal[...]`.
        if ComparableExpr::from(assign_value) == ComparableExpr::from(literal) {
            diagnostic.set_fix(generate_fix(annotation, None, checker.locator()));
        }
    } else {
        // `x: Final[Literal[42]]` -> synthesize ` = 42` from the literal itself.
        diagnostic.set_fix(generate_fix(annotation, Some(literal), checker.locator()));
    }
}
/// Generate a fix that rewrites a `Final[Literal[...]]` annotation as a bare
/// `Final` annotation.
///
/// When `literal` is provided, an ` = <literal>` assignment is appended after
/// the annotation (e.g. `x: Final[Literal[42]]` becomes `x: Final = 42`).
fn generate_fix(
    annotation: &ast::ExprSubscript,
    literal: Option<&ast::Expr>,
    locator: &Locator,
) -> Fix {
    // Widen the `Literal[...]` slice range by one character on each side so
    // the surrounding `[` and `]` are deleted as well, leaving a bare `Final`.
    let slice_range = annotation.slice.range();
    let removal = Edit::range_deletion(
        slice_range
            .sub_start(TextSize::new(1))
            .add_end(TextSize::new(1)),
    );
    match literal {
        // No assignment present: materialize ` = <literal>` after the
        // annotation so the literal value is preserved.
        Some(literal) => {
            let literal_source = locator.slice(literal);
            let insertion = Edit::insertion(format!(" = {literal_source}"), annotation.end());
            Fix::safe_edits(removal, std::iter::once(insertion))
        }
        None => Fix::safe_edit(removal),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/no_return_argument_annotation.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/no_return_argument_annotation.rs | use std::fmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use ruff_python_ast::PythonVersion;
/// ## What it does
/// Checks for uses of `typing.NoReturn` (and `typing_extensions.NoReturn`) for
/// parameter annotations.
///
/// ## Why is this bad?
/// Prefer `Never` over `NoReturn` for parameter annotations. `Never` has a
/// clearer name in these contexts, since it makes little sense to talk about a
/// parameter annotation "not returning".
///
/// This is a purely stylistic lint: the two types have identical semantics for
/// type checkers. Both represent Python's "[bottom type]" (a type that has no
/// members).
///
/// ## Example
/// ```pyi
/// from typing import NoReturn
///
/// def foo(x: NoReturn): ...
/// ```
///
/// Use instead:
/// ```pyi
/// from typing import Never
///
/// def foo(x: Never): ...
/// ```
///
/// ## References
/// - [Python documentation: `typing.Never`](https://docs.python.org/3/library/typing.html#typing.Never)
/// - [Python documentation: `typing.NoReturn`](https://docs.python.org/3/library/typing.html#typing.NoReturn)
///
/// [bottom type]: https://en.wikipedia.org/wiki/Bottom_type
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.272")]
pub(crate) struct NoReturnArgumentAnnotationInStub {
    // The module from which `Never` should be imported in the suggestion,
    // chosen based on the target Python version.
    module: TypingModule,
}

impl Violation for NoReturnArgumentAnnotationInStub {
    #[derive_message_formats]
    fn message(&self) -> String {
        let NoReturnArgumentAnnotationInStub { module } = self;
        format!("Prefer `{module}.Never` over `NoReturn` for argument annotations")
    }
}
/// PYI050: flag parameter annotations that use `NoReturn` instead of `Never`.
///
/// Applies to every kind of parameter:
/// Ex) def func(arg: NoReturn): ...
/// Ex) def func(arg: NoReturn, /): ...
/// Ex) def func(*, arg: NoReturn): ...
/// Ex) def func(*args: NoReturn): ...
/// Ex) def func(**kwargs: NoReturn): ...
pub(crate) fn no_return_argument_annotation(checker: &Checker, parameters: &ast::Parameters) {
    for parameter in parameters.iter() {
        let Some(annotation) = parameter.annotation() else {
            continue;
        };
        if !is_no_return(annotation, checker) {
            continue;
        }
        // `typing.Never` only exists on Python 3.11+; otherwise point users at
        // the `typing_extensions` backport.
        let module = if checker.target_version() >= PythonVersion::PY311 {
            TypingModule::Typing
        } else {
            TypingModule::TypingExtensions
        };
        checker.report_diagnostic(
            NoReturnArgumentAnnotationInStub { module },
            annotation.range(),
        );
    }
}
/// Returns `true` if `expr` (possibly a stringized annotation) resolves to
/// `NoReturn` from a typing module.
fn is_no_return(expr: &ast::Expr, checker: &Checker) -> bool {
    checker.match_maybe_stringized_annotation(expr, |inner| {
        checker.semantic().match_typing_expr(inner, "NoReturn")
    })
}
/// The module from which `Never` should be imported, as rendered in the
/// diagnostic message.
#[derive(Debug, PartialEq, Eq)]
enum TypingModule {
    Typing,
    TypingExtensions,
}

impl fmt::Display for TypingModule {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            TypingModule::Typing => "typing",
            TypingModule::TypingExtensions => "typing_extensions",
        };
        fmt.write_str(name)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/mod.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/mod.rs | use std::fmt;
use anyhow::Result;
use ruff_python_ast::{Expr, ExprContext, ExprName, ExprSubscript, ExprTuple, name::Name};
use ruff_python_codegen::Generator;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::TypingImporter;
use crate::{Applicability, Edit, Fix};
pub(crate) use any_eq_ne_annotation::*;
pub(crate) use bad_generator_return_type::*;
pub(crate) use bad_version_info_comparison::*;
pub(crate) use bytestring_usage::*;
pub(crate) use collections_named_tuple::*;
pub(crate) use complex_assignment_in_stub::*;
pub(crate) use complex_if_statement_in_stub::*;
pub(crate) use custom_type_var_for_self::*;
pub(crate) use docstring_in_stubs::*;
pub(crate) use duplicate_literal_member::*;
pub(crate) use duplicate_union_member::*;
pub(crate) use ellipsis_in_non_empty_class_body::*;
pub(crate) use exit_annotations::*;
pub(crate) use future_annotations_in_stub::*;
pub(crate) use generic_not_last_base_class::*;
pub(crate) use iter_method_return_iterable::*;
pub(crate) use no_return_argument_annotation::*;
pub(crate) use non_empty_stub_body::*;
pub(crate) use non_self_return_type::*;
pub(crate) use numeric_literal_too_long::*;
pub(crate) use pass_in_class_body::*;
pub(crate) use pass_statement_stub_body::*;
pub(crate) use pre_pep570_positional_argument::*;
pub(crate) use prefix_type_params::*;
pub(crate) use quoted_annotation_in_stub::*;
pub(crate) use redundant_final_literal::*;
pub(crate) use redundant_literal_union::*;
pub(crate) use redundant_none_literal::*;
pub(crate) use redundant_numeric_union::*;
pub(crate) use simple_defaults::*;
pub(crate) use str_or_repr_defined_in_stub::*;
pub(crate) use string_or_bytes_too_long::*;
pub(crate) use stub_body_multiple_statements::*;
pub(crate) use type_alias_naming::*;
pub(crate) use type_comment_in_stub::*;
pub(crate) use unaliased_collections_abc_set_import::*;
pub(crate) use unnecessary_literal_union::*;
pub(crate) use unnecessary_type_union::*;
pub(crate) use unrecognized_platform::*;
pub(crate) use unrecognized_version_info::*;
pub(crate) use unsupported_method_call_on_all::*;
pub(crate) use unused_private_type_definition::*;
mod any_eq_ne_annotation;
mod bad_generator_return_type;
mod bad_version_info_comparison;
mod bytestring_usage;
mod collections_named_tuple;
mod complex_assignment_in_stub;
mod complex_if_statement_in_stub;
mod custom_type_var_for_self;
mod docstring_in_stubs;
mod duplicate_literal_member;
mod duplicate_union_member;
mod ellipsis_in_non_empty_class_body;
mod exit_annotations;
mod future_annotations_in_stub;
mod generic_not_last_base_class;
mod iter_method_return_iterable;
mod no_return_argument_annotation;
mod non_empty_stub_body;
mod non_self_return_type;
mod numeric_literal_too_long;
mod pass_in_class_body;
mod pass_statement_stub_body;
mod pre_pep570_positional_argument;
mod prefix_type_params;
mod quoted_annotation_in_stub;
mod redundant_final_literal;
mod redundant_literal_union;
mod redundant_none_literal;
mod redundant_numeric_union;
mod simple_defaults;
mod str_or_repr_defined_in_stub;
mod string_or_bytes_too_long;
mod stub_body_multiple_statements;
mod type_alias_naming;
mod type_comment_in_stub;
mod unaliased_collections_abc_set_import;
mod unnecessary_literal_union;
mod unnecessary_type_union;
mod unrecognized_platform;
mod unrecognized_version_info;
mod unsupported_method_call_on_all;
mod unused_private_type_definition;
// TODO(charlie): Replace this with a common utility for selecting the appropriate source
// module for a given `typing` member.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TypingModule {
    Typing,
    TypingExtensions,
}

impl TypingModule {
    /// The runtime module name, exactly as written in source code.
    fn as_str(self) -> &'static str {
        match self {
            Self::Typing => "typing",
            Self::TypingExtensions => "typing_extensions",
        }
    }
}

impl fmt::Display for TypingModule {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}", self.as_str())
    }
}
/// Generate a [`Fix`] for two or more type expressions, e.g. `typing.Union[int, float, complex]`.
///
/// Resolves (and, if necessary, imports) a `Union` binding via `importer`,
/// then replaces `annotation` with `Union[...]` subscripting all of `nodes`.
///
/// # Errors
/// Returns an error if the `Union` import edit cannot be created.
fn generate_union_fix(
    generator: Generator,
    importer: &TypingImporter,
    nodes: Vec<&Expr>,
    annotation: &Expr,
    applicability: Applicability,
) -> Result<Fix> {
    debug_assert!(nodes.len() >= 2, "At least two nodes required");
    // Request the `Union` binding (importing it if needed) at the position of
    // the annotation being replaced.
    let (import_edit, binding) = importer.import(annotation.start())?;
    // Construct the expression as `Subscript[typing.Union, Tuple[expr, [expr, ...]]]`
    let new_expr = Expr::Subscript(ExprSubscript {
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        value: Box::new(Expr::Name(ExprName {
            id: Name::new(binding),
            // NOTE(review): `Store` looks odd for a name that is read;
            // presumably the code generator ignores `ctx` — confirm before
            // changing.
            ctx: ExprContext::Store,
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        })),
        slice: Box::new(Expr::Tuple(ExprTuple {
            elts: nodes.into_iter().cloned().collect(),
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            ctx: ExprContext::Load,
            parenthesized: false,
        })),
        ctx: ExprContext::Load,
    });
    // Replace the annotation text and attach the import edit (if any).
    Ok(Fix::applicable_edits(
        Edit::range_replacement(generator.expr(&new_expr), annotation.range()),
        [import_edit],
        applicability,
    ))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/unrecognized_platform.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/unrecognized_platform.rs | use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
/// ## What it does
/// Check for unrecognized `sys.platform` checks. Platform checks should be
/// simple string comparisons.
///
/// **Note**: this rule is only enabled in `.pyi` stub files.
///
/// ## Why is this bad?
/// Some `sys.platform` checks are too complex for type checkers to
/// understand, and thus result in incorrect inferences by these tools.
/// `sys.platform` checks should be simple string comparisons, like
/// `if sys.platform == "linux"`.
///
/// ## Example
/// ```pyi
/// import sys
///
/// if sys.platform == "xunil"[::-1]:
///     # Linux specific definitions
///     ...
/// else:
///     # Posix specific definitions
///     ...
/// ```
///
/// Instead, use a simple string comparison, such as `==` or `!=`:
/// ```pyi
/// import sys
///
/// if sys.platform == "linux":
///     # Linux specific definitions
///     ...
/// else:
///     # Posix specific definitions
///     ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and Platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.246")]
pub(crate) struct UnrecognizedPlatformCheck;

impl Violation for UnrecognizedPlatformCheck {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Unrecognized `sys.platform` check".to_string()
    }
}
/// ## What it does
/// Check for unrecognized platform names in `sys.platform` checks.
///
/// **Note**: this rule is only enabled in `.pyi` stub files.
///
/// ## Why is this bad?
/// If a `sys.platform` check compares to a platform name outside of a
/// small set of known platforms (e.g. "linux", "win32", etc.), it's likely
/// a typo or a platform name that is not recognized by type checkers.
///
/// The list of known platforms is: "linux", "win32", "cygwin", "darwin".
///
/// ## Example
/// ```pyi
/// import sys
///
/// if sys.platform == "linus": ...
/// ```
///
/// Use instead:
/// ```pyi
/// import sys
///
/// if sys.platform == "linux": ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and Platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.246")]
pub(crate) struct UnrecognizedPlatformName {
    // The unrecognized platform string, verbatim from the comparison.
    platform: String,
}

impl Violation for UnrecognizedPlatformName {
    #[derive_message_formats]
    fn message(&self) -> String {
        let UnrecognizedPlatformName { platform } = self;
        format!("Unrecognized platform `{platform}`")
    }
}
/// PYI007, PYI008: validate `sys.platform` comparisons in stub files.
pub(crate) fn unrecognized_platform(checker: &Checker, test: &Expr) {
    let Expr::Compare(ast::ExprCompare {
        left,
        ops,
        comparators,
        ..
    }) = test
    else {
        return;
    };
    // Only single comparisons (`sys.platform <op> <expr>`) are considered.
    let ([op], [right]) = (&**ops, &**comparators) else {
        return;
    };
    // The left-hand side must resolve to `sys.platform`.
    let is_sys_platform = checker
        .semantic()
        .resolve_qualified_name(left)
        .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["sys", "platform"]));
    if !is_sys_platform {
        return;
    }
    // "in" might also make sense but we don't currently have one.
    if !matches!(op, CmpOp::Eq | CmpOp::NotEq) {
        checker.report_diagnostic_if_enabled(UnrecognizedPlatformCheck, test.range());
        return;
    }
    match right {
        Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
            // Other values are possible but we don't need them right now.
            // This protects against typos.
            if checker.is_rule_enabled(Rule::UnrecognizedPlatformName)
                && !matches!(value.to_str(), "linux" | "win32" | "cygwin" | "darwin")
            {
                checker.report_diagnostic(
                    UnrecognizedPlatformName {
                        platform: value.to_string(),
                    },
                    right.range(),
                );
            }
        }
        _ => {
            // Comparing against anything other than a string literal is too
            // complex for type checkers to understand.
            checker.report_diagnostic_if_enabled(UnrecognizedPlatformCheck, test.range());
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/duplicate_union_member.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/duplicate_union_member.rs | use rustc_hash::FxHashSet;
use std::collections::HashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{AtomicNodeIndex, Expr, ExprBinOp, ExprNoneLiteral, Operator, PythonVersion};
use ruff_python_semantic::analyze::typing::{traverse_union, traverse_union_and_optional};
use ruff_text_size::{Ranged, TextRange, TextSize};
use super::generate_union_fix;
use crate::checkers::ast::Checker;
use crate::preview::is_optional_as_none_in_union_enabled;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for duplicate union members.
///
/// ## Why is this bad?
/// Duplicate union members are redundant and should be removed.
///
/// ## Example
/// ```python
/// foo: str | str
/// ```
///
/// Use instead:
/// ```python
/// foo: str
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe unless the union contains comments.
///
/// For nested union, the fix will flatten type expressions into a single
/// top-level union.
///
/// ## References
/// - [Python documentation: `typing.Union`](https://docs.python.org/3/library/typing.html#typing.Union)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.262")]
pub(crate) struct DuplicateUnionMember {
    // Generated source rendering of the duplicated member (e.g. `int`).
    duplicate_name: String,
}

impl Violation for DuplicateUnionMember {
    // A fix is only sometimes available; see `duplicate_union_member` for the
    // cases in which it is withheld.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Duplicate union member `{}`", self.duplicate_name)
    }

    fn fix_title(&self) -> Option<String> {
        Some(format!(
            "Remove duplicate union member `{}`",
            self.duplicate_name
        ))
    }
}
/// PYI016: report (and, where possible, fix) duplicate members in a union
/// annotation, whether spelled as `typing.Union[...]` or PEP 604 `a | b`.
pub(crate) fn duplicate_union_member<'a>(checker: &Checker, expr: &'a Expr) {
    let mut seen_nodes: HashSet<ComparableExpr<'_>, _> = FxHashSet::default();
    let mut unique_nodes: Vec<&Expr> = Vec::new();
    let mut diagnostics = Vec::new();
    let mut union_type = UnionKind::TypingUnion;
    let mut optional_present = false;
    // Visitor invoked for each union member: records first occurrences in
    // `unique_nodes` and reports a diagnostic for every repeat.
    let mut check_for_duplicate_members = |expr: &'a Expr, parent: &'a Expr| {
        // Any `|` operator anywhere in the union marks it as PEP 604 style.
        if matches!(parent, Expr::BinOp(_)) {
            union_type = UnionKind::PEP604;
        }
        let virtual_expr = if is_optional_as_none_in_union_enabled(checker.settings())
            && is_optional_type(checker, expr)
        {
            // If the union member is an `Optional`, add a virtual `None` literal.
            optional_present = true;
            &VIRTUAL_NONE_LITERAL
        } else {
            expr
        };
        // If we've already seen this union member, raise a violation.
        if seen_nodes.insert(virtual_expr.into()) {
            unique_nodes.push(virtual_expr);
        } else {
            diagnostics.push(checker.report_diagnostic(
                DuplicateUnionMember {
                    duplicate_name: checker.generator().expr(virtual_expr),
                },
                // Use the real expression's range for diagnostics.
                expr.range(),
            ));
        }
    };
    // Traverse the union, collect all diagnostic members
    if is_optional_as_none_in_union_enabled(checker.settings()) {
        traverse_union_and_optional(&mut check_for_duplicate_members, checker.semantic(), expr);
    } else {
        traverse_union(&mut check_for_duplicate_members, checker.semantic(), expr);
    }
    if diagnostics.is_empty() {
        return;
    }
    // Do not reduce `Union[None, ... None]` to avoid introducing a `TypeError` unintentionally
    // e.g. `isinstance(None, Union[None, None])`, if reduced to `isinstance(None, None)`, causes
    // `TypeError: isinstance() arg 2 must be a type, a tuple of types, or a union` to throw.
    if unique_nodes.iter().all(|expr| expr.is_none_literal_expr()) && !optional_present {
        return;
    }
    // Mark [`Fix`] as unsafe when comments are in range.
    let applicability = if checker.comment_ranges().intersects(expr.range()) {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };
    // Generate the flattened fix once.
    let fix = if let &[edit_expr] = unique_nodes.as_slice() {
        // Generate a [`Fix`] for a single type expression, e.g. `int`.
        Some(Fix::applicable_edit(
            Edit::range_replacement(checker.generator().expr(edit_expr), expr.range()),
            applicability,
        ))
    } else {
        match union_type {
            // See redundant numeric union
            UnionKind::PEP604 => Some(generate_pep604_fix(
                checker,
                unique_nodes,
                expr,
                applicability,
            )),
            UnionKind::TypingUnion => {
                // Request `typing.Union`
                let Some(importer) = checker.typing_importer("Union", PythonVersion::lowest())
                else {
                    return;
                };
                generate_union_fix(
                    checker.generator(),
                    &importer,
                    unique_nodes,
                    expr,
                    applicability,
                )
                .ok()
            }
        }
    };
    // Attach the shared fix to every diagnostic raised for this union.
    if let Some(fix) = fix {
        for diagnostic in &mut diagnostics {
            diagnostic.set_fix(fix.clone());
        }
    }
}
/// The syntactic flavor of the union under inspection; determines which fix
/// form is generated.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UnionKind {
    /// E.g., `typing.Union[int, str]`
    TypingUnion,
    /// E.g., `int | str`
    PEP604,
}
/// Generate a [`Fix`] for two or more type expressions, e.g. `int | float | complex`.
fn generate_pep604_fix(
    checker: &Checker,
    nodes: Vec<&Expr>,
    annotation: &Expr,
    applicability: Applicability,
) -> Fix {
    debug_assert!(nodes.len() >= 2, "At least two nodes required");
    // Fold the members left-to-right into a chain of `|` binary operators.
    let union = nodes
        .into_iter()
        .cloned()
        .reduce(|left, right| {
            Expr::BinOp(ExprBinOp {
                left: Box::new(left),
                op: Operator::BitOr,
                right: Box::new(right),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            })
        })
        .expect("`nodes` is guaranteed non-empty by the debug_assert above");
    Fix::applicable_edit(
        Edit::range_replacement(checker.generator().expr(&union), annotation.range()),
        applicability,
    )
}
/// A `None` literal expression with a synthetic (zero-width) range, used as a
/// stand-in union member when an `Optional[...]` wrapper is encountered.
static VIRTUAL_NONE_LITERAL: Expr = Expr::NoneLiteral(ExprNoneLiteral {
    node_index: AtomicNodeIndex::NONE,
    range: TextRange::new(TextSize::new(0), TextSize::new(0)),
});

/// Returns `true` if `expr` resolves to `Optional` from a typing module.
fn is_optional_type(checker: &Checker, expr: &Expr) -> bool {
    checker.semantic().match_typing_expr(expr, "Optional")
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/complex_assignment_in_stub.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/complex_assignment_in_stub.rs | use ruff_python_ast::{Expr, StmtAssign};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for assignments with multiple or non-name targets in stub files.
///
/// ## Why is this bad?
/// In general, stub files should be thought of as "data files" for a type
/// checker, and are not intended to be executed. As such, it's useful to
/// enforce that only a subset of Python syntax is allowed in a stub file, to
/// ensure that everything in the stub is unambiguous for the type checker.
///
/// The need to perform multi-assignment, or assignment to a non-name target,
/// likely indicates a misunderstanding of how stub files are intended to be
/// used.
///
/// ## Example
///
/// ```pyi
/// from typing import TypeAlias
///
/// a = b = int
///
/// class Klass: ...
///
/// Klass.X: TypeAlias = int
/// ```
///
/// Use instead:
///
/// ```pyi
/// from typing import TypeAlias
///
/// a: TypeAlias = int
/// b: TypeAlias = int
///
/// class Klass:
///     X: TypeAlias = int
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.279")]
pub(crate) struct ComplexAssignmentInStub;

impl Violation for ComplexAssignmentInStub {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Stubs should not contain assignments to attributes or multiple targets".to_string()
    }
}
/// PYI017: flag assignments whose target list is anything other than a single
/// bare name (e.g. `a = b = int`, or `Klass.X = int`) in stub files.
pub(crate) fn complex_assignment_in_stub(checker: &Checker, stmt: &StmtAssign) {
    // A single name target (`x = ...`) is the only permitted form.
    if !matches!(stmt.targets.as_slice(), [Expr::Name(_)]) {
        checker.report_diagnostic(ComplexAssignmentInStub, stmt.range);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/type_comment_in_stub.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/type_comment_in_stub.rs | use std::sync::LazyLock;
use regex::Regex;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::CommentRanges;
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
/// ## What it does
/// Checks for the use of type comments (e.g., `x = 1 # type: int`) in stub
/// files.
///
/// ## Why is this bad?
/// Stub (`.pyi`) files should use type annotations directly, rather
/// than type comments, even if they're intended to support Python 2, since
/// stub files are not executed at runtime. The one exception is `# type: ignore`.
///
/// ## Example
/// ```pyi
/// x = 1 # type: int
/// ```
///
/// Use instead:
/// ```pyi
/// x: int = 1
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.254")]
pub(crate) struct TypeCommentInStub;

impl Violation for TypeCommentInStub {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Don't use type comments in stub file".to_string()
    }
}
/// PYI033: flag `# type:` comments in stub files; `# type: ignore` directives
/// are exempt.
pub(crate) fn type_comment_in_stub(
    context: &LintContext,
    locator: &Locator,
    comment_ranges: &CommentRanges,
) {
    for range in comment_ranges {
        let text = locator.slice(range);
        // Report type comments, but allow `# type: ignore` through.
        if TYPE_COMMENT_REGEX.is_match(text) && !TYPE_IGNORE_REGEX.is_match(text) {
            context.report_diagnostic_if_enabled(TypeCommentInStub, range);
        }
    }
}

/// Matches `# type: <annotation>` comments, optionally followed by a trailing comment.
static TYPE_COMMENT_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^#\s*type:\s*([^#]+)(\s*#.*?)?$").unwrap());

/// Matches `# type: ignore` comments, optionally followed by a trailing comment.
static TYPE_IGNORE_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^#\s*type:\s*ignore([^#]+)?(\s*#.*?)?$").unwrap());
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/bytestring_usage.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/bytestring_usage.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{FixAvailability, Violation};
/// ## What it does
/// Checks for uses of `typing.ByteString` or `collections.abc.ByteString`.
///
/// ## Why is this bad?
/// `ByteString` has been deprecated since Python 3.9 and will be removed in
/// Python 3.17. The Python documentation recommends using either
/// `collections.abc.Buffer` (or the `typing_extensions` backport
/// on Python <3.12) or a union like `bytes | bytearray | memoryview` instead.
///
/// ## Example
/// ```python
/// from typing import ByteString
/// ```
///
/// Use instead:
/// ```python
/// from collections.abc import Buffer
/// ```
///
/// ## References
/// - [Python documentation: The `ByteString` type](https://docs.python.org/3/library/typing.html#typing.ByteString)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.6.0")]
pub(crate) struct ByteStringUsage {
    // The module (`typing` or `collections.abc`) from which `ByteString` was
    // referenced.
    origin: ByteStringOrigin,
}

impl Violation for ByteStringUsage {
    // No automatic fix is offered: the appropriate replacement type depends on
    // user intent.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;

    #[derive_message_formats]
    fn message(&self) -> String {
        let ByteStringUsage { origin } = self;
        format!("Do not use `{origin}.ByteString`, which has unclear semantics and is deprecated")
    }
}
/// The module from which `ByteString` was referenced.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum ByteStringOrigin {
    Typing,
    CollectionsAbc,
}

impl std::fmt::Display for ByteStringOrigin {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let module = match self {
            Self::Typing => "typing",
            Self::CollectionsAbc => "collections.abc",
        };
        f.write_str(module)
    }
}
/// PYI057: flag attribute references to `typing.ByteString` or
/// `collections.abc.ByteString`.
pub(crate) fn bytestring_attribute(checker: &Checker, attribute: &Expr) {
    let semantic = checker.semantic();
    // Fast path: bail out unless one of the relevant modules was seen.
    if !semantic
        .seen
        .intersects(Modules::TYPING | Modules::COLLECTIONS)
    {
        return;
    }
    let Some(qualified_name) = semantic.resolve_qualified_name(attribute) else {
        return;
    };
    let origin = match qualified_name.segments() {
        ["typing", "ByteString"] => ByteStringOrigin::Typing,
        ["collections", "abc", "ByteString"] => ByteStringOrigin::CollectionsAbc,
        _ => return,
    };
    let mut diagnostic = checker.report_diagnostic(ByteStringUsage { origin }, attribute.range());
    // Tag the diagnostic as referring to a deprecated API so clients can
    // render it accordingly.
    diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Deprecated);
}
/// PYI057: flag `from typing import ByteString` and
/// `from collections.abc import ByteString`.
pub(crate) fn bytestring_import(checker: &Checker, import_from: &ast::StmtImportFrom) {
    let ast::StmtImportFrom { names, module, .. } = import_from;
    // Relative imports (`from . import ...`) carry no module name.
    let Some(module) = module else {
        return;
    };
    let origin = match module.id.as_str() {
        "typing" => ByteStringOrigin::Typing,
        "collections.abc" => ByteStringOrigin::CollectionsAbc,
        _ => return,
    };
    // Report every imported alias named `ByteString`.
    for name in names.iter().filter(|name| name.name.as_str() == "ByteString") {
        let mut diagnostic = checker.report_diagnostic(ByteStringUsage { origin }, name.range());
        diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Deprecated);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/exit_annotations.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/exit_annotations.rs | use std::fmt::{Display, Formatter};
use ruff_python_ast::{
Expr, ExprBinOp, ExprSubscript, ExprTuple, Operator, ParameterWithDefault, Parameters, Stmt,
StmtClassDef, StmtFunctionDef,
};
use smallvec::SmallVec;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_semantic::{SemanticModel, analyze::visibility::is_overload};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for incorrect function signatures on `__exit__` and `__aexit__`
/// methods.
///
/// ## Why is this bad?
/// Improperly annotated `__exit__` and `__aexit__` methods can cause
/// unexpected behavior when interacting with type checkers.
///
/// ## Example
///
/// ```pyi
/// from types import TracebackType
///
/// class Foo:
///     def __exit__(
///         self, typ: BaseException, exc: BaseException, tb: TracebackType
///     ) -> None: ...
/// ```
///
/// Use instead:
///
/// ```pyi
/// from types import TracebackType
///
/// class Foo:
///     def __exit__(
///         self,
///         typ: type[BaseException] | None,
///         exc: BaseException | None,
///         tb: TracebackType | None,
///     ) -> None: ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.279")]
pub(crate) struct BadExitAnnotation {
    // Whether the offending method is `__exit__` or `__aexit__`.
    func_kind: FuncKind,
    // The specific signature problem that was detected.
    error_kind: ErrorKind,
}
impl Violation for BadExitAnnotation {
    // Only the `StarArgsNotAnnotated` case carries a fix (see `fix_title`).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let method_name = self.func_kind.to_string();
        // One message per detected signature problem.
        match self.error_kind {
            ErrorKind::StarArgsNotAnnotated => {
                format!("Star-args in `{method_name}` should be annotated with `object`")
            }
            ErrorKind::MissingArgs => format!(
                "If there are no star-args, `{method_name}` should have at least 3 non-keyword-only args (excluding `self`)"
            ),
            ErrorKind::ArgsAfterFirstFourMustHaveDefault => format!(
                "All arguments after the first four in `{method_name}` must have a default value"
            ),
            ErrorKind::AllKwargsMustHaveDefault => {
                format!("All keyword-only arguments in `{method_name}` must have a default value")
            }
            ErrorKind::FirstArgBadAnnotation => format!(
                "The first argument in `{method_name}` should be annotated with `object` or `type[BaseException] | None`"
            ),
            ErrorKind::SecondArgBadAnnotation => format!(
                "The second argument in `{method_name}` should be annotated with `object` or `BaseException | None`"
            ),
            ErrorKind::ThirdArgBadAnnotation => format!(
                "The third argument in `{method_name}` should be annotated with `object` or `types.TracebackType | None`"
            ),
            ErrorKind::UnrecognizedExitOverload => format!(
                "Annotations for a three-argument `{method_name}` overload (excluding `self`) \
                should either be `None, None, None` or `type[BaseException], BaseException, types.TracebackType`"
            ),
        }
    }

    fn fix_title(&self) -> Option<String> {
        if matches!(self.error_kind, ErrorKind::StarArgsNotAnnotated) {
            Some("Annotate star-args with `object`".to_string())
        } else {
            None
        }
    }
}
/// Distinguishes the synchronous `__exit__` protocol method from the
/// asynchronous `__aexit__` one.
#[derive(Debug, Copy, Clone, Eq, PartialEq, is_macro::Is)]
enum FuncKind {
    Sync,
    Async,
}

impl FuncKind {
    /// The dunder name corresponding to this kind of exit method.
    const fn as_str(self) -> &'static str {
        match self {
            Self::Sync => "__exit__",
            Self::Async => "__aexit__",
        }
    }
}
impl Display for FuncKind {
    /// Renders the method name (`__exit__`/`__aexit__`) for use in messages.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// The particular way in which an `__exit__`/`__aexit__` signature is wrong.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum ErrorKind {
    /// A `*args` parameter exists but is annotated with something other than
    /// `object` (or `_typeshed.Unused`).
    StarArgsNotAnnotated,
    /// Fewer than 3 non-`self` positional parameters and no `*args` to absorb them.
    MissingArgs,
    /// First parameter's annotation matches neither `object`/`Unused` nor
    /// `type[BaseException] | None`.
    FirstArgBadAnnotation,
    /// Second parameter's annotation matches neither `object`/`Unused` nor
    /// `BaseException | None`.
    SecondArgBadAnnotation,
    /// Third parameter's annotation matches neither `object`/`Unused` nor
    /// `types.TracebackType | None`.
    ThirdArgBadAnnotation,
    /// A positional parameter beyond the first four lacks a default value.
    ArgsAfterFirstFourMustHaveDefault,
    /// A keyword-only parameter lacks a default value.
    AllKwargsMustHaveDefault,
    /// An `@overload` signature matches neither canonical overload shape.
    UnrecognizedExitOverload,
}
/// PYI036
///
/// Entry point: validates the signature of an `__exit__`/`__aexit__` method.
/// Dispatches to the overload checker for `@overload`-decorated definitions,
/// and otherwise checks arity, star-args, defaults, and per-argument annotations.
pub(crate) fn bad_exit_annotation(checker: &Checker, function: &StmtFunctionDef) {
    let StmtFunctionDef {
        is_async,
        decorator_list,
        name,
        parameters,
        ..
    } = function;

    // Only `__exit__` (sync) and `__aexit__` (async) are in scope for this rule;
    // a sync `__aexit__` or async `__exit__` is ignored here.
    let func_kind = match name.as_str() {
        "__exit__" if !is_async => FuncKind::Sync,
        "__aexit__" if *is_async => FuncKind::Async,
        _ => return,
    };

    let semantic = checker.semantic();

    // The method must be defined directly inside a class body.
    let Some(Stmt::ClassDef(parent_class_def)) = semantic.current_statement_parent() else {
        return;
    };

    // All positional parameters except the leading `self`.
    // Fix: the source contained a mis-encoded `¶meters.args` (mojibake for
    // `&parameters.args`), which does not compile.
    let non_self_positional_args: SmallVec<[&ParameterWithDefault; 3]> = parameters
        .posonlyargs
        .iter()
        .chain(&parameters.args)
        .skip(1)
        .collect();

    // Overloaded definitions are validated against the two canonical overload shapes.
    if is_overload(decorator_list, semantic) {
        check_positional_args_for_overloaded_method(
            checker,
            &non_self_positional_args,
            func_kind,
            parent_class_def,
            parameters.range(),
        );
        return;
    }

    // If there are less than three positional arguments, at least one of them must be a star-arg,
    // and it must be annotated with `object`.
    if non_self_positional_args.len() < 3 {
        check_short_args_list(checker, parameters, func_kind);
    }

    // Every positional argument (beyond the first four) must have a default.
    for parameter in non_self_positional_args
        .iter()
        .skip(3)
        .filter(|parameter| parameter.default.is_none())
    {
        checker.report_diagnostic(
            BadExitAnnotation {
                func_kind,
                error_kind: ErrorKind::ArgsAfterFirstFourMustHaveDefault,
            },
            parameter.range(),
        );
    }

    // ...as should all keyword-only arguments.
    for parameter in parameters
        .kwonlyargs
        .iter()
        .filter(|arg| arg.default.is_none())
    {
        checker.report_diagnostic(
            BadExitAnnotation {
                func_kind,
                error_kind: ErrorKind::AllKwargsMustHaveDefault,
            },
            parameter.range(),
        );
    }

    check_positional_args_for_non_overloaded_method(checker, &non_self_positional_args, func_kind);
}
/// Determine whether a "short" argument list (i.e., an argument list with less than four elements)
/// contains a star-args argument annotated with `object`. If not, report an error.
fn check_short_args_list(checker: &Checker, parameters: &Parameters, func_kind: FuncKind) {
    // Fix: the source contained a mis-encoded `¶meters.vararg` (mojibake for
    // `&parameters.vararg`), which does not compile.
    if let Some(varargs) = &parameters.vararg {
        // A star-arg is present: if it carries an annotation, that annotation must be
        // `object` (or `_typeshed.Unused`); an unannotated star-arg is accepted as-is.
        if let Some(annotation) = varargs
            .annotation()
            .filter(|ann| !is_object_or_unused(ann, checker.semantic()))
        {
            let mut diagnostic = checker.report_diagnostic(
                BadExitAnnotation {
                    func_kind,
                    error_kind: ErrorKind::StarArgsNotAnnotated,
                },
                annotation.range(),
            );
            // Offer a fix that replaces the bad annotation with `object`,
            // importing the builtin symbol if necessary.
            diagnostic.try_set_fix(|| {
                let (import_edit, binding) = checker.importer().get_or_import_builtin_symbol(
                    "object",
                    annotation.start(),
                    checker.semantic(),
                )?;
                let binding_edit = Edit::range_replacement(binding, annotation.range());
                Ok(Fix::safe_edits(binding_edit, import_edit))
            });
        }
    } else {
        // No star-arg to absorb the missing positional parameters: the signature
        // cannot accept the three exception-info arguments. Report (no fix).
        checker.report_diagnostic(
            BadExitAnnotation {
                func_kind,
                error_kind: ErrorKind::MissingArgs,
            },
            parameters.range(),
        );
    }
}
/// Determines whether the positional arguments of an `__exit__` or `__aexit__` method
/// (that is not decorated with `@typing.overload`) are annotated correctly.
fn check_positional_args_for_non_overloaded_method(
    checker: &Checker,
    non_self_positional_params: &[&ParameterWithDefault],
    kind: FuncKind,
) {
    // For each argument, define the predicate against which to check the annotation.
    type AnnotationValidator = fn(&Expr, &SemanticModel) -> bool;

    // Expected annotations, in argument order:
    // 1. `type[BaseException]`, 2. `BaseException`, 3. `types.TracebackType`
    // (each also allowed in `... | None` form — see `non_none_annotation_element`).
    let validations: [(ErrorKind, AnnotationValidator); 3] = [
        (ErrorKind::FirstArgBadAnnotation, is_base_exception_type),
        (ErrorKind::SecondArgBadAnnotation, |expr, semantic| {
            semantic.match_builtin_expr(expr, "BaseException")
        }),
        (ErrorKind::ThirdArgBadAnnotation, is_traceback_type),
    ];

    for (param, (error_info, predicate)) in
        non_self_positional_params.iter().take(3).zip(validations)
    {
        // Unannotated parameters are always acceptable.
        let Some(annotation) = param.annotation() else {
            continue;
        };

        // `object` (or `_typeshed.Unused`) is always acceptable.
        if is_object_or_unused(annotation, checker.semantic()) {
            continue;
        }

        // If there's an annotation that's not `object` or `Unused`, check that the annotated type
        // matches the predicate.
        if non_none_annotation_element(annotation, checker.semantic())
            .is_some_and(|elem| predicate(elem, checker.semantic()))
        {
            continue;
        }

        checker.report_diagnostic(
            BadExitAnnotation {
                func_kind: kind,
                error_kind: error_info,
            },
            annotation.range(),
        );
    }
}
/// Determines whether the positional arguments of an `__exit__` or `__aexit__` method
/// overload are annotated correctly.
fn check_positional_args_for_overloaded_method(
    checker: &Checker,
    non_self_positional_args: &[&ParameterWithDefault],
    kind: FuncKind,
    parent_class_def: &StmtClassDef,
    parameters_range: TextRange,
) {
    // A parameter "loosely matches" if it is unannotated, annotated with
    // `object`/`_typeshed.Unused`, or its annotation satisfies `predicate`.
    fn parameter_annotation_loosely_matches_predicate(
        parameter: &ParameterWithDefault,
        predicate: impl FnOnce(&Expr) -> bool,
        semantic: &SemanticModel,
    ) -> bool {
        parameter.annotation().is_none_or(|annotation| {
            predicate(annotation) || is_object_or_unused(annotation, semantic)
        })
    }

    let semantic = checker.semantic();

    // Collect all the overloads for this method into a SmallVec
    let function_overloads: SmallVec<[&StmtFunctionDef; 2]> = parent_class_def
        .body
        .iter()
        .filter_map(|stmt| {
            let func_def = stmt.as_function_def_stmt()?;
            if &func_def.name == kind.as_str() && is_overload(&func_def.decorator_list, semantic) {
                Some(func_def)
            } else {
                None
            }
        })
        .collect();

    // If the number of overloads for this method is not exactly 2, don't do any checking
    if function_overloads.len() != 2 {
        return;
    }

    // Pre-validate the *shape* of every overload before comparing annotations.
    for function_def in &function_overloads {
        let StmtFunctionDef {
            is_async,
            parameters,
            ..
        } = function_def;

        // If any overloads are an unexpected sync/async colour, don't do any checking
        if *is_async != kind.is_async() {
            return;
        }

        // If any overloads have any variadic arguments, don't do any checking
        let Parameters {
            range: _,
            node_index: _,
            posonlyargs,
            args,
            vararg: None,
            kwonlyargs,
            kwarg: None,
        } = &**parameters
        else {
            return;
        };

        // If any overloads have any keyword-only arguments, don't do any checking
        if !kwonlyargs.is_empty() {
            return;
        }

        // If the number of non-keyword-only arguments is not exactly equal to 4
        // for any overloads, don't do any checking
        if posonlyargs.len() + args.len() != 4 {
            return;
        }
    }

    // The method currently being checked must itself be one of the collected overloads.
    debug_assert!(
        function_overloads.contains(&semantic.current_statement().as_function_def_stmt().unwrap())
    );

    // We've now established that no overloads for this method have any variadic parameters,
    // no overloads have any keyword-only parameters, all overloads are the expected
    // sync/async colour, and all overloads have exactly 3 non-`self` non-keyword-only parameters.
    // The method we're currently looking at is one of those overloads.
    // It therefore follows that, in order for it to be correctly annotated, it must be
    // one of the following two possible overloads:
    //
    // ```
    // @overload
    // def __(a)exit__(self, typ: None, exc: None, tb: None) -> None: ...
    // @overload
    // def __(a)exit__(self, typ: type[BaseException], exc: BaseException, tb: TracebackType) -> None: ...
    // ```
    //
    // We'll allow small variations on either of these (if, e.g. a parameter is unannotated,
    // annotated with `object` or `_typeshed.Unused`). *Basically*, though, the rule is:
    // - If the function overload matches *either* of those, it's okay.
    // - If not: emit a diagnostic.

    // Start by checking the first possibility:
    if non_self_positional_args.iter().all(|parameter| {
        parameter_annotation_loosely_matches_predicate(
            parameter,
            Expr::is_none_literal_expr,
            semantic,
        )
    }) {
        return;
    }

    // Now check the second:
    if parameter_annotation_loosely_matches_predicate(
        non_self_positional_args[0],
        |annotation| is_base_exception_type(annotation, semantic),
        semantic,
    ) && parameter_annotation_loosely_matches_predicate(
        non_self_positional_args[1],
        |annotation| semantic.match_builtin_expr(annotation, "BaseException"),
        semantic,
    ) && parameter_annotation_loosely_matches_predicate(
        non_self_positional_args[2],
        |annotation| is_traceback_type(annotation, semantic),
        semantic,
    ) {
        return;
    }

    // Okay, neither of them match...
    checker.report_diagnostic(
        BadExitAnnotation {
            func_kind: kind,
            error_kind: ErrorKind::UnrecognizedExitOverload,
        },
        parameters_range,
    );
}
/// Return the non-`None` annotation element of a PEP 604-style union or `Optional` annotation.
fn non_none_annotation_element<'a>(
    annotation: &'a Expr,
    semantic: &SemanticModel,
) -> Option<&'a Expr> {
    match annotation {
        // Subscript form: `typing.Optional[...]` or `typing.Union[...]`.
        Expr::Subscript(ExprSubscript { value, slice, .. }) => {
            let qualified_name = semantic.resolve_qualified_name(value)?;
            if semantic.match_typing_qualified_name(&qualified_name, "Optional") {
                // `Optional[None]` has no non-`None` element.
                return (!slice.is_none_literal_expr()).then(|| &**slice);
            }
            if !semantic.match_typing_qualified_name(&qualified_name, "Union") {
                return None;
            }
            // Only two-element unions with exactly one `None` are recognized.
            let ExprTuple { elts, .. } = slice.as_tuple_expr()?;
            match elts.as_slice() {
                [left, right] if !left.is_none_literal_expr() && right.is_none_literal_expr() => {
                    Some(left)
                }
                [left, right] if left.is_none_literal_expr() && !right.is_none_literal_expr() => {
                    Some(right)
                }
                // Zero `None`s, two `None`s, or a different arity: no unique element.
                _ => None,
            }
        }
        // PEP 604-style union, e.g. `int | None`.
        Expr::BinOp(ExprBinOp {
            op: Operator::BitOr,
            left,
            right,
            ..
        }) => {
            if !left.is_none_literal_expr() {
                Some(left)
            } else if !right.is_none_literal_expr() {
                Some(right)
            } else {
                None
            }
        }
        _ => None,
    }
}
/// Return `true` if the [`Expr`] is the `object` builtin or the `_typeshed.Unused` type.
fn is_object_or_unused(expr: &Expr, semantic: &SemanticModel) -> bool {
    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return false;
    };
    matches!(
        qualified_name.segments(),
        ["" | "builtins", "object"] | ["_typeshed", "Unused"]
    )
}
/// Return `true` if the [`Expr`] is the `types.TracebackType` type.
fn is_traceback_type(expr: &Expr, semantic: &SemanticModel) -> bool {
    let Some(qualified_name) = semantic.resolve_qualified_name(expr) else {
        return false;
    };
    matches!(qualified_name.segments(), ["types", "TracebackType"])
}
/// Return `true` if the [`Expr`] is, e.g., `Type[BaseException]`.
fn is_base_exception_type(expr: &Expr, semantic: &SemanticModel) -> bool {
    // Must be a subscript expression: `type[...]` or `typing.Type[...]`.
    let Expr::Subscript(ExprSubscript { value, slice, .. }) = expr else {
        return false;
    };
    let is_type_subscript =
        semantic.match_typing_expr(value, "Type") || semantic.match_builtin_expr(value, "type");
    // Only inspect the slice if the subscripted value is `type`/`Type`.
    is_type_subscript && semantic.match_builtin_expr(slice, "BaseException")
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_none_literal.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_none_literal.rs | use anyhow::Result;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{
self as ast, Expr, ExprBinOp, ExprContext, ExprNoneLiteral, Operator, PythonVersion,
helpers::{pep_604_union, typing_optional},
name::Name,
operator_precedence::OperatorPrecedence,
token::{Tokens, parenthesized_range},
};
use ruff_python_semantic::analyze::typing::{traverse_literal, traverse_union};
use ruff_text_size::{Ranged, TextRange};
use smallvec::SmallVec;
use crate::checkers::ast::Checker;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for redundant `Literal[None]` annotations.
///
/// ## Why is this bad?
/// While `Literal[None]` is a valid type annotation, it is semantically equivalent to `None`.
/// Prefer `None` over `Literal[None]` for both consistency and readability.
///
/// ## Example
/// ```python
/// from typing import Literal
///
/// Literal[None]
/// Literal[1, 2, 3, "foo", 5, None]
/// ```
///
/// Use instead:
/// ```python
/// from typing import Literal
///
/// None
/// Literal[1, 2, 3, "foo", 5] | None
/// ```
///
/// ## Fix safety and availability
/// This rule's fix is marked as safe unless the literal contains comments.
///
/// There is currently no fix available when applying the fix would lead to
/// a `TypeError` from an expression of the form `None | None` or when we
/// are unable to import the symbol `typing.Optional` and the Python version
/// is 3.9 or below.
///
/// ## References
/// - [Typing documentation: Legal parameters for `Literal` at type check time](https://typing.python.org/en/latest/spec/literal.html#legal-parameters-for-literal-at-type-check-time)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct RedundantNoneLiteral {
    /// How the fixed annotation spells the union (see [`UnionKind`]).
    union_kind: UnionKind,
}
impl Violation for RedundantNoneLiteral {
    // A fix is only sometimes available (see the rule docs' "Fix safety and
    // availability" section).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        // Fix: the `TypingOptional` and `BitOr` messages previously ended with a
        // stray trailing space inside the string literal.
        match self.union_kind {
            UnionKind::NoUnion => "Use `None` rather than `Literal[None]`".to_string(),
            UnionKind::TypingOptional => {
                "Use `Optional[Literal[...]]` rather than `Literal[None, ...]`".to_string()
            }
            UnionKind::BitOr => {
                "Use `Literal[...] | None` rather than `Literal[None, ...]`".to_string()
            }
        }
    }

    fn fix_title(&self) -> Option<String> {
        // The fix title mirrors the union spelling chosen in `message`.
        Some(match self.union_kind {
            UnionKind::NoUnion => "Replace with `None`".to_string(),
            UnionKind::TypingOptional => "Replace with `Optional[Literal[...]]`".to_string(),
            UnionKind::BitOr => "Replace with `Literal[...] | None`".to_string(),
        })
    }
}
/// PYI061
pub(crate) fn redundant_none_literal<'a>(checker: &Checker, literal_expr: &'a Expr) {
    let semantic = checker.semantic();

    // Fast path: nothing `typing`-related has been seen in this module.
    if !semantic.seen_typing() {
        return;
    }

    // The rule only applies to subscript expressions, i.e. `Literal[...]`.
    let Expr::Subscript(ast::ExprSubscript {
        value: literal_subscript,
        ..
    }) = literal_expr
    else {
        return;
    };

    // Partition the (possibly nested) literal's members into `None` members
    // and everything else.
    let mut none_exprs: SmallVec<[&ExprNoneLiteral; 1]> = SmallVec::new();
    let mut literal_elements = vec![];

    let mut partition_literal_elements = |expr: &'a Expr, _parent: &'a Expr| {
        if let Expr::NoneLiteral(none_expr) = expr {
            none_exprs.push(none_expr);
        } else {
            literal_elements.push(expr);
        }
    };

    traverse_literal(&mut partition_literal_elements, semantic, literal_expr);

    // No `None` member: nothing redundant to report.
    if none_exprs.is_empty() {
        return;
    }

    // Decide how the fixed annotation should spell the union (if any members remain).
    let union_kind = if literal_elements.is_empty() {
        UnionKind::NoUnion
    } else if (checker.target_version() >= PythonVersion::PY310) || checker.source_type.is_stub() {
        UnionKind::BitOr
    } else {
        UnionKind::TypingOptional
    };

    // N.B. Applying the fix can leave an unused import to be fixed by the `unused-import` rule.
    for none_expr in none_exprs {
        let mut diagnostic =
            checker.report_diagnostic(RedundantNoneLiteral { union_kind }, none_expr.range());
        diagnostic.try_set_optional_fix(|| {
            create_fix(
                checker,
                literal_expr,
                literal_subscript,
                literal_elements.clone(),
                union_kind,
            )
            // Isolate the fix to ensure multiple fixes on the same expression (like
            // `Literal[None,] | Literal[None,]` -> `None | None`) happen across separate passes,
            // preventing the production of invalid code.
            .map(|fix| {
                fix.map(|fix| fix.isolate(Checker::isolation(semantic.current_statement_id())))
            })
        });
    }
}
/// If possible, return a [`Fix`] for a violation of this rule.
///
/// Avoid producing code that would raise an exception when
/// `Literal[None] | None` would be fixed to `None | None`.
/// Instead, do not provide a fix. We don't need to worry about unions
/// that use [`typing.Union`], as `Union[None, None]` is valid Python.
/// See <https://github.com/astral-sh/ruff/issues/14567>.
///
/// [`typing.Union`]: https://docs.python.org/3/library/typing.html#typing.Union
fn create_fix(
    checker: &Checker,
    literal_expr: &Expr,
    literal_subscript: &Expr,
    literal_elements: Vec<&Expr>,
    union_kind: UnionKind,
) -> Result<Option<Fix>> {
    let semantic = checker.semantic();

    // Find the outermost enclosing PEP 604 (`X | Y`) union, if any; `skip(1)`
    // skips `literal_expr` itself in the ancestor walk.
    let enclosing_pep604_union = semantic
        .current_expressions()
        .skip(1)
        .take_while(|expr| {
            matches!(
                expr,
                Expr::BinOp(ExprBinOp {
                    op: Operator::BitOr,
                    ..
                })
            )
        })
        .last();

    // If the enclosing `|`-union already contains a bare `None`, the fix would
    // produce `None | None` — bail out with no fix.
    if let Some(enclosing_pep604_union) = enclosing_pep604_union {
        let mut is_fixable = true;
        traverse_union(
            &mut |expr, _| {
                if expr.is_none_literal_expr() {
                    is_fixable = false;
                }
            },
            semantic,
            enclosing_pep604_union,
        );
        if !is_fixable {
            return Ok(None);
        }
    }

    // Rewriting across comments could drop them, so downgrade to an unsafe fix.
    let applicability = if checker.comment_ranges().intersects(literal_expr.range()) {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };

    // `Literal[None]` with no other members: replace the whole expression with `None`.
    if matches!(union_kind, UnionKind::NoUnion) {
        return Ok(Some(Fix::applicable_edit(
            Edit::range_replacement("None".to_string(), literal_expr.range()),
            applicability,
        )));
    }

    // Build a `Literal[...]` node containing the remaining (non-`None`) members.
    let new_literal_expr = Expr::Subscript(ast::ExprSubscript {
        value: Box::new(literal_subscript.clone()),
        range: TextRange::default(),
        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
        ctx: ExprContext::Load,
        slice: Box::new(if literal_elements.len() > 1 {
            Expr::Tuple(ast::ExprTuple {
                elts: literal_elements.into_iter().cloned().collect(),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                ctx: ExprContext::Load,
                parenthesized: true,
            })
        } else {
            literal_elements[0].clone()
        }),
    });

    let fix = match union_kind {
        UnionKind::TypingOptional => {
            // Requires `typing.Optional` to be importable; otherwise, no fix.
            let Some(importer) = checker.typing_importer("Optional", PythonVersion::lowest())
            else {
                return Ok(None);
            };
            let (import_edit, bound_name) = importer.import(literal_expr.start())?;
            let optional_expr = typing_optional(new_literal_expr, Name::from(bound_name));
            let content = checker.generator().expr(&optional_expr);
            let optional_edit = Edit::range_replacement(content, literal_expr.range());
            Fix::applicable_edits(import_edit, [optional_edit], applicability)
        }
        UnionKind::BitOr => {
            let none_expr = Expr::NoneLiteral(ExprNoneLiteral {
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            });
            let union_expr = pep_604_union(&[new_literal_expr, none_expr]);
            // Check if we need parentheses to preserve operator precedence
            let content =
                if needs_parentheses_for_precedence(semantic, literal_expr, checker.tokens()) {
                    format!("({})", checker.generator().expr(&union_expr))
                } else {
                    checker.generator().expr(&union_expr)
                };
            let union_edit = Edit::range_replacement(content, literal_expr.range());
            Fix::applicable_edit(union_edit, applicability)
        }
        // We dealt with this case earlier to avoid allocating `lhs` and `rhs`
        UnionKind::NoUnion => {
            unreachable!()
        }
    };
    Ok(Some(fix))
}
/// How the fixed annotation should express "or `None`".
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum UnionKind {
    /// `Literal[None]` with no other members — replace with a bare `None`.
    NoUnion,
    /// Pre-3.10 target outside a stub — use `Optional[Literal[...]]`.
    TypingOptional,
    /// Python 3.10+ target or a stub file — use `Literal[...] | None`.
    BitOr,
}
/// Check if the union expression needs parentheses to preserve operator precedence.
/// This is needed when the union is part of a larger expression where the `|` operator
/// has lower precedence than the surrounding operations (like attribute access).
fn needs_parentheses_for_precedence(
    semantic: &ruff_python_semantic::SemanticModel,
    literal_expr: &Expr,
    tokens: &Tokens,
) -> bool {
    // Without a parent expression there is nothing that could bind tighter.
    let Some(parent_expr) = semantic.current_expression_parent() else {
        return false;
    };

    // An explicitly parenthesized literal never needs additional parentheses.
    let already_parenthesized =
        parenthesized_range(literal_expr.into(), parent_expr.into(), tokens).is_some();

    // Parentheses are required only when the parent binds tighter than `|`.
    !already_parenthesized && OperatorPrecedence::from(parent_expr) > OperatorPrecedence::BitOr
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/non_empty_stub_body.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/non_empty_stub_body.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_ast::{self as ast, Stmt};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for non-empty function stub bodies.
///
/// ## Why is this bad?
/// Stub files are never executed at runtime; they should be thought of as
/// "data files" for type checkers or IDEs. Function bodies are redundant
/// for this purpose.
///
/// ## Example
/// ```pyi
/// def double(x: int) -> int:
///     return x * 2
/// ```
///
/// Use instead:
/// ```pyi
/// def double(x: int) -> int: ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe: it replaces the single-statement body
/// with `...`.
///
/// ## References
/// - [Typing documentation - Writing and Maintaining Stub Files](https://typing.python.org/en/latest/guides/writing_stubs.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.253")]
pub(crate) struct NonEmptyStubBody;
// `AlwaysFixableViolation`: every reported instance carries the `...` replacement fix.
impl AlwaysFixableViolation for NonEmptyStubBody {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Function body must contain only `...`".to_string()
    }

    fn fix_title(&self) -> String {
        "Replace function body with `...`".to_string()
    }
}
/// PYI010
pub(crate) fn non_empty_stub_body(checker: &Checker, body: &[Stmt]) {
    // Multi-statement bodies are PYI048's domain.
    let [stmt] = body else {
        return;
    };

    // `pass` bodies are PYI009's domain.
    if stmt.is_pass_stmt() {
        return;
    }

    // Docstring-only bodies are PYI021's domain.
    if is_docstring_stmt(stmt) {
        return;
    }

    // A lone `...` is exactly what a stub body should be — nothing to report.
    if matches!(
        stmt,
        Stmt::Expr(ast::StmtExpr { value, .. }) if value.is_ellipsis_literal_expr()
    ) {
        return;
    }

    // Anything else is a functional body: report, and offer to replace it with `...`.
    let mut diagnostic = checker.report_diagnostic(NonEmptyStubBody, stmt.range());
    let replacement = Edit::range_replacement("...".to_string(), stmt.range());
    diagnostic.set_fix(Fix::safe_edit(replacement));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/stub_body_multiple_statements.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/stub_body_multiple_statements.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Stmt;
use ruff_python_ast::identifier::Identifier;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for functions in stub (`.pyi`) files that contain multiple
/// statements.
///
/// ## Why is this bad?
/// Stub files are never executed, and are only intended to define type hints.
/// As such, functions in stub files should not contain functional code, and
/// should instead contain only a single statement (e.g., `...`).
///
/// ## Example
///
/// ```pyi
/// def function():
///     x = 1
///     y = 2
///     return x + y
/// ```
///
/// Use instead:
///
/// ```pyi
/// def function(): ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
// Unit struct: this diagnostic carries no extra data.
pub(crate) struct StubBodyMultipleStatements;
// Plain `Violation` (not `AlwaysFixableViolation`): this rule offers no autofix.
impl Violation for StubBodyMultipleStatements {
    #[derive_message_formats]
    fn message(&self) -> String {
        "Function body must contain exactly one statement".to_string()
    }
}
/// PYI048
pub(crate) fn stub_body_multiple_statements(checker: &Checker, stmt: &Stmt, body: &[Stmt]) {
    // Single-statement (or empty) bodies are fine; only flag genuinely
    // multi-statement ones, anchored at the function's name.
    if body.len() < 2 {
        return;
    }
    checker.report_diagnostic(StubBodyMultipleStatements, stmt.identifier());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/duplicate_literal_member.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/duplicate_literal_member.rs | use std::collections::HashSet;
use rustc_hash::FxHashSet;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{self as ast, Expr, ExprContext};
use ruff_python_semantic::analyze::typing::traverse_literal;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for duplicate members in a `typing.Literal[]` slice.
///
/// ## Why is this bad?
/// Duplicate literal members are redundant and should be removed.
///
/// ## Example
/// ```python
/// from typing import Literal
///
/// foo: Literal["a", "b", "a"]
/// ```
///
/// Use instead:
/// ```python
/// from typing import Literal
///
/// foo: Literal["a", "b"]
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe, unless the type annotation contains comments.
///
/// Note that while the fix may flatten nested literals into a single top-level literal,
/// the semantics of the annotation will remain unchanged.
///
/// ## References
/// - [Python documentation: `typing.Literal`](https://docs.python.org/3/library/typing.html#typing.Literal)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.6.0")]
pub(crate) struct DuplicateLiteralMember {
    /// The pretty-printed source of the duplicated member (e.g. `"a"`).
    duplicate_name: String,
}
// `AlwaysFixableViolation`: every reported duplicate carries the de-duplication fix.
impl AlwaysFixableViolation for DuplicateLiteralMember {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Duplicate literal member `{}`", self.duplicate_name)
    }

    fn fix_title(&self) -> String {
        "Remove duplicates".to_string()
    }
}
/// PYI062
pub(crate) fn duplicate_literal_member<'a>(checker: &Checker, expr: &'a Expr) {
    // Members already seen (compared structurally via `ComparableExpr`), and the
    // order-preserving list of first occurrences used to rebuild the literal.
    let mut seen_nodes: HashSet<ComparableExpr<'_>, _> = FxHashSet::default();
    let mut unique_nodes: Vec<&Expr> = Vec::new();
    let mut diagnostics = Vec::new();

    // Adds a member to `literal_exprs` if it is a `Literal` annotation
    let mut check_for_duplicate_members = |expr: &'a Expr, _: &'a Expr| {
        // If we've already seen this literal member, raise a violation.
        if seen_nodes.insert(expr.into()) {
            unique_nodes.push(expr);
        } else {
            diagnostics.push(checker.report_diagnostic(
                DuplicateLiteralMember {
                    duplicate_name: checker.generator().expr(expr),
                },
                expr.range(),
            ));
        }
    };

    // Traverse the literal, collect all diagnostic members.
    traverse_literal(&mut check_for_duplicate_members, checker.semantic(), expr);
    if diagnostics.is_empty() {
        return;
    }

    // If there's at least one diagnostic, create a fix to remove the duplicate members.
    if let Expr::Subscript(subscript) = expr {
        // Rebuild `Literal[...]` from the de-duplicated members. Note that this
        // flattens nested literals into a single top-level literal.
        let subscript = Expr::Subscript(ast::ExprSubscript {
            slice: Box::new(if let [elt] = unique_nodes.as_slice() {
                (*elt).clone()
            } else {
                Expr::Tuple(ast::ExprTuple {
                    elts: unique_nodes.into_iter().cloned().collect(),
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    ctx: ExprContext::Load,
                    parenthesized: false,
                })
            }),
            value: subscript.value.clone(),
            range: TextRange::default(),
            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            ctx: ExprContext::Load,
        });
        // The same whole-annotation replacement is attached to every duplicate's
        // diagnostic; it is unsafe when comments intersect the annotation.
        let fix = Fix::applicable_edit(
            Edit::range_replacement(checker.generator().expr(&subscript), expr.range()),
            if checker.comment_ranges().intersects(expr.range()) {
                Applicability::Unsafe
            } else {
                Applicability::Safe
            },
        );
        for diagnostic in &mut diagnostics {
            diagnostic.set_fix(fix.clone());
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/bad_generator_return_type.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/bad_generator_return_type.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::helpers::map_subscript;
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::SemanticModel;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for simple `__iter__` methods that return `Generator`, and for
/// simple `__aiter__` methods that return `AsyncGenerator`.
///
/// ## Why is this bad?
/// Using `(Async)Iterator` for these methods is simpler and more elegant. More
/// importantly, it also reflects the fact that the precise kind of iterator
/// returned from an `__iter__` method is usually an implementation detail that
/// could change at any time. Type annotations help define a contract for a
/// function; implementation details should not leak into that contract.
///
/// For example:
/// ```python
/// from collections.abc import AsyncGenerator, Generator
/// from typing import Any
///
///
/// class CustomIterator:
///     def __iter__(self) -> Generator:
///         yield from range(42)
///
///
/// class CustomIterator2:
///     def __iter__(self) -> Generator[str, Any, None]:
///         yield from "abcdefg"
/// ```
///
/// Use instead:
/// ```python
/// from collections.abc import Iterator
///
///
/// class CustomIterator:
///     def __iter__(self) -> Iterator:
///         yield from range(42)
///
///
/// class CustomIterator2:
///     def __iter__(self) -> Iterator[str]:
///         yield from "abdefg"
/// ```
///
/// ## Fix safety
/// This rule tries hard to avoid false-positive errors, and the rule's fix
/// should always be safe for `.pyi` stub files. However, there is a slightly
/// higher chance that a false positive might be emitted by this rule when
/// applied to runtime Python (`.py` files). As such, the fix is marked as
/// unsafe for any `__iter__` or `__aiter__` method in a `.py` file that has
/// more than two statements (including docstrings) in its body.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct GeneratorReturnFromIterMethod {
    // NOTE(review): `Iterator` and `Method` here are module-local types defined
    // outside this excerpt — presumably the suggested replacement type
    // (`Iterator`/`AsyncIterator`) and the `__iter__`/`__aiter__` method kind;
    // confirm against the rest of the file.
    return_type: Iterator,
    method: Method,
}
impl Violation for GeneratorReturnFromIterMethod {
    // `Sometimes`: a fix is not always offered (see the rule's "Fix safety" docs).
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let GeneratorReturnFromIterMethod {
            return_type,
            method,
        } = self;
        format!("Use `{return_type}` as the return value for simple `{method}` methods")
    }

    fn fix_title(&self) -> Option<String> {
        let GeneratorReturnFromIterMethod {
            return_type,
            method,
        } = self;
        Some(format!(
            "Convert the return annotation of your `{method}` method to `{return_type}`"
        ))
    }
}
/// PYI058
///
/// Flags `__iter__`/`__aiter__` methods annotated as returning a
/// `(Async)Generator` whose extra type parameters are all `Any`/`None` — i.e.
/// where the simpler `(Async)Iterator` annotation is equivalent.
pub(crate) fn bad_generator_return_type(function_def: &ast::StmtFunctionDef, checker: &Checker) {
    // `__iter__`/`__aiter__` are both written as synchronous `def`s (an
    // `__aiter__` returns an async iterator but is itself not `async`).
    if function_def.is_async {
        return;
    }

    let name = function_def.name.as_str();

    let semantic = checker.semantic();

    // Only methods — functions defined directly in a class scope — qualify.
    if !semantic.current_scope().kind.is_class() {
        return;
    }

    // Expect exactly one parameter (`self`): no varargs, kwargs, or
    // keyword-only parameters.
    let parameters = &function_def.parameters;

    if !parameters.kwonlyargs.is_empty()
        || parameters.kwarg.is_some()
        || parameters.vararg.is_some()
    {
        return;
    }

    if (parameters.args.len() + parameters.posonlyargs.len()) != 1 {
        return;
    }

    // The rule only applies to explicitly annotated return types.
    let returns = match &function_def.returns {
        Some(returns) => returns.as_ref(),
        _ => return,
    };

    // Determine the module from which the existing annotation is imported (e.g., `typing` or
    // `collections.abc`)
    let (method, module, member) = {
        let Some(qualified_name) = semantic.resolve_qualified_name(map_subscript(returns)) else {
            return;
        };
        match (name, qualified_name.segments()) {
            ("__iter__", ["typing", "Generator"]) => {
                (Method::Iter, Module::Typing, Generator::Generator)
            }
            ("__aiter__", ["typing", "AsyncGenerator"]) => {
                (Method::AIter, Module::Typing, Generator::AsyncGenerator)
            }
            ("__iter__", ["typing_extensions", "Generator"]) => {
                (Method::Iter, Module::TypingExtensions, Generator::Generator)
            }
            ("__aiter__", ["typing_extensions", "AsyncGenerator"]) => (
                Method::AIter,
                Module::TypingExtensions,
                Generator::AsyncGenerator,
            ),
            ("__iter__", ["collections", "abc", "Generator"]) => {
                (Method::Iter, Module::CollectionsAbc, Generator::Generator)
            }
            ("__aiter__", ["collections", "abc", "AsyncGenerator"]) => (
                Method::AIter,
                Module::CollectionsAbc,
                Generator::AsyncGenerator,
            ),
            _ => return,
        }
    };

    // `Generator` allows three type parameters; `AsyncGenerator` allows two.
    // If type parameters are present,
    // check that all parameters except the first one are either `typing.Any` or `None`:
    // - if so, collect information on the first parameter for use in the rule's autofix;
    // - if not, don't emit the diagnostic
    let yield_type_info = match returns {
        ast::Expr::Subscript(ast::ExprSubscript { slice, .. }) => match slice.as_ref() {
            ast::Expr::Tuple(slice_tuple) => {
                if !slice_tuple
                    .iter()
                    .skip(1)
                    .all(|element| is_any_or_none(element, semantic))
                {
                    return;
                }
                // Require the exact arity for the method at hand: 3 type
                // parameters for `Generator`, 2 for `AsyncGenerator`.
                let yield_type = match (name, slice_tuple.elts.as_slice()) {
                    ("__iter__", [yield_type, _, _]) => yield_type,
                    ("__aiter__", [yield_type, _]) => yield_type,
                    _ => return,
                };
                Some(YieldTypeInfo {
                    expr: yield_type,
                    range: slice_tuple.range,
                })
            }
            // A subscripted annotation whose slice isn't a tuple: bail out.
            _ => return,
        },
        // A bare (unsubscripted) `Generator`/`AsyncGenerator`: no yield-type
        // edit is needed, but the diagnostic still applies.
        _ => None,
    };

    // For .py files (runtime Python!),
    // only emit the lint if it's a simple __(a)iter__ implementation
    // -- for more complex function bodies,
    // it's more likely we'll be emitting a false positive here
    if !checker.source_type.is_stub() {
        let mut yield_encountered = false;
        for stmt in &function_def.body {
            match stmt {
                ast::Stmt::Pass(_) => continue,
                // A bare `return` (or `return None`) after a yield is fine;
                // returning a real value gives the generator's return type
                // meaning, so bail out.
                ast::Stmt::Return(ast::StmtReturn { value, .. }) => {
                    if let Some(ret_val) = value {
                        if yield_encountered && !ret_val.is_none_literal_expr() {
                            return;
                        }
                    }
                }
                ast::Stmt::Expr(ast::StmtExpr { value, .. }) => match value.as_ref() {
                    // Docstrings and `...` placeholder bodies are allowed.
                    ast::Expr::StringLiteral(_) | ast::Expr::EllipsisLiteral(_) => continue,
                    ast::Expr::Yield(_) | ast::Expr::YieldFrom(_) => {
                        yield_encountered = true;
                        continue;
                    }
                    _ => return,
                },
                _ => return,
            }
        }
    }

    let mut diagnostic = checker.report_diagnostic(
        GeneratorReturnFromIterMethod {
            return_type: member.to_iter(),
            method,
        },
        function_def.identifier(),
    );
    diagnostic.try_set_fix(|| {
        generate_fix(
            function_def,
            returns,
            yield_type_info,
            module,
            member,
            checker,
        )
    });
}
/// Returns `true` if the [`ast::Expr`] is a `None` literal or a `typing.Any` expression.
fn is_any_or_none(expr: &ast::Expr, semantic: &SemanticModel) -> bool {
    if expr.is_none_literal_expr() {
        return true;
    }
    semantic.match_typing_expr(expr, "Any")
}
/// Generate a [`Fix`] to convert the return type annotation to `Iterator` or `AsyncIterator`.
fn generate_fix(
    function_def: &ast::StmtFunctionDef,
    returns: &ast::Expr,
    yield_type_info: Option<YieldTypeInfo>,
    module: Module,
    member: Generator,
    checker: &Checker,
) -> anyhow::Result<Fix> {
    // Unwrap any subscript so that only the `Generator` name itself is
    // replaced; the `[...]` slice is handled by `yield_edit` below.
    let expr = map_subscript(returns);

    // Import (or reuse) the `(Async)Iterator` symbol from the same module the
    // original `(Async)Generator` annotation came from.
    let (import_edit, binding) = checker.importer().get_or_import_symbol(
        &ImportRequest::import_from(&module.to_string(), &member.to_iter().to_string()),
        expr.start(),
        checker.semantic(),
    )?;
    let binding_edit = Edit::range_replacement(binding, expr.range());

    // If type parameters were present, collapse the tuple slice down to just
    // the yield type (e.g. `str, Any, None` -> `str`).
    let yield_edit = yield_type_info.map(|yield_type_info| {
        Edit::range_replacement(
            checker.generator().expr(yield_type_info.expr),
            yield_type_info.range(),
        )
    });

    // Mark as unsafe if it's a runtime Python file and the body has more than one statement in it.
    let applicability = if checker.source_type.is_stub() || function_def.body.len() == 1 {
        Applicability::Safe
    } else {
        Applicability::Unsafe
    };

    Ok(Fix::applicable_edits(
        import_edit,
        std::iter::once(binding_edit).chain(yield_edit),
        applicability,
    ))
}
/// The first type parameter (the yield type) of a subscripted
/// `(Async)Generator` annotation, plus the range of the whole tuple slice
/// that the fix will replace.
#[derive(Debug)]
struct YieldTypeInfo<'a> {
    // Yield-type expression, re-emitted as the `(Async)Iterator`'s parameter.
    expr: &'a ast::Expr,
    // Range of the original tuple slice (e.g. the `str, Any, None` span).
    range: TextRange,
}

impl Ranged for YieldTypeInfo<'_> {
    fn range(&self) -> TextRange {
        self.range
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Module {
    Typing,
    TypingExtensions,
    CollectionsAbc,
}

impl std::fmt::Display for Module {
    /// Render the module as it is spelled in an import path.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let path = match self {
            Module::Typing => "typing",
            Module::TypingExtensions => "typing_extensions",
            Module::CollectionsAbc => "collections.abc",
        };
        f.write_str(path)
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Method {
    Iter,
    AIter,
}

impl std::fmt::Display for Method {
    /// Render the dunder method name this variant stands for.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let dunder = match self {
            Method::Iter => "__iter__",
            Method::AIter => "__aiter__",
        };
        f.write_str(dunder)
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Generator {
    Generator,
    AsyncGenerator,
}

impl std::fmt::Display for Generator {
    /// Render the annotation's symbol name as written in source.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let symbol = match self {
            Generator::Generator => "Generator",
            Generator::AsyncGenerator => "AsyncGenerator",
        };
        f.write_str(symbol)
    }
}
impl Generator {
    /// The `(Async)Iterator` symbol that should replace this `(Async)Generator`.
    fn to_iter(self) -> Iterator {
        match self {
            Generator::AsyncGenerator => Iterator::AsyncIterator,
            Generator::Generator => Iterator::Iterator,
        }
    }
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Iterator {
    Iterator,
    AsyncIterator,
}

impl std::fmt::Display for Iterator {
    /// Render the replacement symbol name as written in source.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let symbol = match self {
            Iterator::Iterator => "Iterator",
            Iterator::AsyncIterator => "AsyncIterator",
        };
        f.write_str(symbol)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/generic_not_last_base_class.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/generic_not_last_base_class.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, helpers::map_subscript};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::{Parentheses, add_argument, remove_argument};
use crate::{Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for classes inheriting from `typing.Generic[]` where `Generic[]` is
/// not the last base class in the bases tuple.
///
/// ## Why is this bad?
/// If `Generic[]` is not the final class in the bases tuple, unexpected
/// behaviour can occur at runtime (See [this CPython issue][1] for an example).
///
/// The rule is also applied to stub files, where it won't cause issues at
/// runtime. This is because type checkers may not be able to infer an
/// accurate [MRO] for the class, which could lead to unexpected or
/// inaccurate results when they analyze your code.
///
/// For example:
/// ```python
/// from collections.abc import Container, Iterable, Sized
/// from typing import Generic, TypeVar
///
///
/// T = TypeVar("T")
/// K = TypeVar("K")
/// V = TypeVar("V")
///
///
/// class LinkedList(Generic[T], Sized):
/// def push(self, item: T) -> None:
/// self._items.append(item)
///
///
/// class MyMapping(
/// Generic[K, V],
/// Iterable[tuple[K, V]],
/// Container[tuple[K, V]],
/// ):
/// ...
/// ```
///
/// Use instead:
/// ```python
/// from collections.abc import Container, Iterable, Sized
/// from typing import Generic, TypeVar
///
///
/// T = TypeVar("T")
/// K = TypeVar("K")
/// V = TypeVar("V")
///
///
/// class LinkedList(Sized, Generic[T]):
/// def push(self, item: T) -> None:
/// self._items.append(item)
///
///
/// class MyMapping(
/// Iterable[tuple[K, V]],
/// Container[tuple[K, V]],
/// Generic[K, V],
/// ):
/// ...
/// ```
///
/// ## Fix safety
///
/// This rule's fix is always unsafe because reordering base classes can change
/// the behavior of the code by modifying the class's MRO. The fix will also
/// delete trailing comments after the `Generic` base class in multi-line base
/// class lists, if any are present.
///
/// ## Fix availability
///
/// This rule's fix is only available when there are no `*args` present in the base class list.
///
/// ## References
/// - [`typing.Generic` documentation](https://docs.python.org/3/library/typing.html#typing.Generic)
///
/// [1]: https://github.com/python/cpython/issues/106102
/// [MRO]: https://docs.python.org/3/glossary.html#term-method-resolution-order
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.13.0")]
pub(crate) struct GenericNotLastBaseClass;

impl Violation for GenericNotLastBaseClass {
    // "Sometimes": `generic_not_last_base_class` withholds the fix when the
    // base list contains a starred argument or multiple `Generic[...]` bases.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`Generic[]` should always be the last base class".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Move `Generic[]` to the end".to_string())
    }
}
/// PYI059
pub(crate) fn generic_not_last_base_class(checker: &Checker, class_def: &ast::StmtClassDef) {
    // The class's bases are modeled as call "arguments"; no bases, no rule.
    let Some(bases) = class_def.arguments.as_deref() else {
        return;
    };

    let semantic = checker.semantic();
    // Cheap pre-check: skip files that never touch the typing machinery.
    if !semantic.seen_typing() {
        return;
    }

    let Some(last_base) = bases.args.last() else {
        return;
    };

    // Iterate over every `Generic`/`Generic[...]` base in the list.
    let mut generic_base_iter = bases
        .args
        .iter()
        .filter(|base| semantic.match_typing_expr(map_subscript(base), "Generic"));

    let Some(generic_base) = generic_base_iter.next() else {
        return;
    };

    // If `Generic[]` exists, but is the last base, don't emit a diagnostic.
    if generic_base.range() == last_base.range() {
        return;
    }

    let mut diagnostic = checker.report_diagnostic(GenericNotLastBaseClass, bases.range());

    // Avoid suggesting a fix if any of the arguments is starred. This avoids tricky syntax errors
    // in cases like
    //
    // ```python
    // class C3(Generic[T], metaclass=type, *[str]): ...
    // ```
    //
    // where we would naively try to put `Generic[T]` after `*[str]`, which is also after a keyword
    // argument, causing the error.
    if bases
        .arguments_source_order()
        .any(|arg| arg.value().is_starred_expr())
    {
        return;
    }

    // No fix if multiple `Generic[]`s are seen in the class bases.
    if generic_base_iter.next().is_none() {
        diagnostic.try_set_fix(|| generate_fix(generic_base, bases, checker));
    }
}
/// Build the fix for PYI059: delete `Generic[...]` from its current position
/// and re-append its original source text as the last base.
fn generate_fix(
    generic_base: &ast::Expr,
    arguments: &ast::Arguments,
    checker: &Checker,
) -> anyhow::Result<Fix> {
    let locator = checker.locator();
    let tokens = checker.tokens();

    // Remove `Generic[...]` where it currently sits, keeping the parentheses.
    let removal = remove_argument(
        generic_base,
        arguments,
        Parentheses::Preserve,
        locator.contents(),
        tokens,
    )?;

    // Re-insert the exact source slice of `Generic[...]` at the end of the
    // argument list.
    let addition = add_argument(locator.slice(generic_base), arguments, tokens);

    Ok(Fix::unsafe_edits(removal, [addition]))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/complex_if_statement_in_stub.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/complex_if_statement_in_stub.rs | use ruff_python_ast::{self as ast, Expr};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `if` statements with complex conditionals in stubs.
///
/// ## Why is this bad?
/// Type checkers understand simple conditionals to express variations between
/// different Python versions and platforms. However, complex tests may not be
/// understood by a type checker, leading to incorrect inferences when they
/// analyze your code.
///
/// ## Example
/// ```pyi
/// import sys
///
/// if (3, 10) <= sys.version_info < (3, 12): ...
/// ```
///
/// Use instead:
/// ```pyi
/// import sys
///
/// if sys.version_info >= (3, 10) and sys.version_info < (3, 12): ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct ComplexIfStatementInStub;

impl Violation for ComplexIfStatementInStub {
    // No `FIX_AVAILABILITY` override: this rule carries no autofix.
    #[derive_message_formats]
    fn message(&self) -> String {
        "`if` test must be a simple comparison against `sys.platform` or `sys.version_info`"
            .to_string()
    }
}
/// PYI002
pub(crate) fn complex_if_statement_in_stub(checker: &Checker, test: &Expr) {
    // A test is "simple" — and therefore exempt — when it is a comparison with
    // exactly one comparator whose left-hand side is either a subscript
    // (e.g. `sys.version_info[0]`) or resolves to `sys.version_info` /
    // `sys.platform`. Everything else is reported.
    if let Expr::Compare(ast::ExprCompare {
        left, comparators, ..
    }) = test
    {
        if comparators.len() == 1 {
            // Subscripted left-hand sides (e.g. `sys.version_info[:2]`) are
            // left alone without a diagnostic.
            if left.is_subscript_expr() {
                return;
            }

            let is_sys_attribute = checker
                .semantic()
                .resolve_qualified_name(left)
                .is_some_and(|qualified_name| {
                    matches!(
                        qualified_name.segments(),
                        ["sys", "version_info" | "platform"]
                    )
                });
            if is_sys_attribute {
                return;
            }
        }
    }

    checker.report_diagnostic(ComplexIfStatementInStub, test.range());
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_numeric_union.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/redundant_numeric_union.rs | use bitflags::bitflags;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{AnyParameterRef, Expr, ExprBinOp, Operator, Parameters, PythonVersion};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
use super::generate_union_fix;
/// ## What it does
/// Checks for parameter annotations that contain redundant unions between
/// builtin numeric types (e.g., `int | float`).
///
/// ## Why is this bad?
/// The [typing specification] states:
///
/// > Python’s numeric types `complex`, `float` and `int` are not subtypes of
/// > each other, but to support common use cases, the type system contains a
/// > straightforward shortcut: when an argument is annotated as having type
/// > `float`, an argument of type `int` is acceptable; similar, for an
/// > argument annotated as having type `complex`, arguments of type `float` or
/// > `int` are acceptable.
///
/// As such, a union that includes both `int` and `float` is redundant in the
/// specific context of a parameter annotation, as it is equivalent to a union
/// that only includes `float`. For readability and clarity, unions should omit
/// redundant elements.
///
/// ## Example
///
/// ```pyi
/// def foo(x: float | int | str) -> None: ...
/// ```
///
/// Use instead:
///
/// ```pyi
/// def foo(x: float | str) -> None: ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe, unless the type annotation contains comments.
///
/// Note that while the fix may flatten nested unions into a single top-level union,
/// the semantics of the annotation will remain unchanged.
///
/// ## References
/// - [Python documentation: The numeric tower](https://docs.python.org/3/library/numbers.html#the-numeric-tower)
/// - [PEP 484: The numeric tower](https://peps.python.org/pep-0484/#the-numeric-tower)
///
/// [typing specification]: https://typing.python.org/en/latest/spec/special-types.html#special-cases-for-float-and-complex
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.279")]
pub(crate) struct RedundantNumericUnion {
    // Which combination of builtin numeric types made the union redundant.
    redundancy: Redundancy,
}

impl Violation for RedundantNumericUnion {
    // Not always fixable: `check_annotation` withholds the fix for unions that
    // would start with `None | None` in a runtime context, and when no `Union`
    // importer is available.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        // `subtype` is the redundant member(s); `supertype` subsumes them per
        // the typing spec's numeric-tower special case.
        let (subtype, supertype) = match self.redundancy {
            Redundancy::IntFloatComplex => ("int | float", "complex"),
            Redundancy::FloatComplex => ("float", "complex"),
            Redundancy::IntComplex => ("int", "complex"),
            Redundancy::IntFloat => ("int", "float"),
        };
        format!("Use `{supertype}` instead of `{subtype} | {supertype}`")
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove redundant type".to_string())
    }
}
/// PYI041
pub(crate) fn redundant_numeric_union(checker: &Checker, parameters: &Parameters) {
    // Inspect every annotated parameter (positional, keyword-only, vararg,
    // and kwarg alike); unannotated parameters are skipped.
    for parameter in parameters.iter() {
        if let Some(annotation) = parameter.annotation() {
            check_annotation(checker, annotation);
        }
    }
}
/// Inspect a single parameter annotation: detect a redundant numeric union,
/// report it, and — where possible — attach a fix dropping the redundant
/// member(s).
fn check_annotation<'a>(checker: &Checker, annotation: &'a Expr) {
    let mut numeric_flags = NumericFlags::empty();

    let mut find_numeric_type = |expr: &Expr, _parent: &Expr| {
        let Some(builtin_type) = checker.semantic().resolve_builtin_symbol(expr) else {
            return;
        };

        numeric_flags.seen_builtin_type(builtin_type);
    };

    // Traverse the union, and remember which numeric types are found.
    traverse_union(&mut find_numeric_type, checker.semantic(), annotation);

    let Some(redundancy) = Redundancy::from_numeric_flags(numeric_flags) else {
        return;
    };

    // Traverse the union a second time to construct the fix.
    let mut necessary_nodes: Vec<&Expr> = Vec::new();
    // Assume `typing.Union` spelling until a `|` (BinOp) parent proves PEP 604.
    let mut union_type = UnionKind::TypingUnion;

    let mut remove_numeric_type = |expr: &'a Expr, parent: &'a Expr| {
        let Some(builtin_type) = checker.semantic().resolve_builtin_symbol(expr) else {
            // Keep type annotations that are not numeric.
            necessary_nodes.push(expr);
            return;
        };

        if matches!(parent, Expr::BinOp(_)) {
            union_type = UnionKind::PEP604;
        }

        // `int` is always dropped, since `float` or `complex` must be present.
        // `float` is only dropped if `complex` is present.
        if (builtin_type == "float" && !numeric_flags.contains(NumericFlags::COMPLEX))
            || (builtin_type != "float" && builtin_type != "int")
        {
            necessary_nodes.push(expr);
        }
    };

    // Traverse the union a second time to construct a [`Fix`].
    traverse_union(&mut remove_numeric_type, checker.semantic(), annotation);

    let mut diagnostic =
        checker.report_diagnostic(RedundantNumericUnion { redundancy }, annotation.range());

    if !checker.semantic().execution_context().is_typing()
        && !checker.source_type.is_stub()
        && fix_starts_with_none_none(&necessary_nodes)
    {
        // If there are multiple `None` literals, we cannot apply the fix in a runtime context.
        // E.g., `None | None | int` will cause a `RuntimeError`.
        // (The diagnostic itself still stands; it is just emitted without a fix.)
        return;
    }

    // Mark [`Fix`] as unsafe when comments are in range.
    let applicability = if checker.comment_ranges().intersects(annotation.range()) {
        Applicability::Unsafe
    } else {
        Applicability::Safe
    };

    // Generate the flattened fix once.
    let fix = if let &[edit_expr] = necessary_nodes.as_slice() {
        // Generate a [`Fix`] for a single type expression, e.g. `int`.
        Some(Fix::applicable_edit(
            Edit::range_replacement(checker.generator().expr(edit_expr), annotation.range()),
            applicability,
        ))
    } else {
        match union_type {
            UnionKind::PEP604 => Some(generate_pep604_fix(
                checker,
                necessary_nodes,
                annotation,
                applicability,
            )),
            UnionKind::TypingUnion => {
                // Without an importable `Union`, there is no valid spelling
                // for the reduced annotation: emit the diagnostic fix-less.
                let Some(importer) = checker.typing_importer("Union", PythonVersion::lowest())
                else {
                    return;
                };
                generate_union_fix(
                    checker.generator(),
                    &importer,
                    necessary_nodes,
                    annotation,
                    applicability,
                )
                .ok()
            }
        }
    };

    if let Some(fix) = fix {
        diagnostic.set_fix(fix);
    }
}
/// The specific redundant combination of builtin numeric types found.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum Redundancy {
    /// `int`, `float`, and `complex` all present; `complex` suffices.
    IntFloatComplex,
    /// `float` and `complex` present; `complex` suffices.
    FloatComplex,
    /// `int` and `complex` present; `complex` suffices.
    IntComplex,
    /// `int` and `float` present; `float` suffices.
    IntFloat,
}
impl Redundancy {
    /// Classify the observed numeric types; returns `None` when no redundant
    /// combination (at least two distinct numeric members) is present.
    pub(super) fn from_numeric_flags(numeric_flags: NumericFlags) -> Option<Self> {
        match (
            numeric_flags.contains(NumericFlags::INT),
            numeric_flags.contains(NumericFlags::FLOAT),
            numeric_flags.contains(NumericFlags::COMPLEX),
        ) {
            (true, true, true) => Some(Self::IntFloatComplex),
            (false, true, true) => Some(Self::FloatComplex),
            (true, false, true) => Some(Self::IntComplex),
            (true, true, false) => Some(Self::IntFloat),
            _ => None,
        }
    }
}
bitflags! {
    /// The set of builtin numeric types observed while traversing a union.
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub(super) struct NumericFlags: u8 {
        /// `int`
        const INT = 1 << 0;
        /// `float`
        const FLOAT = 1 << 1;
        /// `complex`
        const COMPLEX = 1 << 2;
    }
}
impl NumericFlags {
    /// Record `name` if it is one of the builtin numeric types; any other
    /// builtin is ignored.
    pub(super) fn seen_builtin_type(&mut self, name: &str) {
        let flag = match name {
            "int" => Some(NumericFlags::INT),
            "float" => Some(NumericFlags::FLOAT),
            "complex" => Some(NumericFlags::COMPLEX),
            _ => None,
        };
        if let Some(flag) = flag {
            self.insert(flag);
        }
    }
}
/// Which union spelling the annotation uses; determines the fix's syntax.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UnionKind {
    /// E.g., `typing.Union[int, str]`
    TypingUnion,
    /// E.g., `int | str` (PEP 604 syntax)
    PEP604,
}
/// Generate a [`Fix`] for two or more type expressions, e.g. `int | float | complex`.
///
/// The surviving union members are folded into a single left-associated
/// `a | b | c` chain of `BinOp` nodes, which then replaces the whole
/// annotation.
fn generate_pep604_fix(
    checker: &Checker,
    nodes: Vec<&Expr>,
    annotation: &Expr,
    applicability: Applicability,
) -> Fix {
    debug_assert!(nodes.len() >= 2, "At least two nodes required");

    // `reduce` expresses the left-fold directly and avoids the manual
    // `fold(None, ...)` + `unwrap` dance; the caller guarantees `nodes` is
    // non-empty (see the debug_assert above).
    let new_expr = nodes
        .into_iter()
        .cloned()
        .reduce(|left, right| {
            Expr::BinOp(ExprBinOp {
                left: Box::new(left),
                op: Operator::BitOr,
                right: Box::new(right),
                range: TextRange::default(),
                node_index: ruff_python_ast::AtomicNodeIndex::NONE,
            })
        })
        .expect("`nodes` should contain at least two elements");

    Fix::applicable_edit(
        Edit::range_replacement(checker.generator().expr(&new_expr), annotation.range()),
        applicability,
    )
}
/// Check whether the proposed fix starts with two `None` literals.
fn fix_starts_with_none_none(nodes: &[&Expr]) -> bool {
    matches!(
        nodes,
        [first, second, ..] if first.is_none_literal_expr() && second.is_none_literal_expr()
    )
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/non_self_return_type.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/non_self_return_type.rs | use crate::checkers::ast::{Checker, TypingImporter};
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::PythonVersion;
use ruff_python_ast::helpers::map_subscript;
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze;
use ruff_python_semantic::analyze::class::might_be_generic;
use ruff_python_semantic::analyze::visibility::{is_abstract, is_final, is_overload};
use ruff_python_semantic::{ScopeKind, SemanticModel};
use ruff_text_size::Ranged;
/// ## What it does
/// Checks for methods that are annotated with a fixed return type which
/// should instead be returning `Self`.
///
/// ## Why is this bad?
/// If methods that generally return `self` at runtime are annotated with a
/// fixed return type, and the class is subclassed, type checkers will not be
/// able to infer the correct return type.
///
/// For example:
/// ```python
/// class Shape:
/// def set_scale(self, scale: float) -> Shape:
/// self.scale = scale
/// return self
///
/// class Circle(Shape):
/// def set_radius(self, radius: float) -> Circle:
/// self.radius = radius
/// return self
///
/// # Type checker infers return type as `Shape`, not `Circle`.
/// Circle().set_scale(0.5)
///
/// # Thus, this expression is invalid, as `Shape` has no attribute `set_radius`.
/// Circle().set_scale(0.5).set_radius(2.7)
/// ```
///
/// Specifically, this check enforces that the return type of the following
/// methods is `Self`:
///
/// 1. In-place binary-operation dunder methods, like `__iadd__`, `__imul__`, etc.
/// 1. `__new__`, `__enter__`, and `__aenter__`, if those methods return the
/// class name.
/// 1. `__iter__` methods that return `Iterator`, despite the class inheriting
/// directly from `Iterator`.
/// 1. `__aiter__` methods that return `AsyncIterator`, despite the class
/// inheriting directly from `AsyncIterator`.
///
/// The rule attempts to avoid flagging methods on metaclasses, since
/// [PEP 673] specifies that `Self` is disallowed in metaclasses. Ruff can
/// detect a class as being a metaclass if it inherits from a stdlib
/// metaclass such as `builtins.type` or `abc.ABCMeta`, and additionally
/// infers that a class may be a metaclass if it has a `__new__` method
/// with a similar signature to `type.__new__`. The heuristic used to
/// identify a metaclass-like `__new__` method signature is that it:
///
/// 1. Has exactly 5 parameters (including `cls`)
/// 1. Has a second parameter annotated with `str`
/// 1. Has a third parameter annotated with a `tuple` type
/// 1. Has a fourth parameter annotated with a `dict` type
/// 1. Has a fifth parameter is keyword-variadic (`**kwargs`)
///
/// For example, the following class would be detected as a metaclass, disabling
/// the rule:
///
/// ```python
/// class MyMetaclass(django.db.models.base.ModelBase):
/// def __new__(cls, name: str, bases: tuple[Any, ...], attrs: dict[str, Any], **kwargs: Any) -> MyMetaclass:
/// ...
/// ```
///
/// ## Example
///
/// ```pyi
/// class Foo:
/// def __new__(cls, *args: Any, **kwargs: Any) -> Foo: ...
/// def __enter__(self) -> Foo: ...
/// async def __aenter__(self) -> Foo: ...
/// def __iadd__(self, other: Foo) -> Foo: ...
/// ```
///
/// Use instead:
///
/// ```pyi
/// from typing_extensions import Self
///
/// class Foo:
/// def __new__(cls, *args: Any, **kwargs: Any) -> Self: ...
/// def __enter__(self) -> Self: ...
/// async def __aenter__(self) -> Self: ...
/// def __iadd__(self, other: Foo) -> Self: ...
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe as it changes the meaning of your type annotations.
///
/// ## Availability
///
/// Because this rule relies on the third-party `typing_extensions` module for Python versions
/// before 3.11, its diagnostic will not be emitted, and no fix will be offered, if
/// `typing_extensions` imports have been disabled by the [`lint.typing-extensions`] linter option.
///
/// ## Options
///
/// - `lint.typing-extensions`
///
/// ## References
/// - [Python documentation: `typing.Self`](https://docs.python.org/3/library/typing.html#typing.Self)
///
/// [PEP 673]: https://peps.python.org/pep-0673/#valid-locations-for-self
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct NonSelfReturnType {
    // Name of the class containing the offending method (used in the message).
    class_name: String,
    // Name of the method whose return annotation should be `Self`.
    method_name: String,
}
impl Violation for NonSelfReturnType {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        let NonSelfReturnType {
            class_name,
            method_name,
        } = self;
        // `__new__` is a *method* name: the special-cased message must key off
        // `method_name`. (Previously this inspected `class_name`, which holds
        // the enclosing class's name — see `add_diagnostic` — and can never be
        // `__new__`, making the dedicated `__new__` message unreachable.)
        if matches!(method_name.as_str(), "__new__") {
            "`__new__` methods usually return `self` at runtime".to_string()
        } else {
            format!(
                "`{method_name}` methods in classes like `{class_name}` usually return `self` at runtime"
            )
        }
    }

    fn fix_title(&self) -> Option<String> {
        Some("Use `Self` as return type".to_string())
    }
}
/// PYI034
pub(crate) fn non_self_return_type(
    checker: &Checker,
    stmt: &ast::Stmt,
    is_async: bool,
    name: &str,
    decorator_list: &[ast::Decorator],
    returns: Option<&ast::Expr>,
    parameters: &ast::Parameters,
) {
    let semantic = checker.semantic();

    // Only functions defined directly inside a class body (i.e. methods) count.
    let ScopeKind::Class(class_def) = semantic.current_scope().kind else {
        return;
    };

    // A method with no positional parameters has no `self`/`cls` to return.
    if parameters.args.is_empty() && parameters.posonlyargs.is_empty() {
        return;
    }

    // Only explicitly annotated return types are checked.
    let Some(returns) = returns else {
        return;
    };

    // PEP 673 forbids the use of `typing(_extensions).Self` in metaclasses.
    if !matches!(
        analyze::class::is_metaclass(class_def, semantic),
        analyze::class::IsMetaclass::No
    ) {
        return;
    }

    // Skip any abstract or overloaded methods.
    if is_abstract(decorator_list, semantic) || is_overload(decorator_list, semantic) {
        return;
    }

    // The only async method covered is `__aenter__` returning the class's own
    // name; `@final` classes are exempt (their annotation is already exact).
    if is_async {
        if name == "__aenter__"
            && is_name(returns, &class_def.name)
            && !is_final(&class_def.decorator_list, semantic)
        {
            add_diagnostic(checker, stmt, returns, class_def, name);
        }
        return;
    }

    // In-place methods that are expected to return `Self`.
    if is_inplace_bin_op(name) {
        if !is_self(returns, checker) {
            add_diagnostic(checker, stmt, returns, class_def, name);
        }
        return;
    }

    // `__enter__`/`__new__` annotated with the class's own name (again
    // exempting `@final` classes).
    if is_name(returns, &class_def.name) {
        if matches!(name, "__enter__" | "__new__") && !is_final(&class_def.decorator_list, semantic)
        {
            add_diagnostic(checker, stmt, returns, class_def, name);
        }
        return;
    }

    // `__iter__`/`__aiter__` returning the very iterator ABC the class
    // directly subclasses: `Self` is the more precise annotation.
    match name {
        "__iter__" => {
            if is_iterable_or_iterator(returns, semantic)
                && subclasses_iterator(class_def, semantic)
            {
                add_diagnostic(checker, stmt, returns, class_def, name);
            }
        }
        "__aiter__" => {
            if is_async_iterable_or_iterator(returns, semantic)
                && subclasses_async_iterator(class_def, semantic)
            {
                add_diagnostic(checker, stmt, returns, class_def, name);
            }
        }
        _ => {}
    }
}
/// Report a [`NonSelfReturnType`] diagnostic for the given method and attach
/// the `Self` fix.
fn add_diagnostic(
    checker: &Checker,
    stmt: &ast::Stmt,
    returns: &ast::Expr,
    class_def: &ast::StmtClassDef,
    method_name: &str,
) {
    // If `Self` cannot be imported, neither the fix nor the diagnostic is
    // emitted (see the rule's "Availability" docs).
    let Some(importer) = checker.typing_importer("Self", PythonVersion::PY311) else {
        return;
    };

    let violation = NonSelfReturnType {
        class_name: class_def.name.to_string(),
        method_name: method_name.to_string(),
    };
    let mut diagnostic = checker.report_diagnostic(violation, stmt.identifier());
    diagnostic.try_set_fix(|| {
        replace_with_self_fix(checker.semantic(), &importer, stmt, returns, class_def)
    });
}
/// Build the fix: import `Self`, replace the return annotation with it, and —
/// when the first parameter is explicitly annotated with the class itself —
/// delete that annotation as well.
fn replace_with_self_fix(
    semantic: &SemanticModel,
    importer: &TypingImporter,
    stmt: &ast::Stmt,
    returns: &ast::Expr,
    class_def: &ast::StmtClassDef,
) -> anyhow::Result<Fix> {
    let (self_import, self_binding) = importer.import(returns.start())?;

    let mut others = Vec::with_capacity(2);

    // Drops e.g. `self: MyClass` / `self: type[MyClass]` annotations, keeping
    // the signature consistent with the new `Self` return type.
    let remove_first_argument_type_hint = || -> Option<Edit> {
        let ast::StmtFunctionDef { parameters, .. } = stmt.as_function_def_stmt()?;
        let first = parameters.iter().next()?;
        let annotation = first.annotation()?;
        is_class_reference(semantic, annotation, &class_def.name)
            .then(|| Edit::deletion(first.name().end(), annotation.end()))
    };

    others.extend(remove_first_argument_type_hint());
    others.push(Edit::range_replacement(self_binding, returns.range()));

    // NOTE(review): for possibly-generic classes the fix is display-only —
    // presumably because `Self` may not be an exact replacement there.
    let applicability = if might_be_generic(class_def, semantic) {
        Applicability::DisplayOnly
    } else {
        Applicability::Unsafe
    };

    Ok(Fix::applicable_edits(self_import, others, applicability))
}
/// Return true if `annotation` is either `ClassName` or `type[ClassName]`
fn is_class_reference(semantic: &SemanticModel, annotation: &ast::Expr, expected: &str) -> bool {
    // Bare reference: `ClassName`.
    if is_name(annotation, expected) {
        return true;
    }
    // Subscripted reference: `type[ClassName]` / `Type[ClassName]`.
    match annotation {
        ast::Expr::Subscript(ast::ExprSubscript { value, slice, .. })
            if semantic.match_builtin_expr(value, "type")
                || semantic.match_typing_expr(value, "Type") =>
        {
            is_name(slice, expected)
        }
        _ => false,
    }
}
/// Returns `true` if the method is an in-place binary operator.
fn is_inplace_bin_op(name: &str) -> bool {
    // The `__i*__` dunders that augmented assignment (`+=`, `-=`, ...) calls.
    const INPLACE_DUNDERS: &[&str] = &[
        "__iadd__",
        "__isub__",
        "__imul__",
        "__imatmul__",
        "__itruediv__",
        "__ifloordiv__",
        "__imod__",
        "__ipow__",
        "__ilshift__",
        "__irshift__",
        "__iand__",
        "__ixor__",
        "__ior__",
    ];
    INPLACE_DUNDERS.contains(&name)
}
/// Return `true` if the given expression resolves to the given name.
fn is_name(expr: &ast::Expr, name: &str) -> bool {
    matches!(expr, ast::Expr::Name(ast::ExprName { id, .. }) if id.as_str() == name)
}
/// Return `true` if the given expression resolves to `typing.Self`.
/// Handles stringized annotations (e.g. `-> "Self"`) as well as bare ones.
fn is_self(expr: &ast::Expr, checker: &Checker) -> bool {
    checker.match_maybe_stringized_annotation(expr, |expr| {
        checker.semantic().match_typing_expr(expr, "Self")
    })
}
/// Return `true` if the given class extends `collections.abc.Iterator`.
fn subclasses_iterator(class_def: &ast::StmtClassDef, semantic: &SemanticModel) -> bool {
analyze::class::any_qualified_base_class(class_def, semantic, &|qualified_name| {
matches!(
qualified_name.segments(),
["typing", "Iterator"] | ["collections", "abc", "Iterator"]
)
})
}
/// Return `true` if the given expression resolves to `collections.abc.Iterable` or `collections.abc.Iterator`.
fn is_iterable_or_iterator(expr: &ast::Expr, semantic: &SemanticModel) -> bool {
semantic
.resolve_qualified_name(map_subscript(expr))
.is_some_and(|qualified_name| {
matches!(
qualified_name.segments(),
["typing", "Iterable" | "Iterator"]
| ["collections", "abc", "Iterable" | "Iterator"]
)
})
}
/// Return `true` if the given class extends `collections.abc.AsyncIterator`.
///
/// Async counterpart of `subclasses_iterator`; matches either the `typing`
/// or `collections.abc` spelling among the resolved base classes.
fn subclasses_async_iterator(class_def: &ast::StmtClassDef, semantic: &SemanticModel) -> bool {
    analyze::class::any_qualified_base_class(class_def, semantic, &|qualified_name| {
        matches!(
            qualified_name.segments(),
            ["typing", "AsyncIterator"] | ["collections", "abc", "AsyncIterator"]
        )
    })
}
/// Return `true` if the given expression resolves to `collections.abc.AsyncIterable` or `collections.abc.AsyncIterator`.
///
/// Async counterpart of `is_iterable_or_iterator`; `map_subscript` strips a
/// subscript so parameterized forms like `AsyncIterable[int]` also match.
fn is_async_iterable_or_iterator(expr: &ast::Expr, semantic: &SemanticModel) -> bool {
    semantic
        .resolve_qualified_name(map_subscript(expr))
        .is_some_and(|qualified_name| {
            matches!(
                qualified_name.segments(),
                ["typing", "AsyncIterable" | "AsyncIterator"]
                    | ["collections", "abc", "AsyncIterable" | "AsyncIterator"]
            )
        })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Stmt;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for `pass` statements in empty stub bodies.
///
/// ## Why is this bad?
/// For stylistic consistency, `...` should always be used rather than `pass`
/// in stub files.
///
/// ## Example
/// ```pyi
/// def foo(bar: int) -> list[int]: pass
/// ```
///
/// Use instead:
/// ```pyi
/// def foo(bar: int) -> list[int]: ...
/// ```
///
/// ## References
/// - [Typing documentation - Writing and Maintaining Stub Files](https://typing.python.org/en/latest/guides/writing_stubs.html)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.253")]
pub(crate) struct PassStatementStubBody;
impl AlwaysFixableViolation for PassStatementStubBody {
#[derive_message_formats]
fn message(&self) -> String {
"Empty body should contain `...`, not `pass`".to_string()
}
fn fix_title(&self) -> String {
"Replace `pass` with `...`".to_string()
}
}
/// PYI009
///
/// Report a `pass` that makes up an entire stub body, and offer to replace it
/// with `...`.
pub(crate) fn pass_statement_stub_body(checker: &Checker, body: &[Stmt]) {
    // Only fire when the body is exactly one `pass` statement.
    let [Stmt::Pass(pass)] = body else {
        return;
    };
    let replacement = Edit::range_replacement("...".to_string(), pass.range());
    checker
        .report_diagnostic(PassStatementStubBody, pass.range())
        .set_fix(Fix::safe_edit(replacement));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/str_or_repr_defined_in_stub.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/str_or_repr_defined_in_stub.rs | use ruff_python_ast as ast;
use ruff_python_ast::Stmt;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze::visibility::is_abstract;
use crate::checkers::ast::Checker;
use crate::fix::edits::delete_stmt;
use crate::{AlwaysFixableViolation, Fix};
/// ## What it does
/// Checks for redundant definitions of `__str__` or `__repr__` in stubs.
///
/// ## Why is this bad?
/// Defining `__str__` or `__repr__` in a stub is almost always redundant,
/// as the signatures are almost always identical to those of the default
/// equivalent, `object.__str__` and `object.__repr__`, respectively.
///
/// ## Example
///
/// ```pyi
/// class Foo:
/// def __repr__(self) -> str: ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct StrOrReprDefinedInStub {
name: String,
}
impl AlwaysFixableViolation for StrOrReprDefinedInStub {
#[derive_message_formats]
fn message(&self) -> String {
let StrOrReprDefinedInStub { name } = self;
format!("Defining `{name}` in a stub is almost always redundant")
}
fn fix_title(&self) -> String {
let StrOrReprDefinedInStub { name } = self;
format!("Remove definition of `{name}`")
}
}
/// PYI029
///
/// Flag `__str__`/`__repr__` stub methods whose signature exactly matches the
/// default `object` implementation, and offer to delete them.
pub(crate) fn str_or_repr_defined_in_stub(checker: &Checker, stmt: &Stmt) {
    let Stmt::FunctionDef(ast::StmtFunctionDef {
        name,
        decorator_list,
        returns,
        parameters,
        ..
    }) = stmt
    else {
        return;
    };
    // Without a return annotation we can't confirm the signature matches
    // `object`'s, so bail out.
    let Some(returns) = returns else {
        return;
    };
    if !matches!(name.as_str(), "__str__" | "__repr__") {
        return;
    }
    // Only definitions directly inside a class body (i.e. methods) are relevant.
    if !checker.semantic().current_scope().kind.is_class() {
        return;
    }
    // It is a violation only if the method signature matches that of `object.__str__`
    // or `object.__repr__` exactly and the method is not decorated as abstract.
    if !parameters.kwonlyargs.is_empty()
        || (parameters.args.len() + parameters.posonlyargs.len()) > 1
    {
        return;
    }
    // Abstract overrides are meaningful — they force subclasses to implement.
    if is_abstract(decorator_list, checker.semantic()) {
        return;
    }
    // The default implementations return `str`; any other return type is a
    // genuine override worth keeping.
    if !checker.semantic().match_builtin_expr(returns, "str") {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(
        StrOrReprDefinedInStub {
            name: name.to_string(),
        },
        stmt.identifier(),
    );
    // Delete the whole statement, isolating the fix so that overlapping
    // deletions within the same parent suite don't conflict.
    let stmt = checker.semantic().current_statement();
    let parent = checker.semantic().current_statement_parent();
    let edit = delete_stmt(stmt, parent, checker.locator(), checker.indexer());
    diagnostic.set_fix(Fix::safe_edit(edit).isolate(Checker::isolation(
        checker.semantic().current_statement_parent_id(),
    )));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/unrecognized_version_info.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/unrecognized_version_info.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::map_subscript;
use ruff_python_ast::{self as ast, CmpOp, Expr, Int};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
/// ## What it does
/// Checks for problematic `sys.version_info`-related conditions in stubs.
///
/// ## Why is this bad?
/// Stub files support simple conditionals to test for differences in Python
/// versions using `sys.version_info`. However, there are a number of common
/// mistakes involving `sys.version_info` comparisons that should be avoided.
/// For example, comparing against a string can lead to unexpected behavior.
///
/// ## Example
/// ```pyi
/// import sys
///
/// if sys.version_info[0] == "2": ...
/// ```
///
/// Use instead:
/// ```pyi
/// import sys
///
/// if sys.version_info[0] == 2: ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and Platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct UnrecognizedVersionInfoCheck;
impl Violation for UnrecognizedVersionInfoCheck {
#[derive_message_formats]
fn message(&self) -> String {
"Unrecognized `sys.version_info` check".to_string()
}
}
/// ## What it does
/// Checks for Python version comparisons in stubs that compare against patch
/// versions (e.g., Python 3.8.3) instead of major and minor versions (e.g.,
/// Python 3.8).
///
/// ## Why is this bad?
/// Stub files support simple conditionals to test for differences in Python
/// versions and platforms. However, type checkers only understand a limited
/// subset of these conditionals. In particular, type checkers don't support
/// patch versions (e.g., Python 3.8.3), only major and minor versions (e.g.,
/// Python 3.8). Therefore, version checks in stubs should only use the major
/// and minor versions.
///
/// ## Example
/// ```pyi
/// import sys
///
/// if sys.version_info >= (3, 4, 3): ...
/// ```
///
/// Use instead:
/// ```pyi
/// import sys
///
/// if sys.version_info >= (3, 4): ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and Platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct PatchVersionComparison;
impl Violation for PatchVersionComparison {
#[derive_message_formats]
fn message(&self) -> String {
"Version comparison must use only major and minor version".to_string()
}
}
/// ## What it does
/// Checks for Python version comparisons that compare against a tuple of the
/// wrong length.
///
/// ## Why is this bad?
/// Stub files support simple conditionals to test for differences in Python
/// versions and platforms. When comparing against `sys.version_info`, avoid
/// comparing against tuples of the wrong length, which can lead to unexpected
/// behavior.
///
/// ## Example
/// ```pyi
/// import sys
///
/// if sys.version_info[:2] == (3,): ...
/// ```
///
/// Use instead:
/// ```pyi
/// import sys
///
/// if sys.version_info[0] == 3: ...
/// ```
///
/// ## References
/// - [Typing documentation: Version and Platform checking](https://typing.python.org/en/latest/spec/directives.html#version-and-platform-checks)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.276")]
pub(crate) struct WrongTupleLengthVersionComparison {
expected_length: usize,
}
impl Violation for WrongTupleLengthVersionComparison {
#[derive_message_formats]
fn message(&self) -> String {
let WrongTupleLengthVersionComparison { expected_length } = self;
format!("Version comparison must be against a length-{expected_length} tuple")
}
}
/// PYI003, PYI004, PYI005
///
/// Entry point: validates a `sys.version_info` comparison and dispatches to
/// `version_check` based on the shape of the left-hand side.
pub(crate) fn unrecognized_version_info(checker: &Checker, test: &Expr) {
    let Expr::Compare(ast::ExprCompare {
        left,
        ops,
        comparators,
        ..
    }) = test
    else {
        return;
    };
    // Chained comparisons (`a < b < c`) are not recognized version checks.
    let ([op], [comparator]) = (&**ops, &**comparators) else {
        return;
    };
    // The left-hand side must resolve to `sys.version_info`, optionally
    // subscripted (e.g. `sys.version_info[:2]`, `sys.version_info[0]`).
    if !checker
        .semantic()
        .resolve_qualified_name(map_subscript(left))
        .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["sys", "version_info"]))
    {
        return;
    }
    if let Some(expected) = ExpectedComparator::try_from(left) {
        version_check(checker, expected, test, *op, comparator);
    } else {
        // The subscript form isn't one of the recognized patterns (PYI003).
        checker.report_diagnostic_if_enabled(UnrecognizedVersionInfoCheck, test.range());
    }
}
/// Validate `comparator` against the shape expected from the left-hand side,
/// emitting PYI003 (unrecognized check), PYI004 (patch-version comparison),
/// or PYI005 (wrong tuple length) as appropriate.
fn version_check(
    checker: &Checker,
    expected: ExpectedComparator,
    test: &Expr,
    op: CmpOp,
    comparator: &Expr,
) {
    // Single digit comparison, e.g., `sys.version_info[0] == 2`.
    if expected == ExpectedComparator::MajorDigit {
        if !is_int_constant(comparator) {
            checker.report_diagnostic_if_enabled(UnrecognizedVersionInfoCheck, test.range());
        }
        return;
    }
    // Tuple comparison, e.g., `sys.version_info == (3, 4)`.
    let Expr::Tuple(tuple) = comparator else {
        checker.report_diagnostic_if_enabled(UnrecognizedVersionInfoCheck, test.range());
        return;
    };
    if !tuple.iter().all(is_int_constant) {
        // All tuple elements must be integers, e.g., `sys.version_info == (3, 4)` instead of
        // `sys.version_info == (3.0, 4)`.
        checker.report_diagnostic_if_enabled(UnrecognizedVersionInfoCheck, test.range());
    } else if tuple.len() > 2 {
        // Must compare against major and minor version only, e.g., `sys.version_info == (3, 4)`
        // instead of `sys.version_info == (3, 4, 0)`.
        checker.report_diagnostic_if_enabled(PatchVersionComparison, test.range());
    }
    // PYI005: for (in)equality checks against a sliced `version_info`, the
    // tuple length must match the slice length exactly.
    if checker.is_rule_enabled(Rule::WrongTupleLengthVersionComparison) {
        if op == CmpOp::Eq || op == CmpOp::NotEq {
            let expected_length = match expected {
                ExpectedComparator::MajorTuple => 1,
                ExpectedComparator::MajorMinorTuple => 2,
                _ => return,
            };
            if tuple.len() != expected_length {
                checker.report_diagnostic(
                    WrongTupleLengthVersionComparison { expected_length },
                    test.range(),
                );
            }
        }
    }
}
/// The comparator shape implied by the left-hand side of a
/// `sys.version_info` comparison.
#[derive(Copy, Clone, Eq, PartialEq)]
enum ExpectedComparator {
    /// `sys.version_info[0]` — compared against a single integer.
    MajorDigit,
    /// `sys.version_info[:1]` — compared against a 1-tuple.
    MajorTuple,
    /// `sys.version_info[:2]` — compared against a 2-tuple.
    MajorMinorTuple,
    /// Bare `sys.version_info` — compared against a tuple of any length.
    AnyTuple,
}
impl ExpectedComparator {
    /// Returns the expected comparator for the given expression, if any.
    ///
    /// Note: this is an inherent method, not the `TryFrom` trait; `None`
    /// means the subscript form is unrecognized (PYI003).
    fn try_from(expr: &Expr) -> Option<Self> {
        let Expr::Subscript(ast::ExprSubscript { slice, .. }) = expr else {
            return Some(ExpectedComparator::AnyTuple);
        };
        // Only allow: (1) simple slices of the form `[:n]`, or (2) explicit indexing into the first
        // element (major version) of the tuple.
        match slice.as_ref() {
            Expr::Slice(ast::ExprSlice {
                lower: None,
                upper: Some(upper),
                step: None,
                ..
            }) => {
                if let Expr::NumberLiteral(ast::ExprNumberLiteral {
                    value: ast::Number::Int(upper),
                    ..
                }) = upper.as_ref()
                {
                    if *upper == 1 {
                        return Some(ExpectedComparator::MajorTuple);
                    }
                    if *upper == 2 {
                        return Some(ExpectedComparator::MajorMinorTuple);
                    }
                }
            }
            Expr::NumberLiteral(ast::ExprNumberLiteral {
                value: ast::Number::Int(Int::ZERO),
                ..
            }) => {
                return Some(ExpectedComparator::MajorDigit);
            }
            _ => (),
        }
        None
    }
}
/// Returns `true` if the given expression is an integer literal.
fn is_int_constant(expr: &Expr) -> bool {
    if let Expr::NumberLiteral(ast::ExprNumberLiteral { value, .. }) = expr {
        matches!(value, ast::Number::Int(_))
    } else {
        false
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/iter_method_return_iterable.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/iter_method_return_iterable.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::map_subscript;
use ruff_text_size::Ranged;
use ruff_python_semantic::{Definition, Member, MemberKind};
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for `__iter__` methods in stubs that return `Iterable[T]` instead
/// of an `Iterator[T]`.
///
/// ## Why is this bad?
/// `__iter__` methods should always return an `Iterator` of some kind,
/// not an `Iterable`.
///
/// In Python, an `Iterable` is an object that has an `__iter__` method; an
/// `Iterator` is an object that has `__iter__` and `__next__` methods. All
/// `__iter__` methods are expected to return `Iterator`s. Type checkers may
/// not always recognize an object as being iterable if its `__iter__` method
/// does not return an `Iterator`.
///
/// Every `Iterator` is an `Iterable`, but not every `Iterable` is an `Iterator`.
/// For example, `list` is an `Iterable`, but not an `Iterator`; you can obtain
/// an iterator over a list's elements by passing the list to `iter()`:
///
/// ```pycon
/// >>> import collections.abc
/// >>> x = [42]
/// >>> isinstance(x, collections.abc.Iterable)
/// True
/// >>> isinstance(x, collections.abc.Iterator)
/// False
/// >>> next(x)
/// Traceback (most recent call last):
/// File "<stdin>", line 1, in <module>
/// TypeError: 'list' object is not an iterator
/// >>> y = iter(x)
/// >>> isinstance(y, collections.abc.Iterable)
/// True
/// >>> isinstance(y, collections.abc.Iterator)
/// True
/// >>> next(y)
/// 42
/// ```
///
/// Using `Iterable` rather than `Iterator` as a return type for an `__iter__`
/// methods would imply that you would not necessarily be able to call `next()`
/// on the returned object, violating the expectations of the interface.
///
/// ## Example
///
/// ```python
/// import collections.abc
///
///
/// class Klass:
/// def __iter__(self) -> collections.abc.Iterable[str]: ...
/// ```
///
/// Use instead:
///
/// ```python
/// import collections.abc
///
///
/// class Klass:
/// def __iter__(self) -> collections.abc.Iterator[str]: ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct IterMethodReturnIterable {
is_async: bool,
}
impl Violation for IterMethodReturnIterable {
#[derive_message_formats]
fn message(&self) -> String {
if self.is_async {
"`__aiter__` methods should return an `AsyncIterator`, not an `AsyncIterable`"
.to_string()
} else {
"`__iter__` methods should return an `Iterator`, not an `Iterable`".to_string()
}
}
}
/// PYI045
///
/// Flag `__iter__`/`__aiter__` methods annotated as returning an
/// `(Async)Iterable` rather than an `(Async)Iterator`.
pub(crate) fn iter_method_return_iterable(checker: &Checker, definition: &Definition) {
    let Definition::Member(Member {
        kind: MemberKind::Method(function),
        ..
    }) = definition
    else {
        return;
    };
    // Without a return annotation there is nothing to check.
    let Some(returns) = function.returns.as_ref() else {
        return;
    };
    let is_async = match function.name.as_str() {
        "__iter__" => false,
        "__aiter__" => true,
        _ => return,
    };
    // Support both `Iterable` and `Iterable[T]`.
    let annotation = map_subscript(returns);
    if checker
        .semantic()
        .resolve_qualified_name(map_subscript(annotation))
        .is_some_and(|qualified_name| {
            if is_async {
                matches!(
                    qualified_name.segments(),
                    ["typing", "AsyncIterable"] | ["collections", "abc", "AsyncIterable"]
                )
            } else {
                matches!(
                    qualified_name.segments(),
                    ["typing", "Iterable"] | ["collections", "abc", "Iterable"]
                )
            }
        })
    {
        checker.report_diagnostic(IterMethodReturnIterable { is_async }, returns.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/pre_pep570_positional_argument.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/pre_pep570_positional_argument.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_python_ast::identifier::Identifier;
use ruff_python_semantic::analyze::function_type;
use crate::Violation;
use crate::checkers::ast::Checker;
use ruff_python_ast::PythonVersion;
/// ## What it does
/// Checks for the presence of [PEP 484]-style positional-only parameters.
///
/// ## Why is this bad?
/// Historically, [PEP 484] recommended prefixing parameter names with double
/// underscores (`__`) to indicate to a type checker that they were
/// positional-only. However, [PEP 570] (introduced in Python 3.8) introduced
/// dedicated syntax for positional-only arguments. If a forward slash (`/`) is
/// present in a function signature on Python 3.8+, all parameters prior to the
/// slash are interpreted as positional-only.
///
/// The new syntax should be preferred as it is more widely used, more concise
/// and more readable. It is also respected by Python at runtime, whereas the
/// old-style syntax was only understood by type checkers.
///
/// ## Example
///
/// ```pyi
/// def foo(__x: int) -> None: ...
/// ```
///
/// Use instead:
///
/// ```pyi
/// def foo(x: int, /) -> None: ...
/// ```
///
/// ## Options
/// - `target-version`
///
/// [PEP 484]: https://peps.python.org/pep-0484/#positional-only-arguments
/// [PEP 570]: https://peps.python.org/pep-0570
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "0.8.0")]
pub(crate) struct Pep484StylePositionalOnlyParameter;
impl Violation for Pep484StylePositionalOnlyParameter {
#[derive_message_formats]
fn message(&self) -> String {
"Use PEP 570 syntax for positional-only parameters".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Add `/` to function signature".to_string())
}
}
/// PYI063
///
/// Flag the first "real" parameter (after any implicit `self`/`cls`) if it
/// uses the PEP 484 double-underscore convention for positional-only
/// parameters.
pub(crate) fn pep_484_positional_parameter(checker: &Checker, function_def: &ast::StmtFunctionDef) {
    // PEP 570 was introduced in Python 3.8.
    if checker.target_version() < PythonVersion::PY38 {
        return;
    }
    // If PEP 570 syntax (`/`) is already in use, the old convention is moot.
    if !function_def.parameters.posonlyargs.is_empty() {
        return;
    }
    if function_def.parameters.args.is_empty() {
        return;
    }
    let semantic = checker.semantic();
    let scope = semantic.current_scope();
    // Classify the function to know whether its first parameter is an
    // implicit receiver (`self`/`cls`) that should be skipped.
    let function_type = function_type::classify(
        &function_def.name,
        &function_def.decorator_list,
        scope,
        semantic,
        &checker.settings().pep8_naming.classmethod_decorators,
        &checker.settings().pep8_naming.staticmethod_decorators,
    );
    // If the method has a `self` or `cls` argument, skip it.
    let skip = usize::from(matches!(
        function_type,
        function_type::FunctionType::Method | function_type::FunctionType::ClassMethod
    ));
    if let Some(param) = function_def.parameters.args.get(skip) {
        if param.uses_pep_484_positional_only_convention() {
            checker.report_diagnostic(Pep484StylePositionalOnlyParameter, param.identifier());
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/unnecessary_type_union.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/unnecessary_type_union.rs | use ast::ExprContext;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::helpers::pep_604_union;
use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{Applicability, Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Checks for the presence of multiple `type`s in a union.
///
/// ## Why is this bad?
/// `type[T | S]` has identical semantics to `type[T] | type[S]` in a type
/// annotation, but is cleaner and more concise.
///
/// ## Example
/// ```pyi
/// field: type[int] | type[float] | str
/// ```
///
/// Use instead:
/// ```pyi
/// field: type[int | float] | str
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as safe, unless the type annotation contains comments.
///
/// Note that while the fix may flatten nested unions into a single top-level union,
/// the semantics of the annotation will remain unchanged.
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.283")]
pub(crate) struct UnnecessaryTypeUnion {
members: Vec<Name>,
union_kind: UnionKind,
}
impl Violation for UnnecessaryTypeUnion {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
let union_str = match self.union_kind {
UnionKind::PEP604 => self.members.join(" | "),
UnionKind::TypingUnion => format!("Union[{}]", self.members.join(", ")),
};
format!(
"Multiple `type` members in a union. Combine them into one, e.g., `type[{union_str}]`."
)
}
fn fix_title(&self) -> Option<String> {
Some("Combine multiple `type` members".to_string())
}
}
/// PYI055
///
/// Collect every `type[...]` member of a union annotation; if more than one
/// is found, report the union and offer a fix that merges them into a single
/// `type[... | ...]` (or `type[Union[...]]`) member.
pub(crate) fn unnecessary_type_union<'a>(checker: &Checker, union: &'a Expr) {
    let semantic = checker.semantic();
    // The `|` operator isn't always safe to allow to runtime-evaluated annotations.
    if semantic.execution_context().is_runtime() {
        return;
    }
    // Check if `union` is a PEP604 union (e.g. `float | int`) or a `typing.Union[float, int]`
    let subscript = union.as_subscript_expr();
    let mut union_kind = match subscript {
        Some(subscript) => {
            if !semantic.match_typing_expr(&subscript.value, "Union") {
                return;
            }
            UnionKind::TypingUnion
        }
        None => UnionKind::PEP604,
    };
    // `type_exprs` holds the slices of `type[...]` members; `other_exprs`
    // holds every other union member, preserved verbatim in the fix.
    let mut type_exprs: Vec<&Expr> = Vec::new();
    let mut other_exprs: Vec<&Expr> = Vec::new();
    let mut collect_type_exprs = |expr: &'a Expr, parent: &'a Expr| {
        // If a PEP604-style union is used within a `typing.Union`, then the fix can
        // use PEP604-style unions.
        if matches!(parent, Expr::BinOp(_)) {
            union_kind = UnionKind::PEP604;
        }
        match expr {
            Expr::Subscript(ast::ExprSubscript { slice, value, .. }) => {
                // The annotation `type[a, b]` is not valid since `type` accepts
                // a single parameter. This likely is a confusion with `type[a | b]` or
                // `type[Union[a, b]]`. Do not emit a diagnostic for invalid type
                // annotations.
                if !matches!(**slice, Expr::Tuple(_)) && semantic.match_builtin_expr(value, "type")
                {
                    type_exprs.push(slice);
                } else {
                    other_exprs.push(expr);
                }
            }
            _ => other_exprs.push(expr),
        }
    };
    traverse_union(&mut collect_type_exprs, semantic, union);
    // Return if zero or one `type` expressions are found.
    if type_exprs.len() <= 1 {
        return;
    }
    // Capture the source text of each `type[...]` slice for the message/fix.
    let type_members: Vec<Name> = type_exprs
        .iter()
        .map(|type_expr| Name::new(checker.locator().slice(type_expr)))
        .collect();
    let mut diagnostic = checker.report_diagnostic(
        UnnecessaryTypeUnion {
            members: type_members.clone(),
            union_kind,
        },
        union.range(),
    );
    // Only offer a fix when the builtin `type` binding is available unshadowed.
    if semantic.has_builtin_binding("type") {
        // Construct the content for the [`Fix`] based on if we encountered a PEP604 union.
        let content = match union_kind {
            UnionKind::PEP604 => {
                let elts: Vec<Expr> = type_exprs.into_iter().cloned().collect();
                let types = Expr::Subscript(ast::ExprSubscript {
                    value: Box::new(Expr::Name(ast::ExprName {
                        id: Name::new_static("type"),
                        ctx: ExprContext::Load,
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    })),
                    slice: Box::new(pep_604_union(&elts)),
                    ctx: ExprContext::Load,
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                });
                if other_exprs.is_empty() {
                    checker.generator().expr(&types)
                } else {
                    // Re-attach the non-`type` members after the merged `type[...]`.
                    let elts: Vec<Expr> = std::iter::once(types)
                        .chain(other_exprs.into_iter().cloned())
                        .collect();
                    checker.generator().expr(&pep_604_union(&elts))
                }
            }
            UnionKind::TypingUnion => {
                // When subscript is None, it uses the previous match case.
                let subscript = subscript.unwrap();
                let types = &Expr::Subscript(ast::ExprSubscript {
                    value: Box::new(Expr::Name(ast::ExprName {
                        id: Name::new_static("type"),
                        ctx: ExprContext::Load,
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    })),
                    slice: Box::new(Expr::Subscript(ast::ExprSubscript {
                        value: subscript.value.clone(),
                        slice: Box::new(Expr::Tuple(ast::ExprTuple {
                            elts: type_members
                                .into_iter()
                                .map(|type_member| {
                                    Expr::Name(ast::ExprName {
                                        id: type_member,
                                        ctx: ExprContext::Load,
                                        range: TextRange::default(),
                                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                                    })
                                })
                                .collect(),
                            ctx: ExprContext::Load,
                            range: TextRange::default(),
                            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                            parenthesized: true,
                        })),
                        ctx: ExprContext::Load,
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    })),
                    ctx: ExprContext::Load,
                    range: TextRange::default(),
                    node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                });
                if other_exprs.is_empty() {
                    checker.generator().expr(types)
                } else {
                    // Wrap the merged `type[Union[...]]` plus remaining members
                    // back into a single `Union[...]` subscript.
                    let mut exprs = Vec::new();
                    exprs.push(types);
                    exprs.extend(other_exprs);
                    let union = Expr::Subscript(ast::ExprSubscript {
                        value: subscript.value.clone(),
                        slice: Box::new(Expr::Tuple(ast::ExprTuple {
                            elts: exprs.into_iter().cloned().collect(),
                            ctx: ExprContext::Load,
                            range: TextRange::default(),
                            node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                            parenthesized: true,
                        })),
                        ctx: ExprContext::Load,
                        range: TextRange::default(),
                        node_index: ruff_python_ast::AtomicNodeIndex::NONE,
                    });
                    checker.generator().expr(&union)
                }
            }
        };
        // Mark [`Fix`] as unsafe when comments are in range.
        let applicability = if checker.comment_ranges().intersects(union.range()) {
            Applicability::Unsafe
        } else {
            Applicability::Safe
        };
        diagnostic.set_fix(Fix::applicable_edit(
            Edit::range_replacement(content, union.range()),
            applicability,
        ));
    }
}
/// The syntactic flavor of union encountered, used to steer fix generation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UnionKind {
    /// E.g., `typing.Union[int, str]`
    TypingUnion,
    /// E.g., `int | str`
    PEP604,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/future_annotations_in_stub.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/future_annotations_in_stub.rs | use ruff_python_ast::StmtImportFrom;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::{Fix, FixAvailability, Violation};
use crate::{checkers::ast::Checker, fix};
/// ## What it does
/// Checks for the presence of the `from __future__ import annotations` import
/// statement in stub files.
///
/// ## Why is this bad?
/// Stub files natively support forward references in all contexts, as stubs are
/// never executed at runtime. (They should be thought of as "data files" for
/// type checkers.) As such, the `from __future__ import annotations` import
/// statement has no effect and should be omitted.
///
/// ## References
/// - [Typing Style Guide](https://typing.python.org/en/latest/guides/writing_stubs.html#language-features)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.273")]
pub(crate) struct FutureAnnotationsInStub;
impl Violation for FutureAnnotationsInStub {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`from __future__ import annotations` has no effect in stub files, since type checkers automatically treat stubs as having those semantics".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Remove `from __future__ import annotations`".to_string())
}
}
/// PYI044
///
/// Flag `from __future__ import annotations` in a stub and offer to remove
/// the `annotations` name (the whole statement, if it's the only name).
pub(crate) fn from_future_import(checker: &Checker, target: &StmtImportFrom) {
    let StmtImportFrom {
        range,
        module: Some(module_name),
        names,
        ..
    } = target
    else {
        return;
    };
    if module_name != "__future__" {
        return;
    }
    // Only `annotations` is redundant; other `__future__` names are left alone.
    if names.iter().all(|alias| &*alias.name != "annotations") {
        return;
    }
    let mut diagnostic = checker.report_diagnostic(FutureAnnotationsInStub, *range);
    let stmt = checker.semantic().current_statement();
    diagnostic.try_set_fix(|| {
        // Reuse the unused-import removal machinery to surgically drop the
        // `annotations` alias while preserving any sibling names.
        let edit = fix::edits::remove_unused_imports(
            std::iter::once("annotations"),
            stmt,
            None,
            checker.locator(),
            checker.stylist(),
            checker.indexer(),
        )?;
        Ok(Fix::safe_edit(edit))
    });
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/ellipsis_in_non_empty_class_body.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/ellipsis_in_non_empty_class_body.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::whitespace::trailing_comment_start_offset;
use ruff_python_ast::{Stmt, StmtExpr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
/// Removes ellipses (`...`) in otherwise non-empty class bodies.
///
/// ## Why is this bad?
/// An ellipsis in a class body is only necessary if the class body is
/// otherwise empty. If the class body is non-empty, then the ellipsis
/// is redundant.
///
/// ## Example
/// ```pyi
/// class Foo:
/// ...
/// value: int
/// ```
///
/// Use instead:
/// ```pyi
/// class Foo:
/// value: int
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.270")]
pub(crate) struct EllipsisInNonEmptyClassBody;
impl Violation for EllipsisInNonEmptyClassBody {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"Non-empty class body must not contain `...`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Remove unnecessary `...`".to_string())
}
}
/// PYI013
///
/// Report every `...` expression-statement in a class body that contains
/// more than one statement, offering to delete each.
pub(crate) fn ellipsis_in_non_empty_class_body(checker: &Checker, body: &[Stmt]) {
    // If the class body contains a single statement, then it's fine for it to be an ellipsis.
    if body.len() == 1 {
        return;
    }
    for stmt in body {
        let Stmt::Expr(StmtExpr { value, .. }) = stmt else {
            continue;
        };
        if value.is_ellipsis_literal_expr() {
            let mut diagnostic =
                checker.report_diagnostic(EllipsisInNonEmptyClassBody, stmt.range());
            // Try to preserve trailing comment if it exists
            let edit = if let Some(index) = trailing_comment_start_offset(stmt, checker.source()) {
                Edit::range_deletion(stmt.range().add_end(index))
            } else {
                fix::edits::delete_stmt(stmt, Some(stmt), checker.locator(), checker.indexer())
            };
            // Isolate the fix so multiple deletions in the same class body
            // don't produce overlapping edits.
            diagnostic.set_fix(Fix::safe_edit(edit).isolate(Checker::isolation(
                checker.semantic().current_statement_id(),
            )));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_pyi/rules/numeric_literal_too_long.rs | crates/ruff_linter/src/rules/flake8_pyi/rules/numeric_literal_too_long.rs | use ruff_python_ast::Expr;
use ruff_text_size::{Ranged, TextSize};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for numeric literals with a string representation longer than ten
/// characters.
///
/// ## Why is this bad?
/// If a function has a default value where the literal representation is
/// greater than 10 characters, the value is likely to be an implementation
/// detail or a constant that varies depending on the system you're running on.
///
/// Default values like these should generally be omitted from stubs. Use
/// ellipses (`...`) instead.
///
/// ## Example
///
/// ```pyi
/// def foo(arg: int = 693568516352839939918568862861217771399698285293568) -> None: ...
/// ```
///
/// Use instead:
///
/// ```pyi
/// def foo(arg: int = ...) -> None: ...
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.271")]
pub(crate) struct NumericLiteralTooLong;
impl AlwaysFixableViolation for NumericLiteralTooLong {
#[derive_message_formats]
fn message(&self) -> String {
"Numeric literals with a string representation longer than ten characters are not permitted"
.to_string()
}
fn fix_title(&self) -> String {
"Replace with `...`".to_string()
}
}
/// PYI054
pub(crate) fn numeric_literal_too_long(checker: &Checker, expr: &Expr) {
if expr.range().len() <= TextSize::new(10) {
return;
}
let mut diagnostic = checker.report_diagnostic(NumericLiteralTooLong, expr.range());
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
"...".to_string(),
expr.range(),
)));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/helpers.rs | crates/ruff_linter/src/rules/flake8_executable/helpers.rs | #![cfg(target_family = "unix")]
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use anyhow::Result;
pub(super) fn is_executable(filepath: &Path) -> Result<bool> {
let metadata = filepath.metadata()?;
let permissions = metadata.permissions();
Ok(permissions.mode() & 0o111 != 0)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/mod.rs | crates/ruff_linter/src/rules/flake8_executable/mod.rs | //! Rules from [flake8-executable](https://pypi.org/project/flake8-executable/).
pub(crate) mod helpers;
pub(crate) mod rules;
#[cfg(unix)]
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Path::new("EXE001_1.py"))]
#[test_case(Path::new("EXE001_2.py"))]
#[test_case(Path::new("EXE001_3.py"))]
#[test_case(Path::new("EXE002_1.py"))]
#[test_case(Path::new("EXE002_2.py"))]
#[test_case(Path::new("EXE002_3.py"))]
#[test_case(Path::new("EXE003.py"))]
#[test_case(Path::new("EXE003_uv.py"))]
#[test_case(Path::new("EXE003_uv_tool.py"))]
#[test_case(Path::new("EXE003_uvx.py"))]
#[test_case(Path::new("EXE004_1.py"))]
#[test_case(Path::new("EXE004_2.py"))]
#[test_case(Path::new("EXE004_3.py"))]
#[test_case(Path::new("EXE004_4.py"))]
#[test_case(Path::new("EXE005_1.py"))]
#[test_case(Path::new("EXE005_2.py"))]
#[test_case(Path::new("EXE005_3.py"))]
fn rules(path: &Path) -> Result<()> {
let snapshot = path.to_string_lossy().into_owned();
let diagnostics = test_path(
Path::new("flake8_executable").join(path).as_path(),
&settings::LinterSettings::for_rules(vec![
Rule::ShebangNotExecutable,
Rule::ShebangMissingExecutableFile,
Rule::ShebangLeadingWhitespace,
Rule::ShebangNotFirstLine,
Rule::ShebangMissingPython,
]),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/shebang_not_executable.rs | crates/ruff_linter/src/rules/flake8_executable/rules/shebang_not_executable.rs | use std::path::Path;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_text_size::TextRange;
use crate::Violation;
use crate::checkers::ast::LintContext;
#[cfg(target_family = "unix")]
use crate::rules::flake8_executable::helpers::is_executable;
/// ## What it does
/// Checks for a shebang directive in a file that is not executable.
///
/// ## Why is this bad?
/// In Python, a shebang (also known as a hashbang) is the first line of a
/// script, which specifies the interpreter that should be used to run the
/// script.
///
/// The presence of a shebang suggests that a file is intended to be
/// executable. If a file contains a shebang but is not executable, then the
/// shebang is misleading, or the file is missing the executable bit.
///
/// If the file is meant to be executable, add the executable bit to the file
/// (e.g., `chmod +x __main__.py` or `git update-index --chmod=+x __main__.py`).
///
/// Otherwise, remove the shebang.
///
/// A file is considered executable if it has the executable bit set (i.e., its
/// permissions mode intersects with `0o111`). As such, _this rule is only
/// available on Unix-like systems_, and is not enforced on Windows or WSL.
///
/// ## Example
/// ```python
/// #!/usr/bin/env python
/// ```
///
/// ## References
/// - [Python documentation: Executable Python Scripts](https://docs.python.org/3/tutorial/appendix.html#executable-python-scripts)
/// - [Git documentation: `git update-index --chmod`](https://git-scm.com/docs/git-update-index#Documentation/git-update-index.txt---chmod-x)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.233")]
pub(crate) struct ShebangNotExecutable;
impl Violation for ShebangNotExecutable {
#[derive_message_formats]
fn message(&self) -> String {
"Shebang is present but file is not executable".to_string()
}
}
/// EXE001
#[cfg(target_family = "unix")]
pub(crate) fn shebang_not_executable(filepath: &Path, range: TextRange, context: &LintContext) {
// WSL supports Windows file systems, which do not have executable bits.
// Instead, everything is executable. Therefore, we skip this rule on WSL.
if is_wsl::is_wsl() {
return;
}
if let Ok(false) = is_executable(filepath) {
context.report_diagnostic_if_enabled(ShebangNotExecutable, range);
}
}
#[cfg(not(target_family = "unix"))]
pub(crate) fn shebang_not_executable(
_filepath: &Path,
_range: TextRange,
_diagnostics: &LintContext,
) {
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/shebang_not_first_line.rs | crates/ruff_linter/src/rules/flake8_executable/rules/shebang_not_first_line.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::Violation;
use crate::checkers::ast::LintContext;
/// ## What it does
/// Checks for a shebang directive that is not at the beginning of the file.
///
/// ## Why is this bad?
/// In Python, a shebang (also known as a hashbang) is the first line of a
/// script, which specifies the interpreter that should be used to run the
/// script.
///
/// The shebang's `#!` prefix must be the first two characters of a file. If
/// the shebang is not at the beginning of the file, it will be ignored, which
/// is likely a mistake.
///
/// ## Example
/// ```python
/// foo = 1
/// #!/usr/bin/env python3
/// ```
///
/// Use instead:
/// ```python
/// #!/usr/bin/env python3
/// foo = 1
/// ```
///
/// ## References
/// - [Python documentation: Executable Python Scripts](https://docs.python.org/3/tutorial/appendix.html#executable-python-scripts)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.229")]
pub(crate) struct ShebangNotFirstLine;
impl Violation for ShebangNotFirstLine {
#[derive_message_formats]
fn message(&self) -> String {
"Shebang should be at the beginning of the file".to_string()
}
}
/// EXE005
pub(crate) fn shebang_not_first_line(range: TextRange, locator: &Locator, context: &LintContext) {
// If the shebang is at the beginning of the file, abort.
if range.start() == TextSize::from(0) {
return;
}
// If the entire prefix is whitespace, abort (this is handled by EXE004).
if locator
.up_to(range.start())
.chars()
.all(|c| is_python_whitespace(c) || matches!(c, '\r' | '\n'))
{
return;
}
context.report_diagnostic_if_enabled(ShebangNotFirstLine, range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/shebang_missing_executable_file.rs | crates/ruff_linter/src/rules/flake8_executable/rules/shebang_missing_executable_file.rs | use std::path::Path;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
use crate::checkers::ast::LintContext;
#[cfg(target_family = "unix")]
use crate::rules::flake8_executable::helpers::is_executable;
/// ## What it does
/// Checks for executable `.py` files that do not have a shebang.
///
/// ## Why is this bad?
/// In Python, a shebang (also known as a hashbang) is the first line of a
/// script, which specifies the interpreter that should be used to run the
/// script.
///
/// If a `.py` file is executable, but does not have a shebang, it may be run
/// with the wrong interpreter, or fail to run at all.
///
/// If the file is meant to be executable, add a shebang, as in:
/// ```python
/// #!/usr/bin/env python
/// ```
///
/// Otherwise, remove the executable bit from the file
/// (e.g., `chmod -x __main__.py` or `git update-index --chmod=-x __main__.py`).
///
/// A file is considered executable if it has the executable bit set (i.e., its
/// permissions mode intersects with `0o111`). As such, _this rule is only
/// available on Unix-like systems_, and is not enforced on Windows or WSL.
///
/// ## References
/// - [Python documentation: Executable Python Scripts](https://docs.python.org/3/tutorial/appendix.html#executable-python-scripts)
/// - [Git documentation: `git update-index --chmod`](https://git-scm.com/docs/git-update-index#Documentation/git-update-index.txt---chmod-x)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.233")]
pub(crate) struct ShebangMissingExecutableFile;
impl Violation for ShebangMissingExecutableFile {
#[derive_message_formats]
fn message(&self) -> String {
"The file is executable but no shebang is present".to_string()
}
}
/// EXE002
#[cfg(target_family = "unix")]
pub(crate) fn shebang_missing_executable_file(filepath: &Path, context: &LintContext) {
// WSL supports Windows file systems, which do not have executable bits.
// Instead, everything is executable. Therefore, we skip this rule on WSL.
if is_wsl::is_wsl() {
return;
}
if let Ok(true) = is_executable(filepath) {
context.report_diagnostic_if_enabled(
ShebangMissingExecutableFile,
ruff_text_size::TextRange::default(),
);
}
}
#[cfg(not(target_family = "unix"))]
pub(crate) fn shebang_missing_executable_file(_filepath: &Path, _diagnostics: &LintContext) {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/shebang_leading_whitespace.rs | crates/ruff_linter/src/rules/flake8_executable/rules/shebang_leading_whitespace.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::{TextRange, TextSize};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{AlwaysFixableViolation, Edit, Fix};
/// ## What it does
/// Checks for whitespace before a shebang directive.
///
/// ## Why is this bad?
/// In Python, a shebang (also known as a hashbang) is the first line of a
/// script, which specifies the interpreter that should be used to run the
/// script.
///
/// The shebang's `#!` prefix must be the first two characters of a file. The
/// presence of whitespace before the shebang will cause the shebang to be
/// ignored, which is likely a mistake.
///
/// ## Example
/// ```python
/// #!/usr/bin/env python3
/// ```
///
/// Use instead:
/// ```python
/// #!/usr/bin/env python3
/// ```
///
/// ## References
/// - [Python documentation: Executable Python Scripts](https://docs.python.org/3/tutorial/appendix.html#executable-python-scripts)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.229")]
pub(crate) struct ShebangLeadingWhitespace;
impl AlwaysFixableViolation for ShebangLeadingWhitespace {
#[derive_message_formats]
fn message(&self) -> String {
"Avoid whitespace before shebang".to_string()
}
fn fix_title(&self) -> String {
"Remove whitespace before shebang".to_string()
}
}
/// EXE004
pub(crate) fn shebang_leading_whitespace(
context: &LintContext,
range: TextRange,
locator: &Locator,
) {
// If the shebang is at the beginning of the file, abort.
if range.start() == TextSize::from(0) {
return;
}
// If the entire prefix _isn't_ whitespace, abort (this is handled by EXE005).
if !locator
.up_to(range.start())
.chars()
.all(|c| is_python_whitespace(c) || matches!(c, '\r' | '\n'))
{
return;
}
let prefix = TextRange::up_to(range.start());
if let Some(mut diagnostic) =
context.report_diagnostic_if_enabled(ShebangLeadingWhitespace, prefix)
{
diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(prefix)));
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/mod.rs | crates/ruff_linter/src/rules/flake8_executable/rules/mod.rs | use std::path::Path;
use ruff_python_trivia::CommentRanges;
pub(crate) use shebang_leading_whitespace::*;
pub(crate) use shebang_missing_executable_file::*;
pub(crate) use shebang_missing_python::*;
pub(crate) use shebang_not_executable::*;
pub(crate) use shebang_not_first_line::*;
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::codes::Rule;
use crate::comments::shebang::ShebangDirective;
mod shebang_leading_whitespace;
mod shebang_missing_executable_file;
mod shebang_missing_python;
mod shebang_not_executable;
mod shebang_not_first_line;
pub(crate) fn from_tokens(
context: &LintContext,
path: &Path,
locator: &Locator,
comment_ranges: &CommentRanges,
) {
let mut has_any_shebang = false;
for range in comment_ranges {
let comment = locator.slice(range);
if let Some(shebang) = ShebangDirective::try_extract(comment) {
has_any_shebang = true;
shebang_missing_python(range, &shebang, context);
if context.is_rule_enabled(Rule::ShebangNotExecutable) {
shebang_not_executable(path, range, context);
}
shebang_leading_whitespace(context, range, locator);
shebang_not_first_line(range, locator, context);
}
}
if !has_any_shebang {
if context.is_rule_enabled(Rule::ShebangMissingExecutableFile) {
shebang_missing_executable_file(path, context);
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_executable/rules/shebang_missing_python.rs | crates/ruff_linter/src/rules/flake8_executable/rules/shebang_missing_python.rs | use ruff_text_size::TextRange;
use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
use crate::checkers::ast::LintContext;
use crate::comments::shebang::ShebangDirective;
/// ## What it does
/// Checks for a shebang directive in `.py` files that does not contain `python`,
/// `pytest`, or `uv run`.
///
/// ## Why is this bad?
/// In Python, a shebang (also known as a hashbang) is the first line of a
/// script, which specifies the command that should be used to run the
/// script.
///
/// For Python scripts, if the shebang does not include a command that explicitly
/// or implicitly specifies an interpreter, then the file will be executed with
/// the default interpreter, which is likely a mistake.
///
/// ## Example
/// ```python
/// #!/usr/bin/env bash
/// ```
///
/// Use instead:
/// ```python
/// #!/usr/bin/env python3
/// ```
///
/// ## References
/// - [Python documentation: Executable Python Scripts](https://docs.python.org/3/tutorial/appendix.html#executable-python-scripts)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.229")]
pub(crate) struct ShebangMissingPython;
impl Violation for ShebangMissingPython {
#[derive_message_formats]
fn message(&self) -> String {
"Shebang should contain `python`, `pytest`, or `uv run`".to_string()
}
}
/// EXE003
pub(crate) fn shebang_missing_python(
range: TextRange,
shebang: &ShebangDirective,
context: &LintContext,
) {
if shebang.contains("python")
|| shebang.contains("pytest")
|| shebang.contains("uv run")
|| shebang.contains("uvx")
|| shebang.contains("uv tool run")
{
return;
}
context.report_diagnostic_if_enabled(ShebangMissingPython, range);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/helpers.rs | crates/ruff_linter/src/rules/flake8_2020/helpers.rs | use ruff_python_ast::Expr;
use ruff_python_semantic::SemanticModel;
pub(super) fn is_sys(expr: &Expr, target: &str, semantic: &SemanticModel) -> bool {
semantic
.resolve_qualified_name(expr)
.is_some_and(|qualified_name| qualified_name.segments() == ["sys", target])
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/mod.rs | crates/ruff_linter/src/rules/flake8_2020/mod.rs | //! Rules from [flake8-2020](https://pypi.org/project/flake8-2020/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_diagnostics, settings};
#[test_case(Rule::SysVersionSlice3, Path::new("YTT101.py"))]
#[test_case(Rule::SysVersion2, Path::new("YTT102.py"))]
#[test_case(Rule::SysVersionCmpStr3, Path::new("YTT103.py"))]
#[test_case(Rule::SysVersionInfo0Eq3, Path::new("YTT201.py"))]
#[test_case(Rule::SixPY3, Path::new("YTT202.py"))]
#[test_case(Rule::SysVersionInfo1CmpInt, Path::new("YTT203.py"))]
#[test_case(Rule::SysVersionInfoMinorCmpInt, Path::new("YTT204.py"))]
#[test_case(Rule::SysVersion0, Path::new("YTT301.py"))]
#[test_case(Rule::SysVersionCmpStr10, Path::new("YTT302.py"))]
#[test_case(Rule::SysVersionSlice1, Path::new("YTT303.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_2020").join(path).as_path(),
&settings::LinterSettings::for_rule(rule_code),
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/rules/mod.rs | crates/ruff_linter/src/rules/flake8_2020/rules/mod.rs | pub(crate) use compare::*;
pub(crate) use name_or_attribute::*;
pub(crate) use subscript::*;
mod compare;
mod name_or_attribute;
mod subscript;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/rules/compare.rs | crates/ruff_linter/src/rules/flake8_2020/rules/compare.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, CmpOp, Expr};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::rules::flake8_2020::helpers::is_sys;
/// ## What it does
/// Checks for comparisons that test `sys.version` against string literals,
/// such that the comparison will evaluate to `False` on Python 3.10 or later.
///
/// ## Why is this bad?
/// Comparing `sys.version` to a string is error-prone and may cause subtle
/// bugs, as the comparison will be performed lexicographically, not
/// semantically. For example, `sys.version > "3.9"` will evaluate to `False`
/// when using Python 3.10, as `"3.10"` is lexicographically "less" than
/// `"3.9"`.
///
/// Instead, use `sys.version_info` to access the current major and minor
/// version numbers as a tuple, which can be compared to other tuples
/// without issue.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version > "3.9" # `False` on Python 3.10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// sys.version_info > (3, 9) # `True` on Python 3.10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionCmpStr3;
impl Violation for SysVersionCmpStr3 {
#[derive_message_formats]
fn message(&self) -> String {
"`sys.version` compared to string (python3.10), use `sys.version_info`".to_string()
}
}
/// ## What it does
/// Checks for equality comparisons against the major version returned by
/// `sys.version_info` (e.g., `sys.version_info[0] == 3` or `sys.version_info[0] != 3`).
///
/// ## Why is this bad?
/// Using `sys.version_info[0] == 3` to verify that the major version is
/// Python 3 or greater will fail if the major version number is ever
/// incremented (e.g., to Python 4). This is likely unintended, as code
/// that uses this comparison is likely intended to be run on Python 2,
/// but would now run on Python 4 too. Similarly, using `sys.version_info[0] != 3`
/// to check for Python 2 will also fail if the major version number is
/// incremented.
///
/// Instead, use `>=` to check if the major version number is 3 or greater,
/// or `<` to check if the major version number is less than 3, to future-proof
/// the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info[0] == 3:
/// ...
/// else:
/// print("Python 2") # This will be printed on Python 4.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info >= (3,):
/// ...
/// else:
/// print("Python 2") # This will not be printed on Python 4.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionInfo0Eq3 {
eq: bool,
}
impl Violation for SysVersionInfo0Eq3 {
#[derive_message_formats]
fn message(&self) -> String {
if self.eq {
"`sys.version_info[0] == 3` referenced (python4), use `>=`".to_string()
} else {
"`sys.version_info[0] != 3` referenced (python4), use `<`".to_string()
}
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version_info[1]` against an integer.
///
/// ## Why is this bad?
/// Comparisons based on the current minor version number alone can cause
/// subtle bugs and would likely lead to unintended effects if the Python
/// major version number were ever incremented (e.g., to Python 4).
///
/// Instead, compare `sys.version_info` to a tuple, including the major and
/// minor version numbers, to future-proof the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info[1] < 7:
/// print("Python 3.6 or earlier.") # This will be printed on Python 4.0.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info < (3, 7):
/// print("Python 3.6 or earlier.")
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionInfo1CmpInt;
impl Violation for SysVersionInfo1CmpInt {
#[derive_message_formats]
fn message(&self) -> String {
"`sys.version_info[1]` compared to integer (python4), compare `sys.version_info` to \
tuple"
.to_string()
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version_info.minor` against an integer.
///
/// ## Why is this bad?
/// Comparisons based on the current minor version number alone can cause
/// subtle bugs and would likely lead to unintended effects if the Python
/// major version number were ever incremented (e.g., to Python 4).
///
/// Instead, compare `sys.version_info` to a tuple, including the major and
/// minor version numbers, to future-proof the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info.minor < 7:
/// print("Python 3.6 or earlier.") # This will be printed on Python 4.0.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info < (3, 7):
/// print("Python 3.6 or earlier.")
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionInfoMinorCmpInt;
impl Violation for SysVersionInfoMinorCmpInt {
#[derive_message_formats]
fn message(&self) -> String {
"`sys.version_info.minor` compared to integer (python4), compare `sys.version_info` \
to tuple"
.to_string()
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version` against string literals,
/// such that the comparison would fail if the major version number were
/// ever incremented to Python 10 or higher.
///
/// ## Why is this bad?
/// Comparing `sys.version` to a string is error-prone and may cause subtle
/// bugs, as the comparison will be performed lexicographically, not
/// semantically.
///
/// Instead, use `sys.version_info` to access the current major and minor
/// version numbers as a tuple, which can be compared to other tuples
/// without issue.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version >= "3" # `False` on Python 10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// sys.version_info >= (3,) # `True` on Python 10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionCmpStr10;
impl Violation for SysVersionCmpStr10 {
#[derive_message_formats]
fn message(&self) -> String {
"`sys.version` compared to string (python10), use `sys.version_info`".to_string()
}
}
/// YTT103, YTT201, YTT203, YTT204, YTT302
pub(crate) fn compare(checker: &Checker, left: &Expr, ops: &[CmpOp], comparators: &[Expr]) {
match left {
Expr::Subscript(ast::ExprSubscript { value, slice, .. })
if is_sys(value, "version_info", checker.semantic()) =>
{
if let Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(i),
..
}) = slice.as_ref()
{
if *i == 0 {
if let (
[operator @ (CmpOp::Eq | CmpOp::NotEq)],
[
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(n),
..
}),
],
) = (ops, comparators)
{
if *n == 3 && checker.is_rule_enabled(Rule::SysVersionInfo0Eq3) {
checker.report_diagnostic(
SysVersionInfo0Eq3 {
eq: matches!(*operator, CmpOp::Eq),
},
left.range(),
);
}
}
} else if *i == 1 {
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(_),
..
}),
],
) = (ops, comparators)
{
checker.report_diagnostic_if_enabled(SysVersionInfo1CmpInt, left.range());
}
}
}
}
Expr::Attribute(ast::ExprAttribute { value, attr, .. })
if is_sys(value, "version_info", checker.semantic()) && attr == "minor" =>
{
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(_),
..
}),
],
) = (ops, comparators)
{
checker.report_diagnostic_if_enabled(SysVersionInfoMinorCmpInt, left.range());
}
}
_ => {}
}
if is_sys(left, "version", checker.semantic()) {
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[Expr::StringLiteral(ast::ExprStringLiteral { value, .. })],
) = (ops, comparators)
{
if value.len() == 1 {
checker.report_diagnostic_if_enabled(SysVersionCmpStr10, left.range());
} else {
checker.report_diagnostic_if_enabled(SysVersionCmpStr3, left.range());
}
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/rules/subscript.rs | crates/ruff_linter/src/rules/flake8_2020/rules/subscript.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::{self as ast, Expr};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::rules::flake8_2020::helpers::is_sys;
/// ## What it does
/// Checks for uses of `sys.version[:3]`.
///
/// ## Why is this bad?
/// If the current major or minor version consists of multiple digits,
/// `sys.version[:3]` will truncate the version number (e.g., `"3.10"` would
/// become `"3.1"`). This is likely unintended, and can lead to subtle bugs if
/// the version string is used to test against a specific Python version.
///
/// Instead, use `sys.version_info` to access the current major and minor
/// version numbers as a tuple, which can be compared to other tuples
/// without issue.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version[:3] # Evaluates to "3.1" on Python 3.10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// sys.version_info[:2] # Evaluates to (3, 10) on Python 3.10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionSlice3;
impl Violation for SysVersionSlice3 {
#[derive_message_formats]
fn message(&self) -> String {
"`sys.version[:3]` referenced (python3.10), use `sys.version_info`".to_string()
}
}
/// ## What it does
/// Checks for uses of `sys.version[2]`.
///
/// ## Why is this bad?
/// If the current major or minor version consists of multiple digits,
/// `sys.version[2]` will select the first digit of the minor number only
/// (e.g., `"3.10"` would evaluate to `"1"`). This is likely unintended, and
/// can lead to subtle bugs if the version is used to test against a minor
/// version number.
///
/// Instead, use `sys.version_info.minor` to access the current minor version
/// number.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version[2] # Evaluates to "1" on Python 3.10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// f"{sys.version_info.minor}" # Evaluates to "10" on Python 3.10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersion2;

impl Violation for SysVersion2 {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported by `subscript` below when `sys.version` is indexed with the
        // integer literal `2`.
        "`sys.version[2]` referenced (python3.10), use `sys.version_info`".to_string()
    }
}
/// ## What it does
/// Checks for uses of `sys.version[0]`.
///
/// ## Why is this bad?
/// If the current major or minor version consists of multiple digits,
/// `sys.version[0]` will select the first digit of the major version number
/// only (e.g., `"10.2"` would evaluate to `"1"`). This is likely unintended,
/// and can lead to subtle bugs if the version string is used to test against a
/// major version number.
///
/// Instead, use `sys.version_info.major` to access the current major version
/// number.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version[0] # If using Python 10, this evaluates to "1".
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// f"{sys.version_info.major}" # If using Python 10, this evaluates to "10".
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersion0;

impl Violation for SysVersion0 {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported by `subscript` below when `sys.version` is indexed with the
        // integer literal `0`.
        "`sys.version[0]` referenced (python10), use `sys.version_info`".to_string()
    }
}
/// ## What it does
/// Checks for uses of `sys.version[:1]`.
///
/// ## Why is this bad?
/// If the major version number consists of more than one digit, this will
/// select the first digit of the major version number only (e.g., `"10.0"`
/// would evaluate to `"1"`). This is likely unintended, and can lead to subtle
/// bugs in future versions of Python if the version string is used to test
/// against a specific major version number.
///
/// Instead, use `sys.version_info.major` to access the current major version
/// number.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version[:1] # If using Python 10, this evaluates to "1".
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// f"{sys.version_info.major}" # If using Python 10, this evaluates to "10".
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SysVersionSlice1;

impl Violation for SysVersionSlice1 {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported by `subscript` below when `sys.version` is sliced with `[:1]`
        // (a slice with no lower bound, no step, and an upper bound of `1`).
        "`sys.version[:1]` referenced (python10), use `sys.version_info`".to_string()
    }
}
/// YTT101, YTT102, YTT301, YTT303
pub(crate) fn subscript(checker: &Checker, value: &Expr, slice: &Expr) {
    // Only subscripts of `sys.version` are of interest here.
    if !is_sys(value, "version", checker.semantic()) {
        return;
    }
    match slice {
        // `sys.version[:N]`: a slice with only an upper bound.
        Expr::Slice(ast::ExprSlice {
            lower: None,
            upper: Some(upper),
            step: None,
            range: _,
            node_index: _,
        }) => {
            let Expr::NumberLiteral(ast::ExprNumberLiteral {
                value: ast::Number::Int(bound),
                ..
            }) = upper.as_ref()
            else {
                return;
            };
            if *bound == 1 && checker.is_rule_enabled(Rule::SysVersionSlice1) {
                checker.report_diagnostic(SysVersionSlice1, value.range());
            } else if *bound == 3 && checker.is_rule_enabled(Rule::SysVersionSlice3) {
                checker.report_diagnostic(SysVersionSlice3, value.range());
            }
        }
        // `sys.version[N]`: a plain integer index.
        Expr::NumberLiteral(ast::ExprNumberLiteral {
            value: ast::Number::Int(index),
            ..
        }) => {
            if *index == 2 && checker.is_rule_enabled(Rule::SysVersion2) {
                checker.report_diagnostic(SysVersion2, value.range());
            } else if *index == 0 && checker.is_rule_enabled(Rule::SysVersion0) {
                checker.report_diagnostic(SysVersion0, value.range());
            }
        }
        _ => {}
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/flake8_2020/rules/name_or_attribute.rs | crates/ruff_linter/src/rules/flake8_2020/rules/name_or_attribute.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::Expr;
use ruff_python_semantic::Modules;
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of `six.PY3`.
///
/// ## Why is this bad?
/// `six.PY3` will evaluate to `False` on Python 4 and greater. This is likely
/// unintended, and may cause code intended to run on Python 2 to run on Python 4
/// too.
///
/// Instead, use `not six.PY2` to validate that the current Python major version is
/// _not_ equal to 2, to future-proof the code.
///
/// ## Example
/// ```python
/// import six
///
/// six.PY3 # `False` on Python 4.
/// ```
///
/// Use instead:
/// ```python
/// import six
///
/// not six.PY2 # `True` on Python 4.
/// ```
///
/// ## References
/// - [PyPI: `six`](https://pypi.org/project/six/)
/// - [Six documentation: `six.PY2`](https://six.readthedocs.io/#six.PY2)
/// - [Six documentation: `six.PY3`](https://six.readthedocs.io/#six.PY3)
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.113")]
pub(crate) struct SixPY3;

impl Violation for SixPY3 {
    #[derive_message_formats]
    fn message(&self) -> String {
        // Reported by `name_or_attribute` (YTT202) below whenever an expression
        // resolves to the qualified name `six.PY3`.
        "`six.PY3` referenced (python4), use `not six.PY2`".to_string()
    }
}
/// YTT202
pub(crate) fn name_or_attribute(checker: &Checker, expr: &Expr) {
    let semantic = checker.semantic();

    // Cheap bail-out: the rule can only fire once `six` has been imported
    // somewhere in the module.
    if !semantic.seen_module(Modules::SIX) {
        return;
    }

    let is_six_py3 = semantic
        .resolve_qualified_name(expr)
        .is_some_and(|qualified_name| matches!(qualified_name.segments(), ["six", "PY3"]));

    if is_six_py3 {
        checker.report_diagnostic(SixPY3, expr.range());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/eradicate/mod.rs | crates/ruff_linter/src/rules/eradicate/mod.rs | //! Rules from [eradicate](https://pypi.org/project/eradicate/).
pub(crate) mod detection;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_diagnostics, settings};

    // Snapshot test: lint each fixture file with exactly one rule enabled and
    // compare the resulting diagnostics against the stored snapshot.
    #[test_case(Rule::CommentedOutCode, Path::new("ERA001.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        // The snapshot name combines the rule's noqa code with the fixture name.
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("eradicate").join(path).as_path(),
            &settings::LinterSettings::for_rule(rule_code),
        )?;
        assert_diagnostics!(snapshot, diagnostics);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/eradicate/detection.rs | crates/ruff_linter/src/rules/eradicate/detection.rs | /// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
use aho_corasick::AhoCorasick;
use itertools::Itertools;
use regex::{Regex, RegexSet};
use ruff_python_parser::parse_module;
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::TextSize;
use std::sync::LazyLock;
/// Substrings that must occur in a comment for it to plausibly contain code;
/// used by `comment_contains_code` as a cheap pre-filter before any
/// tokenization or parsing is attempted.
static CODE_INDICATORS: LazyLock<AhoCorasick> = LazyLock::new(|| {
    AhoCorasick::new([
        "(", ")", "[", "]", "{", "}", ":", "=", "%", "return", "break", "continue", "import",
    ])
    .unwrap()
});
/// Matches comment pragmas that resemble code but must never be flagged as
/// commented-out code (e.g., `noqa`, `type: ignore`, `pylint`, `fmt:`
/// directives, and IntelliJ `language=` injection comments). The pattern is
/// anchored at the start of the (trimmed) comment text.
static ALLOWLIST_REGEX: LazyLock<Regex> = LazyLock::new(|| {
    Regex::new(
        r"(?x)
        ^
        (?:
            # Case-sensitive
            pyright
            | pyrefly
            | ruff\s*:\s*(disable|enable)
            | mypy:
            | type:\s*ignore
            | SPDX-License-Identifier:
            | fmt:\s*(on|off|skip)
            | region|endregion
            # Case-insensitive
            | (?i:
                noqa
            )
            # Unknown case sensitivity
            | (?i:
                pylint
                | nosec
                | isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)
                | (?:en)?coding[:=][\x20\t]*([-_.A-Z0-9]+)
            )
            # IntelliJ language injection comments:
            # * `language` must be lowercase.
            # * No spaces around `=`.
            # * Language IDs as used in comments must have no spaces,
            #   though to IntelliJ they can be anything.
            # * May optionally contain `prefix=` and/or `suffix=`,
            #   not declared here since we use `.is_match()`.
            | language=[-_.a-zA-Z0-9]+
        )
        ",
    )
    .unwrap()
});
/// Matches a `#` immediately followed by a digit (e.g., the issue reference in
/// "# Issue #999"), which marks a hash that is not a comment delimiter.
static HASH_NUMBER: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"#\d").unwrap());
/// Comment contents that are treated as code without attempting a full parse:
/// single-line compound-statement keywords, partial dictionary literals,
/// multiline assignments, and bracket-only lines.
static POSITIVE_CASES: LazyLock<RegexSet> = LazyLock::new(|| {
    RegexSet::new([
        // Keywords
        r"^(?:elif\s+.*\s*:.*|else\s*:.*|try\s*:.*|finally\s*:.*|except.*:.*|case\s+.*\s*:.*)$",
        // Partial dictionary
        r#"^['"]\w+['"]\s*:.+[,{]\s*(#.*)?$"#,
        // Multiline assignment
        r"^(?:[(\[]\s*)?(?:\w+\s*,\s*)*\w+\s*([)\]]\s*)?=.*[(\[{]$",
        // Brackets,
        r"^[()\[\]{}\s]+$",
    ])
    .unwrap()
});
/// Returns `true` if a comment contains Python code.
///
/// Runs a sequence of cheap rejection filters before falling back to actually
/// parsing the comment text as a Python module. The order of the checks is
/// significant: allowlisted pragmas and task tags must be rejected before the
/// "looks like code" heuristics get a chance to accept.
pub(crate) fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
    let trimmed = line.trim_start_matches([' ', '#']).trim_end();

    // Fast path: without any code-like token, the line cannot be code.
    if !CODE_INDICATORS.is_match(trimmed) {
        return false;
    }

    // Fast path: two adjacent identifiers can never parse as Python.
    let mut token_pairs = SimpleTokenizer::starts_at(TextSize::default(), trimmed)
        .skip_trivia()
        .tuple_windows();
    if token_pairs.any(|(left, right)| {
        left.kind == SimpleTokenKind::Name && right.kind == SimpleTokenKind::Name
    }) {
        return false;
    }

    // Task-tag comments (e.g., "# TODO(tom): Refactor") are never code.
    if let Some(first_word) = trimmed.split(&[' ', ':', '(']).next() {
        if task_tags.iter().any(|tag| tag == first_word) {
            return false;
        }
    }

    // Allowlisted pragma comments (noqa, type: ignore, ...) are never code.
    if ALLOWLIST_REGEX.is_match(trimmed) {
        return false;
    }

    // Hashes that aren't comment delimiters (e.g., "# Issue #999") are prose.
    if HASH_NUMBER.is_match(trimmed) {
        return false;
    }

    // A trailing line continuation strongly suggests code.
    if trimmed.ends_with('\\') {
        return true;
    }

    // Heuristic patterns (keywords, partial dicts, bare brackets, ...) count
    // as code without parsing.
    if POSITIVE_CASES.is_match(trimmed) {
        return true;
    }

    // Last resort: attempt to parse the remainder as a Python module.
    parse_module(trimmed).is_ok()
}
#[cfg(test)]
mod tests {
    use crate::settings::TASK_TAGS;

    use super::comment_contains_code;

    // Core positive/negative cases: assignments, imports, dict fragments,
    // bracket-only lines, prose, and pragma comments.
    #[test]
    fn comment_contains_code_basic() {
        assert!(comment_contains_code("# x = 1", &[]));
        assert!(comment_contains_code("# # x = 1", &[]));
        assert!(comment_contains_code("#from foo import eradicate", &[]));
        assert!(comment_contains_code("#import eradicate", &[]));
        assert!(comment_contains_code(r#"#"key": value,"#, &[]));
        assert!(comment_contains_code(r#"#"key": "value","#, &[]));
        assert!(comment_contains_code(r#"#"key": 1 + 1,"#, &[]));
        assert!(comment_contains_code("#'key': 1 + 1,", &[]));
        assert!(comment_contains_code(r#"#"key": {"#, &[]));
        assert!(comment_contains_code("#}", &[]));
        assert!(comment_contains_code("#} )]", &[]));
        assert!(!comment_contains_code("#", &[]));
        assert!(!comment_contains_code("# This is a (real) comment.", &[]));
        assert!(!comment_contains_code("# # A (nested) comment.", &[]));
        assert!(!comment_contains_code("# 123", &[]));
        assert!(!comment_contains_code("# 123.1", &[]));
        assert!(!comment_contains_code("# 1, 2, 3", &[]));
        assert!(!comment_contains_code("# ruff: disable[E501]", &[]));
        assert!(!comment_contains_code("#ruff:enable[E501, F84]", &[]));
        assert!(!comment_contains_code(
            "# pylint: disable=redefined-outer-name",
            &[]
        ));
        assert!(!comment_contains_code(
            "# Issue #999: This is not code",
            &[]
        ));
        assert!(!comment_contains_code("# mypy: allow-untyped-calls", &[]));
        assert!(!comment_contains_code(
            "# SPDX-License-Identifier: MIT",
            &[]
        ));
        // TODO(charlie): This should be `true` under aggressive mode.
        assert!(!comment_contains_code("#},", &[]));
    }

    // `print` only counts as code when it is syntactically a call.
    #[test]
    fn comment_contains_code_with_print() {
        assert!(comment_contains_code("#print(1)", &[]));
        assert!(!comment_contains_code("#print", &[]));
        assert!(!comment_contains_code("#print 1", &[]));
        assert!(!comment_contains_code("#to print", &[]));
    }

    // `return` with an operand is code; prose containing "print" is not.
    #[test]
    fn comment_contains_code_with_return() {
        assert!(comment_contains_code("#return x", &[]));
        assert!(!comment_contains_code("#to print", &[]));
    }

    // Single-line compound statements (`try:`, `else:`, `except:`, ...) with a
    // body on the same line.
    #[test]
    fn comment_contains_code_single_line() {
        assert!(comment_contains_code("# case 1: print()", &[]));
        assert!(comment_contains_code("# try: get(1, 2, 3)", &[]));
        assert!(comment_contains_code("# else: print()", &[]));
        assert!(comment_contains_code("# elif x == 10: print()", &[]));
        assert!(comment_contains_code(
            "# except Exception as e: print(e)",
            &[]
        ));
        assert!(comment_contains_code("# except: print()", &[]));
        assert!(comment_contains_code("# finally: close_handle()", &[]));
        assert!(!comment_contains_code("# try: use cache", &[]));
        assert!(!comment_contains_code("# else: we should return", &[]));
        assert!(!comment_contains_code(
            "# call function except: without cache",
            &[]
        ));
    }

    // Fragments of multiline constructs: dangling headers, open calls, and
    // unpacking assignments.
    #[test]
    fn comment_contains_code_with_multiline() {
        assert!(comment_contains_code("#else:", &[]));
        assert!(comment_contains_code("# else : ", &[]));
        assert!(comment_contains_code(r#"# "foo %d" % \\"#, &[]));
        assert!(comment_contains_code("#elif True:", &[]));
        assert!(comment_contains_code("#x = foo(", &[]));
        assert!(comment_contains_code("#except Exception:", &[]));
        assert!(comment_contains_code("# case 1:", &[]));
        assert!(comment_contains_code("#case 1:", &[]));
        assert!(comment_contains_code("# try:", &[]));
        assert!(!comment_contains_code("# this is = to that :(", &[]));
        assert!(!comment_contains_code("#else", &[]));
        assert!(!comment_contains_code("#or else:", &[]));
        assert!(!comment_contains_code("#else True:", &[]));
        assert!(!comment_contains_code("# in that case:", &[]));
        // Unpacking assignments
        assert!(comment_contains_code(
            "# user_content_type, _ = TimelineEvent.objects.using(db_alias).get_or_create(",
            &[]
        ));
        assert!(comment_contains_code(
            "# (user_content_type, _) = TimelineEvent.objects.using(db_alias).get_or_create(",
            &[]
        ));
        assert!(comment_contains_code(
            "# ( user_content_type , _ )= TimelineEvent.objects.using(db_alias).get_or_create(",
            &[]
        ));
        assert!(comment_contains_code("# )", &[]));
        // This used to return true, but our parser has gotten a bit better
        // at rejecting invalid Python syntax. And indeed, this is not valid
        // Python code.
        assert!(!comment_contains_code(
            "# app_label=\"core\", model=\"user\"",
            &[]
        ));
        // TODO(charlie): This should be `true` under aggressive mode.
        assert!(!comment_contains_code("#def foo():", &[]));
    }

    // Plain English sentences must not be flagged.
    #[test]
    fn comment_contains_code_with_sentences() {
        assert!(!comment_contains_code("#code is good", &[]));
    }

    // PEP 263 encoding declarations are allowlisted; near-misses are not.
    #[test]
    fn comment_contains_code_with_encoding() {
        assert!(comment_contains_code("# codings=utf-8", &[]));
        assert!(!comment_contains_code("# coding=utf-8", &[]));
        assert!(!comment_contains_code("#coding= utf-8", &[]));
        assert!(!comment_contains_code("# coding: utf-8", &[]));
        assert!(!comment_contains_code("# encoding: utf8", &[]));
    }

    // Every pragma family in `ALLOWLIST_REGEX` must be rejected.
    #[test]
    fn comment_contains_code_with_default_allowlist() {
        assert!(!comment_contains_code("# pylint: disable=A0123", &[]));
        assert!(!comment_contains_code("# pylint:disable=A0123", &[]));
        assert!(!comment_contains_code("# pylint: disable = A0123", &[]));
        assert!(!comment_contains_code("# pylint:disable = A0123", &[]));
        assert!(!comment_contains_code(
            "# pyright: reportErrorName=true",
            &[]
        ));
        assert!(!comment_contains_code("# noqa", &[]));
        assert!(!comment_contains_code("# NOQA", &[]));
        assert!(!comment_contains_code("# noqa: A123", &[]));
        assert!(!comment_contains_code("# noqa:A123", &[]));
        assert!(!comment_contains_code("# nosec", &[]));
        assert!(!comment_contains_code("# region", &[]));
        assert!(!comment_contains_code("# endregion", &[]));
        assert!(!comment_contains_code("# region.name", &[]));
        assert!(!comment_contains_code("# region name", &[]));
        assert!(!comment_contains_code("# region: name", &[]));
        assert!(!comment_contains_code("# fmt: on", &[]));
        assert!(!comment_contains_code("# fmt: off", &[]));
        assert!(!comment_contains_code("# fmt:on", &[]));
        assert!(!comment_contains_code("# fmt:off", &[]));
        assert!(!comment_contains_code("# isort: on", &[]));
        assert!(!comment_contains_code("# isort:on", &[]));
        assert!(!comment_contains_code("# isort: off", &[]));
        assert!(!comment_contains_code("# isort:off", &[]));
        assert!(!comment_contains_code("# isort: skip", &[]));
        assert!(!comment_contains_code("# isort:skip", &[]));
        assert!(!comment_contains_code("# isort: skip_file", &[]));
        assert!(!comment_contains_code("# isort:skip_file", &[]));
        assert!(!comment_contains_code("# isort: split", &[]));
        assert!(!comment_contains_code("# isort:split", &[]));
        assert!(!comment_contains_code("# isort: dont-add-imports", &[]));
        assert!(!comment_contains_code("# isort:dont-add-imports", &[]));
        assert!(!comment_contains_code(
            "# isort: dont-add-imports: [\"import os\"]",
            &[]
        ));
        assert!(!comment_contains_code(
            "# isort:dont-add-imports: [\"import os\"]",
            &[]
        ));
        assert!(!comment_contains_code(
            "# isort: dont-add-imports:[\"import os\"]",
            &[]
        ));
        assert!(!comment_contains_code(
            "# isort:dont-add-imports:[\"import os\"]",
            &[]
        ));
        assert!(!comment_contains_code("# type: ignore", &[]));
        assert!(!comment_contains_code("# type:ignore", &[]));
        assert!(!comment_contains_code("# type: ignore[import]", &[]));
        assert!(!comment_contains_code("# type:ignore[import]", &[]));
        assert!(!comment_contains_code(
            "# TODO: Do that",
            &["TODO".to_string()]
        ));
        assert!(!comment_contains_code(
            "# FIXME: Fix that",
            &["FIXME".to_string()]
        ));
        assert!(!comment_contains_code(
            "# XXX: What ever",
            &["XXX".to_string()]
        ));
    }

    // IntelliJ `language=` injection comments: strict casing and spacing rules.
    #[test]
    fn comment_contains_language_injection() {
        // `language` with bad casing
        assert!(comment_contains_code("# Language=C#", &[]));
        assert!(comment_contains_code("# lAngUAgE=inI", &[]));
        // Unreasonable language IDs, possibly literals
        assert!(comment_contains_code("# language=\"pt\"", &[]));
        assert!(comment_contains_code("# language='en'", &[]));
        // Spaces around equal sign
        assert!(comment_contains_code("# language =xml", &[]));
        assert!(comment_contains_code("# language= html", &[]));
        assert!(comment_contains_code("# language = RegExp", &[]));
        // Leading whitespace
        assert!(!comment_contains_code("#language=CSS", &[]));
        assert!(!comment_contains_code("# \t language=C++", &[]));
        // Human language false negatives
        assert!(!comment_contains_code("# language=en", &[]));
        assert!(!comment_contains_code("# language=en-US", &[]));
        // Casing (fine because such IDs cannot be validated)
        assert!(!comment_contains_code("# language=PytHoN", &[]));
        assert!(!comment_contains_code("# language=jaVaScrIpt", &[]));
        // Space within ID (fine because `Shell` is considered the ID)
        assert!(!comment_contains_code("# language=Shell Script", &[]));
        // With prefix and/or suffix
        assert!(!comment_contains_code("# language=HTML prefix=<body>", &[]));
        assert!(!comment_contains_code(
            r"# language=Requirements suffix=\n",
            &[]
        ));
        assert!(!comment_contains_code(
            "language=javascript prefix=(function(){ suffix=})()",
            &[]
        ));
    }

    // Default task tags (TODO/FIXME/XXX) are skipped in all common spellings.
    #[test]
    fn comment_contains_todo() {
        let task_tags = TASK_TAGS
            .iter()
            .map(ToString::to_string)
            .collect::<Vec<_>>();
        assert!(!comment_contains_code(
            "# TODO(tom): Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code(
            "# TODO: Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code("# TODO:Rewrite in Rust", &task_tags));
        assert!(!comment_contains_code(
            "# FIXME(tom): Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code(
            "# FIXME: Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code(
            "# FIXME:Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code(
            "# XXX(tom): Rewrite in Rust",
            &task_tags
        ));
        assert!(!comment_contains_code("# XXX: Rewrite in Rust", &task_tags));
        assert!(!comment_contains_code("# XXX:Rewrite in Rust", &task_tags));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/eradicate/rules/mod.rs | crates/ruff_linter/src/rules/eradicate/rules/mod.rs | pub(crate) use commented_out_code::*;
mod commented_out_code;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/rules/eradicate/rules/commented_out_code.rs | crates/ruff_linter/src/rules/eradicate/rules/commented_out_code.rs | use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::{LineRanges, UniversalNewlineIterator};
use ruff_text_size::TextRange;
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::eradicate::detection::comment_contains_code;
/// ## What it does
/// Checks for commented-out Python code.
///
/// ## Why is this bad?
/// Commented-out code is dead code, and is often included inadvertently.
/// It should be removed.
///
/// ## Known problems
/// Prone to false positives when checking comments that resemble Python code,
/// but are not actually Python code ([#4845]).
///
/// ## Example
/// ```python
/// # print("Hello, world!")
/// ```
///
/// ## Options
/// - `lint.task-tags`
///
/// [#4845]: https://github.com/astral-sh/ruff/issues/4845
#[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.0.145")]
pub(crate) struct CommentedOutCode;

impl Violation for CommentedOutCode {
    // No automatic fix is advertised: the deletion suggested by
    // `commented_out_code` below is attached as a display-only edit.
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::None;

    #[derive_message_formats]
    fn message(&self) -> String {
        "Found commented-out code".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        // Title shown alongside the display-only deletion fix.
        Some("Remove commented-out code".to_string())
    }
}
/// ERA001
pub(crate) fn commented_out_code(
    context: &LintContext,
    locator: &Locator,
    comment_ranges: &CommentRanges,
) {
    let mut comments = comment_ranges.into_iter().peekable();

    // Walk every comment in the document.
    while let Some(range) = comments.next() {
        let line = locator.line_str(range.start());

        // A well-formed PEP 723 script block is metadata, not commented-out
        // code: skip all of its comments at once.
        if is_script_tag_start(line) && skip_script_comments(range, &mut comments, locator) {
            continue;
        }

        // Only own-line comments that actually contain code are flagged.
        if !is_own_line_comment(line) {
            continue;
        }
        if !comment_contains_code(line, &context.settings().task_tags[..]) {
            continue;
        }

        let Some(mut diagnostic) = context.report_diagnostic_if_enabled(CommentedOutCode, range)
        else {
            continue;
        };
        diagnostic.set_fix(Fix::display_only_edit(Edit::range_deletion(
            locator.full_lines_range(range),
        )));
    }
}
/// Parses the rest of a [PEP 723](https://peps.python.org/pep-0723/)
/// script comment and moves `comments` past the script comment's end unless
/// the script comment is invalid.
///
/// Returns `true` if it is a valid script comment.
fn skip_script_comments<I>(
    script_start: TextRange,
    comments: &mut std::iter::Peekable<I>,
    locator: &Locator,
) -> bool
where
    I: Iterator<Item = TextRange>,
{
    let body_start = locator.full_line_end(script_start.end());
    let remainder = locator.after(body_start);

    // Track the *last* `# ///` line: per the spec, the final closing fence
    // wins over any `# ///` embedded in the metadata body.
    let mut close_offset = None;
    for line in UniversalNewlineIterator::with_offset(remainder, body_start) {
        match script_line_content(&line) {
            Some("///") => close_offset = Some(line.full_end()),
            Some(_) => {}
            // First non-script line terminates the block.
            None => break,
        }
    }

    // > Unclosed blocks MUST be ignored.
    let Some(close_offset) = close_offset else {
        return false;
    };

    // Consume every comment that lies inside the script block.
    while comments.next_if(|comment| comment.start() < close_offset).is_some() {}

    true
}
/// Return the content of a PEP 723 script-comment line (the text after `# `),
/// or `None` if the line is not a valid script-comment line.
fn script_line_content(line: &str) -> Option<&str> {
    // Not a comment at all.
    let rest = line.strip_prefix('#')?;

    // A bare `#` is an empty script line.
    if rest.is_empty() {
        return Some("");
    }

    // > If there are characters after the # then the first character MUST be a space.
    rest.strip_prefix(' ')
}
/// Returns `true` if line contains an own-line comment (i.e., the first
/// non-whitespace character on the line is `#`).
fn is_own_line_comment(line: &str) -> bool {
    match line.chars().find(|char| !char.is_whitespace()) {
        Some('#') => true,
        Some(_) => false,
        // The caller only passes lines that contain a comment.
        None => unreachable!("Comment should contain '#' character"),
    }
}
/// Returns `true` if the line appears to start a script tag.
///
/// Only the exact, unindented form is recognized.
///
/// See: <https://peps.python.org/pep-0723/>
fn is_script_tag_start(line: &str) -> bool {
    matches!(line, "# /// script")
}
#[cfg(test)]
mod tests {
    use ruff_python_parser::parse_module;
    use ruff_python_trivia::CommentRanges;
    use ruff_source_file::LineRanges;
    use ruff_text_size::TextSize;

    use crate::Locator;
    use crate::rules::eradicate::rules::commented_out_code::skip_script_comments;

    // A well-formed PEP 723 block is skipped entirely; the next comment
    // yielded afterwards is the trailing `# abc`.
    #[test]
    fn script_comment() {
        let code = r#"
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "requests<3",
# "rich",
# ]
# ///
a = 10 # abc
"#;
        let parsed = parse_module(code).unwrap();
        let locator = Locator::new(code);
        let comments = CommentRanges::from(parsed.tokens());
        let mut comments = comments.into_iter().peekable();
        let script_start = code.find("# /// script").unwrap();
        let script_start_range = locator.full_line_range(TextSize::try_from(script_start).unwrap());
        let valid = skip_script_comments(script_start_range, &mut comments, &Locator::new(code));
        assert!(valid);
        let next_comment = comments.next();
        assert!(next_comment.is_some());
        assert_eq!(&code[next_comment.unwrap()], "# abc");
    }

    // When a `# ///` appears inside the metadata body, the *last* one is
    // treated as the closing fence.
    #[test]
    fn script_comment_end_precedence() {
        let code = r#"
# /// script
# [tool.uv]
# extra-index-url = ["https://pypi.org/simple", """\
# https://example.com/
# ///
# """
# ]
# ///
a = 10 # abc
"#;
        let parsed = parse_module(code).unwrap();
        let locator = Locator::new(code);
        let comments = CommentRanges::from(parsed.tokens());
        let mut comments = comments.into_iter().peekable();
        let script_start = code.find("# /// script").unwrap();
        let script_start_range = locator.full_line_range(TextSize::try_from(script_start).unwrap());
        let valid = skip_script_comments(script_start_range, &mut comments, &Locator::new(code));
        assert!(valid);
        let next_comment = comments.next();
        assert!(next_comment.is_some());
        assert_eq!(&code[next_comment.unwrap()], "# abc");
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/cst/helpers.rs | crates/ruff_linter/src/cst/helpers.rs | use libcst_native::{
Expression, LeftParen, Name, ParenthesizableWhitespace, ParenthesizedNode, RightParen,
SimpleWhitespace, UnaryOperation,
};
/// A [`ParenthesizableWhitespace`] consisting of exactly one space character.
pub(crate) fn space() -> ParenthesizableWhitespace<'static> {
    let single_space = SimpleWhitespace(" ");
    ParenthesizableWhitespace::SimpleWhitespace(single_space)
}
/// Ensure that a [`ParenthesizableWhitespace`] contains at least one space:
/// empty whitespace is replaced by a single space, anything else is returned
/// unchanged.
pub(crate) fn or_space(whitespace: ParenthesizableWhitespace) -> ParenthesizableWhitespace {
    match whitespace {
        empty if empty == ParenthesizableWhitespace::default() => space(),
        nonempty => nonempty,
    }
}
/// Negate a condition, i.e., `a` => `not a` and `not a` => `a`.
///
/// Boolean literals are flipped directly (`True` <=> `False`), and
/// expressions that bind more loosely than unary `not` are parenthesized
/// before being wrapped.
pub(crate) fn negate<'a>(expression: &Expression<'a>) -> Expression<'a> {
    // `not (not a)` => `a`: unwrap an existing `not`.
    if let Expression::UnaryOperation(unary) = expression {
        if matches!(unary.operator, libcst_native::UnaryOp::Not { .. }) {
            return *unary.expression.clone();
        }
    }

    // Flip the boolean literals directly.
    if let Expression::Name(name) = expression {
        let flipped = match name.value {
            "True" => Some("False"),
            "False" => Some("True"),
            _ => None,
        };
        if let Some(value) = flipped {
            return Expression::Name(Box::new(Name {
                value,
                lpar: vec![],
                rpar: vec![],
            }));
        }
    }

    // Expressions that bind more loosely than unary `not` must be wrapped in
    // parentheses: given `a and b`, we need to return `not (a and b)` rather
    // than `not a and b`.
    //
    // See: <https://docs.python.org/3/reference/expressions.html#operator-precedence>
    let requires_parens = matches!(
        expression,
        Expression::BooleanOperation(_)
            | Expression::IfExp(_)
            | Expression::Lambda(_)
            | Expression::NamedExpr(_)
    );
    let already_parenthesized = !expression.lpar().is_empty() && !expression.rpar().is_empty();

    let operand = if requires_parens && !already_parenthesized {
        expression
            .clone()
            .with_parens(LeftParen::default(), RightParen::default())
    } else {
        expression.clone()
    };

    // Wrap in a `not` operator.
    Expression::UnaryOperation(Box::new(UnaryOperation {
        operator: libcst_native::UnaryOp::Not {
            whitespace_after: space(),
        },
        expression: Box::new(operand),
        lpar: vec![],
        rpar: vec![],
    }))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/cst/matchers.rs | crates/ruff_linter/src/cst/matchers.rs | use crate::fix::codemods::CodegenStylist;
use anyhow::{Result, bail};
use libcst_native::{
Arg, Attribute, Call, Comparison, CompoundStatement, Dict, Expression, FormattedString,
FormattedStringContent, FormattedStringExpression, FunctionDef, GeneratorExp, If, Import,
ImportAlias, ImportFrom, ImportNames, IndentedBlock, Lambda, ListComp, Module, SmallStatement,
Statement, Suite, Tuple, With,
};
use ruff_python_codegen::Stylist;
/// Parse `module_text` into a libcst [`Module`], or error.
pub(crate) fn match_module(module_text: &str) -> Result<Module<'_>> {
    let Ok(module) = libcst_native::parse_module(module_text, None) else {
        bail!("Failed to extract CST from source")
    };
    Ok(module)
}
/// Parse `statement_text` into a libcst [`Statement`], or error.
pub(crate) fn match_statement(statement_text: &str) -> Result<Statement<'_>> {
    let Ok(statement) = libcst_native::parse_statement(statement_text) else {
        bail!("Failed to extract statement from source")
    };
    Ok(statement)
}
/// Extract the [`Import`] from a simple statement's first small statement.
pub(crate) fn match_import<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut Import<'b>> {
    let Statement::Simple(simple) = statement else {
        bail!("Expected Statement::Simple")
    };
    match simple.body.first_mut() {
        Some(SmallStatement::Import(import)) => Ok(import),
        _ => bail!("Expected SmallStatement::Import"),
    }
}
/// Extract the [`ImportFrom`] from a simple statement's first small statement.
pub(crate) fn match_import_from<'a, 'b>(
    statement: &'a mut Statement<'b>,
) -> Result<&'a mut ImportFrom<'b>> {
    let Statement::Simple(simple) = statement else {
        bail!("Expected Statement::Simple")
    };
    match simple.body.first_mut() {
        Some(SmallStatement::ImportFrom(import_from)) => Ok(import_from),
        _ => bail!("Expected SmallStatement::ImportFrom"),
    }
}
/// Extract the alias list from an `import from` statement's names.
pub(crate) fn match_aliases<'a, 'b>(
    import_from: &'a mut ImportFrom<'b>,
) -> Result<&'a mut Vec<ImportAlias<'b>>> {
    match &mut import_from.names {
        ImportNames::Aliases(aliases) => Ok(aliases),
        _ => bail!("Expected ImportNames::Aliases"),
    }
}
/// Extract the [`Call`] node from an expression, or error.
pub(crate) fn match_call<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Call<'b>> {
    match expression {
        Expression::Call(call) => Ok(call),
        _ => bail!("Expected Expression::Call"),
    }
}
/// Extract the [`Call`] node from an expression, mutably, or error.
pub(crate) fn match_call_mut<'a, 'b>(
    expression: &'a mut Expression<'b>,
) -> Result<&'a mut Call<'b>> {
    match expression {
        Expression::Call(call) => Ok(call),
        _ => bail!("Expected Expression::Call"),
    }
}
/// Extract the [`Comparison`] node from an expression, or error.
pub(crate) fn match_comparison<'a, 'b>(
    expression: &'a mut Expression<'b>,
) -> Result<&'a mut Comparison<'b>> {
    match expression {
        Expression::Comparison(comparison) => Ok(comparison),
        _ => bail!("Expected Expression::Comparison"),
    }
}
/// Extract the [`Dict`] node from an expression, or error.
pub(crate) fn match_dict<'a, 'b>(expression: &'a mut Expression<'b>) -> Result<&'a mut Dict<'b>> {
    match expression {
        Expression::Dict(dict) => Ok(dict),
        _ => bail!("Expected Expression::Dict"),
    }
}
/// Extract the [`Attribute`] node from an expression, or error.
pub(crate) fn match_attribute<'a, 'b>(
    expression: &'a mut Expression<'b>,
) -> Result<&'a mut Attribute<'b>> {
    match expression {
        Expression::Attribute(attribute) => Ok(attribute),
        _ => bail!("Expected Expression::Attribute"),
    }
}
/// Return the first argument of a call, or error if the call has none.
pub(crate) fn match_arg<'a, 'b>(call: &'a Call<'b>) -> Result<&'a Arg<'b>> {
    match call.args.first() {
        Some(arg) => Ok(arg),
        None => bail!("Expected Arg"),
    }
}
/// Extract the [`GeneratorExp`] node from an expression, or error.
pub(crate) fn match_generator_exp<'a, 'b>(
    expression: &'a Expression<'b>,
) -> Result<&'a GeneratorExp<'b>> {
    match expression {
        Expression::GeneratorExp(generator_exp) => Ok(generator_exp),
        _ => bail!("Expected Expression::GeneratorExp"),
    }
}
/// Extract the [`Tuple`] node from an expression, or error.
pub(crate) fn match_tuple<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Tuple<'b>> {
    match expression {
        Expression::Tuple(tuple) => Ok(tuple),
        _ => bail!("Expected Expression::Tuple"),
    }
}
/// Extract the [`ListComp`] node from an expression, or error.
pub(crate) fn match_list_comp<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a ListComp<'b>> {
    match expression {
        Expression::ListComp(list_comp) => Ok(list_comp),
        _ => bail!("Expected Expression::ListComp"),
    }
}
/// Extract the [`Lambda`] node from an expression, or error.
pub(crate) fn match_lambda<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Lambda<'b>> {
    match expression {
        Expression::Lambda(lambda) => Ok(lambda),
        _ => bail!("Expected Expression::Lambda"),
    }
}
/// Extract the [`FormattedString`] node from an expression, or error.
pub(crate) fn match_formatted_string<'a, 'b>(
    expression: &'a mut Expression<'b>,
) -> Result<&'a mut FormattedString<'b>> {
    match expression {
        Expression::FormattedString(formatted_string) => Ok(formatted_string),
        _ => bail!("Expected Expression::FormattedString"),
    }
}
/// Extract the expression part of an f-string's content, or error.
pub(crate) fn match_formatted_string_expression<'a, 'b>(
    formatted_string_content: &'a mut FormattedStringContent<'b>,
) -> Result<&'a mut FormattedStringExpression<'b>> {
    match formatted_string_content {
        FormattedStringContent::Expression(formatted_string_expression) => {
            Ok(formatted_string_expression)
        }
        _ => bail!("Expected FormattedStringContent::Expression"),
    }
}
/// Extract the [`FunctionDef`] from a compound statement, or error.
pub(crate) fn match_function_def<'a, 'b>(
    statement: &'a mut Statement<'b>,
) -> Result<&'a mut FunctionDef<'b>> {
    let Statement::Compound(compound) = statement else {
        bail!("Expected Statement::Compound")
    };
    match compound {
        CompoundStatement::FunctionDef(function_def) => Ok(function_def),
        _ => bail!("Expected CompoundStatement::FunctionDef"),
    }
}
/// Extract the [`IndentedBlock`] from a suite, or error.
pub(crate) fn match_indented_block<'a, 'b>(
    suite: &'a mut Suite<'b>,
) -> Result<&'a mut IndentedBlock<'b>> {
    match suite {
        Suite::IndentedBlock(indented_block) => Ok(indented_block),
        _ => bail!("Expected Suite::IndentedBlock"),
    }
}
/// Extract the [`With`] from a compound statement, or error.
pub(crate) fn match_with<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut With<'b>> {
    let Statement::Compound(compound) = statement else {
        bail!("Expected Statement::Compound")
    };
    match compound {
        CompoundStatement::With(with) => Ok(with),
        _ => bail!("Expected CompoundStatement::With"),
    }
}
/// Extract the [`If`] from a compound statement, or error.
pub(crate) fn match_if<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut If<'b>> {
    let Statement::Compound(compound) = statement else {
        bail!("Expected Statement::Compound")
    };
    match compound {
        CompoundStatement::If(if_) => Ok(if_),
        _ => bail!("Expected CompoundStatement::If"),
    }
}
/// Given the source code for an expression, return the parsed [`Expression`].
///
/// If the expression is not guaranteed to be valid as a standalone expression (e.g., if it may
/// span multiple lines and/or require parentheses), use [`transform_expression`] instead.
pub(crate) fn match_expression(expression_text: &str) -> Result<Expression<'_>> {
match libcst_native::parse_expression(expression_text) {
Ok(expression) => Ok(expression),
Err(_) => bail!("Failed to extract expression from source"),
}
}
/// Run a transformation function over an expression.
///
/// Passing an expression to [`match_expression`] directly can lead to parse errors if the
/// expression is not a valid standalone expression (e.g., it was parenthesized in the original
/// source). This method instead wraps the expression in "fake" parentheses, runs the
/// transformation, then removes the "fake" parentheses.
pub(crate) fn transform_expression(
source_code: &str,
stylist: &Stylist,
func: impl FnOnce(Expression) -> Result<Expression>,
) -> Result<String> {
// Wrap the expression in parentheses.
let source_code = format!("({source_code})");
let expression = match_expression(&source_code)?;
// Run the function on the expression.
let expression = func(expression)?;
// Codegen the expression.
let mut source_code = expression.codegen_stylist(stylist);
// Drop the outer parentheses.
source_code.drain(0..1);
source_code.drain(source_code.len() - 1..source_code.len());
Ok(source_code)
}
/// Like [`transform_expression`], but operates on the source code of the expression, rather than
/// the parsed [`Expression`]. This _shouldn't_ exist, but does to accommodate lifetime issues.
pub(crate) fn transform_expression_text(
source_code: &str,
func: impl FnOnce(String) -> Result<String>,
) -> Result<String> {
// Wrap the expression in parentheses.
let source_code = format!("({source_code})");
// Run the function on the expression.
let mut transformed = func(source_code)?;
// Drop the outer parentheses.
transformed.drain(0..1);
transformed.drain(transformed.len() - 1..transformed.len());
Ok(transformed)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/cst/mod.rs | crates/ruff_linter/src/cst/mod.rs | pub(crate) mod helpers;
pub(crate) mod matchers;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/comments/mod.rs | crates/ruff_linter/src/comments/mod.rs | pub(crate) mod shebang;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/comments/shebang.rs | crates/ruff_linter/src/comments/shebang.rs | use std::ops::Deref;
use ruff_python_trivia::Cursor;
/// A shebang directive (e.g., `#!/usr/bin/env python3`).
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct ShebangDirective<'a>(&'a str);
impl<'a> ShebangDirective<'a> {
/// Parse a shebang directive from a line, or return `None` if the line does not contain a
/// shebang directive.
pub(crate) fn try_extract(line: &'a str) -> Option<Self> {
let mut cursor = Cursor::new(line);
// Trim the `#!` prefix.
if !cursor.eat_char('#') {
return None;
}
if !cursor.eat_char('!') {
return None;
}
Some(Self(cursor.chars().as_str()))
}
}
impl Deref for ShebangDirective<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.0
}
}
#[cfg(test)]
mod tests {
use insta::assert_debug_snapshot;
use super::ShebangDirective;
#[test]
fn shebang_non_match() {
let source = "not a match";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_end_of_line() {
let source = "print('test') #!/usr/bin/python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_match() {
let source = "#!/usr/bin/env python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_match_trailing_comment() {
let source = "#!/usr/bin/env python # trailing comment";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_leading_space() {
let source = " #!/usr/bin/env python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/fix/codemods.rs | crates/ruff_linter/src/fix/codemods.rs | //! Interface for editing code snippets. These functions take statements or expressions as input,
//! and return the modified code snippet as output.
use std::borrow::Cow;
use anyhow::{Result, bail};
use libcst_native::{
Codegen, CodegenState, Expression, ImportNames, NameOrAttribute, ParenthesizableWhitespace,
SmallStatement, Statement,
};
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{SmallVec, smallvec};
use unicode_normalization::UnicodeNormalization;
use ruff_python_ast::Stmt;
use ruff_python_ast::name::UnqualifiedName;
use ruff_python_codegen::Stylist;
use ruff_text_size::Ranged;
use crate::Locator;
use crate::cst::matchers::match_statement;
/// Glue code to make libcst codegen work with ruff's Stylist
pub(crate) trait CodegenStylist<'a>: Codegen<'a> {
fn codegen_stylist(&self, stylist: &'a Stylist) -> String;
}
impl<'a, T: Codegen<'a>> CodegenStylist<'a> for T {
fn codegen_stylist(&self, stylist: &'a Stylist) -> String {
let mut state = CodegenState {
default_newline: stylist.line_ending().as_str(),
default_indent: stylist.indentation(),
..Default::default()
};
self.codegen(&mut state);
state.to_string()
}
}
/// Given an import statement, remove any imports that are specified in the `imports` iterator.
///
/// Returns `Ok(None)` if the statement is empty after removing the imports.
pub(crate) fn remove_imports<'a>(
member_names: impl Iterator<Item = &'a str>,
stmt: &Stmt,
locator: &Locator,
stylist: &Stylist,
) -> Result<Option<String>> {
let module_text = locator.slice(stmt);
let mut tree = match_statement(module_text)?;
let Statement::Simple(body) = &mut tree else {
bail!("Expected Statement::Simple");
};
let aliases = match body.body.first_mut() {
Some(SmallStatement::Import(import_body)) => &mut import_body.names,
Some(SmallStatement::ImportFrom(import_body)) => {
if let ImportNames::Aliases(names) = &mut import_body.names {
names
} else if let ImportNames::Star(..) = &import_body.names {
// Special-case: if the import is a `from ... import *`, then we delete the
// entire statement.
let mut found_star = false;
for member in member_names {
if member == "*" {
found_star = true;
} else {
bail!("Expected \"*\" for unused import (got: \"{member}\")");
}
}
if !found_star {
bail!("Expected \'*\' for unused import");
}
return Ok(None);
} else {
bail!("Expected: ImportNames::Aliases | ImportNames::Star");
}
}
_ => bail!("Expected: SmallStatement::ImportFrom | SmallStatement::Import"),
};
// Preserve the trailing comma (or not) from the last entry.
let trailing_comma = aliases.last().and_then(|alias| alias.comma.clone());
// Remove any imports that are specified in the `imports` iterator (but, e.g., if the name is
// provided once, only remove the first occurrence).
let mut counts = member_names.fold(FxHashMap::<&str, usize>::default(), |mut map, name| {
map.entry(name).and_modify(|c| *c += 1).or_insert(1);
map
});
aliases.retain(|alias| {
let name = qualified_name_from_name_or_attribute(&alias.name);
if let Some(count) = counts.get_mut(name.as_str()).filter(|count| **count > 0) {
*count -= 1;
false
} else {
true
}
});
// But avoid destroying any trailing comments.
if let Some(alias) = aliases.last_mut() {
let has_comment = if let Some(comma) = &alias.comma {
match &comma.whitespace_after {
ParenthesizableWhitespace::SimpleWhitespace(_) => false,
ParenthesizableWhitespace::ParenthesizedWhitespace(whitespace) => {
whitespace.first_line.comment.is_some()
}
}
} else {
false
};
if !has_comment {
alias.comma = trailing_comma;
}
}
if aliases.is_empty() {
return Ok(None);
}
Ok(Some(tree.codegen_stylist(stylist)))
}
/// Given an import statement, remove any imports that are not specified in the `imports` slice.
///
/// Returns the modified import statement.
pub(crate) fn retain_imports(
member_names: &[&str],
stmt: &Stmt,
contents: &str,
stylist: &Stylist,
) -> Result<String> {
let module_text = &contents[stmt.range()];
let mut tree = match_statement(module_text)?;
let Statement::Simple(body) = &mut tree else {
bail!("Expected Statement::Simple");
};
let aliases = match body.body.first_mut() {
Some(SmallStatement::Import(import_body)) => &mut import_body.names,
Some(SmallStatement::ImportFrom(import_body)) => {
if let ImportNames::Aliases(names) = &mut import_body.names {
names
} else {
bail!("Expected: ImportNames::Aliases");
}
}
_ => bail!("Expected: SmallStatement::ImportFrom | SmallStatement::Import"),
};
// Preserve the trailing comma (or not) from the last entry.
let trailing_comma = aliases.last().and_then(|alias| alias.comma.clone());
// Retain any imports that are specified in the `imports` iterator.
let member_names = member_names.iter().copied().collect::<FxHashSet<_>>();
aliases.retain(|alias| {
member_names.contains(qualified_name_from_name_or_attribute(&alias.name).as_str())
});
// But avoid destroying any trailing comments.
if let Some(alias) = aliases.last_mut() {
let has_comment = if let Some(comma) = &alias.comma {
match &comma.whitespace_after {
ParenthesizableWhitespace::SimpleWhitespace(_) => false,
ParenthesizableWhitespace::ParenthesizedWhitespace(whitespace) => {
whitespace.first_line.comment.is_some()
}
}
} else {
false
};
if !has_comment {
alias.comma = trailing_comma;
}
}
Ok(tree.codegen_stylist(stylist))
}
/// Create an NFKC-normalized qualified name from a libCST node.
fn qualified_name_from_name_or_attribute(module: &NameOrAttribute) -> String {
fn collect_segments<'a>(expr: &'a Expression, parts: &mut SmallVec<[&'a str; 8]>) {
match expr {
Expression::Call(expr) => {
collect_segments(&expr.func, parts);
}
Expression::Attribute(expr) => {
collect_segments(&expr.value, parts);
parts.push(expr.attr.value);
}
Expression::Name(expr) => {
parts.push(expr.value);
}
_ => {}
}
}
/// Attempt to create an [`UnqualifiedName`] from a libCST expression.
///
/// Strictly speaking, the `UnqualifiedName` returned by this function may be invalid,
/// since it hasn't been NFKC-normalized. In order for an `UnqualifiedName` to be
/// comparable to one constructed from a `ruff_python_ast` node, it has to undergo
/// NFKC normalization. As a local function, however, this is fine;
/// the outer function always performs NFKC normalization before returning the
/// qualified name to the caller.
fn unqualified_name_from_expression<'a>(
expr: &'a Expression<'a>,
) -> Option<UnqualifiedName<'a>> {
let mut segments = smallvec![];
collect_segments(expr, &mut segments);
if segments.is_empty() {
None
} else {
Some(segments.into_iter().collect())
}
}
let unnormalized = match module {
NameOrAttribute::N(name) => Cow::Borrowed(name.value),
NameOrAttribute::A(attr) => {
let name = attr.attr.value;
let prefix = unqualified_name_from_expression(&attr.value);
prefix.map_or_else(
|| Cow::Borrowed(name),
|prefix| Cow::Owned(format!("{prefix}.{name}")),
)
}
};
unnormalized.nfkc().collect()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/fix/snippet.rs | crates/ruff_linter/src/fix/snippet.rs | use unicode_width::UnicodeWidthStr;
/// A snippet of source code for user-facing display, as in a diagnostic.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SourceCodeSnippet(String);
impl SourceCodeSnippet {
pub(crate) fn new(source_code: String) -> Self {
Self(source_code)
}
pub(crate) fn from_str(source_code: &str) -> Self {
Self(source_code.to_string())
}
/// Return the full snippet for user-facing display, or `None` if the snippet should be
/// truncated.
pub(crate) fn full_display(&self) -> Option<&str> {
if Self::should_truncate(&self.0) {
None
} else {
Some(&self.0)
}
}
/// Return a truncated snippet for user-facing display.
pub(crate) fn truncated_display(&self) -> &str {
if Self::should_truncate(&self.0) {
"..."
} else {
&self.0
}
}
/// Returns `true` if the source code should be truncated when included in a user-facing
/// diagnostic.
fn should_truncate(source_code: &str) -> bool {
source_code.width() > 50 || source_code.contains(['\r', '\n'])
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/fix/edits.rs | crates/ruff_linter/src/fix/edits.rs | //! Interface for generating fix edits from higher-level actions (e.g., "remove an argument").
use anyhow::{Context, Result};
use ruff_python_ast::AnyNodeRef;
use ruff_python_ast::token::{self, Tokens, parenthesized_range};
use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Expr, ExprList, Parameters, Stmt};
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_trivia::textwrap::dedent_to;
use ruff_python_trivia::{
PythonWhitespace, SimpleTokenKind, SimpleTokenizer, has_leading_content, is_python_whitespace,
};
use ruff_source_file::{LineRanges, NewlineWithTrailingNewline, UniversalNewlines};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::Edit;
use crate::Locator;
use crate::cst::matchers::{match_function_def, match_indented_block, match_statement};
use crate::fix::codemods;
use crate::fix::codemods::CodegenStylist;
use crate::line_width::{IndentWidth, LineLength, LineWidthBuilder};
/// Return the [`Edit`] to use when deleting a [`Stmt`].
///
/// In some cases, this is as simple as deleting the [`TextRange`] of the [`Stmt`]
/// itself. However, there are a few exceptions:
/// - If the [`Stmt`] is _not_ the terminal statement in a multi-statement line,
/// we need to delete up to the start of the next statement (and avoid
/// deleting any content that precedes the statement).
/// - If the [`Stmt`] is the terminal statement in a multi-statement line, we need
/// to avoid deleting any content that precedes the statement.
/// - If the [`Stmt`] has no trailing and leading content, then it's convenient to
/// remove the entire start and end lines.
/// - If the [`Stmt`] is the last statement in its parent body, replace it with a
/// `pass` instead.
pub(crate) fn delete_stmt(
stmt: &Stmt,
parent: Option<&Stmt>,
locator: &Locator,
indexer: &Indexer,
) -> Edit {
if parent.is_some_and(|parent| is_lone_child(stmt, parent)) {
// If removing this node would lead to an invalid syntax tree, replace
// it with a `pass`.
Edit::range_replacement("pass".to_string(), stmt.range())
} else {
if let Some(semicolon) = trailing_semicolon(stmt.end(), locator) {
let next = next_stmt_break(semicolon, locator);
Edit::deletion(stmt.start(), next)
} else if has_leading_content(stmt.start(), locator.contents()) {
Edit::range_deletion(stmt.range())
} else if let Some(start) =
indexer.preceded_by_continuations(stmt.start(), locator.contents())
{
Edit::deletion(start, stmt.end())
} else {
let range = locator.full_lines_range(stmt.range());
Edit::range_deletion(range)
}
}
}
/// Generate a [`Edit`] to delete a comment (for example: a `noqa` directive).
pub(crate) fn delete_comment(range: TextRange, locator: &Locator) -> Edit {
let line_range = locator.line_range(range.start());
// Compute the leading space.
let prefix = locator.slice(TextRange::new(line_range.start(), range.start()));
let leading_space_len = prefix.text_len() - prefix.trim_whitespace_end().text_len();
// Compute the trailing space.
let suffix = locator.slice(TextRange::new(range.end(), line_range.end()));
let trailing_space_len = suffix.text_len() - suffix.trim_whitespace_start().text_len();
// Ex) `# noqa`
if line_range
== TextRange::new(
range.start() - leading_space_len,
range.end() + trailing_space_len,
)
{
let full_line_end = locator.full_line_end(line_range.end());
Edit::deletion(line_range.start(), full_line_end)
}
// Ex) `x = 1 # noqa`
else if range.end() + trailing_space_len == line_range.end() {
// Replace `x = 1 # noqa` with `x = 1`.
Edit::deletion(range.start() - leading_space_len, line_range.end())
}
// Ex) `x = 1 # noqa # type: ignore`
else if locator
.slice(TextRange::new(
range.end() + trailing_space_len,
line_range.end(),
))
.starts_with('#')
{
// Replace `# noqa # type: ignore` with `# type: ignore`.
Edit::deletion(range.start(), range.end() + trailing_space_len)
}
// Ex) `x = 1 # noqa here`
else {
// Remove `# noqa here` and whitespace
Edit::deletion(range.start() - leading_space_len, line_range.end())
}
}
/// Generate a `Fix` to remove the specified imports from an `import` statement.
pub(crate) fn remove_unused_imports<'a>(
member_names: impl Iterator<Item = &'a str>,
stmt: &Stmt,
parent: Option<&Stmt>,
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
) -> Result<Edit> {
match codemods::remove_imports(member_names, stmt, locator, stylist)? {
None => Ok(delete_stmt(stmt, parent, locator, indexer)),
Some(content) => Ok(Edit::range_replacement(content, stmt.range())),
}
}
/// Edits to make the specified imports explicit, e.g. change `import x` to `import x as x`.
pub(crate) fn make_redundant_alias<'a>(
member_names: impl Iterator<Item = &'a str>,
stmt: &Stmt,
) -> Vec<Edit> {
let aliases = match stmt {
Stmt::Import(ast::StmtImport { names, .. }) => names,
Stmt::ImportFrom(ast::StmtImportFrom { names, .. }) => names,
_ => {
return Vec::new();
}
};
member_names
.filter_map(|name| {
aliases
.iter()
.find(|alias| alias.asname.is_none() && *name == alias.name.id)
.map(|alias| Edit::range_replacement(format!("{name} as {name}"), alias.range))
})
.collect()
}
/// Fix to add the specified imports to the `__all__` export list.
pub(crate) fn add_to_dunder_all<'a>(
names: impl Iterator<Item = &'a str>,
expr: &Expr,
stylist: &Stylist,
) -> Vec<Edit> {
let (insertion_point, export_prefix_length) = match expr {
Expr::List(ExprList { elts, .. }) => (
elts.last().map_or(expr.end() - "]".text_len(), Ranged::end),
elts.len(),
),
Expr::Tuple(tup) if tup.parenthesized => (
tup.elts
.last()
.map_or(tup.end() - ")".text_len(), Ranged::end),
tup.len(),
),
Expr::Tuple(tup) if !tup.parenthesized => (
tup.elts
.last()
.expect("unparenthesized empty tuple is not possible")
.range()
.end(),
tup.len(),
),
_ => {
// we don't know how to insert into this expression
return vec![];
}
};
let quote = stylist.quote();
let mut edits: Vec<_> = names
.enumerate()
.map(|(offset, name)| match export_prefix_length + offset {
0 => Edit::insertion(format!("{quote}{name}{quote}"), insertion_point),
_ => Edit::insertion(format!(", {quote}{name}{quote}"), insertion_point),
})
.collect();
if let Expr::Tuple(tup) = expr {
if tup.parenthesized && export_prefix_length + edits.len() == 1 {
edits.push(Edit::insertion(",".to_string(), insertion_point));
}
}
edits
}
#[derive(Debug, Copy, Clone)]
pub(crate) enum Parentheses {
/// Remove parentheses, if the removed argument is the only argument left.
Remove,
/// Preserve parentheses, even if the removed argument is the only argument
Preserve,
}
/// Generic function to remove arguments or keyword arguments in function
/// calls and class definitions. (For classes, `args` should be considered
/// `bases`.)
///
/// Supports the removal of parentheses when this is the only (kw)arg left.
/// For this behavior, set `parentheses` to `Parentheses::Remove`.
pub(crate) fn remove_argument<T: Ranged>(
argument: &T,
arguments: &Arguments,
parentheses: Parentheses,
source: &str,
tokens: &Tokens,
) -> Result<Edit> {
// Partition into arguments before and after the argument to remove.
let (before, after): (Vec<_>, Vec<_>) = arguments
.arguments_source_order()
.map(|arg| arg.range())
.filter(|range| argument.range() != *range)
.partition(|range| range.start() < argument.start());
let arg = arguments
.arguments_source_order()
.find(|arg| arg.range() == argument.range())
.context("Unable to find argument")?;
let parenthesized_range =
token::parenthesized_range(arg.value().into(), arguments.into(), tokens)
.unwrap_or(arg.range());
if !after.is_empty() {
// Case 1: argument or keyword is _not_ the last node, so delete from the start of the
// argument to the end of the subsequent comma.
let mut tokenizer = SimpleTokenizer::starts_at(argument.end(), source);
// Find the trailing comma.
tokenizer
.find(|token| token.kind == SimpleTokenKind::Comma)
.context("Unable to find trailing comma")?;
// Find the next non-whitespace token.
let next = tokenizer
.find(|token| {
token.kind != SimpleTokenKind::Whitespace && token.kind != SimpleTokenKind::Newline
})
.context("Unable to find next token")?;
Ok(Edit::deletion(parenthesized_range.start(), next.start()))
} else if let Some(previous) = before.iter().map(Ranged::end).max() {
// Case 2: argument or keyword is the last node, so delete from the start of the
// previous comma to the end of the argument.
let mut tokenizer = SimpleTokenizer::starts_at(previous, source);
// Find the trailing comma.
let comma = tokenizer
.find(|token| token.kind == SimpleTokenKind::Comma)
.context("Unable to find trailing comma")?;
Ok(Edit::deletion(comma.start(), parenthesized_range.end()))
} else {
// Case 3: argument or keyword is the only node, so delete the arguments (but preserve
// parentheses, if needed).
Ok(match parentheses {
Parentheses::Remove => Edit::range_deletion(arguments.range()),
Parentheses::Preserve => Edit::range_replacement("()".to_string(), arguments.range()),
})
}
}
/// Generic function to add arguments or keyword arguments to function calls.
///
/// The new argument will be inserted before the first existing keyword argument in `arguments`, if
/// there are any present. Otherwise, the new argument is added to the end of the argument list.
pub(crate) fn add_argument(argument: &str, arguments: &Arguments, tokens: &Tokens) -> Edit {
if let Some(ast::Keyword { range, value, .. }) = arguments.keywords.first() {
let keyword = parenthesized_range(value.into(), arguments.into(), tokens).unwrap_or(*range);
Edit::insertion(format!("{argument}, "), keyword.start())
} else if let Some(last) = arguments.arguments_source_order().last() {
// Case 1: existing arguments, so append after the last argument.
let last = parenthesized_range(last.value().into(), arguments.into(), tokens)
.unwrap_or(last.range());
Edit::insertion(format!(", {argument}"), last.end())
} else {
// Case 2: no arguments. Add argument, without any trailing comma.
Edit::insertion(argument.to_string(), arguments.start() + TextSize::from(1))
}
}
/// Generic function to add a (regular) parameter to a function definition.
pub(crate) fn add_parameter(parameter: &str, parameters: &Parameters, source: &str) -> Edit {
if let Some(last) = parameters.args.iter().rfind(|arg| arg.default.is_none()) {
// Case 1: at least one regular parameter, so append after the last one.
Edit::insertion(format!(", {parameter}"), last.end())
} else if !parameters.args.is_empty() {
// Case 2: no regular parameters, but at least one keyword parameter, so add before the
// first.
let pos = parameters.start();
let mut tokenizer = SimpleTokenizer::starts_at(pos, source);
let name = tokenizer
.find(|token| token.kind == SimpleTokenKind::Name)
.expect("Unable to find name token");
Edit::insertion(format!("{parameter}, "), name.start())
} else if let Some(last) = parameters.posonlyargs.last() {
// Case 2: no regular parameter, but a positional-only parameter exists, so add after that.
// We take care to add it *after* the `/` separator.
let pos = last.end();
let mut tokenizer = SimpleTokenizer::starts_at(pos, source);
let slash = tokenizer
.find(|token| token.kind == SimpleTokenKind::Slash)
.expect("Unable to find `/` token");
// Try to find a comma after the slash.
let comma = tokenizer.find(|token| token.kind == SimpleTokenKind::Comma);
if let Some(comma) = comma {
Edit::insertion(format!(" {parameter},"), comma.start() + TextSize::from(1))
} else {
Edit::insertion(format!(", {parameter}"), slash.start())
}
} else if !parameters.kwonlyargs.is_empty() {
// Case 3: no regular parameter, but a keyword-only parameter exist, so add parameter before that.
// We need to backtrack to before the `*` separator.
// We know there is no non-keyword-only params, so we can safely assume that the `*` separator is the first
let pos = parameters.start();
let mut tokenizer = SimpleTokenizer::starts_at(pos, source);
let star = tokenizer
.find(|token| token.kind == SimpleTokenKind::Star)
.expect("Unable to find `*` token");
Edit::insertion(format!("{parameter}, "), star.start())
} else {
// Case 4: no parameters at all, so add parameter after the opening parenthesis.
Edit::insertion(
parameter.to_string(),
parameters.start() + TextSize::from(1),
)
}
}
/// Safely adjust the indentation of the indented block at [`TextRange`].
///
/// The [`TextRange`] is assumed to represent an entire indented block, including the leading
/// indentation of that block. For example, to dedent the body here:
/// ```python
/// if True:
/// print("Hello, world!")
/// ```
///
/// The range would be the entirety of ` print("Hello, world!")`.
pub(crate) fn adjust_indentation(
range: TextRange,
indentation: &str,
locator: &Locator,
indexer: &Indexer,
stylist: &Stylist,
) -> Result<String> {
let contents = locator.slice(range);
// If the range includes a multi-line string, use LibCST to ensure that we don't adjust the
// whitespace _within_ the string.
let contains_multiline_string = indexer.multiline_ranges().intersects(range)
|| indexer.interpolated_string_ranges().intersects(range);
// If the range has mixed indentation, we will use LibCST as well.
let mixed_indentation = contents.universal_newlines().any(|line| {
let trimmed = line.trim_whitespace_start();
if trimmed.is_empty() {
return false;
}
let line_indentation: &str = &line[..line.len() - trimmed.len()];
line_indentation.contains('\t') && line_indentation.contains(' ')
});
// For simple cases, try to do a manual dedent.
if !contains_multiline_string && !mixed_indentation {
if let Some(dedent) = dedent_to(contents, indentation) {
return Ok(dedent);
}
}
let module_text = format!("def f():{}{contents}", stylist.line_ending().as_str());
let mut tree = match_statement(&module_text)?;
let embedding = match_function_def(&mut tree)?;
let indented_block = match_indented_block(&mut embedding.body)?;
indented_block.indent = Some(indentation);
let module_text = indented_block.codegen_stylist(stylist);
let module_text = module_text
.strip_prefix(stylist.line_ending().as_str())
.unwrap()
.to_string();
Ok(module_text)
}
/// Determine if a vector contains only one, specific element.
fn is_only<T: PartialEq>(vec: &[T], value: &T) -> bool {
vec.len() == 1 && vec[0] == *value
}
/// Determine if a child is the only statement in its body.
fn is_lone_child(child: &Stmt, parent: &Stmt) -> bool {
match parent {
Stmt::FunctionDef(ast::StmtFunctionDef { body, .. })
| Stmt::ClassDef(ast::StmtClassDef { body, .. })
| Stmt::With(ast::StmtWith { body, .. }) => {
if is_only(body, child) {
return true;
}
}
Stmt::For(ast::StmtFor { body, orelse, .. })
| Stmt::While(ast::StmtWhile { body, orelse, .. }) => {
if is_only(body, child) || is_only(orelse, child) {
return true;
}
}
Stmt::If(ast::StmtIf {
body,
elif_else_clauses,
..
}) => {
if is_only(body, child)
|| elif_else_clauses
.iter()
.any(|ast::ElifElseClause { body, .. }| is_only(body, child))
{
return true;
}
}
Stmt::Try(ast::StmtTry {
body,
handlers,
orelse,
finalbody,
..
}) => {
if is_only(body, child)
|| is_only(orelse, child)
|| is_only(finalbody, child)
|| handlers.iter().any(|handler| match handler {
ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
body, ..
}) => is_only(body, child),
})
{
return true;
}
}
Stmt::Match(ast::StmtMatch { cases, .. }) => {
if cases.iter().any(|case| is_only(&case.body, child)) {
return true;
}
}
_ => {}
}
false
}
/// Return the location of a trailing semicolon following a `Stmt`, if it's part
/// of a multi-statement line.
fn trailing_semicolon(offset: TextSize, locator: &Locator) -> Option<TextSize> {
let contents = locator.after(offset);
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim_whitespace_start();
if trimmed.starts_with(';') {
let colon_offset = line.text_len() - trimmed.text_len();
return Some(offset + line.start() + colon_offset);
}
if !trimmed.starts_with('\\') {
break;
}
}
None
}
/// Find the next valid break for a `Stmt` after a semicolon.
fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
let start_location = semicolon + TextSize::from(1);
for line in
NewlineWithTrailingNewline::with_offset(locator.after(start_location), start_location)
{
let trimmed = line.trim_whitespace();
// Skip past any continuations.
if trimmed.starts_with('\\') {
continue;
}
return if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
line.start()
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let relative_offset = line.find(|c: char| !is_python_whitespace(c)).unwrap();
line.start() + TextSize::try_from(relative_offset).unwrap()
};
}
locator.line_end(start_location)
}
/// Add leading whitespace to a snippet, if it's immediately preceded an identifier or keyword.
pub(crate) fn pad_start(mut content: String, start: TextSize, locator: &Locator) -> String {
// Ex) When converting `except(ValueError,)` from a tuple to a single argument, we need to
// insert a space before the fix, to achieve `except ValueError`.
if locator
.up_to(start)
.chars()
.last()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.insert(0, ' ');
}
content
}
/// Add trailing whitespace to a snippet, if it's immediately followed by an identifier or keyword.
pub(crate) fn pad_end(mut content: String, end: TextSize, locator: &Locator) -> String {
if locator
.after(end)
.chars()
.next()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.push(' ');
}
content
}
/// Add leading or trailing whitespace to a snippet, if it's immediately preceded or followed by
/// an identifier or keyword.
pub(crate) fn pad(content: String, range: TextRange, locator: &Locator) -> String {
pad_start(
pad_end(content, range.end(), locator),
range.start(),
locator,
)
}
/// Returns `true` if the fix fits within the maximum configured line length.
pub(crate) fn fits(
fix: &str,
node: AnyNodeRef,
locator: &Locator,
line_length: LineLength,
tab_size: IndentWidth,
) -> bool {
all_lines_fit(fix, node, locator, line_length.value() as usize, tab_size)
}
/// Returns `true` if all lines in the fix are shorter than the given line length.
fn all_lines_fit(
fix: &str,
node: AnyNodeRef,
locator: &Locator,
line_length: usize,
tab_size: IndentWidth,
) -> bool {
let prefix = locator.slice(TextRange::new(
locator.line_start(node.start()),
node.start(),
));
// Ensure that all lines are shorter than the line length limit.
fix.universal_newlines().enumerate().all(|(idx, line)| {
// If `template` is a multiline string, `col_offset` should only be applied to the first
// line:
// ```
// a = """{} -> offset = col_offset (= 4)
// {} -> offset = 0
// """.format(0, 1) -> offset = 0
// ```
let measured_length = if idx == 0 {
LineWidthBuilder::new(tab_size)
.add_str(prefix)
.add_str(&line)
.get()
} else {
LineWidthBuilder::new(tab_size).add_str(&line).get()
};
measured_length <= line_length
})
}
#[cfg(test)]
mod tests {
    use anyhow::{Result, anyhow};
    use ruff_source_file::SourceFileBuilder;
    use test_case::test_case;

    use ruff_python_ast::Stmt;
    use ruff_python_codegen::Stylist;
    use ruff_python_parser::{parse_expression, parse_module};
    use ruff_text_size::{Ranged, TextRange, TextSize};

    use crate::fix::apply_fixes;
    use crate::fix::edits::{
        add_to_dunder_all, make_redundant_alias, next_stmt_break, trailing_semicolon,
    };
    use crate::{Edit, Fix, Locator, Violation};

    /// Parse the given source using [`Mode::Module`] and return the first statement.
    fn parse_first_stmt(source: &str) -> Result<Stmt> {
        let suite = parse_module(source)?.into_suite();
        Ok(suite.into_iter().next().unwrap())
    }

    #[test]
    fn find_semicolon() -> Result<()> {
        // No semicolon after the statement at all.
        let contents = "x = 1";
        let stmt = parse_first_stmt(contents)?;
        let locator = Locator::new(contents);
        assert_eq!(trailing_semicolon(stmt.end(), &locator), None);

        // Semicolon immediately after the statement.
        let contents = "x = 1; y = 1";
        let stmt = parse_first_stmt(contents)?;
        let locator = Locator::new(contents);
        assert_eq!(
            trailing_semicolon(stmt.end(), &locator),
            Some(TextSize::from(5))
        );

        // Whitespace between the statement and the semicolon.
        let contents = "x = 1 ; y = 1";
        let stmt = parse_first_stmt(contents)?;
        let locator = Locator::new(contents);
        assert_eq!(
            trailing_semicolon(stmt.end(), &locator),
            Some(TextSize::from(6))
        );

        // The semicolon sits on a continuation line (after a backslash).
        let contents = r"
x = 1 \
  ; y = 1
"
        .trim();
        let stmt = parse_first_stmt(contents)?;
        let locator = Locator::new(contents);
        assert_eq!(
            trailing_semicolon(stmt.end(), &locator),
            Some(TextSize::from(10))
        );

        Ok(())
    }

    #[test]
    fn find_next_stmt_break() {
        // Break falls just past the semicolon.
        let contents = "x = 1; y = 1";
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(4), &locator),
            TextSize::from(5)
        );

        let contents = "x = 1 ; y = 1";
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(5), &locator),
            TextSize::from(6)
        );

        // With a continuation line, the break skips past the semicolon and
        // the following space.
        let contents = r"
x = 1 \
  ; y = 1
"
        .trim();
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(10), &locator),
            TextSize::from(12)
        );
    }

    #[test]
    fn redundant_alias() -> Result<()> {
        let contents = "import x, y as y, z as bees";
        let stmt = parse_first_stmt(contents)?;
        assert_eq!(
            make_redundant_alias(["x"].into_iter(), &stmt),
            vec![Edit::range_replacement(
                String::from("x as x"),
                TextRange::new(TextSize::new(7), TextSize::new(8)),
            )],
            "make just one item redundant"
        );
        assert_eq!(
            make_redundant_alias(vec!["x", "y"].into_iter(), &stmt),
            vec![Edit::range_replacement(
                String::from("x as x"),
                TextRange::new(TextSize::new(7), TextSize::new(8)),
            )],
            "the second item is already a redundant alias"
        );
        assert_eq!(
            make_redundant_alias(vec!["x", "z"].into_iter(), &stmt),
            vec![Edit::range_replacement(
                String::from("x as x"),
                TextRange::new(TextSize::new(7), TextSize::new(8)),
            )],
            "the third item is already aliased to something else"
        );
        Ok(())
    }

    // Each case: (existing `__all__` expression, names to add, expected result
    // after applying the produced edits).
    #[test_case("()", &["x", "y"], r#"("x", "y")"# ; "2 into empty tuple")]
    #[test_case("()", &["x"], r#"("x",)"# ; "1 into empty tuple adding a trailing comma")]
    #[test_case("[]", &["x", "y"], r#"["x", "y"]"# ; "2 into empty list")]
    #[test_case("[]", &["x"], r#"["x"]"# ; "1 into empty list")]
    #[test_case(r#""a", "b""#, &["x", "y"], r#""a", "b", "x", "y""# ; "2 into unparenthesized tuple")]
    #[test_case(r#""a", "b""#, &["x"], r#""a", "b", "x""# ; "1 into unparenthesized tuple")]
    #[test_case(r#""a", "b","#, &["x", "y"], r#""a", "b", "x", "y","# ; "2 into unparenthesized tuple w/trailing comma")]
    #[test_case(r#""a", "b","#, &["x"], r#""a", "b", "x","# ; "1 into unparenthesized tuple w/trailing comma")]
    #[test_case(r#"("a", "b")"#, &["x", "y"], r#"("a", "b", "x", "y")"# ; "2 into nonempty tuple")]
    #[test_case(r#"("a", "b")"#, &["x"], r#"("a", "b", "x")"# ; "1 into nonempty tuple")]
    #[test_case(r#"("a", "b",)"#, &["x", "y"], r#"("a", "b", "x", "y",)"# ; "2 into nonempty tuple w/trailing comma")]
    #[test_case(r#"("a", "b",)"#, &["x"], r#"("a", "b", "x",)"# ; "1 into nonempty tuple w/trailing comma")]
    #[test_case(r#"["a", "b",]"#, &["x", "y"], r#"["a", "b", "x", "y",]"# ; "2 into nonempty list w/trailing comma")]
    #[test_case(r#"["a", "b",]"#, &["x"], r#"["a", "b", "x",]"# ; "1 into nonempty list w/trailing comma")]
    #[test_case(r#"["a", "b"]"#, &["x", "y"], r#"["a", "b", "x", "y"]"# ; "2 into nonempty list")]
    #[test_case(r#"["a", "b"]"#, &["x"], r#"["a", "b", "x"]"# ; "1 into nonempty list")]
    fn add_to_dunder_all_test(raw: &str, names: &[&str], expect: &str) -> Result<()> {
        let locator = Locator::new(raw);
        let edits = {
            let parsed = parse_expression(raw)?;
            let stylist = Stylist::from_tokens(parsed.tokens(), locator.contents());
            add_to_dunder_all(names.iter().copied(), parsed.expr(), &stylist)
        };
        // Wrap the edits in a diagnostic so they can be run through `apply_fixes`.
        let diag = {
            use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
            let mut iter = edits.into_iter();
            // The choice of rule here is arbitrary.
            let mut diagnostic = MissingNewlineAtEndOfFile.into_diagnostic(
                TextRange::default(),
                &SourceFileBuilder::new("<filename>", "<code>").finish(),
            );
            diagnostic.set_fix(Fix::safe_edits(
                iter.next().ok_or(anyhow!("expected edits nonempty"))?,
                iter,
            ));
            diagnostic
        };
        assert_eq!(apply_fixes([diag].iter(), &locator).code, expect);
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/fix/mod.rs | crates/ruff_linter/src/fix/mod.rs | use std::collections::BTreeSet;
use itertools::Itertools;
use rustc_hash::FxHashSet;
use ruff_db::diagnostic::Diagnostic;
use ruff_diagnostics::{IsolationLevel, SourceMap};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::Locator;
use crate::linter::FixTable;
use crate::registry::Rule;
use crate::settings::types::UnsafeFixes;
use crate::{Edit, Fix};
pub(crate) mod codemods;
pub(crate) mod edits;
pub(crate) mod snippet;
/// The outcome of applying a batch of fixes to a single source file.
pub(crate) struct FixResult {
    /// The resulting source code, after applying all fixes.
    pub(crate) code: String,
    /// The number of fixes applied for each [`Rule`].
    pub(crate) fixes: FixTable,
    /// Source map for the fixed source code.
    pub(crate) source_map: SourceMap,
}
/// Apply every sufficiently-applicable fix among `diagnostics` to the file,
/// returning `None` when no diagnostic carries an applicable fix.
pub(crate) fn fix_file(
    diagnostics: &[Diagnostic],
    locator: &Locator,
    unsafe_fixes: UnsafeFixes,
) -> Option<FixResult> {
    let required_applicability = unsafe_fixes.required_applicability();

    // Restrict to diagnostics whose fix meets the required applicability.
    let mut fixable = diagnostics
        .iter()
        .filter(|diagnostic| {
            diagnostic
                .fix()
                .is_some_and(|fix| fix.applies(required_applicability))
        })
        .peekable();

    // Bail out early (returning `None`) when there is nothing to apply.
    fixable.peek()?;
    Some(apply_fixes(fixable, locator))
}
/// Apply a series of fixes.
///
/// Fixes are sorted by [`cmp_fix`] and applied left-to-right into a fresh
/// output buffer; fixes that overlap an already-applied fix, or that share an
/// isolation group with one, are skipped. A [`SourceMap`] is built alongside so
/// positions in the new text can be traced back to the original.
fn apply_fixes<'a>(
    diagnostics: impl Iterator<Item = &'a Diagnostic>,
    locator: &'a Locator<'a>,
) -> FixResult {
    let mut output = String::with_capacity(locator.len());
    // End offset (in the *original* source) of the last applied edit.
    let mut last_pos: Option<TextSize> = None;
    // Edits already applied, to avoid re-applying identical edits from other fixes.
    let mut applied: BTreeSet<&Edit> = BTreeSet::default();
    // Isolation groups in which a fix has already been applied.
    let mut isolated: FxHashSet<u32> = FxHashSet::default();
    let mut fixed = FixTable::default();
    let mut source_map = SourceMap::default();

    for (code, name, fix) in diagnostics
        .filter_map(|msg| msg.secondary_code().map(|code| (code, msg.name(), msg)))
        .filter_map(|(code, name, diagnostic)| diagnostic.fix().map(|fix| (code, name, fix)))
        .sorted_by(|(_, name1, fix1), (_, name2, fix2)| cmp_fix(name1, name2, fix1, fix2))
    {
        let mut edits = fix
            .edits()
            .iter()
            .filter(|edit| !applied.contains(edit))
            .peekable();

        // If the fix contains at least one new edit, enforce isolation and positional requirements.
        if let Some(first) = edits.peek() {
            // If this fix requires isolation, and we've already applied another fix in the
            // same isolation group, skip it.
            if let IsolationLevel::Group(id) = fix.isolation() {
                if !isolated.insert(id) {
                    continue;
                }
            }

            // If this fix overlaps with a fix we've already applied, skip it.
            if last_pos.is_some_and(|last_pos| last_pos >= first.start()) {
                continue;
            }
        }

        let mut applied_edits = Vec::with_capacity(fix.edits().len());
        for edit in edits {
            // Add all contents from `last_pos` to `fix.location`.
            let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
            output.push_str(slice);

            // Add the start source marker for the patch.
            source_map.push_start_marker(edit, output.text_len());

            // Add the patch itself.
            output.push_str(edit.content().unwrap_or_default());

            // Add the end source marker for the added patch.
            source_map.push_end_marker(edit, output.text_len());

            // Track that the edit was applied.
            last_pos = Some(edit.end());
            applied_edits.push(edit);
        }

        applied.extend(applied_edits.drain(..));
        *fixed.entry(code).or_default(name) += 1;
    }

    // Add the remaining content.
    let slice = locator.after(last_pos.unwrap_or_default());
    output.push_str(slice);

    FixResult {
        code: output,
        fixes: fixed,
        source_map,
    }
}
/// Compare two fixes, determining the order in which they're applied.
fn cmp_fix(name1: &str, name2: &str, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
    use std::cmp::Ordering;

    // Always apply `RedefinedWhileUnused` before `UnusedImport`, as the latter can end up fixing
    // the former. But we can't apply this just for `RedefinedWhileUnused` and `UnusedImport`
    // because it violates
    // `< is transitive: a < b and b < c implies a < c. The same must hold for both == and >.`
    // See https://github.com/astral-sh/ruff/issues/12469#issuecomment-2244392085
    let redefined_while_unused = Rule::RedefinedWhileUnused.name().as_str();
    let by_rule_priority = match (
        name1 == redefined_while_unused,
        name2 == redefined_while_unused,
    ) {
        (true, false) => Ordering::Less,
        (false, true) => Ordering::Greater,
        (true, true) | (false, false) => Ordering::Equal,
    };

    by_rule_priority
        // Apply fixes in order of their start position.
        .then_with(|| fix1.min_start().cmp(&fix2.min_start()))
        // Break ties in the event of overlapping rules, for some specific combinations.
        .then_with(|| {
            let missing_trailing_period = Rule::MissingTrailingPeriod.name().as_str();
            let newline_after_last_paragraph = Rule::NewLineAfterLastParagraph.name().as_str();
            let if_else_instead_of_dict_get = Rule::IfElseBlockInsteadOfDictGet.name().as_str();
            let if_else_instead_of_if_exp = Rule::IfElseBlockInsteadOfIfExp.name().as_str();

            match (name1, name2) {
                // Apply `MissingTrailingPeriod` fixes before `NewLineAfterLastParagraph` fixes.
                (a, b) if a == missing_trailing_period && b == newline_after_last_paragraph => {
                    Ordering::Less
                }
                (a, b) if a == newline_after_last_paragraph && b == missing_trailing_period => {
                    Ordering::Greater
                }
                // Apply `IfElseBlockInsteadOfDictGet` fixes before `IfElseBlockInsteadOfIfExp`
                // fixes.
                (a, b) if a == if_else_instead_of_dict_get && b == if_else_instead_of_if_exp => {
                    Ordering::Less
                }
                (a, b) if a == if_else_instead_of_if_exp && b == if_else_instead_of_dict_get => {
                    Ordering::Greater
                }
                _ => Ordering::Equal,
            }
        })
}
#[cfg(test)]
mod tests {
    use ruff_diagnostics::SourceMarker;
    use ruff_source_file::SourceFileBuilder;
    use ruff_text_size::{Ranged, TextSize};

    use crate::fix::{FixResult, apply_fixes};
    use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
    use crate::{Edit, Fix};
    use crate::{Locator, Violation};
    use ruff_db::diagnostic::Diagnostic;

    /// Wrap each edit in a diagnostic carrying a safe fix, so the edits can be
    /// fed through `apply_fixes`.
    fn create_diagnostics(
        filename: &str,
        source: &str,
        edit: impl IntoIterator<Item = Edit>,
    ) -> Vec<Diagnostic> {
        edit.into_iter()
            .map(|edit| {
                // The choice of rule here is arbitrary.
                let mut diagnostic = MissingNewlineAtEndOfFile.into_diagnostic(
                    edit.range(),
                    &SourceFileBuilder::new(filename, source).finish(),
                );
                diagnostic.set_fix(Fix::safe_edit(edit));
                diagnostic
            })
            .collect()
    }

    #[test]
    fn empty_file() {
        // No diagnostics: output is unchanged and no markers are produced.
        let locator = Locator::new(r"");
        let diagnostics = create_diagnostics("<filename>", locator.contents(), []);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(code, "");
        assert_eq!(fixes.counts().sum::<usize>(), 0);
        assert!(source_map.markers().is_empty());
    }

    #[test]
    fn apply_one_insertion() {
        let locator = Locator::new(
            r#"
import os
print("hello world")
"#
            .trim(),
        );
        // Insert a new import immediately after `import os\n` (offset 10).
        let diagnostics = create_diagnostics(
            "<filename>",
            locator.contents(),
            [Edit::insertion(
                "import sys\n".to_string(),
                TextSize::new(10),
            )],
        );
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r#"
import os
import sys
print("hello world")
"#
            .trim()
        );
        assert_eq!(fixes.counts().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker::new(10.into(), 10.into()),
                SourceMarker::new(10.into(), 21.into()),
            ]
        );
    }

    #[test]
    fn apply_one_replacement() {
        let locator = Locator::new(
            r"
class A(object):
    ...
"
            .trim(),
        );
        // Replace `object` (offsets 8..14) with `Bar`.
        let diagnostics = create_diagnostics(
            "<filename>",
            locator.contents(),
            [Edit::replacement(
                "Bar".to_string(),
                TextSize::new(8),
                TextSize::new(14),
            )],
        );
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r"
class A(Bar):
    ...
"
            .trim(),
        );
        assert_eq!(fixes.counts().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker::new(8.into(), 8.into()),
                SourceMarker::new(14.into(), 11.into()),
            ]
        );
    }

    #[test]
    fn apply_one_removal() {
        let locator = Locator::new(
            r"
class A(object):
    ...
"
            .trim(),
        );
        // Delete `(object)` (offsets 7..15).
        let diagnostics = create_diagnostics(
            "<filename>",
            locator.contents(),
            [Edit::deletion(TextSize::new(7), TextSize::new(15))],
        );
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r"
class A:
    ...
"
            .trim()
        );
        assert_eq!(fixes.counts().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker::new(7.into(), 7.into()),
                SourceMarker::new(15.into(), 7.into()),
            ]
        );
    }

    #[test]
    fn apply_two_removals() {
        let locator = Locator::new(
            r"
class A(object, object, object):
    ...
"
            .trim(),
        );
        // Delete the first two `object, ` / `, object` spans, leaving one base.
        let diagnostics = create_diagnostics(
            "<filename>",
            locator.contents(),
            [
                Edit::deletion(TextSize::from(8), TextSize::from(16)),
                Edit::deletion(TextSize::from(22), TextSize::from(30)),
            ],
        );
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);

        assert_eq!(
            code,
            r"
class A(object):
    ...
"
            .trim()
        );
        assert_eq!(fixes.counts().sum::<usize>(), 2);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker::new(8.into(), 8.into()),
                SourceMarker::new(16.into(), 8.into()),
                SourceMarker::new(22.into(), 14.into()),
                SourceMarker::new(30.into(), 14.into()),
            ]
        );
    }

    #[test]
    fn ignore_overlapping_fixes() {
        let locator = Locator::new(
            r"
class A(object):
    ...
"
            .trim(),
        );
        // The replacement overlaps the deletion, so only the deletion applies.
        let diagnostics = create_diagnostics(
            "<filename>",
            locator.contents(),
            [
                Edit::deletion(TextSize::from(7), TextSize::from(15)),
                Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
            ],
        );
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r"
class A:
    ...
"
            .trim(),
        );
        assert_eq!(fixes.counts().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker::new(7.into(), 7.into()),
                SourceMarker::new(15.into(), 7.into()),
            ]
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/message/grouped.rs | crates/ruff_linter/src/message/grouped.rs | use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::num::NonZeroUsize;
use colored::Colorize;
use ruff_db::diagnostic::Diagnostic;
use ruff_diagnostics::Applicability;
use ruff_notebook::NotebookIndex;
use ruff_source_file::{LineColumn, OneIndexed};
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext};
/// An emitter that prints diagnostics grouped under their source filename:
/// one underlined filename header per file, followed by one `row:col code body`
/// line per diagnostic.
pub struct GroupedEmitter {
    /// Whether to render a `[*]` indicator next to diagnostics whose fix applies.
    show_fix_status: bool,
    /// The applicability level a fix must satisfy to count as applicable.
    applicability: Applicability,
}

impl Default for GroupedEmitter {
    fn default() -> Self {
        // By default: no fix-status indicator, and only safe fixes count.
        Self {
            show_fix_status: false,
            applicability: Applicability::Safe,
        }
    }
}

impl GroupedEmitter {
    /// Enable or disable the `[*]` fix-status indicator.
    #[must_use]
    pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
        self.show_fix_status = show_fix_status;
        self
    }

    /// Set the applicability threshold used when deciding whether to show a fix.
    #[must_use]
    pub fn with_applicability(mut self, applicability: Applicability) -> Self {
        self.applicability = applicability;
        self
    }
}
impl Emitter for GroupedEmitter {
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        diagnostics: &[Diagnostic],
        context: &EmitterContext,
    ) -> anyhow::Result<()> {
        for (filename, messages) in group_diagnostics_by_filename(diagnostics) {
            // Compute the maximum number of digits in the row and column, for messages in
            // this file. These widths are used to right-align locations below.
            let mut max_row_length = OneIndexed::MIN;
            let mut max_column_length = OneIndexed::MIN;

            for message in &messages {
                max_row_length = max_row_length.max(message.start_location.line);
                max_column_length = max_column_length.max(message.start_location.column);
            }

            let row_length = max_row_length.digits();
            let column_length = max_column_length.digits();

            // Print the filename.
            writeln!(writer, "{}:", relativize_path(&*filename).underline())?;

            // Print each message.
            for message in messages {
                write!(
                    writer,
                    "{}",
                    DisplayGroupedMessage {
                        notebook_index: context.notebook_index(&message.expect_ruff_filename()),
                        message,
                        show_fix_status: self.show_fix_status,
                        applicability: self.applicability,
                        row_length,
                        column_length,
                    }
                )?;
            }

            // Print a blank line between files.
            writeln!(writer)?;
        }

        Ok(())
    }
}
/// A [`Diagnostic`] paired with its pre-computed start location, so the
/// location is resolved once per message rather than on every access.
struct MessageWithLocation<'a> {
    message: &'a Diagnostic,
    start_location: LineColumn,
}

// Deref to the underlying diagnostic so callers can use its methods directly.
impl std::ops::Deref for MessageWithLocation<'_> {
    type Target = Diagnostic;

    fn deref(&self) -> &Self::Target {
        self.message
    }
}
/// Group diagnostics by their filename, pre-computing each diagnostic's start
/// location along the way.
///
/// Returns a `BTreeMap` so that files are visited in sorted (deterministic) order.
fn group_diagnostics_by_filename(
    diagnostics: &[Diagnostic],
) -> BTreeMap<String, Vec<MessageWithLocation<'_>>> {
    let mut grouped_messages = BTreeMap::default();
    for diagnostic in diagnostics {
        grouped_messages
            .entry(diagnostic.expect_ruff_filename())
            // `or_default` is the idiomatic spelling of `or_insert_with(Vec::new)`
            // (see clippy's `unwrap_or_default` family of lints).
            .or_default()
            .push(MessageWithLocation {
                message: diagnostic,
                // Fall back to a default location if the diagnostic has no range.
                start_location: diagnostic.ruff_start_location().unwrap_or_default(),
            });
    }
    grouped_messages
}
/// Display adapter that renders a single diagnostic line within a grouped
/// listing, padding row/column numbers to the widths supplied by the caller.
struct DisplayGroupedMessage<'a> {
    message: MessageWithLocation<'a>,
    /// Whether to render a `[*]` indicator for applicable fixes.
    show_fix_status: bool,
    applicability: Applicability,
    /// Digit width of the largest row number in the file (for alignment).
    row_length: NonZeroUsize,
    /// Digit width of the largest column number in the file (for alignment).
    column_length: NonZeroUsize,
    /// Present for Jupyter notebooks; used to translate rows into per-cell coordinates.
    notebook_index: Option<&'a NotebookIndex>,
}
impl Display for DisplayGroupedMessage<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let MessageWithLocation {
            message,
            start_location,
        } = &self.message;

        // Right-pad so that row numbers line up across all messages in the file.
        write!(
            f,
            " {row_padding}",
            row_padding = " ".repeat(self.row_length.get() - start_location.line.digits().get())
        )?;

        // Check if we're working on a jupyter notebook and translate positions with cell accordingly
        let (row, col) = if let Some(jupyter_index) = self.notebook_index {
            write!(
                f,
                "cell {cell}{sep}",
                cell = jupyter_index
                    .cell(start_location.line)
                    .unwrap_or(OneIndexed::MIN),
                sep = ":".cyan()
            )?;

            (
                jupyter_index
                    .cell_row(start_location.line)
                    .unwrap_or(OneIndexed::MIN),
                start_location.column,
            )
        } else {
            (start_location.line, start_location.column)
        };

        writeln!(
            f,
            "{row}{sep}{col}{col_padding} {code_and_body}",
            sep = ":".cyan(),
            // Pad columns to the same width for vertical alignment.
            col_padding =
                " ".repeat(self.column_length.get() - start_location.column.digits().get()),
            code_and_body = RuleCodeAndBody {
                message,
                show_fix_status: self.show_fix_status,
                applicability: self.applicability
            },
        )?;

        Ok(())
    }
}
/// Renders a diagnostic's code and message body, optionally prefixed with a
/// `[*]` fix-status indicator.
pub(super) struct RuleCodeAndBody<'a> {
    pub(crate) message: &'a Diagnostic,
    pub(crate) show_fix_status: bool,
    pub(crate) applicability: Applicability,
}

impl Display for RuleCodeAndBody<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if self.show_fix_status {
            if let Some(fix) = self.message.fix() {
                // Do not display an indicator for inapplicable fixes
                if fix.applies(self.applicability) {
                    if let Some(code) = self.message.secondary_code() {
                        write!(f, "{} ", code.red().bold())?;
                    }
                    return write!(
                        f,
                        "{fix}{body}",
                        fix = format_args!("[{}] ", "*".cyan()),
                        body = self.message.concise_message(),
                    );
                }
            }
        }

        if let Some(code) = self.message.secondary_code() {
            write!(
                f,
                "{code} {body}",
                code = code.red().bold(),
                body = self.message.concise_message(),
            )
        } else {
            // Diagnostics without a secondary code (e.g. syntax errors) fall
            // back to their primary id, separated with a colon.
            write!(
                f,
                "{code}: {body}",
                code = self.message.id().as_str().red().bold(),
                body = self.message.concise_message(),
            )
        }
    }
}
#[cfg(test)]
mod tests {
    use insta::assert_snapshot;

    use ruff_diagnostics::Applicability;

    use crate::message::GroupedEmitter;
    use crate::message::tests::{
        capture_emitter_output, create_diagnostics, create_syntax_error_diagnostics,
    };

    /// Snapshot of the default (no fix-status) output.
    #[test]
    fn default() {
        let mut emitter = GroupedEmitter::default();
        let content = capture_emitter_output(&mut emitter, &create_diagnostics());

        assert_snapshot!(content);
    }

    /// Syntax-error diagnostics have no secondary code; snapshot their rendering.
    #[test]
    fn syntax_errors() {
        let mut emitter = GroupedEmitter::default();
        let content = capture_emitter_output(&mut emitter, &create_syntax_error_diagnostics());

        assert_snapshot!(content);
    }

    /// With fix status enabled, safe fixes gain a `[*]` marker.
    #[test]
    fn fix_status() {
        let mut emitter = GroupedEmitter::default().with_show_fix_status(true);
        let content = capture_emitter_output(&mut emitter, &create_diagnostics());

        assert_snapshot!(content);
    }

    /// With the unsafe applicability threshold, unsafe fixes also gain the marker.
    #[test]
    fn fix_status_unsafe() {
        let mut emitter = GroupedEmitter::default()
            .with_show_fix_status(true)
            .with_applicability(Applicability::Unsafe);
        let content = capture_emitter_output(&mut emitter, &create_diagnostics());

        assert_snapshot!(content);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/message/sarif.rs | crates/ruff_linter/src/message/sarif.rs | use std::collections::HashSet;
use std::io::Write;
use anyhow::Result;
use log::warn;
use serde::{Serialize, Serializer};
use serde_json::json;
use ruff_db::diagnostic::{Diagnostic, SecondaryCode};
use ruff_source_file::{OneIndexed, SourceFile};
use ruff_text_size::{Ranged, TextRange};
use crate::VERSION;
use crate::fs::normalize_path;
use crate::message::{Emitter, EmitterContext};
use crate::registry::{Linter, RuleNamespace};
/// An emitter for producing SARIF 2.1.0-compliant JSON output.
///
/// Static Analysis Results Interchange Format (SARIF) is a standard format
/// for static analysis results. For full specification, see:
/// [SARIF 2.1.0](https://docs.oasis-open.org/sarif/sarif/v2.1.0/sarif-v2.1.0.html)
///
/// This is a stateless unit struct: all output is derived from the diagnostics
/// passed to [`Emitter::emit`].
pub struct SarifEmitter;
impl Emitter for SarifEmitter {
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        diagnostics: &[Diagnostic],
        _context: &EmitterContext,
    ) -> Result<()> {
        let results = diagnostics
            .iter()
            .map(SarifResult::from_message)
            .collect::<Result<Vec<_>>>()?;

        // Deduplicate rule metadata: each rule appears once in the driver's
        // `rules` array regardless of how many results reference it. Results
        // without a secondary code (e.g. syntax errors) contribute no rule entry.
        let unique_rules: HashSet<_> = results
            .iter()
            .filter_map(|result| result.rule_id.as_secondary_code())
            .collect();
        let mut rules: Vec<SarifRule> = unique_rules.into_iter().map(SarifRule::from).collect();
        // Sort by code for deterministic output.
        rules.sort_by(|a, b| a.code.cmp(b.code));

        let output = json!({
            "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
            "version": "2.1.0",
            "runs": [{
                "tool": {
                    "driver": {
                        "name": "ruff",
                        "informationUri": "https://github.com/astral-sh/ruff",
                        "rules": rules,
                        "version": VERSION.to_string(),
                    }
                },
                "results": results,
            }],
        });

        serde_json::to_writer_pretty(writer, &output)?;
        Ok(())
    }
}
/// Metadata describing a single rule, emitted once in the SARIF driver's
/// `rules` array.
#[derive(Debug, Clone)]
struct SarifRule<'a> {
    name: &'a str,
    code: &'a SecondaryCode,
    linter: &'a str,
    summary: &'a str,
    explanation: Option<&'a str>,
    url: Option<String>,
}

impl<'a> From<&'a SecondaryCode> for SarifRule<'a> {
    fn from(code: &'a SecondaryCode) -> Self {
        // This is a manual re-implementation of Rule::from_code, but we also want the Linter. This
        // avoids calling Linter::parse_code twice.
        let (linter, suffix) = Linter::parse_code(code).unwrap();
        let rule = linter
            .all_rules()
            .find(|rule| rule.noqa_code().suffix() == suffix)
            .expect("Expected a valid noqa code corresponding to a rule");
        Self {
            name: rule.into(),
            code,
            linter: linter.name(),
            summary: rule.message_formats()[0],
            explanation: rule.explanation(),
            url: rule.url(),
        }
    }
}

// Serialized by hand (rather than derived) to match the SARIF rule-object shape.
impl Serialize for SarifRule<'_> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        json!({
            "id": self.code,
            "shortDescription": {
                "text": self.summary,
            },
            "fullDescription": {
                "text": self.explanation,
            },
            "help": {
                "text": self.summary,
            },
            "helpUri": self.url,
            "properties": {
                "id": self.code,
                "kind": self.linter,
                "name": self.name,
                "problem.severity": "error".to_string(),
            },
        })
        .serialize(serializer)
    }
}
/// The identifier used as a SARIF `ruleId`: either a diagnostic's secondary
/// (noqa) code, or its lint id when no secondary code exists.
#[derive(Debug)]
enum RuleCode<'a> {
    SecondaryCode(&'a SecondaryCode),
    LintId(&'a str),
}

impl RuleCode<'_> {
    /// Returns the secondary code, if this identifier carries one.
    fn as_secondary_code(&self) -> Option<&SecondaryCode> {
        match self {
            RuleCode::SecondaryCode(code) => Some(code),
            RuleCode::LintId(_) => None,
        }
    }

    /// Returns the identifier as a plain string slice.
    fn as_str(&self) -> &str {
        match self {
            RuleCode::SecondaryCode(code) => code.as_str(),
            RuleCode::LintId(id) => id,
        }
    }
}

// Serialize as a bare string, matching SARIF's `ruleId` field.
impl Serialize for RuleCode<'_> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.as_str())
    }
}
/// Prefer the diagnostic's secondary (noqa) code as the SARIF rule id; fall
/// back to the diagnostic's lint id when no secondary code is present.
impl<'a> From<&'a Diagnostic> for RuleCode<'a> {
    // Note: the original bindings here were swapped (`code` bound the
    // `Diagnostic` and `diagnostic` bound the `SecondaryCode`); renamed for
    // clarity — behavior is unchanged.
    fn from(diagnostic: &'a Diagnostic) -> Self {
        match diagnostic.secondary_code() {
            Some(code) => Self::SecondaryCode(code),
            None => Self::LintId(diagnostic.id().as_str()),
        }
    }
}
/// Represents a single result in a SARIF 2.1.0 report.
///
/// See the SARIF 2.1.0 specification for details:
/// [SARIF 2.1.0](https://docs.oasis-open.org/sarif/sarif/v2.1.0/sarif-v2.1.0.html)
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifResult<'a> {
    rule_id: RuleCode<'a>,
    level: String,
    message: SarifMessage,
    locations: Vec<SarifLocation>,
    /// Omitted from the JSON entirely when the diagnostic has no fix.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    fixes: Vec<SarifFix>,
}

// The structs below mirror the SARIF object model one-to-one; `rename_all =
// "camelCase"` produces the field names required by the SARIF schema.

/// A SARIF `message` object.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifMessage {
    text: String,
}

/// A SARIF `physicalLocation`: an artifact plus a region within it.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifPhysicalLocation {
    artifact_location: SarifArtifactLocation,
    region: SarifRegion,
}

/// A SARIF `location` wrapper around a physical location.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifLocation {
    physical_location: SarifPhysicalLocation,
}

/// A SARIF `fix`: a description plus the artifact changes to apply.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifFix {
    description: RuleDescription,
    artifact_changes: Vec<SarifArtifactChange>,
}

/// The `description` object attached to a fix.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct RuleDescription {
    text: Option<String>,
}

/// A SARIF `artifactChange`: replacements within a single artifact.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifArtifactChange {
    artifact_location: SarifArtifactLocation,
    replacements: Vec<SarifReplacement>,
}

/// A SARIF `artifactLocation`, identified by URI.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifArtifactLocation {
    uri: String,
}

/// A SARIF `replacement`: a deleted region and optional inserted text.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct SarifReplacement {
    deleted_region: SarifRegion,
    /// `None` for pure deletions; omitted from the JSON in that case.
    #[serde(skip_serializing_if = "Option::is_none")]
    inserted_content: Option<InsertedContent>,
}

/// The `insertedContent` object of a replacement.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct InsertedContent {
    text: String,
}

/// A SARIF `region` in one-indexed line/column coordinates.
#[derive(Debug, Serialize, Clone, Copy)]
#[serde(rename_all = "camelCase")]
struct SarifRegion {
    start_line: OneIndexed,
    start_column: OneIndexed,
    end_line: OneIndexed,
    end_column: OneIndexed,
}
impl<'a> SarifResult<'a> {
    /// Convert a text range within `source_file` into a SARIF region
    /// (one-indexed line/column coordinates).
    fn range_to_sarif_region(source_file: &SourceFile, range: TextRange) -> SarifRegion {
        let source_code = source_file.to_source_code();
        let start_location = source_code.line_column(range.start());
        let end_location = source_code.line_column(range.end());
        SarifRegion {
            start_line: start_location.line,
            start_column: start_location.column,
            end_line: end_location.line,
            end_column: end_location.column,
        }
    }

    /// Render the diagnostic's fix (if any) as a SARIF fix targeting `uri`.
    ///
    /// Returns `None` when the diagnostic has no fix, or when its source file
    /// is unexpectedly missing (a bug, asserted in debug builds and logged in
    /// release builds).
    fn fix(diagnostic: &'a Diagnostic, uri: &str) -> Option<SarifFix> {
        let fix = diagnostic.fix()?;
        let Some(source_file) = diagnostic.ruff_source_file() else {
            debug_assert!(
                false,
                "Omitting the fix for diagnostic with id `{}` because the source file is missing. This is a bug in Ruff, please report an issue.",
                diagnostic.id()
            );
            warn!(
                "Omitting the fix for diagnostic with id `{}` because the source file is missing. This is a bug in Ruff, please report an issue.",
                diagnostic.id()
            );
            return None;
        };
        let fix_description = diagnostic
            .first_help_text()
            .map(std::string::ToString::to_string);

        // One replacement per edit in the fix; a pure deletion carries no
        // `insertedContent`.
        let replacements: Vec<SarifReplacement> = fix
            .edits()
            .iter()
            .map(|edit| {
                let range = edit.range();
                let deleted_region = Self::range_to_sarif_region(source_file, range);
                SarifReplacement {
                    deleted_region,
                    inserted_content: edit.content().map(|content| InsertedContent {
                        text: content.to_string(),
                    }),
                }
            })
            .collect();
        let artifact_changes = vec![SarifArtifactChange {
            artifact_location: SarifArtifactLocation {
                uri: uri.to_string(),
            },
            replacements,
        }];
        Some(SarifFix {
            description: RuleDescription {
                text: fix_description,
            },
            artifact_changes,
        })
    }

    /// Build a `file://` URI for the diagnostic's normalized path.
    // `Result` is kept (despite being infallible on wasm) so both cfg branches
    // share a signature.
    #[allow(clippy::unnecessary_wraps)]
    fn uri(diagnostic: &Diagnostic) -> Result<String> {
        let path = normalize_path(&*diagnostic.expect_ruff_filename());
        #[cfg(not(target_arch = "wasm32"))]
        return url::Url::from_file_path(&path)
            .map_err(|()| anyhow::anyhow!("Failed to convert path to URL: {}", path.display()))
            .map(|u| u.to_string());
        // `Url::from_file_path` isn't usable on wasm; fall back to naive formatting.
        #[cfg(target_arch = "wasm32")]
        return Ok(format!("file://{}", path.display()));
    }

    /// Build a SARIF result from a single diagnostic.
    fn from_message(diagnostic: &'a Diagnostic) -> Result<Self> {
        let start_location = diagnostic.ruff_start_location().unwrap_or_default();
        let end_location = diagnostic.ruff_end_location().unwrap_or_default();
        let region = SarifRegion {
            start_line: start_location.line,
            start_column: start_location.column,
            end_line: end_location.line,
            end_column: end_location.column,
        };
        let uri = Self::uri(diagnostic)?;
        Ok(Self {
            rule_id: RuleCode::from(diagnostic),
            level: "error".to_string(),
            message: SarifMessage {
                text: diagnostic.concise_message().to_string(),
            },
            fixes: Self::fix(diagnostic, &uri).into_iter().collect(),
            locations: vec![SarifLocation {
                physical_location: SarifPhysicalLocation {
                    artifact_location: SarifArtifactLocation { uri },
                    region,
                },
            }],
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::message::SarifEmitter;
    use crate::message::tests::{
        capture_emitter_output, create_diagnostics, create_syntax_error_diagnostics,
    };

    /// Run the emitter over the shared diagnostic fixtures and capture the JSON.
    fn get_output() -> String {
        let mut emitter = SarifEmitter {};
        capture_emitter_output(&mut emitter, &create_diagnostics())
    }

    /// The emitted output must at least be parseable JSON.
    #[test]
    fn valid_json() {
        let content = get_output();
        serde_json::from_str::<serde_json::Value>(&content).unwrap();
    }

    /// Syntax-error diagnostics (no secondary code) must also emit valid JSON.
    #[test]
    fn valid_syntax_error_json() {
        let mut emitter = SarifEmitter {};
        let content = capture_emitter_output(&mut emitter, &create_syntax_error_diagnostics());
        serde_json::from_str::<serde_json::Value>(&content).unwrap();
    }

    /// Snapshot the full report, redacting version and machine-specific URIs.
    #[test]
    fn test_results() {
        let content = get_output();
        let value = serde_json::from_str::<serde_json::Value>(&content).unwrap();

        insta::assert_json_snapshot!(value, {
            ".runs[0].tool.driver.version" => "[VERSION]",
            ".runs[0].results[].locations[].physicalLocation.artifactLocation.uri" => "[URI]",
            ".runs[0].results[].fixes[].artifactChanges[].artifactLocation.uri" => "[URI]",
        });
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/message/mod.rs | crates/ruff_linter/src/message/mod.rs | use std::backtrace::BacktraceStatus;
use std::fmt::Display;
use std::io::Write;
use std::path::Path;
use ruff_db::panic::PanicError;
use rustc_hash::FxHashMap;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig,
DisplayDiagnostics, DisplayGithubDiagnostics, FileResolver, GithubRenderer, Input, LintName,
SecondaryCode, Severity, Span, SubDiagnostic, SubDiagnosticSeverity, UnifiedFile,
};
use ruff_db::files::File;
pub use grouped::GroupedEmitter;
use ruff_notebook::NotebookIndex;
use ruff_source_file::{SourceFile, SourceFileBuilder};
use ruff_text_size::{TextRange, TextSize};
pub use sarif::SarifEmitter;
use crate::Fix;
use crate::registry::Rule;
use crate::settings::types::{OutputFormat, RuffOutputFormat};
mod grouped;
mod sarif;
/// Create a `Diagnostic` from a panic.
///
/// The resulting diagnostic carries the panic message, an info note pointing users at the
/// issue tracker (a panic indicates a bug in Ruff), the captured backtrace when one is
/// available, and — when `path` is given — a primary annotation attributing the panic to
/// that file (without a source snippet, since the location within the file is unknown).
pub fn create_panic_diagnostic(error: &PanicError, path: Option<&Path>) -> Diagnostic {
    let mut diagnostic = Diagnostic::new(
        DiagnosticId::Panic,
        Severity::Fatal,
        // `Option<&Path>` is `Copy`, so it can be mapped directly; the previous
        // `path.as_ref().map(...)` produced a needless `Option<&&Path>`.
        error.to_diagnostic_message(path.map(|path| path.display())),
    );
    diagnostic.sub(SubDiagnostic::new(
        SubDiagnosticSeverity::Info,
        "This indicates a bug in Ruff.",
    ));
    let report_message = "If you could open an issue at \
        https://github.com/astral-sh/ruff/issues/new?title=%5Bpanic%5D, \
        we'd be very appreciative!";
    diagnostic.sub(SubDiagnostic::new(
        SubDiagnosticSeverity::Info,
        report_message,
    ));
    // Attach the backtrace if one was captured, or tell the user how to enable it.
    if let Some(backtrace) = &error.backtrace {
        match backtrace.status() {
            BacktraceStatus::Disabled => {
                diagnostic.sub(SubDiagnostic::new(
                    SubDiagnosticSeverity::Info,
                    "run with `RUST_BACKTRACE=1` environment variable to show the full backtrace information",
                ));
            }
            BacktraceStatus::Captured => {
                diagnostic.sub(SubDiagnostic::new(
                    SubDiagnosticSeverity::Info,
                    format!("Backtrace:\n{backtrace}"),
                ));
            }
            _ => {}
        }
    }
    // Point the diagnostic at the file being processed when the panic occurred.
    if let Some(path) = path {
        let file = SourceFileBuilder::new(path.to_string_lossy(), "").finish();
        let span = Span::from(file);
        let mut annotation = Annotation::primary(span);
        annotation.hide_snippet(true);
        diagnostic.annotate(annotation);
    }
    diagnostic
}
/// Create a lint [`Diagnostic`] for `rule` spanning `range` in `file`.
///
/// `body` is the main diagnostic message. `suggestion`, when present, becomes a help
/// sub-diagnostic; `fix`, `parent`, and `noqa_offset` are forwarded onto the diagnostic
/// only when provided.
#[expect(clippy::too_many_arguments)]
pub fn create_lint_diagnostic<B, S>(
    body: B,
    suggestion: Option<S>,
    range: TextRange,
    fix: Option<Fix>,
    parent: Option<TextSize>,
    file: SourceFile,
    noqa_offset: Option<TextSize>,
    rule: Rule,
) -> Diagnostic
where
    B: Display,
    S: Display,
{
    let mut diagnostic = Diagnostic::new(
        DiagnosticId::Lint(LintName::of(rule.into())),
        Severity::Error,
        body,
    );

    let span = Span::from(file).with_range(range);
    let mut annotation = Annotation::primary(span);
    // The `0..0` range is used to highlight file-level diagnostics.
    //
    // TODO(brent) We should instead set this flag on annotations for individual lint rules that
    // actually need it, but we need to be able to cache the new diagnostic model first. See
    // https://github.com/astral-sh/ruff/issues/19688.
    if range == TextRange::default() {
        annotation.hide_snippet(true);
    }
    diagnostic.annotate(annotation);

    if let Some(suggestion) = suggestion {
        diagnostic.help(suggestion);
    }
    if let Some(fix) = fix {
        diagnostic.set_fix(fix);
    }
    if let Some(parent) = parent {
        diagnostic.set_parent(parent);
    }
    if let Some(noqa_offset) = noqa_offset {
        diagnostic.set_noqa_offset(noqa_offset);
    }
    // The secondary code (e.g. `F401`) is the rule's noqa code.
    diagnostic.set_secondary_code(SecondaryCode::new(rule.noqa_code().to_string()));
    diagnostic.set_documentation_url(rule.url());

    diagnostic
}
/// Ruff diagnostics always reference `UnifiedFile::Ruff` files; the `ruff_db`-side
/// accessors are unreachable in this context and panic if ever called.
impl FileResolver for EmitterContext<'_> {
    fn path(&self, _file: File) -> &str {
        unimplemented!("Expected a Ruff file for rendering a Ruff diagnostic");
    }

    fn input(&self, _file: File) -> Input {
        unimplemented!("Expected a Ruff file for rendering a Ruff diagnostic");
    }

    fn notebook_index(&self, file: &UnifiedFile) -> Option<NotebookIndex> {
        let UnifiedFile::Ruff(file) = file else {
            unimplemented!("Expected a Ruff file for rendering a Ruff diagnostic")
        };
        self.notebook_indexes.get(file.name()).cloned()
    }

    fn is_notebook(&self, file: &UnifiedFile) -> bool {
        let UnifiedFile::Ruff(file) = file else {
            unimplemented!("Expected a Ruff file for rendering a Ruff diagnostic")
        };
        // A file is a notebook exactly when an index was registered for it.
        self.notebook_indexes.get(file.name()).is_some()
    }
}
/// Display format for [`Diagnostic`]s.
///
/// The emitter serializes a slice of [`Diagnostic`]s and writes them to a [`Write`].
pub trait Emitter {
    /// Serializes the `diagnostics` and writes the output to `writer`.
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        diagnostics: &[Diagnostic],
        context: &EmitterContext,
    ) -> anyhow::Result<()>;
}

/// Context passed to [`Emitter`].
pub struct EmitterContext<'a> {
    /// Maps notebook file names to their indexes; presence of a name in this map is
    /// what marks a file as a Jupyter notebook.
    notebook_indexes: &'a FxHashMap<String, NotebookIndex>,
}
impl<'a> EmitterContext<'a> {
pub fn new(notebook_indexes: &'a FxHashMap<String, NotebookIndex>) -> Self {
Self { notebook_indexes }
}
/// Tests if the file with `name` is a jupyter notebook.
pub fn is_notebook(&self, name: &str) -> bool {
self.notebook_indexes.contains_key(name)
}
pub fn notebook_index(&self, name: &str) -> Option<&NotebookIndex> {
self.notebook_indexes.get(name)
}
}
/// Render `diagnostics` to `writer` in the requested output `format`.
///
/// Formats understood by [`DiagnosticFormat`] go through the shared display pipeline;
/// the Ruff-specific formats (GitHub, grouped, SARIF) fall back to dedicated renderers.
pub fn render_diagnostics(
    writer: &mut dyn Write,
    format: OutputFormat,
    config: DisplayDiagnosticConfig,
    context: &EmitterContext<'_>,
    diagnostics: &[Diagnostic],
) -> std::io::Result<()> {
    match DiagnosticFormat::try_from(format) {
        Ok(format) => {
            let config = config.format(format);
            let value = DisplayDiagnostics::new(context, &config, diagnostics);
            write!(writer, "{value}")?;
        }
        // `try_from` returns the unconverted Ruff-only format in the error case.
        Err(RuffOutputFormat::Github) => {
            let renderer = GithubRenderer::new(context, "Ruff");
            let value = DisplayGithubDiagnostics::new(&renderer, diagnostics);
            write!(writer, "{value}")?;
        }
        Err(RuffOutputFormat::Grouped) => {
            GroupedEmitter::default()
                .with_show_fix_status(config.show_fix_status())
                .with_applicability(config.fix_applicability())
                .emit(writer, diagnostics, context)
                .map_err(std::io::Error::other)?;
        }
        Err(RuffOutputFormat::Sarif) => {
            SarifEmitter
                .emit(writer, diagnostics, context)
                .map_err(std::io::Error::other)?;
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use rustc_hash::FxHashMap;

    use ruff_db::diagnostic::Diagnostic;
    use ruff_python_parser::{Mode, ParseOptions, parse_unchecked};
    use ruff_source_file::SourceFileBuilder;
    use ruff_text_size::{TextRange, TextSize};

    use crate::codes::Rule;
    use crate::message::{Emitter, EmitterContext, create_lint_diagnostic};
    use crate::{Edit, Fix};

    /// Builds syntax-error diagnostics by parsing a deliberately invalid snippet.
    pub(super) fn create_syntax_error_diagnostics() -> Vec<Diagnostic> {
        let source = r"from os import
if call(foo
def bar():
pass
";
        let source_file = SourceFileBuilder::new("syntax_errors.py", source).finish();
        parse_unchecked(source, ParseOptions::from(Mode::Module))
            .errors()
            .iter()
            .map(|parse_error| {
                Diagnostic::invalid_syntax(source_file.clone(), &parse_error.error, parse_error)
            })
            .collect()
    }

    /// Builds three representative lint diagnostics across two files — two with fixes,
    /// one without — for exercising emitters.
    pub(super) fn create_diagnostics() -> Vec<Diagnostic> {
        let fib = r#"import os
def fibonacci(n):
"""Compute the nth number in the Fibonacci sequence."""
x = 1
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
"#;
        let fib_source = SourceFileBuilder::new("fib.py", fib).finish();

        // NOTE(review): the byte offsets below were computed against the fixture text —
        // verify they still line up if the fixture source is ever edited.
        let unused_import_start = TextSize::from(7);
        let unused_import = create_lint_diagnostic(
            "`os` imported but unused",
            Some("Remove unused import: `os`"),
            TextRange::new(unused_import_start, TextSize::from(9)),
            Some(Fix::unsafe_edit(Edit::range_deletion(TextRange::new(
                TextSize::from(0),
                TextSize::from(10),
            )))),
            None,
            fib_source.clone(),
            Some(unused_import_start),
            Rule::UnusedImport,
        );

        let unused_variable_start = TextSize::from(94);
        let unused_variable = create_lint_diagnostic(
            "Local variable `x` is assigned to but never used",
            Some("Remove assignment to unused variable `x`"),
            TextRange::new(unused_variable_start, TextSize::from(95)),
            Some(Fix::unsafe_edit(Edit::deletion(
                TextSize::from(94),
                TextSize::from(99),
            ))),
            None,
            fib_source,
            Some(unused_variable_start),
            Rule::UnusedVariable,
        );

        let file_2 = r"if a == 1: pass";

        let undefined_name_start = TextSize::from(3);
        // No suggestion and no fix: exercises the diagnostic-without-fix path.
        let undefined_name = create_lint_diagnostic(
            "Undefined name `a`",
            Option::<&'static str>::None,
            TextRange::new(undefined_name_start, TextSize::from(4)),
            None,
            None,
            SourceFileBuilder::new("undef.py", file_2).finish(),
            Some(undefined_name_start),
            Rule::UndefinedName,
        );

        vec![unused_import, unused_variable, undefined_name]
    }

    /// Runs `emitter` over `diagnostics` (with no notebooks registered) and returns
    /// the captured output as a `String`.
    pub(super) fn capture_emitter_output(
        emitter: &mut dyn Emitter,
        diagnostics: &[Diagnostic],
    ) -> String {
        let notebook_indexes = FxHashMap::default();
        let context = EmitterContext::new(&notebook_indexes);
        let mut output: Vec<u8> = Vec::new();
        emitter.emit(&mut output, diagnostics, &context).unwrap();

        String::from_utf8(output).expect("Output to be valid UTF-8")
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/registry/rule_set.rs | crates/ruff_linter/src/registry/rule_set.rs | use std::fmt::{Debug, Display, Formatter};
use std::iter::FusedIterator;
use ruff_macros::CacheKey;
use crate::registry::Rule;
// Number of `u64` words in the bitset: supports up to 15 * 64 = 960 distinct rules.
const RULESET_SIZE: usize = 15;

/// A set of [`Rule`]s.
///
/// Uses a bitset where a bit of one signals that the Rule with that [u16] is in this set.
#[derive(Clone, Default, CacheKey, PartialEq, Eq)]
pub struct RuleSet([u64; RULESET_SIZE]);
impl RuleSet {
    const EMPTY: [u64; RULESET_SIZE] = [0; RULESET_SIZE];

    // 64 fits into a u16 without truncation
    #[expect(clippy::cast_possible_truncation)]
    const SLICE_BITS: u16 = u64::BITS as u16;

    /// Returns an empty rule set.
    pub const fn empty() -> Self {
        Self(Self::EMPTY)
    }

    /// Removes all rules from this set.
    pub fn clear(&mut self) {
        self.0 = Self::EMPTY;
    }

    /// Returns a set containing only `rule`.
    #[inline]
    pub const fn from_rule(rule: Rule) -> Self {
        let rule = rule as u16;

        // The `u64` word of the bitset that holds this rule's bit.
        let index = (rule / Self::SLICE_BITS) as usize;

        debug_assert!(
            index < Self::EMPTY.len(),
            "Rule index out of bounds. Increase the size of the bitset array."
        );

        // The bit-position of this specific rule in the slice
        let shift = rule % Self::SLICE_BITS;

        // Set the index for that rule to 1
        let mask = 1 << shift;

        let mut bits = Self::EMPTY;
        bits[index] = mask;

        Self(bits)
    }

    /// Returns a set containing every rule in `rules`.
    #[inline]
    pub const fn from_rules(rules: &[Rule]) -> Self {
        let mut set = RuleSet::empty();

        let mut i = 0;

        // Uses a while because for loops are not allowed in const functions.
        while i < rules.len() {
            set = set.union(&RuleSet::from_rule(rules[i]));
            i += 1;
        }

        set
    }

    /// Returns the union of the two rule sets `self` and `other`
    ///
    /// ## Examples
    ///
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    /// let set_2 = RuleSet::from_rules(&[
    ///     Rule::BadQuotesInlineString,
    ///     Rule::BooleanPositionalValueInCall,
    /// ]);
    ///
    /// let union = set_1.union(&set_2);
    ///
    /// assert!(union.contains(Rule::AmbiguousFunctionName));
    /// assert!(union.contains(Rule::AnyType));
    /// assert!(union.contains(Rule::BadQuotesInlineString));
    /// assert!(union.contains(Rule::BooleanPositionalValueInCall));
    /// ```
    #[must_use]
    pub const fn union(mut self, other: &Self) -> Self {
        let mut i = 0;
        while i < self.0.len() {
            self.0[i] |= other.0[i];
            i += 1;
        }
        self
    }

    /// Returns `self` without any of the rules contained in `other`.
    ///
    /// ## Examples
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    /// let set_2 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::Debugger]);
    ///
    /// let subtract = set_1.subtract(&set_2);
    ///
    /// assert!(subtract.contains(Rule::AnyType));
    /// assert!(!subtract.contains(Rule::AmbiguousFunctionName));
    /// ```
    #[must_use]
    pub const fn subtract(mut self, other: &Self) -> Self {
        let mut i = 0;
        while i < self.0.len() {
            self.0[i] &= !other.0[i];
            i += 1;
        }
        self
    }

    /// Returns true if `self` and `other` contain at least one common rule.
    ///
    /// ## Examples
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    ///
    /// assert!(set_1.intersects(&RuleSet::from_rules(&[
    ///     Rule::AnyType,
    ///     Rule::BadQuotesInlineString
    /// ])));
    ///
    /// assert!(!set_1.intersects(&RuleSet::from_rules(&[
    ///     Rule::BooleanPositionalValueInCall,
    ///     Rule::BadQuotesInlineString
    /// ])));
    /// ```
    pub const fn intersects(&self, other: &Self) -> bool {
        let mut i = 0;
        while i < self.0.len() {
            if self.0[i] & other.0[i] != 0 {
                return true;
            }
            i += 1;
        }
        false
    }

    /// Returns `true` if this set contains no rules, `false` otherwise.
    ///
    /// ## Examples
    ///
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// assert!(RuleSet::empty().is_empty());
    /// assert!(
    ///     !RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::BadQuotesInlineString])
    ///         .is_empty()
    /// );
    /// ```
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the number of rules in this set.
    ///
    /// ## Examples
    ///
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// assert_eq!(RuleSet::empty().len(), 0);
    /// assert_eq!(
    ///     RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::BadQuotesInlineString]).len(),
    ///     2
    /// );
    /// ```
    pub const fn len(&self) -> usize {
        // Sum the popcount of every word in the bitset.
        let mut len: u32 = 0;
        let mut i = 0;
        while i < self.0.len() {
            len += self.0[i].count_ones();
            i += 1;
        }
        len as usize
    }

    /// Inserts `rule` into the set.
    ///
    /// ## Examples
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let mut set = RuleSet::empty();
    ///
    /// assert!(!set.contains(Rule::AnyType));
    ///
    /// set.insert(Rule::AnyType);
    ///
    /// assert!(set.contains(Rule::AnyType));
    /// ```
    pub fn insert(&mut self, rule: Rule) {
        // `union` takes `self` by value, so temporarily move the set out.
        let set = std::mem::take(self);
        *self = set.union(&RuleSet::from_rule(rule));
    }

    /// Inserts or removes `rule` according to `enabled`.
    #[inline]
    pub fn set(&mut self, rule: Rule, enabled: bool) {
        if enabled {
            self.insert(rule);
        } else {
            self.remove(rule);
        }
    }

    /// Removes `rule` from the set.
    ///
    /// ## Examples
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let mut set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    ///
    /// set.remove(Rule::AmbiguousFunctionName);
    ///
    /// assert!(set.contains(Rule::AnyType));
    /// assert!(!set.contains(Rule::AmbiguousFunctionName));
    /// ```
    pub fn remove(&mut self, rule: Rule) {
        // `subtract` takes `self` by value, so temporarily move the set out.
        let set = std::mem::take(self);
        *self = set.subtract(&RuleSet::from_rule(rule));
    }

    /// Returns `true` if `rule` is in this set.
    ///
    /// ## Examples
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    ///
    /// assert!(set.contains(Rule::AmbiguousFunctionName));
    /// assert!(!set.contains(Rule::BreakOutsideLoop));
    /// ```
    #[inline]
    pub const fn contains(&self, rule: Rule) -> bool {
        let rule = rule as u16;
        // Locate the word and bit for this rule, mirroring `from_rule`.
        let index = rule as usize / Self::SLICE_BITS as usize;
        let shift = rule % Self::SLICE_BITS;
        let mask = 1 << shift;
        self.0[index] & mask != 0
    }

    /// Returns `true` if any of the rules in `rules` are in this set.
    #[inline]
    pub const fn any(&self, rules: &[Rule]) -> bool {
        let mut any = false;
        let mut i = 0;
        while i < rules.len() {
            any |= self.contains(rules[i]);
            i += 1;
        }
        any
    }

    /// Returns an iterator over the rules in this set.
    ///
    /// ## Examples
    ///
    /// ```rust
    /// # use ruff_linter::registry::{Rule, RuleSet};
    /// let set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
    ///
    /// let iter: Vec<_> = set.iter().collect();
    ///
    /// assert_eq!(iter, vec![Rule::AnyType, Rule::AmbiguousFunctionName]);
    /// ```
    pub fn iter(&self) -> RuleSetIterator {
        RuleSetIterator {
            set: self.clone(),
            index: 0,
        }
    }
}
impl Debug for RuleSet {
    /// Formats the set as a debug set of its contained rules.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_set();
        for rule in self {
            builder.entry(&rule);
        }
        builder.finish()
    }
}
impl Display for RuleSet {
    /// Renders the set as a bracketed, one-rule-per-line list, or `[]` when empty.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if self.is_empty() {
            return write!(f, "[]");
        }
        writeln!(f, "[")?;
        for rule in self.iter() {
            writeln!(f, "\t{name} ({code}),", name = rule.name(), code = rule.noqa_code())?;
        }
        write!(f, "]")
    }
}
impl FromIterator<Rule> for RuleSet {
    /// Collects rules into a set by folding each one into an accumulator.
    fn from_iter<T: IntoIterator<Item = Rule>>(iter: T) -> Self {
        iter.into_iter()
            .fold(RuleSet::empty(), |set, rule| {
                set.union(&RuleSet::from_rule(rule))
            })
    }
}
impl Extend<Rule> for RuleSet {
fn extend<T: IntoIterator<Item = Rule>>(&mut self, iter: T) {
let set = std::mem::take(self);
*self = set.union(&RuleSet::from_iter(iter));
}
}
impl IntoIterator for RuleSet {
type IntoIter = RuleSetIterator;
type Item = Rule;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl IntoIterator for &RuleSet {
    type IntoIter = RuleSetIterator;
    type Item = Rule;

    /// Iterates the rules without consuming the set (the iterator clones the bitset).
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Iterator over the [`Rule`]s stored in a [`RuleSet`].
pub struct RuleSetIterator {
    // Remaining rules; bits are cleared from this copy as rules are yielded.
    set: RuleSet,
    // Index of the `u64` word currently being drained.
    index: u16,
}
impl Iterator for RuleSetIterator {
    type Item = Rule;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // `get_mut` returns `None` once `index` walks past the last word,
            // which ends the iteration via `?`.
            let slice = self.set.0.get_mut(self.index as usize)?;
            // `trailing_zeros` is guaranteed to return a value in [0;64]
            #[expect(clippy::cast_possible_truncation)]
            let bit = slice.trailing_zeros() as u16;
            if bit < RuleSet::SLICE_BITS {
                // Clear the bit so each rule is yielded exactly once.
                *slice ^= 1 << bit;
                let rule_value = self.index * RuleSet::SLICE_BITS + bit;
                // SAFETY: RuleSet guarantees that only valid rules are stored in the set.
                #[expect(unsafe_code)]
                return Some(unsafe { std::mem::transmute::<u16, Rule>(rule_value) });
            }
            // This word is exhausted (all zeros); move on to the next one.
            self.index += 1;
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: `len` counts the remaining set bits.
        let len = self.set.len();
        (len, Some(len))
    }
}
// `size_hint` is exact (it counts remaining set bits), and `next` never yields again
// after returning `None` (the word index only advances), so both impls are sound.
impl ExactSizeIterator for RuleSetIterator {}

impl FusedIterator for RuleSetIterator {}
#[cfg(test)]
mod tests {
    use strum::IntoEnumIterator;

    use crate::registry::{Rule, RuleSet};

    /// Tests that the set can contain all rules
    #[test]
    fn test_all_rules() {
        for rule in Rule::iter() {
            let set = RuleSet::from_rule(rule);
            assert!(set.contains(rule));
        }

        // Round-trip: iterating a set built from every rule yields every rule, in order.
        let all_rules_set: RuleSet = Rule::iter().collect();
        let all_rules: Vec<_> = all_rules_set.iter().collect();
        let expected_rules: Vec<_> = Rule::iter().collect();
        assert_eq!(all_rules, expected_rules);
    }

    /// Removing an absent rule is a no-op rather than an error.
    #[test]
    fn remove_not_existing_rule_from_set() {
        let mut set = RuleSet::default();
        set.remove(Rule::AmbiguousFunctionName);
        assert!(!set.contains(Rule::AmbiguousFunctionName));
        assert!(set.is_empty());
        assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![]);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_linter/src/importer/mod.rs | crates/ruff_linter/src/importer/mod.rs | //! Code modification struct to add and modify import statements.
//!
//! Enables rules to make module members available (that may be not yet be imported) during fix
//! execution.
use std::error::Error;
use anyhow::Result;
use libcst_native as cst;
use ruff_diagnostics::Edit;
use ruff_python_ast::token::Tokens;
use ruff_python_ast::{self as ast, Expr, ModModule, Stmt};
use ruff_python_codegen::Stylist;
use ruff_python_importer::Insertion;
use ruff_python_parser::Parsed;
use ruff_python_semantic::{
ImportedName, MemberNameImport, ModuleNameImport, NameImport, SemanticModel,
};
use ruff_python_trivia::textwrap::indent;
use ruff_text_size::{Ranged, TextSize};
use crate::cst::matchers::{match_aliases, match_import_from, match_statement};
use crate::fix;
use crate::fix::codemods::CodegenStylist;
/// Adds and rewrites import statements in a module while generating fixes.
pub(crate) struct Importer<'a> {
    /// The Python AST to which we are adding imports.
    python_ast: &'a [Stmt],
    /// The tokens representing the Python AST.
    tokens: &'a Tokens,
    /// The source code text for `python_ast`.
    source: &'a str,
    /// The [`Stylist`] for the Python AST.
    stylist: &'a Stylist<'a>,
    /// The list of visited, top-level runtime imports in the Python AST.
    runtime_imports: Vec<&'a Stmt>,
    /// The list of visited, top-level `if TYPE_CHECKING:` blocks in the Python AST.
    type_checking_blocks: Vec<&'a Stmt>,
}
impl<'a> Importer<'a> {
/// Creates an importer over the parsed module with empty visit lists; imports and
/// type-checking blocks are registered incrementally via the `visit_*` methods.
pub(crate) fn new(
    parsed: &'a Parsed<ModModule>,
    source: &'a str,
    stylist: &'a Stylist<'a>,
) -> Self {
    Self {
        python_ast: parsed.suite(),
        tokens: parsed.tokens(),
        source,
        stylist,
        runtime_imports: Vec::default(),
        type_checking_blocks: Vec::default(),
    }
}

/// Visit a top-level import statement.
pub(crate) fn visit_import(&mut self, import: &'a Stmt) {
    self.runtime_imports.push(import);
}

/// Visit a top-level type-checking block.
pub(crate) fn visit_type_checking_block(&mut self, type_checking_block: &'a Stmt) {
    self.type_checking_blocks.push(type_checking_block);
}
/// Add an import statement to import the given module.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. If there are future imports, the new import will be added
/// after the last future import. Otherwise, it will be added after the most
/// recent top-level import statement.
pub(crate) fn add_import(&self, import: &NameImport, at: TextSize) -> Edit {
    let required_import = import.to_string();
    if let Some(stmt) = self.preceding_import(at) {
        // Insert after the last top-level import.
        Insertion::end_of_statement(stmt, self.source, self.stylist).into_edit(&required_import)
    } else {
        // Check if there are any future imports that we need to respect
        // (`from __future__` imports must stay first in the file).
        if let Some(last_future_import) = self.find_last_future_import() {
            // Insert after the last future import
            Insertion::end_of_statement(last_future_import, self.source, self.stylist)
                .into_edit(&required_import)
        } else {
            // Insert at the start of the file.
            Insertion::start_of_file(self.python_ast, self.source, self.stylist, None)
                .into_edit(&required_import)
        }
    }
}
/// Move an existing import to the top-level, thereby making it available at runtime.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// import statement.
pub(crate) fn runtime_import_edit(
    &self,
    import: &ImportedMembers,
    at: TextSize,
) -> Result<RuntimeImportEdit> {
    // Generate the modified import statement, keeping only the requested members.
    let content = fix::codemods::retain_imports(
        &import.names,
        import.statement,
        self.source,
        self.stylist,
    )?;

    // Add the import to the top-level.
    let insertion = if let Some(stmt) = self.preceding_import(at) {
        // Insert after the last top-level import.
        Insertion::end_of_statement(stmt, self.source, self.stylist)
    } else {
        // Insert at the start of the file.
        Insertion::start_of_file(self.python_ast, self.source, self.stylist, None)
    };
    let add_import_edit = insertion.into_edit(&content);

    Ok(RuntimeImportEdit { add_import_edit })
}
/// Move an existing import into a `TYPE_CHECKING` block.
///
/// If there are no existing `TYPE_CHECKING` blocks, a new one will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// `TYPE_CHECKING` block.
pub(crate) fn typing_import_edit(
    &self,
    import: &ImportedMembers,
    at: TextSize,
    semantic: &SemanticModel<'a>,
) -> Result<TypingImportEdit> {
    // Generate the modified import statement.
    let content = fix::codemods::retain_imports(
        &import.names,
        import.statement,
        self.source,
        self.stylist,
    )?;

    // Add the import to an existing `TYPE_CHECKING` block.
    if let Some(block) = self.preceding_type_checking_block(at) {
        // Add the import to the existing `TYPE_CHECKING` block.
        let type_checking_edit =
            if let Some(statement) = Self::type_checking_binding_statement(semantic, block) {
                if statement == import.statement {
                    // Special-case: if the `TYPE_CHECKING` symbol is imported as part of the same
                    // statement that we're modifying, avoid adding a no-op edit. For example, here,
                    // the `TYPE_CHECKING` no-op edit would overlap with the edit to remove `Final`
                    // from the import:
                    // ```python
                    // from __future__ import annotations
                    //
                    // from typing import Final, TYPE_CHECKING
                    //
                    // Const: Final[dict] = {}
                    // ```
                    None
                } else {
                    // A no-op replacement over the binding statement, to force a
                    // conflict with any other fix that would remove it.
                    Some(Edit::range_replacement(
                        self.source[statement.range()].to_string(),
                        statement.range(),
                    ))
                }
            } else {
                None
            };
        return Ok(TypingImportEdit {
            type_checking_edit,
            add_import_edit: self.add_to_type_checking_block(&content, block.start()),
        });
    }

    // Import the `TYPE_CHECKING` symbol from the typing module.
    let (type_checking_edit, type_checking) =
        if let Some(type_checking) = Self::find_type_checking(at, semantic)? {
            // Special-case: if the `TYPE_CHECKING` symbol is imported as part of the same
            // statement that we're modifying, avoid adding a no-op edit. For example, here,
            // the `TYPE_CHECKING` no-op edit would overlap with the edit to remove `Final`
            // from the import:
            // ```python
            // from __future__ import annotations
            //
            // from typing import Final, TYPE_CHECKING
            //
            // Const: Final[dict] = {}
            // ```
            let edit = if type_checking.statement(semantic) == import.statement {
                None
            } else {
                Some(Edit::range_replacement(
                    self.source[type_checking.range()].to_string(),
                    type_checking.range(),
                ))
            };
            (edit, type_checking.into_name())
        } else {
            // Special-case: if the `TYPE_CHECKING` symbol would be added to the same import
            // we're modifying, import it as a separate import statement. For example, here,
            // we're concurrently removing `Final` and adding `TYPE_CHECKING`, so it's easier to
            // use a separate import statement:
            // ```python
            // from __future__ import annotations
            //
            // from typing import Final
            //
            // Const: Final[dict] = {}
            // ```
            let (edit, name) = self.import_symbol(
                &ImportRequest::import_from("typing", "TYPE_CHECKING"),
                at,
                Some(import.statement),
                semantic,
            )?;
            (Some(edit), name)
        };

    // Add the import to a new `TYPE_CHECKING` block.
    Ok(TypingImportEdit {
        type_checking_edit,
        add_import_edit: self.add_type_checking_block(
            &format!(
                "{}if {type_checking}:{}{}",
                self.stylist.line_ending().as_str(),
                self.stylist.line_ending().as_str(),
                indent(&content, self.stylist.indentation())
            ),
            at,
        )?,
    })
}

/// Return the statement that binds the symbol tested by `type_checking_block`'s
/// `if` condition (e.g. the import of `TYPE_CHECKING`), if it can be resolved.
fn type_checking_binding_statement(
    semantic: &SemanticModel<'a>,
    type_checking_block: &Stmt,
) -> Option<&'a Stmt> {
    let Stmt::If(ast::StmtIf { test, .. }) = type_checking_block else {
        return None;
    };
    // Peel attribute accesses (e.g. `typing.TYPE_CHECKING`) down to the base name.
    let mut source = test;
    while let Expr::Attribute(ast::ExprAttribute { value, .. }) = source.as_ref() {
        source = value;
    }
    semantic
        .binding(semantic.resolve_name(source.as_name_expr()?)?)
        .statement(semantic)
}

/// Find a reference to `typing.TYPE_CHECKING`.
fn find_type_checking(
    at: TextSize,
    semantic: &SemanticModel,
) -> Result<Option<ImportedName>, ResolutionError> {
    // Check every configured typing module, returning the first match.
    for module in semantic.typing_modules() {
        if let Some(imported_name) = Self::find_symbol(
            &ImportRequest::import_from(module, "TYPE_CHECKING"),
            at,
            semantic,
        )? {
            return Ok(Some(imported_name));
        }
    }
    Ok(None)
}
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
/// the symbol available in the current scope along with the bound name of the symbol.
///
/// Attempts to reuse existing imports when possible.
pub(crate) fn get_or_import_symbol(
    &self,
    symbol: &ImportRequest,
    at: TextSize,
    semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
    self.get_symbol(symbol, at, semantic)?
        .map_or_else(|| self.import_symbol(symbol, at, None, semantic), Ok)
}

/// For a given builtin symbol, determine whether an [`Edit`] is necessary to make the symbol
/// available in the current scope. For example, if `zip` has been overridden in the relevant
/// scope, the `builtins` module will need to be imported in order for a `Fix` to reference
/// `zip`; but otherwise, that won't be necessary.
///
/// Returns a two-item tuple. The first item is either `Some(Edit)` (indicating) that an
/// edit is necessary to make the symbol available, or `None`, indicating that the symbol has
/// not been overridden in the current scope. The second item in the tuple is the bound name
/// of the symbol.
///
/// Attempts to reuse existing imports when possible.
pub(crate) fn get_or_import_builtin_symbol(
    &self,
    symbol: &str,
    at: TextSize,
    semantic: &SemanticModel,
) -> Result<(Option<Edit>, String), ResolutionError> {
    if semantic.has_builtin_binding(symbol) {
        return Ok((None, symbol.to_string()));
    }
    let (import_edit, binding) =
        self.get_or_import_symbol(&ImportRequest::import("builtins", symbol), at, semantic)?;
    Ok((Some(import_edit), binding))
}

/// Return the [`ImportedName`] for an existing symbol, if it's present in the given
/// [`SemanticModel`].
fn find_symbol(
    symbol: &ImportRequest,
    at: TextSize,
    semantic: &SemanticModel,
) -> Result<Option<ImportedName>, ResolutionError> {
    // If the symbol is already available in the current scope, use it.
    let Some(imported_name) =
        semantic.resolve_qualified_import_name(symbol.module, symbol.member)
    else {
        return Ok(None);
    };

    // If the symbol source (i.e., the import statement) comes after the current location,
    // abort. For example, we could be generating an edit within a function, and the import
    // could be defined in the module scope, but after the function definition. In this case,
    // it's unclear whether we can use the symbol (the function could be called between the
    // import and the current location, and thus the symbol would not be available). It's also
    // unclear whether we should add an import statement at the start of the file, since it
    // could be shadowed between the import and the current location.
    if imported_name.start() > at {
        return Err(ResolutionError::ImportAfterUsage);
    }

    // If the symbol source (i.e., the import statement) is in a typing-only context, but we're
    // in a runtime context, abort.
    if imported_name.context().is_typing() && semantic.execution_context().is_runtime() {
        return Err(ResolutionError::IncompatibleContext);
    }

    Ok(Some(imported_name))
}
/// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
fn get_symbol(
    &self,
    symbol: &ImportRequest,
    at: TextSize,
    semantic: &SemanticModel,
) -> Result<Option<(Edit, String)>, ResolutionError> {
    // Find the symbol in the current scope.
    let Some(imported_name) = Self::find_symbol(symbol, at, semantic)? else {
        return Ok(None);
    };

    // We also add a no-op edit to force conflicts with any other fixes that might try to
    // remove the import. Consider:
    //
    // ```python
    // import sys
    //
    // quit()
    // ```
    //
    // Assume you omit this no-op edit. If you run Ruff with `unused-imports` and
    // `sys-exit-alias` over this snippet, it will generate two fixes: (1) remove the unused
    // `sys` import; and (2) replace `quit()` with `sys.exit()`, under the assumption that `sys`
    // is already imported and available.
    //
    // By adding this no-op edit, we force the `unused-imports` fix to conflict with the
    // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
    let import_edit = Edit::range_replacement(
        self.source[imported_name.range()].to_string(),
        imported_name.range(),
    );
    Ok(Some((import_edit, imported_name.into_name())))
}
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
/// the symbol available in the current scope along with the bound name of the symbol.
///
/// For example, assuming `module` is `"functools"` and `member` is `"lru_cache"`, this function
/// could return an [`Edit`] to add `import functools` to the start of the file, alongside with
/// the name on which the `lru_cache` symbol would be made available (`"functools.lru_cache"`).
fn import_symbol(
    &self,
    symbol: &ImportRequest,
    at: TextSize,
    except: Option<&Stmt>,
    semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
    // `except` excludes a statement that is being concurrently modified from reuse.
    if let Some(stmt) = self
        .find_import_from(symbol.module, at)
        .filter(|stmt| except != Some(stmt))
    {
        // Case 1: `from functools import lru_cache` is in scope, and we're trying to reference
        // `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the
        // bound name.
        if semantic.is_available(symbol.member) {
            let Ok(import_edit) = self.add_member(stmt, symbol.member) else {
                return Err(ResolutionError::InvalidEdit);
            };
            Ok((import_edit, symbol.member.to_string()))
        } else {
            Err(ResolutionError::ConflictingName(symbol.member.to_string()))
        }
    } else {
        match symbol.style {
            ImportStyle::Import => {
                // Case 2a: No `functools` import is in scope; thus, we add `import functools`,
                // and return `"functools.cache"` as the bound name.
                if semantic.is_available(symbol.module) {
                    let import_edit = self.add_import(
                        &NameImport::Import(ModuleNameImport::module(
                            symbol.module.to_string(),
                        )),
                        at,
                    );
                    Ok((
                        import_edit,
                        format!(
                            "{module}.{member}",
                            module = symbol.module,
                            member = symbol.member
                        ),
                    ))
                } else {
                    Err(ResolutionError::ConflictingName(symbol.module.to_string()))
                }
            }
            ImportStyle::ImportFrom => {
                // Case 2b: No `functools` import is in scope; thus, we add
                // `from functools import cache`, and return `"cache"` as the bound name.
                if semantic.is_available(symbol.member) {
                    let import_edit = self.add_import(
                        &NameImport::ImportFrom(MemberNameImport::member(
                            symbol.module.to_string(),
                            symbol.member.to_string(),
                        )),
                        at,
                    );
                    Ok((import_edit, symbol.member.to_string()))
                } else {
                    Err(ResolutionError::ConflictingName(symbol.member.to_string()))
                }
            }
        }
    }
}
/// Return the top-level [`Stmt`] that imports the given module using `Stmt::ImportFrom`
/// preceding the given position, if any.
fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> {
    // Consider only statements that start before `at`, and keep the last matching
    // `from <module> import ...` (non-relative, non-wildcard) among them.
    self.runtime_imports
        .iter()
        .take_while(|stmt| stmt.start() < at)
        .filter(|stmt| {
            if let Stmt::ImportFrom(ast::StmtImportFrom {
                module: name,
                names,
                level,
                range: _,
                node_index: _,
            }) = stmt
            {
                *level == 0
                    && name.as_ref().is_some_and(|name| name == module)
                    && names.iter().all(|alias| alias.name.as_str() != "*")
            } else {
                false
            }
        })
        .last()
        .copied()
}
/// Add the given member to an existing `Stmt::ImportFrom` statement.
fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
    // Round-trip the statement through a CST so the rewritten import preserves
    // the module's exact formatting.
    let mut statement = match_statement(&self.source[stmt.range()])?;
    let import_from = match_import_from(&mut statement)?;
    let aliases = match_aliases(import_from)?;
    aliases.push(cst::ImportAlias {
        name: cst::NameOrAttribute::N(Box::new(cst::Name {
            value: member,
            lpar: vec![],
            rpar: vec![],
        })),
        asname: None,
        // Mirror the trailing-comma style of the previous last alias so the new
        // member matches the existing list's formatting.
        comma: aliases.last().and_then(|alias| alias.comma.clone()),
    });
    // Replace the entire original statement with the regenerated source.
    Ok(Edit::range_replacement(
        statement.codegen_stylist(self.stylist),
        stmt.range(),
    ))
}
/// Add a `TYPE_CHECKING` block to the given module.
fn add_type_checking_block(&self, content: &str, at: TextSize) -> Result<Edit> {
    // Prefer inserting right after the last top-level import; otherwise, insert
    // at the very start of the file.
    let insertion = match self.preceding_import(at) {
        Some(stmt) => Insertion::end_of_statement(stmt, self.source, self.stylist),
        None => Insertion::start_of_file(self.python_ast, self.source, self.stylist, None),
    };
    if insertion.is_inline() {
        return Err(anyhow::anyhow!(
            "Cannot insert `TYPE_CHECKING` block inline"
        ));
    }
    Ok(insertion.into_edit(content))
}
/// Add an import statement to an existing `TYPE_CHECKING` block.
fn add_to_type_checking_block(&self, content: &str, at: TextSize) -> Edit {
    let insertion = Insertion::start_of_block(at, self.source, self.stylist, self.tokens);
    insertion.into_edit(content)
}
/// Return the import statement that precedes the given position, if any.
fn preceding_import(&self, at: TextSize) -> Option<&'a Stmt> {
    // `partition_point` counts the imports that start before `at`; the
    // predecessor, when one exists, sits just before that boundary.
    let count = self.runtime_imports.partition_point(|stmt| stmt.start() < at);
    if count == 0 {
        None
    } else {
        Some(self.runtime_imports[count - 1])
    }
}
/// Return the `TYPE_CHECKING` block that precedes the given position, if any.
fn preceding_type_checking_block(&self, at: TextSize) -> Option<&'a Stmt> {
    // Only the module's first `TYPE_CHECKING` block is considered here.
    let block = self.type_checking_blocks.first()?;
    (block.start() <= at).then_some(block)
}
/// Find the last `from __future__` import statement in the AST.
fn find_last_future_import(&self) -> Option<&'a Stmt> {
    let mut body = self.python_ast.iter().peekable();
    // Skip a leading module docstring, if present: `__future__` imports may
    // legally appear after it.
    let _docstring = body.next_if(|stmt| ast::helpers::is_docstring_stmt(stmt));
    // `__future__` imports must form a contiguous run at the top of the module,
    // so the scan stops at the first statement that isn't one.
    body.take_while(|stmt| {
        stmt.as_import_from_stmt()
            .is_some_and(|import_from| import_from.module.as_deref() == Some("__future__"))
    })
    .last()
}
/// Add a `from __future__ import annotations` import.
pub(crate) fn add_future_import(&self) -> Edit {
    // Offset zero ensures the import lands at the very beginning of the file
    // via `Insertion::start_of_file`.
    self.add_import(
        &NameImport::ImportFrom(MemberNameImport::member(
            "__future__".to_string(),
            "annotations".to_string(),
        )),
        TextSize::default(),
    )
}
}
/// An edit to the top-level of a module, making it available at runtime.
#[derive(Debug)]
pub(crate) struct RuntimeImportEdit {
    /// The edit to add the import to the top-level of the module.
    add_import_edit: Edit,
}

impl RuntimeImportEdit {
    /// Consume the edit, returning the list of edits to apply to the module.
    pub(crate) fn into_edits(self) -> Vec<Edit> {
        let RuntimeImportEdit { add_import_edit } = self;
        vec![add_import_edit]
    }
}
/// An edit to an import to a typing-only context.
#[derive(Debug)]
pub(crate) struct TypingImportEdit {
    /// The edit to add the `TYPE_CHECKING` symbol to the module.
    type_checking_edit: Option<Edit>,
    /// The edit to add the import to a `TYPE_CHECKING` block.
    add_import_edit: Edit,
}

impl TypingImportEdit {
    /// Consume the edit, returning the primary edit and an optional follow-up edit.
    ///
    /// When the `TYPE_CHECKING` symbol also needs to be introduced, that edit comes
    /// first and the import insertion second; otherwise the import insertion stands
    /// alone.
    pub(crate) fn into_edits(self) -> (Edit, Option<Edit>) {
        match self.type_checking_edit {
            Some(type_checking_edit) => (type_checking_edit, Some(self.add_import_edit)),
            None => (self.add_import_edit, None),
        }
    }
}
#[derive(Debug)]
enum ImportStyle {
    /// Import the symbol using the `import` statement (e.g. `import foo; foo.bar`).
    Import,
    /// Import the symbol using the `from` statement (e.g. `from foo import bar; bar`).
    ImportFrom,
}

#[derive(Debug)]
pub(crate) struct ImportRequest<'a> {
    /// The module from which the symbol can be imported (e.g., `foo`, in `from foo import bar`).
    module: &'a str,
    /// The member to import (e.g., `bar`, in `from foo import bar`).
    member: &'a str,
    /// The preferred style to use when importing the symbol (e.g., `import foo` or
    /// `from foo import bar`), if it's not already in scope.
    style: ImportStyle,
}

impl<'a> ImportRequest<'a> {
    /// Shared constructor for both import styles.
    fn with_style(module: &'a str, member: &'a str, style: ImportStyle) -> Self {
        Self {
            module,
            member,
            style,
        }
    }

    /// Create a new `ImportRequest` from a module and member. If not present in the scope,
    /// the symbol should be imported using the "import" statement.
    pub(crate) fn import(module: &'a str, member: &'a str) -> Self {
        Self::with_style(module, member, ImportStyle::Import)
    }

    /// Create a new `ImportRequest` from a module and member. If not present in the scope,
    /// the symbol should be imported using the "import from" statement.
    pub(crate) fn import_from(module: &'a str, member: &'a str) -> Self {
        Self::with_style(module, member, ImportStyle::ImportFrom)
    }
}
/// An existing list of module or member imports, located within an import statement.
pub(crate) struct ImportedMembers<'a> {
    /// The import statement that binds the members.
    pub(crate) statement: &'a Stmt,
    /// The "names" of the imported members.
    pub(crate) names: Vec<&'a str>,
}
/// The result of an [`Importer::get_or_import_symbol`] call.
#[derive(Debug)]
pub(crate) enum ResolutionError {
    /// The symbol is imported, but the import came after the current location.
    ImportAfterUsage,
    /// The symbol is imported, but in an incompatible context (e.g., in typing-only context, while
    /// we're in a runtime context).
    IncompatibleContext,
    /// The symbol can't be imported, because another symbol is bound to the same name.
    ConflictingName(String),
    /// The symbol can't be imported due to an error in editing an existing import statement.
    InvalidEdit,
}

impl std::fmt::Display for ResolutionError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::ImportAfterUsage => {
                write!(fmt, "Unable to use existing symbol due to late binding")
            }
            Self::IncompatibleContext => {
                write!(fmt, "Unable to use existing symbol due to incompatible context")
            }
            Self::ConflictingName(binding) => {
                write!(fmt, "Unable to insert `{binding}` into scope due to name conflict")
            }
            Self::InvalidEdit => {
                write!(fmt, "Unable to modify existing import statement")
            }
        }
    }
}

impl Error for ResolutionError {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/db.rs | crates/ty_server/src/db.rs | use crate::NotebookDocument;
use crate::session::index::Document;
use crate::system::LSPSystem;
use ruff_db::Db as _;
use ruff_db::files::{File, FilePath};
use ty_project::{Db as ProjectDb, ProjectDatabase};
#[salsa::db]
pub(crate) trait Db: ProjectDb {
    /// Returns the LSP [`Document`] corresponding to `File` or
    /// `None` if the file isn't open in the editor.
    fn document(&self, file: File) -> Option<&Document>;

    /// Returns the LSP [`NotebookDocument`] corresponding to `File` or
    /// `None` if the file isn't open in the editor or if it isn't a notebook.
    fn notebook_document(&self, file: File) -> Option<&NotebookDocument> {
        // Default implementation: a notebook is an open document whose variant
        // happens to be a notebook.
        self.document(file)?.as_notebook()
    }
}
#[salsa::db]
impl Db for ProjectDatabase {
    fn document(&self, file: File) -> Option<&Document> {
        // Open documents are only tracked by the `LSPSystem`; if the database runs
        // on any other `System` implementation, the downcast fails and there are no
        // documents to return.
        self.system()
            .as_any()
            .downcast_ref::<LSPSystem>()
            .and_then(|system| match file.path(self) {
                FilePath::System(path) => system.system_path_to_document(path),
                FilePath::SystemVirtual(path) => system.system_virtual_path_to_document(path),
                // Vendored files are bundled with the server and are never open in the editor.
                FilePath::Vendored(_) => None,
            })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/lib.rs | crates/ty_server/src/lib.rs | use std::{num::NonZeroUsize, sync::Arc};
use anyhow::Context;
use lsp_server::Connection;
use ruff_db::system::{OsSystem, SystemPathBuf};
use crate::db::Db;
pub use crate::logging::{LogLevel, init_logging};
pub use crate::server::{PartialWorkspaceProgress, PartialWorkspaceProgressParams, Server};
pub use crate::session::{ClientOptions, DiagnosticMode, WorkspaceOptions};
pub use document::{NotebookDocument, PositionEncoding, TextDocument};
pub(crate) use session::Session;
mod capabilities;
mod db;
mod document;
mod logging;
mod server;
mod session;
mod system;
pub(crate) const SERVER_NAME: &str = "ty";
pub(crate) const DIAGNOSTIC_NAME: &str = "ty";
/// A common result type used in most cases where a
/// result type is needed.
pub(crate) type Result<T> = anyhow::Result<T>;
/// Starts the language server on the stdio transport and blocks until both the
/// server loop and the IO threads have exited, logging the final outcome.
pub fn run_server() -> anyhow::Result<()> {
    let four = NonZeroUsize::new(4).unwrap();
    // by default, we set the number of worker threads to `num_cpus`, with a maximum of 4.
    let worker_threads = std::thread::available_parallelism()
        .unwrap_or(four)
        .min(four);
    let (connection, io_threads) = Connection::stdio();
    let cwd = {
        let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
        SystemPathBuf::from_path_buf(cwd).map_err(|path| {
            anyhow::anyhow!(
                "The current working directory `{}` contains non-Unicode characters. \
                ty only supports Unicode paths.",
                path.display()
            )
        })?
    };
    // This is to complement the `LSPSystem` if the document is not available in the index.
    let fallback_system = Arc::new(OsSystem::new(cwd));
    let server_result = Server::new(worker_threads, connection, fallback_system, false)
        .context("Failed to start server")?
        .run();
    // Surface failures from both sources: the server loop itself and the stdio threads.
    // A server error takes precedence; an IO error is attached as context when both fail.
    let io_result = io_threads.join();
    let result = match (server_result, io_result) {
        (Ok(()), Ok(())) => Ok(()),
        (Err(server), Err(io)) => Err(server).context(format!("IO thread error: {io}")),
        (Err(server), _) => Err(server),
        (_, Err(io)) => Err(io).context("IO thread error"),
    };
    if let Err(err) = result.as_ref() {
        tracing::warn!("Server shut down with an error: {err}");
    } else {
        tracing::info!("Server shut down");
    }
    result
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session.rs | crates/ty_server/src/session.rs | //! Data model, state management, and configuration resolution.
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::ops::{Deref, DerefMut};
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use anyhow::{Context, anyhow};
use lsp_server::{Message, RequestId};
use lsp_types::notification::{DidChangeWatchedFiles, Exit, Notification};
use lsp_types::request::{
DocumentDiagnosticRequest, RegisterCapability, Request, Shutdown, UnregisterCapability,
WorkspaceDiagnosticRequest,
};
use lsp_types::{
DiagnosticRegistrationOptions, DiagnosticServerCapabilities,
DidChangeWatchedFilesRegistrationOptions, FileSystemWatcher, Registration, RegistrationParams,
TextDocumentContentChangeEvent, Unregistration, UnregistrationParams, Url,
};
use ruff_db::Db;
use ruff_db::files::{File, system_path_to_file};
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_python_ast::PySourceType;
use ty_combine::Combine;
use ty_project::metadata::Options;
use ty_project::watch::{ChangeEvent, CreatedKind};
use ty_project::{ChangeResult, CheckMode, Db as _, ProjectDatabase, ProjectMetadata};
use index::DocumentError;
use options::GlobalOptions;
use ty_python_semantic::MisconfigurationMode;
pub(crate) use self::options::InitializationOptions;
pub use self::options::{ClientOptions, DiagnosticMode, WorkspaceOptions};
pub(crate) use self::settings::{GlobalSettings, WorkspaceSettings};
use crate::capabilities::{ResolvedClientCapabilities, server_diagnostic_options};
use crate::document::{DocumentKey, DocumentVersion, NotebookDocument};
use crate::server::{Action, publish_settings_diagnostics};
use crate::session::client::Client;
use crate::session::index::Document;
use crate::session::request_queue::RequestQueue;
use crate::system::{AnySystemPath, LSPSystem};
use crate::{PositionEncoding, TextDocument};
use index::Index;
pub(crate) mod client;
pub(crate) mod index;
mod options;
mod request_queue;
mod settings;
/// The global state for the LSP
pub(crate) struct Session {
    /// A native system to use with the [`LSPSystem`].
    native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
    /// Used to retrieve information about open documents and settings.
    ///
    /// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
    /// to prevent the index from being accessed while it is being modified. It will be restored
    /// when the mutable reference ([`MutIndexGuard`]) is dropped.
    ///
    /// [`index_mut`]: Session::index_mut
    index: Option<Arc<Index>>,
    /// Maps workspace folders to their respective workspace.
    workspaces: Workspaces,
    /// The projects across all workspaces.
    projects: BTreeMap<SystemPathBuf, ProjectState>,
    /// The project to use for files outside any workspace. For example, if the user
    /// opens the project `<home>/my_project` in VS code but then opens a Python file from their Desktop.
    /// This file isn't part of the active workspace, nor is it part of any project. But we still want
    /// to provide some basic functionality like navigation, completions, syntax highlighting, etc.
    /// That's what we use the default project for.
    default_project: DefaultProject,
    /// Initialization options that were provided by the client during server initialization.
    initialization_options: InitializationOptions,
    /// Resolved global settings that are shared across all workspaces.
    global_settings: Arc<GlobalSettings>,
    /// The global position encoding, negotiated during LSP initialization.
    position_encoding: PositionEncoding,
    /// Tracks what LSP features the client supports and doesn't support.
    resolved_client_capabilities: ResolvedClientCapabilities,
    /// Tracks the pending requests between client and server.
    request_queue: RequestQueue,
    /// Has the client requested the server to shutdown.
    shutdown_requested: bool,
    /// Is the connected client a `TestServer` instance.
    in_test: bool,
    /// Messages that arrived before all workspaces finished initializing; they're
    /// replayed once initialization completes (see `Session::should_defer_message`).
    deferred_messages: VecDeque<Message>,
    /// A revision counter. It gets incremented on every change to `Session` that
    /// could result in different workspace diagnostics.
    revision: u64,
    /// A pending workspace diagnostics request because there were no diagnostics
    /// or no changes when the request ran last time.
    /// We'll re-run the request after every change to `Session` (see `revision`)
    /// to see if there are now changes and, if so, respond to the client.
    suspended_workspace_diagnostics_request: Option<SuspendedWorkspaceDiagnosticRequest>,
    /// Registrations is a set of LSP methods that have been dynamically registered with the
    /// client.
    registrations: HashSet<String>,
}
/// LSP State for a Project
pub(crate) struct ProjectState {
    /// Files that we have outstanding otherwise-untracked pushed diagnostics for.
    ///
    /// In `CheckMode::OpenFiles` we still read some files that the client hasn't
    /// told us to open. Notably settings files like `pyproject.toml`. In this
    /// mode the client will never pull diagnostics for that file, and because
    /// the file isn't formally "open" we also don't have a reliable signal to
    /// refresh diagnostics for it either.
    ///
    /// However diagnostics for those files include things like "you typo'd your
    /// configuration for the LSP itself", so it's really important that we tell
    /// the user about them! So we remember which ones we have emitted diagnostics
    /// for so that we can clear the diagnostics for all of them before we go
    /// to update any of them.
    pub(crate) untracked_files_with_pushed_diagnostics: Vec<Url>,
    /// The project's Salsa database.
    // Note: This field should be last to ensure the `db` gets dropped last.
    // The db drop order matters because we call `Arc::into_inner` on some Arc's
    // and we use Salsa's cancellation to guarantee that there's only a single reference to the `Arc`.
    // However, this requires that the db drops last.
    // This shouldn't matter here because the db's stored in the session are the
    // only reference we want to hold on, but better be safe than sorry ;).
    pub(crate) db: ProjectDatabase,
}
impl Session {
/// Creates a new session.
///
/// The given `workspace_urls` are registered immediately, but their project
/// databases are only created later, once the client's configuration response
/// arrives (see [`Session::initialize_workspaces`]).
pub(crate) fn new(
    resolved_client_capabilities: ResolvedClientCapabilities,
    position_encoding: PositionEncoding,
    workspace_urls: Vec<Url>,
    initialization_options: InitializationOptions,
    native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
    in_test: bool,
) -> crate::Result<Self> {
    let index = Arc::new(Index::new());
    let mut workspaces = Workspaces::default();
    // Register workspaces with default settings - they'll be initialized with real settings
    // when workspace/configuration response is received
    for url in workspace_urls {
        workspaces.register(url)?;
    }
    Ok(Self {
        native_system,
        position_encoding,
        workspaces,
        deferred_messages: VecDeque::new(),
        index: Some(index),
        default_project: DefaultProject::new(),
        initialization_options,
        global_settings: Arc::new(GlobalSettings::default()),
        projects: BTreeMap::new(),
        resolved_client_capabilities,
        request_queue: RequestQueue::new(),
        shutdown_requested: false,
        in_test,
        suspended_workspace_diagnostics_request: None,
        revision: 0,
        registrations: HashSet::new(),
    })
}
/// Returns a shared reference to the client/server request queue.
pub(crate) fn request_queue(&self) -> &RequestQueue {
    &self.request_queue
}
/// Returns an exclusive reference to the client/server request queue.
pub(crate) fn request_queue_mut(&mut self) -> &mut RequestQueue {
    &mut self.request_queue
}
/// Returns the initialization options the client supplied during server initialization.
pub(crate) fn initialization_options(&self) -> &InitializationOptions {
    &self.initialization_options
}
/// Whether the client has asked the server to shut down.
pub(crate) fn is_shutdown_requested(&self) -> bool {
    self.shutdown_requested
}
/// Records whether the client has asked the server to shut down.
pub(crate) fn set_shutdown_requested(&mut self, requested: bool) {
    self.shutdown_requested = requested;
}
/// Stores a workspace diagnostics request to retry later, then immediately tries
/// to resume it in case the session changed in the meantime.
pub(crate) fn set_suspended_workspace_diagnostics_request(
    &mut self,
    request: SuspendedWorkspaceDiagnosticRequest,
    client: &Client,
) {
    self.suspended_workspace_diagnostics_request = Some(request);
    // Run the suspended workspace diagnostic request immediately in case there
    // were changes since the workspace diagnostics background thread queued
    // the action to suspend the workspace diagnostic request.
    self.resume_suspended_workspace_diagnostic_request(client);
}
/// Removes and returns the suspended workspace diagnostics request, if any.
pub(crate) fn take_suspended_workspace_diagnostic_request(
    &mut self,
) -> Option<SuspendedWorkspaceDiagnosticRequest> {
    self.suspended_workspace_diagnostics_request.take()
}
/// Resumes (retries) the workspace diagnostic request if there
/// were any changes to the [`Session`] (the revision got bumped)
/// since the workspace diagnostic request ran last time.
///
/// The workspace diagnostic requests is ignored if the request
/// was cancelled in the meantime.
pub(crate) fn resume_suspended_workspace_diagnostic_request(&mut self, client: &Client) {
    self.suspended_workspace_diagnostics_request = self
        .suspended_workspace_diagnostics_request
        .take()
        .and_then(|request| {
            if !self.request_queue.incoming().is_pending(&request.id) {
                // Clear out the suspended request if the request has been cancelled.
                tracing::debug!("Skipping suspended workspace diagnostics request `{}` because it was cancelled", request.id);
                return None;
            }
            // `resume_if_revision_changed` hands the request back (to stay suspended)
            // when the revision is unchanged, or resumes it and returns `None` otherwise.
            request.resume_if_revision_changed(self.revision, client)
        });
}
/// Bumps the revision.
///
/// The revision is used to track when workspace diagnostics may have changed and need to be re-run.
/// It's okay if a bump doesn't necessarily result in new workspace diagnostics.
///
/// In general, any change to a project database should bump the revision and so should
/// any change to the document states (but also when the open workspaces change etc.).
fn bump_revision(&mut self) {
    self.revision += 1;
}
/// The LSP specification doesn't allow configuration requests during initialization,
/// but we need access to the configuration to resolve the settings in turn to create the
/// project databases. This will become more important in the future when we support
/// persistent caching. It's then crucial that we have the correct settings to select the
/// right cache.
///
/// We work around this by queueing up all messages that arrive between the `initialized` notification
/// and the completion of workspace initialization (which waits for the client's configuration response).
///
/// This queuing is only necessary when registering *new* workspaces. Changes to configurations
/// don't need to go through the same process because we can update the existing
/// database in place.
///
/// See <https://github.com/Microsoft/language-server-protocol/issues/567#issuecomment-2085131917>
///
/// Returns `Some(message)` when the message should be handled right away and `None`
/// when it was queued for later processing.
pub(crate) fn should_defer_message(&mut self, message: Message) -> Option<Message> {
    if self.workspaces.all_initialized() {
        Some(message)
    } else {
        match &message {
            Message::Request(request) => {
                // `shutdown` is always handled immediately, never deferred.
                if request.method == Shutdown::METHOD {
                    return Some(message);
                }
                tracing::debug!(
                    "Deferring `{}` request until all workspaces are initialized",
                    request.method
                );
            }
            Message::Response(_) => {
                // We still want to get client responses even during workspace initialization.
                return Some(message);
            }
            Message::Notification(notification) => {
                // `exit` is always handled immediately, never deferred.
                if notification.method == Exit::METHOD {
                    return Some(message);
                }
                tracing::debug!(
                    "Deferring `{}` notification until all workspaces are initialized",
                    notification.method
                );
            }
        }
        self.deferred_messages.push_back(message);
        None
    }
}
/// Returns the workspaces registered with this session.
pub(crate) fn workspaces(&self) -> &Workspaces {
    &self.workspaces
}
/// Returns a reference to the project's [`ProjectDatabase`] in which the given `path` belongs.
///
/// If the path is a system path, it will return the project database that is closest to the
/// given path, or the default project if no project is found for the path.
///
/// If the path is a virtual path, it will return the first project database in the session.
///
/// Convenience wrapper around [`Session::project_state`].
pub(crate) fn project_db(&self, path: &AnySystemPath) -> &ProjectDatabase {
    &self.project_state(path).db
}
/// Returns an iterator, in arbitrary order, over all project databases
/// in this session.
pub(crate) fn project_dbs(&self) -> impl Iterator<Item = &ProjectDatabase> {
    self.projects.values().map(|state| &state.db)
}
/// Returns a mutable reference to the project's [`ProjectDatabase`] in which the given `path`
/// belongs.
///
/// Refer to [`project_db`] for more details on how the project is selected.
///
/// [`project_db`]: Session::project_db
///
/// Convenience wrapper around [`Session::project_state_mut`].
pub(crate) fn project_db_mut(&mut self, path: &AnySystemPath) -> &mut ProjectDatabase {
    &mut self.project_state_mut(path).db
}
/// Returns a reference to the project's [`ProjectDatabase`] corresponding to the given path, if
/// any.
pub(crate) fn project_db_for_path(
    &self,
    path: impl AsRef<SystemPath>,
) -> Option<&ProjectDatabase> {
    let state = self.project_state_for_path(path)?;
    Some(&state.db)
}
/// Returns a reference to the project's [`ProjectState`] in which the given `path` belongs.
///
/// If the path is a system path, it will return the project database that is closest to the
/// given path, or the default project if no project is found for the path.
///
/// If the path is a virtual path, it will return the first project database in the session.
pub(crate) fn project_state(&self, path: &AnySystemPath) -> &ProjectState {
    match path {
        AnySystemPath::System(system_path) => {
            self.project_state_for_path(system_path).unwrap_or_else(|| {
                // Fall back to the default project for files outside any
                // registered project.
                self.default_project
                    .get(self.index.as_ref(), &self.native_system)
            })
        }
        AnySystemPath::SystemVirtual(_virtual_path) => {
            // TODO: Currently, ty only supports single workspace but we need to figure out
            // which project should this virtual path belong to when there are multiple
            // projects: https://github.com/astral-sh/ty/issues/794
            self.projects
                .iter()
                .next()
                .map(|(_, project)| project)
                .unwrap()
        }
    }
}
/// Returns a mutable reference to the project's [`ProjectState`] in which the given `path`
/// belongs.
///
/// Refer to [`project_db`] for more details on how the project is selected.
///
/// [`project_db`]: Session::project_db
pub(crate) fn project_state_mut(&mut self, path: &AnySystemPath) -> &mut ProjectState {
    match path {
        AnySystemPath::System(system_path) => self
            .projects
            // Take the project with the greatest root that still precedes
            // `system_path` in sort order.
            .range_mut(..=system_path.to_path_buf())
            .next_back()
            .map(|(_, project)| project)
            .unwrap_or_else(|| {
                // Fall back to the default project for files outside any
                // registered project.
                self.default_project
                    .get_mut(self.index.as_ref(), &self.native_system)
            }),
        AnySystemPath::SystemVirtual(_virtual_path) => {
            // TODO: Currently, ty only supports single workspace but we need to figure out
            // which project should this virtual path belong to when there are multiple
            // projects: https://github.com/astral-sh/ty/issues/794
            self.projects
                .iter_mut()
                .next()
                .map(|(_, project)| project)
                .unwrap()
        }
    }
}
/// Returns a reference to the project's [`ProjectState`] corresponding to the given path, if
/// any.
pub(crate) fn project_state_for_path(
    &self,
    path: impl AsRef<SystemPath>,
) -> Option<&ProjectState> {
    // The nearest enclosing project is the last entry whose root sorts at or
    // before `path` in the BTreeMap's ordering.
    let upper_bound = path.as_ref().to_path_buf();
    self.projects
        .range(..=upper_bound)
        .next_back()
        .map(|(_root, state)| state)
}
/// Applies the given file-system `changes` to the project database responsible for `path`,
/// bumping the session revision so a suspended workspace diagnostics request gets re-run.
pub(crate) fn apply_changes(
    &mut self,
    path: &AnySystemPath,
    changes: Vec<ChangeEvent>,
) -> ChangeResult {
    // Pass the workspace-level option overrides along — presumably so a
    // configuration reload keeps honoring them; verify against
    // `ProjectDatabase::apply_changes`.
    let overrides = path.as_system().and_then(|root| {
        self.workspaces()
            .for_path(root)?
            .settings()
            .project_options_overrides()
            .cloned()
    });
    self.bump_revision();
    self.project_db_mut(path)
        .apply_changes(changes, overrides.as_ref())
}
/// Returns a mutable iterator over all project databases that have been initialized to this point.
///
/// This iterator will only yield the default project database if it has been used.
pub(crate) fn projects_mut(&mut self) -> impl Iterator<Item = &'_ mut ProjectDatabase> + '_ {
    self.project_states_mut().map(|project| &mut project.db)
}
/// Returns a mutable iterator over all projects that have been initialized to this point.
///
/// This iterator will only yield the default project if it has been used.
pub(crate) fn project_states_mut(&mut self) -> impl Iterator<Item = &'_ mut ProjectState> + '_ {
    // NOTE(review): `try_get_mut` presumably returns `Some` only once the default
    // project has been lazily created — confirm against `DefaultProject`.
    let default_project = self.default_project.try_get_mut();
    self.projects.values_mut().chain(default_project)
}
/// Initializes all registered workspaces with the settings received via the client's
/// `workspace/configuration` response, creating one project database per workspace.
///
/// Must only be called while workspaces are still uninitialized; afterwards, all
/// workspaces are guaranteed to be initialized.
pub(crate) fn initialize_workspaces(
    &mut self,
    workspace_settings: Vec<(Url, ClientOptions)>,
    client: &Client,
) {
    assert!(!self.workspaces.all_initialized());
    // These are the options combined from all the global options received by the server for
    // each workspace via the workspace configuration request.
    let mut combined_global_options: Option<GlobalOptions> = None;
    for (url, options) in workspace_settings {
        // Combine the global options specified during initialization with the
        // workspace-specific options to create the final workspace options.
        let ClientOptions {
            global, workspace, ..
        } = self
            .initialization_options
            .options
            .clone()
            .combine(options.clone());
        tracing::debug!("Initializing workspace `{url}`: {workspace:#?}");
        // Surface any options the server doesn't recognize back to the user.
        let unknown_options = &options.unknown;
        if !unknown_options.is_empty() {
            warn_about_unknown_options(client, Some(&url), unknown_options);
        }
        combined_global_options.combine_with(Some(global));
        let Ok(root) = url.to_file_path() else {
            tracing::debug!("Ignoring workspace with non-path root: {url}");
            continue;
        };
        // Realistically I don't think this can fail because we got the path from a Url
        let root = match SystemPathBuf::from_path_buf(root) {
            Ok(root) => root,
            Err(root) => {
                tracing::debug!(
                    "Ignoring workspace with non-UTF8 root: {root}",
                    root = root.display()
                );
                continue;
            }
        };
        let workspace_settings = workspace.into_settings(&root, client);
        let Some(workspace) = self.workspaces.initialize(&root, workspace_settings) else {
            continue;
        };
        // For now, create one project database per workspace.
        // In the future, index the workspace directories to find all projects
        // and create a project database for each.
        let system = LSPSystem::new(
            self.index.as_ref().unwrap().clone(),
            self.native_system.clone(),
        );
        // An explicit config-file override takes precedence over configuration discovery.
        let configuration_file = workspace
            .settings
            .project_options_overrides()
            .and_then(|settings| settings.config_file_override.as_ref());
        let metadata = if let Some(configuration_file) = configuration_file {
            ProjectMetadata::from_config_file(configuration_file.clone(), &root, &system)
        } else {
            ProjectMetadata::discover(&root, &system)
        };
        let project = metadata
            .context("Failed to discover project configuration")
            .and_then(|mut metadata| {
                metadata
                    .apply_configuration_files(&system)
                    .context("Failed to apply configuration files")?;
                if let Some(overrides) = workspace.settings.project_options_overrides() {
                    metadata.apply_overrides(overrides);
                }
                ProjectDatabase::new(metadata, system.clone())
            });
        let (root, db) = match project {
            Ok(db) => (root, db),
            Err(err) => {
                // Fall back to default settings so the server keeps working for this
                // workspace instead of refusing to start.
                tracing::error!(
                    "Failed to create project for workspace `{url}`: {err:#}. \
                    Falling back to default settings"
                );
                client.show_error_message(format!(
                    "Failed to load project for workspace {url}. \
                    Please refer to the logs for more details.",
                ));
                let db_with_default_settings = ProjectMetadata::from_options(
                    Options::default(),
                    root,
                    None,
                    MisconfigurationMode::UseDefault,
                )
                .context("Failed to convert default options to metadata")
                .and_then(|metadata| ProjectDatabase::new(metadata, system))
                .expect("Default configuration to be valid");
                let default_root = db_with_default_settings
                    .project()
                    .root(&db_with_default_settings)
                    .to_path_buf();
                (default_root, db_with_default_settings)
            }
        };
        // Carry forward diagnostic state if any exists
        let previous = self.projects.remove(&root);
        let untracked = previous
            .map(|state| state.untracked_files_with_pushed_diagnostics)
            .unwrap_or_default();
        self.projects.insert(
            root.clone(),
            ProjectState {
                db,
                untracked_files_with_pushed_diagnostics: untracked,
            },
        );
        publish_settings_diagnostics(self, client, root);
    }
    if let Some(global_options) = combined_global_options {
        let global_settings = global_options.into_settings();
        // Workspace-wide diagnostics require checking every file, not just the open ones.
        if global_settings.diagnostic_mode().is_workspace() {
            for project in self.projects.values_mut() {
                project.db.set_check_mode(CheckMode::AllFiles);
            }
        }
        self.global_settings = Arc::new(global_settings);
    }
    self.register_capabilities(client);
    assert!(
        self.workspaces.all_initialized(),
        "All workspaces should be initialized after calling `initialize_workspaces`"
    );
}
/// Pops the next deferred message, if any.
///
/// Deferred messages are only replayed once every workspace has finished
/// initializing; until then, the queue stays untouched.
pub(crate) fn take_deferred_messages(&mut self) -> Option<Message> {
    if !self.workspaces.all_initialized() {
        return None;
    }
    self.deferred_messages.pop_front()
}
/// Registers the dynamic capabilities with the client as per the resolved global settings.
///
/// ## Diagnostic capability
///
/// This capability is used to enable / disable workspace diagnostics as per the
/// `ty.diagnosticMode` global setting.
///
/// ## File watching
///
/// If the client supports it, file watchers are (re-)registered so the server is
/// notified about on-disk changes (see `Session::file_watcher_registration_options`).
fn register_capabilities(&mut self, client: &Client) {
    static DIAGNOSTIC_REGISTRATION_ID: &str = "ty/textDocument/diagnostic";
    static FILE_WATCHER_REGISTRATION_ID: &str = "ty/workspace/didChangeWatchedFiles";
    let mut registrations = vec![];
    let mut unregistrations = vec![];
    if self
        .resolved_client_capabilities
        .supports_diagnostic_dynamic_registration()
    {
        // Drop any previous diagnostic registration before re-registering with
        // (possibly) different options.
        if self
            .registrations
            .contains(DocumentDiagnosticRequest::METHOD)
        {
            unregistrations.push(Unregistration {
                id: DIAGNOSTIC_REGISTRATION_ID.into(),
                method: DocumentDiagnosticRequest::METHOD.into(),
            });
        }
        let diagnostic_mode = self.global_settings.diagnostic_mode;
        match diagnostic_mode {
            DiagnosticMode::Off => {
                tracing::debug!(
                    "Skipping registration of diagnostic capability because diagnostics are turned off"
                );
            }
            DiagnosticMode::OpenFilesOnly | DiagnosticMode::Workspace => {
                tracing::debug!(
                    "Registering diagnostic capability with {diagnostic_mode:?} diagnostic mode"
                );
                registrations.push(Registration {
                    id: DIAGNOSTIC_REGISTRATION_ID.into(),
                    method: DocumentDiagnosticRequest::METHOD.into(),
                    register_options: Some(
                        serde_json::to_value(
                            DiagnosticServerCapabilities::RegistrationOptions(
                                DiagnosticRegistrationOptions {
                                    diagnostic_options: server_diagnostic_options(
                                        diagnostic_mode.is_workspace(),
                                    ),
                                    ..Default::default()
                                },
                            ),
                        )
                        .unwrap(),
                    ),
                });
            }
        }
    }
    if let Some(register_options) = self.file_watcher_registration_options() {
        // Drop any previous watcher registration before re-registering with the
        // current glob set.
        if self.registrations.contains(DidChangeWatchedFiles::METHOD) {
            unregistrations.push(Unregistration {
                id: FILE_WATCHER_REGISTRATION_ID.into(),
                method: DidChangeWatchedFiles::METHOD.into(),
            });
        }
        registrations.push(Registration {
            id: FILE_WATCHER_REGISTRATION_ID.into(),
            method: DidChangeWatchedFiles::METHOD.into(),
            register_options: Some(serde_json::to_value(register_options).unwrap()),
        });
    }
    // First, unregister any existing capabilities and then register or re-register them.
    self.unregister_dynamic_capability(client, unregistrations);
    self.register_dynamic_capability(client, registrations);
}
/// Registers a list of dynamic capabilities with the client.
///
/// Records each registration's method in `self.registrations` so that a
/// later call can detect (and first unregister) an existing registration,
/// then sends a single `client/registerCapability` request covering all of
/// them. No request is sent when `registrations` is empty.
fn register_dynamic_capability(&mut self, client: &Client, registrations: Vec<Registration>) {
    if registrations.is_empty() {
        return;
    }
    // Fix: the original iterated over `®istrations` — a mangled
    // `&registrations` (`&reg` was collapsed into the `®` entity) — which
    // doesn't compile.
    for registration in &registrations {
        self.registrations.insert(registration.method.clone());
    }
    client.send_request::<RegisterCapability>(
        self,
        RegistrationParams { registrations },
        |_: &Client, ()| {
            tracing::debug!("Registered dynamic capabilities");
        },
    );
}
/// Unregisters a list of dynamic capabilities with the client.
fn unregister_dynamic_capability(
&mut self,
client: &Client,
unregistrations: Vec<Unregistration>,
) {
if unregistrations.is_empty() {
return;
}
for unregistration in &unregistrations {
if !self.registrations.remove(&unregistration.method) {
tracing::debug!(
"Unregistration for `{}` was requested, but it was not registered",
unregistration.method
);
}
}
client.send_request::<UnregisterCapability>(
self,
UnregistrationParams {
unregisterations: unregistrations,
},
|_: &Client, ()| {
tracing::debug!("Unregistered dynamic capabilities");
},
);
}
/// Try to register the file watcher provided by the client if the client supports it.
///
/// Note that this should be called *after* workspaces/projects have been initialized.
/// This is required because the globs we use for registering file watching take
/// project search paths into account.
fn file_watcher_registration_options(
&self,
) -> Option<DidChangeWatchedFilesRegistrationOptions> {
fn make_watcher(glob: &str) -> FileSystemWatcher {
FileSystemWatcher {
glob_pattern: lsp_types::GlobPattern::String(glob.into()),
kind: Some(lsp_types::WatchKind::all()),
}
}
fn make_relative_watcher(relative_to: &SystemPath, glob: &str) -> FileSystemWatcher {
let base_uri = Url::from_file_path(relative_to.as_std_path())
.expect("system path must be a valid URI");
let glob_pattern = lsp_types::GlobPattern::Relative(lsp_types::RelativePattern {
base_uri: lsp_types::OneOf::Right(base_uri),
pattern: glob.to_string(),
});
FileSystemWatcher {
glob_pattern,
kind: Some(lsp_types::WatchKind::all()),
}
}
if !self.client_capabilities().supports_file_watcher() {
tracing::warn!(
"Your LSP client doesn't support file watching: \
You may see stale results when files change outside the editor"
);
return None;
}
// We also want to watch everything in the search paths as
// well. But this seems to require "relative" watcher support.
// I had trouble getting this working without using a base uri.
//
// Specifically, I tried this for each search path:
//
// make_watcher(&format!("{path}/**"))
//
// But while this seemed to work for the project root, it
// simply wouldn't result in any file notifications for changes
// to files outside of the project root.
let watchers = if !self.client_capabilities().supports_relative_file_watcher() {
tracing::warn!(
"Your LSP client doesn't support file watching outside of project: \
You may see stale results when dependencies change"
);
// Initialize our list of watchers with the standard globs relative
// to the project root if we can't use relative globs.
vec![make_watcher("**")]
} else {
// Gather up all of our project roots and all of the corresponding
// project root system paths, then deduplicate them relative to
// one another. Then listen to everything.
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/document.rs | crates/ty_server/src/document.rs | //! Types and utilities for working with text, modifying source files, and `ty <-> LSP` type conversion.
mod location;
mod notebook;
mod range;
mod text_document;
use lsp_types::{PositionEncodingKind, Url};
use ruff_db::system::{SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf};
use crate::system::AnySystemPath;
pub(crate) use location::ToLink;
pub use notebook::NotebookDocument;
pub(crate) use range::{FileRangeExt, PositionExt, RangeExt, TextSizeExt, ToRangeExt};
pub use text_document::TextDocument;
pub(crate) use text_document::{DocumentVersion, LanguageId};
/// A convenient enumeration for supported text encodings. Can be converted to [`lsp_types::PositionEncodingKind`].
// Please maintain the order from least to greatest priority for the derived `Ord` impl:
// the server picks the *maximum* of the encodings the client offers.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum PositionEncoding {
    /// UTF 16 is the encoding supported by all LSP clients.
    #[default]
    UTF16,
    /// Second choice because UTF32 uses a fixed 4 byte encoding for each character (makes conversion relatively easy)
    UTF32,
    /// ty's preferred encoding
    UTF8,
}
impl From<PositionEncoding> for ruff_source_file::PositionEncoding {
    /// Maps the server-side encoding onto the equivalent
    /// `ruff_source_file` encoding variant.
    fn from(value: PositionEncoding) -> Self {
        match value {
            PositionEncoding::UTF16 => Self::Utf16,
            PositionEncoding::UTF32 => Self::Utf32,
            PositionEncoding::UTF8 => Self::Utf8,
        }
    }
}
/// A unique document ID, derived from a URL passed as part of an LSP request.
/// This document ID can point to either be a standalone Python file, a full notebook, or a cell within a notebook.
///
/// The `DocumentKey` is very similar to `AnySystemPath`. The important distinction is that
/// ty doesn't know about individual notebook cells, instead, ty operates on full notebook documents.
/// ty also doesn't support resolving settings per cell, instead, settings are resolved per file or notebook.
///
/// Thus, the motivation of `DocumentKey` is to prevent accidental use of Cell keys for operations
/// that expect to work on a file path level. That's what [`DocumentHandle::to_file_path`]
/// is for, it returns a file path for any document, taking into account that these methods should
/// return the notebook for cell documents and notebooks.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(super) enum DocumentKey {
    /// A URI using the `file` schema and maps to a valid path.
    File(SystemPathBuf),
    /// Any other URI.
    ///
    /// Used for Notebook-cells, URI's with non-`file` schemes, or invalid `file` URI's.
    /// The string is the full URL rendered as text (see [`DocumentKey::from_url`]).
    Opaque(String),
}
impl DocumentKey {
    /// Converts the given [`Url`] to an [`DocumentKey`].
    ///
    /// URLs with a `file` scheme that resolve to a real path become
    /// [`Self::File`]; everything else (non-`file` schemes, or `file` URLs
    /// that aren't valid paths) is kept verbatim as [`Self::Opaque`].
    pub(crate) fn from_url(url: &Url) -> Self {
        if url.scheme() != "file" {
            return Self::Opaque(url.to_string());
        }
        match url.to_file_path() {
            Ok(path) => {
                Self::File(SystemPathBuf::from_path_buf(path).expect("URL to be valid UTF-8"))
            }
            Err(()) => {
                tracing::warn!(
                    "Treating `file:` url `{url}` as opaque URL as it isn't a valid file path"
                );
                Self::Opaque(url.to_string())
            }
        }
    }

    /// Returns the corresponding [`AnySystemPath`] for this document key.
    ///
    /// Note, calling this method on a `DocumentKey::Opaque` representing a cell document
    /// will return a `SystemVirtualPath` corresponding to the cell URI but not the notebook file path.
    /// That's most likely not what you want.
    pub(super) fn to_file_path(&self) -> AnySystemPath {
        match self {
            Self::Opaque(uri) => {
                AnySystemPath::SystemVirtual(SystemVirtualPath::new(uri).to_path_buf())
            }
            Self::File(path) => AnySystemPath::System(path.clone()),
        }
    }

    /// Consuming variant of [`Self::to_file_path`] that avoids cloning.
    pub(super) fn into_file_path(self) -> AnySystemPath {
        match self {
            Self::Opaque(uri) => AnySystemPath::SystemVirtual(SystemVirtualPathBuf::from(uri)),
            Self::File(path) => AnySystemPath::System(path),
        }
    }
}
impl From<AnySystemPath> for DocumentKey {
    /// Real paths become [`DocumentKey::File`], virtual paths become
    /// [`DocumentKey::Opaque`] keyed by their string form.
    fn from(value: AnySystemPath) -> Self {
        match value {
            AnySystemPath::SystemVirtual(virtual_path) => Self::Opaque(virtual_path.to_string()),
            AnySystemPath::System(system_path) => Self::File(system_path),
        }
    }
}
impl std::fmt::Display for DocumentKey {
    /// Formats the key as its underlying path or URI.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::File(path) => write!(f, "{path}"),
            Self::Opaque(uri) => write!(f, "{uri}"),
        }
    }
}
impl From<PositionEncoding> for PositionEncodingKind {
    /// Converts to the wire-format encoding identifier used in LSP messages.
    fn from(value: PositionEncoding) -> Self {
        match value {
            PositionEncoding::UTF16 => PositionEncodingKind::UTF16,
            PositionEncoding::UTF32 => PositionEncodingKind::UTF32,
            PositionEncoding::UTF8 => PositionEncodingKind::UTF8,
        }
    }
}
impl TryFrom<&PositionEncodingKind> for PositionEncoding {
    type Error = ();

    /// Parses a client-advertised encoding kind; any encoding the server
    /// doesn't support yields `Err(())` and is skipped by the caller.
    fn try_from(value: &PositionEncodingKind) -> Result<Self, Self::Error> {
        if *value == PositionEncodingKind::UTF8 {
            Ok(PositionEncoding::UTF8)
        } else if *value == PositionEncodingKind::UTF16 {
            Ok(PositionEncoding::UTF16)
        } else if *value == PositionEncodingKind::UTF32 {
            Ok(PositionEncoding::UTF32)
        } else {
            Err(())
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/system.rs | crates/ty_server/src/system.rs | use std::any::Any;
use std::fmt;
use std::fmt::Display;
use std::hash::{DefaultHasher, Hash, Hasher as _};
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use crate::Db;
use crate::document::{DocumentKey, LanguageId};
use crate::session::index::{Document, Index};
use lsp_types::Url;
use ruff_db::file_revision::FileRevision;
use ruff_db::files::{File, FilePath};
use ruff_db::system::walk_directory::WalkDirectoryBuilder;
use ruff_db::system::{
CaseSensitivity, DirectoryEntry, FileType, GlobError, Metadata, PatternError, Result, System,
SystemPath, SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf, WritableSystem,
};
use ruff_notebook::{Notebook, NotebookError};
use ruff_python_ast::PySourceType;
use ty_ide::cached_vendored_path;
/// Returns a [`Url`] for the given [`File`].
///
/// Returns `None` when no URL can be produced: an unparsable path/URI, or a
/// vendored file without a cached on-disk copy.
pub(crate) fn file_to_url(db: &dyn Db, file: File) -> Option<Url> {
    match file.path(db) {
        FilePath::System(system) => Url::from_file_path(system.as_std_path()).ok(),
        FilePath::SystemVirtual(path) => Url::parse(path.as_str()).ok(),
        FilePath::Vendored(path) => cached_vendored_path(db, path)
            .and_then(|system_path| Url::from_file_path(system_path.as_std_path()).ok()),
    }
}
/// Represents either a [`SystemPath`] or a [`SystemVirtualPath`].
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) enum AnySystemPath {
    /// A path on the real file system.
    System(SystemPathBuf),
    /// A virtual path for documents without an on-disk location
    /// (e.g. notebook cells or non-`file`-scheme URIs).
    SystemVirtual(SystemVirtualPathBuf),
}
impl AnySystemPath {
    /// Returns the contained [`SystemPathBuf`], or `None` for virtual paths.
    pub(crate) const fn as_system(&self) -> Option<&SystemPathBuf> {
        match self {
            AnySystemPath::SystemVirtual(_) => None,
            AnySystemPath::System(system_path_buf) => Some(system_path_buf),
        }
    }

    /// Returns the contained [`SystemVirtualPath`], or `None` for real paths.
    #[expect(unused)]
    pub(crate) const fn as_virtual(&self) -> Option<&SystemVirtualPath> {
        match self {
            AnySystemPath::System(_) => None,
            AnySystemPath::SystemVirtual(path) => Some(path.as_path()),
        }
    }
}
impl fmt::Display for AnySystemPath {
    /// Formats the underlying path, real or virtual.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AnySystemPath::System(path) => write!(f, "{path}"),
            AnySystemPath::SystemVirtual(path) => write!(f, "{path}"),
        }
    }
}
/// A [`System`] implementation that overlays the open editor documents on top
/// of a native system implementation: reads hit the in-memory documents first
/// and fall back to the real file system.
#[derive(Debug, Clone)]
pub(crate) struct LSPSystem {
    /// A read-only copy of the index where the server stores all the open documents and settings.
    ///
    /// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
    /// method to prevent the index from being accessed while it is being modified. It will be
    /// restored when the mutable reference is dropped.
    ///
    /// [`index_mut`]: crate::Session::index_mut
    index: Option<Arc<Index>>,

    /// A native system implementation.
    ///
    /// This is used to delegate method calls that are not handled by the LSP system. It is also
    /// used as a fallback when the documents are not found in the LSP index.
    native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
}
impl LSPSystem {
    /// Creates a new LSP system wrapping the given index and native system.
    pub(crate) fn new(
        index: Arc<Index>,
        native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
    ) -> Self {
        Self {
            index: Some(index),
            native_system,
        }
    }

    /// Takes the index out of the system.
    pub(crate) fn take_index(&mut self) -> Option<Arc<Index>> {
        self.index.take()
    }

    /// Sets the index for the system.
    pub(crate) fn set_index(&mut self, index: Arc<Index>) {
        self.index = Some(index);
    }

    /// Returns a reference to the contained index.
    ///
    /// # Panics
    ///
    /// Panics if the index is `None`, i.e. while it has been taken out via
    /// [`Self::take_index`] (see the `index` field documentation).
    fn index(&self) -> &Index {
        self.index.as_ref().unwrap()
    }

    /// Looks up the open document for `path`; `None` if no matching document
    /// is open in the editor.
    fn document(&self, path: AnySystemPath) -> Option<&Document> {
        let index = self.index();
        index.document(&DocumentKey::from(path)).ok()
    }

    /// Derives the [`PySourceType`] for an open document.
    ///
    /// Notebook documents are always `Ipynb`. A text document only gets a
    /// source type when the client reported a Python language id; the file
    /// extension is consulted first, falling back to plain `Python` when the
    /// extension is missing or unknown.
    fn source_type_from_document(
        document: &Document,
        extension: Option<&str>,
    ) -> Option<PySourceType> {
        match document {
            Document::Text(text) => match text.language_id()? {
                LanguageId::Python => Some(
                    extension
                        .and_then(PySourceType::try_from_extension)
                        .unwrap_or(PySourceType::Python),
                ),
                LanguageId::Other => None,
            },
            Document::Notebook(_) => Some(PySourceType::Ipynb),
        }
    }

    /// Returns the open document for a real file system path, if any.
    pub(crate) fn system_path_to_document(&self, path: &SystemPath) -> Option<&Document> {
        let any_path = AnySystemPath::System(path.to_path_buf());
        self.document(any_path)
    }

    /// Returns the open document for a virtual path, if any.
    pub(crate) fn system_virtual_path_to_document(
        &self,
        path: &SystemVirtualPath,
    ) -> Option<&Document> {
        let any_path = AnySystemPath::SystemVirtual(path.to_path_buf());
        self.document(any_path)
    }
}
// Reads consult the open-document index first and fall back to
// `native_system`; everything else (directory walking, globbing, env, ...) is
// pure delegation to the native system.
impl System for LSPSystem {
    fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
        // Open documents shadow the on-disk file: report a synthetic revision
        // derived from the document version instead of file system metadata.
        let document = self.system_path_to_document(path);

        if let Some(document) = document {
            Ok(Metadata::new(
                document_revision(document, self.index()),
                None,
                FileType::File,
            ))
        } else {
            self.native_system.path_metadata(path)
        }
    }

    fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
        self.native_system.canonicalize_path(path)
    }

    fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        self.native_system.path_exists_case_sensitive(path, prefix)
    }

    fn source_type(&self, path: &SystemPath) -> Option<PySourceType> {
        let document = self.system_path_to_document(path)?;
        Self::source_type_from_document(document, path.extension())
    }

    fn virtual_path_source_type(&self, path: &SystemVirtualPath) -> Option<PySourceType> {
        let document = self.system_virtual_path_to_document(path)?;
        Self::source_type_from_document(document, path.extension())
    }

    fn read_to_string(&self, path: &SystemPath) -> Result<String> {
        // Prefer the open document's in-memory contents; notebooks can't be
        // read as plain text here, so they also fall through to the disk.
        let document = self.system_path_to_document(path);

        match document {
            Some(Document::Text(document)) => Ok(document.contents().to_string()),
            _ => self.native_system.read_to_string(path),
        }
    }

    fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
        let document = self.system_path_to_document(path);

        match document {
            Some(Document::Text(document)) => Notebook::from_source_code(document.contents()),
            Some(Document::Notebook(notebook)) => Ok(notebook.to_ruff_notebook(self.index())),
            None => self.native_system.read_to_notebook(path),
        }
    }

    fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String> {
        // Virtual paths have no on-disk fallback: the document must be open.
        let document = self
            .system_virtual_path_to_document(path)
            .ok_or_else(|| virtual_path_not_found(path))?;

        if let Document::Text(document) = &document {
            Ok(document.contents().to_string())
        } else {
            Err(not_a_text_document(path))
        }
    }

    fn read_virtual_path_to_notebook(
        &self,
        path: &SystemVirtualPath,
    ) -> std::result::Result<Notebook, NotebookError> {
        let document = self
            .system_virtual_path_to_document(path)
            .ok_or_else(|| virtual_path_not_found(path))?;

        match document {
            Document::Text(document) => Notebook::from_source_code(document.contents()),
            Document::Notebook(notebook) => Ok(notebook.to_ruff_notebook(self.index())),
        }
    }

    fn current_directory(&self) -> &SystemPath {
        self.native_system.current_directory()
    }

    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        self.native_system.user_config_directory()
    }

    fn cache_dir(&self) -> Option<SystemPathBuf> {
        self.native_system.cache_dir()
    }

    fn read_directory<'a>(
        &'a self,
        path: &SystemPath,
    ) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>> {
        self.native_system.read_directory(path)
    }

    fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
        self.native_system.walk_directory(path)
    }

    fn glob(
        &self,
        pattern: &str,
    ) -> std::result::Result<
        Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>> + '_>,
        PatternError,
    > {
        self.native_system.glob(pattern)
    }

    fn as_writable(&self) -> Option<&dyn WritableSystem> {
        self.native_system.as_writable()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }

    fn case_sensitivity(&self) -> CaseSensitivity {
        self.native_system.case_sensitivity()
    }

    fn env_var(&self, name: &str) -> std::result::Result<String, std::env::VarError> {
        self.native_system.env_var(name)
    }

    fn dyn_clone(&self) -> Box<dyn System> {
        Box::new(self.clone())
    }
}
/// Builds the `InvalidInput` error returned when a text operation is
/// attempted on a document that isn't a text document (e.g. a notebook).
fn not_a_text_document(path: impl Display) -> std::io::Error {
    let message = format!("Input is not a text document: {path}");
    std::io::Error::new(std::io::ErrorKind::InvalidInput, message)
}
/// Builds the `NotFound` error returned when a virtual document isn't open
/// in the index (virtual paths have no on-disk fallback).
fn virtual_path_not_found(path: impl Display) -> std::io::Error {
    let message = format!("Virtual path does not exist: {path}");
    std::io::Error::new(std::io::ErrorKind::NotFound, message)
}
/// Helper function to get the [`FileRevision`] of the given document.
fn document_revision(document: &Document, index: &Index) -> FileRevision {
    // The file revision is just an opaque number which doesn't have any significant meaning other
    // than that the file has changed if the revisions are different.
    #[expect(clippy::cast_sign_loss)]
    match document {
        Document::Text(text) => FileRevision::new(text.version() as u128),
        Document::Notebook(notebook) => {
            // VS Code doesn't always bump the notebook version when the cell content changes.
            // Specifically, I noticed that VS Code re-uses the same version when:
            // 1. Adding a new cell
            // 2. Pasting some code that has an error
            //
            // The notification updating the cell content on paste re-used the same version as when the cell was added.
            // Because of that, hash all cell versions and the notebook versions together.
            let mut hasher = DefaultHasher::new();
            for cell_url in notebook.cell_urls() {
                // Cells that aren't (or are no longer) open in the index
                // simply don't contribute to the hash.
                if let Ok(cell) = index.document(&DocumentKey::from_url(cell_url)) {
                    cell.version().hash(&mut hasher);
                }
            }
            // Use higher 64 bits for notebook version and lower 64 bits for cell revisions.
            // The mask is defensive only: `hasher.finish()` returns a `u64`, so the
            // widened value already fits entirely in the low 64 bits.
            let notebook_version_high = (notebook.version() as u128) << 64;
            let cell_versions_low = u128::from(hasher.finish()) & 0xFFFF_FFFF_FFFF_FFFF;
            let combined_revision = notebook_version_high | cell_versions_low;
            FileRevision::new(combined_revision)
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server.rs | crates/ty_server/src/server.rs | //! Scheduling, I/O, and API endpoints.
use self::schedule::spawn_main_loop;
use crate::PositionEncoding;
use crate::capabilities::{ResolvedClientCapabilities, server_capabilities};
use crate::session::{InitializationOptions, Session, warn_about_unknown_options};
use anyhow::Context;
use lsp_server::Connection;
use lsp_types::{ClientCapabilities, InitializeParams, MessageType, Url};
use ruff_db::system::System;
use std::num::NonZeroUsize;
use std::panic::{PanicHookInfo, RefUnwindSafe};
use std::sync::Arc;
mod api;
mod lazy_work_done_progress;
mod main_loop;
mod schedule;
use crate::session::client::Client;
pub(crate) use api::Error;
pub(crate) use api::publish_settings_diagnostics;
pub(crate) use main_loop::{
Action, ConnectionSender, Event, MainLoopReceiver, MainLoopSender, SendRequest,
};
pub(crate) type Result<T> = std::result::Result<T, api::Error>;
pub use api::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};
/// The ty language server.
///
/// Created by [`Server::new`] during the LSP initialization handshake and
/// driven to completion by [`Server::run`].
pub struct Server {
    // The underlying LSP connection (request/response transport).
    connection: Connection,
    // Number of worker threads; presumably sizes the scheduler's thread
    // pool — TODO(review): confirm in the `schedule` module.
    worker_threads: NonZeroUsize,
    // Channel pair for events processed by the main loop.
    main_loop_receiver: MainLoopReceiver,
    main_loop_sender: MainLoopSender,
    // All mutable server state (workspaces, open documents, settings).
    session: Session,
}
impl Server {
    /// Performs the LSP initialization handshake and constructs the server.
    ///
    /// Deserializes the client's `InitializeParams`, sets up logging (unless
    /// `in_test`), resolves client capabilities and the position encoding,
    /// replies with the server capabilities, determines the workspace to use,
    /// and finally builds the [`Session`].
    pub fn new(
        worker_threads: NonZeroUsize,
        connection: Connection,
        native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
        in_test: bool,
    ) -> crate::Result<Self> {
        let (id, init_value) = connection.initialize_start()?;

        let InitializeParams {
            initialization_options,
            capabilities: client_capabilities,
            workspace_folders,
            ..
        } = serde_json::from_value(init_value)
            .context("Failed to deserialize initialization parameters")?;

        // Keep the deserialization error around: it is reported *after*
        // logging has been initialized so it actually reaches the log.
        let (initialization_options, deserialization_error) =
            InitializationOptions::from_value(initialization_options);

        if !in_test {
            crate::logging::init_logging(
                initialization_options.log_level.unwrap_or_default(),
                initialization_options.log_file.as_deref(),
            );
        }

        if let Some(error) = deserialization_error {
            tracing::error!("Failed to deserialize initialization options: {error}");
        }

        tracing::debug!("Initialization options: {initialization_options:#?}");

        let resolved_client_capabilities = ResolvedClientCapabilities::new(&client_capabilities);
        tracing::debug!("Resolved client capabilities: {resolved_client_capabilities}");

        let position_encoding = Self::find_best_position_encoding(&client_capabilities);
        let server_capabilities =
            server_capabilities(position_encoding, resolved_client_capabilities);

        let version = ruff_db::program_version().unwrap_or("Unknown");
        tracing::info!("Version: {version}");

        // Complete the handshake by advertising our capabilities and identity.
        connection.initialize_finish(
            id,
            serde_json::json!({
                "capabilities": server_capabilities,
                "serverInfo": {
                    "name": crate::SERVER_NAME,
                    "version": version
                }
            }),
        )?;

        // The number 32 was chosen arbitrarily. The main goal was to have enough capacity to queue
        // some responses before blocking.
        let (main_loop_sender, main_loop_receiver) = crossbeam::channel::bounded(32);
        let client = Client::new(main_loop_sender.clone(), connection.sender.clone());

        let unknown_options = &initialization_options.options.unknown;
        if !unknown_options.is_empty() {
            warn_about_unknown_options(&client, None, unknown_options);
        }

        // Get workspace URLs without settings - settings will come from workspace/configuration
        let workspace_urls = workspace_folders
            .filter(|folders| !folders.is_empty())
            .map(|folders| {
                folders
                    .into_iter()
                    .map(|folder| folder.uri)
                    .collect::<Vec<_>>()
            })
            .or_else(|| {
                // No workspace folders were provided: fall back to the current
                // working directory as a single default workspace.
                let current_dir = native_system
                    .current_directory()
                    .as_std_path()
                    .to_path_buf();
                tracing::warn!(
                    "No workspace(s) were provided during initialization. \
                    Using the current working directory from the fallback system as a \
                    default workspace: {}",
                    current_dir.display()
                );
                let uri = Url::from_file_path(current_dir).ok()?;
                Some(vec![uri])
            })
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Failed to get the current working directory while creating a \
                    default workspace."
                )
            })?;

        // Multi-root workspaces aren't supported yet: keep only the first
        // workspace and warn the user about the rest being ignored.
        let workspace_urls = if workspace_urls.len() > 1 {
            let first_workspace = workspace_urls.into_iter().next().unwrap();
            tracing::warn!(
                "Multiple workspaces are not yet supported, using the first workspace: {}",
                &first_workspace
            );
            client.show_warning_message(format_args!(
                "Multiple workspaces are not yet supported, using the first workspace: {}",
                &first_workspace,
            ));
            vec![first_workspace]
        } else {
            workspace_urls
        };

        Ok(Self {
            connection,
            worker_threads,
            main_loop_receiver,
            main_loop_sender,
            session: Session::new(
                resolved_client_capabilities,
                position_encoding,
                workspace_urls,
                initialization_options,
                native_system,
                in_test,
            )?,
        })
    }

    /// Runs the server's main loop to completion, with a panic hook that
    /// notifies the client installed for the duration.
    pub fn run(mut self) -> crate::Result<()> {
        let client = Client::new(
            self.main_loop_sender.clone(),
            self.connection.sender.clone(),
        );
        let _panic_hook = ServerPanicHookHandler::new(client);

        spawn_main_loop(move || self.main_loop())?.join()
    }

    /// Picks the highest-priority position encoding among those advertised by
    /// the client, defaulting to UTF-16 (supported by all clients) when the
    /// client advertises none that the server understands.
    fn find_best_position_encoding(client_capabilities: &ClientCapabilities) -> PositionEncoding {
        client_capabilities
            .general
            .as_ref()
            .and_then(|general_capabilities| general_capabilities.position_encodings.as_ref())
            .and_then(|encodings| {
                encodings
                    .iter()
                    .filter_map(|encoding| PositionEncoding::try_from(encoding).ok())
                    .max() // this selects the highest priority position encoding
            })
            .unwrap_or_default()
    }
}
/// The type of a process-global panic hook, as installed via `std::panic::set_hook`.
type PanicHook = Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send>;

/// Guard that installs a panic hook notifying the client about server panics
/// and restores the previous hook when dropped.
struct ServerPanicHookHandler {
    // The previously installed hook, restored in `Drop`.
    hook: Option<PanicHook>,
    // Hold on to the strong reference for as long as the panic hook is set.
    _client: Arc<Client>,
}
impl ServerPanicHookHandler {
    /// Installs a panic hook that logs the panic with a captured backtrace
    /// and shows an error message in the client, returning a guard that
    /// restores the previous hook on drop.
    fn new(client: Client) -> Self {
        let hook = std::panic::take_hook();
        let client = Arc::new(client);

        // Use a weak reference to the client because it must be dropped when exiting or the
        // io-threads join hangs forever (because client has a reference to the connection sender).
        let hook_client = Arc::downgrade(&client);

        // When we panic, try to notify the client.
        std::panic::set_hook(Box::new(move |panic_info| {
            use std::io::Write;

            let backtrace = std::backtrace::Backtrace::force_capture();
            tracing::error!("{panic_info}\n{backtrace}");

            // we also need to print to stderr directly for when using `$logTrace` because
            // the message won't be sent to the client.
            // But don't use `eprintln` because `eprintln` itself may panic if the pipe is broken.
            let mut stderr = std::io::stderr().lock();
            writeln!(stderr, "{panic_info}\n{backtrace}").ok();

            if let Some(client) = hook_client.upgrade() {
                client.show_message(
                    "The ty language server exited with a panic. See the logs for more details.",
                    MessageType::ERROR,
                );
            }
        }));

        Self {
            hook: Some(hook),
            _client: client,
        }
    }
}
impl Drop for ServerPanicHookHandler {
    /// Restores the previous panic hook, unless the thread is already
    /// unwinding (calling `std::panic::set_hook` while panicking panics).
    fn drop(&mut self) {
        let unwinding = std::thread::panicking();
        if !unwinding {
            if let Some(original_hook) = self.hook.take() {
                std::panic::set_hook(original_hook);
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/logging.rs | crates/ty_server/src/logging.rs | //! The logging system for `ty server`.
//!
//! Log messages are controlled by the `logLevel` setting which defaults to `"info"`. Log messages
//! are written to `stderr` by default, which should appear in the logs for most LSP clients. A
//! `logFile` path can also be specified in the settings, and output will be directed there
//! instead.
use std::sync::Arc;
use ruff_db::system::{SystemPath, SystemPathBuf};
use serde::Deserialize;
use tracing::Metadata;
use tracing::level_filters::LevelFilter;
use tracing::subscriber::Interest;
use tracing_subscriber::Layer;
use tracing_subscriber::fmt::time::ChronoLocal;
use tracing_subscriber::fmt::writer::BoxMakeWriter;
use tracing_subscriber::layer::SubscriberExt;
/// Installs the global `tracing` subscriber for the server.
///
/// Logs go to `log_file` when one is configured (tildes and environment
/// variables in the path are expanded; the file is opened in append mode),
/// otherwise to stderr. Panics if a global subscriber was already set.
pub fn init_logging(log_level: LogLevel, log_file: Option<&SystemPath>) {
    let log_file = log_file
        .map(|path| {
            // Expand tildes and environment variables in `logFile`; fall back
            // to the literal path when expansion fails.
            shellexpand::full(&path.to_string())
                .map(|expanded| SystemPathBuf::from(&*expanded))
                .unwrap_or_else(|_| path.to_path_buf())
        })
        .and_then(|path| {
            let open_result = std::fs::OpenOptions::new()
                .create(true)
                .append(true)
                .open(path.as_std_path());
            match open_result {
                Ok(file) => Some(file),
                Err(err) => {
                    #[expect(clippy::print_stderr)]
                    {
                        eprintln!("Failed to open file at {path} for logging: {err}");
                    }
                    // Fall back to stderr logging below.
                    None
                }
            }
        });

    let logger = match log_file {
        Some(file) => BoxMakeWriter::new(Arc::new(file)),
        None => BoxMakeWriter::new(std::io::stderr),
    };
    // Thread names and targets are only interesting at trace verbosity.
    let is_trace_level = log_level == LogLevel::Trace;
    let subscriber = tracing_subscriber::Registry::default().with(
        tracing_subscriber::fmt::layer()
            .with_timer(ChronoLocal::new("%Y-%m-%d %H:%M:%S.%f".to_string()))
            .with_thread_names(is_trace_level)
            .with_target(is_trace_level)
            .with_ansi(false)
            .with_writer(logger)
            .with_filter(LogLevelFilter { filter: log_level }),
    );
    tracing::subscriber::set_global_default(subscriber)
        .expect("should be able to set global default subscriber");
}
/// The log level for the server as provided by the client during initialization.
///
/// The default log level is `info`. Deserialized from lowercase strings
/// ("error", "warn", "info", "debug", "trace"); the `Ord` derive follows the
/// declared order from least to most verbose.
#[derive(Clone, Copy, Debug, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
    Error,
    Warn,
    #[default]
    Info,
    Debug,
    Trace,
}
impl LogLevel {
    /// Maps this configured level onto the equivalent [`tracing::Level`].
    fn trace_level(self) -> tracing::Level {
        match self {
            Self::Trace => tracing::Level::TRACE,
            Self::Debug => tracing::Level::DEBUG,
            Self::Info => tracing::Level::INFO,
            Self::Warn => tracing::Level::WARN,
            Self::Error => tracing::Level::ERROR,
        }
    }
}
/// Filters out traces which have a log level lower than the `logLevel` set by the client.
struct LogLevelFilter {
    // Client-configured minimum level for first-party (ty/ruff/e2e) events;
    // all other targets are capped at WARN (see `is_enabled`).
    filter: LogLevel,
}
impl LogLevelFilter {
    /// Returns whether an event/span with the given metadata should be logged.
    ///
    /// Events from ty, ruff, and the e2e tests honor the client-configured
    /// level; all other targets are limited to `WARN` and above.
    fn is_enabled(&self, meta: &Metadata<'_>) -> bool {
        const FIRST_PARTY_PREFIXES: [&str; 3] = ["ty", "ruff", "e2e"];

        let target = meta.target();
        let max_level = if FIRST_PARTY_PREFIXES
            .iter()
            .any(|prefix| target.starts_with(prefix))
        {
            self.filter.trace_level()
        } else {
            tracing::Level::WARN
        };
        meta.level() <= &max_level
    }
}
impl<S> tracing_subscriber::layer::Filter<S> for LogLevelFilter {
    /// Per-event filtering; delegates to [`LogLevelFilter::is_enabled`].
    fn enabled(
        &self,
        meta: &tracing::Metadata<'_>,
        _: &tracing_subscriber::layer::Context<'_, S>,
    ) -> bool {
        self.is_enabled(meta)
    }

    fn callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest {
        // The result of `self.enabled(metadata, ...)` will always be
        // the same for any given `Metadata`, so we can convert it into
        // an `Interest`:
        if self.is_enabled(meta) {
            Interest::always()
        } else {
            Interest::never()
        }
    }

    /// Global ceiling: nothing above the client-configured level is ever
    /// emitted; third-party targets are filtered further in `enabled`.
    fn max_level_hint(&self) -> Option<LevelFilter> {
        Some(LevelFilter::from_level(self.filter.trace_level()))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/capabilities.rs | crates/ty_server/src/capabilities.rs | use lsp_types::{
self as types, ClientCapabilities, CodeActionKind, CodeActionOptions, CompletionOptions,
DeclarationCapability, DiagnosticOptions, DiagnosticServerCapabilities,
HoverProviderCapability, InlayHintOptions, InlayHintServerCapabilities, MarkupKind,
NotebookCellSelector, NotebookSelector, OneOf, RenameOptions, SelectionRangeProviderCapability,
SemanticTokensFullOptions, SemanticTokensLegend, SemanticTokensOptions,
SemanticTokensServerCapabilities, ServerCapabilities, SignatureHelpOptions,
TextDocumentSyncCapability, TextDocumentSyncKind, TextDocumentSyncOptions,
TypeDefinitionProviderCapability, WorkDoneProgressOptions,
};
use std::str::FromStr;
use crate::PositionEncoding;
bitflags::bitflags! {
    /// Represents the resolved client capabilities for the language server.
    ///
    /// This tracks various capabilities that the client supports. Each flag
    /// is queried through a dedicated accessor on the `impl` block below;
    /// the bit positions themselves are internal and must stay unique.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
    pub(crate) struct ResolvedClientCapabilities: u32 {
        const WORKSPACE_DIAGNOSTIC_REFRESH = 1 << 0;
        const INLAY_HINT_REFRESH = 1 << 1;
        const PULL_DIAGNOSTICS = 1 << 2;
        // Whether the client accepts `LocationLink` responses for the
        // respective goto requests.
        const TYPE_DEFINITION_LINK_SUPPORT = 1 << 3;
        const DEFINITION_LINK_SUPPORT = 1 << 4;
        const DECLARATION_LINK_SUPPORT = 1 << 5;
        const PREFER_MARKDOWN_IN_HOVER = 1 << 6;
        const MULTILINE_SEMANTIC_TOKENS = 1 << 7;
        const SIGNATURE_LABEL_OFFSET_SUPPORT = 1 << 8;
        const SIGNATURE_ACTIVE_PARAMETER_SUPPORT = 1 << 9;
        const HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT = 1 << 10;
        const WORK_DONE_PROGRESS = 1 << 11;
        // File-watching capabilities, including support for glob patterns
        // relative to a base URI.
        const FILE_WATCHER_SUPPORT = 1 << 12;
        const RELATIVE_FILE_WATCHER_SUPPORT = 1 << 13;
        const DIAGNOSTIC_DYNAMIC_REGISTRATION = 1 << 14;
        const WORKSPACE_CONFIGURATION = 1 << 15;
        const COMPLETION_ITEM_LABEL_DETAILS_SUPPORT = 1 << 16;
        const DIAGNOSTIC_RELATED_INFORMATION = 1 << 17;
        const PREFER_MARKDOWN_IN_COMPLETION = 1 << 18;
    }
}
impl std::fmt::Display for ResolvedClientCapabilities {
    /// Renders the set of enabled capability flags as a debug-style list of
    /// flag names (used when logging the resolved capabilities).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_list()
            .entries(self.iter_names().map(|(name, _)| name))
            .finish()
    }
}
/// A command that the ty language server supports (see [`SupportedCommand::all`]).
#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) enum SupportedCommand {
    /// Prints debug information about the server (identifier `ty.printDebugInformation`).
    Debug,
}
impl SupportedCommand {
    /// Returns the identifier of the command.
    ///
    /// This is the string the client sends back when executing the command;
    /// it must round-trip through the `FromStr` impl below.
    const fn identifier(self) -> &'static str {
        match self {
            SupportedCommand::Debug => "ty.printDebugInformation",
        }
    }

    /// Returns all the commands that the server currently supports.
    const fn all() -> [SupportedCommand; 1] {
        [SupportedCommand::Debug]
    }
}
impl FromStr for SupportedCommand {
    type Err = anyhow::Error;

    /// Parses a command identifier sent by the client back into the
    /// corresponding [`SupportedCommand`]; unknown identifiers are an error.
    fn from_str(name: &str) -> anyhow::Result<Self, Self::Err> {
        match name {
            "ty.printDebugInformation" => Ok(Self::Debug),
            unknown => Err(anyhow::anyhow!("Invalid command `{unknown}`")),
        }
    }
}
impl ResolvedClientCapabilities {
    /// Returns `true` if the client supports workspace diagnostic refresh.
    pub(crate) const fn supports_workspace_diagnostic_refresh(self) -> bool {
        self.contains(Self::WORKSPACE_DIAGNOSTIC_REFRESH)
    }
    /// Returns `true` if the client supports workspace configuration.
    pub(crate) const fn supports_workspace_configuration(self) -> bool {
        self.contains(Self::WORKSPACE_CONFIGURATION)
    }
    /// Returns `true` if the client supports inlay hint refresh.
    pub(crate) const fn supports_inlay_hint_refresh(self) -> bool {
        self.contains(Self::INLAY_HINT_REFRESH)
    }
    /// Returns `true` if the client supports pull diagnostics.
    pub(crate) const fn supports_pull_diagnostics(self) -> bool {
        self.contains(Self::PULL_DIAGNOSTICS)
    }
    /// Returns `true` if the client supports definition links in goto type definition.
    pub(crate) const fn supports_type_definition_link(self) -> bool {
        self.contains(Self::TYPE_DEFINITION_LINK_SUPPORT)
    }
    /// Returns `true` if the client supports definition links in goto definition.
    pub(crate) const fn supports_definition_link(self) -> bool {
        self.contains(Self::DEFINITION_LINK_SUPPORT)
    }
    /// Returns `true` if the client supports definition links in goto declaration.
    pub(crate) const fn supports_declaration_link(self) -> bool {
        self.contains(Self::DECLARATION_LINK_SUPPORT)
    }
    /// Returns `true` if the client prefers markdown in hover responses.
    pub(crate) const fn prefers_markdown_in_hover(self) -> bool {
        self.contains(Self::PREFER_MARKDOWN_IN_HOVER)
    }
    /// Returns `true` if the client supports multiline semantic tokens.
    pub(crate) const fn supports_multiline_semantic_tokens(self) -> bool {
        self.contains(Self::MULTILINE_SEMANTIC_TOKENS)
    }
    /// Returns `true` if the client supports signature label offsets in signature help.
    pub(crate) const fn supports_signature_label_offset(self) -> bool {
        self.contains(Self::SIGNATURE_LABEL_OFFSET_SUPPORT)
    }
    /// Returns `true` if the client supports per-signature active parameter in signature help.
    pub(crate) const fn supports_signature_active_parameter(self) -> bool {
        self.contains(Self::SIGNATURE_ACTIVE_PARAMETER_SUPPORT)
    }
    /// Returns `true` if the client supports hierarchical document symbols.
    pub(crate) const fn supports_hierarchical_document_symbols(self) -> bool {
        self.contains(Self::HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT)
    }
    /// Returns `true` if the client supports work done progress.
    pub(crate) const fn supports_work_done_progress(self) -> bool {
        self.contains(Self::WORK_DONE_PROGRESS)
    }
    /// Returns `true` if the client supports file watcher capabilities.
    pub(crate) const fn supports_file_watcher(self) -> bool {
        self.contains(Self::FILE_WATCHER_SUPPORT)
    }
    /// Returns `true` if the client supports relative file watcher capabilities.
    ///
    /// This permits specifying a "base uri" that a glob is interpreted
    /// relative to.
    pub(crate) const fn supports_relative_file_watcher(self) -> bool {
        self.contains(Self::RELATIVE_FILE_WATCHER_SUPPORT)
    }
    /// Returns `true` if the client supports dynamic registration for diagnostic capabilities.
    pub(crate) const fn supports_diagnostic_dynamic_registration(self) -> bool {
        self.contains(Self::DIAGNOSTIC_DYNAMIC_REGISTRATION)
    }
    /// Returns `true` if the client has related information support for diagnostics.
    pub(crate) const fn supports_diagnostic_related_information(self) -> bool {
        self.contains(Self::DIAGNOSTIC_RELATED_INFORMATION)
    }
    /// Returns `true` if the client supports "label details" in completion items.
    pub(crate) const fn supports_completion_item_label_details(self) -> bool {
        self.contains(Self::COMPLETION_ITEM_LABEL_DETAILS_SUPPORT)
    }
    /// Returns `true` if the client prefers Markdown over plain text in completion items.
    pub(crate) const fn prefers_markdown_in_completion(self) -> bool {
        self.contains(Self::PREFER_MARKDOWN_IN_COMPLETION)
    }
    /// Resolves the capability flags from the raw [`ClientCapabilities`]
    /// sent by the client during initialization.
    ///
    /// Missing or `None` capabilities resolve to "unsupported"
    /// (`unwrap_or_default()` on `Option<bool>` yields `false`).
    pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
        let mut flags = Self::empty();
        let workspace = client_capabilities.workspace.as_ref();
        let text_document = client_capabilities.text_document.as_ref();
        // `workspace/*` capabilities.
        if workspace
            .and_then(|workspace| workspace.diagnostics.as_ref()?.refresh_support)
            .unwrap_or_default()
        {
            flags |= Self::WORKSPACE_DIAGNOSTIC_REFRESH;
        }
        if workspace
            .and_then(|workspace| workspace.configuration)
            .unwrap_or_default()
        {
            flags |= Self::WORKSPACE_CONFIGURATION;
        }
        if workspace
            .and_then(|workspace| workspace.inlay_hint.as_ref()?.refresh_support)
            .unwrap_or_default()
        {
            flags |= Self::INLAY_HINT_REFRESH;
        }
        if let Some(capabilities) =
            workspace.and_then(|workspace| workspace.did_change_watched_files.as_ref())
        {
            if capabilities.dynamic_registration == Some(true) {
                flags |= Self::FILE_WATCHER_SUPPORT;
            }
            if capabilities.relative_pattern_support == Some(true) {
                flags |= Self::RELATIVE_FILE_WATCHER_SUPPORT;
            }
        }
        // `textDocument/*` capabilities.
        // Note: the mere presence of the `diagnostic` capability enables pull diagnostics.
        if let Some(diagnostic) =
            text_document.and_then(|text_document| text_document.diagnostic.as_ref())
        {
            flags |= Self::PULL_DIAGNOSTICS;
            if diagnostic.dynamic_registration == Some(true) {
                flags |= Self::DIAGNOSTIC_DYNAMIC_REGISTRATION;
            }
        }
        if let Some(publish_diagnostics) =
            text_document.and_then(|text_document| text_document.publish_diagnostics.as_ref())
        {
            if publish_diagnostics.related_information == Some(true) {
                flags |= Self::DIAGNOSTIC_RELATED_INFORMATION;
            }
        }
        if text_document
            .and_then(|text_document| text_document.type_definition?.link_support)
            .unwrap_or_default()
        {
            flags |= Self::TYPE_DEFINITION_LINK_SUPPORT;
        }
        if text_document
            .and_then(|text_document| text_document.definition?.link_support)
            .unwrap_or_default()
        {
            flags |= Self::DEFINITION_LINK_SUPPORT;
        }
        if text_document
            .and_then(|text_document| text_document.declaration?.link_support)
            .unwrap_or_default()
        {
            flags |= Self::DECLARATION_LINK_SUPPORT;
        }
        // Markdown is preferred when it appears anywhere in the client's
        // accepted content formats (not necessarily first).
        if text_document
            .and_then(|text_document| {
                Some(
                    text_document
                        .hover
                        .as_ref()?
                        .content_format
                        .as_ref()?
                        .contains(&MarkupKind::Markdown),
                )
            })
            .unwrap_or_default()
        {
            flags |= Self::PREFER_MARKDOWN_IN_HOVER;
        }
        if text_document
            .and_then(|text_document| {
                Some(
                    text_document
                        .completion
                        .as_ref()?
                        .completion_item
                        .as_ref()?
                        .documentation_format
                        .as_ref()?
                        .contains(&MarkupKind::Markdown),
                )
            })
            .unwrap_or_default()
        {
            flags |= Self::PREFER_MARKDOWN_IN_COMPLETION;
        }
        if text_document
            .and_then(|text_document| {
                text_document
                    .semantic_tokens
                    .as_ref()?
                    .multiline_token_support
            })
            .unwrap_or_default()
        {
            flags |= Self::MULTILINE_SEMANTIC_TOKENS;
        }
        if text_document
            .and_then(|text_document| {
                text_document
                    .signature_help
                    .as_ref()?
                    .signature_information
                    .as_ref()?
                    .parameter_information
                    .as_ref()?
                    .label_offset_support
            })
            .unwrap_or_default()
        {
            flags |= Self::SIGNATURE_LABEL_OFFSET_SUPPORT;
        }
        if text_document
            .and_then(|text_document| {
                text_document
                    .signature_help
                    .as_ref()?
                    .signature_information
                    .as_ref()?
                    .active_parameter_support
            })
            .unwrap_or_default()
        {
            flags |= Self::SIGNATURE_ACTIVE_PARAMETER_SUPPORT;
        }
        if text_document
            .and_then(|text_document| {
                text_document
                    .document_symbol
                    .as_ref()?
                    .hierarchical_document_symbol_support
            })
            .unwrap_or_default()
        {
            flags |= Self::HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT;
        }
        // `window/*` capabilities.
        if client_capabilities
            .window
            .as_ref()
            .and_then(|window| window.work_done_progress)
            .unwrap_or_default()
        {
            flags |= Self::WORK_DONE_PROGRESS;
        }
        if text_document
            .and_then(|text_document| text_document.completion.as_ref())
            .and_then(|completion| completion.completion_item.as_ref())
            .and_then(|completion_item| completion_item.label_details_support)
            .unwrap_or_default()
        {
            flags |= Self::COMPLETION_ITEM_LABEL_DETAILS_SUPPORT;
        }
        flags
    }
}
/// Creates the server capabilities based on the resolved client capabilities and resolved global
/// settings from the initialization options.
pub(crate) fn server_capabilities(
    position_encoding: PositionEncoding,
    resolved_client_capabilities: ResolvedClientCapabilities,
) -> ServerCapabilities {
    let diagnostic_provider =
        if resolved_client_capabilities.supports_diagnostic_dynamic_registration() {
            // If the client supports dynamic registration, we will register the diagnostic
            // capabilities dynamically based on the `ty.diagnosticMode` setting.
            None
        } else {
            // Otherwise, we always advertise support for workspace and pull diagnostics.
            Some(DiagnosticServerCapabilities::Options(
                server_diagnostic_options(true),
            ))
        };
    ServerCapabilities {
        position_encoding: Some(position_encoding.into()),
        // Only quick-fix code actions are offered.
        code_action_provider: Some(types::CodeActionProviderCapability::Options(
            CodeActionOptions {
                code_action_kinds: Some(vec![CodeActionKind::QUICKFIX]),
                ..CodeActionOptions::default()
            },
        )),
        // Advertise the commands from `SupportedCommand::all()` by their identifiers.
        execute_command_provider: Some(types::ExecuteCommandOptions {
            commands: SupportedCommand::all()
                .map(|command| command.identifier().to_string())
                .to_vec(),
            work_done_progress_options: WorkDoneProgressOptions {
                work_done_progress: Some(false),
            },
        }),
        diagnostic_provider,
        // The server applies incremental text edits rather than full-document syncs.
        text_document_sync: Some(TextDocumentSyncCapability::Options(
            TextDocumentSyncOptions {
                open_close: Some(true),
                change: Some(TextDocumentSyncKind::INCREMENTAL),
                ..Default::default()
            },
        )),
        type_definition_provider: Some(TypeDefinitionProviderCapability::Simple(true)),
        definition_provider: Some(OneOf::Left(true)),
        declaration_provider: Some(DeclarationCapability::Simple(true)),
        references_provider: Some(OneOf::Left(true)),
        rename_provider: Some(OneOf::Right(server_rename_options())),
        document_highlight_provider: Some(OneOf::Left(true)),
        hover_provider: Some(HoverProviderCapability::Simple(true)),
        signature_help_provider: Some(SignatureHelpOptions {
            trigger_characters: Some(vec!["(".to_string(), ",".to_string()]),
            retrigger_characters: Some(vec![")".to_string()]),
            work_done_progress_options: WorkDoneProgressOptions::default(),
        }),
        inlay_hint_provider: Some(OneOf::Right(InlayHintServerCapabilities::Options(
            InlayHintOptions::default(),
        ))),
        semantic_tokens_provider: Some(SemanticTokensServerCapabilities::SemanticTokensOptions(
            SemanticTokensOptions {
                work_done_progress_options: WorkDoneProgressOptions::default(),
                // The legend lists the token types/modifiers produced by `ty_ide`;
                // token indices in responses refer to positions in these lists.
                legend: SemanticTokensLegend {
                    token_types: ty_ide::SemanticTokenType::all()
                        .iter()
                        .map(|token_type| token_type.as_lsp_concept().into())
                        .collect(),
                    token_modifiers: ty_ide::SemanticTokenModifier::all_names()
                        .iter()
                        .map(|&s| s.into())
                        .collect(),
                },
                range: Some(true),
                full: Some(SemanticTokensFullOptions::Bool(true)),
            },
        )),
        completion_provider: Some(CompletionOptions {
            trigger_characters: Some(vec!['.'.to_string()]),
            ..Default::default()
        }),
        selection_range_provider: Some(SelectionRangeProviderCapability::Simple(true)),
        document_symbol_provider: Some(OneOf::Left(true)),
        workspace_symbol_provider: Some(OneOf::Left(true)),
        // Sync notebooks cell-by-cells, selecting Python cells in any notebook.
        notebook_document_sync: Some(OneOf::Left(lsp_types::NotebookDocumentSyncOptions {
            save: Some(false),
            notebook_selector: [NotebookSelector::ByCells {
                notebook: None,
                cells: vec![NotebookCellSelector {
                    language: "python".to_string(),
                }],
            }]
            .to_vec(),
        })),
        ..Default::default()
    }
}
/// Builds the default [`DiagnosticOptions`] advertised by the server.
pub(crate) fn server_diagnostic_options(workspace_diagnostics: bool) -> DiagnosticOptions {
    // Currently, the server only supports reporting work done progress for
    // "workspace" diagnostic mode, so progress reporting follows the
    // `workspace_diagnostics` flag.
    let work_done_progress_options = WorkDoneProgressOptions {
        work_done_progress: Some(workspace_diagnostics),
    };
    DiagnosticOptions {
        identifier: Some(crate::DIAGNOSTIC_NAME.to_string()),
        inter_file_dependencies: true,
        workspace_diagnostics,
        work_done_progress_options,
    }
}
/// Builds the [`RenameOptions`] advertised by the server.
pub(crate) fn server_rename_options() -> RenameOptions {
    // `prepare_provider` advertises support for `textDocument/prepareRename`.
    let prepare_provider = Some(true);
    RenameOptions {
        prepare_provider,
        work_done_progress_options: WorkDoneProgressOptions::default(),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session/settings.rs | crates/ty_server/src/session/settings.rs | use super::options::DiagnosticMode;
use ty_ide::{CompletionSettings, InlayHintSettings};
use ty_project::metadata::options::ProjectOptionsOverrides;
/// Resolved client settings that are shared across all workspaces.
#[derive(Clone, Default, Debug, PartialEq)]
pub(crate) struct GlobalSettings {
    /// How diagnostics are computed and reported; see [`DiagnosticMode`].
    pub(super) diagnostic_mode: DiagnosticMode,
    /// Experimental feature toggles (currently an empty placeholder).
    pub(super) experimental: ExperimentalSettings,
    /// Whether syntax errors should be reported.
    pub(super) show_syntax_errors: bool,
}
impl GlobalSettings {
    /// Returns the resolved diagnostic mode.
    pub(crate) fn diagnostic_mode(&self) -> DiagnosticMode {
        self.diagnostic_mode
    }
    /// Returns whether syntax errors should be reported.
    pub(crate) fn show_syntax_errors(&self) -> bool {
        self.show_syntax_errors
    }
}
/// Experimental feature settings.
///
/// Currently a unit struct: no experimental flags are defined.
#[derive(Clone, Default, Debug, PartialEq)]
pub(crate) struct ExperimentalSettings;
/// Resolved client settings for a specific workspace.
///
/// These settings are meant to be used directly by the server, and are *not* a 1:1 representation
/// with how the client sends them.
#[derive(Default, Debug)]
pub(crate) struct WorkspaceSettings {
    /// When `true`, language-service features are disabled for this workspace.
    pub(super) disable_language_services: bool,
    /// Inlay hint settings (type defined in `ty_ide`).
    pub(super) inlay_hints: InlayHintSettings,
    /// Completion settings (type defined in `ty_ide`).
    pub(super) completions: CompletionSettings,
    /// Optional overrides applied on top of the project's own options.
    pub(super) overrides: Option<ProjectOptionsOverrides>,
}
impl WorkspaceSettings {
    /// Returns whether the client disabled language services for this workspace.
    pub(crate) fn is_language_services_disabled(&self) -> bool {
        self.disable_language_services
    }
    /// Returns the project option overrides, if any were provided.
    pub(crate) fn project_options_overrides(&self) -> Option<&ProjectOptionsOverrides> {
        self.overrides.as_ref()
    }
    /// Returns the inlay hint settings for this workspace.
    pub(crate) fn inlay_hints(&self) -> &InlayHintSettings {
        &self.inlay_hints
    }
    /// Returns the completion settings for this workspace.
    pub(crate) fn completions(&self) -> &CompletionSettings {
        &self.completions
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session/index.rs | crates/ty_server/src/session/index.rs | use rustc_hash::FxHashMap;
use std::sync::Arc;
use crate::document::{DocumentKey, LanguageId};
use crate::session::DocumentHandle;
use crate::{
PositionEncoding, TextDocument,
document::{DocumentVersion, NotebookDocument},
};
/// Stores and tracks all open documents in a session, along with their associated settings.
#[derive(Debug)]
pub(crate) struct Index {
    /// Maps each document key (derived from the document's URL) to the associated document.
    documents: FxHashMap<DocumentKey, Document>,
}
impl Index {
pub(super) fn new() -> Self {
Self {
documents: FxHashMap::default(),
}
}
pub(super) fn text_documents(
&self,
) -> impl Iterator<Item = (&DocumentKey, &TextDocument)> + '_ {
self.documents.iter().filter_map(|(key, doc)| {
let text_document = doc.as_text()?;
Some((key, text_document))
})
}
pub(crate) fn document_handle(
&self,
url: &lsp_types::Url,
) -> Result<DocumentHandle, DocumentError> {
let key = DocumentKey::from_url(url);
let Some(document) = self.documents.get(&key) else {
return Err(DocumentError::NotFound(key));
};
Ok(DocumentHandle::from_document(document))
}
#[expect(dead_code)]
pub(super) fn notebook_document_keys(&self) -> impl Iterator<Item = &DocumentKey> + '_ {
self.documents
.iter()
.filter(|(_, doc)| doc.as_notebook().is_some())
.map(|(key, _)| key)
}
pub(super) fn update_notebook_document(
&mut self,
notebook_key: &DocumentKey,
cells: Option<lsp_types::NotebookDocumentCellChange>,
metadata: Option<serde_json::Map<String, serde_json::Value>>,
new_version: DocumentVersion,
encoding: PositionEncoding,
) -> crate::Result<()> {
let document = self.document_mut(notebook_key)?;
let Some(notebook) = document.as_notebook_mut() else {
anyhow::bail!("Notebook document path does not point to a notebook document");
};
let (structure, data, text_content) = cells
.map(|cells| {
let lsp_types::NotebookDocumentCellChange {
structure,
data,
text_content,
} = cells;
(structure, data, text_content)
})
.unwrap_or_default();
let (array, did_open, did_close) = structure
.map(|structure| {
let lsp_types::NotebookDocumentCellChangeStructure {
array,
did_open,
did_close,
} = structure;
(array, did_open, did_close)
})
.unwrap_or_else(|| {
(
lsp_types::NotebookCellArrayChange {
start: 0,
delete_count: 0,
cells: None,
},
None,
None,
)
});
tracing::info!(
"version: {}, new_version: {}",
notebook.version(),
new_version
);
notebook.update(array, data.unwrap_or_default(), metadata, new_version)?;
let notebook_path = notebook_key.to_file_path();
for opened_cell in did_open.into_iter().flatten() {
self.documents.insert(
DocumentKey::from_url(&opened_cell.uri),
Document::Text(
TextDocument::new(opened_cell.uri, opened_cell.text, opened_cell.version)
.with_language_id(&opened_cell.language_id)
.with_notebook(notebook_path.clone())
.into(),
),
);
}
for updated_cell in text_content.into_iter().flatten() {
let Ok(document_mut) =
self.document_mut(&DocumentKey::from_url(&updated_cell.document.uri))
else {
tracing::warn!(
"Could not find document for cell {}",
updated_cell.document.uri
);
continue;
};
let Some(document) = document_mut.as_text_mut() else {
continue;
};
if updated_cell.changes.is_empty() {
document.update_version(updated_cell.document.version);
} else {
document.apply_changes(
updated_cell.changes,
updated_cell.document.version,
encoding,
);
}
}
// VS Code sends a separate `didClose` request for every cell
// and they're removed from the metadata (notebook document)
// because they get deleted as part of `change.cells.structure.array`
let _ = did_close;
let notebook = self.document(notebook_key).unwrap().as_notebook().unwrap();
let ruff_notebook = notebook.to_ruff_notebook(self);
tracing::debug!("Updated notebook: {:?}", ruff_notebook.source_code());
Ok(())
}
/// Create a document reference corresponding to the given document key.
///
/// Returns an error if the document is not found or if the path cannot be converted to a URL.
pub(crate) fn document(&self, key: &DocumentKey) -> Result<&Document, DocumentError> {
let Some(document) = self.documents.get(key) else {
return Err(DocumentError::NotFound(key.clone()));
};
Ok(document)
}
pub(super) fn open_text_document(&mut self, document: TextDocument) -> DocumentHandle {
let key = DocumentKey::from_url(document.url());
let handle = DocumentHandle::from_text_document(&document);
self.documents.insert(key, Document::new_text(document));
handle
}
pub(super) fn open_notebook_document(&mut self, document: NotebookDocument) -> DocumentHandle {
let handle = DocumentHandle::from_notebook_document(&document);
let notebook_key = DocumentKey::from_url(document.url());
self.documents
.insert(notebook_key, Document::new_notebook(document));
handle
}
pub(super) fn close_document(&mut self, key: &DocumentKey) -> Result<Document, DocumentError> {
let Some(document) = self.documents.remove(key) else {
return Err(DocumentError::NotFound(key.clone()));
};
Ok(document)
}
pub(super) fn document_mut(
&mut self,
key: &DocumentKey,
) -> Result<&mut Document, DocumentError> {
let Some(controller) = self.documents.get_mut(key) else {
return Err(DocumentError::NotFound(key.clone()));
};
Ok(controller)
}
}
/// A mutable handler to an underlying document.
///
/// Documents are stored behind an [`Arc`] so they can be shared cheaply;
/// mutation goes through [`Arc::make_mut`] (clone-on-write) in the
/// `as_*_mut` accessors below.
#[derive(Debug)]
pub(crate) enum Document {
    /// A standalone text document or a single notebook cell.
    Text(Arc<TextDocument>),
    /// A notebook document.
    Notebook(Arc<NotebookDocument>),
}
impl Document {
    /// Wraps a text document.
    pub(super) fn new_text(document: TextDocument) -> Self {
        Self::Text(Arc::new(document))
    }
    /// Wraps a notebook document.
    pub(super) fn new_notebook(document: NotebookDocument) -> Self {
        Self::Notebook(Arc::new(document))
    }
    /// Returns the version of the underlying document.
    pub(crate) fn version(&self) -> DocumentVersion {
        match self {
            Self::Text(text) => text.version(),
            Self::Notebook(notebook) => notebook.version(),
        }
    }
    /// Returns the language ID of the underlying text document.
    ///
    /// Notebook documents have no single language ID, so `None` is returned.
    pub(crate) fn language_id(&self) -> Option<LanguageId> {
        match self {
            Self::Text(text) => text.language_id(),
            Self::Notebook(_) => None,
        }
    }
    /// Returns a mutable reference to the notebook document, cloning the
    /// shared contents first if necessary (`Arc::make_mut`).
    pub(crate) fn as_notebook_mut(&mut self) -> Option<&mut NotebookDocument> {
        if let Self::Notebook(notebook) = self {
            Some(Arc::make_mut(notebook))
        } else {
            None
        }
    }
    /// Returns the notebook document, or `None` for text documents.
    pub(crate) fn as_notebook(&self) -> Option<&NotebookDocument> {
        if let Self::Notebook(notebook) = self {
            Some(notebook)
        } else {
            None
        }
    }
    /// Returns the text document, or `None` for notebooks.
    pub(crate) fn as_text(&self) -> Option<&TextDocument> {
        if let Self::Text(text) = self {
            Some(text)
        } else {
            None
        }
    }
    /// Returns a mutable reference to the text document, cloning the shared
    /// contents first if necessary (`Arc::make_mut`).
    pub(crate) fn as_text_mut(&mut self) -> Option<&mut TextDocument> {
        if let Self::Text(text) = self {
            Some(Arc::make_mut(text))
        } else {
            None
        }
    }
}
/// Errors that can occur when looking up documents in the [`Index`].
#[derive(Debug, Clone, thiserror::Error)]
pub(crate) enum DocumentError {
    /// No open document is registered under the given key.
    #[error("document not found for key: {0}")]
    NotFound(DocumentKey),
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session/client.rs | crates/ty_server/src/session/client.rs | use crate::Session;
use crate::server::{Action, ConnectionSender, SendRequest};
use crate::server::{Event, MainLoopSender};
use lsp_server::{ErrorCode, Message, Notification, RequestId, ResponseError};
use serde_json::Value;
use std::any::TypeId;
use std::fmt::Display;
/// A handle for sending messages to the LSP client, either directly or by
/// queueing them through the server's main loop.
#[derive(Debug, Clone)]
pub(crate) struct Client {
    /// Channel to send messages back to the main loop.
    main_loop_sender: MainLoopSender,
    /// Channel to send messages directly to the LSP client without going through the main loop.
    ///
    /// This is generally preferred because it reduces pressure on the main loop but it may not always be
    /// possible if access to data on [`Session`] is required, which background tasks don't have.
    client_sender: ConnectionSender,
}
impl Client {
    /// Creates a new client handle from the main-loop and direct client channels.
    pub(crate) fn new(main_loop_sender: MainLoopSender, client_sender: ConnectionSender) -> Self {
        Self {
            main_loop_sender,
            client_sender,
        }
    }
    /// Sends a request of kind `R` to the client, with associated parameters.
    ///
    /// The request is sent immediately.
    /// The `response_handler` will be dispatched as soon as the client response
    /// is processed on the main-loop. The handler always runs on the main-loop thread.
    ///
    /// Use [`self.send_deferred_request`] if you are in a background task
    /// where you don't have access to the session. But note, that the
    /// request won't be send immediately, but rather queued up in the main loop
    pub(crate) fn send_request<R>(
        &self,
        session: &Session,
        params: R::Params,
        response_handler: impl FnOnce(&Client, R::Result) + Send + 'static,
    ) where
        R: lsp_types::request::Request,
    {
        self.send_request_raw(
            session,
            SendRequest {
                method: R::METHOD.to_string(),
                params: serde_json::to_value(params).expect("Params to be serializable"),
                response_handler: ClientResponseHandler::for_request::<R>(response_handler),
            },
        );
    }
    /// Sends a request of kind `R` to the client, with associated parameters.
    ///
    /// The request isn't sent immediately, but rather queued up in the main loop.
    /// The `response_handler` will be dispatched as soon as the client response
    /// is processed on the main-loop. The handler always runs on the main-loop thread.
    ///
    /// Use [`self.send_request`] if you are in a foreground task and have access to the session.
    pub(crate) fn send_deferred_request<R>(
        &self,
        params: R::Params,
        response_handler: impl FnOnce(&Client, R::Result) + Send + 'static,
    ) where
        R: lsp_types::request::Request,
    {
        // NOTE(review): `unwrap` assumes the main-loop receiver outlives every
        // `Client`; presumably only violated during shutdown — confirm.
        self.main_loop_sender
            .send(Event::Action(Action::SendRequest(SendRequest {
                method: R::METHOD.to_string(),
                params: serde_json::to_value(params).expect("Params to be serializable"),
                response_handler: ClientResponseHandler::for_request::<R>(response_handler),
            })))
            .unwrap();
    }
    /// Registers the request's response handler in the session's outgoing
    /// request queue and sends the request directly to the client.
    pub(crate) fn send_request_raw(&self, session: &Session, request: SendRequest) {
        let id = session
            .request_queue()
            .outgoing()
            .register(request.response_handler);
        if let Err(err) = self
            .client_sender
            .send(Message::Request(lsp_server::Request {
                id,
                method: request.method.clone(),
                params: request.params,
            }))
        {
            tracing::error!(
                "Failed to send request `{}` because the client sender is closed: {err}",
                request.method
            );
        }
    }
    /// Sends a notification to the client.
    pub(crate) fn send_notification<N>(&self, params: N::Params)
    where
        N: lsp_types::notification::Notification,
    {
        if let Err(err) =
            self.client_sender
                .send(lsp_server::Message::Notification(Notification::new(
                    N::METHOD.to_string(),
                    params,
                )))
        {
            tracing::error!(
                "Failed to send notification `{method}` because the client sender is closed: {err}",
                method = N::METHOD,
            );
        }
    }
    /// Sends a notification without any parameters to the client.
    ///
    /// This is useful for notifications that don't require any data.
    #[expect(dead_code)]
    pub(crate) fn send_notification_no_params(&self, method: &str) {
        if let Err(err) =
            self.client_sender
                .send(lsp_server::Message::Notification(Notification::new(
                    method.to_string(),
                    Value::Null,
                )))
        {
            tracing::error!(
                "Failed to send notification `{method}` because the client sender is closed: {err}",
            );
        }
    }
    /// Sends a response to the client for a given request ID.
    ///
    /// The response isn't sent immediately. Instead, it's queued up in the main loop
    /// and checked for cancellation (each request must have exactly one response).
    pub(crate) fn respond<R>(&self, id: &RequestId, result: crate::server::Result<R>)
    where
        R: serde::Serialize,
    {
        let response = match result {
            Ok(res) => lsp_server::Response::new_ok(id.clone(), res),
            Err(crate::server::Error { code, error }) => {
                lsp_server::Response::new_err(id.clone(), code as i32, error.to_string())
            }
        };
        self.main_loop_sender
            .send(Event::Action(Action::SendResponse(response)))
            .unwrap();
    }
    /// Sends an error response to the client for a given request ID.
    ///
    /// The response isn't sent immediately. Instead, it's queued up in the main loop.
    pub(crate) fn respond_err(&self, id: RequestId, error: lsp_server::ResponseError) {
        let response = lsp_server::Response {
            id,
            result: None,
            error: Some(error),
        };
        self.main_loop_sender
            .send(Event::Action(Action::SendResponse(response)))
            .unwrap();
    }
    /// Shows a message to the user.
    ///
    /// This opens a pop up in VS Code showing `message`.
    pub(crate) fn show_message(&self, message: impl Display, message_type: lsp_types::MessageType) {
        self.send_notification::<lsp_types::notification::ShowMessage>(
            lsp_types::ShowMessageParams {
                typ: message_type,
                message: message.to_string(),
            },
        );
    }
    /// Sends a request to display a warning to the client with a formatted message. The warning is
    /// sent in a `window/showMessage` notification.
    ///
    /// Logs an error if the message could not be sent.
    pub(crate) fn show_warning_message(&self, message: impl Display) {
        self.show_message(message, lsp_types::MessageType::WARNING);
    }
    /// Sends a request to display an error to the client with a formatted message. The error is
    /// sent in a `window/showMessage` notification.
    ///
    /// Logs an error if the message could not be sent.
    pub(crate) fn show_error_message(&self, message: impl Display) {
        self.show_message(message, lsp_types::MessageType::ERROR);
    }
    /// Re-queues this request after a salsa cancellation for a retry.
    ///
    /// The main loop will skip the retry if the client cancelled the request in the meantime.
    pub(crate) fn retry(&self, request: lsp_server::Request) {
        self.main_loop_sender
            .send(Event::Action(Action::RetryRequest(request)))
            .unwrap();
    }
    /// Queues an [`Action`] for processing on the main loop.
    pub(crate) fn queue_action(&self, action: Action) {
        self.main_loop_sender.send(Event::Action(action)).unwrap();
    }
    /// Cancels the pending incoming request with the given `id`, if any, and
    /// sends a `RequestCanceled` error response for it.
    pub(crate) fn cancel(&self, session: &mut Session, id: RequestId) {
        let method_name = session.request_queue_mut().incoming_mut().cancel(&id);
        if let Some(method_name) = method_name {
            tracing::debug!("Cancelled request id={id} method={method_name}");
            let error = ResponseError {
                code: ErrorCode::RequestCanceled as i32,
                message: "request was cancelled by client".to_owned(),
                data: None,
            };
            // Use `client_sender` here instead of `respond_err` because
            // `respond_err` filters out responses for canceled requests (which we just did!).
            if let Err(err) = self
                .client_sender
                .send(Message::Response(lsp_server::Response {
                    id,
                    result: None,
                    error: Some(error),
                }))
            {
                tracing::error!(
                    "Failed to send cancellation response for request `{method_name}` because the client sender is closed: {err}",
                );
            }
        }
    }
}
/// Type erased handler for client responses.
///
/// Wraps a boxed closure that deserializes the raw `lsp_server::Response`
/// before dispatching it to the typed handler (see `for_request`).
#[allow(clippy::type_complexity)]
pub(crate) struct ClientResponseHandler(Box<dyn FnOnce(&Client, lsp_server::Response) + Send>);
impl ClientResponseHandler {
    /// Wraps a typed response handler for request kind `R` into a type-erased
    /// closure that deserializes the raw client response before dispatching.
    ///
    /// Client errors and deserialization failures are logged instead of being
    /// forwarded to the handler.
    fn for_request<R>(response_handler: impl FnOnce(&Client, R::Result) + Send + 'static) -> Self
    where
        R: lsp_types::request::Request,
    {
        Self(Box::new(
            move |client: &Client, response: lsp_server::Response| {
                let _span =
                    tracing::debug_span!("client_response", id=%response.id, method = R::METHOD)
                        .entered();
                match (response.error, response.result) {
                    (Some(err), _) => {
                        tracing::error!(
                            "Got an error from the client (code {code}, method {method}): {message}",
                            code = err.code,
                            message = &err.message,
                            method = R::METHOD
                        );
                    }
                    (None, Some(response)) => match serde_json::from_value(response) {
                        Ok(response) => response_handler(client, response),
                        Err(error) => {
                            tracing::error!(
                                "Failed to deserialize client response (method={method}): {error}",
                                method = R::METHOD
                            );
                        }
                    },
                    (None, None) => {
                        if TypeId::of::<R::Result>() == TypeId::of::<()>() {
                            // We can't call `response_handler(())` directly here, but
                            // since we _know_ the type expected is `()`, we can use
                            // `from_value(Value::Null)`. `R::Result` implements `DeserializeOwned`,
                            // so this branch works in the general case but we'll only
                            // hit it if the concrete type is `()`, so the `unwrap()` is safe here.
                            response_handler(client, serde_json::from_value(Value::Null).unwrap());
                        } else {
                            tracing::error!(
                                "Invalid client response: did not contain a result or error (method={method})",
                                method = R::METHOD
                            );
                        }
                    }
                }
            },
        ))
    }
    /// Invokes the wrapped handler with the raw response received from the client.
    pub(crate) fn handle_response(self, client: &Client, response: lsp_server::Response) {
        let handler = self.0;
        handler(client, response);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session/options.rs | crates/ty_server/src/session/options.rs | use std::collections::HashMap;
use lsp_types::Url;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_macros::Combine;
use ruff_python_ast::PythonVersion;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use ty_combine::Combine;
use ty_ide::{CompletionSettings, InlayHintSettings};
use ty_project::metadata::Options as TyOptions;
use ty_project::metadata::options::ProjectOptionsOverrides;
use ty_project::metadata::value::{RangedValue, RelativePathBuf, ValueSource};
use super::settings::{ExperimentalSettings, GlobalSettings, WorkspaceSettings};
use crate::logging::LogLevel;
use crate::session::client::Client;
/// Initialization options that are set once at server startup that never change.
///
/// There are two sets of options that are defined here:
/// 1. Options that are static, set once and are required at server startup. Any changes to these
///    options require a server restart to take effect.
/// 2. Options that are dynamic and can change during the runtime of the server, such as the
///    diagnostic mode.
///
/// The dynamic options are also accepted during the initialization phase, so that we can support
/// clients that do not support the `workspace/didChangeConfiguration` notification.
///
/// Note that this structure has a limitation in that there's no way to specify different options
/// for different workspaces in the initialization options which means that the server will not
/// support multiple workspaces for clients that do not implement the `workspace/configuration`
/// endpoint. Most editors support this endpoint, so this is not a significant limitation.
#[derive(Clone, Debug, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct InitializationOptions {
    /// The log level for the language server.
    ///
    /// Static: requires a restart to change.
    pub(crate) log_level: Option<LogLevel>,
    /// Path to the log file, defaults to stderr if not set.
    ///
    /// Tildes (`~`) and environment variables (e.g., `$HOME`) are expanded.
    /// Static: requires a restart to change.
    pub(crate) log_file: Option<SystemPathBuf>,
    /// The remaining options that are dynamic and can change during the runtime of the server.
    #[serde(flatten)]
    pub(crate) options: ClientOptions,
}
impl InitializationOptions {
    /// Builds the initialization options from the raw JSON value sent by the client.
    ///
    /// Returns the parsed options together with an optional deserialization error. When the
    /// value is absent the defaults are returned; when it is malformed, the defaults are
    /// returned alongside the error so the server can still start up and report the problem.
    pub(crate) fn from_value(
        options: Option<Value>,
    ) -> (InitializationOptions, Option<serde_json::Error>) {
        match options.map(serde_json::from_value) {
            None => (InitializationOptions::default(), None),
            Some(Ok(parsed)) => (parsed, None),
            Some(Err(err)) => (InitializationOptions::default(), Some(err)),
        }
    }
}
/// Options that configure the behavior of the language server.
///
/// This is the direct representation of the options that the client sends to the server when
/// asking for workspace configuration. These options are dynamic and can change during the runtime
/// of the server via the `workspace/didChangeConfiguration` notification.
///
/// The representation of the options is split into two parts:
/// 1. Global options contains options that are global to the language server. They are applied to
///    all workspaces managed by the language server.
/// 2. Workspace options contains options that are specific to a workspace. They are applied to the
///    workspace these options are associated with.
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct ClientOptions {
    /// Options applied to every workspace managed by this server.
    #[serde(flatten)]
    pub global: GlobalOptions,
    /// Options applied only to the workspace they're associated with.
    #[serde(flatten)]
    pub workspace: WorkspaceOptions,
    /// Additional options that aren't valid as per the schema but we accept it to provide better
    /// error message to the user.
    #[serde(flatten)]
    pub unknown: HashMap<String, Value>,
}
impl ClientOptions {
    /// Returns the options with the diagnostic mode replaced (builder style).
    #[must_use]
    pub fn with_diagnostic_mode(mut self, diagnostic_mode: DiagnosticMode) -> Self {
        self.global.diagnostic_mode = Some(diagnostic_mode);
        self
    }

    /// Returns the options with language services explicitly enabled or disabled.
    #[must_use]
    pub fn with_disable_language_services(mut self, disable_language_services: bool) -> Self {
        self.workspace.disable_language_services = Some(disable_language_services);
        self
    }

    /// Returns the options with the variable-type inlay hint preference set,
    /// creating the inlay hint options if they weren't present yet.
    #[must_use]
    pub fn with_variable_types_inlay_hints(mut self, variable_types: bool) -> Self {
        let inlay_hints = self.workspace.inlay_hints.get_or_insert_default();
        inlay_hints.variable_types = Some(variable_types);
        self
    }

    /// Returns the options with the completion auto-import preference set,
    /// creating the completion options if they weren't present yet.
    #[must_use]
    pub fn with_auto_import(mut self, enabled: bool) -> Self {
        let completions = self.workspace.completions.get_or_insert_default();
        completions.auto_import = Some(enabled);
        self
    }

    /// Returns the options with the syntax-error reporting preference set.
    #[must_use]
    pub fn with_show_syntax_errors(mut self, show_syntax_errors: bool) -> Self {
        self.global.show_syntax_errors = Some(show_syntax_errors);
        self
    }

    /// Returns the options with the unrecognized key/value pairs replaced.
    #[must_use]
    pub fn with_unknown(mut self, unknown: HashMap<String, Value>) -> Self {
        self.unknown = unknown;
        self
    }
}
/// Options that are global to the language server.
///
/// These are the dynamic options that are applied to all workspaces managed by the language
/// server.
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct GlobalOptions {
    /// Diagnostic mode for the language server.
    ///
    /// Defaults to [`DiagnosticMode::OpenFilesOnly`] when unset.
    diagnostic_mode: Option<DiagnosticMode>,
    /// Experimental features that the server provides on an opt-in basis.
    pub(crate) experimental: Option<Experimental>,
    /// If `true` or [`None`], show syntax errors as diagnostics.
    ///
    /// This is useful when using ty with other language servers, allowing the user to refer
    /// to syntax errors from only one source.
    pub(crate) show_syntax_errors: Option<bool>,
}
impl GlobalOptions {
pub(crate) fn into_settings(self) -> GlobalSettings {
let experimental = self
.experimental
.map(Experimental::into_settings)
.unwrap_or_default();
GlobalSettings {
diagnostic_mode: self.diagnostic_mode.unwrap_or_default(),
experimental,
show_syntax_errors: self.show_syntax_errors.unwrap_or(true),
}
}
}
/// Options that are specific to a workspace.
///
/// These are the dynamic options that are applied to a specific workspace.
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct WorkspaceOptions {
    /// Inline configuration, overrides settings from the configuration file.
    pub configuration: Option<ConfigurationMap>,
    /// Path to a `ty.toml` file, similar to `--config-file` on the CLI
    ///
    /// Tildes and environment variables are expanded when resolving the settings.
    pub configuration_file: Option<String>,
    /// Whether to disable language services like code completions, hover, etc.
    pub disable_language_services: Option<bool>,
    /// Options to configure inlay hints.
    pub inlay_hints: Option<InlayHintOptions>,
    /// Options to configure completions.
    pub completions: Option<CompletionOptions>,
    /// Information about the currently active Python environment in the VS Code Python extension.
    ///
    /// This is relevant only for VS Code and is populated by the ty VS Code extension.
    pub python_extension: Option<PythonExtension>,
}
impl WorkspaceOptions {
    /// Resolves the raw workspace options into concrete workspace settings.
    ///
    /// `root` is the workspace root used to absolutize the configuration file path;
    /// `client` is used to surface resolution errors to the user. All failures are
    /// non-fatal: the offending option is dropped and an error message is shown.
    pub(crate) fn into_settings(self, root: &SystemPath, client: &Client) -> WorkspaceSettings {
        // Expand `~`/env vars in the configuration file path and absolutize it
        // relative to the workspace root.
        let configuration_file =
            self.configuration_file
                .and_then(|config_file| match shellexpand::full(&config_file) {
                    Ok(path) => Some(SystemPath::absolute(&*path, root)),
                    Err(error) => {
                        client.show_error_message(format_args!(
                            "Failed to expand the environment variables \
                            for the `ty.configuration_file` setting: {error}"
                        ));
                        None
                    }
                });
        // Deserialize the inline `ty.configuration` map into project options.
        let options_overrides =
            self.configuration.and_then(|map| {
                match TyOptions::deserialize_with(
                    ValueSource::Editor,
                    serde::de::value::MapDeserializer::new(map.0.into_iter()),
                ) {
                    Ok(options) => Some(options),
                    Err(error) => {
                        client.show_error_message(format_args!(
                            "Invalid `ty.configuration` options: {error}"
                        ));
                        None
                    }
                }
            });
        let mut overrides =
            ProjectOptionsOverrides::new(configuration_file, options_overrides.unwrap_or_default());
        // If the VS Code Python extension reported an active environment, use it as the
        // fallback interpreter/version when the configuration doesn't specify one.
        if let Some(extension) = self.python_extension
            && let Some(active_environment) = extension.active_environment
        {
            overrides.fallback_python = if let Some(environment) = &active_environment.environment {
                // Prefer the environment folder; fall back to `sys.prefix` below if absent.
                environment.folder_uri.to_file_path().ok().and_then(|path| {
                    Some(RelativePathBuf::python_extension(
                        SystemPathBuf::from_path_buf(path).ok()?,
                    ))
                })
            } else {
                Some(RelativePathBuf::python_extension(
                    active_environment.executable.sys_prefix.clone(),
                ))
            };
            // Major/minor that don't fit in a `u8` are silently dropped.
            overrides.fallback_python_version =
                active_environment.version.as_ref().and_then(|version| {
                    Some(RangedValue::python_extension(PythonVersion::from((
                        u8::try_from(version.major).ok()?,
                        u8::try_from(version.minor).ok()?,
                    ))))
                });
            if let Some(python) = &overrides.fallback_python {
                tracing::debug!(
                    "Using the Python environment selected in your editor \
                    in case the configuration doesn't specify a Python environment: {python}",
                    python = python.path()
                );
            }
            if let Some(version) = &overrides.fallback_python_version {
                tracing::debug!(
                    "Using the Python version selected in your editor: {version} \
                    in case the configuration doesn't specify a Python version",
                );
            }
        }
        // Normalize "no overrides at all" to `None`.
        let overrides = if overrides == ProjectOptionsOverrides::default() {
            None
        } else {
            Some(overrides)
        };
        WorkspaceSettings {
            disable_language_services: self.disable_language_services.unwrap_or_default(),
            inlay_hints: self
                .inlay_hints
                .map(InlayHintOptions::into_settings)
                .unwrap_or_default(),
            completions: self
                .completions
                .map(CompletionOptions::into_settings)
                .unwrap_or_default(),
            overrides,
        }
    }
}
/// Inline configuration values sent by the client (the `ty.configuration` setting).
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(transparent)]
pub struct ConfigurationMap(Map<String, Value>);
impl From<Map<String, Value>> for ConfigurationMap {
    fn from(map: Map<String, Value>) -> Self {
        Self(map)
    }
}
impl Combine for ConfigurationMap {
    // Deliberately a no-op: `self` is kept as-is and `other` is discarded —
    // configuration maps are never merged key-by-key.
    fn combine_with(&mut self, _other: Self) {}
}
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct InlayHintOptions {
    variable_types: Option<bool>,
    call_argument_names: Option<bool>,
}

impl InlayHintOptions {
    /// Converts the raw options into inlay hint settings.
    /// Both hint kinds are enabled unless the client opted out.
    fn into_settings(self) -> InlayHintSettings {
        let InlayHintOptions {
            variable_types,
            call_argument_names,
        } = self;
        InlayHintSettings {
            variable_types: variable_types.unwrap_or(true),
            call_argument_names: call_argument_names.unwrap_or(true),
        }
    }
}
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CompletionOptions {
    auto_import: Option<bool>,
}

impl CompletionOptions {
    /// Converts the raw options into completion settings.
    ///
    /// N.B. The defaults chosen here must stay in sync with `CompletionSettings`'s
    /// own defaults: `WorkspaceSettings::default()` goes through
    /// `CompletionSettings::default()` whereas
    /// `WorkspaceOptions::default().into_settings()` goes through this function.
    fn into_settings(self) -> CompletionSettings {
        let CompletionOptions { auto_import } = self;
        CompletionSettings {
            auto_import: auto_import.unwrap_or(true),
        }
    }
}
/// Diagnostic mode for the language server.
#[derive(Clone, Copy, Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum DiagnosticMode {
    /// Disable all diagnostics so that ty can be used as an LSP only
    Off,
    /// Check only currently open files.
    #[default]
    OpenFilesOnly,
    /// Check all files in the workspace.
    Workspace,
}
impl DiagnosticMode {
    /// Returns `true` if the diagnostic mode is set to check all files in the workspace.
    pub(crate) const fn is_workspace(self) -> bool {
        matches!(self, DiagnosticMode::Workspace)
    }
    /// Returns `true` if the diagnostic mode is set to check only currently open files.
    pub(crate) const fn is_open_files_only(self) -> bool {
        matches!(self, DiagnosticMode::OpenFilesOnly)
    }
    /// Returns `true` if diagnostics are disabled entirely.
    pub(crate) const fn is_off(self) -> bool {
        matches!(self, DiagnosticMode::Off)
    }
}
impl Combine for DiagnosticMode {
    fn combine_with(&mut self, other: Self) {
        // Diagnostic mode is a global option but as it can be updated without a server restart,
        // it is part of the dynamic option set. But, there's no easy way to enforce the fact that
        // this option should not be set for individual workspaces. The ty VS Code extension
        // enforces this but we're not in control of other clients.
        //
        // So, this is a workaround to ensure that if the diagnostic mode is set to `workspace` in
        // either an initialization options or one of the workspace options, it is always set to
        // `workspace` in the global options.
        if other != DiagnosticMode::default() {
            *self = other;
        }
    }
}
/// Experimental features that the server provides on an opt-in basis.
///
/// Currently has no options.
#[derive(Clone, Combine, Debug, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
#[expect(
    clippy::empty_structs_with_brackets,
    reason = "The LSP fails to deserialize the options when this is a unit type"
)]
pub(crate) struct Experimental {}
impl Experimental {
    // Converts the raw options into settings; trivial while there are no fields.
    #[expect(clippy::unused_self)]
    fn into_settings(self) -> ExperimentalSettings {
        ExperimentalSettings {}
    }
}
/// State reported by the ty VS Code extension about the VS Code Python extension.
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct PythonExtension {
    /// The currently selected Python environment, if any.
    active_environment: Option<ActiveEnvironment>,
}
impl Combine for PythonExtension {
    // Combining is a programming error by design: this value is only ever supplied
    // per-workspace via `workspace/configuration`, never in initialization options.
    fn combine_with(&mut self, _other: Self) {
        panic!(
            "`python_extension` is not expected to be combined with the initialization options as \
            it's only set by the ty VS Code extension in the `workspace/configuration` request."
        );
    }
}
/// The active Python environment as reported by the VS Code Python extension.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ActiveEnvironment {
    /// The Python executable of the active environment.
    pub(crate) executable: PythonExecutable,
    /// The environment the executable belongs to, if any.
    pub(crate) environment: Option<PythonEnvironment>,
    /// The Python version of the environment, if known.
    pub(crate) version: Option<EnvironmentVersion>,
}
/// The Python version of the active environment.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct EnvironmentVersion {
    pub(crate) major: i64,
    pub(crate) minor: i64,
    #[allow(dead_code)]
    pub(crate) patch: i64,
    /// Full version string — presumably Python's `sys.version`; currently unused.
    #[allow(dead_code)]
    pub(crate) sys_version: String,
}
/// A Python environment (e.g. a virtual environment) known to the Python extension.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct PythonEnvironment {
    /// URI of the environment's root folder.
    pub(crate) folder_uri: Url,
    /// Environment kind reported by the extension — currently unused.
    #[allow(dead_code)]
    #[serde(rename = "type")]
    pub(crate) kind: String,
    #[allow(dead_code)]
    pub(crate) name: Option<String>,
}
/// The Python interpreter executable of the active environment.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct PythonExecutable {
    #[allow(dead_code)]
    pub(crate) uri: Url,
    /// The interpreter's `sys.prefix`, used as a fallback environment path.
    pub(crate) sys_prefix: SystemPathBuf,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/session/request_queue.rs | crates/ty_server/src/session/request_queue.rs | use crate::session::client::ClientResponseHandler;
use lsp_server::RequestId;
use rustc_hash::FxHashMap;
use std::cell::{Cell, OnceCell, RefCell};
use std::fmt::Formatter;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Instant;
/// Tracks the pending requests between client and server.
pub(crate) struct RequestQueue {
    /// Requests sent from the client to the server.
    incoming: Incoming,
    /// Requests sent from the server to the client.
    outgoing: Outgoing,
}
impl RequestQueue {
    /// Creates a queue with no pending requests in either direction.
    pub(super) fn new() -> Self {
        Self {
            incoming: Incoming::default(),
            outgoing: Outgoing::default(),
        }
    }

    /// Returns the client -> server request queue.
    pub(crate) fn incoming(&self) -> &Incoming {
        &self.incoming
    }

    /// Returns the client -> server request queue mutably.
    pub(crate) fn incoming_mut(&mut self) -> &mut Incoming {
        &mut self.incoming
    }

    /// Returns the server -> client request queue.
    pub(crate) fn outgoing(&self) -> &Outgoing {
        &self.outgoing
    }

    /// Returns the server -> client request queue mutably.
    pub(crate) fn outgoing_mut(&mut self) -> &mut Outgoing {
        &mut self.outgoing
    }
}
/// Requests from client -> server.
///
/// Tracks which requests are pending. Requests that aren't registered are considered completed.
///
/// A request is pending if:
///
/// * it has been registered
/// * it hasn't been cancelled
/// * it hasn't been completed
///
/// Tracking whether a request is pending is required to ensure that the server sends exactly
/// one response for every request as required by the LSP specification.
#[derive(Default, Debug)]
pub(crate) struct Incoming {
    /// All registered requests that haven't been completed or cancelled yet, keyed by id.
    pending: FxHashMap<RequestId, PendingRequest>,
}
impl Incoming {
    /// Registers a new pending request.
    pub(crate) fn register(&mut self, request_id: RequestId, method: String) {
        self.pending.insert(request_id, PendingRequest::new(method));
    }

    /// Cancels the pending request with the given id.
    ///
    /// Returns the method name if the request was still pending, `None` if it was already completed.
    pub(super) fn cancel(&mut self, request_id: &RequestId) -> Option<String> {
        let mut pending = self.pending.remove(request_id)?;
        if let Some(token) = pending.cancellation_token.take() {
            token.cancel();
        }
        Some(pending.method)
    }

    /// Returns `true` if the request with the given id is still pending.
    pub(crate) fn is_pending(&self, request_id: &RequestId) -> bool {
        self.pending.contains_key(request_id)
    }

    /// Returns the cancellation token for the given request id if the request is still pending,
    /// lazily creating the token on first use.
    pub(crate) fn cancellation_token(
        &self,
        request_id: &RequestId,
    ) -> Option<RequestCancellationToken> {
        self.pending.get(request_id).map(|pending| {
            let token = pending
                .cancellation_token
                .get_or_init(RequestCancellationToken::default);
            RequestCancellationToken::clone(token)
        })
    }

    /// Marks the request as completed.
    ///
    /// Returns the time when the request was registered and the request method name, or `None` if the request was not pending.
    pub(crate) fn complete(&mut self, request_id: &RequestId) -> Option<(Instant, String)> {
        let pending = self.pending.remove(request_id)?;
        Some((pending.start_time, pending.method))
    }
}
/// A request from the client to the server that hasn't been responded yet.
#[derive(Debug)]
struct PendingRequest {
    /// The time when the request was registered.
    ///
    /// This does not include the time the request was queued in the main loop before it was registered.
    start_time: Instant,
    /// The method name of the request.
    method: String,
    /// A cancellation token to cancel this request.
    ///
    /// This is only initialized for background requests. Local tasks don't support cancellation (unless retried)
    /// as they're processed immediately after receiving the request; Making it impossible for a
    /// cancellation message to be processed before the task is completed.
    cancellation_token: OnceCell<RequestCancellationToken>,
}
impl PendingRequest {
    /// Creates a pending request for `method`, timestamped now, with no
    /// cancellation token (it's created lazily for background requests).
    fn new(method: String) -> Self {
        Self {
            start_time: Instant::now(),
            method,
            cancellation_token: OnceCell::new(),
        }
    }
}
/// Token to cancel a specific request.
///
/// Can be shared between threads to check for cancellation *after* a request has been
/// scheduled; every copy observes a cancellation triggered through any other copy.
#[derive(Debug, Default)]
pub(crate) struct RequestCancellationToken(Arc<AtomicBool>);

impl RequestCancellationToken {
    /// Returns `true` once the request was cancelled through this token or any clone of it.
    pub(crate) fn is_cancelled(&self) -> bool {
        let RequestCancellationToken(flag) = self;
        flag.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Flags the request as cancelled so it won't be processed.
    fn cancel(&self) {
        let RequestCancellationToken(flag) = self;
        flag.store(true, std::sync::atomic::Ordering::Relaxed);
    }

    /// Creates another handle to the same underlying flag.
    ///
    /// An associated function mirroring `Clone::clone` without exposing a `Clone` impl.
    fn clone(this: &Self) -> Self {
        RequestCancellationToken(Arc::clone(&this.0))
    }
}
/// Requests from server -> client.
#[derive(Default)]
pub(crate) struct Outgoing {
    /// The id of the next request sent from the server to the client.
    ///
    /// `Cell` so ids can be allocated through a shared reference (see `register`).
    next_request_id: Cell<i32>,
    /// A map of request ids to the handlers that process the client-response.
    response_handlers: RefCell<FxHashMap<RequestId, ClientResponseHandler>>,
}
impl Outgoing {
/// Registers a handler, returns the id for the request.
#[must_use]
pub(crate) fn register(&self, handler: ClientResponseHandler) -> RequestId {
let id = self.next_request_id.get();
self.next_request_id.set(id + 1);
self.response_handlers
.borrow_mut()
.insert(id.into(), handler);
id.into()
}
/// Marks the request with the given id as complete and returns the handler to process the response.
///
/// Returns `None` if the request was not found.
#[must_use]
pub(crate) fn complete(&mut self, request_id: &RequestId) -> Option<ClientResponseHandler> {
self.response_handlers.get_mut().remove(request_id)
}
}
impl std::fmt::Debug for Outgoing {
    // Hand-written impl: the response handlers are rendered as an opaque
    // placeholder (NOTE(review): presumably `ClientResponseHandler` has no
    // `Debug` impl — confirm before deriving).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Outgoing")
            .field("next_request_id", &self.next_request_id)
            .field("response_handlers", &"<response handlers>")
            .finish()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/schedule.rs | crates/ty_server/src/server/schedule.rs | use std::num::NonZeroUsize;
use crate::session::Session;
mod task;
mod thread;
use self::{
task::{BackgroundTaskBuilder, SyncTask},
thread::ThreadPriority,
};
use crate::session::client::Client;
pub(super) use task::{BackgroundSchedule, Task};
/// Spawns the main event-loop thread.
///
/// The event loop runs on a secondary thread spawned from the _actual_ main thread so
/// that we can request a larger stack than some OS defaults (Windows, for example) and
/// designate it as high-priority.
pub(crate) fn spawn_main_loop(
    func: impl FnOnce() -> crate::Result<()> + Send + 'static,
) -> crate::Result<thread::JoinHandle<crate::Result<()>>> {
    // Generous stack to avoid overflows on platforms with small default stack sizes.
    const MAIN_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024;
    const MAIN_THREAD_NAME: &str = "ty:main";

    let handle = thread::Builder::new(thread::ThreadPriority::LatencySensitive)
        .name(MAIN_THREAD_NAME.into())
        .stack_size(MAIN_THREAD_STACK_SIZE)
        .spawn(func)?;
    Ok(handle)
}
/// Dispatches tasks either synchronously or onto one of the server's thread pools.
pub(crate) struct Scheduler {
    /// Dedicated single-thread pool used for the `Fmt` schedule.
    fmt_pool: thread::Pool,
    /// General-purpose pool for background work.
    background_pool: thread::Pool,
}
impl Scheduler {
    /// Creates a scheduler with `worker_threads` background threads and a
    /// single-threaded formatting pool.
    pub(super) fn new(worker_threads: NonZeroUsize) -> Self {
        const FMT_THREADS: usize = 1;
        Self {
            fmt_pool: thread::Pool::new(NonZeroUsize::try_from(FMT_THREADS).unwrap()),
            background_pool: thread::Pool::new(worker_threads),
        }
    }
    /// Dispatches a `task` by either running it as a blocking function or
    /// executing it on a background thread pool.
    pub(super) fn dispatch(&mut self, task: task::Task, session: &mut Session, client: Client) {
        match task {
            // Sync tasks run immediately on the calling thread.
            Task::Sync(SyncTask { func }) => {
                func(session, &client);
            }
            Task::Background(BackgroundTaskBuilder {
                schedule,
                builder: func,
            }) => {
                // Run the builder while we still have access to the session, then hand
                // the resulting closure off to the pool selected by `schedule`.
                let static_func = func(session);
                let task = move || static_func(&client);
                match schedule {
                    BackgroundSchedule::Worker => {
                        self.background_pool.spawn(ThreadPriority::Worker, task);
                    }
                    BackgroundSchedule::LatencySensitive => self
                        .background_pool
                        .spawn(ThreadPriority::LatencySensitive, task),
                    BackgroundSchedule::Fmt => {
                        self.fmt_pool.spawn(ThreadPriority::LatencySensitive, task);
                    }
                }
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api.rs | crates/ty_server/src/server/api.rs | use crate::server::schedule::Task;
use crate::session::Session;
use anyhow::anyhow;
use lsp_server as server;
use lsp_server::{ErrorCode, RequestId};
use lsp_types::notification::Notification;
use lsp_types::request::Request;
use std::panic::{AssertUnwindSafe, UnwindSafe};
mod diagnostics;
mod notifications;
mod requests;
mod semantic_tokens;
mod symbols;
mod traits;
use self::traits::{NotificationHandler, RequestHandler};
use super::{Result, schedule::BackgroundSchedule};
use crate::session::client::Client;
pub(crate) use diagnostics::publish_settings_diagnostics;
pub use requests::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};
use ruff_db::panic::PanicError;
/// Processes a request from the client to the server.
///
/// The LSP specification requires that each request has exactly one response. Therefore,
/// it's crucial that all paths in this method call [`Client::respond`] exactly once.
/// The only exception to this is requests that were cancelled by the client. In this case,
/// the response was already sent by the [`notification::CancelNotificationHandler`].
pub(super) fn request(req: server::Request) -> Task {
    let id = req.id.clone();
    // Route the request to its handler by LSP method name. Most handlers run on a
    // background pool; only `executeCommand` and `shutdown` run synchronously here.
    match req.method.as_str() {
        requests::ExecuteCommand::METHOD => sync_request_task::<requests::ExecuteCommand>(req),
        requests::CodeActionRequestHandler::METHOD => background_document_request_task::<
            requests::CodeActionRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::DocumentDiagnosticRequestHandler::METHOD => background_document_request_task::<
            requests::DocumentDiagnosticRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::WorkspaceDiagnosticRequestHandler::METHOD => background_request_task::<
            requests::WorkspaceDiagnosticRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::GotoTypeDefinitionRequestHandler::METHOD => background_document_request_task::<
            requests::GotoTypeDefinitionRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::GotoDeclarationRequestHandler::METHOD => background_document_request_task::<
            requests::GotoDeclarationRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::GotoDefinitionRequestHandler::METHOD => background_document_request_task::<
            requests::GotoDefinitionRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::HoverRequestHandler::METHOD => background_document_request_task::<
            requests::HoverRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::ReferencesRequestHandler::METHOD => background_document_request_task::<
            requests::ReferencesRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::DocumentHighlightRequestHandler::METHOD => background_document_request_task::<
            requests::DocumentHighlightRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::InlayHintRequestHandler::METHOD => background_document_request_task::<
            requests::InlayHintRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::SemanticTokensRequestHandler::METHOD => background_document_request_task::<
            requests::SemanticTokensRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::SemanticTokensRangeRequestHandler::METHOD => background_document_request_task::<
            requests::SemanticTokensRangeRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        requests::SignatureHelpRequestHandler::METHOD => background_document_request_task::<
            requests::SignatureHelpRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::PrepareRenameRequestHandler::METHOD => background_document_request_task::<
            requests::PrepareRenameRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::RenameRequestHandler::METHOD => background_document_request_task::<
            requests::RenameRequestHandler,
        >(req, BackgroundSchedule::Worker),
        // Completions run on the latency-sensitive schedule to keep typing responsive.
        requests::CompletionRequestHandler::METHOD => background_document_request_task::<
            requests::CompletionRequestHandler,
        >(
            req, BackgroundSchedule::LatencySensitive
        ),
        requests::SelectionRangeRequestHandler::METHOD => background_document_request_task::<
            requests::SelectionRangeRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::DocumentSymbolRequestHandler::METHOD => background_document_request_task::<
            requests::DocumentSymbolRequestHandler,
        >(req, BackgroundSchedule::Worker),
        requests::WorkspaceSymbolRequestHandler::METHOD => background_request_task::<
            requests::WorkspaceSymbolRequestHandler,
        >(
            req, BackgroundSchedule::Worker
        ),
        lsp_types::request::Shutdown::METHOD => sync_request_task::<requests::ShutdownHandler>(req),
        method => {
            tracing::warn!("Received request {method} which does not have a handler");
            let result: Result<()> = Err(Error::new(
                anyhow!("Unknown request: {method}"),
                server::ErrorCode::MethodNotFound,
            ));
            return Task::immediate(id, result);
        }
    }
    // Routing failed: report the error and still answer the request so the
    // client isn't left waiting for a response.
    .unwrap_or_else(|err| {
        tracing::error!("Encountered error when routing request with ID {id}: {err}");
        Task::sync(move |_session, client| {
            if matches!(err.code, ErrorCode::InternalError) {
                client.show_error_message("ty failed to handle a request from the editor. Check the logs for more details.");
            }
            respond_silent_error(
                id,
                client,
                lsp_server::ResponseError {
                    code: err.code as i32,
                    message: err.to_string(),
                    data: None,
                },
            );
        })
    })
}
/// Processes a notification from the client, routing it to the matching handler.
///
/// Unlike requests, notifications never get a response; unknown or ignored
/// notifications simply produce a no-op task.
pub(super) fn notification(notif: server::Notification) -> Task {
    match notif.method.as_str() {
        notifications::DidCloseTextDocumentHandler::METHOD => {
            sync_notification_task::<notifications::DidCloseTextDocumentHandler>(notif)
        }
        notifications::DidOpenTextDocumentHandler::METHOD => {
            sync_notification_task::<notifications::DidOpenTextDocumentHandler>(notif)
        }
        notifications::DidChangeTextDocumentHandler::METHOD => {
            sync_notification_task::<notifications::DidChangeTextDocumentHandler>(notif)
        }
        notifications::DidOpenNotebookHandler::METHOD => {
            sync_notification_task::<notifications::DidOpenNotebookHandler>(notif)
        }
        notifications::DidChangeNotebookHandler::METHOD => {
            sync_notification_task::<notifications::DidChangeNotebookHandler>(notif)
        }
        notifications::DidCloseNotebookHandler::METHOD => {
            sync_notification_task::<notifications::DidCloseNotebookHandler>(notif)
        }
        notifications::DidChangeWatchedFiles::METHOD => {
            sync_notification_task::<notifications::DidChangeWatchedFiles>(notif)
        }
        lsp_types::notification::Cancel::METHOD => {
            sync_notification_task::<notifications::CancelNotificationHandler>(notif)
        }
        lsp_types::notification::SetTrace::METHOD => {
            tracing::trace!("Ignoring `setTrace` notification");
            return Task::nothing();
        }
        method => {
            tracing::warn!("Received notification {method} which does not have a handler.");
            return Task::nothing();
        }
    }
    // Routing failed: log and surface internal errors; no response is owed.
    .unwrap_or_else(|err| {
        tracing::error!("Encountered error when routing notification: {err}");
        Task::sync(move |_session, client| {
            if matches!(err.code, ErrorCode::InternalError) {
                client.show_error_message(
                    "ty failed to handle a notification from the editor. Check the logs for more details."
                );
            }
        })
    })
}
/// Creates a task that runs the handler `R` synchronously on the main loop,
/// responding to the request when the handler finishes (e.g. `executeCommand`
/// and `shutdown` are routed here).
fn sync_request_task<R: traits::SyncRequestHandler>(req: server::Request) -> Result<Task>
where
    <<R as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
    let (id, params) = cast_request::<R>(req)?;
    let task = Task::sync(move |session, client: &Client| {
        let _span = tracing::debug_span!("request", %id, method = R::METHOD).entered();
        respond::<R>(&id, R::run(session, client, params), client);
    });
    Ok(task)
}
/// Creates a task that runs the handler `R` on the `schedule` background pool
/// with a session snapshot, for requests that aren't tied to a single document.
///
/// Cancellation is re-checked on the background thread before the handler runs,
/// and handler panics are converted into responses via `panic_response`.
fn background_request_task<R: traits::BackgroundRequestHandler>(
    req: server::Request,
    schedule: BackgroundSchedule,
) -> Result<Task>
where
    <<R as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
    // Keep a copy of the raw request so it can be re-queued if salsa cancels it.
    let retry = R::RETRY_ON_CANCELLATION.then(|| req.clone());
    let (id, params) = cast_request::<R>(req)?;
    Ok(Task::background(schedule, move |session: &Session| {
        let cancellation_token = session
            .request_queue()
            .incoming()
            .cancellation_token(&id)
            .expect("request should have been tested for cancellation before scheduling");
        // SAFETY: The `snapshot` is safe to move across the unwind boundary because it is not used
        // after unwinding.
        let snapshot = AssertUnwindSafe(session.snapshot_session());
        Box::new(move |client| {
            let _span = tracing::debug_span!("request", %id, method = R::METHOD).entered();
            // Test again if the request was cancelled since it was scheduled on the background task
            // and, if so, return early
            if cancellation_token.is_cancelled() {
                tracing::debug!(
                    "Ignoring request id={id} method={} because it was cancelled",
                    R::METHOD
                );
                // We don't need to send a response here because the `cancel` notification
                // handler already responded with a message.
                return;
            }
            if let Err(error) = ruff_db::panic::catch_unwind(|| {
                let snapshot = snapshot;
                R::handle_request(&id, snapshot.0, client, params);
            }) {
                panic_response::<R>(&id, client, &error, retry);
            }
        })
    }))
}
/// Creates a task that runs the handler `R` on the `schedule` background pool
/// for a request that targets a specific document.
///
/// The document is resolved up front; if it isn't open in the session the task
/// responds with `InvalidParams` instead of running the handler.
fn background_document_request_task<R: traits::BackgroundDocumentRequestHandler>(
    req: server::Request,
    schedule: BackgroundSchedule,
) -> Result<Task>
where
    <<R as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
    // Keep a copy of the raw request so it can be re-queued if salsa cancels it.
    let retry = R::RETRY_ON_CANCELLATION.then(|| req.clone());
    let (id, params) = cast_request::<R>(req)?;
    Ok(Task::background(schedule, move |session: &Session| {
        let cancellation_token = session
            .request_queue()
            .incoming()
            .cancellation_token(&id)
            .expect("request should have been tested for cancellation before scheduling");
        let url = R::document_url(&params);
        let Ok(document) = session.snapshot_document(&url) else {
            let reason = format!("Document {url} is not open in the session");
            tracing::warn!(
                "Ignoring request id={id} method={} because {reason}",
                R::METHOD
            );
            return Box::new(|client| {
                respond_silent_error(
                    id,
                    client,
                    lsp_server::ResponseError {
                        code: lsp_server::ErrorCode::InvalidParams as i32,
                        message: reason,
                        data: None,
                    },
                );
            });
        };
        let path = document.notebook_or_file_path();
        let db = session.project_db(path).clone();
        Box::new(move |client| {
            let _span = tracing::debug_span!("request", %id, method = R::METHOD).entered();
            // Test again if the request was cancelled since it was scheduled on the background task
            // and, if so, return early
            if cancellation_token.is_cancelled() {
                tracing::debug!(
                    "Ignoring request id={id} method={} because it was cancelled",
                    R::METHOD
                );
                // We don't need to send a response here because the `cancel` notification
                // handler already responded with a message.
                return;
            }
            if let Err(error) = ruff_db::panic::catch_unwind(|| {
                salsa::attach(&db, || {
                    R::handle_request(&id, &db, document, client, params);
                });
            }) {
                panic_response::<R>(&id, client, &error, retry);
            }
        })
    }))
}
/// Converts a panic raised by the handler `R` into the appropriate response.
///
/// Salsa cancellations (the database changed mid-query) either re-queue the
/// request (when `request` is `Some`, i.e. the handler supports retry) or send
/// the handler's salsa-cancellation error. Any other panic is reported to the
/// client as an internal error.
fn panic_response<R>(
    id: &RequestId,
    client: &Client,
    error: &PanicError,
    request: Option<lsp_server::Request>,
) where
    R: traits::RetriableRequestHandler,
{
    // Check if the request was canceled due to some modifications to the salsa database.
    if error.payload.downcast_ref::<salsa::Cancelled>().is_some() {
        // If the query supports retry, re-queue the request.
        // The query is still likely to succeed if the user modified any other document.
        if let Some(request) = request {
            tracing::debug!(
                "request id={} method={} was cancelled by salsa, re-queueing for retry",
                request.id,
                request.method
            );
            client.retry(request);
        } else {
            tracing::debug!(
                "request id={} was cancelled by salsa, sending content modified",
                id
            );
            respond_silent_error(id.clone(), client, R::salsa_cancellation_error());
        }
    } else {
        respond::<R>(
            id,
            Err(Error {
                code: lsp_server::ErrorCode::InternalError,
                error: anyhow!("request handler {error}"),
            }),
            client,
        );
    }
}
/// Creates a task that runs the notification handler `N` synchronously on the
/// main loop; handler errors are logged and surfaced to the user.
fn sync_notification_task<N: traits::SyncNotificationHandler>(
    notif: server::Notification,
) -> Result<Task> {
    let (id, params) = cast_notification::<N>(notif)?;
    Ok(Task::sync(move |session, client| {
        let _span = tracing::debug_span!("notification", method = N::METHOD).entered();
        if let Err(err) = N::run(session, client, params) {
            tracing::error!("An error occurred while running {id}: {err}");
            client.show_error_message("ty encountered a problem. Check the logs for more details.");
            return;
        }
        // If there's a pending workspace diagnostic long-polling request,
        // resume it, but only if the session revision changed (e.g. because some document changed).
        session.resume_suspended_workspace_diagnostic_request(client);
    }))
}
/// Builds a [`Task`] that runs the notification handler `N` on a background
/// thread, operating on a snapshot of the addressed document.
///
/// Currently unused (`#[expect(dead_code)]`); kept for handlers that may need
/// background scheduling in the future.
#[expect(dead_code)]
fn background_notification_thread<N>(
    req: server::Notification,
    schedule: BackgroundSchedule,
) -> Result<Task>
where
    N: traits::BackgroundDocumentNotificationHandler,
    <<N as NotificationHandler>::NotificationType as Notification>::Params: UnwindSafe,
{
    let (id, params) = cast_notification::<N>(req)?;
    Ok(Task::background(schedule, move |session: &Session| {
        let url = N::document_url(&params);

        // Take a document snapshot up front; it is moved into the
        // background closure below.
        let Ok(snapshot) = session.snapshot_document(&url) else {
            let reason = format!("Document {url} is not open in the session");
            tracing::warn!(
                "Ignoring notification id={id} method={} because {reason}",
                N::METHOD
            );
            // Nothing to run in the background if the document isn't open.
            return Box::new(|_| {});
        };

        Box::new(move |client| {
            let _span = tracing::debug_span!("notification", method = N::METHOD).entered();

            // Catch panics so a faulty handler doesn't take down the worker thread.
            let result = match ruff_db::panic::catch_unwind(|| {
                N::run_with_snapshot(snapshot, client, params)
            }) {
                Ok(result) => result,
                Err(panic) => {
                    tracing::error!("An error occurred while running {id}: {panic}");
                    client.show_error_message(
                        "ty encountered a panic. Check the logs for more details.",
                    );
                    return;
                }
            };

            if let Err(err) = result {
                tracing::error!("An error occurred while running {id}: {err}");
                client.show_error_message(
                    "ty encountered a problem. Check the logs for more details.",
                );
            }
        })
    }))
}
/// Tries to cast a serialized request from the client into
/// a parameter type for a specific request handler.
///
/// Returns the request id together with the deserialized parameters.
/// A method mismatch is unreachable because the caller already dispatched on
/// `Req::METHOD`; a JSON error is reported back as `InvalidParams`.
///
/// It is *highly* recommended to not override this function in your
/// implementation.
fn cast_request<Req>(
    request: server::Request,
) -> Result<(
    RequestId,
    <<Req as RequestHandler>::RequestType as Request>::Params,
)>
where
    Req: RequestHandler,
    <<Req as RequestHandler>::RequestType as Request>::Params: UnwindSafe,
{
    request
        .extract(Req::METHOD)
        .map_err(|err| match err {
            json_err @ server::ExtractError::JsonError { .. } => {
                anyhow::anyhow!("JSON parsing failure:\n{json_err}")
            }
            server::ExtractError::MethodMismatch(_) => {
                unreachable!("A method mismatch should not be possible here unless you've used a different handler (`Req`) \
                    than the one whose method name was matched against earlier.")
            }
        })
        .with_failure_code(server::ErrorCode::InvalidParams)
}
/// Sends back a response to the client, but only if the request wasn't cancelled.
///
/// Errors are additionally logged and surfaced to the user via an error
/// message box before being forwarded to the client.
fn respond<Req>(
    id: &RequestId,
    result: Result<<<Req as RequestHandler>::RequestType as Request>::Result>,
    client: &Client,
) where
    Req: RequestHandler,
{
    if let Err(err) = &result {
        tracing::error!("An error occurred with request ID {id}: {err}");
        client.show_error_message("ty encountered a problem. Check the logs for more details.");
    }
    client.respond(id, result);
}
/// Sends back an error response to the client using a [`Client`] without showing a warning
/// to the user.
fn respond_silent_error(id: RequestId, client: &Client, error: lsp_server::ResponseError) {
    client.respond_err(id, error);
}
/// Tries to cast a serialized notification from the client into
/// a parameter type for a specific notification handler.
///
/// Returns the method name (useful for logging) together with the
/// deserialized parameters.
fn cast_notification<N>(
    notification: server::Notification,
) -> Result<(
    &'static str,
    <<N as NotificationHandler>::NotificationType as Notification>::Params,
)>
where
    N: NotificationHandler,
{
    Ok((
        N::METHOD,
        notification
            .extract(N::METHOD)
            .map_err(|err| match err {
                json_err @ server::ExtractError::JsonError { .. } => {
                    anyhow::anyhow!("JSON parsing failure:\n{json_err}")
                }
                server::ExtractError::MethodMismatch(_) => {
                    unreachable!("A method mismatch should not be possible here unless you've used a different handler (`N`) \
                        than the one whose method name was matched against earlier.")
                }
            })
            .with_failure_code(server::ErrorCode::InvalidParams)?,
    ))
}
/// An error returned by a request or notification handler, carrying the LSP
/// error code to report to the client.
pub(crate) struct Error {
    /// The LSP error code sent to the client.
    pub(crate) code: server::ErrorCode,
    /// The underlying cause.
    pub(crate) error: anyhow::Error,
}

/// A trait to convert result types into the server result type, [`super::Result`].
trait LSPResult<T> {
    /// Converts the error variant into an [`Error`] tagged with the given LSP
    /// error `code`.
    fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T>;
}

impl<T, E: Into<anyhow::Error>> LSPResult<T> for core::result::Result<T, E> {
    fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T> {
        self.map_err(|err| Error::new(err.into(), code))
    }
}

impl Error {
    /// Creates a new [`Error`] from the given cause and LSP error code.
    pub(crate) fn new(err: anyhow::Error, code: server::ErrorCode) -> Self {
        Self { code, error: err }
    }
}

// Right now, we treat the error code as invisible data that won't
// be printed.
impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.error.fmt(f)
    }
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.error.fmt(f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/lazy_work_done_progress.rs | crates/ty_server/src/server/lazy_work_done_progress.rs | use crate::capabilities::ResolvedClientCapabilities;
use crate::session::client::Client;
use lsp_types::request::WorkDoneProgressCreate;
use lsp_types::{
ProgressParams, ProgressParamsValue, ProgressToken, WorkDoneProgress, WorkDoneProgressBegin,
WorkDoneProgressCreateParams, WorkDoneProgressEnd, WorkDoneProgressReport,
};
use std::fmt::Display;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
/// Monotonic counter used to mint unique tokens for server-initiated
/// work done progress reports.
static SERVER_WORK_DONE_TOKENS: AtomicUsize = AtomicUsize::new(0);
/// A [work done progress][work-done-progress] that uses the client provided token if available,
/// but falls back to a server initiated progress if supported by the client.
///
/// The LSP specification supports client and server initiated work done progress reporting:
/// * Client: Many requests have a work done progress token or extend `WorkDoneProgressParams`.
/// For those requests, a server can ask clients to start a work done progress report by
/// setting the work done capability for that request in the server's capabilities during initialize.
/// However, as of today (July 2025), VS code and Zed don't support client initiated work done progress
/// tokens except for the `initialize` request (<https://github.com/microsoft/vscode-languageserver-node/issues/528>).
/// * Server: A server can initiate a work done progress report by sending a `WorkDoneProgressCreate` request
/// with a token, which the client can then use to report progress (except during `initialize`).
///
/// This work done progress supports both clients that provide a work done progress token in their requests
/// and clients that do not. If the client does not provide a token, the server will
/// initiate a work done progress report using a unique string token.
///
/// ## Server Initiated Progress
///
/// The implementation initiates a work done progress report lazily when no token is provided in the request.
/// This creation happens async and the LSP specification requires that a server only
/// sends `$/progress` notifications with that token if the create request was successful (no error):
///
/// > code and message set in case an exception happens during the 'window/workDoneProgress/create' request.
/// > In case an error occurs a server must not send any progress notification
/// > using the token provided in the WorkDoneProgressCreateParams.
///
/// The implementation doesn't block on the server response because it feels unfortunate to delay
/// a client request only so that ty can show a progress bar. Therefore, the progress reporting
/// will not be available immediately.
///
/// [work-done-progress]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#workDoneProgress
#[derive(Clone)]
pub(super) struct LazyWorkDoneProgress {
    // Shared so that all clones report to (and eventually end) the same
    // progress token; the `end` notification is sent when the last clone drops.
    inner: Arc<Inner>,
}
impl LazyWorkDoneProgress {
    /// Creates a new progress report with the given `title`.
    ///
    /// Uses the client-provided `request_token` when present; otherwise lazily
    /// creates a server-initiated token, if the client supports it.
    pub(super) fn new(
        client: &Client,
        request_token: Option<ProgressToken>,
        title: &str,
        capabilities: ResolvedClientCapabilities,
    ) -> Self {
        // With a client-provided token we may begin reporting immediately.
        if let Some(token) = &request_token {
            Self::send_begin(client, token.clone(), title.to_string());
        }

        let is_server_initiated = request_token.is_none();
        let once_token = std::sync::OnceLock::new();

        if let Some(token) = request_token {
            // SAFETY: The token is guaranteed to be not set yet because we only created it above.
            once_token.set(token).unwrap();
        }

        let work_done = Self {
            inner: Arc::new(Inner {
                token: once_token,
                finish_message: std::sync::Mutex::default(),
                client: client.clone(),
            }),
        };

        if is_server_initiated && capabilities.supports_work_done_progress() {
            // Use a string token because Zed does not support numeric tokens
            let token = ProgressToken::String(format!(
                "ty-{}",
                SERVER_WORK_DONE_TOKENS.fetch_add(1, Ordering::Relaxed)
            ));

            let work_done = work_done.clone();
            let title = title.to_string();

            // Only begin reporting once the client acknowledges the create
            // request; the spec forbids using the token before then (see the
            // type-level documentation).
            client.send_deferred_request::<WorkDoneProgressCreate>(
                WorkDoneProgressCreateParams {
                    token: token.clone(),
                },
                move |client, ()| {
                    Self::send_begin(client, token.clone(), title);
                    // SAFETY: We only take this branch if `request_token` was `None`
                    // and we only issue a single request (without retry).
                    work_done.inner.token.set(token).unwrap();
                },
            );
        }

        work_done
    }

    /// Sets the message shown when the progress report ends (on drop of the
    /// last clone).
    pub(super) fn set_finish_message(&self, message: String) {
        let mut finish_message = self.inner.finish_message.lock().unwrap();
        *finish_message = Some(message);
    }

    /// Sends the `begin` notification that starts the progress display.
    fn send_begin(client: &Client, token: ProgressToken, title: String) {
        client.send_notification::<lsp_types::notification::Progress>(ProgressParams {
            token,
            value: ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(WorkDoneProgressBegin {
                title,
                cancellable: Some(false),
                message: None,
                percentage: Some(0),
            })),
        });
    }

    /// Sends a progress report with the given message and optional percentage.
    ///
    /// Does nothing if no progress token is available (yet) — e.g. the
    /// server-initiated create request hasn't been acknowledged.
    pub(super) fn report_progress(&self, message: impl Display, percentage: Option<u32>) {
        let Some(token) = self.inner.token.get() else {
            return;
        };

        self.inner
            .client
            .send_notification::<lsp_types::notification::Progress>(ProgressParams {
                token: token.clone(),
                value: ProgressParamsValue::WorkDone(WorkDoneProgress::Report(
                    WorkDoneProgressReport {
                        cancellable: Some(false),
                        message: Some(message.to_string()),
                        percentage,
                    },
                )),
            });
    }
}
/// Shared state behind [`LazyWorkDoneProgress`]; ends the progress report
/// when dropped.
struct Inner {
    /// The progress token; set lazily for server-initiated progress.
    token: std::sync::OnceLock<ProgressToken>,
    /// Optional message to show when the progress report ends.
    finish_message: std::sync::Mutex<Option<String>>,
    client: Client,
}
impl Drop for Inner {
    /// Sends the `end` notification that closes the progress display.
    fn drop(&mut self) {
        // If no token was ever established, no progress was started and
        // there is nothing to end.
        if let Some(token) = self.token.get() {
            // A poisoned mutex simply drops the finish message.
            let message = match self.finish_message.lock() {
                Ok(mut guard) => guard.take(),
                Err(_) => None,
            };

            self.client
                .send_notification::<lsp_types::notification::Progress>(ProgressParams {
                    token: token.clone(),
                    value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(
                        WorkDoneProgressEnd { message },
                    )),
                });
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/main_loop.rs | crates/ty_server/src/server/main_loop.rs | use crate::server::schedule::Scheduler;
use crate::server::{Server, api};
use crate::session::client::{Client, ClientResponseHandler};
use crate::session::{ClientOptions, SuspendedWorkspaceDiagnosticRequest};
use anyhow::anyhow;
use crossbeam::select;
use lsp_server::Message;
use lsp_types::notification::Notification;
use lsp_types::{ConfigurationParams, Url};
use serde_json::Value;
/// Sender half of the channel delivering messages to the client connection.
pub(crate) type ConnectionSender = crossbeam::channel::Sender<Message>;
/// Sender half of the channel feeding [`Event`]s into the server main loop.
pub(crate) type MainLoopSender = crossbeam::channel::Sender<Event>;
/// Receiver half of the main-loop event channel.
pub(crate) type MainLoopReceiver = crossbeam::channel::Receiver<Event>;
impl Server {
    /// Runs the server's main loop until the client disconnects or sends the
    /// `exit` notification.
    ///
    /// Processes incoming client messages as well as internally queued
    /// [`Action`]s, dispatching handler tasks through the [`Scheduler`].
    pub(super) fn main_loop(&mut self) -> crate::Result<()> {
        self.initialize(&Client::new(
            self.main_loop_sender.clone(),
            self.connection.sender.clone(),
        ));

        let mut scheduler = Scheduler::new(self.worker_threads);

        while let Ok(next_event) = self.next_event() {
            let Some(next_event) = next_event else {
                anyhow::bail!("client exited without proper shutdown sequence");
            };

            let client = Client::new(
                self.main_loop_sender.clone(),
                self.connection.sender.clone(),
            );

            match next_event {
                Event::Message(msg) => {
                    // Messages may be deferred by the session (a `None` here);
                    // deferred messages are re-delivered via `next_event`.
                    let Some(msg) = self.session.should_defer_message(msg) else {
                        continue;
                    };

                    let task = match msg {
                        Message::Request(req) => {
                            // Track the request so cancellation and response
                            // filtering can find it later.
                            self.session
                                .request_queue_mut()
                                .incoming_mut()
                                .register(req.id.clone(), req.method.clone());

                            if self.session.is_shutdown_requested() {
                                tracing::warn!(
                                    "Received request `{}` after server shutdown was requested, discarding",
                                    &req.method
                                );
                                client.respond_err(
                                    req.id,
                                    lsp_server::ResponseError {
                                        code: lsp_server::ErrorCode::InvalidRequest as i32,
                                        message: "Shutdown already requested".to_owned(),
                                        data: None,
                                    },
                                );
                                continue;
                            }

                            api::request(req)
                        }
                        Message::Notification(notification) => {
                            if notification.method == lsp_types::notification::Exit::METHOD {
                                // `exit` must be preceded by a `shutdown` request.
                                if !self.session.is_shutdown_requested() {
                                    return Err(anyhow!(
                                        "Received exit notification before a shutdown request"
                                    ));
                                }

                                tracing::debug!("Received exit notification, exiting");
                                return Ok(());
                            }

                            api::notification(notification)
                        }

                        // Handle the response from the client to a server request
                        Message::Response(response) => {
                            if let Some(handler) = self
                                .session
                                .request_queue_mut()
                                .outgoing_mut()
                                .complete(&response.id)
                            {
                                handler.handle_response(&client, response);
                            } else {
                                tracing::error!(
                                    "Received a response with ID {}, which was not expected",
                                    response.id
                                );
                            }

                            continue;
                        }
                    };

                    scheduler.dispatch(task, &mut self.session, client);
                }
                Event::Action(action) => match action {
                    Action::SendResponse(response) => {
                        // Filter out responses for already canceled requests.
                        if let Some((start_time, method)) = self
                            .session
                            .request_queue_mut()
                            .incoming_mut()
                            .complete(&response.id)
                        {
                            let duration = start_time.elapsed();
                            tracing::debug!(name: "message response", method, %response.id, duration = format_args!("{:0.2?}", duration));

                            self.connection.sender.send(Message::Response(response))?;
                        } else {
                            tracing::debug!(
                                "Ignoring response for canceled request id={}",
                                response.id
                            );
                        }
                    }

                    Action::RetryRequest(request) => {
                        // Never retry canceled requests.
                        if self
                            .session
                            .request_queue()
                            .incoming()
                            .is_pending(&request.id)
                        {
                            let task = api::request(request);
                            scheduler.dispatch(task, &mut self.session, client);
                        } else {
                            tracing::debug!(
                                "Request {}/{} was cancelled, not retrying",
                                request.method,
                                request.id
                            );
                        }
                    }

                    Action::SendRequest(request) => client.send_request_raw(&self.session, request),

                    Action::SuspendWorkspaceDiagnostics(suspended_request) => {
                        self.session.set_suspended_workspace_diagnostics_request(
                            *suspended_request,
                            &client,
                        );
                    }

                    Action::InitializeWorkspaces(workspaces_with_options) => {
                        self.session
                            .initialize_workspaces(workspaces_with_options, &client);
                        // We do this here after workspaces have been initialized
                        // so that the file watcher globs can take project search
                        // paths into account.
                        // self.try_register_file_watcher(&client);
                    }
                },
            }
        }

        Ok(())
    }

    /// Waits for the next message from the client or action.
    ///
    /// Returns `Ok(None)` if the client connection is closed.
    fn next_event(&mut self) -> Result<Option<Event>, crossbeam::channel::RecvError> {
        // We can't queue those into the main loop because that could result in reordering if
        // the `select` below picks a client message first.
        if let Some(deferred) = self.session.take_deferred_messages() {
            match &deferred {
                Message::Request(req) => {
                    tracing::debug!("Processing deferred request `{}`", req.method);
                }
                Message::Notification(notification) => {
                    tracing::debug!("Processing deferred notification `{}`", notification.method);
                }
                Message::Response(response) => {
                    tracing::debug!("Processing deferred response `{}`", response.id);
                }
            }

            return Ok(Some(Event::Message(deferred)));
        }

        select!(
            recv(self.connection.receiver) -> msg => {
                // Ignore disconnect errors, they're handled by the main loop (it will exit).
                Ok(msg.ok().map(Event::Message))
            },
            recv(self.main_loop_receiver) -> event => event.map(Some),
        )
    }

    /// Performs the server's post-startup initialization.
    fn initialize(&mut self, client: &Client) {
        self.request_workspace_configurations(client);
    }

    /// Requests workspace configurations from the client for all the workspaces in the session.
    ///
    /// If the client does not support workspace configuration, it initializes the workspaces
    /// using the initialization options provided by the client.
    fn request_workspace_configurations(&mut self, client: &Client) {
        if !self
            .session
            .client_capabilities()
            .supports_workspace_configuration()
        {
            tracing::info!(
                "Client does not support workspace configuration, initializing workspaces \
                    using the initialization options"
            );
            self.session.initialize_workspaces(
                self.session
                    .workspaces()
                    .urls()
                    .cloned()
                    .map(|url| (url, self.session.initialization_options().options.clone()))
                    .collect::<Vec<_>>(),
                client,
            );
            return;
        }

        let urls = self
            .session
            .workspaces()
            .urls()
            .cloned()
            .collect::<Vec<_>>();

        let items = urls
            .iter()
            .map(|root| lsp_types::ConfigurationItem {
                scope_uri: Some(root.clone()),
                section: Some("ty".to_string()),
            })
            .collect();

        tracing::debug!("Requesting workspace configuration for workspaces");
        client.send_request::<lsp_types::request::WorkspaceConfiguration>(
            &self.session,
            ConfigurationParams { items },
            |client, result: Vec<Value>| {
                tracing::debug!("Received workspace configurations, initializing workspaces");

                // This shouldn't fail because, as per the spec, the client needs to provide a
                // `null` value even if it cannot provide a configuration for a workspace.
                assert_eq!(
                    result.len(),
                    urls.len(),
                    "Mismatch in number of workspace URLs ({}) and configuration results ({})",
                    urls.len(),
                    result.len()
                );

                let workspaces_with_options: Vec<_> = urls
                    .into_iter()
                    .zip(result)
                    .map(|(url, value)| {
                        if value.is_null() {
                            tracing::debug!(
                                "No workspace options provided for {url}, using default options"
                            );
                            return (url, ClientOptions::default());
                        }

                        let options: ClientOptions =
                            serde_json::from_value(value).unwrap_or_else(|err| {
                                tracing::error!(
                                    "Failed to deserialize workspace options for {url}: {err}. \
                                        Using default options"
                                );
                                ClientOptions::default()
                            });

                        (url, options)
                    })
                    .collect();

                // Workspace initialization must happen on the main loop.
                client.queue_action(Action::InitializeWorkspaces(workspaces_with_options));
            },
        );
    }
}
/// An action that should be performed on the main loop.
#[derive(Debug)]
pub(crate) enum Action {
    /// Send a response to the client
    SendResponse(lsp_server::Response),

    /// Retry a request that previously failed due to a salsa cancellation.
    RetryRequest(lsp_server::Request),

    /// Send a request from the server to the client.
    SendRequest(SendRequest),

    /// Park a workspace diagnostic long-polling request on the session so it
    /// can be resumed later.
    SuspendWorkspaceDiagnostics(Box<SuspendedWorkspaceDiagnosticRequest>),

    /// Initialize the workspace after the server received
    /// the options from the client.
    InitializeWorkspaces(Vec<(Url, ClientOptions)>),
}
/// An event processed by the server's main loop.
#[derive(Debug)]
pub(crate) enum Event {
    /// An incoming message from the LSP client.
    Message(lsp_server::Message),

    /// An action queued from within the server itself.
    Action(Action),
}
/// A server-to-client request queued onto the main loop before being sent.
pub(crate) struct SendRequest {
    /// The LSP method name.
    pub(crate) method: String,
    /// The serialized request parameters.
    pub(crate) params: serde_json::Value,
    /// Handler invoked with the client's response.
    pub(crate) response_handler: ClientResponseHandler,
}
impl std::fmt::Debug for SendRequest {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `response_handler` is intentionally omitted from the output
        // (hence `finish_non_exhaustive`).
        f.debug_struct("SendRequest")
            .field("method", &self.method)
            .field("params", &self.params)
            .finish_non_exhaustive()
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/diagnostics.rs | crates/ty_server/src/server/api/diagnostics.rs | use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash as _, Hasher as _};
use lsp_types::notification::PublishDiagnostics;
use lsp_types::{
CodeDescription, Diagnostic, DiagnosticRelatedInformation, DiagnosticSeverity, DiagnosticTag,
NumberOrString, PublishDiagnosticsParams, Url,
};
use ruff_diagnostics::Applicability;
use ruff_text_size::Ranged;
use rustc_hash::FxHashMap;
use ruff_db::diagnostic::{Annotation, Severity, SubDiagnostic};
use ruff_db::files::{File, FileRange};
use ruff_db::system::SystemPathBuf;
use serde::{Deserialize, Serialize};
use ty_project::{Db as _, ProjectDatabase};
use crate::capabilities::ResolvedClientCapabilities;
use crate::document::{FileRangeExt, ToRangeExt};
use crate::session::client::Client;
use crate::session::{DocumentHandle, GlobalSettings};
use crate::system::{AnySystemPath, file_to_url};
use crate::{DIAGNOSTIC_NAME, Db, DiagnosticMode};
use crate::{PositionEncoding, Session};
/// The diagnostics computed for a single file (or notebook), together with the
/// information needed to convert them to LSP diagnostics.
pub(super) struct Diagnostics {
    items: Vec<ruff_db::diagnostic::Diagnostic>,
    /// The position encoding negotiated with the client.
    encoding: PositionEncoding,
    /// The file the diagnostics were computed for (the notebook file for
    /// notebook documents).
    file_or_notebook: File,
}
impl Diagnostics {
    /// Computes the result ID for `diagnostics`.
    ///
    /// Returns `None` if there are no diagnostics.
    pub(super) fn result_id_from_hash(
        diagnostics: &[ruff_db::diagnostic::Diagnostic],
    ) -> Option<String> {
        if diagnostics.is_empty() {
            return None;
        }

        // Generate result ID based on raw diagnostic content only
        let mut hasher = DefaultHasher::new();
        // Hashing the slice also hashes its length, so different numbers of
        // diagnostics produce different hashes.
        diagnostics.hash(&mut hasher);

        Some(format!("{:016x}", hasher.finish()))
    }

    /// Computes the result ID for the diagnostics.
    ///
    /// Returns `None` if there are no diagnostics.
    pub(super) fn result_id(&self) -> Option<String> {
        Self::result_id_from_hash(&self.items)
    }

    /// Converts the diagnostics into their LSP representation.
    ///
    /// For notebooks, diagnostics are grouped by the cell they belong to;
    /// every cell gets an entry (possibly empty) so stale diagnostics are
    /// cleared client-side.
    pub(super) fn to_lsp_diagnostics(
        &self,
        db: &ProjectDatabase,
        client_capabilities: ResolvedClientCapabilities,
        global_settings: &GlobalSettings,
    ) -> LspDiagnostics {
        if let Some(notebook_document) = db.notebook_document(self.file_or_notebook) {
            let mut cell_diagnostics: FxHashMap<Url, Vec<Diagnostic>> = FxHashMap::default();

            // Populates all relevant URLs with an empty diagnostic list. This ensures that documents
            // without diagnostics still get updated.
            for cell_url in notebook_document.cell_urls() {
                cell_diagnostics.entry(cell_url.clone()).or_default();
            }

            for diagnostic in &self.items {
                let Some((url, lsp_diagnostic)) = to_lsp_diagnostic(
                    db,
                    diagnostic,
                    self.encoding,
                    client_capabilities,
                    global_settings,
                ) else {
                    // Suppressed diagnostic (e.g. syntax errors disabled).
                    continue;
                };

                let Some(url) = url else {
                    tracing::warn!("Unable to find notebook cell");
                    continue;
                };

                cell_diagnostics
                    .entry(url)
                    .or_default()
                    .push(lsp_diagnostic);
            }

            LspDiagnostics::NotebookDocument(cell_diagnostics)
        } else {
            LspDiagnostics::TextDocument(
                self.items
                    .iter()
                    .filter_map(|diagnostic| {
                        Some(
                            to_lsp_diagnostic(
                                db,
                                diagnostic,
                                self.encoding,
                                client_capabilities,
                                global_settings,
                            )?
                            .1,
                        )
                    })
                    .collect(),
            )
        }
    }
}
/// Represents the diagnostics for a text document or a notebook document.
pub(super) enum LspDiagnostics {
    /// The diagnostics for a single text document.
    TextDocument(Vec<Diagnostic>),

    /// A map of cell URLs to the diagnostics for that cell.
    NotebookDocument(FxHashMap<Url, Vec<Diagnostic>>),
}
impl LspDiagnostics {
    /// Returns the diagnostics for a text document.
    ///
    /// # Panics
    ///
    /// Panics if the diagnostics are for a notebook document.
    pub(super) fn expect_text_document(self) -> Vec<Diagnostic> {
        if let LspDiagnostics::TextDocument(diagnostics) = self {
            diagnostics
        } else {
            panic!("Expected a text document diagnostics, but got notebook diagnostics")
        }
    }
}
/// Clears the pushed diagnostics for `document`, unless the client pulls
/// diagnostics for it (notebooks and cells are always pushed).
pub(super) fn clear_diagnostics_if_needed(
    document: &DocumentHandle,
    session: &Session,
    client: &Client,
) {
    let uses_pull_diagnostics = session.client_capabilities().supports_pull_diagnostics()
        && !document.is_cell_or_notebook();

    if !uses_pull_diagnostics {
        clear_diagnostics(document.url(), session, client);
    }
}
/// Clears the diagnostics for the document identified by `uri`.
///
/// This is done by notifying the client with an empty list of diagnostics for the document.
/// For notebook cells, this clears diagnostics for the specific cell.
/// For other document types, this clears diagnostics for the main document.
pub(super) fn clear_diagnostics(uri: &lsp_types::Url, session: &Session, client: &Client) {
    // With diagnostics disabled entirely, nothing was published that would
    // need clearing.
    if session.global_settings().diagnostic_mode().is_off() {
        return;
    }

    client.send_notification::<PublishDiagnostics>(PublishDiagnosticsParams {
        uri: uri.clone(),
        diagnostics: vec![],
        version: None,
    });
}
/// Publishes the diagnostics for the given document snapshot using the [publish diagnostics
/// notification], but only when the client can't pull them for this document.
///
/// Unlike [`publish_diagnostics`], this function only pushes when either the client doesn't
/// support pull diagnostics at all, or `document` is a notebook or cell (VS Code
/// does not support pull diagnostics for notebooks or cells, as of 2025-11-12).
///
/// [publish diagnostics notification]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_publishDiagnostics
pub(super) fn publish_diagnostics_if_needed(
    document: &DocumentHandle,
    session: &Session,
    client: &Client,
) {
    let must_push = document.is_cell_or_notebook()
        || !session.client_capabilities().supports_pull_diagnostics();

    if must_push {
        publish_diagnostics(document, session, client);
    }
}
/// Publishes the diagnostics for the given document snapshot using the [publish diagnostics
/// notification].
///
/// [publish diagnostics notification]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_publishDiagnostics
pub(super) fn publish_diagnostics(document: &DocumentHandle, session: &Session, client: &Client) {
    if session.global_settings().diagnostic_mode().is_off() {
        return;
    }

    let db = session.project_db(document.notebook_or_file_path());

    let Some(diagnostics) = compute_diagnostics(db, document, session.position_encoding()) else {
        return;
    };

    // Sends a notification to the client with the diagnostics for the document.
    let publish_diagnostics_notification = |uri: Url, diagnostics: Vec<Diagnostic>| {
        client.send_notification::<PublishDiagnostics>(PublishDiagnosticsParams {
            uri,
            diagnostics,
            version: Some(document.version()),
        });
    };

    match diagnostics.to_lsp_diagnostics(
        db,
        session.client_capabilities(),
        session.global_settings(),
    ) {
        LspDiagnostics::TextDocument(diagnostics) => {
            publish_diagnostics_notification(document.url().clone(), diagnostics);
        }
        LspDiagnostics::NotebookDocument(cell_diagnostics) => {
            // Notebooks are published per cell; each cell has its own URL.
            for (cell_url, diagnostics) in cell_diagnostics {
                publish_diagnostics_notification(cell_url, diagnostics);
            }
        }
    }
}
/// Publishes settings diagnostics for all the project at the given path
/// using the [publish diagnostics notification].
///
/// [publish diagnostics notification]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_publishDiagnostics
pub(crate) fn publish_settings_diagnostics(
    session: &mut Session,
    client: &Client,
    path: SystemPathBuf,
) {
    // Don't publish settings diagnostics for workspace that are already doing full diagnostics.
    //
    // Note we DO NOT respect the fact that clients support pulls because these are
    // files they *specifically* won't pull diagnostics from us for, because we don't
    // claim to be an LSP for them.
    match session.global_settings().diagnostic_mode() {
        DiagnosticMode::Workspace | DiagnosticMode::Off => {
            return;
        }
        DiagnosticMode::OpenFilesOnly => {}
    }

    let session_encoding = session.position_encoding();
    let client_capabilities = session.client_capabilities();
    let project_path = AnySystemPath::System(path);

    // Scope the mutable borrow of the project state; the publishing below only
    // needs the grouped diagnostics and the previously tracked URLs.
    let (mut diagnostics_by_url, old_untracked) = {
        let state = session.project_state_mut(&project_path);
        let db = &state.db;
        let project = db.project();
        let settings_diagnostics = project.check_settings(db);

        // We need to send diagnostics if we have non-empty ones, or we have ones to clear.
        // These will both almost always be empty so this function will almost always be a no-op.
        if settings_diagnostics.is_empty()
            && state.untracked_files_with_pushed_diagnostics.is_empty()
        {
            return;
        }

        // Group diagnostics by URL
        let mut diagnostics_by_url: FxHashMap<Url, Vec<_>> = FxHashMap::default();
        for diagnostic in settings_diagnostics {
            if let Some(span) = diagnostic.primary_span() {
                let file = span.expect_ty_file();
                let Some(url) = file_to_url(db, file) else {
                    tracing::debug!("Failed to convert file to URL at {}", file.path(db));
                    continue;
                };
                diagnostics_by_url.entry(url).or_default().push(diagnostic);
            }
        }

        // Record the URLs we're sending non-empty diagnostics for, so we know to clear them
        // the next time we publish settings diagnostics!
        let old_untracked = std::mem::replace(
            &mut state.untracked_files_with_pushed_diagnostics,
            diagnostics_by_url.keys().cloned().collect(),
        );

        (diagnostics_by_url, old_untracked)
    };

    // Add empty diagnostics for any files that had diagnostics before but don't now.
    // This will clear them (either the file is no longer relevant to us or fixed!)
    for url in old_untracked {
        diagnostics_by_url.entry(url).or_default();
    }

    let db = session.project_db(&project_path);
    let global_settings = session.global_settings();

    // Send the settings diagnostics!
    for (url, file_diagnostics) in diagnostics_by_url {
        // Convert diagnostics to LSP format
        let lsp_diagnostics = file_diagnostics
            .into_iter()
            .filter_map(|diagnostic| {
                Some(
                    to_lsp_diagnostic(
                        db,
                        &diagnostic,
                        session_encoding,
                        client_capabilities,
                        global_settings,
                    )?
                    .1,
                )
            })
            .collect::<Vec<_>>();

        client.send_notification::<PublishDiagnostics>(PublishDiagnosticsParams {
            uri: url,
            diagnostics: lsp_diagnostics,
            version: None,
        });
    }
}
/// Runs the checks for `document` and returns the resulting [`Diagnostics`].
///
/// Returns `None` when no file can be resolved for the document snapshot.
pub(super) fn compute_diagnostics(
    db: &ProjectDatabase,
    document: &DocumentHandle,
    encoding: PositionEncoding,
) -> Option<Diagnostics> {
    let file = match document.notebook_or_file(db) {
        Some(file) => file,
        None => {
            tracing::info!(
                "No file found for snapshot for `{}`",
                document.notebook_or_file_path()
            );
            return None;
        }
    };

    Some(Diagnostics {
        items: db.check_file(file),
        encoding,
        file_or_notebook: file,
    })
}
/// Converts the tool specific [`Diagnostic`][ruff_db::diagnostic::Diagnostic] to an LSP
/// [`Diagnostic`].
///
/// Returns the URL of the document the diagnostic belongs to (when the primary
/// span could be resolved to a location) together with the LSP diagnostic.
/// Returns `None` when the diagnostic is a syntax error and the user disabled
/// showing syntax errors.
pub(super) fn to_lsp_diagnostic(
    db: &dyn Db,
    diagnostic: &ruff_db::diagnostic::Diagnostic,
    encoding: PositionEncoding,
    client_capabilities: ResolvedClientCapabilities,
    global_settings: &GlobalSettings,
) -> Option<(Option<lsp_types::Url>, Diagnostic)> {
    if diagnostic.is_invalid_syntax() && !global_settings.show_syntax_errors() {
        return None;
    }

    let supports_related_information =
        client_capabilities.supports_diagnostic_related_information();

    let location = diagnostic.primary_span().and_then(|span| {
        let file = span.expect_ty_file();
        span.range()?
            .to_lsp_range(db, file, encoding)
            .unwrap_or_default()
            .to_location()
    });

    // Diagnostics without a resolvable location fall back to an empty range
    // and no URL.
    let (range, url) = match location {
        Some(location) => (location.range, Some(location.uri)),
        None => (lsp_types::Range::default(), None),
    };

    let severity = match diagnostic.severity() {
        Severity::Info => DiagnosticSeverity::INFORMATION,
        Severity::Warning => DiagnosticSeverity::WARNING,
        Severity::Error | Severity::Fatal => DiagnosticSeverity::ERROR,
    };

    let tags = diagnostic
        .primary_tags()
        .map(|tags| {
            tags.iter()
                .map(|tag| match tag {
                    ruff_db::diagnostic::DiagnosticTag::Unnecessary => DiagnosticTag::UNNECESSARY,
                    ruff_db::diagnostic::DiagnosticTag::Deprecated => DiagnosticTag::DEPRECATED,
                })
                .collect::<Vec<DiagnosticTag>>()
        })
        .filter(|mapped_tags| !mapped_tags.is_empty());

    let code_description = diagnostic.documentation_url().and_then(|url| {
        let href = Url::parse(url).ok()?;
        Some(CodeDescription { href })
    });

    // Secondary annotations and sub-diagnostics are surfaced as related
    // information when the client supports it.
    let related_information =
        if supports_related_information {
            let mut related_information = Vec::new();

            related_information.extend(diagnostic.secondary_annotations().filter_map(
                |annotation| annotation_to_related_information(db, annotation, encoding),
            ));

            for sub_diagnostic in diagnostic.sub_diagnostics() {
                related_information.extend(sub_diagnostic_to_related_information(
                    db,
                    sub_diagnostic,
                    encoding,
                ));

                related_information.extend(
                    sub_diagnostic
                        .annotations()
                        .iter()
                        .filter(|annotation| !annotation.is_primary())
                        .filter_map(|annotation| {
                            annotation_to_related_information(db, annotation, encoding)
                        }),
                );
            }

            Some(related_information)
        } else {
            None
        };

    // Attach the fix's title and edits (if any) to the diagnostic's `data` field.
    let data = DiagnosticData::try_from_diagnostic(db, diagnostic, encoding);

    Some((
        url,
        Diagnostic {
            range,
            severity: Some(severity),
            tags,
            code: Some(NumberOrString::String(diagnostic.id().to_string())),
            code_description,
            source: Some(DIAGNOSTIC_NAME.into()),
            message: if supports_related_information {
                // Show both the primary and annotation messages if available,
                // because we don't create a related information for the primary message.
                if let Some(annotation_message) = diagnostic
                    .primary_annotation()
                    .and_then(|annotation| annotation.get_message())
                {
                    format!("{}: {annotation_message}", diagnostic.primary_message())
                } else {
                    diagnostic.primary_message().to_string()
                }
            } else {
                diagnostic.concise_message().to_string()
            },
            related_information,
            data: serde_json::to_value(data).ok(),
        },
    ))
}
/// Converts an [`Annotation`] to a [`DiagnosticRelatedInformation`].
///
/// Returns `None` when the annotation carries no message, or when its span
/// cannot be resolved to an LSP location.
fn annotation_to_related_information(
    db: &dyn Db,
    annotation: &Annotation,
    encoding: PositionEncoding,
) -> Option<DiagnosticRelatedInformation> {
    let message = annotation.get_message()?.to_string();
    let file_range = FileRange::try_from(annotation.get_span()).ok()?;
    Some(DiagnosticRelatedInformation {
        location: file_range.to_lsp_range(db, encoding)?.into_location()?,
        message,
    })
}
/// Converts a [`SubDiagnostic`] to a [`DiagnosticRelatedInformation`].
///
/// Returns `None` when the sub-diagnostic has no primary annotation, or when
/// that annotation's span cannot be resolved to an LSP location.
fn sub_diagnostic_to_related_information(
    db: &dyn Db,
    diagnostic: &SubDiagnostic,
    encoding: PositionEncoding,
) -> Option<DiagnosticRelatedInformation> {
    let span = diagnostic.primary_annotation()?.get_span();
    let location = FileRange::try_from(span)
        .ok()?
        .to_lsp_range(db, encoding)?
        .into_location()?;
    Some(DiagnosticRelatedInformation {
        location,
        message: diagnostic.concise_message().to_string(),
    })
}
/// Extra payload attached to a published diagnostic via the LSP `data` field.
///
/// It is serialized to JSON when the diagnostic is sent and deserialized later
/// so a quick fix can be reconstructed without re-running the analysis.
#[derive(Serialize, Deserialize)]
pub(crate) struct DiagnosticData {
    /// Human-readable title for the fix (shown as the code-action label).
    pub(crate) fix_title: String,
    /// The fix's text edits, grouped by the URL of the document they apply to.
    pub(crate) edits: HashMap<Url, Vec<lsp_types::TextEdit>>,
}
impl DiagnosticData {
    /// Builds the serialized fix data for `diagnostic`, if it carries a fix.
    ///
    /// Returns `None` when the diagnostic has no fix, the fix does not apply
    /// at the `Unsafe` applicability threshold, the diagnostic has no primary
    /// span, or any edit range fails to convert to an LSP location.
    fn try_from_diagnostic(
        db: &dyn Db,
        diagnostic: &ruff_db::diagnostic::Diagnostic,
        encoding: PositionEncoding,
    ) -> Option<Self> {
        let fix = diagnostic
            .fix()
            .filter(|fix| fix.applies(Applicability::Unsafe))?;
        let primary_span = diagnostic.primary_span()?;
        let file = primary_span.expect_ty_file();
        // Group the fix's edits by target document; an edit may touch a file
        // other than the one the diagnostic is reported in.
        let mut lsp_edits: HashMap<Url, Vec<lsp_types::TextEdit>> = HashMap::new();
        for edit in fix.edits() {
            let location = edit
                .range()
                .to_lsp_range(db, file, encoding)?
                .to_location()?;
            lsp_edits
                .entry(location.uri)
                .or_default()
                .push(lsp_types::TextEdit {
                    range: location.range,
                    // An edit without content is a deletion: empty replacement.
                    new_text: edit.content().unwrap_or_default().to_string(),
                });
        }
        Some(Self {
            // Fall back to a generic "Fix <rule-id>" title when the diagnostic
            // has no help text.
            fix_title: diagnostic
                .first_help_text()
                .map(ToString::to_string)
                .unwrap_or_else(|| format!("Fix {}", diagnostic.id())),
            edits: lsp_edits,
        })
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/semantic_tokens.rs | crates/ty_server/src/server/api/semantic_tokens.rs | use lsp_types::SemanticToken;
use ruff_db::source::{line_index, source_text};
use ruff_source_file::OneIndexed;
use ruff_text_size::{Ranged, TextRange};
use ty_ide::{SemanticTokenModifier, SemanticTokenType, semantic_tokens};
use ty_project::ProjectDatabase;
use crate::document::{PositionEncoding, ToRangeExt};
/// Common logic for generating semantic tokens, either for full document or a specific range.
/// If no range is provided, the entire file is processed.
///
/// Tokens are returned in the LSP delta encoding (see [`Encoder`]). When the
/// client does not support multiline tokens, any token spanning several lines
/// is split into one token per line.
pub(crate) fn generate_semantic_tokens(
    db: &ProjectDatabase,
    file: ruff_db::files::File,
    range: Option<TextRange>,
    encoding: PositionEncoding,
    multiline_token_support: bool,
) -> Vec<SemanticToken> {
    let source = source_text(db, file);
    let line_index = line_index(db, file);
    let semantic_token_data = semantic_tokens(db, file, range);
    let mut encoder = Encoder {
        tokens: Vec::with_capacity(semantic_token_data.len()),
        prev_line: 0,
        prev_start: 0,
    };
    for token in &*semantic_token_data {
        // Skip tokens whose range cannot be mapped into the requested encoding.
        let Some(lsp_range) = token
            .range()
            .to_lsp_range(db, file, encoding)
            .map(|lsp_range| lsp_range.local_range())
        else {
            continue;
        };
        if lsp_range.start.line == lsp_range.end.line {
            // Single-line token: length is the character delta on that line.
            let len = lsp_range.end.character - lsp_range.start.character;
            encoder.push_token_at(lsp_range.start, len, token.token_type, token.modifiers);
        } else if multiline_token_support {
            // If the client supports multiline-tokens,
            // compute the length of the entire range.
            // Sum the full lengths of all lines except the last...
            let mut len = 0;
            for line in lsp_range.start.line..lsp_range.end.line {
                let line_len = line_index.line_len(
                    OneIndexed::from_zero_indexed(line as usize),
                    &source,
                    encoding.into(),
                );
                len += u32::try_from(line_len).unwrap();
            }
            // Subtract the first line because we added the length from the beginning.
            len -= lsp_range.start.character;
            // We didn't compute the length of the last line, add it now.
            len += lsp_range.end.character;
            encoder.push_token_at(lsp_range.start, len, token.token_type, token.modifiers);
        } else {
            // Multiline token but the client only supports single line tokens
            // Push a token for each line.
            for line in lsp_range.start.line..=lsp_range.end.line {
                // The first line starts mid-line at the token start; all
                // subsequent lines start at column 0.
                let start_character = if line == lsp_range.start.line {
                    lsp_range.start.character
                } else {
                    0
                };
                let start = lsp_types::Position {
                    line,
                    character: start_character,
                };
                // The last line ends at the token end; all earlier lines run
                // to the end of the line.
                let end = if line == lsp_range.end.line {
                    lsp_range.end.character
                } else {
                    let line_len = line_index.line_len(
                        OneIndexed::from_zero_indexed(line as usize),
                        &source,
                        encoding.into(),
                    );
                    u32::try_from(line_len).unwrap()
                };
                let len = end - start.character;
                encoder.push_token_at(start, len, token.token_type, token.modifiers);
            }
        }
    }
    encoder.tokens
}
/// Incrementally builds the delta-encoded semantic token list required by the
/// LSP specification.
struct Encoder {
    /// Output tokens, in source order.
    tokens: Vec<SemanticToken>,
    /// Zero-based line of the previously pushed token (for delta encoding).
    prev_line: u32,
    /// Start character of the previously pushed token (for delta encoding).
    prev_start: u32,
}
impl Encoder {
    /// Appends one token, converting its absolute `start` position into the
    /// delta encoding required by the LSP specification.
    fn push_token_at(
        &mut self,
        start: lsp_types::Position,
        length: u32,
        ty: SemanticTokenType,
        modifiers: SemanticTokenModifier,
    ) {
        // Each token is encoded relative to its predecessor: the line delta,
        // and a start-character delta that is only relative while we stay on
        // the same line (otherwise it is absolute from column 0).
        let delta_line = start.line - self.prev_line;
        let same_line = delta_line == 0;
        let delta_start = if same_line {
            start.character - self.prev_start
        } else {
            start.character
        };

        self.tokens.push(SemanticToken {
            delta_line,
            delta_start,
            length,
            token_type: ty as u32,
            token_modifiers_bitset: modifiers.bits(),
        });

        // Remember the absolute position for the next token's deltas.
        self.prev_line = start.line;
        self.prev_start = start.character;
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/symbols.rs | crates/ty_server/src/server/api/symbols.rs | //! Utility functions common to language server request handlers
//! that return symbol information.
use lsp_types::{SymbolInformation, SymbolKind};
use ty_ide::SymbolInfo;
use crate::Db;
use crate::document::{PositionEncoding, ToRangeExt};
/// Convert `ty_ide` `SymbolKind` to LSP `SymbolKind`
pub(crate) fn convert_symbol_kind(kind: ty_ide::SymbolKind) -> SymbolKind {
match kind {
ty_ide::SymbolKind::Module => SymbolKind::MODULE,
ty_ide::SymbolKind::Class => SymbolKind::CLASS,
ty_ide::SymbolKind::Method => SymbolKind::METHOD,
ty_ide::SymbolKind::Function => SymbolKind::FUNCTION,
ty_ide::SymbolKind::Variable => SymbolKind::VARIABLE,
ty_ide::SymbolKind::Constant => SymbolKind::CONSTANT,
ty_ide::SymbolKind::Property => SymbolKind::PROPERTY,
ty_ide::SymbolKind::Field => SymbolKind::FIELD,
ty_ide::SymbolKind::Constructor => SymbolKind::CONSTRUCTOR,
ty_ide::SymbolKind::Parameter => SymbolKind::VARIABLE,
ty_ide::SymbolKind::TypeParameter => SymbolKind::TYPE_PARAMETER,
ty_ide::SymbolKind::Import => SymbolKind::MODULE,
}
}
/// Convert a `ty_ide` `SymbolInfo` to LSP `SymbolInformation`
///
/// Returns `None` if the symbol's range cannot be converted to a location
/// (e.g., if the file cannot be converted to a URL).
pub(crate) fn convert_to_lsp_symbol_information(
    db: &dyn Db,
    file: ruff_db::files::File,
    symbol: SymbolInfo<'_>,
    encoding: PositionEncoding,
) -> Option<SymbolInformation> {
    let location = symbol
        .full_range
        .to_lsp_range(db, file, encoding)?
        .to_location()?;

    Some(SymbolInformation {
        name: symbol.name.into_owned(),
        kind: convert_symbol_kind(symbol.kind),
        tags: None,
        // The `deprecated` field is deprecated in the LSP spec in favor of
        // `tags`, but it is still part of the struct.
        #[allow(deprecated)]
        deprecated: None,
        location,
        container_name: None,
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/traits.rs | crates/ty_server/src/server/api/traits.rs | //! Traits for handling requests and notifications from the LSP client.
//!
//! This module defines the trait abstractions used by the language server to handle incoming
//! requests and notifications from clients. It provides a type-safe way to implement LSP handlers
//! with different execution models (synchronous or asynchronous) and automatic retry capabilities.
//!
//! All request and notification handlers must implement the base traits [`RequestHandler`] and
//! [`NotificationHandler`], respectively, which associate them with specific LSP request or
//! notification types. These base traits are then extended by more specific traits that define
//! the execution model of the handler.
//!
//! The [`SyncRequestHandler`] and [`SyncNotificationHandler`] traits are for handlers that
//! executes synchronously on the main loop, providing mutable access to the [`Session`] that
//! contains the current state of the server. This is useful for handlers that need to modify
//! the server state such as when the content of a file changes.
//!
//! The [`BackgroundDocumentRequestHandler`] and [`BackgroundDocumentNotificationHandler`] traits
//! are for handlers that operate on a single document and can be executed on a background thread.
//! These handlers will have access to a snapshot of the document at the time of the request or
//! notification, allowing them to perform operations without blocking the main loop. There is also
//! the [`BackgroundRequestHandler`] trait for handlers that operate on the entire session, which
//! includes all the workspaces, instead of a single document and can also be executed on a
//! background thread like fetching the workspace diagnostics.
//!
//! The [`RetriableRequestHandler`] trait is a marker trait for handlers that can be retried if the
//! Salsa database is modified during execution.
//!
//! The [`SyncNotificationHandler`] is the most common trait that would be used because most
//! notifications are specific to a single document and require updating the server state.
//! Similarly, the [`BackgroundDocumentRequestHandler`] is the most common request handler that
//! would be used as most requests are document-specific and can be executed in the background.
//!
//! See the `./requests` and `./notifications` directories for concrete implementations of these
//! traits in action.
use crate::session::client::Client;
use crate::session::{DocumentSnapshot, Session, SessionSnapshot};
use lsp_server::RequestId;
use std::borrow::Cow;
use lsp_types::Url;
use lsp_types::notification::Notification;
use lsp_types::request::Request;
use ty_project::ProjectDatabase;
/// A supertrait for any server request handler.
pub(super) trait RequestHandler {
    /// The `lsp_types` request serviced by this handler.
    type RequestType: Request;
    /// The LSP method name (e.g. `"textDocument/hover"`), taken from the
    /// associated request type.
    const METHOD: &'static str = <<Self as RequestHandler>::RequestType>::METHOD;
}
/// A request handler that needs mutable access to the session.
///
/// This will block the main message receiver loop, meaning that no incoming requests or
/// notifications will be handled while `run` is executing. Try to avoid doing any I/O or
/// long-running computations.
pub(super) trait SyncRequestHandler: RequestHandler {
    /// Handles the request synchronously on the main loop, with mutable
    /// access to the server state.
    fn run(
        session: &mut Session,
        client: &Client,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
}
/// A marker trait describing how a handler behaves when the Salsa database is
/// modified while the request is in flight.
pub(super) trait RetriableRequestHandler: RequestHandler {
    /// Whether this request can be cancelled if the Salsa database is modified.
    const RETRY_ON_CANCELLATION: bool = false;
    /// The error to return if the request was cancelled due to a modification to the Salsa
    /// database.
    ///
    /// By default, this returns a [`ContentModified`] error to indicate that the content of a
    /// document has changed since the request was made.
    ///
    /// [`ContentModified`]: lsp_server::ErrorCode::ContentModified
    fn salsa_cancellation_error() -> lsp_server::ResponseError {
        lsp_server::ResponseError {
            code: lsp_server::ErrorCode::ContentModified as i32,
            message: "content modified".to_string(),
            data: None,
        }
    }
}
/// A request handler that can be run on a background thread.
///
/// This handler is specific to requests that operate on a single document.
pub(super) trait BackgroundDocumentRequestHandler: RetriableRequestHandler {
    /// Returns the URL of the document that this request handler operates on.
    fn document_url(
        params: &<<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> Cow<'_, Url>;
    /// Processes the request parameters and returns the LSP request result.
    ///
    /// This is the main method that handlers implement. It takes the request parameters
    /// from the client and computes the appropriate response data for the LSP request.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        client: &Client,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
    /// Handles the entire request lifecycle and sends the response to the client.
    ///
    /// It allows handlers to customize how the server sends the response to the client.
    fn handle_request(
        id: &RequestId,
        db: &ProjectDatabase,
        snapshot: DocumentSnapshot,
        client: &Client,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) {
        let result = Self::run_with_snapshot(db, &snapshot, client, params);
        // Surface failures to the user in addition to responding with the
        // error, so problems are visible without opening the logs.
        if let Err(err) = &result {
            tracing::error!("An error occurred with request ID {id}: {err}");
            client.show_error_message("ty encountered a problem. Check the logs for more details.");
        }
        client.respond(id, result);
    }
}
/// A request handler that can be run on a background thread.
///
/// Unlike [`BackgroundDocumentRequestHandler`], this handler operates on the entire session,
/// which includes all the workspaces, without being tied to a specific document. It is useful for
/// operations that require access to the entire session state, such as fetching workspace
/// diagnostics.
pub(super) trait BackgroundRequestHandler: RetriableRequestHandler {
    /// Processes the request parameters and returns the LSP request result.
    ///
    /// This is the main method that handlers implement. It takes the request parameters
    /// from the client and computes the appropriate response data for the LSP request.
    fn run(
        snapshot: &SessionSnapshot,
        client: &Client,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
    /// Handles the request lifecycle and sends the response to the client.
    ///
    /// It allows handlers to customize how the server sends the response to the client.
    fn handle_request(
        id: &RequestId,
        snapshot: SessionSnapshot,
        client: &Client,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) {
        let result = Self::run(&snapshot, client, params);
        // Surface failures to the user in addition to responding with the
        // error, so problems are visible without opening the logs.
        if let Err(err) = &result {
            tracing::error!("An error occurred with request ID {id}: {err}");
            client.show_error_message("ty encountered a problem. Check the logs for more details.");
        }
        client.respond(id, result);
    }
}
/// A supertrait for any server notification handler.
pub(super) trait NotificationHandler {
    /// The `lsp_types` notification serviced by this handler.
    type NotificationType: Notification;
    /// The LSP method name (e.g. `"textDocument/didChange"`), taken from the
    /// associated notification type.
    const METHOD: &'static str = <<Self as NotificationHandler>::NotificationType>::METHOD;
}
/// A notification handler that needs mutable access to the session.
///
/// This will block the main message receiver loop, meaning that no incoming requests or
/// notifications will be handled while `run` is executing. Try to avoid doing any I/O or
/// long-running computations.
pub(super) trait SyncNotificationHandler: NotificationHandler {
    /// Handles the notification synchronously on the main loop, with mutable
    /// access to the server state.
    fn run(
        session: &mut Session,
        client: &Client,
        params: <<Self as NotificationHandler>::NotificationType as Notification>::Params,
    ) -> super::Result<()>;
}
/// A notification handler that can be run on a background thread.
pub(super) trait BackgroundDocumentNotificationHandler: NotificationHandler {
    /// Returns the URL of the document that this notification handler operates on.
    fn document_url(
        params: &<<Self as NotificationHandler>::NotificationType as Notification>::Params,
    ) -> Cow<'_, Url>;
    /// Processes the notification against a snapshot of the document taken at
    /// the time the notification was received.
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        client: &Client,
        params: <<Self as NotificationHandler>::NotificationType as Notification>::Params,
    ) -> super::Result<()>;
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/notifications.rs | crates/ty_server/src/server/api/notifications.rs | mod cancel;
mod did_change;
mod did_change_notebook;
mod did_change_watched_files;
mod did_close;
mod did_close_notebook;
mod did_open;
mod did_open_notebook;
pub(super) use cancel::CancelNotificationHandler;
pub(super) use did_change::DidChangeTextDocumentHandler;
pub(super) use did_change_notebook::DidChangeNotebookHandler;
pub(super) use did_change_watched_files::DidChangeWatchedFiles;
pub(super) use did_close::DidCloseTextDocumentHandler;
pub(super) use did_close_notebook::DidCloseNotebookHandler;
pub(super) use did_open::DidOpenTextDocumentHandler;
pub(super) use did_open_notebook::DidOpenNotebookHandler;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests.rs | crates/ty_server/src/server/api/requests.rs | mod code_action;
mod completion;
mod diagnostic;
mod doc_highlights;
mod document_symbols;
mod execute_command;
mod goto_declaration;
mod goto_definition;
mod goto_type_definition;
mod hover;
mod inlay_hints;
mod prepare_rename;
mod references;
mod rename;
mod selection_range;
mod semantic_tokens;
mod semantic_tokens_range;
mod shutdown;
mod signature_help;
mod workspace_diagnostic;
mod workspace_symbols;
pub(super) use code_action::CodeActionRequestHandler;
pub(super) use completion::CompletionRequestHandler;
pub(super) use diagnostic::DocumentDiagnosticRequestHandler;
pub(super) use doc_highlights::DocumentHighlightRequestHandler;
pub(super) use document_symbols::DocumentSymbolRequestHandler;
pub(super) use execute_command::ExecuteCommand;
pub(super) use goto_declaration::GotoDeclarationRequestHandler;
pub(super) use goto_definition::GotoDefinitionRequestHandler;
pub(super) use goto_type_definition::GotoTypeDefinitionRequestHandler;
pub(super) use hover::HoverRequestHandler;
pub(super) use inlay_hints::InlayHintRequestHandler;
pub(super) use prepare_rename::PrepareRenameRequestHandler;
pub(super) use references::ReferencesRequestHandler;
pub(super) use rename::RenameRequestHandler;
pub(super) use selection_range::SelectionRangeRequestHandler;
pub(super) use semantic_tokens::SemanticTokensRequestHandler;
pub(super) use semantic_tokens_range::SemanticTokensRangeRequestHandler;
pub(super) use shutdown::ShutdownHandler;
pub(super) use signature_help::SignatureHelpRequestHandler;
pub(super) use workspace_diagnostic::WorkspaceDiagnosticRequestHandler;
pub(super) use workspace_symbols::WorkspaceSymbolRequestHandler;
pub use workspace_diagnostic::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/doc_highlights.rs | crates/ty_server/src/server/api/requests/doc_highlights.rs | use std::borrow::Cow;
use lsp_types::request::DocumentHighlightRequest;
use lsp_types::{DocumentHighlight, DocumentHighlightKind, DocumentHighlightParams, Url};
use ty_ide::{ReferenceKind, document_highlights};
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToRangeExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handler for the `textDocument/documentHighlight` request.
pub(crate) struct DocumentHighlightRequestHandler;
impl RequestHandler for DocumentHighlightRequestHandler {
    type RequestType = DocumentHighlightRequest;
}
impl BackgroundDocumentRequestHandler for DocumentHighlightRequestHandler {
    fn document_url(params: &DocumentHighlightParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Computes document highlights for the symbol at the requested position.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: DocumentHighlightParams,
    ) -> crate::server::Result<Option<Vec<DocumentHighlight>>> {
        // Respect the per-workspace opt-out for language services.
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        let position = params.text_document_position_params.position;
        let Some(offset) = position.to_text_size(db, file, snapshot.url(), snapshot.encoding())
        else {
            return Ok(None);
        };

        let Some(targets) = document_highlights(db, file, offset) else {
            return Ok(None);
        };

        // Convert each target range back to an LSP range, silently dropping
        // any that cannot be mapped.
        let mut highlights = Vec::new();
        for target in targets {
            let Some(lsp_range) = target.range().to_lsp_range(db, file, snapshot.encoding())
            else {
                continue;
            };
            let kind = Some(match target.kind() {
                ReferenceKind::Read => DocumentHighlightKind::READ,
                ReferenceKind::Write => DocumentHighlightKind::WRITE,
                ReferenceKind::Other => DocumentHighlightKind::TEXT,
            });
            highlights.push(DocumentHighlight {
                range: lsp_range.local_range(),
                kind,
            });
        }

        Ok(Some(highlights))
    }
}
impl RetriableRequestHandler for DocumentHighlightRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/semantic_tokens_range.rs | crates/ty_server/src/server/api/requests/semantic_tokens_range.rs | use std::borrow::Cow;
use lsp_types::{SemanticTokens, SemanticTokensRangeParams, SemanticTokensRangeResult, Url};
use ty_project::ProjectDatabase;
use crate::document::RangeExt;
use crate::server::api::semantic_tokens::generate_semantic_tokens;
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handler for the `textDocument/semanticTokens/range` request.
pub(crate) struct SemanticTokensRangeRequestHandler;
impl RequestHandler for SemanticTokensRangeRequestHandler {
    type RequestType = lsp_types::request::SemanticTokensRangeRequest;
}
impl BackgroundDocumentRequestHandler for SemanticTokensRangeRequestHandler {
    fn document_url(params: &SemanticTokensRangeParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }

    /// Generates semantic tokens for the requested sub-range of the document.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: SemanticTokensRangeParams,
    ) -> crate::server::Result<Option<SemanticTokensRangeResult>> {
        // Respect the per-workspace opt-out for language services.
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        // Map the LSP range from the request onto a text range in the file.
        let Some(requested_range) =
            params
                .range
                .to_text_range(db, file, snapshot.url(), snapshot.encoding())
        else {
            return Ok(None);
        };

        let multiline_support = snapshot
            .resolved_client_capabilities()
            .supports_multiline_semantic_tokens();
        let data = generate_semantic_tokens(
            db,
            file,
            Some(requested_range),
            snapshot.encoding(),
            multiline_support,
        );

        Ok(Some(SemanticTokensRangeResult::Tokens(SemanticTokens {
            result_id: None,
            data,
        })))
    }
}
impl RetriableRequestHandler for SemanticTokensRangeRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/shutdown.rs | crates/ty_server/src/server/api/requests/shutdown.rs | use crate::Session;
use crate::server::api::traits::{RequestHandler, SyncRequestHandler};
use crate::session::client::Client;
use lsp_types::{WorkspaceDiagnosticReport, WorkspaceDiagnosticReportResult};
use salsa::Database;
/// Handler for the LSP `shutdown` request.
pub(crate) struct ShutdownHandler;
impl RequestHandler for ShutdownHandler {
    type RequestType = lsp_types::request::Shutdown;
}
impl SyncRequestHandler for ShutdownHandler {
    /// Marks the session as shutting down and unblocks in-flight work.
    ///
    /// The server keeps running after this request; per the LSP lifecycle, it
    /// terminates only once the client sends the `exit` notification.
    fn run(session: &mut Session, client: &Client, _params: ()) -> crate::server::Result<()> {
        tracing::debug!("Received shutdown request, waiting for exit notification");
        // Respond to any pending workspace diagnostic requests
        // (with an empty report, so the client is not left waiting).
        if let Some(suspended_workspace_request) =
            session.take_suspended_workspace_diagnostic_request()
        {
            client.respond(
                &suspended_workspace_request.id,
                Ok(WorkspaceDiagnosticReportResult::Report(
                    WorkspaceDiagnosticReport::default(),
                )),
            );
        }
        session.set_shutdown_requested(true);
        // Trigger cancellation for every db to cancel any compute intensive background tasks
        // (e.g. workspace diagnostics or workspace symbols).
        for db in session.projects_mut() {
            db.trigger_cancellation();
        }
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/goto_declaration.rs | crates/ty_server/src/server/api/requests/goto_declaration.rs | use std::borrow::Cow;
use lsp_types::request::{GotoDeclaration, GotoDeclarationParams};
use lsp_types::{GotoDefinitionResponse, Url};
use ty_ide::goto_declaration;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handler for the `textDocument/declaration` request.
pub(crate) struct GotoDeclarationRequestHandler;
impl RequestHandler for GotoDeclarationRequestHandler {
    type RequestType = GotoDeclaration;
}
impl BackgroundDocumentRequestHandler for GotoDeclarationRequestHandler {
    fn document_url(params: &GotoDeclarationParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Resolves the declaration target(s) for the symbol at the requested
    /// position.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: GotoDeclarationParams,
    ) -> crate::server::Result<Option<GotoDefinitionResponse>> {
        // Respect the per-workspace opt-out for language services.
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        let position = params.text_document_position_params.position;
        let Some(offset) = position.to_text_size(db, file, snapshot.url(), snapshot.encoding())
        else {
            return Ok(None);
        };

        let Some(targets) = goto_declaration(db, file, offset) else {
            return Ok(None);
        };

        // Clients that understand `LocationLink` get the richer link form
        // (including the origin range); others get plain `Location`s.
        let encoding = snapshot.encoding();
        let response = if snapshot
            .resolved_client_capabilities()
            .supports_declaration_link()
        {
            let origin = Some(targets.range);
            GotoDefinitionResponse::Link(
                targets
                    .into_iter()
                    .filter_map(|target| target.to_link(db, origin, encoding))
                    .collect(),
            )
        } else {
            GotoDefinitionResponse::Array(
                targets
                    .into_iter()
                    .filter_map(|target| target.to_location(db, encoding))
                    .collect(),
            )
        };
        Ok(Some(response))
    }
}
impl RetriableRequestHandler for GotoDeclarationRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/completion.rs | crates/ty_server/src/server/api/requests/completion.rs | use std::borrow::Cow;
use std::time::Instant;
use lsp_types::request::Completion;
use lsp_types::{
CompletionItem, CompletionItemKind, CompletionItemLabelDetails, CompletionList,
CompletionParams, CompletionResponse, Documentation, TextEdit, Url,
};
use ruff_source_file::OneIndexed;
use ruff_text_size::Ranged;
use ty_ide::{CompletionKind, completion};
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToRangeExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handler for the `textDocument/completion` request.
pub(crate) struct CompletionRequestHandler;
impl RequestHandler for CompletionRequestHandler {
    type RequestType = Completion;
}
impl BackgroundDocumentRequestHandler for CompletionRequestHandler {
    fn document_url(params: &CompletionParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position.text_document.uri)
    }
    /// Computes completion suggestions at the requested position and converts
    /// them to LSP `CompletionItem`s.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: CompletionParams,
    ) -> crate::server::Result<Option<CompletionResponse>> {
        // Timing is logged at the end; completion latency is user-visible.
        let start = Instant::now();
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let Some(offset) = params.text_document_position.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };
        let settings = snapshot.workspace_settings().completions();
        let completions = completion(db, settings, file, offset);
        if completions.is_empty() {
            return Ok(None);
        }
        // Width (in digits) of the largest index, used to pad `sort_text` so
        // that lexicographic ordering matches the ranking of the suggestions.
        // Safety: we just checked that completions is not empty.
        let max_index_len = OneIndexed::new(completions.len()).unwrap().digits().get();
        let items: Vec<CompletionItem> = completions
            .into_iter()
            .enumerate()
            .map(|(i, comp)| {
                let kind = comp.kind(db).map(ty_kind_to_lsp_kind);
                let type_display = comp.ty.map(|ty| ty.display(db).to_string());
                // Optional auto-import edit that inserts the missing import
                // when this completion is accepted.
                let import_edit = comp.import.as_ref().and_then(|edit| {
                    let range = edit
                        .range()
                        .to_lsp_range(db, file, snapshot.encoding())?
                        .local_range();
                    Some(TextEdit {
                        range,
                        new_text: edit.content().map(ToString::to_string).unwrap_or_default(),
                    })
                });
                let name = comp.insert.as_deref().unwrap_or(&comp.name).to_string();
                // " (import module)" hint, shown only when an import edit
                // will actually be applied.
                let import_suffix = comp
                    .module_name
                    .and_then(|name| import_edit.is_some().then(|| format!(" (import {name})")));
                // Prefer structured label details when the client supports
                // them; otherwise fold the import hint into the label text.
                let (label, label_details) = if snapshot
                    .resolved_client_capabilities()
                    .supports_completion_item_label_details()
                {
                    let label_details = CompletionItemLabelDetails {
                        detail: import_suffix,
                        description: type_display.clone(),
                    };
                    (name, Some(label_details))
                } else {
                    let label = import_suffix
                        .map(|suffix| format!("{name}{suffix}"))
                        .unwrap_or_else(|| name);
                    (label, None)
                };
                // Render docstrings as Markdown or plain text depending on
                // the client's declared preference.
                let documentation = comp.documentation.map(|docstring| {
                    let (kind, value) = if snapshot
                        .resolved_client_capabilities()
                        .prefers_markdown_in_completion()
                    {
                        (lsp_types::MarkupKind::Markdown, docstring.render_markdown())
                    } else {
                        (
                            lsp_types::MarkupKind::PlainText,
                            docstring.render_plaintext(),
                        )
                    };
                    Documentation::MarkupContent(lsp_types::MarkupContent { kind, value })
                });
                CompletionItem {
                    label,
                    kind,
                    // Space-padded index keeps lexicographic order equal to
                    // the ranked order of the suggestions.
                    sort_text: Some(format!("{i:-max_index_len$}")),
                    detail: type_display,
                    label_details,
                    insert_text: comp.insert.map(String::from),
                    additional_text_edits: import_edit.map(|edit| vec![edit]),
                    documentation,
                    ..Default::default()
                }
            })
            .collect();
        let len = items.len();
        // `is_incomplete: true` makes the client re-query as the user types.
        let response = CompletionResponse::List(CompletionList {
            is_incomplete: true,
            items,
        });
        tracing::debug!(
            "Completions request returned {len} suggestions in {elapsed:?}",
            elapsed = Instant::now().duration_since(start)
        );
        Ok(Some(response))
    }
}
impl RetriableRequestHandler for CompletionRequestHandler {
    // Retry (rather than fail) when the Salsa database changes mid-request.
    const RETRY_ON_CANCELLATION: bool = true;
}
fn ty_kind_to_lsp_kind(kind: CompletionKind) -> CompletionItemKind {
// Gimme my dang globs in tight scopes!
#[allow(clippy::enum_glob_use)]
use self::CompletionKind::*;
// ref https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#completionItemKind
match kind {
Text => CompletionItemKind::TEXT,
Method => CompletionItemKind::METHOD,
Function => CompletionItemKind::FUNCTION,
Constructor => CompletionItemKind::CONSTRUCTOR,
Field => CompletionItemKind::FIELD,
Variable => CompletionItemKind::VARIABLE,
Class => CompletionItemKind::CLASS,
Interface => CompletionItemKind::INTERFACE,
Module => CompletionItemKind::MODULE,
Property => CompletionItemKind::PROPERTY,
Unit => CompletionItemKind::UNIT,
Value => CompletionItemKind::VALUE,
Enum => CompletionItemKind::ENUM,
Keyword => CompletionItemKind::KEYWORD,
Snippet => CompletionItemKind::SNIPPET,
Color => CompletionItemKind::COLOR,
File => CompletionItemKind::FILE,
Reference => CompletionItemKind::REFERENCE,
Folder => CompletionItemKind::FOLDER,
EnumMember => CompletionItemKind::ENUM_MEMBER,
Constant => CompletionItemKind::CONSTANT,
Struct => CompletionItemKind::STRUCT,
Event => CompletionItemKind::EVENT,
Operator => CompletionItemKind::OPERATOR,
TypeParameter => CompletionItemKind::TYPE_PARAMETER,
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/rename.rs | crates/ty_server/src/server/api/requests/rename.rs | use std::borrow::Cow;
use std::collections::HashMap;
use lsp_types::request::Rename;
use lsp_types::{RenameParams, TextEdit, Url, WorkspaceEdit};
use ty_ide::rename;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/rename` requests.
pub(crate) struct RenameRequestHandler;

impl RequestHandler for RenameRequestHandler {
    type RequestType = Rename;
}

impl BackgroundDocumentRequestHandler for RenameRequestHandler {
    fn document_url(params: &RenameParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position.text_document.uri)
    }

    /// Computes a workspace-wide rename for the symbol under the cursor.
    ///
    /// Returns `None` when language services are disabled, the position can't
    /// be resolved, or the rename produces no edits.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: RenameParams,
    ) -> crate::server::Result<Option<WorkspaceEdit>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        let Some(offset) = params.text_document_position.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };

        let Some(references) = rename(db, file, offset, &params.new_name) else {
            return Ok(None);
        };

        // Bucket the text edits by the document (URI) they apply to.
        let mut edits_by_uri: HashMap<Url, Vec<TextEdit>> = HashMap::new();
        for reference in references {
            let Some(location) = reference.to_location(db, snapshot.encoding()) else {
                continue;
            };
            edits_by_uri.entry(location.uri).or_default().push(TextEdit {
                range: location.range,
                new_text: params.new_name.clone(),
            });
        }

        if edits_by_uri.is_empty() {
            return Ok(None);
        }

        Ok(Some(WorkspaceEdit {
            changes: Some(edits_by_uri),
            document_changes: None,
            change_annotations: None,
        }))
    }
}

impl RetriableRequestHandler for RenameRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/semantic_tokens.rs | crates/ty_server/src/server/api/requests/semantic_tokens.rs | use std::borrow::Cow;
use lsp_types::{SemanticTokens, SemanticTokensParams, SemanticTokensResult, Url};
use ruff_db::source::source_text;
use ty_project::ProjectDatabase;
use crate::db::Db;
use crate::server::api::semantic_tokens::generate_semantic_tokens;
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/semanticTokens/full` requests.
pub(crate) struct SemanticTokensRequestHandler;
impl RequestHandler for SemanticTokensRequestHandler {
    type RequestType = lsp_types::request::SemanticTokensFullRequest;
}
impl BackgroundDocumentRequestHandler for SemanticTokensRequestHandler {
    fn document_url(params: &SemanticTokensParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }
    /// Computes semantic tokens for the entire document (or, for a notebook
    /// cell, only for that cell's lines).
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        _params: SemanticTokensParams,
    ) -> crate::server::Result<Option<SemanticTokensResult>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        // If this document is a notebook cell, limit the highlighting range
        // to the lines of this cell (instead of highlighting the entire notebook).
        // Not only does this avoid unnecessary work, it is also required
        // because all ranges in the response must be within **this document**.
        let mut cell_range = None;
        if snapshot.document().is_cell()
            && let Some(notebook_document) = db.notebook_document(file)
            && let Some(notebook) = source_text(db, file).as_notebook()
        {
            let cell_index = notebook_document.cell_index_by_uri(snapshot.url());
            cell_range = cell_index.and_then(|index| notebook.cell_range(index));
        }
        let lsp_tokens = generate_semantic_tokens(
            db,
            file,
            cell_range,
            snapshot.encoding(),
            snapshot
                .resolved_client_capabilities()
                .supports_multiline_semantic_tokens(),
        );
        // `result_id` is left unset: this handler doesn't support delta requests.
        Ok(Some(SemanticTokensResult::Tokens(SemanticTokens {
            result_id: None,
            data: lsp_tokens,
        })))
    }
}
impl RetriableRequestHandler for SemanticTokensRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/references.rs | crates/ty_server/src/server/api/requests/references.rs | use std::borrow::Cow;
use lsp_types::request::References;
use lsp_types::{Location, ReferenceParams, Url};
use ty_ide::find_references;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/references` requests.
pub(crate) struct ReferencesRequestHandler;

impl RequestHandler for ReferencesRequestHandler {
    type RequestType = References;
}

impl BackgroundDocumentRequestHandler for ReferencesRequestHandler {
    fn document_url(params: &ReferenceParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position.text_document.uri)
    }

    /// Finds all references to the symbol at the requested position.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: ReferenceParams,
    ) -> crate::server::Result<Option<Vec<Location>>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }

        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };

        let Some(offset) = params.text_document_position.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };

        let Some(targets) = find_references(db, file, offset, params.context.include_declaration)
        else {
            return Ok(None);
        };

        // References whose location can't be expressed for the client are dropped.
        let mut locations = Vec::new();
        for target in targets {
            if let Some(location) = target.to_location(db, snapshot.encoding()) {
                locations.push(location);
            }
        }

        Ok(Some(locations))
    }
}

impl RetriableRequestHandler for ReferencesRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/selection_range.rs | crates/ty_server/src/server/api/requests/selection_range.rs | use std::borrow::Cow;
use lsp_types::request::SelectionRangeRequest;
use lsp_types::{SelectionRange as LspSelectionRange, SelectionRangeParams, Url};
use ty_ide::selection_range;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToRangeExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/selectionRange` requests.
pub(crate) struct SelectionRangeRequestHandler;
impl RequestHandler for SelectionRangeRequestHandler {
    type RequestType = SelectionRangeRequest;
}
impl BackgroundDocumentRequestHandler for SelectionRangeRequestHandler {
    fn document_url(params: &SelectionRangeParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }
    /// Computes an expand-selection chain for every requested position.
    ///
    /// Positions that can't be mapped into the document are skipped.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: SelectionRangeParams,
    ) -> crate::server::Result<Option<Vec<LspSelectionRange>>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let mut results = Vec::new();
        for position in params.positions {
            let Some(offset) = position.to_text_size(db, file, snapshot.url(), snapshot.encoding())
            else {
                continue;
            };
            let ranges = selection_range(db, file, offset);
            if !ranges.is_empty() {
                // Convert ranges to the nested LSP SelectionRange structure: each
                // range wraps the previously built one as its parent, so the value
                // pushed to `results` holds the *last* range in the list with all
                // earlier ranges as ancestors.
                // NOTE(review): this assumes `selection_range` yields ranges ordered
                // from outermost to innermost — confirm against `ty_ide`.
                let mut lsp_range = None;
                for &range in &ranges {
                    let Some(range) = range
                        .to_lsp_range(db, file, snapshot.encoding())
                        .map(|lsp_range| lsp_range.local_range())
                    else {
                        // Stop chaining when a range can't be mapped; keep what was built so far.
                        break;
                    };
                    lsp_range = Some(LspSelectionRange {
                        range,
                        parent: lsp_range.map(Box::new),
                    });
                }
                if let Some(range) = lsp_range {
                    results.push(range);
                }
            }
        }
        if results.is_empty() {
            Ok(None)
        } else {
            Ok(Some(results))
        }
    }
}
impl RetriableRequestHandler for SelectionRangeRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/workspace_diagnostic.rs | crates/ty_server/src/server/api/requests/workspace_diagnostic.rs | use std::collections::BTreeMap;
use std::sync::Mutex;
use std::time::{Duration, Instant};
use lsp_server::RequestId;
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
FullDocumentDiagnosticReport, PreviousResultId, ProgressToken,
UnchangedDocumentDiagnosticReport, Url, WorkspaceDiagnosticParams, WorkspaceDiagnosticReport,
WorkspaceDiagnosticReportPartialResult, WorkspaceDiagnosticReportResult,
WorkspaceDocumentDiagnosticReport, WorkspaceFullDocumentDiagnosticReport,
WorkspaceUnchangedDocumentDiagnosticReport, notification::Notification,
};
use ruff_db::diagnostic::Diagnostic;
use ruff_db::files::File;
use ruff_db::source::source_text;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use ty_project::{ProgressReporter, ProjectDatabase};
use crate::PositionEncoding;
use crate::capabilities::ResolvedClientCapabilities;
use crate::document::DocumentKey;
use crate::server::api::diagnostics::{Diagnostics, to_lsp_diagnostic};
use crate::server::api::traits::{
BackgroundRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::server::lazy_work_done_progress::LazyWorkDoneProgress;
use crate::server::{Action, Result};
use crate::session::client::Client;
use crate::session::index::Index;
use crate::session::{GlobalSettings, SessionSnapshot, SuspendedWorkspaceDiagnosticRequest};
use crate::system::file_to_url;
/// Handler for [Workspace diagnostics][workspace-diagnostics]
///
/// Workspace diagnostics are special in many ways compared to other request handlers.
/// This is mostly due to the fact that computing them is expensive. Because of that,
/// the LSP supports multiple optimizations of which we all make use:
///
/// ## Partial results
///
/// Many clients support partial results. They allow a server
/// to send multiple responses (in the form of `$/progress` notifications) for
/// the same request. We use partial results to stream the results for
/// changed files. This has the obvious benefit that users
/// don't need to wait for the entire check to complete before they see any diagnostics.
/// "Chunking" the work this way also helps the client to incrementally
/// update (and repaint) the diagnostics instead of all at once.
/// We did see lags in VS code for projects with 10k+ diagnostics before implementing
/// this improvement.
///
/// ## Result IDs
///
/// The server can compute a result id for every file which the client
/// sends back in the next pull or workspace diagnostic request. The way we use
/// the result id is that we compute a fingerprint of the file's diagnostics (a hash)
/// and compare it with the result id sent by the client. We know that
/// the diagnostics for a file are unchanged (the client still has the most recent diagnostics)
/// if the ids compare equal.
///
/// Result IDs are also useful to identify files for which ty no longer emits
/// any diagnostics. For example, file A contained a syntax error that has now been fixed
/// by the user. The client will send us a result id for file A but we won't match it with
/// any new diagnostics because all errors in the file were fixed. The fact that we can't
/// match up the result ID tells us that we need to clear the diagnostics on the client
/// side by sending an empty diagnostic report (report without any diagnostics). We'll set the
/// result id to `None` so that the client stops sending us a result id for this file.
///
/// Sending unchanged instead of the full diagnostics for files that haven't changed
/// helps reduce the data that's sent from the server to the client and it also enables long-polling
/// (see the next section).
///
/// ## Long polling
///
/// As of today (1st of August 2025), VS code's LSP client automatically schedules a
/// workspace diagnostic request every two seconds because it doesn't know *when* to pull
/// for new workspace diagnostics (it doesn't know what actions invalidate the diagnostics).
/// However, running the workspace diagnostics every two seconds wastes a lot of CPU cycles (and battery life as a result)
/// if the user is only browsing the project (it requires ty to iterate over all files).
/// That's why we implement long polling (as recommended in the LSP) for workspace diagnostics.
///
/// The basic idea of long-polling is that the server doesn't respond if there are no diagnostics
/// or all diagnostics are unchanged. Instead, the server keeps the request open (it doesn't respond)
/// and only responds when the diagnostics change. This puts the server in full control of when
/// to recheck a workspace and a client can simply wait for the response to come in.
///
/// One challenge with long polling for ty's server architecture is that we can't just keep
/// the background thread running because holding on to the [`ProjectDatabase`] references
/// prevents notifications from acquiring the exclusive db lock (or the long polling background thread
/// panics if a notification tries to do so). What we do instead is that this request handler
/// doesn't send a response if there are no diagnostics or all are unchanged and it
/// sets a "[snapshot](SuspendedWorkspaceDiagnosticRequest)" of the workspace diagnostic request on the [`Session`].
/// The second part to this is in the notification request handling. ty retries the
/// suspended workspace diagnostic request (if any) after every notification if the notification
/// changed the [`Session`]'s state.
///
/// [workspace-diagnostics]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#workspace_diagnostic
pub(crate) struct WorkspaceDiagnosticRequestHandler;
impl RequestHandler for WorkspaceDiagnosticRequestHandler {
    type RequestType = WorkspaceDiagnosticRequest;
}
impl BackgroundRequestHandler for WorkspaceDiagnosticRequestHandler {
    /// Checks all projects and produces the workspace diagnostic report.
    ///
    /// Diagnostics are streamed as partial results when the client provided a
    /// partial result token; otherwise they are collected into one bulk report.
    fn run(
        snapshot: &SessionSnapshot,
        client: &Client,
        params: WorkspaceDiagnosticParams,
    ) -> Result<WorkspaceDiagnosticReportResult> {
        if !snapshot.global_settings().diagnostic_mode().is_workspace() {
            tracing::debug!("Workspace diagnostics is disabled; returning empty report");
            return Ok(WorkspaceDiagnosticReportResult::Report(
                WorkspaceDiagnosticReport { items: vec![] },
            ));
        }
        let writer = ResponseWriter::new(
            params.partial_result_params.partial_result_token,
            params.previous_result_ids,
            snapshot,
            client,
        );
        // Use the work done progress token from the client request, if provided.
        // Note: neither VS Code nor Zed currently support this,
        // see https://github.com/microsoft/vscode-languageserver-node/issues/528
        // That's why we fall back to server-initiated progress if no token is provided.
        let work_done_progress = LazyWorkDoneProgress::new(
            client,
            params.work_done_progress_params.work_done_token,
            "Checking",
            snapshot.resolved_client_capabilities(),
        );
        let mut reporter = WorkspaceDiagnosticsProgressReporter::new(work_done_progress, writer);
        for db in snapshot.projects() {
            db.check_with_reporter(&mut reporter);
        }
        Ok(reporter.into_final_report())
    }
    /// Responds to the request, or suspends it (long polling) when no diagnostics changed.
    fn handle_request(
        id: &RequestId,
        snapshot: SessionSnapshot,
        client: &Client,
        params: WorkspaceDiagnosticParams,
    ) {
        let result = Self::run(&snapshot, client, params.clone());
        // Test if this is a no-op result, in which case we should long-poll the request and
        // only respond once some diagnostics have changed to get the latest result ids.
        //
        // Bulk response: This is the simple case. Simply test if all diagnostics are unchanged (or empty).
        // Streaming: This is trickier but follows the same principle.
        // * If the server sent any partial results, then `result` is a `Partial` result (in which
        //   case we shouldn't do any long polling because some diagnostics changed).
        // * If this is a full report, then check if all items are unchanged (or empty), the same as for
        //   the non-streaming case.
        if let Ok(WorkspaceDiagnosticReportResult::Report(full)) = &result {
            let all_unchanged = full
                .items
                .iter()
                .all(|item| matches!(item, WorkspaceDocumentDiagnosticReport::Unchanged(_)));
            if all_unchanged {
                tracing::debug!(
                    "Suspending workspace diagnostic request, all diagnostics are unchanged or the project has no diagnostics"
                );
                client.queue_action(Action::SuspendWorkspaceDiagnostics(Box::new(
                    SuspendedWorkspaceDiagnosticRequest {
                        id: id.clone(),
                        params: serde_json::to_value(&params).unwrap(),
                        revision: snapshot.revision(),
                    },
                )));
                // Don't respond, keep the request open (long polling).
                return;
            }
        }
        client.respond(id, result);
    }
}
impl RetriableRequestHandler for WorkspaceDiagnosticRequestHandler {
    /// The error returned when salsa cancels the computation.
    ///
    /// `retrigger_request: true` tells the client to simply re-send the
    /// workspace diagnostic request.
    fn salsa_cancellation_error() -> lsp_server::ResponseError {
        lsp_server::ResponseError {
            code: lsp_server::ErrorCode::ServerCancelled as i32,
            message: "server cancelled the request".to_owned(),
            data: serde_json::to_value(lsp_types::DiagnosticServerCancellationData {
                retrigger_request: true,
            })
            .ok(),
        }
    }
}
/// ty progress reporter that streams the diagnostics to the client
/// and sends progress reports (checking X/Y files).
///
/// Diagnostics are only streamed if the client sends a partial result token.
struct WorkspaceDiagnosticsProgressReporter<'a> {
    work_done: LazyWorkDoneProgress,
    // Mutex because `report_checked_file` only takes `&self` but must mutate the
    // shared state — presumably it's called concurrently from checker threads
    // (the poison handling there suggests so).
    state: Mutex<ProgressReporterState<'a>>,
}
impl<'a> WorkspaceDiagnosticsProgressReporter<'a> {
    fn new(work_done: LazyWorkDoneProgress, response: ResponseWriter<'a>) -> Self {
        Self {
            state: Mutex::new(ProgressReporterState {
                total_files: 0,
                checked_files: 0,
                last_response_sent: Instant::now(),
                response,
            }),
            work_done,
        }
    }
    /// Consumes the reporter and produces the final (bulk or partial) report.
    ///
    /// Panics if the state mutex is poisoned.
    fn into_final_report(self) -> WorkspaceDiagnosticReportResult {
        let state = self.state.into_inner().unwrap();
        state.response.into_final_report()
    }
}
impl ProgressReporter for WorkspaceDiagnosticsProgressReporter<'_> {
    /// Registers `files` additional files to be checked (feeds the X/Y progress display).
    fn set_files(&mut self, files: usize) {
        let state = self.state.get_mut().unwrap();
        state.total_files += files;
        state.report_progress(&self.work_done);
    }
    /// Called after each checked file: updates the progress display (throttled to
    /// roughly one update every 50ms) and writes the file's diagnostics.
    fn report_checked_file(&self, db: &ProjectDatabase, file: File, diagnostics: &[Diagnostic]) {
        // Another thread might have panicked at this point because of a salsa cancellation which
        // poisoned the lock. If the state is poisoned, just don't report and wait for our thread
        // to unwind with a salsa cancellation next.
        let Ok(mut state) = self.state.lock() else {
            return;
        };
        state.checked_files += 1;
        if state.checked_files == state.total_files {
            state.report_progress(&self.work_done);
        } else if state.last_response_sent.elapsed() >= Duration::from_millis(50) {
            state.last_response_sent = Instant::now();
            state.report_progress(&self.work_done);
        }
        // Don't report empty diagnostics. We clear previous diagnostics in
        // `ResponseWriter::into_final_report`, which also handles the case where a file
        // no longer has diagnostics because it's no longer part of the project.
        if !diagnostics.is_empty() {
            state
                .response
                .write_diagnostics_for_file(db, file, diagnostics);
        }
        state.response.maybe_flush();
    }
    /// Reports diagnostics that aren't tied to a single checked file.
    /// Diagnostics without a primary span (no file) are dropped with a debug log.
    fn report_diagnostics(&mut self, db: &ProjectDatabase, diagnostics: Vec<Diagnostic>) {
        // Group by file so they can be written as per-file reports.
        let mut by_file: BTreeMap<File, Vec<Diagnostic>> = BTreeMap::new();
        for diagnostic in diagnostics {
            if let Some(file) = diagnostic.primary_span().map(|span| span.expect_ty_file()) {
                by_file.entry(file).or_default().push(diagnostic);
            } else {
                tracing::debug!(
                    "Ignoring diagnostic without a file: {diagnostic}",
                    diagnostic = diagnostic.primary_message()
                );
            }
        }
        let response = &mut self.state.get_mut().unwrap().response;
        for (file, diagnostics) in by_file {
            response.write_diagnostics_for_file(db, file, &diagnostics);
        }
        response.maybe_flush();
    }
}
/// Mutable state shared by the workspace diagnostics progress reporter.
struct ProgressReporterState<'a> {
    total_files: usize,
    checked_files: usize,
    last_response_sent: Instant,
    response: ResponseWriter<'a>,
}

impl ProgressReporterState<'_> {
    /// Reports the current "checked/total files" progress to the client and
    /// emits the finish message once every file has been checked.
    fn report_progress(&self, work_done: &LazyWorkDoneProgress) {
        let checked = self.checked_files;
        let total = self.total_files;
        // A percentage only makes sense once we know how many files there are.
        #[allow(clippy::cast_possible_truncation)]
        let percentage = (total > 0).then(|| (checked * 100 / total) as u32);
        work_done.report_progress(format!("{checked}/{total} files"), percentage);
        if checked == total {
            work_done.set_finish_message(format!("Checked {total} files"));
        }
    }
}
/// Collects or streams the per-file workspace diagnostic reports,
/// depending on whether the client supports partial results.
#[derive(Debug)]
struct ResponseWriter<'a> {
    mode: ReportingMode,
    index: &'a Index,
    position_encoding: PositionEncoding,
    client_capabilities: ResolvedClientCapabilities,
    // It's important that we key by `DocumentKey` rather than by `Url` because
    // `file_to_url` isn't guaranteed to return the exact same URL as the one provided
    // by the client.
    previous_result_ids: FxHashMap<DocumentKey, (Url, String)>,
    global_settings: &'a GlobalSettings,
}
impl<'a> ResponseWriter<'a> {
    /// Creates a writer that streams reports when the client provided a
    /// `partial_result_token` and collects them for one bulk response otherwise.
    fn new(
        partial_result_token: Option<ProgressToken>,
        previous_result_ids: Vec<PreviousResultId>,
        snapshot: &'a SessionSnapshot,
        client: &Client,
    ) -> Self {
        let index = snapshot.index();
        let position_encoding = snapshot.position_encoding();
        let mode = if let Some(token) = partial_result_token {
            ReportingMode::Streaming(Streaming {
                first: true,
                client: client.clone(),
                token,
                is_test: snapshot.in_test(),
                last_flush: Instant::now(),
                changed: Vec::new(),
                unchanged: Vec::with_capacity(previous_result_ids.len()),
            })
        } else {
            ReportingMode::Bulk(Vec::new())
        };
        let previous_result_ids = previous_result_ids
            .into_iter()
            .map(|prev| (DocumentKey::from_url(&prev.uri), (prev.uri, prev.value)))
            .collect();
        Self {
            mode,
            index,
            position_encoding,
            client_capabilities: snapshot.resolved_client_capabilities(),
            previous_result_ids,
            global_settings: snapshot.global_settings(),
        }
    }
    /// Converts `diagnostics` for `file` into an LSP report and writes it.
    ///
    /// Emits an `Unchanged` report when the diagnostics' fingerprint matches the
    /// result id the client sent for this document, and a `Full` report otherwise.
    fn write_diagnostics_for_file(
        &mut self,
        db: &ProjectDatabase,
        file: File,
        diagnostics: &[Diagnostic],
    ) {
        let Some(url) = file_to_url(db, file) else {
            tracing::debug!("Failed to convert file path to URL at {}", file.path(db));
            return;
        };
        if source_text(db, file).is_notebook() {
            // Notebooks only support publish diagnostics,
            // and we can't convert text ranges to notebook ranges unless
            // the document is open in the editor, in which case
            // we publish the diagnostics already.
            return;
        }
        let key = DocumentKey::from_url(&url);
        let version = self
            .index
            .document_handle(&url)
            .map(|doc| i64::from(doc.version()))
            .ok();
        let result_id = Diagnostics::result_id_from_hash(diagnostics);
        // Consume the client's previous result id for this document; whatever
        // remains unconsumed is cleared in `into_final_report`.
        let previous_result_id = self.previous_result_ids.remove(&key).map(|(_url, id)| id);
        let report = match result_id {
            Some(new_id) if Some(&new_id) == previous_result_id.as_ref() => {
                WorkspaceDocumentDiagnosticReport::Unchanged(
                    WorkspaceUnchangedDocumentDiagnosticReport {
                        uri: url,
                        version,
                        unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
                            result_id: new_id,
                        },
                    },
                )
            }
            new_id => {
                let lsp_diagnostics = diagnostics
                    .iter()
                    .filter_map(|diagnostic| {
                        Some(
                            to_lsp_diagnostic(
                                db,
                                diagnostic,
                                self.position_encoding,
                                self.client_capabilities,
                                self.global_settings,
                            )?
                            .1,
                        )
                    })
                    .collect::<Vec<_>>();
                WorkspaceDocumentDiagnosticReport::Full(WorkspaceFullDocumentDiagnosticReport {
                    uri: url,
                    version,
                    full_document_diagnostic_report: FullDocumentDiagnosticReport {
                        result_id: new_id,
                        items: lsp_diagnostics,
                    },
                })
            }
        };
        self.write_report(report);
    }
    /// Hands a finished per-document report to the active reporting mode.
    fn write_report(&mut self, report: WorkspaceDocumentDiagnosticReport) {
        match &mut self.mode {
            ReportingMode::Streaming(streaming) => {
                streaming.write_report(report);
            }
            ReportingMode::Bulk(all) => {
                all.push(report);
            }
        }
    }
    /// Flush any pending reports if streaming diagnostics.
    ///
    /// Note: The flush is throttled when streaming.
    fn maybe_flush(&mut self) {
        match &mut self.mode {
            ReportingMode::Streaming(streaming) => streaming.maybe_flush(),
            ReportingMode::Bulk(_) => {}
        }
    }
    /// Creates the final response after all files have been processed.
    ///
    /// The result can be a partial or full report depending on whether the server's streaming
    /// diagnostics and if it already sent some diagnostics.
    fn into_final_report(mut self) -> WorkspaceDiagnosticReportResult {
        let mut items = Vec::new();
        // Handle files that had diagnostics in a previous request but no longer have any.
        // Any remaining entries in `previous_result_ids` are files that were fixed.
        for (key, (previous_url, previous_result_id)) in self.previous_result_ids {
            // This file had diagnostics before but doesn't now, so we need to report it as having no diagnostics
            let version = self
                .index
                .document(&key)
                .ok()
                .map(|doc| i64::from(doc.version()));
            let new_result_id = Diagnostics::result_id_from_hash(&[]);
            let report = match new_result_id {
                Some(new_id) if new_id == previous_result_id => {
                    WorkspaceDocumentDiagnosticReport::Unchanged(
                        WorkspaceUnchangedDocumentDiagnosticReport {
                            uri: previous_url,
                            version,
                            unchanged_document_diagnostic_report:
                                UnchangedDocumentDiagnosticReport { result_id: new_id },
                        },
                    )
                }
                new_id => {
                    WorkspaceDocumentDiagnosticReport::Full(WorkspaceFullDocumentDiagnosticReport {
                        uri: previous_url,
                        version,
                        full_document_diagnostic_report: FullDocumentDiagnosticReport {
                            result_id: new_id,
                            items: vec![], // No diagnostics
                        },
                    })
                }
            };
            items.push(report);
        }
        match &mut self.mode {
            ReportingMode::Streaming(streaming) => {
                // Anything that was batched but not yet flushed goes into the final report.
                items.extend(
                    std::mem::take(&mut streaming.changed)
                        .into_iter()
                        .map(WorkspaceDocumentDiagnosticReport::Full),
                );
                items.extend(
                    std::mem::take(&mut streaming.unchanged)
                        .into_iter()
                        .map(WorkspaceDocumentDiagnosticReport::Unchanged),
                );
            }
            ReportingMode::Bulk(all) => {
                all.extend(items);
                items = std::mem::take(all);
            }
        }
        self.mode.create_result(items)
    }
}
#[derive(Debug)]
enum ReportingMode {
    /// Streams the diagnostics to the client as they are computed (file by file).
    /// Requires that the client provides a partial result token.
    Streaming(Streaming),
    /// For clients that don't support streaming diagnostics. Collects all workspace
    /// diagnostics and sends them in the `workspace/diagnostic` response.
    Bulk(Vec<WorkspaceDocumentDiagnosticReport>),
}
impl ReportingMode {
    /// Wraps `items` in the appropriate response type: bulk mode always returns a
    /// full `Report`; streaming mode returns a `Report` for the first send and
    /// `Partial` results afterwards (see `Streaming::create_result`).
    fn create_result(
        &mut self,
        items: Vec<WorkspaceDocumentDiagnosticReport>,
    ) -> WorkspaceDiagnosticReportResult {
        match self {
            ReportingMode::Streaming(streaming) => streaming.create_result(items),
            ReportingMode::Bulk(..) => {
                WorkspaceDiagnosticReportResult::Report(WorkspaceDiagnosticReport { items })
            }
        }
    }
}
#[derive(Debug)]
struct Streaming {
    // Whether the next send is the first response for this request.
    // The LSP requires the first send to be a full report (see `create_result`).
    first: bool,
    client: Client,
    /// The partial result token.
    token: ProgressToken,
    /// Throttles the flush reports to not happen more than once every ~50ms
    /// (see `maybe_flush`).
    last_flush: Instant,
    is_test: bool,
    /// The reports for files with changed diagnostics.
    /// The implementation uses batching to avoid too many
    /// requests for large projects (can slow down the entire
    /// analysis).
    changed: Vec<WorkspaceFullDocumentDiagnosticReport>,
    /// All the unchanged reports. Don't stream them,
    /// since nothing has changed.
    unchanged: Vec<WorkspaceUnchangedDocumentDiagnosticReport>,
}
impl Streaming {
    /// Queues a report: changed (full) reports are batched for streaming,
    /// unchanged reports are held back for the final response.
    fn write_report(&mut self, report: WorkspaceDocumentDiagnosticReport) {
        match report {
            WorkspaceDocumentDiagnosticReport::Full(full) => {
                self.changed.push(full);
            }
            WorkspaceDocumentDiagnosticReport::Unchanged(unchanged) => {
                self.unchanged.push(unchanged);
            }
        }
    }
    /// Sends the batched changed reports to the client as a `$/progress` notification.
    fn maybe_flush(&mut self) {
        if self.changed.is_empty() {
            return;
        }
        // Flush every ~50ms or whenever we have two items and this is a test run.
        let should_flush = if self.is_test {
            self.changed.len() >= 2
        } else {
            self.last_flush.elapsed().as_millis() >= 50
        };
        if !should_flush {
            return;
        }
        let items = self
            .changed
            .drain(..)
            .map(WorkspaceDocumentDiagnosticReport::Full)
            .collect();
        let report = self.create_result(items);
        self.client
            .send_notification::<PartialWorkspaceProgress>(PartialWorkspaceProgressParams {
                token: self.token.clone(),
                value: report,
            });
        self.last_flush = Instant::now();
    }
    /// Wraps `items` in a full `Report` for the very first send and in a
    /// `Partial` result for every send after that.
    fn create_result(
        &mut self,
        items: Vec<WorkspaceDocumentDiagnosticReport>,
    ) -> WorkspaceDiagnosticReportResult {
        // As per the LSP spec:
        // > partial result: The first literal send need to be a WorkspaceDiagnosticReport followed
        // > by `n` WorkspaceDiagnosticReportPartialResult literals defined as follows:
        if self.first {
            self.first = false;
            WorkspaceDiagnosticReportResult::Report(WorkspaceDiagnosticReport { items })
        } else {
            WorkspaceDiagnosticReportResult::Partial(WorkspaceDiagnosticReportPartialResult {
                items,
            })
        }
    }
}
/// The `$/progress` notification for partial workspace diagnostics.
///
/// This type is missing in `lsp_types`. That's why we define it here.
pub struct PartialWorkspaceProgress;
impl Notification for PartialWorkspaceProgress {
    type Params = PartialWorkspaceProgressParams;
    const METHOD: &'static str = "$/progress";
}
/// Payload of a [`PartialWorkspaceProgress`] notification: the client's partial
/// result token plus the (partial) diagnostic report.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct PartialWorkspaceProgressParams {
    pub token: ProgressToken,
    pub value: WorkspaceDiagnosticReportResult,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/inlay_hints.rs | crates/ty_server/src/server/api/requests/inlay_hints.rs | use std::borrow::Cow;
use std::time::Instant;
use lsp_types::request::InlayHintRequest;
use lsp_types::{InlayHintParams, Url};
use ruff_db::files::File;
use ty_ide::{InlayHintKind, InlayHintLabel, InlayHintTextEdit, inlay_hints};
use ty_project::ProjectDatabase;
use crate::PositionEncoding;
use crate::document::{RangeExt, TextSizeExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/inlayHint` requests.
pub(crate) struct InlayHintRequestHandler;
impl RequestHandler for InlayHintRequestHandler {
    type RequestType = InlayHintRequest;
}
impl BackgroundDocumentRequestHandler for InlayHintRequestHandler {
    fn document_url(params: &InlayHintParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }
    /// Computes the inlay hints for the requested range of the document.
    ///
    /// Returns `None` when language services are disabled, no hint category is
    /// enabled, or the document/range can't be resolved.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: InlayHintParams,
    ) -> crate::server::Result<Option<Vec<lsp_types::InlayHint>>> {
        let start = Instant::now();
        let workspace_settings = snapshot.workspace_settings();
        if workspace_settings.is_language_services_disabled()
            || !workspace_settings.inlay_hints().any_enabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let Some(range) = params
            .range
            .to_text_range(db, file, snapshot.url(), snapshot.encoding())
        else {
            return Ok(None);
        };
        let inlay_hints = inlay_hints(db, file, range, workspace_settings.inlay_hints());
        // A hint whose position can't be mapped to an LSP position is dropped
        // entirely (the `?` below); unmappable text edits are dropped individually.
        let inlay_hints: Vec<lsp_types::InlayHint> = inlay_hints
            .into_iter()
            .filter_map(|hint| {
                Some(lsp_types::InlayHint {
                    position: hint
                        .position
                        .to_lsp_position(db, file, snapshot.encoding())?
                        .local_position(),
                    label: inlay_hint_label(&hint.label, db, snapshot.encoding()),
                    kind: Some(inlay_hint_kind(&hint.kind)),
                    tooltip: None,
                    padding_left: None,
                    padding_right: None,
                    data: None,
                    text_edits: Some(
                        hint.text_edits
                            .into_iter()
                            .filter_map(|text_edit| {
                                inlay_hint_text_edit(text_edit, db, file, snapshot.encoding())
                            })
                            .collect(),
                    ),
                })
            })
            .collect();
        tracing::debug!(
            "Inlay hint request returned {} hints in {:?}",
            inlay_hints.len(),
            start.elapsed()
        );
        Ok(Some(inlay_hints))
    }
}
impl RetriableRequestHandler for InlayHintRequestHandler {}
fn inlay_hint_kind(inlay_hint_kind: &InlayHintKind) -> lsp_types::InlayHintKind {
match inlay_hint_kind {
InlayHintKind::Type => lsp_types::InlayHintKind::TYPE,
InlayHintKind::CallArgumentName => lsp_types::InlayHintKind::PARAMETER,
}
}
/// Converts an IDE inlay hint label into an LSP label made of parts,
/// resolving each part's optional navigation target to an LSP location.
fn inlay_hint_label(
    inlay_hint_label: &InlayHintLabel,
    db: &ProjectDatabase,
    encoding: PositionEncoding,
) -> lsp_types::InlayHintLabel {
    let label_parts = inlay_hint_label
        .parts()
        .into_iter()
        .map(|part| {
            // A part without a target (or whose target can't be mapped to a
            // location) still contributes its text, just without a link.
            let location = part
                .target()
                .and_then(|target| target.to_location(db, encoding));
            lsp_types::InlayHintLabelPart {
                value: part.text().into(),
                location,
                tooltip: None,
                command: None,
            }
        })
        .collect();
    lsp_types::InlayHintLabel::LabelParts(label_parts)
}
/// Converts an IDE text edit attached to an inlay hint into an LSP
/// `TextEdit`.
///
/// Returns `None` when either end of the edit's range has no valid LSP
/// position under the negotiated encoding.
fn inlay_hint_text_edit(
    inlay_hint_text_edit: InlayHintTextEdit,
    db: &ProjectDatabase,
    file: File,
    encoding: PositionEncoding,
) -> Option<lsp_types::TextEdit> {
    let range = inlay_hint_text_edit.range;
    let start = range
        .start()
        .to_lsp_position(db, file, encoding)?
        .local_position();
    let end = range
        .end()
        .to_lsp_position(db, file, encoding)?
        .local_position();
    Some(lsp_types::TextEdit {
        range: lsp_types::Range { start, end },
        new_text: inlay_hint_text_edit.new_text,
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/hover.rs | crates/ty_server/src/server/api/requests/hover.rs | use std::borrow::Cow;
use crate::document::{FileRangeExt, PositionExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
use lsp_types::request::HoverRequest;
use lsp_types::{HoverContents, HoverParams, MarkupContent, Url};
use ty_ide::{MarkupKind, hover};
use ty_project::ProjectDatabase;
/// Handles `textDocument/hover` requests.
pub(crate) struct HoverRequestHandler;

impl RequestHandler for HoverRequestHandler {
    type RequestType = HoverRequest;
}

impl BackgroundDocumentRequestHandler for HoverRequestHandler {
    fn document_url(params: &HoverParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Computes hover contents for the position given in `params`.
    ///
    /// Returns `Ok(None)` when language services are disabled, the document
    /// or position can't be resolved, or there is nothing to show at the
    /// requested offset.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: HoverParams,
    ) -> crate::server::Result<Option<lsp_types::Hover>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        // Translate the LSP position into a byte offset, honoring the
        // negotiated position encoding.
        let Some(offset) = params.text_document_position_params.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };
        let Some(range_info) = hover(db, file, offset) else {
            return Ok(None);
        };
        // Prefer Markdown when the client advertises support for it,
        // otherwise fall back to plain text. Either way the payload is sent
        // as `Markup` contents — only the declared kind differs.
        let (markup_kind, lsp_markup_kind) = if snapshot
            .resolved_client_capabilities()
            .prefers_markdown_in_hover()
        {
            (MarkupKind::Markdown, lsp_types::MarkupKind::Markdown)
        } else {
            (MarkupKind::PlainText, lsp_types::MarkupKind::PlainText)
        };
        let contents = range_info.display(db, markup_kind).to_string();
        Ok(Some(lsp_types::Hover {
            contents: HoverContents::Markup(MarkupContent {
                kind: lsp_markup_kind,
                value: contents,
            }),
            // A `None` range is not an error: the client then highlights a
            // default range around the cursor position.
            range: range_info
                .file_range()
                .to_lsp_range(db, snapshot.encoding())
                .map(|lsp_range| lsp_range.local_range()),
        }))
    }
}

/// Hover is read-only, so the request is safe to retry.
impl RetriableRequestHandler for HoverRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/document_symbols.rs | crates/ty_server/src/server/api/requests/document_symbols.rs | use std::borrow::Cow;
use lsp_types::request::DocumentSymbolRequest;
use lsp_types::{DocumentSymbol, DocumentSymbolParams, SymbolInformation, Url};
use ruff_db::files::File;
use ty_ide::{HierarchicalSymbols, SymbolId, SymbolInfo, document_symbols};
use ty_project::ProjectDatabase;
use crate::Db;
use crate::document::{PositionEncoding, ToRangeExt};
use crate::server::api::symbols::{convert_symbol_kind, convert_to_lsp_symbol_information};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/documentSymbol` requests.
pub(crate) struct DocumentSymbolRequestHandler;

impl RequestHandler for DocumentSymbolRequestHandler {
    type RequestType = DocumentSymbolRequest;
}

impl BackgroundDocumentRequestHandler for DocumentSymbolRequestHandler {
    fn document_url(params: &DocumentSymbolParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }

    /// Collects the document's symbols, returning them either as a nested
    /// tree (`DocumentSymbol`) or as a flat list (`SymbolInformation`)
    /// depending on what the client supports.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        _params: DocumentSymbolParams,
    ) -> crate::server::Result<Option<lsp_types::DocumentSymbolResponse>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        // Check if the client supports hierarchical document symbols
        let supports_hierarchical = snapshot
            .resolved_client_capabilities()
            .supports_hierarchical_document_symbols();
        let symbols = document_symbols(db, file);
        if symbols.is_empty() {
            return Ok(None);
        }
        if supports_hierarchical {
            let symbols = symbols.to_hierarchical();
            // Symbols whose ranges can't be mapped to LSP ranges are
            // dropped by `filter_map` (see `convert_to_lsp_document_symbol`).
            let lsp_symbols: Vec<DocumentSymbol> = symbols
                .iter()
                .filter_map(|(id, symbol)| {
                    convert_to_lsp_document_symbol(
                        db,
                        file,
                        &symbols,
                        id,
                        symbol,
                        snapshot.encoding(),
                    )
                })
                .collect();
            Ok(Some(lsp_types::DocumentSymbolResponse::Nested(lsp_symbols)))
        } else {
            // Return flattened symbols as SymbolInformation
            let lsp_symbols: Vec<SymbolInformation> = symbols
                .iter()
                .filter_map(|(_, symbol)| {
                    convert_to_lsp_symbol_information(db, file, symbol, snapshot.encoding())
                })
                .collect();
            Ok(Some(lsp_types::DocumentSymbolResponse::Flat(lsp_symbols)))
        }
    }
}

/// Document symbols are read-only, so the request is safe to retry.
impl RetriableRequestHandler for DocumentSymbolRequestHandler {}
/// Converts one IDE symbol — and, recursively, its children — into an LSP
/// `DocumentSymbol`.
///
/// Returns `None` when either of the symbol's ranges can't be mapped to an
/// LSP range under the negotiated encoding; children that fail to convert
/// are dropped rather than failing the parent.
fn convert_to_lsp_document_symbol(
    db: &dyn Db,
    file: File,
    symbols: &HierarchicalSymbols,
    id: SymbolId,
    symbol: SymbolInfo<'_>,
    encoding: PositionEncoding,
) -> Option<DocumentSymbol> {
    // Full extent of the symbol (e.g. the entire function body).
    let range = symbol
        .full_range
        .to_lsp_range(db, file, encoding)?
        .local_range();
    // Just the identifier; clients use it to place the cursor.
    let selection_range = symbol
        .name_range
        .to_lsp_range(db, file, encoding)?
        .local_range();
    // Recurse into nested symbols.
    let children: Vec<DocumentSymbol> = symbols
        .children(id)
        .filter_map(|(child_id, child)| {
            convert_to_lsp_document_symbol(db, file, symbols, child_id, child, encoding)
        })
        .collect();
    Some(DocumentSymbol {
        name: symbol.name.into_owned(),
        detail: None,
        kind: convert_symbol_kind(symbol.kind),
        tags: None,
        #[allow(deprecated)]
        deprecated: None,
        range,
        selection_range,
        children: Some(children),
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/execute_command.rs | crates/ty_server/src/server/api/requests/execute_command.rs | use crate::capabilities::SupportedCommand;
use crate::server;
use crate::server::api::LSPResult;
use crate::server::api::RequestHandler;
use crate::server::api::traits::SyncRequestHandler;
use crate::session::Session;
use crate::session::client::Client;
use lsp_server::ErrorCode;
use lsp_types::{self as types, request as req};
use std::fmt::Write;
use std::str::FromStr;
use ty_project::Db as _;
/// Handles `workspace/executeCommand` requests for the commands the server
/// advertises in its capabilities.
pub(crate) struct ExecuteCommand;

impl RequestHandler for ExecuteCommand {
    type RequestType = req::ExecuteCommand;
}

impl SyncRequestHandler for ExecuteCommand {
    /// Dispatches the requested command.
    ///
    /// An unrecognized command name is reported as `InvalidParams`; a
    /// failure while producing the debug report becomes `InternalError`.
    fn run(
        session: &mut Session,
        _client: &Client,
        params: types::ExecuteCommandParams,
    ) -> server::Result<Option<serde_json::Value>> {
        let command = SupportedCommand::from_str(&params.command)
            .with_failure_code(ErrorCode::InvalidParams)?;
        match command {
            // The debug command returns its whole report as one JSON string.
            SupportedCommand::Debug => Ok(Some(serde_json::Value::String(
                debug_information(session).with_failure_code(ErrorCode::InternalError)?,
            ))),
        }
    }
}
/// Returns a string with detailed memory usage.
///
/// The report covers the negotiated client capabilities and position
/// encoding, global and per-workspace settings, the number of open text
/// documents, and a full salsa memory dump for every project database.
fn debug_information(session: &Session) -> crate::Result<String> {
    let mut out = String::new();

    writeln!(out, "Client capabilities: {:#?}", session.client_capabilities())?;
    writeln!(out, "Position encoding: {:#?}", session.position_encoding())?;
    writeln!(out, "Global settings: {:#?}", session.global_settings())?;
    writeln!(
        out,
        "Open text documents: {}",
        session.text_document_handles().count()
    )?;
    writeln!(out)?;

    // One section per workspace root.
    for (root, workspace) in session.workspaces() {
        writeln!(out, "Workspace {root} ({})", workspace.url())?;
        writeln!(out, "Settings: {:#?}", workspace.settings())?;
        writeln!(out)?;
    }

    // One section per project database, ending with the salsa memory dump
    // (the expensive part of the report).
    for db in session.project_dbs() {
        writeln!(out, "Project at {}", db.project().root(db))?;
        writeln!(out, "Settings: {:#?}", db.project().settings(db))?;
        writeln!(out)?;
        writeln!(
            out,
            "Memory report:\n{}",
            db.salsa_memory_dump().display_full()
        )?;
    }

    Ok(out)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/prepare_rename.rs | crates/ty_server/src/server/api/requests/prepare_rename.rs | use std::borrow::Cow;
use lsp_types::request::PrepareRenameRequest;
use lsp_types::{PrepareRenameResponse, TextDocumentPositionParams, Url};
use ty_ide::can_rename;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToRangeExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/prepareRename` requests.
pub(crate) struct PrepareRenameRequestHandler;

impl RequestHandler for PrepareRenameRequestHandler {
    type RequestType = PrepareRenameRequest;
}

impl BackgroundDocumentRequestHandler for PrepareRenameRequestHandler {
    fn document_url(params: &TextDocumentPositionParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document.uri)
    }

    /// Checks whether the symbol at the requested position can be renamed.
    ///
    /// Returns the range of the identifier that would be renamed, or
    /// `Ok(None)` when renaming is not possible at this position (or
    /// language services are disabled).
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: TextDocumentPositionParams,
    ) -> crate::server::Result<Option<PrepareRenameResponse>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        // Map the LSP position onto a byte offset in the file.
        let Some(offset) =
            params
                .position
                .to_text_size(db, file, snapshot.url(), snapshot.encoding())
        else {
            return Ok(None);
        };
        let Some(range) = can_rename(db, file, offset) else {
            return Ok(None);
        };
        // Convert the renameable range back into client coordinates.
        let Some(lsp_range) = range
            .to_lsp_range(db, file, snapshot.encoding())
            .map(|lsp_range| lsp_range.local_range())
        else {
            return Ok(None);
        };
        Ok(Some(PrepareRenameResponse::Range(lsp_range)))
    }
}

/// Prepare-rename is read-only, so the request is safe to retry.
impl RetriableRequestHandler for PrepareRenameRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/goto_definition.rs | crates/ty_server/src/server/api/requests/goto_definition.rs | use std::borrow::Cow;
use lsp_types::request::GotoDefinition;
use lsp_types::{GotoDefinitionParams, GotoDefinitionResponse, Url};
use ty_ide::goto_definition;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/definition` requests.
pub(crate) struct GotoDefinitionRequestHandler;

impl RequestHandler for GotoDefinitionRequestHandler {
    type RequestType = GotoDefinition;
}

impl BackgroundDocumentRequestHandler for GotoDefinitionRequestHandler {
    fn document_url(params: &GotoDefinitionParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Resolves the definition(s) of the symbol at the requested position.
    ///
    /// Clients that support `LocationLink` receive links (which also carry
    /// the origin range of the clicked reference); older clients receive
    /// plain locations.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: GotoDefinitionParams,
    ) -> crate::server::Result<Option<GotoDefinitionResponse>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let Some(offset) = params.text_document_position_params.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };
        let Some(ranged) = goto_definition(db, file, offset) else {
            return Ok(None);
        };
        if snapshot
            .resolved_client_capabilities()
            .supports_definition_link()
        {
            // `src` is the origin range: the span of the reference that was
            // invoked, which clients use for underlining.
            let src = Some(ranged.range);
            let links: Vec<_> = ranged
                .into_iter()
                .filter_map(|target| target.to_link(db, src, snapshot.encoding()))
                .collect();
            Ok(Some(GotoDefinitionResponse::Link(links)))
        } else {
            // Targets that can't be converted to locations are dropped.
            let locations: Vec<_> = ranged
                .into_iter()
                .filter_map(|target| target.to_location(db, snapshot.encoding()))
                .collect();
            Ok(Some(GotoDefinitionResponse::Array(locations)))
        }
    }
}

/// Goto-definition is read-only, so the request is safe to retry.
impl RetriableRequestHandler for GotoDefinitionRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/goto_type_definition.rs | crates/ty_server/src/server/api/requests/goto_type_definition.rs | use std::borrow::Cow;
use lsp_types::request::{GotoTypeDefinition, GotoTypeDefinitionParams};
use lsp_types::{GotoDefinitionResponse, Url};
use ty_ide::goto_type_definition;
use ty_project::ProjectDatabase;
use crate::document::{PositionExt, ToLink};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
/// Handles `textDocument/typeDefinition` requests.
pub(crate) struct GotoTypeDefinitionRequestHandler;

impl RequestHandler for GotoTypeDefinitionRequestHandler {
    type RequestType = GotoTypeDefinition;
}

impl BackgroundDocumentRequestHandler for GotoTypeDefinitionRequestHandler {
    fn document_url(params: &GotoTypeDefinitionParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Resolves the type definition(s) of the symbol at the requested
    /// position.
    ///
    /// Mirrors the goto-definition handler: clients that support
    /// `LocationLink` receive links, older clients receive plain locations.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: GotoTypeDefinitionParams,
    ) -> crate::server::Result<Option<GotoDefinitionResponse>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let Some(offset) = params.text_document_position_params.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };
        let Some(ranged) = goto_type_definition(db, file, offset) else {
            return Ok(None);
        };
        if snapshot
            .resolved_client_capabilities()
            .supports_type_definition_link()
        {
            // `src` is the origin range of the invoked reference.
            let src = Some(ranged.range);
            let links: Vec<_> = ranged
                .into_iter()
                .filter_map(|target| target.to_link(db, src, snapshot.encoding()))
                .collect();
            Ok(Some(GotoDefinitionResponse::Link(links)))
        } else {
            let locations: Vec<_> = ranged
                .into_iter()
                .filter_map(|target| target.to_location(db, snapshot.encoding()))
                .collect();
            Ok(Some(GotoDefinitionResponse::Array(locations)))
        }
    }
}

/// Goto-type-definition is read-only, so the request is safe to retry.
impl RetriableRequestHandler for GotoTypeDefinitionRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_server/src/server/api/requests/signature_help.rs | crates/ty_server/src/server/api/requests/signature_help.rs | use std::borrow::Cow;
use crate::document::{PositionEncoding, PositionExt};
use crate::server::api::traits::{
BackgroundDocumentRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::DocumentSnapshot;
use crate::session::client::Client;
use lsp_types::request::SignatureHelpRequest;
use lsp_types::{
Documentation, ParameterInformation, ParameterLabel, SignatureHelp, SignatureHelpParams,
SignatureInformation, Url,
};
use ty_ide::signature_help;
use ty_project::ProjectDatabase;
/// Handles `textDocument/signatureHelp` requests.
pub(crate) struct SignatureHelpRequestHandler;

impl RequestHandler for SignatureHelpRequestHandler {
    type RequestType = SignatureHelpRequest;
}

impl BackgroundDocumentRequestHandler for SignatureHelpRequestHandler {
    fn document_url(params: &SignatureHelpParams) -> Cow<'_, Url> {
        Cow::Borrowed(&params.text_document_position_params.text_document.uri)
    }

    /// Computes signature help (overloads, parameters, documentation) for
    /// the call expression around the requested position.
    ///
    /// Returns `Ok(None)` when language services are disabled, the document
    /// or position can't be resolved, or there is no call at the offset.
    fn run_with_snapshot(
        db: &ProjectDatabase,
        snapshot: &DocumentSnapshot,
        _client: &Client,
        params: SignatureHelpParams,
    ) -> crate::server::Result<Option<SignatureHelp>> {
        if snapshot
            .workspace_settings()
            .is_language_services_disabled()
        {
            return Ok(None);
        }
        let Some(file) = snapshot.to_notebook_or_file(db) else {
            return Ok(None);
        };
        let Some(offset) = params.text_document_position_params.position.to_text_size(
            db,
            file,
            snapshot.url(),
            snapshot.encoding(),
        ) else {
            return Ok(None);
        };
        // Extract signature help capabilities from the client
        let resolved_capabilities = snapshot.resolved_client_capabilities();
        let Some(signature_help_info) = signature_help(db, file, offset) else {
            return Ok(None);
        };
        // Compute active parameter from the active signature
        let active_parameter = signature_help_info
            .active_signature
            .and_then(|s| signature_help_info.signatures.get(s))
            .and_then(|sig| sig.active_parameter)
            .and_then(|p| u32::try_from(p).ok());
        // Convert from IDE types to LSP types
        let signatures = signature_help_info
            .signatures
            .into_iter()
            .map(|sig| {
                let parameters = sig
                    .parameters
                    .into_iter()
                    .map(|param| {
                        // Prefer label *offsets* into the signature string when
                        // the client supports them; fall back to the plain text
                        // label otherwise.
                        let label = if resolved_capabilities.supports_signature_label_offset() {
                            // Find the parameter's offset in the signature label
                            //
                            // NOTE(review): `find` locates the *first* occurrence of
                            // the parameter text inside the label; if that text also
                            // appears earlier in the signature string the offsets
                            // could point at the wrong span — TODO confirm the IDE
                            // layer guarantees distinct parameter labels.
                            if let Some(start) = sig.label.find(&param.label) {
                                let encoding = snapshot.encoding();
                                // Convert byte offsets to character offsets based on negotiated encoding
                                let start_char_offset = match encoding {
                                    PositionEncoding::UTF8 => start,
                                    PositionEncoding::UTF16 => {
                                        sig.label[..start].encode_utf16().count()
                                    }
                                    PositionEncoding::UTF32 => sig.label[..start].chars().count(),
                                };
                                let end_char_offset = match encoding {
                                    PositionEncoding::UTF8 => start + param.label.len(),
                                    PositionEncoding::UTF16 => sig.label
                                        [..start + param.label.len()]
                                        .encode_utf16()
                                        .count(),
                                    PositionEncoding::UTF32 => {
                                        sig.label[..start + param.label.len()].chars().count()
                                    }
                                };
                                // Saturate instead of failing if an offset ever
                                // exceeds u32::MAX.
                                let start_u32 =
                                    u32::try_from(start_char_offset).unwrap_or(u32::MAX);
                                let end_u32 = u32::try_from(end_char_offset).unwrap_or(u32::MAX);
                                ParameterLabel::LabelOffsets([start_u32, end_u32])
                            } else {
                                ParameterLabel::Simple(param.label)
                            }
                        } else {
                            ParameterLabel::Simple(param.label)
                        };
                        ParameterInformation {
                            label,
                            documentation: param.documentation.map(Documentation::String),
                        }
                    })
                    .collect();
                // Per-signature active parameter is only sent when the client
                // declared support for it.
                let active_parameter =
                    if resolved_capabilities.supports_signature_active_parameter() {
                        sig.active_parameter.and_then(|p| u32::try_from(p).ok())
                    } else {
                        None
                    };
                SignatureInformation {
                    label: sig.label,
                    documentation: sig
                        .documentation
                        .map(|docstring| Documentation::String(docstring.render_plaintext())),
                    parameters: Some(parameters),
                    active_parameter,
                }
            })
            .collect();
        let signature_help = SignatureHelp {
            signatures,
            active_signature: signature_help_info
                .active_signature
                .and_then(|s| u32::try_from(s).ok()),
            active_parameter,
        };
        Ok(Some(signature_help))
    }
}

/// Signature help is read-only, so the request is safe to retry.
impl RetriableRequestHandler for SignatureHelpRequestHandler {}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.