repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/keyword.rs | crates/ruff_python_formatter/src/other/keyword.rs | use ruff_formatter::write;
use ruff_python_ast::Keyword;
use crate::prelude::*;
/// Formats a keyword argument (`name=value`) or a double-star unpacking (`**value`).
#[derive(Default)]
pub struct FormatKeyword;

impl FormatNodeRule<Keyword> for FormatKeyword {
    fn fmt_fields(&self, item: &Keyword, f: &mut PyFormatter) -> FormatResult<()> {
        let Keyword {
            range: _,
            node_index: _,
            arg,
            value,
        } = item;
        // Comments after the `=` or `**` are reassigned as leading comments on the value.
        match arg {
            Some(arg) => write!(f, [arg.format(), token("="), value.format()]),
            None => write!(f, [token("**"), value.format()]),
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/match_case.rs | crates/ruff_python_formatter/src/other/match_case.rs | use ruff_formatter::{FormatRuleWithOptions, format_args, write};
use ruff_python_ast::MatchCase;
use crate::expression::maybe_parenthesize_expression;
use crate::expression::parentheses::Parenthesize;
use crate::pattern::maybe_parenthesize_pattern;
use crate::prelude::*;
use crate::statement::clause::{ClauseHeader, clause};
use crate::statement::suite::SuiteKind;
/// Formats a single `case` clause of a `match` statement.
#[derive(Default)]
pub struct FormatMatchCase {
    /// Whether this `case` body is the last suite in the enclosing statement.
    last_suite_in_statement: bool,
}

impl FormatRuleWithOptions<MatchCase, PyFormatContext<'_>> for FormatMatchCase {
    type Options = bool;

    fn with_options(self, options: Self::Options) -> Self {
        Self {
            last_suite_in_statement: options,
        }
    }
}
impl FormatNodeRule<MatchCase> for FormatMatchCase {
    /// Formats `case <pattern> [if <guard>]: <body>` as a clause header plus suite.
    fn fmt_fields(&self, item: &MatchCase, f: &mut PyFormatter) -> FormatResult<()> {
        let MatchCase {
            range: _,
            node_index: _,
            pattern,
            guard,
            body,
        } = item;

        let comments = f.context().comments().clone();
        // Dangling comments on the `case` clause are handed to `clause` for placement.
        let dangling_item_comments = comments.dangling(item);

        // Optional ` if <guard>` part; the guard expression is only parenthesized
        // when it breaks.
        let format_guard = guard.as_deref().map(|guard| {
            format_with(|f| {
                write!(f, [space(), token("if"), space()])?;
                maybe_parenthesize_expression(guard, item, Parenthesize::IfBreaksParenthesized)
                    .fmt(f)
            })
        });

        write!(
            f,
            [clause(
                ClauseHeader::MatchCase(item),
                &format_args![
                    token("case"),
                    space(),
                    maybe_parenthesize_pattern(pattern, item),
                    format_guard
                ],
                dangling_item_comments,
                body,
                SuiteKind::other(self.last_suite_in_statement),
            )]
        )
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/interpolated_string.rs | crates/ruff_python_formatter/src/other/interpolated_string.rs | use ruff_python_ast::{AnyStringFlags, InterpolatedStringElements};
use ruff_source_file::LineRanges;
use ruff_text_size::Ranged;
/// Context shared by the elements of a single f/t-string part: the part's string
/// flags and whether the string is laid out across multiple lines.
#[derive(Clone, Copy, Debug)]
pub(crate) struct InterpolatedStringContext {
    /// The string flags of the enclosing f/t-string part.
    enclosing_flags: AnyStringFlags,
    /// Whether expressions may be broken across multiple lines.
    layout: InterpolatedStringLayout,
}

impl InterpolatedStringContext {
    pub(crate) const fn new(flags: AnyStringFlags, layout: InterpolatedStringLayout) -> Self {
        Self {
            enclosing_flags: flags,
            layout,
        }
    }

    /// Returns the string flags of the enclosing f/t-string part.
    pub(crate) fn flags(self) -> AnyStringFlags {
        self.enclosing_flags
    }

    /// Returns `true` if the layout is [`InterpolatedStringLayout::Multiline`].
    pub(crate) const fn is_multiline(self) -> bool {
        matches!(self.layout, InterpolatedStringLayout::Multiline)
    }
}
#[derive(Copy, Clone, Debug)]
pub(crate) enum InterpolatedStringLayout {
    /// Original f/t-string is flat.
    /// Don't break expressions to keep the string flat.
    Flat,
    /// Original f/t-string has multiline expressions in the replacement fields.
    /// Allow breaking expressions across multiple lines.
    Multiline,
}

impl InterpolatedStringLayout {
    // Heuristic: only allow breaking the f/t-string expressions over multiple lines
    // when at least one interpolation already spans multiple lines in the source.
    // This leaves the decision to the user, similar to how Prettier handles
    // template literals in JavaScript.
    //
    // For a single-quoted f-string containing a multiline expression we assume the
    // target Python version supports it (3.12+). Comments inside any expression
    // always force the multiline layout, with the same version assumption.
    //
    // Reference: https://prettier.io/docs/en/next/rationale.html#template-literals
    pub(crate) fn from_interpolated_string_elements(
        elements: &InterpolatedStringElements,
        source: &str,
    ) -> Self {
        let has_multiline_interpolation = elements
            .interpolations()
            .any(|interpolation| source.contains_line_break(interpolation.range()));

        if has_multiline_interpolation {
            Self::Multiline
        } else {
            Self::Flat
        }
    }

    pub(crate) const fn is_flat(self) -> bool {
        matches!(self, Self::Flat)
    }

    pub(crate) const fn is_multiline(self) -> bool {
        matches!(self, Self::Multiline)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/identifier.rs | crates/ruff_python_formatter/src/other/identifier.rs | use ruff_formatter::{FormatOwnedWithRule, FormatRefWithRule};
use ruff_python_ast::Identifier;
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::Ranged;
use crate::prelude::*;
/// Formats an [`Identifier`] by emitting it verbatim from the source.
pub struct FormatIdentifier;

impl FormatRule<Identifier, PyFormatContext<'_>> for FormatIdentifier {
    fn fmt(&self, item: &Identifier, f: &mut PyFormatter) -> FormatResult<()> {
        // Identifiers never need normalization; copy the source range as-is.
        source_text_slice(item.range()).fmt(f)
    }
}

impl<'ast> AsFormat<PyFormatContext<'ast>> for Identifier {
    type Format<'a> = FormatRefWithRule<'a, Identifier, FormatIdentifier, PyFormatContext<'ast>>;

    fn format(&self) -> Self::Format<'_> {
        FormatRefWithRule::new(self, FormatIdentifier)
    }
}

impl<'ast> IntoFormat<PyFormatContext<'ast>> for Identifier {
    type Format = FormatOwnedWithRule<Identifier, FormatIdentifier, PyFormatContext<'ast>>;

    fn into_format(self) -> Self::Format {
        FormatOwnedWithRule::new(self, FormatIdentifier)
    }
}
/// A formatter for a dot-delimited identifier, as seen in import statements:
/// ```python
/// import foo.bar
/// from tqdm . auto import tqdm
/// ```
///
/// Dot-delimited identifiers can contain newlines via continuations (backslashes) after the
/// dot-delimited segment, as in:
/// ```python
/// import foo\
/// .bar
/// ```
///
/// While identifiers can typically be formatted via verbatim source code slices, dot-delimited
/// identifiers with newlines must be formatted via `text`. This struct implements both the fast
/// and slow paths for such identifiers.
#[derive(Debug)]
pub(crate) struct DotDelimitedIdentifier<'a>(&'a Identifier);

impl<'a> DotDelimitedIdentifier<'a> {
    /// Creates a formatter for a dot-delimited identifier such as `foo.bar`.
    pub(crate) fn new(identifier: &'a Identifier) -> Self {
        Self(identifier)
    }
}
impl Format<PyFormatContext<'_>> for DotDelimitedIdentifier<'_> {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        // An import identifier can contain whitespace around the dots:
        // ```python
        // import importlib . metadata
        // ```
        // It can also contain newlines by inserting continuations (backslashes) after
        // a dot-delimited segment, as in:
        // ```python
        // import foo\
        //     .bar
        // ```
        //
        // Characters that must be stripped before the identifier can be emitted.
        // Shared between the fast-path check and the slow-path filter so the two
        // predicates cannot drift apart (previously they were duplicated inline).
        let is_stripped = |c: char| is_python_whitespace(c) || matches!(c, '\n' | '\r' | '\\');

        let source = &f.context().source()[self.0.range()];
        if source.chars().any(is_stripped) {
            // Slow path: rebuild the identifier without whitespace/continuations.
            let no_whitespace: String = source.chars().filter(|c| !is_stripped(*c)).collect();
            text(&no_whitespace).fmt(f)
        } else {
            // Fast path: the identifier is clean; emit the source slice verbatim.
            source_text_slice(self.0.range()).fmt(f)
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/interpolated_string_element.rs | crates/ruff_python_formatter/src/other/interpolated_string_element.rs | use std::borrow::Cow;
use ruff_formatter::{Buffer, FormatOptions as _, RemoveSoftLinesBuffer, format_args, write};
use ruff_python_ast::{
AnyStringFlags, ConversionFlag, Expr, InterpolatedElement, InterpolatedStringElement,
InterpolatedStringLiteralElement,
};
use ruff_text_size::{Ranged, TextSlice};
use crate::comments::dangling_open_parenthesis_comments;
use crate::context::{
InterpolatedStringState, NodeLevel, WithInterpolatedStringState, WithNodeLevel,
};
use crate::expression::left_most;
use crate::prelude::*;
use crate::string::normalize_string;
use crate::verbatim::verbatim_text;
use super::interpolated_string::InterpolatedStringContext;
/// Formats an f-string element which is either a literal or a formatted expression.
///
/// This delegates the actual formatting to the appropriate formatter.
pub(crate) struct FormatInterpolatedStringElement<'a> {
    /// The literal or interpolation element to format.
    element: &'a InterpolatedStringElement,
    /// Flags and layout of the enclosing f/t-string part.
    context: InterpolatedStringContext,
}

impl<'a> FormatInterpolatedStringElement<'a> {
    pub(crate) fn new(
        element: &'a InterpolatedStringElement,
        context: InterpolatedStringContext,
    ) -> Self {
        Self { element, context }
    }
}
impl Format<PyFormatContext<'_>> for FormatInterpolatedStringElement<'_> {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        // Dispatch to the literal or interpolation formatter based on the element kind.
        match self.element {
            InterpolatedStringElement::Literal(string_literal) => {
                FormatFStringLiteralElement::new(string_literal, self.context.flags()).fmt(f)
            }
            InterpolatedStringElement::Interpolation(expression) => {
                FormatInterpolatedElement::new(expression, self.context).fmt(f)
            }
        }
    }
}
/// Formats an f-string literal element.
pub(crate) struct FormatFStringLiteralElement<'a> {
    /// The literal text element to format.
    element: &'a InterpolatedStringLiteralElement,
    /// Flags of the enclosing F-string part
    fstring_flags: AnyStringFlags,
}

impl<'a> FormatFStringLiteralElement<'a> {
    pub(crate) fn new(
        element: &'a InterpolatedStringLiteralElement,
        fstring_flags: AnyStringFlags,
    ) -> Self {
        Self {
            element,
            fstring_flags,
        }
    }
}
impl Format<PyFormatContext<'_>> for FormatFStringLiteralElement<'_> {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        let literal_content = f.context().source().slice(self.element);
        // Normalization only allocates when changes are required; when the content
        // came back borrowed (unchanged), emit the cheap source slice instead.
        match normalize_string(literal_content, 0, self.fstring_flags, false) {
            Cow::Borrowed(_) => source_text_slice(self.element.range()).fmt(f),
            Cow::Owned(normalized) => text(&normalized).fmt(f),
        }
    }
}
/// Formats an f-string expression element.
pub(crate) struct FormatInterpolatedElement<'a> {
    /// The interpolation (`{...}`) element to format.
    element: &'a InterpolatedElement,
    /// Flags and layout of the enclosing f/t-string part.
    context: InterpolatedStringContext,
}

impl<'a> FormatInterpolatedElement<'a> {
    pub(crate) fn new(
        element: &'a InterpolatedElement,
        context: InterpolatedStringContext,
    ) -> Self {
        Self { element, context }
    }
}
impl Format<PyFormatContext<'_>> for FormatInterpolatedElement<'_> {
    /// Formats a `{...}` replacement field: either verbatim (when debug text like
    /// `{x=}` is present) or via the regular expression formatter.
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        let InterpolatedElement {
            expression,
            debug_text,
            conversion,
            format_spec,
            ..
        } = self.element;
        let expression = &**expression;

        if let Some(debug_text) = debug_text {
            token("{").fmt(f)?;

            let comments = f.context().comments();

            // If the element has a debug text, preserve the same formatting as
            // in the source code (`verbatim`). This requires us to mark all of
            // the surrounding comments as formatted.
            comments.mark_verbatim_node_comments_formatted(self.element.into());

            // Above method doesn't mark the leading and trailing comments of the element.
            // There can't be any leading comments for an expression element, but there
            // can be trailing comments. For example,
            //
            // ```python
            // f"""foo {
            //     x:.3f
            //     # trailing comment
            // }"""
            // ```
            for trailing_comment in comments.trailing(self.element) {
                trailing_comment.mark_formatted();
            }

            write!(
                f,
                [
                    NormalizedDebugText(&debug_text.leading),
                    verbatim_text(expression),
                    NormalizedDebugText(&debug_text.trailing),
                ]
            )?;

            // Even if debug text is present, any whitespace between the
            // conversion flag and the format spec doesn't need to be preserved.
            match conversion {
                ConversionFlag::Str => text("!s").fmt(f)?,
                ConversionFlag::Ascii => text("!a").fmt(f)?,
                ConversionFlag::Repr => text("!r").fmt(f)?,
                ConversionFlag::None => (),
            }

            if let Some(format_spec) = format_spec.as_deref() {
                write!(f, [token(":"), verbatim_text(format_spec)])?;
            }

            token("}").fmt(f)
        } else {
            let comments = f.context().comments().clone();
            let dangling_item_comments = comments.dangling(self.element);
            let multiline = self.context.is_multiline();

            // If an expression starts with a `{`, we need to add a space before the
            // curly brace to avoid turning it into a literal curly with `{{`.
            //
            // For example,
            // ```python
            // f"{ {'x': 1, 'y': 2} }"
            // # ^ ^
            // ```
            //
            // We need to preserve the space highlighted by `^`. The whitespace
            // before the closing curly brace is not strictly necessary, but it's
            // added to maintain consistency.
            let bracket_spacing =
                needs_bracket_spacing(expression, f.context()).then_some(format_with(|f| {
                    if multiline {
                        soft_line_break_or_space().fmt(f)
                    } else {
                        space().fmt(f)
                    }
                }));

            let item = format_with(|f: &mut PyFormatter| {
                // Update the context to be inside the f-string expression element.
                let state = match f.context().interpolated_string_state() {
                    InterpolatedStringState::InsideInterpolatedElement(_)
                    | InterpolatedStringState::NestedInterpolatedElement(_) => {
                        InterpolatedStringState::NestedInterpolatedElement(self.context)
                    }
                    InterpolatedStringState::Outside => {
                        InterpolatedStringState::InsideInterpolatedElement(self.context)
                    }
                };
                let f = &mut WithInterpolatedStringState::new(state, f);

                write!(f, [bracket_spacing, expression.format()])?;

                // Conversion comes first, then the format spec.
                match conversion {
                    ConversionFlag::Str => text("!s").fmt(f)?,
                    ConversionFlag::Ascii => text("!a").fmt(f)?,
                    ConversionFlag::Repr => text("!r").fmt(f)?,
                    ConversionFlag::None => (),
                }

                if let Some(format_spec) = format_spec.as_deref() {
                    // A trailing comment on the expression needs a line break before
                    // the `:` so it doesn't swallow the format spec:
                    // ```py
                    // f"{
                    //     foo
                    //     # comment 27
                    //     :test}"
                    // ```
                    if comments.has_trailing(expression) {
                        soft_line_break().fmt(f)?;
                    }

                    token(":").fmt(f)?;

                    // The format spec is itself a sequence of literal/interpolated elements.
                    for element in &format_spec.elements {
                        FormatInterpolatedStringElement::new(element, self.context).fmt(f)?;
                    }
                }

                // The closing-side spacing is only emitted when nothing (conversion or
                // format spec) comes between the expression and the `}`.
                if conversion.is_none() && format_spec.is_none() {
                    bracket_spacing.fmt(f)?;
                }

                Ok(())
            });

            let open_parenthesis_comments = if dangling_item_comments.is_empty() {
                None
            } else {
                Some(dangling_open_parenthesis_comments(dangling_item_comments))
            };

            token("{").fmt(f)?;

            {
                let mut f = WithNodeLevel::new(NodeLevel::ParenthesizedExpression, f);

                if self.context.is_multiline() {
                    if format_spec.is_none() {
                        group(&format_args![
                            open_parenthesis_comments,
                            soft_block_indent(&item)
                        ])
                        .fmt(&mut f)?;
                    } else {
                        // For strings ending with a format spec, don't add a newline between the end of the format spec
                        // and closing curly brace because that is invalid syntax for single quoted strings and
                        // the newline is preserved as part of the format spec for triple quoted strings.
                        group(&format_args![
                            open_parenthesis_comments,
                            indent(&format_args![soft_line_break(), item])
                        ])
                        .fmt(&mut f)?;
                    }
                } else {
                    // Flat layout: strip any soft line breaks so the element stays on one line.
                    let mut buffer = RemoveSoftLinesBuffer::new(&mut *f);

                    write!(buffer, [open_parenthesis_comments, item])?;
                }
            }

            token("}").fmt(f)
        }
    }
}
/// Returns `true` if a space must be inserted inside the replacement-field braces
/// because the leftmost sub-expression itself starts with a `{`.
fn needs_bracket_spacing(expr: &Expr, context: &PyFormatContext) -> bool {
    // Ruff parenthesizes single element tuples, that's why we shouldn't insert
    // a space around the curly braces for those.
    let is_single_element_tuple = expr
        .as_tuple_expr()
        .is_some_and(|tuple| !tuple.parenthesized && tuple.elts.len() == 1);

    !is_single_element_tuple
        && matches!(
            left_most(expr, context.comments().ranges(), context.source()),
            Expr::Dict(_) | Expr::DictComp(_) | Expr::Set(_) | Expr::SetComp(_)
        )
}
/// Emits a debug-text fragment with `\r` line endings normalized to `\n`.
struct NormalizedDebugText<'a>(&'a str);

impl Format<PyFormatContext<'_>> for NormalizedDebugText<'_> {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        let normalized = normalize_newlines(self.0, ['\r']);

        // Write the raw text element directly, measuring its width for layout.
        f.write_element(FormatElement::Text {
            text_width: TextWidth::from_text(&normalized, f.options().indent_width()),
            text: normalized.into_owned().into_boxed_str(),
        });

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/bytes_literal.rs | crates/ruff_python_formatter/src/other/bytes_literal.rs | use ruff_python_ast::BytesLiteral;
use crate::prelude::*;
use crate::string::StringNormalizer;
/// Formats a bytes literal; quote and prefix normalization is delegated entirely
/// to the string normalizer.
#[derive(Default)]
pub struct FormatBytesLiteral;

impl FormatNodeRule<BytesLiteral> for FormatBytesLiteral {
    fn fmt_fields(&self, item: &BytesLiteral, f: &mut PyFormatter) -> FormatResult<()> {
        let normalized = StringNormalizer::from_context(f.context()).normalize(item.into());
        normalized.fmt(f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/mod.rs | crates/ruff_python_formatter/src/other/mod.rs | pub(crate) mod alias;
pub(crate) mod arguments;
pub(crate) mod bytes_literal;
pub(crate) mod commas;
pub(crate) mod comprehension;
pub(crate) mod decorator;
pub(crate) mod elif_else_clause;
pub(crate) mod except_handler_except_handler;
pub(crate) mod f_string;
pub(crate) mod identifier;
pub(crate) mod interpolated_string;
pub(crate) mod interpolated_string_element;
pub(crate) mod keyword;
pub(crate) mod match_case;
pub(crate) mod parameter;
pub(crate) mod parameter_with_default;
pub(crate) mod parameters;
pub(crate) mod string_literal;
pub(crate) mod t_string;
pub(crate) mod with_item;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/alias.rs | crates/ruff_python_formatter/src/other/alias.rs | use ruff_formatter::write;
use ruff_python_ast::Alias;
use crate::comments::trailing_comments;
use crate::other::identifier::DotDelimitedIdentifier;
use crate::prelude::*;
/// Formats an import alias (`name` or `name as asname`), taking care to keep
/// comments attached to the name, the `as` keyword, and the alias readable.
#[derive(Default)]
pub struct FormatAlias;

impl FormatNodeRule<Alias> for FormatAlias {
    fn fmt_fields(&self, item: &Alias, f: &mut PyFormatter) -> FormatResult<()> {
        let Alias {
            range: _,
            node_index: _,
            name,
            asname,
        } = item;

        write!(f, [DotDelimitedIdentifier::new(name)])?;

        let comments = f.context().comments().clone();

        // A trailing comment on the name forces the `as` onto the next line:
        // ```python
        // from foo import (
        //     bar  # comment
        //     as baz,
        // )
        // ```
        if comments.has_trailing(name) {
            write!(
                f,
                [
                    trailing_comments(comments.trailing(name)),
                    hard_line_break()
                ]
            )?;
        } else if asname.is_some() {
            write!(f, [space()])?;
        }

        if let Some(asname) = asname {
            write!(f, [token("as")])?;

            // A leading comment on the alias forces it onto the next line:
            // ```python
            // from foo import (
            //     bar as  # comment
            //     baz,
            // )
            // ```
            if comments.has_leading(asname) {
                write!(
                    f,
                    [
                        trailing_comments(comments.leading(asname)),
                        hard_line_break()
                    ]
                )?;
            } else {
                write!(f, [space()])?;
            }

            write!(f, [asname.format()])?;
        }

        // Dangling comment between alias and comma on a following line
        // ```python
        // from foo import (
        //     bar  # comment
        //     ,
        // )
        // ```
        let dangling = comments.dangling(item);
        if !dangling.is_empty() {
            // Reuse the already-computed `dangling` slice rather than querying the
            // comment map a second time (was `comments.dangling(item)` again).
            write!(f, [trailing_comments(dangling)])?;

            // Black will move the comma and merge comments if there is no own-line comment between
            // the alias and the comma.
            //
            // Eg:
            // ```python
            // from foo import (
            //     bar  # one
            //     , # two
            // )
            // ```
            //
            // Will become:
            // ```python
            // from foo import (
            //     bar,  # one  # two)
            // ```
            //
            // Only force a hard line break if an own-line dangling comment is present.
            if dangling
                .iter()
                .any(|comment| comment.line_position().is_own_line())
            {
                write!(f, [hard_line_break()])?;
            }
        }

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/parameter_with_default.rs | crates/ruff_python_formatter/src/other/parameter_with_default.rs | use ruff_formatter::write;
use ruff_python_ast::ParameterWithDefault;
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::{Ranged, TextRange};
use crate::prelude::*;
/// Formats a parameter with its optional default value (`a`, `a=1`, `a: int = 1`).
#[derive(Default)]
pub struct FormatParameterWithDefault;

impl FormatNodeRule<ParameterWithDefault> for FormatParameterWithDefault {
    fn fmt_fields(&self, item: &ParameterWithDefault, f: &mut PyFormatter) -> FormatResult<()> {
        let ParameterWithDefault {
            range: _,
            node_index: _,
            parameter,
            default,
        } = item;

        write!(f, [parameter.format()])?;

        if let Some(default) = default {
            // Spaces around `=` only when the parameter is annotated (PEP 8 style).
            let space = parameter.annotation.is_some().then_some(space());

            // ```python
            // def f(
            //     a = # parameter trailing comment; needs line break
            //     1,
            //     b =
            //     # default leading comment; needs line break
            //     2,
            //     c = ( # the default leading can only be end-of-line with parentheses; no line break
            //     3
            //     ),
            //     d = (
            //     # own line leading comment with parentheses; no line break
            //     4
            //     )
            // )
            // ```
            let needs_line_break_trailing = f.context().comments().has_trailing(parameter);
            let default_first_comment = f.context().comments().leading(default.as_ref()).first();
            // Tokenize the span between the parameter and the default's first leading
            // comment: after skipping the `=`, a `(` means the comment lives inside
            // parentheses and no line break is needed.
            let needs_line_break_leading =
                default_first_comment.is_some_and(|default_leading_comment| {
                    let mut tokenizer = SimpleTokenizer::new(
                        f.context().source(),
                        TextRange::new(parameter.end(), default_leading_comment.start()),
                    )
                    .skip_trivia()
                    .skip_while(|token| token.kind == SimpleTokenKind::RParen);
                    let equals = tokenizer.next();
                    debug_assert!(
                        equals.is_some_and(|token| token.kind == SimpleTokenKind::Equals)
                    );
                    let lparens = tokenizer.next();
                    debug_assert!(
                        lparens
                            .as_ref()
                            .is_none_or(|token| token.kind == SimpleTokenKind::LParen)
                    );
                    lparens.is_none()
                });
            let needs_line_break = needs_line_break_trailing || needs_line_break_leading;

            write!(
                f,
                [
                    space,
                    token("="),
                    (!needs_line_break).then_some(space),
                    needs_line_break.then_some(hard_line_break()),
                    default.format()
                ]
            )?;
        }

        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/t_string.rs | crates/ruff_python_formatter/src/other/t_string.rs | use super::interpolated_string_element::FormatInterpolatedStringElement;
use crate::other::interpolated_string::{InterpolatedStringContext, InterpolatedStringLayout};
use crate::prelude::*;
use crate::string::{StringNormalizer, StringQuotes};
use ruff_formatter::write;
use ruff_python_ast::{StringFlags, TString};
/// Formats a t-string which is part of a larger t-string expression.
///
/// For example, this would be used to format the t-string part in `"foo" t"bar {x}"`
/// or the standalone t-string in `t"foo {x} bar"`.
#[derive(Default)]
pub struct FormatTString;

impl FormatNodeRule<TString> for FormatTString {
    fn fmt_fields(&self, item: &TString, f: &mut PyFormatter) -> FormatResult<()> {
        // Decide on the quotes for this part first; the elements need the resulting
        // flags to format their contents consistently.
        let normalizer = StringNormalizer::from_context(f.context());
        let string_kind = normalizer.choose_quotes(item.into()).flags();

        let context = InterpolatedStringContext::new(
            string_kind,
            InterpolatedStringLayout::from_interpolated_string_elements(
                &item.elements,
                f.context().source(),
            ),
        );

        // Starting prefix and quote
        let quotes = StringQuotes::from(string_kind);
        write!(f, [string_kind.prefix(), quotes])?;

        for element in &item.elements {
            FormatInterpolatedStringElement::new(element, context).fmt(f)?;
        }

        // Ending quote
        quotes.fmt(f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/comprehension.rs | crates/ruff_python_formatter/src/other/comprehension.rs | use ruff_formatter::{Buffer, FormatResult, format_args, write};
use ruff_python_ast::{Comprehension, Expr};
use ruff_python_trivia::{SimpleTokenKind, find_only_token_in_range};
use ruff_text_size::{Ranged, TextRange};
use crate::comments::{leading_comments, trailing_comments};
use crate::expression::expr_tuple::TupleParentheses;
use crate::expression::parentheses::is_expression_parenthesized;
use crate::prelude::*;
/// Formats a single comprehension clause (`[async] for <target> in <iter> [if ...]*`).
#[derive(Default)]
pub struct FormatComprehension;

impl FormatNodeRule<Comprehension> for FormatComprehension {
    fn fmt_fields(&self, item: &Comprehension, f: &mut PyFormatter) -> FormatResult<()> {
        /// Emits either a space or a soft line break before `expression`, depending
        /// on its leading comments and whether it stays parenthesized.
        struct Spacer<'a> {
            expression: &'a Expr,
            preserve_parentheses: bool,
        }

        impl Format<PyFormatContext<'_>> for Spacer<'_> {
            fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
                let has_leading_comments = f.context().comments().has_leading(self.expression);

                // Don't add a soft line break for parenthesized expressions with a leading comment.
                // The comments are rendered **inside** the parentheses and adding a softline break
                // unnecessarily forces the parentheses to be on their own line.
                // ```python
                // y = [
                //     ...
                //     if
                //     (
                //         # See how the `(` gets forced on its own line? We don't want that.
                //         ...
                //     )
                // ]
                // ```
                let will_be_parenthesized = self.preserve_parentheses
                    && is_expression_parenthesized(
                        self.expression.into(),
                        f.context().comments().ranges(),
                        f.context().source(),
                    );

                if has_leading_comments && !will_be_parenthesized {
                    soft_line_break_or_space().fmt(f)
                } else {
                    space().fmt(f)
                }
            }
        }

        let Comprehension {
            range: _,
            node_index: _,
            target,
            iter,
            ifs,
            is_async,
        } = item;

        if *is_async {
            write!(f, [token("async"), space()])?;
        }

        let comments = f.context().comments().clone();
        let dangling_item_comments = comments.dangling(item);

        // Partition the clause's dangling comments by position: before the target,
        // before the `in` token, trailing the `in`, and belonging to the `if`s.
        let (before_target_comments, dangling_comments) = dangling_item_comments.split_at(
            dangling_item_comments.partition_point(|comment| comment.end() < target.start()),
        );

        let in_token = find_only_token_in_range(
            TextRange::new(target.end(), iter.start()),
            SimpleTokenKind::In,
            f.context().source(),
        );

        let (before_in_comments, dangling_comments) = dangling_comments.split_at(
            dangling_comments.partition_point(|comment| comment.end() < in_token.start()),
        );

        let (trailing_in_comments, dangling_if_comments) = dangling_comments
            .split_at(dangling_comments.partition_point(|comment| comment.start() < iter.start()));

        // Comments before `in` are rendered as its leading comments, which requires
        // a line break instead of a plain space.
        let in_spacer = format_with(|f| {
            if before_in_comments.is_empty() {
                space().fmt(f)
            } else {
                soft_line_break_or_space().fmt(f)
            }
        });

        write!(
            f,
            [
                token("for"),
                trailing_comments(before_target_comments),
                Spacer {
                    expression: target,
                    preserve_parentheses: !target.is_tuple_expr()
                },
                ExprTupleWithoutParentheses(target),
                in_spacer,
                leading_comments(before_in_comments),
                token("in"),
                trailing_comments(trailing_in_comments),
                Spacer {
                    expression: iter,
                    preserve_parentheses: true
                },
                iter.format(),
            ]
        )?;

        if !ifs.is_empty() {
            let joined = format_with(|f| {
                let mut joiner = f.join_with(soft_line_break_or_space());
                let mut dangling_if_comments = dangling_if_comments;

                // Attach each run of remaining dangling comments to the `if` clause
                // it precedes; own-line comments lead, end-of-line comments trail.
                for if_case in ifs {
                    let (if_comments, rest) = dangling_if_comments.split_at(
                        dangling_if_comments
                            .partition_point(|comment| comment.start() < if_case.start()),
                    );

                    let (own_line_if_comments, end_of_line_if_comments) = if_comments.split_at(
                        if_comments
                            .partition_point(|comment| comment.line_position().is_own_line()),
                    );

                    joiner.entry(&format_args!(
                        leading_comments(own_line_if_comments),
                        token("if"),
                        trailing_comments(end_of_line_if_comments),
                        Spacer {
                            expression: if_case,
                            preserve_parentheses: true
                        },
                        if_case.format(),
                    ));

                    dangling_if_comments = rest;
                }

                debug_assert!(dangling_if_comments.is_empty());

                joiner.finish()
            });

            write!(f, [soft_line_break_or_space(), joined])?;
        }

        Ok(())
    }
}
/// Formats an expression, suppressing the parentheses when it is a tuple.
struct ExprTupleWithoutParentheses<'a>(&'a Expr);

impl Format<PyFormatContext<'_>> for ExprTupleWithoutParentheses<'_> {
    fn fmt(&self, f: &mut PyFormatter) -> FormatResult<()> {
        if let Expr::Tuple(expr_tuple) = self.0 {
            expr_tuple
                .format()
                .with_options(TupleParentheses::Never)
                .fmt(f)
        } else {
            self.0.format().fmt(f)
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/commas.rs | crates/ruff_python_formatter/src/other/commas.rs | use ruff_formatter::FormatContext;
use ruff_python_trivia::{SimpleToken, SimpleTokenKind, SimpleTokenizer};
use ruff_text_size::TextRange;
use crate::MagicTrailingComma;
use crate::prelude::*;
/// Returns `true` if the range ends with a magic trailing comma (and the magic trailing comma
/// should be respected).
pub(crate) fn has_magic_trailing_comma(range: TextRange, context: &PyFormatContext) -> bool {
    // Only scan the source when the option says trailing commas are respected;
    // `&&` keeps the short-circuit of the original `match`.
    matches!(
        context.options().magic_trailing_comma(),
        MagicTrailingComma::Respect
    ) && has_trailing_comma(range, context)
}
/// Returns `true` if the range ends with a trailing comma.
pub(crate) fn has_trailing_comma(range: TextRange, context: &PyFormatContext) -> bool {
    // The first non-trivia token after the expression decides: a comma means a
    // trailing comma is present.
    SimpleTokenizer::new(context.source(), range)
        .skip_trivia()
        // Skip over any closing parentheses belonging to the expression
        .find(|token| token.kind() != SimpleTokenKind::RParen)
        .is_some_and(|token| token.kind() == SimpleTokenKind::Comma)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/src/other/parameter.rs | crates/ruff_python_formatter/src/other/parameter.rs | use crate::expression::parentheses::is_expression_parenthesized;
use crate::prelude::*;
use ruff_python_ast::Parameter;
/// Formats a function parameter with its optional annotation (`name` or `name: type`).
#[derive(Default)]
pub struct FormatParameter;

impl FormatNodeRule<Parameter> for FormatParameter {
    fn fmt_fields(&self, item: &Parameter, f: &mut PyFormatter) -> FormatResult<()> {
        let Parameter {
            range: _,
            node_index: _,
            name,
            annotation,
        } = item;

        name.format().fmt(f)?;

        let Some(annotation) = annotation.as_deref() else {
            return Ok(());
        };

        token(":").fmt(f)?;

        // A leading comment on an unparenthesized annotation must be rendered on
        // its own line, which pushes the annotation to the next line too.
        let breaks_before_annotation = f.context().comments().has_leading(annotation)
            && !is_expression_parenthesized(
                annotation.into(),
                f.context().comments().ranges(),
                f.context().source(),
            );

        if breaks_before_annotation {
            hard_line_break().fmt(f)?;
        } else {
            space().fmt(f)?;
        }

        annotation.format().fmt(f)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/tests/normalizer.rs | crates/ruff_python_formatter/tests/normalizer.rs | use regex::Regex;
use std::sync::LazyLock;
use ruff_python_ast::{
self as ast, BytesLiteralFlags, Expr, FStringFlags, FStringPart, InterpolatedStringElement,
InterpolatedStringLiteralElement, Stmt, StringFlags,
};
use ruff_python_ast::{AtomicNodeIndex, visitor::transformer::Transformer};
use ruff_python_ast::{StringLiteralFlags, visitor::transformer};
use ruff_text_size::{Ranged, TextRange};
/// A struct to normalize AST nodes for the purpose of comparing formatted representations for
/// semantic equivalence.
///
/// Vis-à-vis comparing ASTs, comparing these normalized representations does the following:
/// - Ignores non-abstraction information that we've encoded into the AST, e.g., the difference
/// between `class C: ...` and `class C(): ...`, which is part of our AST but not `CPython`'s.
/// - Normalize strings. The formatter can re-indent docstrings, so we need to compare string
/// contents ignoring whitespace. (Black does the same.)
/// - The formatter can also reformat code snippets when they're Python code, which can of
/// course change the string in arbitrary ways. Black itself does not reformat code snippets,
/// so we carve our own path here by stripping everything that looks like code snippets from
/// string literals.
/// - Ignores nested tuples in deletions. (Black does the same.)
///
/// The normalizer mutates the AST in place; apply it with [`Normalizer::visit_module`].
pub(crate) struct Normalizer;
impl Normalizer {
    /// Rewrite a parsed module (or standalone expression) in place into its
    /// normalized form by walking the whole tree.
    #[allow(dead_code)]
    pub(crate) fn visit_module(&self, module: &mut ast::Mod) {
        match module {
            ast::Mod::Expression(expression) => self.visit_expr(&mut expression.body),
            ast::Mod::Module(module) => self.visit_body(&mut module.body),
        }
    }
}
impl Transformer for Normalizer {
    /// Flatten `del (a, b)` into `del a, b` before recursing, so both spellings
    /// normalize to the same AST (the formatter may drop the parentheses).
    fn visit_stmt(&self, stmt: &mut Stmt) {
        if let Stmt::Delete(delete) = stmt {
            // Treat `del a, b` and `del (a, b)` equivalently.
            if let [Expr::Tuple(tuple)] = delete.targets.as_slice() {
                delete.targets = tuple.elts.clone();
            }
        }
        transformer::walk_stmt(self, stmt);
    }
    /// Join implicitly concatenated string/bytes/f-string literals into a single
    /// literal whenever joining is possible (no triple-quoted and no raw parts).
    fn visit_expr(&self, expr: &mut Expr) {
        // Ruff supports joining implicitly concatenated strings. The code below implements this
        // at an AST level by joining the string literals in the AST if they can be joined (it doesn't mean that
        // they'll be joined in the formatted output but they could).
        // Comparable expression handles some of this by comparing the concatenated string
        // but not joining here doesn't play nicely with other string normalizations done in the
        // Normalizer.
        match expr {
            Expr::StringLiteral(string) => {
                if string.value.is_implicit_concatenated() {
                    // Joining is only safe when no part is raw (escapes differ) or
                    // triple-quoted (may contain the other quote style verbatim).
                    let can_join = string.value.iter().all(|literal| {
                        !literal.flags.is_triple_quoted() && !literal.flags.prefix().is_raw()
                    });
                    if can_join {
                        string.value = ast::StringLiteralValue::single(ast::StringLiteral {
                            value: Box::from(string.value.to_str()),
                            range: string.range,
                            flags: StringLiteralFlags::empty(),
                            node_index: AtomicNodeIndex::NONE,
                        });
                    }
                }
            }
            Expr::BytesLiteral(bytes) => {
                if bytes.value.is_implicit_concatenated() {
                    // Same joinability rules as for string literals above.
                    let can_join = bytes.value.iter().all(|literal| {
                        !literal.flags.is_triple_quoted() && !literal.flags.prefix().is_raw()
                    });
                    if can_join {
                        bytes.value = ast::BytesLiteralValue::single(ast::BytesLiteral {
                            value: bytes.value.bytes().collect(),
                            range: bytes.range,
                            flags: BytesLiteralFlags::empty(),
                            node_index: AtomicNodeIndex::NONE,
                        });
                    }
                }
            }
            Expr::FString(fstring) => {
                if fstring.value.is_implicit_concatenated() {
                    // Both plain literal parts and f-string parts must be joinable.
                    let can_join = fstring.value.iter().all(|part| match part {
                        FStringPart::Literal(literal) => {
                            !literal.flags.is_triple_quoted() && !literal.flags.prefix().is_raw()
                        }
                        FStringPart::FString(string) => {
                            !string.flags.is_triple_quoted() && !string.flags.prefix().is_raw()
                        }
                    });
                    if can_join {
                        // Accumulates the elements of all parts into one element
                        // sequence, merging adjacent literal text along the way.
                        #[derive(Default)]
                        struct Collector {
                            elements: Vec<InterpolatedStringElement>,
                        }
                        impl Collector {
                            // The logic for concatenating adjacent string literals
                            // occurs here, implicitly: when we encounter a sequence
                            // of string literals, the first gets pushed to the
                            // `elements` vector, while subsequent strings
                            // are concatenated onto this top string.
                            fn push_literal(&mut self, literal: &str, range: TextRange) {
                                if let Some(InterpolatedStringElement::Literal(existing_literal)) =
                                    self.elements.last_mut()
                                {
                                    let value = std::mem::take(&mut existing_literal.value);
                                    let mut value = value.into_string();
                                    value.push_str(literal);
                                    existing_literal.value = value.into_boxed_str();
                                    // Extend the merged literal's range to cover the appended text.
                                    existing_literal.range =
                                        TextRange::new(existing_literal.start(), range.end());
                                } else {
                                    self.elements.push(InterpolatedStringElement::Literal(
                                        InterpolatedStringLiteralElement {
                                            range,
                                            value: literal.into(),
                                            node_index: AtomicNodeIndex::NONE,
                                        },
                                    ));
                                }
                            }
                            fn push_expression(&mut self, expression: ast::InterpolatedElement) {
                                self.elements
                                    .push(InterpolatedStringElement::Interpolation(expression));
                            }
                        }
                        let mut collector = Collector::default();
                        for part in &fstring.value {
                            match part {
                                ast::FStringPart::Literal(string_literal) => {
                                    collector
                                        .push_literal(&string_literal.value, string_literal.range);
                                }
                                ast::FStringPart::FString(fstring) => {
                                    for element in &fstring.elements {
                                        match element {
                                            ast::InterpolatedStringElement::Literal(literal) => {
                                                collector
                                                    .push_literal(&literal.value, literal.range);
                                            }
                                            ast::InterpolatedStringElement::Interpolation(
                                                expression,
                                            ) => {
                                                collector.push_expression(expression.clone());
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        fstring.value = ast::FStringValue::single(ast::FString {
                            elements: collector.elements.into(),
                            range: fstring.range,
                            flags: FStringFlags::empty(),
                            node_index: AtomicNodeIndex::NONE,
                        });
                    }
                }
            }
            _ => {}
        }
        transformer::walk_expr(self, expr);
    }
    /// Normalize the newlines captured in f-string `=` debug text.
    fn visit_interpolated_string_element(
        &self,
        interpolated_string_element: &mut InterpolatedStringElement,
    ) {
        let InterpolatedStringElement::Interpolation(interpolation) = interpolated_string_element
        else {
            return;
        };
        let Some(debug) = &mut interpolation.debug_text else {
            return;
        };
        // Changing the newlines to the configured newline is okay because Python normalizes all newlines to `\n`
        debug.leading = debug.leading.replace("\r\n", "\n").replace('\r', "\n");
        debug.trailing = debug.trailing.replace("\r\n", "\n").replace('\r', "\n");
    }
    /// Strip code-snippet-like content (doctests, reST blocks, Markdown fences)
    /// from a string literal, then normalize its whitespace line by line.
    fn visit_string_literal(&self, string_literal: &mut ast::StringLiteral) {
        static STRIP_DOC_TESTS: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(
                r"(?mx)
                (
                    # strip doctest PS1 prompt lines
                    ^\s*>>>\s.*(\n|$)
                    |
                    # strip doctest PS2 prompt lines
                    # Also handles the case of an empty ... line.
                    ^\s*\.\.\.((\n|$)|\s.*(\n|$))
                )+
                ",
            )
            .unwrap()
        });
        static STRIP_RST_BLOCKS: LazyLock<Regex> = LazyLock::new(|| {
            // This is kind of unfortunate, but it's pretty tricky (likely
            // impossible) to detect a reStructuredText block with a simple
            // regex. So we just look for the start of a block and remove
            // everything after it. Talk about a hammer.
            Regex::new(r"::(?s:.*)").unwrap()
        });
        static STRIP_MARKDOWN_BLOCKS: LazyLock<Regex> = LazyLock::new(|| {
            // This covers more than valid Markdown blocks, but that's OK.
            Regex::new(r"(```|~~~)\p{any}*(```|~~~|$)").unwrap()
        });
        // Start by (1) stripping everything that looks like a code
        // snippet, since code snippets may be completely reformatted if
        // they are Python code.
        string_literal.value = STRIP_DOC_TESTS
            .replace_all(
                &string_literal.value,
                "<DOCTEST-CODE-SNIPPET: Removed by normalizer>\n",
            )
            .into_owned()
            .into_boxed_str();
        string_literal.value = STRIP_RST_BLOCKS
            .replace_all(
                &string_literal.value,
                "<RSTBLOCK-CODE-SNIPPET: Removed by normalizer>\n",
            )
            .into_owned()
            .into_boxed_str();
        string_literal.value = STRIP_MARKDOWN_BLOCKS
            .replace_all(
                &string_literal.value,
                "<MARKDOWN-CODE-SNIPPET: Removed by normalizer>\n",
            )
            .into_owned()
            .into_boxed_str();
        // Normalize a string by (2) stripping any leading and trailing space from each
        // line, and (3) removing any blank lines from the start and end of the string.
        string_literal.value = string_literal
            .value
            .lines()
            .map(str::trim)
            .collect::<Vec<_>>()
            .join("\n")
            .trim()
            .to_owned()
            .into_boxed_str();
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_formatter/tests/fixtures.rs | crates/ruff_python_formatter/tests/fixtures.rs | use crate::normalizer::Normalizer;
use anyhow::anyhow;
use datatest_stable::Utf8Path;
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig,
DisplayDiagnostics, DummyFileResolver, Severity, Span, SubDiagnostic, SubDiagnosticSeverity,
};
use ruff_formatter::FormatOptions;
use ruff_python_ast::Mod;
use ruff_python_ast::comparable::ComparableMod;
use ruff_python_ast::visitor::source_order::SourceOrderVisitor;
use ruff_python_formatter::{PreviewMode, PyFormatOptions, format_module_source, format_range};
use ruff_python_parser::{ParseOptions, Parsed, UnsupportedSyntaxError, parse};
use ruff_source_file::{LineIndex, OneIndexed, SourceFileBuilder};
use ruff_text_size::{Ranged, TextRange, TextSize};
use rustc_hash::FxHashMap;
use similar::TextDiff;
use std::borrow::Cow;
use std::collections::hash_map::Entry;
use std::fmt::{Formatter, Write};
use std::hash::{DefaultHasher, Hash, Hasher};
use std::io::BufReader;
use std::ops::Range;
use std::path::Path;
use std::{fmt, fs};
mod normalizer;
/// Tests one Black fixture against Ruff's formatter.
///
/// If Ruff's output matches the expected `.expect` file exactly, any stale insta
/// snapshot is deleted; otherwise a snapshot documenting the differences between
/// Black and Ruff is written.
#[expect(clippy::needless_pass_by_value)]
fn black_compatibility(input_path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    let test_name = input_path
        .strip_prefix("./resources/test/fixtures/black")
        .unwrap_or(input_path)
        .as_str();
    // Per-fixture options may be provided in a sibling `<fixture>.options.json`.
    let options_path = input_path.with_extension("options.json");
    let options: PyFormatOptions = if let Ok(options_file) = fs::File::open(&options_path) {
        let reader = BufReader::new(options_file);
        serde_json::from_reader(reader).map_err(|err| {
            anyhow!("Expected option file {options_path:?} to be a valid Json file: {err}")
        })?
    } else {
        PyFormatOptions::from_extension(input_path.as_std_path())
    };
    // A first line of the form `# flags: … --line-ranges=a-b …` requests
    // range-formatting of the listed 1-indexed line ranges only.
    let first_line = content.lines().next().unwrap_or_default();
    let formatted_code =
        if first_line.starts_with("# flags:") && first_line.contains("--line-ranges=") {
            let line_index = LineIndex::from_source_text(&content);
            let ranges = first_line
                .split_ascii_whitespace()
                .filter_map(|chunk| {
                    let (_, lines) = chunk.split_once("--line-ranges=")?;
                    let (lower, upper) = lines.split_once('-')?;
                    let lower = lower
                        .parse::<OneIndexed>()
                        .expect("Expected a valid line number");
                    let upper = upper
                        .parse::<OneIndexed>()
                        .expect("Expected a valid line number");
                    let range_start = line_index.line_start(lower, &content);
                    let range_end = line_index.line_end(upper, &content);
                    Some(TextRange::new(range_start, range_end))
                })
                // Apply the ranges back-to-front so earlier offsets stay valid.
                .rev();
            let mut formatted_code = content.clone();
            for range in ranges {
                let formatted = format_range(&content, range, options.clone()).map_err(|err| {
                    anyhow!("Range-formatting to succeed but encountered error {err}")
                })?;
                let range = formatted.source_range();
                formatted_code.replace_range(Range::<usize>::from(range), formatted.as_code());
            }
            // We can't do stability checks for range formatting because we don't know the updated ranges.
            formatted_code
        } else {
            let printed = format_module_source(&content, options.clone())
                .map_err(|err| anyhow!("Formatting to succeed but encountered error {err}"))?;
            let formatted_code = printed.into_code();
            ensure_stability_when_formatting_twice(&formatted_code, &options, input_path);
            formatted_code
        };
    let extension = input_path
        .extension()
        .expect("Test file to have py or pyi extension");
    let expected_path = input_path.with_extension(format!("{extension}.expect"));
    let expected_output = fs::read_to_string(&expected_path)
        .unwrap_or_else(|_| panic!("Expected Black output file '{expected_path:?}' to exist"));
    let unsupported_syntax_errors =
        ensure_unchanged_ast(&content, &formatted_code, &options, input_path);
    // Black and Ruff formatting matches. Delete any existing snapshot files because the Black output
    // already perfectly captures the expected output.
    // The following code mimics insta's logic generating the snapshot name for a test.
    let workspace_path = std::env::var("CARGO_MANIFEST_DIR").unwrap();
    let full_snapshot_name = format!("black_compatibility@{test_name}.snap",);
    let snapshot_path = Path::new(&workspace_path)
        .join("tests/snapshots")
        .join(full_snapshot_name);
    if formatted_code == expected_output {
        if snapshot_path.exists() && snapshot_path.is_file() {
            // SAFETY: This is a convenience feature. That's why we don't want to abort
            // when deleting a no longer needed snapshot fails.
            fs::remove_file(&snapshot_path).ok();
        }
        let new_snapshot_path = snapshot_path.with_extension("snap.new");
        if new_snapshot_path.exists() && new_snapshot_path.is_file() {
            // SAFETY: This is a convenience feature. That's why we don't want to abort
            // when deleting a no longer needed snapshot fails.
            fs::remove_file(&new_snapshot_path).ok();
        }
    } else {
        // Black and Ruff have different formatting. Write out a snapshot that covers the differences
        // today.
        let mut snapshot = String::new();
        write!(snapshot, "{}", Header::new("Input")).unwrap();
        write!(snapshot, "{}", CodeFrame::new("python", &content)).unwrap();
        write!(snapshot, "{}", Header::new("Black Differences")).unwrap();
        let diff = TextDiff::from_lines(expected_output.as_str(), &formatted_code)
            .unified_diff()
            .header("Black", "Ruff")
            .to_string();
        write!(snapshot, "{}", CodeFrame::new("diff", &diff)).unwrap();
        write!(snapshot, "{}", Header::new("Ruff Output")).unwrap();
        write!(snapshot, "{}", CodeFrame::new("python", &formatted_code)).unwrap();
        write!(snapshot, "{}", Header::new("Black Output")).unwrap();
        write!(snapshot, "{}", CodeFrame::new("python", &expected_output)).unwrap();
        if !unsupported_syntax_errors.is_empty() {
            write!(snapshot, "{}", Header::new("New Unsupported Syntax Errors")).unwrap();
            writeln!(
                snapshot,
                "{}",
                DisplayDiagnostics::new(
                    &DummyFileResolver,
                    &DisplayDiagnosticConfig::default().format(DiagnosticFormat::Full),
                    &unsupported_syntax_errors
                )
            )
            .unwrap();
        }
        let mut settings = insta::Settings::clone_current();
        settings.set_omit_expression(true);
        settings.set_input_file(input_path);
        settings.set_prepend_module_to_snapshot(false);
        settings.set_snapshot_suffix(test_name);
        let _settings = settings.bind_to_scope();
        assert_snapshot!(snapshot);
    }
    Ok(())
}
/// Tests one Ruff fixture, snapshotting the formatted output (and, when stable
/// and preview output differ, a diff between them).
///
/// An optional sibling `<fixture>.options.json` file may contain a list of
/// `PyFormatOptions`; each entry produces its own output section.
#[expect(clippy::needless_pass_by_value)]
fn format(input_path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    let test_name = input_path
        .strip_prefix("./resources/test/fixtures/ruff")
        .unwrap_or(input_path)
        .as_str();
    let mut snapshot = format!("## Input\n{}", CodeFrame::new("python", &content));
    let options_path = input_path.with_extension("options.json");
    if let Ok(options_file) = fs::File::open(&options_path) {
        let reader = BufReader::new(options_file);
        // Include the serde error in the message for easier debugging,
        // mirroring the equivalent handler in `black_compatibility`.
        let options: Vec<PyFormatOptions> = serde_json::from_reader(reader).map_err(|err| {
            anyhow!("Expected option file {options_path:?} to be a valid Json file: {err}")
        })?;
        writeln!(snapshot, "## Outputs").unwrap();
        for (i, options) in options.into_iter().enumerate() {
            let (formatted_code, unsupported_syntax_errors) =
                format_file(&content, &options, input_path);
            writeln!(
                snapshot,
                "### Output {}\n{}{}",
                i + 1,
                CodeFrame::new("", &DisplayPyOptions(&options)),
                CodeFrame::new("python", &formatted_code)
            )
            .unwrap();
            // Preview options already show the preview style; no diff needed.
            if options.preview().is_enabled() {
                continue;
            }
            // We want to capture the differences in the preview style in our fixtures
            let options_preview = options.with_preview(PreviewMode::Enabled);
            let (formatted_preview, _) = format_file(&content, &options_preview, input_path);
            if formatted_code != formatted_preview {
                // Having both snapshots makes it hard to see the difference, so we're keeping only
                // diff.
                writeln!(
                    snapshot,
                    "#### Preview changes\n{}",
                    CodeFrame::new(
                        "diff",
                        TextDiff::from_lines(&formatted_code, &formatted_preview)
                            .unified_diff()
                            .header("Stable", "Preview")
                    )
                )
                .unwrap();
            }
            if !unsupported_syntax_errors.is_empty() {
                writeln!(
                    snapshot,
                    "### Unsupported Syntax Errors\n{}",
                    DisplayDiagnostics::new(
                        &DummyFileResolver,
                        &DisplayDiagnosticConfig::default().format(DiagnosticFormat::Full),
                        &unsupported_syntax_errors
                    )
                )
                .unwrap();
            }
        }
    } else {
        // We want to capture the differences in the preview style in our fixtures
        let options = PyFormatOptions::from_extension(input_path.as_std_path());
        let (formatted_code, unsupported_syntax_errors) =
            format_file(&content, &options, input_path);
        let options_preview = options.with_preview(PreviewMode::Enabled);
        let (formatted_preview, _) = format_file(&content, &options_preview, input_path);
        if formatted_code == formatted_preview {
            writeln!(
                snapshot,
                "## Output\n{}",
                CodeFrame::new("python", &formatted_code)
            )
            .unwrap();
        } else {
            // Having both snapshots makes it hard to see the difference, so we're keeping only
            // diff.
            writeln!(
                snapshot,
                "## Output\n{}\n## Preview changes\n{}",
                CodeFrame::new("python", &formatted_code),
                CodeFrame::new(
                    "diff",
                    TextDiff::from_lines(&formatted_code, &formatted_preview)
                        .unified_diff()
                        .header("Stable", "Preview")
                )
            )
            .unwrap();
        }
        if !unsupported_syntax_errors.is_empty() {
            writeln!(
                snapshot,
                "## Unsupported Syntax Errors\n{}",
                DisplayDiagnostics::new(
                    &DummyFileResolver,
                    &DisplayDiagnosticConfig::default().format(DiagnosticFormat::Full),
                    &unsupported_syntax_errors
                )
            )
            .unwrap();
        }
    }
    let mut settings = insta::Settings::clone_current();
    settings.set_omit_expression(true);
    settings.set_input_file(input_path);
    settings.set_prepend_module_to_snapshot(false);
    settings.set_snapshot_suffix(test_name);
    let _settings = settings.bind_to_scope();
    assert_snapshot!(snapshot);
    Ok(())
}
// Test entry point: registers the two data-driven suites. Every `.py`/`.pyi`
// file below the respective fixture root becomes one test case.
datatest_stable::harness! {
    { test = black_compatibility, root = "./resources/test/fixtures/black", pattern = r".+\.pyi?$" },
    { test = format, root="./resources/test/fixtures/ruff", pattern = r".+\.pyi?$" }
}
/// Format `source` with `options`, returning the formatted code together with
/// any unsupported syntax errors that are new in the formatted output.
///
/// If the source contains `<RANGE_START>`/`<RANGE_END>` markers, only the marked
/// ranges are range-formatted (the idempotency check is skipped in that case);
/// otherwise the whole module is formatted and checked for stability.
fn format_file(
    source: &str,
    options: &PyFormatOptions,
    input_path: &Utf8Path,
) -> (String, Vec<Diagnostic>) {
    let (unformatted, formatted_code) = if source.contains("<RANGE_START>") {
        let mut content = source.to_string();
        // The marker-free source is what the AST comparison runs against.
        let without_markers = content
            .replace("<RANGE_START>", "")
            .replace("<RANGE_END>", "");
        while let Some(range_start_marker) = content.find("<RANGE_START>") {
            // Remove the start marker
            content.replace_range(
                range_start_marker..range_start_marker + "<RANGE_START>".len(),
                "",
            );
            let range_end_marker = content[range_start_marker..]
                .find("<RANGE_END>")
                .expect("Matching <RANGE_END> marker for <RANGE_START> to exist")
                + range_start_marker;
            content.replace_range(range_end_marker..range_end_marker + "<RANGE_END>".len(), "");
            // Replace all other markers to get a valid Python input
            let format_input = content
                .replace("<RANGE_START>", "")
                .replace("<RANGE_END>", "");
            let range = TextRange::new(
                TextSize::try_from(range_start_marker).unwrap(),
                TextSize::try_from(range_end_marker).unwrap(),
            );
            let formatted =
                format_range(&format_input, range, options.clone()).unwrap_or_else(|err| {
                    // Fixed message: previously read "Range-formatting of … to
                    // succeed …" with the leading "Expected" missing.
                    panic!(
                        "Expected range-formatting of {input_path} to succeed but encountered error {err}",
                    )
                });
            content.replace_range(
                Range::<usize>::from(formatted.source_range()),
                formatted.as_code(),
            );
        }
        (Cow::Owned(without_markers), content)
    } else {
        let printed = format_module_source(source, options.clone()).unwrap_or_else(|err| {
            // Fixed message: the backtick around `{input_path}` was unbalanced.
            panic!("Formatting `{input_path}` was expected to succeed but it failed: {err}",)
        });
        let formatted_code = printed.into_code();
        ensure_stability_when_formatting_twice(&formatted_code, options, input_path);
        (Cow::Borrowed(source), formatted_code)
    };
    let unsupported_syntax_errors =
        ensure_unchanged_ast(&unformatted, &formatted_code, options, input_path);
    (formatted_code, unsupported_syntax_errors)
}
/// Format another time and make sure that there are no changes anymore
/// (i.e. formatting is idempotent). Panics with a diff if a second pass
/// produces different output, or with a diagnostic if the first pass
/// produced invalid syntax.
fn ensure_stability_when_formatting_twice(
    formatted_code: &str,
    options: &PyFormatOptions,
    input_path: &Utf8Path,
) {
    let reformatted = match format_module_source(formatted_code, options.clone()) {
        Ok(reformatted) => reformatted,
        Err(err) => {
            // Build a rich diagnostic pointing at the offending range, if known.
            let mut diag = Diagnostic::from(&err);
            if let Some(range) = err.range() {
                let file = SourceFileBuilder::new(input_path.as_str(), formatted_code).finish();
                let span = Span::from(file).with_range(range);
                diag.annotate(Annotation::primary(span));
            }
            panic!(
                "Expected formatted code of {input_path} to be valid syntax: {err}:\
                \n---\n{formatted_code}---\n{}",
                diag.display(&DummyFileResolver, &DisplayDiagnosticConfig::default()),
            );
        }
    };
    if reformatted.as_code() != formatted_code {
        let diff = TextDiff::from_lines(formatted_code, reformatted.as_code())
            .unified_diff()
            .header("Formatted once", "Formatted twice")
            .to_string();
        panic!(
            r#"Reformatting the formatted code of {input_path} a second time resulted in formatting changes.
Options:
{options}
---
{diff}---
Formatted once:
---
{formatted_code}---
Formatted twice:
---
{reformatted}---"#,
            options = &DisplayPyOptions(options),
            reformatted = reformatted.as_code(),
        );
    }
}
/// Ensure that formatting doesn't change the AST and doesn't introduce any new unsupported syntax errors.
///
/// Like Black, there are a few exceptions to this "invariant" which are encoded in
/// [`NormalizedMod`] and related structs. Namely, formatting can change indentation within strings,
/// and can also flatten tuples within `del` statements.
///
/// Returns any new [`UnsupportedSyntaxError`]s in the formatted code as [`Diagnostic`]s for
/// snapshotting.
///
/// As noted in the sub-diagnostic message, new syntax errors should only be accepted when they are
/// the result of an existing syntax error in the input. For example, the formatter knows that
/// escapes in f-strings are only allowed after Python 3.12, so it can replace escaped quotes with
/// reused outer quote characters, which are also valid after 3.12, even if the configured Python
/// version is lower. Such cases disrupt the fingerprint filter because the syntax error, and thus
/// its fingerprint, is different from the input syntax error. More typical cases like using a
/// t-string before 3.14 will be filtered out and not included in snapshots.
fn ensure_unchanged_ast(
    unformatted_code: &str,
    formatted_code: &str,
    options: &PyFormatOptions,
    input_path: &Utf8Path,
) -> Vec<Diagnostic> {
    let source_type = options.source_type();
    // Parse the unformatted code.
    let unformatted_parsed = parse(
        unformatted_code,
        ParseOptions::from(source_type).with_target_version(options.target_version()),
    )
    .expect("Unformatted code to be valid syntax");
    let unformatted_unsupported_syntax_errors =
        collect_unsupported_syntax_errors(&unformatted_parsed);
    let mut unformatted_ast = unformatted_parsed.into_syntax();
    Normalizer.visit_module(&mut unformatted_ast);
    let unformatted_ast = ComparableMod::from(&unformatted_ast);
    // Parse the formatted code.
    let formatted_parsed = parse(
        formatted_code,
        ParseOptions::from(source_type).with_target_version(options.target_version()),
    )
    .expect("Formatted code to be valid syntax");
    // Assert that there are no new unsupported syntax errors
    let mut formatted_unsupported_syntax_errors =
        collect_unsupported_syntax_errors(&formatted_parsed);
    // Keep only errors whose fingerprint wasn't already present in the input.
    formatted_unsupported_syntax_errors
        .retain(|fingerprint, _| !unformatted_unsupported_syntax_errors.contains_key(fingerprint));
    let file = SourceFileBuilder::new(input_path.file_name().unwrap(), formatted_code).finish();
    // Render each genuinely new error as a diagnostic for the snapshot, with a
    // reminder that the formatter must not introduce syntax errors.
    let diagnostics = formatted_unsupported_syntax_errors
        .values()
        .map(|error| {
            let mut diag = Diagnostic::new(DiagnosticId::InvalidSyntax, Severity::Error, error);
            let span = Span::from(file.clone()).with_range(error.range());
            diag.annotate(Annotation::primary(span));
            let sub = SubDiagnostic::new(
                SubDiagnosticSeverity::Warning,
                "Only accept new syntax errors if they are also present in the input. \
                The formatter should not introduce syntax errors.",
            );
            diag.sub(sub);
            diag
        })
        .collect::<Vec<_>>();
    let mut formatted_ast = formatted_parsed.into_syntax();
    Normalizer.visit_module(&mut formatted_ast);
    let formatted_ast = ComparableMod::from(&formatted_ast);
    if formatted_ast != unformatted_ast {
        // Compare the debug representations so the panic shows a readable diff.
        let diff = TextDiff::from_lines(
            &format!("{unformatted_ast:#?}"),
            &format!("{formatted_ast:#?}"),
        )
        .unified_diff()
        .header("Unformatted", "Formatted")
        .to_string();
        panic!(
            r#"Reformatting the unformatted code of {input_path} resulted in AST changes.
---
{diff}
"#,
        );
    }
    diagnostics
}
/// Renders a `## <title>` Markdown section heading followed by a blank line.
struct Header<'a> {
    title: &'a str,
}
impl<'a> Header<'a> {
    fn new(title: &'a str) -> Self {
        Self { title }
    }
}
impl std::fmt::Display for Header<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "## {}\n\n", self.title)
    }
}
/// Renders a fenced Markdown code block with the given language tag,
/// terminated by a blank line so adjacent sections stay separated.
struct CodeFrame<'a> {
    language: &'a str,
    code: &'a dyn std::fmt::Display,
}
impl<'a> CodeFrame<'a> {
    fn new(language: &'a str, code: &'a dyn std::fmt::Display) -> Self {
        Self { language, code }
    }
}
impl std::fmt::Display for CodeFrame<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // The code is written verbatim: no newline is inserted after it, so the
        // caller's trailing newline (if any) is what separates it from the fence.
        write!(
            f,
            "```{lang}\n{code}```\n\n",
            lang = self.language,
            code = self.code
        )
    }
}
/// Adapter that renders a `PyFormatOptions` as the key-value listing embedded
/// in snapshot output sections.
struct DisplayPyOptions<'a>(&'a PyFormatOptions);
impl fmt::Display for DisplayPyOptions<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        writeln!(
            f,
            r#"indent-style = {indent_style}
line-width = {line_width}
indent-width = {indent_width}
quote-style = {quote_style:?}
line-ending = {line_ending:?}
magic-trailing-comma = {magic_trailing_comma:?}
docstring-code = {docstring_code:?}
docstring-code-line-width = {docstring_code_line_width:?}
preview = {preview:?}
target_version = {target_version}
source_type = {source_type:?}"#,
            indent_style = self.0.indent_style(),
            indent_width = self.0.indent_width().value(),
            line_width = self.0.line_width().value(),
            quote_style = self.0.quote_style(),
            line_ending = self.0.line_ending(),
            magic_trailing_comma = self.0.magic_trailing_comma(),
            docstring_code = self.0.docstring_code(),
            docstring_code_line_width = self.0.docstring_code_line_width(),
            preview = self.0.preview(),
            target_version = self.0.target_version(),
            source_type = self.0.source_type()
        )
    }
}
/// A visitor to collect a sequence of node IDs for fingerprinting [`UnsupportedSyntaxError`]s.
///
/// It visits each statement in the AST in source order and saves its range. The index of the node
/// enclosing a syntax error's range can then be retrieved with the `node_id` method. This `node_id`
/// should be stable across formatting runs since the formatter won't add or remove statements.
struct StmtVisitor {
    // Ranges of all visited statements, pushed in source (pre-)order.
    nodes: Vec<TextRange>,
}
impl StmtVisitor {
    fn new(parsed: &Parsed<Mod>) -> Self {
        let mut visitor = Self { nodes: Vec::new() };
        visitor.visit_mod(parsed.syntax());
        visitor
    }
    /// Return the index of the statement node that contains `range`.
    fn node_id(&self, range: TextRange) -> usize {
        // Of all statements containing `range`, pick the shortest — i.e. the
        // innermost enclosing statement.
        self.nodes
            .iter()
            .enumerate()
            .filter(|(_, node)| node.contains_range(range))
            .min_by_key(|(_, node)| node.len())
            .expect("Expected an enclosing node in the AST")
            .0
    }
}
impl<'a> SourceOrderVisitor<'a> for StmtVisitor {
    fn visit_stmt(&mut self, stmt: &'a ruff_python_ast::Stmt) {
        // Record the range before descending so parents precede their children.
        self.nodes.push(stmt.range());
        ruff_python_ast::visitor::source_order::walk_stmt(self, stmt);
    }
}
/// Collects the unsupported syntax errors and assigns a unique hash to each error.
///
/// Fingerprints are keyed on the enclosing statement's index rather than byte
/// offsets, so the same error matches before and after formatting moves it.
fn collect_unsupported_syntax_errors(
    parsed: &Parsed<Mod>,
) -> FxHashMap<u64, UnsupportedSyntaxError> {
    let mut collected = FxHashMap::default();
    if parsed.unsupported_syntax_errors().is_empty() {
        // Fast path: skip walking the AST when there's nothing to fingerprint.
        return collected;
    }
    let visitor = StmtVisitor::new(parsed);
    for error in parsed.unsupported_syntax_errors() {
        let node_id = visitor.node_id(error.range);
        let mut error_fingerprint = fingerprint_unsupported_syntax_error(error, node_id, 0);
        // Make sure that we do not get a fingerprint that is already in use
        // by adding in the previously generated one.
        loop {
            match collected.entry(error_fingerprint) {
                Entry::Occupied(_) => {
                    error_fingerprint =
                        fingerprint_unsupported_syntax_error(error, node_id, error_fingerprint);
                }
                Entry::Vacant(entry) => {
                    entry.insert(error.clone());
                    break;
                }
            }
        }
    }
    collected
}
/// Hash one unsupported syntax error into a fingerprint.
///
/// The `salt` lets callers re-hash on collision; `node_id` identifies the
/// enclosing statement so the fingerprint survives formatting-induced moves.
fn fingerprint_unsupported_syntax_error(
    error: &UnsupportedSyntaxError,
    node_id: usize,
    salt: u64,
) -> u64 {
    // Destructure so that adding a field to `UnsupportedSyntaxError` forces a
    // decision about whether it participates in the fingerprint.
    let UnsupportedSyntaxError {
        kind,
        target_version,
        // Don't hash the range because the location between the formatted and unformatted code
        // is likely to be different
        range: _,
    } = error;

    let mut hasher = DefaultHasher::new();
    // Tuples hash their fields in order, so this writes the exact same byte
    // sequence as hashing salt, kind, target_version, node_id individually.
    (salt, kind, target_version, node_id).hash(&mut hasher);
    hasher.finish()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/db.rs | crates/ty_test/src/db.rs | use camino::{Utf8Component, Utf8PathBuf};
use ruff_db::Db as SourceDb;
use ruff_db::diagnostic::Severity;
use ruff_db::files::{File, Files};
use ruff_db::system::{
CaseSensitivity, DbWithWritableSystem, InMemorySystem, OsSystem, System, SystemPath,
SystemPathBuf, WritableSystem,
};
use ruff_db::vendored::VendoredFileSystem;
use ruff_notebook::{Notebook, NotebookError};
use salsa::Setter as _;
use std::borrow::Cow;
use std::sync::Arc;
use tempfile::TempDir;
use ty_module_resolver::SearchPaths;
use ty_python_semantic::lint::{LintRegistry, RuleSelection};
use ty_python_semantic::{AnalysisSettings, Db as SemanticDb, Program, default_lint_registry};
use crate::config::Analysis;
/// The Salsa database used by mdtest runs.
///
/// Bundles the (swappable) file system, vendored typeshed stubs, and the rule
/// selection shared by every checked file.
#[salsa::db]
#[derive(Clone)]
pub(crate) struct Db {
    storage: salsa::Storage<Self>,
    files: Files,
    system: MdtestSystem,
    vendored: VendoredFileSystem,
    rule_selection: Arc<RuleSelection>,
    // `None` only while `setup` is constructing the database; `Some` afterwards.
    settings: Option<Settings>,
}
impl Db {
    /// Create a fresh database with an in-memory file system, the vendored file
    /// system, and all lints enabled at `Info` severity.
    pub(crate) fn setup() -> Self {
        let rule_selection = RuleSelection::all(default_lint_registry(), Severity::Info);
        let mut db = Self {
            system: MdtestSystem::in_memory(),
            storage: salsa::Storage::new(Some(Box::new({
                move |event| {
                    tracing::trace!("event: {:?}", event);
                }
            }))),
            vendored: ty_vendored::file_system().clone(),
            files: Files::default(),
            rule_selection: Arc::new(rule_selection),
            settings: None,
        };
        // `Settings::new` needs a `&Db`, so the input is created after the
        // struct itself and patched in afterwards.
        db.settings = Some(Settings::new(&db));
        db
    }
    fn settings(&self) -> Settings {
        // `setup` always fills this in before returning the database.
        self.settings.unwrap()
    }
    /// Apply the analysis options from an mdtest configuration, falling back to
    /// the defaults for any unset field.
    pub(crate) fn update_analysis_options(&mut self, options: Option<&Analysis>) {
        let analysis = if let Some(options) = options {
            let AnalysisSettings {
                respect_type_ignore_comments: respect_type_ignore_comments_default,
            } = AnalysisSettings::default();
            AnalysisSettings {
                respect_type_ignore_comments: options
                    .respect_type_ignore_comments
                    .unwrap_or(respect_type_ignore_comments_default),
            }
        } else {
            AnalysisSettings::default()
        };
        // Only touch the salsa input when the value actually changed, to avoid
        // invalidating dependent queries unnecessarily.
        let settings = self.settings();
        if settings.analysis(self) != &analysis {
            settings.set_analysis(self).to(analysis);
        }
    }
    /// Switch to an OS-backed file system rooted at `cwd`; `temp_dir` keeps the
    /// backing directory alive for the lifetime of the system.
    pub(crate) fn use_os_system_with_temp_dir(&mut self, cwd: SystemPathBuf, temp_dir: TempDir) {
        self.system.with_os(cwd, temp_dir);
        Files::sync_all(self);
    }
    /// Switch (back) to the in-memory file system, discarding all files.
    pub(crate) fn use_in_memory_system(&mut self) {
        self.system.with_in_memory();
        Files::sync_all(self);
    }
    pub(crate) fn create_directory_all(&self, path: &SystemPath) -> ruff_db::system::Result<()> {
        self.system.create_directory_all(path)
    }
}
#[salsa::db]
impl SourceDb for Db {
    fn vendored(&self) -> &VendoredFileSystem {
        &self.vendored
    }
    fn system(&self) -> &dyn System {
        &self.system
    }
    fn files(&self) -> &Files {
        &self.files
    }
    fn python_version(&self) -> ruff_python_ast::PythonVersion {
        // The Python version is stored on the salsa `Program` input.
        Program::get(self).python_version(self)
    }
}
#[salsa::db]
impl ty_module_resolver::Db for Db {
    fn search_paths(&self) -> &SearchPaths {
        // Module search paths come from the salsa `Program` input.
        Program::get(self).search_paths(self)
    }
}
#[salsa::db]
impl SemanticDb for Db {
    fn should_check_file(&self, file: File) -> bool {
        // Vendored (typeshed) files are never themselves checked in mdtests.
        !file.path(self).is_vendored_path()
    }
    fn rule_selection(&self, _file: File) -> &RuleSelection {
        // The same rule selection applies to every file.
        &self.rule_selection
    }
    fn lint_registry(&self) -> &LintRegistry {
        default_lint_registry()
    }
    fn verbose(&self) -> bool {
        false
    }
    fn analysis_settings(&self) -> &AnalysisSettings {
        self.settings().analysis(self)
    }
}
// No extra methods needed; the attribute wires `Db` into the salsa runtime.
#[salsa::db]
impl salsa::Database for Db {}
impl DbWithWritableSystem for Db {
    type System = MdtestSystem;
    /// Expose the system with write access so tests can create files.
    fn writable_system(&self) -> &Self::System {
        &self.system
    }
}
/// Run-wide mutable settings stored as a salsa input so that changing them
/// invalidates dependent queries.
#[salsa::input(debug)]
struct Settings {
    #[default]
    #[returns(ref)]
    analysis: AnalysisSettings,
}
/// The file system backend used by mdtest: either fully in memory (the default)
/// or backed by a temporary directory on the real OS file system.
#[derive(Debug, Clone)]
pub(crate) struct MdtestSystem(Arc<MdtestSystemInner>);
#[derive(Debug)]
enum MdtestSystemInner {
    InMemory(InMemorySystem),
    Os {
        os_system: OsSystem,
        // Held only to keep the temporary directory alive while in use.
        _temp_dir: TempDir,
    },
}
impl MdtestSystem {
    fn in_memory() -> Self {
        Self(Arc::new(MdtestSystemInner::InMemory(
            InMemorySystem::default(),
        )))
    }
    /// Return the active backend as a writable-system trait object.
    fn as_system(&self) -> &dyn WritableSystem {
        match &*self.0 {
            MdtestSystemInner::InMemory(system) => system,
            MdtestSystemInner::Os { os_system, .. } => os_system,
        }
    }
    /// Switch to the OS backend rooted at `cwd`, retaining `temp_dir` so the
    /// backing directory isn't deleted while in use.
    fn with_os(&mut self, cwd: SystemPathBuf, temp_dir: TempDir) {
        self.0 = Arc::new(MdtestSystemInner::Os {
            os_system: OsSystem::new(cwd),
            _temp_dir: temp_dir,
        });
    }
    fn with_in_memory(&mut self) {
        if let MdtestSystemInner::InMemory(in_memory) = &*self.0 {
            // Already in-memory: reuse the system but clear its contents.
            in_memory.fs().remove_all();
        } else {
            self.0 = Arc::new(MdtestSystemInner::InMemory(InMemorySystem::default()));
        }
    }
    /// For the OS backend, rebase `path` into the temporary working directory so
    /// tests can never read from or write to locations outside of it.
    fn normalize_path<'a>(&self, path: &'a SystemPath) -> Cow<'a, SystemPath> {
        match &*self.0 {
            MdtestSystemInner::InMemory(_) => Cow::Borrowed(path),
            MdtestSystemInner::Os { os_system, .. } => {
                // Make all paths relative to the current directory
                // to avoid writing or reading from outside the temp directory.
                let without_root: Utf8PathBuf = path
                    .components()
                    .skip_while(|component| {
                        matches!(
                            component,
                            Utf8Component::RootDir | Utf8Component::Prefix(..)
                        )
                    })
                    .collect();
                Cow::Owned(os_system.current_directory().join(&without_root))
            }
        }
    }
}
// Delegates every `System` operation to the active inner system, normalizing
// paths first so OS-backed tests never read or write outside the temp dir.
impl System for MdtestSystem {
    fn path_metadata(
        &self,
        path: &SystemPath,
    ) -> ruff_db::system::Result<ruff_db::system::Metadata> {
        self.as_system().path_metadata(&self.normalize_path(path))
    }
    fn canonicalize_path(&self, path: &SystemPath) -> ruff_db::system::Result<SystemPathBuf> {
        let canonicalized = self
            .as_system()
            .canonicalize_path(&self.normalize_path(path))?;
        if let MdtestSystemInner::Os { os_system, .. } = &*self.0 {
            // Make the path relative to the current directory
            Ok(canonicalized
                .strip_prefix(os_system.current_directory())
                .unwrap()
                .to_owned())
        } else {
            Ok(canonicalized)
        }
    }
    fn read_to_string(&self, path: &SystemPath) -> ruff_db::system::Result<String> {
        self.as_system().read_to_string(&self.normalize_path(path))
    }
    fn read_to_notebook(&self, path: &SystemPath) -> Result<Notebook, NotebookError> {
        self.as_system()
            .read_to_notebook(&self.normalize_path(path))
    }
    // Virtual paths aren't real file-system paths, so they skip normalization.
    fn read_virtual_path_to_string(
        &self,
        path: &ruff_db::system::SystemVirtualPath,
    ) -> ruff_db::system::Result<String> {
        self.as_system().read_virtual_path_to_string(path)
    }
    fn read_virtual_path_to_notebook(
        &self,
        path: &ruff_db::system::SystemVirtualPath,
    ) -> Result<Notebook, NotebookError> {
        self.as_system().read_virtual_path_to_notebook(path)
    }
    fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        self.as_system()
            .path_exists_case_sensitive(&self.normalize_path(path), &self.normalize_path(prefix))
    }
    fn case_sensitivity(&self) -> CaseSensitivity {
        self.as_system().case_sensitivity()
    }
    fn current_directory(&self) -> &SystemPath {
        self.as_system().current_directory()
    }
    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        self.as_system().user_config_directory()
    }
    fn cache_dir(&self) -> Option<SystemPathBuf> {
        self.as_system().cache_dir()
    }
    fn read_directory<'a>(
        &'a self,
        path: &SystemPath,
    ) -> ruff_db::system::Result<
        Box<dyn Iterator<Item = ruff_db::system::Result<ruff_db::system::DirectoryEntry>> + 'a>,
    > {
        self.as_system().read_directory(&self.normalize_path(path))
    }
    fn walk_directory(
        &self,
        path: &SystemPath,
    ) -> ruff_db::system::walk_directory::WalkDirectoryBuilder {
        self.as_system().walk_directory(&self.normalize_path(path))
    }
    fn glob(
        &self,
        pattern: &str,
    ) -> Result<
        Box<dyn Iterator<Item = Result<SystemPathBuf, ruff_db::system::GlobError>> + '_>,
        ruff_db::system::PatternError,
    > {
        // Glob patterns are normalized like paths so they stay inside the root.
        self.as_system()
            .glob(self.normalize_path(SystemPath::new(pattern)).as_str())
    }
    fn as_writable(&self) -> Option<&dyn WritableSystem> {
        Some(self)
    }
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
    fn dyn_clone(&self) -> Box<dyn System> {
        Box::new(self.clone())
    }
}
// Write operations delegate to the inner system with the same path
// normalization as the read operations above.
impl WritableSystem for MdtestSystem {
    fn create_new_file(&self, path: &SystemPath) -> ruff_db::system::Result<()> {
        self.as_system().create_new_file(&self.normalize_path(path))
    }
    fn write_file(&self, path: &SystemPath, content: &str) -> ruff_db::system::Result<()> {
        self.as_system()
            .write_file(&self.normalize_path(path), content)
    }
    fn create_directory_all(&self, path: &SystemPath) -> ruff_db::system::Result<()> {
        self.as_system()
            .create_directory_all(&self.normalize_path(path))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/config.rs | crates/ty_test/src/config.rs | //! TOML-deserializable ty configuration, similar to `ty.toml`, to be able to
//! control some configuration options from Markdown files. For now, this supports the
//! following limited structure:
//!
//! ```toml
//! log = true # or log = "ty=WARN"
//!
//! [environment]
//! python-version = "3.10"
//!
//! [project]
//! dependencies = ["pydantic==2.12.2"]
//! ```
use anyhow::Context;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::PythonVersion;
use serde::{Deserialize, Serialize};
use ty_python_semantic::PythonPlatform;
/// Root of the TOML configuration embedded in a markdown test.
///
/// See the module docs for the supported structure.
#[derive(Deserialize, Debug, Default, Clone)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub(crate) struct MarkdownTestConfig {
    /// `[environment]` table: Python version/platform, typeshed, search paths.
    pub(crate) environment: Option<Environment>,
    /// Enables (and optionally filters) tracing output for the test.
    pub(crate) log: Option<Log>,
    /// `[analysis]` table: analysis behavior toggles.
    pub(crate) analysis: Option<Analysis>,
    /// The [`ruff_db::system::System`] to use for tests.
    ///
    /// Defaults to the case-sensitive [`ruff_db::system::InMemorySystem`].
    pub(crate) system: Option<SystemKind>,
    /// Project configuration for installing external dependencies.
    pub(crate) project: Option<Project>,
}
impl MarkdownTestConfig {
    /// Parses a config from the TOML text of a markdown code block.
    pub(crate) fn from_str(s: &str) -> anyhow::Result<Self> {
        toml::from_str(s).context("Error while parsing Markdown TOML config")
    }
    /// The configured target Python version, if any.
    pub(crate) fn python_version(&self) -> Option<PythonVersion> {
        self.environment.as_ref().and_then(|env| env.python_version)
    }
    /// The configured target Python platform, if any.
    pub(crate) fn python_platform(&self) -> Option<PythonPlatform> {
        self.environment
            .as_ref()
            .and_then(|env| env.python_platform.clone())
    }
    /// The configured custom typeshed directory, if any.
    pub(crate) fn typeshed(&self) -> Option<&SystemPath> {
        self.environment
            .as_ref()
            .and_then(|env| env.typeshed.as_deref())
    }
    /// The configured additional module search paths, if any.
    pub(crate) fn extra_paths(&self) -> Option<&[SystemPathBuf]> {
        self.environment
            .as_ref()
            .and_then(|env| env.extra_paths.as_deref())
    }
    /// The configured Python environment path, if any.
    pub(crate) fn python(&self) -> Option<&SystemPath> {
        self.environment
            .as_ref()
            .and_then(|env| env.python.as_deref())
    }
    /// The configured external package dependencies, if any.
    pub(crate) fn dependencies(&self) -> Option<&[String]> {
        self.project
            .as_ref()
            .and_then(|project| project.dependencies.as_deref())
    }
}
/// The `[environment]` table of a markdown test config.
#[derive(Deserialize, Debug, Default, Clone)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub(crate) struct Environment {
    /// Target Python version to assume when resolving types.
    ///
    /// The Python version affects allowed syntax, type definitions of the standard library, and
    /// type definitions of first- and third-party modules that are conditional on the Python version.
    ///
    /// By default, the Python version is inferred as the lower bound of the project's
    /// `requires-python` field from the `pyproject.toml`, if available. Otherwise, the latest
    /// stable version supported by ty is used (see `ty check --help` output).
    ///
    /// ty will not infer the Python version from the Python environment at this time.
    pub(crate) python_version: Option<PythonVersion>,
    /// Target platform to assume when resolving types.
    pub(crate) python_platform: Option<PythonPlatform>,
    /// Path to a custom typeshed directory.
    pub(crate) typeshed: Option<SystemPathBuf>,
    /// Additional search paths to consider when resolving modules.
    pub(crate) extra_paths: Option<Vec<SystemPathBuf>>,
    /// Path to the Python environment.
    ///
    /// ty uses the Python environment to resolve type information and third-party dependencies.
    ///
    /// If a path to a Python interpreter is provided, e.g., `.venv/bin/python3`, ty will attempt to
    /// find an environment two directories up from the interpreter's path, e.g., `.venv`. At this
    /// time, ty does not invoke the interpreter to determine the location of the environment. This
    /// means that ty will not resolve dynamic executables such as a shim.
    ///
    /// ty will search in the resolved environment's `site-packages` directories for type
    /// information and third-party imports.
    // NOTE(review): this field is `pub` while its siblings are `pub(crate)`, and
    // `skip_serializing_if` has no effect because the struct only derives
    // `Deserialize` — both look unintentional; confirm before changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub python: Option<SystemPathBuf>,
}
/// The `[analysis]` table of a markdown test config.
#[derive(Deserialize, Default, Debug, Clone)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub(crate) struct Analysis {
    /// Whether ty should support `type: ignore` comments.
    pub(crate) respect_type_ignore_comments: Option<bool>,
}
/// The top-level `log` key: either a boolean toggle or an env-filter string.
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
pub(crate) enum Log {
    /// Enable logging with tracing when `true`.
    Bool(bool),
    /// Enable logging and only show filters that match the given [env-filter](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html)
    Filter(String),
}
/// The system to use for tests.
///
/// Deserialized from the top-level `system` key of the test config.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)]
#[serde(rename_all = "kebab-case")]
pub(crate) enum SystemKind {
    /// Use an in-memory system with a case-sensitive file system.
    ///
    /// This is recommended for all tests because it's fast.
    #[default]
    InMemory,
    /// Use the os system.
    ///
    /// This system should only be used when testing system or OS specific behavior.
    Os,
}
/// Project configuration for tests that need external dependencies.
///
/// Deserialized from the `[project]` table of the test config.
#[derive(Deserialize, Debug, Default, Clone)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub(crate) struct Project {
    /// List of Python package dependencies in `pyproject.toml` format.
    ///
    /// These will be installed using `uv sync` into a temporary virtual environment.
    /// The site-packages directory will then be copied into the test's filesystem.
    ///
    /// Example: `dependencies = ["pydantic==2.12.2"]`
    pub(crate) dependencies: Option<Vec<String>>,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/lib.rs | crates/ty_test/src/lib.rs | use crate::config::Log;
use crate::db::Db;
use crate::parser::{BacktickOffsets, EmbeddedFileSourceMap};
use anyhow::anyhow;
use camino::Utf8Path;
use colored::Colorize;
use config::SystemKind;
use parser as test_parser;
use ruff_db::Db as _;
use ruff_db::diagnostic::{Diagnostic, DiagnosticId, DisplayDiagnosticConfig};
use ruff_db::files::{File, FileRootKind, system_path_to_file};
use ruff_db::panic::{PanicError, catch_unwind};
use ruff_db::parsed::parsed_module;
use ruff_db::system::{DbWithWritableSystem as _, SystemPath, SystemPathBuf};
use ruff_db::testing::{setup_logging, setup_logging_with_filter};
use ruff_diagnostics::Applicability;
use ruff_source_file::{LineIndex, OneIndexed};
use std::backtrace::BacktraceStatus;
use std::fmt::{Display, Write};
use ty_module_resolver::{
Module, SearchPath, SearchPathSettings, list_modules, resolve_module_confident,
};
use ty_python_semantic::pull_types::pull_types;
use ty_python_semantic::types::{UNDEFINED_REVEAL, check_types};
use ty_python_semantic::{
MisconfigurationMode, Program, ProgramSettings, PythonEnvironment, PythonPlatform,
PythonVersionSource, PythonVersionWithSource, SysPrefixPathOrigin,
};
mod assertion;
mod config;
mod db;
mod diagnostic;
mod external_dependencies;
mod matcher;
mod parser;
use ty_static::EnvVars;
/// Run `path` as a markdown test suite with given `title`.
///
/// Panic on test failure, and print failure details.
///
/// `relative_fixture_path` is used for CLI output, `absolute_fixture_path`
/// for GitHub annotations (and, in `run_test`, lockfile lookup).
/// `snapshot_path` is where insta snapshots are written and `output_format`
/// selects how failures are rendered.
pub fn run(
    absolute_fixture_path: &Utf8Path,
    relative_fixture_path: &Utf8Path,
    source: &str,
    snapshot_path: &Utf8Path,
    short_title: &str,
    test_name: &str,
    output_format: OutputFormat,
) -> anyhow::Result<()> {
    let suite = test_parser::parse(short_title, source)
        .map_err(|err| anyhow!("Failed to parse fixture: {err}"))?;
    let mut db = db::Db::setup();
    // Optional filter: only run tests whose name matches MDTEST_TEST_FILTER.
    let filter = std::env::var(EnvVars::MDTEST_TEST_FILTER).ok();
    let mut any_failures = false;
    // Accumulates the human-readable failure report asserted on at the end.
    let mut assertion = String::new();
    for test in suite.tests() {
        if filter
            .as_ref()
            .is_some_and(|f| !(test.uncontracted_name().contains(f) || test.name() == *f))
        {
            continue;
        }
        // Guard that enables tracing for the duration of this test, if requested.
        let _tracing = test.configuration().log.as_ref().and_then(|log| match log {
            Log::Bool(enabled) => enabled.then(setup_logging),
            Log::Filter(filter) => setup_logging_with_filter(filter),
        });
        let result = run_test(
            &mut db,
            absolute_fixture_path,
            relative_fixture_path,
            snapshot_path,
            &test,
        );
        // Skipped tests never populated the db, so the consistency check
        // would be meaningless for them.
        let inconsistencies = if result.as_ref().is_ok_and(|t| t.has_been_skipped()) {
            Ok(())
        } else {
            run_module_resolution_consistency_test(&db)
        };
        let this_test_failed = result.is_err() || inconsistencies.is_err();
        any_failures = any_failures || this_test_failed;
        if this_test_failed && output_format.is_cli() {
            let _ = writeln!(assertion, "\n\n{}\n", test.name().bold().underline());
        }
        if let Err(failures) = result {
            let md_index = LineIndex::from_source_text(source);
            for test_failures in failures {
                // Map line numbers inside the embedded code block back to
                // absolute line numbers within the markdown file.
                let source_map =
                    EmbeddedFileSourceMap::new(&md_index, test_failures.backtick_offsets);
                for (relative_line_number, failures) in test_failures.by_line.iter() {
                    let file = match output_format {
                        OutputFormat::Cli => relative_fixture_path.as_str(),
                        OutputFormat::GitHub => absolute_fixture_path.as_str(),
                    };
                    let absolute_line_number =
                        match source_map.to_absolute_line_number(relative_line_number) {
                            Ok(line_number) => line_number,
                            Err(last_line_number) => {
                                let _ = writeln!(
                                    assertion,
                                    "{}",
                                    output_format.display_error(
                                        file,
                                        last_line_number,
                                        "Found a trailing assertion comment \
                                        (e.g., `# revealed:` or `# error:`) \
                                        not followed by any statement."
                                    )
                                );
                                continue;
                            }
                        };
                    for failure in failures {
                        let _ = writeln!(
                            assertion,
                            "{}",
                            output_format.display_error(file, absolute_line_number, failure)
                        );
                    }
                }
            }
        }
        if let Err(inconsistencies) = inconsistencies {
            any_failures = true;
            for inconsistency in inconsistencies {
                match output_format {
                    OutputFormat::Cli => {
                        let info = relative_fixture_path.to_string().cyan();
                        let _ = writeln!(assertion, "  {info} {inconsistency}");
                    }
                    OutputFormat::GitHub => {
                        let _ = writeln!(
                            assertion,
                            "::error file={absolute_fixture_path}::{inconsistency}"
                        );
                    }
                }
            }
        }
        // Print copy-pasteable instructions for rerunning just this test.
        if this_test_failed && output_format.is_cli() {
            let escaped_test_name = test.name().replace('\'', "\\'");
            let _ = writeln!(
                assertion,
                "\nTo rerun this specific test, \
                set the environment variable: {}='{escaped_test_name}'",
                EnvVars::MDTEST_TEST_FILTER,
            );
            let _ = writeln!(
                assertion,
                "{}='{escaped_test_name}' cargo test -p ty_python_semantic \
                --test mdtest -- {test_name}",
                EnvVars::MDTEST_TEST_FILTER,
            );
            let _ = writeln!(assertion, "\n{}", "-".repeat(50));
        }
    }
    assert!(!any_failures, "{}", &assertion);
    Ok(())
}
/// Defines the format in which mdtest should print an error to the terminal
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutputFormat {
    /// The format `cargo test` should use by default.
    Cli,
    /// A format that will provide annotations from GitHub Actions
    /// if mdtest fails on a PR.
    /// See <https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions#setting-an-error-message>
    GitHub,
}
impl OutputFormat {
    /// Whether this is the plain CLI output format.
    const fn is_cli(self) -> bool {
        match self {
            OutputFormat::Cli => true,
            OutputFormat::GitHub => false,
        }
    }

    /// Builds a lazily-rendered error line for `file`/`line`/`failure`,
    /// formatted according to `self`.
    fn display_error(self, file: &str, line: OneIndexed, failure: impl Display) -> impl Display {
        // Private adapter carrying everything needed to render on demand.
        struct ErrorLine<'a, T> {
            format: OutputFormat,
            file: &'a str,
            line: OneIndexed,
            failure: T,
        }

        impl<T: std::fmt::Display> std::fmt::Display for ErrorLine<'_, T> {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                match self.format {
                    OutputFormat::Cli => {
                        let file_line = format!("{}:{}", self.file, self.line).cyan();
                        write!(f, "  {file_line} {}", self.failure)
                    }
                    OutputFormat::GitHub => write!(
                        f,
                        "::error file={},line={}::{}",
                        self.file, self.line, self.failure
                    ),
                }
            }
        }

        ErrorLine {
            format: self,
            file,
            line,
            failure,
        }
    }
}
/// Whether a single markdown test ran to completion or was skipped.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TestOutcome {
    /// The test executed and its assertions were checked.
    Success,
    /// The test was skipped (e.g. an external-dependency test without opt-in).
    Skipped,
}

impl TestOutcome {
    /// Returns `true` if the test was skipped rather than executed.
    const fn has_been_skipped(self) -> bool {
        match self {
            TestOutcome::Skipped => true,
            TestOutcome::Success => false,
        }
    }
}
/// Runs a single markdown test against `db`.
///
/// Writes all embedded files into the system, configures the program settings,
/// type-checks every Python file under `/src`, and matches the resulting
/// diagnostics against the test's inline assertions. Returns
/// `Ok(TestOutcome::Skipped)` when the test declares external dependencies and
/// `MDTEST_EXTERNAL=1` is not set; returns `Err` with per-file failures
/// otherwise.
fn run_test(
    db: &mut db::Db,
    absolute_fixture_path: &Utf8Path,
    relative_fixture_path: &Utf8Path,
    snapshot_path: &Utf8Path,
    test: &parser::MarkdownTest,
) -> Result<TestOutcome, Failures> {
    // Initialize the system and remove all files and directories to reset the system to a clean state.
    match test.configuration().system.unwrap_or_default() {
        SystemKind::InMemory => {
            db.use_in_memory_system();
        }
        SystemKind::Os => {
            let dir = tempfile::TempDir::new().expect("Creating a temporary directory to succeed");
            let root_path = dir
                .path()
                .canonicalize()
                .expect("Canonicalizing to succeed");
            let root_path = SystemPathBuf::from_path_buf(root_path)
                .expect("Temp directory to be a valid UTF8 path")
                .simplified()
                .to_path_buf();
            db.use_os_system_with_temp_dir(root_path, dir);
        }
    }
    let project_root = SystemPathBuf::from("/src");
    db.create_directory_all(&project_root)
        .expect("Creating the project root to succeed");
    db.files()
        .try_add_root(db, &project_root, FileRootKind::Project);
    let src_path = project_root.clone();
    let custom_typeshed_path = test.configuration().typeshed();
    let python_version = test.configuration().python_version().unwrap_or_default();
    // Setup virtual environment with dependencies if specified
    let venv_for_external_dependencies = SystemPathBuf::from("/.venv");
    if let Some(dependencies) = test.configuration().dependencies() {
        // NOTE(review): raw env var name here, unlike `EnvVars::MDTEST_TEST_FILTER`
        // used above — consider an `EnvVars` constant if one exists.
        if !std::env::var("MDTEST_EXTERNAL").is_ok_and(|v| v == "1") {
            return Ok(TestOutcome::Skipped);
        }
        let python_platform = test.configuration().python_platform().expect(
            "Tests with external dependencies must specify `python-platform` in the configuration",
        );
        let lockfile_path = absolute_fixture_path.with_extension("lock");
        external_dependencies::setup_venv(
            db,
            dependencies,
            python_version,
            &python_platform,
            &venv_for_external_dependencies,
            &lockfile_path,
        )
        .expect("Failed to setup in-memory virtual environment with dependencies");
    }
    // Write every embedded file to the system, collecting the Python files
    // under `/src` that should actually be checked.
    let mut typeshed_files = vec![];
    let mut has_custom_versions_file = false;
    let test_files: Vec<_> = test
        .files()
        .filter_map(|embedded| {
            if embedded.lang == "ignore" {
                return None;
            }
            assert!(
                matches!(
                    embedded.lang,
                    "py" | "pyi" | "python" | "text" | "cfg" | "pth"
                ),
                "Supported file types are: py (or python), pyi, text, cfg and ignore"
            );
            let mut full_path = embedded.full_path(&project_root);
            if let Some(relative_path_to_custom_typeshed) = custom_typeshed_path
                .and_then(|typeshed| full_path.strip_prefix(typeshed.join("stdlib")).ok())
            {
                if relative_path_to_custom_typeshed.as_str() == "VERSIONS" {
                    has_custom_versions_file = true;
                } else if relative_path_to_custom_typeshed
                    .extension()
                    .is_some_and(|ext| ext == "pyi")
                {
                    typeshed_files.push(relative_path_to_custom_typeshed.to_path_buf());
                }
            } else if let Some(component_index) = full_path
                .components()
                .position(|c| c.as_str() == "<path-to-site-packages>")
            {
                // If the path contains `<path-to-site-packages>`, we need to replace it with the
                // actual site-packages directory based on the Python platform and version.
                let mut components = full_path.components();
                let mut new_path: SystemPathBuf =
                    components.by_ref().take(component_index).collect();
                if cfg!(target_os = "windows") {
                    new_path.extend(["Lib", "site-packages"]);
                } else {
                    new_path.push("lib");
                    new_path.push(format!("python{python_version}"));
                    new_path.push("site-packages");
                }
                new_path.extend(components.skip(1));
                full_path = new_path;
            }
            let temp_string;
            let to_write = if embedded.lang == "pth" && !embedded.code.starts_with('/') {
                // Make any relative .pths be relative to src_path
                temp_string = format!("{src_path}/{}", embedded.code);
                &*temp_string
            } else {
                &*embedded.code
            };
            db.write_file(&full_path, to_write).unwrap();
            if !(full_path.starts_with(&src_path)
                && matches!(embedded.lang, "py" | "python" | "pyi"))
            {
                // These files need to be written to the file system (above), but we don't run any checks on them.
                return None;
            }
            let file = system_path_to_file(db, full_path).unwrap();
            Some(TestFile {
                file,
                backtick_offsets: embedded.backtick_offsets.clone(),
            })
        })
        .collect();
    // Create a custom typeshed `VERSIONS` file if none was provided.
    if let Some(typeshed_path) = custom_typeshed_path {
        db.files()
            .try_add_root(db, typeshed_path, FileRootKind::LibrarySearchPath);
        if !has_custom_versions_file {
            let versions_file = typeshed_path.join("stdlib/VERSIONS");
            let contents = typeshed_files
                .iter()
                .fold(String::new(), |mut content, path| {
                    // This is intentionally kept simple:
                    let module_name = path
                        .as_str()
                        .trim_end_matches(".pyi")
                        .trim_end_matches("/__init__")
                        .replace('/', ".");
                    let _ = writeln!(content, "{module_name}: 3.8-");
                    content
                });
            db.write_file(&versions_file, contents).unwrap();
        }
    }
    // Resolve site-packages: external-dependency venv takes precedence over an
    // explicitly configured Python environment.
    let configuration = test.configuration();
    let site_packages_paths = if configuration.dependencies().is_some() {
        // If dependencies were specified, use the venv we just set up
        let environment = PythonEnvironment::new(
            &venv_for_external_dependencies,
            SysPrefixPathOrigin::PythonCliFlag,
            db.system(),
        )
        .expect("Python environment to point to a valid path");
        environment
            .site_packages_paths(db.system())
            .expect("Python environment to be valid")
            .into_vec()
    } else if let Some(python) = configuration.python() {
        let environment =
            PythonEnvironment::new(python, SysPrefixPathOrigin::PythonCliFlag, db.system())
                .expect("Python environment to point to a valid path");
        environment
            .site_packages_paths(db.system())
            .expect("Python environment to be valid")
            .into_vec()
    } else {
        vec![]
    };
    // Make any relative extra-paths be relative to src_path
    let extra_paths = configuration
        .extra_paths()
        .unwrap_or_default()
        .iter()
        .map(|path| {
            if path.is_absolute() {
                path.clone()
            } else {
                src_path.join(path)
            }
        })
        .collect();
    let settings = ProgramSettings {
        python_version: PythonVersionWithSource {
            version: python_version,
            source: PythonVersionSource::Cli,
        },
        python_platform: configuration
            .python_platform()
            .unwrap_or(PythonPlatform::Identifier("linux".to_string())),
        search_paths: SearchPathSettings {
            src_roots: vec![src_path],
            extra_paths,
            custom_typeshed: custom_typeshed_path.map(SystemPath::to_path_buf),
            site_packages_paths,
            real_stdlib_path: None,
            misconfiguration_mode: MisconfigurationMode::Fail,
        }
        .to_search_paths(db.system(), db.vendored())
        .expect("Failed to resolve search path settings"),
    };
    Program::init_or_update(db, settings);
    db.update_analysis_options(configuration.analysis.as_ref());
    // When snapshot testing is enabled, this is populated with
    // all diagnostics. Otherwise it remains empty.
    let mut snapshot_diagnostics = vec![];
    let mut any_pull_types_failures = false;
    let mut panic_info = None;
    // Check each file: collect syntax + type diagnostics, match them against
    // the inline assertions, and additionally exercise the pull-types API.
    let mut failures: Failures = test_files
        .iter()
        .filter_map(|test_file| {
            let parsed = parsed_module(db, test_file.file).load(db);
            let mut diagnostics: Vec<Diagnostic> = parsed
                .errors()
                .iter()
                .map(|error| Diagnostic::invalid_syntax(test_file.file, &error.error, error))
                .collect();
            diagnostics.extend(
                parsed
                    .unsupported_syntax_errors()
                    .iter()
                    .map(|error| Diagnostic::invalid_syntax(test_file.file, error, error)),
            );
            let mdtest_result = attempt_test(db, check_types, test_file);
            let type_diagnostics = match mdtest_result {
                Ok(diagnostics) => diagnostics,
                Err(failures) => {
                    if test.should_expect_panic().is_ok() {
                        panic_info = Some(failures.info);
                        return None;
                    }
                    return Some(failures.into_file_failures(db, "run mdtest", None));
                }
            };
            diagnostics.extend(type_diagnostics);
            diagnostics.sort_by(|left, right| {
                left.rendering_sort_key(db)
                    .cmp(&right.rendering_sort_key(db))
            });
            let failure = match matcher::match_file(db, test_file.file, &diagnostics) {
                Ok(()) => None,
                Err(line_failures) => Some(FileFailures {
                    backtick_offsets: test_file.backtick_offsets.clone(),
                    by_line: line_failures,
                }),
            };
            // Filter out `revealed-type` and `undefined-reveal` diagnostics from snapshots,
            // since they make snapshots very noisy!
            if test.should_snapshot_diagnostics() {
                snapshot_diagnostics.extend(diagnostics.into_iter().filter(|diagnostic| {
                    diagnostic.id() != DiagnosticId::RevealedType
                        && !diagnostic.id().is_lint_named(&UNDEFINED_REVEAL.name())
                }));
            }
            let pull_types_result = attempt_test(db, pull_types, test_file);
            match pull_types_result {
                Ok(()) => {}
                Err(failures) => {
                    any_pull_types_failures = true;
                    if !test.should_skip_pulling_types() {
                        return Some(failures.into_file_failures(
                            db,
                            "\"pull types\"",
                            Some(
                                "Note: either fix the panic or add the `<!-- pull-types:skip -->` \
                                directive to this test",
                            ),
                        ));
                    }
                }
            }
            failure
        })
        .collect();
    // Reconcile observed panics with the test's `expect-panic` directive.
    match panic_info {
        Some(panic_info) => {
            let expected_message = test
                .should_expect_panic()
                .expect("panic_info is only set when `should_expect_panic` is `Ok`");
            let message = panic_info
                .payload
                .as_str()
                .unwrap_or("Box<dyn Any>")
                .to_string();
            if let Some(expected_message) = expected_message {
                assert!(
                    message.contains(expected_message),
                    "Test `{}` is expected to panic with `{expected_message}`, but panicked with `{message}` instead.",
                    test.name()
                );
            }
        }
        None => {
            if let Ok(message) = test.should_expect_panic() {
                if let Some(message) = message {
                    panic!(
                        "Test `{}` is expected to panic with `{message}`, but it didn't.",
                        test.name()
                    );
                }
                panic!("Test `{}` is expected to panic but it didn't.", test.name());
            }
        }
    }
    // A `pull-types:skip` directive that is no longer needed is itself a failure.
    if test.should_skip_pulling_types() && !any_pull_types_failures {
        let mut by_line = matcher::FailuresByLine::default();
        by_line.push(
            OneIndexed::from_zero_indexed(0),
            vec![
                "Remove the `<!-- pull-types:skip -->` directive from this test: pulling types \
                succeeded for all files in the test."
                    .to_string(),
            ],
        );
        let failure = FileFailures {
            backtick_offsets: test_files[0].backtick_offsets.clone(),
            by_line,
        };
        failures.push(failure);
    }
    if snapshot_diagnostics.is_empty() && test.should_snapshot_diagnostics() {
        panic!(
            "Test `{}` requested snapshotting diagnostics but it didn't produce any.",
            test.name()
        );
    } else if !snapshot_diagnostics.is_empty() {
        let snapshot =
            create_diagnostic_snapshot(db, relative_fixture_path, test, snapshot_diagnostics);
        let name = test.name().replace(' ', "_").replace(':', "__");
        insta::with_settings!(
            {
                snapshot_path => snapshot_path,
                input_file => name.clone(),
                filters => vec![(r"\\", "/")],
                prepend_module_to_snapshot => false,
            },
            { insta::assert_snapshot!(name, snapshot) }
        );
    }
    if failures.is_empty() {
        Ok(TestOutcome::Success)
    } else {
        Err(failures)
    }
}
/// Reports an inconsistency between "list modules" and "resolve module."
///
/// Values of this type are only constructed when `from_list` and
/// `from_resolve` are not equivalent.
struct ModuleInconsistency<'db> {
    // Kept so `Display` can render module details without re-threading the db.
    db: &'db db::Db,
    /// The module returned from `list_module`.
    from_list: Module<'db>,
    /// The module returned, if any, from `resolve_module`.
    from_resolve: Option<Module<'db>>,
}
/// Tests that "list modules" is consistent with "resolve module."
///
/// This only checks that everything returned by `list_module` is the
/// identical module we get back from `resolve_module`. It does not
/// check that all possible outputs of `resolve_module` are captured by
/// `list_module`.
fn run_module_resolution_consistency_test(db: &db::Db) -> Result<(), Vec<ModuleInconsistency<'_>>> {
    let mut inconsistencies = Vec::new();
    for from_list in list_modules(db) {
        // TODO: For now list_modules does not partake in desperate module resolution so
        // only compare against confident module resolution.
        match resolve_module_confident(db, from_list.name(db)) {
            None => inconsistencies.push(ModuleInconsistency {
                db,
                from_list,
                from_resolve: None,
            }),
            Some(from_resolve) => {
                if from_list != from_resolve {
                    inconsistencies.push(ModuleInconsistency {
                        db,
                        from_list,
                        from_resolve: Some(from_resolve),
                    });
                }
            }
        }
    }
    if inconsistencies.is_empty() {
        Ok(())
    } else {
        Err(inconsistencies)
    }
}
impl std::fmt::Display for ModuleInconsistency<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Helper: render one module's name, file, kind, search path and
        // "known" status, with `N/A` standing in for missing values.
        fn fmt_module(
            db: &db::Db,
            f: &mut std::fmt::Formatter,
            module: &Module<'_>,
        ) -> std::fmt::Result {
            let name = module.name(db);
            let path = module
                .file(db)
                .map(|file| file.path(db).to_string())
                .unwrap_or_else(|| "N/A".to_string());
            let search_path = module
                .search_path(db)
                .map(SearchPath::to_string)
                .unwrap_or_else(|| "N/A".to_string());
            let known = module
                .known(db)
                .map(|known| known.to_string())
                .unwrap_or_else(|| "N/A".to_string());
            write!(
                f,
                "Module(\
                name={name}, \
                file={path}, \
                kind={kind:?}, \
                search_path={search_path}, \
                known={known}\
                )",
                kind = module.kind(db),
            )
        }
        write!(f, "Found ")?;
        fmt_module(self.db, f, &self.from_list)?;
        match self.from_resolve {
            None => write!(
                f,
                " when listing modules, but `resolve_module` returned `None`",
            )?,
            Some(ref got) => {
                write!(f, " when listing modules, but `resolve_module` returned ",)?;
                fmt_module(self.db, f, got)?;
            }
        }
        Ok(())
    }
}
/// All failures produced by one test, one entry per failing embedded file.
type Failures = Vec<FileFailures>;
/// The failures for a single file in a test by line number.
struct FileFailures {
    /// Positional information about the code block(s) to reconstruct absolute line numbers.
    backtick_offsets: Vec<BacktickOffsets>,
    /// The failures by lines in the file.
    by_line: matcher::FailuresByLine,
}
/// File in a test.
struct TestFile {
    file: File,
    /// Positional information about the code block(s) to reconstruct absolute line numbers.
    backtick_offsets: Vec<BacktickOffsets>,
}
/// Renders an insta snapshot body containing every embedded source file
/// (with line numbers) followed by the rendered diagnostics.
fn create_diagnostic_snapshot(
    db: &mut db::Db,
    relative_fixture_path: &Utf8Path,
    test: &parser::MarkdownTest,
    diagnostics: impl IntoIterator<Item = Diagnostic>,
) -> String {
    let display_config = DisplayDiagnosticConfig::default()
        .color(false)
        .show_fix_diff(true)
        .with_fix_applicability(Applicability::DisplayOnly);
    let mut snapshot = String::new();
    // Header identifying which mdtest produced this snapshot.
    writeln!(snapshot).unwrap();
    writeln!(snapshot, "---").unwrap();
    writeln!(snapshot, "mdtest name: {}", test.uncontracted_name()).unwrap();
    writeln!(snapshot, "mdtest path: {relative_fixture_path}").unwrap();
    writeln!(snapshot, "---").unwrap();
    writeln!(snapshot).unwrap();
    writeln!(snapshot, "# Python source files").unwrap();
    writeln!(snapshot).unwrap();
    for file in test.files() {
        writeln!(snapshot, "## {}", file.relative_path()).unwrap();
        writeln!(snapshot).unwrap();
        // Note that we don't use ```py here because the line numbering
        // we add makes it invalid Python. This sacrifices syntax
        // highlighting when you look at the snapshot on GitHub,
        // but the line numbers are extremely useful for analyzing
        // snapshots. So we keep them.
        writeln!(snapshot, "```").unwrap();
        let line_number_width = file.code.lines().count().to_string().len();
        for (i, line) in file.code.lines().enumerate() {
            let line_number = i + 1;
            writeln!(snapshot, "{line_number:>line_number_width$} | {line}").unwrap();
        }
        writeln!(snapshot, "```").unwrap();
        writeln!(snapshot).unwrap();
    }
    writeln!(snapshot, "# Diagnostics").unwrap();
    writeln!(snapshot).unwrap();
    for (i, diag) in diagnostics.into_iter().enumerate() {
        if i > 0 {
            writeln!(snapshot).unwrap();
        }
        writeln!(snapshot, "```").unwrap();
        write!(snapshot, "{}", diag.display(db, &display_config)).unwrap();
        writeln!(snapshot, "```").unwrap();
    }
    snapshot
}
/// Run a function over an embedded test file, catching any panics that occur in the process.
///
/// If no panic occurs, the result of the function is returned as an `Ok()` variant.
///
/// If a panic occurs, a nicely formatted [`FileFailures`] is returned as an `Err()` variant.
/// This will be formatted into a diagnostic message by `ty_test`.
fn attempt_test<'db, 'a, T, F>(
    db: &'db Db,
    test_fn: F,
    test_file: &'a TestFile,
) -> Result<T, AttemptTestError<'a>>
where
    F: FnOnce(&'db dyn ty_python_semantic::Db, File) -> T + std::panic::UnwindSafe,
{
    // The test file reference is carried along so the caller can attribute the
    // panic back to the right embedded file.
    catch_unwind(|| test_fn(db, test_file.file))
        .map_err(|info| AttemptTestError { info, test_file })
}
/// A panic captured by [`attempt_test`], paired with the file being checked.
struct AttemptTestError<'a> {
    info: PanicError,
    test_file: &'a TestFile,
}
impl AttemptTestError<'_> {
    /// Formats the captured panic (location, payload, backtraces) into a
    /// [`FileFailures`] attached to line 1 of the file that was being checked.
    ///
    /// `action` names the operation that panicked; `clarification` is an
    /// optional extra hint appended to the header.
    fn into_file_failures(
        self,
        db: &Db,
        action: &str,
        clarification: Option<&str>,
    ) -> FileFailures {
        let info = self.info;
        let mut by_line = matcher::FailuresByLine::default();
        let mut messages = vec![];
        match info.location {
            Some(location) => messages.push(format!(
                "Attempting to {action} caused a panic at {location}"
            )),
            None => messages.push(format!(
                "Attempting to {action} caused a panic at an unknown location",
            )),
        }
        if let Some(clarification) = clarification {
            messages.push(clarification.to_string());
        }
        messages.push(String::new());
        match info.payload.as_str() {
            Some(message) => messages.push(message.to_string()),
            // Mimic the default panic hook's rendering of the panic payload if it's
            // not a string.
            None => messages.push("Box<dyn Any>".to_string()),
        }
        messages.push(String::new());
        if let Some(backtrace) = info.backtrace {
            match backtrace.status() {
                BacktraceStatus::Disabled => {
                    let msg =
                        "run with `RUST_BACKTRACE=1` environment variable to display a backtrace";
                    messages.push(msg.to_string());
                }
                BacktraceStatus::Captured => {
                    messages.extend(backtrace.to_string().split('\n').map(String::from));
                }
                _ => {}
            }
        }
        // The salsa backtrace needs the db attached to render query names.
        if let Some(backtrace) = info.salsa_backtrace {
            salsa::attach(db, || {
                messages.extend(format!("{backtrace:#}").split('\n').map(String::from));
            });
        }
        by_line.push(OneIndexed::from_zero_indexed(0), messages);
        FileFailures {
            backtick_offsets: self.test_file.backtick_offsets.clone(),
            by_line,
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/parser.rs | crates/ty_test/src/parser.rs | use std::{
borrow::Cow,
collections::hash_map::Entry,
fmt::{Formatter, LowerHex, Write},
hash::Hash,
};
use anyhow::bail;
use ruff_db::system::{SystemPath, SystemPathBuf};
use rustc_hash::FxHashMap;
use crate::config::MarkdownTestConfig;
use ruff_index::{IndexVec, newtype_index};
use ruff_python_ast::PySourceType;
use ruff_python_trivia::Cursor;
use ruff_source_file::{LineIndex, LineRanges, OneIndexed};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustc_stable_hash::{FromStableHash, SipHasher128Hash, StableSipHasher128};
/// Parse the Markdown `source` as a test suite with given `title`.
pub(crate) fn parse<'s>(title: &'s str, source: &'s str) -> anyhow::Result<MarkdownTestSuite<'s>> {
    Parser::new(title, source).parse()
}
/// A parsed markdown file containing tests.
///
/// Borrows from the source string and filepath it was created from.
#[derive(Debug)]
pub(crate) struct MarkdownTestSuite<'s> {
    /// Header sections, including the implicit root section at index 0.
    sections: IndexVec<SectionId, Section<'s>>,
    /// Test files embedded within the Markdown file, in document order.
    files: IndexVec<EmbeddedFileId, EmbeddedFile<'s>>,
}
impl<'s> MarkdownTestSuite<'s> {
    /// Iterate over all tests in this suite, grouping consecutive embedded
    /// files that belong to the same section.
    pub(crate) fn tests(&self) -> MarkdownTestIterator<'_, 's> {
        let suite = self;
        MarkdownTestIterator {
            suite,
            current_file_index: 0,
        }
    }
}
/// A 128-bit hash value, stored as two 64-bit words.
struct Hash128([u64; 2]);
impl FromStableHash for Hash128 {
    type Hash = SipHasher128Hash;

    /// Wrap the raw SipHash-128 output words without any transformation.
    fn from(hash: SipHasher128Hash) -> Hash128 {
        let SipHasher128Hash(words) = hash;
        Hash128(words)
    }
}
impl LowerHex for Hash128 {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let Self(hash) = self;
// Only write the first half for concision
write!(f, "{:x}", hash[0])
}
}
/// A single test inside a [`MarkdownTestSuite`].
///
/// A test is a single header section (or the implicit root section, if there are no Markdown
/// headers in the file), containing one or more embedded Python files as fenced code blocks, and
/// containing no nested header subsections.
#[derive(Debug)]
pub(crate) struct MarkdownTest<'m, 's> {
    /// The suite this test belongs to.
    suite: &'m MarkdownTestSuite<'s>,
    /// The (innermost) section holding this test's files.
    section: &'m Section<'s>,
    /// The embedded files making up this test.
    files: &'m [EmbeddedFile<'s>],
}
impl<'m, 's> MarkdownTest<'m, 's> {
    /// Maximum number of characters a single title fragment contributes to a
    /// contracted test name.
    const MAX_TITLE_LENGTH: usize = 20;
    /// Appended to a fragment that was truncated.
    const ELLIPSIS: char = '\u{2026}';
    /// Shorten `title` to at most [`Self::MAX_TITLE_LENGTH`] characters,
    /// appending an ellipsis when it was truncated.
    ///
    /// NOTE(review): the length check uses `title.len()` (bytes) while the
    /// truncation counts `chars()`; a multi-byte title that is > 20 bytes but
    /// <= 20 characters gets an ellipsis appended without losing any
    /// characters — confirm whether that is intended.
    fn contracted_title(title: &str) -> String {
        if title.len() <= Self::MAX_TITLE_LENGTH {
            return (*title).to_string();
        }
        format!(
            "{}{}",
            title
                .chars()
                .take(Self::MAX_TITLE_LENGTH)
                .collect::<String>(),
            Self::ELLIPSIS
        )
    }
    /// Join the titles of all ancestor sections and this section with " - ".
    ///
    /// When `contracted` is true, each fragment is shortened and a stable hash
    /// of the *full* name is appended, keeping contracted names unique.
    fn joined_name(&self, contracted: bool) -> String {
        let mut name_fragments = vec![];
        let mut parent_id = self.section.parent_id;
        // Walk up the section tree, collecting titles from the root down.
        while let Some(next_id) = parent_id {
            let parent = &self.suite.sections[next_id];
            name_fragments.insert(0, parent.title);
            parent_id = parent.parent_id;
        }
        name_fragments.push(self.section.title);
        let full_name = name_fragments.join(" - ");
        if !contracted {
            return full_name;
        }
        let mut contracted_name = name_fragments
            .iter()
            .map(|fragment| Self::contracted_title(fragment))
            .collect::<Vec<_>>()
            .join(" - ");
        let mut hasher = StableSipHasher128::new();
        full_name.hash(&mut hasher);
        let _ = write!(contracted_name, " ({:x})", hasher.finish::<Hash128>());
        contracted_name
    }
    /// The full (non-shortened) test name.
    pub(crate) fn uncontracted_name(&self) -> String {
        self.joined_name(false)
    }
    /// The contracted test name (shortened fragments plus a disambiguating hash).
    pub(crate) fn name(&self) -> String {
        self.joined_name(true)
    }
    /// The embedded files belonging to this test.
    pub(crate) fn files(&self) -> impl Iterator<Item = &'m EmbeddedFile<'s>> {
        self.files.iter()
    }
    /// The configuration of this test's section.
    pub(crate) fn configuration(&self) -> &MarkdownTestConfig {
        &self.section.config
    }
    /// Whether the `snapshot-diagnostics` directive is set for this test.
    pub(super) fn should_snapshot_diagnostics(&self) -> bool {
        self.section
            .directives
            .has_directive_set(MdtestDirective::SnapshotDiagnostics)
    }
    /// Whether the `expect-panic` directive is set, and its optional value.
    pub(super) fn should_expect_panic(&self) -> Result<Option<&str>, ()> {
        self.section.directives.get(MdtestDirective::ExpectPanic)
    }
    /// Whether the `pull-types:skip` directive is set for this test.
    pub(super) fn should_skip_pulling_types(&self) -> bool {
        self.section
            .directives
            .has_directive_set(MdtestDirective::PullTypesSkip)
    }
}
/// Iterator yielding all [`MarkdownTest`]s in a [`MarkdownTestSuite`].
#[derive(Debug)]
pub(crate) struct MarkdownTestIterator<'m, 's> {
    /// The suite being iterated over.
    suite: &'m MarkdownTestSuite<'s>,
    /// Index of the first file not yet yielded as part of a test.
    current_file_index: usize,
}
impl<'m, 's> Iterator for MarkdownTestIterator<'m, 's> {
    type Item = MarkdownTest<'m, 's>;

    /// Yield the next test: the maximal run of consecutive files that belong
    /// to the same section.
    fn next(&mut self) -> Option<Self::Item> {
        let start = self.current_file_index;
        let section_id = self.suite.files.get(start.into())?.section;
        // Advance `end` past every file that is still in this section.
        let mut end = start;
        while self
            .suite
            .files
            .get(end.into())
            .is_some_and(|file| file.section == section_id)
        {
            end += 1;
        }
        self.current_file_index = end;
        Some(MarkdownTest {
            suite: self.suite,
            section: &self.suite.sections[section_id],
            files: &self.suite.files
                [EmbeddedFileId::from_usize(start)..EmbeddedFileId::from_usize(end)],
        })
    }
}
/// Identifies a [`Section`] within [`MarkdownTestSuite::sections`].
#[newtype_index]
struct SectionId;
/// A single header section of a [`MarkdownTestSuite`], or the implicit root "section".
///
/// A header section is the part of a Markdown file beginning with a `#`-prefixed header line, and
/// extending until the next header line at the same or higher outline level (that is, with the
/// same number or fewer `#` characters).
///
/// A header section may either contain one or more embedded Python files (making it a
/// [`MarkdownTest`]), or it may contain nested sections (headers with more `#` characters), but
/// not both.
#[derive(Debug)]
struct Section<'s> {
    /// Header title (or the file title, for the implicit root section).
    title: &'s str,
    /// Number of `#` characters in the header; 0 for the implicit root.
    level: u8,
    /// The enclosing section; `None` only for the implicit root.
    parent_id: Option<SectionId>,
    /// Configuration, inherited from the parent and optionally replaced by a
    /// TOML block within this section.
    config: MarkdownTestConfig,
    /// HTML-comment directives, inherited from the parent section.
    directives: MdtestDirectives,
}
/// Identifies an [`EmbeddedFile`] within [`MarkdownTestSuite::files`].
#[newtype_index]
struct EmbeddedFileId;
/// Holds information about the start and the end of a code block in a Markdown file.
///
/// The start is the offset of the first triple-backtick in the code block, and the end is the
/// offset of the (start of the) closing triple-backtick.
///
/// Consumed by [`EmbeddedFileSourceMap`] to map line numbers in an embedded
/// file back to absolute line numbers in the Markdown file.
#[derive(Debug, Clone)]
pub(crate) struct BacktickOffsets(TextSize, TextSize);
/// Holds information about the position and length of all code blocks that are part of
/// a single embedded file in a Markdown file. This is used to reconstruct absolute line
/// numbers (in the Markdown file) from relative line numbers (in the embedded file).
///
/// If we have a Markdown section with multiple code blocks like this:
///
///   01  # Test
///   02
///   03  Part 1:
///   04
///   05  ```py
///   06  a = 1  # Relative line number: 1
///   07  b = 2  # Relative line number: 2
///   08  ```
///   09
///   10  Part 2:
///   11
///   12  ```py
///   13  c = 3  # Relative line number: 3
///   14  ```
///
/// We want to reconstruct the absolute line number (left) from relative
/// line numbers. The information we have is the start line and the line
/// count of each code block:
///
/// - Block 1: (start = 5, count = 2)
/// - Block 2: (start = 12, count = 1)
///
/// For example, if we see a relative line number of 3, we see that it is
/// larger than the line count of the first block, so we subtract the line
/// count of the first block, and then add the new relative line number (1)
/// to the absolute start line of the second block (12), resulting in an
/// absolute line number of 13.
pub(crate) struct EmbeddedFileSourceMap {
    /// For each code block: (absolute line of the opening backticks, number of
    /// code lines inside the block).
    start_line_and_line_count: Vec<(usize, usize)>,
}
impl EmbeddedFileSourceMap {
pub(crate) fn new(
md_index: &LineIndex,
dimensions: impl IntoIterator<Item = BacktickOffsets>,
) -> EmbeddedFileSourceMap {
EmbeddedFileSourceMap {
start_line_and_line_count: dimensions
.into_iter()
.map(|d| {
let start_line = md_index.line_index(d.0).get();
let end_line = md_index.line_index(d.1).get();
let code_line_count = (end_line - start_line) - 1;
(start_line, code_line_count)
})
.collect(),
}
}
/// Returns the absolute line number in the markdown file for a given line number
/// relative to the concatenated code blocks.
///
/// Returns an `Err` if the relative line number is out of bounds where
/// the returned value is the absolute line number of the last code block.
///
/// # Panics
/// If called when the markdown file has no code blocks.
pub(crate) fn to_absolute_line_number(
&self,
relative_line_number: OneIndexed,
) -> std::result::Result<OneIndexed, OneIndexed> {
let mut relative_line_number = relative_line_number.get();
for (start_line, line_count) in &self.start_line_and_line_count {
if relative_line_number > *line_count {
relative_line_number -= *line_count;
} else {
let absolute_line_number = start_line + relative_line_number;
return Ok(OneIndexed::new(absolute_line_number)
.expect("absolute line number must be >= 1"));
}
}
let last_line_number = self
.start_line_and_line_count
.last()
.and_then(|(start_line, line_count)| OneIndexed::new(start_line + line_count));
Err(last_line_number.expect("markdown file to have at least one code block"))
}
}
/// The path of an embedded test file, either derived automatically from the
/// code block's language or given explicitly before the code block.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) enum EmbeddedFilePath<'s> {
    /// Auto-generated name (`mdtest_snippet.*`) based on the source type.
    Autogenerated(PySourceType),
    /// A path spelled out in the Markdown source.
    Explicit(&'s str),
}
impl EmbeddedFilePath<'_> {
    /// The file's path as a string, using the canonical `mdtest_snippet.*`
    /// names for auto-generated paths.
    pub(crate) fn as_str(&self) -> &str {
        match self {
            EmbeddedFilePath::Explicit(path) => path,
            EmbeddedFilePath::Autogenerated(source_type) => match source_type {
                PySourceType::Python => "mdtest_snippet.py",
                PySourceType::Stub => "mdtest_snippet.pyi",
                PySourceType::Ipynb => "mdtest_snippet.ipynb",
            },
        }
    }
    /// Whether this path was written out explicitly in the Markdown source.
    fn is_explicit(&self) -> bool {
        matches!(self, EmbeddedFilePath::Explicit(_))
    }
    /// An explicit path must not collide with one of the auto-generated names.
    fn is_allowed_explicit_path(path: &str) -> bool {
        ![PySourceType::Python, PySourceType::Stub]
            .iter()
            .any(|source_type| path == EmbeddedFilePath::Autogenerated(*source_type).as_str())
    }
}
/// A single file embedded in a [`Section`] as a fenced code block.
///
/// Currently must be a Python file (`py` language), a type stub (`pyi`) or a [typeshed `VERSIONS`]
/// file.
///
/// TOML configuration blocks are also supported, but are not stored as `EmbeddedFile`s. In the
/// future we plan to support `pth` files as well.
///
/// A Python embedded file makes its containing [`Section`] into a [`MarkdownTest`], and will be
/// type-checked and searched for inline-comment assertions to match against the diagnostics from
/// type checking.
///
/// [typeshed `VERSIONS`]: https://github.com/python/typeshed/blob/c546278aae47de0b2b664973da4edb613400f6ce/stdlib/VERSIONS#L1-L18
#[derive(Debug)]
pub(crate) struct EmbeddedFile<'s> {
    /// The section this file belongs to.
    section: SectionId,
    /// The file's (explicit or auto-generated) path.
    path: EmbeddedFilePath<'s>,
    /// Language tag of the fenced code block.
    pub(crate) lang: &'s str,
    /// File contents: borrowed for a single code block, owned once additional
    /// blocks have been appended.
    pub(crate) code: Cow<'s, str>,
    /// Offsets of every contributing code block, used for line-number mapping.
    pub(crate) backtick_offsets: Vec<BacktickOffsets>,
}
impl EmbeddedFile<'_> {
    /// Append the contents of an additional code block to this file.
    ///
    /// Empty code blocks are treated as non-existent so they don't introduce
    /// an extra blank line.
    fn append_code(&mut self, backtick_offsets: BacktickOffsets, new_code: &str) {
        if !new_code.is_empty() {
            self.backtick_offsets.push(backtick_offsets);
            let code = self.code.to_mut();
            code.push('\n');
            code.push_str(new_code);
        }
    }

    /// The file's path relative to the project root.
    pub(crate) fn relative_path(&self) -> &str {
        self.path.as_str()
    }

    /// Returns the full path using unix file-path convention.
    pub(crate) fn full_path(&self, project_root: &SystemPath) -> SystemPathBuf {
        // Don't use `SystemPath::absolute` here because it's platform dependent
        // and we want to use unix file-path convention.
        match self.relative_path() {
            absolute if absolute.starts_with('/') => SystemPathBuf::from(absolute),
            relative => project_root.join(relative),
        }
    }
}
/// Stack of the currently-open sections; always contains at least the
/// implicit root section.
#[derive(Debug)]
struct SectionStack(Vec<SectionId>);
impl SectionStack {
fn new(root_section_id: SectionId) -> Self {
Self(vec![root_section_id])
}
fn push(&mut self, section_id: SectionId) {
self.0.push(section_id);
}
fn pop(&mut self) -> Option<SectionId> {
let popped = self.0.pop();
debug_assert_ne!(popped, None, "Should never pop the implicit root section");
debug_assert!(
!self.0.is_empty(),
"Should never pop the implicit root section"
);
popped
}
fn top(&mut self) -> SectionId {
*self
.0
.last()
.expect("Should never pop the implicit root section")
}
}
/// Parse the source of a Markdown file into a [`MarkdownTestSuite`].
#[derive(Debug)]
struct Parser<'s> {
    /// [`Section`]s of the final [`MarkdownTestSuite`].
    sections: IndexVec<SectionId, Section<'s>>,
    /// [`EmbeddedFile`]s of the final [`MarkdownTestSuite`].
    files: IndexVec<EmbeddedFileId, EmbeddedFile<'s>>,
    /// The unparsed remainder of the Markdown source.
    cursor: Cursor<'s>,
    // Number of consecutive empty lines seen immediately before the cursor.
    preceding_blank_lines: usize,
    // Explicitly specified path for the upcoming code block.
    explicit_path: Option<&'s str>,
    /// The complete Markdown source (used for slicing and line counting).
    source: &'s str,
    /// Stack of ancestor sections.
    stack: SectionStack,
    /// Names of embedded files in current active section.
    current_section_files: FxHashMap<EmbeddedFilePath<'s>, EmbeddedFileId>,
    /// Whether or not the current section has a config block.
    current_section_has_config: bool,
    /// Whether or not any section in the file has external dependencies.
    /// Only one section per file is allowed to have dependencies (for lockfile support).
    file_has_dependencies: bool,
}
impl<'s> Parser<'s> {
    /// Create a parser over `source`, seeding the section stack with an
    /// implicit root section named `title`.
    fn new(title: &'s str, source: &'s str) -> Self {
        let mut sections = IndexVec::default();
        let root_section_id = sections.push(Section {
            title,
            level: 0,
            parent_id: None,
            config: MarkdownTestConfig::default(),
            directives: MdtestDirectives::default(),
        });
        Self {
            sections,
            source,
            files: IndexVec::default(),
            cursor: Cursor::new(source),
            preceding_blank_lines: 0,
            explicit_path: None,
            stack: SectionStack::new(root_section_id),
            current_section_files: FxHashMap::default(),
            current_section_has_config: false,
            file_has_dependencies: false,
        }
    }
    /// Run the parser to completion and assemble the final test suite.
    fn parse(mut self) -> anyhow::Result<MarkdownTestSuite<'s>> {
        self.parse_impl()?;
        Ok(self.finish())
    }
    /// Consume `self`, shrinking the collected sections and files into a
    /// [`MarkdownTestSuite`].
    fn finish(mut self) -> MarkdownTestSuite<'s> {
        self.sections.shrink_to_fit();
        self.files.shrink_to_fit();
        MarkdownTestSuite {
            sections: self.sections,
            files: self.files,
        }
    }
    /// Skip horizontal whitespace (any whitespace except `\n`).
    fn skip_non_newline_whitespace(&mut self) {
        self.cursor.eat_while(|c| c.is_whitespace() && c != '\n');
    }
    /// Advance past the next newline; returns `false` if none remains.
    fn skip_to_beginning_of_next_line(&mut self) -> bool {
        if let Some(position) = memchr::memchr(b'\n', self.cursor.as_bytes()) {
            self.cursor.skip_bytes(position + 1);
            true
        } else {
            false
        }
    }
    /// Consume characters until `end_predicate` matches, returning the
    /// consumed text (excluding the matching character), or `None` on EOF.
    fn consume_until(&mut self, mut end_predicate: impl FnMut(char) -> bool) -> Option<&'s str> {
        let start = self.cursor.offset().to_usize();
        while !self.cursor.is_eof() {
            if end_predicate(self.cursor.first()) {
                return Some(&self.source[start..self.cursor.offset().to_usize()]);
            }
            self.cursor.bump();
        }
        None
    }
    /// Main parse loop. Dispatches on the first character of each line:
    /// HTML comments (mdtest directives), `#` headers, backtick fences
    /// (code blocks or explicit-path lines), and blank lines.
    fn parse_impl(&mut self) -> anyhow::Result<()> {
        const SECTION_CONFIG_SNAPSHOT: &str = "snapshot-diagnostics";
        const SECTION_CONFIG_PULLTYPES: &str = "pull-types:skip";
        const SECTION_CONFIG_EXPECT_PANIC: &str = "expect-panic";
        const HTML_COMMENT_ALLOWLIST: &[&str] = &["blacken-docs:on", "blacken-docs:off"];
        const CODE_BLOCK_END: &[u8] = b"```";
        const HTML_COMMENT_END: &[u8] = b"-->";
        while let Some(first) = self.cursor.bump() {
            match first {
                // An HTML comment: either an mdtest directive or an
                // allow-listed marker; anything else is rejected as a likely typo.
                '<' if self.cursor.eat_char3('!', '-', '-') => {
                    if let Some(position) =
                        memchr::memmem::find(self.cursor.as_bytes(), HTML_COMMENT_END)
                    {
                        let html_comment = self.cursor.as_str()[..position].trim();
                        // Split an optional `directive: value` pair.
                        let (directive, value) = match html_comment.split_once(':') {
                            Some((directive, value)) => {
                                (directive.trim(), Some(value.trim().to_string()))
                            }
                            None => (html_comment, None),
                        };
                        match directive {
                            SECTION_CONFIG_SNAPSHOT => {
                                anyhow::ensure!(
                                    value.is_none(),
                                    "The `{SECTION_CONFIG_SNAPSHOT}` directive does not take a value."
                                );
                                self.process_mdtest_directive(
                                    MdtestDirective::SnapshotDiagnostics,
                                    None,
                                )?;
                            }
                            SECTION_CONFIG_PULLTYPES => {
                                anyhow::ensure!(
                                    value.is_none(),
                                    "The `{SECTION_CONFIG_PULLTYPES}` directive does not take a value."
                                );
                                self.process_mdtest_directive(
                                    MdtestDirective::PullTypesSkip,
                                    None,
                                )?;
                            }
                            SECTION_CONFIG_EXPECT_PANIC => {
                                self.process_mdtest_directive(MdtestDirective::ExpectPanic, value)?;
                            }
                            _ => {
                                if !HTML_COMMENT_ALLOWLIST.contains(&html_comment) {
                                    bail!(
                                        "Unknown HTML comment `{html_comment}` -- possibly a typo? \
                                        (Add to `HTML_COMMENT_ALLOWLIST` if this is a false positive)"
                                    );
                                }
                            }
                        }
                        self.cursor.skip_bytes(position + HTML_COMMENT_END.len());
                    } else {
                        bail!("Unterminated HTML comment.");
                    }
                }
                '#' => {
                    self.explicit_path = None;
                    self.preceding_blank_lines = 0;
                    // Determine header level (number of '#' characters)
                    let mut header_level = 1;
                    while self.cursor.eat_char('#') {
                        header_level += 1;
                    }
                    // Parse header title
                    if let Some(title) = self.consume_until(|c| c == '\n') {
                        let title = title.trim();
                        if !title.is_empty() {
                            self.process_header(header_level, title)?;
                        }
                    }
                }
                '`' => {
                    if self.cursor.eat_char2('`', '`') {
                        // We saw the triple-backtick beginning of a code block.
                        let backtick_offset_start = self.cursor.offset() - "```".text_len();
                        if self.preceding_blank_lines < 1 && self.explicit_path.is_none() {
                            bail!(
                                "Code blocks must start on a new line and be preceded by at least one blank line."
                            );
                        }
                        self.skip_non_newline_whitespace();
                        // Parse the code block language specifier
                        let lang = self
                            .consume_until(|c| matches!(c, ' ' | '\n'))
                            .unwrap_or_default();
                        self.skip_non_newline_whitespace();
                        if !self.cursor.eat_char('\n') {
                            bail!(
                                "Trailing code-block metadata is not supported. Only the code block language can be specified."
                            );
                        }
                        // Find the closing fence and hand the block off for processing.
                        if let Some(position) =
                            memchr::memmem::find(self.cursor.as_bytes(), CODE_BLOCK_END)
                        {
                            let mut code = &self.cursor.as_str()[..position];
                            self.cursor.skip_bytes(position + CODE_BLOCK_END.len());
                            if code.ends_with('\n') {
                                code = &code[..code.len() - '\n'.len_utf8()];
                            }
                            let backtick_offset_end = self.cursor.offset() - "```".text_len();
                            self.process_code_block(
                                lang,
                                code,
                                BacktickOffsets(backtick_offset_start, backtick_offset_end),
                            )?;
                        } else {
                            let code_block_start = self.cursor.token_len();
                            let line = self.source.count_lines(TextRange::up_to(code_block_start));
                            bail!("Unterminated code block at line {line}.");
                        }
                        self.explicit_path = None;
                    } else if self.preceding_blank_lines > 0 {
                        // This could be a line that specifies an explicit path for a Markdown code block (`module.py`:)
                        self.explicit_path = None;
                        if let Some(path) = self.consume_until(|c| matches!(c, '`' | '\n')) {
                            if self.cursor.eat_char('`') {
                                self.skip_non_newline_whitespace();
                                if self.cursor.eat_char(':') {
                                    self.explicit_path = Some(path);
                                }
                            }
                        }
                    }
                    self.preceding_blank_lines = 0;
                }
                '\n' => {
                    self.preceding_blank_lines += 1;
                    continue;
                }
                c => {
                    // Any other content resets blank-line and path tracking;
                    // an indented code fence is rejected outright.
                    self.preceding_blank_lines = 0;
                    self.explicit_path = None;
                    if c.is_whitespace() {
                        self.skip_non_newline_whitespace();
                        if self.cursor.eat_char('`')
                            && self.cursor.eat_char('`')
                            && self.cursor.eat_char('`')
                        {
                            bail!("Indented code blocks are not supported.");
                        }
                    }
                }
            }
            if !self.skip_to_beginning_of_next_line() {
                break;
            }
        }
        Ok(())
    }
    /// Open a new section for a Markdown header, closing sections at the same
    /// or deeper level first. The new section inherits its parent's config
    /// and directives.
    fn process_header(&mut self, header_level: usize, title: &'s str) -> anyhow::Result<()> {
        self.pop_sections_to_level(header_level);
        let parent = self.stack.top();
        let section = Section {
            title,
            level: header_level.try_into()?,
            parent_id: Some(parent),
            config: self.sections[parent].config.clone(),
            directives: self.sections[parent].directives.clone(),
        };
        // Sections with code files must be leaves: no nested headers allowed.
        if !self.current_section_files.is_empty() {
            bail!(
                "Header '{}' not valid inside a test case; parent '{}' has code files.",
                section.title,
                self.sections[parent].title,
            );
        }
        let section_id = self.sections.push(section);
        self.stack.push(section_id);
        self.current_section_files.clear();
        self.current_section_has_config = false;
        Ok(())
    }
    /// Handle a fenced code block: `toml` blocks update the section config,
    /// `ignore` blocks are skipped, and everything else becomes (part of) an
    /// embedded test file.
    fn process_code_block(
        &mut self,
        lang: &'s str,
        code: &'s str,
        backtick_offsets: BacktickOffsets,
    ) -> anyhow::Result<()> {
        // We never pop the implicit root section.
        let section = self.stack.top();
        let test_name = self.sections[section].title;
        if lang == "toml" {
            return self.process_config_block(code);
        }
        if lang == "ignore" {
            return Ok(());
        }
        // An explicit path's extension must agree with the block's language tag.
        if let Some(explicit_path) = self.explicit_path {
            let expected_extension = if lang == "python" { "py" } else { lang };
            if !expected_extension.is_empty()
                && lang != "text"
                && !SystemPath::new(explicit_path)
                    .extension()
                    .is_none_or(|extension| extension.eq_ignore_ascii_case(expected_extension))
            {
                let backtick_start = self.line_index(backtick_offsets.0);
                bail!(
                    "File extension of test file path `{explicit_path}` in test `{test_name}` does not match language specified `{lang}` of code block at line `{backtick_start}`"
                );
            }
        }
        let path = match self.explicit_path {
            Some(path) => {
                if !EmbeddedFilePath::is_allowed_explicit_path(path) {
                    bail!(
                        "The file name `{path}` in test `{test_name}` must not be used explicitly.",
                    );
                }
                EmbeddedFilePath::Explicit(path)
            }
            None => match lang {
                "py" | "python" => EmbeddedFilePath::Autogenerated(PySourceType::Python),
                "pyi" => EmbeddedFilePath::Autogenerated(PySourceType::Stub),
                "" => {
                    bail!(
                        "Cannot auto-generate file name for code block with empty language specifier in test `{test_name}`"
                    );
                }
                _ => {
                    bail!(
                        "Cannot auto-generate file name for code block with language `{lang}` in test `{test_name}`"
                    );
                }
            },
        };
        let has_merged_snippets = self.current_section_has_merged_snippets();
        let has_explicit_file_paths = self.current_section_has_explicit_file_paths();
        // Either create a new embedded file or merge into the existing one
        // with the same (auto-generated) path.
        match self.current_section_files.entry(path.clone()) {
            Entry::Vacant(entry) => {
                if has_merged_snippets {
                    bail!(
                        "Merged snippets in test `{test_name}` are not allowed in the presence of other files."
                    );
                }
                let index = self.files.push(EmbeddedFile {
                    path: path.clone(),
                    section,
                    lang,
                    code: Cow::Borrowed(code),
                    backtick_offsets: vec![backtick_offsets],
                });
                entry.insert(index);
            }
            Entry::Occupied(entry) => {
                if path.is_explicit() {
                    bail!(
                        "Test `{test_name}` has duplicate files named `{}`.",
                        path.as_str(),
                    );
                }
                if has_explicit_file_paths {
                    bail!(
                        "Merged snippets in test `{test_name}` are not allowed in the presence of other files."
                    );
                }
                let index = *entry.get();
                self.files[index].append_code(backtick_offsets, code);
            }
        }
        Ok(())
    }
    /// Whether any file in the current section has an explicit path.
    fn current_section_has_explicit_file_paths(&self) -> bool {
        self.current_section_files
            .iter()
            .any(|(path, _)| path.is_explicit())
    }
    /// Whether any file in the current section was merged from multiple
    /// code blocks.
    fn current_section_has_merged_snippets(&self) -> bool {
        self.current_section_files
            .values()
            .any(|id| self.files[*id].backtick_offsets.len() > 1)
    }
    /// Apply a TOML configuration block to the current section. At most one
    /// config block per section, and at most one section per file may declare
    /// `[project]` dependencies.
    fn process_config_block(&mut self, code: &str) -> anyhow::Result<()> {
        if self.current_section_has_config {
            bail!("Multiple TOML configuration blocks in the same section are not allowed.");
        }
        let config = MarkdownTestConfig::from_str(code)?;
        if config.dependencies().is_some() {
            if self.file_has_dependencies {
                bail!(
                    "Multiple sections with `[project]` dependencies in the same file are not allowed. \
                    External dependencies must be specified in a single top-level configuration block."
                );
            }
            self.file_has_dependencies = true;
        }
        let current_section = &mut self.sections[self.stack.top()];
        current_section.config = config;
        self.current_section_has_config = true;
        Ok(())
    }
    /// Record an HTML-comment directive on the current section. Directives
    /// must come before config blocks and embedded files, and each directive
    /// may appear at most once per section.
    fn process_mdtest_directive(
        &mut self,
        directive: MdtestDirective,
        value: Option<String>,
    ) -> anyhow::Result<()> {
        if self.current_section_has_config {
            bail!(
                "Section config to enable {directive} must come before \
                everything else (including TOML configuration blocks).",
            );
        }
        if !self.current_section_files.is_empty() {
            bail!(
                "Section config to enable {directive} must come before \
                everything else (including embedded files).",
            );
        }
        let current_section = &mut self.sections[self.stack.top()];
        if current_section.directives.has_directive_set(directive) {
            bail!(
                "Section config to enable {directive} should appear \
                at most once.",
            );
        }
        current_section.directives.add_directive(directive, value);
        Ok(())
    }
    /// Pop sections until the top of the stack is shallower than `level`.
    fn pop_sections_to_level(&mut self, level: usize) {
        while level <= self.sections[self.stack.top()].level.into() {
            self.stack.pop();
            // We would have errored before pushing a child section if there were files, so we know
            // no parent section can have files.
            self.current_section_files.clear();
        }
    }
    /// The line number of `char_index` within the Markdown source.
    fn line_index(&self, char_index: TextSize) -> u32 {
        self.source.count_lines(TextRange::up_to(char_index))
    }
}
/// A single directive that can be applied to a Markdown test section via an
/// HTML comment (e.g. `<!-- snapshot-diagnostics -->`).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
enum MdtestDirective {
    /// A directive to enable snapshotting diagnostics.
    SnapshotDiagnostics,
    /// A directive to skip pull types.
    PullTypesSkip,
    /// A directive to expect the test to panic.
    ExpectPanic,
}

impl std::fmt::Display for MdtestDirective {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let description = match self {
            MdtestDirective::SnapshotDiagnostics => "snapshotting diagnostics",
            MdtestDirective::PullTypesSkip => "skipping the pull-types visitor",
            MdtestDirective::ExpectPanic => "expect test to panic",
        };
        f.write_str(description)
    }
}
/// The directives applied to a Markdown test section.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub(crate) struct MdtestDirectives {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/assertion.rs | crates/ty_test/src/assertion.rs | //! Parse type and type-error assertions in Python comment form.
//!
//! Parses comments of the form `# revealed: SomeType` and `# error: 8 [rule-code] "message text"`.
//! In the latter case, the `8` is a column number, and `"message text"` asserts that the full
//! diagnostic message contains the text `"message text"`; all three are optional (`# error:` will
//! match any error.)
//!
//! Assertion comments may be placed at end-of-line:
//!
//! ```py
//! x: int = "foo" # error: [invalid-assignment]
//! ```
//!
//! Or as a full-line comment on the preceding line:
//!
//! ```py
//! # error: [invalid-assignment]
//! x: int = "foo"
//! ```
//!
//! Multiple assertion comments may apply to the same line; in this case all (or all but the last)
//! must be full-line comments:
//!
//! ```py
//! # error: [unbound-name]
//! reveal_type(x) # revealed: Unbound
//! ```
//!
//! or
//!
//! ```py
//! # error: [unbound-name]
//! # revealed: Unbound
//! reveal_type(x)
//! ```
use crate::db::Db;
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_db::source::{SourceText, line_index, source_text};
use ruff_python_trivia::{CommentRanges, Cursor};
use ruff_source_file::{LineIndex, OneIndexed};
use ruff_text_size::{Ranged, TextRange, TextSize};
use smallvec::SmallVec;
use std::ops::Deref;
use std::str::FromStr;
/// Diagnostic assertion comments in a single embedded file.
#[derive(Debug)]
pub(crate) struct InlineFileAssertions {
    /// Ranges of all comments in the file.
    comment_ranges: CommentRanges,
    /// The file's source text.
    source: SourceText,
    /// Line index for mapping offsets to line numbers.
    lines: LineIndex,
}
impl InlineFileAssertions {
    /// Collect the comment ranges of `file` so its assertion comments can be
    /// iterated.
    pub(crate) fn from_file(db: &Db, file: File) -> Self {
        let parsed = parsed_module(db, file).load(db);
        Self {
            comment_ranges: CommentRanges::from(parsed.tokens()),
            source: source_text(db, file),
            lines: line_index(db, file),
        }
    }
    /// The one-based line on which `range` starts.
    fn line_number(&self, range: &impl Ranged) -> OneIndexed {
        self.lines.line_index(range.start())
    }
    /// Whether the assertion's comment is the only content on its line
    /// (as opposed to trailing code on the same line).
    fn is_own_line_comment(&self, ranged_assertion: &AssertionWithRange) -> bool {
        CommentRanges::is_own_line(ranged_assertion.start(), self.source.as_str())
    }
}
impl<'a> IntoIterator for &'a InlineFileAssertions {
type Item = LineAssertions<'a>;
type IntoIter = LineAssertionsIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
Self::IntoIter {
file_assertions: self,
inner: AssertionWithRangeIterator {
file_assertions: self,
inner: self.comment_ranges.into_iter(),
}
.peekable(),
}
}
}
/// An [`UnparsedAssertion`] with the [`TextRange`] of its original inline comment.
#[derive(Debug)]
struct AssertionWithRange<'a>(UnparsedAssertion<'a>, TextRange);
impl<'a> Deref for AssertionWithRange<'a> {
type Target = UnparsedAssertion<'a>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Ranged for AssertionWithRange<'_> {
fn range(&self) -> TextRange {
self.1
}
}
impl<'a> From<AssertionWithRange<'a>> for UnparsedAssertion<'a> {
fn from(value: AssertionWithRange<'a>) -> Self {
value.0
}
}
/// Iterator that yields all assertions within a single embedded Python file.
#[derive(Debug)]
struct AssertionWithRangeIterator<'a> {
    /// The file whose comments are being scanned.
    file_assertions: &'a InlineFileAssertions,
    /// Iterator over all comment ranges in the file.
    inner: std::iter::Copied<std::slice::Iter<'a, TextRange>>,
}
impl<'a> Iterator for AssertionWithRangeIterator<'a> {
    type Item = AssertionWithRange<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        // Skip over comments that are not assertion comments.
        for range in self.inner.by_ref() {
            let comment = &self.file_assertions.source[range];
            if let Some(assertion) = UnparsedAssertion::from_comment(comment) {
                return Some(AssertionWithRange(assertion, range));
            }
        }
        None
    }
}
// Once the underlying comment-range iterator is exhausted, `next` keeps
// returning `None`, so the iterator can be marked fused.
impl std::iter::FusedIterator for AssertionWithRangeIterator<'_> {}
/// A vector of [`UnparsedAssertion`]s belonging to a single line.
///
/// Most lines will have zero or one assertion, so we use a [`SmallVec`] optimized for a single
/// element to avoid most heap vector allocations.
type AssertionVec<'a> = SmallVec<[UnparsedAssertion<'a>; 1]>;
/// Iterator that groups a file's assertion comments by the line of code they
/// apply to.
#[derive(Debug)]
pub(crate) struct LineAssertionsIterator<'a> {
    /// The file whose assertions are being grouped.
    file_assertions: &'a InlineFileAssertions,
    /// Peekable stream of individual assertions, used for lookahead when
    /// grouping consecutive own-line comments.
    inner: std::iter::Peekable<AssertionWithRangeIterator<'a>>,
}
impl<'a> Iterator for LineAssertionsIterator<'a> {
    type Item = LineAssertions<'a>;

    /// Yield the next group of assertions together with the line of code they
    /// apply to: a stack of consecutive own-line comments (optionally ending
    /// in one trailing comment), or a single trailing comment on its own.
    fn next(&mut self) -> Option<Self::Item> {
        let file = self.file_assertions;
        let ranged_assertion = self.inner.next()?;
        let mut collector = AssertionVec::new();
        let mut line_number = file.line_number(&ranged_assertion);
        // Collect all own-line comments on consecutive lines; these all apply to the same line of
        // code. For example:
        //
        // ```py
        // # error: [unbound-name]
        // # revealed: Unbound
        // reveal_type(x)
        // ```
        //
        if file.is_own_line_comment(&ranged_assertion) {
            collector.push(ranged_assertion.into());
            let mut only_own_line = true;
            while let Some(ranged_assertion) = self.inner.peek() {
                let next_line_number = line_number.saturating_add(1);
                if file.line_number(ranged_assertion) == next_line_number {
                    if !file.is_own_line_comment(ranged_assertion) {
                        only_own_line = false;
                    }
                    line_number = next_line_number;
                    collector.push(self.inner.next().unwrap().into());
                    // If we see an end-of-line comment, it has to be the end of the stack,
                    // otherwise we'd botch this case, attributing all three errors to the `bar`
                    // line:
                    //
                    // ```py
                    // # error:
                    // foo # error:
                    // bar # error:
                    // ```
                    //
                    if !only_own_line {
                        break;
                    }
                } else {
                    break;
                }
            }
            if only_own_line {
                // The collected comments apply to the _next_ line in the code.
                line_number = line_number.saturating_add(1);
            }
        } else {
            // We have a line-trailing comment; it applies to its own line, and is not grouped.
            collector.push(ranged_assertion.into());
        }
        Some(LineAssertions {
            line_number,
            assertions: collector,
        })
    }
}
// `next` only returns `None` when the inner iterator is exhausted, so this
// iterator is fused as well.
impl std::iter::FusedIterator for LineAssertionsIterator<'_> {}
/// One or more assertions referring to the same line of code.
#[derive(Debug)]
pub(crate) struct LineAssertions<'a> {
    /// The line these assertions refer to.
    ///
    /// Not necessarily the same line the assertion comment is located on; for an own-line comment,
    /// it's the next non-assertion line.
    pub(crate) line_number: OneIndexed,
    /// The assertions referring to this line.
    pub(crate) assertions: AssertionVec<'a>,
}
impl<'a> Deref for LineAssertions<'a> {
    type Target = [UnparsedAssertion<'a>];

    /// Expose the assertions directly as a slice.
    fn deref(&self) -> &Self::Target {
        &self.assertions[..]
    }
}
/// A single diagnostic assertion comment.
///
/// This type represents an *attempted* assertion, but not necessarily a *valid* assertion.
/// Parsing is done lazily in `matcher.rs`; this allows us to emit nicer error messages
/// in the event of an invalid assertion.
#[derive(Debug)]
pub(crate) enum UnparsedAssertion<'a> {
    /// A `# revealed:` assertion.
    Revealed(&'a str),
    /// An `# error:` assertion.
    Error(&'a str),
}
impl<'a> UnparsedAssertion<'a> {
    /// Returns `Some(_)` if the comment starts with `# error:` or
    /// `# revealed:`, indicating that it is an assertion comment.
    fn from_comment(comment: &'a str) -> Option<Self> {
        let stripped = comment.trim().strip_prefix('#')?.trim();
        let (keyword, rest) = stripped.split_once(':')?;
        // Support other pragma comments coming after `error` or `revealed`, e.g.
        // `# error: [code] # type: ignore` (nested pragma comments)
        let body = match rest.split_once('#') {
            Some((before_nested, _)) => before_nested,
            None => rest,
        }
        .trim();
        match keyword.trim() {
            "revealed" => Some(Self::Revealed(body)),
            "error" => Some(Self::Error(body)),
            _ => None,
        }
    }
    /// Parse the attempted assertion into a [`ParsedAssertion`] structured representation.
    pub(crate) fn parse(&self) -> Result<ParsedAssertion<'a>, PragmaParseError<'a>> {
        match self {
            Self::Revealed(revealed) if revealed.is_empty() => {
                Err(PragmaParseError::EmptyRevealTypeAssertion)
            }
            Self::Revealed(revealed) => Ok(ParsedAssertion::Revealed(revealed)),
            Self::Error(error) => ErrorAssertion::from_str(error)
                .map(ParsedAssertion::Error)
                .map_err(PragmaParseError::ErrorAssertionParseError),
        }
    }
}
impl std::fmt::Display for UnparsedAssertion<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Both variants render as `<keyword>: <raw body>`.
        let (keyword, body) = match self {
            Self::Revealed(expected_type) => ("revealed", *expected_type),
            Self::Error(assertion) => ("error", *assertion),
        };
        write!(f, "{keyword}: {body}")
    }
}
/// An assertion comment that has been parsed and validated for correctness.
#[derive(Debug)]
pub(crate) enum ParsedAssertion<'a> {
    /// A `# revealed:` assertion; the payload is the expected type's display.
    Revealed(&'a str),
    /// An `# error:` assertion, parsed into its structured parts.
    Error(ErrorAssertion<'a>),
}
impl std::fmt::Display for ParsedAssertion<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Revealed(expected_type) => write!(f, "revealed: {expected_type}"),
            // `ErrorAssertion` has its own `Display` implementation, which
            // already emits the `error:` prefix; delegate to it.
            Self::Error(assertion) => std::fmt::Display::fmt(assertion, f),
        }
    }
}
/// A parsed and validated `# error:` assertion comment.
///
/// All fields are optional, but the parser guarantees that at least one of
/// `rule` or `message_contains` is present.
#[derive(Debug)]
pub(crate) struct ErrorAssertion<'a> {
    /// The diagnostic rule code we expect.
    pub(crate) rule: Option<&'a str>,
    /// The column we expect the diagnostic range to start at.
    pub(crate) column: Option<OneIndexed>,
    /// A string we expect to be contained in the diagnostic message.
    pub(crate) message_contains: Option<&'a str>,
}
impl<'a> ErrorAssertion<'a> {
    /// Parse the body of an `# error:` assertion (everything after the colon).
    fn from_str(source: &'a str) -> Result<Self, ErrorAssertionParseError<'a>> {
        ErrorAssertionParser::new(source).parse()
    }
}
impl std::fmt::Display for ErrorAssertion<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("error:")?;
if let Some(column) = self.column {
write!(f, " {column}")?;
}
if let Some(rule) = self.rule {
write!(f, " [{rule}]")?;
}
if let Some(message) = self.message_contains {
write!(f, r#" "{message}""#)?;
}
Ok(())
}
}
/// A parser to convert a string into a [`ErrorAssertion`].
#[derive(Debug, Clone)]
struct ErrorAssertionParser<'a> {
    /// Cursor over the characters of `comment_source`.
    cursor: Cursor<'a>,
    /// String slice representing all characters *after* the `# error:` prefix.
    comment_source: &'a str,
}
impl<'a> ErrorAssertionParser<'a> {
    /// Create a parser over the text following the `# error:` prefix.
    fn new(comment: &'a str) -> Self {
        Self {
            cursor: Cursor::new(comment),
            comment_source: comment,
        }
    }
    /// Consume characters in the assertion comment until we find a non-whitespace character
    fn skip_whitespace(&mut self) {
        self.cursor.eat_while(char::is_whitespace);
    }
    /// Attempt to parse the assertion comment into a [`ErrorAssertion`].
    ///
    /// Expected syntax (in this order): `<column> [<rule>] "<message substring>"`.
    /// The column and message are optional; at least one of the rule code or the
    /// message must be present.
    fn parse(mut self) -> Result<ErrorAssertion<'a>, ErrorAssertionParseError<'a>> {
        let mut column = None;
        let mut rule = None;
        self.skip_whitespace();
        while let Some(character) = self.cursor.bump() {
            match character {
                // column number
                '0'..='9' => {
                    if column.is_some() {
                        return Err(ErrorAssertionParseError::MultipleColumnNumbers);
                    }
                    if rule.is_some() {
                        return Err(ErrorAssertionParseError::ColumnNumberAfterRuleCode);
                    }
                    // `bump()` already consumed the first digit; back up one byte
                    // so the slice below includes it.
                    let offset = self.cursor.offset() - TextSize::new(1);
                    self.cursor.eat_while(|c| !c.is_whitespace());
                    let column_str =
                        &self.comment_source[TextRange::new(offset, self.cursor.offset())];
                    column = OneIndexed::from_str(column_str)
                        .map(Some)
                        .map_err(|e| ErrorAssertionParseError::BadColumnNumber(column_str, e))?;
                }
                // rule code
                '[' => {
                    if rule.is_some() {
                        return Err(ErrorAssertionParseError::MultipleRuleCodes);
                    }
                    let offset = self.cursor.offset();
                    self.cursor.eat_while(|c| c != ']');
                    // EOF before a `]` means the rule code was never closed.
                    if self.cursor.is_eof() {
                        return Err(ErrorAssertionParseError::UnclosedRuleCode);
                    }
                    rule = Some(
                        self.comment_source[TextRange::new(offset, self.cursor.offset())].trim(),
                    );
                    // Consume the closing `]`.
                    self.cursor.bump();
                }
                // message text
                '"' => {
                    // The message must be the final element of the assertion, so
                    // everything between here and the closing quote at the end of
                    // the (trimmed) comment is the expected substring.
                    let comment_source = self.comment_source.trim();
                    return if comment_source.ends_with('"') {
                        let rest = &comment_source
                            [self.cursor.offset().to_usize()..comment_source.len() - 1];
                        Ok(ErrorAssertion {
                            rule,
                            column,
                            message_contains: Some(rest),
                        })
                    } else {
                        Err(ErrorAssertionParseError::UnclosedMessage)
                    };
                }
                // Some other assumptions we make don't hold true if we hit this branch:
                '\n' | '\r' => {
                    unreachable!("Assertion comments should never contain newlines")
                }
                // something else (bad!)...
                unexpected => {
                    return Err(ErrorAssertionParseError::UnexpectedCharacter {
                        character: unexpected,
                        offset: self.cursor.offset().to_usize(),
                    });
                }
            }
            self.skip_whitespace();
        }
        // Reaching EOF without a message: a rule code is required (a bare column
        // number alone is rejected).
        if rule.is_some() {
            Ok(ErrorAssertion {
                rule,
                column,
                message_contains: None,
            })
        } else {
            Err(ErrorAssertionParseError::NoRuleOrMessage)
        }
    }
}
/// Enumeration of ways in which parsing an assertion comment can fail.
///
/// The assertion comment could be either a "revealed" assertion or an "error" assertion.
#[derive(Debug, thiserror::Error)]
pub(crate) enum PragmaParseError<'a> {
    /// A `# revealed:` assertion with no type after the colon.
    #[error("Must specify which type should be revealed")]
    EmptyRevealTypeAssertion,
    /// An `# error:` assertion whose body failed to parse; wraps the underlying error.
    #[error("{0}")]
    ErrorAssertionParseError(ErrorAssertionParseError<'a>),
}
/// Enumeration of ways in which parsing an *error* assertion comment can fail.
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorAssertionParseError<'a> {
    /// The assertion contained neither a `[rule]` code nor a `"message"` substring.
    #[error("no rule or message text")]
    NoRuleOrMessage,
    /// Something that started with a digit failed to parse as a column number.
    #[error("bad column number `{0}`")]
    BadColumnNumber(&'a str, #[source] std::num::ParseIntError),
    #[error("column number must precede the rule code")]
    ColumnNumberAfterRuleCode,
    #[error("multiple column numbers in one assertion")]
    MultipleColumnNumbers,
    #[error("expected ']' to close rule code")]
    UnclosedRuleCode,
    #[error("cannot use multiple rule codes in one assertion")]
    MultipleRuleCodes,
    #[error("expected '\"' to be the final character in an assertion with an error message")]
    UnclosedMessage,
    /// A character that cannot begin any assertion element was encountered.
    #[error(
        "unexpected character `{character}` at offset {offset} (relative to the `:` in the assertion comment)"
    )]
    UnexpectedCharacter { character: char, offset: usize },
}
#[cfg(test)]
mod tests {
    use super::*;
    use ruff_db::system::DbWithWritableSystem as _;
    use ruff_db::{Db as _, files::system_path_to_file};
    use ruff_python_trivia::textwrap::dedent;
    use ruff_source_file::OneIndexed;
    use ty_module_resolver::SearchPathSettings;
    use ty_python_semantic::{Program, ProgramSettings, PythonPlatform, PythonVersionWithSource};
    /// Write `source` to `/src/test.py` in a fresh test database and collect
    /// the inline assertions from it.
    fn get_assertions(source: &str) -> InlineFileAssertions {
        let mut db = Db::setup();
        let settings = ProgramSettings {
            python_version: PythonVersionWithSource::default(),
            python_platform: PythonPlatform::default(),
            search_paths: SearchPathSettings::new(Vec::new())
                .to_search_paths(db.system(), db.vendored())
                .unwrap(),
        };
        Program::init_or_update(&mut db, settings);
        db.write_file("/src/test.py", source).unwrap();
        let file = system_path_to_file(&db, "/src/test.py").unwrap();
        InlineFileAssertions::from_file(&db, file)
    }
    /// Collect the per-line assertion groups into a `Vec` for easy slice-pattern matching.
    fn as_vec(assertions: &InlineFileAssertions) -> Vec<LineAssertions<'_>> {
        assertions.into_iter().collect()
    }
    #[test]
    fn ty_display() {
        let assertions = get_assertions(&dedent(
            "
            reveal_type(1) # revealed: Literal[1]
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(1));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), "revealed: Literal[1]");
    }
    #[test]
    fn error() {
        let assertions = get_assertions(&dedent(
            "
            x # error:
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(1));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), "error: ");
    }
    // An own-line assertion applies to the next non-assertion line.
    #[test]
    fn prior_line() {
        let assertions = get_assertions(&dedent(
            "
            # revealed: Literal[1]
            reveal_type(1)
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(2));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), "revealed: Literal[1]");
    }
    // Multiple own-line assertions stack onto the same target line.
    #[test]
    fn stacked_prior_line() {
        let assertions = get_assertions(&dedent(
            "
            # revealed: Unbound
            # error: [unbound-name]
            reveal_type(x)
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(3));
        let [assert1, assert2] = &line.assertions[..] else {
            panic!("expected two assertions");
        };
        assert_eq!(format!("{assert1}"), "revealed: Unbound");
        assert_eq!(format!("{assert2}"), "error: [unbound-name]");
    }
    // An own-line assertion and a line-trailing assertion can both target one line.
    #[test]
    fn stacked_mixed() {
        let assertions = get_assertions(&dedent(
            "
            # revealed: Unbound
            reveal_type(x) # error: [unbound-name]
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(2));
        let [assert1, assert2] = &line.assertions[..] else {
            panic!("expected two assertions");
        };
        assert_eq!(format!("{assert1}"), "revealed: Unbound");
        assert_eq!(format!("{assert2}"), "error: [unbound-name]");
    }
    #[test]
    fn multiple_lines() {
        let assertions = get_assertions(&dedent(
            r#"
            # error: [invalid-assignment]
            x: int = "foo"
            y # error: [unbound-name]
            "#,
        ));
        let [line1, line2] = &as_vec(&assertions)[..] else {
            panic!("expected two lines");
        };
        assert_eq!(line1.line_number, OneIndexed::from_zero_indexed(2));
        assert_eq!(line2.line_number, OneIndexed::from_zero_indexed(3));
        let [UnparsedAssertion::Error(error1)] = &line1.assertions[..] else {
            panic!("expected one error assertion");
        };
        let error1 = ErrorAssertion::from_str(error1).unwrap();
        assert_eq!(error1.rule, Some("invalid-assignment"));
        let [UnparsedAssertion::Error(error2)] = &line2.assertions[..] else {
            panic!("expected one error assertion");
        };
        let error2 = ErrorAssertion::from_str(error2).unwrap();
        assert_eq!(error2.rule, Some("unbound-name"));
    }
    #[test]
    fn multiple_lines_mixed_stack() {
        let assertions = get_assertions(&dedent(
            r#"
            # error: [invalid-assignment]
            x: int = reveal_type("foo") # revealed: str
            y # error: [unbound-name]
            "#,
        ));
        let [line1, line2] = &as_vec(&assertions)[..] else {
            panic!("expected two lines");
        };
        assert_eq!(line1.line_number, OneIndexed::from_zero_indexed(2));
        assert_eq!(line2.line_number, OneIndexed::from_zero_indexed(3));
        let [
            UnparsedAssertion::Error(error1),
            UnparsedAssertion::Revealed(expected_ty),
        ] = &line1.assertions[..]
        else {
            panic!("expected one error assertion and one Revealed assertion");
        };
        let error1 = ErrorAssertion::from_str(error1).unwrap();
        assert_eq!(error1.rule, Some("invalid-assignment"));
        assert_eq!(expected_ty.trim(), "str");
        let [UnparsedAssertion::Error(error2)] = &line2.assertions[..] else {
            panic!("expected one error assertion");
        };
        let error2 = ErrorAssertion::from_str(error2).unwrap();
        assert_eq!(error2.rule, Some("unbound-name"));
    }
    #[test]
    fn error_with_rule() {
        let assertions = get_assertions(&dedent(
            "
            x # error: [unbound-name]
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(1));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), "error: [unbound-name]");
    }
    #[test]
    fn error_with_rule_and_column() {
        let assertions = get_assertions(&dedent(
            "
            x # error: 1 [unbound-name]
            ",
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(1));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), "error: 1 [unbound-name]");
    }
    #[test]
    fn error_with_rule_and_message() {
        let assertions = get_assertions(&dedent(
            r#"
            # error: [unbound-name] "`x` is unbound"
            x
            "#,
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(2));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(
            format!("{assert}"),
            r#"error: [unbound-name] "`x` is unbound""#
        );
    }
    #[test]
    fn error_with_message_and_column() {
        let assertions = get_assertions(&dedent(
            r#"
            # error: 1 "`x` is unbound"
            x
            "#,
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(2));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(format!("{assert}"), r#"error: 1 "`x` is unbound""#);
    }
    #[test]
    fn error_with_rule_and_message_and_column() {
        let assertions = get_assertions(&dedent(
            r#"
            # error: 1 [unbound-name] "`x` is unbound"
            x
            "#,
        ));
        let [line] = &as_vec(&assertions)[..] else {
            panic!("expected one line");
        };
        assert_eq!(line.line_number, OneIndexed::from_zero_indexed(2));
        let [assert] = &line.assertions[..] else {
            panic!("expected one assertion");
        };
        assert_eq!(
            format!("{assert}"),
            r#"error: 1 [unbound-name] "`x` is unbound""#
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/matcher.rs | crates/ty_test/src/matcher.rs | //! Match [`Diagnostic`]s against assertions and produce test failure
//! messages for any mismatches.
use std::borrow::Cow;
use std::cmp::Ordering;
use std::ops::Range;
use std::sync::LazyLock;
use colored::Colorize;
use path_slash::PathExt;
use ruff_db::diagnostic::{Diagnostic, DiagnosticId};
use ruff_db::files::File;
use ruff_db::source::{SourceText, line_index, source_text};
use ruff_source_file::{LineIndex, OneIndexed};
use crate::assertion::{InlineFileAssertions, ParsedAssertion, UnparsedAssertion};
use crate::db::Db;
use crate::diagnostic::SortedDiagnostics;
/// Failure messages collected while matching a file, grouped by source line.
#[derive(Debug, Default)]
pub(super) struct FailuresByLine {
    /// All failure messages, in insertion order.
    failures: Vec<String>,
    /// Per-line views into `failures` (each entry holds an index range).
    lines: Vec<LineFailures>,
}
impl FailuresByLine {
    /// Iterate over `(line number, failure messages)` pairs in insertion order.
    pub(super) fn iter(&self) -> impl Iterator<Item = (OneIndexed, &[String])> {
        self.lines
            .iter()
            .map(|entry| (entry.line_number, &self.failures[entry.range.clone()]))
    }
    /// Record a batch of failure messages attributed to `line_number`.
    pub(super) fn push(&mut self, line_number: OneIndexed, messages: Vec<String>) {
        let range_start = self.failures.len();
        self.failures.extend(messages);
        let range = range_start..self.failures.len();
        self.lines.push(LineFailures { line_number, range });
    }
    /// `true` if no failures have been recorded.
    fn is_empty(&self) -> bool {
        self.lines.is_empty()
    }
}
/// A line number plus the range of indices into `FailuresByLine::failures`
/// holding that line's messages.
#[derive(Debug)]
struct LineFailures {
    line_number: OneIndexed,
    range: Range<usize>,
}
/// Check all `diagnostics` for `file` against the inline assertion comments in
/// its source.
///
/// Returns `Ok(())` when every assertion matched a diagnostic and every
/// diagnostic matched an assertion; otherwise returns the failure messages,
/// grouped by line.
pub(super) fn match_file(
    db: &Db,
    file: File,
    diagnostics: &[Diagnostic],
) -> Result<(), FailuresByLine> {
    // Parse assertions from comments in the file, and get diagnostics from the file; both
    // ordered by line number.
    let assertions = InlineFileAssertions::from_file(db, file);
    let diagnostics = SortedDiagnostics::new(diagnostics, &line_index(db, file));
    // Get iterators over assertions and diagnostics grouped by line, in ascending line order.
    let mut line_assertions = assertions.into_iter();
    let mut line_diagnostics = diagnostics.iter_lines();
    let mut current_assertions = line_assertions.next();
    let mut current_diagnostics = line_diagnostics.next();
    let matcher = Matcher::from_file(db, file);
    let mut failures = FailuresByLine::default();
    // Advance the two line-ordered streams in lockstep, like the merge step of a
    // merge sort: equal line numbers are matched against each other, and a line
    // present in only one stream is unmatched by definition.
    loop {
        match (&current_assertions, &current_diagnostics) {
            (Some(assertions), Some(diagnostics)) => {
                match assertions.line_number.cmp(&diagnostics.line_number) {
                    Ordering::Equal => {
                        // We have assertions and diagnostics on the same line; check for
                        // matches and error on any that don't match, then advance both
                        // iterators.
                        matcher
                            .match_line(diagnostics, assertions)
                            .unwrap_or_else(|messages| {
                                failures.push(assertions.line_number, messages);
                            });
                        current_assertions = line_assertions.next();
                        current_diagnostics = line_diagnostics.next();
                    }
                    Ordering::Less => {
                        // We have assertions on an earlier line than diagnostics; report these
                        // assertions as all unmatched, and advance the assertions iterator.
                        failures.push(assertions.line_number, unmatched(assertions));
                        current_assertions = line_assertions.next();
                    }
                    Ordering::Greater => {
                        // We have diagnostics on an earlier line than assertions; report these
                        // diagnostics as all unmatched, and advance the diagnostics iterator.
                        failures.push(diagnostics.line_number, unmatched(diagnostics));
                        current_diagnostics = line_diagnostics.next();
                    }
                }
            }
            (Some(assertions), None) => {
                // We've exhausted diagnostics but still have assertions; report these assertions
                // as unmatched and advance the assertions iterator.
                failures.push(assertions.line_number, unmatched(assertions));
                current_assertions = line_assertions.next();
            }
            (None, Some(diagnostics)) => {
                // We've exhausted assertions but still have diagnostics; report these
                // diagnostics as unmatched and advance the diagnostics iterator.
                failures.push(diagnostics.line_number, unmatched(diagnostics));
                current_diagnostics = line_diagnostics.next();
            }
            // When we've exhausted both diagnostics and assertions, break.
            (None, None) => break,
        }
    }
    if failures.is_empty() {
        Ok(())
    } else {
        Err(failures)
    }
}
/// Render an unmatched assertion or diagnostic as a test-failure message.
trait Unmatched {
    fn unmatched(&self) -> String;
}

/// Render every item in `unmatched` as a failure message.
//
// The explicit `<'a, T: Unmatched + 'a>` lifetime from the original signature
// was needless (the returned `Vec<String>` borrows nothing); elision suffices.
fn unmatched<T: Unmatched>(unmatched: &[T]) -> Vec<String> {
    unmatched.iter().map(Unmatched::unmatched).collect()
}
/// Like [`Unmatched`], but the rendered message includes the given column number.
trait UnmatchedWithColumn {
    fn unmatched_with_column(&self, column: OneIndexed) -> String;
}
// This is necessary since we only parse assertions lazily,
// and sometimes we know before parsing any assertions that an assertion will be unmatched,
// e.g. if we've exhausted all diagnostics but there are still assertions left.
//
// TODO: the lazy parsing means that we sometimes won't report malformed assertions as
// being invalid if we detect that they'll be unmatched before parsing them.
// That's perhaps not the best user experience.
impl Unmatched for UnparsedAssertion<'_> {
    fn unmatched(&self) -> String {
        // `Display` for `UnparsedAssertion` reproduces the assertion text.
        format!("{} {self}", "unmatched assertion:".red())
    }
}
impl Unmatched for ParsedAssertion<'_> {
    fn unmatched(&self) -> String {
        // `Display` for `ParsedAssertion` reproduces the assertion in its written syntax.
        format!("{} {self}", "unmatched assertion:".red())
    }
}
fn maybe_add_undefined_reveal_clarification(
diagnostic: &Diagnostic,
original: std::fmt::Arguments,
) -> String {
if diagnostic.id().is_lint_named("undefined-reveal") {
format!(
"{} add a `# revealed` assertion on this line (original diagnostic: {original})",
"used built-in `reveal_type`:".yellow()
)
} else {
format!("{} {original}", "unexpected error:".red())
}
}
impl Unmatched for &Diagnostic {
    fn unmatched(&self) -> String {
        // Rendered as `[<id>] "<message>"`, with a friendlier prefix for
        // `undefined-reveal` diagnostics.
        maybe_add_undefined_reveal_clarification(
            self,
            format_args!(
                r#"[{id}] "{message}""#,
                id = self.id(),
                message = self.concise_message()
            ),
        )
    }
}
impl UnmatchedWithColumn for &Diagnostic {
    fn unmatched_with_column(&self, column: OneIndexed) -> String {
        // Same as `unmatched`, but with the column prepended: `<col> [<id>] "<message>"`.
        maybe_add_undefined_reveal_clarification(
            self,
            format_args!(
                r#"{column} [{id}] "{message}""#,
                id = self.id(),
                message = self.concise_message()
            ),
        )
    }
}
/// Discard `@Todo`-type metadata from expected types, which is not available
/// when running in release mode.
fn discard_todo_metadata(ty: &str) -> Cow<'_, str> {
    // Release builds: collapse `@Todo(...)` down to a bare `@Todo`.
    #[cfg(not(debug_assertions))]
    {
        static TODO_METADATA_REGEX: LazyLock<regex::Regex> =
            LazyLock::new(|| regex::Regex::new(r"@Todo\([^)]*\)").unwrap());
        TODO_METADATA_REGEX.replace_all(ty, "@Todo")
    }
    // Debug builds keep the metadata: the expected type passes through unchanged.
    #[cfg(debug_assertions)]
    Cow::Borrowed(ty)
}
/// Normalize paths in diagnostics to Unix paths before comparing them against
/// the expected type. Doing otherwise means that it's hard to write cross-platform
/// tests, since in some edge cases the display of a type can include a path to the
/// file in which the type was defined (e.g. `foo.bar.A @ src/foo/bar.py:10` on Unix,
/// but `foo.bar.A @ src\foo\bar.py:10` on Windows).
fn normalize_paths(ty: &str) -> Cow<'_, str> {
    // Capture groups: (1) the ` @ ` separator, (2) the path, (3) the `.py`/`.pyi`
    // suffix plus `:` and the first digit of the line number.
    static PATH_IN_CLASS_DISPLAY_REGEX: LazyLock<regex::Regex> =
        LazyLock::new(|| regex::Regex::new(r"( @ )(.+)(\.pyi?:\d)").unwrap());
    PATH_IN_CLASS_DISPLAY_REGEX.replace_all(ty, |captures: &regex::Captures| {
        let unix_path = std::path::Path::new(&captures[2])
            .to_slash()
            .expect("Python module paths should be valid UTF-8");
        format!("{}{}{}", &captures[1], unix_path, &captures[3])
    })
}
/// Matches one file's diagnostics against its assertions, holding the line
/// index and source text needed to compute diagnostic columns.
struct Matcher {
    line_index: LineIndex,
    source: SourceText,
}
impl Matcher {
    /// Build a matcher holding the line index and source text for `file`.
    fn from_file(db: &Db, file: File) -> Self {
        Self {
            line_index: line_index(db, file),
            source: source_text(db, file),
        }
    }
    /// Check a slice of [`Diagnostic`]s against a slice of
    /// [`UnparsedAssertion`]s.
    ///
    /// Return vector of [`Unmatched`] for any unmatched diagnostics or
    /// assertions.
    fn match_line<'a, 'b>(
        &self,
        diagnostics: &'a [&'a Diagnostic],
        assertions: &'a [UnparsedAssertion<'b>],
    ) -> Result<(), Vec<String>>
    where
        'b: 'a,
    {
        let mut failures = vec![];
        // Diagnostics not yet claimed by any assertion; matches remove entries.
        let mut unmatched = diagnostics.to_vec();
        for assertion in assertions {
            match assertion.parse() {
                Ok(assertion) => {
                    if !self.matches(&assertion, &mut unmatched) {
                        failures.push(assertion.unmatched());
                    }
                }
                Err(error) => {
                    failures.push(format!("{} {}", "invalid assertion:".red(), error));
                }
            }
        }
        // Any diagnostics left over were not claimed by any assertion.
        for diagnostic in unmatched {
            failures.push(diagnostic.unmatched_with_column(self.column(diagnostic)));
        }
        if failures.is_empty() {
            Ok(())
        } else {
            Err(failures)
        }
    }
    /// One-indexed start column of the diagnostic's primary range, falling back
    /// to the first column if the diagnostic has no primary span or range.
    fn column(&self, diagnostic: &Diagnostic) -> OneIndexed {
        diagnostic
            .primary_span()
            .and_then(|span| span.range())
            .map(|range| {
                self.line_index
                    .line_column(range.start(), &self.source)
                    .column
            })
            .unwrap_or(OneIndexed::from_zero_indexed(0))
    }
    /// Check if `assertion` matches any [`Diagnostic`]s in `unmatched`.
    ///
    /// If so, return `true` and remove the matched diagnostics from `unmatched`. Otherwise, return
    /// `false`.
    ///
    /// An `Error` assertion can only match one diagnostic; even if it could match more than one,
    /// we short-circuit after the first match.
    ///
    /// A `Revealed` assertion must match a revealed-type diagnostic, and may also match an
    /// undefined-reveal diagnostic, if present.
    fn matches(&self, assertion: &ParsedAssertion, unmatched: &mut Vec<&Diagnostic>) -> bool {
        match assertion {
            ParsedAssertion::Error(error) => {
                // Each element the assertion specifies (rule, column, message) must
                // agree with the diagnostic; unspecified elements match anything.
                let position = unmatched.iter().position(|diagnostic| {
                    // No rule specified, or the rule matches the lint name or the raw id.
                    let lint_name_matches = !error.rule.is_some_and(|rule| {
                        !(diagnostic.id().is_lint_named(rule) || diagnostic.id().as_str() == rule)
                    });
                    let column_matches = error
                        .column
                        .is_none_or(|col| col == self.column(diagnostic));
                    let message_matches = error.message_contains.is_none_or(|needle| {
                        normalize_paths(&diagnostic.concise_message().to_str()).contains(needle)
                    });
                    lint_name_matches && column_matches && message_matches
                });
                if let Some(position) = position {
                    unmatched.swap_remove(position);
                    true
                } else {
                    false
                }
            }
            ParsedAssertion::Revealed(expected_type) => {
                let expected_type = discard_todo_metadata(expected_type);
                let expected_reveal_type_message = format!("`{expected_type}`");
                let diagnostic_matches_reveal = |diagnostic: &Diagnostic| {
                    if diagnostic.id() != DiagnosticId::RevealedType {
                        return false;
                    }
                    let primary_message = diagnostic.primary_message();
                    let Some(primary_annotation) =
                        (diagnostic.primary_annotation()).and_then(|a| a.get_message())
                    else {
                        return false;
                    };
                    // reveal_type, reveal_protocol_interface
                    if matches!(
                        primary_message,
                        "Revealed type" | "Revealed protocol interface"
                    ) && primary_annotation == expected_reveal_type_message
                    {
                        return true;
                    }
                    // reveal_when_assignable_to, reveal_when_subtype_of, reveal_mro
                    if matches!(
                        primary_message,
                        "Assignability holds" | "Subtyping holds" | "Revealed MRO"
                    ) && primary_annotation == expected_type
                    {
                        return true;
                    }
                    false
                };
                // Find (at most) one revealed-type diagnostic and one undefined-reveal
                // diagnostic; stop early once both have been found.
                let mut matched_revealed_type = None;
                let mut matched_undefined_reveal = None;
                for (index, diagnostic) in unmatched.iter().enumerate() {
                    if matched_revealed_type.is_none() && diagnostic_matches_reveal(diagnostic) {
                        matched_revealed_type = Some(index);
                    } else if matched_undefined_reveal.is_none()
                        && diagnostic.id().is_lint_named("undefined-reveal")
                    {
                        matched_undefined_reveal = Some(index);
                    }
                    if matched_revealed_type.is_some() && matched_undefined_reveal.is_some() {
                        break;
                    }
                }
                // Drop both matched diagnostics (if any) in a single order-preserving
                // pass; `idx` tracks the element's original index during `retain`.
                let mut idx = 0;
                unmatched.retain(|_| {
                    let retain =
                        Some(idx) != matched_revealed_type && Some(idx) != matched_undefined_reveal;
                    idx += 1;
                    retain
                });
                // Only a revealed-type match counts; an undefined-reveal alone does not.
                matched_revealed_type.is_some()
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::FailuresByLine;
use ruff_db::Db;
use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, Severity, Span};
use ruff_db::files::{File, system_path_to_file};
use ruff_db::system::DbWithWritableSystem as _;
use ruff_python_trivia::textwrap::dedent;
use ruff_source_file::OneIndexed;
use ruff_text_size::TextRange;
use ty_module_resolver::SearchPathSettings;
use ty_python_semantic::{Program, ProgramSettings, PythonPlatform, PythonVersionWithSource};
struct ExpectedDiagnostic {
id: DiagnosticId,
message: &'static str,
range: TextRange,
}
impl ExpectedDiagnostic {
fn new(id: DiagnosticId, message: &'static str, offset: usize) -> Self {
let offset: u32 = offset.try_into().unwrap();
Self {
id,
message,
range: TextRange::new(offset.into(), (offset + 1).into()),
}
}
fn into_diagnostic(self, file: File) -> Diagnostic {
let mut diag = if self.id == DiagnosticId::RevealedType {
Diagnostic::new(self.id, Severity::Error, "Revealed type")
} else {
Diagnostic::new(self.id, Severity::Error, self.message)
};
let span = Span::from(file).with_range(self.range);
let mut annotation = Annotation::primary(span);
if self.id == DiagnosticId::RevealedType {
annotation = annotation.message(self.message);
}
diag.annotate(annotation);
diag
}
}
fn get_result(
source: &str,
expected_diagnostics: Vec<ExpectedDiagnostic>,
) -> Result<(), FailuresByLine> {
colored::control::set_override(false);
let mut db = crate::db::Db::setup();
let settings = ProgramSettings {
python_version: PythonVersionWithSource::default(),
python_platform: PythonPlatform::default(),
search_paths: SearchPathSettings::new(Vec::new())
.to_search_paths(db.system(), db.vendored())
.expect("Valid search paths settings"),
};
Program::init_or_update(&mut db, settings);
db.write_file("/src/test.py", source).unwrap();
let file = system_path_to_file(&db, "/src/test.py").unwrap();
let diagnostics: Vec<Diagnostic> = expected_diagnostics
.into_iter()
.map(|diagnostic| diagnostic.into_diagnostic(file))
.collect();
super::match_file(&db, file, &diagnostics)
}
fn assert_fail(result: Result<(), FailuresByLine>, messages: &[(usize, &[&str])]) {
let Err(failures) = result else {
panic!("expected a failure");
};
let expected: Vec<(OneIndexed, Vec<String>)> = messages
.iter()
.map(|(idx, msgs)| {
(
OneIndexed::from_zero_indexed(*idx),
msgs.iter().map(ToString::to_string).collect(),
)
})
.collect();
let failures: Vec<(OneIndexed, Vec<String>)> = failures
.iter()
.map(|(idx, msgs)| (idx, msgs.to_vec()))
.collect();
assert_eq!(failures, expected);
}
fn assert_ok(result: &Result<(), FailuresByLine>) {
assert!(result.is_ok(), "{result:?}");
}
#[test]
fn revealed_match() {
let result = get_result(
"x # revealed: Foo",
vec![ExpectedDiagnostic::new(
DiagnosticId::RevealedType,
"`Foo`",
0,
)],
);
assert_ok(&result);
}
#[test]
fn revealed_wrong_rule() {
let result = get_result(
"x # revealed: Foo",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("not-revealed-type"),
"`Foo`",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: revealed: Foo",
r#"unexpected error: 1 [not-revealed-type] "`Foo`""#,
],
)],
);
}
#[test]
fn revealed_wrong_message() {
let result = get_result(
"x # revealed: Foo",
vec![ExpectedDiagnostic::new(
DiagnosticId::RevealedType,
"Something else",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: revealed: Foo",
r#"unexpected error: 1 [revealed-type] "Revealed type: Something else""#,
],
)],
);
}
#[test]
fn revealed_unmatched() {
let result = get_result("x # revealed: Foo", vec![]);
assert_fail(result, &[(0, &["unmatched assertion: revealed: Foo"])]);
}
#[test]
fn revealed_match_with_undefined() {
let result = get_result(
"x # revealed: Foo",
vec![
ExpectedDiagnostic::new(DiagnosticId::RevealedType, "`Foo`", 0),
ExpectedDiagnostic::new(
DiagnosticId::lint("undefined-reveal"),
"Doesn't matter",
0,
),
],
);
assert_ok(&result);
}
#[test]
fn revealed_match_with_only_undefined() {
let result = get_result(
"x # revealed: Foo",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("undefined-reveal"),
"Doesn't matter",
0,
)],
);
assert_fail(result, &[(0, &["unmatched assertion: revealed: Foo"])]);
}
#[test]
fn revealed_mismatch_with_undefined() {
let result = get_result(
"x # revealed: Foo",
vec![
ExpectedDiagnostic::new(DiagnosticId::RevealedType, "`Bar`", 0),
ExpectedDiagnostic::new(
DiagnosticId::lint("undefined-reveal"),
"Doesn't matter",
0,
),
],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: revealed: Foo",
r#"unexpected error: 1 [revealed-type] "Revealed type: `Bar`""#,
],
)],
);
}
#[test]
fn undefined_reveal_type_unmatched() {
let result = get_result(
"reveal_type(1)",
vec![
ExpectedDiagnostic::new(
DiagnosticId::lint("undefined-reveal"),
"undefined reveal message",
0,
),
ExpectedDiagnostic::new(DiagnosticId::RevealedType, "`Literal[1]`", 12),
],
);
assert_fail(
result,
&[(
0,
&[
"used built-in `reveal_type`: add a `# revealed` assertion on this line (\
original diagnostic: [undefined-reveal] \"undefined reveal message\")",
r#"unexpected error: [revealed-type] "Revealed type: `Literal[1]`""#,
],
)],
);
}
#[test]
fn undefined_reveal_type_mismatched() {
let result = get_result(
"reveal_type(1) # error: [something-else]",
vec![
ExpectedDiagnostic::new(
DiagnosticId::lint("undefined-reveal"),
"undefined reveal message",
0,
),
ExpectedDiagnostic::new(DiagnosticId::RevealedType, "`Literal[1]`", 12),
],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: error: [something-else]",
"used built-in `reveal_type`: add a `# revealed` assertion on this line (\
original diagnostic: 1 [undefined-reveal] \"undefined reveal message\")",
r#"unexpected error: 13 [revealed-type] "Revealed type: `Literal[1]`""#,
],
)],
);
}
#[test]
fn error_unmatched() {
let result = get_result("x # error: [rule]", vec![]);
assert_fail(result, &[(0, &["unmatched assertion: error: [rule]"])]);
}
#[test]
fn error_match_rule() {
let result = get_result(
"x # error: [some-rule]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Any message",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_rule_no_whitespace() {
let result = get_result(
"x #error:[some-rule]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Any message",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_rule_lots_of_whitespace() {
let result = get_result(
"x # error : [ some-rule ]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Any message",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_wrong_rule() {
let result = get_result(
"x # error: [some-rule]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("anything"),
"Any message",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: error: [some-rule]",
r#"unexpected error: 1 [anything] "Any message""#,
],
)],
);
}
#[test]
fn error_match_message() {
let result = get_result(
r#"x # error: "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("anything"),
"message contains this",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_wrong_message() {
let result = get_result(
r#"x # error: "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("anything"),
"Any message",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
r#"unmatched assertion: error: "contains this""#,
r#"unexpected error: 1 [anything] "Any message""#,
],
)],
);
}
#[test]
fn error_match_column_and_rule() {
let result = get_result(
"x # error: 1 [some-rule]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Any message",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_column_and_rule_and_message() {
let result = get_result(
r#"x # error: 5 [some-rule] "Some message""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Some message",
4,
)],
);
assert_ok(&result);
}
#[test]
fn error_wrong_column() {
let result = get_result(
"x # error: 2 [rule]",
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("rule"),
"Any message",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
"unmatched assertion: error: 2 [rule]",
r#"unexpected error: 1 [rule] "Any message""#,
],
)],
);
}
#[test]
fn error_match_column_and_message() {
let result = get_result(
r#"x # error: 1 "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("anything"),
"message contains this",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_rule_and_message() {
let result = get_result(
r#"x # error: [a-rule] "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("a-rule"),
"message contains this",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_all() {
let result = get_result(
r#"x # error: 1 [a-rule] "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("a-rule"),
"message contains this",
0,
)],
);
assert_ok(&result);
}
#[test]
fn error_match_all_wrong_column() {
let result = get_result(
r#"x # error: 2 [some-rule] "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"message contains this",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
r#"unmatched assertion: error: 2 [some-rule] "contains this""#,
r#"unexpected error: 1 [some-rule] "message contains this""#,
],
)],
);
}
#[test]
fn error_match_all_wrong_rule() {
let result = get_result(
r#"x # error: 1 [some-rule] "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("other-rule"),
"message contains this",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
r#"unmatched assertion: error: 1 [some-rule] "contains this""#,
r#"unexpected error: 1 [other-rule] "message contains this""#,
],
)],
);
}
#[test]
fn error_match_all_wrong_message() {
let result = get_result(
r#"x # error: 1 [some-rule] "contains this""#,
vec![ExpectedDiagnostic::new(
DiagnosticId::lint("some-rule"),
"Any message",
0,
)],
);
assert_fail(
result,
&[(
0,
&[
r#"unmatched assertion: error: 1 [some-rule] "contains this""#,
r#"unexpected error: 1 [some-rule] "Any message""#,
],
)],
);
}
#[test]
fn interspersed_matches_and_mismatches() {
let source = dedent(
r#"
1 # error: [line-one]
2
3 # error: [line-three]
4 # error: [line-four]
5
6: # error: [line-six]
"#,
);
let two = source.find('2').unwrap();
let three = source.find('3').unwrap();
let five = source.find('5').unwrap();
let result = get_result(
&source,
vec![
ExpectedDiagnostic::new(DiagnosticId::lint("line-two"), "msg", two),
ExpectedDiagnostic::new(DiagnosticId::lint("line-three"), "msg", three),
ExpectedDiagnostic::new(DiagnosticId::lint("line-five"), "msg", five),
],
);
assert_fail(
result,
&[
(1, &["unmatched assertion: error: [line-one]"]),
(2, &[r#"unexpected error: [line-two] "msg""#]),
(4, &["unmatched assertion: error: [line-four]"]),
(5, &[r#"unexpected error: [line-five] "msg""#]),
(6, &["unmatched assertion: error: [line-six]"]),
],
);
}
#[test]
fn more_diagnostics_than_assertions() {
let source = dedent(
r#"
1 # error: [line-one]
2
"#,
);
let one = source.find('1').unwrap();
let two = source.find('2').unwrap();
let result = get_result(
&source,
vec![
ExpectedDiagnostic::new(DiagnosticId::lint("line-one"), "msg", one),
ExpectedDiagnostic::new(DiagnosticId::lint("line-two"), "msg", two),
],
);
assert_fail(result, &[(2, &[r#"unexpected error: [line-two] "msg""#])]);
}
#[test]
fn multiple_assertions_and_diagnostics_same_line() {
let source = dedent(
"
# error: [one-rule]
# error: [other-rule]
x
",
);
let x = source.find('x').unwrap();
let result = get_result(
&source,
vec![
ExpectedDiagnostic::new(DiagnosticId::lint("one-rule"), "msg", x),
ExpectedDiagnostic::new(DiagnosticId::lint("other-rule"), "msg", x),
],
);
assert_ok(&result);
}
#[test]
fn multiple_assertions_and_diagnostics_same_line_all_same() {
let source = dedent(
"
# error: [one-rule]
# error: [one-rule]
x
",
);
let x = source.find('x').unwrap();
let result = get_result(
&source,
vec![
ExpectedDiagnostic::new(DiagnosticId::lint("one-rule"), "msg", x),
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/external_dependencies.rs | crates/ty_test/src/external_dependencies.rs | use crate::db::Db;
use anyhow::{Context, Result, anyhow, bail};
use camino::Utf8Path;
use ruff_db::system::{DbWithWritableSystem as _, OsSystem, SystemPath};
use ruff_python_ast::PythonVersion;
use ty_python_semantic::{PythonEnvironment, PythonPlatform, SysPrefixPathOrigin};
/// Setup a virtual environment in the in-memory filesystem of `db` with
/// the specified dependencies installed.
///
/// A throwaway project is created in a real temporary directory, `uv sync`
/// installs `dependencies` into it (using the lockfile at `lockfile_path`,
/// unless `MDTEST_UPGRADE_LOCKFILES=1` is set, in which case a fresh lockfile
/// is generated and written back), and the resulting `site-packages` tree is
/// then mirrored into `db` under `dest_venv_path`.
///
/// Fails if `uv` is unavailable, the platform is unspecified (`All`), the
/// lockfile is missing/out of date in locked mode, or any filesystem step
/// errors.
pub(crate) fn setup_venv(
    db: &mut Db,
    dependencies: &[String],
    python_version: PythonVersion,
    python_platform: &PythonPlatform,
    dest_venv_path: &SystemPath,
    lockfile_path: &Utf8Path,
) -> Result<()> {
    // Create a temporary directory for the project
    let temp_dir = tempfile::Builder::new()
        .prefix("mdtest-venv-")
        .tempdir()
        .context("Failed to create temporary directory for mdtest virtual environment")?;
    // Canonicalize here to fix problems with `.strip_prefix()` later on Windows
    let temp_dir_path = dunce::canonicalize(temp_dir.path())
        .context("Failed to canonicalize temporary directory path")?;
    // All paths handed to the in-memory system must be valid UTF-8.
    let temp_path = SystemPath::from_std_path(&temp_dir_path)
        .ok_or_else(|| {
            anyhow!(
                "Temporary directory path is not valid UTF-8: {}",
                temp_dir_path.display()
            )
        })?
        .to_path_buf();
    // Generate a minimal pyproject.toml
    let pyproject_toml = format!(
        r#"[project]
name = "mdtest-deps"
version = "0.1.0"
requires-python = "~={python_version}.0"
dependencies = [
{deps}
]
"#,
        python_version = python_version,
        deps = dependencies
            .iter()
            .map(|dep| format!("    \"{dep}\","))
            .collect::<Vec<_>>()
            .join("\n")
    );
    std::fs::write(
        temp_path.join("pyproject.toml").as_std_path(),
        pyproject_toml,
    )
    .context("Failed to write pyproject.toml")?;
    // Convert PythonPlatform to uv's platform format
    let uv_platform = match python_platform {
        PythonPlatform::Identifier(id) => match id.as_str() {
            "win32" => "windows",
            "darwin" => "macos",
            "linux" => "linux",
            // Pass any other identifier through unchanged.
            other => other,
        },
        PythonPlatform::All => {
            bail!("For an mdtest with external dependencies, a Python platform must be specified");
        }
    };
    let upgrade_lockfile = std::env::var("MDTEST_UPGRADE_LOCKFILES").is_ok_and(|v| v == "1");
    let use_locked = if upgrade_lockfile {
        // In upgrade mode, we'll generate a new lockfile
        false
    } else if lockfile_path.exists() {
        // Copy existing lockfile to temp directory
        let temp_lockfile = temp_path.join("uv.lock");
        std::fs::copy(lockfile_path, temp_lockfile.as_std_path())
            .with_context(|| format!("Failed to copy lockfile from '{lockfile_path}'"))?;
        true
    } else {
        // No existing lockfile - error in normal mode
        bail!(
            "Lockfile not found at '{lockfile_path}'. Run with `MDTEST_UPGRADE_LOCKFILES=1` to generate it.",
        );
    };
    // Run `uv sync` to install dependencies
    let mut uv_sync = std::process::Command::new("uv");
    uv_sync
        .args(["sync", "--python-platform", uv_platform])
        .current_dir(temp_path.as_std_path());
    if use_locked {
        // `--locked` makes uv fail (rather than re-resolve) on a stale lockfile.
        uv_sync.arg("--locked");
    }
    let uv_sync_output = uv_sync
        .output()
        .context("Failed to run `uv sync`. Is `uv` installed?")?;
    if !uv_sync_output.status.success() {
        let stderr = String::from_utf8_lossy(&uv_sync_output.stderr);
        // Recognize uv's stale-lockfile error and surface actionable advice.
        if use_locked
            && stderr.contains("`uv.lock` needs to be updated, but `--locked` was provided.")
        {
            bail!(
                "Lockfile is out of date. Use one of these commands to regenerate it:\n\
                \n\
                uv run crates/ty_python_semantic/mdtest.py -e external/\n\
                \n\
                Or using cargo:\n\
                \n\
                MDTEST_EXTERNAL=1 MDTEST_UPGRADE_LOCKFILES=1 cargo test -p ty_python_semantic --test mdtest mdtest__external"
            );
        }
        bail!(
            "`uv sync` failed with exit code {:?}:\n{}",
            uv_sync_output.status.code(),
            stderr
        );
    }
    // In upgrade mode, copy the generated lockfile back to the source location
    if upgrade_lockfile {
        let temp_lockfile = temp_path.join("uv.lock");
        let temp_lockfile = temp_lockfile.as_std_path();
        if temp_lockfile.exists() {
            std::fs::copy(temp_lockfile, lockfile_path)
                .with_context(|| format!("Failed to write lockfile to '{lockfile_path}'"))?;
        } else {
            bail!(
                "Expected uv to create a lockfile at '{}'",
                temp_lockfile.display()
            );
        }
    }
    // Mirror the freshly-installed site-packages into the in-memory filesystem.
    let venv_path = temp_path.join(".venv");
    copy_site_packages_to_db(db, &venv_path, dest_venv_path, python_version)
}
/// Copy the site-packages directory from a real virtual environment to the in-memory filesystem of `db`.
///
/// This recursively copies all files from the venv's site-packages directory into the
/// in-memory filesystem at the specified destination path, preserving the
/// venv-relative layout so the destination looks like a real venv.
///
/// Fails if the environment cannot be inspected, no site-packages directory is
/// found, or site-packages is not located under `venv_path`.
fn copy_site_packages_to_db(
    db: &mut Db,
    venv_path: &SystemPath,
    dest_venv_path: &SystemPath,
    _python_version: PythonVersion,
) -> Result<()> {
    // Discover the site-packages directory in the virtual environment
    let system = OsSystem::new(venv_path);
    let env = PythonEnvironment::new(venv_path, SysPrefixPathOrigin::LocalVenv, &system)
        .context("Failed to create Python environment for temporary virtual environment")?;
    // `with_context` defers building the message string to the error path
    // (the previous `.context(format!(..))` allocated it eagerly on success).
    let site_packages_paths = env
        .site_packages_paths(&system)
        .with_context(|| format!("Failed to discover site-packages in '{venv_path}'"))?;
    let site_packages_path = site_packages_paths
        .into_iter()
        .next()
        .ok_or_else(|| anyhow!("No site-packages directory found in '{venv_path}'"))?;
    // Create the destination directory structure
    let relative_site_packages = site_packages_path.strip_prefix(venv_path).map_err(|_| {
        anyhow!("site-packages path '{site_packages_path}' is not under venv path '{venv_path}'")
    })?;
    let dest_site_packages = dest_venv_path.join(relative_site_packages);
    db.create_directory_all(&dest_site_packages)
        .context("Failed to create site-packages directory in database")?;
    // Recursively copy all files from site-packages
    copy_directory_recursive(db, &site_packages_path, &dest_site_packages)?;
    Ok(())
}
fn copy_directory_recursive(db: &mut Db, src: &SystemPath, dest: &SystemPath) -> Result<()> {
use std::fs;
for entry in fs::read_dir(src.as_std_path())
.with_context(|| format!("Failed to read directory {src}"))?
{
let entry = entry.with_context(|| format!("Failed to read directory entry in {src}"))?;
let entry_path = entry.path();
let file_type = entry
.file_type()
.with_context(|| format!("Failed to get file type for {}", entry_path.display()))?;
let src_path = SystemPath::from_std_path(&entry_path)
.ok_or_else(|| anyhow!("Path {} is not valid UTF-8", entry_path.display()))?;
let file_name = entry.file_name();
let file_name_str = file_name.to_str().ok_or_else(|| {
anyhow!(
"File name {} is not valid UTF-8",
file_name.to_string_lossy()
)
})?;
let dest_path = dest.join(file_name_str);
if file_type.is_dir() {
// Skip __pycache__ directories and other unnecessary directories
if file_name_str == "__pycache__" || file_name_str.ends_with(".dist-info") {
continue;
}
db.create_directory_all(&dest_path)
.with_context(|| format!("Failed to create directory {dest_path}"))?;
copy_directory_recursive(db, src_path, &dest_path)?;
} else if file_type.is_file() {
let is_python_source = entry_path.extension().is_some_and(|ext| {
ext.eq_ignore_ascii_case("py") || ext.eq_ignore_ascii_case("pyi")
});
if !is_python_source {
// Skip all non-Python files (binaries, data files, etc.)
continue;
}
let contents = fs::read_to_string(src_path.as_std_path())
.with_context(|| format!("Failed to read file {src_path}"))?;
db.write_file(&dest_path, contents)
.with_context(|| format!("Failed to write file {dest_path}"))?;
}
}
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_test/src/diagnostic.rs | crates/ty_test/src/diagnostic.rs | //! Sort and group diagnostics by line number, so they can be correlated with assertions.
//!
//! We don't assume that we will get the diagnostics in source order.
use ruff_db::diagnostic::Diagnostic;
use ruff_source_file::{LineIndex, OneIndexed};
use std::ops::{Deref, Range};
/// All diagnostics for one embedded Python file, sorted and grouped by start line number.
///
/// The diagnostics are kept in a flat vector, sorted by line number. A separate vector of
/// [`LineDiagnosticRange`] has one entry for each contiguous slice of the diagnostics vector
/// containing diagnostics which all start on the same line.
#[derive(Debug)]
pub(crate) struct SortedDiagnostics<'a> {
    /// All diagnostics, in ascending order of starting line number.
    diagnostics: Vec<&'a Diagnostic>,
    /// One entry per line with diagnostics; each indexes a contiguous slice
    /// of `diagnostics`.
    line_ranges: Vec<LineDiagnosticRange>,
}
impl<'a> SortedDiagnostics<'a> {
    /// Sort `diagnostics` by the line on which each primary span starts, and
    /// build per-line index ranges over the sorted vector.
    ///
    /// Diagnostics without a primary span (or without a range on it) are
    /// treated as starting on the first line.
    pub(crate) fn new(
        diagnostics: impl IntoIterator<Item = &'a Diagnostic>,
        line_index: &LineIndex,
    ) -> Self {
        // Pair each diagnostic with its starting line so we can sort by it.
        let mut diagnostics: Vec<_> = diagnostics
            .into_iter()
            .map(|diagnostic| DiagnosticWithLine {
                line_number: diagnostic
                    .primary_span()
                    .and_then(|span| span.range())
                    .map_or(OneIndexed::from_zero_indexed(0), |range| {
                        line_index.line_index(range.start())
                    }),
                diagnostic,
            })
            .collect();
        diagnostics.sort_unstable_by_key(|diagnostic_with_line| diagnostic_with_line.line_number);
        let mut diags = Self {
            diagnostics: Vec::with_capacity(diagnostics.len()),
            line_ranges: vec![],
        };
        // Walk the sorted diagnostics, emitting a `LineDiagnosticRange` each
        // time the line number changes; `start` marks the beginning of the
        // current line's run.
        let mut current_line_number = None;
        let mut start = 0;
        for DiagnosticWithLine {
            line_number,
            diagnostic,
        } in diagnostics
        {
            match current_line_number {
                None => {
                    current_line_number = Some(line_number);
                }
                Some(current) => {
                    if line_number != current {
                        let end = diags.diagnostics.len();
                        diags.line_ranges.push(LineDiagnosticRange {
                            line_number: current,
                            diagnostic_index_range: start..end,
                        });
                        start = end;
                        current_line_number = Some(line_number);
                    }
                }
            }
            diags.diagnostics.push(diagnostic);
        }
        // Flush the trailing run (if any diagnostics were seen at all).
        if let Some(line_number) = current_line_number {
            diags.line_ranges.push(LineDiagnosticRange {
                line_number,
                diagnostic_index_range: start..diags.diagnostics.len(),
            });
        }
        diags
    }
    /// Iterate over the diagnostics grouped per starting line.
    pub(crate) fn iter_lines(&self) -> LineDiagnosticsIterator<'_> {
        LineDiagnosticsIterator {
            diagnostics: self.diagnostics.as_slice(),
            inner: self.line_ranges.iter(),
        }
    }
}
/// Range delineating diagnostics in [`SortedDiagnostics`] that begin on a single line.
#[derive(Debug)]
struct LineDiagnosticRange {
    /// The line on which all diagnostics in this range start.
    line_number: OneIndexed,
    /// Index range into `SortedDiagnostics::diagnostics`.
    diagnostic_index_range: Range<usize>,
}
/// Iterator to group sorted diagnostics by line.
pub(crate) struct LineDiagnosticsIterator<'a> {
    /// The full, line-sorted diagnostics slice being iterated.
    diagnostics: &'a [&'a Diagnostic],
    /// The per-line index ranges that drive the iteration.
    inner: std::slice::Iter<'a, LineDiagnosticRange>,
}
impl<'a> Iterator for LineDiagnosticsIterator<'a> {
    type Item = LineDiagnostics<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        // Each stored range identifies the contiguous run of diagnostics
        // that begin on a single line.
        self.inner.next().map(|range| LineDiagnostics {
            line_number: range.line_number,
            diagnostics: &self.diagnostics[range.diagnostic_index_range.clone()],
        })
    }
}
impl std::iter::FusedIterator for LineDiagnosticsIterator<'_> {}
/// All diagnostics that start on a single line of source code in one embedded Python file.
#[derive(Debug)]
pub(crate) struct LineDiagnostics<'a> {
    /// Line number on which these diagnostics start.
    pub(crate) line_number: OneIndexed,
    /// Diagnostics starting on this line.
    pub(crate) diagnostics: &'a [&'a Diagnostic],
}
/// Dereference to the underlying slice for convenient indexing/iteration.
impl<'a> Deref for LineDiagnostics<'a> {
    type Target = [&'a Diagnostic];
    fn deref(&self) -> &Self::Target {
        self.diagnostics
    }
}
/// A diagnostic paired with the line its primary span starts on; used only
/// while sorting in `SortedDiagnostics::new`.
#[derive(Debug)]
struct DiagnosticWithLine<'a> {
    line_number: OneIndexed,
    diagnostic: &'a Diagnostic,
}
#[cfg(test)]
mod tests {
    use crate::db::Db;
    use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span};
    use ruff_db::files::system_path_to_file;
    use ruff_db::source::line_index;
    use ruff_db::system::DbWithWritableSystem as _;
    use ruff_source_file::OneIndexed;
    use ruff_text_size::{TextRange, TextSize};
    #[test]
    fn sort_and_group() {
        let mut db = Db::setup();
        db.write_file("/src/test.py", "one\ntwo\n").unwrap();
        let file = system_path_to_file(&db, "/src/test.py").unwrap();
        let lines = line_index(&db, file);
        // Ranges deliberately out of source order: they start on line 1,
        // line 2, and line 1 again, exercising the sort in
        // `SortedDiagnostics::new`.
        let ranges = [
            TextRange::new(TextSize::new(0), TextSize::new(1)),
            TextRange::new(TextSize::new(5), TextSize::new(10)),
            TextRange::new(TextSize::new(1), TextSize::new(7)),
        ];
        let diagnostics: Vec<_> = ranges
            .into_iter()
            .map(|range| {
                let mut diag = Diagnostic::new(
                    DiagnosticId::Lint(LintName::of("dummy")),
                    Severity::Error,
                    "dummy",
                );
                let span = Span::from(file).with_range(range);
                diag.annotate(Annotation::primary(span));
                diag
            })
            .collect();
        let sorted = super::SortedDiagnostics::new(diagnostics.iter(), &lines);
        let grouped = sorted.iter_lines().collect::<Vec<_>>();
        let [line1, line2] = &grouped[..] else {
            panic!("expected two lines");
        };
        // Two diagnostics start on the first line, one on the second.
        assert_eq!(line1.line_number, OneIndexed::from_zero_indexed(0));
        assert_eq!(line1.diagnostics.len(), 2);
        assert_eq!(line2.line_number, OneIndexed::from_zero_indexed(1));
        assert_eq!(line2.diagnostics.len(), 1);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/build.rs | crates/ruff/build.rs | use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
/// Build-script entry point: record git commit metadata and the compilation
/// target triple as `rustc-env` variables.
fn main() {
    // The workspace root directory is not available without walking up the tree
    // https://github.com/rust-lang/cargo/issues/3946
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
    let workspace_root = Path::new(&manifest_dir).join("..").join("..");
    commit_info(&workspace_root);
    // Expose the target triple to the compiled crate.
    let target = std::env::var("TARGET").unwrap();
    println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
/// Emit `cargo:` directives embedding git commit metadata (hash, short hash,
/// date, and last tag/distance) into the build, and register
/// `rerun-if-changed` triggers so the build script re-runs when HEAD or the
/// checked-out ref changes. Silently does nothing if not in a git repository
/// or if invoking `git` fails.
fn commit_info(workspace_root: &Path) {
    // If not in a git repository, do not attempt to retrieve commit information
    let git_dir = workspace_root.join(".git");
    if !git_dir.exists() {
        return;
    }
    if let Some(git_head_path) = git_head(&git_dir) {
        println!("cargo:rerun-if-changed={}", git_head_path.display());
        let git_head_contents = fs::read_to_string(git_head_path);
        if let Ok(git_head_contents) = git_head_contents {
            // The contents are either a commit or a reference in the following formats
            // - "<commit>" when the head is detached
            // - "ref <ref>" when working on a branch
            // If a commit, checking if the HEAD file has changed is sufficient
            // If a ref, we need to add the head file for that ref to rebuild on commit
            let mut git_ref_parts = git_head_contents.split_whitespace();
            git_ref_parts.next();
            if let Some(git_ref) = git_ref_parts.next() {
                let git_ref_path = git_dir.join(git_ref);
                println!("cargo:rerun-if-changed={}", git_ref_path.display());
            }
        }
    }
    // Bail out quietly if `git log` cannot be run or exits with failure.
    let output = match Command::new("git")
        .arg("log")
        .arg("-1")
        .arg("--date=short")
        .arg("--abbrev=9")
        .arg("--format=%H %h %cd %(describe:tags)")
        .output()
    {
        Ok(output) if output.status.success() => output,
        _ => return,
    };
    let stdout = String::from_utf8(output.stdout).unwrap();
    let mut parts = stdout.split_whitespace();
    // The `--format` string above guarantees (at least) hash, short hash,
    // and date tokens when the command succeeded.
    let mut next = || parts.next().unwrap();
    println!("cargo::rustc-env=RUFF_COMMIT_HASH={}", next());
    println!("cargo::rustc-env=RUFF_COMMIT_SHORT_HASH={}", next());
    println!("cargo::rustc-env=RUFF_COMMIT_DATE={}", next());
    // Describe can fail for some commits
    // https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
    if let Some(describe) = parts.next() {
        // `git describe` output is `<tag>-<distance>-g<hash>`.
        let mut describe_parts = describe.split('-');
        println!(
            "cargo::rustc-env=RUFF_LAST_TAG={}",
            describe_parts.next().unwrap()
        );
        // If this is the tagged commit, this component will be missing
        println!(
            "cargo::rustc-env=RUFF_LAST_TAG_DISTANCE={}",
            describe_parts.next().unwrap_or("0")
        );
    }
}
/// Find the `HEAD` file that should be watched for changes, given a `.git`
/// path that is either a standard git directory or a worktree pointer file.
///
/// Returns `None` when no such file can be determined.
fn git_head(git_dir: &Path) -> Option<PathBuf> {
    // The typical case is a standard git repository.
    let git_head_path = git_dir.join("HEAD");
    if git_head_path.exists() {
        return Some(git_head_path);
    }
    if !git_dir.is_file() {
        return None;
    }
    // If `.git/HEAD` doesn't exist and `.git` is actually a file,
    // then let's try to attempt to read it as a worktree. If it's
    // a worktree, then its contents will look like this, e.g.:
    //
    //     gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
    //
    // And the HEAD file we want to watch will be at:
    //
    //     /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
    let contents = fs::read_to_string(git_dir).ok()?;
    let (label, worktree_path) = contents.split_once(':')?;
    if label != "gitdir" {
        return None;
    }
    let worktree_path = worktree_path.trim();
    // Fix: return the `HEAD` file *inside* the worktree's git dir (as the
    // comment above describes and as the caller — which reads this path as a
    // file — requires), not the directory itself.
    Some(PathBuf::from(worktree_path).join("HEAD"))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/diagnostics.rs | crates/ruff/src/diagnostics.rs | #![cfg_attr(target_family = "wasm", allow(dead_code))]
use std::borrow::Cow;
use std::fs::File;
use std::io;
use std::io::Write;
use std::ops::{Add, AddAssign};
use std::path::Path;
use anyhow::{Context, Result};
use colored::Colorize;
use log::{debug, warn};
use ruff_db::diagnostic::Diagnostic;
use ruff_linter::codes::Rule;
use ruff_linter::linter::{FixTable, FixerResult, LinterResult, ParseSource, lint_fix, lint_only};
use ruff_linter::package::PackageRoot;
use ruff_linter::pyproject_toml::lint_pyproject_toml;
use ruff_linter::settings::types::UnsafeFixes;
use ruff_linter::settings::{LinterSettings, flags};
use ruff_linter::source_kind::{SourceError, SourceKind};
use ruff_linter::{IOError, Violation, fs};
use ruff_notebook::{NotebookError, NotebookIndex};
use ruff_python_ast::{PySourceType, SourceType, TomlSourceType};
use ruff_source_file::SourceFileBuilder;
use ruff_text_size::TextRange;
use ruff_workspace::Settings;
use rustc_hash::FxHashMap;
use crate::cache::{Cache, FileCache, FileCacheKey};
/// A collection of [`Diagnostic`]s and additional information needed to render them.
///
/// Note that `notebook_indexes` may be empty if there are no diagnostics because the
/// `NotebookIndex` isn't cached in this case. This isn't a problem for any current uses as of
/// 2025-08-12, which are all related to diagnostic rendering, but could be surprising if used
/// differently in the future.
#[derive(Debug, Default, PartialEq)]
pub(crate) struct Diagnostics {
    /// The diagnostics produced for the linted files.
    pub(crate) inner: Vec<Diagnostic>,
    /// The fixes that were applied, grouped by file path.
    pub(crate) fixed: FixMap,
    /// Notebook index per file path, needed to render notebook diagnostics.
    pub(crate) notebook_indexes: FxHashMap<String, NotebookIndex>,
}
impl Diagnostics {
    /// Create a collection from pre-computed diagnostics, with no fixes recorded.
    pub(crate) fn new(
        diagnostics: Vec<Diagnostic>,
        notebook_indexes: FxHashMap<String, NotebookIndex>,
    ) -> Self {
        Self {
            inner: diagnostics,
            fixed: FixMap::default(),
            notebook_indexes,
        }
    }
    /// Generate [`Diagnostics`] based on a [`SourceError`].
    ///
    /// I/O errors become an `io-error` diagnostic when that rule is enabled
    /// (otherwise they are only logged); notebook format/schema errors become
    /// syntax-error diagnostics. `path` of `None` is rendered as `-` (stdin).
    pub(crate) fn from_source_error(
        err: &SourceError,
        path: Option<&Path>,
        settings: &LinterSettings,
    ) -> Self {
        match err {
            // IO errors.
            SourceError::Io(_)
            | SourceError::Notebook(NotebookError::Io(_) | NotebookError::Json(_)) => {
                if settings.rules.enabled(Rule::IOError) {
                    // Attach the error to an empty placeholder source file.
                    let name = path.map_or_else(|| "-".into(), Path::to_string_lossy);
                    let source_file = SourceFileBuilder::new(name, "").finish();
                    Self::new(
                        vec![
                            IOError {
                                message: err.to_string(),
                            }
                            .into_diagnostic(TextRange::default(), &source_file),
                        ],
                        FxHashMap::default(),
                    )
                } else {
                    // Rule disabled: only warn, and report no diagnostics.
                    match path {
                        Some(path) => {
                            warn!(
                                "{}{}{} {err}",
                                "Failed to lint ".bold(),
                                fs::relativize_path(path).bold(),
                                ":".bold()
                            );
                        }
                        None => {
                            warn!("{}{} {err}", "Failed to lint".bold(), ":".bold());
                        }
                    }
                    Self::default()
                }
            }
            // Syntax errors.
            SourceError::Notebook(
                NotebookError::InvalidJson(_)
                | NotebookError::InvalidSchema(_)
                | NotebookError::InvalidFormat(_),
            ) => {
                let name = path.map_or_else(|| "-".into(), Path::to_string_lossy);
                let dummy = SourceFileBuilder::new(name, "").finish();
                Self::new(
                    vec![Diagnostic::invalid_syntax(dummy, err, TextRange::default())],
                    FxHashMap::default(),
                )
            }
        }
    }
}
impl Add for Diagnostics {
    type Output = Diagnostics;
    /// Combine two collections by delegating to `AddAssign`.
    fn add(self, other: Self) -> Self::Output {
        let mut combined = self;
        combined += other;
        combined
    }
}
impl AddAssign for Diagnostics {
    /// Merge `other` into `self`, extending diagnostics, fixes, and
    /// notebook indexes.
    fn add_assign(&mut self, other: Self) {
        let Self {
            inner,
            fixed,
            notebook_indexes,
        } = other;
        self.inner.extend(inner);
        self.fixed += fixed;
        self.notebook_indexes.extend(notebook_indexes);
    }
}
/// A collection of fixes indexed by file path.
///
/// Keys are the (relativized) paths of the files the fixes were applied to.
#[derive(Debug, Default, PartialEq)]
pub(crate) struct FixMap(FxHashMap<String, FixTable>);
impl FixMap {
    /// Returns `true` if there are no fixes in the map.
    pub(crate) fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Returns an iterator over the fixes in the map, along with the file path.
    pub(crate) fn iter(&self) -> impl Iterator<Item = (&String, &FixTable)> {
        self.0.iter()
    }
    /// Returns an iterator over the fixes in the map.
    pub(crate) fn values(&self) -> impl Iterator<Item = &FixTable> {
        self.0.values()
    }
}
impl FromIterator<(String, FixTable)> for FixMap {
fn from_iter<T: IntoIterator<Item = (String, FixTable)>>(iter: T) -> Self {
Self(
iter.into_iter()
.filter(|(_, fixes)| !fixes.is_empty())
.collect(),
)
}
}
impl AddAssign for FixMap {
    /// Merge `rhs` into `self`, accumulating per-rule fix counts and
    /// skipping files and rules that contributed no fixes.
    fn add_assign(&mut self, rhs: Self) {
        for (filename, fixed) in rhs.0 {
            if fixed.is_empty() {
                continue;
            }
            let fixed_in_file = self.0.entry(filename).or_default();
            for (rule, name, count) in fixed.iter() {
                if count > 0 {
                    *fixed_in_file.entry(rule).or_default(name) += count;
                }
            }
        }
    }
}
/// Lint the source code at the given `Path`.
///
/// Handles caching (skipping files previously linted cleanly when `cache` is
/// provided and `noqa` is enabled), `pyproject.toml` files, and — in
/// `Apply`/`Diff` fix modes — writing fixed sources back to disk or printing
/// a diff to stdout.
///
/// Returns the diagnostics together with the fixes applied (keyed by
/// relativized path) and, for notebooks, the index needed to render them.
/// Errors are returned for cache-key creation or file-write failures; source
/// read/parse errors are converted into diagnostics instead.
pub(crate) fn lint_path(
    path: &Path,
    package: Option<PackageRoot<'_>>,
    settings: &LinterSettings,
    cache: Option<&Cache>,
    noqa: flags::Noqa,
    fix_mode: flags::FixMode,
    unsafe_fixes: UnsafeFixes,
) -> Result<Diagnostics> {
    // Check the cache.
    let caching = match cache {
        Some(cache) if noqa.is_enabled() => {
            let relative_path = cache
                .relative_path(path)
                .expect("wrong package cache for file");
            let cache_key = FileCacheKey::from_path(path).context("Failed to create cache key")?;
            let cached_diagnostics = cache
                .get(relative_path, &cache_key)
                .is_some_and(FileCache::linted);
            if cached_diagnostics {
                // Cache hit: the file was previously linted cleanly.
                return Ok(Diagnostics::default());
            }
            // Stash the file metadata for later so when we update the cache it reflects the prerun
            // information
            Some((cache, relative_path, cache_key))
        }
        _ => None,
    };
    debug!("Checking: {}", path.display());
    // Determine the source type, preferring any per-extension override from
    // the settings; `pyproject.toml` files take a dedicated lint path.
    let source_type = match settings.extension.get(path).map(PySourceType::from) {
        Some(source_type) => source_type,
        None => match SourceType::from(path) {
            SourceType::Toml(TomlSourceType::Pyproject) => {
                let diagnostics = if settings
                    .rules
                    .iter_enabled()
                    .any(|rule_code| rule_code.lint_source().is_pyproject_toml())
                {
                    let contents = match std::fs::read_to_string(path).map_err(SourceError::from) {
                        Ok(contents) => contents,
                        Err(err) => {
                            return Ok(Diagnostics::from_source_error(&err, Some(path), settings));
                        }
                    };
                    let source_file =
                        SourceFileBuilder::new(path.to_string_lossy(), contents).finish();
                    lint_pyproject_toml(&source_file, settings)
                } else {
                    vec![]
                };
                return Ok(Diagnostics {
                    inner: diagnostics,
                    ..Diagnostics::default()
                });
            }
            SourceType::Toml(_) => return Ok(Diagnostics::default()),
            SourceType::Python(source_type) => source_type,
        },
    };
    // Extract the sources from the file.
    let source_kind = match SourceKind::from_path(path, source_type) {
        Ok(Some(source_kind)) => source_kind,
        Ok(None) => return Ok(Diagnostics::default()),
        Err(err) => {
            return Ok(Diagnostics::from_source_error(&err, Some(path), settings));
        }
    };
    // Lint the file.
    let (result, transformed, fixed) =
        if matches!(fix_mode, flags::FixMode::Apply | flags::FixMode::Diff) {
            if let Ok(FixerResult {
                result,
                transformed,
                fixed,
            }) = lint_fix(
                path,
                package,
                noqa,
                unsafe_fixes,
                settings,
                &source_kind,
                source_type,
            ) {
                if !fixed.is_empty() {
                    // Persist the fixes: overwrite the file or emit a diff.
                    match fix_mode {
                        flags::FixMode::Apply => transformed.write(&mut File::create(path)?)?,
                        flags::FixMode::Diff => {
                            write!(
                                &mut io::stdout().lock(),
                                "{}",
                                source_kind.diff(&transformed, Some(path)).unwrap()
                            )?;
                        }
                        flags::FixMode::Generate => {}
                    }
                }
                // `Cow::Borrowed` means nothing changed; keep the original.
                let transformed = if let Cow::Owned(transformed) = transformed {
                    transformed
                } else {
                    source_kind
                };
                (result, transformed, fixed)
            } else {
                // If we fail to fix, lint the original source code.
                let result = lint_only(
                    path,
                    package,
                    settings,
                    noqa,
                    &source_kind,
                    source_type,
                    ParseSource::None,
                );
                let transformed = source_kind;
                let fixed = FixTable::default();
                (result, transformed, fixed)
            }
        } else {
            let result = lint_only(
                path,
                package,
                settings,
                noqa,
                &source_kind,
                source_type,
                ParseSource::None,
            );
            let transformed = source_kind;
            let fixed = FixTable::default();
            (result, transformed, fixed)
        };
    let diagnostics = result.diagnostics;
    if let Some((cache, relative_path, key)) = caching {
        // `FixMode::Apply` and `FixMode::Diff` rely on side-effects (writing to disk,
        // and writing the diff to stdout, respectively). If a file has diagnostics
        // with fixes, we need to avoid reading from and writing to the cache in these
        // modes.
        let use_fixes = match fix_mode {
            flags::FixMode::Generate => true,
            flags::FixMode::Apply | flags::FixMode::Diff => fixed.is_empty(),
        };
        // We don't cache files with diagnostics.
        let linted = diagnostics.is_empty() && use_fixes;
        cache.set_linted(relative_path.to_owned(), &key, linted);
    }
    // Only notebooks need an index for rendering.
    let notebook_indexes = if let SourceKind::IpyNotebook(notebook) = transformed {
        FxHashMap::from_iter([(path.to_string_lossy().to_string(), notebook.into_index())])
    } else {
        FxHashMap::default()
    };
    Ok(Diagnostics {
        inner: diagnostics,
        fixed: FixMap::from_iter([(fs::relativize_path(path), fixed)]),
        notebook_indexes,
    })
}
/// Generate `Diagnostic`s from source code content derived from stdin.
///
/// `path` is the user-supplied `--stdin-filename` (if any); when absent, the
/// input is treated as Python and reported under the name `-`. In `Apply`
/// fix mode the (possibly fixed) contents are written to stdout; in `Diff`
/// mode a diff is written only when fixes were produced.
pub(crate) fn lint_stdin(
    path: Option<&Path>,
    package: Option<PackageRoot<'_>>,
    contents: String,
    settings: &Settings,
    noqa: flags::Noqa,
    fix_mode: flags::FixMode,
) -> Result<Diagnostics> {
    // Determine the source type, preferring any per-extension override from
    // the settings; `pyproject.toml` input takes a dedicated lint path.
    let source_type = match path.and_then(|path| settings.linter.extension.get(path)) {
        None => match path.map(SourceType::from).unwrap_or_default() {
            SourceType::Python(source_type) => source_type,
            SourceType::Toml(source_type) if source_type.is_pyproject() => {
                if !settings
                    .linter
                    .rules
                    .iter_enabled()
                    .any(|rule_code| rule_code.lint_source().is_pyproject_toml())
                {
                    return Ok(Diagnostics::default());
                }
                let path = path.unwrap();
                let source_file =
                    SourceFileBuilder::new(path.to_string_lossy(), contents.clone()).finish();
                // TOML input is never fixed; `Apply` echoes it back unchanged.
                match fix_mode {
                    flags::FixMode::Diff | flags::FixMode::Generate => {}
                    flags::FixMode::Apply => write!(&mut io::stdout().lock(), "{contents}")?,
                }
                return Ok(Diagnostics {
                    inner: lint_pyproject_toml(&source_file, &settings.linter),
                    fixed: FixMap::from_iter([(fs::relativize_path(path), FixTable::default())]),
                    notebook_indexes: FxHashMap::default(),
                });
            }
            SourceType::Toml(_) => return Ok(Diagnostics::default()),
        },
        Some(language) => PySourceType::from(language),
    };
    // Extract the sources from the file.
    let source_kind = match SourceKind::from_source_code(contents, source_type) {
        Ok(Some(source_kind)) => source_kind,
        Ok(None) => return Ok(Diagnostics::default()),
        Err(err) => {
            return Ok(Diagnostics::from_source_error(&err, path, &settings.linter));
        }
    };
    // Lint the inputs.
    let (LinterResult { diagnostics, .. }, transformed, fixed) =
        if matches!(fix_mode, flags::FixMode::Apply | flags::FixMode::Diff) {
            if let Ok(FixerResult {
                result,
                transformed,
                fixed,
            }) = lint_fix(
                path.unwrap_or_else(|| Path::new("-")),
                package,
                noqa,
                settings.unsafe_fixes,
                &settings.linter,
                &source_kind,
                source_type,
            ) {
                match fix_mode {
                    flags::FixMode::Apply => {
                        // Write the contents to stdout, regardless of whether any errors were fixed.
                        transformed.write(&mut io::stdout().lock())?;
                    }
                    flags::FixMode::Diff => {
                        // But only write a diff if it's non-empty.
                        if !fixed.is_empty() {
                            write!(
                                &mut io::stdout().lock(),
                                "{}",
                                source_kind.diff(&transformed, path).unwrap()
                            )?;
                        }
                    }
                    flags::FixMode::Generate => {}
                }
                // `Cow::Borrowed` means nothing changed; keep the original.
                let transformed = if let Cow::Owned(transformed) = transformed {
                    transformed
                } else {
                    source_kind
                };
                (result, transformed, fixed)
            } else {
                // If we fail to fix, lint the original source code.
                let result = lint_only(
                    path.unwrap_or_else(|| Path::new("-")),
                    package,
                    &settings.linter,
                    noqa,
                    &source_kind,
                    source_type,
                    ParseSource::None,
                );
                // Write the contents to stdout anyway.
                if fix_mode.is_apply() {
                    source_kind.write(&mut io::stdout().lock())?;
                }
                let transformed = source_kind;
                let fixed = FixTable::default();
                (result, transformed, fixed)
            }
        } else {
            let result = lint_only(
                path.unwrap_or_else(|| Path::new("-")),
                package,
                &settings.linter,
                noqa,
                &source_kind,
                source_type,
                ParseSource::None,
            );
            let transformed = source_kind;
            let fixed = FixTable::default();
            (result, transformed, fixed)
        };
    // Only notebooks need an index for rendering.
    let notebook_indexes = if let SourceKind::IpyNotebook(notebook) = transformed {
        FxHashMap::from_iter([(
            path.map_or_else(|| "-".into(), |path| path.to_string_lossy().to_string()),
            notebook.into_index(),
        )])
    } else {
        FxHashMap::default()
    };
    Ok(Diagnostics {
        inner: diagnostics,
        fixed: FixMap::from_iter([(
            fs::relativize_path(path.unwrap_or_else(|| Path::new("-"))),
            fixed,
        )]),
        notebook_indexes,
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/lib.rs | crates/ruff/src/lib.rs | #![allow(clippy::print_stdout)]
use std::fs::File;
use std::io::{self, BufWriter, Write, stdout};
use std::path::{Path, PathBuf};
use std::process::ExitCode;
use std::sync::mpsc::channel;
use anyhow::Result;
use clap::CommandFactory;
use colored::Colorize;
use log::error;
use notify::{RecursiveMode, Watcher, recommended_watcher};
use args::{GlobalConfigArgs, ServerCommand};
use ruff_db::diagnostic::{Diagnostic, Severity};
use ruff_linter::logging::{LogLevel, set_up_logging};
use ruff_linter::settings::flags::FixMode;
use ruff_linter::settings::types::OutputFormat;
use ruff_linter::{fs, warn_user, warn_user_once};
use ruff_workspace::Settings;
use crate::args::{
AnalyzeCommand, AnalyzeGraphCommand, Args, CheckCommand, Command, FormatCommand,
};
use crate::printer::{Flags as PrinterFlags, Printer};
pub mod args;
mod cache;
mod commands;
mod diagnostics;
mod printer;
pub mod resolve;
mod stdin;
mod version;
/// Process exit status reported by the CLI entry points.
///
/// Converted into a concrete process exit code via the `From<ExitStatus> for
/// ExitCode` impl below (0 = success, 1 = violations found, 2 = error).
#[derive(Copy, Clone)]
pub enum ExitStatus {
    /// Linting was successful and there were no linting errors.
    Success,
    /// Linting was successful but there were linting errors.
    Failure,
    /// Linting failed.
    Error,
}
impl From<ExitStatus> for ExitCode {
fn from(status: ExitStatus) -> Self {
match status {
ExitStatus::Success => ExitCode::from(0),
ExitStatus::Failure => ExitCode::from(1),
ExitStatus::Error => ExitCode::from(2),
}
}
}
/// The category of a filesystem change observed in `--watch` mode.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ChangeKind {
    /// A `.toml` configuration file changed (or the watcher requested a rescan).
    Configuration,
    /// A Python source file (`.py`, `.pyi`, `.pyw`, or `.ipynb`) changed.
    SourceFile,
}
/// Return the [`ChangeKind`] based on the list of modified file paths.
///
/// Returns `None` if no relevant changes were detected.
fn change_detected(event: ¬ify::Event) -> Option<ChangeKind> {
// If any `.toml` files were modified, return `ChangeKind::Configuration`. Otherwise, return
// `ChangeKind::SourceFile` if any `.py`, `.pyi`, `.pyw`, or `.ipynb` files were modified.
let mut source_file = false;
if event.kind.is_access() || event.kind.is_other() {
return None;
}
if event.need_rescan() {
return Some(ChangeKind::Configuration);
}
for path in &event.paths {
if let Some(suffix) = path.extension() {
match suffix.to_str() {
Some("toml") => {
return Some(ChangeKind::Configuration);
}
Some("py" | "pyi" | "pyw" | "ipynb") => source_file = true,
_ => {}
}
}
}
if source_file {
return Some(ChangeKind::SourceFile);
}
None
}
/// Returns true if the command should read from standard input.
fn is_stdin(files: &[PathBuf], stdin_filename: Option<&Path>) -> bool {
    // `--stdin-filename` always forces reading from standard input; warn if a
    // regular file argument was also passed alongside it.
    if stdin_filename.is_some() {
        if let Some(ignored) = files.iter().find(|path| path.as_path() != Path::new("-")) {
            warn_user_once!(
                "Ignoring file {} in favor of standard input.",
                ignored.display()
            );
        }
        return true;
    }
    // Otherwise, read from standard input only when `-` is the sole argument.
    matches!(files, [only] if only == Path::new("-"))
}
/// Returns the default set of files if none are provided, otherwise returns provided files.
fn resolve_default_files(files: Vec<PathBuf>, is_stdin: bool) -> Vec<PathBuf> {
    if !files.is_empty() {
        return files;
    }
    // With no explicit inputs, fall back to stdin (`-`) or the current
    // working directory (`.`).
    let default = if is_stdin { "-" } else { "." };
    vec![PathBuf::from(default)]
}
/// Shared entry point for all `ruff` subcommands.
///
/// Installs a panic hook that prints an issue-reporting banner, configures
/// logging (except for `server`, which sets up its own), and dispatches to
/// the selected subcommand implementation.
pub fn run(
    Args {
        command,
        global_options,
    }: Args,
) -> Result<ExitStatus> {
    {
        // Record the program version for diagnostics, then chain a custom
        // panic hook in front of the default one so crashes point users at
        // the issue tracker before the usual panic output is printed.
        ruff_db::set_program_version(crate::version::version().to_string()).unwrap();
        let default_panic_hook = std::panic::take_hook();
        std::panic::set_hook(Box::new(move |info| {
            #[expect(clippy::print_stderr)]
            {
                eprintln!(
                    r#"
{}{} {} If you could open an issue at:
https://github.com/astral-sh/ruff/issues/new?title=%5BPanic%5D
...quoting the executed command, along with the relevant file contents and `pyproject.toml` settings, we'd be very appreciative!
"#,
                    "error".red().bold(),
                    ":".bold(),
                    "Ruff crashed.".bold(),
                );
            }
            default_panic_hook(info);
        }));
    }
    // Don't set up logging for the server command, as it has its own logging setup
    // and setting the global logger can only be done once.
    if !matches!(command, Command::Server { .. }) {
        set_up_logging(global_options.log_level())?;
    }
    match command {
        Command::Version { output_format } => {
            commands::version::version(output_format)?;
            Ok(ExitStatus::Success)
        }
        Command::Rule {
            rule,
            all,
            output_format,
        } => {
            // `rule` and `all` are in a mutually-exclusive, required clap
            // group, so exactly one of the two branches runs.
            if all {
                commands::rule::rules(output_format)?;
            }
            if let Some(rule) = rule {
                commands::rule::rule(rule, output_format)?;
            }
            Ok(ExitStatus::Success)
        }
        Command::Config {
            option,
            output_format,
        } => {
            commands::config::config(option.as_deref(), output_format)?;
            Ok(ExitStatus::Success)
        }
        Command::Linter { output_format } => {
            commands::linter::linter(output_format)?;
            Ok(ExitStatus::Success)
        }
        Command::Clean => {
            commands::clean::clean(global_options.log_level())?;
            Ok(ExitStatus::Success)
        }
        Command::GenerateShellCompletion { shell } => {
            shell.generate(&mut Args::command(), &mut stdout());
            Ok(ExitStatus::Success)
        }
        Command::Check(args) => check(args, global_options),
        Command::Format(args) => format(args, global_options),
        Command::Server(args) => server(args),
        Command::Analyze(AnalyzeCommand::Graph(args)) => analyze_graph(args, global_options),
    }
}
/// Entry point for the `ruff format` subcommand.
fn format(args: FormatCommand, global_options: GlobalConfigArgs) -> Result<ExitStatus> {
    // Remember whether `--output-format` was given on the CLI before the
    // arguments are partitioned away.
    let output_format_given = args.output_format.is_some();
    let (cli, config_arguments) = args.partition(global_options)?;
    let stdin_filename = cli.stdin_filename.as_deref();
    let pyproject_config = resolve::resolve(&config_arguments, stdin_filename)?;
    // The formatter's `--output-format` is gated behind preview mode.
    let preview_enabled = pyproject_config.settings.formatter.preview.is_enabled();
    if output_format_given && !preview_enabled {
        warn_user_once!(
            "The --output-format flag for the formatter is unstable and requires preview mode to use."
        );
    }
    // Dispatch to the stdin or filesystem implementation.
    if is_stdin(&cli.files, stdin_filename) {
        commands::format_stdin::format_stdin(&cli, &config_arguments, &pyproject_config)
    } else {
        commands::format::format(cli, &config_arguments, &pyproject_config)
    }
}
/// Entry point for the `ruff analyze graph` subcommand.
fn analyze_graph(
    args: AnalyzeGraphCommand,
    global_options: GlobalConfigArgs,
) -> Result<ExitStatus> {
    let (cli, config_arguments) = args.partition(global_options)?;
    commands::analyze_graph::analyze_graph(cli, &config_arguments)
}
/// Entry point for the `ruff server` subcommand (the language server).
fn server(args: ServerCommand) -> Result<ExitStatus> {
    commands::server::run_server(args.resolve_preview())
}
/// Entry point for the `ruff check` subcommand.
///
/// Resolves configuration, then either runs once (optionally reading from
/// stdin), adds `noqa` directives, or re-lints continuously in `--watch`
/// mode. The returned [`ExitStatus`] encodes whether violations remain.
pub fn check(args: CheckCommand, global_options: GlobalConfigArgs) -> Result<ExitStatus> {
    let (cli, config_arguments) = args.partition(global_options)?;
    // Construct the "default" settings. These are used when no `pyproject.toml`
    // files are present, or files are injected from outside of the hierarchy.
    let pyproject_config = resolve::resolve(&config_arguments, cli.stdin_filename.as_deref())?;
    // Violations go to `--output-file` when given (and not watching),
    // otherwise to stdout. Colors are disabled when writing to a file.
    let mut writer: Box<dyn Write> = match cli.output_file {
        Some(path) if !cli.watch => {
            colored::control::set_override(false);
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent)?;
            }
            let file = File::create(path)?;
            Box::new(BufWriter::new(file))
        }
        _ => Box::new(BufWriter::new(io::stdout())),
    };
    let stderr_writer = Box::new(BufWriter::new(io::stderr()));
    let is_stdin = is_stdin(&cli.files, cli.stdin_filename.as_deref());
    let files = resolve_default_files(cli.files, is_stdin);
    // `--show-settings` / `--show-files` are informational early exits.
    if cli.show_settings {
        commands::show_settings::show_settings(
            &files,
            &pyproject_config,
            &config_arguments,
            &mut writer,
        )?;
        return Ok(ExitStatus::Success);
    }
    if cli.show_files {
        commands::show_files::show_files(
            &files,
            &pyproject_config,
            &config_arguments,
            &mut writer,
        )?;
        return Ok(ExitStatus::Success);
    }
    // Extract options that are included in `Settings`, but only apply at the top
    // level.
    let Settings {
        fix,
        fix_only,
        unsafe_fixes,
        output_format,
        show_fixes,
        ..
    } = pyproject_config.settings;
    // Fix rules are as follows:
    // - By default, generate all fixes, but don't apply them to the filesystem.
    // - If `--fix` or `--fix-only` is set, apply applicable fixes to the filesystem (or
    //   print them to stdout, if we're reading from stdin).
    // - If `--diff` or `--fix-only` are set, don't print any violations (only applicable fixes)
    // - By default, applicable fixes only include [`Applicability::Automatic`], but if
    //   `--unsafe-fixes` is set, then [`Applicability::Suggested`] fixes are included.
    let fix_mode = if cli.diff {
        FixMode::Diff
    } else if fix || fix_only {
        FixMode::Apply
    } else {
        FixMode::Generate
    };
    let cache = !cli.no_cache;
    let noqa = !cli.ignore_noqa;
    let mut printer_flags = PrinterFlags::empty();
    if !(cli.diff || fix_only) {
        printer_flags |= PrinterFlags::SHOW_VIOLATIONS;
    }
    if show_fixes {
        printer_flags |= PrinterFlags::SHOW_FIX_SUMMARY;
    }
    #[cfg(debug_assertions)]
    if cache {
        // `--no-cache` doesn't respect code changes, and so is often confusing during
        // development.
        warn_user!("Detected debug build without --no-cache.");
    }
    // `--add-noqa` is its own mode: write suppression comments, then exit.
    if let Some(reason) = &cli.add_noqa {
        if !fix_mode.is_generate() {
            warn_user!("--fix is incompatible with --add-noqa.");
        }
        if reason.contains(['\n', '\r']) {
            return Err(anyhow::anyhow!(
                "--add-noqa <reason> cannot contain newline characters"
            ));
        }
        let reason_opt = (!reason.is_empty()).then_some(reason.as_str());
        let modifications =
            commands::add_noqa::add_noqa(&files, &pyproject_config, &config_arguments, reason_opt)?;
        if modifications > 0 && config_arguments.log_level >= LogLevel::Default {
            let s = if modifications == 1 { "" } else { "s" };
            #[expect(clippy::print_stderr)]
            {
                eprintln!("Added {modifications} noqa directive{s}.");
            }
        }
        return Ok(ExitStatus::Success);
    }
    let printer = Printer::new(
        output_format,
        config_arguments.log_level,
        fix_mode,
        unsafe_fixes,
        printer_flags,
    );
    // the settings should already be combined with the CLI overrides at this point
    // TODO(jane): let's make this `PreviewMode`
    // TODO: this should reference the global preview mode once https://github.com/astral-sh/ruff/issues/8232
    // is resolved.
    let preview = pyproject_config.settings.linter.preview.is_enabled();
    if cli.watch {
        if output_format != OutputFormat::default() {
            warn_user!(
                "`--output-format {}` is always used in watch mode.",
                OutputFormat::default()
            );
        }
        // Configure the file watcher.
        let (tx, rx) = channel();
        let mut watcher = recommended_watcher(tx)?;
        for file in &files {
            watcher.watch(file, RecursiveMode::Recursive)?;
        }
        if let Some(file) = pyproject_config.path.as_ref() {
            watcher.watch(file, RecursiveMode::Recursive)?;
        }
        // Perform an initial run instantly.
        Printer::clear_screen()?;
        printer.write_to_user("Starting linter in watch mode...\n");
        let diagnostics = commands::check::check(
            &files,
            &pyproject_config,
            &config_arguments,
            cache.into(),
            noqa.into(),
            fix_mode,
            unsafe_fixes,
        )?;
        printer.write_continuously(&mut writer, &diagnostics, preview)?;
        // In watch mode, we may need to re-resolve the configuration.
        // TODO(charlie): Re-compute other derivative values, like the `printer`.
        let mut pyproject_config = pyproject_config;
        loop {
            match rx.recv() {
                Ok(event) => {
                    let Some(change_kind) = change_detected(&event?) else {
                        continue;
                    };
                    if matches!(change_kind, ChangeKind::Configuration) {
                        pyproject_config =
                            resolve::resolve(&config_arguments, cli.stdin_filename.as_deref())?;
                    }
                    Printer::clear_screen()?;
                    printer.write_to_user("File change detected...\n");
                    let diagnostics = commands::check::check(
                        &files,
                        &pyproject_config,
                        &config_arguments,
                        cache.into(),
                        noqa.into(),
                        fix_mode,
                        unsafe_fixes,
                    )?;
                    printer.write_continuously(&mut writer, &diagnostics, preview)?;
                }
                Err(err) => return Err(err.into()),
            }
        }
    } else {
        // Generate lint violations.
        let diagnostics = if is_stdin {
            commands::check_stdin::check_stdin(
                cli.stdin_filename.map(fs::normalize_path).as_deref(),
                &pyproject_config,
                &config_arguments,
                noqa.into(),
                fix_mode,
            )?
        } else {
            commands::check::check(
                &files,
                &pyproject_config,
                &config_arguments,
                cache.into(),
                noqa.into(),
                fix_mode,
                unsafe_fixes,
            )?
        };
        // Always try to print violations (though the printer itself may suppress output)
        // If we're writing fixes via stdin, the transformed source code goes to the writer
        // so send the summary to stderr instead
        let mut summary_writer = if is_stdin && matches!(fix_mode, FixMode::Apply | FixMode::Diff) {
            stderr_writer
        } else {
            writer
        };
        if cli.statistics {
            printer.write_statistics(&diagnostics, &mut summary_writer)?;
        } else {
            printer.write_once(&diagnostics, &mut summary_writer, preview)?;
        }
        if !cli.exit_zero {
            let max_severity = diagnostics
                .inner
                .iter()
                .map(Diagnostic::severity)
                .max()
                .unwrap_or(Severity::Info);
            if max_severity.is_fatal() {
                // When a panic/fatal error is reported, prompt the user to open an issue on github.
                // Diagnostics with severity `fatal` will be sorted to the bottom, and printing the
                // message here instead of attaching it to the diagnostic ensures that we only print
                // it once instead of repeating it for each diagnostic. Prints to stderr to prevent
                // the message from being captured by tools parsing the normal output.
                let message = "Panic during linting indicates a bug in Ruff. If you could open an issue at:
https://github.com/astral-sh/ruff/issues/new?title=%5BLinter%20panic%5D
...with the relevant file contents, the `pyproject.toml` settings, and the stack trace above, we'd be very appreciative!
";
                error!("{message}");
                return Ok(ExitStatus::Error);
            }
            if cli.diff {
                // If we're printing a diff, we always want to exit non-zero if there are
                // any fixable violations (since we've printed the diff, but not applied the
                // fixes).
                if !diagnostics.fixed.is_empty() {
                    return Ok(ExitStatus::Failure);
                }
            } else if fix_only {
                // If we're only fixing, we want to exit zero (since we've fixed all fixable
                // violations), unless we're explicitly asked to exit non-zero on fix.
                if cli.exit_non_zero_on_fix {
                    if !diagnostics.fixed.is_empty() {
                        return Ok(ExitStatus::Failure);
                    }
                }
            } else {
                // If we're running the linter (not just fixing), we want to exit non-zero if
                // there are any violations, unless we're explicitly asked to exit zero on
                // fix.
                if cli.exit_non_zero_on_fix {
                    if !diagnostics.fixed.is_empty() || !diagnostics.inner.is_empty() {
                        return Ok(ExitStatus::Failure);
                    }
                } else {
                    if !diagnostics.inner.is_empty() {
                        return Ok(ExitStatus::Failure);
                    }
                }
            }
        }
    }
    Ok(ExitStatus::Success)
}
#[cfg(test)]
mod test_file_change_detector {
    use std::path::PathBuf;
    use crate::{ChangeKind, change_detected};
    /// Build a file-creation event touching the given paths.
    fn file_creation_event(paths: &[&str]) -> notify::Event {
        notify::Event {
            kind: notify::EventKind::Create(notify::event::CreateKind::File),
            paths: paths.iter().copied().map(PathBuf::from).collect(),
            attrs: notify::event::EventAttributes::default(),
        }
    }
    #[test]
    fn detect_correct_file_change() {
        // Each case pairs the touched paths with the expected change kind:
        // a `.toml` file anywhere wins as a configuration change, Python
        // files count as source changes, and anything else is ignored.
        let cases: &[(&[&str], Option<ChangeKind>)] = &[
            (
                &["tmp/pyproject.toml", "tmp/bin/ruff.rs"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["pyproject.toml", "tmp/bin/ruff.rs"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["tmp1/tmp2/tmp3/pyproject.toml", "tmp/bin/ruff.rs"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["tmp/ruff.toml", "tmp/bin/ruff.rs"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["tmp/.ruff.toml", "tmp/bin/ruff.rs"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["tmp/rule.py", "tmp/bin/ruff.rs"],
                Some(ChangeKind::SourceFile),
            ),
            (
                &["tmp/rule.pyi", "tmp/bin/ruff.rs"],
                Some(ChangeKind::SourceFile),
            ),
            (
                &["pyproject.toml", "tmp/rule.py"],
                Some(ChangeKind::Configuration),
            ),
            (
                &["tmp/rule.py", "pyproject.toml"],
                Some(ChangeKind::Configuration),
            ),
            (&["tmp/rule.js", "tmp/bin/ruff.rs"], None),
        ];
        for (paths, expected) in cases {
            assert_eq!(*expected, change_detected(&file_creation_event(paths)));
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/stdin.rs | crates/ruff/src/stdin.rs | use std::io;
use std::io::{Read, Write};
/// Read a string from `stdin`.
pub(crate) fn read_from_stdin() -> Result<String, io::Error> {
    // `io::read_to_string` locks stdin once and reads until EOF, validating
    // that the input is UTF-8.
    io::read_to_string(io::stdin().lock())
}
/// Read bytes from `stdin` and write them to `stdout`.
///
/// Copies the raw bytes rather than decoding them as UTF-8, so non-UTF-8
/// input is passed through unchanged instead of failing with an
/// `InvalidData` error (the previous `read_to_string`-based implementation
/// rejected non-UTF-8 input, contradicting this doc comment).
pub(crate) fn parrot_stdin() -> Result<(), io::Error> {
    let mut buffer = Vec::new();
    io::stdin().lock().read_to_end(&mut buffer)?;
    io::stdout().write_all(&buffer)?;
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/version.rs | crates/ruff/src/version.rs | //! Code for representing Ruff's release version number.
use serde::Serialize;
use std::fmt;
/// Information about the git repository where Ruff was built from.
#[derive(Serialize)]
pub(crate) struct CommitInfo {
    /// Abbreviated commit hash (e.g. `"53b0f5d92"`).
    short_commit_hash: String,
    /// Full commit hash.
    commit_hash: String,
    /// Date of the commit (e.g. `"2023-10-19"`).
    commit_date: String,
    /// Last git tag, if one was set at build time (from `RUFF_LAST_TAG`).
    last_tag: Option<String>,
    /// Number of commits since `last_tag`; 0 when unknown or exactly on a tag.
    commits_since_last_tag: u32,
}
/// Ruff's version.
///
/// Rendered as `<version>[+<commits>] (<commit> <date>)` by its `Display` impl.
#[derive(Serialize)]
pub(crate) struct VersionInfo {
    /// Ruff's version, such as "0.5.1"
    version: String,
    /// Information about the git commit we may have been built from.
    ///
    /// `None` if not built from a git repo or if retrieval failed.
    commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: "<version>[+<commits>] (<commit> <date>)"
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
/// Returns information about Ruff's version.
pub(crate) fn version() -> VersionInfo {
    // Environment variables are only read at compile-time
    macro_rules! option_env_str {
        ($name:expr) => {
            option_env!($name).map(|s| s.to_string())
        };
    }
    // This version is pulled from Cargo.toml and set by Cargo
    let version = option_env_str!("CARGO_PKG_VERSION").unwrap();
    // Commit info is pulled from git and set by `build.rs`
    let commit_info = option_env_str!("RUFF_COMMIT_HASH").map(|commit_hash| CommitInfo {
        short_commit_hash: option_env_str!("RUFF_COMMIT_SHORT_HASH").unwrap(),
        commit_hash,
        commit_date: option_env_str!("RUFF_COMMIT_DATE").unwrap(),
        last_tag: option_env_str!("RUFF_LAST_TAG"),
        // Defaults to 0 when the tag distance is unset or unparsable.
        commits_since_last_tag: option_env_str!("RUFF_LAST_TAG_DISTANCE")
            .as_deref()
            .map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
    });
    VersionInfo {
        version,
        commit_info,
    }
}
#[cfg(test)]
mod tests {
    use insta::{assert_json_snapshot, assert_snapshot};
    use super::{CommitInfo, VersionInfo};
    /// Build a `VersionInfo` for version `0.0.0` with the given commit metadata.
    fn version_info(commit_info: Option<CommitInfo>) -> VersionInfo {
        VersionInfo {
            version: "0.0.0".to_string(),
            commit_info,
        }
    }
    /// Build the fixed commit metadata shared by these tests, varying only the
    /// number of commits since the last tag.
    fn commit_info(commits_since_last_tag: u32) -> CommitInfo {
        CommitInfo {
            short_commit_hash: "53b0f5d92".to_string(),
            commit_hash: "53b0f5d924110e5b26fbf09f6fd3a03d67b475b7".to_string(),
            last_tag: Some("v0.0.1".to_string()),
            commit_date: "2023-10-19".to_string(),
            commits_since_last_tag,
        }
    }
    #[test]
    fn version_formatting() {
        assert_snapshot!(version_info(None));
    }
    #[test]
    fn version_formatting_with_commit_info() {
        assert_snapshot!(version_info(Some(commit_info(0))));
    }
    #[test]
    fn version_formatting_with_commits_since_last_tag() {
        assert_snapshot!(version_info(Some(commit_info(24))));
    }
    #[test]
    fn version_serializable() {
        assert_json_snapshot!(version_info(Some(commit_info(0))));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/args.rs | crates/ruff/src/args.rs | use std::cmp::Ordering;
use std::fmt::{Formatter, Write as _};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;
use crate::commands::completions::config::{OptionString, OptionStringParser};
use anyhow::bail;
use clap::builder::Styles;
use clap::builder::styling::{AnsiColor, Effects};
use clap::builder::{TypedValueParser, ValueParserFactory};
use clap::{Parser, Subcommand};
use colored::Colorize;
use itertools::Itertools;
use path_absolutize::path_dedot;
use regex::Regex;
use ruff_graph::Direction;
use ruff_linter::line_width::LineLength;
use ruff_linter::logging::LogLevel;
use ruff_linter::registry::Rule;
use ruff_linter::settings::types::{
ExtensionPair, FilePattern, OutputFormat, PatternPrefixPair, PerFileIgnore, PreviewMode,
PythonVersion, UnsafeFixes,
};
use ruff_linter::{RuleParser, RuleSelector, RuleSelectorParser};
use ruff_options_metadata::{OptionEntry, OptionsMetadata};
use ruff_python_ast as ast;
use ruff_source_file::{LineIndex, OneIndexed, PositionEncoding};
use ruff_text_size::TextRange;
use ruff_workspace::configuration::{Configuration, RuleSelection};
use ruff_workspace::options::{Options, PycodestyleOptions};
use ruff_workspace::resolver::ConfigurationTransformer;
use rustc_hash::FxHashMap;
use toml;
/// All configuration options that can be passed "globally",
/// i.e., can be passed to all subcommands
#[derive(Debug, Default, Clone, clap::Args)]
pub struct GlobalConfigArgs {
    // Flattened log-verbosity flags; converted to a `LogLevel` by
    // `log_level()` below. (`//` comments are used here deliberately:
    // `///` doc comments on clap fields become CLI help text.)
    #[clap(flatten)]
    log_level_args: LogLevelArgs,
    /// Either a path to a TOML configuration file (`pyproject.toml` or `ruff.toml`),
    /// or a TOML `<KEY> = <VALUE>` pair
    /// (such as you might find in a `ruff.toml` configuration file)
    /// overriding a specific configuration option.
    /// Overrides of individual settings using this option always take precedence
    /// over all configuration files, including configuration files that were also
    /// specified using `--config`.
    #[arg(
        long,
        action = clap::ArgAction::Append,
        value_name = "CONFIG_OPTION",
        value_parser = ConfigArgumentParser,
        global = true,
        help_heading = "Global options",
    )]
    pub config: Vec<SingleConfigArgument>,
    /// Ignore all configuration files.
    //
    // Note: We can't mark this as conflicting with `--config` here
    // as `--config` can be used for specifying configuration overrides
    // as well as configuration files.
    // Specifying a configuration file conflicts with `--isolated`;
    // specifying a configuration override does not.
    // If a user specifies `ruff check --isolated --config=ruff.toml`,
    // we emit an error later on, after the initial parsing by clap.
    #[arg(long, help_heading = "Global options", global = true)]
    pub isolated: bool,
}
impl GlobalConfigArgs {
    /// The effective log level derived from the verbosity flags.
    pub fn log_level(&self) -> LogLevel {
        LogLevel::from(&self.log_level_args)
    }
    /// Decompose into `(log level, --config arguments, --isolated flag)`.
    #[must_use]
    fn partition(self) -> (LogLevel, Vec<SingleConfigArgument>, bool) {
        (self.log_level(), self.config, self.isolated)
    }
}
// Configures Clap v3-style help menu colors:
// bold green section headers/usage, bold cyan literals, cyan placeholders.
const STYLES: Styles = Styles::styled()
    .header(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
    .placeholder(AnsiColor::Cyan.on_default());
// Top-level CLI definition parsed by clap; dispatched by `run` in `lib.rs`.
// (A `///` doc comment is avoided here since it would override the `about`
// text set in the `#[command]` attribute.)
#[derive(Debug, Parser)]
#[command(
    author,
    name = "ruff",
    about = "Ruff: An extremely fast Python linter and code formatter.",
    after_help = "For help with a specific command, see: `ruff help <command>`."
)]
#[command(version)]
#[command(styles = STYLES)]
pub struct Args {
    // The selected subcommand (`check`, `format`, `server`, ...).
    #[command(subcommand)]
    pub(crate) command: Command,
    // Options shared by every subcommand (log level, `--config`, `--isolated`).
    #[clap(flatten)]
    pub(crate) global_options: GlobalConfigArgs,
}
// Subcommand set for the `ruff` CLI. Variant doc comments double as the
// subcommand help text shown by clap — edit them with that in mind.
#[expect(clippy::large_enum_variant)]
#[derive(Debug, clap::Subcommand)]
pub enum Command {
    /// Run Ruff on the given files or directories.
    Check(CheckCommand),
    /// Explain a rule (or all rules).
    #[command(group = clap::ArgGroup::new("selector").multiple(false).required(true))]
    Rule {
        /// Rule to explain
        #[arg(value_parser=RuleParser, group = "selector", hide_possible_values = true)]
        rule: Option<Rule>,
        /// Explain all rules
        #[arg(long, conflicts_with = "rule", group = "selector")]
        all: bool,
        /// Output format
        #[arg(long, value_enum, default_value = "text")]
        output_format: HelpFormat,
    },
    /// List or describe the available configuration options.
    Config {
        /// Config key to show
        #[arg(
            value_parser = OptionStringParser,
            hide_possible_values = true
        )]
        option: Option<OptionString>,
        /// Output format
        #[arg(long, value_enum, default_value = "text")]
        output_format: HelpFormat,
    },
    /// List all supported upstream linters.
    Linter {
        /// Output format
        #[arg(long, value_enum, default_value = "text")]
        output_format: HelpFormat,
    },
    /// Clear any caches in the current directory and any subdirectories.
    Clean,
    /// Generate shell completion.
    #[clap(hide = true)]
    GenerateShellCompletion { shell: clap_complete_command::Shell },
    /// Run the Ruff formatter on the given files or directories.
    Format(FormatCommand),
    /// Run the language server.
    Server(ServerCommand),
    /// Run analysis over Python source code.
    #[clap(subcommand)]
    Analyze(AnalyzeCommand),
    /// Display Ruff's version
    Version {
        #[arg(long, value_enum, default_value = "text")]
        output_format: HelpFormat,
    },
}
// Subcommands of `ruff analyze`.
#[derive(Debug, Subcommand)]
pub enum AnalyzeCommand {
    /// Generate a map of Python file dependencies or dependents.
    Graph(AnalyzeGraphCommand),
}
// Arguments for `ruff analyze graph`.
#[derive(Clone, Debug, clap::Parser)]
#[expect(clippy::struct_excessive_bools)]
pub struct AnalyzeGraphCommand {
    /// List of files or directories to include.
    #[clap(help = "List of files or directories to include [default: .]")]
    files: Vec<PathBuf>,
    /// The direction of the import map. By default, generates a dependency map, i.e., a map from
    /// file to files that it depends on. Use `--direction dependents` to generate a map from file
    /// to files that depend on it.
    #[clap(long, value_enum, default_value_t)]
    direction: Direction,
    /// Attempt to detect imports from string literals.
    #[clap(long)]
    detect_string_imports: bool,
    /// The minimum number of dots in a string import to consider it a valid import.
    #[clap(long)]
    min_dots: Option<usize>,
    /// Enable preview mode. Use `--no-preview` to disable.
    #[arg(long, overrides_with("no_preview"))]
    preview: bool,
    // Hidden negation flag paired with `--preview` via `overrides_with`.
    #[clap(long, overrides_with("preview"), hide = true)]
    no_preview: bool,
    /// The minimum Python version that should be supported.
    #[arg(long, value_enum)]
    target_version: Option<PythonVersion>,
    /// Path to a virtual environment to use for resolving additional dependencies
    #[arg(long)]
    python: Option<PathBuf>,
    /// Include imports that are only used for type checking (i.e., imports within `if TYPE_CHECKING:` blocks).
    /// Use `--no-type-checking-imports` to exclude imports that are only used for type checking.
    #[arg(long, overrides_with("no_type_checking_imports"))]
    type_checking_imports: bool,
    // Hidden negation flag paired with `--type-checking-imports`.
    #[arg(long, overrides_with("type_checking_imports"), hide = true)]
    no_type_checking_imports: bool,
}
// The `Parser` derive is for ruff_dev, for ruff `Args` would be sufficient
#[derive(Clone, Debug, clap::Parser)]
#[expect(clippy::struct_excessive_bools)]
pub struct CheckCommand {
/// List of files or directories to check.
#[clap(help = "List of files or directories to check [default: .]")]
pub files: Vec<PathBuf>,
/// Apply fixes to resolve lint violations.
/// Use `--no-fix` to disable or `--unsafe-fixes` to include unsafe fixes.
#[arg(long, overrides_with("no_fix"))]
fix: bool,
#[clap(long, overrides_with("fix"), hide = true)]
no_fix: bool,
/// Include fixes that may not retain the original intent of the code.
/// Use `--no-unsafe-fixes` to disable.
#[arg(long, overrides_with("no_unsafe_fixes"))]
unsafe_fixes: bool,
#[arg(long, overrides_with("unsafe_fixes"), hide = true)]
no_unsafe_fixes: bool,
/// Show an enumeration of all fixed lint violations.
/// Use `--no-show-fixes` to disable.
#[arg(long, overrides_with("no_show_fixes"))]
show_fixes: bool,
#[clap(long, overrides_with("show_fixes"), hide = true)]
no_show_fixes: bool,
/// Avoid writing any fixed files back; instead, output a diff for each changed file to stdout, and exit 0 if there are no diffs.
/// Implies `--fix-only`.
#[arg(long, conflicts_with = "show_fixes")]
pub diff: bool,
/// Run in watch mode by re-running whenever files change.
#[arg(short, long)]
pub watch: bool,
/// Apply fixes to resolve lint violations, but don't report on, or exit non-zero for, leftover violations. Implies `--fix`.
/// Use `--no-fix-only` to disable or `--unsafe-fixes` to include unsafe fixes.
#[arg(long, overrides_with("no_fix_only"))]
fix_only: bool,
#[clap(long, overrides_with("fix_only"), hide = true)]
no_fix_only: bool,
/// Ignore any `# noqa` comments.
#[arg(long)]
ignore_noqa: bool,
/// Output serialization format for violations.
/// The default serialization format is "full".
#[arg(long, value_enum, env = "RUFF_OUTPUT_FORMAT")]
pub output_format: Option<OutputFormat>,
/// Specify file to write the linter output to (default: stdout).
#[arg(short, long, env = "RUFF_OUTPUT_FILE")]
pub output_file: Option<PathBuf>,
/// The minimum Python version that should be supported.
#[arg(long, value_enum)]
pub target_version: Option<PythonVersion>,
/// Enable preview mode; checks will include unstable rules and fixes.
/// Use `--no-preview` to disable.
#[arg(long, overrides_with("no_preview"))]
preview: bool,
#[clap(long, overrides_with("preview"), hide = true)]
no_preview: bool,
/// Comma-separated list of rule codes to enable (or ALL, to enable all rules).
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub select: Option<Vec<RuleSelector>>,
/// Comma-separated list of rule codes to disable.
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub ignore: Option<Vec<RuleSelector>>,
/// Like --select, but adds additional rule codes on top of those already specified.
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub extend_select: Option<Vec<RuleSelector>>,
/// Like --ignore. (Deprecated: You can just use --ignore instead.)
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide = true
)]
pub extend_ignore: Option<Vec<RuleSelector>>,
/// List of mappings from file pattern to code to exclude.
#[arg(long, value_delimiter = ',', help_heading = "Rule selection")]
pub per_file_ignores: Option<Vec<PatternPrefixPair>>,
/// Like `--per-file-ignores`, but adds additional ignores on top of those already specified.
#[arg(long, value_delimiter = ',', help_heading = "Rule selection")]
pub extend_per_file_ignores: Option<Vec<PatternPrefixPair>>,
/// List of paths, used to omit files and/or directories from analysis.
#[arg(
long,
value_delimiter = ',',
value_name = "FILE_PATTERN",
help_heading = "File selection"
)]
pub exclude: Option<Vec<FilePattern>>,
/// Like --exclude, but adds additional files and directories on top of those already excluded.
#[arg(
long,
value_delimiter = ',',
value_name = "FILE_PATTERN",
help_heading = "File selection"
)]
pub extend_exclude: Option<Vec<FilePattern>>,
/// List of rule codes to treat as eligible for fix. Only applicable when fix itself is enabled (e.g., via `--fix`).
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub fixable: Option<Vec<RuleSelector>>,
/// List of rule codes to treat as ineligible for fix. Only applicable when fix itself is enabled (e.g., via `--fix`).
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub unfixable: Option<Vec<RuleSelector>>,
/// Like --fixable, but adds additional rule codes on top of those already specified.
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide_possible_values = true
)]
pub extend_fixable: Option<Vec<RuleSelector>>,
/// Like --unfixable. (Deprecated: You can just use --unfixable instead.)
#[arg(
long,
value_delimiter = ',',
value_name = "RULE_CODE",
value_parser = RuleSelectorParser,
help_heading = "Rule selection",
hide = true
)]
pub extend_unfixable: Option<Vec<RuleSelector>>,
/// Respect file exclusions via `.gitignore` and other standard ignore files.
/// Use `--no-respect-gitignore` to disable.
#[arg(
long,
overrides_with("no_respect_gitignore"),
help_heading = "File selection"
)]
respect_gitignore: bool,
#[clap(long, overrides_with("respect_gitignore"), hide = true)]
no_respect_gitignore: bool,
/// Enforce exclusions, even for paths passed to Ruff directly on the command-line.
/// Use `--no-force-exclude` to disable.
#[arg(
long,
overrides_with("no_force_exclude"),
help_heading = "File selection"
)]
force_exclude: bool,
#[clap(long, overrides_with("force_exclude"), hide = true)]
no_force_exclude: bool,
/// Set the line-length for length-associated rules and automatic formatting.
#[arg(long, help_heading = "Rule configuration", hide = true)]
pub line_length: Option<LineLength>,
/// Regular expression matching the name of dummy variables.
#[arg(long, help_heading = "Rule configuration", hide = true)]
pub dummy_variable_rgx: Option<Regex>,
/// Disable cache reads.
#[arg(short, long, env = "RUFF_NO_CACHE", help_heading = "Miscellaneous")]
pub no_cache: bool,
/// Path to the cache directory.
#[arg(long, env = "RUFF_CACHE_DIR", help_heading = "Miscellaneous")]
pub cache_dir: Option<PathBuf>,
/// The name of the file when passing it through stdin.
#[arg(long, help_heading = "Miscellaneous")]
pub stdin_filename: Option<PathBuf>,
/// List of mappings from file extension to language (one of `python`, `ipynb`, `pyi`). For
/// example, to treat `.ipy` files as IPython notebooks, use `--extension ipy:ipynb`.
#[arg(long, value_delimiter = ',')]
pub extension: Option<Vec<ExtensionPair>>,
/// Exit with status code "0", even upon detecting lint violations.
#[arg(
short,
long,
help_heading = "Miscellaneous",
conflicts_with = "exit_non_zero_on_fix"
)]
pub exit_zero: bool,
/// Exit with a non-zero status code if any files were modified via fix, even if no lint violations remain.
#[arg(long, help_heading = "Miscellaneous", conflicts_with = "exit_zero")]
pub exit_non_zero_on_fix: bool,
/// Show counts for every rule with at least one violation.
#[arg(
long,
// Unsupported default-command arguments.
conflicts_with = "diff",
conflicts_with = "watch",
)]
pub statistics: bool,
/// Enable automatic additions of `noqa` directives to failing lines.
/// Optionally provide a reason to append after the codes.
#[arg(
long,
value_name = "REASON",
default_missing_value = "",
num_args = 0..=1,
require_equals = true,
// conflicts_with = "add_noqa",
conflicts_with = "show_files",
conflicts_with = "show_settings",
// Unsupported default-command arguments.
conflicts_with = "ignore_noqa",
conflicts_with = "statistics",
conflicts_with = "stdin_filename",
conflicts_with = "watch",
conflicts_with = "fix",
conflicts_with = "diff",
)]
pub add_noqa: Option<String>,
/// See the files Ruff will be run against with the current settings.
#[arg(
long,
// Fake subcommands.
conflicts_with = "add_noqa",
// conflicts_with = "show_files",
conflicts_with = "show_settings",
// Unsupported default-command arguments.
conflicts_with = "ignore_noqa",
conflicts_with = "statistics",
conflicts_with = "stdin_filename",
conflicts_with = "watch",
)]
pub show_files: bool,
/// See the settings Ruff will use to lint a given Python file.
#[arg(
long,
// Fake subcommands.
conflicts_with = "add_noqa",
conflicts_with = "show_files",
// conflicts_with = "show_settings",
// Unsupported default-command arguments.
conflicts_with = "ignore_noqa",
conflicts_with = "statistics",
conflicts_with = "stdin_filename",
conflicts_with = "watch",
)]
pub show_settings: bool,
}
// CLI surface of `ruff format`.
// NOTE: the `///` comments on fields below double as the clap-generated
// `--help` text, i.e. they are user-facing strings — do not edit them as if
// they were ordinary documentation.
#[derive(Clone, Debug, clap::Parser)]
#[expect(clippy::struct_excessive_bools)]
pub struct FormatCommand {
    /// List of files or directories to format.
    #[clap(help = "List of files or directories to format [default: .]")]
    pub files: Vec<PathBuf>,
    /// Avoid writing any formatted files back; instead, exit with a non-zero status code if any
    /// files would have been modified, and zero otherwise.
    #[arg(long)]
    pub check: bool,
    /// Avoid writing any formatted files back; instead, exit with a non-zero status code and the
    /// difference between the current file and how the formatted file would look like.
    #[arg(long)]
    pub diff: bool,
    /// Disable cache reads.
    #[arg(short, long, env = "RUFF_NO_CACHE", help_heading = "Miscellaneous")]
    pub no_cache: bool,
    /// Path to the cache directory.
    #[arg(long, env = "RUFF_CACHE_DIR", help_heading = "Miscellaneous")]
    pub cache_dir: Option<PathBuf>,
    /// Respect file exclusions via `.gitignore` and other standard ignore files.
    /// Use `--no-respect-gitignore` to disable.
    #[arg(
        long,
        overrides_with("no_respect_gitignore"),
        help_heading = "File selection"
    )]
    respect_gitignore: bool,
    // Hidden negation twin of `--respect-gitignore`; the pair is collapsed
    // into an `Option<bool>` by `resolve_bool_arg` in `partition`.
    #[clap(long, overrides_with("respect_gitignore"), hide = true)]
    no_respect_gitignore: bool,
    /// List of paths, used to omit files and/or directories from analysis.
    #[arg(
        long,
        value_delimiter = ',',
        value_name = "FILE_PATTERN",
        help_heading = "File selection"
    )]
    pub exclude: Option<Vec<FilePattern>>,
    /// Enforce exclusions, even for paths passed to Ruff directly on the command-line.
    /// Use `--no-force-exclude` to disable.
    #[arg(
        long,
        overrides_with("no_force_exclude"),
        help_heading = "File selection"
    )]
    force_exclude: bool,
    // Hidden negation twin of `--force-exclude` (see `respect_gitignore`).
    #[clap(long, overrides_with("force_exclude"), hide = true)]
    no_force_exclude: bool,
    /// Set the line-length.
    #[arg(long, help_heading = "Format configuration")]
    pub line_length: Option<LineLength>,
    /// The name of the file when passing it through stdin.
    #[arg(long, help_heading = "Miscellaneous")]
    pub stdin_filename: Option<PathBuf>,
    /// List of mappings from file extension to language (one of `python`, `ipynb`, `pyi`). For
    /// example, to treat `.ipy` files as IPython notebooks, use `--extension ipy:ipynb`.
    #[arg(long, value_delimiter = ',')]
    pub extension: Option<Vec<ExtensionPair>>,
    /// The minimum Python version that should be supported.
    #[arg(long, value_enum)]
    pub target_version: Option<PythonVersion>,
    /// Enable preview mode; enables unstable formatting.
    /// Use `--no-preview` to disable.
    #[arg(long, overrides_with("no_preview"))]
    preview: bool,
    // Hidden negation twin of `--preview` (see `respect_gitignore`).
    #[clap(long, overrides_with("preview"), hide = true)]
    no_preview: bool,
    /// When specified, Ruff will try to only format the code in the given range.
    /// It might be necessary to extend the start backwards or the end forwards, to fully enclose a logical line.
    /// The `<RANGE>` uses the format `<start_line>:<start_column>-<end_line>:<end_column>`.
    ///
    /// - The line and column numbers are 1 based.
    /// - The column specifies the nth-unicode codepoint on that line.
    /// - The end offset is exclusive.
    /// - The column numbers are optional. You can write `--range=1-2` instead of `--range=1:1-2:1`.
    /// - The end position is optional. You can write `--range=2` to format the entire document starting from the second line.
    /// - The start position is optional. You can write `--range=-3` to format the first three lines of the document.
    ///
    /// The option can only be used when formatting a single file. Range formatting of notebooks is unsupported.
    #[clap(long, help_heading = "Editor options", verbatim_doc_comment)]
    pub range: Option<FormatRange>,
    /// Exit with a non-zero status code if any files were modified via format, even if all files were formatted successfully.
    #[arg(long, help_heading = "Miscellaneous", alias = "exit-non-zero-on-fix")]
    pub exit_non_zero_on_format: bool,
    /// Output serialization format for violations, when used with `--check`.
    /// The default serialization format is "full".
    ///
    /// Note that this option is currently only respected in preview mode. A warning will be emitted
    /// if this flag is used on stable.
    #[arg(long, value_enum, env = "RUFF_OUTPUT_FORMAT")]
    pub output_format: Option<OutputFormat>,
}
// CLI surface of `ruff server`.
// NOTE: the `///` comments below double as clap help text (user-facing).
#[derive(Copy, Clone, Debug, clap::Parser)]
pub struct ServerCommand {
    /// Enable preview mode. Use `--no-preview` to disable.
    ///
    /// This enables unstable server features and turns on the preview mode for the linter
    /// and the formatter.
    #[arg(long, overrides_with("no_preview"))]
    preview: bool,
    // Hidden negation twin of `--preview`; the pair is collapsed into an
    // `Option<bool>` by `resolve_preview`.
    #[clap(long, overrides_with("preview"), hide = true)]
    no_preview: bool,
}
impl ServerCommand {
    /// Collapse the `--preview`/`--no-preview` flag pair into an optional
    /// boolean (`None` when neither flag was passed on the command line).
    pub(crate) fn resolve_preview(self) -> Option<bool> {
        let Self {
            preview,
            no_preview,
        } = self;
        resolve_bool_arg(preview, no_preview)
    }
}
// Output-format selector exposed as a clap value enum; clap derives the
// kebab-cased possible values (`text`, `json`) from the variant names.
// Variant doc comments would surface in `--help` output, so none are added.
#[derive(Debug, Clone, Copy, clap::ValueEnum)]
pub enum HelpFormat {
    Text,
    Json,
}
// Global verbosity flags, shared by all subcommands (`global = true`).
// All three belong to the same clap `verbosity` group; the conversion to
// `LogLevel` (see the `From` impl below) resolves precedence as
// silent > quiet > verbose.
// NOTE: the `///` comments double as clap help text (user-facing).
#[expect(clippy::module_name_repetitions)]
#[derive(Debug, Default, Clone, clap::Args)]
pub struct LogLevelArgs {
    /// Enable verbose logging.
    #[arg(
        short,
        long,
        global = true,
        group = "verbosity",
        help_heading = "Log levels"
    )]
    pub verbose: bool,
    /// Print diagnostics, but nothing else.
    #[arg(
        short,
        long,
        global = true,
        group = "verbosity",
        help_heading = "Log levels"
    )]
    pub quiet: bool,
    /// Disable all logging (but still exit with status code "1" upon detecting diagnostics).
    #[arg(
        short,
        long,
        global = true,
        group = "verbosity",
        help_heading = "Log levels"
    )]
    pub silent: bool,
}
impl From<&LogLevelArgs> for LogLevel {
    fn from(args: &LogLevelArgs) -> Self {
        // Precedence: `--silent` beats `--quiet` beats `--verbose`; with no
        // flag set, fall back to the default level.
        match (args.silent, args.quiet, args.verbose) {
            (true, ..) => Self::Silent,
            (_, true, _) => Self::Quiet,
            (.., true) => Self::Verbose,
            _ => Self::Default,
        }
    }
}
/// Configuration-related arguments passed via the CLI.
///
/// Precedence when resolving settings (lowest to highest): configuration
/// files, `--config "KEY=VALUE"` overrides, then dedicated flags such as
/// `--line-length` — see the `ConfigurationTransformer` impl below.
#[derive(Default)]
pub struct ConfigArguments {
    /// Whether the user specified --isolated on the command line
    pub(crate) isolated: bool,
    /// The logging level to be used, derived from command-line arguments passed
    pub(crate) log_level: LogLevel,
    /// Path to a pyproject.toml or ruff.toml configuration file (etc.).
    /// Either 0 or 1 configuration file paths may be provided on the command line.
    config_file: Option<PathBuf>,
    /// Overrides provided via the `--config "KEY=VALUE"` option.
    /// An arbitrary number of these overrides may be provided on the command line.
    /// These overrides take precedence over all configuration files,
    /// even configuration files that were also specified using `--config`.
    overrides: Configuration,
    /// Overrides provided via dedicated flags such as `--line-length` etc.
    /// These overrides take precedence over all configuration files,
    /// and also over all overrides specified using any `--config "KEY=VALUE"` flags.
    per_flag_overrides: ExplicitConfigOverrides,
}
impl ConfigArguments {
    /// The configuration file passed via `--config <PATH>`, if any.
    pub fn config_file(&self) -> Option<&Path> {
        self.config_file.as_deref()
    }
    /// Build a `ConfigArguments` from the global CLI options plus the
    /// dedicated-flag overrides collected by a subcommand's `partition`.
    ///
    /// The repeatable `--config` flag is split into at most one file path and
    /// any number of `KEY=VALUE` setting overrides; a file path combined with
    /// `--isolated`, or more than one file path, is rejected with an error.
    fn from_cli_arguments(
        global_options: GlobalConfigArgs,
        per_flag_overrides: ExplicitConfigOverrides,
    ) -> anyhow::Result<Self> {
        let (log_level, config_options, isolated) = global_options.partition();
        let mut config_file: Option<PathBuf> = None;
        let mut overrides = Configuration::default();
        for option in config_options {
            match option {
                SingleConfigArgument::SettingsOverride(overridden_option) => {
                    // Avoid cloning the parsed options when this `Arc` is the
                    // sole reference.
                    let overridden_option = Arc::try_unwrap(overridden_option)
                        .unwrap_or_else(|option| option.deref().clone());
                    overrides = overrides.combine(Configuration::from_options(
                        overridden_option,
                        None,
                        &path_dedot::CWD,
                    )?);
                }
                SingleConfigArgument::FilePath(path) => {
                    if isolated {
                        bail!(
                            "\
The argument `--config={}` cannot be used with `--isolated`
tip: You cannot specify a configuration file and also specify `--isolated`,
as `--isolated` causes ruff to ignore all configuration files.
For more information, try `--help`.
",
                            path.display()
                        );
                    }
                    // At most one configuration *file* may be supplied.
                    if let Some(ref config_file) = config_file {
                        let (first, second) = (config_file.display(), path.display());
                        bail!(
                            "\
You cannot specify more than one configuration file on the command line.
tip: remove either `--config={first}` or `--config={second}`.
For more information, try `--help`.
"
                        );
                    }
                    config_file = Some(path);
                }
            }
        }
        Ok(Self {
            isolated,
            log_level,
            config_file,
            overrides,
            per_flag_overrides,
        })
    }
}
impl ConfigurationTransformer for ConfigArguments {
    fn transform(&self, config: Configuration) -> Configuration {
        // `--config "KEY=VALUE"` overrides beat the incoming configuration,
        // and dedicated flags (e.g. `--line-length`) beat both.
        let merged = self.overrides.clone().combine(config);
        self.per_flag_overrides.transform(merged)
    }
}
impl CheckCommand {
    /// Partition the CLI into command-line arguments and configuration
    /// overrides.
    pub fn partition(
        self,
        global_options: GlobalConfigArgs,
    ) -> anyhow::Result<(CheckArguments, ConfigArguments)> {
        // Flags that drive this invocation directly and never come from a
        // configuration file.
        let check_arguments = CheckArguments {
            add_noqa: self.add_noqa,
            diff: self.diff,
            exit_non_zero_on_fix: self.exit_non_zero_on_fix,
            exit_zero: self.exit_zero,
            files: self.files,
            ignore_noqa: self.ignore_noqa,
            no_cache: self.no_cache,
            output_file: self.output_file,
            show_files: self.show_files,
            show_settings: self.show_settings,
            statistics: self.statistics,
            stdin_filename: self.stdin_filename,
            watch: self.watch,
        };
        // Dedicated-flag overrides; paired `--x`/`--no-x` flags are collapsed
        // into `Option<bool>` via `resolve_bool_arg`.
        let cli_overrides = ExplicitConfigOverrides {
            dummy_variable_rgx: self.dummy_variable_rgx,
            exclude: self.exclude,
            extend_exclude: self.extend_exclude,
            extend_fixable: self.extend_fixable,
            extend_ignore: self.extend_ignore,
            extend_per_file_ignores: self.extend_per_file_ignores,
            extend_select: self.extend_select,
            extend_unfixable: self.extend_unfixable,
            fixable: self.fixable,
            ignore: self.ignore,
            line_length: self.line_length,
            per_file_ignores: self.per_file_ignores,
            preview: resolve_bool_arg(self.preview, self.no_preview).map(PreviewMode::from),
            respect_gitignore: resolve_bool_arg(self.respect_gitignore, self.no_respect_gitignore),
            select: self.select,
            target_version: self.target_version.map(ast::PythonVersion::from),
            unfixable: self.unfixable,
            // TODO(charlie): Included in `pyproject.toml`, but not inherited.
            cache_dir: self.cache_dir,
            fix: resolve_bool_arg(self.fix, self.no_fix),
            fix_only: resolve_bool_arg(self.fix_only, self.no_fix_only),
            unsafe_fixes: resolve_bool_arg(self.unsafe_fixes, self.no_unsafe_fixes)
                .map(UnsafeFixes::from),
            force_exclude: resolve_bool_arg(self.force_exclude, self.no_force_exclude),
            output_format: self.output_format,
            show_fixes: resolve_bool_arg(self.show_fixes, self.no_show_fixes),
            extension: self.extension,
            ..ExplicitConfigOverrides::default()
        };
        let config_args = ConfigArguments::from_cli_arguments(global_options, cli_overrides)?;
        Ok((check_arguments, config_args))
    }
}
impl FormatCommand {
    /// Partition the CLI into command-line arguments and configuration
    /// overrides.
    pub fn partition(
        self,
        global_options: GlobalConfigArgs,
    ) -> anyhow::Result<(FormatArguments, ConfigArguments)> {
        // Flags that drive this invocation directly and never come from a
        // configuration file.
        let format_arguments = FormatArguments {
            check: self.check,
            diff: self.diff,
            files: self.files,
            no_cache: self.no_cache,
            stdin_filename: self.stdin_filename,
            range: self.range,
            exit_non_zero_on_format: self.exit_non_zero_on_format,
        };
        // Collapse the paired `--x`/`--no-x` flags into `Option<bool>` values
        // before assembling the per-flag configuration overrides.
        let respect_gitignore =
            resolve_bool_arg(self.respect_gitignore, self.no_respect_gitignore);
        let preview = resolve_bool_arg(self.preview, self.no_preview).map(PreviewMode::from);
        let force_exclude = resolve_bool_arg(self.force_exclude, self.no_force_exclude);
        let cli_overrides = ExplicitConfigOverrides {
            line_length: self.line_length,
            respect_gitignore,
            exclude: self.exclude,
            preview,
            force_exclude,
            target_version: self.target_version.map(ast::PythonVersion::from),
            cache_dir: self.cache_dir,
            extension: self.extension,
            output_format: self.output_format,
            ..ExplicitConfigOverrides::default()
        };
        let config_args = ConfigArguments::from_cli_arguments(global_options, cli_overrides)?;
        Ok((format_arguments, config_args))
    }
}
impl AnalyzeGraphCommand {
/// Partition the CLI into command-line arguments and configuration
/// overrides.
pub fn partition(
self,
global_options: GlobalConfigArgs,
) -> anyhow::Result<(AnalyzeGraphArgs, ConfigArguments)> {
let format_arguments = AnalyzeGraphArgs {
files: self.files,
direction: self.direction,
python: self.python,
};
let cli_overrides = ExplicitConfigOverrides {
detect_string_imports: if self.detect_string_imports {
Some(true)
} else {
None
},
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/printer.rs | crates/ruff/src/printer.rs | use std::cmp::Reverse;
use std::hash::Hash;
use std::io::Write;
use anyhow::Result;
use bitflags::bitflags;
use colored::Colorize;
use itertools::{Itertools, iterate};
use ruff_linter::linter::FixTable;
use serde::Serialize;
use ruff_db::diagnostic::{Diagnostic, DisplayDiagnosticConfig, SecondaryCode};
use ruff_linter::fs::relativize_path;
use ruff_linter::logging::LogLevel;
use ruff_linter::message::{EmitterContext, render_diagnostics};
use ruff_linter::notify_user;
use ruff_linter::settings::flags::{self};
use ruff_linter::settings::types::{OutputFormat, UnsafeFixes};
use crate::diagnostics::{Diagnostics, FixMap};
bitflags! {
    /// Output toggles for the [`Printer`], combinable as a bit set.
    #[derive(Default, Debug, Copy, Clone)]
    pub(crate) struct Flags: u8 {
        /// Whether to show violations when emitting diagnostics.
        const SHOW_VIOLATIONS = 1 << 0;
        /// Whether to show a summary of the fixed violations when emitting diagnostics.
        const SHOW_FIX_SUMMARY = 1 << 1;
    }
}
/// Per-rule-code aggregate produced for `--statistics` output (also the JSON
/// serialization shape).
#[derive(Serialize)]
struct ExpandedStatistics<'a> {
    /// The rule's secondary code; `None` for diagnostics without one.
    code: Option<&'a SecondaryCode>,
    /// The rule's name.
    name: &'static str,
    /// Total number of violations with this code.
    count: usize,
    /// Serialized as `fixable` for backward compatibility: `true` only when
    /// *every* violation with this code has an applicable fix.
    #[serde(rename = "fixable")]
    all_fixable: bool,
    /// Number of violations with this code that have an applicable fix.
    fixable_count: usize,
}
impl ExpandedStatistics<'_> {
    /// Whether at least one of the aggregated violations has an applicable fix.
    fn any_fixable(&self) -> bool {
        self.fixable_count != 0
    }
}
/// Accumulator type for grouping diagnostics by code while folding the sorted
/// diagnostic list in `Printer::write_statistics`.
/// Format: (`code`, `representative_diagnostic`, `total_count`, `fixable_count`)
type DiagnosticGroup<'a> = (Option<&'a SecondaryCode>, &'a Diagnostic, usize, usize);
/// Renders linter results (violations, fix summaries, statistics) to a writer
/// according to the configured output format and verbosity.
pub(crate) struct Printer {
    /// Output serialization format for violations.
    format: OutputFormat,
    /// Verbosity; `LogLevel::Silent` suppresses output entirely.
    log_level: LogLevel,
    /// Whether fixes are being applied (`is_apply`) or only previewed.
    fix_mode: flags::FixMode,
    /// Whether unsafe fixes are enabled; affects which fixes count as applicable.
    unsafe_fixes: UnsafeFixes,
    /// Output toggles: show violations and/or the fix summary.
    flags: Flags,
}
impl Printer {
    /// Construct a [`Printer`] from the resolved output options.
    pub(crate) const fn new(
        format: OutputFormat,
        log_level: LogLevel,
        fix_mode: flags::FixMode,
        unsafe_fixes: UnsafeFixes,
        flags: Flags,
    ) -> Self {
        Self {
            format,
            log_level,
            fix_mode,
            unsafe_fixes,
            flags,
        }
    }
pub(crate) fn write_to_user(&self, message: &str) {
if self.log_level >= LogLevel::Default {
notify_user!("{}", message);
}
}
    /// Write the trailing summary line(s): error counts when violations are
    /// shown, or fixed/fixable counts otherwise, plus hints about fixes that
    /// `--fix`/`--unsafe-fixes` would enable. A no-op below the default log
    /// level.
    fn write_summary_text(&self, writer: &mut dyn Write, diagnostics: &Diagnostics) -> Result<()> {
        if self.log_level >= LogLevel::Default {
            let fixables = FixableStatistics::try_from(diagnostics, self.unsafe_fixes);
            // Total number of violations that were fixed, across all files.
            let fixed = diagnostics
                .fixed
                .values()
                .flat_map(FixTable::counts)
                .sum::<usize>();
            if self.flags.intersects(Flags::SHOW_VIOLATIONS) {
                let remaining = diagnostics.inner.len();
                let total = fixed + remaining;
                if fixed > 0 {
                    let s = if total == 1 { "" } else { "s" };
                    writeln!(
                        writer,
                        "Found {total} error{s} ({fixed} fixed, {remaining} remaining)."
                    )?;
                } else if remaining > 0 {
                    let s = if remaining == 1 { "" } else { "s" };
                    writeln!(writer, "Found {remaining} error{s}.")?;
                } else if remaining == 0 {
                    writeln!(writer, "All checks passed!")?;
                }
                if let Some(fixables) = fixables {
                    let fix_prefix = format!("[{}]", "*".cyan());
                    // When unsafe fixes are only hinted at (not enabled),
                    // also advertise the hidden unsafe fixes.
                    if self.unsafe_fixes.is_hint() {
                        if fixables.applicable > 0 && fixables.inapplicable_unsafe > 0 {
                            let es = if fixables.inapplicable_unsafe == 1 {
                                ""
                            } else {
                                "es"
                            };
                            writeln!(
                                writer,
                                "{fix_prefix} {} fixable with the `--fix` option ({} hidden fix{es} can be enabled with the `--unsafe-fixes` option).",
                                fixables.applicable, fixables.inapplicable_unsafe
                            )?;
                        } else if fixables.applicable > 0 {
                            // Only applicable fixes
                            writeln!(
                                writer,
                                "{fix_prefix} {} fixable with the `--fix` option.",
                                fixables.applicable,
                            )?;
                        } else {
                            // Only inapplicable fixes
                            let es = if fixables.inapplicable_unsafe == 1 {
                                ""
                            } else {
                                "es"
                            };
                            writeln!(
                                writer,
                                "No fixes available ({} hidden fix{es} can be enabled with the `--unsafe-fixes` option).",
                                fixables.inapplicable_unsafe
                            )?;
                        }
                    } else {
                        if fixables.applicable > 0 {
                            writeln!(
                                writer,
                                "{fix_prefix} {} fixable with the `--fix` option.",
                                fixables.applicable
                            )?;
                        }
                    }
                }
            } else {
                // Check if there are unapplied fixes
                let unapplied = {
                    if let Some(fixables) = fixables {
                        fixables.inapplicable_unsafe
                    } else {
                        0
                    }
                };
                if unapplied > 0 {
                    let es = if unapplied == 1 { "" } else { "es" };
                    if fixed > 0 {
                        let s = if fixed == 1 { "" } else { "s" };
                        // "Fixed" vs. "Would fix" depends on whether fixes
                        // were actually written back.
                        if self.fix_mode.is_apply() {
                            writeln!(
                                writer,
                                "Fixed {fixed} error{s} ({unapplied} additional fix{es} available with `--unsafe-fixes`)."
                            )?;
                        } else {
                            writeln!(
                                writer,
                                "Would fix {fixed} error{s} ({unapplied} additional fix{es} available with `--unsafe-fixes`)."
                            )?;
                        }
                    } else {
                        if self.fix_mode.is_apply() {
                            writeln!(
                                writer,
                                "No errors fixed ({unapplied} fix{es} available with `--unsafe-fixes`)."
                            )?;
                        } else {
                            writeln!(
                                writer,
                                "No errors would be fixed ({unapplied} fix{es} available with `--unsafe-fixes`)."
                            )?;
                        }
                    }
                } else {
                    if fixed > 0 {
                        let s = if fixed == 1 { "" } else { "s" };
                        if self.fix_mode.is_apply() {
                            writeln!(writer, "Fixed {fixed} error{s}.")?;
                        } else {
                            writeln!(writer, "Would fix {fixed} error{s}.")?;
                        }
                    }
                }
            }
        }
        Ok(())
    }
    /// Render all diagnostics once (non-watch mode): the violations (unless
    /// `SHOW_VIOLATIONS` is disabled), an optional fix summary, and the
    /// trailing summary text. A no-op when the log level is `Silent`.
    pub(crate) fn write_once(
        &self,
        diagnostics: &Diagnostics,
        writer: &mut dyn Write,
        preview: bool,
    ) -> Result<()> {
        if matches!(self.log_level, LogLevel::Silent) {
            return Ok(());
        }
        if !self.flags.intersects(Flags::SHOW_VIOLATIONS) {
            // Violations are hidden: only the fix summary and summary text
            // are emitted, and only for the human-readable formats.
            if matches!(
                self.format,
                OutputFormat::Full | OutputFormat::Concise | OutputFormat::Grouped
            ) {
                if self.flags.intersects(Flags::SHOW_FIX_SUMMARY) {
                    if !diagnostics.fixed.is_empty() {
                        writeln!(writer)?;
                        print_fix_summary(writer, &diagnostics.fixed)?;
                        writeln!(writer)?;
                    }
                }
                self.write_summary_text(writer, diagnostics)?;
            }
            return Ok(());
        }
        let context = EmitterContext::new(&diagnostics.notebook_indexes);
        let fixables = FixableStatistics::try_from(diagnostics, self.unsafe_fixes);
        let config = DisplayDiagnosticConfig::default()
            .preview(preview)
            .hide_severity(true)
            .color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize())
            .with_show_fix_status(show_fix_status(self.fix_mode, fixables.as_ref()))
            .with_fix_applicability(self.unsafe_fixes.required_applicability())
            .show_fix_diff(preview);
        render_diagnostics(writer, self.format, config, &context, &diagnostics.inner)?;
        // Human-readable formats additionally get the fix summary and the
        // trailing summary text after the violations.
        if matches!(
            self.format,
            OutputFormat::Full | OutputFormat::Concise | OutputFormat::Grouped
        ) {
            if self.flags.intersects(Flags::SHOW_FIX_SUMMARY) {
                if !diagnostics.fixed.is_empty() {
                    writeln!(writer)?;
                    print_fix_summary(writer, &diagnostics.fixed)?;
                    writeln!(writer)?;
                }
            }
            self.write_summary_text(writer, diagnostics)?;
        }
        writer.flush()?;
        Ok(())
    }
    /// Render per-rule violation counts (`--statistics`), either as an
    /// aligned Flake8-style text table or as JSON; other formats are
    /// rejected with an error.
    pub(crate) fn write_statistics(
        &self,
        diagnostics: &Diagnostics,
        writer: &mut dyn Write,
    ) -> Result<()> {
        let required_applicability = self.unsafe_fixes.required_applicability();
        // Sorting by code first means equal codes are adjacent, so a single
        // linear fold can group them while counting fixable violations.
        let statistics: Vec<ExpandedStatistics> = diagnostics
            .inner
            .iter()
            .sorted_by_key(|diagnostic| diagnostic.secondary_code())
            .fold(vec![], |mut acc: Vec<DiagnosticGroup>, diagnostic| {
                let is_fixable = diagnostic
                    .fix()
                    .is_some_and(|fix| fix.applies(required_applicability));
                let code = diagnostic.secondary_code();
                if let Some((prev_code, _prev_message, count, fixable_count)) = acc.last_mut() {
                    if *prev_code == code {
                        *count += 1;
                        if is_fixable {
                            *fixable_count += 1;
                        }
                        return acc;
                    }
                }
                acc.push((code, diagnostic, 1, usize::from(is_fixable)));
                acc
            })
            .iter()
            .map(
                |&(code, message, count, fixable_count)| ExpandedStatistics {
                    code,
                    name: message.name(),
                    count,
                    // Backward compatibility: `fixable` is true only when all violations are fixable.
                    // See: https://github.com/astral-sh/ruff/pull/21513
                    all_fixable: fixable_count == count,
                    fixable_count,
                },
            )
            .sorted_by_key(|statistic| Reverse(statistic.count))
            .collect();
        if statistics.is_empty() {
            return Ok(());
        }
        match self.format {
            OutputFormat::Full | OutputFormat::Concise => {
                // Compute the maximum number of digits in the count and code, for all messages,
                // to enable pretty-printing.
                let count_width = num_digits(
                    statistics
                        .iter()
                        .map(|statistic| statistic.count)
                        .max()
                        .unwrap(),
                );
                let code_width = statistics
                    .iter()
                    .map(|statistic| statistic.code.map_or(0, |s| s.len()))
                    .max()
                    .unwrap();
                let any_fixable = statistics.iter().any(ExpandedStatistics::any_fixable);
                // Markers: `[*]` all fixable, `[-]` partially fixable,
                // `[ ]` unfixable. Omitted entirely when nothing is fixable.
                let all_fixable = format!("[{}] ", "*".cyan());
                let partially_fixable = format!("[{}] ", "-".cyan());
                let unfixable = "[ ] ";
                // By default, we mimic Flake8's `--statistics` format.
                for statistic in &statistics {
                    writeln!(
                        writer,
                        "{:>count_width$}\t{:<code_width$}\t{}{}",
                        statistic.count.to_string().bold(),
                        statistic
                            .code
                            .map(SecondaryCode::as_str)
                            .unwrap_or_default()
                            .red()
                            .bold(),
                        if any_fixable {
                            if statistic.all_fixable {
                                &all_fixable
                            } else if statistic.any_fixable() {
                                &partially_fixable
                            } else {
                                unfixable
                            }
                        } else {
                            ""
                        },
                        statistic.name,
                    )?;
                }
                self.write_summary_text(writer, diagnostics)?;
                return Ok(());
            }
            OutputFormat::Json => {
                writeln!(writer, "{}", serde_json::to_string_pretty(&statistics)?)?;
            }
            _ => {
                anyhow::bail!(
                    "Unsupported serialization format for statistics: {:?}",
                    self.format
                )
            }
        }
        writer.flush()?;
        Ok(())
    }
    /// Render diagnostics in watch mode, prefixed with a "Watching for file
    /// changes" notification. A no-op when the log level is `Silent`.
    pub(crate) fn write_continuously(
        &self,
        writer: &mut dyn Write,
        diagnostics: &Diagnostics,
        preview: bool,
    ) -> Result<()> {
        if matches!(self.log_level, LogLevel::Silent) {
            return Ok(());
        }
        if self.log_level >= LogLevel::Default {
            let s = if diagnostics.inner.len() == 1 {
                ""
            } else {
                "s"
            };
            notify_user!(
                "Found {} error{s}. Watching for file changes.",
                diagnostics.inner.len()
            );
        }
        let fixables = FixableStatistics::try_from(diagnostics, self.unsafe_fixes);
        if !diagnostics.inner.is_empty() {
            if self.log_level >= LogLevel::Default {
                writeln!(writer)?;
            }
            let context = EmitterContext::new(&diagnostics.notebook_indexes);
            // Outside preview mode, watch mode always renders concisely.
            let format = if preview {
                self.format
            } else {
                OutputFormat::Concise
            };
            let config = DisplayDiagnosticConfig::default()
                .preview(preview)
                .hide_severity(true)
                .color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize())
                .with_show_fix_status(show_fix_status(self.fix_mode, fixables.as_ref()))
                .with_fix_applicability(self.unsafe_fixes.required_applicability())
                .show_fix_diff(preview);
            render_diagnostics(writer, format, config, &context, &diagnostics.inner)?;
        }
        writer.flush()?;
        Ok(())
    }
    /// Clear the terminal between watch-mode re-runs; compiled out (a no-op)
    /// on WASM targets.
    pub(crate) fn clear_screen() -> Result<()> {
        #[cfg(not(target_family = "wasm"))]
        clearscreen::clear()?;
        Ok(())
    }
}
/// Number of decimal digits in `n`, with a minimum of 1 (so `0` still
/// occupies one column).
///
/// Used to compute widths when right-aligning counts in the statistics table
/// and fix summary.
fn num_digits(n: usize) -> usize {
    // A plain division loop is simpler (and dependency-free) compared to the
    // previous `itertools::iterate(..).take_while(..).count()` chain, and
    // behaves identically, including the `n == 0` case.
    let mut n = n;
    let mut digits = 1;
    while n >= 10 {
        n /= 10;
        digits += 1;
    }
    digits
}
/// Return `true` if the [`Printer`] should indicate that a rule is fixable.
fn show_fix_status(fix_mode: flags::FixMode, fixables: Option<&FixableStatistics>) -> bool {
    // If we're in application mode, avoid indicating that a rule is fixable.
    // If the specific violation were truly fixable, it would've been fixed in
    // this pass! (We're occasionally unable to determine whether a specific
    // violation is fixable without trying to fix it, so if fix is not
    // enabled, we may inadvertently indicate that a rule is fixable.)
    if fix_mode.is_apply() {
        return false;
    }
    matches!(fixables, Some(stats) if stats.any_applicable_fixes())
}
/// Print the per-file fix summary: a "Fixed N errors:" header followed by,
/// for each file (sorted by name), each fixed rule with its count (sorted by
/// count, descending).
///
/// # Panics
/// Asserts that at least one fix was applied; callers gate on a non-empty
/// `fixed` map.
fn print_fix_summary(writer: &mut dyn Write, fixed: &FixMap) -> Result<()> {
    let total = fixed
        .values()
        .map(|table| table.counts().sum::<usize>())
        .sum::<usize>();
    assert!(total > 0);
    // Width of the largest single per-rule count, for right-alignment.
    let num_digits = num_digits(
        fixed
            .values()
            .filter_map(|table| table.counts().max())
            .max()
            .unwrap(),
    );
    let s = if total == 1 { "" } else { "s" };
    let label = format!("Fixed {total} error{s}:");
    writeln!(writer, "{}", label.bold().green())?;
    for (filename, table) in fixed
        .iter()
        .sorted_by_key(|(filename, ..)| filename.as_str())
    {
        writeln!(
            writer,
            "{} {}{}",
            "-".cyan(),
            relativize_path(filename).bold(),
            ":".cyan()
        )?;
        for (code, name, count) in table.iter().sorted_by_key(|(.., count)| Reverse(*count)) {
            writeln!(
                writer,
                "    {count:>num_digits$} × {code} ({name})",
                code = code.to_string().red().bold(),
            )?;
        }
    }
    Ok(())
}
/// Statistics for [applicable][ruff_diagnostics::Applicability] fixes.
#[derive(Debug)]
struct FixableStatistics {
    /// Number of diagnostics whose fix applies at the current required
    /// applicability level.
    applicable: u32,
    /// Number of unsafe fixes that remain hidden unless `--unsafe-fixes` is
    /// enabled.
    inapplicable_unsafe: u32,
}
impl FixableStatistics {
    /// Tally fixes across `diagnostics`, returning `None` when there is
    /// nothing to report (no applicable fixes and no hidden unsafe ones).
    fn try_from(diagnostics: &Diagnostics, unsafe_fixes: UnsafeFixes) -> Option<Self> {
        let mut applicable = 0;
        let mut inapplicable_unsafe = 0;
        for fix in diagnostics.inner.iter().filter_map(|message| message.fix()) {
            if fix.applies(unsafe_fixes.required_applicability()) {
                applicable += 1;
            } else if fix.applicability().is_unsafe() {
                inapplicable_unsafe += 1;
            }
            // Inapplicable fixes at other levels provide no opt-in flag, so
            // they are not counted at all.
        }
        if applicable == 0 && inapplicable_unsafe == 0 {
            None
        } else {
            Some(Self {
                applicable,
                inapplicable_unsafe,
            })
        }
    }
    /// Whether any fix can actually be applied at the current level.
    fn any_applicable_fixes(&self) -> bool {
        self.applicable != 0
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/resolve.rs | crates/ruff/src/resolve.rs | use std::path::Path;
use anyhow::{Result, bail};
use log::debug;
use path_absolutize::path_dedot;
use ruff_workspace::configuration::Configuration;
use ruff_workspace::pyproject::{self, find_fallback_target_version};
use ruff_workspace::resolver::{
ConfigurationOrigin, ConfigurationTransformer, PyprojectConfig, PyprojectDiscoveryStrategy,
resolve_root_settings,
};
use ruff_python_ast as ast;
use crate::args::ConfigArguments;
/// Resolve the relevant settings strategy and defaults for the current
/// invocation.
///
/// Configuration sources are tried in strict priority order: isolated mode,
/// an explicitly passed configuration file, a `pyproject.toml` discovered in
/// an ancestor directory, a user-level settings file, and finally Ruff's
/// built-in defaults.
///
/// # Errors
///
/// Fails if the working directory does not exist, or if the selected
/// configuration cannot be read or converted into settings.
pub fn resolve(
    config_arguments: &ConfigArguments,
    stdin_filename: Option<&Path>,
) -> Result<PyprojectConfig> {
    let Ok(cwd) = std::env::current_dir() else {
        bail!("Working directory does not exist")
    };

    // First priority: if we're running in isolated mode, use the default settings.
    if config_arguments.isolated {
        let config = config_arguments.transform(Configuration::default());
        let settings = config.into_settings(&cwd)?;
        debug!("Isolated mode, not reading any pyproject.toml");
        return Ok(PyprojectConfig::new(
            PyprojectDiscoveryStrategy::Fixed,
            settings,
            None,
        ));
    }

    // Second priority: the user specified a `pyproject.toml` file. Use that
    // `pyproject.toml` for _all_ configuration, and resolve paths relative to the
    // current working directory. (This matches ESLint's behavior.)
    if let Some(pyproject) = config_arguments.config_file() {
        let settings = resolve_root_settings(
            pyproject,
            config_arguments,
            ConfigurationOrigin::UserSpecified,
        )?;
        debug!(
            "Using user-specified configuration file at: {}",
            pyproject.display()
        );
        return Ok(PyprojectConfig::new(
            PyprojectDiscoveryStrategy::Fixed,
            settings,
            Some(pyproject.to_path_buf()),
        ));
    }

    // Third priority: find a `pyproject.toml` file in either an ancestor of
    // `stdin_filename` (if set) or the current working path all paths relative to
    // that directory. (With `Strategy::Hierarchical`, we'll end up finding
    // the "closest" `pyproject.toml` file for every Python file later on,
    // so these act as the "default" settings.)
    if let Some(pyproject) = pyproject::find_settings_toml(stdin_filename.unwrap_or(&cwd))? {
        debug!(
            "Using configuration file (via parent) at: {}",
            pyproject.display()
        );
        let settings =
            resolve_root_settings(&pyproject, config_arguments, ConfigurationOrigin::Ancestor)?;
        return Ok(PyprojectConfig::new(
            PyprojectDiscoveryStrategy::Hierarchical,
            settings,
            Some(pyproject),
        ));
    }

    // Fourth priority: find a user-specific `pyproject.toml`, but resolve all paths
    // relative the current working directory. (With `Strategy::Hierarchical`, we'll
    // end up the "closest" `pyproject.toml` file for every Python file later on, so
    // these act as the "default" settings.)
    if let Some(pyproject) = pyproject::find_user_settings_toml() {
        // Transformer that derives a `target-version` from any nearby
        // `requires-python` before applying the CLI overrides, so that it
        // overrides the user-level configuration.
        struct FallbackTransformer<'a> {
            arguments: &'a ConfigArguments,
        }

        impl ConfigurationTransformer for FallbackTransformer<'_> {
            fn transform(&self, mut configuration: Configuration) -> Configuration {
                // The `requires-python` constraint from the `pyproject.toml` takes precedence
                // over the `target-version` from the user configuration.
                let fallback = find_fallback_target_version(&*path_dedot::CWD);
                if let Some(fallback) = fallback {
                    debug!("Derived `target-version` from found `requires-python`: {fallback:?}");
                    configuration.target_version = Some(fallback.into());
                }

                self.arguments.transform(configuration)
            }
        }

        debug!(
            "Using configuration file (via cwd) at: {}",
            pyproject.display()
        );
        let settings = resolve_root_settings(
            &pyproject,
            &FallbackTransformer {
                arguments: config_arguments,
            },
            ConfigurationOrigin::UserSettings,
        )?;
        return Ok(PyprojectConfig::new(
            PyprojectDiscoveryStrategy::Hierarchical,
            settings,
            Some(pyproject),
        ));
    }

    // Fallback: load Ruff's default settings, and resolve all paths relative to the
    // current working directory. (With `Strategy::Hierarchical`, we'll end up the
    // "closest" `pyproject.toml` file for every Python file later on, so these act
    // as the "default" settings.)
    debug!("Using Ruff default settings");
    let mut config = config_arguments.transform(Configuration::default());
    if config.target_version.is_none() {
        // If we have arrived here we know that there was no `pyproject.toml`
        // containing a `[tool.ruff]` section found in an ancestral directory.
        // (This is an implicit requirement in the function
        // `pyproject::find_settings_toml`.)
        // However, there may be a `pyproject.toml` with a `requires-python`
        // specified, and that is what we look for in this step.
        let fallback = find_fallback_target_version(stdin_filename.unwrap_or(&cwd));
        if let Some(version) = fallback {
            debug!("Derived `target-version` from found `requires-python`: {version:?}");
        }
        config.target_version = fallback.map(ast::PythonVersion::from);
    }

    let settings = config.into_settings(&cwd)?;
    Ok(PyprojectConfig::new(
        PyprojectDiscoveryStrategy::Hierarchical,
        settings,
        None,
    ))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/main.rs | crates/ruff/src/main.rs | use std::io::Write;
use std::process::ExitCode;
use anyhow::Context;
use clap::Parser;
use colored::Colorize;
use ruff::args::Args;
use ruff::{ExitStatus, run};
// Global allocator selection. On Windows, use mimalloc.
#[cfg(target_os = "windows")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

// On other platforms, use jemalloc — but only on the OS/arch combinations
// listed below (presumably the targets where `tikv-jemallocator` is known to
// build and behave; TODO confirm). Everything else keeps the system allocator.
#[cfg(all(
    not(target_os = "windows"),
    not(target_os = "openbsd"),
    not(target_os = "aix"),
    not(target_os = "android"),
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "riscv64"
    )
))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
/// CLI entry point: set up terminal colors, expand argument files, parse the
/// CLI, and translate the outcome of `run` into a process exit code.
pub fn main() -> ExitCode {
    // Enable ANSI color handling on Windows 10 consoles.
    #[cfg(windows)]
    assert!(colored::control::set_virtual_terminal(true).is_ok());

    // Honor the `FORCE_COLOR` convention: any non-empty value forces color.
    if std::env::var_os("FORCE_COLOR").is_some_and(|force_color| !force_color.is_empty()) {
        colored::control::set_override(true);
    }

    // Expand argument files (via `argfile`) in the raw OS arguments before
    // handing them to clap; failures here are reported like any other error.
    let expanded = match argfile::expand_args_from(
        wild::args_os(),
        argfile::parse_fromfile,
        argfile::PREFIX,
    )
    .context("Failed to read CLI arguments from files")
    {
        Ok(expanded) => expanded,
        Err(err) => return report_error(&err),
    };

    match run(Args::parse_from(expanded)) {
        Ok(status) => status.into(),
        Err(err) => report_error(&err),
    }
}
/// Print `err` and its chain of causes to stderr and return the generic
/// failure exit code; broken-pipe errors exit successfully instead.
fn report_error(err: &anyhow::Error) -> ExitCode {
    {
        // Exit "gracefully" on broken pipe errors.
        //
        // See: https://github.com/BurntSushi/ripgrep/blob/bf63fe8f258afc09bae6caa48f0ae35eaf115005/crates/core/main.rs#L47C1-L61C14
        for cause in err.chain() {
            if let Some(ioerr) = cause.downcast_ref::<std::io::Error>() {
                if ioerr.kind() == std::io::ErrorKind::BrokenPipe {
                    return ExitCode::from(0);
                }
            }
        }

        // Use `writeln` instead of `eprintln` to avoid panicking when the stderr pipe is broken.
        let mut stderr = std::io::stderr().lock();

        // This communicates that this isn't a linter error but ruff itself hard-errored for
        // some reason (e.g. failed to resolve the configuration)
        writeln!(stderr, "{}", "ruff failed".red().bold()).ok();
        // Currently we generally only see one error, but e.g. with io errors when resolving
        // the configuration it is helpful to chain errors ("resolving configuration failed" ->
        // "failed to read file: subdir/pyproject.toml")
        for cause in err.chain() {
            writeln!(stderr, "  {} {cause}", "Cause:".bold()).ok();
        }
    }
    ExitStatus::Error.into()
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/cache.rs | crates/ruff/src/cache.rs | use std::fmt::Debug;
use std::fs::{self, File};
use std::hash::Hasher;
use std::io::{self, BufReader, Write};
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, SystemTime};
use anyhow::{Context, Result};
use filetime::FileTime;
use itertools::Itertools;
use log::{debug, error};
use rayon::iter::ParallelIterator;
use rayon::iter::{IntoParallelIterator, ParallelBridge};
use rustc_hash::FxHashMap;
use tempfile::NamedTempFile;
use ruff_cache::{CacheKey, CacheKeyHasher};
use ruff_linter::package::PackageRoot;
use ruff_linter::{VERSION, warn_user};
use ruff_macros::CacheKey;
use ruff_workspace::Settings;
use ruff_workspace::resolver::Resolver;
// Type aliases documenting intent: all paths stored in the cache are relative
// to the package root, never absolute.
/// [`Path`] that is relative to the package root in [`PackageCache`].
pub(crate) type RelativePath = Path;
/// [`PathBuf`] that is relative to the package root in [`PackageCache`].
pub(crate) type RelativePathBuf = PathBuf;
/// Cache-invalidation key for a single file: a change to either the file's
/// modification time or its permissions invalidates its cached results.
#[derive(CacheKey)]
pub(crate) struct FileCacheKey {
    /// Timestamp when the file was last modified before the (cached) check.
    file_last_modified: FileTime,
    /// Permissions of the file before the (cached) check.
    file_permissions_mode: u32,
}
impl FileCacheKey {
    /// Build the invalidation key for `path` from its filesystem metadata.
    ///
    /// # Errors
    ///
    /// Propagates any I/O error raised while reading the file's metadata.
    pub(crate) fn from_path(path: &Path) -> io::Result<FileCacheKey> {
        // Construct a cache key for the file
        let metadata = path.metadata()?;

        #[cfg(unix)]
        let permissions = {
            use std::os::unix::fs::PermissionsExt;
            metadata.permissions().mode()
        };
        // Windows has no Unix mode bits; the read-only flag (0 or 1) stands in
        // for the permissions component of the key.
        #[cfg(windows)]
        let permissions: u32 = metadata.permissions().readonly().into();

        Ok(FileCacheKey {
            file_last_modified: FileTime::from_last_modification_time(&metadata),
            file_permissions_mode: permissions,
        })
    }
}
/// Cache.
///
/// `Cache` holds everything required to display the diagnostics for a single
/// package. The on-disk representation is represented in [`PackageCache`] (and
/// related) types.
///
/// This type manages the cache file, reading it from disk and writing it back
/// to disk (if required).
#[derive(Debug)]
pub(crate) struct Cache {
    /// Location of the cache.
    path: PathBuf,
    /// Package cache read from disk.
    package: PackageCache,
    /// Changes made compared to the (current) `package`.
    ///
    /// Files that are linted, but are not in `package.files` or are in
    /// `package.files` but are outdated. This gets merged with `package.files`
    /// when the cache is written back to disk in [`Cache::save`].
    changes: Mutex<Vec<Change>>,
    /// The "current" timestamp (milliseconds since the Unix epoch) used as
    /// cache for the updates of [`FileCache::last_seen`].
    #[expect(clippy::struct_field_names)]
    last_seen_cache: u64,
}
impl Cache {
    /// Open or create a new cache.
    ///
    /// `package_root` is the path to root of the package that is contained
    /// within this cache and must be canonicalized (to avoid considering `./`
    /// and `../project` being different).
    ///
    /// Finally `settings` is used to ensure we don't open a cache for different
    /// settings. It also defines the directory where to store the cache.
    ///
    /// Any open/decode failure is downgraded to a warning and an empty cache,
    /// so a missing or corrupt cache file can never break a run.
    pub(crate) fn open(package_root: PathBuf, settings: &Settings) -> Self {
        debug_assert!(package_root.is_absolute(), "package root not canonicalized");

        // The file name is a hash of the package root and the settings, so
        // distinct packages/configurations never share a cache file.
        let key = format!("{}", cache_key(&package_root, settings));
        let path = PathBuf::from_iter([&settings.cache_dir, Path::new(VERSION), Path::new(&key)]);

        let file = match File::open(&path) {
            Ok(file) => file,
            Err(err) if err.kind() == io::ErrorKind::NotFound => {
                // No cache exists yet, return an empty cache.
                return Cache::empty(path, package_root);
            }
            Err(err) => {
                warn_user!("Failed to open cache file `{}`: {err}", path.display());
                return Cache::empty(path, package_root);
            }
        };

        let mut package: PackageCache =
            match bincode::decode_from_reader(BufReader::new(file), bincode::config::standard()) {
                Ok(package) => package,
                Err(err) => {
                    warn_user!("Failed parse cache file `{}`: {err}", path.display());
                    return Cache::empty(path, package_root);
                }
            };

        // Sanity check: the cache file must belong to this package root;
        // otherwise drop all cached entries.
        if package.package_root != package_root {
            warn_user!(
                "Different package root in cache: expected `{}`, got `{}`",
                package_root.display(),
                package.package_root.display(),
            );
            package.files.clear();
        }
        Cache::new(path, package)
    }

    /// Create an empty `Cache` for `package_root`, to be stored at `path`.
    fn empty(path: PathBuf, package_root: PathBuf) -> Self {
        let package = PackageCache {
            package_root,
            files: FxHashMap::default(),
        };
        Cache::new(path, package)
    }

    #[expect(clippy::cast_possible_truncation)]
    fn new(path: PathBuf, package: PackageCache) -> Self {
        Cache {
            path,
            package,
            changes: Mutex::new(Vec::new()),
            // SAFETY: this will be truncated to the year ~2554 (so don't use
            // this code after that!).
            last_seen_cache: SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64,
        }
    }

    /// Applies the pending changes and persists the cache to disk, if it has been changed.
    ///
    /// # Errors
    ///
    /// Fails if the serialized cache cannot be written to a temporary file or
    /// the temporary file cannot be renamed into place — except for a Windows
    /// permission race, which is downgraded to a warning.
    pub(crate) fn persist(mut self) -> Result<()> {
        if !self.save() {
            // No changes made, no need to write the same cache file back to
            // disk.
            return Ok(());
        }

        // Write the cache to a temporary file first and then rename it for an "atomic" write.
        // Protects against data loss if the process is killed during the write and races between different ruff
        // processes, resulting in a corrupted cache file. https://github.com/astral-sh/ruff/issues/8147#issuecomment-1943345964
        let mut temp_file =
            NamedTempFile::new_in(self.path.parent().expect("Write path must have a parent"))
                .context("Failed to create temporary file")?;

        // Serialize to in-memory buffer because hyperfine benchmark showed that it's faster than
        // using a `BufWriter` and our cache files are small enough that streaming isn't necessary.
        let serialized = bincode::encode_to_vec(&self.package, bincode::config::standard())
            .context("Failed to serialize cache data")?;
        temp_file
            .write_all(&serialized)
            .context("Failed to write serialized cache to temporary file.")?;

        if let Err(err) = temp_file.persist(&self.path) {
            // On Windows, writing to the cache file can fail if the file is still open (e.g., if
            // the user is running Ruff from multiple processes over the same directory).
            if cfg!(windows) && err.error.kind() == io::ErrorKind::PermissionDenied {
                warn_user!(
                    "Failed to write cache file `{}`: {}",
                    self.path.display(),
                    err.error
                );
            } else {
                return Err(err).with_context(|| {
                    format!(
                        "Failed to rename temporary cache file to {}",
                        self.path.display()
                    )
                });
            }
        }
        Ok(())
    }

    /// Applies the pending changes without storing the cache to disk.
    ///
    /// Returns `true` if any changes were applied (i.e. the on-disk cache is
    /// now stale), `false` otherwise.
    #[expect(clippy::cast_possible_truncation)]
    pub(crate) fn save(&mut self) -> bool {
        /// Maximum duration for which we keep a file in cache that hasn't been seen.
        const MAX_LAST_SEEN: Duration = Duration::from_secs(30 * 24 * 60 * 60); // 30 days.

        let changes = std::mem::take(self.changes.get_mut().unwrap());
        if changes.is_empty() {
            return false;
        }

        // Remove cached files that we haven't seen in a while.
        let now = self.last_seen_cache;
        self.package.files.retain(|_, file| {
            // SAFETY: this will be truncated to the year ~2554.
            (now - *file.last_seen.get_mut()) <= MAX_LAST_SEEN.as_millis() as u64
        });

        // Apply any changes made and keep track of when we last saw files.
        for change in changes {
            let entry = self
                .package
                .files
                .entry(change.path)
                .and_modify(|existing| {
                    if existing.key != change.new_key {
                        // Reset the data if the key changed (i.e. the file was
                        // modified since the cached run).
                        existing.data = FileCacheData::default();
                    }
                    existing.key = change.new_key;
                    existing
                        .last_seen
                        .store(self.last_seen_cache, Ordering::Relaxed);
                })
                .or_insert_with(|| FileCache {
                    key: change.new_key,
                    last_seen: AtomicU64::new(self.last_seen_cache),
                    data: FileCacheData::default(),
                });
            change.new_data.apply(&mut entry.data);
        }
        true
    }

    /// Returns the relative path based on `path` and the package root.
    ///
    /// Returns `None` if `path` is not within the package.
    pub(crate) fn relative_path<'a>(&self, path: &'a Path) -> Option<&'a RelativePath> {
        path.strip_prefix(&self.package.package_root).ok()
    }

    /// Get the cached results for a single file at relative `path`. This
    /// uses `key` to determine if the results are still accurate.
    /// (i.e. if the file hasn't been modified since the cached run).
    ///
    /// This returns `None` if `key` differs from the cached key or if the
    /// cache doesn't contain results for the file.
    pub(crate) fn get(&self, path: &RelativePath, key: &FileCacheKey) -> Option<&FileCache> {
        let file = self.package.files.get(path)?;

        let mut hasher = CacheKeyHasher::new();
        key.cache_key(&mut hasher);

        // Make sure the file hasn't changed since the cached run.
        if file.key != hasher.finish() {
            return None;
        }

        // Refresh the entry's timestamp so `save` doesn't evict it as stale.
        file.last_seen.store(self.last_seen_cache, Ordering::SeqCst);

        Some(file)
    }

    /// Returns `true` if the cache records that `path` (still matching `key`)
    /// was formatted.
    pub(crate) fn is_formatted(&self, path: &RelativePath, key: &FileCacheKey) -> bool {
        self.get(path, key)
            .is_some_and(|entry| entry.data.formatted)
    }

    /// Add or update a file cache at `path` relative to the package root.
    ///
    /// The update is only buffered in `self.changes`; it takes effect on the
    /// next [`Cache::save`] / [`Cache::persist`].
    fn update(&self, path: RelativePathBuf, key: &FileCacheKey, data: ChangeData) {
        let mut hasher = CacheKeyHasher::new();
        key.cache_key(&mut hasher);

        self.changes.lock().unwrap().push(Change {
            path,
            new_key: hasher.finish(),
            new_data: data,
        });
    }

    /// Record the lint outcome for `path` (`yes == true` means no diagnostics).
    pub(crate) fn set_linted(&self, path: RelativePathBuf, key: &FileCacheKey, yes: bool) {
        self.update(path, key, ChangeData::Linted(yes));
    }

    /// Record that `path` was formatted.
    pub(crate) fn set_formatted(&self, path: RelativePathBuf, key: &FileCacheKey) {
        self.update(path, key, ChangeData::Formatted);
    }
}
/// On disk representation of a cache of a package.
///
/// Encoded/decoded with `bincode` when written to and read from disk.
#[derive(bincode::Encode, Debug, bincode::Decode)]
struct PackageCache {
    /// Path to the root of the package.
    ///
    /// Usually this is a directory, but it can also be a single file in case of
    /// single file "packages", e.g. scripts.
    package_root: PathBuf,
    /// Mapping of source file path (relative to `package_root`) to its cached
    /// data.
    files: FxHashMap<RelativePathBuf, FileCache>,
}
/// On disk representation of the cache per source file.
#[derive(bincode::Decode, Debug, bincode::Encode)]
pub(crate) struct FileCache {
    /// Key that determines if the cached item is still valid
    /// (hash of the file's [`FileCacheKey`]).
    key: u64,
    /// Timestamp when we last linted this file.
    ///
    /// Represented as the number of milliseconds since Unix epoch. This will
    /// break in 1970 + ~584 years (~2554).
    last_seen: AtomicU64,
    /// Cached lint/format outcomes for the file.
    data: FileCacheData,
}
impl FileCache {
    /// Return whether or not the file in the cache was linted and found to
    /// have no diagnostics (files with diagnostics are recorded as unlinted).
    pub(crate) fn linted(&self) -> bool {
        self.data.linted
    }
}
/// Per-file cached outcomes; defaults to "nothing known" for both.
#[derive(Debug, Default, bincode::Decode, bincode::Encode)]
struct FileCacheData {
    // `true` if the last lint run produced no diagnostics.
    linted: bool,
    // `true` if the file is known to be formatted.
    formatted: bool,
}
/// Returns the hash of `package_root` and `settings`, used as the cache file
/// name. (The crate version is encoded in the cache directory layout rather
/// than in this hash — see [`Cache::open`].)
fn cache_key(package_root: &Path, settings: &Settings) -> u64 {
    let mut state = CacheKeyHasher::new();
    package_root.cache_key(&mut state);
    settings.cache_key(&mut state);
    state.finish()
}
/// Initialize the cache at the specified `Path`: create the versioned cache
/// directory, a `CACHEDIR.TAG` marker, and a `.gitignore` that keeps the
/// cache out of version control.
pub(crate) fn init(path: &Path) -> Result<()> {
    // Create the cache directories.
    fs::create_dir_all(path.join(VERSION))?;

    // Add the CACHEDIR.TAG.
    cachedir::ensure_tag(path)?;

    // Add the .gitignore, treating "already exists" as success so repeated
    // initialization of the same directory is harmless.
    let gitignore = fs::OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(path.join(".gitignore"));
    match gitignore {
        Ok(mut file) => file.write_all(b"# Automatically created by ruff.\n*\n")?,
        Err(err) if err.kind() == io::ErrorKind::AlreadyExists => {}
        Err(err) => return Err(err.into()),
    }

    Ok(())
}
/// Abstraction over a set of per-package caches, implemented by
/// [`PackageCacheMap`] and by `Option<T: PackageCaches>` (where `None`
/// disables caching).
pub(crate) trait PackageCaches {
    /// Returns the cache for `package_root`, if one is available.
    fn get(&self, package_root: &Path) -> Option<&Cache>;

    /// Applies pending changes and writes the caches back to disk.
    fn persist(self) -> Result<()>;
}
impl<T> PackageCaches for Option<T>
where
T: PackageCaches,
{
fn get(&self, package_root: &Path) -> Option<&Cache> {
match self {
None => None,
Some(caches) => caches.get(package_root),
}
}
fn persist(self) -> Result<()> {
match self {
None => Ok(()),
Some(caches) => caches.persist(),
}
}
}
/// Mapping from package root to its open [`Cache`], covering every package
/// touched by a single run.
pub(crate) struct PackageCacheMap<'a>(FxHashMap<&'a Path, Cache>);

impl<'a> PackageCacheMap<'a> {
    /// Initialize the cache directories for all resolved settings, then open
    /// one [`Cache`] per unique package root (in parallel via rayon).
    pub(crate) fn init(
        package_roots: &FxHashMap<&'a Path, Option<PackageRoot<'a>>>,
        resolver: &Resolver,
    ) -> Self {
        // Create the on-disk cache layout; failures are logged, not fatal.
        fn init_cache(path: &Path) {
            if let Err(e) = init(path) {
                error!("Failed to initialize cache at {}: {e:?}", path.display());
            }
        }

        for settings in resolver.settings() {
            init_cache(&settings.cache_dir);
        }

        Self(
            package_roots
                .iter()
                .map(|(package, package_root)| {
                    // Fall back to the package path itself when no explicit
                    // package root was resolved.
                    package_root.map(PackageRoot::path).unwrap_or(package)
                })
                .unique()
                .par_bridge()
                .map(|cache_root| {
                    let settings = resolver.resolve(cache_root);
                    let cache = Cache::open(cache_root.to_path_buf(), settings);
                    (cache_root, cache)
                })
                .collect(),
        )
    }
}
impl PackageCaches for PackageCacheMap<'_> {
    fn get(&self, package_root: &Path) -> Option<&Cache> {
        // Log a debug note when no cache was prepared for this root, then
        // propagate the miss.
        match self.0.get(package_root) {
            Some(cache) => Some(cache),
            None => {
                debug!("No cache found for {}", package_root.display());
                None
            }
        }
    }

    fn persist(self) -> Result<()> {
        // Persist all package caches in parallel; the first error aborts.
        self.0
            .into_par_iter()
            .try_for_each(|(_root, cache)| cache.persist())
    }
}
/// A pending update to a single file's cache entry, buffered in
/// `Cache::changes` until `Cache::save` merges it into the package cache.
#[derive(Debug)]
struct Change {
    // Path relative to the package root.
    path: PathBuf,
    // New validity key: hash of the file's `FileCacheKey`.
    new_key: u64,
    // Which cached outcome to update.
    new_data: ChangeData,
}
/// Which aspect of a file's cached data a [`Change`] updates.
#[derive(Debug)]
enum ChangeData {
    // Outcome of a lint run: `true` when the file had no diagnostics.
    Linted(bool),
    // The file was formatted.
    Formatted,
}
impl ChangeData {
    /// Fold this change into the cached per-file `data`.
    fn apply(self, data: &mut FileCacheData) {
        match self {
            // A lint run records whether the file was clean.
            ChangeData::Linted(linted) => data.linted = linted,
            // A format run only ever marks the file as formatted; stale
            // entries are reset wholesale when the key changes.
            ChangeData::Formatted => data.formatted = true,
        }
    }
}
#[cfg(test)]
mod tests {
use std::env::temp_dir;
use std::fs;
use std::io;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicU64;
use std::time::SystemTime;
use anyhow::Result;
use filetime::{FileTime, set_file_mtime};
use itertools::Itertools;
use test_case::test_case;
use ruff_cache::CACHE_DIR_NAME;
use ruff_linter::package::PackageRoot;
use ruff_linter::settings::LinterSettings;
use ruff_linter::settings::flags;
use ruff_linter::settings::types::UnsafeFixes;
use ruff_python_ast::{PySourceType, PythonVersion};
use ruff_workspace::Settings;
use crate::cache::{self, ChangeData, FileCache, FileCacheData, FileCacheKey};
use crate::cache::{Cache, RelativePathBuf};
use crate::commands::format::{FormatCommandError, FormatMode, FormatResult, format_path};
use crate::diagnostics::{Diagnostics, lint_path};
// End-to-end check: linting a fixture tree twice — once cold, once through
// the populated cache — must produce identical diagnostics.
#[test_case("../ruff_linter/resources/test/fixtures", "ruff_tests/cache_same_results_ruff_linter"; "ruff_linter_fixtures")]
#[test_case("../ruff_notebook/resources/test/fixtures", "ruff_tests/cache_same_results_ruff_notebook"; "ruff_notebook_fixtures")]
fn same_results(package_root: &str, cache_dir_path: &str) {
    let mut cache_dir = temp_dir();
    cache_dir.push(cache_dir_path);
    // Start from a clean slate so stale cache files can't mask regressions.
    let _ = fs::remove_dir_all(&cache_dir);
    cache::init(&cache_dir).unwrap();

    let settings = Settings {
        cache_dir,
        linter: LinterSettings {
            unresolved_target_version: PythonVersion::latest().into(),
            ..Default::default()
        },
        ..Settings::default()
    };

    let package_root = fs::canonicalize(package_root).unwrap();
    let cache = Cache::open(package_root.clone(), &settings);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);

    // First (cold) pass: lint every fixture file and collect the expected
    // diagnostics while populating the cache.
    let mut paths = Vec::new();
    let mut paths_with_diagnostics = Vec::new();
    let mut expected_diagnostics = Diagnostics::default();
    for entry in fs::read_dir(&package_root).unwrap() {
        let entry = entry.unwrap();
        if !entry.file_type().unwrap().is_dir() {
            continue;
        }

        let dir_path = entry.path();
        if dir_path.ends_with(CACHE_DIR_NAME) {
            continue;
        }

        for entry in fs::read_dir(dir_path).unwrap() {
            let entry = entry.unwrap();
            if !entry.file_type().unwrap().is_file() {
                continue;
            }

            let path = entry.path();
            if path.ends_with("pyproject.toml") || path.ends_with("R.ipynb") {
                continue;
            }

            let mut diagnostics = lint_path(
                &path,
                Some(PackageRoot::root(&package_root)),
                &settings.linter,
                Some(&cache),
                flags::Noqa::Enabled,
                flags::FixMode::Generate,
                UnsafeFixes::Enabled,
            )
            .unwrap();
            if diagnostics.inner.is_empty() {
                // We won't load a notebook index from the cache for files without diagnostics,
                // so remove them from `expected_diagnostics` too. This allows us to keep the
                // full equality assertion below.
                diagnostics
                    .notebook_indexes
                    .remove(&path.to_string_lossy().to_string());
            } else {
                paths_with_diagnostics.push(path.clone());
            }
            paths.push(path);
            expected_diagnostics += diagnostics;
        }
    }
    assert_ne!(paths, &[] as &[std::path::PathBuf], "no files checked");

    cache.persist().unwrap();

    // Reopen the persisted cache; clean files must now have entries.
    let cache = Cache::open(package_root.clone(), &settings);
    assert_ne!(cache.package.files.len(), 0);

    paths_with_diagnostics.sort();
    for path in &paths {
        if paths_with_diagnostics.binary_search(path).is_ok() {
            continue; // We don't cache files with diagnostics.
        }
        let relative_path = cache.relative_path(path).unwrap();
        assert!(
            cache.package.files.contains_key(relative_path),
            "missing file from cache: `{}`",
            relative_path.display()
        );
    }

    // Second (warm) pass through the cache must reproduce the diagnostics.
    let mut got_diagnostics = Diagnostics::default();
    for path in paths {
        got_diagnostics += lint_path(
            &path,
            Some(PackageRoot::root(&package_root)),
            &settings.linter,
            Some(&cache),
            flags::Noqa::Enabled,
            flags::FixMode::Generate,
            UnsafeFixes::Enabled,
        )
        .unwrap();
    }

    assert_eq!(
        expected_diagnostics, got_diagnostics,
        "left == {expected_diagnostics:#?}, right == {got_diagnostics:#?}",
    );
}
// A clean lint run should enqueue exactly one cache change for the file.
#[test]
fn cache_adds_file_on_lint() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";

    let test_cache = TestCache::new("cache_adds_file_on_lint");
    let cache = test_cache.open();
    test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();
    test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");
    assert_eq!(
        cache.changes.lock().unwrap().len(),
        1,
        "A single new file should be added to the cache"
    );

    cache.persist().unwrap();
}

// Linting multiple files should enqueue one change per file.
#[test]
fn cache_adds_files_on_lint() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";

    let test_cache = TestCache::new("cache_adds_files_on_lint");
    let cache = test_cache.open();
    test_cache.write_source_file("source_1.py", source);
    test_cache.write_source_file("source_2.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();
    test_cache
        .lint_file_with_cache("source_1.py", &cache)
        .expect("Failed to lint test file");
    test_cache
        .lint_file_with_cache("source_2.py", &cache)
        .expect("Failed to lint test file");
    assert_eq!(
        cache.changes.lock().unwrap().len(),
        2,
        "Both files should be added to the cache"
    );

    cache.persist().unwrap();
}

// A file with diagnostics still produces a change — but one that records it
// as `Linted(false)` so it is re-linted next run.
#[test]
fn cache_does_not_add_file_on_lint_with_diagnostic() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\", \"b\"])\n";

    let test_cache = TestCache::new("cache_does_not_add_file_on_lint_with_diagnostic");
    let cache = test_cache.open();
    test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();
    let results = test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");
    assert_eq!(results.inner.len(), 1, "Expected one F822 diagnostic");
    assert_eq!(
        cache.changes.lock().unwrap().len(),
        1,
        "Files with diagnostics still trigger change events"
    );
    assert!(
        cache
            .changes
            .lock()
            .unwrap()
            .last()
            .is_some_and(|change| matches!(change.new_data, ChangeData::Linted(false))),
        "Files with diagnostics are marked as unlinted"
    );

    cache.persist().unwrap();
}

// Formatting multiple files should likewise enqueue one change per file.
#[test]
fn cache_adds_files_on_format() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\", \"b\"])\n";

    let test_cache = TestCache::new("cache_adds_files_on_format");
    let cache = test_cache.open();
    test_cache.write_source_file("source_1.py", source);
    test_cache.write_source_file("source_2.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();
    test_cache
        .format_file_with_cache("source_1.py", &cache)
        .expect("Failed to format test file");
    test_cache
        .format_file_with_cache("source_2.py", &cache)
        .expect("Failed to format test file");
    assert_eq!(
        cache.changes.lock().unwrap().len(),
        2,
        "Both files should be added to the cache"
    );

    cache.persist().unwrap();
}
// Bumping a file's mtime must invalidate its cache entry (a fresh change is
// recorded) while leaving the reported diagnostics unchanged.
#[test]
fn cache_invalidated_on_file_modified_time() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";

    let test_cache = TestCache::new("cache_invalidated_on_file_modified_time");
    let cache = test_cache.open();
    let source_path = test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);

    let expected_diagnostics = test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");

    cache.persist().unwrap();
    let cache = test_cache.open();

    // Update the modified time of the file to a time in the future
    set_file_mtime(
        source_path,
        FileTime::from_system_time(SystemTime::now() + std::time::Duration::from_secs(1)),
    )
    .unwrap();

    let got_diagnostics = test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");

    assert_eq!(
        cache.changes.lock().unwrap().len(),
        1,
        "Cache should not be used, the file should be treated as new and added to the cache"
    );

    assert_eq!(
        expected_diagnostics, got_diagnostics,
        "The diagnostics should not change"
    );
}

// Changing a file's permission bits must invalidate its cache entry, since
// permissions are part of the `FileCacheKey`.
#[test]
fn cache_invalidated_on_permission_change() {
    // Regression test for issue #3086.

    #[cfg(unix)]
    fn flip_execute_permission_bit(path: &Path) -> io::Result<()> {
        use std::os::unix::fs::PermissionsExt;
        let file = fs::OpenOptions::new().write(true).open(path)?;
        let perms = file.metadata()?.permissions();
        file.set_permissions(PermissionsExt::from_mode(perms.mode() ^ 0o111))
    }

    #[cfg(windows)]
    fn flip_read_only_permission(path: &Path) -> io::Result<()> {
        let file = fs::OpenOptions::new().write(true).open(path)?;
        let mut perms = file.metadata()?.permissions();
        perms.set_readonly(!perms.readonly());
        file.set_permissions(perms)
    }

    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";

    let test_cache = TestCache::new("cache_invalidated_on_permission_change");
    let cache = test_cache.open();
    let path = test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);

    let expected_diagnostics = test_cache
        .lint_file_with_cache("source.py", &cache)
        .unwrap();

    cache.persist().unwrap();
    let cache = test_cache.open();

    // Flip the permissions on the file
    #[cfg(unix)]
    flip_execute_permission_bit(&path).unwrap();
    #[cfg(windows)]
    flip_read_only_permission(&path).unwrap();

    let got_diagnostics = test_cache
        .lint_file_with_cache("source.py", &cache)
        .unwrap();

    assert_eq!(
        cache.changes.lock().unwrap().len(),
        1,
        "Cache should not be used, the file should be treated as new and added to the cache"
    );

    assert_eq!(
        expected_diagnostics, got_diagnostics,
        "The diagnostics should not change"
    );
}
// Entries whose `last_seen` is older than `MAX_LAST_SEEN` are evicted when
// the cache is persisted.
#[test]
fn cache_removes_stale_files_on_persist() {
    let test_cache = TestCache::new("cache_removes_stale_files_on_persist");
    let mut cache = test_cache.open();

    // Add a file to the cache that hasn't been linted or seen since the '70s!
    let old_path_key = RelativePathBuf::from("old.py");
    cache.package.files.insert(
        old_path_key,
        FileCache {
            key: 123,
            last_seen: AtomicU64::new(123),
            data: FileCacheData::default(),
        },
    );

    // Now actually lint a file.
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";
    test_cache.write_source_file("new.py", source);
    let new_path_key = RelativePathBuf::from("new.py");
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    test_cache
        .lint_file_with_cache("new.py", &cache)
        .expect("Failed to lint test file");

    // Storing the cache should remove the old (`old.py`) file.
    cache.persist().unwrap();

    // So when we open the cache again it shouldn't contain `old.py`.
    let cache = test_cache.open();

    assert_eq!(
        cache.package.files.keys().collect_vec(),
        vec![&new_path_key],
        "Only the new file should be present"
    );
}

// Formatting after linting must update the existing entry (both flags end up
// set) rather than replacing it.
#[test]
fn format_updates_cache_entry() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\"])\n";

    let test_cache = TestCache::new("format_updates_cache_entry");
    let cache = test_cache.open();
    test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();

    // Cache the lint results
    test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");
    cache.persist().unwrap();

    let mut cache = test_cache.open();

    // Now format the file
    test_cache
        .format_file_with_cache("source.py", &cache)
        .expect("Failed to format test file");

    cache.save();

    assert_eq!(cache.package.files.len(), 1);

    let Some(file_cache) = cache.get(
        Path::new("source.py"),
        &FileCacheKey::from_path(&test_cache.package_root.join("source.py")).unwrap(),
    ) else {
        panic!("Cache entry for `source.py` is missing.");
    };
    assert!(file_cache.data.linted);
    assert!(file_cache.data.formatted);
}
// Modifying a file between runs must reset its cached lint state while the
// subsequent format run records only the new `formatted` flag.
#[test]
fn file_changes_invalidate_file_cache() {
    let source: &[u8] = b"a = 1\n\n__all__ = list([\"a\", \"b\"])\n";

    let test_cache = TestCache::new("file_changes_invalidate_file_cache");
    let cache = test_cache.open();
    let source_path = test_cache.write_source_file("source.py", source);
    assert_eq!(cache.changes.lock().unwrap().len(), 0);
    cache.persist().unwrap();

    let cache = test_cache.open();

    // Cache the format and lint results
    test_cache
        .lint_file_with_cache("source.py", &cache)
        .expect("Failed to lint test file");
    test_cache
        .format_file_with_cache("source.py", &cache)
        .expect("Failed to format test file");
    cache.persist().unwrap();

    let mut cache = test_cache.open();
    assert_eq!(cache.package.files.len(), 1);

    // Touch the file so its cache key no longer matches.
    set_file_mtime(
        &source_path,
        FileTime::from_system_time(SystemTime::now() + std::time::Duration::from_secs(1)),
    )
    .unwrap();

    test_cache
        .format_file_with_cache("source.py", &cache)
        .expect("Failed to format test file");
    cache.save();

    assert_eq!(cache.package.files.len(), 1);

    let Some(file_cache) = cache.get(
        Path::new("source.py"),
        &FileCacheKey::from_path(&source_path).unwrap(),
    ) else {
        panic!("Cache entry for `source.py` is missing.");
    };
    assert!(!file_cache.data.linted);
    assert!(file_cache.data.formatted);
}

/// Per-test fixture: a scratch package root plus the settings used by the
/// cache tests.
struct TestCache {
    package_root: PathBuf,
    settings: Settings,
}
impl TestCache {
fn new(test_case: &str) -> Self {
// Build a new cache directory and clear it
let mut test_dir = temp_dir();
test_dir.push("ruff_tests/cache");
test_dir.push(test_case);
let _ = fs::remove_dir_all(&test_dir);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/config.rs | crates/ruff/src/commands/config.rs | use anyhow::{Result, anyhow};
use crate::args::HelpFormat;
use ruff_options_metadata::OptionsMetadata;
use ruff_workspace::options::Options;
/// Print configuration option metadata to stdout.
///
/// With no `key`, dumps the metadata for every option; with a `key`, looks up
/// and prints that single entry, failing if the option is unknown.
#[expect(clippy::print_stdout)]
pub(crate) fn config(key: Option<&str>, format: HelpFormat) -> Result<()> {
    // No key requested: emit the full option listing and return early.
    let Some(key) = key else {
        let metadata = Options::metadata();
        match format {
            HelpFormat::Text => println!("{metadata}"),
            HelpFormat::Json => println!("{}", &serde_json::to_string_pretty(&metadata)?),
        }
        return Ok(());
    };

    // A specific key was requested: it must resolve to a known option.
    let Some(entry) = Options::metadata().find(key) else {
        return Err(anyhow!("Unknown option: {key}"));
    };

    match format {
        // Entries carry their own trailing layout, so use `print!` here.
        HelpFormat::Text => print!("{entry}"),
        HelpFormat::Json => println!("{}", &serde_json::to_string_pretty(&entry)?),
    }

    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/linter.rs | crates/ruff/src/commands/linter.rs | use std::fmt::Write;
use std::io;
use std::io::BufWriter;
use anyhow::Result;
use itertools::Itertools;
use serde::Serialize;
use strum::IntoEnumIterator;
use ruff_linter::registry::{Linter, RuleNamespace};
use crate::args::HelpFormat;
/// Serializable description of a linter, emitted by `ruff linter` in JSON mode.
#[derive(Serialize)]
struct LinterInfo {
    // Common rule-code prefix (e.g. "E"); may be the empty string when the
    // linter spans several upstream categories (see `linter` below).
    prefix: &'static str,
    name: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    url: Option<&'static str>,
    // Upstream categories, present only for linters without a single prefix.
    #[serde(skip_serializing_if = "Option::is_none")]
    categories: Option<Vec<LinterCategoryInfo>>,
}
/// Serializable description of a single upstream category within a linter.
#[derive(Serialize)]
struct LinterCategoryInfo {
    // Rule-code prefix for this category (e.g. "E" or "W").
    prefix: &'static str,
    name: &'static str,
}
pub(crate) fn linter(format: HelpFormat) -> Result<()> {
let mut stdout = BufWriter::new(io::stdout().lock());
let mut output = String::new();
match format {
HelpFormat::Text => {
for linter in Linter::iter() {
let prefix = match linter.common_prefix() {
"" => linter
.upstream_categories()
.unwrap()
.iter()
.map(|c| c.prefix)
.join("/"),
prefix => prefix.to_string(),
};
writeln!(output, "{:>4} {}", prefix, linter.name()).unwrap();
}
}
HelpFormat::Json => {
let linters: Vec<_> = Linter::iter()
.map(|linter_info| LinterInfo {
prefix: linter_info.common_prefix(),
name: linter_info.name(),
url: linter_info.url(),
categories: linter_info.upstream_categories().map(|cats| {
cats.iter()
.map(|c| LinterCategoryInfo {
prefix: c.prefix,
name: c.category,
})
.collect()
}),
})
.collect();
output.push_str(&serde_json::to_string_pretty(&linters)?);
output.push('\n');
}
}
io::Write::write_fmt(&mut stdout, format_args!("{output}"))?;
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/analyze_graph.rs | crates/ruff/src/commands/analyze_graph.rs | use crate::args::{AnalyzeGraphArgs, ConfigArguments};
use crate::resolve::resolve;
use crate::{ExitStatus, resolve_default_files};
use anyhow::Result;
use log::{debug, warn};
use path_absolutize::CWD;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_graph::{Direction, ImportMap, ModuleDb, ModuleImports};
use ruff_linter::package::PackageRoot;
use ruff_linter::source_kind::SourceKind;
use ruff_linter::{warn_user, warn_user_once};
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{ResolvedFile, match_exclusion, python_files_in_path};
use rustc_hash::FxHashMap;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
/// Generate an import map.
///
/// Discovers Python files under the given paths, detects each file's imports
/// in parallel, and prints the resulting dependency (or dependent) map as
/// pretty-printed JSON on stdout.
pub(crate) fn analyze_graph(
    args: AnalyzeGraphArgs,
    config_arguments: &ConfigArguments,
) -> Result<ExitStatus> {
    // Construct the "default" settings. These are used when no `pyproject.toml`
    // files are present, or files are injected from outside the hierarchy.
    let pyproject_config = resolve(config_arguments, None)?;
    if pyproject_config.settings.analyze.preview.is_disabled() {
        warn_user!("`ruff analyze graph` is experimental and may change without warning");
    }

    // Write all paths relative to the current working directory.
    let root =
        SystemPathBuf::from_path_buf(CWD.clone()).expect("Expected a UTF-8 working directory");

    // Find all Python files.
    let files = resolve_default_files(args.files, false);
    let (paths, resolver) = python_files_in_path(&files, &pyproject_config, config_arguments)?;

    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(ExitStatus::Success);
    }

    // Resolve all package roots.
    let package_roots = resolver
        .package_roots(
            &paths
                .iter()
                .flatten()
                .map(ResolvedFile::path)
                .collect::<Vec<_>>(),
        )
        .into_iter()
        .map(|(path, package)| {
            (
                path.to_path_buf(),
                package.map(PackageRoot::path).map(Path::to_path_buf),
            )
        })
        .collect::<FxHashMap<_, _>>();

    // Create a database from the source roots. A source root is the *parent*
    // of a package root (the directory the package is imported from).
    let src_roots = package_roots
        .values()
        .filter_map(|package| package.as_deref())
        .filter_map(|package| package.parent())
        .map(Path::to_path_buf)
        .filter_map(|path| SystemPathBuf::from_path_buf(path).ok())
        .collect();

    let db = ModuleDb::from_src_roots(
        src_roots,
        pyproject_config
            .settings
            .analyze
            .target_version
            .as_tuple()
            .into(),
        args.python
            .and_then(|python| SystemPathBuf::from_path_buf(python).ok()),
    )?;

    let imports = {
        // Create a cache for resolved globs.
        let glob_resolver = Arc::new(Mutex::new(GlobResolver::default()));

        // Collect and resolve the imports for each file. Results are pushed
        // into a shared, mutex-guarded vector from the worker tasks.
        let result = Arc::new(Mutex::new(Vec::new()));
        let inner_result = Arc::clone(&result);

        let db = db.clone();

        rayon::scope(move |scope| {
            for resolved_file in paths {
                let Ok(resolved_file) = resolved_file else {
                    continue;
                };

                let path = resolved_file.path();
                let package = path
                    .parent()
                    .and_then(|parent| package_roots.get(parent))
                    .and_then(Clone::clone);

                // Resolve the per-file settings.
                let settings = resolver.resolve(path);
                let string_imports = settings.analyze.string_imports;
                let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();
                let type_checking_imports = settings.analyze.type_checking_imports;

                // Skip excluded files.
                if (settings.file_resolver.force_exclude || !resolved_file.is_root())
                    && match_exclusion(
                        resolved_file.path(),
                        resolved_file.file_name(),
                        &settings.analyze.exclude,
                    )
                {
                    continue;
                }

                // Ignore non-Python files.
                let source_type = match settings.analyze.extension.get(path) {
                    None => match SourceType::from(&path) {
                        SourceType::Python(source_type) => source_type,
                        SourceType::Toml(_) => {
                            debug!("Ignoring TOML file: {}", path.display());
                            continue;
                        }
                    },
                    Some(language) => PySourceType::from(language),
                };

                // Convert to system paths.
                let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
                    warn!("Failed to convert package to system path");
                    continue;
                };
                let Ok(path) = SystemPathBuf::from_path_buf(resolved_file.into_path()) else {
                    warn!("Failed to convert path to system path");
                    continue;
                };

                // Clone the shared handles so the spawned task owns its copies.
                let db = db.clone();
                let glob_resolver = glob_resolver.clone();
                let root = root.clone();
                let result = inner_result.clone();
                scope.spawn(move |_| {
                    // Extract source code (handles both .py and .ipynb files)
                    let source_kind = match SourceKind::from_path(path.as_std_path(), source_type) {
                        Ok(Some(source_kind)) => source_kind,
                        Ok(None) => {
                            debug!("Skipping non-Python notebook: {path}");
                            return;
                        }
                        Err(err) => {
                            warn!("Failed to read source for {path}: {err}");
                            return;
                        }
                    };
                    let source_code = source_kind.source_code();

                    // Identify any imports via static analysis. Detection
                    // failures degrade to an empty import set with a warning.
                    let mut imports = ModuleImports::detect(
                        &db,
                        source_code,
                        source_type,
                        &path,
                        package.as_deref(),
                        string_imports,
                        type_checking_imports,
                    )
                    .unwrap_or_else(|err| {
                        warn!("Failed to generate import map for {path}: {err}");
                        ModuleImports::default()
                    });
                    debug!("Discovered {} imports for {}", imports.len(), path);

                    // Append any imports that were statically defined in the configuration.
                    if let Some((root, globs)) = include_dependencies {
                        let mut glob_resolver = glob_resolver.lock().unwrap();
                        imports.extend(glob_resolver.resolve(root, globs));
                    }

                    // Convert the path (and imports) to be relative to the working directory.
                    let path = path
                        .strip_prefix(&root)
                        .map(SystemPath::to_path_buf)
                        .unwrap_or(path);
                    let imports = imports.relative_to(&root);

                    result.lock().unwrap().push((path, imports));
                });
            }
        });

        // Collect the results. The scope above has joined all workers, so this
        // `Arc` is the sole remaining owner and `into_inner` succeeds.
        Arc::into_inner(result).unwrap().into_inner()?
    };

    // Generate the import map.
    let import_map = match args.direction {
        Direction::Dependencies => ImportMap::dependencies(imports),
        Direction::Dependents => ImportMap::dependents(imports),
    };

    // Print to JSON.
    writeln!(
        std::io::stdout(),
        "{}",
        serde_json::to_string_pretty(&import_map)?
    )?;

    // NOTE(review): the module database is deliberately leaked here —
    // presumably to skip a costly teardown right before process exit. Confirm
    // nothing relies on `Drop` side effects of `ModuleDb`.
    std::mem::forget(db);

    Ok(ExitStatus::Success)
}
/// A resolver for glob sets.
///
/// Wraps a [`GlobCache`] so that identical `(root, globs)` queries only walk
/// the filesystem once.
#[derive(Default, Debug)]
struct GlobResolver {
    cache: GlobCache,
}
impl GlobResolver {
    /// Resolve a set of globs, anchored at a given root, memoizing the result.
    ///
    /// Unreadable entries and non-UTF-8 paths are skipped with a warning; a
    /// failure to build the walker yields an empty (uncached) result.
    fn resolve(&mut self, root: PathBuf, globs: Vec<String>) -> Vec<SystemPathBuf> {
        // Fast path: this exact `(root, globs)` pair was resolved before.
        if let Some(hit) = self.cache.get(&root, &globs) {
            return hit.clone();
        }

        let built = globwalk::GlobWalkerBuilder::from_patterns(&root, &globs)
            .file_type(globwalk::FileType::FILE)
            .build();
        let walker = match built {
            Ok(walker) => walker,
            Err(err) => {
                warn!("Failed to read glob walker: {err}");
                // Builder failures are deliberately not cached.
                return Vec::new();
            }
        };

        let mut resolved = Vec::new();
        for entry in walker {
            let entry = match entry {
                Ok(entry) => entry,
                Err(err) => {
                    warn!("Failed to read glob entry: {err}");
                    continue;
                }
            };
            match SystemPathBuf::from_path_buf(entry.into_path()) {
                Ok(path) => resolved.push(path),
                Err(err) => {
                    warn!("Failed to convert path to system path: {}", err.display());
                }
            }
        }

        self.cache.insert(root, globs, resolved.clone());
        resolved
    }
}
/// A cache for resolved globs, keyed first by root directory, then by the
/// glob set resolved under that root.
#[derive(Default, Debug)]
struct GlobCache(FxHashMap<PathBuf, FxHashMap<Vec<String>, Vec<SystemPathBuf>>>);

impl GlobCache {
    /// Record the paths that a `(root, globs)` pair resolved to.
    fn insert(&mut self, root: PathBuf, globs: Vec<String>, paths: Vec<SystemPathBuf>) {
        self.0.entry(root).or_default().insert(globs, paths);
    }

    /// Look up the cached paths for a `(root, globs)` pair, if present.
    fn get(&self, root: &Path, globs: &[String]) -> Option<&Vec<SystemPathBuf>> {
        self.0.get(root)?.get(globs)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/format_stdin.rs | crates/ruff/src/commands/format_stdin.rs | use std::io::stdout;
use std::path::Path;
use anyhow::Result;
use log::error;
use ruff_linter::source_kind::{SourceError, SourceKind};
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::FormatterSettings;
use ruff_workspace::resolver::{PyprojectConfig, Resolver, match_exclusion, python_file_at_path};
use crate::ExitStatus;
use crate::args::{ConfigArguments, FormatArguments, FormatRange};
use crate::commands::format::{
FormatCommandError, FormatMode, FormatResult, FormattedSource, format_source,
warn_incompatible_formatter_settings,
};
use crate::stdin::{parrot_stdin, read_from_stdin};
/// Run the formatter over a single file, read from `stdin`.
///
/// Returns `Success` when the input is (or would be left) formatted, `Failure`
/// in check/diff mode when changes would be made, and `Error` on I/O or
/// formatting failures.
pub(crate) fn format_stdin(
    cli: &FormatArguments,
    config_arguments: &ConfigArguments,
    pyproject_config: &PyprojectConfig,
) -> Result<ExitStatus> {
    let mut resolver = Resolver::new(pyproject_config);
    warn_incompatible_formatter_settings(&resolver);

    let mode = FormatMode::from_cli(cli);

    // When `force-exclude` is set and `--stdin-filename` is excluded, skip
    // formatting entirely. In write mode the input must still be echoed back
    // (`parrot_stdin`) so the caller receives the unmodified file contents.
    if resolver.force_exclude() {
        if let Some(filename) = cli.stdin_filename.as_deref() {
            if !python_file_at_path(filename, &mut resolver, config_arguments)? {
                if mode.is_write() {
                    parrot_stdin()?;
                }
                return Ok(ExitStatus::Success);
            }

            if filename.file_name().is_some_and(|name| {
                match_exclusion(filename, name, &resolver.base_settings().formatter.exclude)
            }) {
                if mode.is_write() {
                    parrot_stdin()?;
                }
                return Ok(ExitStatus::Success);
            }
        }
    }

    let path = cli.stdin_filename.as_deref();

    let settings = &resolver.base_settings().formatter;

    // Determine the source type: explicit per-extension overrides win, then
    // the filename's extension. TOML input is passed through untouched.
    let source_type = match path.and_then(|path| settings.extension.get(path)) {
        None => match path.map(SourceType::from).unwrap_or_default() {
            SourceType::Python(source_type) => source_type,
            SourceType::Toml(_) => {
                if mode.is_write() {
                    parrot_stdin()?;
                }
                return Ok(ExitStatus::Success);
            }
        },
        Some(language) => PySourceType::from(language),
    };

    // Format the file.
    match format_source_code(path, cli.range, settings, source_type, mode) {
        Ok(result) => match mode {
            FormatMode::Write => Ok(ExitStatus::Success),
            FormatMode::Check | FormatMode::Diff => {
                // In check/diff mode, a would-be change maps to a failure exit code.
                if result.is_formatted() {
                    Ok(ExitStatus::Failure)
                } else {
                    Ok(ExitStatus::Success)
                }
            }
        },
        Err(err) => {
            error!("{err}");
            Ok(ExitStatus::Error)
        }
    }
}
/// Format source code read from `stdin`.
///
/// In `Write` mode the (possibly unchanged) contents are written to stdout;
/// in `Diff` mode a unified diff is printed when formatting changed anything;
/// `Check` mode produces no output. Returns whether the source was changed.
fn format_source_code(
    path: Option<&Path>,
    range: Option<FormatRange>,
    settings: &FormatterSettings,
    source_type: PySourceType,
    mode: FormatMode,
) -> Result<FormatResult, FormatCommandError> {
    // Pull the unformatted source off stdin.
    let source_code = read_from_stdin()
        .map_err(|err| FormatCommandError::Read(path.map(Path::to_path_buf), err.into()))?;

    let source_kind = match SourceKind::from_source_code(source_code, source_type) {
        Ok(Some(kind)) => kind,
        // A non-Python notebook: nothing to do.
        Ok(None) => return Ok(FormatResult::Unchanged),
        Err(err) => return Err(FormatCommandError::Read(path.map(Path::to_path_buf), err)),
    };

    // Run the formatter itself.
    let formatted = format_source(&source_kind, source_type, path, settings, range)?;

    match (&formatted, mode) {
        (FormattedSource::Formatted(formatted_source), FormatMode::Write) => {
            let mut writer = stdout().lock();
            formatted_source
                .write(&mut writer)
                .map_err(|err| FormatCommandError::Write(path.map(Path::to_path_buf), err))?;
        }
        (FormattedSource::Formatted(formatted_source), FormatMode::Diff) => {
            use std::io::Write;
            write!(
                &mut stdout().lock(),
                "{}",
                source_kind.diff(formatted_source, path).unwrap()
            )
            .map_err(|err| {
                FormatCommandError::Write(path.map(Path::to_path_buf), SourceError::Io(err))
            })?;
        }
        (FormattedSource::Formatted(_), FormatMode::Check) => {}
        (FormattedSource::Unchanged, mode) => {
            // Write to stdout regardless of whether the source was formatted.
            if mode.is_write() {
                let mut writer = stdout().lock();
                source_kind
                    .write(&mut writer)
                    .map_err(|err| FormatCommandError::Write(path.map(Path::to_path_buf), err))?;
            }
        }
    }

    Ok(FormatResult::from(formatted))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/show_settings.rs | crates/ruff/src/commands/show_settings.rs | use std::io::Write;
use std::path::PathBuf;
use anyhow::{Result, bail};
use itertools::Itertools;
use ruff_workspace::resolver::{PyprojectConfig, ResolvedFile, python_files_in_path};
use crate::args::ConfigArguments;
/// Print the user-facing configuration settings.
pub(crate) fn show_settings(
files: &[PathBuf],
pyproject_config: &PyprojectConfig,
config_arguments: &ConfigArguments,
writer: &mut impl Write,
) -> Result<()> {
// Collect all files in the hierarchy.
let (paths, resolver) = python_files_in_path(files, pyproject_config, config_arguments)?;
// Print the list of files.
let Some(path) = paths
.into_iter()
.flatten()
.map(ResolvedFile::into_path)
.sorted_unstable()
.next()
else {
bail!("No files found under the given path");
};
let settings = resolver.resolve(&path);
writeln!(writer, "Resolved settings for: \"{}\"", path.display())?;
if let Some(settings_path) = pyproject_config.path.as_ref() {
writeln!(writer, "Settings path: \"{}\"", settings_path.display())?;
}
write!(writer, "{settings}")?;
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/version.rs | crates/ruff/src/commands/version.rs | use std::io::{self, BufWriter, Write};
use anyhow::Result;
use crate::args::HelpFormat;
/// Display version information on stdout, as plain text or pretty JSON.
pub(crate) fn version(output_format: HelpFormat) -> Result<()> {
    let version_info = crate::version::version();
    // Buffer stdout behind a single lock for the duration of the write.
    let mut stdout = BufWriter::new(io::stdout().lock());

    match output_format {
        HelpFormat::Text => writeln!(stdout, "ruff {version_info}")?,
        HelpFormat::Json => serde_json::to_writer_pretty(stdout, &version_info)?,
    }

    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/check.rs | crates/ruff/src/commands/check.rs | use std::fmt::Write;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Instant;
use anyhow::Result;
use colored::Colorize;
use ignore::Error;
use log::{debug, warn};
#[cfg(not(target_family = "wasm"))]
use rayon::prelude::*;
use ruff_linter::message::create_panic_diagnostic;
use rustc_hash::FxHashMap;
use ruff_db::diagnostic::Diagnostic;
use ruff_db::panic::catch_unwind;
use ruff_linter::package::PackageRoot;
use ruff_linter::registry::Rule;
use ruff_linter::settings::types::UnsafeFixes;
use ruff_linter::settings::{LinterSettings, flags};
use ruff_linter::{IOError, Violation, fs, warn_user_once};
use ruff_source_file::SourceFileBuilder;
use ruff_text_size::TextRange;
use ruff_workspace::resolver::{
PyprojectConfig, ResolvedFile, match_exclusion, python_files_in_path,
};
use crate::args::ConfigArguments;
use crate::cache::{Cache, PackageCacheMap, PackageCaches};
use crate::diagnostics::Diagnostics;
/// Run the linter over a collection of files.
///
/// Discovers Python files under `files`, lints them in parallel (respecting
/// per-file settings, exclusions, and the lint cache), and returns the
/// aggregated diagnostics sorted by source position.
pub(crate) fn check(
    files: &[PathBuf],
    pyproject_config: &PyprojectConfig,
    config_arguments: &ConfigArguments,
    cache: flags::Cache,
    noqa: flags::Noqa,
    fix_mode: flags::FixMode,
    unsafe_fixes: UnsafeFixes,
) -> Result<Diagnostics> {
    // Collect all the Python files to check.
    let start = Instant::now();
    let (paths, resolver) = python_files_in_path(files, pyproject_config, config_arguments)?;
    debug!("Identified files to lint in: {:?}", start.elapsed());

    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(Diagnostics::default());
    }

    // Discover the package root for each Python file.
    let package_roots = resolver.package_roots(
        &paths
            .iter()
            .flatten()
            .map(ResolvedFile::path)
            .collect::<Vec<_>>(),
    );

    // Load the caches.
    let caches = if cache.is_enabled() {
        Some(PackageCacheMap::init(&package_roots, &resolver))
    } else {
        None
    };

    let start = Instant::now();
    let diagnostics_per_file = paths.par_iter().filter_map(|resolved_file| {
        let result = match resolved_file {
            Ok(resolved_file) => {
                let path = resolved_file.path();
                let package = path
                    .parent()
                    .and_then(|parent| package_roots.get(parent))
                    .and_then(|package| *package);

                let settings = resolver.resolve(path);

                // Skip excluded files — unless the path was passed directly on
                // the command line (a "root") and `force-exclude` is off.
                if (settings.file_resolver.force_exclude || !resolved_file.is_root())
                    && match_exclusion(
                        resolved_file.path(),
                        resolved_file.file_name(),
                        &settings.linter.exclude,
                    )
                {
                    return None;
                }

                // The cache is keyed per package root; fall back to the file's
                // parent directory for files outside any package.
                let cache_root = package
                    .map(PackageRoot::path)
                    .unwrap_or_else(|| path.parent().unwrap_or(path));
                let cache = caches.get(cache_root);

                lint_path(
                    path,
                    package,
                    &settings.linter,
                    cache,
                    noqa,
                    fix_mode,
                    unsafe_fixes,
                )
                .map_err(|e| {
                    // Flatten the full error chain into a single message.
                    (Some(path.to_path_buf()), {
                        let mut error = e.to_string();
                        for cause in e.chain() {
                            write!(&mut error, "\n Cause: {cause}").unwrap();
                        }
                        error
                    })
                })
            }
            Err(e) => Err((
                // Pull the offending path out of the walker error, if present.
                if let Error::WithPath { path, .. } = e {
                    Some(path.clone())
                } else {
                    None
                },
                e.io_error()
                    .map_or_else(|| e.to_string(), io::Error::to_string),
            )),
        };

        Some(result.unwrap_or_else(|(path, message)| {
            if let Some(path) = &path {
                let settings = resolver.resolve(path);
                if settings.linter.rules.enabled(Rule::IOError) {
                    // Surface the failure as an `IOError` diagnostic anchored
                    // to an empty placeholder source file.
                    let dummy =
                        SourceFileBuilder::new(path.to_string_lossy().as_ref(), "").finish();

                    Diagnostics::new(
                        vec![IOError { message }.into_diagnostic(TextRange::default(), &dummy)],
                        FxHashMap::default(),
                    )
                } else {
                    // Rule disabled: log instead of emitting a diagnostic.
                    warn!(
                        "{}{}{} {message}",
                        "Failed to lint ".bold(),
                        fs::relativize_path(path).bold(),
                        ":".bold()
                    );

                    Diagnostics::default()
                }
            } else {
                warn!("{} {message}", "Encountered error:".bold());

                Diagnostics::default()
            }
        }))
    });

    // Aggregate the diagnostics of all checked files and count the checked files.
    // This can't be a regular for loop because we use `par_iter`.
    let (mut all_diagnostics, checked_files) = diagnostics_per_file
        .fold(
            || (Diagnostics::default(), 0u64),
            |(all_diagnostics, checked_files), file_diagnostics| {
                (all_diagnostics + file_diagnostics, checked_files + 1)
            },
        )
        .reduce(
            || (Diagnostics::default(), 0u64),
            |a, b| (a.0 + b.0, a.1 + b.1),
        );

    // Deterministic output order regardless of parallel scheduling.
    all_diagnostics
        .inner
        .sort_by(Diagnostic::ruff_start_ordering);

    // Store the caches.
    caches.persist()?;

    let duration = start.elapsed();
    debug!("Checked {checked_files:?} files in: {duration:?}");

    Ok(all_diagnostics)
}
/// Wraps [`lint_path`](crate::diagnostics::lint_path) in a
/// [`catch_unwind`](std::panic::catch_unwind) so that a panic while linting a
/// single file is reported as a diagnostic instead of aborting the whole run.
fn lint_path(
    path: &Path,
    package: Option<PackageRoot<'_>>,
    settings: &LinterSettings,
    cache: Option<&Cache>,
    noqa: flags::Noqa,
    fix_mode: flags::FixMode,
    unsafe_fixes: UnsafeFixes,
) -> Result<Diagnostics> {
    catch_unwind(|| {
        crate::diagnostics::lint_path(path, package, settings, cache, noqa, fix_mode, unsafe_fixes)
    })
    .unwrap_or_else(|panic| {
        // Convert the captured panic into a synthetic diagnostic for `path`.
        Ok(Diagnostics::new(
            vec![create_panic_diagnostic(&panic, Some(path))],
            FxHashMap::default(),
        ))
    })
}
#[cfg(test)]
#[cfg(unix)]
mod test {
    use std::fs;
    use std::os::unix::fs::OpenOptionsExt;

    use anyhow::Result;
    use rustc_hash::FxHashMap;
    use tempfile::TempDir;

    use ruff_db::diagnostic::{DiagnosticFormat, DisplayDiagnosticConfig, DisplayDiagnostics};
    use ruff_linter::message::EmitterContext;
    use ruff_linter::registry::Rule;
    use ruff_linter::settings::types::UnsafeFixes;
    use ruff_linter::settings::{LinterSettings, flags};
    use ruff_workspace::Settings;
    use ruff_workspace::resolver::{PyprojectConfig, PyprojectDiscoveryStrategy};

    use crate::args::ConfigArguments;

    use super::check;

    /// We check that regular python files, pyproject.toml and jupyter notebooks all handle io
    /// errors gracefully
    #[test]
    fn unreadable_files() -> Result<()> {
        let path = "E902.py";
        let rule_code = Rule::IOError;

        // Create inaccessible files
        let tempdir = TempDir::new()?;
        let pyproject_toml = tempdir.path().join("pyproject.toml");
        let python_file = tempdir.path().join("code.py");
        let notebook = tempdir.path().join("notebook.ipynb");
        // Mode 0o000 strips all permissions, so every subsequent read fails.
        for file in [&pyproject_toml, &python_file, &notebook] {
            fs::OpenOptions::new()
                .create(true)
                .truncate(true)
                .write(true)
                .mode(0o000)
                .open(file)?;
        }

        // Configure
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path);
        // invalid pyproject.toml is not active by default
        let settings = Settings {
            linter: LinterSettings::for_rules(vec![rule_code, Rule::InvalidPyprojectToml]),
            ..Settings::default()
        };
        let pyproject_config =
            PyprojectConfig::new(PyprojectDiscoveryStrategy::Fixed, settings, None);

        // Run
        let diagnostics = check(
            &[tempdir.path().to_path_buf()],
            &pyproject_config,
            &ConfigArguments::default(),
            flags::Cache::Disabled,
            flags::Noqa::Disabled,
            flags::FixMode::Generate,
            UnsafeFixes::Enabled,
        )
        .unwrap();
        // Render the diagnostics the same way the CLI's concise output does.
        let config = DisplayDiagnosticConfig::default()
            .format(DiagnosticFormat::Concise)
            .hide_severity(true);
        let messages = DisplayDiagnostics::new(
            &EmitterContext::new(&FxHashMap::default()),
            &config,
            &diagnostics.inner,
        )
        .to_string();

        insta::with_settings!({
            omit_expression => true,
            filters => vec![
                // The tempdir is always different (and platform dependent)
                (tempdir.path().to_str().unwrap(), "/home/ferris/project"),
            ]
        }, {
            insta::assert_snapshot!(snapshot, messages);
        });
        Ok(())
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/check_stdin.rs | crates/ruff/src/commands/check_stdin.rs | use std::path::Path;
use anyhow::Result;
use ruff_db::diagnostic::Diagnostic;
use ruff_linter::package::PackageRoot;
use ruff_linter::packaging;
use ruff_linter::settings::flags;
use ruff_workspace::resolver::{PyprojectConfig, Resolver, match_exclusion, python_file_at_path};
use crate::args::ConfigArguments;
use crate::diagnostics::{Diagnostics, lint_stdin};
use crate::stdin::{parrot_stdin, read_from_stdin};
/// Run the linter over a single file, read from `stdin`.
///
/// `filename` is the nominal path supplied via `--stdin-filename`; it is used
/// for settings resolution, exclusion matching, and package-root detection.
pub(crate) fn check_stdin(
    filename: Option<&Path>,
    pyproject_config: &PyprojectConfig,
    overrides: &ConfigArguments,
    noqa: flags::Noqa,
    fix_mode: flags::FixMode,
) -> Result<Diagnostics> {
    let mut resolver = Resolver::new(pyproject_config);

    // When `force-exclude` is set and the nominal filename is excluded, skip
    // linting entirely. In fix mode, stdin must still be echoed back
    // (`parrot_stdin`) so the caller receives the unmodified file contents.
    if resolver.force_exclude() {
        if let Some(filename) = filename {
            if !python_file_at_path(filename, &mut resolver, overrides)? {
                if fix_mode.is_apply() {
                    parrot_stdin()?;
                }
                return Ok(Diagnostics::default());
            }

            if filename.file_name().is_some_and(|name| {
                match_exclusion(filename, name, &resolver.base_settings().linter.exclude)
            }) {
                if fix_mode.is_apply() {
                    parrot_stdin()?;
                }
                return Ok(Diagnostics::default());
            }
        }
    }

    let stdin = read_from_stdin()?;

    // Detect the package root from the filename's parent directory, if any.
    let package_root = filename.and_then(Path::parent).and_then(|path| {
        packaging::detect_package_root(path, &resolver.base_settings().linter.namespace_packages)
            .map(PackageRoot::root)
    });

    let mut diagnostics = lint_stdin(
        filename,
        package_root,
        stdin,
        resolver.base_settings(),
        noqa,
        fix_mode,
    )?;

    // Deterministic output order: sort diagnostics by source position.
    diagnostics
        .inner
        .sort_unstable_by(Diagnostic::ruff_start_ordering);

    Ok(diagnostics)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/mod.rs | crates/ruff/src/commands/mod.rs | pub(crate) mod completions;
pub(crate) mod add_noqa;
pub(crate) mod analyze_graph;
pub(crate) mod check;
pub(crate) mod check_stdin;
pub(crate) mod clean;
pub(crate) mod config;
pub(crate) mod format;
pub(crate) mod format_stdin;
pub(crate) mod linter;
pub(crate) mod rule;
pub(crate) mod server;
pub(crate) mod show_files;
pub(crate) mod show_settings;
pub(crate) mod version;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/format.rs | crates/ruff/src/commands/format.rs | use std::fmt::{Display, Formatter};
use std::fs::File;
use std::io;
use std::io::{Write, stderr, stdout};
use std::path::{Path, PathBuf};
use std::time::Instant;
use anyhow::Result;
use colored::Colorize;
use itertools::Itertools;
use log::{error, warn};
use rayon::iter::Either::{Left, Right};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, DisplayDiagnosticConfig, Severity, Span,
};
use ruff_linter::message::{EmitterContext, create_panic_diagnostic, render_diagnostics};
use ruff_linter::settings::types::OutputFormat;
use ruff_notebook::NotebookIndex;
use ruff_python_parser::ParseError;
use rustc_hash::{FxHashMap, FxHashSet};
use thiserror::Error;
use tracing::debug;
use ruff_db::panic::{PanicError, catch_unwind};
use ruff_diagnostics::{Edit, Fix, SourceMap};
use ruff_linter::fs;
use ruff_linter::logging::{DisplayParseError, LogLevel};
use ruff_linter::package::PackageRoot;
use ruff_linter::registry::Rule;
use ruff_linter::rules::flake8_quotes::settings::Quote;
use ruff_linter::source_kind::{SourceError, SourceKind};
use ruff_linter::warn_user_once;
use ruff_python_ast::{PySourceType, SourceType};
use ruff_python_formatter::{FormatModuleError, QuoteStyle, format_module_source, format_range};
use ruff_source_file::{LineIndex, LineRanges, OneIndexed, SourceFileBuilder};
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_workspace::FormatterSettings;
use ruff_workspace::resolver::{
PyprojectConfig, ResolvedFile, Resolver, match_exclusion, python_files_in_path,
};
use crate::args::{ConfigArguments, FormatArguments, FormatRange};
use crate::cache::{Cache, FileCacheKey, PackageCacheMap, PackageCaches};
use crate::{ExitStatus, resolve_default_files};
/// How the `format` command treats its results.
#[derive(Debug, Copy, Clone, is_macro::Is)]
pub(crate) enum FormatMode {
    /// Write the formatted contents back to the file.
    Write,
    /// Check if the file is formatted, but do not write the formatted contents back.
    Check,
    /// Check if the file is formatted, show a diff if not.
    Diff,
}
impl FormatMode {
    /// Derive the mode from the CLI flags; `--diff` takes precedence over
    /// `--check`, and neither flag means "write in place".
    pub(crate) fn from_cli(cli: &FormatArguments) -> Self {
        match (cli.diff, cli.check) {
            (true, _) => FormatMode::Diff,
            (false, true) => FormatMode::Check,
            (false, false) => FormatMode::Write,
        }
    }
}
/// Format a set of files, and return the exit status.
///
/// Discovers Python files under the CLI paths, formats them in parallel
/// (respecting per-file settings, exclusions, and the format cache), reports
/// results according to the mode, and maps the outcome to an exit status.
pub(crate) fn format(
    cli: FormatArguments,
    config_arguments: &ConfigArguments,
    pyproject_config: &PyprojectConfig,
) -> Result<ExitStatus> {
    let mode = FormatMode::from_cli(&cli);
    let files = resolve_default_files(cli.files, false);
    let (paths, resolver) = python_files_in_path(&files, pyproject_config, config_arguments)?;
    let output_format = pyproject_config.settings.output_format;
    let preview = pyproject_config.settings.formatter.preview;

    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(ExitStatus::Success);
    }

    // `--range` only makes sense for a single input file.
    if cli.range.is_some() && paths.len() > 1 {
        return Err(anyhow::anyhow!(
            "The `--range` option is only supported when formatting a single file but the specified paths resolve to {} files.",
            paths.len()
        ));
    }

    warn_incompatible_formatter_settings(&resolver);

    // Discover the package root for each Python file.
    let package_roots = resolver.package_roots(
        &paths
            .iter()
            .flatten()
            .map(ResolvedFile::path)
            .collect::<Vec<_>>(),
    );

    let caches = if cli.no_cache {
        None
    } else {
        // `--no-cache` doesn't respect code changes, and so is often confusing during
        // development.
        #[cfg(debug_assertions)]
        crate::warn_user!("Detected debug build without --no-cache.");

        Some(PackageCacheMap::init(&package_roots, &resolver))
    };

    let start = Instant::now();
    let (results, mut errors): (Vec<_>, Vec<_>) = paths
        .par_iter()
        .filter_map(|entry| {
            match entry {
                Ok(resolved_file) => {
                    let path = resolved_file.path();
                    let settings = resolver.resolve(path);

                    // Per-extension overrides win over the filename extension.
                    let source_type = match settings.formatter.extension.get(path) {
                        None => match SourceType::from(path) {
                            SourceType::Python(source_type) => source_type,
                            SourceType::Toml(_) => {
                                // Ignore any non-Python files.
                                return None;
                            }
                        },
                        Some(language) => PySourceType::from(language),
                    };

                    // Ignore files that are excluded from formatting
                    if (settings.file_resolver.force_exclude || !resolved_file.is_root())
                        && match_exclusion(
                            path,
                            resolved_file.file_name(),
                            &settings.formatter.exclude,
                        )
                    {
                        return None;
                    }

                    let package = path
                        .parent()
                        .and_then(|parent| package_roots.get(parent).copied())
                        .flatten();
                    let cache_root = package
                        .map(PackageRoot::path)
                        .unwrap_or_else(|| path.parent().unwrap_or(path));
                    let cache = caches.get(cache_root);

                    // Catch panics so one broken file doesn't abort the run.
                    Some(
                        match catch_unwind(|| {
                            format_path(
                                path,
                                &settings.formatter,
                                source_type,
                                mode,
                                cli.range,
                                cache,
                            )
                        }) {
                            Ok(inner) => inner.map(|result| FormatPathResult {
                                path: resolved_file.path().to_path_buf(),
                                result,
                            }),
                            Err(error) => Err(FormatCommandError::Panic(
                                Some(resolved_file.path().to_path_buf()),
                                Box::new(error),
                            )),
                        },
                    )
                }
                Err(err) => Some(Err(FormatCommandError::Ignore(err.clone()))),
            }
        })
        .partition_map(|result| match result {
            Ok(diagnostic) => Left(diagnostic),
            Err(err) => Right(err),
        });
    let duration = start.elapsed();

    debug!(
        "Formatted {} files in {:.2?}",
        results.len() + errors.len(),
        duration
    );

    // Store the caches.
    caches.persist()?;

    // Report on any errors.
    //
    // We only convert errors to `Diagnostic`s in `Check` mode with preview enabled, otherwise we
    // fall back on printing simple messages.
    if !(preview.is_enabled() && mode.is_check()) {
        errors.sort_unstable_by(|a, b| a.path().cmp(&b.path()));

        for error in &errors {
            error!("{error}");
        }
    }

    let results = FormatResults::new(results.as_slice(), mode);
    match mode {
        FormatMode::Write => {}
        FormatMode::Check => {
            if preview.is_enabled() {
                results.write_changed_preview(&mut stdout().lock(), output_format, &errors)?;
            } else {
                results.write_changed(&mut stdout().lock())?;
            }
        }
        FormatMode::Diff => {
            results.write_diff(&mut stdout().lock())?;
        }
    }

    // Report on the formatting changes.
    if config_arguments.log_level >= LogLevel::Default {
        if mode.is_diff() {
            // Allow piping the diff to e.g. a file by writing the summary to stderr
            results.write_summary(&mut stderr().lock())?;
        } else if !preview.is_enabled() || output_format.is_human_readable() {
            results.write_summary(&mut stdout().lock())?;
        }
    }

    // Map outcomes to exit codes: any error is `Error`; otherwise, changed
    // files are a `Failure` in check/diff mode (or in write mode when
    // `--exit-non-zero-on-format` was passed).
    match mode {
        FormatMode::Write => {
            if errors.is_empty() {
                if cli.exit_non_zero_on_format && results.any_formatted() {
                    Ok(ExitStatus::Failure)
                } else {
                    Ok(ExitStatus::Success)
                }
            } else {
                Ok(ExitStatus::Error)
            }
        }
        FormatMode::Check | FormatMode::Diff => {
            if errors.is_empty() {
                if results.any_formatted() {
                    Ok(ExitStatus::Failure)
                } else {
                    Ok(ExitStatus::Success)
                }
            } else {
                Ok(ExitStatus::Error)
            }
        }
    }
}
/// Format the file at the given [`Path`].
///
/// Consults the formatter `cache` (when provided) to skip files whose on-disk
/// metadata indicates they are already formatted, and records newly observed
/// formatted files back into the cache (unless a `range` is being formatted).
///
/// # Errors
///
/// Returns a [`FormatCommandError`] if the file cannot be read, formatted, or
/// (in [`FormatMode::Write`]) written back.
#[tracing::instrument(level = "debug", skip_all, fields(path = %path.display()))]
pub(crate) fn format_path(
    path: &Path,
    settings: &FormatterSettings,
    source_type: PySourceType,
    mode: FormatMode,
    range: Option<FormatRange>,
    cache: Option<&Cache>,
) -> Result<FormatResult, FormatCommandError> {
    // Fast path: if the cache says this file is already formatted, skip it entirely.
    if let Some(cache) = cache {
        let relative_path = cache
            .relative_path(path)
            .expect("wrong package cache for file");
        if let Ok(cache_key) = FileCacheKey::from_path(path) {
            if cache.is_formatted(relative_path, &cache_key) {
                return Ok(FormatResult::Unchanged);
            }
        }
    }
    // Extract the sources from the file.
    let unformatted = match SourceKind::from_path(path, source_type) {
        Ok(Some(source_kind)) => source_kind,
        // Non-Python Jupyter notebook.
        Ok(None) => return Ok(FormatResult::Skipped),
        Err(err) => {
            return Err(FormatCommandError::Read(Some(path.to_path_buf()), err));
        }
    };
    // Don't write back to the cache if formatting a range.
    let cache = cache.filter(|_| range.is_none());
    // Format the source.
    let format_result = match format_source(&unformatted, source_type, Some(path), settings, range)?
    {
        FormattedSource::Formatted(formatted) => match mode {
            FormatMode::Write => {
                let mut writer = File::create(path).map_err(|err| {
                    FormatCommandError::Write(Some(path.to_path_buf()), err.into())
                })?;
                formatted
                    .write(&mut writer)
                    .map_err(|err| FormatCommandError::Write(Some(path.to_path_buf()), err))?;
                // The file on disk is now formatted; remember that for the next run.
                if let Some(cache) = cache {
                    store_formatted_in_cache(cache, path);
                }
                FormatResult::Formatted
            }
            FormatMode::Check | FormatMode::Diff => FormatResult::Diff {
                unformatted,
                formatted,
            },
        },
        FormattedSource::Unchanged => {
            // The file was already formatted; remember that for the next run.
            if let Some(cache) = cache {
                store_formatted_in_cache(cache, path);
            }
            FormatResult::Unchanged
        }
    };
    Ok(format_result)
}

/// Marks `path` as formatted in `cache` so subsequent runs can skip it.
///
/// Extracted because identical bookkeeping is needed both after writing a
/// reformatted file and after observing an already-formatted file.
fn store_formatted_in_cache(cache: &Cache, path: &Path) {
    if let Ok(cache_key) = FileCacheKey::from_path(path) {
        let relative_path = cache
            .relative_path(path)
            .expect("wrong package cache for file");
        cache.set_formatted(relative_path.to_path_buf(), &cache_key);
    }
}
/// The outcome of running the formatter over a single source, as produced by
/// [`format_source`].
#[derive(Debug)]
pub(crate) enum FormattedSource {
    /// The source was formatted, and the [`SourceKind`] contains the transformed source code.
    Formatted(SourceKind),
    /// The source was unchanged.
    Unchanged,
}
// Collapse a formatting outcome into the corresponding `FormatResult`,
// discarding the transformed source code.
impl From<FormattedSource> for FormatResult {
    fn from(value: FormattedSource) -> Self {
        match value {
            FormattedSource::Formatted(_) => FormatResult::Formatted,
            FormattedSource::Unchanged => FormatResult::Unchanged,
        }
    }
}
/// Format a [`SourceKind`], returning [`FormattedSource::Formatted`] with the transformed
/// source, or [`FormattedSource::Unchanged`] if formatting produced no changes.
///
/// For notebooks, each code cell is formatted individually and the results are stitched
/// back together; range formatting is only supported for plain Python sources.
pub(crate) fn format_source(
    source_kind: &SourceKind,
    source_type: PySourceType,
    path: Option<&Path>,
    settings: &FormatterSettings,
    range: Option<FormatRange>,
) -> Result<FormattedSource, FormatCommandError> {
    match &source_kind {
        SourceKind::Python(unformatted) => {
            let options = settings.to_format_options(source_type, unformatted, path);
            // Range formatting only reformats the requested span; otherwise, format the
            // whole module.
            let formatted = if let Some(range) = range {
                let line_index = LineIndex::from_source_text(unformatted);
                let byte_range = range.to_text_range(unformatted, &line_index);
                format_range(unformatted, byte_range, options).map(|formatted_range| {
                    let mut formatted = unformatted.clone();
                    formatted.replace_range(
                        std::ops::Range::<usize>::from(formatted_range.source_range()),
                        formatted_range.as_code(),
                    );
                    formatted
                })
            } else {
                // Using `Printed::into_code` requires adding `ruff_formatter` as a direct dependency, and I suspect that Rust can optimize the closure away regardless.
                #[expect(clippy::redundant_closure_for_method_calls)]
                format_module_source(unformatted, options).map(|formatted| formatted.into_code())
            };
            // Parse errors get the dedicated, nicely rendered error type; everything else
            // becomes a generic format error.
            let formatted = formatted.map_err(|err| {
                if let FormatModuleError::ParseError(err) = err {
                    DisplayParseError::from_source_kind(
                        err,
                        path.map(Path::to_path_buf),
                        source_kind,
                    )
                    .into()
                } else {
                    FormatCommandError::Format(path.map(Path::to_path_buf), err)
                }
            })?;
            // Cheap length comparison first to avoid comparing contents in the common case.
            if formatted.len() == unformatted.len() && formatted == *unformatted {
                Ok(FormattedSource::Unchanged)
            } else {
                Ok(FormattedSource::Formatted(SourceKind::Python(formatted)))
            }
        }
        SourceKind::IpyNotebook(notebook) => {
            if !notebook.is_python_notebook() {
                return Ok(FormattedSource::Unchanged);
            }
            // Range formatting is not supported for notebooks.
            if range.is_some() {
                return Err(FormatCommandError::RangeFormatNotebook(
                    path.map(Path::to_path_buf),
                ));
            }
            let options = settings.to_format_options(source_type, notebook.source_code(), path);
            // `output` stays `None` until the first cell actually changes, so a fully
            // unchanged notebook allocates nothing.
            let mut output: Option<String> = None;
            let mut last: Option<TextSize> = None;
            let mut source_map = SourceMap::default();
            // Format each cell individually.
            for (start, end) in notebook.cell_offsets().iter().tuple_windows::<(_, _)>() {
                let range = TextRange::new(*start, *end);
                let unformatted = &notebook.source_code()[range];
                // Format the cell.
                let formatted =
                    format_module_source(unformatted, options.clone()).map_err(|err| {
                        if let FormatModuleError::ParseError(err) = err {
                            // Offset the error by the start of the cell
                            DisplayParseError::from_source_kind(
                                ParseError {
                                    error: err.error,
                                    location: err.location.checked_add(*start).unwrap(),
                                },
                                path.map(Path::to_path_buf),
                                source_kind,
                            )
                            .into()
                        } else {
                            FormatCommandError::Format(path.map(Path::to_path_buf), err)
                        }
                    })?;
                // If the cell is unchanged, skip it.
                let formatted = formatted.as_code();
                if formatted.len() == unformatted.len() && formatted == unformatted {
                    continue;
                }
                // If this is the first newly-formatted cell, initialize the output.
                let output = output
                    .get_or_insert_with(|| String::with_capacity(notebook.source_code().len()));
                // Add all contents from `last` to the current cell.
                let slice = &notebook.source_code()
                    [TextRange::new(last.unwrap_or_default(), range.start())];
                output.push_str(slice);
                // Add the start source marker for the cell.
                source_map.push_marker(*start, output.text_len());
                // Add the cell itself.
                output.push_str(formatted);
                // Add the end source marker for the added cell.
                source_map.push_marker(*end, output.text_len());
                // Track that the cell was formatted.
                last = Some(*end);
            }
            // If the file was unchanged, return `None`.
            let (Some(mut output), Some(last)) = (output, last) else {
                return Ok(FormattedSource::Unchanged);
            };
            // Add the remaining content.
            let slice = &notebook.source_code()[usize::from(last)..];
            output.push_str(slice);
            // Update the notebook.
            let mut formatted = notebook.clone();
            formatted.update(&source_map, output);
            Ok(FormattedSource::Formatted(SourceKind::IpyNotebook(
                formatted,
            )))
        }
    }
}
/// The result of an individual formatting operation.
#[derive(Debug, Clone, is_macro::Is)]
pub(crate) enum FormatResult {
    /// The file was formatted and written back to disk.
    Formatted,
    /// The file needs to be formatted, as the `formatted` and `unformatted` contents differ.
    Diff {
        unformatted: SourceKind,
        formatted: SourceKind,
    },
    /// The file was unchanged, as the formatted contents matched the existing contents.
    Unchanged,
    /// Skipped formatting because it's an unsupported file format (e.g., a non-Python
    /// Jupyter notebook).
    Skipped,
}
/// The coupling of a [`FormatResult`] with the path of the file that was analyzed.
#[derive(Debug)]
struct FormatPathResult {
    /// The file that was formatted (or checked).
    path: PathBuf,
    /// The outcome of formatting that file.
    result: FormatResult,
}
/// The results of formatting a set of files
#[derive(Debug)]
struct FormatResults<'a> {
    /// The individual formatting results.
    results: &'a [FormatPathResult],
    /// The format mode that was used.
    mode: FormatMode,
}
impl<'a> FormatResults<'a> {
    /// Bundles the per-file results with the mode they were produced under.
    fn new(results: &'a [FormatPathResult], mode: FormatMode) -> Self {
        Self { results, mode }
    }
    /// Returns `true` if any of the files require formatting.
    fn any_formatted(&self) -> bool {
        self.results.iter().any(|result| match result.result {
            FormatResult::Formatted | FormatResult::Diff { .. } => true,
            FormatResult::Unchanged | FormatResult::Skipped => false,
        })
    }
    /// Write a diff of the formatting changes to the given writer.
    fn write_diff(&self, f: &mut impl Write) -> io::Result<()> {
        // Sort by path so output is deterministic regardless of traversal order.
        for (path, unformatted, formatted) in self
            .results
            .iter()
            .filter_map(|result| {
                if let FormatResult::Diff {
                    unformatted,
                    formatted,
                } = &result.result
                {
                    Some((result.path.as_path(), unformatted, formatted))
                } else {
                    None
                }
            })
            .sorted_unstable_by_key(|(path, _, _)| *path)
        {
            write!(f, "{}", unformatted.diff(formatted, Some(path)).unwrap())?;
        }
        Ok(())
    }
    /// Write a list of the files that would be changed to the given writer.
    fn write_changed(&self, f: &mut impl Write) -> io::Result<()> {
        for path in self
            .results
            .iter()
            .filter_map(|result| {
                if result.result.is_diff() {
                    Some(result.path.as_path())
                } else {
                    None
                }
            })
            .sorted_unstable()
        {
            writeln!(f, "Would reformat: {}", fs::relativize_path(path).bold())?;
        }
        Ok(())
    }
    /// Write a list of the files that would be changed and any errors to the given writer.
    fn write_changed_preview(
        &self,
        f: &mut impl Write,
        output_format: OutputFormat,
        errors: &[FormatCommandError],
    ) -> io::Result<()> {
        let mut notebook_index = FxHashMap::default();
        // Interleave errors and "would reformat" findings into one sorted diagnostic stream.
        let diagnostics: Vec<_> = errors
            .iter()
            .map(Diagnostic::from)
            .chain(self.to_diagnostics(&mut notebook_index))
            .sorted_unstable_by(Diagnostic::ruff_start_ordering)
            .collect();
        let context = EmitterContext::new(&notebook_index);
        let config = DisplayDiagnosticConfig::default()
            .hide_severity(true)
            .show_fix_diff(true)
            .color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize());
        render_diagnostics(f, output_format, config, &context, &diagnostics)
    }
    /// Write a summary of the formatting results to the given writer.
    fn write_summary(&self, f: &mut impl Write) -> io::Result<()> {
        // Compute the number of changed and unchanged files.
        let mut changed = 0u32;
        let mut unchanged = 0u32;
        for result in self.results {
            match &result.result {
                FormatResult::Formatted => {
                    changed += 1;
                }
                FormatResult::Unchanged => unchanged += 1,
                FormatResult::Diff { .. } => {
                    changed += 1;
                }
                // Skipped files are not counted in the summary.
                FormatResult::Skipped => {}
            }
        }
        // Write out a summary of the formatting results.
        if changed > 0 && unchanged > 0 {
            writeln!(
                f,
                "{} file{} {}, {} file{} {}",
                changed,
                if changed == 1 { "" } else { "s" },
                match self.mode {
                    FormatMode::Write => "reformatted",
                    FormatMode::Check | FormatMode::Diff => "would be reformatted",
                },
                unchanged,
                if unchanged == 1 { "" } else { "s" },
                match self.mode {
                    FormatMode::Write => "left unchanged",
                    FormatMode::Check | FormatMode::Diff => "already formatted",
                },
            )
        } else if changed > 0 {
            writeln!(
                f,
                "{} file{} {}",
                changed,
                if changed == 1 { "" } else { "s" },
                match self.mode {
                    FormatMode::Write => "reformatted",
                    FormatMode::Check | FormatMode::Diff => "would be reformatted",
                }
            )
        } else if unchanged > 0 {
            writeln!(
                f,
                "{} file{} {}",
                unchanged,
                if unchanged == 1 { "" } else { "s" },
                match self.mode {
                    FormatMode::Write => "left unchanged",
                    FormatMode::Check | FormatMode::Diff => "already formatted",
                },
            )
        } else {
            // Nothing to report (e.g., all files were skipped).
            Ok(())
        }
    }
    /// Convert formatted files into [`Diagnostic`]s.
    fn to_diagnostics(
        &self,
        notebook_index: &mut FxHashMap<String, NotebookIndex>,
    ) -> impl Iterator<Item = Diagnostic> {
        /// The number of unmodified context lines rendered in diffs.
        ///
        /// Note that this should be kept in sync with the argument to `TextDiff::grouped_ops` in
        /// the diff rendering in `ruff_db` (currently 3). The `similar` crate uses two times that
        /// argument as a cutoff for rendering unmodified lines.
        const CONTEXT_LINES: u32 = 6;
        self.results.iter().filter_map(|result| {
            let (unformatted, formatted) = match &result.result {
                FormatResult::Skipped | FormatResult::Unchanged => return None,
                FormatResult::Diff {
                    unformatted,
                    formatted,
                } => (unformatted, formatted),
                FormatResult::Formatted => {
                    // `write_changed_preview` (and thus this method) is only reached in
                    // check mode, where changed files carry a `Diff` payload.
                    debug_assert!(
                        false,
                        "Expected `FormatResult::Diff` for changed files in check mode"
                    );
                    return None;
                }
            };
            let mut diagnostic = Diagnostic::new(
                DiagnosticId::Unformatted,
                Severity::Error,
                "File would be reformatted",
            );
            // Locate the first and last characters that differ to use as the diagnostic
            // range and to narrow the `Edit` range.
            let modified_range = ModifiedRange::new(unformatted, formatted);
            let path = result.path.to_string_lossy();
            // For scripts, this is a single `Edit` using the `ModifiedRange` above, but notebook
            // edits must be split by cell in order to render them as diffs.
            //
            // We also attempt to estimate the line number width for aligning the
            // annotate-snippets header. This is only an estimate because we don't actually know
            // if the maximum line number present in the document will be rendered as part of
            // the diff, either as a changed line or as an unchanged context line. For
            // notebooks, we refine our estimate by checking the number of lines in each cell
            // individually, otherwise we could use `formatted.source_code().count_lines(...)`
            // in both cases.
            let (fix, line_count) = if let SourceKind::IpyNotebook(formatted) = formatted
                && let SourceKind::IpyNotebook(unformatted) = unformatted
            {
                notebook_index.insert(path.to_string(), unformatted.index().clone());
                let mut edits = formatted
                    .cell_offsets()
                    .ranges()
                    .zip(unformatted.cell_offsets().ranges())
                    .filter_map(|(formatted_range, unformatted_range)| {
                        // Filter out cells that weren't modified. We use `intersect` instead of
                        // `contains_range` because the full modified range might start or end in
                        // the middle of a cell:
                        //
                        // ```
                        // | cell 1 | cell 2 | cell 3 |
                        // |----------------| modified range
                        // ```
                        //
                        // The intersection will be `Some` for all three cells in this case.
                        if modified_range
                            .unformatted
                            .intersect(unformatted_range)
                            .is_some()
                        {
                            let formatted = &formatted.source_code()[formatted_range];
                            let edit = if formatted.is_empty() {
                                Edit::range_deletion(unformatted_range)
                            } else {
                                Edit::range_replacement(formatted.to_string(), unformatted_range)
                            };
                            Some(edit)
                        } else {
                            None
                        }
                    });
                let fix = Fix::safe_edits(
                    edits
                        .next()
                        .expect("Formatted files must have at least one edit"),
                    edits,
                );
                let source = formatted.source_code();
                let line_count = formatted
                    .cell_offsets()
                    .ranges()
                    .filter_map(|range| {
                        if modified_range.formatted.contains_range(range) {
                            Some(source.count_lines(range))
                        } else {
                            None
                        }
                    })
                    .max()
                    .unwrap_or_default();
                (fix, line_count)
            } else {
                let formatted_code = &formatted.source_code()[modified_range.formatted];
                let edit = if formatted_code.is_empty() {
                    Edit::range_deletion(modified_range.unformatted)
                } else {
                    Edit::range_replacement(formatted_code.to_string(), modified_range.unformatted)
                };
                let fix = Fix::safe_edit(edit);
                let line_count = formatted
                    .source_code()
                    .count_lines(TextRange::up_to(modified_range.formatted.end()));
                (fix, line_count)
            };
            let source_file = SourceFileBuilder::new(path, unformatted.source_code()).finish();
            let span = Span::from(source_file).with_range(modified_range.unformatted);
            let mut annotation = Annotation::primary(span);
            annotation.hide_snippet(true);
            diagnostic.annotate(annotation);
            diagnostic.set_fix(fix);
            // TODO(brent) this offset is a hack to get the header of the diagnostic message, which
            // is rendered by our fork of `annotate-snippets`, to align with our manually-rendered
            // diff. `annotate-snippets` computes the alignment of the arrow in the header based on
            // the maximum line number width in its rendered snippet. However, we don't have a
            // reasonable range to underline in an annotation, so we don't send `annotate-snippets`
            // a snippet to measure. If we commit to staying on our fork, a more robust way of
            // handling this would be to move the diff rendering in
            // `ruff_db::diagnostic::render::full` into `annotate-snippets`, likely as another
            // `DisplayLine` variant and update the `lineno_width` calculation in
            // `DisplayList::fmt`. That would handle this offset "automatically."
            let line_count = (line_count + CONTEXT_LINES).min(
                formatted
                    .source_code()
                    .count_lines(TextRange::up_to(formatted.source_code().text_len())),
            );
            let lines = OneIndexed::new(line_count as usize).unwrap_or_default();
            diagnostic.set_header_offset(lines.digits().get());
            Some(diagnostic)
        })
    }
}
/// An error that can occur while formatting a set of files.
#[derive(Error, Debug)]
pub(crate) enum FormatCommandError {
    /// A file-discovery error reported by the `ignore` walker.
    Ignore(#[from] ignore::Error),
    /// The source could not be parsed as Python.
    Parse(#[from] DisplayParseError),
    /// The formatter panicked while processing the file.
    Panic(Option<PathBuf>, Box<PanicError>),
    /// The file's contents could not be read.
    Read(Option<PathBuf>, SourceError),
    /// The formatter itself failed on the file.
    Format(Option<PathBuf>, FormatModuleError),
    /// The formatted output could not be written back to disk.
    Write(Option<PathBuf>, SourceError),
    /// Range formatting was requested for a Jupyter notebook, which is unsupported.
    RangeFormatNotebook(Option<PathBuf>),
}
impl FormatCommandError {
fn path(&self) -> Option<&Path> {
match self {
Self::Ignore(err) => {
if let ignore::Error::WithPath { path, .. } = err {
Some(path.as_path())
} else {
None
}
}
Self::Parse(err) => err.path(),
Self::Panic(path, _)
| Self::Read(path, _)
| Self::Format(path, _)
| Self::Write(path, _)
| Self::RangeFormatNotebook(path) => path.as_deref(),
}
}
}
impl From<&FormatCommandError> for Diagnostic {
fn from(error: &FormatCommandError) -> Self {
let annotation = error.path().map(|path| {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/server.rs | crates/ruff/src/commands/server.rs | use crate::ExitStatus;
use anyhow::Result;
/// Run the `ruff server` subcommand by delegating to `ruff_server::run`.
///
/// `preview`, when `Some`, overrides the server's preview-mode setting.
/// Returns `ExitStatus::Success` once the server exits cleanly.
pub(crate) fn run_server(preview: Option<bool>) -> Result<ExitStatus> {
    ruff_server::run(preview)?;
    Ok(ExitStatus::Success)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/clean.rs | crates/ruff/src/commands/clean.rs | use std::fs::remove_dir_all;
use std::io::{self, BufWriter, Write};
use anyhow::Result;
use colored::Colorize;
use path_absolutize::path_dedot;
use walkdir::WalkDir;
use ruff_cache::CACHE_DIR_NAME;
use ruff_linter::fs;
use ruff_linter::logging::LogLevel;
/// Clear any caches in the current directory or any subdirectories.
///
/// Walks the working directory tree, removing every `.ruff_cache` directory
/// it finds; at `LogLevel::Default` or above, each removal is reported on stderr.
pub(crate) fn clean(level: LogLevel) -> Result<()> {
    let mut writer = BufWriter::new(io::stderr().lock());
    let directories = WalkDir::new(&*path_dedot::CWD)
        .into_iter()
        .filter_map(Result::ok)
        .filter(|entry| entry.file_type().is_dir());
    for directory in directories {
        let cache_dir = directory.path().join(CACHE_DIR_NAME);
        if !cache_dir.is_dir() {
            continue;
        }
        if level >= LogLevel::Default {
            writeln!(
                writer,
                "Removing cache at: {}",
                fs::relativize_path(&cache_dir).bold()
            )?;
        }
        remove_dir_all(&cache_dir)?;
    }
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/rule.rs | crates/ruff/src/commands/rule.rs | use std::fmt::Write as _;
use std::io::{self, BufWriter, Write};
use anyhow::Result;
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use strum::IntoEnumIterator;
use ruff_linter::FixAvailability;
use ruff_linter::codes::RuleGroup;
use ruff_linter::registry::{Linter, Rule, RuleNamespace};
use crate::args::HelpFormat;
/// Serializable description of a single rule, used for `--output-format json`.
#[derive(Serialize)]
struct Explanation<'a> {
    name: &'a str,
    code: String,
    linter: &'a str,
    /// The first of the rule's message formats, used as a short summary.
    summary: &'a str,
    message_formats: &'a [&'a str],
    /// Human-readable rendering of `fix_availability`.
    fix: String,
    fix_availability: FixAvailability,
    #[expect(clippy::struct_field_names)]
    explanation: Option<&'a str>,
    preview: bool,
    status: RuleGroup,
    source_location: SourceLocation,
}
impl<'a> Explanation<'a> {
    /// Builds the serializable explanation for `rule`, resolving its linter
    /// from the noqa code prefix.
    fn from_rule(rule: &'a Rule) -> Self {
        let code = rule.noqa_code().to_string();
        // Every rule code carries a valid linter prefix, so this cannot fail.
        let (linter, _) = Linter::parse_code(&code).unwrap();
        let fix = rule.fixable().to_string();
        Self {
            name: rule.name().as_str(),
            code,
            linter: linter.name(),
            // The first message format doubles as the summary.
            summary: rule.message_formats()[0],
            message_formats: rule.message_formats(),
            fix,
            fix_availability: rule.fixable(),
            explanation: rule.explanation(),
            preview: rule.is_preview(),
            status: rule.group(),
            source_location: SourceLocation {
                file: rule.file(),
                line: rule.line(),
            },
        }
    }
}
/// Render the documentation for `rule` as Markdown text, mirroring the layout
/// used on the documentation site: title, source linter, fix availability,
/// preview notice, and either the full explanation or the message formats.
fn format_rule_text(rule: Rule) -> String {
    let mut text = String::new();
    // Title line, followed by a blank line.
    let _ = writeln!(&mut text, "# {} ({})", rule.name(), rule.noqa_code());
    text.push('\n');
    // Every rule code carries a valid linter prefix, so this cannot fail.
    let (linter, _) = Linter::parse_code(&rule.noqa_code().to_string()).unwrap();
    let _ = writeln!(
        &mut text,
        "Derived from the **{}** linter.",
        linter.name()
    );
    text.push('\n');
    let fixable = rule.fixable();
    if matches!(fixable, FixAvailability::Always | FixAvailability::Sometimes) {
        let _ = writeln!(&mut text, "{fixable}");
        text.push('\n');
    }
    if rule.is_preview() {
        let _ = writeln!(
            &mut text,
            "This rule is in preview and is not stable. The `--preview` flag is required for use."
        );
        text.push('\n');
    }
    match rule.explanation() {
        Some(explanation) => text.push_str(explanation.trim()),
        // Rules without prose documentation fall back to their message formats.
        None => {
            text.push_str("Message formats:");
            for format in rule.message_formats() {
                let _ = write!(&mut text, "\n* {format}");
            }
        }
    }
    text
}
/// Explain a `Rule` to the user.
///
/// Writes either the Markdown documentation (`Text`) or the serialized
/// [`Explanation`] (`Json`) to stdout.
pub(crate) fn rule(rule: Rule, format: HelpFormat) -> Result<()> {
    let mut stdout = BufWriter::new(io::stdout().lock());
    match format {
        HelpFormat::Text => {
            writeln!(stdout, "{}", format_rule_text(rule))?;
        }
        HelpFormat::Json => {
            serde_json::to_writer_pretty(stdout, &Explanation::from_rule(&rule))?;
        }
    }
    Ok(())
}
/// Explain all rules to the user.
///
/// Iterates every registered [`Rule`], emitting Markdown documentation
/// (`Text`) or a single JSON array of [`Explanation`]s (`Json`) to stdout.
pub(crate) fn rules(format: HelpFormat) -> Result<()> {
    let mut stdout = BufWriter::new(io::stdout().lock());
    match format {
        HelpFormat::Text => {
            for rule in Rule::iter() {
                writeln!(stdout, "{}", format_rule_text(rule))?;
                writeln!(stdout)?;
            }
        }
        HelpFormat::Json => {
            // Stream the rules into one JSON sequence rather than collecting first.
            let mut serializer = serde_json::Serializer::pretty(stdout);
            let mut seq = serializer.serialize_seq(None)?;
            for rule in Rule::iter() {
                seq.serialize_element(&Explanation::from_rule(&rule))?;
            }
            seq.end()?;
        }
    }
    Ok(())
}
/// The location of the rule's implementation in the Ruff source tree, relative to the repository
/// root.
///
/// For most rules this will point to the `#[derive(ViolationMetadata)]` line above the rule's
/// struct.
#[derive(Serialize)]
struct SourceLocation {
    /// Source file containing the rule definition, relative to the repository root.
    file: &'static str,
    /// Line number of the rule definition within `file`.
    line: u32,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/show_files.rs | crates/ruff/src/commands/show_files.rs | use std::io::Write;
use std::path::PathBuf;
use anyhow::Result;
use itertools::Itertools;
use ruff_linter::warn_user_once;
use ruff_workspace::resolver::{PyprojectConfig, ResolvedFile, python_files_in_path};
use crate::args::ConfigArguments;
/// Show the list of files to be checked based on current settings.
///
/// Resolves the set of Python files for `files` under the given configuration
/// and prints one path per line, sorted, to `writer`.
pub(crate) fn show_files(
    files: &[PathBuf],
    pyproject_config: &PyprojectConfig,
    config_arguments: &ConfigArguments,
    writer: &mut impl Write,
) -> Result<()> {
    // Collect all files in the hierarchy.
    let (paths, _resolver) = python_files_in_path(files, pyproject_config, config_arguments)?;
    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(());
    }
    // Sort for deterministic output, then print one path per line.
    let mut resolved: Vec<_> = paths
        .into_iter()
        .flatten()
        .map(ResolvedFile::into_path)
        .collect();
    resolved.sort_unstable();
    for path in resolved {
        writeln!(writer, "{}", path.to_string_lossy())?;
    }
    Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/add_noqa.rs | crates/ruff/src/commands/add_noqa.rs | use std::path::PathBuf;
use std::time::Instant;
use anyhow::Result;
use log::{debug, error};
#[cfg(not(target_family = "wasm"))]
use rayon::prelude::*;
use ruff_linter::linter::add_noqa_to_path;
use ruff_linter::source_kind::SourceKind;
use ruff_linter::warn_user_once;
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{
PyprojectConfig, ResolvedFile, match_exclusion, python_files_in_path,
};
use crate::args::ConfigArguments;
/// Add `noqa` directives to a collection of files.
///
/// Resolves the Python files under `files`, lints each one in parallel, and
/// inserts `noqa` comments suppressing the diagnostics found. Returns the
/// total number of modifications made across all files. Per-file failures are
/// logged and skipped rather than aborting the run.
pub(crate) fn add_noqa(
    files: &[PathBuf],
    pyproject_config: &PyprojectConfig,
    config_arguments: &ConfigArguments,
    reason: Option<&str>,
) -> Result<usize> {
    // Collect all the files to check.
    let start = Instant::now();
    let (paths, resolver) = python_files_in_path(files, pyproject_config, config_arguments)?;
    let duration = start.elapsed();
    debug!("Identified files to lint in: {duration:?}");
    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(0);
    }
    // Discover the package root for each Python file.
    let package_roots = resolver.package_roots(
        &paths
            .iter()
            .flatten()
            .map(ResolvedFile::path)
            .collect::<Vec<_>>(),
    );
    let start = Instant::now();
    let modifications: usize = paths
        .par_iter()
        .flatten()
        .filter_map(|resolved_file| {
            // Only plain Python and stub files can take `noqa` comments.
            let SourceType::Python(source_type @ (PySourceType::Python | PySourceType::Stub)) =
                SourceType::from(resolved_file.path())
            else {
                return None;
            };
            let path = resolved_file.path();
            let package = resolved_file
                .path()
                .parent()
                .and_then(|parent| package_roots.get(parent))
                .and_then(|package| *package);
            let settings = resolver.resolve(path);
            // Respect per-file exclusions (explicitly-passed roots bypass them
            // unless `force_exclude` is set).
            if (settings.file_resolver.force_exclude || !resolved_file.is_root())
                && match_exclusion(
                    resolved_file.path(),
                    resolved_file.file_name(),
                    &settings.linter.exclude,
                )
            {
                return None;
            }
            let source_kind = match SourceKind::from_path(path, source_type) {
                Ok(Some(source_kind)) => source_kind,
                // Non-Python content (e.g. a non-Python notebook): nothing to do.
                Ok(None) => return None,
                Err(e) => {
                    error!("Failed to extract source from {}: {e}", path.display());
                    return None;
                }
            };
            match add_noqa_to_path(
                path,
                package,
                &source_kind,
                source_type,
                &settings.linter,
                reason,
            ) {
                Ok(count) => Some(count),
                Err(e) => {
                    error!("Failed to add noqa to {}: {e}", path.display());
                    None
                }
            }
        })
        .sum();
    let duration = start.elapsed();
    debug!("Added noqa to files in: {duration:?}");
    Ok(modifications)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/completions/config.rs | crates/ruff/src/commands/completions/config.rs | use clap::builder::{PossibleValue, TypedValueParser, ValueParserFactory};
use itertools::Itertools;
use std::str::FromStr;
use ruff_options_metadata::{OptionField, OptionSet, OptionsMetadata, Visit};
use ruff_workspace::options::Options;
/// Visitor that flattens the options metadata tree into
/// `(fully.qualified.name, documentation)` pairs for shell completion.
#[derive(Default)]
struct CollectOptionsVisitor {
    // Accumulated `(name, documentation)` pairs.
    values: Vec<(String, String)>,
    // Stack of ancestor group names for the set currently being visited.
    parents: Vec<String>,
}
// Allows draining the collected `(name, documentation)` pairs after visiting.
impl IntoIterator for CollectOptionsVisitor {
    type Item = (String, String);
    type IntoIter = std::vec::IntoIter<Self::Item>;
    fn into_iter(self) -> Self::IntoIter {
        self.values.into_iter()
    }
}
/// Joins `parents` and `name` with `.` to form a fully qualified option name
/// (e.g. `lint.select`).
///
/// Extracted because `record_set` and `record_field` previously duplicated
/// this construction.
fn fully_qualified_name(parents: &[String], name: &str) -> String {
    parents
        .iter()
        .map(String::as_str)
        .chain(std::iter::once(name))
        .collect::<Vec<_>>()
        .join(".")
}

impl Visit for CollectOptionsVisitor {
    fn record_set(&mut self, name: &str, group: OptionSet) {
        let fqn = fully_qualified_name(&self.parents, name);
        // Only add the set to the completion list if it has its own documentation.
        self.values
            .push((fqn, group.documentation().unwrap_or("").to_owned()));
        // Descend into the group with `name` pushed onto the ancestor stack.
        self.parents.push(name.to_owned());
        group.record(self);
        self.parents.pop();
    }

    fn record_field(&mut self, name: &str, field: OptionField) {
        let fqn = fully_qualified_name(&self.parents, name);
        self.values.push((fqn, field.doc.to_owned()));
    }
}
/// Opaque type used solely to enable tab completions
/// for `ruff option [OPTION]` command.
#[derive(Clone, Debug)]
pub struct OptionString(String);
impl From<String> for OptionString {
    fn from(s: String) -> Self {
        OptionString(s)
    }
}
impl From<OptionString> for String {
    fn from(value: OptionString) -> Self {
        value.0
    }
}
impl From<&str> for OptionString {
    fn from(s: &str) -> Self {
        OptionString(s.to_string())
    }
}
// Lets an `OptionString` be used wherever a `&str` is expected.
impl std::ops::Deref for OptionString {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl FromStr for OptionString {
    type Err = ();
    // Parsing succeeds only for names that exist in the options metadata,
    // so invalid option names are rejected during argument parsing.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Options::metadata()
            .has(s)
            .then(|| OptionString(s.to_owned()))
            .ok_or(())
    }
}
/// clap value parser for [`OptionString`], providing validation and
/// shell-completion candidates for option names.
#[derive(Clone)]
pub struct OptionStringParser;
impl ValueParserFactory for OptionString {
    type Parser = OptionStringParser;
    fn value_parser() -> Self::Parser {
        OptionStringParser
    }
}
impl TypedValueParser for OptionStringParser {
    type Value = OptionString;
    // Validates the raw argument, attaching the offending argument and value
    // to the clap error for a helpful message.
    fn parse_ref(
        &self,
        cmd: &clap::Command,
        arg: Option<&clap::Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let value = value
            .to_str()
            .ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        value.parse().map_err(|()| {
            let mut error = clap::Error::new(clap::error::ErrorKind::ValueValidation).with_cmd(cmd);
            if let Some(arg) = arg {
                error.insert(
                    clap::error::ContextKind::InvalidArg,
                    clap::error::ContextValue::String(arg.to_string()),
                );
            }
            error.insert(
                clap::error::ContextKind::InvalidValue,
                clap::error::ContextValue::String(value.to_string()),
            );
            error
        })
    }
    // Enumerates every known option name (with its first documentation
    // paragraph as help text) for shell completion.
    fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
        let mut visitor = CollectOptionsVisitor::default();
        Options::metadata().record(&mut visitor);
        Some(Box::new(visitor.into_iter().map(|(name, doc)| {
            let first_paragraph = doc
                .lines()
                .take_while(|line| !line.trim_end().is_empty())
                // Replace double quotes with single quotes, to avoid clap's lack of escaping
                // when creating zsh completions. This has no security implications, as it only
                // affects the help string, which is never executed
                .map(|s| s.replace('"', "'"))
                .join(" ");
            PossibleValue::new(name).help(first_paragraph)
        })))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/src/commands/completions/mod.rs | crates/ruff/src/commands/completions/mod.rs | pub(crate) mod config;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/config.rs | crates/ruff/tests/config.rs | //! Tests for the `ruff config` subcommand.
use std::process::Command;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
const BIN_NAME: &str = "ruff";
#[test]
fn lint_select() {
    // Snapshot of the human-readable help text emitted for `ruff config lint.select`.
    assert_cmd_snapshot!(
        Command::new(get_cargo_bin(BIN_NAME)).arg("config").arg("lint.select"), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    A list of rule codes or prefixes to enable. Prefixes can specify exact
    rules (like `F841`), entire categories (like `F`), or anything in
    between.
    When breaking ties between enabled and disabled rules (via `select` and
    `ignore`, respectively), more specific prefixes override less
    specific prefixes. `ignore` takes precedence over `select` if the
    same prefix appears in both.
    Default value: ["E4", "E7", "E9", "F"]
    Type: list[RuleSelector]
    Example usage:
    ```toml
    # On top of the defaults (`E4`, E7`, `E9`, and `F`), enable flake8-bugbear (`B`) and flake8-quotes (`Q`).
    select = ["E4", "E7", "E9", "F", "B", "Q"]
    ```
    ----- stderr -----
    "#
    );
}
#[test]
fn lint_select_json() {
    // Snapshot of the JSON metadata emitted for `ruff config lint.select --output-format json`.
    assert_cmd_snapshot!(
        Command::new(get_cargo_bin(BIN_NAME)).arg("config").arg("lint.select").arg("--output-format").arg("json"), @r##"
    success: true
    exit_code: 0
    ----- stdout -----
    {
      "doc": "A list of rule codes or prefixes to enable. Prefixes can specify exact\nrules (like `F841`), entire categories (like `F`), or anything in\nbetween.\n\nWhen breaking ties between enabled and disabled rules (via `select` and\n`ignore`, respectively), more specific prefixes override less\nspecific prefixes. `ignore` takes precedence over `select` if the\nsame prefix appears in both.",
      "default": "[\"E4\", \"E7\", \"E9\", \"F\"]",
      "value_type": "list[RuleSelector]",
      "scope": null,
      "example": "# On top of the defaults (`E4`, E7`, `E9`, and `F`), enable flake8-bugbear (`B`) and flake8-quotes (`Q`).\nselect = [\"E4\", \"E7\", \"E9\", \"F\", \"B\", \"Q\"]",
      "deprecated": null
    }
    ----- stderr -----
    "##
    );
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/direxist_guard.rs | crates/ruff/tests/direxist_guard.rs | //! Test to verify Ruff's behavior when run from deleted directory.
//! It has to be isolated in a separate module.
//! Tests in the same module become flaky under `cargo test`s parallel execution
//! due to in-test working directory manipulation.
#![cfg(target_family = "unix")]
use std::env::set_current_dir;
use std::process::Command;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
const BIN_NAME: &str = "ruff";
#[test]
fn check_in_deleted_directory_errors() {
let temp_dir = tempfile::tempdir().unwrap();
let temp_path = temp_dir.path().to_path_buf();
set_current_dir(&temp_path).unwrap();
drop(temp_dir);
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME)).arg("check"), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Working directory does not exist
"###);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/analyze_graph.rs | crates/ruff/tests/analyze_graph.rs | //! Tests the interaction of the `analyze graph` command.
#![cfg(not(target_arch = "wasm32"))]
#![cfg(not(windows))]
use assert_fs::prelude::*;
use std::process::Command;
use std::str;
use anyhow::Result;
use assert_fs::fixture::ChildPath;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use tempfile::TempDir;
fn command() -> Command {
let mut command = Command::new(get_cargo_bin("ruff"));
command.arg("analyze");
command.arg("graph");
command.arg("--preview");
command.env_clear();
command
}
const INSTA_FILTERS: &[(&str, &str)] = &[
// Rewrite Windows output to Unix output
(r"\\", "/"),
];
#[test]
fn dependencies() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from . import d
"#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"
from .e import f
"#})?;
root.child("ruff")
.child("e.py")
.write_str(indoc::indoc! {r#"
def f(): pass
"#})?;
root.child("ruff")
.child("e.pyi")
.write_str(indoc::indoc! {r#"
def f() -> None: ...
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.py"
],
"ruff/d.py": [
"ruff/e.py",
"ruff/e.pyi"
],
"ruff/e.py": [],
"ruff/e.pyi": []
}
----- stderr -----
"#);
});
Ok(())
}
#[test]
fn dependents() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from . import d
"#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"
from .e import f
"#})?;
root.child("ruff")
.child("e.py")
.write_str(indoc::indoc! {r#"
def f(): pass
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--direction").arg("dependents").current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [],
"ruff/b.py": [
"ruff/a.py"
],
"ruff/c.py": [
"ruff/b.py"
],
"ruff/d.py": [
"ruff/c.py"
],
"ruff/e.py": [
"ruff/d.py"
]
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn string_detection() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
import importlib
importlib.import_module("ruff.c")
"#})?;
root.child("ruff").child("c.py").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
----- stderr -----
"###);
});
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--detect-string-imports").current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
----- stderr -----
"#);
});
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--detect-string-imports").arg("--min-dots").arg("1").current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"#);
});
Ok(())
}
#[test]
fn string_detection_from_config() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
// Configure string import detection with a lower min-dots via ruff.toml
root.child("ruff.toml").write_str(indoc::indoc! {r#"
[analyze]
detect-string-imports = true
string-imports-min-dots = 1
"#})?;
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
import importlib
importlib.import_module("ruff.c")
"#})?;
root.child("ruff").child("c.py").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"#);
});
Ok(())
}
#[test]
fn globs() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff.toml").write_str(indoc::indoc! {r#"
[analyze]
include-dependencies = { "ruff/a.py" = ["ruff/b.py"], "ruff/b.py" = ["ruff/*.py"], "ruff/c.py" = ["*.json"] }
"#})?;
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff").child("a.py").write_str("")?;
root.child("ruff").child("b.py").write_str("")?;
root.child("ruff").child("c.py").write_str("")?;
root.child("ruff").child("d.json").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/__init__.py",
"ruff/a.py",
"ruff/b.py",
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.json"
]
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn exclude() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff.toml").write_str(indoc::indoc! {r#"
[analyze]
exclude = ["ruff/c.py"]
"#})?;
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff").child("b.py").write_str("")?;
root.child("ruff").child("c.py").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn wildcard() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
from ruff.b import *
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from ruff.utils import *
"#})?;
root.child("ruff")
.child("utils")
.child("__init__.py")
.write_str("from .helpers import *")?;
root.child("ruff")
.child("utils")
.child("helpers.py")
.write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/utils/__init__.py"
],
"ruff/utils/__init__.py": [
"ruff/utils/helpers.py"
],
"ruff/utils/helpers.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn nested_imports() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
match x:
case 1:
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
try:
import ruff.c
except ImportError as e:
import ruff.d
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"def c(): ..."#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"def d(): ..."#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py",
"ruff/d.py"
],
"ruff/c.py": [],
"ruff/d.py": []
}
----- stderr -----
"#);
});
Ok(())
}
/// Test for venv resolution with the `--python` flag.
///
/// Based on the [albatross-virtual-workspace] example from the uv repo and the report in [#16598].
///
/// [albatross-virtual-workspace]: https://github.com/astral-sh/uv/tree/aa629c4a/scripts/workspaces/albatross-virtual-workspace
/// [#16598]: https://github.com/astral-sh/ruff/issues/16598
#[test]
fn venv() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
// packages
// ├── albatross
// │ ├── check_installed_albatross.py
// │ ├── pyproject.toml
// │ └── src
// │ └── albatross
// │ └── __init__.py
// └── bird-feeder
// ├── check_installed_bird_feeder.py
// ├── pyproject.toml
// └── src
// └── bird_feeder
// └── __init__.py
let packages = root.child("packages");
let albatross = packages.child("albatross");
albatross
.child("check_installed_albatross.py")
.write_str("from albatross import fly")?;
albatross
.child("pyproject.toml")
.write_str(indoc::indoc! {r#"
[project]
name = "albatross"
version = "0.1.0"
requires-python = ">=3.12"
dependencies = ["bird-feeder", "tqdm>=4,<5"]
[tool.uv.sources]
bird-feeder = { workspace = true }
"#})?;
albatross
.child("src")
.child("albatross")
.child("__init__.py")
.write_str("import tqdm; from bird_feeder import use")?;
let bird_feeder = packages.child("bird-feeder");
bird_feeder
.child("check_installed_bird_feeder.py")
.write_str("from bird_feeder import use; from albatross import fly")?;
bird_feeder
.child("pyproject.toml")
.write_str(indoc::indoc! {r#"
[project]
name = "bird-feeder"
version = "1.0.0"
requires-python = ">=3.12"
dependencies = ["anyio>=4.3.0,<5"]
"#})?;
bird_feeder
.child("src")
.child("bird_feeder")
.child("__init__.py")
.write_str("import anyio")?;
let venv = root.child(".venv");
let bin = venv.child("bin");
bin.child("python").touch()?;
let home = format!("home = {}", bin.to_string_lossy());
venv.child("pyvenv.cfg").write_str(&home)?;
let site_packages = venv.child("lib").child("python3.12").child("site-packages");
site_packages
.child("_albatross.pth")
.write_str(&albatross.join("src").to_string_lossy())?;
site_packages
.child("_bird_feeder.pth")
.write_str(&bird_feeder.join("src").to_string_lossy())?;
site_packages.child("tqdm").child("__init__.py").touch()?;
// without `--python .venv`, the result should only include dependencies within the albatross
// package
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(
command().arg("packages/albatross").current_dir(&root),
@r#"
success: true
exit_code: 0
----- stdout -----
{
"packages/albatross/check_installed_albatross.py": [
"packages/albatross/src/albatross/__init__.py"
],
"packages/albatross/src/albatross/__init__.py": []
}
----- stderr -----
"#);
});
// with `--python .venv` both workspace and third-party dependencies are included
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(
command().args(["--python", ".venv"]).arg("packages/albatross").current_dir(&root),
@r#"
success: true
exit_code: 0
----- stdout -----
{
"packages/albatross/check_installed_albatross.py": [
"packages/albatross/src/albatross/__init__.py"
],
"packages/albatross/src/albatross/__init__.py": [
".venv/lib/python3.12/site-packages/tqdm/__init__.py",
"packages/bird-feeder/src/bird_feeder/__init__.py"
]
}
----- stderr -----
"#);
});
// test the error message for a non-existent venv. it's important that the `ruff analyze graph`
// flag matches the ty flag used to generate the error message (`--python`)
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(
command().args(["--python", "none"]).arg("packages/albatross").current_dir(&root),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Invalid `--python` argument `none`: does not point to a Python executable or a directory on disk
Cause: No such file or directory (os error 2)
");
});
Ok(())
}
#[test]
fn notebook_basic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a basic notebook with a simple import
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn notebook_with_magic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a notebook with IPython magic commands and imports
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/show_settings.rs | crates/ruff/tests/show_settings.rs | use anyhow::Context;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use std::path::Path;
use std::process::Command;
use tempfile::TempDir;
const BIN_NAME: &str = "ruff";
#[test]
fn display_default_settings() -> anyhow::Result<()> {
let tempdir = TempDir::new().context("Failed to create temp directory.")?;
// Tempdir path's on macos are symlinks, which doesn't play nicely with
// our snapshot filtering.
let project_dir =
dunce::canonicalize(tempdir.path()).context("Failed to canonical tempdir path.")?;
std::fs::write(
project_dir.join("pyproject.toml"),
r#"
[project]
name = "ruff"
version = "0.9.2"
requires-python = ">=3.7"
[tool.ruff]
line-length = 100
[tool.ruff.lint]
ignore = [
# Conflicts with the formatter
"COM812", "ISC001"
]
"#,
)?;
std::fs::write(project_dir.join("test.py"), r#"print("Hello")"#)
.context("Failed to write test.py.")?;
insta::with_settings!({filters => vec![
(&*tempdir_filter(&project_dir), "<temp_dir>/"),
(r#"\\(\w\w|\s|\.|")"#, "/$1"),
]}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--show-settings", "test.py"])
.current_dir(project_dir));
});
Ok(())
}
fn tempdir_filter(project_dir: &Path) -> String {
format!(r#"{}\\?/?"#, regex::escape(project_dir.to_str().unwrap()))
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/version.rs | crates/ruff/tests/version.rs | //! Tests for the --version command
use std::fs;
use std::process::Command;
use anyhow::Result;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use tempfile::TempDir;
const BIN_NAME: &str = "ruff";
const VERSION_FILTER: [(&str, &str); 1] = [(
r"\d+\.\d+\.\d+(\+\d+)?( \(\w{9} \d\d\d\d-\d\d-\d\d\))?",
"[VERSION]",
)];
#[test]
fn version_basics() {
insta::with_settings!({filters => VERSION_FILTER.to_vec()}, {
assert_cmd_snapshot!(
Command::new(get_cargo_bin(BIN_NAME)).arg("version"), @r"
success: true
exit_code: 0
----- stdout -----
ruff [VERSION]
----- stderr -----
"
);
});
}
/// `--config` is a global option,
/// so it's allowed to pass --config to subcommands such as `version`
/// -- the flag is simply ignored
#[test]
fn config_option_allowed_but_ignored() -> Result<()> {
let tempdir = TempDir::new()?;
let ruff_dot_toml = tempdir.path().join("ruff.toml");
fs::File::create(&ruff_dot_toml)?;
insta::with_settings!({filters => VERSION_FILTER.to_vec()}, {
assert_cmd_snapshot!(
Command::new(get_cargo_bin(BIN_NAME))
.arg("version")
.arg("--config")
.arg(&ruff_dot_toml)
.args(["--config", "lint.isort.extra-standard-library = ['foo', 'bar']"]), @r"
success: true
exit_code: 0
----- stdout -----
ruff [VERSION]
----- stderr -----
"
);
});
Ok(())
}
#[test]
fn config_option_ignored_but_validated() {
insta::with_settings!({filters => VERSION_FILTER.to_vec()}, {
assert_cmd_snapshot!(
Command::new(get_cargo_bin(BIN_NAME))
.arg("version")
.args(["--config", "foo = bar"]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'foo = bar' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
The supplied argument is not valid TOML:
TOML parse error at line 1, column 7
|
1 | foo = bar
| ^^^
string values must be quoted, expected literal string
For more information, try '--help'.
"
);
});
}
/// `--isolated` is also a global option,
#[test]
fn isolated_option_allowed() {
insta::with_settings!({filters => VERSION_FILTER.to_vec()}, {
assert_cmd_snapshot!(
Command::new(get_cargo_bin(BIN_NAME)).arg("version").arg("--isolated"), @r"
success: true
exit_code: 0
----- stdout -----
ruff [VERSION]
----- stderr -----
"
);
});
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/resolve_files.rs | crates/ruff/tests/resolve_files.rs | #![cfg(not(target_family = "wasm"))]
use std::path::Path;
use std::process::Command;
use std::str;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
const BIN_NAME: &str = "ruff";
#[cfg(not(target_os = "windows"))]
const TEST_FILTERS: &[(&str, &str)] = &[(".*/resources/test/fixtures/", "[BASEPATH]/")];
#[cfg(target_os = "windows")]
const TEST_FILTERS: &[(&str, &str)] = &[
(r".*\\resources\\test\\fixtures\\", "[BASEPATH]\\"),
(r"\\", "/"),
];
#[test]
fn check_project_include_defaults() {
// Defaults to checking the current working directory
//
// The test directory includes:
// - A pyproject.toml which specifies an include
// - A nested pyproject.toml which has a Ruff section
//
// The nested project should all be checked instead of respecting the parent includes
insta::with_settings!({
filters => TEST_FILTERS.to_vec()
}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--show-files"]).current_dir(Path::new("./resources/test/fixtures/include-test")), @r"
success: true
exit_code: 0
----- stdout -----
[BASEPATH]/include-test/a.py
[BASEPATH]/include-test/nested-project/e.py
[BASEPATH]/include-test/nested-project/pyproject.toml
[BASEPATH]/include-test/subdirectory/c.py
----- stderr -----
warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `nested-project/pyproject.toml`:
- 'select' -> 'lint.select'
");
});
}
#[test]
fn check_project_respects_direct_paths() {
// Given a direct path not included in the project `includes`, it should be checked
insta::with_settings!({
filters => TEST_FILTERS.to_vec()
}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--show-files", "b.py"]).current_dir(Path::new("./resources/test/fixtures/include-test")), @r"
success: true
exit_code: 0
----- stdout -----
[BASEPATH]/include-test/b.py
----- stderr -----
");
});
}
#[test]
fn check_project_respects_subdirectory_includes() {
// Given a direct path to a subdirectory, the include should be respected
insta::with_settings!({
filters => TEST_FILTERS.to_vec()
}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--show-files", "subdirectory"]).current_dir(Path::new("./resources/test/fixtures/include-test")), @r"
success: true
exit_code: 0
----- stdout -----
[BASEPATH]/include-test/subdirectory/c.py
----- stderr -----
");
});
}
#[test]
fn check_project_from_project_subdirectory_respects_includes() {
// Run from a project subdirectory, the include specified in the parent directory should be respected
insta::with_settings!({
filters => TEST_FILTERS.to_vec()
}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--show-files"]).current_dir(Path::new("./resources/test/fixtures/include-test/subdirectory")), @r"
success: true
exit_code: 0
----- stdout -----
[BASEPATH]/include-test/subdirectory/c.py
----- stderr -----
");
});
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/integration_test.rs | crates/ruff/tests/integration_test.rs | #![cfg(not(target_family = "wasm"))]
use std::fs;
#[cfg(unix)]
use std::fs::Permissions;
#[cfg(unix)]
use std::os::unix::fs::{OpenOptionsExt, PermissionsExt};
use std::path::Path;
use std::process::Command;
use std::str;
#[cfg(unix)]
use anyhow::Context;
use anyhow::Result;
#[cfg(unix)]
use clap::Parser;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
#[cfg(unix)]
use path_absolutize::path_dedot;
use tempfile::TempDir;
#[cfg(unix)]
use ruff::args::Args;
#[cfg(unix)]
use ruff::run;
const BIN_NAME: &str = "ruff";
fn ruff_cmd() -> Command {
Command::new(get_cargo_bin(BIN_NAME))
}
/// Builder for `ruff check` commands.
#[derive(Debug, Default)]
struct RuffCheck<'a> {
output_format: Option<&'a str>,
config: Option<&'a Path>,
filename: Option<&'a str>,
args: Vec<&'a str>,
}
impl<'a> RuffCheck<'a> {
/// Set the `--config` option.
#[must_use]
fn config(mut self, config: &'a Path) -> Self {
self.config = Some(config);
self
}
/// Set the `--output-format` option.
#[must_use]
fn output_format(mut self, format: &'a str) -> Self {
self.output_format = Some(format);
self
}
/// Set the input file to pass to `ruff check`.
#[must_use]
fn filename(mut self, filename: &'a str) -> Self {
self.filename = Some(filename);
self
}
/// Set the list of positional arguments.
#[must_use]
fn args(mut self, args: impl IntoIterator<Item = &'a str>) -> Self {
self.args = args.into_iter().collect();
self
}
/// Generate a [`Command`] for the `ruff check` command.
fn build(self) -> Command {
let mut cmd = ruff_cmd();
cmd.arg("check");
if let Some(output_format) = self.output_format {
cmd.args(["--output-format", output_format]);
}
cmd.arg("--no-cache");
if let Some(path) = self.config {
cmd.arg("--config");
cmd.arg(path);
} else {
cmd.arg("--isolated");
}
if let Some(filename) = self.filename {
cmd.arg(filename);
} else {
cmd.arg("-");
}
cmd.args(self.args);
cmd
}
}
#[test]
fn stdin_success() {
let mut cmd = RuffCheck::default().args([]).build();
assert_cmd_snapshot!(cmd
.pass_stdin(""), @r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
");
}
#[test]
fn stdin_error() {
let mut cmd = RuffCheck::default().args([]).build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\n"), @r"
success: false
exit_code: 1
----- stdout -----
F401 [*] `os` imported but unused
--> -:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
}
#[test]
fn stdin_filename() {
let mut cmd = RuffCheck::default()
.args(["--stdin-filename", "F401.py"])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\n"), @r"
success: false
exit_code: 1
----- stdout -----
F401 [*] `os` imported but unused
--> F401.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
}
#[test]
fn check_default_files() -> Result<()> {
let tempdir = TempDir::new()?;
fs::write(
tempdir.path().join("foo.py"),
r"
import foo # unused import
",
)?;
fs::write(
tempdir.path().join("bar.py"),
r"
import bar # unused import
",
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["check", "--isolated", "--no-cache", "--select", "F401"]).current_dir(tempdir.path()), @r"
success: false
exit_code: 1
----- stdout -----
F401 [*] `bar` imported but unused
--> bar.py:2:8
|
2 | import bar # unused import
| ^^^
|
help: Remove unused import: `bar`
F401 [*] `foo` imported but unused
--> foo.py:2:8
|
2 | import foo # unused import
| ^^^
|
help: Remove unused import: `foo`
Found 2 errors.
[*] 2 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
#[test]
fn check_warn_stdin_filename_with_files() {
let mut cmd = RuffCheck::default()
.args(["--stdin-filename", "F401.py"])
.filename("foo.py")
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\n"), @r"
success: false
exit_code: 1
----- stdout -----
F401 [*] `os` imported but unused
--> F401.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
warning: Ignoring file foo.py in favor of standard input.
");
}
/// Raise `TCH` errors in `.py` files ...
#[test]
fn stdin_source_type_py() {
let mut cmd = RuffCheck::default()
.args(["--stdin-filename", "TCH.py"])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\n"), @r"
success: false
exit_code: 1
----- stdout -----
F401 [*] `os` imported but unused
--> TCH.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
}
/// ... but not in `.pyi` files.
#[test]
fn stdin_source_type_pyi() {
let mut cmd = RuffCheck::default()
.args(["--stdin-filename", "TCH.pyi", "--select", "TCH"])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\n"), @r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
");
}
#[cfg(unix)]
#[test]
fn stdin_json() {
let directory = path_dedot::CWD.to_str().unwrap();
let binding = Path::new(directory).join("F401.py");
let file_path = binding.display();
let mut cmd = RuffCheck::default()
.output_format("json")
.args(["--stdin-filename", "F401.py"])
.build();
insta::with_settings!({filters => vec![
(file_path.to_string().as_str(), "/path/to/F401.py"),
]}, {
assert_cmd_snapshot!(cmd.pass_stdin("import os\n"));
});
}
#[test]
fn stdin_fix_py() {
let mut cmd = RuffCheck::default().args(["--fix"]).build();
assert_cmd_snapshot!(cmd
.pass_stdin("import os\nimport sys\n\nprint(sys.version)\n"), @r"
success: true
exit_code: 0
----- stdout -----
import sys
print(sys.version)
----- stderr -----
Found 1 error (1 fixed, 0 remaining).
");
}
#[test]
fn stdin_fix_jupyter() {
let mut cmd = RuffCheck::default()
.args(["--fix", "--stdin-filename", "Jupyter.ipynb"])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin(r#"{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "dccc687c-96e2-4604-b957-a8a89b5bec06",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"print(1)"
]
},
{
"cell_type": "markdown",
"id": "19e1b029-f516-4662-a9b9-623b93edac1a",
"metadata": {},
"source": [
"Foo"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cdce7b92-b0fb-4c02-86f6-e233b26fa84f",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"print(x)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e40b33d2-7fe4-46c5-bdf0-8802f3052565",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n"
]
}
],
"source": [
"print(1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1899bc8-d46f-4ec0-b1d1-e1ca0f04bf60",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}"#), @r#"
success: false
exit_code: 1
----- stdout -----
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "dccc687c-96e2-4604-b957-a8a89b5bec06",
"metadata": {},
"outputs": [],
"source": [
"print(1)"
]
},
{
"cell_type": "markdown",
"id": "19e1b029-f516-4662-a9b9-623b93edac1a",
"metadata": {},
"source": [
"Foo"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cdce7b92-b0fb-4c02-86f6-e233b26fa84f",
"metadata": {},
"outputs": [],
"source": [
"print(x)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e40b33d2-7fe4-46c5-bdf0-8802f3052565",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1\n"
]
}
],
"source": [
"print(1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1899bc8-d46f-4ec0-b1d1-e1ca0f04bf60",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
----- stderr -----
F821 Undefined name `x`
--> Jupyter.ipynb:cell 3:1:7
|
1 | print(x)
| ^
|
Found 3 errors (2 fixed, 1 remaining).
"#);
}
/// `--extension py:ipynb` forces a stdin file named `Jupyter.py` to be
/// parsed as a Jupyter notebook; diagnostics are then reported per cell
/// (`cell 1`, `cell 3`) rather than by flat line number.
#[test]
fn stdin_override_parser_ipynb() {
    let mut cmd = RuffCheck::default()
        .args(["--extension", "py:ipynb", "--stdin-filename", "Jupyter.py"])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin(r#"{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dccc687c-96e2-4604-b957-a8a89b5bec06",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19e1b029-f516-4662-a9b9-623b93edac1a",
   "metadata": {},
   "source": [
    "Foo"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "cdce7b92-b0fb-4c02-86f6-e233b26fa84f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e40b33d2-7fe4-46c5-bdf0-8802f3052565",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n"
     ]
    }
   ],
   "source": [
    "print(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1899bc8-d46f-4ec0-b1d1-e1ca0f04bf60",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    F401 [*] `os` imported but unused
     --> Jupyter.py:cell 1:1:8
      |
    1 | import os
      |        ^^
      |
    help: Remove unused import: `os`
    F401 [*] `sys` imported but unused
     --> Jupyter.py:cell 3:1:8
      |
    1 | import sys
      |        ^^^
      |
    help: Remove unused import: `sys`
    Found 2 errors.
    [*] 2 fixable with the `--fix` option.
    ----- stderr -----
    ");
}
/// The inverse override: `--extension ipynb:python` makes a `.ipynb`-named
/// stdin file parse as plain Python source, so the diagnostic uses a flat
/// line/column location instead of a cell-based one.
#[test]
fn stdin_override_parser_py() {
    let mut cmd = RuffCheck::default()
        .args([
            "--extension",
            "ipynb:python",
            "--stdin-filename",
            "F401.ipynb",
        ])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("import os\n"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    F401 [*] `os` imported but unused
    --> F401.ipynb:1:8
     |
   1 | import os
     |        ^^
     |
    help: Remove unused import: `os`
    Found 1 error.
    [*] 1 fixable with the `--fix` option.
    ----- stderr -----
    ");
}
/// With `--fix` on stdin, the (partially) fixed source must still be written
/// to stdout even when some diagnostics remain unfixable; the remaining
/// diagnostic goes to stderr.
#[test]
fn stdin_fix_when_not_fixable_should_still_print_contents() {
    let mut cmd = RuffCheck::default().args(["--fix"]).build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("import os\nimport sys\n\nif (1, 2):\n    print(sys.version)\n"), @r###"
    success: false
    exit_code: 1
    ----- stdout -----
    import sys

    if (1, 2):
        print(sys.version)

    ----- stderr -----
    F634 If test is a tuple, which is always `True`
     --> -:3:4
      |
    1 | import sys
    2 |
    3 | if (1, 2):
      |    ^^^^^^
    4 |     print(sys.version)
      |
    Found 2 errors (1 fixed, 1 remaining).
    "###);
}
/// With `--fix` on stdin and no diagnostics at all, the unchanged source
/// must still round-trip to stdout (stdin mode always echoes the result).
#[test]
fn stdin_fix_when_no_issues_should_still_print_contents() {
    let mut cmd = RuffCheck::default().args(["--fix"]).build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("import sys\n\nprint(sys.version)\n"), @r"
    success: true
    exit_code: 0
    ----- stdout -----
    import sys

    print(sys.version)

    ----- stderr -----
    All checks passed!
    ");
}
/// `ruff format` over a notebook on stdin: formats each code cell in place
/// (note the inserted blank-line `"\n"` entries after `def func(): pass`)
/// while leaving markdown cells and notebook metadata untouched.
#[test]
fn stdin_format_jupyter() {
    assert_cmd_snapshot!(ruff_cmd()
        .args(["format", "--stdin-filename", "Jupyter.ipynb", "--isolated"])
        .pass_stdin(r#"{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dccc687c-96e2-4604-b957-a8a89b5bec06",
   "metadata": {},
   "outputs": [],
   "source": [
    "x=1"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19e1b029-f516-4662-a9b9-623b93edac1a",
   "metadata": {},
   "source": [
    "Foo"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cdce7b92-b0fb-4c02-86f6-e233b26fa84f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def func():\n",
    "  pass\n",
    "print(1)\n",
    "import os"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
"#), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    {
     "cells": [
      {
       "cell_type": "code",
       "execution_count": null,
       "id": "dccc687c-96e2-4604-b957-a8a89b5bec06",
       "metadata": {},
       "outputs": [],
       "source": [
        "x = 1"
       ]
      },
      {
       "cell_type": "markdown",
       "id": "19e1b029-f516-4662-a9b9-623b93edac1a",
       "metadata": {},
       "source": [
        "Foo"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": null,
       "id": "cdce7b92-b0fb-4c02-86f6-e233b26fa84f",
       "metadata": {},
       "outputs": [],
       "source": [
        "def func():\n",
        "    pass\n",
        "\n",
        "\n",
        "print(1)\n",
        "import os"
       ]
      }
     ],
     "metadata": {
      "kernelspec": {
       "display_name": "Python 3 (ipykernel)",
       "language": "python",
       "name": "python3"
      },
      "language_info": {
       "codemirror_mode": {
        "name": "ipython",
        "version": 3
       },
       "file_extension": ".py",
       "mimetype": "text/x-python",
       "name": "python",
       "nbconvert_exporter": "python",
       "pygments_lexer": "ipython3",
       "version": "3.10.13"
      }
     },
     "nbformat": 4,
     "nbformat_minor": 5
    }

    ----- stderr -----
    "#);
}
/// A single syntax error on stdin is reported as `invalid-syntax` and
/// counts toward the error total (exit code 1).
#[test]
fn stdin_parse_error() {
    let mut cmd = RuffCheck::default().build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("from foo import\n"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    invalid-syntax: Expected one or more symbol names after import
     --> -:1:16
      |
    1 | from foo import
      |                ^
      |

    Found 1 error.
    ----- stderr -----
    ");
}
/// Multiple independent syntax errors are all reported — parsing recovers
/// after the first error rather than bailing out.
#[test]
fn stdin_multiple_parse_error() {
    let mut cmd = RuffCheck::default().build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("from foo import\nbar =\n"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    invalid-syntax: Expected one or more symbol names after import
     --> -:1:16
      |
    1 | from foo import
      |                ^
    2 | bar =
      |

    invalid-syntax: Expected an expression
     --> -:2:6
      |
    1 | from foo import
    2 | bar =
      |      ^
      |

    Found 2 errors.
    ----- stderr -----
    ");
}
#[test]
fn parse_error_not_included() {
    // Parse errors are always shown, even when the selected rule set
    // (here only isort's `I` rules) would otherwise produce no output.
    let mut cmd = RuffCheck::default().args(["--select=I"]).build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("foo =\n"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    invalid-syntax: Expected an expression
     --> -:1:6
      |
    1 | foo =
      |      ^
      |

    Found 1 error.
    ----- stderr -----
    ");
}
/// The full (default) output format renders the annotated-snippet style
/// diagnostic when preview mode is enabled via the CLI flag.
#[test]
fn full_output_preview() {
    let mut cmd = RuffCheck::default().args(["--preview"]).build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("l = 1"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    E741 Ambiguous variable name: `l`
     --> -:1:1
      |
    1 | l = 1
      | ^
      |

    Found 1 error.
    ----- stderr -----
    ");
}
/// Same as `full_output_preview`, but preview mode is enabled through a
/// `pyproject.toml` (`tool.ruff.preview = true`) instead of the CLI flag.
#[test]
fn full_output_preview_config() -> Result<()> {
    let tempdir = TempDir::new()?;
    let pyproject_toml = tempdir.path().join("pyproject.toml");
    fs::write(
        &pyproject_toml,
        r"
[tool.ruff]
preview = true
",
    )?;
    let mut cmd = RuffCheck::default().config(&pyproject_toml).build();
    assert_cmd_snapshot!(cmd.pass_stdin("l = 1"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    E741 Ambiguous variable name: `l`
     --> -:1:1
      |
    1 | l = 1
      | ^
      |

    Found 1 error.
    ----- stderr -----
    ");
    Ok(())
}
/// Explicitly requesting `--output-format full` yields the same rendering
/// as the default.
#[test]
fn full_output_format() {
    let mut cmd = RuffCheck::default().output_format("full").build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("l = 1"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    E741 Ambiguous variable name: `l`
     --> -:1:1
      |
    1 | l = 1
      | ^
      |

    Found 1 error.
    ----- stderr -----
    ");
}
/// `ruff rule F401` prints the rule's documentation; the expected output
/// lives in an external snapshot file (no inline `@` snapshot).
#[test]
fn rule_f401() {
    assert_cmd_snapshot!(ruff_cmd().args(["rule", "F401"]));
}
/// JSON output for `ruff rule`; the `file` field is path-dependent, so it
/// is normalized to `<FILE>` with an insta filter before snapshotting.
#[test]
fn rule_f401_output_json() {
    insta::with_settings!({filters => vec![
        (r#"("file": ")[^"]+(",)"#, "$1<FILE>$2"),
    ]}, {
        assert_cmd_snapshot!(ruff_cmd().args(["rule", "F401", "--output-format", "json"]));
    });
}
/// Text output format for `ruff rule` (external snapshot).
#[test]
fn rule_f401_output_text() {
    assert_cmd_snapshot!(ruff_cmd().args(["rule", "F401", "--output-format", "text"]));
}
/// An unknown rule code is rejected by clap's value parser (exit code 2),
/// regardless of the requested output format — verified below for the
/// default, JSON, and text formats.
#[test]
fn rule_invalid_rule_name() {
    assert_cmd_snapshot!(ruff_cmd().args(["rule", "RUF404"]), @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    error: invalid value 'RUF404' for '[RULE]'

    For more information, try '--help'.
    ");
}
#[test]
fn rule_invalid_rule_name_output_json() {
    assert_cmd_snapshot!(ruff_cmd().args(["rule", "RUF404", "--output-format", "json"]), @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    error: invalid value 'RUF404' for '[RULE]'

    For more information, try '--help'.
    ");
}
#[test]
fn rule_invalid_rule_name_output_text() {
    assert_cmd_snapshot!(ruff_cmd().args(["rule", "RUF404", "--output-format", "text"]), @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    error: invalid value 'RUF404' for '[RULE]'

    For more information, try '--help'.
    ");
}
/// `--statistics` aggregates diagnostics per rule. C416's fix is unsafe, so
/// without `--unsafe-fixes` the summary reports no available fixes and the
/// rule is not marked `[*]`.
#[test]
fn show_statistics() {
    let mut cmd = RuffCheck::default()
        .args(["--select", "C416", "--statistics"])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin(r#"
def mvce(keys, values):
    return {key: value for key, value in zip(keys, values)}
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    1	C416	unnecessary-comprehension
    Found 1 error.
    No fixes available (1 hidden fix can be enabled with the `--unsafe-fixes` option).
    ----- stderr -----
    ");
}
/// With `--unsafe-fixes`, the same C416 diagnostic becomes fixable and the
/// statistics line gains the `[*]` marker.
#[test]
fn show_statistics_unsafe_fixes() {
    let mut cmd = RuffCheck::default()
        .args(["--select", "C416", "--statistics", "--unsafe-fixes"])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin(r#"
def mvce(keys, values):
    return {key: value for key, value in zip(keys, values)}
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    1	C416	[*] unnecessary-comprehension
    Found 1 error.
    [*] 1 fixable with the `--fix` option.
    ----- stderr -----
    ");
}
/// JSON statistics output: `fixable` is false because the only fix is
/// unsafe, and `fixable_count` counts applicable fixes (here 0).
#[test]
fn show_statistics_json() {
    let mut cmd = RuffCheck::default()
        .args([
            "--select",
            "C416",
            "--statistics",
            "--output-format",
            "json",
        ])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin(r#"
def mvce(keys, values):
    return {key: value for key, value in zip(keys, values)}
"#), @r#"
    success: false
    exit_code: 1
    ----- stdout -----
    [
      {
        "code": "C416",
        "name": "unnecessary-comprehension",
        "count": 1,
        "fixable": false,
        "fixable_count": 0
      }
    ]
    ----- stderr -----
    "#);
}
/// JSON statistics with `--unsafe-fixes`: the unsafe fix is now counted,
/// flipping `fixable` to true and `fixable_count` to 1.
#[test]
fn show_statistics_json_unsafe_fixes() {
    let mut cmd = RuffCheck::default()
        .args([
            "--select",
            "C416",
            "--statistics",
            "--unsafe-fixes",
            "--output-format",
            "json",
        ])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin(r#"
def mvce(keys, values):
    return {key: value for key, value in zip(keys, values)}
"#), @r#"
    success: false
    exit_code: 1
    ----- stdout -----
    [
      {
        "code": "C416",
        "name": "unnecessary-comprehension",
        "count": 1,
        "fixable": true,
        "fixable_count": 1
      }
    ]
    ----- stderr -----
    "#);
}
/// Partially-fixable rule group in JSON statistics: of the two UP035
/// diagnostics only one has a fix, so `fixable` is false (not ALL fixable)
/// while `fixable_count` is 1.
#[test]
fn show_statistics_json_partial_fix() {
    let mut cmd = RuffCheck::default()
        .args([
            "--select",
            "UP035",
            "--statistics",
            "--output-format",
            "json",
        ])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("from typing import List, AsyncGenerator"), @r#"
    success: false
    exit_code: 1
    ----- stdout -----
    [
      {
        "code": "UP035",
        "name": "deprecated-import",
        "count": 2,
        "fixable": false,
        "fixable_count": 1
      }
    ]
    ----- stderr -----
    "#);
}
/// Same partial-fix scenario in text statistics: the rule is marked `[-]`
/// (some, but not all, diagnostics fixable) and the fix summary counts 1.
#[test]
fn show_statistics_partial_fix() {
    let mut cmd = RuffCheck::default()
        .args(["--select", "UP035", "--statistics"])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("from typing import List, AsyncGenerator"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    2	UP035	[-] deprecated-import
    Found 2 errors.
    [*] 1 fixable with the `--fix` option.
    ----- stderr -----
    ");
}
/// All three syntax-error flavors (parse errors, version-gated syntax,
/// semantic syntax errors) are aggregated under the single
/// `invalid-syntax` bucket in `--statistics` output.
#[test]
fn show_statistics_syntax_errors() {
    let mut cmd = RuffCheck::default()
        .args(["--statistics", "--target-version=py39", "--preview"])
        .build();

    // ParseError
    assert_cmd_snapshot!(
        cmd.pass_stdin("x ="),
        @r"
    success: false
    exit_code: 1
    ----- stdout -----
    1	invalid-syntax
    Found 1 error.
    ----- stderr -----
    ");

    // match before 3.10, UnsupportedSyntaxError
    assert_cmd_snapshot!(
        cmd.pass_stdin("match 2:\n    case 1: ..."),
        @r"
    success: false
    exit_code: 1
    ----- stdout -----
    1	invalid-syntax
    Found 1 error.
    ----- stderr -----
    ");

    // rebound comprehension variable, SemanticSyntaxError
    assert_cmd_snapshot!(
        cmd.pass_stdin("[x := 1 for x in range(0)]"),
        @r"
    success: false
    exit_code: 1
    ----- stdout -----
    1	invalid-syntax
    Found 1 error.
    ----- stderr -----
    ");
}
/// With `--preview`, selecting the `RUF9` prefix enables both the stable
/// (RUF900–RUF903, RUF950) and the preview (RUF911) test rules.
#[test]
fn preview_enabled_prefix() {
    // All the RUF9XX test rules should be triggered
    let mut cmd = RuffCheck::default()
        .args(["--select", "RUF9", "--output-format=concise", "--preview"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:1: RUF900 Hey this is a stable test rule.
    -:1:1: RUF901 [*] Hey this is a stable test rule with a safe fix.
    -:1:1: RUF902 Hey this is a stable test rule with an unsafe fix.
    -:1:1: RUF903 Hey this is a stable test rule with a display only fix.
    -:1:1: RUF911 Hey this is a preview test rule.
    -:1:1: RUF950 Hey this is a test rule that was redirected from another.
    Found 6 errors.
    [*] 1 fixable with the `--fix` option (1 hidden fix can be enabled with the `--unsafe-fixes` option).
    ----- stderr -----
    ");
}
/// `--select ALL --preview` includes the preview test rules alongside the
/// stable ones; incompatible pydocstyle pairs produce warnings on stderr.
#[test]
fn preview_enabled_all() {
    let mut cmd = RuffCheck::default()
        .args(["--select", "ALL", "--output-format=concise", "--preview"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:1: D100 Missing docstring in public module
    -:1:1: CPY001 Missing copyright notice at top of file
    -:1:1: RUF900 Hey this is a stable test rule.
    -:1:1: RUF901 [*] Hey this is a stable test rule with a safe fix.
    -:1:1: RUF902 Hey this is a stable test rule with an unsafe fix.
    -:1:1: RUF903 Hey this is a stable test rule with a display only fix.
    -:1:1: RUF911 Hey this is a preview test rule.
    -:1:1: RUF950 Hey this is a test rule that was redirected from another.
    Found 8 errors.
    [*] 1 fixable with the `--fix` option (1 hidden fix can be enabled with the `--unsafe-fixes` option).
    ----- stderr -----
    warning: `incorrect-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `incorrect-blank-line-before-class`.
    warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line`.
    ");
}
/// Directly selecting a preview rule with `--preview` enabled works
/// silently (no "has no effect" warning).
#[test]
fn preview_enabled_direct() {
    // Should be enabled without warning
    let mut cmd = RuffCheck::default()
        .args(["--select", "RUF911", "--output-format=concise", "--preview"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:1: RUF911 Hey this is a preview test rule.
    Found 1 error.
    ----- stderr -----
    ");
}
#[test]
fn preview_disabled_direct() {
    // RUF911 is a preview rule, so selecting it without `--preview` should
    // warn that the selection has no effect rather than enable the rule.
    let mut cmd = RuffCheck::default()
        .args(["--select", "RUF911", "--output-format=concise"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!
    ----- stderr -----
    warning: Selection `RUF911` has no effect because preview is not enabled.
    ");
}
#[test]
fn preview_disabled_prefix_empty() {
    // Warns that the selection is empty since all of the RUF91 rules are in preview
    let mut cmd = RuffCheck::default()
        .args(["--select", "RUF91", "--output-format=concise"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!
    ----- stderr -----
    warning: Selection `RUF91` has no effect because preview is not enabled.
    ");
}
#[test]
fn preview_disabled_does_not_warn_for_empty_ignore_selections() {
    // Does not warn that the selection is empty since the user is not trying to enable the rule
    let mut cmd = RuffCheck::default()
        .args(["--ignore", "RUF9", "--output-format=concise"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!
    ----- stderr -----
    ");
}
#[test]
fn preview_disabled_does_not_warn_for_empty_fixable_selections() {
    // Does not warn that the selection is empty since the user is not trying to enable the rule
    let mut cmd = RuffCheck::default()
        .args(["--fixable", "RUF9", "--output-format=concise"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!
    ----- stderr -----
    ");
}
#[test]
fn preview_group_selector() {
    // `--select PREVIEW` should error (selector was removed)
    let mut cmd = RuffCheck::default()
        .args([
            "--select",
            "PREVIEW",
            "--preview",
            "--output-format=concise",
        ])
        .build();
    assert_cmd_snapshot!(cmd
        .pass_stdin("I=42\n"), @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    error: invalid value 'PREVIEW' for '--select <RULE_CODE>'

    For more information, try '--help'.
    ");
}
/// Specificity of rule selectors: `--select RUF9` is more specific than
/// `--ignore RUF`, so the ignore is overridden and all RUF9XX test rules
/// (stable and preview) still fire.
#[test]
fn preview_enabled_group_ignore() {
    // Should detect stable and unstable rules, RUF9 is more specific than RUF so ignore has no effect
    let mut cmd = RuffCheck::default()
        .args([
            "--select",
            "RUF9",
            "--ignore",
            "RUF",
            "--preview",
            "--output-format=concise",
        ])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:1: RUF900 Hey this is a stable test rule.
    -:1:1: RUF901 [*] Hey this is a stable test rule with a safe fix.
    -:1:1: RUF902 Hey this is a stable test rule with an unsafe fix.
    -:1:1: RUF903 Hey this is a stable test rule with a display only fix.
    -:1:1: RUF911 Hey this is a preview test rule.
    -:1:1: RUF950 Hey this is a test rule that was redirected from another.
    Found 6 errors.
    [*] 1 fixable with the `--fix` option (1 hidden fix can be enabled with the `--unsafe-fixes` option).
    ----- stderr -----
    ");
}
#[test]
fn removed_direct() {
    // Selection of a removed rule should fail
    let mut cmd = RuffCheck::default().args(["--select", "RUF931"]).build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    ruff failed
      Cause: Rule `RUF931` was removed and cannot be selected.
    ");
}
#[test]
fn removed_direct_multiple() {
    // Selection of multiple removed rule should fail with a message
    // including all the rules
    let mut cmd = RuffCheck::default()
        .args(["--select", "RUF930", "--select", "RUF931"])
        .build();
    assert_cmd_snapshot!(cmd, @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    ruff failed
      Cause: The following rules have been removed and cannot be selected:
        - RUF930
        - RUF931
    ");
}
#[test]
fn removed_indirect() {
    // Selection _including_ a removed rule without matching should not fail
    // nor should the rule be used
    let mut cmd = RuffCheck::default().args(["--select", "RUF93"]).build();
    assert_cmd_snapshot!(cmd, @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!
    ----- stderr -----
    ");
}
#[test]
fn removed_ignore_direct() {
let mut cmd = RuffCheck::default().args(["--ignore", "UP027"]).build();
assert_cmd_snapshot!(cmd, @r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/cli/analyze_graph.rs | crates/ruff/tests/cli/analyze_graph.rs | use std::process::Command;
use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
/// By default, `ruff analyze graph` treats imports inside
/// `if TYPE_CHECKING:` blocks as real edges; `--no-type-checking-imports`
/// drops them from the dependency graph.
///
/// NOTE(review): `ruff/b.py` uses `TYPE_CHECKING` without importing it —
/// presumably fine because graph extraction is syntactic and never
/// executes the fixture; confirm this is intentional.
#[test]
fn type_checking_imports() -> anyhow::Result<()> {
    let test = AnalyzeTest::with_files([
        ("ruff/__init__.py", ""),
        (
            "ruff/a.py",
            r#"
            from typing import TYPE_CHECKING

            import ruff.b

            if TYPE_CHECKING:
                import ruff.c
        "#,
        ),
        (
            "ruff/b.py",
            r#"
            if TYPE_CHECKING:
                from ruff import c
        "#,
        ),
        ("ruff/c.py", ""),
    ])?;

    assert_cmd_snapshot!(test.command(), @r###"
    success: true
    exit_code: 0
    ----- stdout -----
    {
      "ruff/__init__.py": [],
      "ruff/a.py": [
        "ruff/b.py",
        "ruff/c.py"
      ],
      "ruff/b.py": [
        "ruff/c.py"
      ],
      "ruff/c.py": []
    }

    ----- stderr -----
    "###);

    assert_cmd_snapshot!(
        test.command()
        .arg("--no-type-checking-imports"),
        @r###"
    success: true
    exit_code: 0
    ----- stdout -----
    {
      "ruff/__init__.py": [],
      "ruff/a.py": [
        "ruff/b.py"
      ],
      "ruff/b.py": [],
      "ruff/c.py": []
    }

    ----- stderr -----
    "###
    );

    Ok(())
}
/// The `analyze.type-checking-imports` setting in `ruff.toml` controls the
/// same behavior as the CLI flag; rewriting the config between runs flips
/// whether `if TYPE_CHECKING:` imports appear in the graph.
#[test]
fn type_checking_imports_from_config() -> anyhow::Result<()> {
    let test = AnalyzeTest::with_files([
        ("ruff/__init__.py", ""),
        (
            "ruff/a.py",
            r#"
            from typing import TYPE_CHECKING

            import ruff.b

            if TYPE_CHECKING:
                import ruff.c
        "#,
        ),
        (
            "ruff/b.py",
            r#"
            if TYPE_CHECKING:
                from ruff import c
        "#,
        ),
        ("ruff/c.py", ""),
        (
            "ruff.toml",
            r#"
            [analyze]
            type-checking-imports = false
        "#,
        ),
    ])?;

    assert_cmd_snapshot!(test.command(), @r###"
    success: true
    exit_code: 0
    ----- stdout -----
    {
      "ruff/__init__.py": [],
      "ruff/a.py": [
        "ruff/b.py"
      ],
      "ruff/b.py": [],
      "ruff/c.py": []
    }

    ----- stderr -----
    "###);

    test.write_file(
        "ruff.toml",
        r#"
        [analyze]
        type-checking-imports = true
    "#,
    )?;

    assert_cmd_snapshot!(test.command(), @r###"
    success: true
    exit_code: 0
    ----- stdout -----
    {
      "ruff/__init__.py": [],
      "ruff/a.py": [
        "ruff/b.py",
        "ruff/c.py"
      ],
      "ruff/b.py": [
        "ruff/c.py"
      ],
      "ruff/c.py": []
    }

    ----- stderr -----
    "###
    );

    Ok(())
}
/// Test harness for `ruff analyze graph` invocations: wraps [`CliTest`]
/// with analyze-specific command defaults and snapshot path filters.
struct AnalyzeTest {
    // Underlying generic CLI fixture; exposed via `Deref` below.
    cli_test: CliTest,
}
impl AnalyzeTest {
    /// Creates a fresh analyze-test environment whose snapshot settings
    /// rewrite escaped Windows path separators (`\\`) to `/` so snapshots
    /// are platform-independent.
    pub(crate) fn new() -> anyhow::Result<Self> {
        let cli_test = CliTest::with_settings(|_, mut settings| {
            settings.add_filter(r#"\\\\"#, "/");
            settings
        })?;
        Ok(Self { cli_test })
    }

    /// Creates an environment pre-populated with the given
    /// `(relative path, contents)` pairs.
    fn with_files<'a>(files: impl IntoIterator<Item = (&'a str, &'a str)>) -> anyhow::Result<Self> {
        let test = Self::new()?;
        test.write_files(files)?;
        Ok(test)
    }

    /// Creates an environment containing a single file.
    #[expect(unused)]
    fn with_file(path: impl AsRef<std::path::Path>, content: &str) -> anyhow::Result<Self> {
        let test = Self::new()?;
        test.write_file(path, content)?;
        Ok(test)
    }

    /// Returns a `ruff analyze graph --preview` command rooted in this
    /// environment; callers append extra flags before snapshotting.
    fn command(&self) -> Command {
        let mut cmd = self.cli_test.command();
        cmd.args(["analyze", "graph", "--preview"]);
        cmd
    }
}
/// Delegates to the wrapped [`CliTest`] so helpers such as `write_file`
/// can be called directly on an `AnalyzeTest`.
impl std::ops::Deref for AnalyzeTest {
    type Target = CliTest;

    fn deref(&self) -> &Self::Target {
        &self.cli_test
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/cli/lint.rs | crates/ruff/tests/cli/lint.rs | //! Tests the interaction of the `lint` configuration section
use std::fs;
use std::process::Command;
use std::str;
use anyhow::Result;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use crate::CliTest;
const BIN_NAME: &str = "ruff";
const STDIN_BASE_OPTIONS: &[&str] = &["check", "--no-cache", "--output-format", "concise"];
impl CliTest {
fn check_command(&self) -> Command {
let mut command = self.command();
command.args(STDIN_BASE_OPTIONS);
command
}
}
#[test]
fn top_level_options() -> Result<()> {
let test = CliTest::new()?;
test.write_file(
"ruff.toml",
r#"
extend-select = ["B", "Q"]
[flake8-quotes]
inline-quotes = "single"
"#,
)?;
assert_cmd_snapshot!(test.check_command()
.arg("--config")
.arg("ruff.toml")
.args(["--stdin-filename", "test.py"])
.arg("-")
.pass_stdin(r#"a = "abcba".strip("aba")"#), @r"
success: false
exit_code: 1
----- stdout -----
test.py:1:5: Q000 [*] Double quotes found but single quotes preferred
test.py:1:5: B005 Using `.strip()` with multi-character strings is misleading
test.py:1:19: Q000 [*] Double quotes found but single quotes preferred
Found 3 errors.
[*] 2 fixable with the `--fix` option.
----- stderr -----
warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
- 'extend-select' -> 'lint.extend-select'
- 'flake8-quotes' -> 'lint.flake8-quotes'
");
Ok(())
}
#[test]
fn lint_options() -> Result<()> {
let case = CliTest::with_file(
"ruff.toml",
r#"
[lint]
extend-select = ["B", "Q"]
[lint.flake8-quotes]
inline-quotes = "single"
"#,
)?;
assert_cmd_snapshot!(
case.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"a = "abcba".strip("aba")"#), @r"
success: false
exit_code: 1
----- stdout -----
-:1:5: Q000 [*] Double quotes found but single quotes preferred
-:1:5: B005 Using `.strip()` with multi-character strings is misleading
-:1:19: Q000 [*] Double quotes found but single quotes preferred
Found 3 errors.
[*] 2 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
/// Tests that configurations from the top-level and `lint` section are merged together.
#[test]
fn mixed_levels() -> Result<()> {
    let test = CliTest::new()?;
    test.write_file(
        "ruff.toml",
        r#"
extend-select = ["B", "Q"]

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;

    assert_cmd_snapshot!(test.check_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r#"a = "abcba".strip("aba")"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:5: Q000 [*] Double quotes found but single quotes preferred
    -:1:5: B005 Using `.strip()` with multi-character strings is misleading
    -:1:19: Q000 [*] Double quotes found but single quotes preferred
    Found 3 errors.
    [*] 2 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
    ");
    Ok(())
}
/// Tests that options in the `lint` section have higher precedence than top-level options (because they are more specific).
#[test]
fn precedence() -> Result<()> {
    let test = CliTest::new()?;
    test.write_file(
        "ruff.toml",
        r#"
[lint]
extend-select = ["B", "Q"]

[flake8-quotes]
inline-quotes = "double"

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;

    // The `lint.flake8-quotes` "single" setting wins over the deprecated
    // top-level "double" one, as the Q000 diagnostics below confirm.
    assert_cmd_snapshot!(test.check_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r#"a = "abcba".strip("aba")"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:5: Q000 [*] Double quotes found but single quotes preferred
    -:1:5: B005 Using `.strip()` with multi-character strings is misleading
    -:1:19: Q000 [*] Double quotes found but single quotes preferred
    Found 3 errors.
    [*] 2 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'flake8-quotes' -> 'lint.flake8-quotes'
    ");
    Ok(())
}
/// Interplay of `extend-exclude`, `lint.exclude`, and explicit CLI paths:
/// excluded files are skipped during directory discovery, but a file named
/// directly on the command line is always linted.
#[test]
fn exclude() -> Result<()> {
    let case = CliTest::new()?;
    case.write_file(
        "ruff.toml",
        r#"
extend-select = ["B", "Q"]
extend-exclude = ["out"]

[lint]
exclude = ["test.py", "generated.py"]

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;
    case.write_file(
        "main.py",
        r#"from test import say_hy

if __name__ == "__main__":
    say_hy("dear Ruff contributor")
"#,
    )?;

    // Excluded file but passed to the CLI directly, should be linted
    case.write_file(
        "test.py",
        r#"def say_hy(name: str):
        print(f"Hy {name}")"#,
    )?;

    case.write_file(
        "generated.py",
        r#"NUMBERS = [
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
    10, 11, 12, 13, 14, 15, 16, 17, 18, 19
]
OTHER = "OTHER"
"#,
    )?;

    case.write_file("out/a.py", r#"a = "a""#)?;

    assert_cmd_snapshot!(
        case.check_command()
        .args(["--config", "ruff.toml"])
        // Explicitly pass test.py, should be linted regardless of it being excluded by lint.exclude
        .arg("test.py")
        // Lint all other files in the directory, should respect the `exclude` and `lint.exclude` options
        .arg("."), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    main.py:3:16: Q000 [*] Double quotes found but single quotes preferred
    main.py:4:12: Q000 [*] Double quotes found but single quotes preferred
    test.py:2:15: Q000 [*] Double quotes found but single quotes preferred
    Found 3 errors.
    [*] 3 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
    ");
    Ok(())
}
/// Regression test for <https://github.com/astral-sh/ruff/issues/20035>
#[test]
fn deduplicate_directory_and_explicit_file() -> Result<()> {
    let case = CliTest::new()?;
    case.write_file(
        "ruff.toml",
        r#"
[lint]
exclude = ["main.py"]
"#,
    )?;
    case.write_file("main.py", "import os\n")?;

    // The file is reachable both via `.` (where it is excluded) and by
    // name (where the exclusion is overridden); it must be linted exactly
    // once, not reported twice.
    assert_cmd_snapshot!(
        case.check_command()
        .args(["--config", "ruff.toml"])
        .arg(".")
        // Explicitly pass main.py, should be linted regardless of it being excluded by lint.exclude
        .arg("main.py"),
        @r"
    success: false
    exit_code: 1
    ----- stdout -----
    main.py:1:8: F401 [*] `os` imported but unused
    Found 1 error.
    [*] 1 fixable with the `--fix` option.

    ----- stderr -----
    "
    );
    Ok(())
}
/// A `--stdin-filename` matching `lint.exclude` still gets linted: stdin
/// input is treated like an explicitly passed file, so exclusions do not
/// apply (only the deprecation warning for the top-level option appears).
#[test]
fn exclude_stdin() -> Result<()> {
    let case = CliTest::with_file(
        "ruff.toml",
        r#"
extend-select = ["B", "Q"]

[lint]
exclude = ["generated.py"]

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;

    assert_cmd_snapshot!(
        case.check_command()
        .args(["--config", "ruff.toml"])
        .args(["--stdin-filename", "generated.py"])
        .arg("-")
        .pass_stdin(r#"
from test import say_hy

if __name__ == "__main__":
    say_hy("dear Ruff contributor")
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    generated.py:4:16: Q000 [*] Double quotes found but single quotes preferred
    generated.py:5:12: Q000 [*] Double quotes found but single quotes preferred
    Found 2 errors.
    [*] 2 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
    ");
    Ok(())
}
/// `pycodestyle.max-line-length` overrides the global `line-length` for
/// E501: a line over 80 but under 100 characters passes, while one over
/// 100 is flagged. The test uses wide (CJK) characters, so the limit is
/// measured in display width, not bytes.
#[test]
fn line_too_long_width_override() -> Result<()> {
    let test = CliTest::new()?;
    test.write_file(
        "ruff.toml",
        r#"
line-length = 80
select = ["E501"]

[pycodestyle]
max-line-length = 100
"#,
    )?;

    assert_cmd_snapshot!(test.check_command()
        .arg("--config")
        .arg("ruff.toml")
        .args(["--stdin-filename", "test.py"])
        .arg("-")
        .pass_stdin(r#"
# longer than 80, but less than 100
_ = "---------------------------------------------------------------------------亜亜亜亜亜亜"
# longer than 100
_ = "---------------------------------------------------------------------------亜亜亜亜亜亜亜亜亜亜亜亜亜亜"
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    test.py:5:91: E501 Line too long (109 > 100)
    Found 1 error.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'select' -> 'lint.select'
      - 'pycodestyle' -> 'lint.pycodestyle'
    ");
    Ok(())
}
/// `--per-file-ignores generated.py:Q` suppresses the flake8-quotes
/// diagnostics for the stdin file while the unrelated F401 still fires.
#[test]
fn per_file_ignores_stdin() -> Result<()> {
    let fixture = CliTest::with_file(
        "ruff.toml",
        r#"
extend-select = ["B", "Q"]

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;
    assert_cmd_snapshot!(fixture
        .check_command()
        .args(["--config", "ruff.toml"])
        .args(["--stdin-filename", "generated.py"])
        .args(["--per-file-ignores", "generated.py:Q"])
        .arg("-")
        .pass_stdin(r#"
import os

from test import say_hy

if __name__ == "__main__":
    say_hy("dear Ruff contributor")
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    generated.py:2:8: F401 [*] `os` imported but unused
    Found 1 error.
    [*] 1 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
    ");
    Ok(())
}
/// Same scenario via `--extend-per-file-ignores`, which adds to (rather
/// than replaces) any configured per-file ignores; the observed behavior
/// here is identical.
#[test]
fn extend_per_file_ignores_stdin() -> Result<()> {
    let fixture = CliTest::with_file(
        "ruff.toml",
        r#"
extend-select = ["B", "Q"]

[lint.flake8-quotes]
inline-quotes = "single"
"#,
    )?;

    assert_cmd_snapshot!(fixture
        .check_command()
        .args(["--config", "ruff.toml"])
        .args(["--stdin-filename", "generated.py"])
        .args(["--extend-per-file-ignores", "generated.py:Q"])
        .arg("-")
        .pass_stdin(r#"
import os

from test import say_hy

if __name__ == "__main__":
    say_hy("dear Ruff contributor")
"#), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    generated.py:2:8: F401 [*] `os` imported but unused
    Found 1 error.
    [*] 1 fixable with the `--fix` option.

    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
    ");
    Ok(())
}
/// Regression test for [#8858](https://github.com/astral-sh/ruff/issues/8858)
///
/// A child-directory `ruff.toml` that only sets `ignore` must not crash or
/// mis-merge with the parent config's `select = ["ALL"]`; running from the
/// subdirectory (which contains no Python files) should simply pass.
#[test]
fn parent_configuration_override() -> Result<()> {
    let fixture = CliTest::new()?;
    fixture.write_file(
        "ruff.toml",
        r#"
[lint]
select = ["ALL"]
"#,
    )?;

    fixture.write_file(
        "subdirectory/ruff.toml",
        r#"
[lint]
ignore = ["D203", "D212"]
"#,
    )?;

    assert_cmd_snapshot!(fixture
        .check_command()
        .current_dir(fixture.root().join("subdirectory"))
        , @r"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!

    ----- stderr -----
    warning: No Python files found under the given path(s)
    ");
    Ok(())
}
/// Passing a path-like value to `--config` that does not exist on disk is
/// rejected with a clap-style error plus a tip explaining the two accepted forms.
#[test]
fn nonexistent_config_file() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args(["--config", "foo.toml", "."]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'foo.toml' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
It looks like you were trying to pass a path to a configuration file.
The path `foo.toml` does not point to a configuration file
For more information, try '--help'.
");
}
/// A `--config` inline override that is not valid TOML (unquoted string value)
/// is rejected, surfacing the underlying TOML parse error in the message.
#[test]
fn config_override_rejected_if_invalid_toml() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args(["--config", "foo = bar", "."]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'foo = bar' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
The supplied argument is not valid TOML:
TOML parse error at line 1, column 7
|
1 | foo = bar
| ^^^
string values must be quoted, expected literal string
For more information, try '--help'.
");
}
/// Specifying two `--config` *file* arguments on one invocation is an error;
/// only one configuration file may be given (inline overrides are unlimited).
#[test]
fn too_many_config_files() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file("ruff.toml", "")?;
fixture.write_file("ruff2.toml", "")?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("--config")
.arg("ruff2.toml")
.arg("."), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: You cannot specify more than one configuration file on the command line.
tip: remove either `--config=ruff.toml` or `--config=ruff2.toml`.
For more information, try `--help`.
");
Ok(())
}
/// The `extend` option cannot be set through an inline `--config` override;
/// it is only meaningful inside a configuration file.
#[test]
fn extend_passed_via_config_argument() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args(["--config", "extend = 'foo.toml'", "."]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'extend = 'foo.toml'' for '--config <CONFIG_OPTION>'
tip: Cannot include `extend` in a --config flag value
For more information, try '--help'.
");
}
/// When an `extend` chain points at a missing file, the error reports the whole
/// extension chain (`ruff.toml` extends `ruff2.toml` extends `ruff3.toml`) so the
/// user can see where the broken link originates.
#[test]
fn nonexistent_extend_file() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
extend = "ruff2.toml"
"#,
)?;
fixture.write_file(
"ruff2.toml",
r#"
extend = "ruff3.toml"
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command(), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Failed to load extended configuration `[TMP]/ruff3.toml` (`[TMP]/ruff.toml` extends `[TMP]/ruff2.toml` extends `[TMP]/ruff3.toml`)
Cause: Failed to read [TMP]/ruff3.toml
Cause: No such file or directory (os error 2)
");
Ok(())
}
/// A cyclic `extend` chain (ruff.toml -> ruff2.toml -> ruff3.toml -> ruff.toml)
/// is detected and reported with the full cycle instead of recursing forever.
#[test]
fn circular_extend() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
extend = "ruff2.toml"
"#,
)?;
fixture.write_file(
"ruff2.toml",
r#"
extend = "ruff3.toml"
"#,
)?;
fixture.write_file(
"ruff3.toml",
r#"
extend = "ruff.toml"
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command(),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Circular configuration detected: `[TMP]/ruff.toml` extends `[TMP]/ruff2.toml` extends `[TMP]/ruff3.toml` extends `[TMP]/ruff.toml`
");
Ok(())
}
/// A TOML syntax error inside an *extended* configuration file is attributed to
/// that file, with the extension chain included in the error causes.
#[test]
fn parse_error_extends() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
extend = "ruff2.toml"
"#,
)?;
fixture.write_file(
"ruff2.toml",
r#"
[lint]
select = [E501]
"#,
)?;
assert_cmd_snapshot!(
fixture.check_command(),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Failed to load extended configuration `[TMP]/ruff2.toml` (`[TMP]/ruff.toml` extends `[TMP]/ruff2.toml`)
Cause: Failed to parse [TMP]/ruff2.toml
Cause: TOML parse error at line 3, column 11
|
3 | select = [E501]
| ^^^^
string values must be quoted, expected literal string
");
Ok(())
}
/// `--config <file>` and `--isolated` are mutually exclusive: `--isolated`
/// ignores all configuration files, so combining them is rejected.
#[test]
fn config_file_and_isolated() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file("ruff.toml", "")?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("--isolated")
.arg("."), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: The argument `--config=ruff.toml` cannot be used with `--isolated`
tip: You cannot specify a configuration file and also specify `--isolated`,
as `--isolated` causes ruff to ignore all configuration files.
For more information, try `--help`.
");
Ok(())
}
/// Inline `--config "<key>=<value>"` overrides beat the values in the config
/// file passed via `--config ruff.toml`: line-length drops 100 -> 90 (so E501
/// fires), E501/F841 are added to the selection, and combine-as-imports is
/// flipped back to false (so the import block is re-sorted).
#[test]
fn config_override_via_cli() -> Result<()> {
let fixture = CliTest::with_file(
"ruff.toml",
r#"
line-length = 100
[lint]
select = ["I"]
[lint.isort]
combine-as-imports = true
"#,
)?;
let test_code = r#"
from foo import (
aaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbb as bbbbbbbbbbbbbbbb,
cccccccccccccccc,
ddddddddddd as ddddddddddddd,
eeeeeeeeeeeeeee,
ffffffffffff as ffffffffffffff,
ggggggggggggg,
hhhhhhh as hhhhhhhhhhh,
iiiiiiiiiiiiii,
jjjjjjjjjjjjj as jjjjjj,
)
x = "longer_than_90_charactersssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss"
"#;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.args(["--config", "line-length=90"])
.args(["--config", "lint.extend-select=['E501', 'F841']"])
.args(["--config", "lint.isort.combine-as-imports = false"])
.arg("-")
.pass_stdin(test_code), @r"
success: false
exit_code: 1
----- stdout -----
-:2:1: I001 [*] Import block is un-sorted or un-formatted
-:15:91: E501 Line too long (97 > 90)
Found 2 errors.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
/// Syntactically valid TOML in `--config` whose value fails *semantic*
/// validation (unknown rule code `F481`) reports the deserialization error.
#[test]
fn valid_toml_but_nonexistent_option_provided_via_config_argument() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args([".", "--config", "extend-select=['F481']"]), // No such code as F481!
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'extend-select=['F481']' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
Could not parse the supplied argument as a `ruff.toml` configuration option:
Unknown rule selector: `F481`
For more information, try '--help'.
");
}
/// Comma-separating two overrides inside one `--config` value is not valid
/// TOML; each override needs its own `--config` flag.
#[test]
fn each_toml_option_requires_a_new_flag_1() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
// commas can't be used to delimit different config overrides;
// you need a new --config flag for each override
.args([".", "--config", "extend-select=['F841'], line-length=90"]),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'extend-select=['F841'], line-length=90' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
The supplied argument is not valid TOML:
TOML parse error at line 1, column 23
|
1 | extend-select=['F841'], line-length=90
| ^
unexpected key or value, expected newline, `#`
For more information, try '--help'.
");
}
/// Space-separating two overrides inside one `--config` value is equally
/// invalid TOML; same requirement as the comma variant above this test's name.
#[test]
fn each_toml_option_requires_a_new_flag_2() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
// spaces *also* can't be used to delimit different config overrides;
// you need a new --config flag for each override
.args([".", "--config", "extend-select=['F841'] line-length=90"]),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'extend-select=['F841'] line-length=90' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
The supplied argument is not valid TOML:
TOML parse error at line 1, column 24
|
1 | extend-select=['F841'] line-length=90
| ^
unexpected key or value, expected newline, `#`
For more information, try '--help'.
");
}
/// Assigning a scalar to a key that is actually a TOML *table*
/// (`lint.flake8-pytest-style`) lists the table's valid subkeys in the error.
#[test]
fn value_given_to_table_key_is_not_inline_table_1() {
// https://github.com/astral-sh/ruff/issues/13995
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args([".", "--config", r#"lint.flake8-pytest-style="csv""#]),
@r#"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'lint.flake8-pytest-style="csv"' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
`lint.flake8-pytest-style` is a table of configuration options.
Did you want to override one of the table's subkeys?
Possible choices:
- `lint.flake8-pytest-style.fixture-parentheses`
- `lint.flake8-pytest-style.parametrize-names-type`
- `lint.flake8-pytest-style.parametrize-values-type`
- `lint.flake8-pytest-style.parametrize-values-row-type`
- `lint.flake8-pytest-style.raises-require-match-for`
- `lint.flake8-pytest-style.raises-extend-require-match-for`
- `lint.flake8-pytest-style.mark-parentheses`
- `lint.flake8-pytest-style.warns-require-match-for`
- `lint.flake8-pytest-style.warns-extend-require-match-for`
For more information, try '--help'.
"#);
}
/// Same table-key check as above but for the top-level `lint` table; the error
/// enumerates every valid `lint.*` subkey.
#[test]
fn value_given_to_table_key_is_not_inline_table_2() {
// https://github.com/astral-sh/ruff/issues/13995
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args([".", "--config", r#"lint=123"#]),
@r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'lint=123' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
`lint` is a table of configuration options.
Did you want to override one of the table's subkeys?
Possible choices:
- `lint.allowed-confusables`
- `lint.dummy-variable-rgx`
- `lint.extend-ignore`
- `lint.extend-select`
- `lint.extend-fixable`
- `lint.external`
- `lint.fixable`
- `lint.ignore`
- `lint.extend-safe-fixes`
- `lint.extend-unsafe-fixes`
- `lint.ignore-init-module-imports`
- `lint.logger-objects`
- `lint.select`
- `lint.explicit-preview-rules`
- `lint.task-tags`
- `lint.typing-modules`
- `lint.unfixable`
- `lint.per-file-ignores`
- `lint.extend-per-file-ignores`
- `lint.exclude`
- `lint.preview`
- `lint.typing-extensions`
- `lint.future-annotations`
For more information, try '--help'.
");
}
/// Precedence check: the dedicated `--line-length` flag wins over both the
/// config file (100) and an inline `--config "line-length=110"` override,
/// regardless of argument order — the snapshot shows the 90-column limit applied.
#[test]
fn config_doubly_overridden_via_cli() -> Result<()> {
let fixture = CliTest::with_file(
"ruff.toml",
r#"
line-length = 100
[lint]
select=["E501"]
"#,
)?;
let test_code = "x = 'longer_than_90_charactersssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss'";
assert_cmd_snapshot!(fixture
.check_command()
// The --line-length flag takes priority over both the config file
// and the `--config="line-length=110"` flag,
// despite them both being specified after this flag on the command line:
.args(["--line-length", "90"])
.arg("--config")
.arg("ruff.toml")
.args(["--config", "line-length=110"])
.arg("-")
.pass_stdin(test_code), @r"
success: false
exit_code: 1
----- stdout -----
-:1:91: E501 Line too long (97 > 90)
Found 1 error.
----- stderr -----
");
Ok(())
}
/// A structured (inline-table) value can be supplied through `--config`:
/// `lint.per-file-ignores = {'generated.py' = ['N801']}` silences the N801
/// diagnostic for the stdin file named `generated.py`.
#[test]
fn complex_config_setting_overridden_via_cli() -> Result<()> {
let fixture = CliTest::with_file("ruff.toml", "lint.select = ['N801']")?;
let test_code = "class violates_n801: pass";
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.args(["--config", "lint.per-file-ignores = {'generated.py' = ['N801']}"])
.args(["--stdin-filename", "generated.py"])
.arg("-")
.pass_stdin(test_code), @r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
");
Ok(())
}
/// A deprecated top-level option (`select` instead of `lint.select`) supplied
/// via `--config` still takes effect but emits a deprecation warning pointing
/// at the CLI arguments as the source.
#[test]
fn deprecated_config_option_overridden_via_cli() {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args(["--config", "select=['N801']", "-"])
.pass_stdin("class lowercase: ..."),
@r"
success: false
exit_code: 1
----- stdout -----
-:1:7: N801 Class name `lowercase` should use CapWords convention
Found 1 error.
----- stderr -----
warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in your `--config` CLI arguments:
- 'select' -> 'lint.select'
");
}
/// `--extension ipy:ipynb` maps the non-standard `.ipy` extension onto the
/// Jupyter-notebook handler, so a notebook stored as `main.ipy` is linted
/// per-cell (note the `cell 1` location in the diagnostic).
#[test]
fn extension() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
include = ["*.ipy"]
"#,
)?;
fixture.write_file(
"main.ipy",
r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "ad6f36d9-4b7d-4562-8d00-f15a0f1fbb6d",
"metadata": {},
"outputs": [],
"source": [
"import os"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.args(["--extension", "ipy:ipynb"])
.arg("."), @r"
success: false
exit_code: 1
----- stdout -----
main.ipy:cell 1:1:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
/// An invalid rule code in a file-level `# ruff: noqa:` directive should warn
/// even when the file produces no diagnostics. No inline snapshot is given
/// here, so the expected output lives in an external insta snapshot file.
#[test]
fn warn_invalid_noqa_with_no_diagnostics() {
assert_cmd_snapshot!(
Command::new(get_cargo_bin(BIN_NAME))
.args(STDIN_BASE_OPTIONS)
.args(["--isolated"])
.arg("--select")
.arg("F401")
.arg("-")
.pass_stdin(
r#"
# ruff: noqa: AAA101
print("Hello world!")
"#
)
);
}
/// Codes declared under `lint.external` (prefix `AAA`) are accepted in a
/// file-level `# flake8: noqa:` directive without complaint, while the
/// unregistered `BBB102` still triggers the invalid-code warning.
#[test]
fn file_noqa_external() -> Result<()> {
let fixture = CliTest::with_file(
"ruff.toml",
r#"
[lint]
external = ["AAA"]
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
# flake8: noqa: AAA101, BBB102
import os
"#), @r"
success: false
exit_code: 1
----- stdout -----
-:3:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
warning: Invalid rule code provided to `# ruff: noqa` at -:2: BBB102
");
Ok(())
}
/// `required-version = "0.1.0"` (interpreted as `==0.1.0`) must fail against
/// the running binary version. The real version string is filtered to
/// `[VERSION]` so the snapshot stays stable across releases.
#[test]
fn required_version_exact_mismatch() -> Result<()> {
let version = env!("CARGO_PKG_VERSION");
let fixture = CliTest::with_file(
"ruff.toml",
r#"
required-version = "0.1.0"
"#,
)?;
insta::with_settings!({
filters => vec![(version, "[VERSION]")]
}, {
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
import os
"#), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Required version `==0.1.0` does not match the running version `[VERSION]`
");
});
Ok(())
}
/// When `required-version` equals the running version exactly, checking
/// proceeds normally and the usual F401 diagnostic is produced.
#[test]
fn required_version_exact_match() -> Result<()> {
let version = env!("CARGO_PKG_VERSION");
let fixture = CliTest::with_file(
"ruff.toml",
&format!(
r#"
required-version = "{version}"
"#
),
)?;
insta::with_settings!({
filters => vec![(version, "[VERSION]")]
}, {
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
import os
"#), @r"
success: false
exit_code: 1
----- stdout -----
-:2:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
});
Ok(())
}
/// A strict lower bound that excludes the current version
/// (`required-version = ">{current}"`) must abort with an error.
#[test]
fn required_version_bound_mismatch() -> Result<()> {
let version = env!("CARGO_PKG_VERSION");
let fixture = CliTest::with_file(
"ruff.toml",
&format!(
r#"
required-version = ">{version}"
"#
),
)?;
insta::with_settings!({
filters => vec![(version, "[VERSION]")]
}, {
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
import os
"#), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: Required version `>[VERSION]` does not match the running version `[VERSION]`
");
});
Ok(())
}
/// A satisfied version bound (`>=0.1.0`) lets the check run; no version filter
/// is needed because the snapshot contains no version string.
#[test]
fn required_version_bound_match() -> Result<()> {
let fixture = CliTest::with_file(
"ruff.toml",
r#"
required-version = ">=0.1.0"
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
import os
"#), @r"
success: false
exit_code: 1
----- stdout -----
-:2:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
/// Expand environment variables in `--config` paths provided via the CLI.
///
/// `${NAME}.toml` with `NAME=ruff` must resolve to `ruff.toml`; the config's
/// `ignore = ["F841"]` taking effect (only F401 is reported) proves the file
/// was actually loaded.
#[test]
fn config_expand() -> Result<()> {
let fixture = CliTest::with_file(
"ruff.toml",
r#"
[lint]
select = ["F"]
ignore = ["F841"]
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--config")
.arg("${NAME}.toml")
.env("NAME", "ruff")
.arg("-")
.pass_stdin(r#"
import os
def func():
    x = 1
"#), @r"
success: false
exit_code: 1
----- stdout -----
-:2:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
/// Per-file selects via ! negation in per-file-ignores
#[test]
fn negated_per_file_ignores() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/cli/format.rs | crates/ruff/tests/cli/format.rs | #![cfg(not(target_family = "wasm"))]
use std::fs;
use std::path::Path;
use anyhow::Result;
use insta_cmd::assert_cmd_snapshot;
use super::{CliTest, tempdir_filter};
/// Baseline formatter behavior on stdin with default options: the magic
/// trailing comma expands the parameter list, single quotes become double
/// quotes, but quotes are preserved where switching would require escaping.
#[test]
fn default_options() -> Result<()> {
let test = CliTest::new()?;
assert_cmd_snapshot!(test.format_command()
.args(["--isolated", "--stdin-filename", "test.py"])
.arg("-")
.pass_stdin(r#"
def foo(arg1, arg2,):
print('Shouldn\'t change quotes')
if condition:
print('Hy "Micha"')  # Should not change quotes
"#), @r#"
success: true
exit_code: 0
----- stdout -----
def foo(
arg1,
arg2,
):
print("Shouldn't change quotes")
if condition:
print('Hy "Micha"')  # Should not change quotes
----- stderr -----
"#);
Ok(())
}
/// With no path argument, `ruff format --check` discovers all Python files in
/// the current directory and reports each one that would be reformatted.
#[test]
fn default_files() -> Result<()> {
let test = CliTest::with_files([
("foo.py", r#"foo = "needs formatting""#),
("bar.py", r#"bar = "needs formatting""#),
])?;
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--check"), @r"
success: false
exit_code: 1
----- stdout -----
Would reformat: bar.py
Would reformat: foo.py
2 files would be reformatted
----- stderr -----
");
Ok(())
}
/// Supplying both `--stdin-filename` and a positional file path formats stdin
/// and warns that the positional path is being ignored.
#[test]
fn format_warn_stdin_filename_with_files() -> Result<()> {
let test = CliTest::new()?;
assert_cmd_snapshot!(test.format_command()
.args(["--isolated", "--stdin-filename", "foo.py"])
.arg("foo.py")
.pass_stdin("foo = 1"), @r"
success: true
exit_code: 0
----- stdout -----
foo = 1
----- stderr -----
warning: Ignoring file foo.py in favor of standard input.
");
Ok(())
}
/// `ruff format` rejects a missing `--config` file with the same error shape as
/// the check command (see the identically named test in the check suite).
#[test]
fn nonexistent_config_file() -> Result<()> {
let test = CliTest::new()?;
assert_cmd_snapshot!(test.format_command()
.args(["--config", "foo.toml", "."]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'foo.toml' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
It looks like you were trying to pass a path to a configuration file.
The path `foo.toml` does not point to a configuration file
For more information, try '--help'.
");
Ok(())
}
/// `ruff format` rejects an inline `--config` override containing invalid
/// TOML, surfacing the underlying parse error.
#[test]
fn config_override_rejected_if_invalid_toml() -> Result<()> {
let test = CliTest::new()?;
assert_cmd_snapshot!(test.format_command()
.args(["--config", "foo = bar", "."]), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: invalid value 'foo = bar' for '--config <CONFIG_OPTION>'
tip: A `--config` flag must either be a path to a `.toml` configuration file
or a TOML `<KEY> = <VALUE>` pair overriding a specific configuration
option
The supplied argument is not valid TOML:
TOML parse error at line 1, column 7
|
1 | foo = bar
| ^^^
string values must be quoted, expected literal string
For more information, try '--help'.
");
Ok(())
}
/// `ruff format` also limits the command line to a single configuration file.
#[test]
fn too_many_config_files() -> Result<()> {
let test = CliTest::with_files([("ruff.toml", ""), ("ruff2.toml", "")])?;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
.arg("--config")
.arg("ruff2.toml")
.arg("."), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: You cannot specify more than one configuration file on the command line.
tip: remove either `--config=ruff.toml` or `--config=ruff2.toml`.
For more information, try `--help`.
");
Ok(())
}
/// `--config <file>` combined with `--isolated` is rejected by `ruff format`,
/// mirroring the check command's behavior.
#[test]
fn config_file_and_isolated() -> Result<()> {
let test = CliTest::with_file("ruff.toml", "")?;
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--config")
.arg("ruff.toml")
.arg("."), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: The argument `--config=ruff.toml` cannot be used with `--isolated`
tip: You cannot specify a configuration file and also specify `--isolated`,
as `--isolated` causes ruff to ignore all configuration files.
For more information, try `--help`.
");
Ok(())
}
/// An inline `--config "line-length=80"` override beats the config file's
/// `line-length = 70`: the long call is wrapped at the 80-column limit.
#[test]
fn config_override_via_cli() -> Result<()> {
let test = CliTest::with_file("ruff.toml", "line-length = 70")?;
let fixture = r#"
def foo():
print("looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong string")
"#;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
// This overrides the long line length set in the config file
.args(["--config", "line-length=80"])
.arg("-")
.pass_stdin(fixture), @r#"
success: true
exit_code: 0
----- stdout -----
def foo():
print(
"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong string"
)
----- stderr -----
"#);
Ok(())
}
/// Formatter precedence: the dedicated `--line-length 100` flag overrides both
/// the config file (70) and the inline `--config "line-length=80"` override,
/// so the long call stays on one line.
#[test]
fn config_doubly_overridden_via_cli() -> Result<()> {
let test = CliTest::with_file("ruff.toml", "line-length = 70")?;
let fixture = r#"
def foo():
print("looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong string")
"#;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
// This overrides the long line length set in the config file...
.args(["--config", "line-length=80"])
// ...but this overrides them both:
.args(["--line-length", "100"])
.arg("-")
.pass_stdin(fixture), @r#"
success: true
exit_code: 0
----- stdout -----
def foo():
print("looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong string")
----- stderr -----
"#);
Ok(())
}
/// Exercises several `[format]` options together: tab indentation with
/// `indent-width = 8`, `line-length = 84`, single-quote preference,
/// `skip-magic-trailing-comma` (the trailing comma does not force expansion),
/// and CR-LF line endings.
#[test]
fn format_options() -> Result<()> {
let test = CliTest::with_file(
"ruff.toml",
r#"
indent-width = 8
line-length = 84
[format]
indent-style = "tab"
quote-style = "single"
skip-magic-trailing-comma = true
line-ending = "cr-lf"
"#,
)?;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r#"
def foo(arg1, arg2,):
print("Shouldn't change quotes. It exceeds the line width with the tab size 8")
if condition:
print("Should change quotes")
"#), @r#"
success: true
exit_code: 0
----- stdout -----
def foo(arg1, arg2):
print(
"Shouldn't change quotes. It exceeds the line width with the tab size 8"
)
if condition:
print('Should change quotes')
----- stderr -----
"#);
Ok(())
}
/// With `docstring-code-format` enabled and `docstring-code-line-length = 20`,
/// code examples embedded in docstrings (reST `code-block`, fenced markdown,
/// and doctest `>>>` blocks) are all reformatted to the narrow limit.
#[test]
fn docstring_options() -> Result<()> {
let test = CliTest::with_file(
"ruff.toml",
r"
[format]
docstring-code-format = true
docstring-code-line-length = 20
",
)?;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
.arg("-")
.pass_stdin(r"
def f(x):
'''
Something about `f`. And an example:
.. code-block:: python
foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
Another example:
```py
foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
```
And another:
>>> foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
'''
pass
"), @r#"
success: true
exit_code: 0
----- stdout -----
def f(x):
"""
Something about `f`. And an example:
.. code-block:: python
foo, bar, quux = (
this_is_a_long_line(
lion,
hippo,
lemur,
bear,
)
)
Another example:
```py
foo, bar, quux = (
this_is_a_long_line(
lion,
hippo,
lemur,
bear,
)
)
```
And another:
>>> foo, bar, quux = (
...     this_is_a_long_line(
...         lion,
...         hippo,
...         lemur,
...         bear,
...     )
... )
"""
pass
----- stderr -----
"#);
Ok(())
}
/// Files using LF (`main.py`) and CRLF (`test.py`) line endings are each
/// considered already formatted — the formatter preserves the existing ending
/// per file rather than forcing one style.
#[test]
fn mixed_line_endings() -> Result<()> {
let test = CliTest::with_files([
(
"main.py",
"from test import say_hy\n\nif __name__ == \"__main__\":\n    say_hy(\"dear Ruff contributor\")\n",
),
(
"test.py",
"def say_hy(name: str):\r\n    print(f\"Hy {name}\")\r\n",
),
])?;
assert_cmd_snapshot!(test.format_command()
.arg("--diff")
.arg("--isolated")
.arg("."), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
2 files already formatted
");
Ok(())
}
/// Interplay of `extend-exclude`, `format.exclude`, and explicit CLI paths:
/// `test.py` is format-excluded but formatted anyway because it is passed
/// directly on the command line, while `generated.py` and `out/` stay excluded
/// during directory discovery.
#[test]
fn exclude() -> Result<()> {
let test = CliTest::with_files([
(
"ruff.toml",
r#"
extend-exclude = ["out"]
[format]
exclude = ["test.py", "generated.py"]
"#,
),
(
"main.py",
r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#,
),
// Excluded file but passed to the CLI directly, should be formatted
(
"test.py",
r#"
def say_hy(name: str):
print(f"Hy {name}")"#,
),
(
"generated.py",
r#"NUMBERS = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
]
OTHER = "OTHER"
"#,
),
("out/a.py", "a = a"),
])?;
assert_cmd_snapshot!(test.format_command()
.args(["--check", "--config", "ruff.toml"])
// Explicitly pass test.py, should be formatted regardless of it being excluded by format.exclude
.arg("test.py")
// Format all other files in the directory, should respect the `exclude` and `format.exclude` options
.arg("."), @r"
success: false
exit_code: 1
----- stdout -----
Would reformat: main.py
Would reformat: test.py
2 files would be reformatted
----- stderr -----
");
Ok(())
}
/// Regression test for <https://github.com/astral-sh/ruff/issues/20035>
///
/// A file reachable both through a directory argument (`.`) and an explicit
/// path (`main.py`) must be reported exactly once, not twice.
#[test]
fn deduplicate_directory_and_explicit_file() -> Result<()> {
let test = CliTest::with_file("main.py", "x = 1\n")?;
assert_cmd_snapshot!(
test.format_command()
.arg("--check")
.arg(".")
.arg("main.py"),
@r"
success: false
exit_code: 1
----- stdout -----
Would reformat: main.py
1 file would be reformatted
----- stderr -----
"
);
Ok(())
}
/// A file the parser cannot handle makes the formatter fail with exit code 2
/// and a parse error naming the file and position, rather than formatting or
/// reporting a reformat.
#[test]
fn syntax_error() -> Result<()> {
let test = CliTest::with_file(
"main.py",
r"
from module import =
",
)?;
assert_cmd_snapshot!(test.format_command()
.arg("--check")
.arg("--isolated")
.arg("main.py"), @r"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: Failed to parse main.py:2:20: Expected an import name
");
Ok(())
}
/// Summary message wording across the formatter's three outcomes on the same
/// file: `--check` says "would be reformatted", the first real run says
/// "reformatted", and a second run says "left unchanged".
#[test]
fn messages() -> Result<()> {
let test = CliTest::with_file(
"main.py",
r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#,
)?;
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--check")
.arg("main.py"), @r"
success: false
exit_code: 1
----- stdout -----
Would reformat: main.py
1 file would be reformatted
----- stderr -----
");
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("main.py"), @r"
success: true
exit_code: 0
----- stdout -----
1 file reformatted
----- stderr -----
");
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("main.py"), @r"
success: true
exit_code: 0
----- stdout -----
1 file left unchanged
----- stderr -----
");
Ok(())
}
/// Parameterized over every supported `--output-format`; each case is checked
/// against its own named external snapshot (`output_format_<format>`).
/// An insta filter normalizes the temp-dir path to `[TMP]/input.py` — the
/// extra `\\?/?` in the pattern accounts for JSON's backslash escaping.
#[test_case::test_case("concise")]
#[test_case::test_case("full")]
#[test_case::test_case("json")]
#[test_case::test_case("json-lines")]
#[test_case::test_case("junit")]
#[test_case::test_case("grouped")]
#[test_case::test_case("github")]
#[test_case::test_case("gitlab")]
#[test_case::test_case("pylint")]
#[test_case::test_case("rdjson")]
#[test_case::test_case("azure")]
#[test_case::test_case("sarif")]
fn output_format(output_format: &str) -> Result<()> {
const CONTENT: &str = r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#;
let test = CliTest::with_settings(|_project_dir, mut settings| {
// JSON double escapes backslashes
settings.add_filter(r#""[^"]+\\?/?input.py"#, r#""[TMP]/input.py"#);
settings
})?;
test.write_file("input.py", CONTENT)?;
let snapshot = format!("output_format_{output_format}");
assert_cmd_snapshot!(
snapshot,
test.format_command().args([
"--output-format",
output_format,
"--preview",
"--check",
"input.py",
]),
);
Ok(())
}
/// Formatting diagnostics for a Jupyter notebook (under `--preview --check`)
/// are rendered per cell (`::: cell N`) with a unified diff per cell, using
/// the checked-in `unformatted.ipynb` fixture; the crate path is filtered to
/// `CRATE_ROOT/` for snapshot stability.
#[test]
fn output_format_notebook() -> Result<()> {
let crate_root = Path::new(env!("CARGO_MANIFEST_DIR"));
let fixtures = crate_root.join("resources").join("test").join("fixtures");
let path = fixtures.join("unformatted.ipynb");
let test = CliTest::with_settings(|_, mut settings| {
settings.add_filter(&tempdir_filter(crate_root.to_str().unwrap()), "CRATE_ROOT/");
settings
})?;
assert_cmd_snapshot!(
test.format_command().args(["--isolated", "--preview", "--check"]).arg(path),
@r"
success: false
exit_code: 1
----- stdout -----
unformatted: File would be reformatted
--> CRATE_ROOT/resources/test/fixtures/unformatted.ipynb:cell 1:1:1
::: cell 1
1 | import numpy
- maths = (numpy.arange(100)**2).sum()
- stats= numpy.asarray([1,2,3,4]).median()
2 +
3 + maths = (numpy.arange(100) ** 2).sum()
4 + stats = numpy.asarray([1, 2, 3, 4]).median()
::: cell 3
1 | # A cell with IPython escape command
2 | def some_function(foo, bar):
3 |     pass
4 +
5 +
6 | %matplotlib inline
::: cell 4
1 | foo = %pwd
- def some_function(foo,bar,):
2 +
3 +
4 + def some_function(
5 +     foo,
6 +     bar,
7 + ):
8 |     # Another cell with IPython escape command
9 |     foo = %pwd
10 |     print(foo)
1 file would be reformatted
----- stderr -----
"
);
Ok(())
}
/// `--exit-non-zero-on-format` returns exit code 1 when a file is changed and
/// 0 when nothing needed formatting; the whole sequence is then repeated with
/// the `--exit-non-zero-on-fix` alias to prove both spellings behave the same.
#[test]
fn exit_non_zero_on_format() -> Result<()> {
let test = CliTest::new()?;
let contents = r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#;
test.write_file("main.py", contents)?;
// First format should exit with code 1 since the file needed formatting
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--exit-non-zero-on-format")
.arg("main.py"), @r"
success: false
exit_code: 1
----- stdout -----
1 file reformatted
----- stderr -----
");
// Second format should exit with code 0 since no files needed formatting
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--exit-non-zero-on-format")
.arg("main.py"), @r"
success: true
exit_code: 0
----- stdout -----
1 file left unchanged
----- stderr -----
");
// Repeat the tests above with the --exit-non-zero-on-fix alias
test.write_file("main.py", contents)?;
// First format should exit with code 1 since the file needed formatting
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--exit-non-zero-on-fix")
.arg("main.py"), @r"
success: false
exit_code: 1
----- stdout -----
1 file reformatted
----- stderr -----
");
// Second format should exit with code 0 since no files needed formatting
assert_cmd_snapshot!(test.format_command()
.arg("--isolated")
.arg("--exit-non-zero-on-fix")
.arg("main.py"), @r"
success: true
exit_code: 0
----- stdout -----
1 file left unchanged
----- stderr -----
");
Ok(())
}
/// Same fixture layout as the `exclude` test, but with `--force-exclude`:
/// now even the explicitly passed `test.py` is skipped, so only `main.py`
/// is reported.
#[test]
fn force_exclude() -> Result<()> {
let test = CliTest::with_files([
(
"ruff.toml",
r#"
extend-exclude = ["out"]
[format]
exclude = ["test.py", "generated.py"]
"#,
),
(
"main.py",
r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#,
),
// Excluded file but passed to the CLI directly, should be formatted
(
"test.py",
r#"
def say_hy(name: str):
print(f"Hy {name}")"#,
),
(
"generated.py",
r#"NUMBERS = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19
]
OTHER = "OTHER"
"#,
),
("out/a.py", "a = a"),
])?;
assert_cmd_snapshot!(test.format_command()
.args(["--force-exclude", "--check", "--config", "ruff.toml"])
// Explicitly pass test.py, should not be formatted because of --force-exclude
.arg("test.py")
// Format all other files in the directory, should respect the `exclude` and `format.exclude` options
.arg("."), @r"
success: false
exit_code: 1
----- stdout -----
Would reformat: main.py
1 file would be reformatted
----- stderr -----
");
Ok(())
}
// Without `--force-exclude`, `format.exclude` does NOT apply to stdin input:
// even though `--stdin-filename` matches an excluded path, the piped content
// is still formatted (single quotes become double quotes).
#[test]
fn exclude_stdin() -> Result<()> {
    let test = CliTest::with_file(
        "ruff.toml",
        r#"
        extend-select = ["B", "Q"]
        ignore = ["Q000", "Q001", "Q002", "Q003"]
        [format]
        exclude = ["generated.py"]
        "#,
    )?;
    assert_cmd_snapshot!(test.format_command()
        .args(["--config", "ruff.toml", "--stdin-filename", "generated.py", "-"])
        .pass_stdin(r#"
from test import say_hy
if __name__ == '__main__':
    say_hy("dear Ruff contributor")
"#), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    from test import say_hy
    if __name__ == "__main__":
        say_hy("dear Ruff contributor")
    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
      - 'ignore' -> 'lint.ignore'
    "#);
    Ok(())
}
// With `--force-exclude`, an excluded `--stdin-filename` causes the stdin
// input to be passed through unformatted (single quotes are preserved).
#[test]
fn force_exclude_stdin() -> Result<()> {
    let test = CliTest::with_file(
        "ruff.toml",
        r#"
        extend-select = ["B", "Q"]
        ignore = ["Q000", "Q001", "Q002", "Q003"]
        [format]
        exclude = ["generated.py"]
        "#,
    )?;
    assert_cmd_snapshot!(test.format_command()
        .args(["--config", "ruff.toml", "--stdin-filename", "generated.py", "--force-exclude", "-"])
        .pass_stdin(r#"
from test import say_hy
if __name__ == '__main__':
    say_hy("dear Ruff contributor")
"#), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    from test import say_hy
    if __name__ == '__main__':
        say_hy("dear Ruff contributor")
    ----- stderr -----
    warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `ruff.toml`:
      - 'extend-select' -> 'lint.extend-select'
      - 'ignore' -> 'lint.ignore'
    "#);
    Ok(())
}
// Formatter options are merged across `extend`ed configuration files:
// `quote-style = "single"` comes from `ruff.toml` while `indent-style = "tab"`
// is inherited from `base.toml` (note the tab-indented output).
#[test]
fn format_option_inheritance() -> Result<()> {
    let test = CliTest::with_files([
        (
            "ruff.toml",
            r#"
            extend = "base.toml"
            [lint]
            extend-select = ["COM812"]
            [format]
            quote-style = "single"
            "#,
        ),
        (
            "base.toml",
            r#"
            [format]
            indent-style = "tab"
            "#,
        ),
    ])?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r#"
def foo(arg1, arg2,):
    print("Shouldn't change quotes")
if condition:
    print("Should change quotes")
"#), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    def foo(
    	arg1,
    	arg2,
    ):
    	print("Shouldn't change quotes")
    if condition:
    	print('Should change quotes')
    ----- stderr -----
    warning: The following rule may cause conflicts when used with the formatter: `COM812`. To avoid unexpected behavior, we recommend disabling this rule, either by removing it from the `lint.select` or `lint.extend-select` configuration, or adding it to the `lint.ignore` configuration.
    "#);
    Ok(())
}
// `tab-size` (renamed to `indent-width`) is no longer accepted at all:
// loading a configuration that uses it is a hard error (exit code 2).
#[test]
fn deprecated_options() -> Result<()> {
    let test = CliTest::with_file(
        "ruff.toml",
        r"
        tab-size = 2
        ",
    )?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r"
if True:
    pass
"), @r"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    ruff failed
      Cause: Failed to load configuration `[TMP]/ruff.toml`
      Cause: Failed to parse [TMP]/ruff.toml
      Cause: TOML parse error at line 1, column 1
      |
    1 |
      | ^
    unknown field `tab-size`
    ");
    Ok(())
}
/// Since 0.1.0 the legacy format option is no longer supported.
///
/// `format` must now be a table of formatter settings; a bare string value
/// (the pre-0.1.0 output-format spelling) fails configuration parsing.
#[test]
fn legacy_format_option() -> Result<()> {
    let test = CliTest::with_file(
        "ruff.toml",
        r#"
        format = "json"
        "#,
    )?;
    assert_cmd_snapshot!(test.command()
        .args(["check", "--select", "F401", "--no-cache", "--config"])
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r"
import os
"), @r#"
    success: false
    exit_code: 2
    ----- stdout -----
    ----- stderr -----
    ruff failed
      Cause: Failed to load configuration `[TMP]/ruff.toml`
      Cause: Failed to parse [TMP]/ruff.toml
      Cause: TOML parse error at line 2, column 10
      |
    2 | format = "json"
      |          ^^^^^^
    invalid type: string "json", expected struct FormatOptions
    "#);
    Ok(())
}
// When formatter-incompatible lint rules/options are enabled, formatting a
// file still succeeds, but one warning per conflict is emitted on stderr.
#[test]
fn conflicting_options() -> Result<()> {
    let test = CliTest::with_files([
        (
            "ruff.toml",
            r#"
            indent-width = 2
            [lint]
            select = ["ALL"]
            ignore = ["D203", "D212", "ISC001"]
            [lint.isort]
            lines-after-imports = 3
            lines-between-types = 2
            force-wrap-aliases = true
            combine-as-imports = true
            split-on-trailing-comma = true
            [lint.flake8-quotes]
            inline-quotes = "single"
            docstring-quotes = "single"
            multiline-quotes = "single"
            [lint.flake8-implicit-str-concat]
            allow-multiline = false
            [format]
            skip-magic-trailing-comma = true
            indent-style = "tab"
            "#,
        ),
        (
            "test.py",
            r#"
            def say_hy(name: str):
                print(f"Hy {name}")"#,
        ),
    ])?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("test.py"), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    1 file reformatted
    ----- stderr -----
    warning: The following rule may cause conflicts when used with the formatter: `COM812`. To avoid unexpected behavior, we recommend disabling this rule, either by removing it from the `lint.select` or `lint.extend-select` configuration, or adding it to the `lint.ignore` configuration.
    warning: The `format.indent-style="tab"` option is incompatible with `W191`, which lints against all uses of tabs. We recommend disabling these rules when using the formatter, which enforces a consistent indentation style. Alternatively, set the `format.indent-style` option to `"space"`.
    warning: The `lint.flake8-implicit-str-concat.allow-multiline = false` option is incompatible with the formatter unless `ISC001` is enabled. We recommend enabling `ISC001` or setting `allow-multiline=true`.
    warning: The `format.indent-style="tab"` option is incompatible with `D206`, with requires space-based indentation. We recommend disabling these rules when using the formatter, which enforces a consistent indentation style. Alternatively, set the `format.indent-style` option to `"space"`.
    warning: The `flake8-quotes.inline-quotes="single"` option is incompatible with the formatter's `format.quote-style="double"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `"single"` or `"double"`.
    warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q001` when using the formatter, which enforces double quotes for multiline strings. Alternatively, set the `flake8-quotes.multiline-quotes` option to `"double"`.`
    warning: The `flake8-quotes.docstring-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q002` when using the formatter, which enforces double quotes for docstrings. Alternatively, set the `flake8-quotes.docstring-quotes` option to `"double"`.`
    warning: The isort option `isort.lines-after-imports` with a value other than `-1`, `1` or `2` is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `2`, `1`, or `-1` (default).
    warning: The isort option `isort.lines-between-types` with a value greater than 1 is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `1` or `0` (default).
    warning: The isort option `isort.force-wrap-aliases` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.force-wrap-aliases=false` or `format.skip-magic-trailing-comma=false`.
    warning: The isort option `isort.split-on-trailing-comma` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.split-on-trailing-comma=false` or `format.skip-magic-trailing-comma=false`.
    "#);
    Ok(())
}
// Same conflict warnings as `conflicting_options`, but for stdin input
// (no `flake8-implicit-str-concat` section here, so no `allow-multiline`
// warning). The formatted result is still written to stdout.
#[test]
fn conflicting_options_stdin() -> Result<()> {
    let test = CliTest::with_file(
        "ruff.toml",
        r#"
        indent-width = 2
        [lint]
        select = ["ALL"]
        ignore = ["D203", "D212"]
        [lint.isort]
        lines-after-imports = 3
        lines-between-types = 2
        force-wrap-aliases = true
        combine-as-imports = true
        split-on-trailing-comma = true
        [lint.flake8-quotes]
        inline-quotes = "single"
        docstring-quotes = "single"
        multiline-quotes = "single"
        [format]
        skip-magic-trailing-comma = true
        indent-style = "tab"
        "#,
    )?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("-")
        .pass_stdin(r#"
def say_hy(name: str):
    print(f"Hy {name}")"#), @r#"
    success: true
    exit_code: 0
    ----- stdout -----
    def say_hy(name: str):
    	print(f"Hy {name}")
    ----- stderr -----
    warning: The following rule may cause conflicts when used with the formatter: `COM812`. To avoid unexpected behavior, we recommend disabling this rule, either by removing it from the `lint.select` or `lint.extend-select` configuration, or adding it to the `lint.ignore` configuration.
    warning: The `format.indent-style="tab"` option is incompatible with `W191`, which lints against all uses of tabs. We recommend disabling these rules when using the formatter, which enforces a consistent indentation style. Alternatively, set the `format.indent-style` option to `"space"`.
    warning: The `format.indent-style="tab"` option is incompatible with `D206`, with requires space-based indentation. We recommend disabling these rules when using the formatter, which enforces a consistent indentation style. Alternatively, set the `format.indent-style` option to `"space"`.
    warning: The `flake8-quotes.inline-quotes="single"` option is incompatible with the formatter's `format.quote-style="double"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `"single"` or `"double"`.
    warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q001` when using the formatter, which enforces double quotes for multiline strings. Alternatively, set the `flake8-quotes.multiline-quotes` option to `"double"`.`
    warning: The `flake8-quotes.docstring-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q002` when using the formatter, which enforces double quotes for docstrings. Alternatively, set the `flake8-quotes.docstring-quotes` option to `"double"`.`
    warning: The isort option `isort.lines-after-imports` with a value other than `-1`, `1` or `2` is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `2`, `1`, or `-1` (default).
    warning: The isort option `isort.lines-between-types` with a value greater than 1 is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `1` or `0` (default).
    warning: The isort option `isort.force-wrap-aliases` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.force-wrap-aliases=false` or `format.skip-magic-trailing-comma=false`.
    warning: The isort option `isort.split-on-trailing-comma` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.split-on-trailing-comma=false` or `format.skip-magic-trailing-comma=false`.
    "#);
    Ok(())
}
// A formatter-compatible configuration (conflicting rules ignored, isort and
// quote options within the supported ranges) formats without any warnings.
#[test]
fn valid_linter_options() -> Result<()> {
    let test = CliTest::with_files([
        (
            "ruff.toml",
            r#"
            [lint]
            select = ["ALL"]
            ignore = ["D203", "D212", "COM812", "ISC001"]
            [lint.isort]
            lines-after-imports = 2
            lines-between-types = 1
            force-wrap-aliases = true
            combine-as-imports = true
            split-on-trailing-comma = true
            [lint.flake8-quotes]
            inline-quotes = "single"
            docstring-quotes = "double"
            multiline-quotes = "double"
            [format]
            skip-magic-trailing-comma = false
            quote-style = "single"
            "#,
        ),
        (
            "test.py",
            r#"
            def say_hy(name: str):
                print(f"Hy {name}")"#,
        ),
    ])?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("test.py"), @r"
    success: true
    exit_code: 0
    ----- stdout -----
    1 file reformatted
    ----- stderr -----
    ");
    Ok(())
}
// `quote-style = "preserve"` is compatible with any `flake8-quotes` settings,
// so no conflict warnings are emitted.
#[test]
fn valid_linter_options_preserve() -> Result<()> {
    let test = CliTest::with_files([
        (
            "ruff.toml",
            r#"
            [lint]
            select = ["Q"]
            [lint.flake8-quotes]
            inline-quotes = "single"
            docstring-quotes = "single"
            multiline-quotes = "single"
            [format]
            quote-style = "preserve"
            "#,
        ),
        (
            "test.py",
            r#"
            def say_hy(name: str):
                print(f"Hy {name}")"#,
        ),
    ])?;
    assert_cmd_snapshot!(test.format_command()
        .arg("--config")
        .arg("ruff.toml")
        .arg("test.py"), @r"
    success: true
    exit_code: 0
    ----- stdout -----
    1 file reformatted
    ----- stderr -----
    ");
    Ok(())
}
#[test]
fn all_rules_default_options() -> Result<()> {
let test = CliTest::with_files([
(
"ruff.toml",
r#"
[lint]
select = ["ALL"]
"#,
),
(
"test.py",
r#"
def say_hy(name: str):
print(f"Hy {name}")"#,
),
])?;
assert_cmd_snapshot!(test.format_command()
.arg("--config")
.arg("ruff.toml")
.arg("test.py"), @r"
success: true
exit_code: 0
----- stdout -----
1 file reformatted
----- stderr -----
warning: `incorrect-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `incorrect-blank-line-before-class`.
warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line`.
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff/tests/cli/main.rs | crates/ruff/tests/cli/main.rs | //! Test fixture utilities for ruff CLI tests
//!
//! The core concept is borrowed from ty/tests/cli/main.rs and can be extended
//! with more functionality from there in the future if needed.
#![cfg(not(target_family = "wasm"))]
use anyhow::{Context as _, Result};
use insta::internals::SettingsBindDropGuard;
use insta_cmd::get_cargo_bin;
use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
use tempfile::TempDir;
mod analyze_graph;
mod format;
mod lint;
const BIN_NAME: &str = "ruff";
/// Builds the regex pattern used to replace a temporary-directory path in
/// snapshot output.
///
/// The path is regex-escaped and may be followed by an optional trailing
/// path separator (`/` or `\`), so both bare and separator-terminated
/// occurrences are matched.
pub(crate) fn tempdir_filter(path: impl AsRef<str>) -> String {
    let escaped = regex::escape(path.as_ref());
    format!(r"{escaped}[\\/]?")
}
/// A test fixture for running ruff CLI tests with temporary directories and files.
///
/// This fixture provides:
/// - Temporary directory management
/// - File creation utilities
/// - Proper snapshot filtering for cross-platform compatibility
/// - Pre-configured ruff command creation
///
/// # Example
///
/// ```rust,no_run
/// let test = CliTest::with_file("ruff.toml", "select = ['E']")?;
/// let output = test.command().args(["check", "."]).output()?;
/// ```
pub(crate) struct CliTest {
    // Held only to keep the temporary directory alive for the fixture's lifetime;
    // dropping it deletes the directory.
    _temp_dir: TempDir,
    // Held only to keep the insta snapshot filters bound while the fixture lives.
    _settings_scope: SettingsBindDropGuard,
    project_dir: PathBuf,
}
impl CliTest {
    /// Creates a new test fixture with an empty temporary directory.
    ///
    /// This sets up:
    /// - A temporary directory that's automatically cleaned up
    /// - Insta snapshot filters for cross-platform path compatibility
    /// - Environment isolation for consistent test behavior
    pub(crate) fn new() -> Result<Self> {
        Self::with_settings(|_, settings| settings)
    }

    /// Creates a test fixture and writes the given `(path, content)` pairs into it.
    pub(crate) fn with_files<'a>(
        files: impl IntoIterator<Item = (&'a str, &'a str)>,
    ) -> anyhow::Result<Self> {
        let case = Self::new()?;
        case.write_files(files)?;
        Ok(case)
    }

    /// Creates a test fixture, letting the caller customize the insta settings
    /// (e.g. add extra snapshot filters) before they are bound to scope.
    pub(crate) fn with_settings(
        setup_settings: impl FnOnce(&Path, insta::Settings) -> insta::Settings,
    ) -> Result<Self> {
        let temp_dir = TempDir::new()?;
        // Canonicalize the tempdir path because macOS uses symlinks for tempdirs
        // and that doesn't play well with our snapshot filtering.
        // Simplify with dunce because otherwise we get UNC paths on Windows.
        let project_dir = dunce::simplified(
            &temp_dir
                .path()
                .canonicalize()
                .context("Failed to canonicalize project path")?,
        )
        .to_path_buf();
        let mut settings = setup_settings(&project_dir, insta::Settings::clone_current());
        // Replace the per-run temporary directory with a stable `[TMP]/` marker.
        settings.add_filter(&tempdir_filter(project_dir.to_str().unwrap()), "[TMP]/");
        // Normalize Windows-style backslash separators to forward slashes.
        settings.add_filter(r#"\\([\w&&[^nr"]]\w|\s|\.)"#, "/$1");
        // Strip the file/line/column from panic locations.
        settings.add_filter(r"(Panicked at) [^:]+:\d+:\d+", "$1 <location>");
        settings.add_filter(ruff_linter::VERSION, "[VERSION]");
        // Normalize the Windows "file not found" message to the Unix wording.
        settings.add_filter(
            r#"The system cannot find the file specified."#,
            "No such file or directory",
        );
        let settings_scope = settings.bind_to_scope();
        Ok(Self {
            project_dir,
            _temp_dir: temp_dir,
            _settings_scope: settings_scope,
        })
    }

    /// Creates a test fixture with a single file.
    ///
    /// # Arguments
    ///
    /// * `path` - The relative path for the file
    /// * `content` - The content to write to the file
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// let test = CliTest::with_file("ruff.toml", "select = ['E']")?;
    /// ```
    pub(crate) fn with_file(path: impl AsRef<Path>, content: &str) -> Result<Self> {
        let fixture = Self::new()?;
        fixture.write_file(path, content)?;
        Ok(fixture)
    }

    /// Ensures that the parent directory of a path exists.
    fn ensure_parent_directory(path: &Path) -> Result<()> {
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent)
                .with_context(|| format!("Failed to create directory `{}`", parent.display()))?;
        }
        Ok(())
    }

    /// Writes a file to the test directory.
    ///
    /// Parent directories are created automatically if they don't exist.
    /// Content is dedented to remove common leading whitespace for cleaner test code.
    ///
    /// # Arguments
    ///
    /// * `path` - The relative path for the file
    /// * `content` - The content to write to the file
    pub(crate) fn write_file(&self, path: impl AsRef<Path>, content: &str) -> Result<()> {
        let path = path.as_ref();
        let file_path = self.project_dir.join(path);
        Self::ensure_parent_directory(&file_path)?;
        // Dedent so callers can indent raw-string file contents to match
        // the surrounding test code.
        let content = ruff_python_trivia::textwrap::dedent(content);
        fs::write(&file_path, content.as_ref())
            .with_context(|| format!("Failed to write file `{}`", file_path.display()))?;
        Ok(())
    }

    /// Writes each `(path, content)` pair to the test directory via [`Self::write_file`].
    pub(crate) fn write_files<'a>(
        &self,
        files: impl IntoIterator<Item = (&'a str, &'a str)>,
    ) -> Result<()> {
        for file in files {
            self.write_file(file.0, file.1)?;
        }
        Ok(())
    }

    /// Returns the path to the test directory root.
    pub(crate) fn root(&self) -> &Path {
        &self.project_dir
    }

    /// Creates a pre-configured ruff command for testing.
    ///
    /// The command is set up with:
    /// - The correct ruff binary path
    /// - Working directory set to the test directory
    /// - Clean environment variables for consistent behavior
    ///
    /// You can chain additional arguments and options as needed.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// let output = test
    ///     .command()
    ///     .args(["check", "--select", "E"])
    ///     .arg(".")
    ///     .output()?;
    /// ```
    pub(crate) fn command(&self) -> Command {
        let mut command = Command::new(get_cargo_bin(BIN_NAME));
        command.current_dir(&self.project_dir);
        // Unset all environment variables because they can affect test behavior.
        command.env_clear();
        command
    }

    /// Creates a `ruff format` command with caching disabled.
    pub(crate) fn format_command(&self) -> Command {
        let mut command = self.command();
        command.args(["format", "--no-cache"]);
        command
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/lexer.rs | crates/ruff_python_parser/src/lexer.rs | //! This module takes care of lexing Python source text.
//!
//! This means source code is scanned and translated into separate tokens. The rules
//! governing what is and is not a valid token are defined in the Python reference
//! guide section on [Lexical analysis].
//!
//! [Lexical analysis]: https://docs.python.org/3/reference/lexical_analysis.html
use std::cmp::Ordering;
use std::str::FromStr;
use unicode_ident::{is_xid_continue, is_xid_start};
use unicode_normalization::UnicodeNormalization;
use ruff_python_ast::name::Name;
use ruff_python_ast::str_prefix::{AnyStringPrefix, StringLiteralPrefix};
use ruff_python_ast::token::{TokenFlags, TokenKind};
use ruff_python_ast::{Int, IpyEscapeKind, StringFlags};
use ruff_python_trivia::is_python_whitespace;
use ruff_text_size::{TextLen, TextRange, TextSize};
use crate::Mode;
use crate::error::{InterpolatedStringErrorType, LexicalError, LexicalErrorType};
use crate::lexer::cursor::{Cursor, EOF_CHAR};
use crate::lexer::indentation::{Indentation, Indentations, IndentationsCheckpoint};
use crate::lexer::interpolated_string::{
InterpolatedStringContext, InterpolatedStrings, InterpolatedStringsCheckpoint,
};
use crate::string::InterpolatedStringKind;
use crate::token::TokenValue;
mod cursor;
mod indentation;
mod interpolated_string;
const BOM: char = '\u{feff}';
/// A lexer for Python source code.
#[derive(Debug)]
pub struct Lexer<'src> {
    /// Source code to be lexed.
    source: &'src str,
    /// A pointer to the current character of the source code which is being lexed.
    cursor: Cursor<'src>,
    /// The kind of the current token.
    current_kind: TokenKind,
    /// The range of the current token.
    current_range: TextRange,
    /// The value of the current token.
    current_value: TokenValue,
    /// Flags for the current token.
    current_flags: TokenFlags,
    /// Lexer state.
    state: State,
    /// Represents the current level of nesting in the lexer, indicating the depth of parentheses.
    /// The lexer is within a parenthesized context if the value is greater than 0.
    nesting: u32,
    /// A stack of indentation representing the current indentation level.
    indentations: Indentations,
    /// The indentation of the upcoming logical line while the lexer is still
    /// emitting the `Dedent` tokens required to reach it.
    pending_indentation: Option<Indentation>,
    /// Lexer mode.
    mode: Mode,
    /// F-string and t-string contexts.
    interpolated_strings: InterpolatedStrings,
    /// Errors encountered while lexing.
    errors: Vec<LexicalError>,
}
impl<'src> Lexer<'src> {
    /// Create a new lexer for the given input source which starts at the given offset.
    ///
    /// If the start offset is greater than 0, the cursor is moved ahead that many bytes.
    /// This means that the input source should be the complete source code and not the
    /// sliced version.
    ///
    /// # Panics
    ///
    /// Panics if the source is larger than 4GB, since offsets are stored as `u32`.
    pub(crate) fn new(source: &'src str, mode: Mode, start_offset: TextSize) -> Self {
        assert!(
            u32::try_from(source.len()).is_ok(),
            "Lexer only supports files with a size up to 4GB"
        );
        // A parenthesized expression is lexed as if the lexer were already inside
        // parentheses (`nesting == 1`); otherwise, lexing starts at the beginning of
        // a logical line.
        let (state, nesting) = if mode == Mode::ParenthesizedExpression {
            (State::Other, 1)
        } else {
            (State::AfterNewline, 0)
        };
        let mut lexer = Lexer {
            source,
            cursor: Cursor::new(source),
            state,
            current_kind: TokenKind::EndOfFile,
            current_range: TextRange::empty(start_offset),
            current_value: TokenValue::None,
            current_flags: TokenFlags::empty(),
            nesting,
            indentations: Indentations::default(),
            pending_indentation: None,
            mode,
            interpolated_strings: InterpolatedStrings::default(),
            errors: Vec::new(),
        };
        if start_offset == TextSize::new(0) {
            // A byte-order mark is only skipped at the very start of the file.
            // TODO: Handle possible mismatch between BOM and explicit encoding declaration.
            lexer.cursor.eat_char(BOM);
        } else {
            lexer.cursor.skip_bytes(start_offset.to_usize());
        }
        lexer
    }
    /// Returns the kind of the current token (the token most recently produced
    /// by [`Lexer::next_token`]).
    pub(crate) fn current_kind(&self) -> TokenKind {
        self.current_kind
    }
    /// Returns the source range of the current token.
    pub(crate) fn current_range(&self) -> TextRange {
        self.current_range
    }
    /// Returns the flags for the current token.
    pub(crate) fn current_flags(&self) -> TokenFlags {
        self.current_flags
    }
/// Takes the token value corresponding to the current token out of the lexer, replacing it
/// with the default value.
///
/// All the subsequent call to this method without moving the lexer would always return the
/// default value which is [`TokenValue::None`].
pub(crate) fn take_value(&mut self) -> TokenValue {
std::mem::take(&mut self.current_value)
}
/// Helper function to push the given error, updating the current range with the error location
/// and return the [`TokenKind::Unknown`] token.
fn push_error(&mut self, error: LexicalError) -> TokenKind {
self.current_range = error.location();
self.errors.push(error);
TokenKind::Unknown
}
    /// Lex the next token.
    ///
    /// Resets the current value and flags, lexes one token, and updates the
    /// current kind/range accordingly. The token's value (if any) can then be
    /// retrieved with [`Lexer::take_value`].
    pub fn next_token(&mut self) -> TokenKind {
        self.cursor.start_token();
        self.current_value = TokenValue::None;
        self.current_flags = TokenFlags::empty();
        self.current_kind = self.lex_token();
        // For `Unknown` token, the `push_error` method updates the current range.
        if !matches!(self.current_kind, TokenKind::Unknown) {
            self.current_range = self.token_range();
        }
        self.current_kind
    }
    /// Lexes a single token, dispatching on the lexer's current context:
    /// an active f/t-string, pending dedents, the start of a logical line,
    /// or a regular character.
    fn lex_token(&mut self) -> TokenKind {
        // Inside an f/t-string (but not inside one of its `{...}` interpolations),
        // try to lex the literal middle/end part of the string first.
        if let Some(interpolated_string) = self.interpolated_strings.current() {
            if !interpolated_string.is_in_interpolation(self.nesting) {
                if let Some(token) = self.lex_interpolated_string_middle_or_end() {
                    if token.is_interpolated_string_end() {
                        self.interpolated_strings.pop();
                    }
                    return token;
                }
            }
        }
        // Return dedent tokens until the current indentation level matches the indentation of the next token.
        else if let Some(indentation) = self.pending_indentation.take() {
            match self.indentations.current().try_compare(indentation) {
                Ok(Ordering::Greater) => {
                    // Still deeper than the target indentation: emit one `Dedent`
                    // and keep the pending indentation for the next call.
                    self.pending_indentation = Some(indentation);
                    if self.indentations.dedent_one(indentation).is_err() {
                        return self.push_error(LexicalError::new(
                            LexicalErrorType::IndentationError,
                            self.token_range(),
                        ));
                    }
                    return TokenKind::Dedent;
                }
                Ok(_) => {}
                Err(_) => {
                    return self.push_error(LexicalError::new(
                        LexicalErrorType::IndentationError,
                        self.token_range(),
                    ));
                }
            }
        }
        if self.state.is_after_newline() {
            if let Some(indentation) = self.eat_indentation() {
                return indentation;
            }
        } else {
            if let Err(error) = self.skip_whitespace() {
                return self.push_error(error);
            }
        }
        // The lexer might've skipped whitespaces, so update the start offset
        self.cursor.start_token();
        if let Some(c) = self.cursor.bump() {
            if c.is_ascii() {
                self.consume_ascii_character(c)
            } else if is_unicode_identifier_start(c) {
                let identifier = self.lex_identifier(c);
                self.state = State::Other;
                identifier
            } else {
                self.push_error(LexicalError::new(
                    LexicalErrorType::UnrecognizedToken { tok: c },
                    self.token_range(),
                ))
            }
        } else {
            // Reached the end of the file. Emit a trailing newline token if not at the beginning of a logical line,
            // empty the dedent stack, and finally, return the EndOfFile token.
            self.consume_end()
        }
    }
    /// Consumes the leading whitespace of a new logical line and, if the line
    /// is non-empty (not blank, not a comment), hands the measured indentation
    /// to [`Self::handle_indentation`]. Returns `Some` with an `Indent`,
    /// `Dedent`, or error token when the indentation level changed, `None`
    /// otherwise.
    fn eat_indentation(&mut self) -> Option<TokenKind> {
        let mut indentation = Indentation::root();
        loop {
            match self.cursor.first() {
                ' ' => {
                    self.cursor.bump();
                    indentation = indentation.add_space();
                }
                '\t' => {
                    self.cursor.bump();
                    indentation = indentation.add_tab();
                }
                '\\' => {
                    self.cursor.bump();
                    // A backslash must be followed by a line ending to form a
                    // valid line continuation.
                    if self.cursor.eat_char('\r') {
                        self.cursor.eat_char('\n');
                    } else if !self.cursor.eat_char('\n') {
                        return Some(self.push_error(LexicalError::new(
                            LexicalErrorType::LineContinuationError,
                            TextRange::at(self.offset() - '\\'.text_len(), '\\'.text_len()),
                        )));
                    }
                    if self.cursor.is_eof() {
                        return Some(self.push_error(LexicalError::new(
                            LexicalErrorType::Eof,
                            self.token_range(),
                        )));
                    }
                    // After a line continuation, indentation is measured from
                    // the start of the continued line.
                    indentation = Indentation::root();
                }
                // Form feed
                '\x0C' => {
                    self.cursor.bump();
                    indentation = Indentation::root();
                }
                _ => break,
            }
        }
        // Handle indentation if this is a new, not all empty, logical line
        if !matches!(self.cursor.first(), '\n' | '\r' | '#' | EOF_CHAR) {
            // Mark the logical line as non-empty so indentation isn't
            // re-processed on the next call.
            self.state = State::NonEmptyLogicalLine;
            return self.handle_indentation(indentation);
        }
        None
    }
    /// Compares `indentation` against the current indentation stack and emits
    /// `Some(Dedent)` / `Some(Indent)` when the level changed, `None` when it
    /// is unchanged, or `Some(Unknown)` (via `push_error`) when the levels are
    /// incomparable (inconsistent tab/space use).
    fn handle_indentation(&mut self, indentation: Indentation) -> Option<TokenKind> {
        match self.indentations.current().try_compare(indentation) {
            // Dedent
            Ok(Ordering::Greater) => {
                // Keep the target indentation so `lex_token` can keep emitting
                // `Dedent` tokens until the levels match.
                self.pending_indentation = Some(indentation);
                if self.indentations.dedent_one(indentation).is_err() {
                    return Some(self.push_error(LexicalError::new(
                        LexicalErrorType::IndentationError,
                        self.token_range(),
                    )));
                }
                // The lexer might've eaten some whitespaces to calculate the `indentation`. For
                // example:
                //
                // ```py
                // if first:
                //     if second:
                //         pass
                // foo
                // #^
                // ```
                //
                // Here, the cursor is at `^` and the `indentation` contains the whitespaces before
                // the `pass` token.
                self.cursor.start_token();
                Some(TokenKind::Dedent)
            }
            Ok(Ordering::Equal) => None,
            // Indent
            Ok(Ordering::Less) => {
                self.indentations.indent(indentation);
                Some(TokenKind::Indent)
            }
            Err(_) => Some(self.push_error(LexicalError::new(
                LexicalErrorType::IndentationError,
                self.token_range(),
            ))),
        }
    }
fn skip_whitespace(&mut self) -> Result<(), LexicalError> {
loop {
match self.cursor.first() {
' ' => {
self.cursor.bump();
}
'\t' => {
self.cursor.bump();
}
'\\' => {
self.cursor.bump();
if self.cursor.eat_char('\r') {
self.cursor.eat_char('\n');
} else if !self.cursor.eat_char('\n') {
return Err(LexicalError::new(
LexicalErrorType::LineContinuationError,
TextRange::at(self.offset() - '\\'.text_len(), '\\'.text_len()),
));
}
if self.cursor.is_eof() {
return Err(LexicalError::new(LexicalErrorType::Eof, self.token_range()));
}
}
// Form feed
'\x0C' => {
self.cursor.bump();
}
_ => break,
}
}
Ok(())
}
// Dispatch based on the given character.
fn consume_ascii_character(&mut self, c: char) -> TokenKind {
let token = match c {
c if is_ascii_identifier_start(c) => self.lex_identifier(c),
'0'..='9' => self.lex_number(c),
'#' => return self.lex_comment(),
'\'' | '"' => self.lex_string(c),
'=' => {
if self.cursor.eat_char('=') {
TokenKind::EqEqual
} else {
self.state = State::AfterEqual;
return TokenKind::Equal;
}
}
'+' => {
if self.cursor.eat_char('=') {
TokenKind::PlusEqual
} else {
TokenKind::Plus
}
}
'*' => {
if self.cursor.eat_char('=') {
TokenKind::StarEqual
} else if self.cursor.eat_char('*') {
if self.cursor.eat_char('=') {
TokenKind::DoubleStarEqual
} else {
TokenKind::DoubleStar
}
} else {
TokenKind::Star
}
}
c @ ('%' | '!')
if self.mode == Mode::Ipython
&& self.state.is_after_equal()
&& self.nesting == 0 =>
{
// SAFETY: Safe because `c` has been matched against one of the possible escape command token
self.lex_ipython_escape_command(IpyEscapeKind::try_from(c).unwrap())
}
c @ ('%' | '!' | '?' | '/' | ';' | ',')
if self.mode == Mode::Ipython && self.state.is_new_logical_line() =>
{
let kind = if let Ok(kind) = IpyEscapeKind::try_from([c, self.cursor.first()]) {
self.cursor.bump();
kind
} else {
// SAFETY: Safe because `c` has been matched against one of the possible escape command token
IpyEscapeKind::try_from(c).unwrap()
};
self.lex_ipython_escape_command(kind)
}
'?' if self.mode == Mode::Ipython => TokenKind::Question,
'/' => {
if self.cursor.eat_char('=') {
TokenKind::SlashEqual
} else if self.cursor.eat_char('/') {
if self.cursor.eat_char('=') {
TokenKind::DoubleSlashEqual
} else {
TokenKind::DoubleSlash
}
} else {
TokenKind::Slash
}
}
'%' => {
if self.cursor.eat_char('=') {
TokenKind::PercentEqual
} else {
TokenKind::Percent
}
}
'|' => {
if self.cursor.eat_char('=') {
TokenKind::VbarEqual
} else {
TokenKind::Vbar
}
}
'^' => {
if self.cursor.eat_char('=') {
TokenKind::CircumflexEqual
} else {
TokenKind::CircumFlex
}
}
'&' => {
if self.cursor.eat_char('=') {
TokenKind::AmperEqual
} else {
TokenKind::Amper
}
}
'-' => {
if self.cursor.eat_char('=') {
TokenKind::MinusEqual
} else if self.cursor.eat_char('>') {
TokenKind::Rarrow
} else {
TokenKind::Minus
}
}
'@' => {
if self.cursor.eat_char('=') {
TokenKind::AtEqual
} else {
TokenKind::At
}
}
'!' => {
if self.cursor.eat_char('=') {
TokenKind::NotEqual
} else {
TokenKind::Exclamation
}
}
'~' => TokenKind::Tilde,
'(' => {
self.nesting += 1;
TokenKind::Lpar
}
')' => {
self.nesting = self.nesting.saturating_sub(1);
TokenKind::Rpar
}
'[' => {
self.nesting += 1;
TokenKind::Lsqb
}
']' => {
self.nesting = self.nesting.saturating_sub(1);
TokenKind::Rsqb
}
'{' => {
self.nesting += 1;
TokenKind::Lbrace
}
'}' => {
if let Some(interpolated_string) = self.interpolated_strings.current_mut() {
if interpolated_string.nesting() == self.nesting {
let error_type = LexicalErrorType::from_interpolated_string_error(
InterpolatedStringErrorType::SingleRbrace,
interpolated_string.kind(),
);
return self.push_error(LexicalError::new(error_type, self.token_range()));
}
interpolated_string.try_end_format_spec(self.nesting);
}
self.nesting = self.nesting.saturating_sub(1);
TokenKind::Rbrace
}
':' => {
if self
.interpolated_strings
.current_mut()
.is_some_and(|interpolated_string| {
interpolated_string.try_start_format_spec(self.nesting)
})
{
TokenKind::Colon
} else if self.cursor.eat_char('=') {
TokenKind::ColonEqual
} else {
TokenKind::Colon
}
}
';' => TokenKind::Semi,
'<' => {
if self.cursor.eat_char('<') {
if self.cursor.eat_char('=') {
TokenKind::LeftShiftEqual
} else {
TokenKind::LeftShift
}
} else if self.cursor.eat_char('=') {
TokenKind::LessEqual
} else {
TokenKind::Less
}
}
'>' => {
if self.cursor.eat_char('>') {
if self.cursor.eat_char('=') {
TokenKind::RightShiftEqual
} else {
TokenKind::RightShift
}
} else if self.cursor.eat_char('=') {
TokenKind::GreaterEqual
} else {
TokenKind::Greater
}
}
',' => TokenKind::Comma,
'.' => {
if self.cursor.first().is_ascii_digit() {
self.lex_decimal_number('.')
} else if self.cursor.eat_char2('.', '.') {
TokenKind::Ellipsis
} else {
TokenKind::Dot
}
}
'\n' => {
return if self.nesting == 0 && !self.state.is_new_logical_line() {
self.state = State::AfterNewline;
TokenKind::Newline
} else {
if let Some(interpolated_string) = self.interpolated_strings.current_mut() {
interpolated_string.try_end_format_spec(self.nesting);
}
TokenKind::NonLogicalNewline
};
}
'\r' => {
self.cursor.eat_char('\n');
return if self.nesting == 0 && !self.state.is_new_logical_line() {
self.state = State::AfterNewline;
TokenKind::Newline
} else {
if let Some(interpolated_string) = self.interpolated_strings.current_mut() {
interpolated_string.try_end_format_spec(self.nesting);
}
TokenKind::NonLogicalNewline
};
}
_ => {
self.state = State::Other;
return self.push_error(LexicalError::new(
LexicalErrorType::UnrecognizedToken { tok: c },
self.token_range(),
));
}
};
self.state = State::Other;
token
}
/// Lex an identifier. Also used for keywords and string/bytes literals with a prefix.
///
/// `first` is the already-consumed first character of the token.
fn lex_identifier(&mut self, first: char) -> TokenKind {
    // Detect potential string like rb'' b'' f'' t'' u'' r''
    // If `first` (optionally combined with the next character) is a valid string
    // prefix immediately followed by a quote, this is a string literal, not a name.
    let quote = match (first, self.cursor.first()) {
        (_, quote @ ('\'' | '"')) => self.try_single_char_prefix(first).then(|| {
            self.cursor.bump();
            quote
        }),
        (_, second) if is_quote(self.cursor.second()) => {
            self.try_double_char_prefix([first, second]).then(|| {
                self.cursor.bump();
                // SAFETY: Safe because of the `is_quote` check in this match arm's guard
                self.cursor.bump().unwrap()
            })
        }
        _ => None,
    };
    if let Some(quote) = quote {
        // f-/t-strings get a dedicated start token; everything else is a plain string.
        if self.current_flags.is_interpolated_string() {
            if let Some(kind) = self.lex_interpolated_string_start(quote) {
                return kind;
            }
        }
        return self.lex_string(quote);
    }
    // Keep track of whether the identifier is ASCII-only or not.
    //
    // This is important because Python applies NFKC normalization to
    // identifiers: https://docs.python.org/3/reference/lexical_analysis.html#identifiers.
    // We need to therefore do the same in our lexer, but applying NFKC normalization
    // unconditionally is extremely expensive. If we know an identifier is ASCII-only,
    // (by far the most common case), we can skip NFKC normalization of the identifier.
    let mut is_ascii = first.is_ascii();
    self.cursor
        .eat_while(|c| is_identifier_continuation(c, &mut is_ascii));
    let text = self.token_text();
    if !is_ascii {
        self.current_value = TokenValue::Name(text.nfkc().collect::<Name>());
        return TokenKind::Name;
    }
    // Short circuit for names that are longer than any known keyword.
    // It helps Rust to predict that the Name::new call in the keyword match's default branch
    // is guaranteed to fit into a stack allocated (inline) Name.
    // (The longest keywords, e.g. "continue" and "nonlocal", are 8 bytes.)
    if text.len() > 8 {
        self.current_value = TokenValue::Name(Name::new(text));
        return TokenKind::Name;
    }
    match text {
        "False" => TokenKind::False,
        "None" => TokenKind::None,
        "True" => TokenKind::True,
        "and" => TokenKind::And,
        "as" => TokenKind::As,
        "assert" => TokenKind::Assert,
        "async" => TokenKind::Async,
        "await" => TokenKind::Await,
        "break" => TokenKind::Break,
        "case" => TokenKind::Case,
        "class" => TokenKind::Class,
        "continue" => TokenKind::Continue,
        "def" => TokenKind::Def,
        "del" => TokenKind::Del,
        "elif" => TokenKind::Elif,
        "else" => TokenKind::Else,
        "except" => TokenKind::Except,
        "finally" => TokenKind::Finally,
        "for" => TokenKind::For,
        "from" => TokenKind::From,
        "global" => TokenKind::Global,
        "if" => TokenKind::If,
        "import" => TokenKind::Import,
        "in" => TokenKind::In,
        "is" => TokenKind::Is,
        "lambda" => TokenKind::Lambda,
        "match" => TokenKind::Match,
        "nonlocal" => TokenKind::Nonlocal,
        "not" => TokenKind::Not,
        "or" => TokenKind::Or,
        "pass" => TokenKind::Pass,
        "raise" => TokenKind::Raise,
        "return" => TokenKind::Return,
        "try" => TokenKind::Try,
        "type" => TokenKind::Type,
        "while" => TokenKind::While,
        "with" => TokenKind::With,
        "yield" => TokenKind::Yield,
        _ => {
            self.current_value = TokenValue::Name(Name::new(text));
            TokenKind::Name
        }
    }
}
/// Try lexing the single character string prefix, updating the token flags accordingly.
/// Returns `true` if it matches.
fn try_single_char_prefix(&mut self, first: char) -> bool {
    // Translate the prefix character into the flag it sets; anything else
    // means `first` is an ordinary identifier start, not a string prefix.
    let flag = match first {
        'f' | 'F' => TokenFlags::F_STRING,
        't' | 'T' => TokenFlags::T_STRING,
        'u' | 'U' => TokenFlags::UNICODE_STRING,
        'b' | 'B' => TokenFlags::BYTE_STRING,
        'r' => TokenFlags::RAW_STRING_LOWERCASE,
        'R' => TokenFlags::RAW_STRING_UPPERCASE,
        _ => return false,
    };
    self.current_flags |= flag;
    true
}
/// Try lexing the double character string prefix, updating the token flags accordingly.
/// Returns `true` if it matches.
fn try_double_char_prefix(&mut self, value: [char; 2]) -> bool {
    // Translate the two-character prefix into the combined flags it sets.
    // The raw-string flag distinguishes `r` from `R`; the string-kind flag is
    // case-insensitive. Any other pair is not a valid string prefix.
    let flags = match value {
        ['r', 'f' | 'F'] | ['f' | 'F', 'r'] => {
            TokenFlags::F_STRING | TokenFlags::RAW_STRING_LOWERCASE
        }
        ['R', 'f' | 'F'] | ['f' | 'F', 'R'] => {
            TokenFlags::F_STRING | TokenFlags::RAW_STRING_UPPERCASE
        }
        ['r', 't' | 'T'] | ['t' | 'T', 'r'] => {
            TokenFlags::T_STRING | TokenFlags::RAW_STRING_LOWERCASE
        }
        ['R', 't' | 'T'] | ['t' | 'T', 'R'] => {
            TokenFlags::T_STRING | TokenFlags::RAW_STRING_UPPERCASE
        }
        ['r', 'b' | 'B'] | ['b' | 'B', 'r'] => {
            TokenFlags::BYTE_STRING | TokenFlags::RAW_STRING_LOWERCASE
        }
        ['R', 'b' | 'B'] | ['b' | 'B', 'R'] => {
            TokenFlags::BYTE_STRING | TokenFlags::RAW_STRING_UPPERCASE
        }
        _ => return false,
    };
    self.current_flags |= flags;
    true
}
/// Lex a f-string or t-string start token if positioned at the start of an f-string or t-string.
///
/// `quote` is the just-consumed opening quote character. Returns `None` when
/// the current flags/nesting don't form a valid interpolated-string context,
/// in which case the caller falls back to lexing a regular string.
fn lex_interpolated_string_start(&mut self, quote: char) -> Option<TokenKind> {
    #[cfg(debug_assertions)]
    debug_assert_eq!(self.cursor.previous(), quote);
    if quote == '"' {
        self.current_flags |= TokenFlags::DOUBLE_QUOTES;
    }
    // Two more identical quotes mean the string is triple-quoted.
    if self.cursor.eat_char2(quote, quote) {
        self.current_flags |= TokenFlags::TRIPLE_QUOTED_STRING;
    }
    let ftcontext = InterpolatedStringContext::new(self.current_flags, self.nesting)?;
    let kind = ftcontext.kind();
    // Remember the context so `}`/`:`/newline handling can consult it later.
    self.interpolated_strings.push(ftcontext);
    Some(kind.start_token())
}
/// Lex an f-string or t-string middle or end token.
fn lex_interpolated_string_middle_or_end(&mut self) -> Option<TokenKind> {
// SAFETY: Safe because the function is only called when `self.fstrings` is not empty.
let interpolated_string = self.interpolated_strings.current().unwrap();
let string_kind = interpolated_string.kind();
let interpolated_flags = interpolated_string.flags();
// Check if we're at the end of the f-string.
if interpolated_string.is_triple_quoted() {
let quote_char = interpolated_string.quote_char();
if self.cursor.eat_char3(quote_char, quote_char, quote_char) {
self.current_flags = interpolated_string.flags();
return Some(string_kind.end_token());
}
} else if self.cursor.eat_char(interpolated_string.quote_char()) {
self.current_flags = interpolated_string.flags();
return Some(string_kind.end_token());
}
// We have to decode `{{` and `}}` into `{` and `}` respectively. As an
// optimization, we only allocate a new string we find any escaped curly braces,
// otherwise this string will remain empty and we'll use a source slice instead.
let mut normalized = String::new();
// Tracks the last offset of token value that has been written to `normalized`.
let mut last_offset = self.offset();
// This isn't going to change for the duration of the loop.
let in_format_spec = interpolated_string.is_in_format_spec(self.nesting);
let mut in_named_unicode = false;
loop {
match self.cursor.first() {
// The condition is to differentiate between the `NUL` (`\0`) character
// in the source code and the one returned by `self.cursor.first()` when
// we reach the end of the source code.
EOF_CHAR if self.cursor.is_eof() => {
let error = if interpolated_string.is_triple_quoted() {
InterpolatedStringErrorType::UnterminatedTripleQuotedString
} else {
InterpolatedStringErrorType::UnterminatedString
};
self.nesting = interpolated_string.nesting();
self.interpolated_strings.pop();
self.current_flags |= TokenFlags::UNCLOSED_STRING;
self.push_error(LexicalError::new(
LexicalErrorType::from_interpolated_string_error(error, string_kind),
self.token_range(),
));
break;
}
'\n' | '\r' if !interpolated_string.is_triple_quoted() => {
// https://github.com/astral-sh/ruff/issues/18632
let error_type = if in_format_spec {
InterpolatedStringErrorType::NewlineInFormatSpec
} else {
InterpolatedStringErrorType::UnterminatedString
};
self.nesting = interpolated_string.nesting();
self.interpolated_strings.pop();
self.current_flags |= TokenFlags::UNCLOSED_STRING;
self.push_error(LexicalError::new(
LexicalErrorType::from_interpolated_string_error(error_type, string_kind),
self.token_range(),
));
break;
}
'\\' => {
self.cursor.bump(); // '\'
if matches!(self.cursor.first(), '{' | '}') {
// Don't consume `{` or `}` as we want them to be emitted as tokens.
// They will be handled in the next iteration.
continue;
} else if !interpolated_string.is_raw_string() {
if self.cursor.eat_char2('N', '{') {
in_named_unicode = true;
continue;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/lib.rs | crates/ruff_python_parser/src/lib.rs | //! This crate can be used to parse Python source code into an Abstract
//! Syntax Tree.
//!
//! ## Overview
//!
//! The process by which source code is parsed into an AST can be broken down
//! into two general stages: [lexical analysis] and [parsing].
//!
//! During lexical analysis, the source code is converted into a stream of lexical
//! tokens that represent the smallest meaningful units of the language. For example,
//! the source code `print("Hello world")` would _roughly_ be converted into the following
//! stream of tokens:
//!
//! ```text
//! Name("print"), LeftParen, String("Hello world"), RightParen
//! ```
//!
//! These tokens are then consumed by the `ruff_python_parser`, which matches them against a set of
//! grammar rules to verify that the source code is syntactically valid and to construct
//! an AST that represents the source code.
//!
//! During parsing, the `ruff_python_parser` consumes the tokens generated by the lexer and constructs
//! a tree representation of the source code. The tree is made up of nodes that represent
//! the different syntactic constructs of the language. If the source code is syntactically
//! invalid, parsing fails and an error is returned. After a successful parse, the AST can
//! be used to perform further analysis on the source code. Continuing with the example
//! above, the AST generated by the `ruff_python_parser` would _roughly_ look something like this:
//!
//! ```text
//! node: Expr {
//! value: {
//! node: Call {
//! func: {
//! node: Name {
//! id: "print",
//! ctx: Load,
//! },
//! },
//! args: [
//! node: Constant {
//! value: Str("Hello World"),
//! kind: None,
//! },
//! ],
//! keywords: [],
//! },
//! },
//! },
//!```
//!
//! **Note:** The Tokens/ASTs shown above are not the exact tokens/ASTs generated by the `ruff_python_parser`.
//! Refer to the [playground](https://play.ruff.rs) for the correct representation.
//!
//! ## Source code layout
//!
//! The functionality of this crate is split into several modules:
//!
//! - token: This module contains the definition of the tokens that are generated by the lexer.
//! - [lexer]: This module contains the lexer and is responsible for generating the tokens.
//! - parser: This module contains an interface to the [Parsed] and is responsible for generating the AST.
//! - mode: This module contains the definition of the different modes that the `ruff_python_parser` can be in.
//!
//! [lexical analysis]: https://en.wikipedia.org/wiki/Lexical_analysis
//! [parsing]: https://en.wikipedia.org/wiki/Parsing
//! [lexer]: crate::lexer
pub use crate::error::{
InterpolatedStringErrorType, LexicalErrorType, ParseError, ParseErrorType,
UnsupportedSyntaxError, UnsupportedSyntaxErrorKind,
};
pub use crate::parser::ParseOptions;
use crate::parser::Parser;
use ruff_python_ast::token::Tokens;
use ruff_python_ast::{
Expr, Mod, ModExpression, ModModule, PySourceType, StringFlags, StringLiteral, Suite,
};
use ruff_text_size::{Ranged, TextRange};
mod error;
pub mod lexer;
mod parser;
pub mod semantic_errors;
mod string;
mod token;
mod token_set;
mod token_source;
pub mod typing;
/// Parse a full Python module usually consisting of multiple lines.
///
/// This is a convenience function that can be used to parse a full Python program without having to
/// specify the [`Mode`] or the location. It is probably what you want to use most of the time.
///
/// # Example
///
/// For example, parsing a simple function definition and a call to that function:
///
/// ```
/// use ruff_python_parser::parse_module;
///
/// let source = r#"
/// def foo():
///    return 42
///
/// print(foo())
/// "#;
///
/// let module = parse_module(source);
/// assert!(module.is_ok());
/// ```
pub fn parse_module(source: &str) -> Result<Parsed<ModModule>, ParseError> {
    let options = ParseOptions::from(Mode::Module);
    let parsed = Parser::new(source, options).parse();
    // `Mode::Module` always produces a `Mod::Module`, so the conversion cannot fail.
    let module = parsed.try_into_module().unwrap();
    module.into_result()
}
/// Parses a single Python expression.
///
/// This convenience function can be used to parse a single expression without having to
/// specify the Mode or the location.
///
/// # Example
///
/// For example, parsing a single expression denoting the addition of two numbers:
///
/// ```
/// use ruff_python_parser::parse_expression;
///
/// let expr = parse_expression("1 + 2");
/// assert!(expr.is_ok());
/// ```
pub fn parse_expression(source: &str) -> Result<Parsed<ModExpression>, ParseError> {
    let options = ParseOptions::from(Mode::Expression);
    let parsed = Parser::new(source, options).parse();
    // `Mode::Expression` always produces a `Mod::Expression`, so the conversion cannot fail.
    let expression = parsed.try_into_expression().unwrap();
    expression.into_result()
}
/// Parses a Python expression for the given range in the source.
///
/// This function allows to specify the range of the expression in the source code, other than
/// that, it behaves exactly like [`parse_expression`].
///
/// # Example
///
/// Parsing one of the numeric literal which is part of an addition expression:
///
/// ```
/// use ruff_python_parser::parse_expression_range;
/// # use ruff_text_size::{TextRange, TextSize};
///
/// let parsed = parse_expression_range("11 + 22 + 33", TextRange::new(TextSize::new(5), TextSize::new(7)));
/// assert!(parsed.is_ok());
/// ```
pub fn parse_expression_range(
    source: &str,
    range: TextRange,
) -> Result<Parsed<ModExpression>, ParseError> {
    // Truncate the source at the end of the range; parsing begins at `range.start()`.
    let truncated = &source[..range.end().to_usize()];
    let options = ParseOptions::from(Mode::Expression);
    let parsed = Parser::new_starts_at(truncated, range.start(), options).parse();
    // `Mode::Expression` always produces a `Mod::Expression`, so the conversion cannot fail.
    parsed.try_into_expression().unwrap().into_result()
}
/// Parses a Python expression as if it is parenthesized.
///
/// It behaves similarly to [`parse_expression_range`] but allows what would be valid within
/// parenthesis, e.g. a multiline expression without explicit parentheses.
///
/// # Example
///
/// Parsing an expression that would be valid within parenthesis:
///
/// ```
/// use ruff_python_parser::parse_parenthesized_expression_range;
/// # use ruff_text_size::{TextRange, TextSize};
///
/// let parsed = parse_parenthesized_expression_range("'''\n int | str'''", TextRange::new(TextSize::new(3), TextSize::new(14)));
/// assert!(parsed.is_ok());
/// ```
pub fn parse_parenthesized_expression_range(
    source: &str,
    range: TextRange,
) -> Result<Parsed<ModExpression>, ParseError> {
    // Truncate the source at the end of the range; parsing begins at `range.start()`.
    let source = &source[..range.end().to_usize()];
    let parsed = Parser::new_starts_at(
        source,
        range.start(),
        ParseOptions::from(Mode::ParenthesizedExpression),
    )
    .parse();
    // `Mode::ParenthesizedExpression` always produces a `Mod::Expression`.
    parsed.try_into_expression().unwrap().into_result()
}
/// Parses a Python expression from a string annotation.
///
/// # Example
///
/// Parsing a string annotation:
///
/// ```
/// use ruff_python_parser::parse_string_annotation;
/// use ruff_python_ast::{StringLiteral, StringLiteralFlags, AtomicNodeIndex};
/// use ruff_text_size::{TextRange, TextSize};
///
/// let string = StringLiteral {
///     value: "'''\n int | str'''".to_string().into_boxed_str(),
///     flags: StringLiteralFlags::empty(),
///     range: TextRange::new(TextSize::new(0), TextSize::new(16)),
///     node_index: AtomicNodeIndex::NONE
/// };
/// let parsed = parse_string_annotation("'''\n int | str'''", &string);
/// assert!(!parsed.is_ok());
/// ```
pub fn parse_string_annotation(
    source: &str,
    string: &StringLiteral,
) -> Result<Parsed<ModExpression>, ParseError> {
    // Narrow the literal's range so it covers only the string contents,
    // excluding the opening prefix/quotes and the closing quotes.
    let opener = string.flags.opener_len();
    let closer = string.flags.closer_len();
    let range = string.range().add_start(opener).sub_end(closer);
    let source = &source[..range.end().to_usize()];
    // Triple-quoted annotations may span lines, so parse them as if parenthesized.
    if string.flags.is_triple_quoted() {
        parse_parenthesized_expression_range(source, range)
    } else {
        parse_expression_range(source, range)
    }
}
/// Parse the given Python source code using the specified [`ParseOptions`].
///
/// This function is the most general function to parse Python code. Based on the [`Mode`] supplied
/// via the [`ParseOptions`], it can be used to parse a single expression, a full Python program,
/// an interactive expression or a Python program containing IPython escape commands.
///
/// # Example
///
/// If we want to parse a simple expression, we can use the [`Mode::Expression`] mode during
/// parsing:
///
/// ```
/// use ruff_python_parser::{parse, Mode, ParseOptions};
///
/// let parsed = parse("1 + 2", ParseOptions::from(Mode::Expression));
/// assert!(parsed.is_ok());
/// ```
///
/// Alternatively, we can parse a full Python program consisting of multiple lines:
///
/// ```
/// use ruff_python_parser::{parse, Mode, ParseOptions};
///
/// let source = r#"
/// class Greeter:
///
///   def greet(self):
///    print("Hello, world!")
/// "#;
/// let parsed = parse(source, ParseOptions::from(Mode::Module));
/// assert!(parsed.is_ok());
/// ```
///
/// Additionally, we can parse a Python program containing IPython escapes:
///
/// ```
/// use ruff_python_parser::{parse, Mode, ParseOptions};
///
/// let source = r#"
/// %timeit 1 + 2
/// ?str.replace
/// !ls
/// "#;
/// let parsed = parse(source, ParseOptions::from(Mode::Ipython));
/// assert!(parsed.is_ok());
/// ```
pub fn parse(source: &str, options: ParseOptions) -> Result<Parsed<Mod>, ParseError> {
    // Parse unconditionally, then surface the first syntax error (if any) as `Err`.
    let parsed = parse_unchecked(source, options);
    parsed.into_result()
}
/// Parse the given Python source code using the specified [`ParseOptions`].
///
/// This is same as the [`parse`] function except that it doesn't check for any [`ParseError`]
/// and returns the [`Parsed`] as is.
pub fn parse_unchecked(source: &str, options: ParseOptions) -> Parsed<Mod> {
    let parser = Parser::new(source, options);
    parser.parse()
}
/// Parse the given Python source code using the specified [`PySourceType`].
///
/// Like [`parse_unchecked`], this does not check for syntax errors; the caller
/// inspects the returned [`Parsed`] for them.
pub fn parse_unchecked_source(source: &str, source_type: PySourceType) -> Parsed<ModModule> {
    let options = ParseOptions::from(source_type);
    let parsed = Parser::new(source, options).parse();
    // SAFETY: Safe because `PySourceType` always parses to a `ModModule`
    parsed.try_into_module().unwrap()
}
/// Represents the parsed source code.
#[derive(Debug, PartialEq, Clone, get_size2::GetSize)]
pub struct Parsed<T> {
    /// The root syntax node, e.g. [`Mod`], [`ModModule`], or [`ModExpression`].
    syntax: T,
    /// All tokens produced while parsing, trivia and non-trivia alike.
    tokens: Tokens,
    /// The syntax errors encountered during parsing.
    errors: Vec<ParseError>,
    /// Version-related syntax errors: syntax that parsed but isn't supported
    /// by the targeted Python version.
    unsupported_syntax_errors: Vec<UnsupportedSyntaxError>,
}
impl<T> Parsed<T> {
    /// The syntax node that resulted from parsing.
    pub fn syntax(&self) -> &T {
        &self.syntax
    }
    /// All tokens that were produced while parsing.
    pub fn tokens(&self) -> &Tokens {
        &self.tokens
    }
    /// The syntax errors that were encountered while parsing.
    pub fn errors(&self) -> &[ParseError] {
        &self.errors
    }
    /// The version-related syntax errors that were encountered while parsing.
    pub fn unsupported_syntax_errors(&self) -> &[UnsupportedSyntaxError] {
        &self.unsupported_syntax_errors
    }
    /// Consumes `self`, returning the owned syntax node.
    pub fn into_syntax(self) -> T {
        self.syntax
    }
    /// Consumes `self`, returning the owned list of syntax errors.
    pub fn into_errors(self) -> Vec<ParseError> {
        self.errors
    }
    /// Whether parsing produced no [`ParseError`]s.
    ///
    /// Version-related [`UnsupportedSyntaxError`]s are *not* considered here;
    /// see [`Parsed::has_no_syntax_errors`] for a check that includes them.
    pub fn has_valid_syntax(&self) -> bool {
        self.errors.is_empty()
    }
    /// Whether parsing produced at least one [`ParseError`].
    ///
    /// Version-related [`UnsupportedSyntaxError`]s are *not* considered here;
    /// see [`Parsed::has_no_syntax_errors`] for a check that includes them.
    pub fn has_invalid_syntax(&self) -> bool {
        !self.errors.is_empty()
    }
    /// Whether parsing produced neither [`ParseError`]s nor
    /// [`UnsupportedSyntaxError`]s.
    ///
    /// See [`Parsed::has_valid_syntax`] for a check limited to [`ParseError`]s.
    pub fn has_no_syntax_errors(&self) -> bool {
        self.errors.is_empty() && self.unsupported_syntax_errors.is_empty()
    }
    /// Whether parsing produced any [`ParseError`]s or
    /// [`UnsupportedSyntaxError`]s.
    ///
    /// See [`Parsed::has_invalid_syntax`] for a check limited to [`ParseError`]s.
    pub fn has_syntax_errors(&self) -> bool {
        !self.has_no_syntax_errors()
    }
    /// Borrows the [`Parsed`] output as a [`Result`]: [`Ok`] when there are no
    /// [`ParseError`]s, otherwise [`Err`] with all collected [`ParseError`]s.
    ///
    /// Note that any [`unsupported_syntax_errors`](Parsed::unsupported_syntax_errors) will not
    /// cause [`Err`] to be returned.
    pub fn as_result(&self) -> Result<&Parsed<T>, &[ParseError]> {
        match self.errors.as_slice() {
            [] => Ok(self),
            errors => Err(errors),
        }
    }
    /// Consumes the [`Parsed`] output, returning [`Ok`] when there are no
    /// [`ParseError`]s and [`Err`] with the first [`ParseError`] otherwise.
    ///
    /// Note that any [`unsupported_syntax_errors`](Parsed::unsupported_syntax_errors) will not
    /// cause [`Err`] to be returned.
    pub(crate) fn into_result(self) -> Result<Parsed<T>, ParseError> {
        if self.has_invalid_syntax() {
            // `has_invalid_syntax()` guarantees at least one error exists.
            Err(self.into_errors().into_iter().next().unwrap())
        } else {
            Ok(self)
        }
    }
}
impl Parsed<Mod> {
    /// Attempts to convert the [`Parsed<Mod>`] into a [`Parsed<ModModule>`].
    ///
    /// Returns [`Some`] with the contained module when the parsed syntax is a
    /// [`Mod::Module`], and [`None`] when it is an expression.
    pub fn try_into_module(self) -> Option<Parsed<ModModule>> {
        let Mod::Module(module) = self.syntax else {
            return None;
        };
        Some(Parsed {
            syntax: module,
            tokens: self.tokens,
            errors: self.errors,
            unsupported_syntax_errors: self.unsupported_syntax_errors,
        })
    }
    /// Attempts to convert the [`Parsed<Mod>`] into a [`Parsed<ModExpression>`].
    ///
    /// Returns [`Some`] with the contained expression when the parsed syntax is
    /// a [`Mod::Expression`], and [`None`] when it is a module.
    pub fn try_into_expression(self) -> Option<Parsed<ModExpression>> {
        let Mod::Expression(expression) = self.syntax else {
            return None;
        };
        Some(Parsed {
            syntax: expression,
            tokens: self.tokens,
            errors: self.errors,
            unsupported_syntax_errors: self.unsupported_syntax_errors,
        })
    }
}
impl Parsed<ModModule> {
    /// Returns the module body contained in this parsed output as a [`Suite`].
    pub fn suite(&self) -> &Suite {
        &self.syntax.body
    }
    /// Consumes the [`Parsed`] output and returns the module body as a [`Suite`],
    /// discarding the tokens and any collected errors.
    pub fn into_suite(self) -> Suite {
        self.syntax.body
    }
}
impl Parsed<ModExpression> {
    /// Returns the expression contained in this parsed output.
    pub fn expr(&self) -> &Expr {
        &self.syntax.body
    }
    /// Returns a mutable reference to the expression contained in this parsed output.
    pub fn expr_mut(&mut self) -> &mut Expr {
        &mut self.syntax.body
    }
    /// Consumes the [`Parsed`] output and returns the contained [`Expr`].
    pub fn into_expr(self) -> Expr {
        // The expression body is boxed; dereference to return it by value.
        *self.syntax.body
    }
}
/// Controls the different modes by which a source file can be parsed.
///
/// The mode argument specifies in what way code must be parsed.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Mode {
    /// The code consists of a sequence of statements.
    Module,
    /// The code consists of a single expression.
    Expression,
    /// The code consists of a single expression and is parsed as if it is parenthesized. The parentheses themselves aren't required.
    /// This allows for having valid multiline expression without the need of parentheses
    /// and is specifically useful for parsing string annotations.
    ParenthesizedExpression,
    /// The code consists of a sequence of statements which can include the
    /// escape commands that are part of IPython syntax.
    ///
    /// ## Supported escape commands:
    ///
    /// - [Magic command system] which is limited to [line magics] and can start
    ///   with `?` or `??`.
    /// - [Dynamic object information] which can start with `?` or `??`.
    /// - [System shell access] which can start with `!` or `!!`.
    /// - [Automatic parentheses and quotes] which can start with `/`, `;`, or `,`.
    ///
    /// [Magic command system]: https://ipython.readthedocs.io/en/stable/interactive/reference.html#magic-command-system
    /// [line magics]: https://ipython.readthedocs.io/en/stable/interactive/magics.html#line-magics
    /// [Dynamic object information]: https://ipython.readthedocs.io/en/stable/interactive/reference.html#dynamic-object-information
    /// [System shell access]: https://ipython.readthedocs.io/en/stable/interactive/reference.html#system-shell-access
    /// [Automatic parentheses and quotes]: https://ipython.readthedocs.io/en/stable/interactive/reference.html#automatic-parentheses-and-quotes
    Ipython,
}
impl std::str::FromStr for Mode {
    type Err = ModeParseError;
    /// Parses a mode name; accepted values are `"exec"`, `"single"`, `"eval"`,
    /// and `"ipython"`.
    fn from_str(s: &str) -> Result<Self, ModeParseError> {
        match s {
            // `exec` and `single` both map to `Mode::Module` since this parser
            // has no dedicated interactive mode.
            "exec" | "single" => Ok(Mode::Module),
            "eval" => Ok(Mode::Expression),
            "ipython" => Ok(Mode::Ipython),
            _ => Err(ModeParseError),
        }
    }
}
/// A type that can be represented as [Mode].
pub trait AsMode {
    /// Returns the [`Mode`] in which this source should be parsed.
    fn as_mode(&self) -> Mode;
}
impl AsMode for PySourceType {
    fn as_mode(&self) -> Mode {
        match self {
            // Regular Python files and stub files are plain modules; notebooks
            // may contain IPython escape commands.
            PySourceType::Python | PySourceType::Stub => Mode::Module,
            PySourceType::Ipynb => Mode::Ipython,
        }
    }
}
/// Returned when a given mode is not valid.
///
/// Produced by [`Mode`]'s `FromStr` implementation for unrecognized mode strings.
#[derive(Debug)]
pub struct ModeParseError;
impl std::fmt::Display for ModeParseError {
    /// Writes the fixed help message listing the accepted mode strings.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // No interpolation is needed, so write the literal directly.
        f.write_str(r#"mode must be "exec", "eval", "ipython", or "single""#)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/token_set.rs | crates/ruff_python_parser/src/token_set.rs | use ruff_python_ast::token::TokenKind;
/// A bit-set of `TokenKind`s
///
/// Each kind occupies the bit at position `kind as usize` in a `u128`, so this
/// assumes every `TokenKind` discriminant is below 128 — TODO confirm when
/// adding new token kinds.
#[derive(Clone, Copy)]
pub(crate) struct TokenSet(u128);
impl TokenSet {
    /// Builds a set containing exactly the given kinds.
    ///
    /// Written with a `while` loop because `const fn` cannot use `for` loops
    /// or iterators.
    pub(crate) const fn new<const N: usize>(kinds: [TokenKind; N]) -> TokenSet {
        let mut res = 0u128;
        let mut i = 0usize;
        while i < N {
            let kind = kinds[i];
            res |= mask(kind);
            i += 1;
        }
        TokenSet(res)
    }
    /// Returns the set containing every kind present in `self` or `other`.
    pub(crate) const fn union(self, other: TokenSet) -> TokenSet {
        TokenSet(self.0 | other.0)
    }
    /// Returns a copy of `self` with `kind` removed (a no-op if it was absent).
    pub(crate) const fn remove(self, kind: TokenKind) -> TokenSet {
        TokenSet(self.0 & !mask(kind))
    }
    /// Returns `true` if `kind` is a member of the set.
    pub(crate) const fn contains(&self, kind: TokenKind) -> bool {
        self.0 & mask(kind) != 0
    }
}
/// The single-bit mask for `kind`: bit `kind as usize` of a `u128`.
///
/// NOTE(review): shifts by the discriminant, so this relies on every
/// `TokenKind` discriminant being below 128 — confirm when adding token kinds.
const fn mask(kind: TokenKind) -> u128 {
    1u128 << (kind as usize)
}
impl<const N: usize> From<[TokenKind; N]> for TokenSet {
    /// Builds a [`TokenSet`] from an array of kinds; equivalent to [`TokenSet::new`].
    fn from(kinds: [TokenKind; N]) -> Self {
        TokenSet::new(kinds)
    }
}
#[test]
fn token_set_works_for_tokens() {
    use ruff_python_ast::token::TokenKind::*;
    // Membership reflects exactly the kinds passed to `new`.
    let mut ts = TokenSet::new([EndOfFile, Name]);
    assert!(ts.contains(EndOfFile));
    assert!(ts.contains(Name));
    assert!(!ts.contains(Plus));
    // `remove` yields a new set without the given kind.
    ts = ts.remove(Name);
    assert!(!ts.contains(Name));
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/token_source.rs | crates/ruff_python_parser/src/token_source.rs | use ruff_python_ast::token::{Token, TokenFlags, TokenKind};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Mode;
use crate::error::LexicalError;
use crate::lexer::{Lexer, LexerCheckpoint};
use crate::string::InterpolatedStringKind;
use crate::token::TokenValue;
/// Token source for the parser that skips over any trivia tokens.
#[derive(Debug)]
pub(crate) struct TokenSource<'src> {
    /// The underlying source for the tokens.
    ///
    /// `'src` ties the token source to the lifetime of the source text.
    lexer: Lexer<'src>,
    /// A vector containing all the tokens emitted by the lexer. This is returned when the parser
    /// is finished consuming all the tokens. Note that unlike the emitted tokens, this vector
    /// holds both the trivia and non-trivia tokens.
    tokens: Vec<Token>,
}
impl<'src> TokenSource<'src> {
/// Create a new token source for the given lexer.
pub(crate) fn new(lexer: Lexer<'src>) -> Self {
    // TODO(dhruvmanila): Use `allocate_tokens_vec`
    TokenSource {
        lexer,
        tokens: vec![],
    }
}
/// Create a new token source from the given source code which starts at the given offset.
pub(crate) fn from_source(source: &'src str, mode: Mode, start_offset: TextSize) -> Self {
    let lexer = Lexer::new(source, mode, start_offset);
    let mut source = TokenSource::new(lexer);
    // Initialize the token source so that the current token is set correctly.
    source.do_bump();
    source
}
/// Returns the kind of the current token.
// The accessors below delegate to the lexer, which tracks the current token.
pub(crate) fn current_kind(&self) -> TokenKind {
    self.lexer.current_kind()
}
/// Returns the range of the current token.
pub(crate) fn current_range(&self) -> TextRange {
    self.lexer.current_range()
}
/// Returns the flags for the current token.
pub(crate) fn current_flags(&self) -> TokenFlags {
    self.lexer.current_flags()
}
/// Calls the underlying [`take_value`] method on the lexer. Refer to its documentation
/// for more info.
///
/// [`take_value`]: Lexer::take_value
pub(crate) fn take_value(&mut self) -> TokenValue {
    self.lexer.take_value()
}
/// Calls the underlying [`re_lex_logical_token`] method on the lexer with the new lexer
/// position and updates the token vector accordingly.
///
/// [`re_lex_logical_token`]: Lexer::re_lex_logical_token
pub(crate) fn re_lex_logical_token(&mut self) {
    let mut non_logical_newline = None;
    // In debug builds, remember where the last non-trivia token ended so we can
    // verify below that truncation only ever removes trivia tokens.
    #[cfg(debug_assertions)]
    let last_non_trivia_end_before = {
        self.tokens
            .iter()
            .rev()
            .find(|tok| !tok.kind().is_trivia())
            .map(ruff_text_size::Ranged::end)
    };
    // Walk backwards over the trailing trivia run to find the earliest
    // `NonLogicalNewline` in it (comments may sit between it and the end).
    for (index, token) in self.tokens.iter().enumerate().rev() {
        match token.kind() {
            TokenKind::NonLogicalNewline => {
                non_logical_newline = Some((index, token.start()));
            }
            TokenKind::Comment => continue,
            _ => break,
        }
    }
    // Ask the lexer to re-lex from that position; if nothing changed, we're done.
    if !self
        .lexer
        .re_lex_logical_token(non_logical_newline.map(|(_, start)| start))
    {
        return;
    }
    let non_logical_line_index = non_logical_newline
        .expect(
            "`re_lex_logical_token` should only return `true` if `non_logical_line` is `Some`",
        )
        .0;
    // Trim the already bumped logical line token (and comments coming after it) as it might now have become a logical line token
    self.tokens.truncate(non_logical_line_index);
    #[cfg(debug_assertions)]
    {
        let last_non_trivia_end_now = {
            self.tokens
                .iter()
                .rev()
                .find(|tok| !tok.kind().is_trivia())
                .map(ruff_text_size::Ranged::end)
        };
        // Only trivia may have been removed; the last non-trivia token is unchanged.
        assert_eq!(last_non_trivia_end_before, last_non_trivia_end_now);
    }
    // Ensure `current` is positioned at a non-trivia token.
    if self.current_kind().is_trivia() {
        self.bump(self.current_kind());
    }
}
pub(crate) fn re_lex_string_token_in_interpolation_element(
&mut self,
kind: InterpolatedStringKind,
) {
self.lexer
.re_lex_string_token_in_interpolation_element(kind);
}
pub(crate) fn re_lex_raw_string_in_format_spec(&mut self) {
self.lexer.re_lex_raw_string_in_format_spec();
}
/// Returns the next non-trivia token without consuming it.
///
/// Use [`peek2`] to get the next two tokens.
///
/// [`peek2`]: TokenSource::peek2
pub(crate) fn peek(&mut self) -> TokenKind {
let checkpoint = self.lexer.checkpoint();
let next = self.next_non_trivia_token();
self.lexer.rewind(checkpoint);
next
}
/// Returns the next two non-trivia tokens without consuming it.
///
/// Use [`peek`] to only get the next token.
///
/// [`peek`]: TokenSource::peek
pub(crate) fn peek2(&mut self) -> (TokenKind, TokenKind) {
let checkpoint = self.lexer.checkpoint();
let first = self.next_non_trivia_token();
let second = self.next_non_trivia_token();
self.lexer.rewind(checkpoint);
(first, second)
}
/// Bumps the token source to the next non-trivia token.
///
/// It pushes the given kind to the token vector with the current token range.
pub(crate) fn bump(&mut self, kind: TokenKind) {
self.tokens
.push(Token::new(kind, self.current_range(), self.current_flags()));
self.do_bump();
}
/// Bumps the token source to the next non-trivia token without adding the current token to the
/// token vector. It does add the trivia tokens to the token vector.
fn do_bump(&mut self) {
loop {
let kind = self.lexer.next_token();
if kind.is_trivia() {
self.tokens
.push(Token::new(kind, self.current_range(), self.current_flags()));
continue;
}
break;
}
}
/// Returns the next non-trivia token without adding it to the token vector.
fn next_non_trivia_token(&mut self) -> TokenKind {
loop {
let kind = self.lexer.next_token();
if kind.is_trivia() {
continue;
}
break kind;
}
}
/// Creates a checkpoint to which the token source can later return to using [`Self::rewind`].
pub(crate) fn checkpoint(&self) -> TokenSourceCheckpoint {
TokenSourceCheckpoint {
lexer_checkpoint: self.lexer.checkpoint(),
tokens_position: self.tokens.len(),
}
}
/// Restore the token source to the given checkpoint.
pub(crate) fn rewind(&mut self, checkpoint: TokenSourceCheckpoint) {
let TokenSourceCheckpoint {
lexer_checkpoint,
tokens_position,
} = checkpoint;
self.lexer.rewind(lexer_checkpoint);
self.tokens.truncate(tokens_position);
}
/// Returns a slice of [`Token`] that are within the given `range`.
pub(crate) fn in_range(&self, range: TextRange) -> &[Token] {
let start = self
.tokens
.iter()
.rposition(|tok| tok.start() == range.start());
let end = self.tokens.iter().rposition(|tok| tok.end() == range.end());
let (Some(start), Some(end)) = (start, end) else {
return &self.tokens;
};
&self.tokens[start..=end]
}
/// Consumes the token source, returning the collected tokens, comment ranges, and any errors
/// encountered during lexing. The token collection includes both the trivia and non-trivia
/// tokens.
pub(crate) fn finish(mut self) -> (Vec<Token>, Vec<LexicalError>) {
assert_eq!(
self.current_kind(),
TokenKind::EndOfFile,
"TokenSource was not fully consumed"
);
// The `EndOfFile` token shouldn't be included in the token stream, it's mainly to signal
// the parser to stop. This isn't in `do_bump` because it only needs to be done once.
if let Some(last) = self.tokens.pop() {
assert_eq!(last.kind(), TokenKind::EndOfFile);
}
(self.tokens, self.lexer.finish())
}
}
/// A snapshot of the [`TokenSource`] state that it can be rewound to via
/// [`TokenSource::rewind`].
pub(crate) struct TokenSourceCheckpoint {
    // Checkpoint of the underlying lexer state.
    lexer_checkpoint: LexerCheckpoint,
    // Number of tokens that had been collected when the checkpoint was taken.
    tokens_position: usize,
}
/// Allocates a [`Vec`] with an approximated capacity to fit all tokens
/// of `contents`.
///
/// See [#9546](https://github.com/astral-sh/ruff/pull/9546) for a more detailed explanation.
#[expect(dead_code)]
fn allocate_tokens_vec(contents: &str) -> Vec<Token> {
    // Empirically, token count is roughly 15% of the source length in bytes.
    let estimated_tokens = contents.len().saturating_mul(15) / 100;
    Vec::with_capacity(estimated_tokens)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/string.rs | crates/ruff_python_parser/src/string.rs | //! Parsing of string literals, bytes literals, and implicit string concatenation.
use bstr::ByteSlice;
use std::fmt;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{self as ast, AnyStringFlags, AtomicNodeIndex, Expr, StringFlags};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::error::{LexicalError, LexicalErrorType};
/// A parsed string-like literal: a plain string, a bytes literal, an f-string,
/// or a t-string.
#[derive(Debug)]
pub(crate) enum StringType {
    /// A plain string literal, e.g. `"foo"`.
    Str(ast::StringLiteral),
    /// A bytes literal, e.g. `b"foo"`.
    Bytes(ast::BytesLiteral),
    /// An f-string, e.g. `f"{foo}"`.
    FString(ast::FString),
    /// A t-string, e.g. `t"{foo}"`.
    TString(ast::TString),
}
impl Ranged for StringType {
fn range(&self) -> TextRange {
match self {
Self::Str(node) => node.range(),
Self::Bytes(node) => node.range(),
Self::FString(node) => node.range(),
Self::TString(node) => node.range(),
}
}
}
impl From<StringType> for Expr {
    /// Converts the wrapped literal node into the corresponding [`Expr`] variant.
    fn from(string: StringType) -> Self {
        match string {
            StringType::Str(literal) => literal.into(),
            StringType::Bytes(literal) => literal.into(),
            StringType::FString(fstring) => fstring.into(),
            StringType::TString(tstring) => tstring.into(),
        }
    }
}
/// The kind of an interpolated string literal: f-string or t-string.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum InterpolatedStringKind {
    /// A format string, e.g. `f"{foo}"`.
    FString,
    /// A template string, e.g. `t"{foo}"`.
    TString,
}
impl InterpolatedStringKind {
    /// Returns the token kind that opens this kind of interpolated string.
    #[inline]
    pub(crate) const fn start_token(self) -> TokenKind {
        match self {
            Self::FString => TokenKind::FStringStart,
            Self::TString => TokenKind::TStringStart,
        }
    }

    /// Returns the token kind used for the literal parts of this kind of
    /// interpolated string.
    #[inline]
    pub(crate) const fn middle_token(self) -> TokenKind {
        match self {
            Self::FString => TokenKind::FStringMiddle,
            Self::TString => TokenKind::TStringMiddle,
        }
    }

    /// Returns the token kind that closes this kind of interpolated string.
    #[inline]
    pub(crate) const fn end_token(self) -> TokenKind {
        match self {
            Self::FString => TokenKind::FStringEnd,
            Self::TString => TokenKind::TStringEnd,
        }
    }
}
impl fmt::Display for InterpolatedStringKind {
    /// Writes the human-readable name used in error messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            InterpolatedStringKind::FString => "f-string",
            InterpolatedStringKind::TString => "t-string",
        };
        f.write_str(name)
    }
}
/// The result of decoding a backslash escape sequence.
enum EscapedChar {
    /// The escape resolved to this single character (e.g. `\n` -> newline).
    Literal(char),
    /// An unrecognized escape: the backslash is kept, followed by this character.
    Escape(char),
}
/// A cursor-based parser over the raw content of a string-like literal that
/// decodes escape sequences.
struct StringParser {
    /// The raw content of the string e.g., the `foo` part in `"foo"`.
    source: Box<str>,
    /// Current position of the parser in the source.
    cursor: usize,
    /// Flags that can be used to query information about the string.
    flags: AnyStringFlags,
    /// The location of the first character in the source from the start of the file.
    offset: TextSize,
    /// The range of the string literal.
    range: TextRange,
}
impl StringParser {
    /// Creates a new parser over `source`, the raw content of a string literal.
    ///
    /// `offset` is the absolute file position of the first content character and
    /// `range` is the range of the entire string literal.
    fn new(source: Box<str>, flags: AnyStringFlags, offset: TextSize, range: TextRange) -> Self {
        Self {
            source,
            cursor: 0,
            flags,
            offset,
            range,
        }
    }

    /// Advances the cursor by `bytes` bytes and returns the skipped text.
    ///
    /// NOTE(review): slicing panics unless the skipped span ends on a UTF-8
    /// character boundary — callers are expected to guarantee this.
    #[inline]
    fn skip_bytes(&mut self, bytes: usize) -> &str {
        let skipped_str = &self.source[self.cursor..self.cursor + bytes];
        self.cursor += bytes;
        skipped_str
    }

    /// Returns the current position of the parser considering the offset.
    #[inline]
    fn position(&self) -> TextSize {
        self.compute_position(self.cursor)
    }

    /// Computes the position of the cursor considering the offset.
    #[inline]
    fn compute_position(&self, cursor: usize) -> TextSize {
        self.offset + TextSize::try_from(cursor).unwrap()
    }

    /// Returns the next byte in the string, if there is one.
    ///
    /// # Panics
    ///
    /// When the next byte is a part of a multi-byte character.
    #[inline]
    fn next_byte(&mut self) -> Option<u8> {
        self.source[self.cursor..].as_bytes().first().map(|&byte| {
            self.cursor += 1;
            byte
        })
    }

    /// Returns the next character, advancing the cursor past it.
    #[inline]
    fn next_char(&mut self) -> Option<char> {
        self.source[self.cursor..].chars().next().inspect(|c| {
            self.cursor += c.len_utf8();
        })
    }

    /// Returns the next byte without advancing the cursor.
    #[inline]
    fn peek_byte(&self) -> Option<u8> {
        self.source[self.cursor..].as_bytes().first().copied()
    }

    /// Parses a fixed-width hexadecimal escape (`\xhh`, `\uhhhh`, `\Uhhhhhhhh`)
    /// consisting of `literal_number` hex digits, returning the decoded character.
    ///
    /// Lone surrogate code points decode to `U+FFFD` rather than erroring.
    fn parse_unicode_literal(&mut self, literal_number: usize) -> Result<char, LexicalError> {
        let mut p: u32 = 0u32;
        for i in 1..=literal_number {
            let start = self.position();
            match self.next_char() {
                Some(c) => match c.to_digit(16) {
                    // Accumulate digits most-significant first.
                    Some(d) => p += d << ((literal_number - i) * 4),
                    None => {
                        return Err(LexicalError::new(
                            LexicalErrorType::UnicodeError,
                            TextRange::at(start, TextSize::try_from(c.len_utf8()).unwrap()),
                        ));
                    }
                },
                None => {
                    return Err(LexicalError::new(
                        LexicalErrorType::UnicodeError,
                        TextRange::empty(self.position()),
                    ));
                }
            }
        }
        match p {
            // Surrogate code points aren't valid `char`s; substitute the replacement character.
            0xD800..=0xDFFF => Ok(std::char::REPLACEMENT_CHARACTER),
            _ => std::char::from_u32(p).ok_or(LexicalError::new(
                LexicalErrorType::UnicodeError,
                TextRange::empty(self.position()),
            )),
        }
    }

    /// Parses an octal escape of up to three digits, the first of which is `o`.
    fn parse_octet(&mut self, o: u8) -> char {
        let mut radix_bytes = [o, 0, 0];
        let mut len = 1;

        // Consume up to two further octal digits.
        while len < 3 {
            let Some(b'0'..=b'7') = self.peek_byte() else {
                break;
            };

            radix_bytes[len] = self.next_byte().unwrap();
            len += 1;
        }

        // OK because radix_bytes is always going to be in the ASCII range.
        let radix_str = std::str::from_utf8(&radix_bytes[..len]).expect("ASCII bytes");
        // Max value is 0o777 == 511, which is always a valid `char`.
        let value = u32::from_str_radix(radix_str, 8).unwrap();
        char::from_u32(value).unwrap()
    }

    /// Parses a named escape, `\N{NAME}`, resolving `NAME` to its Unicode character.
    fn parse_unicode_name(&mut self) -> Result<char, LexicalError> {
        let start_pos = self.position();

        let Some('{') = self.next_char() else {
            return Err(LexicalError::new(
                LexicalErrorType::MissingUnicodeLbrace,
                TextRange::empty(start_pos),
            ));
        };

        // Re-anchor to the first character of the name (just after `{`).
        let start_pos = self.position();
        let Some(close_idx) = self.source[self.cursor..].find('}') else {
            return Err(LexicalError::new(
                LexicalErrorType::MissingUnicodeRbrace,
                TextRange::empty(self.compute_position(self.source.len())),
            ));
        };

        let name_and_ending = self.skip_bytes(close_idx + 1);
        let name = &name_and_ending[..name_and_ending.len() - 1];

        unicode_names2::character(name).ok_or_else(|| {
            LexicalError::new(
                LexicalErrorType::UnicodeError,
                // The cursor is right after the `}` character, so we subtract 1 to get the correct
                // range of the unicode name.
                TextRange::new(
                    start_pos,
                    self.compute_position(self.cursor - '}'.len_utf8()),
                ),
            )
        })
    }

    /// Parse an escaped character, returning the new character.
    ///
    /// Called with the cursor positioned right after a backslash. Returns `Ok(None)`
    /// for a line-continuation escape (backslash followed by a newline), which
    /// contributes no character.
    fn parse_escaped_char(&mut self) -> Result<Option<EscapedChar>, LexicalError> {
        let Some(first_char) = self.next_char() else {
            // TODO: check when this error case happens
            return Err(LexicalError::new(
                LexicalErrorType::StringError,
                TextRange::empty(self.position()),
            ));
        };

        let new_char = match first_char {
            '\\' => '\\',
            '\'' => '\'',
            '\"' => '"',
            'a' => '\x07',
            'b' => '\x08',
            'f' => '\x0c',
            'n' => '\n',
            'r' => '\r',
            't' => '\t',
            'v' => '\x0b',
            o @ '0'..='7' => self.parse_octet(o as u8),
            'x' => self.parse_unicode_literal(2)?,
            // `\u`, `\U`, and `\N` escapes only exist in (non-byte) string literals.
            'u' if !self.flags.is_byte_string() => self.parse_unicode_literal(4)?,
            'U' if !self.flags.is_byte_string() => self.parse_unicode_literal(8)?,
            'N' if !self.flags.is_byte_string() => self.parse_unicode_name()?,
            // Special cases where the escape sequence is not a single character
            '\n' => return Ok(None),
            '\r' => {
                // Treat `\r\n` as a single line continuation.
                if self.peek_byte() == Some(b'\n') {
                    self.next_byte();
                }

                return Ok(None);
            }
            _ => return Ok(Some(EscapedChar::Escape(first_char))),
        };

        Ok(Some(EscapedChar::Literal(new_char)))
    }

    /// Parses the literal part of an f-string or t-string (the content of a
    /// `F/TStringMiddle` token), handling escaped curly braces and backslash escapes.
    fn parse_interpolated_string_middle(
        mut self,
    ) -> Result<ast::InterpolatedStringLiteralElement, LexicalError> {
        // Fast-path: if the f-string or t-string doesn't contain any escape sequences, return the literal.
        let Some(mut index) = memchr::memchr3(b'{', b'}', b'\\', self.source.as_bytes()) else {
            return Ok(ast::InterpolatedStringLiteralElement {
                value: self.source,
                range: self.range,
                node_index: AtomicNodeIndex::NONE,
            });
        };

        let mut value = String::with_capacity(self.source.len());
        loop {
            // Add the characters before the escape sequence (or curly brace) to the string.
            let before_with_slash_or_brace = self.skip_bytes(index + 1);
            let before = &before_with_slash_or_brace[..before_with_slash_or_brace.len() - 1];
            value.push_str(before);

            // Add the escaped character to the string.
            match &self.source.as_bytes()[self.cursor - 1] {
                // If there are any curly braces inside a `F/TStringMiddle` token,
                // then they were escaped (i.e. `{{` or `}}`). This means that
                // we need increase the location by 2 instead of 1.
                b'{' => {
                    self.offset += TextSize::from(1);
                    value.push('{');
                }
                b'}' => {
                    self.offset += TextSize::from(1);
                    value.push('}');
                }
                // We can encounter a `\` as the last character in a `F/TStringMiddle`
                // token which is valid in this context. For example,
                //
                // ```python
                // f"\{foo} \{bar:\}"
                // # ^     ^^     ^
                // ```
                //
                // Here, the `F/TStringMiddle` token content will be "\" and " \"
                // which is invalid if we look at the content in isolation:
                //
                // ```python
                // "\"
                // ```
                //
                // However, the content is syntactically valid in the context of
                // the f/t-string because it's a substring of the entire f/t-string.
                // This is still an invalid escape sequence, but we don't want to
                // raise a syntax error as is done by the CPython parser. It might
                // be supported in the future, refer to point 3: https://peps.python.org/pep-0701/#rejected-ideas
                b'\\' => {
                    if !self.flags.is_raw_string() && self.peek_byte().is_some() {
                        match self.parse_escaped_char()? {
                            None => {}
                            Some(EscapedChar::Literal(c)) => value.push(c),
                            Some(EscapedChar::Escape(c)) => {
                                value.push('\\');
                                value.push(c);
                            }
                        }
                    } else {
                        // Raw string or trailing backslash: keep it verbatim.
                        value.push('\\');
                    }
                }
                ch => {
                    unreachable!("Expected '{{', '}}', or '\\' but got {:?}", ch);
                }
            }

            let Some(next_index) =
                memchr::memchr3(b'{', b'}', b'\\', self.source[self.cursor..].as_bytes())
            else {
                // Add the rest of the string to the value.
                let rest = &self.source[self.cursor..];
                value.push_str(rest);
                break;
            };

            index = next_index;
        }

        Ok(ast::InterpolatedStringLiteralElement {
            value: value.into_boxed_str(),
            range: self.range,
            node_index: AtomicNodeIndex::NONE,
        })
    }

    /// Parses the content of a bytes literal, rejecting non-ASCII characters and
    /// decoding escape sequences (unless it's a raw bytes literal).
    fn parse_bytes(mut self) -> Result<StringType, LexicalError> {
        if let Some(index) = self.source.as_bytes().find_non_ascii_byte() {
            // Every byte before `index` is ASCII, so the byte index equals the
            // char index here and `nth` finds the offending character.
            let ch = self.source.chars().nth(index).unwrap();
            return Err(LexicalError::new(
                LexicalErrorType::InvalidByteLiteral,
                TextRange::at(
                    self.compute_position(index),
                    TextSize::try_from(ch.len_utf8()).unwrap(),
                ),
            ));
        }

        if self.flags.is_raw_string() {
            // For raw strings, no escaping is necessary.
            return Ok(StringType::Bytes(ast::BytesLiteral {
                value: self.source.into_boxed_bytes(),
                range: self.range,
                flags: self.flags.into(),
                node_index: AtomicNodeIndex::NONE,
            }));
        }

        let Some(mut escape) = memchr::memchr(b'\\', self.source.as_bytes()) else {
            // If the string doesn't contain any escape sequences, return the owned string.
            return Ok(StringType::Bytes(ast::BytesLiteral {
                value: self.source.into_boxed_bytes(),
                range: self.range,
                flags: self.flags.into(),
                node_index: AtomicNodeIndex::NONE,
            }));
        };

        // If the string contains escape sequences, we need to parse them.
        let mut value = Vec::with_capacity(self.source.len());
        loop {
            // Add the characters before the escape sequence to the string.
            let before_with_slash = self.skip_bytes(escape + 1);
            let before = &before_with_slash[..before_with_slash.len() - 1];
            value.extend_from_slice(before.as_bytes());

            // Add the escaped character to the string.
            match self.parse_escaped_char()? {
                None => {}
                Some(EscapedChar::Literal(c)) => value.push(c as u8),
                Some(EscapedChar::Escape(c)) => {
                    value.push(b'\\');
                    value.push(c as u8);
                }
            }

            let Some(next_escape) = memchr::memchr(b'\\', self.source[self.cursor..].as_bytes())
            else {
                // Add the rest of the string to the value.
                let rest = &self.source[self.cursor..];
                value.extend_from_slice(rest.as_bytes());
                break;
            };

            // Update the position of the next escape sequence.
            escape = next_escape;
        }

        Ok(StringType::Bytes(ast::BytesLiteral {
            value: value.into_boxed_slice(),
            range: self.range,
            flags: self.flags.into(),
            node_index: AtomicNodeIndex::NONE,
        }))
    }

    /// Parses the content of a (non-bytes) string literal, decoding escape
    /// sequences unless it's a raw string.
    fn parse_string(mut self) -> Result<StringType, LexicalError> {
        if self.flags.is_raw_string() {
            // For raw strings, no escaping is necessary.
            return Ok(StringType::Str(ast::StringLiteral {
                value: self.source,
                range: self.range,
                flags: self.flags.into(),
                node_index: AtomicNodeIndex::NONE,
            }));
        }

        let Some(mut escape) = memchr::memchr(b'\\', self.source.as_bytes()) else {
            // If the string doesn't contain any escape sequences, return the owned string.
            return Ok(StringType::Str(ast::StringLiteral {
                value: self.source,
                range: self.range,
                flags: self.flags.into(),
                node_index: AtomicNodeIndex::NONE,
            }));
        };

        // If the string contains escape sequences, we need to parse them.
        let mut value = String::with_capacity(self.source.len());
        loop {
            // Add the characters before the escape sequence to the string.
            let before_with_slash = self.skip_bytes(escape + 1);
            let before = &before_with_slash[..before_with_slash.len() - 1];
            value.push_str(before);

            // Add the escaped character to the string.
            match self.parse_escaped_char()? {
                None => {}
                Some(EscapedChar::Literal(c)) => value.push(c),
                Some(EscapedChar::Escape(c)) => {
                    value.push('\\');
                    value.push(c);
                }
            }

            let Some(next_escape) = self.source[self.cursor..].find('\\') else {
                // Add the rest of the string to the value.
                let rest = &self.source[self.cursor..];
                value.push_str(rest);
                break;
            };

            // Update the position of the next escape sequence.
            escape = next_escape;
        }

        Ok(StringType::Str(ast::StringLiteral {
            value: value.into_boxed_str(),
            range: self.range,
            flags: self.flags.into(),
            node_index: AtomicNodeIndex::NONE,
        }))
    }

    /// Parses the source, dispatching on whether the literal is bytes or a string.
    fn parse(self) -> Result<StringType, LexicalError> {
        if self.flags.is_byte_string() {
            self.parse_bytes()
        } else {
            self.parse_string()
        }
    }
}
/// Parses the raw content of a string or bytes literal into a [`StringType`].
pub(crate) fn parse_string_literal(
    source: Box<str>,
    flags: AnyStringFlags,
    range: TextRange,
) -> Result<StringType, LexicalError> {
    // The content starts after the prefix and opening quote(s).
    let content_start = range.start() + flags.opener_len();
    StringParser::new(source, flags, content_start, range).parse()
}
// TODO(dhruvmanila): Move this to the new parser
/// Parses the literal part of an f-string or t-string into an
/// [`ast::InterpolatedStringLiteralElement`].
pub(crate) fn parse_interpolated_string_literal_element(
    source: Box<str>,
    flags: AnyStringFlags,
    range: TextRange,
) -> Result<ast::InterpolatedStringLiteralElement, LexicalError> {
    let parser = StringParser::new(source, flags, range.start(), range);
    parser.parse_interpolated_string_middle()
}
#[cfg(test)]
mod tests {
use ruff_python_ast::Suite;
use crate::error::LexicalErrorType;
use crate::{InterpolatedStringErrorType, ParseError, ParseErrorType, Parsed, parse_module};
const WINDOWS_EOL: &str = "\r\n";
const MAC_EOL: &str = "\r";
const UNIX_EOL: &str = "\n";
fn parse_suite(source: &str) -> Result<Suite, ParseError> {
parse_module(source).map(Parsed::into_suite)
}
fn string_parser_escaped_eol(eol: &str) -> Suite {
let source = format!(r"'text \{eol}more text'");
parse_suite(&source).unwrap()
}
#[test]
fn test_string_parser_escaped_unix_eol() {
let suite = string_parser_escaped_eol(UNIX_EOL);
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_string_parser_escaped_mac_eol() {
let suite = string_parser_escaped_eol(MAC_EOL);
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_string_parser_escaped_windows_eol() {
let suite = string_parser_escaped_eol(WINDOWS_EOL);
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring() {
let source = r#"f"{a}{ b }{{foo}}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_nested_spec() {
let source = r#"f"{foo:{spec}}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_not_nested_spec() {
let source = r#"f"{foo:spec}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_empty_fstring() {
let source = r#"f"""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_fstring_parse_self_documenting_base() {
let source = r#"f"{user=}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_fstring_parse_self_documenting_base_more() {
let source = r#"f"mix {user=} with text and {second=}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_fstring_parse_self_documenting_format() {
let source = r#"f"{user=:>10}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
fn parse_fstring_error(source: &str) -> InterpolatedStringErrorType {
parse_suite(source)
.map_err(|e| match e.error {
ParseErrorType::Lexical(LexicalErrorType::FStringError(e)) => e,
ParseErrorType::FStringError(e) => e,
e => unreachable!("Expected FStringError: {:?}", e),
})
.expect_err("Expected error")
}
#[test]
fn test_parse_invalid_fstring() {
use InterpolatedStringErrorType::{InvalidConversionFlag, LambdaWithoutParentheses};
assert_eq!(parse_fstring_error(r#"f"{5!x}""#), InvalidConversionFlag);
assert_eq!(
parse_fstring_error("f'{lambda x:{x}}'"),
LambdaWithoutParentheses
);
// NOTE: The parser produces the `LambdaWithoutParentheses` for this case, but
// since the parser only return the first error to maintain compatibility with
// the rest of the codebase, this test case fails. The `LambdaWithoutParentheses`
// error appears after the unexpected `FStringMiddle` token, which is between the
// `:` and the `{`.
// assert_eq!(parse_fstring_error("f'{lambda x: {x}}'"), LambdaWithoutParentheses);
assert!(parse_suite(r#"f"{class}""#).is_err());
}
#[test]
fn test_parse_fstring_not_equals() {
let source = r#"f"{1 != 2}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_equals() {
let source = r#"f"{42 == 42}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_self_doc_prec_space() {
let source = r#"f"{x =}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_self_doc_trailing_space() {
let source = r#"f"{x= }""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_fstring_yield_expr() {
let source = r#"f"{yield}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring() {
let source = r#"t"{a}{ b }{{foo}}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_nested_spec() {
let source = r#"t"{foo:{spec}}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_not_nested_spec() {
let source = r#"t"{foo:spec}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_empty_tstring() {
let source = r#"t"""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_tstring_parse_self_documenting_base() {
let source = r#"t"{user=}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_tstring_parse_self_documenting_base_more() {
let source = r#"t"mix {user=} with text and {second=}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_tstring_parse_self_documenting_format() {
let source = r#"t"{user=:>10}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
fn parse_tstring_error(source: &str) -> InterpolatedStringErrorType {
parse_suite(source)
.map_err(|e| match e.error {
ParseErrorType::Lexical(LexicalErrorType::TStringError(e)) => e,
ParseErrorType::TStringError(e) => e,
e => unreachable!("Expected TStringError: {:?}", e),
})
.expect_err("Expected error")
}
#[test]
fn test_parse_invalid_tstring() {
use InterpolatedStringErrorType::{InvalidConversionFlag, LambdaWithoutParentheses};
assert_eq!(parse_tstring_error(r#"t"{5!x}""#), InvalidConversionFlag);
assert_eq!(
parse_tstring_error("t'{lambda x:{x}}'"),
LambdaWithoutParentheses
);
// NOTE: The parser produces the `LambdaWithoutParentheses` for this case, but
// since the parser only return the first error to maintain compatibility with
// the rest of the codebase, this test case fails. The `LambdaWithoutParentheses`
// error appears after the unexpected `tStringMiddle` token, which is between the
// `:` and the `{`.
// assert_eq!(parse_tstring_error("f'{lambda x: {x}}'"), LambdaWithoutParentheses);
assert!(parse_suite(r#"t"{class}""#).is_err());
}
#[test]
fn test_parse_tstring_not_equals() {
let source = r#"t"{1 != 2}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_equals() {
let source = r#"t"{42 == 42}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_self_doc_prec_space() {
let source = r#"t"{x =}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_self_doc_trailing_space() {
let source = r#"t"{x= }""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_tstring_yield_expr() {
let source = r#"t"{yield}""#;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_string_concat() {
let source = "'Hello ' 'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_string_concat_1() {
let source = "'Hello ' u'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_string_concat_2() {
let source = "u'Hello ' 'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_string_concat_1() {
let source = "'Hello ' f'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_string_concat_2() {
let source = "'Hello ' f'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_string_concat_3() {
let source = "'Hello ' f'world{\"!\"}'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_string_concat_4() {
let source = "'Hello ' f'world{\"!\"}' 'again!'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_f_string_concat_1() {
let source = "u'Hello ' f'world'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_f_string_concat_2() {
let source = "u'Hello ' f'world' '!'";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_t_string_concat_1_error() {
let source = "'Hello ' t'world'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_t_string_concat_2_error() {
let source = "'Hello ' t'world'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_t_string_concat_3_error() {
let source = "'Hello ' t'world{\"!\"}'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_t_string_concat_4_error() {
let source = "'Hello ' t'world{\"!\"}' 'again!'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_t_string_concat_1_error() {
let source = "u'Hello ' t'world'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_u_t_string_concat_2_error() {
let source = "u'Hello ' t'world' '!'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_t_string_concat_1_error() {
let source = "f'Hello ' t'world'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_f_t_string_concat_2_error() {
let source = "f'Hello ' t'world' '!'";
let suite = parse_suite(source).unwrap_err();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_parse_string_triple_quotes_with_kind() {
let source = "u'''Hello, world!'''";
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_single_quoted_byte() {
// single quote
let source = r##"b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'"##;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_double_quoted_byte() {
// double quote
let source = r##"b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff""##;
let suite = parse_suite(source).unwrap();
insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_escape_char_in_byte_literal() {
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/error.rs | crates/ruff_python_parser/src/error.rs | use std::fmt::{self, Display};
use ruff_python_ast::PythonVersion;
use ruff_python_ast::token::TokenKind;
use ruff_text_size::{Ranged, TextRange};
use crate::string::InterpolatedStringKind;
/// Represents errors that occur during parsing and are
/// returned by the `parse_*` functions.
#[derive(Debug, PartialEq, Eq, Clone, get_size2::GetSize)]
pub struct ParseError {
    /// The specific kind of error that occurred.
    pub error: ParseErrorType,
    /// The byte range in the source at which the error occurred.
    pub location: TextRange,
}
// Allows a `ParseError` to be used wherever a `&ParseErrorType` is expected.
impl std::ops::Deref for ParseError {
    type Target = ParseErrorType;

    fn deref(&self) -> &Self::Target {
        &self.error
    }
}
impl std::error::Error for ParseError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // The underlying `ParseErrorType` is the cause of this error.
        Some(&self.error)
    }
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} at byte range {:?}", &self.error, self.location)
}
}
impl From<LexicalError> for ParseError {
    fn from(error: LexicalError) -> Self {
        ParseError {
            // Read the location first: struct literal fields are evaluated in the
            // order written, and `into_error()` takes the lexical error by value.
            location: error.location(),
            error: ParseErrorType::Lexical(error.into_error()),
        }
    }
}
impl Ranged for ParseError {
    // A parse error covers the source range at which it occurred.
    fn range(&self) -> TextRange {
        self.location
    }
}
impl ParseError {
    /// Consumes the error, returning its [`ParseErrorType`].
    pub fn error(self) -> ParseErrorType {
        self.error
    }
}
/// Represents the different types of errors that can occur during parsing of an f-string or t-string.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
pub enum InterpolatedStringErrorType {
    /// Expected a right brace after an opened left brace.
    UnclosedLbrace,
    /// An invalid conversion flag was encountered (only `s`, `r`, and `a` are valid).
    InvalidConversionFlag,
    /// A single right brace was encountered outside of an interpolation.
    SingleRbrace,
    /// Unterminated string.
    UnterminatedString,
    /// Unterminated triple-quoted string.
    UnterminatedTripleQuotedString,
    /// A lambda expression without parentheses was encountered.
    LambdaWithoutParentheses,
    /// Conversion flag does not immediately follow exclamation, e.g. `f"{x! r}"`.
    ConversionFlagNotImmediatelyAfterExclamation,
    /// Newline inside of a format spec for a single quoted f- or t-string.
    NewlineInFormatSpec,
}
impl std::fmt::Display for InterpolatedStringErrorType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // All messages are fixed strings, so resolve the text first and emit
        // it with a single `write_str` call.
        let message = match self {
            Self::UnclosedLbrace => "expecting `}`",
            Self::InvalidConversionFlag => "invalid conversion character",
            Self::SingleRbrace => "single `}` is not allowed",
            Self::UnterminatedString => "unterminated string",
            Self::UnterminatedTripleQuotedString => "unterminated triple-quoted string",
            Self::LambdaWithoutParentheses => {
                "lambda expressions are not allowed without parentheses"
            }
            Self::ConversionFlagNotImmediatelyAfterExclamation => {
                "conversion type must come right after the exclamation mark"
            }
            Self::NewlineInFormatSpec => {
                "newlines are not allowed in format specifiers when using single quotes"
            }
        };
        f.write_str(message)
    }
}
/// Represents the different types of errors that can occur during parsing.
#[derive(Debug, PartialEq, Eq, Clone, get_size2::GetSize)]
pub enum ParseErrorType {
    /// An unexpected error occurred.
    OtherError(String),
    /// An empty slice was found during parsing, e.g `data[]`.
    EmptySlice,
    /// An empty global names list was found during parsing.
    EmptyGlobalNames,
    /// An empty nonlocal names list was found during parsing.
    EmptyNonlocalNames,
    /// An empty delete targets list was found during parsing.
    EmptyDeleteTargets,
    /// An empty import names list was found during parsing.
    EmptyImportNames,
    /// An empty type parameter list was found during parsing.
    EmptyTypeParams,
    /// An unparenthesized named expression was found where it is not allowed.
    UnparenthesizedNamedExpression,
    /// An unparenthesized tuple expression was found where it is not allowed.
    UnparenthesizedTupleExpression,
    /// An unparenthesized generator expression was found where it is not allowed.
    UnparenthesizedGeneratorExpression,
    /// An invalid usage of a lambda expression was found.
    InvalidLambdaExpressionUsage,
    /// An invalid usage of a yield expression was found.
    InvalidYieldExpressionUsage,
    /// An invalid usage of a starred expression was found.
    InvalidStarredExpressionUsage,
    /// A star pattern was found outside a sequence pattern.
    InvalidStarPatternUsage,
    /// A parameter was found after a var-keyword (`**`) parameter.
    ParamAfterVarKeywordParam,
    /// A non-default parameter follows a default parameter.
    NonDefaultParamAfterDefaultParam,
    /// A default value was found for a `*` or `**` parameter.
    VarParameterWithDefault,
    /// A keyword argument was repeated.
    DuplicateKeywordArgumentError(String),
    /// An invalid expression was found in the assignment target.
    InvalidAssignmentTarget,
    /// An invalid expression was found in the named assignment target.
    InvalidNamedAssignmentTarget,
    /// An invalid expression was found in the annotated assignment target.
    InvalidAnnotatedAssignmentTarget,
    /// An invalid expression was found in the augmented assignment target.
    InvalidAugmentedAssignmentTarget,
    /// An invalid expression was found in the delete target.
    InvalidDeleteTarget,
    /// A positional argument was found after a keyword argument.
    PositionalAfterKeywordArgument,
    /// A positional argument was found after a keyword argument unpacking.
    PositionalAfterKeywordUnpacking,
    /// An iterable argument unpacking was found after keyword argument unpacking.
    InvalidArgumentUnpackingOrder,
    /// An invalid usage of iterable unpacking in a comprehension was found.
    IterableUnpackingInComprehension,
    /// Multiple simple statements were found in the same line without a `;` separating them.
    SimpleStatementsOnSameLine,
    /// A simple statement and a compound statement was found in the same line.
    SimpleAndCompoundStatementOnSameLine,
    /// Expected one or more keyword parameters after the `*` separator.
    ExpectedKeywordParam,
    /// Expected a real number for a complex literal pattern.
    ExpectedRealNumber,
    /// Expected an imaginary number for a complex literal pattern.
    ExpectedImaginaryNumber,
    /// Expected an expression at the current parser location.
    ExpectedExpression,
    /// The parser expected a specific token that was not found.
    ExpectedToken {
        expected: TokenKind,
        found: TokenKind,
    },
    /// An unexpected indentation was found during parsing.
    UnexpectedIndentation,
    /// The statement being parsed cannot be `async`.
    UnexpectedTokenAfterAsync(TokenKind),
    /// Ipython escape command was found.
    UnexpectedIpythonEscapeCommand,
    /// An unexpected token was found at the end of an expression parsing.
    UnexpectedExpressionToken,
    /// An f-string error containing the [`InterpolatedStringErrorType`].
    FStringError(InterpolatedStringErrorType),
    /// A t-string error containing the [`InterpolatedStringErrorType`].
    TStringError(InterpolatedStringErrorType),
    /// Parser encountered an error during lexing.
    Lexical(LexicalErrorType),
}
impl ParseErrorType {
    /// Wraps `error` in the f-string or t-string variant, according to
    /// `string_kind`.
    pub(crate) fn from_interpolated_string_error(
        error: InterpolatedStringErrorType,
        string_kind: InterpolatedStringKind,
    ) -> Self {
        let wrap = match string_kind {
            InterpolatedStringKind::FString => Self::FStringError,
            InterpolatedStringKind::TString => Self::TStringError,
        };
        wrap(error)
    }
}

impl std::error::Error for ParseErrorType {}
impl std::fmt::Display for ParseErrorType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // One arm per error kind; each message is a user-facing diagnostic
        // and must stay stable (downstream tooling may match on the text).
        match self {
            ParseErrorType::OtherError(msg) => write!(f, "{msg}"),
            ParseErrorType::ExpectedToken { found, expected } => {
                write!(f, "Expected {expected}, found {found}",)
            }
            ParseErrorType::Lexical(lex_error) => write!(f, "{lex_error}"),
            ParseErrorType::SimpleStatementsOnSameLine => {
                f.write_str("Simple statements must be separated by newlines or semicolons")
            }
            ParseErrorType::SimpleAndCompoundStatementOnSameLine => f.write_str(
                "Compound statements are not allowed on the same line as simple statements",
            ),
            ParseErrorType::UnexpectedTokenAfterAsync(kind) => {
                write!(
                    f,
                    "Expected `def`, `with` or `for` to follow `async`, found {kind}",
                )
            }
            ParseErrorType::InvalidArgumentUnpackingOrder => {
                f.write_str("Iterable argument unpacking cannot follow keyword argument unpacking")
            }
            ParseErrorType::IterableUnpackingInComprehension => {
                f.write_str("Iterable unpacking cannot be used in a comprehension")
            }
            ParseErrorType::UnparenthesizedNamedExpression => {
                f.write_str("Unparenthesized named expression cannot be used here")
            }
            ParseErrorType::UnparenthesizedTupleExpression => {
                f.write_str("Unparenthesized tuple expression cannot be used here")
            }
            ParseErrorType::UnparenthesizedGeneratorExpression => {
                f.write_str("Unparenthesized generator expression cannot be used here")
            }
            ParseErrorType::InvalidYieldExpressionUsage => {
                f.write_str("Yield expression cannot be used here")
            }
            ParseErrorType::InvalidLambdaExpressionUsage => {
                f.write_str("Lambda expression cannot be used here")
            }
            ParseErrorType::InvalidStarredExpressionUsage => {
                f.write_str("Starred expression cannot be used here")
            }
            ParseErrorType::PositionalAfterKeywordArgument => {
                f.write_str("Positional argument cannot follow keyword argument")
            }
            ParseErrorType::PositionalAfterKeywordUnpacking => {
                f.write_str("Positional argument cannot follow keyword argument unpacking")
            }
            ParseErrorType::EmptySlice => f.write_str("Expected index or slice expression"),
            ParseErrorType::EmptyGlobalNames => {
                f.write_str("Global statement must have at least one name")
            }
            ParseErrorType::EmptyNonlocalNames => {
                f.write_str("Nonlocal statement must have at least one name")
            }
            ParseErrorType::EmptyDeleteTargets => {
                f.write_str("Delete statement must have at least one target")
            }
            ParseErrorType::EmptyImportNames => {
                f.write_str("Expected one or more symbol names after import")
            }
            ParseErrorType::EmptyTypeParams => f.write_str("Type parameter list cannot be empty"),
            ParseErrorType::ParamAfterVarKeywordParam => {
                f.write_str("Parameter cannot follow var-keyword parameter")
            }
            ParseErrorType::NonDefaultParamAfterDefaultParam => {
                f.write_str("Parameter without a default cannot follow a parameter with a default")
            }
            ParseErrorType::ExpectedKeywordParam => {
                f.write_str("Expected one or more keyword parameter after `*` separator")
            }
            ParseErrorType::VarParameterWithDefault => {
                f.write_str("Parameter with `*` or `**` cannot have default value")
            }
            ParseErrorType::InvalidStarPatternUsage => {
                f.write_str("Star pattern cannot be used here")
            }
            ParseErrorType::ExpectedRealNumber => {
                f.write_str("Expected a real number in complex literal pattern")
            }
            ParseErrorType::ExpectedImaginaryNumber => {
                f.write_str("Expected an imaginary number in complex literal pattern")
            }
            ParseErrorType::ExpectedExpression => f.write_str("Expected an expression"),
            ParseErrorType::UnexpectedIndentation => f.write_str("Unexpected indentation"),
            ParseErrorType::InvalidAssignmentTarget => f.write_str("Invalid assignment target"),
            ParseErrorType::InvalidAnnotatedAssignmentTarget => {
                f.write_str("Invalid annotated assignment target")
            }
            ParseErrorType::InvalidNamedAssignmentTarget => {
                f.write_str("Assignment expression target must be an identifier")
            }
            ParseErrorType::InvalidAugmentedAssignmentTarget => {
                f.write_str("Invalid augmented assignment target")
            }
            ParseErrorType::InvalidDeleteTarget => f.write_str("Invalid delete target"),
            ParseErrorType::DuplicateKeywordArgumentError(arg_name) => {
                write!(f, "Duplicate keyword argument {arg_name:?}")
            }
            ParseErrorType::UnexpectedIpythonEscapeCommand => {
                f.write_str("IPython escape commands are only allowed in `Mode::Ipython`")
            }
            ParseErrorType::FStringError(fstring_error) => {
                write!(f, "f-string: {fstring_error}")
            }
            ParseErrorType::TStringError(tstring_error) => {
                write!(f, "t-string: {tstring_error}")
            }
            ParseErrorType::UnexpectedExpressionToken => {
                write!(f, "Unexpected token at the end of an expression")
            }
        }
    }
}
/// Represents an error that occur during lexing and are
/// returned by the `parse_*` functions in the iterator in the
/// [lexer] implementation.
///
/// [lexer]: crate::lexer
#[derive(Debug, Clone, PartialEq)]
pub struct LexicalError {
    /// The type of error that occurred.
    error: LexicalErrorType,
    /// The byte range of the offending source text.
    location: TextRange,
}

impl LexicalError {
    /// Creates a new `LexicalError` with the given error type and location.
    pub fn new(error: LexicalErrorType, location: TextRange) -> Self {
        Self { error, location }
    }

    /// Returns a reference to the underlying [`LexicalErrorType`].
    pub fn error(&self) -> &LexicalErrorType {
        &self.error
    }

    /// Consumes the error and returns the underlying [`LexicalErrorType`].
    pub fn into_error(self) -> LexicalErrorType {
        self.error
    }

    /// Returns the byte range at which the error occurred.
    pub fn location(&self) -> TextRange {
        self.location
    }
}
impl std::ops::Deref for LexicalError {
    type Target = LexicalErrorType;

    /// Dereferences to the underlying [`LexicalErrorType`].
    fn deref(&self) -> &Self::Target {
        &self.error
    }
}

impl std::error::Error for LexicalError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&self.error)
    }
}

impl std::fmt::Display for LexicalError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let offset = u32::from(self.location.start());
        write!(f, "{} at byte offset {offset}", self.error)
    }
}
/// Represents the different types of errors that can occur during lexing.
#[derive(Debug, Clone, PartialEq, Eq, get_size2::GetSize)]
pub enum LexicalErrorType {
    // TODO: Can probably be removed, the places it is used seem to be able
    // to use the `UnicodeError` variant instead.
    #[doc(hidden)]
    StringError,
    /// A string literal without the closing quote.
    UnclosedStringError,
    /// Decoding of a unicode escape sequence in a string literal failed.
    UnicodeError,
    /// Missing the `{` for a unicode escape sequence.
    MissingUnicodeLbrace,
    /// Missing the `}` for a unicode escape sequence.
    MissingUnicodeRbrace,
    /// The indentation is not consistent.
    IndentationError,
    /// An unrecognized token was encountered.
    UnrecognizedToken { tok: char },
    /// An f-string error containing the [`InterpolatedStringErrorType`].
    FStringError(InterpolatedStringErrorType),
    /// A t-string error containing the [`InterpolatedStringErrorType`].
    TStringError(InterpolatedStringErrorType),
    /// A non-ASCII character was encountered in a byte literal.
    InvalidByteLiteral,
    /// An unexpected character was encountered after a line continuation.
    LineContinuationError,
    /// An unexpected end of file was encountered while lexing.
    Eof,
    /// An unexpected error occurred.
    OtherError(Box<str>),
}
impl std::error::Error for LexicalErrorType {}

impl LexicalErrorType {
    /// Wraps `error` in the f-string or t-string variant, according to
    /// `string_kind`.
    pub(crate) fn from_interpolated_string_error(
        error: InterpolatedStringErrorType,
        string_kind: InterpolatedStringKind,
    ) -> Self {
        let wrap = match string_kind {
            InterpolatedStringKind::FString => Self::FStringError,
            InterpolatedStringKind::TString => Self::TStringError,
        };
        wrap(error)
    }
}
impl std::fmt::Display for LexicalErrorType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Variants carrying data are rendered with `write!`; every other
        // message is a fixed string emitted via `write_str`.
        match self {
            Self::FStringError(error) => write!(f, "f-string: {error}"),
            Self::TStringError(error) => write!(f, "t-string: {error}"),
            Self::UnrecognizedToken { tok } => write!(f, "Got unexpected token {tok}"),
            Self::OtherError(msg) => f.write_str(msg),
            Self::StringError => f.write_str("Got unexpected string"),
            Self::InvalidByteLiteral => {
                f.write_str("bytes can only contain ASCII literal characters")
            }
            Self::UnicodeError => f.write_str("Got unexpected unicode"),
            Self::IndentationError => {
                f.write_str("unindent does not match any outer indentation level")
            }
            Self::LineContinuationError => {
                f.write_str("Expected a newline after line continuation character")
            }
            Self::Eof => f.write_str("unexpected EOF while parsing"),
            Self::UnclosedStringError => f.write_str("missing closing quote in string literal"),
            Self::MissingUnicodeLbrace => f.write_str("Missing `{` in Unicode escape sequence"),
            Self::MissingUnicodeRbrace => f.write_str("Missing `}` in Unicode escape sequence"),
        }
    }
}
/// Represents a version-related syntax error detected during parsing.
///
/// An example of a version-related error is the use of a `match` statement before Python 3.10, when
/// it was first introduced. See [`UnsupportedSyntaxErrorKind`] for other kinds of errors.
#[derive(Debug, PartialEq, Clone, get_size2::GetSize)]
pub struct UnsupportedSyntaxError {
    /// The kind of unsupported syntax that was encountered.
    pub kind: UnsupportedSyntaxErrorKind,
    /// The source range of the offending syntax.
    pub range: TextRange,
    /// The target [`PythonVersion`] for which this error was detected.
    pub target_version: PythonVersion,
}

impl Ranged for UnsupportedSyntaxError {
    fn range(&self) -> TextRange {
        self.range
    }
}
/// The type of tuple unpacking for [`UnsupportedSyntaxErrorKind::StarTuple`].
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, get_size2::GetSize)]
pub enum StarTupleKind {
    /// Unparenthesized tuple unpacking in a `return` statement.
    Return,
    /// Unparenthesized tuple unpacking in a `yield` expression.
    Yield,
}

/// The type of PEP 701 f-string error for [`UnsupportedSyntaxErrorKind::Pep701FString`].
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, get_size2::GetSize)]
pub enum FStringKind {
    /// An escape character (backslash) inside an f-string expression.
    Backslash,
    /// A comment inside an f-string expression.
    Comment,
    /// The enclosing quote character reused inside a nested f-string.
    NestedQuote,
}

/// The syntactic position of the named expression for
/// [`UnsupportedSyntaxErrorKind::UnparenthesizedNamedExpr`].
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, get_size2::GetSize)]
pub enum UnparenthesizedNamedExprKind {
    /// Used as a sequence index, e.g. `lst[x := 1]`.
    SequenceIndex,
    /// Used in a set literal, e.g. `{x := 1, 2, 3}`.
    SetLiteral,
    /// Used in a set comprehension, e.g. `{last := x for x in range(3)}`.
    SetComprehension,
}
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, get_size2::GetSize)]
pub enum UnsupportedSyntaxErrorKind {
    /// Represents the use of a `match` statement before Python 3.10, when it was first
    /// introduced.
    Match,
    /// Represents the use of a named assignment expression (`:=`, [PEP 572]) before it
    /// was introduced in Python 3.8.
    ///
    /// [PEP 572]: https://peps.python.org/pep-0572/
    Walrus,
    /// Represents the use of an `except*` clause ([PEP 654]) before it was introduced in
    /// Python 3.11.
    ///
    /// [PEP 654]: https://peps.python.org/pep-0654/
    ExceptStar,
    /// Represents the use of an unparenthesized named expression (`:=`) in a set literal, set
    /// comprehension, or sequence index before Python 3.10.
    ///
    /// ## Examples
    ///
    /// These are allowed on Python 3.10:
    ///
    /// ```python
    /// {x := 1, 2, 3}                 # set literal
    /// {last := x for x in range(3)}  # set comprehension
    /// lst[x := 1]                    # sequence index
    /// ```
    ///
    /// But on Python 3.9 the named expression needs to be parenthesized:
    ///
    /// ```python
    /// {(x := 1), 2, 3}                 # set literal
    /// {(last := x) for x in range(3)}  # set comprehension
    /// lst[(x := 1)]                    # sequence index
    /// ```
    ///
    /// However, unparenthesized named expressions are never allowed in slices:
    ///
    /// ```python
    /// lst[x:=1:-1]  # syntax error
    /// lst[1:x:=1]   # syntax error
    /// lst[1:3:x:=1] # syntax error
    ///
    /// lst[(x:=1):-1]  # ok
    /// lst[1:(x:=1)]   # ok
    /// lst[1:3:(x:=1)] # ok
    /// ```
    ///
    /// ## References
    ///
    /// - [Python 3.10 Other Language Changes](https://docs.python.org/3/whatsnew/3.10.html#other-language-changes)
    UnparenthesizedNamedExpr(UnparenthesizedNamedExprKind),

    /// Represents the use of a parenthesized keyword argument name after Python 3.8.
    ///
    /// ## Example
    ///
    /// From [BPO 34641] it sounds like this was only accidentally supported and was removed when
    /// noticed. Code like this used to be valid:
    ///
    /// ```python
    /// f((a)=1)
    /// ```
    ///
    /// After Python 3.8, you have to omit the parentheses around `a`:
    ///
    /// ```python
    /// f(a=1)
    /// ```
    ///
    /// [BPO 34641]: https://github.com/python/cpython/issues/78822
    ParenthesizedKeywordArgumentName,

    /// Represents the use of unparenthesized tuple unpacking in a `return` statement or `yield`
    /// expression before Python 3.8.
    ///
    /// ## Examples
    ///
    /// Before Python 3.8, this syntax was allowed:
    ///
    /// ```python
    /// rest = (4, 5, 6)
    ///
    /// def f():
    ///     t = 1, 2, 3, *rest
    ///     return t
    ///
    /// def g():
    ///     t = 1, 2, 3, *rest
    ///     yield t
    /// ```
    ///
    /// But this was not:
    ///
    /// ```python
    /// rest = (4, 5, 6)
    ///
    /// def f():
    ///     return 1, 2, 3, *rest
    ///
    /// def g():
    ///     yield 1, 2, 3, *rest
    /// ```
    ///
    /// Instead, parentheses were required in the `return` and `yield` cases:
    ///
    /// ```python
    /// rest = (4, 5, 6)
    ///
    /// def f():
    ///     return (1, 2, 3, *rest)
    ///
    /// def g():
    ///     yield (1, 2, 3, *rest)
    /// ```
    ///
    /// This was reported in [BPO 32117] and updated in Python 3.8 to allow the unparenthesized
    /// form.
    ///
    /// [BPO 32117]: https://github.com/python/cpython/issues/76298
    StarTuple(StarTupleKind),

    /// Represents the use of a "relaxed" [PEP 614] decorator before Python 3.9.
    ///
    /// ## Examples
    ///
    /// Prior to Python 3.9, decorators were defined to be [`dotted_name`]s, optionally followed by
    /// an argument list. For example:
    ///
    /// ```python
    /// @buttons.clicked.connect
    /// def foo(): ...
    ///
    /// @buttons.clicked.connect(1, 2, 3)
    /// def foo(): ...
    /// ```
    ///
    /// As pointed out in the PEP, this prevented reasonable extensions like subscripts:
    ///
    /// ```python
    /// buttons = [QPushButton(f'Button {i}') for i in range(10)]
    ///
    /// @buttons[0].clicked.connect
    /// def spam(): ...
    /// ```
    ///
    /// Python 3.9 removed these restrictions and expanded the [decorator grammar] to include any
    /// assignment expression and include cases like the example above.
    ///
    /// [PEP 614]: https://peps.python.org/pep-0614/
    /// [`dotted_name`]: https://docs.python.org/3.8/reference/compound_stmts.html#grammar-token-dotted-name
    /// [decorator grammar]: https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-decorator
    RelaxedDecorator(RelaxedDecoratorError),

    /// Represents the use of a [PEP 570] positional-only parameter before Python 3.8.
    ///
    /// ## Examples
    ///
    /// Python 3.8 added the `/` syntax for marking preceding parameters as positional-only:
    ///
    /// ```python
    /// def foo(a, b, /, c): ...
    /// ```
    ///
    /// This means `a` and `b` in this case can only be provided by position, not by name. In other
    /// words, this code results in a `TypeError` at runtime:
    ///
    /// ```pycon
    /// >>> def foo(a, b, /, c): ...
    /// ...
    /// >>> foo(a=1, b=2, c=3)
    /// Traceback (most recent call last):
    ///   File "<python-input-3>", line 1, in <module>
    ///     foo(a=1, b=2, c=3)
    ///     ~~~^^^^^^^^^^^^^^^
    /// TypeError: foo() got some positional-only arguments passed as keyword arguments: 'a, b'
    /// ```
    ///
    /// [PEP 570]: https://peps.python.org/pep-0570/
    PositionalOnlyParameter,

    /// Represents the use of a [type parameter list] before Python 3.12.
    ///
    /// ## Examples
    ///
    /// Before Python 3.12, generic parameters had to be declared separately using a class like
    /// [`typing.TypeVar`], which could then be used in a function or class definition:
    ///
    /// ```python
    /// from typing import Generic, TypeVar
    ///
    /// T = TypeVar("T")
    ///
    /// def f(t: T): ...
    /// class C(Generic[T]): ...
    /// ```
    ///
    /// [PEP 695], included in Python 3.12, introduced the new type parameter syntax, which allows
    /// these to be written more compactly and without a separate type variable:
    ///
    /// ```python
    /// def f[T](t: T): ...
    /// class C[T]: ...
    /// ```
    ///
    /// [type parameter list]: https://docs.python.org/3/reference/compound_stmts.html#type-parameter-lists
    /// [PEP 695]: https://peps.python.org/pep-0695/
    /// [`typing.TypeVar`]: https://docs.python.org/3/library/typing.html#typevar
    TypeParameterList,
    /// Represents the use of a `type` alias statement ([PEP 695]) before it was introduced in
    /// Python 3.12.
    ///
    /// [PEP 695]: https://peps.python.org/pep-0695/
    TypeAliasStatement,
    /// Represents the use of a type parameter default ([PEP 696]) before it was introduced in
    /// Python 3.13.
    ///
    /// [PEP 696]: https://peps.python.org/pep-0696/
    TypeParamDefault,

    /// Represents the use of a [PEP 701] f-string before Python 3.12.
    ///
    /// ## Examples
    ///
    /// As described in the PEP, each of these cases were invalid before Python 3.12:
    ///
    /// ```python
    /// # nested quotes
    /// f'Magic wand: { bag['wand'] }'
    ///
    /// # escape characters
    /// f"{'\n'.join(a)}"
    ///
    /// # comments
    /// f'''A complex trick: {
    ///     bag['bag']  # recursive bags!
    /// }'''
    ///
    /// # arbitrary nesting
    /// f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}"
    /// ```
    ///
    /// These restrictions were lifted in Python 3.12, meaning that all of these examples are now
    /// valid.
    ///
    /// [PEP 701]: https://peps.python.org/pep-0701/
    Pep701FString(FStringKind),

    /// Represents the use of a parenthesized `with` item before Python 3.9.
    ///
    /// ## Examples
    ///
    /// As described in [BPO 12782], `with` uses like this were not allowed on Python 3.8:
    ///
    /// ```python
    /// with (open("a_really_long_foo") as foo,
    ///       open("a_really_long_bar") as bar):
    ///     pass
    /// ```
    ///
    /// because parentheses were not allowed within the `with` statement itself (see [this comment]
    /// in particular). However, parenthesized expressions were still allowed, including the cases
    /// below, so the issue can be pretty subtle and relates specifically to parenthesized items
    /// with `as` bindings.
    ///
    /// ```python
    /// with (foo, bar): ...         # okay
    /// with (
    ///     open('foo.txt')) as foo: ...  # also okay
    /// with (
    ///     foo,
    ///     bar,
    ///     baz,
    /// ): ...  # also okay, just a tuple
    /// with (
    ///     foo,
    ///     bar,
    ///     baz,
    /// ) as tup: ...  # also okay, binding the tuple
    /// ```
    ///
    /// This restriction was lifted in 3.9 but formally included in the [release notes] for 3.10.
    ///
    /// [BPO 12782]: https://github.com/python/cpython/issues/56991
    /// [this comment]: https://github.com/python/cpython/issues/56991#issuecomment-1093555141
    /// [release notes]: https://docs.python.org/3/whatsnew/3.10.html#summary-release-highlights
    ParenthesizedContextManager,

    /// Represents the use of a [PEP 646] star expression in an index.
    ///
    /// ## Examples
    ///
    /// Before Python 3.11, star expressions were not allowed in index/subscript operations (within
    /// square brackets). This restriction was lifted in [PEP 646] to allow for star-unpacking of
    /// `typing.TypeVarTuple`s, also added in Python 3.11. As such, this is the primary motivating
    /// example from the PEP:
    ///
    /// ```python
    /// from typing import TypeVar, TypeVarTuple
    ///
    /// DType = TypeVar('DType')
    /// Shape = TypeVarTuple('Shape')
    ///
    /// class Array(Generic[DType, *Shape]): ...
    /// ```
    ///
    /// But it applies to simple indexing as well:
    ///
    /// ```python
    /// vector[*x]
    /// array[a, *b]
    /// ```
    ///
    /// [PEP 646]: https://peps.python.org/pep-0646/#change-1-star-expressions-in-indexes
    StarExpressionInIndex,

    /// Represents the use of a [PEP 646] star annotations in a function definition.
    ///
    /// ## Examples
    ///
    /// Before Python 3.11, star annotations were not allowed in function definitions. This
    /// restriction was lifted in [PEP 646] to allow type annotations for `typing.TypeVarTuple`,
    /// also added in Python 3.11:
    ///
    /// ```python
    /// from typing import TypeVarTuple
    ///
    /// Ts = TypeVarTuple('Ts')
    ///
    /// def foo(*args: *Ts): ...
    /// ```
    ///
    /// Unlike [`UnsupportedSyntaxErrorKind::StarExpressionInIndex`], this does not include any
    /// other annotation positions:
    ///
    /// ```python
    /// x: *Ts           # Syntax error
    /// def foo(x: *Ts): ...  # Syntax error
    /// ```
    ///
    /// [PEP 646]: https://peps.python.org/pep-0646/#change-2-args-as-a-typevartuple
    StarAnnotation,

    /// Represents the use of tuple unpacking in a `for` statement iterator clause before Python
    /// 3.9.
    ///
    /// ## Examples
    ///
    /// Like [`UnsupportedSyntaxErrorKind::StarTuple`] in `return` and `yield` statements, prior to
    /// Python 3.9, tuple unpacking in the iterator clause of a `for` statement required
    /// parentheses:
    ///
    /// ```python
    /// # valid on Python 3.8 and earlier
    /// for i in (*a, *b): ...
    /// ```
    ///
    /// Omitting the parentheses was invalid:
    ///
    /// ```python
    /// for i in *a, *b: ...  # SyntaxError
    /// ```
    ///
    /// This was changed as part of the [PEG parser rewrite] included in Python 3.9 but not
    /// documented directly until the [Python 3.11 release].
    ///
    /// [PEG parser rewrite]: https://peps.python.org/pep-0617/
    /// [Python 3.11 release]: https://docs.python.org/3/whatsnew/3.11.html#other-language-changes
    UnparenthesizedUnpackInFor,

    /// Represents the use of multiple exception names in an except clause without an `as` binding, before Python 3.14.
    ///
    /// ## Examples
    /// Before Python 3.14, catching multiple exceptions required
    /// parentheses like so:
    ///
    /// ```python
    /// try:
    ///     ...
    /// except (ExceptionA, ExceptionB, ExceptionC):
    ///     ...
    /// ```
    ///
    /// Starting with Python 3.14, thanks to [PEP 758], it was permitted
    /// to omit the parentheses:
    ///
    /// ```python
    /// try:
    ///     ...
    /// except ExceptionA, ExceptionB, ExceptionC:
    ///     ...
    /// ```
    ///
    /// However, parentheses are still required in the presence of an `as`:
    ///
    /// ```python
    /// try:
    ///     ...
    /// except (ExceptionA, ExceptionB, ExceptionC) as e:
    ///     ...
    /// ```
    ///
    ///
    /// [PEP 758]: https://peps.python.org/pep-0758/
    UnparenthesizedExceptionTypes,
    /// Represents the use of a template string (t-string)
    /// literal prior to the implementation of [PEP 750]
    /// in Python 3.14.
    ///
    /// [PEP 750]: https://peps.python.org/pep-0750/
    TemplateStrings,
}
impl Display for UnsupportedSyntaxError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = match self.kind {
UnsupportedSyntaxErrorKind::Match => "Cannot use `match` statement",
UnsupportedSyntaxErrorKind::Walrus => "Cannot use named assignment expression (`:=`)",
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/semantic_errors.rs | crates/ruff_python_parser/src/semantic_errors.rs | //! [`SemanticSyntaxChecker`] for AST-based syntax errors.
//!
//! This checker is not responsible for traversing the AST itself. Instead, its
//! [`SemanticSyntaxChecker::visit_stmt`] and [`SemanticSyntaxChecker::visit_expr`] methods should
//! be called in a parent `Visitor`'s `visit_stmt` and `visit_expr` methods, respectively.
use ruff_python_ast::{
self as ast, Expr, ExprContext, IrrefutablePatternKind, Pattern, PythonVersion, Stmt, StmtExpr,
StmtFunctionDef, StmtImportFrom,
comparable::ComparableExpr,
helpers,
visitor::{Visitor, walk_expr, walk_stmt},
};
use ruff_text_size::{Ranged, TextRange, TextSize};
use rustc_hash::{FxBuildHasher, FxHashSet};
use std::fmt::Display;
/// Checker for semantic (AST-based) syntax errors, driven by an external
/// visitor that calls back into it for each statement and expression.
#[derive(Debug, Default)]
pub struct SemanticSyntaxChecker {
    /// The checker has traversed past the `__future__` import boundary.
    ///
    /// For example, the checker could be visiting `x` in:
    ///
    /// ```python
    /// from __future__ import annotations
    ///
    /// import os
    ///
    /// x: int = 1
    /// ```
    ///
    /// Python considers it a syntax error to import from `__future__` after any other
    /// non-`__future__`-importing statements.
    seen_futures_boundary: bool,

    /// The checker has traversed past the module docstring boundary (i.e. seen any statement in the
    /// module).
    seen_module_docstring_boundary: bool,
}
impl SemanticSyntaxChecker {
    /// Creates a new checker with no statements yet visited.
    pub fn new() -> Self {
        Self::default()
    }
}
impl SemanticSyntaxChecker {
fn add_error<Ctx: SemanticSyntaxContext>(
context: &Ctx,
kind: SemanticSyntaxErrorKind,
range: TextRange,
) {
context.report_semantic_error(SemanticSyntaxError {
kind,
range,
python_version: context.python_version(),
});
}
    /// Performs statement-level semantic checks on `stmt`, reporting any
    /// violations through `ctx`.
    ///
    /// NOTE(review): the `// test_ok` / `// test_err` comments below appear to
    /// name inline test fixtures — keep them in place when editing; confirm
    /// against the crate's test tooling.
    fn check_stmt<Ctx: SemanticSyntaxContext>(&mut self, stmt: &ast::Stmt, ctx: &Ctx) {
        match stmt {
            Stmt::ImportFrom(StmtImportFrom {
                range,
                module,
                level,
                names,
                ..
            }) => {
                if matches!(module.as_deref(), Some("__future__")) {
                    for name in names {
                        if !is_known_future_feature(&name.name) {
                            // test_ok valid_future_feature
                            // from __future__ import annotations

                            // test_err invalid_future_feature
                            // from __future__ import invalid_feature
                            // from __future__ import annotations, invalid_feature
                            // from __future__ import invalid_feature_1, invalid_feature_2
                            Self::add_error(
                                ctx,
                                SemanticSyntaxErrorKind::FutureFeatureNotDefined(
                                    name.name.to_string(),
                                ),
                                name.range,
                            );
                        }
                    }

                    if self.seen_futures_boundary {
                        Self::add_error(ctx, SemanticSyntaxErrorKind::LateFutureImport, *range);
                    }
                }

                for alias in names {
                    if alias.name.as_str() == "*" && !ctx.in_module_scope() {
                        // test_err import_from_star
                        // def f1():
                        //     from module import *
                        // class C:
                        //     from module import *
                        // def f2():
                        //     from ..module import *
                        // def f3():
                        //     from module import *, *

                        // test_ok import_from_star
                        // from module import *
                        Self::add_error(
                            ctx,
                            SemanticSyntaxErrorKind::NonModuleImportStar(
                                helpers::format_import_from(*level, module.as_deref()).to_string(),
                            ),
                            *range,
                        );
                        break;
                    }
                }
            }
            Stmt::Match(match_stmt) => {
                Self::irrefutable_match_case(match_stmt, ctx);
                for case in &match_stmt.cases {
                    let mut visitor = MatchPatternVisitor {
                        names: FxHashSet::default(),
                        ctx,
                    };
                    visitor.visit_pattern(&case.pattern);
                }
            }
            Stmt::FunctionDef(ast::StmtFunctionDef {
                type_params,
                parameters,
                ..
            }) => {
                if let Some(type_params) = type_params {
                    Self::duplicate_type_parameter_name(type_params, ctx);
                }
                Self::duplicate_parameter_name(parameters, ctx);
            }
            Stmt::Global(ast::StmtGlobal { names, .. }) => {
                for name in names {
                    if ctx.is_bound_parameter(name) {
                        Self::add_error(
                            ctx,
                            SemanticSyntaxErrorKind::GlobalParameter(name.to_string()),
                            name.range,
                        );
                    }
                }
            }
            Stmt::ClassDef(ast::StmtClassDef {
                type_params: Some(type_params),
                ..
            })
            | Stmt::TypeAlias(ast::StmtTypeAlias {
                type_params: Some(type_params),
                ..
            }) => {
                Self::duplicate_type_parameter_name(type_params, ctx);
                Self::type_parameter_default_order(type_params, ctx);
            }
            Stmt::Assign(ast::StmtAssign { targets, value, .. }) => {
                if let [Expr::Starred(ast::ExprStarred { range, .. })] = targets.as_slice() {
                    // test_ok single_starred_assignment_target
                    // (*a,) = (1,)
                    // *a, = (1,)
                    // [*a] = (1,)

                    // test_err single_starred_assignment_target
                    // *a = (1,)
                    Self::add_error(
                        ctx,
                        SemanticSyntaxErrorKind::SingleStarredAssignment,
                        *range,
                    );
                }

                // test_ok assign_stmt_starred_expr_value
                // _ = 4
                // _ = [4]
                // _ = (*[1],)
                // _ = *[1],

                // test_err assign_stmt_starred_expr_value
                // _ = *[42]
                // _ = *{42}
                // _ = *list()
                // _ = *(p + q)
                Self::invalid_star_expression(value, ctx);
            }
            Stmt::Return(ast::StmtReturn {
                value,
                range,
                node_index: _,
            }) => {
                if let Some(value) = value {
                    // test_err single_star_return
                    // def f(): return *x
                    Self::invalid_star_expression(value, ctx);
                }
                if !ctx.in_function_scope() {
                    Self::add_error(ctx, SemanticSyntaxErrorKind::ReturnOutsideFunction, *range);
                }
            }
            Stmt::For(ast::StmtFor {
                target,
                iter,
                is_async,
                ..
            }) => {
                // test_err single_star_for
                // for _ in *x: ...
                // for *x in xs: ...
                Self::invalid_star_expression(target, ctx);
                Self::invalid_star_expression(iter, ctx);

                if *is_async {
                    Self::await_outside_async_function(
                        ctx,
                        stmt,
                        AwaitOutsideAsyncFunctionKind::AsyncFor,
                    );
                }
            }
            Stmt::With(ast::StmtWith { is_async: true, .. }) => {
                Self::await_outside_async_function(
                    ctx,
                    stmt,
                    AwaitOutsideAsyncFunctionKind::AsyncWith,
                );
            }
            Stmt::Nonlocal(ast::StmtNonlocal { names, range, .. }) => {
                // test_ok nonlocal_declaration_at_module_level
                // def _():
                //     nonlocal x

                // test_err nonlocal_declaration_at_module_level
                // nonlocal x
                // nonlocal x, y
                if ctx.in_module_scope() {
                    Self::add_error(
                        ctx,
                        SemanticSyntaxErrorKind::NonlocalDeclarationAtModuleLevel,
                        *range,
                    );
                }
                if !ctx.in_module_scope() {
                    for name in names {
                        if !ctx.has_nonlocal_binding(name) {
                            Self::add_error(
                                ctx,
                                SemanticSyntaxErrorKind::NonlocalWithoutBinding(name.to_string()),
                                name.range,
                            );
                        }
                    }
                }
            }
            Stmt::Break(ast::StmtBreak { range, .. }) => {
                if !ctx.in_loop_context() {
                    Self::add_error(ctx, SemanticSyntaxErrorKind::BreakOutsideLoop, *range);
                }
            }
            Stmt::Continue(ast::StmtContinue { range, .. }) => {
                if !ctx.in_loop_context() {
                    Self::add_error(ctx, SemanticSyntaxErrorKind::ContinueOutsideLoop, *range);
                }
            }
            _ => {}
        }

        Self::debug_shadowing(stmt, ctx);
        Self::check_annotation(stmt, ctx);
    }
    /// Check annotation positions in `stmt` for expression kinds (named
    /// expressions, `yield`, `yield from`, `await`) that are invalid there,
    /// emitting errors through `ctx`.
    ///
    /// Which positions are checked depends on the target Python version
    /// (gated on `> PY313`), on whether `from __future__ import annotations`
    /// (or a stub file) is in effect, and on whether the definition uses
    /// type parameters.
    fn check_annotation<Ctx: SemanticSyntaxContext>(stmt: &ast::Stmt, ctx: &Ctx) {
        match stmt {
            Stmt::AnnAssign(ast::StmtAnnAssign {
                target, annotation, ..
            }) => {
                if ctx.python_version() > PythonVersion::PY313 {
                    // test_ok valid_annotation_py313
                    // # parse_options: {"target-version": "3.13"}
                    // a: (x := 1)
                    // def outer():
                    //     b: (yield 1)
                    //     c: (yield from 1)
                    // async def outer():
                    //     d: (await 1)
                    // test_err invalid_annotation_py314
                    // # parse_options: {"target-version": "3.14"}
                    // a: (x := 1)
                    // def outer():
                    //     b: (yield 1)
                    //     c: (yield from 1)
                    // async def outer():
                    //     d: (await 1)
                    let mut visitor = InvalidExpressionVisitor {
                        position: InvalidExpressionPosition::TypeAnnotation,
                        ctx,
                    };
                    visitor.visit_expr(annotation);
                }
                // An annotated name that is also declared `global` is an error in
                // any non-module scope; at module scope it is only an error when
                // the annotation appears before the `global` statement.
                if let Expr::Name(ast::ExprName { id, .. }) = target.as_ref() {
                    if let Some(global_stmt) = ctx.global(id.as_str()) {
                        let global_start = global_stmt.start();
                        if !ctx.in_module_scope() || target.start() < global_start {
                            Self::add_error(
                                ctx,
                                SemanticSyntaxErrorKind::AnnotatedGlobal(id.to_string()),
                                target.range(),
                            );
                        }
                    }
                }
            }
            Stmt::FunctionDef(ast::StmtFunctionDef {
                type_params,
                parameters,
                returns,
                ..
            }) => {
                // test_ok valid_annotation_function_py313
                // # parse_options: {"target-version": "3.13"}
                // def f() -> (y := 3): ...
                // def g(arg: (x := 1)): ...
                // def outer():
                //     def i(x: (yield 1)): ...
                //     def k() -> (yield 1): ...
                //     def m(x: (yield from 1)): ...
                //     def o() -> (yield from 1): ...
                // async def outer():
                //     def f() -> (await 1): ...
                //     def g(arg: (await 1)): ...
                // test_err invalid_annotation_function_py314
                // # parse_options: {"target-version": "3.14"}
                // def f() -> (y := 3): ...
                // def g(arg: (x := 1)): ...
                // def outer():
                //     def i(x: (yield 1)): ...
                //     def k() -> (yield 1): ...
                //     def m(x: (yield from 1)): ...
                //     def o() -> (yield from 1): ...
                // async def outer():
                //     def f() -> (await 1): ...
                //     def g(arg: (await 1)): ...
                // test_err invalid_annotation_function
                // def d[T]() -> (await 1): ...
                // def e[T](arg: (await 1)): ...
                // def f[T]() -> (y := 3): ...
                // def g[T](arg: (x := 1)): ...
                // def h[T](x: (yield 1)): ...
                // def j[T]() -> (yield 1): ...
                // def l[T](x: (yield from 1)): ...
                // def n[T]() -> (yield from 1): ...
                // def p[T: (yield 1)](): ... # yield in TypeVar bound
                // def q[T = (yield 1)](): ... # yield in TypeVar default
                // def r[*Ts = (yield 1)](): ... # yield in TypeVarTuple default
                // def s[**Ts = (yield 1)](): ... # yield in ParamSpec default
                // def t[T: (x := 1)](): ... # named expr in TypeVar bound
                // def u[T = (x := 1)](): ... # named expr in TypeVar default
                // def v[*Ts = (x := 1)](): ... # named expr in TypeVarTuple default
                // def w[**Ts = (x := 1)](): ... # named expr in ParamSpec default
                // def t[T: (await 1)](): ... # await in TypeVar bound
                // def u[T = (await 1)](): ... # await in TypeVar default
                // def v[*Ts = (await 1)](): ... # await in TypeVarTuple default
                // def w[**Ts = (await 1)](): ... # await in ParamSpec default
                let mut visitor = InvalidExpressionVisitor {
                    position: InvalidExpressionPosition::TypeAnnotation,
                    ctx,
                };
                // Type parameter bounds and defaults are visited before the
                // version/`__future__` gate below, i.e. unconditionally.
                if let Some(type_params) = type_params {
                    visitor.visit_type_params(type_params);
                }
                // the __future__ annotation error takes precedence over the generic error
                if ctx.future_annotations_or_stub() || ctx.python_version() > PythonVersion::PY313 {
                    visitor.position = InvalidExpressionPosition::TypeAnnotation;
                } else if type_params.is_some() {
                    visitor.position = InvalidExpressionPosition::GenericDefinition;
                } else {
                    // Neither deferred annotations nor type parameters: nothing
                    // to flag in the parameter/return annotations.
                    return;
                }
                for param in parameters
                    .iter()
                    .filter_map(ast::AnyParameterRef::annotation)
                {
                    visitor.visit_expr(param);
                }
                if let Some(returns) = returns {
                    visitor.visit_expr(returns);
                }
            }
            Stmt::ClassDef(ast::StmtClassDef {
                type_params: Some(type_params),
                arguments,
                ..
            }) => {
                // test_ok valid_annotation_class
                // class F(y := list): ...
                // def f():
                //     class G((yield 1)): ...
                //     class H((yield from 1)): ...
                // async def f():
                //     class G((await 1)): ...
                // test_err invalid_annotation_class
                // class F[T](y := list): ...
                // class I[T]((yield 1)): ...
                // class J[T]((yield from 1)): ...
                // class K[T: (yield 1)]: ... # yield in TypeVar
                // class L[T: (x := 1)]: ... # named expr in TypeVar
                // class M[T]((await 1)): ...
                // class N[T: (await 1)]: ...
                let mut visitor = InvalidExpressionVisitor {
                    position: InvalidExpressionPosition::TypeAnnotation,
                    ctx,
                };
                visitor.visit_type_params(type_params);
                // Base-class arguments are only checked when the class is
                // generic (this arm only matches with `Some(type_params)`).
                if let Some(arguments) = arguments {
                    visitor.position = InvalidExpressionPosition::GenericDefinition;
                    visitor.visit_arguments(arguments);
                }
            }
            Stmt::TypeAlias(ast::StmtTypeAlias {
                type_params, value, ..
            }) => {
                // test_err invalid_annotation_type_alias
                // type X[T: (yield 1)] = int # TypeVar bound
                // type X[T = (yield 1)] = int # TypeVar default
                // type X[*Ts = (yield 1)] = int # TypeVarTuple default
                // type X[**Ts = (yield 1)] = int # ParamSpec default
                // type Y = (yield 1) # yield in value
                // type Y = (x := 1) # named expr in value
                // type Y[T: (await 1)] = int # await in bound
                // type Y = (await 1) # await in value
                let mut visitor = InvalidExpressionVisitor {
                    position: InvalidExpressionPosition::TypeAlias,
                    ctx,
                };
                visitor.visit_expr(value);
                if let Some(type_params) = type_params {
                    visitor.visit_type_params(type_params);
                }
            }
            _ => {}
        }
    }
/// Emit a [`SemanticSyntaxErrorKind::InvalidStarExpression`] if `expr` is starred.
fn invalid_star_expression<Ctx: SemanticSyntaxContext>(expr: &Expr, ctx: &Ctx) {
// test_ok single_star_in_tuple
// def f(): yield (*x,)
// def f(): return (*x,)
// for _ in (*x,): ...
// for (*x,) in xs: ...
if expr.is_starred_expr() {
Self::add_error(
ctx,
SemanticSyntaxErrorKind::InvalidStarExpression,
expr.range(),
);
}
}
fn multiple_star_expression<Ctx: SemanticSyntaxContext>(
ctx: &Ctx,
expr_ctx: ExprContext,
elts: &[Expr],
range: TextRange,
) {
if expr_ctx.is_store() {
let mut has_starred = false;
for elt in elts {
if elt.is_starred_expr() {
if has_starred {
// test_err multiple_starred_assignment_target
// (*a, *b) = (1, 2)
// [*a, *b] = (1, 2)
// (*a, *b, c) = (1, 2, 3)
// [*a, *b, c] = (1, 2, 3)
// (*a, *b, (*c, *d)) = (1, 2)
// test_ok multiple_starred_assignment_target
// (*a, b) = (1, 2)
// (*_, normed), *_ = [(1,), 2]
Self::add_error(
ctx,
SemanticSyntaxErrorKind::MultipleStarredExpressions,
range,
);
return;
}
has_starred = true;
}
}
}
}
    /// Check for [`SemanticSyntaxErrorKind::WriteToDebug`] in `stmt`.
    ///
    /// Rebinding `__debug__` is flagged, so every statement-level name-binding
    /// position (function/class/type-parameter names, parameters, import
    /// aliases, and `except ... as` names) is checked via
    /// [`Self::check_identifier`].
    fn debug_shadowing<Ctx: SemanticSyntaxContext>(stmt: &ast::Stmt, ctx: &Ctx) {
        match stmt {
            Stmt::FunctionDef(ast::StmtFunctionDef {
                name,
                type_params,
                parameters,
                ..
            }) => {
                // test_err debug_shadow_function
                // def __debug__(): ... # function name
                // def f[__debug__](): ... # type parameter name
                // def f(__debug__): ... # parameter name
                Self::check_identifier(name, ctx);
                if let Some(type_params) = type_params {
                    for type_param in type_params.iter() {
                        Self::check_identifier(type_param.name(), ctx);
                    }
                }
                for parameter in parameters {
                    Self::check_identifier(parameter.name(), ctx);
                }
            }
            Stmt::ClassDef(ast::StmtClassDef {
                name, type_params, ..
            }) => {
                // test_err debug_shadow_class
                // class __debug__: ... # class name
                // class C[__debug__]: ... # type parameter name
                Self::check_identifier(name, ctx);
                if let Some(type_params) = type_params {
                    for type_param in type_params.iter() {
                        Self::check_identifier(type_param.name(), ctx);
                    }
                }
            }
            Stmt::TypeAlias(ast::StmtTypeAlias {
                type_params: Some(type_params),
                ..
            }) => {
                // test_err debug_shadow_type_alias
                // type __debug__ = list[int] # visited as an Expr but still flagged
                // type Debug[__debug__] = str
                //
                // Only the type parameters are checked here; the alias name
                // itself is visited as an `Expr` (see the test comment above).
                for type_param in type_params.iter() {
                    Self::check_identifier(type_param.name(), ctx);
                }
            }
            Stmt::Import(ast::StmtImport { names, .. })
            | Stmt::ImportFrom(ast::StmtImportFrom { names, .. }) => {
                // test_err debug_shadow_import
                // import __debug__
                // import debug as __debug__
                // from x import __debug__
                // from x import debug as __debug__
                // test_ok debug_rename_import
                // import __debug__ as debug
                // from __debug__ import Some
                // from x import __debug__ as debug
                //
                // Only the name actually bound matters: with an `as` alias the
                // alias is the binding, not the imported name.
                for name in names {
                    match &name.asname {
                        Some(asname) => Self::check_identifier(asname, ctx),
                        None => Self::check_identifier(&name.name, ctx),
                    }
                }
            }
            Stmt::Try(ast::StmtTry { handlers, .. }) => {
                // test_err debug_shadow_try
                // try: ...
                // except Exception as __debug__: ...
                for handler in handlers
                    .iter()
                    .filter_map(ast::ExceptHandler::as_except_handler)
                {
                    if let Some(name) = &handler.name {
                        Self::check_identifier(name, ctx);
                    }
                }
            }
            // test_err debug_shadow_with
            // with open("foo.txt") as __debug__: ...
            //
            // NOTE(review): `with ... as` targets are expressions, so they are
            // presumably flagged during expression checking — this arm is
            // intentionally empty.
            _ => {}
        }
    }
/// Check if `ident` is equal to `__debug__` and emit a
/// [`SemanticSyntaxErrorKind::WriteToDebug`] if so.
fn check_identifier<Ctx: SemanticSyntaxContext>(ident: &ast::Identifier, ctx: &Ctx) {
if ident.id == "__debug__" {
Self::add_error(
ctx,
SemanticSyntaxErrorKind::WriteToDebug(WriteToDebugKind::Store),
ident.range,
);
}
}
fn duplicate_type_parameter_name<Ctx: SemanticSyntaxContext>(
type_params: &ast::TypeParams,
ctx: &Ctx,
) {
if type_params.len() < 2 {
return;
}
for (i, type_param) in type_params.iter().enumerate() {
if type_params
.iter()
.take(i)
.any(|t| t.name().id == type_param.name().id)
{
// test_ok non_duplicate_type_parameter_names
// type Alias[T] = list[T]
// def f[T](t: T): ...
// class C[T]: ...
// class C[T, U, V]: ...
// type Alias[T, U: str, V: (str, bytes), *Ts, **P, D = default] = ...
// test_err duplicate_type_parameter_names
// type Alias[T, T] = ...
// def f[T, T](t: T): ...
// class C[T, T]: ...
// type Alias[T, U: str, V: (str, bytes), *Ts, **P, T = default] = ...
// def f[T, T, T](): ... # two errors
// def f[T, *T](): ... # star is still duplicate
// def f[T, **T](): ... # as is double star
Self::add_error(
ctx,
SemanticSyntaxErrorKind::DuplicateTypeParameter,
type_param.range(),
);
}
}
}
fn type_parameter_default_order<Ctx: SemanticSyntaxContext>(
type_params: &ast::TypeParams,
ctx: &Ctx,
) {
let mut seen_default = false;
for type_param in type_params.iter() {
let has_default = match type_param {
ast::TypeParam::TypeVar(ast::TypeParamTypeVar { default, .. })
| ast::TypeParam::TypeVarTuple(ast::TypeParamTypeVarTuple { default, .. })
| ast::TypeParam::ParamSpec(ast::TypeParamParamSpec { default, .. }) => {
default.is_some()
}
};
if seen_default && !has_default {
// test_err type_parameter_default_order
// class C[T = int, U]: ...
// class C[T1, T2 = int, T3, T4]: ...
// type Alias[T = int, U] = ...
Self::add_error(
ctx,
SemanticSyntaxErrorKind::TypeParameterDefaultOrder(
type_param.name().id.to_string(),
),
type_param.range(),
);
}
if has_default {
seen_default = true;
}
}
}
fn duplicate_parameter_name<Ctx: SemanticSyntaxContext>(
parameters: &ast::Parameters,
ctx: &Ctx,
) {
if parameters.len() < 2 {
return;
}
let mut all_arg_names =
FxHashSet::with_capacity_and_hasher(parameters.len(), FxBuildHasher);
for parameter in parameters {
let range = parameter.name().range();
let param_name = parameter.name().as_str();
if !all_arg_names.insert(param_name) {
// test_err params_duplicate_names
// def foo(a, a=10, *a, a, a: str, **a): ...
Self::add_error(
ctx,
SemanticSyntaxErrorKind::DuplicateParameter(param_name.to_string()),
range,
);
}
}
}
fn irrefutable_match_case<Ctx: SemanticSyntaxContext>(stmt: &ast::StmtMatch, ctx: &Ctx) {
// test_ok irrefutable_case_pattern_at_end
// match x:
// case 2: ...
// case var: ...
// match x:
// case 2: ...
// case _: ...
// match x:
// case var if True: ... # don't try to refute a guarded pattern
// case 2: ...
// test_err irrefutable_case_pattern
// match x:
// case var: ... # capture pattern
// case 2: ...
// match x:
// case _: ...
// case 2: ... # wildcard pattern
// match x:
// case var1 as var2: ... # as pattern with irrefutable left-hand side
// case 2: ...
// match x:
// case enum.variant | var: ... # or pattern with irrefutable part
// case 2: ...
for case in stmt
.cases
.iter()
.rev()
.skip(1)
.filter_map(|case| match case.guard {
Some(_) => None,
None => case.pattern.irrefutable_pattern(),
})
{
Self::add_error(
ctx,
SemanticSyntaxErrorKind::IrrefutableCasePattern(case.kind),
case.range,
);
}
}
    /// Check `stmt` for semantic syntax errors and update the checker's internal state.
    ///
    /// Note that this method should only be called when traversing `stmt` *and* its children. For
    /// example, if traversal of function bodies needs to be deferred, avoid calling `visit_stmt` on
    /// the function itself until the deferred body is visited too. Failing to defer `visit_stmt` in
    /// this case will break any internal state that depends on function scopes, such as `async`
    /// context detection.
    pub fn visit_stmt<Ctx: SemanticSyntaxContext>(&mut self, stmt: &ast::Stmt, ctx: &Ctx) {
        // check for errors
        self.check_stmt(stmt, ctx);
        // update internal state
        match stmt {
            // A leading string-literal expression (the module docstring) leaves
            // `seen_futures_boundary` untouched, so `__future__` imports may
            // still follow it.
            Stmt::Expr(StmtExpr { value, .. })
                if !self.seen_module_docstring_boundary && value.is_string_literal_expr() => {}
            Stmt::ImportFrom(StmtImportFrom { module, .. }) => {
                // Allow __future__ imports until we see a non-__future__ import.
                if !matches!(module.as_deref(), Some("__future__")) {
                    self.seen_futures_boundary = true;
                }
            }
            Stmt::FunctionDef(StmtFunctionDef { is_async, body, .. }) => {
                if *is_async {
                    // If the `async def` body contains a `yield`, any `return`
                    // recorded by `ReturnVisitor` is reported as an error.
                    let mut visitor = ReturnVisitor::default();
                    visitor.visit_body(body);
                    if visitor.has_yield {
                        if let Some(return_range) = visitor.return_range {
                            Self::add_error(
                                ctx,
                                SemanticSyntaxErrorKind::ReturnInGenerator,
                                return_range,
                            );
                        }
                    }
                }
                self.seen_futures_boundary = true;
            }
            _ => {
                // Any other statement ends the window in which `__future__`
                // imports are allowed.
                self.seen_futures_boundary = true;
            }
        }
        self.seen_module_docstring_boundary = true;
    }
/// Check `expr` for semantic syntax errors and update the checker's internal state.
pub fn visit_expr<Ctx: SemanticSyntaxContext>(&mut self, expr: &Expr, ctx: &Ctx) {
match expr {
Expr::ListComp(ast::ExprListComp {
elt, generators, ..
})
| Expr::SetComp(ast::ExprSetComp {
elt, generators, ..
}) => {
Self::check_generator_expr(elt, generators, ctx);
Self::async_comprehension_in_sync_comprehension(ctx, generators);
for generator in generators.iter().filter(|g| g.is_async) {
Self::await_outside_async_function(
ctx,
generator,
AwaitOutsideAsyncFunctionKind::AsyncComprehension,
);
}
}
Expr::DictComp(ast::ExprDictComp {
key,
value,
generators,
..
}) => {
Self::check_generator_expr(key, generators, ctx);
Self::check_generator_expr(value, generators, ctx);
Self::async_comprehension_in_sync_comprehension(ctx, generators);
for generator in generators.iter().filter(|g| g.is_async) {
Self::await_outside_async_function(
ctx,
generator,
AwaitOutsideAsyncFunctionKind::AsyncComprehension,
);
}
}
Expr::Generator(ast::ExprGenerator {
elt, generators, ..
}) => {
Self::check_generator_expr(elt, generators, ctx);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/typing.rs | crates/ruff_python_parser/src/typing.rs | //! This module takes care of parsing a type annotation.
use ruff_python_ast::relocate::relocate_expr;
use ruff_python_ast::{Expr, ExprStringLiteral, ModExpression, StringLiteral};
use ruff_text_size::Ranged;
use crate::{ParseError, Parsed, parse_expression, parse_string_annotation};
type AnnotationParseResult = Result<ParsedAnnotation, ParseError>;
/// The result of parsing a string type annotation: the parsed expression
/// together with the [`AnnotationKind`] describing how precisely the
/// annotation can be located in the original source.
#[derive(Debug)]
pub struct ParsedAnnotation {
    // The annotation parsed as a standalone expression "module".
    parsed: Parsed<ModExpression>,
    // Whether the annotation came from a simple or complex string literal.
    kind: AnnotationKind,
}
impl ParsedAnnotation {
    /// Returns the full parse result for the annotation.
    pub fn parsed(&self) -> &Parsed<ModExpression> {
        &self.parsed
    }

    /// Returns the parsed annotation expression itself.
    pub fn expression(&self) -> &Expr {
        self.parsed.expr()
    }

    /// Returns whether the annotation came from a simple or complex string
    /// literal (see [`AnnotationKind`]).
    pub fn kind(&self) -> AnnotationKind {
        self.kind
    }
}
#[derive(Copy, Clone, Debug)]
pub enum AnnotationKind {
    /// The annotation is defined as part of a simple string literal,
    /// e.g. `x: "List[int]" = []`. Annotations within simple literals
    /// can be accurately located. For example, we can underline specific
    /// expressions within the annotation and apply automatic fixes, which is
    /// not possible for complex string literals.
    Simple,
    /// The annotation is defined as part of a complex string literal, such as
    /// a literal containing an implicit concatenation or escaped characters,
    /// e.g. `x: "List" "[int]" = []`. These are comparatively rare, but valid.
    Complex,
}
impl AnnotationKind {
    /// Returns `true` for [`AnnotationKind::Simple`] annotations.
    pub const fn is_simple(self) -> bool {
        match self {
            AnnotationKind::Simple => true,
            AnnotationKind::Complex => false,
        }
    }
}
/// Parses the given string expression node as a type annotation. The given `source` is the entire
/// source code.
pub fn parse_type_annotation(
    string_expr: &ExprStringLiteral,
    source: &str,
) -> AnnotationParseResult {
    // Implicitly concatenated strings are always treated as "complex".
    let Some(string_literal) = string_expr.as_single_part_string() else {
        return parse_complex_type_annotation(string_expr);
    };
    // Compare the raw contents (without quotes) of the expression with the parsed
    // contents contained in the string literal. A mismatch (e.g. for annotations
    // containing escaped quotes) means the annotation can't be parsed in place,
    // so it's treated as complex as well.
    if &source[string_literal.content_range()] == string_literal.as_str() {
        parse_simple_type_annotation(string_literal, source)
    } else {
        parse_complex_type_annotation(string_expr)
    }
}
/// Parses an annotation whose string contents appear verbatim in `source`, so
/// the resulting ranges can point directly into the original source code.
fn parse_simple_type_annotation(
    string_literal: &StringLiteral,
    source: &str,
) -> AnnotationParseResult {
    let parsed = parse_string_annotation(source, string_literal)?;
    Ok(ParsedAnnotation {
        parsed,
        kind: AnnotationKind::Simple,
    })
}
/// Parses an annotation from a complex string literal (implicit concatenation,
/// escapes, ...) by re-parsing its cooked contents and relocating the resulting
/// expression onto the literal's source range.
fn parse_complex_type_annotation(string_expr: &ExprStringLiteral) -> AnnotationParseResult {
    parse_expression(string_expr.value.to_str()).map(|mut parsed| {
        relocate_expr(parsed.expr_mut(), string_expr.range());
        ParsedAnnotation {
            parsed,
            kind: AnnotationKind::Complex,
        }
    })
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/token.rs | crates/ruff_python_parser/src/token.rs | use ruff_python_ast::{Int, IpyEscapeKind, name::Name};
#[derive(Clone, Debug, Default)]
pub(crate) enum TokenValue {
    /// The default: the token carries no extra data beyond its kind.
    #[default]
    None,
    /// Token value for a name, commonly known as an identifier.
    ///
    /// Unicode names are NFKC-normalized by the lexer,
    /// matching [the behaviour of Python's lexer](https://docs.python.org/3/reference/lexical_analysis.html#identifiers)
    Name(Name),
    /// Token value for an integer.
    Int(Int),
    /// Token value for a floating point number.
    Float(f64),
    /// Token value for a complex number.
    Complex {
        /// The real part of the complex number.
        real: f64,
        /// The imaginary part of the complex number.
        imag: f64,
    },
    /// Token value for a string.
    String(Box<str>),
    /// Token value that includes the portion of text inside the f-string that's not
    /// part of the expression part and isn't an opening or closing brace.
    InterpolatedStringMiddle(Box<str>),
    /// Token value for IPython escape commands. These are recognized by the lexer
    /// only when the mode is [`Mode::Ipython`].
    IpyEscapeCommand {
        /// The magic command value.
        value: Box<str>,
        /// The kind of magic command.
        kind: IpyEscapeKind,
    },
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/tests.rs | crates/ruff_python_parser/src/parser/tests.rs | use crate::{Mode, ParseOptions, parse, parse_expression, parse_module};
#[test]
fn test_modes() {
    // The same chained-subscript expression must parse in both expression
    // mode and module mode.
    let source = "a[0][1][2][3][4]";
    assert!(parse(source, ParseOptions::from(Mode::Expression)).is_ok());
    assert!(parse(source, ParseOptions::from(Mode::Module)).is_ok());
}
#[test]
fn test_expr_mode_invalid_syntax1() {
    // Two names on one line don't form a single expression; snapshot the error.
    let source = "first second";
    let error = parse_expression(source).unwrap_err();
    insta::assert_debug_snapshot!(error);
}
#[test]
fn test_expr_mode_invalid_syntax2() {
    // A second line after the expression is invalid in expression mode.
    let source = r"first
second
";
    let error = parse_expression(source).unwrap_err();
    insta::assert_debug_snapshot!(error);
}
#[test]
fn test_expr_mode_invalid_syntax3() {
    // Multiple trailing lines after the expression are invalid in expression mode.
    let source = r"first
second
third
";
    let error = parse_expression(source).unwrap_err();
    insta::assert_debug_snapshot!(error);
}
#[test]
fn test_expr_mode_valid_syntax() {
    // A single expression followed by a trailing newline parses successfully.
    let source = "first
";
    let parsed = parse_expression(source).unwrap();
    insta::assert_debug_snapshot!(parsed.expr());
}
#[test]
fn test_unicode_aliases() {
    // https://github.com/RustPython/RustPython/issues/4566
    // `\N{...}` named escapes (here a name *alias*) must resolve correctly.
    let source = r#"x = "\N{BACKSPACE}another cool trick""#;
    let suite = parse_module(source).unwrap().into_suite();
    insta::assert_debug_snapshot!(suite);
}
#[test]
fn test_ipython_escape_commands() {
    // Snapshot the AST for a representative mix of IPython escape commands
    // (help, line magics, shell access, help-end magics), parsed with
    // `Mode::Ipython`.
    let parsed = parse(
        r"
# Normal Python code
(
    a
    %
    b
)
# Dynamic object info
??a.foo
?a.foo
?a.foo?
??a.foo()??
# Line magic
%timeit a = b
%timeit foo(b) % 3
%alias showPath pwd && ls -a
%timeit a =\
foo(b); b = 2
%matplotlib --inline
%matplotlib \
--inline
# System shell access
!pwd && ls -a | sed 's/^/\ /'
!pwd \
&& ls -a | sed 's/^/\\ /'
!!cd /Users/foo/Library/Application\ Support/
# Let's add some Python code to make sure that earlier escapes were handled
# correctly and that we didn't consume any of the following code as a result
# of the escapes.
def foo():
    return (
        a
        !=
        b
    )
# Transforms into `foo(..)`
/foo 1 2
;foo 1 2
,foo 1 2
# Indented escape commands
for a in range(5):
    !ls
p1 = !pwd
p2: str = !pwd
foo = %foo \
bar
% foo
foo = %foo # comment
# Help end line magics
foo?
foo.bar??
foo.bar.baz?
foo[0]??
foo[0][1]?
foo.bar[0].baz[1]??
foo.bar[0].baz[2].egg??
"
        .trim(),
        ParseOptions::from(Mode::Ipython),
    )
    .unwrap();
    insta::assert_debug_snapshot!(parsed.syntax());
}
#[test]
fn test_fstring_expr_inner_line_continuation_and_t_string() {
    // A backslash line continuation inside an f-string expression part must
    // produce a parse error.
    let source = r#"f'{\t"i}'"#;
    let parsed = parse_expression(source);
    let error = parsed.unwrap_err();
    insta::assert_debug_snapshot!(error);
}
#[test]
fn test_fstring_expr_inner_line_continuation_newline_t_string() {
    // Same as above, but with the continuation actually followed by a newline.
    let source = r#"f'{\
t"i}'"#;
    let parsed = parse_expression(source);
    let error = parsed.unwrap_err();
    insta::assert_debug_snapshot!(error);
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/pattern.rs | crates/ruff_python_parser/src/parser/pattern.rs | use ruff_python_ast::name::Name;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{
self as ast, AtomicNodeIndex, Expr, ExprContext, Number, Operator, Pattern, Singleton,
};
use ruff_text_size::{Ranged, TextSize};
use crate::ParseErrorType;
use crate::parser::progress::ParserProgress;
use crate::parser::{Parser, RecoveryContextKind, SequenceMatchPatternParentheses, recovery};
use crate::token::TokenValue;
use crate::token_set::TokenSet;
use super::expression::ExpressionContext;
/// The set of tokens that can start a literal pattern.
const LITERAL_PATTERN_START_SET: TokenSet = TokenSet::new([
    TokenKind::None,
    TokenKind::True,
    TokenKind::False,
    TokenKind::String,
    TokenKind::Int,
    TokenKind::Float,
    TokenKind::Complex,
    TokenKind::Minus, // Unary minus, so negative number literals can start a pattern
]);
/// The set of tokens that can start a pattern.
const PATTERN_START_SET: TokenSet = TokenSet::new([
    // Star pattern
    TokenKind::Star,
    // A single name token can begin several pattern kinds:
    // Capture pattern
    // Wildcard pattern ('_' is a name token)
    // Value pattern (name or attribute)
    // Class pattern
    TokenKind::Name,
    // Group pattern
    TokenKind::Lpar,
    // Sequence pattern
    TokenKind::Lsqb,
    // Mapping pattern
    TokenKind::Lbrace,
])
// Any token that can start a literal pattern can start a pattern.
.union(LITERAL_PATTERN_START_SET);
/// The set of tokens that can start a mapping pattern.
const MAPPING_PATTERN_START_SET: TokenSet = TokenSet::new([
    // Double star pattern (`**rest`)
    TokenKind::DoubleStar,
    // Value pattern
    TokenKind::Name,
])
// Mapping keys may also be literal patterns.
.union(LITERAL_PATTERN_START_SET);
impl Parser<'_> {
    /// Returns `true` if the current token is a valid start of a pattern.
    pub(super) fn at_pattern_start(&self) -> bool {
        // Soft keywords (e.g. `match`, `case`) are identifier-like and can
        // therefore begin a capture/value pattern as well.
        self.at_ts(PATTERN_START_SET) || self.at_soft_keyword()
    }
    /// Returns `true` if the current token is a valid start of a mapping pattern.
    pub(super) fn at_mapping_pattern_start(&self) -> bool {
        // Soft keywords are identifier-like, so they can start a value-pattern key.
        self.at_ts(MAPPING_PATTERN_START_SET) || self.at_soft_keyword()
    }
/// Entry point to start parsing a pattern.
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-patterns>
pub(super) fn parse_match_patterns(&mut self) -> Pattern {
let start = self.node_start();
// We don't yet know if it's a sequence pattern or a single pattern, so
// we need to allow star pattern here.
let pattern = self.parse_match_pattern(AllowStarPattern::Yes);
if self.at(TokenKind::Comma) {
Pattern::MatchSequence(self.parse_sequence_match_pattern(pattern, start, None))
} else {
// We know it's not a sequence pattern now, so check for star pattern usage.
if pattern.is_match_star() {
self.add_error(ParseErrorType::InvalidStarPatternUsage, &pattern);
}
pattern
}
}
    /// Parses an `or_pattern` or an `as_pattern`.
    ///
    /// `allow_star_pattern` only applies while the shape is still ambiguous;
    /// once the pattern turns out to be an `or` alternative or the subject of
    /// an `as` pattern, a star pattern is rejected.
    ///
    /// See: <https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-pattern>
    fn parse_match_pattern(&mut self, allow_star_pattern: AllowStarPattern) -> Pattern {
        let start = self.node_start();
        // We don't yet know if it's an or pattern or an as pattern, so use whatever
        // was passed in.
        let mut lhs = self.parse_match_pattern_lhs(allow_star_pattern);
        // Or pattern
        if self.at(TokenKind::Vbar) {
            // We know it's an `or` pattern now, so check for star pattern usage.
            if lhs.is_match_star() {
                self.add_error(ParseErrorType::InvalidStarPatternUsage, &lhs);
            }
            let mut patterns = vec![lhs];
            let mut progress = ParserProgress::default();
            // Subsequent alternatives are parsed with `AllowStarPattern::No`.
            while self.eat(TokenKind::Vbar) {
                progress.assert_progressing(self);
                let pattern = self.parse_match_pattern_lhs(AllowStarPattern::No);
                patterns.push(pattern);
            }
            lhs = Pattern::MatchOr(ast::PatternMatchOr {
                range: self.node_range(start),
                patterns,
                node_index: AtomicNodeIndex::NONE,
            });
        }
        // As pattern
        if self.eat(TokenKind::As) {
            // We know it's an `as` pattern now, so check for star pattern usage.
            if lhs.is_match_star() {
                self.add_error(ParseErrorType::InvalidStarPatternUsage, &lhs);
            }
            let ident = self.parse_identifier();
            lhs = Pattern::MatchAs(ast::PatternMatchAs {
                range: self.node_range(start),
                name: Some(ident),
                pattern: Some(Box::new(lhs)),
                node_index: AtomicNodeIndex::NONE,
            });
        }
        lhs
    }
    /// Parses a pattern.
    ///
    /// See: <https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-closed_pattern>
    fn parse_match_pattern_lhs(&mut self, allow_star_pattern: AllowStarPattern) -> Pattern {
        let start = self.node_start();
        let mut lhs = match self.current_token_kind() {
            TokenKind::Lbrace => Pattern::MatchMapping(self.parse_match_pattern_mapping()),
            TokenKind::Star => {
                // The star pattern is parsed either way so recovery can continue;
                // it's only reported as an error where it isn't allowed.
                let star_pattern = self.parse_match_pattern_star();
                if allow_star_pattern.is_no() {
                    self.add_error(ParseErrorType::InvalidStarPatternUsage, &star_pattern);
                }
                Pattern::MatchStar(star_pattern)
            }
            TokenKind::Lpar | TokenKind::Lsqb => self.parse_parenthesized_or_sequence_pattern(),
            _ => self.parse_match_pattern_literal(),
        };
        // A following `(` turns the parsed pattern into a class pattern, e.g. `Point(x=0)`.
        if self.at(TokenKind::Lpar) {
            lhs = Pattern::MatchClass(self.parse_match_pattern_class(lhs, start));
        }
        // A following `+` or `-` continues a complex-number literal pattern
        // (e.g. `1 + 2j`), which is represented as a value pattern.
        if matches!(
            self.current_token_kind(),
            TokenKind::Plus | TokenKind::Minus
        ) {
            lhs = Pattern::MatchValue(self.parse_complex_literal_pattern(lhs, start));
        }
        lhs
    }
    /// Parses a mapping pattern.
    ///
    /// # Panics
    ///
    /// If the parser isn't positioned at a `{` token.
    ///
    /// See: <https://docs.python.org/3/reference/compound_stmts.html#mapping-patterns>
    fn parse_match_pattern_mapping(&mut self) -> ast::PatternMatchMapping {
        let start = self.node_start();
        self.bump(TokenKind::Lbrace);
        let mut keys = vec![];
        let mut patterns = vec![];
        let mut rest = None;
        self.parse_comma_separated_list(RecoveryContextKind::MatchPatternMapping, |parser| {
            let mapping_item_start = parser.node_start();
            if parser.eat(TokenKind::DoubleStar) {
                let identifier = parser.parse_identifier();
                // Only a single `**rest` item is allowed per mapping pattern.
                if rest.is_some() {
                    parser.add_error(
                        ParseErrorType::OtherError(
                            "Only one double star pattern is allowed".to_string(),
                        ),
                        parser.node_range(mapping_item_start),
                    );
                }
                // TODO(dhruvmanila): It's not possible to retain multiple double starred
                // patterns because of the way the mapping node is represented in the grammar.
                // The last value will always win. Update the AST representation.
                // See: https://github.com/astral-sh/ruff/pull/10477#discussion_r1535143536
                rest = Some(identifier);
            } else {
                // Keys are restricted to value and literal patterns; singleton
                // patterns are converted back into the equivalent literal
                // expressions since the AST stores keys as `Expr`s.
                let key = match parser.parse_match_pattern_lhs(AllowStarPattern::No) {
                    Pattern::MatchValue(ast::PatternMatchValue { value, .. }) => *value,
                    Pattern::MatchSingleton(ast::PatternMatchSingleton {
                        value,
                        range,
                        node_index,
                    }) => match value {
                        Singleton::None => {
                            Expr::NoneLiteral(ast::ExprNoneLiteral { range, node_index })
                        }
                        Singleton::True => Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                            value: true,
                            range,
                            node_index,
                        }),
                        Singleton::False => Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                            value: false,
                            range,
                            node_index,
                        }),
                    },
                    pattern => {
                        parser.add_error(
                            ParseErrorType::OtherError("Invalid mapping pattern key".to_string()),
                            &pattern,
                        );
                        recovery::pattern_to_expr(pattern)
                    }
                };
                keys.push(key);
                parser.expect(TokenKind::Colon);
                patterns.push(parser.parse_match_pattern(AllowStarPattern::No));
                // A `**rest` item must be the last item in the mapping pattern.
                if rest.is_some() {
                    parser.add_error(
                        ParseErrorType::OtherError(
                            "Pattern cannot follow a double star pattern".to_string(),
                        ),
                        parser.node_range(mapping_item_start),
                    );
                }
            }
        });
        self.expect(TokenKind::Rbrace);
        ast::PatternMatchMapping {
            range: self.node_range(start),
            keys,
            patterns,
            rest,
            node_index: AtomicNodeIndex::NONE,
        }
    }
/// Parses a star pattern.
///
/// # Panics
///
/// If the parser isn't positioned at a `*` token.
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-star_pattern>
fn parse_match_pattern_star(&mut self) -> ast::PatternMatchStar {
let start = self.node_start();
self.bump(TokenKind::Star);
let ident = self.parse_identifier();
ast::PatternMatchStar {
range: self.node_range(start),
name: if ident.is_valid() && ident.id == "_" {
None
} else {
Some(ident)
},
node_index: AtomicNodeIndex::NONE,
}
}
    /// Parses a parenthesized pattern or a sequence pattern.
    ///
    /// # Panics
    ///
    /// If the parser isn't positioned at a `(` or `[` token.
    ///
    /// See: <https://docs.python.org/3/reference/compound_stmts.html#sequence-patterns>
    fn parse_parenthesized_or_sequence_pattern(&mut self) -> Pattern {
        let start = self.node_start();
        let parentheses = if self.eat(TokenKind::Lpar) {
            SequenceMatchPatternParentheses::Tuple
        } else {
            self.bump(TokenKind::Lsqb);
            SequenceMatchPatternParentheses::List
        };
        // A newline or `:` immediately after the opening bracket means the
        // closing bracket is missing; report it without consuming the token.
        if matches!(
            self.current_token_kind(),
            TokenKind::Newline | TokenKind::Colon
        ) {
            // TODO(dhruvmanila): This recovery isn't possible currently because
            // of the soft keyword transformer. If there's a missing closing
            // parenthesis, it'll consider `case` a name token instead.
            self.add_error(
                ParseErrorType::OtherError(format!(
                    "Missing '{closing}'",
                    closing = if parentheses.is_list() { "]" } else { ")" }
                )),
                self.current_token_range(),
            );
        }
        // An immediately closed `()` or `[]` is an empty sequence pattern.
        if self.eat(parentheses.closing_kind()) {
            return Pattern::MatchSequence(ast::PatternMatchSequence {
                patterns: vec![],
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            });
        }
        let mut pattern = self.parse_match_pattern(AllowStarPattern::Yes);
        // `[p]` is always a sequence, while `(p)` is just a group: a
        // single-element tuple pattern requires a trailing comma.
        if parentheses.is_list() || self.at(TokenKind::Comma) {
            pattern = Pattern::MatchSequence(self.parse_sequence_match_pattern(
                pattern,
                start,
                Some(parentheses),
            ));
        } else {
            self.expect(parentheses.closing_kind());
        }
        pattern
    }
/// Parses the rest of a sequence pattern, given the first element.
///
/// If the `parentheses` is `None`, it is an [open sequence pattern].
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#sequence-patterns>
///
/// [open sequence pattern]: https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-open_sequence_pattern
fn parse_sequence_match_pattern(
    &mut self,
    first_element: Pattern,
    start: TextSize,
    parentheses: Option<SequenceMatchPatternParentheses>,
) -> ast::PatternMatchSequence {
    if parentheses.is_some_and(|parentheses| {
        self.at(parentheses.closing_kind()) || self.peek() == parentheses.closing_kind()
    }) {
        // The comma is optional if it is a single-element sequence
        self.eat(TokenKind::Comma);
    } else {
        self.expect(TokenKind::Comma);
    }
    let mut patterns = vec![first_element];
    self.parse_comma_separated_list(
        RecoveryContextKind::SequenceMatchPattern(parentheses),
        |parser| patterns.push(parser.parse_match_pattern(AllowStarPattern::Yes)),
    );
    // Only delimited sequences need a closing token; an open sequence pattern
    // simply ends with the element list.
    if let Some(parentheses) = parentheses {
        self.expect(parentheses.closing_kind());
    }
    ast::PatternMatchSequence {
        range: self.node_range(start),
        patterns,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a literal pattern.
///
/// Also handles capture patterns (`case x:`), wildcard patterns (`case _:`),
/// and dotted value patterns (`case mod.attr:`) in its fallback arm, plus
/// error recovery for unary `+`/`-` before a number.
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-literal_pattern>
fn parse_match_pattern_literal(&mut self) -> Pattern {
    let start = self.node_start();
    match self.current_token_kind() {
        TokenKind::None => {
            self.bump(TokenKind::None);
            Pattern::MatchSingleton(ast::PatternMatchSingleton {
                value: Singleton::None,
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::True => {
            self.bump(TokenKind::True);
            Pattern::MatchSingleton(ast::PatternMatchSingleton {
                value: Singleton::True,
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::False => {
            self.bump(TokenKind::False);
            Pattern::MatchSingleton(ast::PatternMatchSingleton {
                value: Singleton::False,
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::String | TokenKind::FStringStart | TokenKind::TStringStart => {
            let str = self.parse_strings();
            Pattern::MatchValue(ast::PatternMatchValue {
                value: Box::new(str),
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Complex => {
            let TokenValue::Complex { real, imag } = self.bump_value(TokenKind::Complex) else {
                unreachable!()
            };
            let range = self.node_range(start);
            Pattern::MatchValue(ast::PatternMatchValue {
                value: Box::new(Expr::NumberLiteral(ast::ExprNumberLiteral {
                    value: Number::Complex { real, imag },
                    range,
                    node_index: AtomicNodeIndex::NONE,
                })),
                range,
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Int => {
            let TokenValue::Int(value) = self.bump_value(TokenKind::Int) else {
                unreachable!()
            };
            let range = self.node_range(start);
            Pattern::MatchValue(ast::PatternMatchValue {
                value: Box::new(Expr::NumberLiteral(ast::ExprNumberLiteral {
                    value: Number::Int(value),
                    range,
                    node_index: AtomicNodeIndex::NONE,
                })),
                range,
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Float => {
            let TokenValue::Float(value) = self.bump_value(TokenKind::Float) else {
                unreachable!()
            };
            let range = self.node_range(start);
            Pattern::MatchValue(ast::PatternMatchValue {
                value: Box::new(Expr::NumberLiteral(ast::ExprNumberLiteral {
                    value: Number::Float(value),
                    range,
                    node_index: AtomicNodeIndex::NONE,
                })),
                range,
                node_index: AtomicNodeIndex::NONE,
            })
        }
        kind => {
            // The `+` is only for better error recovery.
            if let Some(unary_arithmetic_op) = kind.as_unary_arithmetic_operator() {
                if matches!(
                    self.peek(),
                    TokenKind::Int | TokenKind::Float | TokenKind::Complex
                ) {
                    let unary_expr = self.parse_unary_expression(
                        unary_arithmetic_op,
                        ExpressionContext::default(),
                    );
                    // Parse `+1` anyway for recovery, but flag it: only `-`
                    // is allowed before a literal pattern.
                    if unary_expr.op.is_u_add() {
                        self.add_error(
                            ParseErrorType::OtherError(
                                "Unary '+' is not allowed as a literal pattern".to_string(),
                            ),
                            &unary_expr,
                        );
                    }
                    return Pattern::MatchValue(ast::PatternMatchValue {
                        value: Box::new(Expr::UnaryOp(unary_expr)),
                        range: self.node_range(start),
                        node_index: AtomicNodeIndex::NONE,
                    });
                }
            }
            if self.at_name_or_keyword() {
                if self.peek() == TokenKind::Dot {
                    // test_ok match_attr_pattern_soft_keyword
                    // match foo:
                    //     case match.bar: ...
                    //     case case.bar: ...
                    //     case type.bar: ...
                    //     case match.case.type.bar.type.case.match: ...
                    let id = Expr::Name(self.parse_name());
                    let attribute = self.parse_attr_expr_for_match_pattern(id, start);
                    Pattern::MatchValue(ast::PatternMatchValue {
                        value: Box::new(attribute),
                        range: self.node_range(start),
                        node_index: AtomicNodeIndex::NONE,
                    })
                } else {
                    // test_ok match_as_pattern_soft_keyword
                    // match foo:
                    //     case case: ...
                    // match foo:
                    //     case match: ...
                    // match foo:
                    //     case type: ...
                    let ident = self.parse_identifier();
                    // test_ok match_as_pattern
                    // match foo:
                    //     case foo_bar: ...
                    // match foo:
                    //     case _: ...
                    Pattern::MatchAs(ast::PatternMatchAs {
                        range: ident.range,
                        pattern: None,
                        // `_` is the wildcard pattern, represented without a name.
                        name: if &ident == "_" { None } else { Some(ident) },
                        node_index: AtomicNodeIndex::NONE,
                    })
                }
            } else {
                // Upon encountering an unexpected token, return a `Pattern::MatchValue` containing
                // an empty `Expr::Name`.
                self.add_error(
                    ParseErrorType::OtherError("Expected a pattern".to_string()),
                    self.current_token_range(),
                );
                let invalid_node = Expr::Name(ast::ExprName {
                    range: self.missing_node_range(),
                    id: Name::empty(),
                    ctx: ExprContext::Invalid,
                    node_index: AtomicNodeIndex::NONE,
                });
                Pattern::MatchValue(ast::PatternMatchValue {
                    range: invalid_node.range(),
                    value: Box::new(invalid_node),
                    node_index: AtomicNodeIndex::NONE,
                })
            }
        }
    }
}
/// Parses a complex literal pattern, given the `lhs` pattern and the `start`
/// position of the pattern.
///
/// # Panics
///
/// If the parser isn't positioned at a `+` or `-` token.
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#literal-patterns>
fn parse_complex_literal_pattern(
    &mut self,
    lhs: Pattern,
    start: TextSize,
) -> ast::PatternMatchValue {
    let operator = if self.eat(TokenKind::Plus) {
        Operator::Add
    } else {
        self.bump(TokenKind::Minus);
        Operator::Sub
    };
    // The left operand must be a real number literal (possibly signed)...
    let lhs_value = if let Pattern::MatchValue(lhs) = lhs {
        if !is_real_number(&lhs.value) {
            self.add_error(ParseErrorType::ExpectedRealNumber, &lhs);
        }
        lhs.value
    } else {
        self.add_error(ParseErrorType::ExpectedRealNumber, &lhs);
        Box::new(recovery::pattern_to_expr(lhs))
    };
    let rhs_pattern = self.parse_match_pattern_lhs(AllowStarPattern::No);
    // ...and the right operand an imaginary (complex) number literal.
    let rhs_value = if let Pattern::MatchValue(rhs) = rhs_pattern {
        if !is_complex_number(&rhs.value) {
            self.add_error(ParseErrorType::ExpectedImaginaryNumber, &rhs);
        }
        rhs.value
    } else {
        self.add_error(ParseErrorType::ExpectedImaginaryNumber, &rhs_pattern);
        Box::new(recovery::pattern_to_expr(rhs_pattern))
    };
    let range = self.node_range(start);
    ast::PatternMatchValue {
        value: Box::new(Expr::BinOp(ast::ExprBinOp {
            left: lhs_value,
            op: operator,
            right: rhs_value,
            range,
            node_index: AtomicNodeIndex::NONE,
        })),
        range,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Folds any run of trailing `.`-accesses onto `lhs`, producing nested
/// attribute expressions, and stops at the first token that is not a `.`.
fn parse_attr_expr_for_match_pattern(&mut self, mut lhs: Expr, start: TextSize) -> Expr {
    loop {
        if self.current_token_kind() != TokenKind::Dot {
            break lhs;
        }
        lhs = Expr::Attribute(self.parse_attribute_expression(lhs, start));
    }
}
/// Parses the [pattern arguments] in a class pattern.
///
/// # Panics
///
/// If the parser isn't positioned at a `(` token.
///
/// See: <https://docs.python.org/3/reference/compound_stmts.html#class-patterns>
///
/// [pattern arguments]: https://docs.python.org/3/reference/compound_stmts.html#grammar-token-python-grammar-pattern_arguments
fn parse_match_pattern_class(
    &mut self,
    cls: Pattern,
    start: TextSize,
) -> ast::PatternMatchClass {
    let arguments_start = self.node_start();
    // Re-interpret the already-parsed pattern as the class expression: a bare
    // capture pattern becomes a name, an attribute value pattern is used
    // as-is, and anything else is an error recovered via `pattern_to_expr`.
    let cls = match cls {
        Pattern::MatchAs(ast::PatternMatchAs {
            pattern: None,
            name: Some(ident),
            ..
        }) => {
            if ident.is_valid() {
                Box::new(Expr::Name(ast::ExprName {
                    range: ident.range(),
                    id: ident.id,
                    ctx: ExprContext::Load,
                    node_index: AtomicNodeIndex::NONE,
                }))
            } else {
                Box::new(Expr::Name(ast::ExprName {
                    range: ident.range(),
                    id: Name::empty(),
                    ctx: ExprContext::Invalid,
                    node_index: AtomicNodeIndex::NONE,
                }))
            }
        }
        Pattern::MatchValue(ast::PatternMatchValue { value, .. })
            if matches!(&*value, Expr::Attribute(_)) =>
        {
            value
        }
        pattern => {
            self.add_error(
                ParseErrorType::OtherError("Invalid value for a class pattern".to_string()),
                &pattern,
            );
            Box::new(recovery::pattern_to_expr(pattern))
        }
    };
    self.bump(TokenKind::Lpar);
    let mut patterns = vec![];
    let mut keywords = vec![];
    let mut has_seen_pattern = false;
    let mut has_seen_keyword_pattern = false;
    self.parse_comma_separated_list(
        RecoveryContextKind::MatchPatternClassArguments,
        |parser| {
            let pattern_start = parser.node_start();
            let pattern = parser.parse_match_pattern(AllowStarPattern::No);
            if parser.eat(TokenKind::Equal) {
                // `name=pattern`: a keyword pattern. The part before `=` must
                // have parsed as a bare capture (i.e., an identifier).
                has_seen_pattern = false;
                has_seen_keyword_pattern = true;
                let key = if let Pattern::MatchAs(ast::PatternMatchAs {
                    pattern: None,
                    name: Some(name),
                    ..
                }) = pattern
                {
                    name
                } else {
                    parser.add_error(
                        ParseErrorType::OtherError(
                            "Expected an identifier for the keyword pattern".to_string(),
                        ),
                        &pattern,
                    );
                    ast::Identifier {
                        id: Name::empty(),
                        range: parser.missing_node_range(),
                        node_index: AtomicNodeIndex::NONE,
                    }
                };
                let value_pattern = parser.parse_match_pattern(AllowStarPattern::No);
                keywords.push(ast::PatternKeyword {
                    attr: key,
                    pattern: value_pattern,
                    range: parser.node_range(pattern_start),
                    node_index: AtomicNodeIndex::NONE,
                });
            } else {
                has_seen_pattern = true;
                patterns.push(pattern);
            }
            // `has_seen_pattern` is reset on every keyword pattern, so this
            // flags each positional pattern appearing *after* a keyword one.
            if has_seen_keyword_pattern && has_seen_pattern {
                parser.add_error(
                    ParseErrorType::OtherError(
                        "Positional patterns cannot follow keyword patterns".to_string(),
                    ),
                    parser.node_range(pattern_start),
                );
            }
        },
    );
    self.expect(TokenKind::Rpar);
    ast::PatternMatchClass {
        cls,
        arguments: ast::PatternArguments {
            patterns,
            keywords,
            range: self.node_range(arguments_start),
            node_index: AtomicNodeIndex::NONE,
        },
        range: self.node_range(start),
        node_index: AtomicNodeIndex::NONE,
    }
}
}
/// Whether a star pattern (`*name`) is permitted at the current parse position.
#[derive(Debug, Clone, Copy)]
enum AllowStarPattern {
    Yes,
    No,
}

impl AllowStarPattern {
    /// Returns `true` when star patterns are disallowed.
    const fn is_no(self) -> bool {
        match self {
            AllowStarPattern::No => true,
            AllowStarPattern::Yes => false,
        }
    }
}
/// Returns `true` if the given expression is a real number literal or a unary
/// addition or subtraction of a real number literal.
///
/// Recurses through arbitrarily nested unary `+`/`-`, so e.g. `--1` still
/// counts as a real number for complex-literal-pattern validation.
const fn is_real_number(expr: &Expr) -> bool {
    match expr {
        Expr::NumberLiteral(ast::ExprNumberLiteral {
            value: ast::Number::Int(_) | ast::Number::Float(_),
            ..
        }) => true,
        Expr::UnaryOp(ast::ExprUnaryOp {
            op: ast::UnaryOp::UAdd | ast::UnaryOp::USub,
            operand,
            ..
        }) => is_real_number(operand),
        _ => false,
    }
}
/// Returns `true` if the given expression is a complex number literal.
const fn is_complex_number(expr: &Expr) -> bool {
    let Expr::NumberLiteral(ast::ExprNumberLiteral { value, .. }) = expr else {
        return false;
    };
    matches!(value, ast::Number::Complex { .. })
}
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{self as ast, CmpOp, Expr, ExprContext, Number};
use ruff_text_size::{Ranged, TextRange};
use crate::error::RelaxedDecoratorError;
/// Set the `ctx` for `Expr::Id`, `Expr::Attribute`, `Expr::Subscript`, `Expr::Starred`,
/// `Expr::Tuple` and `Expr::List`. If `expr` is either `Expr::Tuple` or `Expr::List`,
/// recursively sets the `ctx` for their elements.
///
/// Expressions of any other kind are left untouched (their context is always
/// a load, or they have no context at all).
pub(super) fn set_expr_ctx(expr: &mut Expr, new_ctx: ExprContext) {
    match expr {
        Expr::Name(ast::ExprName { ctx, .. })
        | Expr::Attribute(ast::ExprAttribute { ctx, .. })
        | Expr::Subscript(ast::ExprSubscript { ctx, .. }) => *ctx = new_ctx,
        Expr::Starred(ast::ExprStarred { value, ctx, .. }) => {
            *ctx = new_ctx;
            set_expr_ctx(value, new_ctx);
        }
        // A unary op has no `ctx` of its own; only its operand is updated.
        Expr::UnaryOp(ast::ExprUnaryOp { operand, .. }) => {
            set_expr_ctx(operand, new_ctx);
        }
        Expr::List(ast::ExprList { elts, ctx, .. })
        | Expr::Tuple(ast::ExprTuple { elts, ctx, .. }) => {
            *ctx = new_ctx;
            for element in elts.iter_mut() {
                set_expr_ctx(element, new_ctx);
            }
        }
        _ => {}
    }
}
/// Converts a two-token lookahead (`[current, next]`) into the corresponding
/// [`CmpOp`], or `None` if the tokens do not start a comparison operator.
///
/// The two-token arms (`is not`, `not in`) are listed before their one-token
/// prefixes so they win the match.
pub(super) const fn token_kind_to_cmp_op(tokens: [TokenKind; 2]) -> Option<CmpOp> {
    match tokens {
        [TokenKind::Is, TokenKind::Not] => Some(CmpOp::IsNot),
        [TokenKind::Is, _] => Some(CmpOp::Is),
        [TokenKind::Not, TokenKind::In] => Some(CmpOp::NotIn),
        [TokenKind::In, _] => Some(CmpOp::In),
        [TokenKind::EqEqual, _] => Some(CmpOp::Eq),
        [TokenKind::NotEqual, _] => Some(CmpOp::NotEq),
        [TokenKind::Less, _] => Some(CmpOp::Lt),
        [TokenKind::LessEqual, _] => Some(CmpOp::LtE),
        [TokenKind::Greater, _] => Some(CmpOp::Gt),
        [TokenKind::GreaterEqual, _] => Some(CmpOp::GtE),
        _ => None,
    }
}
/// Helper for `parse_decorators` to determine if `expr` is a [`dotted_name`] from the decorator
/// grammar before Python 3.9.
///
/// Returns `Some((error, range))` if `expr` is not a `dotted_name`, or `None` if it is a `dotted_name`.
///
/// [`dotted_name`]: https://docs.python.org/3.8/reference/compound_stmts.html#grammar-token-dotted-name
pub(super) fn detect_invalid_pre_py39_decorator_node(
    expr: &Expr,
) -> Option<(RelaxedDecoratorError, TextRange)> {
    let description = match expr {
        Expr::Name(_) => return None,
        // An attribute access is a valid dotted name iff the object it is
        // accessed on is itself a dotted name, so recurse into the value.
        Expr::Attribute(attribute) => {
            return detect_invalid_pre_py39_decorator_node(&attribute.value);
        }
        Expr::Call(_) => return Some((RelaxedDecoratorError::CallExpression, expr.range())),
        Expr::NumberLiteral(number) => match &number.value {
            Number::Int(_) => "an int literal",
            Number::Float(_) => "a float literal",
            Number::Complex { .. } => "a complex literal",
        },
        Expr::BoolOp(_) => "boolean expression",
        Expr::BinOp(_) => "binary-operation expression",
        Expr::UnaryOp(_) => "unary-operation expression",
        Expr::Await(_) => "`await` expression",
        Expr::Lambda(_) => "lambda expression",
        Expr::If(_) => "conditional expression",
        Expr::Dict(_) => "a dict literal",
        Expr::Set(_) => "a set literal",
        Expr::List(_) => "a list literal",
        Expr::Tuple(_) => "a tuple literal",
        Expr::Starred(_) => "starred expression",
        Expr::Slice(_) => "slice expression",
        Expr::BytesLiteral(_) => "a bytes literal",
        Expr::StringLiteral(_) => "a string literal",
        Expr::EllipsisLiteral(_) => "an ellipsis literal",
        Expr::NoneLiteral(_) => "a `None` literal",
        Expr::BooleanLiteral(_) => "a boolean literal",
        Expr::ListComp(_) => "a list comprehension",
        Expr::SetComp(_) => "a set comprehension",
        Expr::DictComp(_) => "a dict comprehension",
        Expr::Generator(_) => "generator expression",
        Expr::Yield(_) => "`yield` expression",
        Expr::YieldFrom(_) => "`yield from` expression",
        Expr::Compare(_) => "comparison expression",
        Expr::FString(_) => "f-string",
        Expr::TString(_) => "t-string",
        Expr::Named(_) => "assignment expression",
        Expr::Subscript(_) => "subscript expression",
        Expr::IpyEscapeCommand(_) => "IPython escape command",
    };
    Some((RelaxedDecoratorError::Other(description), expr.range()))
}
use std::ops::Deref;
use bitflags::bitflags;
use rustc_hash::{FxBuildHasher, FxHashSet};
use ruff_python_ast::name::Name;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{
self as ast, AnyStringFlags, AtomicNodeIndex, BoolOp, CmpOp, ConversionFlag, Expr, ExprContext,
FString, InterpolatedStringElement, InterpolatedStringElements, IpyEscapeKind, Number,
Operator, OperatorPrecedence, StringFlags, TString, UnaryOp,
};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::error::{FStringKind, StarTupleKind, UnparenthesizedNamedExprKind};
use crate::parser::progress::ParserProgress;
use crate::parser::{FunctionKind, Parser, helpers};
use crate::string::{
InterpolatedStringKind, StringType, parse_interpolated_string_literal_element,
parse_string_literal,
};
use crate::token::TokenValue;
use crate::token_set::TokenSet;
use crate::{
InterpolatedStringErrorType, Mode, ParseErrorType, UnsupportedSyntaxError,
UnsupportedSyntaxErrorKind,
};
use super::{InterpolatedStringElementsKind, Parenthesized, RecoveryContextKind};
/// A token set consisting of a newline or end of file.
const NEWLINE_EOF_SET: TokenSet = TokenSet::new([TokenKind::Newline, TokenKind::EndOfFile]);
/// Tokens that represents a literal expression.
const LITERAL_SET: TokenSet = TokenSet::new([
    TokenKind::Int,
    TokenKind::Float,
    TokenKind::Complex,
    TokenKind::String,
    TokenKind::Ellipsis,
    TokenKind::True,
    TokenKind::False,
    TokenKind::None,
]);
/// Tokens that represents either an expression or the start of one.
pub(super) const EXPR_SET: TokenSet = TokenSet::new([
    TokenKind::Name,
    TokenKind::Minus,
    TokenKind::Plus,
    TokenKind::Tilde,
    TokenKind::Star,
    TokenKind::DoubleStar,
    TokenKind::Lpar,
    TokenKind::Lbrace,
    TokenKind::Lsqb,
    TokenKind::Lambda,
    TokenKind::Await,
    TokenKind::Not,
    TokenKind::Yield,
    TokenKind::FStringStart,
    TokenKind::TStringStart,
    TokenKind::IpyEscapeCommand,
])
.union(LITERAL_SET);
/// Tokens that can appear after an expression.
pub(super) const END_EXPR_SET: TokenSet = TokenSet::new([
    // Ex) `expr` (without a newline)
    TokenKind::EndOfFile,
    // Ex) `expr`
    TokenKind::Newline,
    // Ex) `expr;`
    TokenKind::Semi,
    // Ex) `data[expr:]`
    // Ex) `def foo() -> expr:`
    // Ex) `{expr: expr}`
    TokenKind::Colon,
    // Ex) `{expr}`
    TokenKind::Rbrace,
    // Ex) `[expr]`
    TokenKind::Rsqb,
    // Ex) `(expr)`
    TokenKind::Rpar,
    // Ex) `expr,`
    TokenKind::Comma,
    // Ex)
    //
    // if True:
    //     expr
    //     # <- Dedent
    // x
    TokenKind::Dedent,
    // Ex) `expr if expr else expr`
    TokenKind::If,
    TokenKind::Else,
    // Ex) `with expr as target:`
    // Ex) `except expr as NAME:`
    TokenKind::As,
    // Ex) `raise expr from expr`
    TokenKind::From,
    // Ex) `[expr for expr in iter]`
    TokenKind::For,
    // Ex) `[expr async for expr in iter]`
    TokenKind::Async,
    // Ex) `expr in expr`
    TokenKind::In,
    // Ex) `name: expr = expr`
    // Ex) `f"{expr=}"`
    TokenKind::Equal,
    // Ex) `f"{expr!s}"`
    TokenKind::Exclamation,
]);
/// Tokens that can appear at the end of a sequence.
///
/// Same as [`END_EXPR_SET`] minus the comma, which *separates* sequence
/// elements rather than ending the sequence.
const END_SEQUENCE_SET: TokenSet = END_EXPR_SET.remove(TokenKind::Comma);
impl<'src> Parser<'src> {
/// Returns `true` if the parser is at a name or keyword (including soft keyword) token.
///
/// Used by error-recovery paths that may treat a keyword as an identifier.
pub(super) fn at_name_or_keyword(&self) -> bool {
    self.at(TokenKind::Name) || self.current_token_kind().is_keyword()
}
/// Returns `true` if the parser is at a name or soft keyword token.
///
/// Unlike [`Parser::at_name_or_keyword`], hard keywords are excluded.
pub(super) fn at_name_or_soft_keyword(&self) -> bool {
    self.at(TokenKind::Name) || self.at_soft_keyword()
}
/// Returns `true` if the parser is at a soft keyword token
/// (e.g. `match`, `case`, `type`, which may also be used as identifiers).
pub(super) fn at_soft_keyword(&self) -> bool {
    self.current_token_kind().is_soft_keyword()
}
/// Returns `true` if the current token is the start of an expression.
///
/// Soft keywords count because they can be used as plain names.
pub(super) fn at_expr(&self) -> bool {
    self.at_ts(EXPR_SET) || self.at_soft_keyword()
}
/// Returns `true` if the current token ends a sequence
/// (everything in [`END_EXPR_SET`] except the element-separating comma).
pub(super) fn at_sequence_end(&self) -> bool {
    self.at_ts(END_SEQUENCE_SET)
}
/// Parses every Python expression.
///
/// Matches the `expressions` rule in the [Python grammar]. The [`ExpressionContext`] can be
/// used to match the `star_expressions` rule.
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
pub(super) fn parse_expression_list(&mut self, context: ExpressionContext) -> ParsedExpr {
    let start = self.node_start();
    let parsed_expr = self.parse_conditional_expression_or_higher_impl(context);
    // A following comma promotes the expression to an unparenthesized tuple.
    if self.at(TokenKind::Comma) {
        Expr::Tuple(self.parse_tuple_expression(
            parsed_expr.expr,
            start,
            Parenthesized::No,
            |p| p.parse_conditional_expression_or_higher_impl(context),
        ))
        .into()
    } else {
        parsed_expr
    }
}
/// Parses every Python expression except unparenthesized tuple.
///
/// Matches the `named_expression` rule in the [Python grammar]. The [`ExpressionContext`] can
/// be used to match the `star_named_expression` rule.
///
/// NOTE: If you have expressions separated by commas and want to parse them individually
/// instead of as a tuple, as done by [`Parser::parse_expression_list`], use this function.
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
pub(super) fn parse_named_expression_or_higher(
    &mut self,
    context: ExpressionContext,
) -> ParsedExpr {
    let start = self.node_start();
    let parsed_expr = self.parse_conditional_expression_or_higher_impl(context);
    // A following `:=` makes the parsed expression the target of a walrus
    // (named) expression.
    if self.at(TokenKind::ColonEqual) {
        Expr::Named(self.parse_named_expression(parsed_expr.expr, start)).into()
    } else {
        parsed_expr
    }
}
/// Parses every Python expression except unparenthesized tuple and named expressions.
///
/// Matches the `expression` rule in the [Python grammar].
///
/// This uses the default [`ExpressionContext`]. Use
/// [`Parser::parse_conditional_expression_or_higher_impl`] if you prefer to pass in the
/// context.
///
/// NOTE: If you have expressions separated by commas and want to parse them individually
/// instead of as a tuple, as done by [`Parser::parse_expression_list`] use this function.
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
pub(super) fn parse_conditional_expression_or_higher(&mut self) -> ParsedExpr {
    // Thin wrapper: same parse with the default expression context.
    self.parse_conditional_expression_or_higher_impl(ExpressionContext::default())
}
/// Parses a lambda expression, a conditional (`a if b else c`) expression, or
/// any higher-precedence expression, using the given [`ExpressionContext`].
pub(super) fn parse_conditional_expression_or_higher_impl(
    &mut self,
    context: ExpressionContext,
) -> ParsedExpr {
    if self.at(TokenKind::Lambda) {
        Expr::Lambda(self.parse_lambda_expr()).into()
    } else {
        let start = self.node_start();
        let parsed_expr = self.parse_simple_expression(context);
        // `expr if ...` turns the parsed expression into the body of a
        // conditional expression.
        if self.at(TokenKind::If) {
            Expr::If(self.parse_if_expression(parsed_expr.expr, start)).into()
        } else {
            parsed_expr
        }
    }
}
/// Parses every Python expression except unparenthesized tuples, named expressions,
/// and `if` expression.
///
/// This is a combination of the `disjunction`, `starred_expression`, `yield_expr`
/// and `lambdef` rules of the [Python grammar].
///
/// Note that this function parses lambda expression but reports an error as they're not
/// allowed in this context. This is done for better error recovery.
/// Use [`Parser::parse_conditional_expression_or_higher`] or any methods which calls into the
/// specified method to allow parsing lambda expression.
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
fn parse_simple_expression(&mut self, context: ExpressionContext) -> ParsedExpr {
    // Start the Pratt parser with no binding precedence on the left.
    self.parse_binary_expression_or_higher(OperatorPrecedence::None, context)
}
/// Parses a binary expression using the [Pratt parsing algorithm].
///
/// Parses the left-hand operand first, then folds in operators that bind
/// tighter than `left_precedence`.
///
/// [Pratt parsing algorithm]: https://matklad.github.io/2020/04/13/simple-but-powerful-pratt-parsing.html
fn parse_binary_expression_or_higher(
    &mut self,
    left_precedence: OperatorPrecedence,
    context: ExpressionContext,
) -> ParsedExpr {
    let start = self.node_start();
    let lhs = self.parse_lhs_expression(left_precedence, context);
    self.parse_binary_expression_or_higher_recursive(lhs, left_precedence, context, start)
}
/// Continues Pratt parsing from an already-parsed `left` operand: repeatedly
/// consumes boolean, comparison, and arithmetic/bitwise operators that bind
/// tighter than `left_precedence`, folding each into `left`.
pub(super) fn parse_binary_expression_or_higher_recursive(
    &mut self,
    mut left: ParsedExpr,
    left_precedence: OperatorPrecedence,
    context: ExpressionContext,
    start: TextSize,
) -> ParsedExpr {
    let mut progress = ParserProgress::default();
    loop {
        progress.assert_progressing(self);
        let current_token = self.current_token_kind();
        if matches!(current_token, TokenKind::In) && context.is_in_excluded() {
            // Omit the `in` keyword when parsing the target expression in a comprehension or
            // a `for` statement.
            break;
        }
        let Some(operator) = BinaryLikeOperator::try_from_tokens(current_token, self.peek())
        else {
            // Not an operator.
            break;
        };
        let new_precedence = operator.precedence();
        // Right-associative operators continue on equal precedence; all
        // others stop, so `a - b - c` folds left-to-right.
        let stop_at_current_operator = if new_precedence.is_right_associative() {
            new_precedence < left_precedence
        } else {
            new_precedence <= left_precedence
        };
        if stop_at_current_operator {
            break;
        }
        left.expr = match operator {
            BinaryLikeOperator::Boolean(bool_op) => {
                Expr::BoolOp(self.parse_boolean_expression(left.expr, start, bool_op, context))
            }
            BinaryLikeOperator::Comparison(cmp_op) => Expr::Compare(
                self.parse_comparison_expression(left.expr, start, cmp_op, context),
            ),
            BinaryLikeOperator::Binary(bin_op) => {
                self.bump(TokenKind::from(bin_op));
                let right = self.parse_binary_expression_or_higher(new_precedence, context);
                Expr::BinOp(ast::ExprBinOp {
                    left: Box::new(left.expr),
                    op: bin_op,
                    right: Box::new(right.expr),
                    range: self.node_range(start),
                    node_index: AtomicNodeIndex::NONE,
                })
            }
        };
    }
    left
}
/// Parses the left-hand side of an expression.
///
/// This includes prefix expressions such as unary operators, boolean `not`,
/// `await`, `lambda`. It also parses atoms and postfix expressions.
///
/// The given [`OperatorPrecedence`] is used to determine if the parsed expression
/// is valid in that context. For example, a unary operator is not valid
/// in an `await` expression in which case the `left_precedence` would
/// be [`OperatorPrecedence::Await`].
fn parse_lhs_expression(
    &mut self,
    left_precedence: OperatorPrecedence,
    context: ExpressionContext,
) -> ParsedExpr {
    let start = self.node_start();
    let token = self.current_token_kind();
    if let Some(unary_op) = token.as_unary_operator() {
        let expr = self.parse_unary_expression(unary_op, context);
        // The expression is parsed regardless for error recovery; errors
        // below only flag invalid placements.
        if matches!(unary_op, UnaryOp::Not) {
            if left_precedence > OperatorPrecedence::Not {
                self.add_error(
                    ParseErrorType::OtherError(
                        "Boolean 'not' expression cannot be used here".to_string(),
                    ),
                    &expr,
                );
            }
        } else {
            if left_precedence > OperatorPrecedence::PosNegBitNot
                // > The power operator `**` binds less tightly than an arithmetic
                // > or bitwise unary operator on its right, that is, 2**-1 is 0.5.
                //
                // Reference: https://docs.python.org/3/reference/expressions.html#id21
                && left_precedence != OperatorPrecedence::Exponent
            {
                self.add_error(
                    ParseErrorType::OtherError(format!(
                        "Unary '{unary_op}' expression cannot be used here",
                    )),
                    &expr,
                );
            }
        }
        return Expr::UnaryOp(expr).into();
    }
    match self.current_token_kind() {
        TokenKind::Star => {
            let starred_expr = self.parse_starred_expression(context);
            if left_precedence > OperatorPrecedence::None
                || !context.is_starred_expression_allowed()
            {
                self.add_error(ParseErrorType::InvalidStarredExpressionUsage, &starred_expr);
            }
            return Expr::Starred(starred_expr).into();
        }
        TokenKind::Await => {
            let await_expr = self.parse_await_expression();
            // `await` expressions cannot be nested
            if left_precedence >= OperatorPrecedence::Await {
                self.add_error(
                    ParseErrorType::OtherError(
                        "Await expression cannot be used here".to_string(),
                    ),
                    &await_expr,
                );
            }
            return Expr::Await(await_expr).into();
        }
        TokenKind::Lambda => {
            // Lambda expression isn't allowed in this context but we'll still parse it and
            // report an error for better recovery.
            let lambda_expr = self.parse_lambda_expr();
            self.add_error(ParseErrorType::InvalidLambdaExpressionUsage, &lambda_expr);
            return Expr::Lambda(lambda_expr).into();
        }
        TokenKind::Yield => {
            let expr = self.parse_yield_expression();
            if left_precedence > OperatorPrecedence::None
                || !context.is_yield_expression_allowed()
            {
                self.add_error(ParseErrorType::InvalidYieldExpressionUsage, &expr);
            }
            return expr.into();
        }
        _ => {}
    }
    // Not a prefix expression: parse an atom plus any postfix
    // (call/subscript/attribute) expressions.
    let lhs = self.parse_atom();
    ParsedExpr {
        expr: self.parse_postfix_expression(lhs.expr, start),
        is_parenthesized: lhs.is_parenthesized,
    }
}
/// Parses an expression with a minimum precedence of bitwise `or`.
///
/// This methods actually parses the expression using the `expression` rule
/// of the [Python grammar] and then validates the parsed expression. In a
/// sense, it matches the `bitwise_or` rule of the [Python grammar].
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
fn parse_expression_with_bitwise_or_precedence(&mut self) -> ParsedExpr {
    let parsed_expr = self.parse_conditional_expression_or_higher();
    if parsed_expr.is_parenthesized {
        // Parentheses resets the precedence, so we don't need to validate it.
        return parsed_expr;
    }
    // Expressions below bitwise-or precedence are rejected after the fact;
    // the name feeds the error message.
    let expr_name = match parsed_expr.expr {
        Expr::Compare(_) => "Comparison",
        Expr::BoolOp(_)
        | Expr::UnaryOp(ast::ExprUnaryOp {
            op: ast::UnaryOp::Not,
            ..
        }) => "Boolean",
        Expr::If(_) => "Conditional",
        Expr::Lambda(_) => "Lambda",
        _ => return parsed_expr,
    };
    self.add_error(
        ParseErrorType::OtherError(format!("{expr_name} expression cannot be used here")),
        &parsed_expr,
    );
    parsed_expr
}
/// Parses a name.
///
/// For an invalid name, the `id` field will be an empty string and the `ctx`
/// field will be [`ExprContext::Invalid`].
///
/// See: <https://docs.python.org/3/reference/expressions.html#atom-identifiers>
pub(super) fn parse_name(&mut self) -> ast::ExprName {
    let identifier = self.parse_identifier();
    // An unparsable identifier propagates as an invalid-context name.
    let ctx = if identifier.is_valid() {
        ExprContext::Load
    } else {
        ExprContext::Invalid
    };
    ast::ExprName {
        range: identifier.range,
        id: identifier.id,
        ctx,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Creates an empty, invalid name expression at the missing-node position,
/// reporting (via [`Parser::parse_missing_identifier`]) that an identifier
/// was expected.
pub(super) fn parse_missing_name(&mut self) -> ast::ExprName {
    let identifier = self.parse_missing_identifier();
    ast::ExprName {
        range: identifier.range,
        id: identifier.id,
        ctx: ExprContext::Invalid,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses an identifier.
///
/// For an invalid identifier, the `id` field will be an empty string.
///
/// See: <https://docs.python.org/3/reference/expressions.html#atom-identifiers>
pub(super) fn parse_identifier(&mut self) -> ast::Identifier {
    let range = self.current_token_range();
    if self.at(TokenKind::Name) {
        let TokenValue::Name(name) = self.bump_value(TokenKind::Name) else {
            unreachable!();
        };
        return ast::Identifier {
            id: name,
            range,
            node_index: AtomicNodeIndex::NONE,
        };
    }
    // Soft keywords may be used as identifiers; consume the token as a name.
    if self.current_token_kind().is_soft_keyword() {
        let id = Name::new(self.src_text(range));
        self.bump_soft_keyword_as_name();
        return ast::Identifier {
            id,
            range,
            node_index: AtomicNodeIndex::NONE,
        };
    }
    if self.current_token_kind().is_keyword() {
        // Non-soft keyword
        self.add_error(
            ParseErrorType::OtherError(format!(
                "Expected an identifier, but found a keyword {} that cannot be used here",
                self.current_token_kind()
            )),
            range,
        );
        // For recovery, still consume the keyword and use its text as the id.
        let id = Name::new(self.src_text(range));
        self.bump_any();
        ast::Identifier {
            id,
            range,
            node_index: AtomicNodeIndex::NONE,
        }
    } else {
        self.parse_missing_identifier()
    }
}
/// Reports "Expected an identifier" at the current token and returns an empty
/// identifier spanning the missing-node range. No token is consumed.
fn parse_missing_identifier(&mut self) -> ast::Identifier {
    self.add_error(
        ParseErrorType::OtherError("Expected an identifier".into()),
        self.current_token_range(),
    );
    ast::Identifier {
        id: Name::empty(),
        range: self.missing_node_range(),
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses an atom.
///
/// Handles literals, names, strings (including f-/t-strings), IPython escape
/// commands, and the three bracketed forms (`(...)`, `[...]`, `{...}`).
///
/// See: <https://docs.python.org/3/reference/expressions.html#atoms>
fn parse_atom(&mut self) -> ParsedExpr {
    let start = self.node_start();
    let lhs = match self.current_token_kind() {
        TokenKind::Float => {
            let TokenValue::Float(value) = self.bump_value(TokenKind::Float) else {
                unreachable!()
            };
            Expr::NumberLiteral(ast::ExprNumberLiteral {
                value: Number::Float(value),
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Complex => {
            let TokenValue::Complex { real, imag } = self.bump_value(TokenKind::Complex) else {
                unreachable!()
            };
            Expr::NumberLiteral(ast::ExprNumberLiteral {
                value: Number::Complex { real, imag },
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Int => {
            let TokenValue::Int(value) = self.bump_value(TokenKind::Int) else {
                unreachable!()
            };
            Expr::NumberLiteral(ast::ExprNumberLiteral {
                value: Number::Int(value),
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::True => {
            self.bump(TokenKind::True);
            Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                value: true,
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::False => {
            self.bump(TokenKind::False);
            Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                value: false,
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::None => {
            self.bump(TokenKind::None);
            Expr::NoneLiteral(ast::ExprNoneLiteral {
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Ellipsis => {
            self.bump(TokenKind::Ellipsis);
            Expr::EllipsisLiteral(ast::ExprEllipsisLiteral {
                range: self.node_range(start),
                node_index: AtomicNodeIndex::NONE,
            })
        }
        TokenKind::Name => Expr::Name(self.parse_name()),
        TokenKind::IpyEscapeCommand => {
            Expr::IpyEscapeCommand(self.parse_ipython_escape_command_expression())
        }
        TokenKind::String | TokenKind::FStringStart | TokenKind::TStringStart => {
            self.parse_strings()
        }
        TokenKind::Lpar => {
            // Parenthesized expressions track their own parenthesization
            // state, hence the early return.
            return self.parse_parenthesized_expression();
        }
        TokenKind::Lsqb => self.parse_list_like_expression(),
        TokenKind::Lbrace => self.parse_set_or_dict_like_expression(),
        kind => {
            if kind.is_keyword() {
                // Keywords parse as (invalid) names for error recovery.
                Expr::Name(self.parse_name())
            } else {
                self.add_error(
                    ParseErrorType::ExpectedExpression,
                    self.current_token_range(),
                );
                Expr::Name(ast::ExprName {
                    range: self.missing_node_range(),
                    id: Name::empty(),
                    ctx: ExprContext::Invalid,
                    node_index: AtomicNodeIndex::NONE,
                })
            }
        }
    };
    lhs.into()
}
/// Parses a postfix expression in a loop until there are no postfix expressions left to parse.
///
/// For a given left-hand side, a postfix expression can begin with either `(` for a call
/// expression, `[` for a subscript expression, or `.` for an attribute expression.
///
/// This method does nothing if the current token is not a candidate for a postfix expression.
pub(super) fn parse_postfix_expression(&mut self, mut lhs: Expr, start: TextSize) -> Expr {
loop {
lhs = match self.current_token_kind() {
TokenKind::Lpar => Expr::Call(self.parse_call_expression(lhs, start)),
TokenKind::Lsqb => Expr::Subscript(self.parse_subscript_expression(lhs, start)),
TokenKind::Dot => Expr::Attribute(self.parse_attribute_expression(lhs, start)),
_ => break lhs,
};
}
}
/// Parse a call expression.
///
/// The function name is parsed by the caller and passed as `func` along with
/// the `start` position of the call expression.
///
/// # Panics
///
/// If the parser isn't position at a `(` token.
///
/// See: <https://docs.python.org/3/reference/expressions.html#calls>
pub(super) fn parse_call_expression(&mut self, func: Expr, start: TextSize) -> ast::ExprCall {
let arguments = self.parse_arguments();
ast::ExprCall {
func: Box::new(func),
arguments,
range: self.node_range(start),
node_index: AtomicNodeIndex::NONE,
}
}
    /// Parses an argument list.
    ///
    /// Positional arguments are collected into `args`, keyword arguments and `**`
    /// unpackings into `keywords`. Ordering violations (a positional argument after
    /// a keyword argument or after a `**` unpacking) are reported as errors.
    ///
    /// # Panics
    ///
    /// If the parser isn't positioned at a `(` token.
    ///
    /// See: <https://docs.python.org/3/reference/expressions.html#grammar-token-python-grammar-argument_list>
    pub(super) fn parse_arguments(&mut self) -> ast::Arguments {
        let start = self.node_start();
        self.bump(TokenKind::Lpar);
        let mut args = vec![];
        let mut keywords = vec![];
        let mut seen_keyword_argument = false; // foo = 1
        let mut seen_keyword_unpacking = false; // **foo
        let has_trailing_comma =
            self.parse_comma_separated_list(RecoveryContextKind::Arguments, |parser| {
                let argument_start = parser.node_start();
                // `**expr`: a keyword unpacking, stored as a keyword with no `arg` name.
                if parser.eat(TokenKind::DoubleStar) {
                    let value = parser.parse_conditional_expression_or_higher();
                    keywords.push(ast::Keyword {
                        arg: None,
                        value: value.expr,
                        range: parser.node_range(argument_start),
                        node_index: AtomicNodeIndex::NONE,
                    });
                    seen_keyword_unpacking = true;
                } else {
                    let start = parser.node_start();
                    let mut parsed_expr = parser
                        .parse_named_expression_or_higher(ExpressionContext::starred_conditional());
                    match parser.current_token_kind() {
                        TokenKind::Async | TokenKind::For => {
                            // `f(x async for ...)` / `f(x for ...)`: the expression is
                            // actually the element of an unparenthesized generator.
                            if parsed_expr.is_unparenthesized_starred_expr() {
                                parser.add_error(
                                    ParseErrorType::IterableUnpackingInComprehension,
                                    &parsed_expr,
                                );
                            }
                            parsed_expr = Expr::Generator(parser.parse_generator_expression(
                                parsed_expr.expr,
                                start,
                                Parenthesized::No,
                            ))
                            .into();
                        }
                        _ => {
                            if seen_keyword_unpacking
                                && parsed_expr.is_unparenthesized_starred_expr()
                            {
                                parser.add_error(
                                    ParseErrorType::InvalidArgumentUnpackingOrder,
                                    &parsed_expr,
                                );
                            }
                        }
                    }
                    let arg_range = parser.node_range(start);
                    if parser.eat(TokenKind::Equal) {
                        // `name=value`: a keyword argument. The previously parsed
                        // expression must be a plain name.
                        seen_keyword_argument = true;
                        let arg = if let ParsedExpr {
                            expr: Expr::Name(ident_expr),
                            is_parenthesized,
                        } = parsed_expr
                        {
                            // test_ok parenthesized_kwarg_py37
                            // # parse_options: {"target-version": "3.7"}
                            // f((a)=1)
                            // test_err parenthesized_kwarg_py38
                            // # parse_options: {"target-version": "3.8"}
                            // f((a)=1)
                            // f((a) = 1)
                            // f( ( a ) = 1)
                            if is_parenthesized {
                                parser.add_unsupported_syntax_error(
                                    UnsupportedSyntaxErrorKind::ParenthesizedKeywordArgumentName,
                                    arg_range,
                                );
                            }
                            ast::Identifier {
                                id: ident_expr.id,
                                range: ident_expr.range,
                                node_index: AtomicNodeIndex::NONE,
                            }
                        } else {
                            // TODO(dhruvmanila): Parser shouldn't drop the `parsed_expr` if it's
                            // not a name expression. We could add the expression into `args` but
                            // that means the error is a missing comma instead.
                            parser.add_error(
                                ParseErrorType::OtherError("Expected a parameter name".to_string()),
                                &parsed_expr,
                            );
                            ast::Identifier {
                                id: Name::empty(),
                                range: parsed_expr.range(),
                                node_index: AtomicNodeIndex::NONE,
                            }
                        };
                        let value = parser.parse_conditional_expression_or_higher();
                        keywords.push(ast::Keyword {
                            arg: Some(arg),
                            value: value.expr,
                            range: parser.node_range(argument_start),
                            node_index: AtomicNodeIndex::NONE,
                        });
                    } else {
                        // A positional argument. `*expr` unpackings are exempt from
                        // the ordering checks below.
                        if !parsed_expr.is_unparenthesized_starred_expr() {
                            if seen_keyword_unpacking {
                                parser.add_error(
                                    ParseErrorType::PositionalAfterKeywordUnpacking,
                                    &parsed_expr,
                                );
                            } else if seen_keyword_argument {
                                parser.add_error(
                                    ParseErrorType::PositionalAfterKeywordArgument,
                                    &parsed_expr,
                                );
                            }
                        }
                        args.push(parsed_expr.expr);
                    }
                }
            });
        self.expect(TokenKind::Rpar);
        let arguments = ast::Arguments {
            range: self.node_range(start),
            node_index: AtomicNodeIndex::NONE,
            args: args.into_boxed_slice(),
            keywords: keywords.into_boxed_slice(),
        };
        self.validate_arguments(&arguments, has_trailing_comma);
        arguments
    }
/// Parses a subscript expression.
///
/// # Panics
///
/// If the parser isn't positioned at a `[` token.
///
/// See: <https://docs.python.org/3/reference/expressions.html#subscriptions>
fn parse_subscript_expression(
&mut self,
mut value: Expr,
start: TextSize,
) -> ast::ExprSubscript {
self.bump(TokenKind::Lsqb);
// To prevent the `value` context from being `Del` within a `del` statement,
// we set the context as `Load` here.
helpers::set_expr_ctx(&mut value, ExprContext::Load);
// Slice range doesn't include the `[` token.
let slice_start = self.node_start();
// Create an error when receiving an empty slice to parse, e.g. `x[]`
if self.eat(TokenKind::Rsqb) {
let slice_range = self.node_range(slice_start);
self.add_error(ParseErrorType::EmptySlice, slice_range);
return ast::ExprSubscript {
value: Box::new(value),
slice: Box::new(Expr::Name(ast::ExprName {
range: slice_range,
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/progress.rs | crates/ruff_python_parser/src/parser/progress.rs | use crate::parser::Parser;
/// Identifier for the token the parser is currently positioned at, used to test
/// whether the parser has moved between two points in time.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub(super) struct TokenId(u32);
impl TokenId {
    /// Advances the ID to the next value.
    ///
    /// Wrapping on overflow is acceptable: the ID is only ever compared for
    /// (in)equality against the previously recorded ID, never ordered.
    pub(super) fn increment(&mut self) {
        *self = TokenId(self.0.wrapping_add(1));
    }
}
/// Captures how far the parser has advanced, allowing loops to verify that
/// parsing is still making progress (i.e. consuming tokens).
#[derive(Debug, Copy, Clone, Default)]
pub(super) struct ParserProgress(Option<TokenId>);
impl ParserProgress {
    /// Returns `true` if the parser has moved past the position recorded at the
    /// previous check (or if no position has been recorded yet).
    #[inline]
    fn has_progressed(self, p: &Parser) -> bool {
        self.0
            .map_or(true, |prev_token_id| prev_token_id != p.current_token_id())
    }
    /// Asserts that the parsing is still making progress.
    ///
    /// # Panics
    ///
    /// Panics if the parser hasn't progressed since the last call.
    #[inline]
    pub(super) fn assert_progressing(&mut self, p: &Parser) {
        assert!(
            self.has_progressed(p),
            "The parser is no longer progressing. Stuck at '{}' {:?}:{:?}",
            p.src_text(p.current_token_range()),
            p.current_token_kind(),
            p.current_token_range(),
        );
        self.0 = Some(p.current_token_id());
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/options.rs | crates/ruff_python_parser/src/parser/options.rs | use ruff_python_ast::{PySourceType, PythonVersion};
use crate::{AsMode, Mode};
/// Options for controlling how a source file is parsed.
///
/// You can construct a [`ParseOptions`] directly from a [`Mode`]:
///
/// ```
/// use ruff_python_parser::{Mode, ParseOptions};
///
/// let options = ParseOptions::from(Mode::Module);
/// ```
///
/// or from a [`PySourceType`]
///
/// ```
/// use ruff_python_ast::PySourceType;
/// use ruff_python_parser::ParseOptions;
///
/// let options = ParseOptions::from(PySourceType::Python);
/// ```
#[derive(Clone, Debug)]
pub struct ParseOptions {
    /// Specify the mode in which the code will be parsed.
    pub(crate) mode: Mode,
    /// Target version for detecting version-related syntax errors.
    ///
    /// Defaults to [`PythonVersion::default`] when constructed via the `From` impls below.
    pub(crate) target_version: PythonVersion,
}
impl ParseOptions {
    /// Returns a copy of these options with the target version replaced.
    #[must_use]
    pub fn with_target_version(self, target_version: PythonVersion) -> Self {
        Self {
            target_version,
            ..self
        }
    }
    /// Returns the Python version that version-specific syntax is checked against.
    pub fn target_version(&self) -> PythonVersion {
        self.target_version
    }
}
impl From<Mode> for ParseOptions {
    fn from(mode: Mode) -> Self {
        // The target version is not implied by the mode; fall back to the default.
        let target_version = PythonVersion::default();
        Self {
            mode,
            target_version,
        }
    }
}
impl From<PySourceType> for ParseOptions {
fn from(source_type: PySourceType) -> Self {
Self {
mode: source_type.as_mode(),
target_version: PythonVersion::default(),
}
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/mod.rs | crates/ruff_python_parser/src/parser/mod.rs | use std::cmp::Ordering;
use bitflags::bitflags;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{AtomicNodeIndex, Mod, ModExpression, ModModule};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::error::UnsupportedSyntaxError;
use crate::parser::expression::ExpressionContext;
use crate::parser::progress::{ParserProgress, TokenId};
use crate::string::InterpolatedStringKind;
use crate::token::TokenValue;
use crate::token_set::TokenSet;
use crate::token_source::{TokenSource, TokenSourceCheckpoint};
use crate::{Mode, ParseError, ParseErrorType, UnsupportedSyntaxErrorKind};
use crate::{Parsed, Tokens};
pub use crate::parser::options::ParseOptions;
mod expression;
mod helpers;
mod options;
mod pattern;
mod progress;
mod recovery;
mod statement;
#[cfg(test)]
mod tests;
/// The Python parser, driven by the token stream produced from the source text.
#[derive(Debug)]
pub(crate) struct Parser<'src> {
    /// The source code being parsed.
    source: &'src str,
    /// Token source for the parser that skips over any non-trivia token.
    tokens: TokenSource<'src>,
    /// Stores all the syntax errors found during the parsing.
    errors: Vec<ParseError>,
    /// Stores non-fatal syntax errors found during parsing, such as version-related errors.
    unsupported_syntax_errors: Vec<UnsupportedSyntaxError>,
    /// Options for how the code will be parsed.
    options: ParseOptions,
    /// The ID of the current token. This is used to track the progress of the parser
    /// to avoid infinite loops when the parser is stuck.
    current_token_id: TokenId,
    /// The end of the previous token processed. This is used to determine a node's end.
    prev_token_end: TextSize,
    /// The recovery context in which the parser is currently in.
    recovery_context: RecoveryContext,
    /// The start offset in the source code from which to start parsing at.
    start_offset: TextSize,
}
impl<'src> Parser<'src> {
/// Create a new parser for the given source code.
pub(crate) fn new(source: &'src str, options: ParseOptions) -> Self {
Parser::new_starts_at(source, TextSize::new(0), options)
}
/// Create a new parser for the given source code which starts parsing at the given offset.
pub(crate) fn new_starts_at(
source: &'src str,
start_offset: TextSize,
options: ParseOptions,
) -> Self {
let tokens = TokenSource::from_source(source, options.mode, start_offset);
Parser {
options,
source,
errors: Vec::new(),
unsupported_syntax_errors: Vec::new(),
tokens,
recovery_context: RecoveryContext::empty(),
prev_token_end: TextSize::new(0),
start_offset,
current_token_id: TokenId::default(),
}
}
/// Consumes the [`Parser`] and returns the parsed [`Parsed`].
pub(crate) fn parse(mut self) -> Parsed<Mod> {
let syntax = match self.options.mode {
Mode::Expression | Mode::ParenthesizedExpression => {
Mod::Expression(self.parse_single_expression())
}
Mode::Module | Mode::Ipython => Mod::Module(self.parse_module()),
};
self.finish(syntax)
}
/// Parses a single expression.
///
/// This is to be used for [`Mode::Expression`].
///
/// ## Recovery
///
/// After parsing a single expression, an error is reported and all remaining tokens are
/// dropped by the parser.
fn parse_single_expression(&mut self) -> ModExpression {
let start = self.node_start();
let parsed_expr = self.parse_expression_list(ExpressionContext::default());
// All remaining newlines are actually going to be non-logical newlines.
self.eat(TokenKind::Newline);
if !self.at(TokenKind::EndOfFile) {
self.add_error(
ParseErrorType::UnexpectedExpressionToken,
self.current_token_range(),
);
// TODO(dhruvmanila): How should error recovery work here? Just truncate after the expression?
let mut progress = ParserProgress::default();
loop {
progress.assert_progressing(self);
if self.at(TokenKind::EndOfFile) {
break;
}
self.bump_any();
}
}
self.bump(TokenKind::EndOfFile);
ModExpression {
body: Box::new(parsed_expr.expr),
range: self.node_range(start),
node_index: AtomicNodeIndex::NONE,
}
}
/// Parses a Python module.
///
/// This is to be used for [`Mode::Module`] and [`Mode::Ipython`].
fn parse_module(&mut self) -> ModModule {
let body = self.parse_list_into_vec(
RecoveryContextKind::ModuleStatements,
Parser::parse_statement,
);
self.bump(TokenKind::EndOfFile);
ModModule {
body,
range: TextRange::new(self.start_offset, self.current_token_range().end()),
node_index: AtomicNodeIndex::NONE,
}
}
    /// Consumes the parser and assembles the final [`Parsed`] result, merging the
    /// parser's syntax errors with any errors reported by the lexer.
    fn finish(self, syntax: Mod) -> Parsed<Mod> {
        assert_eq!(
            self.current_token_kind(),
            TokenKind::EndOfFile,
            "Parser should be at the end of the file."
        );
        // TODO consider re-integrating lexical error handling into the parser?
        let parse_errors = self.errors;
        let (tokens, lex_errors) = self.tokens.finish();
        // Fast path for when there are no lex errors.
        // There's no fast path for when there are no parse errors because a lex error
        // always results in a parse error.
        if lex_errors.is_empty() {
            return Parsed {
                syntax,
                tokens: Tokens::new(tokens),
                errors: parse_errors,
                unsupported_syntax_errors: self.unsupported_syntax_errors,
            };
        }
        // Merge the two error streams, keeping the result ordered by start offset.
        let mut merged = Vec::with_capacity(parse_errors.len().saturating_add(lex_errors.len()));
        let mut parse_errors = parse_errors.into_iter().peekable();
        let mut lex_errors = lex_errors.into_iter().peekable();
        while let (Some(parse_error), Some(lex_error)) = (parse_errors.peek(), lex_errors.peek()) {
            match parse_error
                .location
                .start()
                .cmp(&lex_error.location().start())
            {
                Ordering::Less => merged.push(parse_errors.next().unwrap()),
                Ordering::Equal => {
                    // Skip the parse error if we already have a lex error at the same location.
                    parse_errors.next().unwrap();
                    merged.push(lex_errors.next().unwrap().into());
                }
                Ordering::Greater => merged.push(lex_errors.next().unwrap().into()),
            }
        }
        // One of the streams is exhausted; append whatever remains of the other.
        merged.extend(parse_errors);
        merged.extend(lex_errors.map(ParseError::from));
        Parsed {
            syntax,
            tokens: Tokens::new(tokens),
            errors: merged,
            unsupported_syntax_errors: self.unsupported_syntax_errors,
        }
    }
/// Returns the start position for a node that starts at the current token.
fn node_start(&self) -> TextSize {
self.current_token_range().start()
}
    /// Returns the range of a node that started at `start` and ends at the end of
    /// the previously processed token.
    fn node_range(&self, start: TextSize) -> TextRange {
        // It's possible during error recovery that the parsing didn't consume any tokens. In that
        // case, `last_token_end` still points to the end of the previous token but `start` is the
        // start of the current token. Calling `TextRange::new(start, self.last_token_end)` would
        // panic in that case because `start > end`. This path "detects" this case and creates an
        // empty range instead.
        //
        // The reason it's `<=` instead of just `==` is because there could be whitespaces between
        // the two tokens. For example:
        //
        // ```python
        // # last token end
        // # | current token (newline) start
        // # v v
        // def foo \n
        // # ^
        // # assume there's trailing whitespace here
        // ```
        //
        // Or, there could be tokens that are considered "trivia" and thus aren't emitted by the token
        // source. These are comments and non-logical newlines. For example:
        //
        // ```python
        // # last token end
        // # v
        // def foo # comment\n
        // # ^ current token (newline) start
        // ```
        //
        // In either of the above cases, there's a "gap" between the end of the last token and start
        // of the current token.
        if self.prev_token_end <= start {
            // We need to create an empty range at the last token end instead of the start because
            // otherwise this node range will fall outside the range of it's parent node. Taking
            // the above example:
            //
            // ```python
            // if True:
            // # function start
            // # | function end
            // # v v
            // def foo # comment
            // # ^ current token start
            // ```
            //
            // Here, the current token start is the start of parameter range but the function ends
            // at `foo`. Even if there's a function body, the range of parameters would still be
            // before the comment.
            // test_err node_range_with_gaps
            // def foo # comment
            // def bar(): ...
            // def baz
            TextRange::empty(self.prev_token_end)
        } else {
            TextRange::new(start, self.prev_token_end)
        }
    }
    /// Returns an empty range at the end of the previously processed token, used as
    /// the range for a node that is missing from the source.
    fn missing_node_range(&self) -> TextRange {
        // TODO(dhruvmanila): This range depends on whether the missing node is
        // on the leftmost or the rightmost of the expression. It's incorrect for
        // the leftmost missing node because the range is outside the expression
        // range. For example,
        //
        // ```python
        // value = ** y
        // # ^^^^ expression range
        // # ^ last token end
        // ```
        TextRange::empty(self.prev_token_end)
    }
    /// Moves the parser to the next token.
    ///
    /// Also records the end offset of the consumed token (used by [`Parser::node_range`]
    /// to compute node ends), except for token kinds that shouldn't extend a node's
    /// range (see the `matches!` below).
    fn do_bump(&mut self, kind: TokenKind) {
        if !matches!(
            self.current_token_kind(),
            // TODO explore including everything up to the dedent as part of the body.
            TokenKind::Dedent
            // Don't include newlines in the body
            | TokenKind::Newline
            // TODO(micha): Including the semi feels more correct but it isn't compatible with lalrpop and breaks the
            // formatters semicolon detection. Exclude it for now
            | TokenKind::Semi
        ) {
            self.prev_token_end = self.current_token_range().end();
        }
        self.tokens.bump(kind);
        self.current_token_id.increment();
    }
    /// Returns the next token kind without consuming it.
    fn peek(&mut self) -> TokenKind {
        self.tokens.peek()
    }
    /// Returns the next two token kinds without consuming them.
    fn peek2(&mut self) -> (TokenKind, TokenKind) {
        self.tokens.peek2()
    }
    /// Returns the current token kind.
    #[inline]
    fn current_token_kind(&self) -> TokenKind {
        self.tokens.current_kind()
    }
    /// Returns the range of the current token.
    #[inline]
    fn current_token_range(&self) -> TextRange {
        self.tokens.current_range()
    }
    /// Returns the current token ID, used to detect whether the parser has made progress.
    #[inline]
    fn current_token_id(&self) -> TokenId {
        self.current_token_id
    }
    /// Bumps the current token assuming it is of the given kind.
    ///
    /// # Panics
    ///
    /// If the current token is not of the given kind.
    fn bump(&mut self, kind: TokenKind) {
        assert_eq!(self.current_token_kind(), kind);
        self.do_bump(kind);
    }
    /// Take the token value from the underlying token source and bump the current token.
    ///
    /// The value is taken *before* bumping so it belongs to the current token.
    ///
    /// # Panics
    ///
    /// If the current token is not of the given kind.
    fn bump_value(&mut self, kind: TokenKind) -> TokenValue {
        let value = self.tokens.take_value();
        self.bump(kind);
        value
    }
    /// Bumps the current token assuming it is found in the given token set.
    ///
    /// # Panics
    ///
    /// If the current token is not found in the given token set.
    fn bump_ts(&mut self, ts: TokenSet) {
        let kind = self.current_token_kind();
        assert!(ts.contains(kind));
        self.do_bump(kind);
    }
    /// Bumps the current token regardless of its kind and advances to the next token.
    ///
    /// # Panics
    ///
    /// If the parser is at end of file.
    fn bump_any(&mut self) {
        let kind = self.current_token_kind();
        assert_ne!(kind, TokenKind::EndOfFile);
        self.do_bump(kind);
    }
    /// Bumps the soft keyword token as a `Name` token.
    ///
    /// # Panics
    ///
    /// If the current token is not a soft keyword.
    pub(crate) fn bump_soft_keyword_as_name(&mut self) {
        assert!(self.at_soft_keyword());
        self.do_bump(TokenKind::Name);
    }
    /// Consume the current token if it is of the given kind. Returns `true` if it matches, `false`
    /// otherwise.
    fn eat(&mut self, kind: TokenKind) -> bool {
        if self.at(kind) {
            self.do_bump(kind);
            true
        } else {
            false
        }
    }
/// Eat the current token if its of the expected kind, otherwise adds an appropriate error.
fn expect(&mut self, expected: TokenKind) -> bool {
if self.eat(expected) {
return true;
}
self.add_error(
ParseErrorType::ExpectedToken {
found: self.current_token_kind(),
expected,
},
self.current_token_range(),
);
false
}
fn add_error<T>(&mut self, error: ParseErrorType, ranged: T)
where
T: Ranged,
{
fn inner(errors: &mut Vec<ParseError>, error: ParseErrorType, range: TextRange) {
// Avoid flagging multiple errors at the same location
let is_same_location = errors
.last()
.is_some_and(|last| last.location.start() == range.start());
if !is_same_location {
errors.push(ParseError {
error,
location: range,
});
}
}
inner(&mut self.errors, error, ranged.range());
}
/// Add an [`UnsupportedSyntaxError`] with the given [`UnsupportedSyntaxErrorKind`] and
/// [`TextRange`] if its minimum version is less than [`Parser::target_version`].
fn add_unsupported_syntax_error(&mut self, kind: UnsupportedSyntaxErrorKind, range: TextRange) {
if kind.is_unsupported(self.options.target_version) {
self.unsupported_syntax_errors.push(UnsupportedSyntaxError {
kind,
range,
target_version: self.options.target_version,
});
}
}
    /// Returns `true` if the current token is of the given kind.
    fn at(&self, kind: TokenKind) -> bool {
        self.current_token_kind() == kind
    }
    /// Returns `true` if the current token is found in the given token set.
    fn at_ts(&self, ts: TokenSet) -> bool {
        ts.contains(self.current_token_kind())
    }
    /// Returns the slice of the source text covered by `ranged`.
    fn src_text<T>(&self, ranged: T) -> &'src str
    where
        T: Ranged,
    {
        &self.source[ranged.range()]
    }
/// Parses a list of elements into a vector where each element is parsed using
/// the given `parse_element` function.
fn parse_list_into_vec<T>(
&mut self,
recovery_context_kind: RecoveryContextKind,
parse_element: impl Fn(&mut Parser<'src>) -> T,
) -> Vec<T> {
let mut elements = Vec::new();
self.parse_list(recovery_context_kind, |p| elements.push(parse_element(p)));
elements
}
    /// Parses a list of elements where each element is parsed using the given
    /// `parse_element` function.
    ///
    /// The difference between this function and `parse_list_into_vec` is that
    /// this function does not return the parsed elements. Instead, it is the
    /// caller's responsibility to handle the parsed elements. This is the reason
    /// that the `parse_element` parameter is bound to [`FnMut`] instead of [`Fn`].
    fn parse_list(
        &mut self,
        recovery_context_kind: RecoveryContextKind,
        mut parse_element: impl FnMut(&mut Parser<'src>),
    ) {
        let mut progress = ParserProgress::default();
        // Extend the recovery context with this list's kind so that error recovery
        // can recognize elements/terminators of enclosing lists; restored on exit.
        let saved_context = self.recovery_context;
        self.recovery_context = self
            .recovery_context
            .union(RecoveryContext::from_kind(recovery_context_kind));
        loop {
            progress.assert_progressing(self);
            if recovery_context_kind.is_list_element(self) {
                parse_element(self);
            } else if recovery_context_kind.is_regular_list_terminator(self) {
                break;
            } else {
                // Run the error recovery: If the token is recognised as an element or terminator
                // of an enclosing list, then we try to re-lex in the context of a logical line and
                // break out of list parsing.
                if self.is_enclosing_list_element_or_terminator() {
                    self.tokens.re_lex_logical_token();
                    break;
                }
                // Otherwise, report the unexpected token and skip it.
                self.add_error(
                    recovery_context_kind.create_error(self),
                    self.current_token_range(),
                );
                self.bump_any();
            }
        }
        self.recovery_context = saved_context;
    }
/// Parses a comma separated list of elements into a vector where each element
/// is parsed using the given `parse_element` function.
fn parse_comma_separated_list_into_vec<T>(
&mut self,
recovery_context_kind: RecoveryContextKind,
parse_element: impl Fn(&mut Parser<'src>) -> T,
) -> Vec<T> {
let mut elements = Vec::new();
self.parse_comma_separated_list(recovery_context_kind, |p| elements.push(parse_element(p)));
elements
}
    /// Parses a comma separated list of elements where each element is parsed
    /// using the given `parse_element` function.
    ///
    /// The difference between this function and `parse_comma_separated_list_into_vec`
    /// is that this function does not return the parsed elements. Instead, it is the
    /// caller's responsibility to handle the parsed elements. This is the reason
    /// that the `parse_element` parameter is bound to [`FnMut`] instead of [`Fn`].
    ///
    /// Returns `true` if there is a trailing comma present.
    fn parse_comma_separated_list(
        &mut self,
        recovery_context_kind: RecoveryContextKind,
        mut parse_element: impl FnMut(&mut Parser<'src>),
    ) -> bool {
        let mut progress = ParserProgress::default();
        let saved_context = self.recovery_context;
        self.recovery_context = self
            .recovery_context
            .union(RecoveryContext::from_kind(recovery_context_kind));
        let mut first_element = true;
        // Range of the most recently consumed comma, if the list currently ends with one.
        let mut trailing_comma_range: Option<TextRange> = None;
        loop {
            progress.assert_progressing(self);
            if recovery_context_kind.is_list_element(self) {
                parse_element(self);
                // Only unset this when we've completely parsed a single element. This is mainly to
                // raise the correct error in case the first element isn't valid and the current
                // token isn't a comma. Without this knowledge, the parser would later expect a
                // comma instead of raising the context error.
                first_element = false;
                let maybe_comma_range = self.current_token_range();
                if self.eat(TokenKind::Comma) {
                    trailing_comma_range = Some(maybe_comma_range);
                    continue;
                }
                trailing_comma_range = None;
            }
            // test_ok comma_separated_regular_list_terminator
            // # The first element is parsed by `parse_list_like_expression` and the comma after
            // # the first element is expected by `parse_list_expression`
            // [0]
            // [0, 1]
            // [0, 1,]
            // [0, 1, 2]
            // [0, 1, 2,]
            if recovery_context_kind.is_regular_list_terminator(self) {
                break;
            }
            // test_err comma_separated_missing_comma_between_elements
            // # The comma between the first two elements is expected in `parse_list_expression`.
            // [0, 1 2]
            if recovery_context_kind.is_list_element(self) {
                // This is a special case to expect a comma between two elements and should be
                // checked before running the error recovery. This is because the error recovery
                // will always run as the parser is currently at a list element.
                self.expect(TokenKind::Comma);
                continue;
            }
            // Run the error recovery: If the token is recognised as an element or terminator of an
            // enclosing list, then we try to re-lex in the context of a logical line and break out
            // of list parsing.
            if self.is_enclosing_list_element_or_terminator() {
                self.tokens.re_lex_logical_token();
                break;
            }
            if first_element || self.at(TokenKind::Comma) {
                // There are two conditions when we need to add the recovery context error:
                //
                // 1. If the parser is at a comma which means that there's a missing element
                // otherwise the comma would've been consumed by the first `eat` call above.
                // And, the parser doesn't take the re-lexing route on a comma token.
                // 2. If it's the first element and the current token is not a comma which means
                // that it's an invalid element.
                // test_err comma_separated_missing_element_between_commas
                // [0, 1, , 2]
                // test_err comma_separated_missing_first_element
                // call(= 1)
                self.add_error(
                    recovery_context_kind.create_error(self),
                    self.current_token_range(),
                );
                trailing_comma_range = if self.at(TokenKind::Comma) {
                    Some(self.current_token_range())
                } else {
                    None
                };
            } else {
                // Otherwise, there should've been a comma at this position. This could be because
                // the element isn't consumed completely by `parse_element`.
                // test_err comma_separated_missing_comma
                // call(**x := 1)
                self.expect(TokenKind::Comma);
                trailing_comma_range = None;
            }
            self.bump_any();
        }
        if let Some(trailing_comma_range) = trailing_comma_range {
            if !recovery_context_kind.allow_trailing_comma() {
                self.add_error(
                    ParseErrorType::OtherError("Trailing comma not allowed".to_string()),
                    trailing_comma_range,
                );
            }
        }
        self.recovery_context = saved_context;
        trailing_comma_range.is_some()
    }
#[cold]
fn is_enclosing_list_element_or_terminator(&self) -> bool {
for context in self.recovery_context.kind_iter() {
if context.is_list_terminator(self) || context.is_list_element(self) {
return true;
}
}
false
}
/// Creates a checkpoint to which the parser can later return to using [`Self::rewind`].
fn checkpoint(&self) -> ParserCheckpoint {
ParserCheckpoint {
tokens: self.tokens.checkpoint(),
errors_position: self.errors.len(),
unsupported_syntax_errors_position: self.unsupported_syntax_errors.len(),
current_token_id: self.current_token_id,
prev_token_end: self.prev_token_end,
recovery_context: self.recovery_context,
}
}
/// Restore the parser to the given checkpoint.
fn rewind(&mut self, checkpoint: ParserCheckpoint) {
let ParserCheckpoint {
tokens,
errors_position,
unsupported_syntax_errors_position,
current_token_id,
prev_token_end,
recovery_context,
} = checkpoint;
self.tokens.rewind(tokens);
self.errors.truncate(errors_position);
self.unsupported_syntax_errors
.truncate(unsupported_syntax_errors_position);
self.current_token_id = current_token_id;
self.prev_token_end = prev_token_end;
self.recovery_context = recovery_context;
}
}
/// A snapshot of the parser's mutable state, produced by `Parser::checkpoint` and
/// consumed by `Parser::rewind`.
struct ParserCheckpoint {
    tokens: TokenSourceCheckpoint,
    /// Number of syntax errors recorded at checkpoint time; `rewind` truncates back to it.
    errors_position: usize,
    /// Number of unsupported-syntax errors recorded at checkpoint time.
    unsupported_syntax_errors_position: usize,
    current_token_id: TokenId,
    prev_token_end: TextSize,
    recovery_context: RecoveryContext,
}
/// The bracket kind surrounding a sequence match pattern: the `()` in
/// `case (a, b):` or the `[]` in `case [a, b]:`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum SequenceMatchPatternParentheses {
    Tuple,
    List,
}
impl SequenceMatchPatternParentheses {
    /// Returns the token kind that closes the parentheses.
    const fn closing_kind(self) -> TokenKind {
        match self {
            Self::Tuple => TokenKind::Rpar,
            Self::List => TokenKind::Rsqb,
        }
    }
    /// Returns `true` if the parentheses are for a list pattern e.g., `case [a, b]: ...`.
    const fn is_list(self) -> bool {
        matches!(self, Self::List)
    }
}
/// Distinguishes the two syntactic forms that introduce a parameter list.
#[derive(Debug, PartialEq, Copy, Clone)]
enum FunctionKind {
    /// A lambda expression, e.g., `lambda x: x`
    Lambda,
    /// A function definition, e.g., `def f(x): ...`
    FunctionDef,
}
impl FunctionKind {
    /// Returns the token that terminates a list of parameters.
    const fn list_terminator(self) -> TokenKind {
        match self {
            Self::Lambda => TokenKind::Colon,
            Self::FunctionDef => TokenKind::Rpar,
        }
    }
}
/// Describes how the items of a `with` statement are parenthesized.
#[derive(Debug, PartialEq, Copy, Clone)]
enum WithItemKind {
    /// A list of `with` items that are surrounded by parentheses.
    ///
    /// ```python
    /// with (item1, item2): ...
    /// with (item1, item2 as foo): ...
    /// ```
    ///
    /// The parentheses belongs to the `with` statement.
    Parenthesized,
    /// The `with` item has a parenthesized expression.
    ///
    /// ```python
    /// with (item) as foo: ...
    /// ```
    ///
    /// The parentheses belongs to the context expression.
    ParenthesizedExpression,
    /// The `with` items aren't parenthesized in any way.
    ///
    /// ```python
    /// with item: ...
    /// with item as foo: ...
    /// with item1, item2: ...
    /// ```
    ///
    /// There are no parentheses around the items.
    Unparenthesized,
}
impl WithItemKind {
    /// Returns `true` if the with items are parenthesized.
    const fn is_parenthesized(self) -> bool {
        // Only `Parenthesized` counts: a parenthesized *expression* belongs
        // to the item, not to the `with` statement's item list.
        match self {
            WithItemKind::Parenthesized => true,
            WithItemKind::ParenthesizedExpression | WithItemKind::Unparenthesized => false,
        }
    }
}
/// Whether the f/t-string elements being parsed belong to the string body
/// itself or to a format specifier inside an interpolation.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum InterpolatedStringElementsKind {
    /// The regular f-string elements.
    ///
    /// For example, the `"hello "`, `x`, and `" world"` elements in:
    /// ```py
    /// f"hello {x:.2f} world"
    /// ```
    Regular(InterpolatedStringKind),
    /// The f-string elements are part of the format specifier.
    ///
    /// For example, the `.2f` in:
    /// ```py
    /// f"hello {x:.2f} world"
    /// ```
    FormatSpec,
}
impl InterpolatedStringElementsKind {
    /// Returns the token kind that terminates the element list for this kind.
    const fn list_terminator(self) -> TokenKind {
        match self {
            // Regular elements run until the end token of the f/t-string
            // itself (as determined by the string kind).
            InterpolatedStringElementsKind::Regular(string_kind) => string_kind.end_token(),
            // A format spec ends at the `}` closing the interpolation.
            // test_ok fstring_format_spec_terminator
            // f"hello {x:} world"
            // f"hello {x:.3f} world"
            InterpolatedStringElementsKind::FormatSpec => TokenKind::Rbrace,
        }
    }
}
/// Whether a list of elements is surrounded by parentheses.
#[derive(Debug, PartialEq, Copy, Clone)]
enum Parenthesized {
    /// The elements are parenthesized, e.g., `(a, b)`.
    Yes,
    /// The elements are not parenthesized, e.g., `a, b`.
    No,
}
impl From<bool> for Parenthesized {
    /// `true` maps to [`Parenthesized::Yes`], `false` to [`Parenthesized::No`].
    fn from(value: bool) -> Self {
        match value {
            true => Parenthesized::Yes,
            false => Parenthesized::No,
        }
    }
}
impl Parenthesized {
    /// Returns `true` if the parenthesized value is `Yes`.
    const fn is_yes(self) -> bool {
        match self {
            Parenthesized::Yes => true,
            Parenthesized::No => false,
        }
    }
}
/// The role the current token plays in terminating a list that is being
/// parsed.
#[derive(Copy, Clone, Debug)]
enum ListTerminatorKind {
    /// The current token terminates the list.
    Regular,
    /// The current token doesn't terminate the list, but is useful for better error recovery.
    ErrorRecovery,
}
/// The syntactic context in which a list of elements (or statements) is being
/// parsed; used to decide how the parser should recover from errors inside
/// that list.
#[derive(Copy, Clone, Debug)]
enum RecoveryContextKind {
    /// When parsing a list of statements at the module level i.e., at the top level of a file.
    ModuleStatements,
    /// When parsing a list of statements in a block e.g., the body of a function or a class.
    BlockStatements,
    /// The `elif` clauses of an `if` statement
    Elif,
    /// The `except` clauses of a `try` statement
    Except,
    /// When parsing a list of assignment targets
    AssignmentTargets,
    /// When parsing a list of type parameters
    TypeParams,
    /// When parsing a list of names in a `from ... import ...` statement
    ImportFromAsNames(Parenthesized),
    /// When parsing a list of names in an `import` statement
    ImportNames,
    /// When parsing a list of slice elements e.g., `data[1, 2]`.
    ///
    /// This is different from `ListElements` as the surrounding context is
    /// different in that the list is part of a subscript expression.
    Slices,
    /// When parsing a list of elements in a list expression e.g., `[1, 2]`
    ListElements,
    /// When parsing a list of elements in a set expression e.g., `{1, 2}`
    SetElements,
    /// When parsing a list of elements in a dictionary expression e.g., `{1: "a", **data}`
    DictElements,
    /// When parsing a list of elements in a tuple expression e.g., `(1, 2)`
    TupleElements(Parenthesized),
    /// When parsing a list of patterns in a match statement with an optional
    /// parentheses, e.g., `case a, b: ...`, `case (a, b): ...`, `case [a, b]: ...`
    SequenceMatchPattern(Option<SequenceMatchPatternParentheses>),
    /// When parsing a mapping pattern in a match statement
    MatchPatternMapping,
    /// When parsing a list of arguments in a class pattern for the match statement
    MatchPatternClassArguments,
    /// When parsing a list of arguments in a function call or a class definition
    Arguments,
    /// When parsing a `del` statement
    DeleteTargets,
    /// When parsing a list of identifiers
    Identifiers,
    /// When parsing a list of parameters in a function definition which can be
    /// either a function definition or a lambda expression.
    Parameters(FunctionKind),
    /// When parsing a list of items in a `with` statement
    WithItems(WithItemKind),
    /// When parsing a list of f-string or t-string elements which are either literal elements, expressions, or interpolations.
    InterpolatedStringElements(InterpolatedStringElementsKind),
}
impl RecoveryContextKind {
/// Returns `true` if a trailing comma is allowed in the current context.
const fn allow_trailing_comma(self) -> bool {
matches!(
self,
RecoveryContextKind::Slices
| RecoveryContextKind::TupleElements(_)
| RecoveryContextKind::SetElements
| RecoveryContextKind::ListElements
| RecoveryContextKind::DictElements
| RecoveryContextKind::Arguments
| RecoveryContextKind::MatchPatternMapping
| RecoveryContextKind::SequenceMatchPattern(_)
| RecoveryContextKind::MatchPatternClassArguments
// Only allow a trailing comma if the with item itself is parenthesized
| RecoveryContextKind::WithItems(WithItemKind::Parenthesized)
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/statement.rs | crates/ruff_python_parser/src/parser/statement.rs | use compact_str::CompactString;
use std::fmt::{Display, Write};
use ruff_python_ast::name::Name;
use ruff_python_ast::token::TokenKind;
use ruff_python_ast::{
self as ast, AtomicNodeIndex, ExceptHandler, Expr, ExprContext, IpyEscapeKind, Operator,
PythonVersion, Stmt, WithItem,
};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::error::StarTupleKind;
use crate::parser::expression::{EXPR_SET, ParsedExpr};
use crate::parser::progress::ParserProgress;
use crate::parser::{
FunctionKind, Parser, RecoveryContext, RecoveryContextKind, WithItemKind, helpers,
};
use crate::token::TokenValue;
use crate::token_set::TokenSet;
use crate::{Mode, ParseErrorType, UnsupportedSyntaxErrorKind};
use super::Parenthesized;
use super::expression::ExpressionContext;
/// Tokens that represent compound statements.
///
/// `At` is included because a decorator list introduces a function or class
/// definition (see `parse_decorators`), and `Async` because it prefixes an
/// async compound statement (see `parse_async_statement`).
const COMPOUND_STMT_SET: TokenSet = TokenSet::new([
    TokenKind::Match,
    TokenKind::If,
    TokenKind::With,
    TokenKind::While,
    TokenKind::For,
    TokenKind::Try,
    TokenKind::Def,
    TokenKind::Class,
    TokenKind::Async,
    TokenKind::At,
]);
/// Tokens that represent simple statements, but doesn't include expressions.
const SIMPLE_STMT_SET: TokenSet = TokenSet::new([
    TokenKind::Pass,
    TokenKind::Return,
    TokenKind::Break,
    TokenKind::Continue,
    TokenKind::Global,
    TokenKind::Nonlocal,
    TokenKind::Assert,
    TokenKind::Yield,
    TokenKind::Del,
    TokenKind::Raise,
    TokenKind::Import,
    TokenKind::From,
    // `type` is a soft keyword; it may still turn out to be an identifier
    // (see the lookahead in `parse_simple_statement`).
    TokenKind::Type,
    TokenKind::IpyEscapeCommand,
]);
/// Tokens that represent simple statements, including expressions.
const SIMPLE_STMT_WITH_EXPR_SET: TokenSet = SIMPLE_STMT_SET.union(EXPR_SET);

/// Tokens that represents all possible statements, including simple, compound,
/// and expression statements.
///
/// Soft keywords are additionally checked via `at_soft_keyword`, so membership
/// in this set alone doesn't decide statement starts (see `at_stmt`).
const STMTS_SET: TokenSet = SIMPLE_STMT_WITH_EXPR_SET.union(COMPOUND_STMT_SET);
/// Tokens that represent operators that can be used in augmented assignments,
/// e.g. `+=`, `-=`, `**=`, `//=`, `@=`, `<<=`.
const AUGMENTED_ASSIGN_SET: TokenSet = TokenSet::new([
    TokenKind::PlusEqual,
    TokenKind::MinusEqual,
    TokenKind::StarEqual,
    TokenKind::DoubleStarEqual,
    TokenKind::SlashEqual,
    TokenKind::DoubleSlashEqual,
    TokenKind::PercentEqual,
    TokenKind::AtEqual,
    TokenKind::AmperEqual,
    TokenKind::VbarEqual,
    TokenKind::CircumflexEqual,
    TokenKind::LeftShiftEqual,
    TokenKind::RightShiftEqual,
]);
impl<'src> Parser<'src> {
/// Returns `true` if the current token is the start of a compound statement
/// (`if`, `while`, `for`, `try`, `with`, `match`, `def`, `class`, a
/// decorator `@`, or `async` — see [`COMPOUND_STMT_SET`]).
pub(super) fn at_compound_stmt(&self) -> bool {
    self.at_ts(COMPOUND_STMT_SET)
}
/// Returns `true` if the current token is the start of a simple statement,
/// including expressions.
///
/// Soft keywords are included because, when not used as keywords, they are
/// ordinary identifiers and so can begin an expression statement.
fn at_simple_stmt(&self) -> bool {
    self.at_ts(SIMPLE_STMT_WITH_EXPR_SET) || self.at_soft_keyword()
}
/// Returns `true` if the current token is the start of a simple, compound or expression
/// statement.
///
/// Soft keywords are checked separately since they aren't part of [`STMTS_SET`].
pub(super) fn at_stmt(&self) -> bool {
    self.at_ts(STMTS_SET) || self.at_soft_keyword()
}
/// Checks if the parser is currently positioned at the start of a type parameter.
///
/// Accepts a `*` or `**` prefix and any name.
/// NOTE(review): any keyword is also accepted here, presumably so that a
/// misused keyword still parses as a type-parameter name during error
/// recovery — confirm.
pub(super) fn at_type_param(&self) -> bool {
    let token = self.current_token_kind();
    matches!(
        token,
        TokenKind::Star | TokenKind::DoubleStar | TokenKind::Name
    ) || token.is_keyword()
}
/// Parses a compound or a single simple statement.
///
/// See:
/// - <https://docs.python.org/3/reference/compound_stmts.html>
/// - <https://docs.python.org/3/reference/simple_stmts.html>
pub(super) fn parse_statement(&mut self) -> Stmt {
    let start = self.node_start();
    match self.current_token_kind() {
        TokenKind::If => Stmt::If(self.parse_if_statement()),
        TokenKind::For => Stmt::For(self.parse_for_statement(start)),
        TokenKind::While => Stmt::While(self.parse_while_statement()),
        TokenKind::Def => Stmt::FunctionDef(self.parse_function_definition(vec![], start)),
        TokenKind::Class => Stmt::ClassDef(self.parse_class_definition(vec![], start)),
        TokenKind::Try => Stmt::Try(self.parse_try_statement()),
        TokenKind::With => Stmt::With(self.parse_with_statement(start)),
        TokenKind::At => self.parse_decorators(),
        TokenKind::Async => self.parse_async_statement(),
        token => {
            if token == TokenKind::Match {
                // Match is considered a soft keyword, so we will treat it as an identifier if
                // it's followed by an unexpected token.
                match self.classify_match_token() {
                    // Definitely the `match` statement keyword.
                    MatchTokenKind::Keyword => {
                        return Stmt::Match(self.parse_match_statement());
                    }
                    // Ambiguous: speculatively parse a `match` statement and
                    // fall through to a simple statement if that fails.
                    MatchTokenKind::KeywordOrIdentifier => {
                        if let Some(match_stmt) = self.try_parse_match_statement() {
                            return Stmt::Match(match_stmt);
                        }
                    }
                    // Definitely an identifier; parse as a simple statement.
                    MatchTokenKind::Identifier => {}
                }
            }
            self.parse_single_simple_statement()
        }
    }
}
/// Parses a single simple statement.
///
/// This statement must be terminated by a newline or semicolon.
///
/// Use [`Parser::parse_simple_statements`] to parse a sequence of simple statements.
fn parse_single_simple_statement(&mut self) -> Stmt {
    let stmt = self.parse_simple_statement();
    // The order of the token is important here: a statement may end with
    // `;`, a newline, or `;` followed by a newline, so the optional `;`
    // must be consumed before the optional newline.
    let has_eaten_semicolon = self.eat(TokenKind::Semi);
    let has_eaten_newline = self.eat(TokenKind::Newline);
    if !has_eaten_newline {
        // Another simple statement on the same line without a separating
        // `;` is an error, as is a compound statement following a simple
        // one on the same line.
        if !has_eaten_semicolon && self.at_simple_stmt() {
            // test_err simple_stmts_on_same_line
            // a b
            // a + b c + d
            // break; continue pass; continue break
            self.add_error(
                ParseErrorType::SimpleStatementsOnSameLine,
                self.current_token_range(),
            );
        } else if self.at_compound_stmt() {
            // test_err simple_and_compound_stmt_on_same_line
            // a; if b: pass; b
            self.add_error(
                ParseErrorType::SimpleAndCompoundStatementOnSameLine,
                self.current_token_range(),
            );
        }
    }
    stmt
}
/// Parses a sequence of simple statements.
///
/// If there is more than one statement in this sequence, it is expected to be separated by a
/// semicolon. The sequence can optionally end with a semicolon, but regardless of whether
/// a semicolon is present or not, it is expected to end with a newline.
///
/// Matches the `simple_stmts` rule in the [Python grammar].
///
/// [Python grammar]: https://docs.python.org/3/reference/grammar.html
fn parse_simple_statements(&mut self) -> Vec<Stmt> {
    let mut stmts = vec![];
    let mut progress = ParserProgress::default();
    loop {
        progress.assert_progressing(self);
        stmts.push(self.parse_simple_statement());
        // A `;` must separate statements; when it's missing but another
        // simple statement follows, report the error and keep parsing for
        // better recovery.
        if !self.eat(TokenKind::Semi) {
            if self.at_simple_stmt() {
                // test_err simple_stmts_on_same_line_in_block
                // if True: break; continue pass; continue break
                self.add_error(
                    ParseErrorType::SimpleStatementsOnSameLine,
                    self.current_token_range(),
                );
            } else {
                // test_ok simple_stmts_in_block
                // if True: pass
                // if True: pass;
                // if True: pass; continue
                // if True: pass; continue;
                // x = 1
                break;
            }
        }
        // Handles the trailing-semicolon case: after a `;`, the sequence is
        // done unless another simple statement actually follows.
        if !self.at_simple_stmt() {
            break;
        }
    }
    // Ideally, we should use `expect` here but we use `eat` for better error message. Later,
    // if the parser isn't at the start of a compound statement, we'd `expect` a newline.
    if !self.eat(TokenKind::Newline) {
        if self.at_compound_stmt() {
            // test_err simple_and_compound_stmt_on_same_line_in_block
            // if True: pass if False: pass
            // if True: pass; if False: pass
            self.add_error(
                ParseErrorType::SimpleAndCompoundStatementOnSameLine,
                self.current_token_range(),
            );
        } else {
            // test_err multiple_clauses_on_same_line
            // if True: pass elif False: pass else: pass
            // if True: pass; elif False: pass; else: pass
            // for x in iter: break else: pass
            // for x in iter: break; else: pass
            // try: pass except exc: pass else: pass finally: pass
            // try: pass; except exc: pass; else: pass; finally: pass
            self.add_error(
                ParseErrorType::ExpectedToken {
                    found: self.current_token_kind(),
                    expected: TokenKind::Newline,
                },
                self.current_token_range(),
            );
        }
    }
    // test_ok simple_stmts_with_semicolons
    // return; import a; from x import y; z; type T = int
    stmts
}
/// Parses a simple statement.
///
/// Keyword-introduced statements are dispatched directly; everything else is
/// parsed as an expression and then classified as an assignment, annotated
/// assignment, augmented assignment, IPython help command, or plain
/// expression statement based on the token that follows.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html>
fn parse_simple_statement(&mut self) -> Stmt {
    match self.current_token_kind() {
        TokenKind::Return => Stmt::Return(self.parse_return_statement()),
        TokenKind::Import => Stmt::Import(self.parse_import_statement()),
        TokenKind::From => Stmt::ImportFrom(self.parse_from_import_statement()),
        TokenKind::Pass => Stmt::Pass(self.parse_pass_statement()),
        TokenKind::Continue => Stmt::Continue(self.parse_continue_statement()),
        TokenKind::Break => Stmt::Break(self.parse_break_statement()),
        TokenKind::Raise => Stmt::Raise(self.parse_raise_statement()),
        TokenKind::Del => Stmt::Delete(self.parse_delete_statement()),
        TokenKind::Assert => Stmt::Assert(self.parse_assert_statement()),
        TokenKind::Global => Stmt::Global(self.parse_global_statement()),
        TokenKind::Nonlocal => Stmt::Nonlocal(self.parse_nonlocal_statement()),
        TokenKind::IpyEscapeCommand => {
            Stmt::IpyEscapeCommand(self.parse_ipython_escape_command_statement())
        }
        token => {
            if token == TokenKind::Type {
                // Type is considered a soft keyword, so we will treat it as an identifier if
                // it's followed by an unexpected token.
                // A type alias looks like `type X = ...` or `type X[T] = ...`,
                // hence the two-token lookahead for a name followed by `[` or `=`.
                let (first, second) = self.peek2();
                if (first == TokenKind::Name || first.is_soft_keyword())
                    && matches!(second, TokenKind::Lsqb | TokenKind::Equal)
                {
                    return Stmt::TypeAlias(self.parse_type_alias_statement());
                }
            }
            let start = self.node_start();
            // simple_stmt: `... | yield_stmt | star_expressions | ...`
            let parsed_expr =
                self.parse_expression_list(ExpressionContext::yield_or_starred_bitwise_or());
            if self.at(TokenKind::Equal) {
                Stmt::Assign(self.parse_assign_statement(parsed_expr, start))
            } else if self.at(TokenKind::Colon) {
                Stmt::AnnAssign(self.parse_annotated_assignment_statement(parsed_expr, start))
            } else if let Some(op) = self.current_token_kind().as_augmented_assign_operator() {
                Stmt::AugAssign(self.parse_augmented_assignment_statement(
                    parsed_expr,
                    op,
                    start,
                ))
            } else if self.options.mode == Mode::Ipython && self.at(TokenKind::Question) {
                // `expr?` help form is only valid in IPython mode.
                Stmt::IpyEscapeCommand(
                    self.parse_ipython_help_end_escape_command_statement(&parsed_expr),
                )
            } else {
                Stmt::Expr(ast::StmtExpr {
                    range: self.node_range(start),
                    value: Box::new(parsed_expr.expr),
                    node_index: AtomicNodeIndex::NONE,
                })
            }
        }
    }
}
/// Parses a delete statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `del` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-del_stmt>
fn parse_delete_statement(&mut self) -> ast::StmtDelete {
    let start = self.node_start();
    self.bump(TokenKind::Del);
    // test_err del_incomplete_target
    // del x, y.
    // z
    // del x, y[
    // z
    let targets = self.parse_comma_separated_list_into_vec(
        RecoveryContextKind::DeleteTargets,
        |parser| {
            // Allow starred expression to raise a better error message for
            // an invalid delete target later.
            let mut target = parser.parse_conditional_expression_or_higher_impl(
                ExpressionContext::starred_conditional(),
            );
            // Mark every target with `Del` context before validating it.
            helpers::set_expr_ctx(&mut target.expr, ExprContext::Del);
            // test_err invalid_del_target
            // del x + 1
            // del {'x': 1}
            // del {'x', 'y'}
            // del None, True, False, 1, 1.0, "abc"
            parser.validate_delete_target(&target.expr);
            target.expr
        },
    );
    if targets.is_empty() {
        // test_err del_stmt_empty
        // del
        self.add_error(
            ParseErrorType::EmptyDeleteTargets,
            self.current_token_range(),
        );
    }
    ast::StmtDelete {
        targets,
        range: self.node_range(start),
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a `return` statement.
///
/// A bare `return` produces a statement with no value.
///
/// # Panics
///
/// If the parser isn't positioned at a `return` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-return_stmt>
fn parse_return_statement(&mut self) -> ast::StmtReturn {
    let start = self.node_start();
    self.bump(TokenKind::Return);
    // test_err return_stmt_invalid_expr
    // return *
    // return yield x
    // return yield from x
    // return x := 1
    // return *x and y
    let value = self.at_expr().then(|| {
        let parsed_expr = self.parse_expression_list(ExpressionContext::starred_bitwise_or());
        // Starred elements in an unparenthesized returned tuple are only
        // valid from Python 3.8 onwards; flag them for older targets.
        // test_ok iter_unpack_return_py37
        // # parse_options: {"target-version": "3.7"}
        // rest = (4, 5, 6)
        // def f(): return (1, 2, 3, *rest)
        // test_ok iter_unpack_return_py38
        // # parse_options: {"target-version": "3.8"}
        // rest = (4, 5, 6)
        // def f(): return 1, 2, 3, *rest
        // test_err iter_unpack_return_py37
        // # parse_options: {"target-version": "3.7"}
        // rest = (4, 5, 6)
        // def f(): return 1, 2, 3, *rest
        self.check_tuple_unpacking(
            &parsed_expr,
            UnsupportedSyntaxErrorKind::StarTuple(StarTupleKind::Return),
        );
        Box::new(parsed_expr.expr)
    });
    ast::StmtReturn {
        range: self.node_range(start),
        value,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Report [`UnsupportedSyntaxError`]s for each starred element in `expr` if it is an
/// unparenthesized tuple.
///
/// This method can be used to check for tuple unpacking in `return`, `yield`, and `for`
/// statements, which are only allowed after [Python 3.8] and [Python 3.9], respectively.
///
/// [Python 3.8]: https://github.com/python/cpython/issues/76298
/// [Python 3.9]: https://github.com/python/cpython/issues/90881
pub(super) fn check_tuple_unpacking(&mut self, expr: &Expr, kind: UnsupportedSyntaxErrorKind) {
    // Nothing to report when the target version supports this syntax.
    if kind.is_supported(self.options.target_version) {
        return;
    }

    // Only unparenthesized tuples are checked; anything else is left alone.
    if let Expr::Tuple(ast::ExprTuple {
        elts,
        parenthesized: false,
        ..
    }) = expr
    {
        // Emit one error per starred element in the tuple.
        for starred in elts.iter().filter(|elt| elt.is_starred_expr()) {
            self.add_unsupported_syntax_error(kind, starred.range());
        }
    }
}
/// Parses a `raise` statement.
///
/// A bare `raise` (followed directly by a newline) produces a statement with
/// neither exception nor cause.
///
/// # Panics
///
/// If the parser isn't positioned at a `raise` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-raise_stmt>
fn parse_raise_statement(&mut self) -> ast::StmtRaise {
    let start = self.node_start();
    self.bump(TokenKind::Raise);
    let exc = match self.current_token_kind() {
        // Bare `raise` — re-raise form with no exception expression.
        TokenKind::Newline => None,
        TokenKind::From => {
            // `from` immediately after `raise` means the exception is missing;
            // recover by recording the error and parsing the cause anyway.
            // test_err raise_stmt_from_without_exc
            // raise from exc
            // raise from None
            self.add_error(
                ParseErrorType::OtherError(
                    "Exception missing in `raise` statement with cause".to_string(),
                ),
                self.current_token_range(),
            );
            None
        }
        _ => {
            // test_err raise_stmt_invalid_exc
            // raise *x
            // raise yield x
            // raise x := 1
            let exc = self.parse_expression_list(ExpressionContext::default());
            if let Some(ast::ExprTuple {
                parenthesized: false,
                ..
            }) = exc.as_tuple_expr()
            {
                // test_err raise_stmt_unparenthesized_tuple_exc
                // raise x,
                // raise x, y
                // raise x, y from z
                self.add_error(ParseErrorType::UnparenthesizedTupleExpression, &exc);
            }
            Some(Box::new(exc.expr))
        }
    };
    let cause = self.eat(TokenKind::From).then(|| {
        // test_err raise_stmt_invalid_cause
        // raise x from *y
        // raise x from yield y
        // raise x from y := 1
        let cause = self.parse_expression_list(ExpressionContext::default());
        if let Some(ast::ExprTuple {
            parenthesized: false,
            ..
        }) = cause.as_tuple_expr()
        {
            // test_err raise_stmt_unparenthesized_tuple_cause
            // raise x from y,
            // raise x from y, z
            self.add_error(ParseErrorType::UnparenthesizedTupleExpression, &cause);
        }
        Box::new(cause.expr)
    });
    ast::StmtRaise {
        range: self.node_range(start),
        exc,
        cause,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses an import statement.
///
/// An `import` statement with no names is an error but still produces a node
/// (with an empty name list) for recovery.
///
/// # Panics
///
/// If the parser isn't positioned at an `import` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#the-import-statement>
fn parse_import_statement(&mut self) -> ast::StmtImport {
    let start = self.node_start();
    self.bump(TokenKind::Import);
    // test_err import_stmt_parenthesized_names
    // import (a)
    // import (a, b)
    // test_err import_stmt_star_import
    // import *
    // import x, *, y
    // test_err import_stmt_trailing_comma
    // import ,
    // import x, y,
    let names = self
        .parse_comma_separated_list_into_vec(RecoveryContextKind::ImportNames, |p| {
            p.parse_alias(ImportStyle::Import)
        });
    if names.is_empty() {
        // test_err import_stmt_empty
        // import
        self.add_error(ParseErrorType::EmptyImportNames, self.current_token_range());
    }
    ast::StmtImport {
        range: self.node_range(start),
        names,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a `from` import statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `from` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-import_stmt>
fn parse_from_import_statement(&mut self) -> ast::StmtImportFrom {
    let start = self.node_start();
    self.bump(TokenKind::From);
    // Count the leading dots of a relative import. `...` is lexed as a
    // single `Ellipsis` token, so it contributes three dots at once.
    let mut leading_dots = 0;
    let mut progress = ParserProgress::default();
    loop {
        progress.assert_progressing(self);
        if self.eat(TokenKind::Dot) {
            leading_dots += 1;
        } else if self.eat(TokenKind::Ellipsis) {
            leading_dots += 3;
        } else {
            break;
        }
    }
    // The module name is optional for relative imports, e.g. `from . import x`.
    let module = if self.at_name_or_soft_keyword() {
        // test_ok from_import_soft_keyword_module_name
        // from match import pattern
        // from type import bar
        // from case import pattern
        // from match.type.case import foo
        Some(self.parse_dotted_name())
    } else {
        if leading_dots == 0 {
            // test_err from_import_missing_module
            // from
            // from import x
            self.add_error(
                ParseErrorType::OtherError("Expected a module name".to_string()),
                self.current_token_range(),
            );
        }
        None
    };
    // test_ok from_import_no_space
    // from.import x
    // from...import x
    self.expect(TokenKind::Import);
    let names_start = self.node_start();
    let mut names = vec![];
    let mut seen_star_import = false;
    let parenthesized = Parenthesized::from(self.eat(TokenKind::Lpar));
    // test_err from_import_unparenthesized_trailing_comma
    // from a import b,
    // from a import b as c,
    // from a import b, c,
    self.parse_comma_separated_list(
        RecoveryContextKind::ImportFromAsNames(parenthesized),
        |parser| {
            // test_err from_import_dotted_names
            // from x import a.
            // from x import a.b
            // from x import a, b.c, d, e.f, g
            let alias = parser.parse_alias(ImportStyle::ImportFrom);
            seen_star_import |= alias.name.id == "*";
            names.push(alias);
        },
    );
    if names.is_empty() {
        // test_err from_import_empty_names
        // from x import
        // from x import ()
        // from x import ,,
        self.add_error(ParseErrorType::EmptyImportNames, self.current_token_range());
    }
    if seen_star_import && names.len() > 1 {
        // test_err from_import_star_with_other_names
        // from x import *, a
        // from x import a, *, b
        // from x import *, a as b
        // from x import *, *, a
        self.add_error(
            ParseErrorType::OtherError("Star import must be the only import".to_string()),
            self.node_range(names_start),
        );
    }
    if parenthesized.is_yes() {
        // The opening `(` was consumed above, so a matching `)` is required.
        // test_err from_import_missing_rpar
        // from x import (a, b
        // 1 + 1
        // from x import (a, b,
        // 2 + 2
        self.expect(TokenKind::Rpar);
    }
    ast::StmtImportFrom {
        module,
        names,
        level: leading_dots,
        range: self.node_range(start),
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses an `import` or `from` import name.
///
/// A `*` produces an alias whose name is the literal `"*"` and cannot take an
/// `as` clause. `import` allows dotted names (`a.b.c`); `from ... import`
/// allows only a single identifier per alias.
///
/// See:
/// - <https://docs.python.org/3/reference/simple_stmts.html#the-import-statement>
/// - <https://docs.python.org/3/library/ast.html#ast.alias>
fn parse_alias(&mut self, style: ImportStyle) -> ast::Alias {
    let start = self.node_start();
    if self.eat(TokenKind::Star) {
        let range = self.node_range(start);
        return ast::Alias {
            name: ast::Identifier {
                id: Name::new_static("*"),
                range,
                node_index: AtomicNodeIndex::NONE,
            },
            asname: None,
            range,
            node_index: AtomicNodeIndex::NONE,
        };
    }
    let name = match style {
        ImportStyle::Import => self.parse_dotted_name(),
        ImportStyle::ImportFrom => self.parse_identifier(),
    };
    let asname = if self.eat(TokenKind::As) {
        if self.at_name_or_soft_keyword() {
            // test_ok import_as_name_soft_keyword
            // import foo as match
            // import bar as case
            // import baz as type
            Some(self.parse_identifier())
        } else {
            // test_err import_alias_missing_asname
            // import x as
            self.add_error(
                ParseErrorType::OtherError("Expected symbol after `as`".to_string()),
                self.current_token_range(),
            );
            None
        }
    } else {
        None
    };
    ast::Alias {
        range: self.node_range(start),
        name,
        asname,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a dotted name.
///
/// A dotted name is a sequence of identifiers separated by a single dot.
/// The identifiers are joined with a single `.` in the resulting name,
/// normalizing any whitespace that appeared around the dots in the source.
fn parse_dotted_name(&mut self) -> ast::Identifier {
    let start = self.node_start();
    let mut dotted_name: CompactString = self.parse_identifier().id.into();
    let mut progress = ParserProgress::default();
    while self.eat(TokenKind::Dot) {
        progress.assert_progressing(self);
        // test_err dotted_name_multiple_dots
        // import a..b
        // import a...b
        dotted_name.push('.');
        dotted_name.push_str(&self.parse_identifier());
    }
    // test_ok dotted_name_normalized_spaces
    // import a.b.c
    // import a . b . c
    ast::Identifier {
        id: Name::from(dotted_name),
        range: self.node_range(start),
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a `pass` statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `pass` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-pass_stmt>
fn parse_pass_statement(&mut self) -> ast::StmtPass {
    // `pass` carries no payload; only its source range is recorded.
    let stmt_start = self.node_start();
    self.bump(TokenKind::Pass);
    ast::StmtPass {
        node_index: AtomicNodeIndex::NONE,
        range: self.node_range(stmt_start),
    }
}
/// Parses a `continue` statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `continue` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-continue_stmt>
fn parse_continue_statement(&mut self) -> ast::StmtContinue {
    // `continue` carries no payload; only its source range is recorded.
    let stmt_start = self.node_start();
    self.bump(TokenKind::Continue);
    ast::StmtContinue {
        node_index: AtomicNodeIndex::NONE,
        range: self.node_range(stmt_start),
    }
}
/// Parses a `break` statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `break` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-break_stmt>
fn parse_break_statement(&mut self) -> ast::StmtBreak {
    // `break` carries no payload; only its source range is recorded.
    let stmt_start = self.node_start();
    self.bump(TokenKind::Break);
    ast::StmtBreak {
        node_index: AtomicNodeIndex::NONE,
        range: self.node_range(stmt_start),
    }
}
/// Parses an `assert` statement.
///
/// The message is optional: `assert test` or `assert test, msg`.
///
/// # Panics
///
/// If the parser isn't positioned at an `assert` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#the-assert-statement>
fn parse_assert_statement(&mut self) -> ast::StmtAssert {
    let start = self.node_start();
    self.bump(TokenKind::Assert);
    // test_err assert_empty_test
    // assert
    // test_err assert_invalid_test_expr
    // assert *x
    // assert assert x
    // assert yield x
    // assert x := 1
    let test = self.parse_conditional_expression_or_higher();
    let msg = if self.eat(TokenKind::Comma) {
        if self.at_expr() {
            // test_err assert_invalid_msg_expr
            // assert False, *x
            // assert False, assert x
            // assert False, yield x
            // assert False, x := 1
            Some(Box::new(self.parse_conditional_expression_or_higher().expr))
        } else {
            // A comma with no message expression after it is an error.
            // test_err assert_empty_msg
            // assert x,
            self.add_error(
                ParseErrorType::ExpectedExpression,
                self.current_token_range(),
            );
            None
        }
    } else {
        None
    };
    ast::StmtAssert {
        test: Box::new(test.expr),
        msg,
        range: self.node_range(start),
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a global statement.
///
/// A `global` statement with no names is an error but still produces a node
/// (with an empty name list) for recovery.
///
/// # Panics
///
/// If the parser isn't positioned at a `global` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-global_stmt>
fn parse_global_statement(&mut self) -> ast::StmtGlobal {
    let start = self.node_start();
    self.bump(TokenKind::Global);
    // test_err global_stmt_trailing_comma
    // global ,
    // global x,
    // global x, y,
    // test_err global_stmt_expression
    // global x + 1
    let names = self.parse_comma_separated_list_into_vec(
        RecoveryContextKind::Identifiers,
        Parser::parse_identifier,
    );
    if names.is_empty() {
        // test_err global_stmt_empty
        // global
        self.add_error(ParseErrorType::EmptyGlobalNames, self.current_token_range());
    }
    // test_ok global_stmt
    // global x
    // global x, y, z
    ast::StmtGlobal {
        range: self.node_range(start),
        names,
        node_index: AtomicNodeIndex::NONE,
    }
}
/// Parses a nonlocal statement.
///
/// # Panics
///
/// If the parser isn't positioned at a `nonlocal` token.
///
/// See: <https://docs.python.org/3/reference/simple_stmts.html#grammar-token-python-grammar-nonlocal_stmt>
fn parse_nonlocal_statement(&mut self) -> ast::StmtNonlocal {
let start = self.node_start();
self.bump(TokenKind::Nonlocal);
// test_err nonlocal_stmt_trailing_comma
// def _():
// nonlocal ,
// nonlocal x,
// nonlocal x, y,
// test_err nonlocal_stmt_expression
// def _():
// nonlocal x + 1
let names = self.parse_comma_separated_list_into_vec(
RecoveryContextKind::Identifiers,
Parser::parse_identifier,
);
if names.is_empty() {
// test_err nonlocal_stmt_empty
// def _():
// nonlocal
self.add_error(
ParseErrorType::EmptyNonlocalNames,
self.current_token_range(),
);
}
// test_ok nonlocal_stmt
// def _():
// nonlocal x
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | true |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/parser/recovery.rs | crates/ruff_python_parser/src/parser/recovery.rs | use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast, Expr, ExprContext, Pattern};
use ruff_text_size::{Ranged, TextLen, TextRange};
/// Convert the given [`Pattern`] to an [`Expr`].
///
/// This is used to convert an invalid use of pattern to their equivalent expression
/// to preserve the structure of the pattern.
///
/// The conversion is done as follows:
/// - `PatternMatchSingleton`: Boolean and None literals
/// - `PatternMatchValue`: The value itself
/// - `PatternMatchSequence`: List literal
/// - `PatternMatchMapping`: Dictionary literal
/// - `PatternMatchClass`: Call expression
/// - `PatternMatchStar`: Starred expression
/// - `PatternMatchAs`: The pattern itself or the name
/// - `PatternMatchOr`: Binary expression with `|` operator
///
/// Note that the sequence pattern is always converted to a list literal even
/// if it was surrounded by parentheses.
///
/// # Note
///
/// This function returns an invalid [`ast::ExprName`] if the given pattern is a [`Pattern::MatchAs`]
/// with both the pattern and name present. This is because it cannot be converted to an expression
/// without dropping one of them as there's no way to represent `x as y` as a valid expression.
pub(super) fn pattern_to_expr(pattern: Pattern) -> Expr {
    match pattern {
        // `True` / `False` / `None` become the corresponding literal expression,
        // reusing the pattern's range and node index.
        Pattern::MatchSingleton(ast::PatternMatchSingleton {
            range,
            node_index,
            value,
        }) => match value {
            ast::Singleton::True => Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                value: true,
                range,
                node_index,
            }),
            ast::Singleton::False => Expr::BooleanLiteral(ast::ExprBooleanLiteral {
                value: false,
                range,
                node_index,
            }),
            ast::Singleton::None => Expr::NoneLiteral(ast::ExprNoneLiteral { range, node_index }),
        },
        // A value pattern already wraps an expression; unwrap it.
        Pattern::MatchValue(ast::PatternMatchValue { value, .. }) => *value,
        // We don't know which kind of sequence this is: `case [1, 2]:` or `case (1, 2):`.
        Pattern::MatchSequence(ast::PatternMatchSequence {
            range,
            node_index,
            patterns,
        }) => Expr::List(ast::ExprList {
            elts: patterns.into_iter().map(pattern_to_expr).collect(),
            ctx: ExprContext::Store,
            range,
            node_index,
        }),
        // `{key: pattern, **rest}` becomes a dict literal. `**rest` maps to a
        // key-less dict item, the same representation used for `**` unpacking.
        Pattern::MatchMapping(ast::PatternMatchMapping {
            range,
            node_index,
            keys,
            patterns,
            rest,
        }) => {
            let mut items: Vec<ast::DictItem> = keys
                .into_iter()
                .zip(patterns)
                .map(|(key, pattern)| ast::DictItem {
                    key: Some(key),
                    value: pattern_to_expr(pattern),
                })
                .collect();
            if let Some(rest) = rest {
                let value = Expr::Name(ast::ExprName {
                    range: rest.range,
                    // Reuses the mapping's node index: the `rest` identifier
                    // carries no expression node index of its own to take.
                    node_index: node_index.clone(),
                    id: rest.id,
                    ctx: ExprContext::Store,
                });
                items.push(ast::DictItem { key: None, value });
            }
            Expr::Dict(ast::ExprDict {
                range,
                node_index,
                items,
            })
        }
        // `Cls(a, b, kw=c)` becomes a call expression with the positional and
        // keyword sub-patterns converted recursively.
        Pattern::MatchClass(ast::PatternMatchClass {
            range,
            node_index,
            cls,
            arguments,
        }) => Expr::Call(ast::ExprCall {
            range,
            node_index: node_index.clone(),
            func: cls,
            arguments: ast::Arguments {
                range: arguments.range,
                node_index: node_index.clone(),
                args: arguments
                    .patterns
                    .into_iter()
                    .map(pattern_to_expr)
                    .collect(),
                keywords: arguments
                    .keywords
                    .into_iter()
                    .map(|keyword_pattern| ast::Keyword {
                        range: keyword_pattern.range,
                        node_index: node_index.clone(),
                        arg: Some(keyword_pattern.attr),
                        value: pattern_to_expr(keyword_pattern.pattern),
                    })
                    .collect(),
            },
        }),
        // `*name` becomes a starred name. A bare `*_` synthesizes a `_` name
        // whose range covers the final character of the star pattern.
        Pattern::MatchStar(ast::PatternMatchStar {
            range,
            node_index,
            name,
        }) => {
            if let Some(name) = name {
                Expr::Starred(ast::ExprStarred {
                    range,
                    node_index: node_index.clone(),
                    value: Box::new(Expr::Name(ast::ExprName {
                        range: name.range,
                        node_index,
                        id: name.id,
                        ctx: ExprContext::Store,
                    })),
                    ctx: ExprContext::Store,
                })
            } else {
                Expr::Starred(ast::ExprStarred {
                    range,
                    node_index: node_index.clone(),
                    value: Box::new(Expr::Name(ast::ExprName {
                        range: TextRange::new(range.end() - "_".text_len(), range.end()),
                        id: Name::new_static("_"),
                        ctx: ExprContext::Store,
                        node_index,
                    })),
                    ctx: ExprContext::Store,
                })
            }
        }
        Pattern::MatchAs(ast::PatternMatchAs {
            range,
            node_index,
            pattern,
            name,
        }) => match (pattern, name) {
            // `x as y` has no expression equivalent; return an invalid, empty
            // name (see the `# Note` section in the doc comment above).
            (Some(_), Some(_)) => Expr::Name(ast::ExprName {
                range,
                node_index,
                id: Name::empty(),
                ctx: ExprContext::Invalid,
            }),
            (Some(pattern), None) => pattern_to_expr(*pattern),
            (None, Some(name)) => Expr::Name(ast::ExprName {
                range: name.range,
                node_index,
                id: name.id,
                ctx: ExprContext::Store,
            }),
            // The bare wildcard pattern `_`.
            (None, None) => Expr::Name(ast::ExprName {
                range,
                node_index,
                id: Name::new_static("_"),
                ctx: ExprContext::Store,
            }),
        },
        // `a | b | c` folds left-associatively into nested `BitOr` binary
        // expressions: `(a | b) | c`, with ranges growing to cover each step.
        Pattern::MatchOr(ast::PatternMatchOr {
            patterns,
            node_index,
            ..
        }) => {
            let to_bin_expr = |left: Pattern, right: Pattern| ast::ExprBinOp {
                range: TextRange::new(left.start(), right.end()),
                left: Box::new(pattern_to_expr(left)),
                op: ast::Operator::BitOr,
                right: Box::new(pattern_to_expr(right)),
                node_index: node_index.clone(),
            };
            let mut iter = patterns.into_iter();
            match (iter.next(), iter.next()) {
                (Some(left), Some(right)) => {
                    Expr::BinOp(iter.fold(to_bin_expr(left, right), |expr_bin_op, pattern| {
                        ast::ExprBinOp {
                            range: TextRange::new(expr_bin_op.start(), pattern.end()),
                            left: Box::new(Expr::BinOp(expr_bin_op)),
                            op: ast::Operator::BitOr,
                            right: Box::new(pattern_to_expr(pattern)),
                            node_index: node_index.clone(),
                        }
                    }))
                }
                _ => unreachable!("Or patterns can only be formed with at least two patterns."),
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/lexer/cursor.rs | crates/ruff_python_parser/src/lexer/cursor.rs | use std::str::Chars;
use ruff_text_size::{TextLen, TextSize};
/// Sentinel returned by the peeking methods ([`Cursor::first`], [`Cursor::second`])
/// when the cursor is at (or past) the end of the file.
pub(crate) const EOF_CHAR: char = '\0';
/// A cursor represents a pointer in the source code.
///
/// Based on [`rustc`'s `Cursor`](https://github.com/rust-lang/rust/blob/d1b7355d3d7b4ead564dbecb1d240fcc74fff21b/compiler/rustc_lexer/src/cursor.rs)
#[derive(Clone, Debug)]
pub(super) struct Cursor<'src> {
    /// An iterator over the [`char`]'s of the source code.
    chars: Chars<'src>,
    /// Length of the source code. This is used as a marker to indicate the start of the current
    /// token which is being lexed.
    source_length: TextSize,
    /// Stores the previous character for debug assertions.
    #[cfg(debug_assertions)]
    prev_char: char,
}
impl<'src> Cursor<'src> {
    /// Creates a cursor positioned at the start of `source`.
    pub(crate) fn new(source: &'src str) -> Self {
        Self {
            source_length: source.text_len(),
            chars: source.chars(),
            #[cfg(debug_assertions)]
            prev_char: EOF_CHAR,
        }
    }
    /// Returns the previous character. Useful for debug assertions.
    #[cfg(debug_assertions)]
    pub(super) const fn previous(&self) -> char {
        self.prev_char
    }
    /// Peeks the next character from the input stream without consuming it.
    /// Returns [`EOF_CHAR`] if the position is past the end of the file.
    pub(super) fn first(&self) -> char {
        self.chars.clone().next().unwrap_or(EOF_CHAR)
    }
    /// Peeks the second character from the input stream without consuming it.
    /// Returns [`EOF_CHAR`] if the position is past the end of the file.
    pub(super) fn second(&self) -> char {
        let mut chars = self.chars.clone();
        chars.next();
        chars.next().unwrap_or(EOF_CHAR)
    }
    /// Returns the remaining text to lex.
    ///
    /// Use [`Cursor::text_len`] to get the length of the remaining text.
    pub(super) fn rest(&self) -> &'src str {
        self.chars.as_str()
    }
    /// Returns the length of the remaining text.
    ///
    /// Use [`Cursor::rest`] to get the remaining text.
    // SAFETY: The `source.text_len` call in `new` would panic if the string length is larger than a `u32`.
    #[expect(clippy::cast_possible_truncation)]
    pub(super) fn text_len(&self) -> TextSize {
        TextSize::new(self.chars.as_str().len() as u32)
    }
    /// Returns the length of the current token.
    ///
    /// This is to be used after setting the start position of the token using
    /// [`Cursor::start_token`].
    pub(super) fn token_len(&self) -> TextSize {
        self.source_length - self.text_len()
    }
    /// Mark the current position of the cursor as the start of the token which is going to be
    /// lexed.
    ///
    /// Use [`Cursor::token_len`] to get the length of the lexed token.
    pub(super) fn start_token(&mut self) {
        self.source_length = self.text_len();
    }
    /// Returns `true` if the cursor is at the end of file.
    pub(super) fn is_eof(&self) -> bool {
        self.chars.as_str().is_empty()
    }
    /// Moves the cursor to the next character, returning the previous character.
    /// Returns [`None`] if there is no next character.
    pub(super) fn bump(&mut self) -> Option<char> {
        let prev = self.chars.next()?;
        #[cfg(debug_assertions)]
        {
            self.prev_char = prev;
        }
        Some(prev)
    }
    /// Consumes the next character if it is equal to `c` and returns `true`;
    /// otherwise leaves the cursor unchanged and returns `false`.
    pub(super) fn eat_char(&mut self, c: char) -> bool {
        if self.first() == c {
            self.bump();
            true
        } else {
            false
        }
    }
    /// Consumes the next two characters if they are `c1` followed by `c2`.
    /// Either both characters are consumed or none.
    pub(super) fn eat_char2(&mut self, c1: char, c2: char) -> bool {
        // Look ahead on a clone so nothing is consumed on a partial match.
        let mut chars = self.chars.clone();
        if chars.next() == Some(c1) && chars.next() == Some(c2) {
            self.bump();
            self.bump();
            true
        } else {
            false
        }
    }
    /// Consumes the next three characters if they are `c1`, `c2`, `c3` in order.
    /// Either all three characters are consumed or none.
    pub(super) fn eat_char3(&mut self, c1: char, c2: char, c3: char) -> bool {
        let mut chars = self.chars.clone();
        if chars.next() == Some(c1) && chars.next() == Some(c2) && chars.next() == Some(c3) {
            self.bump();
            self.bump();
            self.bump();
            true
        } else {
            false
        }
    }
    /// Consumes the next character if `predicate` accepts it, returning the
    /// consumed character.
    ///
    /// At end of file the predicate is called with the [`EOF_CHAR`] sentinel;
    /// the `!self.is_eof()` guard ensures nothing is consumed even if the
    /// predicate accepts `'\0'`.
    pub(super) fn eat_if<F>(&mut self, mut predicate: F) -> Option<char>
    where
        F: FnMut(char) -> bool,
    {
        if predicate(self.first()) && !self.is_eof() {
            self.bump()
        } else {
            None
        }
    }
    /// Eats symbols while predicate returns true or until the end of file is reached.
    #[inline]
    pub(super) fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) {
        // It was tried making optimized version of this for eg. line comments, but
        // LLVM can inline all of this and compile it down to fast iteration over bytes.
        while predicate(self.first()) && !self.is_eof() {
            self.bump();
        }
    }
    /// Skips the next `count` bytes.
    ///
    /// ## Panics
    /// - If `count` is larger than the remaining bytes in the input stream.
    /// - If `count` indexes into a multi-byte character.
    pub(super) fn skip_bytes(&mut self, count: usize) {
        #[cfg(debug_assertions)]
        {
            // Keep `previous` in sync: the last character of the skipped prefix.
            self.prev_char = self.chars.as_str()[..count]
                .chars()
                .next_back()
                .unwrap_or('\0');
        }
        self.chars = self.chars.as_str()[count..].chars();
    }
    /// Skips to the end of the input stream.
    pub(super) fn skip_to_end(&mut self) {
        self.chars = "".chars();
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/lexer/interpolated_string.rs | crates/ruff_python_parser/src/lexer/interpolated_string.rs | use ruff_python_ast::StringFlags;
use crate::string::InterpolatedStringKind;
use super::TokenFlags;
/// The context representing the current f-string or t-string that the lexer is in.
#[derive(Clone, Debug)]
pub(crate) struct InterpolatedStringContext {
    /// Flags of the string token that opened this f/t-string. [`Self::new`]
    /// guarantees the f-string or t-string flag is set.
    flags: TokenFlags,
    /// The level of nesting for the lexer when it entered the current f/t-string.
    /// The nesting level includes all kinds of parentheses i.e., round, square,
    /// and curly.
    nesting: u32,
    /// The current depth of format spec for the current f/t-string. This is because
    /// there can be multiple format specs nested for the same f-string.
    /// For example, `{a:{b:{c}}}` has 3 format specs.
    format_spec_depth: u32,
}
impl InterpolatedStringContext {
    /// Creates a new context, or `None` if `flags` doesn't mark an
    /// f-string/t-string token.
    pub(crate) const fn new(flags: TokenFlags, nesting: u32) -> Option<Self> {
        if flags.is_interpolated_string() {
            Some(Self {
                flags,
                nesting,
                format_spec_depth: 0,
            })
        } else {
            None
        }
    }
    /// Returns whether this context is for an f-string or a t-string.
    pub(crate) fn kind(&self) -> InterpolatedStringKind {
        if self.flags.is_f_string() {
            InterpolatedStringKind::FString
        } else if self.flags.is_t_string() {
            InterpolatedStringKind::TString
        } else {
            unreachable!("Can only be constructed when f-string or t-string flag is present")
        }
    }
    /// The flags of the opening string token.
    pub(crate) const fn flags(&self) -> TokenFlags {
        self.flags
    }
    /// The lexer's parenthesis-nesting level at the time this f/t-string was entered.
    pub(crate) const fn nesting(&self) -> u32 {
        self.nesting
    }
    /// Returns the quote character for the current f-string.
    pub(crate) fn quote_char(&self) -> char {
        self.flags.quote_style().as_char()
    }
    /// Returns the triple quotes for the current f-string if it is a triple-quoted
    /// f-string, `None` otherwise.
    pub(crate) fn triple_quotes(&self) -> Option<&'static str> {
        if self.is_triple_quoted() {
            Some(self.flags.quote_str())
        } else {
            None
        }
    }
    /// Returns `true` if the current f-string is a raw f-string.
    pub(crate) fn is_raw_string(&self) -> bool {
        self.flags.is_raw_string()
    }
    /// Returns `true` if the current f-string is a triple-quoted f-string.
    pub(crate) fn is_triple_quoted(&self) -> bool {
        self.flags.is_triple_quoted()
    }
    /// Calculates the number of open parentheses for the current f-string
    /// based on the current level of nesting for the lexer.
    const fn open_parentheses_count(&self, current_nesting: u32) -> u32 {
        current_nesting.saturating_sub(self.nesting)
    }
    /// Returns `true` if the lexer is in an f-string expression or t-string interpolation i.e., between
    /// two curly braces.
    pub(crate) const fn is_in_interpolation(&self, current_nesting: u32) -> bool {
        self.open_parentheses_count(current_nesting) > self.format_spec_depth
    }
    /// Returns `true` if the lexer is in a f-string format spec i.e., after a colon.
    pub(crate) const fn is_in_format_spec(&self, current_nesting: u32) -> bool {
        self.format_spec_depth > 0 && !self.is_in_interpolation(current_nesting)
    }
    /// Returns `true` if the context is in a valid position to start format spec
    /// i.e., at the same level of nesting as the opening parentheses token.
    /// Increments the format spec depth if it is.
    ///
    /// This assumes that the current character for the lexer is a colon (`:`).
    pub(crate) fn try_start_format_spec(&mut self, current_nesting: u32) -> bool {
        // Exactly one open parenthesis beyond the already-entered format specs
        // means the colon belongs to the interpolation's `{...}` itself.
        if self
            .open_parentheses_count(current_nesting)
            .saturating_sub(self.format_spec_depth)
            == 1
        {
            self.format_spec_depth += 1;
            true
        } else {
            false
        }
    }
    /// Decrements the format spec depth if the current f-string is in a format
    /// spec.
    pub(crate) fn try_end_format_spec(&mut self, current_nesting: u32) {
        if self.is_in_format_spec(current_nesting) {
            self.format_spec_depth = self.format_spec_depth.saturating_sub(1);
        }
    }
}
/// The interpolated strings stack is used to keep track of all the f-strings and t-strings that the
/// lexer encounters. This is necessary because f-strings and t-strings can be nested.
#[derive(Debug, Default)]
pub(crate) struct InterpolatedStrings {
    stack: Vec<InterpolatedStringContext>,
}
impl InterpolatedStrings {
    /// Enters a new (possibly nested) f/t-string.
    pub(crate) fn push(&mut self, context: InterpolatedStringContext) {
        self.stack.push(context);
    }
    /// Leaves the innermost f/t-string, returning its context (if any).
    pub(crate) fn pop(&mut self) -> Option<InterpolatedStringContext> {
        self.stack.pop()
    }
    /// The innermost f/t-string, if the lexer is currently inside one.
    pub(crate) fn current(&self) -> Option<&InterpolatedStringContext> {
        match self.stack.as_slice() {
            [] => None,
            [.., innermost] => Some(innermost),
        }
    }
    /// Mutable access to the innermost f/t-string.
    pub(crate) fn current_mut(&mut self) -> Option<&mut InterpolatedStringContext> {
        match self.stack.as_mut_slice() {
            [] => None,
            [.., innermost] => Some(innermost),
        }
    }
    /// Records the full stack so it can later be restored with [`Self::rewind`].
    pub(crate) fn checkpoint(&self) -> InterpolatedStringsCheckpoint {
        let snapshot = self.stack.clone();
        InterpolatedStringsCheckpoint(snapshot)
    }
    /// Restores the stack recorded by [`Self::checkpoint`].
    pub(crate) fn rewind(&mut self, checkpoint: InterpolatedStringsCheckpoint) {
        let InterpolatedStringsCheckpoint(snapshot) = checkpoint;
        self.stack = snapshot;
    }
}
/// An opaque snapshot of the [`InterpolatedStrings`] stack.
#[derive(Debug, Clone)]
pub(crate) struct InterpolatedStringsCheckpoint(Vec<InterpolatedStringContext>);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/src/lexer/indentation.rs | crates/ruff_python_parser/src/lexer/indentation.rs | use static_assertions::assert_eq_size;
use std::cmp::Ordering;
use std::fmt::Debug;
/// The column index of an indentation.
///
/// A space increments the column by one. A tab adds up to 2 (if tab size is 2) indices, but just one
/// if the column isn't even.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)]
pub(super) struct Column(u32);
impl Column {
    /// Creates a column index.
    pub(super) const fn new(column: u32) -> Self {
        Self(column)
    }
}
/// The number of characters in an indentation. Each character accounts for 1.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)]
pub(super) struct Character(u32);
impl Character {
    /// Creates a character count.
    pub(super) const fn new(characters: u32) -> Self {
        Self(characters)
    }
}
/// The [Indentation](https://docs.python.org/3/reference/lexical_analysis.html#indentation) of a logical line.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
pub(super) struct Indentation {
    // Width with tabs expanded to `TAB_SIZE`; compared together with
    // `character` to detect ambiguous tab/space mixtures.
    column: Column,
    // Raw number of indentation characters, each counting 1.
    character: Character,
}
impl Indentation {
    // Tab size used when expanding tabs to columns (see `add_tab`).
    const TAB_SIZE: u32 = 2;
    /// The zero-width indentation at the start of a module-level logical line.
    pub(super) const fn root() -> Self {
        Self {
            column: Column::new(0),
            character: Character::new(0),
        }
    }
    #[cfg(test)]
    pub(super) const fn new(column: Column, character: Character) -> Self {
        Self { column, character }
    }
    /// Returns this indentation extended by a single space.
    #[must_use]
    pub(super) fn add_space(self) -> Self {
        Self {
            character: Character(self.character.0 + 1),
            column: Column(self.column.0 + 1),
        }
    }
    /// Returns this indentation extended by a single tab.
    #[must_use]
    pub(super) fn add_tab(self) -> Self {
        Self {
            character: Character(self.character.0 + 1),
            // Compute the column index:
            // * Adds `TAB_SIZE` if `column` is a multiple of `TAB_SIZE`
            // * Rounds `column` up to the next multiple of `TAB_SIZE` otherwise.
            // https://github.com/python/cpython/blob/2cf99026d6320f38937257da1ab014fc873a11a6/Parser/tokenizer.c#L1818
            column: Column((self.column.0 / Self::TAB_SIZE + 1) * Self::TAB_SIZE),
        }
    }
    /// Compares two indentations.
    ///
    /// Returns `Err(UnexpectedIndentation)` when the two are incomparable: the
    /// column ordering and the character ordering disagree, which happens for
    /// inconsistent tab/space mixtures (e.g. one tab vs. two spaces: equal
    /// columns but fewer characters).
    pub(super) fn try_compare(self, other: Indentation) -> Result<Ordering, UnexpectedIndentation> {
        let column_ordering = self.column.cmp(&other.column);
        let character_ordering = self.character.cmp(&other.character);
        if column_ordering == character_ordering {
            Ok(column_ordering)
        } else {
            Err(UnexpectedIndentation)
        }
    }
}
/// Error returned by [`Indentation::try_compare`] (and propagated by the
/// indentation stack) when two indentations cannot be ordered consistently.
#[derive(Debug, Copy, Clone, PartialEq)]
pub(super) struct UnexpectedIndentation;
/// The indentations stack is used to keep track of the current indentation level
/// [See Indentation](docs.python.org/3/reference/lexical_analysis.html#indentation).
#[derive(Debug, Clone, Default)]
pub(super) struct Indentations {
    stack: Vec<Indentation>,
}
impl Indentations {
    /// Pushes a new, strictly deeper indentation level.
    pub(super) fn indent(&mut self, indent: Indentation) {
        debug_assert_eq!(self.current().try_compare(indent), Ok(Ordering::Less));
        self.stack.push(indent);
    }
    /// Dedent one level to eventually reach `new_indentation`.
    ///
    /// Returns `Err` if the `new_indentation` is greater than the new current indentation level.
    pub(super) fn dedent_one(
        &mut self,
        new_indentation: Indentation,
    ) -> Result<Option<Indentation>, UnexpectedIndentation> {
        let popped = self.dedent();
        let ordering = new_indentation.try_compare(*self.current())?;
        if ordering == Ordering::Greater {
            // ```python
            // if True:
            //     pass
            //   pass <- The indentation is greater than the expected indent of 0.
            // ```
            Err(UnexpectedIndentation)
        } else {
            Ok(popped)
        }
    }
    /// Pops the innermost indentation level, if any.
    pub(super) fn dedent(&mut self) -> Option<Indentation> {
        self.stack.pop()
    }
    /// The innermost indentation level, or the zero-width root indentation when
    /// the stack is empty.
    pub(super) fn current(&self) -> &Indentation {
        static ROOT: Indentation = Indentation::root();
        match self.stack.last() {
            Some(indentation) => indentation,
            None => &ROOT,
        }
    }
    /// Records the current stack so it can later be restored with [`Self::rewind`].
    pub(crate) fn checkpoint(&self) -> IndentationsCheckpoint {
        IndentationsCheckpoint(self.stack.clone())
    }
    /// Restores the stack recorded by [`Self::checkpoint`].
    pub(crate) fn rewind(&mut self, checkpoint: IndentationsCheckpoint) {
        let IndentationsCheckpoint(stack) = checkpoint;
        self.stack = stack;
    }
}
/// An opaque snapshot of the [`Indentations`] stack.
#[derive(Debug, Clone)]
pub(crate) struct IndentationsCheckpoint(Vec<Indentation>);
assert_eq_size!(Indentation, u64);
#[cfg(test)]
mod tests {
    use super::{Character, Column, Indentation};
    use std::cmp::Ordering;
    // Indentations with consistent tab usage compare by both column and
    // character count, so `try_compare` succeeds.
    #[test]
    fn indentation_try_compare() {
        let tab = Indentation::new(Column::new(8), Character::new(1));
        assert_eq!(tab.try_compare(tab), Ok(Ordering::Equal));
        let two_tabs = Indentation::new(Column::new(16), Character::new(2));
        assert_eq!(two_tabs.try_compare(tab), Ok(Ordering::Greater));
        assert_eq!(tab.try_compare(two_tabs), Ok(Ordering::Less));
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/tests/fixtures.rs | crates/ruff_python_parser/tests/fixtures.rs | use std::cell::RefCell;
use std::cmp::Ordering;
use std::fmt::{Formatter, Write};
use datatest_stable::Utf8Path;
use itertools::Itertools;
use ruff_annotate_snippets::{Level, Renderer, Snippet};
use ruff_python_ast::token::{Token, Tokens};
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal, walk_module};
use ruff_python_ast::{self as ast, AnyNodeRef, Mod, PythonVersion};
use ruff_python_parser::semantic_errors::{
SemanticSyntaxChecker, SemanticSyntaxContext, SemanticSyntaxError,
};
use ruff_python_parser::{Mode, ParseErrorType, ParseOptions, Parsed, parse_unchecked};
use ruff_source_file::{LineIndex, OneIndexed, SourceCode};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
// Thin adapters between the `datatest_stable` harness signature and the shared
// test drivers below — one per fixture directory.
#[expect(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
fn valid_syntax(path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    test_valid_syntax(path, &content, "./resources/valid");
    Ok(())
}
#[expect(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
fn invalid_syntax(path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    test_invalid_syntax(path, &content, "./resources/invalid");
    Ok(())
}
#[expect(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
fn inline_ok(path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    test_valid_syntax(path, &content, "./resources/inline/ok");
    Ok(())
}
#[expect(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
fn inline_err(path: &Utf8Path, content: String) -> datatest_stable::Result<()> {
    test_invalid_syntax(path, &content, "./resources/inline/err");
    Ok(())
}
// Registers one data-driven test per `.py`/`.pyi` file under each root directory.
datatest_stable::harness! {
    { test = valid_syntax, root = "./resources/valid", pattern = r"\.pyi?$" },
    { test = inline_ok, root = "./resources/inline/ok", pattern = r"\.pyi?$" },
    { test = invalid_syntax, root = "./resources/invalid", pattern = r"\.pyi?$" },
    { test = inline_err, root="./resources/inline/err", pattern = r"\.pyi?$" }
}
/// Asserts that the parser generates no syntax errors for a valid program.
/// Snapshots the AST.
fn test_valid_syntax(input_path: &Utf8Path, source: &str, root: &str) {
    // Snapshot name is the fixture path relative to `root`.
    let test_name = input_path.strip_prefix(root).unwrap_or(input_path).as_str();
    // An inline `# parse_options:` pragma in the fixture takes precedence over
    // the default options (module mode, latest preview Python version).
    let options = extract_options(source).unwrap_or_else(|| {
        ParseOptions::from(Mode::Module).with_target_version(PythonVersion::latest_preview())
    });
    let parsed = parse_unchecked(source, options.clone());
    // Any parse or unsupported-syntax error fails the test; render each one as
    // a code frame first so the failure message is readable.
    if parsed.has_syntax_errors() {
        let line_index = LineIndex::from_source_text(source);
        let source_code = SourceCode::new(source, &line_index);
        let mut message = "Expected no syntax errors for a valid program but the parser generated the following errors:\n".to_string();
        for error in parsed.errors() {
            writeln!(
                &mut message,
                "{}\n",
                CodeFrame {
                    range: error.location,
                    error,
                    source_code: &source_code,
                }
            )
            .unwrap();
        }
        for error in parsed.unsupported_syntax_errors() {
            writeln!(
                &mut message,
                "{}\n",
                CodeFrame {
                    range: error.range,
                    error: &ParseErrorType::OtherError(error.to_string()),
                    source_code: &source_code,
                }
            )
            .unwrap();
        }
        panic!("{input_path:?}: {message}");
    }
    // Structural invariants on the token stream and the AST itself.
    validate_tokens(parsed.tokens(), source.text_len());
    validate_ast(&parsed, source.text_len());
    let mut output = String::new();
    writeln!(&mut output, "## AST").unwrap();
    writeln!(&mut output, "\n```\n{:#?}\n```", parsed.syntax()).unwrap();
    let parsed = parsed.try_into_module().expect("Parsed with Mode::Module");
    // A valid program must also be free of semantic syntax errors.
    let mut visitor =
        SemanticSyntaxCheckerVisitor::new(source).with_python_version(options.target_version());
    for stmt in parsed.suite() {
        visitor.visit_stmt(stmt);
    }
    let semantic_syntax_errors = visitor.into_diagnostics();
    if !semantic_syntax_errors.is_empty() {
        let mut message = "Expected no semantic syntax errors for a valid program:\n".to_string();
        let line_index = LineIndex::from_source_text(source);
        let source_code = SourceCode::new(source, &line_index);
        for error in semantic_syntax_errors {
            writeln!(
                &mut message,
                "{}\n",
                CodeFrame {
                    range: error.range,
                    error: &ParseErrorType::OtherError(error.to_string()),
                    source_code: &source_code,
                }
            )
            .unwrap();
        }
        panic!("{input_path:?}: {message}");
    }
    insta::with_settings!({
        omit_expression => true,
        input_file => input_path,
        prepend_module_to_snapshot => false,
        snapshot_suffix => test_name
    }, {
        insta::assert_snapshot!(output);
    });
}
/// Assert that the parser generates at least one syntax error for the given input file.
/// Snapshots the AST and the error messages.
fn test_invalid_syntax(input_path: &Utf8Path, source: &str, root: &str) {
    // Snapshot name is the fixture path relative to `root`.
    let test_name = input_path.strip_prefix(root).unwrap_or(input_path).as_str();
    // An inline `# parse_options:` pragma takes precedence over the defaults
    // (module mode, Python 3.14).
    let options = extract_options(source).unwrap_or_else(|| {
        ParseOptions::from(Mode::Module).with_target_version(PythonVersion::PY314)
    });
    let parsed = parse_unchecked(source, options.clone());
    validate_tokens(parsed.tokens(), source.text_len());
    validate_ast(&parsed, source.text_len());
    let mut output = String::new();
    writeln!(&mut output, "## AST").unwrap();
    writeln!(&mut output, "\n```\n{:#?}\n```", parsed.syntax()).unwrap();
    let line_index = LineIndex::from_source_text(source);
    let source_code = SourceCode::new(source, &line_index);
    // Unlike `test_valid_syntax`, errors don't fail the test here — they become
    // part of the snapshot, grouped under per-kind headings.
    if !parsed.errors().is_empty() {
        writeln!(&mut output, "## Errors\n").unwrap();
    }
    for error in parsed.errors() {
        writeln!(
            &mut output,
            "{}\n",
            CodeFrame {
                range: error.location,
                error,
                source_code: &source_code,
            }
        )
        .unwrap();
    }
    if !parsed.unsupported_syntax_errors().is_empty() {
        writeln!(&mut output, "## Unsupported Syntax Errors\n").unwrap();
    }
    for error in parsed.unsupported_syntax_errors() {
        writeln!(
            &mut output,
            "{}\n",
            CodeFrame {
                range: error.range,
                error: &ParseErrorType::OtherError(error.to_string()),
                source_code: &source_code,
            }
        )
        .unwrap();
    }
    let parsed = parsed.try_into_module().expect("Parsed with Mode::Module");
    let mut visitor =
        SemanticSyntaxCheckerVisitor::new(source).with_python_version(options.target_version());
    for stmt in parsed.suite() {
        visitor.visit_stmt(stmt);
    }
    let semantic_syntax_errors = visitor.into_diagnostics();
    // The fixture must produce at least one error of either kind (parse-time
    // or semantic) to qualify as "invalid".
    assert!(
        parsed.has_syntax_errors() || !semantic_syntax_errors.is_empty(),
        "Expected parser to generate at least one syntax error for a program containing syntax errors."
    );
    if !semantic_syntax_errors.is_empty() {
        writeln!(&mut output, "## Semantic Syntax Errors\n").unwrap();
    }
    for error in semantic_syntax_errors {
        writeln!(
            &mut output,
            "{}\n",
            CodeFrame {
                range: error.range,
                error: &ParseErrorType::OtherError(error.to_string()),
                source_code: &source_code,
            }
        )
        .unwrap();
    }
    insta::with_settings!({
        omit_expression => true,
        input_file => input_path,
        prepend_module_to_snapshot => false,
        snapshot_suffix => test_name
    }, {
        insta::assert_snapshot!(output);
    });
}
/// Copy of [`ParseOptions`] for deriving [`Deserialize`] with serde as a dev-dependency.
#[derive(serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct JsonParseOptions {
    // Defaults to `JsonMode::Module` when the pragma omits `"mode"`.
    #[serde(default)]
    mode: JsonMode,
    // Defaults to `PythonVersion::default()` when the pragma omits `"target-version"`.
    #[serde(default)]
    target_version: PythonVersion,
}
/// Copy of [`Mode`] for deserialization.
#[derive(Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
enum JsonMode {
    #[default]
    Module,
    Expression,
    ParenthesizedExpression,
    Ipython,
}
impl From<JsonParseOptions> for ParseOptions {
    fn from(value: JsonParseOptions) -> Self {
        // Map each JSON mode 1:1 onto the parser's `Mode`.
        let mode = match value.mode {
            JsonMode::Module => Mode::Module,
            JsonMode::Expression => Mode::Expression,
            JsonMode::ParenthesizedExpression => Mode::ParenthesizedExpression,
            JsonMode::Ipython => Mode::Ipython,
        };
        Self::from(mode).with_target_version(value.target_version)
    }
}
/// Extract [`ParseOptions`] from an initial pragma line, if present.
///
/// For example,
///
/// ```python
/// # parse_options: { "target-version": "3.10" }
/// def f(): ...
/// ```
fn extract_options(source: &str) -> Option<ParseOptions> {
    // Only the very first line is considered; everything up to and including
    // "# parse_options: " is discarded and the remainder is parsed as JSON.
    let header = source.lines().next()?;
    let (_label, options) = header.split_once("# parse_options: ")?;
    // Malformed JSON is silently ignored (caller falls back to its defaults).
    let options: Option<JsonParseOptions> = serde_json::from_str(options.trim()).ok();
    options.map(ParseOptions::from)
}
// Test that is intentionally ignored by default.
// Use it for quickly debugging a parser issue.
#[test]
#[ignore]
#[expect(clippy::print_stdout)]
fn parser_quick_test() {
    // Edit this snippet to reproduce the parser behavior under investigation.
    let source = "\
f'{'
f'{foo!r'
";
    let parsed = parse_unchecked(source, ParseOptions::from(Mode::Module));
    println!("AST:\n----\n{:#?}", parsed.syntax());
    println!("Tokens:\n-------\n{:#?}", parsed.tokens());
    if parsed.has_invalid_syntax() {
        println!("Errors:\n-------");
        let line_index = LineIndex::from_source_text(source);
        let source_code = SourceCode::new(source, &line_index);
        for error in parsed.errors() {
            // Sometimes the code frame doesn't show the error message, so we print
            // the message as well.
            println!("Syntax Error: {error}");
            println!(
                "{}\n",
                CodeFrame {
                    range: error.location,
                    error,
                    source_code: &source_code,
                }
            );
        }
        println!();
    }
}
/// A snippet of source code with a single syntax error annotated on it,
/// rendered through `ruff_annotate_snippets` via its `Display` impl.
struct CodeFrame<'a> {
    range: TextRange,
    error: &'a ParseErrorType,
    source_code: &'a SourceCode<'a, 'a>,
}
impl std::fmt::Display for CodeFrame<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Copied and modified from ruff_linter/src/message/text.rs
        // Show up to two context lines before the error...
        let content_start_index = self.source_code.line_index(self.range.start());
        let mut start_index = content_start_index.saturating_sub(2);
        // Trim leading empty lines.
        while start_index < content_start_index {
            if !self.source_code.line_text(start_index).trim().is_empty() {
                break;
            }
            start_index = start_index.saturating_add(1);
        }
        // ...and up to two context lines after it, clamped to the last line.
        let content_end_index = self.source_code.line_index(self.range.end());
        let mut end_index = content_end_index
            .saturating_add(2)
            .min(OneIndexed::from_zero_indexed(self.source_code.line_count()));
        // Trim trailing empty lines.
        while end_index > content_end_index {
            if !self.source_code.line_text(end_index).trim().is_empty() {
                break;
            }
            end_index = end_index.saturating_sub(1);
        }
        let start_offset = self.source_code.line_start(start_index);
        let end_offset = self.source_code.line_end(end_index);
        // The annotation span is relative to the start of the rendered snippet.
        let annotation_range = self.range - start_offset;
        let source = self
            .source_code
            .slice(TextRange::new(start_offset, end_offset));
        let label = format!("Syntax Error: {error}", error = self.error);
        let span = usize::from(annotation_range.start())..usize::from(annotation_range.end());
        let annotation = Level::Error.span(span).label(&label);
        let snippet = Snippet::source(source)
            .line_start(start_index.get())
            .annotation(annotation)
            .fold(false);
        let message = Level::None.title("").snippet(snippet);
        let renderer = Renderer::plain().cut_indicator("…");
        let rendered = renderer.render(message);
        writeln!(f, "{rendered}")
    }
}
/// Verifies that:
/// * the ranges are strictly increasing when looping over the tokens in insertion order
/// * all ranges are within the length of the source code
fn validate_tokens(tokens: &[Token], source_length: TextSize) {
    let mut previous: Option<&Token> = None;
    for token in tokens {
        assert!(
            token.end() <= source_length,
            "Token range exceeds the source code length. Token: {token:#?}",
        );
        // Consecutive tokens must be strictly ordered by their ranges.
        if let Some(previous) = previous {
            assert_eq!(
                previous.range().ordering(token.range()),
                Ordering::Less,
                "Token ranges are not in increasing order
Previous token: {previous:#?}
Current token: {token:#?}
Tokens: {tokens:#?}
",
            );
        }
        previous = Some(token);
    }
}
/// Verifies that:
/// * the range of the parent node fully encloses all its child nodes
/// * the ranges are strictly increasing when traversing the nodes in pre-order.
/// * all ranges are within the length of the source code.
fn validate_ast(parsed: &Parsed<Mod>, source_len: TextSize) {
    walk_module(
        &mut ValidateAstVisitor::new(parsed.tokens(), source_len),
        parsed.syntax(),
    );
}
/// Pre-order visitor backing [`validate_ast`].
#[derive(Debug)]
struct ValidateAstVisitor<'a> {
    // Token stream consumed in lockstep with the traversal to check that node
    // boundaries never fall inside a token.
    tokens: std::iter::Peekable<std::slice::Iter<'a, Token>>,
    // Ancestor stack; `parents.first()` is the root node, shown in panic messages.
    parents: Vec<AnyNodeRef<'a>>,
    // Most recently left node, used for the pre-order ordering check.
    previous: Option<AnyNodeRef<'a>>,
    source_length: TextSize,
}
impl<'a> ValidateAstVisitor<'a> {
    fn new(tokens: &'a Tokens, source_length: TextSize) -> Self {
        Self {
            tokens: tokens.iter().peekable(),
            parents: Vec::new(),
            previous: None,
            source_length,
        }
    }
}
impl ValidateAstVisitor<'_> {
    /// Check that the node's start doesn't fall within a token.
    /// Called in `enter_node` before visiting children.
    fn assert_start_boundary(&mut self, node: AnyNodeRef<'_>) {
        // Skip tokens that end at or before the node starts.
        // `peeking_take_while` (itertools) consumes matching tokens but leaves
        // the first non-matching one peekable rather than consumed.
        self.tokens
            .peeking_take_while(|t| t.end() <= node.start())
            .last();
        if let Some(next) = self.tokens.peek() {
            // At this point, next_token.end() > node.start()
            assert!(
                next.start() >= node.start(),
                "The start of the node falls within a token.\nNode: {node:#?}\n\nToken: {next:#?}\n\nRoot: {root:#?}",
                root = self.parents.first()
            );
        }
    }
    /// Check that the node's end doesn't fall within a token.
    /// Called in `leave_node` after visiting children, so all tokens
    /// within the node have been consumed.
    fn assert_end_boundary(&mut self, node: AnyNodeRef<'_>) {
        // Skip tokens that end at or before the node ends.
        self.tokens
            .peeking_take_while(|t| t.end() <= node.end())
            .last();
        if let Some(next) = self.tokens.peek() {
            // At this point, `next_token.end() > node.end()`
            assert!(
                next.start() >= node.end(),
                "The end of the node falls within a token.\nNode: {node:#?}\n\nToken: {next:#?}\n\nRoot: {root:#?}",
                root = self.parents.first()
            );
        }
    }
}
impl<'ast> SourceOrderVisitor<'ast> for ValidateAstVisitor<'ast> {
    fn enter_node(&mut self, node: AnyNodeRef<'ast>) -> TraversalSignal {
        // Invariant 1: every node lies within the source text.
        assert!(
            node.end() <= self.source_length,
            "The range of the node exceeds the length of the source code. Node: {node:#?}",
        );
        // Invariant 2: pre-order traversal never moves backwards.
        if let Some(previous) = self.previous {
            assert_ne!(
                previous.range().ordering(node.range()),
                Ordering::Greater,
                "The ranges of the nodes are not strictly increasing when traversing the AST in pre-order.\nPrevious node: {previous:#?}\n\nCurrent node: {node:#?}\n\nRoot: {root:#?}",
                root = self.parents.first()
            );
        }
        // Invariant 3: a parent's range encloses each of its children's ranges.
        if let Some(parent) = self.parents.last() {
            assert!(
                parent.range().contains_range(node.range()),
                "The range of the parent node does not fully enclose the range of the child node.\nParent node: {parent:#?}\n\nChild node: {node:#?}\n\nRoot: {root:#?}",
                root = self.parents.first()
            );
        }
        // Invariant 4: node boundaries don't split tokens.
        self.assert_start_boundary(node);
        self.parents.push(node);
        TraversalSignal::Traverse
    }
    fn leave_node(&mut self, node: AnyNodeRef<'ast>) {
        self.assert_end_boundary(node);
        self.parents.pop().expect("Expected tree to be balanced");
        self.previous = Some(node);
    }
}
/// The kind of binding scope the semantic checker is currently inside of.
enum Scope {
    Module,
    Function { is_async: bool },
    Comprehension { is_async: bool },
    Class,
}
/// Drives a [`SemanticSyntaxChecker`] over an AST, collecting reported errors.
struct SemanticSyntaxCheckerVisitor<'a> {
    checker: SemanticSyntaxChecker,
    // `RefCell` because `report_semantic_error` takes `&self` (per the
    // `SemanticSyntaxContext` trait) but must record the error.
    diagnostics: RefCell<Vec<SemanticSyntaxError>>,
    python_version: PythonVersion,
    source: &'a str,
    // Stack of enclosing scopes; index 0 is always `Scope::Module`.
    scopes: Vec<Scope>,
}
impl<'a> SemanticSyntaxCheckerVisitor<'a> {
    fn new(source: &'a str) -> Self {
        Self {
            checker: SemanticSyntaxChecker::new(),
            diagnostics: RefCell::default(),
            python_version: PythonVersion::default(),
            source,
            scopes: vec![Scope::Module],
        }
    }
    /// Sets the target Python version used by version-dependent checks.
    #[must_use]
    fn with_python_version(mut self, python_version: PythonVersion) -> Self {
        self.python_version = python_version;
        self
    }
    /// Consumes the visitor, returning all collected semantic syntax errors.
    fn into_diagnostics(self) -> Vec<SemanticSyntaxError> {
        self.diagnostics.into_inner()
    }
    // Temporarily moves the checker out of `self` so that `f` can borrow the
    // checker mutably while also borrowing the visitor as the context.
    fn with_semantic_checker(&mut self, f: impl FnOnce(&mut SemanticSyntaxChecker, &Self)) {
        let mut checker = std::mem::take(&mut self.checker);
        f(&mut checker, self);
        self.checker = checker;
    }
}
impl SemanticSyntaxContext for SemanticSyntaxCheckerVisitor<'_> {
    // Most queries below return fixed, permissive answers: this visitor only
    // tracks the scope stack, so context checks that would require full
    // semantic analysis simply report "allowed".
    fn future_annotations_or_stub(&self) -> bool {
        false
    }
    fn python_version(&self) -> PythonVersion {
        self.python_version
    }
    fn report_semantic_error(&self, error: SemanticSyntaxError) {
        self.diagnostics.borrow_mut().push(error);
    }
    fn source(&self) -> &str {
        self.source
    }
    fn global(&self, _name: &str) -> Option<TextRange> {
        None
    }
    fn has_nonlocal_binding(&self, _name: &str) -> bool {
        true
    }
    /// Whether the innermost scope is an async function or comprehension.
    fn in_async_context(&self) -> bool {
        // `last()` replaces the previous `iter().next_back()`: identical
        // behavior, clearer intent.
        match self.scopes.last() {
            Some(Scope::Function { is_async } | Scope::Comprehension { is_async }) => *is_async,
            Some(Scope::Class | Scope::Module) | None => false,
        }
    }
    /// Whether any enclosing scope is a synchronous comprehension.
    fn in_sync_comprehension(&self) -> bool {
        self.scopes
            .iter()
            .any(|scope| matches!(scope, Scope::Comprehension { is_async: false }))
    }
    fn in_module_scope(&self) -> bool {
        // Only the module scope pushed in `new` remains on the stack.
        self.scopes.len() == 1
    }
    fn in_function_scope(&self) -> bool {
        true
    }
    fn in_notebook(&self) -> bool {
        false
    }
    fn in_await_allowed_context(&self) -> bool {
        true
    }
    fn in_yield_allowed_context(&self) -> bool {
        true
    }
    fn in_generator_context(&self) -> bool {
        true
    }
    fn in_loop_context(&self) -> bool {
        true
    }
    fn is_bound_parameter(&self, _name: &str) -> bool {
        false
    }
}
impl Visitor<'_> for SemanticSyntaxCheckerVisitor<'_> {
    fn visit_stmt(&mut self, stmt: &ast::Stmt) {
        // Run the semantic checks for this statement before descending.
        self.with_semantic_checker(|semantic, context| semantic.visit_stmt(stmt, context));
        match stmt {
            ast::Stmt::ClassDef(ast::StmtClassDef {
                arguments,
                body,
                decorator_list,
                type_params,
                ..
            }) => {
                // Decorators, type parameters, and base-class arguments are
                // visited in the enclosing scope; only the body is visited
                // with the class scope pushed.
                for decorator in decorator_list {
                    self.visit_decorator(decorator);
                }
                if let Some(type_params) = type_params {
                    self.visit_type_params(type_params);
                }
                if let Some(arguments) = arguments {
                    self.visit_arguments(arguments);
                }
                self.scopes.push(Scope::Class);
                self.visit_body(body);
                self.scopes.pop().unwrap();
            }
            ast::Stmt::FunctionDef(ast::StmtFunctionDef { is_async, .. }) => {
                // The entire function definition is walked inside the
                // function scope.
                self.scopes.push(Scope::Function {
                    is_async: *is_async,
                });
                ast::visitor::walk_stmt(self, stmt);
                self.scopes.pop().unwrap();
            }
            _ => {
                ast::visitor::walk_stmt(self, stmt);
            }
        }
    }
    fn visit_expr(&mut self, expr: &ast::Expr) {
        // Run the semantic checks for this expression before descending.
        self.with_semantic_checker(|semantic, context| semantic.visit_expr(expr, context));
        match expr {
            ast::Expr::Lambda(_) => {
                // A lambda body is a (synchronous) function scope.
                self.scopes.push(Scope::Function { is_async: false });
                ast::visitor::walk_expr(self, expr);
                self.scopes.pop().unwrap();
            }
            ast::Expr::ListComp(ast::ExprListComp {
                elt, generators, ..
            })
            | ast::Expr::SetComp(ast::ExprSetComp {
                elt, generators, ..
            })
            | ast::Expr::Generator(ast::ExprGenerator {
                elt, generators, ..
            }) => {
                // Generators are visited in the enclosing scope; the element
                // expression is visited inside a comprehension scope that is
                // async if any generator is async.
                for comprehension in generators {
                    self.visit_comprehension(comprehension);
                }
                self.scopes.push(Scope::Comprehension {
                    is_async: generators.iter().any(|generator| generator.is_async),
                });
                self.visit_expr(elt);
                self.scopes.pop().unwrap();
            }
            ast::Expr::DictComp(ast::ExprDictComp {
                key,
                value,
                generators,
                ..
            }) => {
                // Same as above, but dict comprehensions carry both a key
                // and a value expression.
                for comprehension in generators {
                    self.visit_comprehension(comprehension);
                }
                self.scopes.push(Scope::Comprehension {
                    is_async: generators.iter().any(|generator| generator.is_async),
                });
                self.visit_expr(key);
                self.visit_expr(value);
                self.scopes.pop().unwrap();
            }
            _ => {
                ast::visitor::walk_expr(self, expr);
            }
        }
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_python_parser/tests/generate_inline_tests.rs | crates/ruff_python_parser/tests/generate_inline_tests.rs | //! This module takes specially formatted comments from `ruff_python_parser` code
//! and turns them into test fixtures. The code is derived from `rust-analyzer`
//! and `biome`.
//!
//! References:
//! - <https://github.com/rust-lang/rust-analyzer/blob/e4a405f877efd820bef9c0e77a02494e47c17512/crates/parser/src/tests/sourcegen_inline_tests.rs>
//! - <https://github.com/biomejs/biome/blob/b9f8ffea9967b098ec4c8bf74fa96826a879f043/xtask/codegen/src/parser_tests.rs>
use std::collections::HashMap;
use std::fmt;
use std::fs;
use std::ops::{AddAssign, Deref, DerefMut};
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
/// Absolute, canonicalized path to the repository root (two directory
/// levels above this crate's manifest directory).
fn project_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let root = manifest_dir.join("../../");
    root.canonicalize().unwrap()
}
#[test]
fn generate_inline_tests() -> Result<()> {
    // Harvest `// test_ok` / `// test_err` comment tests from the parser
    // sources and materialize them as fixture files under `resources/inline`.
    let parser_dir = project_root().join("crates/ruff_python_parser/src/");
    let tests = TestCollection::try_from(parser_dir.as_path())?;
    let mut test_files = TestFiles::default();
    test_files += install_tests(&tests.ok, "crates/ruff_python_parser/resources/inline/ok")?;
    test_files += install_tests(&tests.err, "crates/ruff_python_parser/resources/inline/err")?;
    // Fail when any fixture was stale or unreferenced so the generated
    // files are forced to stay in sync with the comments.
    if !test_files.is_empty() {
        anyhow::bail!("{test_files}");
    }
    Ok(())
}
/// Outcome of syncing fixtures: files on disk with no matching comment test
/// (`unreferenced`) and files whose contents had to be rewritten (`updated`).
#[derive(Debug, Default)]
struct TestFiles {
    unreferenced: Vec<PathBuf>,
    updated: Vec<PathBuf>,
}
impl TestFiles {
    /// `true` when no file was stale and none is unreferenced.
    fn is_empty(&self) -> bool {
        self.unreferenced.is_empty() && self.updated.is_empty()
    }
}
impl AddAssign<TestFiles> for TestFiles {
    // Merges the results of multiple `install_tests` calls.
    fn add_assign(&mut self, other: TestFiles) {
        self.unreferenced.extend(other.unreferenced);
        self.updated.extend(other.updated);
    }
}
impl fmt::Display for TestFiles {
    /// Renders a human-readable report of unreferenced and updated fixture
    /// files, with paths shown relative to the repository root.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.is_empty() {
            writeln!(f, "No unreferenced or updated test files found")
        } else {
            let root_dir = project_root();
            if !self.unreferenced.is_empty() {
                writeln!(
                    f,
                    "Unreferenced test files found for which no comment exists:",
                )?;
                for path in &self.unreferenced {
                    writeln!(f, "  {}", path.strip_prefix(&root_dir).unwrap().display())?;
                }
                writeln!(f, "Please delete these files manually")?;
            }
            if !self.updated.is_empty() {
                // Blank separator line between the two sections.
                if !self.unreferenced.is_empty() {
                    writeln!(f)?;
                }
                // Fixed grammar of the user-facing message (was
                // "were not up-to date and has been updated").
                writeln!(
                    f,
                    "Following files were not up-to-date and have been updated:",
                )?;
                for path in &self.updated {
                    writeln!(f, "  {}", path.strip_prefix(&root_dir).unwrap().display())?;
                }
                writeln!(
                    f,
                    "Re-run the tests with `cargo test` to update the test snapshots"
                )?;
                if std::env::var("CI").is_ok() {
                    writeln!(
                        f,
                        "NOTE: Run the tests locally and commit the updated files"
                    )?;
                }
            }
            Ok(())
        }
    }
}
/// Writes each test's contents to `<target_dir>/<name>.py`, skipping files
/// whose on-disk contents already match.
///
/// Returns which files were rewritten and which on-disk files no longer
/// have a corresponding comment test.
fn install_tests(tests: &HashMap<String, Test>, target_dir: &str) -> Result<TestFiles> {
    let root_dir = project_root();
    let tests_dir = root_dir.join(target_dir);
    if !tests_dir.is_dir() {
        fs::create_dir_all(&tests_dir)?;
    }
    // Test kind is irrelevant for existing test cases.
    let existing = existing_tests(&tests_dir)?;
    let mut updated_files = vec![];
    for (name, test) in tests {
        let path = match existing.get(name) {
            Some(path) => path.clone(),
            None => tests_dir.join(name).with_extension("py"),
        };
        // Skip the write when the file already holds the expected contents
        // (a read failure — e.g. the file is new — falls through to write).
        match fs::read_to_string(&path) {
            Ok(old_contents) if old_contents == test.contents => continue,
            _ => {}
        }
        fs::write(&path, &test.contents)
            .with_context(|| format!("Failed to write to {:?}", path.display()))?;
        updated_files.push(path);
    }
    Ok(TestFiles {
        // Files on disk with no matching comment test anymore.
        unreferenced: existing
            .into_iter()
            .filter(|(name, _)| !tests.contains_key(name))
            .map(|(_, path)| path)
            .collect::<Vec<_>>(),
        updated: updated_files,
    })
}
/// All comment tests found in a source tree, keyed by test name and split
/// by expected parse outcome.
#[derive(Default, Debug)]
struct TestCollection {
    ok: HashMap<String, Test>,
    err: HashMap<String, Test>,
}
impl TryFrom<&Path> for TestCollection {
    type Error = anyhow::Error;

    /// Recursively walks `path`, collecting `test_ok`/`test_err` comment
    /// tests from every `.rs` file.
    ///
    /// # Errors
    ///
    /// Fails on I/O errors and when two tests of the same kind share a name.
    fn try_from(path: &Path) -> Result<Self> {
        let mut tests = TestCollection::default();
        for entry in walkdir::WalkDir::new(path) {
            let entry = entry?;
            if !entry.file_type().is_file() {
                continue;
            }
            if entry.path().extension().unwrap_or_default() != "rs" {
                continue;
            }
            let text = fs::read_to_string(entry.path())?;
            for test in collect_tests(&text) {
                // `test_ok` and `test_err` tests live in separate
                // namespaces; pick the map (and the matching comment
                // directive for the error message) by kind. This replaces
                // two nearly identical, asymmetric branches.
                let (map, directive) = if test.is_ok() {
                    (&mut tests.ok, "test_ok")
                } else {
                    (&mut tests.err, "test_err")
                };
                if let Some(old_test) = map.insert(test.name.clone(), test) {
                    anyhow::bail!(
                        "Duplicate test found: {name:?} (search '// {directive} {name}' for the location)\n",
                        name = old_test.name
                    );
                }
            }
        }
        Ok(tests)
    }
}
/// Whether a test is expected to parse successfully (`Ok`) or to produce
/// parse errors (`Err`).
#[derive(Debug, Clone, Copy)]
enum TestKind {
    Ok,
    Err,
}
/// A test of the following form:
///
/// ```text
/// // (test_ok|test_err) name
/// // <code>
/// ```
#[derive(Debug)]
struct Test {
    name: String,
    contents: String,
    kind: TestKind,
}
impl Test {
    /// `true` for `test_ok` tests.
    const fn is_ok(&self) -> bool {
        matches!(self.kind, TestKind::Ok)
    }
}
/// Collect the tests from the given source text.
///
/// A comment block is a test when its first line is `test_ok <name>` or
/// `test_err <name>`; the remaining lines become the test's contents,
/// terminated by a trailing newline.
///
/// # Panics
///
/// Panics when a test directive has no non-empty code lines after it.
fn collect_tests(text: &str) -> Vec<Test> {
    let mut tests = Vec::new();
    for comment_block in extract_comment_blocks(text) {
        let first_line = &comment_block[0];
        let (kind, name) = match first_line.split_once(' ') {
            Some(("test_ok", suffix)) => (TestKind::Ok, suffix),
            Some(("test_err", suffix)) => (TestKind::Err, suffix),
            _ => continue,
        };
        // Join the remaining lines and terminate with a newline. This
        // replaces the previous clone-per-line `cloned().chain([String::new()])
        // .collect::<Vec<_>>().join("\n")` with an equivalent, allocation-light
        // form.
        let mut contents = comment_block[1..].join("\n");
        contents.push('\n');
        assert!(!contents.trim().is_empty() && contents.ends_with('\n'));
        tests.push(Test {
            name: name.to_string(),
            contents,
            kind,
        });
    }
    tests
}
/// A contiguous run of `//` comment lines with the `// ` prefix stripped.
///
/// Derefs to the underlying `Vec<String>` for convenient indexing/slicing.
#[derive(Debug, Default)]
struct CommentBlock(Vec<String>);
impl Deref for CommentBlock {
    type Target = Vec<String>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for CommentBlock {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Extract the comment blocks from the given source text.
///
/// A comment block is a sequence of lines that start with `// ` and are separated
/// by an empty line. An empty comment line (`//`) is also part of the block.
fn extract_comment_blocks(text: &str) -> Vec<CommentBlock> {
    const COMMENT_PREFIX: &str = "// ";
    let mut comment_blocks = Vec::new();
    let mut block = CommentBlock::default();
    for line in text.lines().map(str::trim_start) {
        if line == "//" {
            // An empty comment line continues the current block.
            block.push(String::new());
        } else if let Some(rest) = line.strip_prefix(COMMENT_PREFIX) {
            // `strip_prefix` replaces the previous `starts_with` + manual
            // slice-by-length, eliminating the length constant.
            block.push(rest.to_string());
        } else if !block.is_empty() {
            // Any other line (including `//x` without a space) terminates
            // the current block.
            comment_blocks.push(std::mem::take(&mut block));
        }
    }
    // Flush a block that runs to the end of the text.
    if !block.is_empty() {
        comment_blocks.push(block);
    }
    comment_blocks
}
/// Returns the existing tests in the given directory, keyed by file stem.
///
/// # Errors
///
/// Fails on I/O errors or when two `.py` files share the same stem.
fn existing_tests(dir: &Path) -> Result<HashMap<String, PathBuf>> {
    let mut tests = HashMap::new();
    for file in fs::read_dir(dir)? {
        let path = file?.path();
        if path.extension().unwrap_or_default() != "py" {
            continue;
        }
        let name = path
            .file_stem()
            .map(|x| x.to_string_lossy().to_string())
            .unwrap();
        if let Some(old) = tests.insert(name, path) {
            // Grammar fix in the error message (was "Multiple test file
            // exists").
            anyhow::bail!("Multiple test files exist for {old:?}");
        }
    }
    Ok(tests)
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/lib.rs | crates/ruff_text_size/src/lib.rs | //! Newtypes for working with text sizes/ranges in a more type-safe manner.
//!
//! This library can help with two things:
//! * Reducing storage requirements for offsets and ranges, under the
//! assumption that 32 bits is enough.
//! * Providing standard vocabulary types for applications where text ranges
//! are pervasive.
//!
//! However, you should not use this library simply because you work with
//! strings. In the overwhelming majority of cases, using `usize` and
//! `std::ops::Range<usize>` is better. In particular, if you are publishing a
//! library, using only std types in the interface would make it more
//! interoperable. Similarly, if you are writing something like a lexer, which
//! produces, but does not *store* text ranges, then sticking to `usize` would
//! be better.
//!
//! Minimal Supported Rust Version: latest stable.
#![forbid(unsafe_code)]
#![warn(missing_debug_implementations, missing_docs)]
mod range;
mod size;
mod traits;
#[cfg(feature = "schemars")]
mod schemars_impls;
#[cfg(feature = "serde")]
mod serde_impls;
pub use crate::{
range::TextRange, size::TextSize, traits::Ranged, traits::TextLen, traits::TextSlice,
};
#[cfg(target_pointer_width = "16")]
compile_error!("text-size assumes usize >= u32 and does not work on 16-bit targets");
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/range.rs | crates/ruff_text_size/src/range.rs | use cmp::Ordering;
use {
crate::TextSize,
std::{
cmp, fmt,
ops::{Add, AddAssign, Bound, Index, IndexMut, Range, RangeBounds, Sub, SubAssign},
},
};
/// A range in text, represented as a pair of [`TextSize`][struct@TextSize].
///
/// It is a logic error for `start` to be greater than `end`.
#[derive(Default, Copy, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct TextRange {
    // Invariant: start <= end
    // (asserted by `TextRange::new`; the fields stay private so every
    // construction path goes through a checked constructor)
    start: TextSize,
    end: TextSize,
}
impl fmt::Debug for TextRange {
    /// Debug-formats the range as `start..end` using the raw offsets,
    /// e.g. `5..10`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (start, end) = (self.start().raw, self.end().raw);
        write!(f, "{start}..{end}")
    }
}
impl TextRange {
    /// Creates a new `TextRange` spanning `start..end`.
    ///
    /// # Panics
    ///
    /// Panics if `end < start`.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use ruff_text_size::*;
    /// let range = TextRange::new(TextSize::from(5), TextSize::from(10));
    /// assert_eq!(range.start(), TextSize::from(5));
    /// assert_eq!(range.end(), TextSize::from(10));
    /// ```
    #[inline]
    #[track_caller]
    pub const fn new(start: TextSize, end: TextSize) -> TextRange {
        // Uphold the `start <= end` invariant (raw comparison: const fn).
        assert!(start.raw <= end.raw);
        TextRange { start, end }
    }

    /// Creates a new `TextRange` spanning `offset..offset + len`.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use ruff_text_size::*;
    /// let range = TextRange::at(TextSize::from(2), TextSize::from(5));
    /// assert_eq!(range, TextRange::new(TextSize::from(2), TextSize::from(7)));
    /// ```
    #[inline]
    pub fn at(offset: TextSize, len: TextSize) -> TextRange {
        let end = offset + len;
        TextRange::new(offset, end)
    }

    /// Creates a zero-length range at `offset` (`offset..offset`).
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use ruff_text_size::*;
    /// let range = TextRange::empty(TextSize::from(3));
    /// assert!(range.is_empty());
    /// ```
    #[inline]
    pub fn empty(offset: TextSize) -> TextRange {
        // Start and end coincide, so the invariant holds trivially.
        TextRange {
            start: offset,
            end: offset,
        }
    }

    /// Creates a range from the start of the text up to `end` (`..end`).
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use ruff_text_size::*;
    /// let range = TextRange::up_to(TextSize::from(12));
    /// assert_eq!(range.len(), TextSize::from(12));
    /// ```
    #[inline]
    pub fn up_to(end: TextSize) -> TextRange {
        TextRange {
            start: 0.into(),
            end,
        }
    }
}
/// Identity methods.
impl TextRange {
    /// The start point of this range.
    #[inline]
    pub const fn start(self) -> TextSize {
        self.start
    }
    /// The end point of this range.
    #[inline]
    pub const fn end(self) -> TextSize {
        self.end
    }
    /// The size of this range.
    #[inline]
    pub const fn len(self) -> TextSize {
        // HACK for const fn: math on primitives only
        // (cannot underflow: the struct invariant guarantees start <= end)
        TextSize {
            raw: self.end().raw - self.start().raw,
        }
    }
    /// Check if this range is empty.
    #[inline]
    pub const fn is_empty(self) -> bool {
        // HACK for const fn: math on primitives only
        self.start().raw == self.end().raw
    }
}
/// Manipulation methods.
impl TextRange {
/// Check if this range contains an offset.
///
/// The end index is considered excluded.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// let (start, end): (TextSize, TextSize);
/// # start = 10.into(); end = 20.into();
/// let range = TextRange::new(start, end);
/// assert!(range.contains(start));
/// assert!(!range.contains(end));
/// ```
#[inline]
pub fn contains(self, offset: TextSize) -> bool {
self.start() <= offset && offset < self.end()
}
/// Check if this range contains an offset.
///
/// The end index is considered included.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// let (start, end): (TextSize, TextSize);
/// # start = 10.into(); end = 20.into();
/// let range = TextRange::new(start, end);
/// assert!(range.contains_inclusive(start));
/// assert!(range.contains_inclusive(end));
/// ```
#[inline]
pub fn contains_inclusive(self, offset: TextSize) -> bool {
self.start() <= offset && offset <= self.end()
}
/// Check if this range completely contains another range.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// let larger = TextRange::new(0.into(), 20.into());
/// let smaller = TextRange::new(5.into(), 15.into());
/// assert!(larger.contains_range(smaller));
/// assert!(!smaller.contains_range(larger));
///
/// // a range always contains itself
/// assert!(larger.contains_range(larger));
/// assert!(smaller.contains_range(smaller));
/// ```
#[inline]
pub fn contains_range(self, other: TextRange) -> bool {
self.start() <= other.start() && other.end() <= self.end()
}
/// The range covered by both ranges, if it exists.
/// If the ranges touch but do not overlap, the output range is empty.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(
/// TextRange::intersect(
/// TextRange::new(0.into(), 10.into()),
/// TextRange::new(5.into(), 15.into()),
/// ),
/// Some(TextRange::new(5.into(), 10.into())),
/// );
/// ```
#[inline]
pub fn intersect(self, other: TextRange) -> Option<TextRange> {
let start = cmp::max(self.start(), other.start());
let end = cmp::min(self.end(), other.end());
if end < start {
return None;
}
Some(TextRange::new(start, end))
}
/// Extends the range to cover `other` as well.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(
/// TextRange::cover(
/// TextRange::new(0.into(), 5.into()),
/// TextRange::new(15.into(), 20.into()),
/// ),
/// TextRange::new(0.into(), 20.into()),
/// );
/// ```
#[inline]
#[must_use]
pub fn cover(self, other: TextRange) -> TextRange {
let start = cmp::min(self.start(), other.start());
let end = cmp::max(self.end(), other.end());
TextRange::new(start, end)
}
/// Extends the range to cover `other` offsets as well.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(
/// TextRange::empty(0.into()).cover_offset(20.into()),
/// TextRange::new(0.into(), 20.into()),
/// )
/// ```
#[inline]
#[must_use]
pub fn cover_offset(self, offset: TextSize) -> TextRange {
self.cover(TextRange::empty(offset))
}
/// Add an offset to this range.
///
/// Note that this is not appropriate for changing where a `TextRange` is
/// within some string; rather, it is for changing the reference anchor
/// that the `TextRange` is measured against.
///
/// The unchecked version (`Add::add`) will _always_ panic on overflow,
/// in contrast to primitive integers, which check in debug mode only.
#[inline]
pub fn checked_add(self, offset: TextSize) -> Option<TextRange> {
Some(TextRange {
start: self.start.checked_add(offset)?,
end: self.end.checked_add(offset)?,
})
}
/// Subtract an offset from this range.
///
/// Note that this is not appropriate for changing where a `TextRange` is
/// within some string; rather, it is for changing the reference anchor
/// that the `TextRange` is measured against.
///
/// The unchecked version (`Sub::sub`) will _always_ panic on overflow,
/// in contrast to primitive integers, which check in debug mode only.
#[inline]
pub fn checked_sub(self, offset: TextSize) -> Option<TextRange> {
Some(TextRange {
start: self.start.checked_sub(offset)?,
end: self.end.checked_sub(offset)?,
})
}
/// Relative order of the two ranges (overlapping ranges are considered
/// equal).
///
///
/// This is useful when, for example, binary searching an array of disjoint
/// ranges.
///
/// # Examples
///
/// ```
/// # use ruff_text_size::*;
/// # use std::cmp::Ordering;
///
/// let a = TextRange::new(0.into(), 3.into());
/// let b = TextRange::new(4.into(), 5.into());
/// assert_eq!(a.ordering(b), Ordering::Less);
///
/// let a = TextRange::new(0.into(), 3.into());
/// let b = TextRange::new(3.into(), 5.into());
/// assert_eq!(a.ordering(b), Ordering::Less);
///
/// let a = TextRange::new(0.into(), 3.into());
/// let b = TextRange::new(2.into(), 5.into());
/// assert_eq!(a.ordering(b), Ordering::Equal);
///
/// let a = TextRange::new(0.into(), 3.into());
/// let b = TextRange::new(2.into(), 2.into());
/// assert_eq!(a.ordering(b), Ordering::Equal);
///
/// let a = TextRange::new(2.into(), 3.into());
/// let b = TextRange::new(2.into(), 2.into());
/// assert_eq!(a.ordering(b), Ordering::Greater);
/// ```
#[inline]
pub fn ordering(self, other: TextRange) -> Ordering {
if self.end() <= other.start() {
Ordering::Less
} else if other.end() <= self.start() {
Ordering::Greater
} else {
Ordering::Equal
}
}
/// Subtracts an offset from the start position.
///
///
/// ## Panics
/// If `start - amount` is less than zero.
///
/// ## Examples
///
/// ```
/// use ruff_text_size::{Ranged, TextRange, TextSize};
///
/// let range = TextRange::new(TextSize::from(5), TextSize::from(10));
/// assert_eq!(range.sub_start(TextSize::from(2)), TextRange::new(TextSize::from(3), TextSize::from(10)));
/// ```
#[inline]
#[must_use]
pub fn sub_start(&self, amount: TextSize) -> TextRange {
TextRange::new(self.start() - amount, self.end())
}
/// Adds an offset to the start position.
///
/// ## Panics
/// If `start + amount > end`
///
/// ## Examples
///
/// ```
/// use ruff_text_size::{Ranged, TextRange, TextSize};
///
/// let range = TextRange::new(TextSize::from(5), TextSize::from(10));
/// assert_eq!(range.add_start(TextSize::from(3)), TextRange::new(TextSize::from(8), TextSize::from(10)));
/// ```
#[inline]
#[must_use]
pub fn add_start(&self, amount: TextSize) -> TextRange {
TextRange::new(self.start() + amount, self.end())
}
/// Subtracts an offset from the end position.
///
///
/// ## Panics
/// If `end - amount < 0` or `end - amount < start`
///
/// ## Examples
///
/// ```
/// use ruff_text_size::{Ranged, TextRange, TextSize};
///
/// let range = TextRange::new(TextSize::from(5), TextSize::from(10));
/// assert_eq!(range.sub_end(TextSize::from(2)), TextRange::new(TextSize::from(5), TextSize::from(8)));
/// ```
#[inline]
#[must_use]
pub fn sub_end(&self, amount: TextSize) -> TextRange {
TextRange::new(self.start(), self.end() - amount)
}
/// Adds an offset to the end position.
///
///
/// ## Panics
/// If `end + amount > u32::MAX`
///
/// ## Examples
///
/// ```
/// use ruff_text_size::{Ranged, TextRange, TextSize};
///
/// let range = TextRange::new(TextSize::from(5), TextSize::from(10));
/// assert_eq!(range.add_end(TextSize::from(2)), TextRange::new(TextSize::from(5), TextSize::from(12)));
/// ```
#[inline]
#[must_use]
pub fn add_end(&self, amount: TextSize) -> TextRange {
TextRange::new(self.start(), self.end() + amount)
}
}
/// Conversion methods.
impl TextRange {
    /// A convenience routine for converting this range to a
    /// standard library range that satisfies the `RangeBounds`
    /// trait.
    ///
    /// This is also available as a `From` trait implementation,
    /// but this method avoids the need to specify types to help
    /// inference.
    #[inline]
    #[must_use]
    pub fn to_std_range(&self) -> Range<usize> {
        // Delegates to the generic `From<TextRange> for Range<T>` impl.
        (*self).into()
    }
}
// Indexing `str`/`String` by a `TextRange` behaves exactly like indexing by
// the equivalent `Range<usize>`: it panics on out-of-bounds or
// non-char-boundary ranges, just like standard slice indexing.
impl Index<TextRange> for str {
    type Output = str;
    #[inline]
    fn index(&self, index: TextRange) -> &str {
        &self[Range::<usize>::from(index)]
    }
}
impl Index<TextRange> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: TextRange) -> &str {
        &self[Range::<usize>::from(index)]
    }
}
impl IndexMut<TextRange> for str {
    #[inline]
    fn index_mut(&mut self, index: TextRange) -> &mut str {
        &mut self[Range::<usize>::from(index)]
    }
}
impl IndexMut<TextRange> for String {
    #[inline]
    fn index_mut(&mut self, index: TextRange) -> &mut str {
        &mut self[Range::<usize>::from(index)]
    }
}
impl RangeBounds<TextSize> for TextRange {
    // Matches the semantics of `start..end`: inclusive start,
    // exclusive end.
    fn start_bound(&self) -> Bound<&TextSize> {
        Bound::Included(&self.start)
    }
    fn end_bound(&self) -> Bound<&TextSize> {
        Bound::Excluded(&self.end)
    }
}
impl From<Range<TextSize>> for TextRange {
    // Panics (via `TextRange::new`) if the std range is inverted.
    #[inline]
    fn from(r: Range<TextSize>) -> Self {
        TextRange::new(r.start, r.end)
    }
}
// Generic over the target offset type so a `TextRange` converts directly
// into e.g. `Range<usize>`.
impl<T> From<TextRange> for Range<T>
where
    T: From<TextSize>,
{
    #[inline]
    fn from(r: TextRange) -> Self {
        r.start().into()..r.end().into()
    }
}
// Generates the by-reference operand variants (`TextRange op &TextSize` and
// `&TextRange op T`) in terms of the by-value `Add`/`Sub` impls below.
macro_rules! ops {
    (impl $Op:ident for TextRange by fn $f:ident = $op:tt) => {
        impl $Op<&TextSize> for TextRange {
            type Output = TextRange;
            #[inline]
            fn $f(self, other: &TextSize) -> TextRange {
                self $op *other
            }
        }
        impl<T> $Op<T> for &TextRange
        where
            TextRange: $Op<T, Output=TextRange>,
        {
            type Output = TextRange;
            #[inline]
            fn $f(self, other: T) -> TextRange {
                *self $op other
            }
        }
    };
}
impl Add<TextSize> for TextRange {
    type Output = TextRange;
    // Shifts both endpoints right; panics on `u32` overflow (see
    // `checked_add` for the non-panicking variant).
    #[inline]
    fn add(self, offset: TextSize) -> TextRange {
        self.checked_add(offset)
            .expect("TextRange +offset overflowed")
    }
}
impl Sub<TextSize> for TextRange {
    type Output = TextRange;
    // Shifts both endpoints left; panics on underflow (see `checked_sub`
    // for the non-panicking variant).
    #[inline]
    fn sub(self, offset: TextSize) -> TextRange {
        self.checked_sub(offset)
            .expect("TextRange -offset overflowed")
    }
}
// Derive the by-reference operand variants from the impls above.
ops!(impl Add for TextRange by fn add = +);
ops!(impl Sub for TextRange by fn sub = -);
// `range += offset` for any right-hand side the `Add` impls accept.
impl<A> AddAssign<A> for TextRange
where
    TextRange: Add<A, Output = TextRange>,
{
    #[inline]
    fn add_assign(&mut self, rhs: A) {
        *self = *self + rhs;
    }
}
// `range -= offset` for any right-hand side the `Sub` impls accept.
impl<S> SubAssign<S> for TextRange
where
    TextRange: Sub<S, Output = TextRange>,
{
    #[inline]
    fn sub_assign(&mut self, rhs: S) {
        *self = *self - rhs;
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/traits.rs | crates/ruff_text_size/src/traits.rs | use std::sync::Arc;
use {crate::TextRange, crate::TextSize, std::convert::TryInto};
use priv_in_pub::Sealed;
// Sealed-trait pattern: `Sealed` lives in a private module, so downstream
// crates can name it in bounds but cannot implement it themselves.
mod priv_in_pub {
    pub trait Sealed {}
}
/// Primitives with a textual length that can be passed to [`TextSize::of`].
pub trait TextLen: Copy + Sealed {
    /// The textual length of this primitive.
    fn text_len(self) -> TextSize;
}
impl Sealed for &'_ str {}
impl TextLen for &'_ str {
    // Panics if the string is longer than `u32::MAX` bytes.
    #[inline]
    fn text_len(self) -> TextSize {
        self.len().try_into().unwrap()
    }
}
impl Sealed for &'_ String {}
impl TextLen for &'_ String {
    #[inline]
    fn text_len(self) -> TextSize {
        self.as_str().text_len()
    }
}
impl Sealed for char {}
impl TextLen for char {
    // `len_utf8()` is at most 4, so the `usize -> u32` cast cannot truncate.
    #[inline]
    #[expect(clippy::cast_possible_truncation)]
    fn text_len(self) -> TextSize {
        (self.len_utf8() as u32).into()
    }
}
/// A ranged item in the source text.
pub trait Ranged {
    /// The range of this item in the source text.
    fn range(&self) -> TextRange;
    /// The start offset of this item in the source text.
    fn start(&self) -> TextSize {
        self.range().start()
    }
    /// The end offset of this item in the source text.
    fn end(&self) -> TextSize {
        self.range().end()
    }
}
// A range is trivially its own range.
impl Ranged for TextRange {
    fn range(&self) -> TextRange {
        *self
    }
}
// Blanket impls: references and `Arc`s of ranged items are ranged too.
impl<T> Ranged for &T
where
    T: Ranged,
{
    fn range(&self) -> TextRange {
        T::range(self)
    }
}
impl<T> Ranged for Arc<T>
where
    T: Ranged,
{
    fn range(&self) -> TextRange {
        T::range(self)
    }
}
/// A slice of the source text.
pub trait TextSlice: Sealed {
    /// Returns the slice of the text within the given `range`.
    ///
    /// ## Note
    ///
    /// This is the same as `&self[range]` if `self` is a `str` and `range` a `TextRange`.
    ///
    /// ## Panics
    /// If the range is out of bounds.
    fn slice(&self, range: impl Ranged) -> &str;
}
impl Sealed for str {}
impl TextSlice for str {
    // Parameter renamed to `range` for consistency with the trait
    // declaration above.
    fn slice(&self, range: impl Ranged) -> &str {
        // Delegates to `Index<TextRange> for str`, which panics when the
        // range exceeds the string's bounds.
        &self[range.range()]
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/schemars_impls.rs | crates/ruff_text_size/src/schemars_impls.rs | //! This module implements the [`JsonSchema`] trait from the `schemars` crate for
//! [`TextSize`] and [`TextRange`] if the `schemars` feature is enabled. This trait
//! exposes meta-information on how a given type is serialized and deserialized
//! using `serde`, and is currently used to generate autocomplete information
//! for the `rome.json` configuration file and TypeScript types for the node.js
//! bindings to the Workspace API
use crate::{TextRange, TextSize};
use schemars::{JsonSchema, Schema, SchemaGenerator};
use std::borrow::Cow;
impl JsonSchema for TextSize {
    fn schema_name() -> Cow<'static, str> {
        Cow::Borrowed("TextSize")
    }
    // The schema must mirror the serde representation, which is a bare
    // `u32`.
    fn json_schema(r#gen: &mut SchemaGenerator) -> Schema {
        // TextSize is represented as a raw u32, see serde_impls.rs for the
        // actual implementation
        <u32>::json_schema(r#gen)
    }
}
impl JsonSchema for TextRange {
    fn schema_name() -> Cow<'static, str> {
        Cow::Borrowed("TextRange")
    }
    fn json_schema(r#gen: &mut SchemaGenerator) -> Schema {
        // TextRange is represented as (TextSize, TextSize), see
        // serde_impls.rs for the actual implementation.
        // (Fixed copy-paste: the comment previously said "TextSize".)
        <(TextSize, TextSize)>::json_schema(r#gen)
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/serde_impls.rs | crates/ruff_text_size/src/serde_impls.rs | use {
crate::{TextRange, TextSize},
serde::{Deserialize, Deserializer, Serialize, Serializer, de},
};
impl Serialize for TextSize {
    // Serialized transparently as the raw `u32` offset.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.raw.serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for TextSize {
    // Any `u32` is a valid offset, so no validation is needed here.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        u32::deserialize(deserializer).map(TextSize::from)
    }
}
impl Serialize for TextRange {
    // Serialized as a `(start, end)` tuple of `TextSize`s.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        (self.start(), self.end()).serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for TextRange {
    // The condition below is deliberately written as the negation of the
    // `start <= end` invariant rather than the minimal `start > end`,
    // hence the suppressed clippy lint.
    #[expect(clippy::nonminimal_bool)]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let (start, end) = Deserialize::deserialize(deserializer)?;
        // Validate before constructing: return a serde error for malformed
        // input instead of letting `TextRange::new` panic.
        if !(start <= end) {
            return Err(de::Error::custom(format!(
                "invalid range: {start:?}..{end:?}"
            )));
        }
        Ok(TextRange::new(start, end))
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/src/size.rs | crates/ruff_text_size/src/size.rs | use {
crate::TextLen,
std::{
convert::TryFrom,
fmt, iter,
num::TryFromIntError,
ops::{Add, AddAssign, Sub, SubAssign},
},
};
/// A measure of text length. Also, equivalently, an index into text.
///
/// This is a UTF-8 bytes offset stored as `u32`, but
/// most clients should treat it as an opaque measure.
///
/// For cases that need to escape `TextSize` and return to working directly
/// with primitive integers, `TextSize` can be converted losslessly to/from
/// `u32` via [`From`] conversions as well as losslessly be converted [`Into`]
/// `usize`. The `usize -> TextSize` direction can be done via [`TryFrom`].
///
/// These escape hatches are primarily required for unit testing and when
/// converting from UTF-8 size to another coordinate space, such as UTF-16.
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "get-size", derive(get_size2::GetSize))]
pub struct TextSize {
    // Raw UTF-8 byte offset; crate-visible so sibling impls (serde, ops)
    // can access it without going through accessors.
    pub(crate) raw: u32,
}
impl fmt::Debug for TextSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.raw)
}
}
impl TextSize {
/// A `TextSize` of zero.
pub const ZERO: TextSize = TextSize::new(0);
/// Creates a new `TextSize` at the given `offset`.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(TextSize::from(4), TextSize::new(4));
/// ```
pub const fn new(offset: u32) -> Self {
Self { raw: offset }
}
/// The text size of some primitive text-like object.
///
/// Accepts `char`, `&str`, and `&String`.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// let char_size = TextSize::of('🦀');
/// assert_eq!(char_size, TextSize::from(4));
///
/// let str_size = TextSize::of("rust-analyzer");
/// assert_eq!(str_size, TextSize::from(13));
/// ```
#[inline]
pub fn of<T: TextLen>(text: T) -> TextSize {
text.text_len()
}
/// Returns current raw `offset` as u32.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(TextSize::from(4).to_u32(), 4);
/// ```
pub const fn to_u32(&self) -> u32 {
self.raw
}
/// Returns current raw `offset` as usize.
///
/// # Examples
///
/// ```rust
/// # use ruff_text_size::*;
/// assert_eq!(TextSize::from(4).to_usize(), 4);
/// ```
pub const fn to_usize(&self) -> usize {
self.raw as usize
}
}
/// Methods to act like a primitive integer type, where reasonably applicable.
// Last updated for parity with Rust 1.42.0.
impl TextSize {
    /// Checked addition. Returns `None` if overflow occurred.
    #[inline]
    pub fn checked_add(self, rhs: TextSize) -> Option<TextSize> {
        let raw = self.raw.checked_add(rhs.raw)?;
        Some(TextSize { raw })
    }
    /// Checked subtraction. Returns `None` if overflow occurred.
    #[inline]
    pub fn checked_sub(self, rhs: TextSize) -> Option<TextSize> {
        let raw = self.raw.checked_sub(rhs.raw)?;
        Some(TextSize { raw })
    }
    /// Saturating addition. Returns maximum `TextSize` if overflow occurred.
    #[inline]
    #[must_use]
    pub fn saturating_add(self, rhs: TextSize) -> TextSize {
        let raw = self.raw.saturating_add(rhs.raw);
        TextSize { raw }
    }
    /// Saturating subtraction. Returns minimum `TextSize` if overflow
    /// occurred.
    #[inline]
    #[must_use]
    pub fn saturating_sub(self, rhs: TextSize) -> TextSize {
        let raw = self.raw.saturating_sub(rhs.raw);
        TextSize { raw }
    }
}
impl From<u32> for TextSize {
#[inline]
fn from(raw: u32) -> Self {
TextSize::new(raw)
}
}
impl From<TextSize> for u32 {
#[inline]
fn from(value: TextSize) -> Self {
value.to_u32()
}
}
impl TryFrom<usize> for TextSize {
type Error = TryFromIntError;
#[inline]
fn try_from(value: usize) -> Result<Self, TryFromIntError> {
Ok(u32::try_from(value)?.into())
}
}
impl From<TextSize> for usize {
#[inline]
fn from(value: TextSize) -> Self {
value.to_usize()
}
}
// Generates the binary-operator impls for one operator `$op`:
// `TextSize $op TextSize`, `TextSize $op &TextSize`, and a blanket
// `&TextSize $op T`, all forwarding to the underlying `u32` operator.
macro_rules! ops {
    (impl $Op:ident for TextSize by fn $f:ident = $op:tt) => {
        impl $Op<TextSize> for TextSize {
            type Output = TextSize;
            #[inline]
            fn $f(self, other: TextSize) -> TextSize {
                TextSize { raw: self.raw $op other.raw }
            }
        }
        impl $Op<&TextSize> for TextSize {
            type Output = TextSize;
            #[inline]
            fn $f(self, other: &TextSize) -> TextSize {
                self $op *other
            }
        }
        impl<T> $Op<T> for &TextSize
        where
            TextSize: $Op<T, Output=TextSize>,
        {
            type Output = TextSize;
            #[inline]
            fn $f(self, other: T) -> TextSize {
                *self $op other
            }
        }
    };
}
// `+` and `-` use the plain (panicking-on-overflow-in-debug) u32 operators;
// checked/saturating variants live on `impl TextSize` above.
ops!(impl Add for TextSize by fn add = +);
ops!(impl Sub for TextSize by fn sub = -);
// `+=` is available for any right-hand side that `+` accepts.
impl<A> AddAssign<A> for TextSize
where
    TextSize: Add<A, Output = TextSize>,
{
    #[inline]
    fn add_assign(&mut self, rhs: A) {
        let updated = *self + rhs;
        *self = updated;
    }
}
// Likewise `-=` for any right-hand side that `-` accepts.
impl<S> SubAssign<S> for TextSize
where
    TextSize: Sub<S, Output = TextSize>,
{
    #[inline]
    fn sub_assign(&mut self, rhs: S) {
        let updated = *self - rhs;
        *self = updated;
    }
}
impl<A> iter::Sum<A> for TextSize
where
TextSize: Add<A, Output = TextSize>,
{
#[inline]
fn sum<I: Iterator<Item = A>>(iter: I) -> TextSize {
iter.fold(0.into(), Add::add)
}
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/tests/constructors.rs | crates/ruff_text_size/tests/constructors.rs | use ruff_text_size::TextSize;
#[derive(Copy, Clone)]
struct BadRope<'a>(&'a [&'a str]);
impl BadRope<'_> {
    // Sums the lengths of all chunks; exercises `Sum<TextSize>`.
    fn text_len(self) -> TextSize {
        self.0.iter().map(|chunk| TextSize::of(*chunk)).sum()
    }
}
#[test]
fn main() {
    // `TextSize::of` accepts `char`, `&str`, and `&String`; this test is
    // primarily a compile-time check of those impls.
    let _ = TextSize::of('c');
    let _ = TextSize::of("hello");
    let owned: String = "hello".into();
    let _ = TextSize::of(&owned);
    let _ = BadRope(&[""]).text_len();
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/tests/serde.rs | crates/ruff_text_size/tests/serde.rs | use {
ruff_text_size::{TextRange, TextSize},
serde_test::{Token, assert_de_tokens_error, assert_tokens},
std::ops,
};
// Shorthand for building a `TextSize` in the tests below.
fn size(x: u32) -> TextSize {
    x.into()
}
// Shorthand for building a `TextRange` from a `u32` range literal.
fn range(x: ops::Range<u32>) -> TextRange {
    TextRange::new(TextSize::from(x.start), TextSize::from(x.end))
}
#[test]
fn size_serialization() {
    // Each size round-trips as a single bare `u32` token.
    for value in [0, 10, 20, 30] {
        assert_tokens(&size(value), &[Token::U32(value)]);
    }
}
#[test]
fn range_serialization() {
    // Each range round-trips as a two-element tuple of `u32` tokens.
    for (start, end) in [(0, 10), (10, 20), (20, 30), (30, 40)] {
        assert_tokens(
            &range(start..end),
            &[
                Token::Tuple { len: 2 },
                Token::U32(start),
                Token::U32(end),
                Token::TupleEnd,
            ],
        );
    }
}
#[test]
fn invalid_range_deserialization() {
    // Sanity check: a well-ordered range round-trips fine.
    let ordered = [
        Token::Tuple { len: 2 },
        Token::U32(62),
        Token::U32(92),
        Token::TupleEnd,
    ];
    assert_tokens::<TextRange>(&range(62..92), &ordered);
    // An inverted range (start > end) must be rejected with a custom error.
    let inverted = [
        Token::Tuple { len: 2 },
        Token::U32(92),
        Token::U32(62),
        Token::TupleEnd,
    ];
    assert_de_tokens_error::<TextRange>(&inverted, "invalid range: 92..62");
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/tests/main.rs | crates/ruff_text_size/tests/main.rs | use {
ruff_text_size::{TextRange, TextSize},
std::ops,
};
// Test helper: wrap a raw offset.
fn size(x: u32) -> TextSize {
    TextSize::new(x)
}
// Test helper: build a `TextRange` from a `u32` range literal.
fn range(x: ops::Range<u32>) -> TextRange {
    let (start, end) = (x.start, x.end);
    TextRange::new(start.into(), end.into())
}
#[test]
fn sum() {
    let sizes = vec![size(0), size(1), size(2)];
    assert_eq!(sizes.iter().sum::<TextSize>(), size(3));
    assert_eq!(sizes.into_iter().sum::<TextSize>(), size(3));
}
#[test]
fn math() {
    assert_eq!(size(10) + size(5), size(15));
    assert_eq!(size(10) - size(5), size(5));
}
#[test]
fn checked_math() {
    assert_eq!(size(1).checked_add(size(1)), Some(size(2)));
    assert_eq!(size(1).checked_sub(size(1)), Some(size(0)));
    // Underflow and overflow both yield `None`.
    assert_eq!(size(1).checked_sub(size(2)), None);
    assert_eq!(size(!0).checked_add(size(1)), None);
}
#[test]
fn contains() {
    assert!(range(2..4).contains_range(range(2..3)));
    assert!(!range(2..4).contains_range(range(1..3)));
}
#[test]
fn intersect() {
    // Touching ranges intersect in an empty range at the shared offset.
    assert_eq!(range(1..2).intersect(range(2..3)), Some(range(2..2)));
    assert_eq!(range(1..5).intersect(range(2..3)), Some(range(2..3)));
    assert_eq!(range(1..2).intersect(range(3..4)), None);
}
#[test]
fn cover() {
    assert_eq!(range(1..2).cover(range(2..3)), range(1..3));
    assert_eq!(range(1..5).cover(range(2..3)), range(1..5));
    // Covering a disjoint range bridges the gap.
    assert_eq!(range(1..2).cover(range(4..5)), range(1..5));
}
#[test]
fn cover_offset() {
    for (offset, expected) in [(0, 0..3), (1, 1..3), (2, 1..3), (3, 1..3), (4, 1..4)] {
        assert_eq!(range(1..3).cover_offset(size(offset)), range(expected));
    }
}
#[test]
fn contains_point() {
    // Half-open containment: the end offset is excluded...
    for (offset, expected) in [(0, false), (1, true), (2, true), (3, false), (4, false)] {
        assert_eq!(range(1..3).contains(size(offset)), expected);
    }
    // ...while `contains_inclusive` also accepts the end offset.
    for (offset, expected) in [(0, false), (1, true), (2, true), (3, true), (4, false)] {
        assert_eq!(range(1..3).contains_inclusive(size(offset)), expected);
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/tests/indexing.rs | crates/ruff_text_size/tests/indexing.rs | use ruff_text_size::TextRange;
#[test]
fn main() {
    // Compile-time check that `&str` and `String` are indexable by
    // `TextRange` (i.e. the `Index`/`SliceIndex` impls exist).
    let range = TextRange::default();
    let _: &str = &""[range];
    let owned = String::new();
    let _: &str = &owned[range];
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_text_size/tests/auto_traits.rs | crates/ruff_text_size/tests/auto_traits.rs | use {
ruff_text_size::{TextRange, TextSize},
static_assertions::assert_impl_all,
std::{
fmt::Debug,
hash::Hash,
marker::{Send, Sync},
panic::{RefUnwindSafe, UnwindSafe},
},
};
// auto traits
// Compile-time assertions: the build fails if either type stops being
// thread-safe or unwind-safe.
assert_impl_all!(TextSize: Send, Sync, Unpin, UnwindSafe, RefUnwindSafe);
assert_impl_all!(TextRange: Send, Sync, Unpin, UnwindSafe, RefUnwindSafe);
// common traits
assert_impl_all!(TextSize: Copy, Debug, Default, Hash, Ord);
assert_impl_all!(TextRange: Copy, Debug, Default, Hash, Eq);
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_combine/src/lib.rs | crates/ty_combine/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods in ty crates"
)]
use std::{collections::HashMap, hash::BuildHasher};
use ordermap::OrderMap;
use ruff_db::system::SystemPathBuf;
use ruff_python_ast::PythonVersion;
/// Combine two values, preferring the values in `self`.
///
/// The logic should follow that of Cargo's `config.toml`:
///
/// > If a key is specified in multiple config files, the values will get merged together.
/// > Numbers, strings, and booleans will use the value in the deeper config directory taking
/// > precedence over ancestor directories, where the home directory is the lowest priority.
/// > Arrays will be joined together with higher precedence items being placed later in the
/// > merged array.
///
/// ## uv Compatibility
///
/// The merging behavior differs from uv in that values with higher precedence in arrays
/// are placed later in the merged array. This is because we want to support overriding
/// earlier values and values from other configurations, including unsetting them.
/// For example: patterns coming last in file inclusion and exclusion patterns
/// allow overriding earlier patterns, matching the `gitignore` behavior.
/// Generally speaking, it feels more intuitive if later values override earlier values
/// than the other way around: `ty --exclude png --exclude "!important.png"`.
///
/// The main downside of this approach is that the ordering can be surprising in cases
/// where the option has a "first match" semantic and not a "last match" wins.
/// One such example is `extra-paths` where the semantics is given by Python:
/// the module on the first matching search path wins.
///
/// ```toml
/// [environment]
/// extra-paths = ["b", "c"]
/// ```
///
/// ```bash
/// ty --extra-paths a
/// ```
///
/// That's why a user might expect that this configuration results in `["a", "b", "c"]`,
/// because the CLI has higher precedence. However, the current implementation results in a
/// resolved extra search path of `["b", "c", "a"]`, which means `a` will be tried last.
///
/// There's an argument here that the user should be able to specify the order of the paths,
/// because only then is the user in full control of where to insert the path when specifying `extra-paths`
/// in multiple sources.
///
/// ## Macro
/// You can automatically derive `Combine` for structs with named fields by using `derive(ruff_macros::Combine)`.
pub trait Combine {
    /// Combines `self` with `other`, consuming both and returning the merged
    /// value. Defaults to delegating to [`Combine::combine_with`].
    #[must_use]
    fn combine(mut self, other: Self) -> Self
    where
        Self: Sized,
    {
        self.combine_with(other);
        self
    }
    /// Merges `other` into `self` in place; values in `self` take precedence.
    fn combine_with(&mut self, other: Self);
}
impl<T> Combine for Option<T>
where
    T: Combine,
{
    // `None` never wins over `Some`; two `Some`s are merged recursively.
    fn combine(self, other: Self) -> Self
    where
        Self: Sized,
    {
        match (self, other) {
            (Some(first), Some(second)) => Some(first.combine(second)),
            (Some(first), None) => Some(first),
            (None, fallback) => fallback,
        }
    }
    fn combine_with(&mut self, other: Self) {
        let Some(incoming) = other else {
            return;
        };
        match self {
            Some(existing) => existing.combine_with(incoming),
            slot @ None => *slot = Some(incoming),
        }
    }
}
impl<T> Combine for Vec<T> {
    fn combine_with(&mut self, other: Self) {
        // Higher-precedence items (those already in `self`) must end up
        // *after* the lower-precedence `other` items, so splice `self`'s
        // elements onto the end of `other` and keep that result.
        let mut merged = other;
        merged.append(self);
        *self = merged;
    }
}
impl<K, V, S> Combine for HashMap<K, V, S>
where
    K: Eq + std::hash::Hash,
    S: BuildHasher,
{
    fn combine_with(&mut self, other: Self) {
        // Keep `self`'s value on key collisions; only adopt entries from
        // `other` for keys that `self` doesn't already define.
        for (key, value) in other {
            self.entry(key).or_insert(value);
        }
    }
}
impl<K, V, S> Combine for OrderMap<K, V, S>
where
    K: Eq + std::hash::Hash,
    S: BuildHasher,
{
    fn combine_with(&mut self, other: Self) {
        // `self`'s entries win and keep their positions; keys only present
        // in `other` are appended in `other`'s iteration order.
        other.into_iter().for_each(|(key, value)| {
            self.entry(key).or_insert(value);
        });
    }
}
/// Implements [`Combine`] for a value that always returns `self` when combined with another value.
// Scalars and other "atomic" values have no meaningful merge: the
// higher-precedence side simply wins wholesale.
macro_rules! impl_noop_combine {
    ($name:ident) => {
        impl Combine for $name {
            #[inline(always)]
            fn combine_with(&mut self, _other: Self) {}
            #[inline(always)]
            fn combine(self, _other: Self) -> Self {
                self
            }
        }
    };
}
// Domain types that behave as opaque scalars for merging purposes.
impl_noop_combine!(SystemPathBuf);
impl_noop_combine!(PythonVersion);
// std types
impl_noop_combine!(bool);
impl_noop_combine!(usize);
impl_noop_combine!(u8);
impl_noop_combine!(u16);
impl_noop_combine!(u32);
impl_noop_combine!(u64);
impl_noop_combine!(u128);
impl_noop_combine!(isize);
impl_noop_combine!(i8);
impl_noop_combine!(i16);
impl_noop_combine!(i32);
impl_noop_combine!(i64);
impl_noop_combine!(i128);
impl_noop_combine!(String);
#[cfg(test)]
mod tests {
    use ordermap::OrderMap;
    use std::collections::HashMap;
    use super::Combine;
    #[test]
    fn combine_option() {
        // The left-hand side wins when both sides are `Some`.
        assert_eq!(Some(1).combine(Some(2)), Some(1));
        assert_eq!(None.combine(Some(2)), Some(2));
        assert_eq!(Some(1).combine(None), Some(1));
    }
    #[test]
    fn combine_vec() {
        assert_eq!(None.combine(Some(vec![1, 2, 3])), Some(vec![1, 2, 3]));
        assert_eq!(Some(vec![1, 2, 3]).combine(None), Some(vec![1, 2, 3]));
        // Higher-precedence (left) items come *after* in the merged vec.
        assert_eq!(
            Some(vec![1, 2, 3]).combine(Some(vec![4, 5, 6])),
            Some(vec![4, 5, 6, 1, 2, 3])
        );
    }
    #[test]
    fn combine_map() {
        let high: HashMap<u32, _> = HashMap::from_iter([(1, "a"), (2, "a"), (3, "a")]);
        let low: HashMap<u32, _> = HashMap::from_iter([(0, "b"), (2, "b"), (5, "b")]);
        assert_eq!(None.combine(Some(low.clone())), Some(low.clone()));
        assert_eq!(Some(high.clone()).combine(None), Some(high.clone()));
        let merged = Some(high).combine(Some(low));
        // The value from the left-hand map takes precedence on key `2`.
        assert_eq!(
            merged,
            Some(HashMap::from_iter([
                (0, "b"),
                (1, "a"),
                (2, "a"),
                (3, "a"),
                (5, "b")
            ]))
        );
    }
    #[test]
    fn combine_order_map() {
        let high: OrderMap<u32, _> = OrderMap::from_iter([(1, "a"), (2, "a"), (3, "a")]);
        let low: OrderMap<u32, _> = OrderMap::from_iter([(0, "b"), (2, "b"), (5, "b")]);
        assert_eq!(None.combine(Some(low.clone())), Some(low.clone()));
        assert_eq!(Some(high.clone()).combine(None), Some(high.clone()));
        // Left-hand entries keep their order; new keys are appended.
        let merged = Some(high).combine(Some(low));
        assert_eq!(
            merged,
            Some(OrderMap::from_iter([
                (1, "a"),
                (2, "a"),
                (3, "a"),
                (0, "b"),
                (5, "b")
            ]))
        );
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_static/src/lib.rs | crates/ty_static/src/lib.rs | #![warn(
clippy::disallowed_methods,
reason = "Prefer System trait methods over std methods in ty crates"
)]
// Re-export the environment-variable declarations as this crate's public API.
pub use env_vars::*;
mod env_vars;
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ty_static/src/env_vars.rs | crates/ty_static/src/env_vars.rs | use ruff_macros::attribute_env_vars_metadata;
/// Declares all environment variable used throughout `ty` and its crates.
pub struct EnvVars;
// NOTE(review): `#[attribute_env_vars_metadata]` presumably harvests the
// doc comments below as user-facing metadata — keep them accurate, and use
// `#[attr_hidden]` for variables that should not be documented publicly.
#[attribute_env_vars_metadata]
impl EnvVars {
    /// If set, ty will use this value as the log level for its `--verbose` output.
    /// Accepts any filter compatible with the `tracing_subscriber` crate.
    ///
    /// For example:
    ///
    /// - `TY_LOG=uv=debug` is the equivalent of `-vv` to the command line
    /// - `TY_LOG=trace` will enable all trace-level logging.
    ///
    /// See the [tracing documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#example-syntax)
    /// for more.
    pub const TY_LOG: &'static str = "TY_LOG";
    /// If set to `"1"` or `"true"`, ty will enable flamegraph profiling.
    /// This creates a `tracing.folded` file that can be used to generate flame graphs
    /// for performance analysis.
    pub const TY_LOG_PROFILE: &'static str = "TY_LOG_PROFILE";
    /// Control memory usage reporting format after ty execution.
    ///
    /// Accepted values:
    ///
    /// * `short` - Display short memory report
    /// * `mypy_primer` - Display `mypy_primer` format and suppress workspace diagnostics
    /// * `full` - Display full memory report
    #[attr_hidden]
    pub const TY_MEMORY_REPORT: &'static str = "TY_MEMORY_REPORT";
    /// Specifies an upper limit for the number of tasks ty is allowed to run in parallel.
    ///
    /// For example, how many files should be checked in parallel.
    /// This isn't the same as a thread limit. ty may spawn additional threads
    /// when necessary, e.g. to watch for file system changes or a dedicated UI thread.
    pub const TY_MAX_PARALLELISM: &'static str = "TY_MAX_PARALLELISM";
    /// Path to a `ty.toml` configuration file to use.
    ///
    /// When set, ty will use this file for configuration instead of
    /// discovering configuration files automatically.
    ///
    /// Equivalent to the `--config-file` command-line argument.
    pub const TY_CONFIG_FILE: &'static str = "TY_CONFIG_FILE";
    /// Used to detect an activated virtual environment.
    pub const VIRTUAL_ENV: &'static str = "VIRTUAL_ENV";
    /// Adds additional directories to ty's search paths.
    /// The format is the same as the shell’s PATH:
    /// one or more directory pathnames separated by os appropriate pathsep
    /// (e.g. colons on Unix or semicolons on Windows).
    pub const PYTHONPATH: &'static str = "PYTHONPATH";
    /// Used to determine the name of the active Conda environment.
    pub const CONDA_DEFAULT_ENV: &'static str = "CONDA_DEFAULT_ENV";
    /// Used to detect the path of an active Conda environment.
    /// If both `VIRTUAL_ENV` and `CONDA_PREFIX` are present, `VIRTUAL_ENV` will be preferred.
    pub const CONDA_PREFIX: &'static str = "CONDA_PREFIX";
    /// Used to determine the root install path of Conda.
    pub const CONDA_ROOT: &'static str = "_CONDA_ROOT";
    /// Filter which tests to run in mdtest.
    ///
    /// Only tests whose names contain this filter string will be executed.
    #[attr_hidden]
    pub const MDTEST_TEST_FILTER: &'static str = "MDTEST_TEST_FILTER";
    /// Switch mdtest output format to GitHub Actions annotations.
    ///
    /// If set (to any value), mdtest will output errors in GitHub Actions format.
    #[attr_hidden]
    pub const MDTEST_GITHUB_ANNOTATIONS_FORMAT: &'static str = "MDTEST_GITHUB_ANNOTATIONS_FORMAT";
    // Externally defined environment variables
    /// Specifies an upper limit for the number of threads ty uses when performing work in parallel.
    /// Equivalent to `TY_MAX_PARALLELISM`.
    ///
    /// This is a standard Rayon environment variable.
    pub const RAYON_NUM_THREADS: &'static str = "RAYON_NUM_THREADS";
    /// Path to user-level configuration directory on Unix systems.
    pub const XDG_CONFIG_HOME: &'static str = "XDG_CONFIG_HOME";
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_macros/src/combine_options.rs | crates/ruff_macros/src/combine_options.rs | use quote::{quote, quote_spanned};
use syn::{Data, DataStruct, DeriveInput, Field, Fields, Path, PathSegment, Type, TypePath};
pub(crate) fn derive_impl(input: DeriveInput) -> syn::Result<proc_macro2::TokenStream> {
let DeriveInput { ident, data, .. } = input;
match data {
Data::Struct(DataStruct {
fields: Fields::Named(fields),
..
}) => {
let output = fields
.named
.iter()
.map(handle_field)
.collect::<Result<Vec<_>, _>>()?;
Ok(quote! {
#[automatically_derived]
impl crate::configuration::CombinePluginOptions for #ident {
fn combine(self, other: Self) -> Self {
#[expect(deprecated)]
Self {
#(
#output
),*
}
}
}
})
}
_ => Err(syn::Error::new(
ident.span(),
"Can only derive CombineOptions from structs with named fields.",
)),
}
}
/// Generates the `field: self.field.or(other.field)` initializer for a single
/// named field. Every field must be an `Option<_>` so that values in `self`
/// can take precedence over `other`.
fn handle_field(field: &Field) -> syn::Result<proc_macro2::TokenStream> {
    let ident = field
        .ident
        .as_ref()
        .expect("Expected to handle named fields");
    match &field.ty {
        Type::Path(TypePath {
            path: Path { segments, .. },
            ..
        }) => match segments.first() {
            Some(PathSegment {
                ident: type_ident, ..
            }) if type_ident == "Option" => Ok(quote_spanned!(
                ident.span() => #ident: self.#ident.or(other.#ident)
            )),
            // Only `Option<_>` fields are supported; the previous message
            // also promised `Vec<_>`, which this derive never handled
            // (matching the wording used by the config macro's
            // `handle_option_group`).
            _ => Err(syn::Error::new(
                ident.span(),
                "Expected `Option<_>` as type.",
            )),
        },
        _ => Err(syn::Error::new(ident.span(), "Expected type.")),
    }
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
astral-sh/ruff | https://github.com/astral-sh/ruff/blob/8464aca795bc3580ca15fcb52b21616939cea9a9/crates/ruff_macros/src/config.rs | crates/ruff_macros/src/config.rs | use proc_macro2::{TokenStream, TokenTree};
use quote::{quote, quote_spanned};
use syn::meta::ParseNestedMeta;
use syn::spanned::Spanned;
use syn::{
AngleBracketedGenericArguments, Attribute, Data, DataStruct, DeriveInput, ExprLit, Field,
Fields, Lit, LitStr, Meta, Path, PathArguments, PathSegment, Type, TypePath,
};
use ruff_python_trivia::textwrap::dedent;
/// Derives `OptionsMetadata` for a struct with named fields: `#[option]`
/// fields are recorded as option fields, `#[option_group]` fields as nested
/// option sets, and `#[serde(flatten)]` fields are recorded inline on the
/// parent. The struct's own doc comments become the set's documentation.
pub(crate) fn derive_impl(input: DeriveInput) -> syn::Result<TokenStream> {
    let DeriveInput {
        ident,
        data,
        attrs: struct_attributes,
        ..
    } = input;
    match data {
        Data::Struct(DataStruct {
            fields: Fields::Named(fields),
            ..
        }) => {
            // One recording statement per recognized field.
            let mut output = vec![];
            for field in &fields.named {
                if let Some(attr) = field
                    .attrs
                    .iter()
                    .find(|attr| attr.path().is_ident("option"))
                {
                    output.push(handle_option(field, attr)?);
                } else if field
                    .attrs
                    .iter()
                    .any(|attr| attr.path().is_ident("option_group"))
                {
                    output.push(handle_option_group(field)?);
                } else if let Some(serde) = field
                    .attrs
                    .iter()
                    .find(|attr| attr.path().is_ident("serde"))
                {
                    // If a field has the `serde(flatten)` attribute, flatten the options into the parent
                    // by calling `Type::record` instead of `visitor.visit_set`
                    if let (Type::Path(ty), Meta::List(list)) = (&field.ty, &serde.meta) {
                        // Scan the raw `serde(...)` tokens for a `flatten` ident.
                        for token in list.tokens.clone() {
                            if let TokenTree::Ident(ident) = token {
                                if ident == "flatten" {
                                    output.push(quote_spanned!(
                                        ty.span() => (<#ty>::record(visit))
                                    ));
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            let docs = struct_attributes
                .iter()
                .filter(|attr| attr.path().is_ident("doc"));
            // Convert the list of `doc` attributes into a single string.
            let doc = dedent(
                &docs
                    .map(parse_doc)
                    .collect::<syn::Result<Vec<_>>>()?
                    .join("\n"),
            )
            .trim_matches('\n')
            .to_string();
            // Only emit `documentation()` when the struct actually has docs.
            let documentation = if doc.is_empty() {
                None
            } else {
                Some(quote!(
                    fn documentation() -> Option<&'static str> {
                        Some(&#doc)
                    }
                ))
            };
            Ok(quote! {
                #[automatically_derived]
                impl ruff_options_metadata::OptionsMetadata for #ident {
                    fn record(visit: &mut dyn ruff_options_metadata::Visit) {
                        #(#output);*
                    }
                    #documentation
                }
            })
        }
        _ => Err(syn::Error::new(
            ident.span(),
            "Can only derive ConfigurationOptions from structs with named fields.",
        )),
    }
}
/// For a field with type `Option<Foobar>` where `Foobar` itself is a struct
/// deriving `ConfigurationOptions`, create code that calls retrieves options
/// from that group: `Foobar::get_available_options()`
fn handle_option_group(field: &Field) -> syn::Result<proc_macro2::TokenStream> {
    let ident = field
        .ident
        .as_ref()
        .expect("Expected to handle named fields");
    match &field.ty {
        Type::Path(TypePath {
            path: Path { segments, .. },
            ..
        }) => match segments.first() {
            // Unwrap the `Option<...>` to get at the inner group type.
            Some(PathSegment {
                ident: type_ident,
                arguments:
                    PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. }),
            }) if type_ident == "Option" => {
                let path = &args[0];
                // Option names are exposed in kebab-case (e.g. `line_length`
                // -> `line-length`).
                let kebab_name = LitStr::new(&ident.to_string().replace('_', "-"), ident.span());
                Ok(quote_spanned!(
                    ident.span() => (visit.record_set(#kebab_name, ruff_options_metadata::OptionSet::of::<#path>()))
                ))
            }
            _ => Err(syn::Error::new(
                ident.span(),
                "Expected `Option<_>` as type.",
            )),
        },
        _ => Err(syn::Error::new(ident.span(), "Expected type.")),
    }
}
/// Parse a `doc` attribute into it a string literal.
fn parse_doc(doc: &Attribute) -> syn::Result<String> {
match &doc.meta {
syn::Meta::NameValue(syn::MetaNameValue {
value:
syn::Expr::Lit(ExprLit {
lit: Lit::Str(lit_str),
..
}),
..
}) => Ok(lit_str.value()),
_ => Err(syn::Error::new(doc.span(), "Expected doc attribute.")),
}
}
/// Parse an `#[option(doc="...", default="...", value_type="...",
/// example="...")]` attribute and return data in the form of an `OptionField`.
fn handle_option(field: &Field, attr: &Attribute) -> syn::Result<proc_macro2::TokenStream> {
    // The field's doc comments become the option's documentation, so they
    // are mandatory.
    let docs: Vec<&Attribute> = field
        .attrs
        .iter()
        .filter(|attr| attr.path().is_ident("doc"))
        .collect();
    if docs.is_empty() {
        return Err(syn::Error::new(
            field.span(),
            "Missing documentation for field",
        ));
    }
    // Convert the list of `doc` attributes into a single string.
    let doc = dedent(
        &docs
            .into_iter()
            .map(parse_doc)
            .collect::<syn::Result<Vec<_>>>()?
            .join("\n"),
    )
    .trim_matches('\n')
    .to_string();
    let ident = field
        .ident
        .as_ref()
        .expect("Expected to handle named fields");
    let FieldAttributes {
        default,
        value_type,
        example,
        scope,
    } = parse_field_attributes(attr)?;
    // Options are exposed under kebab-case names.
    let kebab_name = LitStr::new(&ident.to_string().replace('_', "-"), ident.span());
    let scope = if let Some(scope) = scope {
        quote!(Some(#scope))
    } else {
        quote!(None)
    };
    // If the field also carries `#[deprecated(...)]`, forward its
    // `since`/`note` payload into the generated metadata.
    let deprecated = if let Some(deprecated) = field
        .attrs
        .iter()
        .find(|attr| attr.path().is_ident("deprecated"))
    {
        fn quote_option(option: Option<String>) -> TokenStream {
            match option {
                None => quote!(None),
                Some(value) => quote!(Some(#value)),
            }
        }
        let deprecated = parse_deprecated_attribute(deprecated)?;
        let note = quote_option(deprecated.note);
        let since = quote_option(deprecated.since);
        quote!(Some(ruff_options_metadata::Deprecated { since: #since, message: #note }))
    } else {
        quote!(None)
    };
    Ok(quote_spanned!(
        ident.span() => {
            visit.record_field(#kebab_name, ruff_options_metadata::OptionField{
                doc: &#doc,
                default: &#default,
                value_type: &#value_type,
                example: &#example,
                scope: #scope,
                deprecated: #deprecated
            })
        }
    ))
}
/// The payload parsed from a single `#[option(...)]` attribute.
#[derive(Debug)]
struct FieldAttributes {
    // Value of `default = "..."` (mandatory).
    default: String,
    // Value of `value_type = "..."` (mandatory).
    value_type: String,
    // Value of `example = "..."`, dedented (mandatory).
    example: String,
    // Value of `scope = "..."` (optional).
    scope: Option<String>,
}
/// Parses an `#[option(default = "..", value_type = "..", example = "..",
/// scope = "..")]` attribute; `default`, `value_type`, and `example` are
/// mandatory, `scope` is optional.
fn parse_field_attributes(attribute: &Attribute) -> syn::Result<FieldAttributes> {
    let mut default = None;
    let mut value_type = None;
    let mut example = None;
    let mut scope = None;
    attribute.parse_nested_meta(|meta| {
        if meta.path.is_ident("default") {
            default = Some(get_string_literal(&meta, "default", "option")?.value());
        } else if meta.path.is_ident("value_type") {
            value_type = Some(get_string_literal(&meta, "value_type", "option")?.value());
        } else if meta.path.is_ident("scope") {
            scope = Some(get_string_literal(&meta, "scope", "option")?.value());
        } else if meta.path.is_ident("example") {
            // Fixed: this previously passed "value_type" as the meta name, so
            // a malformed `example` was reported as a `value_type` error.
            let example_text = get_string_literal(&meta, "example", "option")?.value();
            // Examples are written indented in source; normalize them.
            example = Some(dedent(&example_text).trim_matches('\n').to_string());
        } else {
            // Fixed: this message previously began with "Deprecated meta ...",
            // copy-pasted from `parse_deprecated_attribute`; these are
            // `#[option]` metas, not `#[deprecated]` metas.
            return Err(syn::Error::new(
                meta.path.span(),
                format!(
                    "Meta {:?} is not supported by ruff's option macro.",
                    meta.path.get_ident()
                ),
            ));
        }
        Ok(())
    })?;
    let Some(default) = default else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `default` field is missing in `#[option]` attribute. Specify the default using `#[option(default=\"..\")]`.",
        ));
    };
    let Some(value_type) = value_type else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `value_type` field is missing in `#[option]` attribute. Specify the value type using `#[option(value_type=\"..\")]`.",
        ));
    };
    let Some(example) = example else {
        return Err(syn::Error::new(
            attribute.span(),
            "Mandatory `example` field is missing in `#[option]` attribute. Add an example using `#[option(example=\"..\")]`.",
        ));
    };
    Ok(FieldAttributes {
        default,
        value_type,
        example,
        scope,
    })
}
/// Parses a `#[deprecated(note = "...", since = "...")]` attribute into its
/// `note`/`since` parts; both are optional.
fn parse_deprecated_attribute(attribute: &Attribute) -> syn::Result<DeprecatedAttribute> {
    let mut deprecated = DeprecatedAttribute::default();
    attribute.parse_nested_meta(|meta| {
        if meta.path.is_ident("note") {
            deprecated.note = Some(get_string_literal(&meta, "note", "deprecated")?.value());
        } else if meta.path.is_ident("since") {
            deprecated.since = Some(get_string_literal(&meta, "since", "deprecated")?.value());
        } else {
            // Any other key in `#[deprecated(...)]` is rejected.
            return Err(syn::Error::new(
                meta.path.span(),
                format!(
                    "Deprecated meta {:?} is not supported by ruff's option macro.",
                    meta.path.get_ident()
                ),
            ));
        }
        Ok(())
    })?;
    Ok(deprecated)
}
fn get_string_literal(
meta: &ParseNestedMeta,
meta_name: &str,
attribute_name: &str,
) -> syn::Result<syn::LitStr> {
let expr: syn::Expr = meta.value()?.parse()?;
let mut value = &expr;
while let syn::Expr::Group(e) = value {
value = &e.expr;
}
if let syn::Expr::Lit(ExprLit {
lit: Lit::Str(lit), ..
}) = value
{
let suffix = lit.suffix();
if !suffix.is_empty() {
return Err(syn::Error::new(
lit.span(),
format!("unexpected suffix `{suffix}` on string literal"),
));
}
Ok(lit.clone())
} else {
Err(syn::Error::new(
expr.span(),
format!("expected {attribute_name} attribute to be a string: `{meta_name} = \"...\"`"),
))
}
}
/// The `since`/`note` payload of a `#[deprecated(...)]` attribute; both
/// parts are optional.
#[derive(Default, Debug)]
struct DeprecatedAttribute {
    since: Option<String>,
    note: Option<String>,
}
| rust | MIT | 8464aca795bc3580ca15fcb52b21616939cea9a9 | 2026-01-04T15:31:59.413821Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.